from django.urls import path, include
from django.conf import settings
from django.conf.urls.static import static
from rest_framework.documentation import include_docs_urls
import notifications.urls
urlpatterns = [
path(r'', include('rbac.urls')),
path(r'', include('cmdb.urls')),
path(r'', include('rmms.urls')),
path(r'', include('workflow.urls')),
path(r'', include('notice.urls')),
path(r'', include('deployment.urls')),
path(r'', include('filemanage.urls')),
path(r'', include('codegenerator.urls')),
path(r'', include('cms.urls')),
path('docs/', include_docs_urls()),
path('inbox/notifications/', include(notifications.urls, namespace='notifications')),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
python
|
# Copyright 2018-2021 Jakub Kuczys (https://github.com/jack1142)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import textwrap
from pathlib import Path
import requests
ROOT_PATH = Path(__file__).parent.parent.absolute()
VARIATIONS_FILE = ROOT_PATH / "emojiinfo/variations.py"
r = requests.get(
"https://www.unicode.org/Public/UCD/latest/ucd/emoji/emoji-variation-sequences.txt"
)
r.raise_for_status()
backslash_emoji_reprs = []
for line in r.text.splitlines():
if not line or line.startswith("#"):
continue
variation_sequence = list(
map(
lambda x: chr(int(x, base=16)),
line.split(";", maxsplit=1)[0].strip().split(),
)
)
if variation_sequence[1] != "\N{VARIATION SELECTOR-16}":
continue
emoji = variation_sequence[0]
backslash_repr = emoji.encode("ascii", "backslashreplace").decode("utf-8")
backslash_emoji_reprs.append(backslash_repr)
inner_code = textwrap.indent(
",\n".join(f'"{backslash_repr}"' for backslash_repr in backslash_emoji_reprs),
" ",
)
code = f"EMOJIS_WITH_VARIATIONS = {{\n{inner_code},\n}}\n"
with VARIATIONS_FILE.open("w", encoding="utf-8", newline="\n") as fp:
fp.write(code)
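# For reference, the generated emojiinfo/variations.py has the following shape
# (the two entries shown here are only illustrative; the real file lists every
# base character whose emoji variation sequence ends in VARIATION SELECTOR-16):
#
#   EMOJIS_WITH_VARIATIONS = {
#       "\xa9",
#       "\u2764",
#       ...
#   }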
|
python
|
import logging
import os
import numpy as np
from .siamese_net import DNN
logger = logging.getLogger(__name__)
package_directory = os.path.dirname(os.path.abspath(__file__))
similarity_model_pretrained = os.path.join(package_directory, "model")
class SimilarityModel:
def __init__(self):
self.model = None
def predict(self, file_feature_dict):
"""
Args:
file_feature_dict: A dictionary mapping from original (path,hash)
to video-level feature tensor.
"""
# Get array of (path,hash) and array of corresponding feature
# values in the same order
keys, features = zip(*file_feature_dict.items())
features = np.array([tensor[0] for tensor in features])
embeddings = self.predict_from_features(features)
return dict(zip(keys, embeddings))
def predict_from_features(self, features):
# Create model
if self.model is None:
logger.info("Creating similarity model for shape %s", features.shape)
            self.model = DNN(
                features.shape[1],
                None,
                similarity_model_pretrained,
                load_model=True,
                trainable=False,
            )
embeddings = self.model.embeddings(features)
embeddings = np.nan_to_num(embeddings)
return embeddings
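# --- Usage sketch (illustrative, not part of the module). The file names, hashes
# and the 2048-wide feature vectors below are assumptions for the example;
# predict() only requires a mapping from (path, hash) tuples to video-level
# feature tensors whose first row is the feature vector, and running this demo
# assumes the pretrained model bundled next to this module can be loaded.
if __name__ == "__main__":
    demo_features = {
        ("videos/a.mp4", "hash-a"): np.random.rand(1, 2048),
        ("videos/b.mp4", "hash-b"): np.random.rand(1, 2048),
    }
    model = SimilarityModel()
    embeddings_by_file = model.predict(demo_features)  # {(path, hash): embedding}
    print({key: emb.shape for key, emb in embeddings_by_file.items()})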
|
python
|
from __future__ import annotations
from yarl import URL
import io
import typing
import os
from .factory import construct_repr
if typing.TYPE_CHECKING:
from .factory import HttpHandler
__all__ = ("Image",)
class Image:
"""
A class representing an `Image`.
Attributes:
url (str): URL of the image.
"""
__slots__ = ("url", "_http")
def __init__(self, url: str, http: HttpHandler) -> None:
self.url = url
self._http = http
def __str__(self) -> str:
"""
Returns:
URL of the image.
"""
return self.url
def __repr__(self) -> str:
"""
Returns:
Representation of `Image`.
"""
return construct_repr(self.__class__, self)
async def read(self) -> io.BytesIO:
"""
Reads bytes of the image.
Returns:
An [io.BytesIO](https://docs.python.org/3/library/io.html#io.BytesIO) object.
""" # noqa E501
read_ret = await self._http._read(url=self.url)
return read_ret
async def save(
self,
fp: typing.Union[
typing.Union[str, bytes, os.PathLike[str], os.PathLike[bytes]], int
],
) -> int:
"""
Saves the image at the given file path.
Args:
fp: Path where the file has to be saved.
Returns:
            The number of bytes written (int).
"""
saved = await self._http._save(self.url, fp)
return saved
def deconstruct_url(self) -> URL:
"""
Makes a [yarl.URL](https://yarl.readthedocs.io/en/latest/api.html#yarl.URL) object out of the image url.
Returns:
A [yarl.URL](https://yarl.readthedocs.io/en/latest/api.html#yarl.URL) object.
""" # noqa E501
url = URL(self.url)
return url
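# --- Usage sketch (illustrative, not part of the library). `http` stands for an
# HttpHandler instance from .factory; how it is constructed is not shown in this
# module, so the coroutine below only sketches the public Image API and is never
# called here.
async def _image_example(http: "HttpHandler") -> None:
    image = Image("https://example.com/cat.png", http)
    data = await image.read()              # io.BytesIO with the raw image bytes
    written = await image.save("cat.png")  # int: number of bytes written
    print(len(data.getvalue()), written, image.deconstruct_url().host)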
|
python
|
import logging
import enum
import copy
import telegram.error
from telegram import (
InlineKeyboardButton,
InlineKeyboardMarkup,
ParseMode
)
from app.entities import KnowledgeStatus
from app.card import Card
logging.basicConfig(
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
level=logging.INFO
)
class KnowledgeCard(Card):
def __init__(self, bot, word, listener):
self._view = KnowledgeCardView(bot)
self._model = KnowledgeCardModel(
view=self._view,
word=word,
listener=listener,
)
self._controller = KnowledgeCardController(self._model)
self._is_deleted = False
def set_old(self):
self._model.is_old = True
@property
def is_old(self):
return self._model.is_old
    def set_as_deleted(self, update, context):
        self._model.set_as_deleted(update, context)
        # Track deletion on the card itself so that is_deleted() reflects it.
        self._is_deleted = True
def is_deleted(self) -> bool:
return self._is_deleted
def get_word(self):
return copy.copy(self._model._word)
def start(self, update, context) -> str:
return self._model.start(update, context)
def button_clicked(self, update, context):
self._controller.button_clicked(update, context)
class Knowledge(enum.Enum):
true = "✅"
false = "❌"
class KnowledgeCardModel:
def __init__(self, view, word, listener):
self._view = view
self._word = word
self._listener = listener
self._message_id = None
self.is_old = False
def start(self, update, context) -> str:
self._message_id = self._view.send_card(
update=update,
word=self._word,
translation=None,
)
return self._message_id
def show_translation(self, update, context, knowledge):
knowledge_status = KnowledgeStatus.new_word_know
if knowledge == Knowledge.false:
knowledge_status = KnowledgeStatus.new_word_forgot
self._view.update_card(
update=update,
translation=self._word.get_translation() + " " + knowledge.value,
)
self._listener.on_correct_answer_clicked(
update=update,
context=context,
knowledge_status=knowledge_status,
)
def set_as_deleted(self, update, context):
self._view.update_card_as_deleted(
update=update,
context=context,
message_id=self._message_id,
)
self._is_deleted = True
class KnowledgeCardController:
def __init__(self, model):
self._model = model
def button_clicked(self, update, context):
query_data = update.callback_query.data
if query_data == "know":
self._model.show_translation(
update=update,
context=context,
knowledge=Knowledge.true,
)
elif query_data == "forgot":
self._model.show_translation(
update=update,
context=context,
knowledge=Knowledge.false,
)
class KnowledgeCardView:
def __init__(self, bot):
self._bot = bot
@staticmethod
def _get_card_markup(translation=None):
keyboard = [[
InlineKeyboardButton(
text="Know " + Knowledge.true.value,
callback_data="know"
),
InlineKeyboardButton(
text="Forgot " + Knowledge.false.value,
callback_data="forgot"
),
]]
if translation is not None:
keyboard.pop(0)
keyboard.append([
InlineKeyboardButton(
text=translation,
callback_data="translation")
])
return InlineKeyboardMarkup(keyboard)
def send_card(self, update, word, translation):
markup = KnowledgeCardView._get_card_markup(
translation=translation,
)
return self._bot.send_message(
chat_id=update.effective_message.chat_id,
text="*"+word.get_word()+"*",
reply_markup=markup,
parse_mode=ParseMode.MARKDOWN
).message_id
def update_card(self, update, translation):
reply_markup = KnowledgeCardView._get_card_markup(
translation=translation,
)
try:
return self._bot.edit_message_reply_markup(
chat_id=update.effective_message.chat_id,
message_id=update.effective_message.message_id,
reply_markup=reply_markup
)
except telegram.error.BadRequest:
return None
def update_card_as_deleted(self, update, context, message_id):
return self._bot.edit_message_reply_markup(
chat_id=update.effective_message.chat_id,
message_id=message_id,
reply_markup=None
)
|
python
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import socket
default_port = 8003 # default port when adding a new server
listen_port = 8003 # server listens on this port
# pick the first non-loopback IPv4 address reported for this host
my_address = [ip for ip in socket.gethostbyname_ex(socket.gethostname())[2]
              if not ip.startswith("127.")][0]
dns_module_listen_port = 8004
default_ttl = 60 # time to live in seconds
default_record_lifetime = 3600 * 24 * 30 # 1 month
|
python
|
import datetime
import logging
from django.core.management.base import BaseCommand
from django.db import connection, transaction
from framework.celery_tasks import app as celery_app
logger = logging.getLogger(__name__)
TABLES_TO_POPULATE_WITH_MODIFIED = [
'addons_zotero_usersettings',
'addons_dropbox_usersettings',
'addons_dropbox_nodesettings',
'addons_figshare_nodesettings',
'addons_figshare_usersettings',
'addons_forward_nodesettings',
'addons_github_nodesettings',
'addons_github_usersettings',
'addons_gitlab_nodesettings',
'addons_gitlab_usersettings',
'addons_googledrive_nodesettings',
'addons_googledrive_usersettings',
'addons_mendeley_nodesettings',
'addons_mendeley_usersettings',
'addons_onedrive_nodesettings',
'addons_onedrive_usersettings',
'addons_osfstorage_nodesettings',
'addons_osfstorage_usersettings',
'addons_bitbucket_nodesettings',
'addons_bitbucket_usersettings',
'addons_owncloud_nodesettings',
'addons_box_nodesettings',
'addons_owncloud_usersettings',
'addons_box_usersettings',
'addons_dataverse_nodesettings',
'addons_dataverse_usersettings',
'addons_s3_nodesettings',
'addons_s3_usersettings',
'addons_twofactor_usersettings',
'addons_wiki_nodesettings',
'addons_zotero_nodesettings'
]
UPDATE_DELETED_WITH_MODIFIED = """UPDATE {} SET deleted=modified
WHERE id IN (SELECT id FROM {} WHERE is_deleted AND deleted IS NULL LIMIT {}) RETURNING id;"""
@celery_app.task(name='management.commands.addon_deleted_date')
def populate_deleted(dry_run=False, page_size=1000):
with transaction.atomic():
for table in TABLES_TO_POPULATE_WITH_MODIFIED:
run_statements(UPDATE_DELETED_WITH_MODIFIED, page_size, table)
if dry_run:
raise RuntimeError('Dry Run -- Transaction rolled back')
def run_statements(statement, page_size, table):
logger.info('Populating deleted column in table {}'.format(table))
with connection.cursor() as cursor:
cursor.execute(statement.format(table, table, page_size))
rows = cursor.fetchall()
if rows:
logger.info('Table {} still has rows to populate'.format(table))
class Command(BaseCommand):
help = '''Populates new deleted field for various models. Ensure you have run migrations
before running this script.'''
def add_arguments(self, parser):
        parser.add_argument(
            '--dry_run',
            action='store_true',
            help='Run the queries inside a transaction and roll it back instead of committing',
        )
parser.add_argument(
'--page_size',
type=int,
default=1000,
help='How many rows to process at a time',
)
def handle(self, *args, **options):
script_start_time = datetime.datetime.now()
logger.info('Script started time: {}'.format(script_start_time))
logger.debug(options)
dry_run = options['dry_run']
page_size = options['page_size']
if dry_run:
logger.info('DRY RUN')
populate_deleted(dry_run, page_size)
script_finish_time = datetime.datetime.now()
logger.info('Script finished time: {}'.format(script_finish_time))
logger.info('Run time {}'.format(script_finish_time - script_start_time))
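# Example invocations (illustrative; they assume this module is installed as the
# management command ``addon_deleted_date``, matching the celery task name above):
#
#   python manage.py addon_deleted_date --page_size 500
#   python manage.py addon_deleted_date --dry_run
#
# The same work can also be scheduled through celery:
#
#   populate_deleted.delay(dry_run=False, page_size=1000)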
|
python
|
#!/usr/bin/env python
# Generates the mesh description file (default: domain.yaml) which is used in various postprocessing scripts
import argparse
import numpy as np
import sys
import os
import re
# parse command line options
parser = argparse.ArgumentParser(description="Generates a cartesian mesh with a uniform region surrounded by a stretched grid", formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("--output", dest="output", help="name of file generated", default="domain.yaml")
parser.add_argument("--input", dest="input", help="name of input file", default="gridOptions")
args = parser.parse_args()
inFile = os.path.expandvars("${CUIBM_DIR}/scripts/grid/"+args.input)
f = open(inFile, 'r')
for line in f:
b = filter(None, re.split('\[|\]|\n|:|,| ', line))
if b != []:
if b[0] == 'DomainBottomLeft':
d_blx = float(b[1])
d_bly = float(b[2])
elif b[0] == 'DomainTopRight':
d_trx = float(b[1])
d_try = float(b[2])
elif b[0] == 'UniformRegionBottomLeft':
u_blx = float(b[1])
u_bly = float(b[2])
elif b[0] == 'UniformRegionTopRight':
u_trx = float(b[1])
u_try = float(b[2])
elif b[0] == 'FinestMeshSpacing':
h = float(b[1])
elif b[0] == 'StretchingRatio':
sr = float(b[1])
print '-'*120
print "Domain:"
print "\t(%s, %s) to (%s, %s)\n" % (str(d_blx), str(d_bly), str(d_trx), str(d_try))
print 'Uniform region:'
print "\t(%s, %s) to (%s, %s)\n" % (str(u_blx), str(u_bly), str(u_trx), str(u_try))
unx = int((u_trx-u_blx)/h + 0.5)
chx = abs((u_trx-u_blx)/h - unx)
uny = int((u_try-u_bly)/h + 0.5)
chy = abs((u_try-u_bly)/h - uny)
if chx > 1e-6 or chy > 1e-6:
print "Non-integer number of cells in the uniform region! Choose a different h or change the extent of the uniform region.\n"
sys.exit(0)
f = open(args.output, 'w')
# domain description in the x-direction
nx = unx
f.write("- direction: x\n")
f.write(" start: %s\n" % str(d_blx))
f.write(" subDomains:\n")
n = 2
L = (u_blx - d_blx)
h1 = L * (sr - 1)/(sr**n - 1)
while h1>h:
n = n+1
h1 = L * (sr - 1)/(sr**n - 1)
n = n-1
nx = nx + n
h1 = L * (sr - 1)/(sr**n - 1)
h2 = h1 * sr**(n-1)
print "-X : h1: %.6f h2: %.6f AR: %.1f" % (h1, h2, h2/h1)
f.write(" - end: %s\n" % str(u_blx))
f.write(" cells: %d\n" % n)
f.write(" stretchRatio: %s\n" % str(1.0/sr))
f.write(" - end: %s\n" % str(u_trx))
f.write(" cells: %d\n" % unx)
f.write(" stretchRatio: 1.0\n")
n = 2
L = (d_trx - u_trx)
h1 = L * (sr - 1)/(sr**n - 1)
while h1>h:
n = n+1
h1 = L * (sr - 1)/(sr**n - 1)
n = n-1
nx = nx + n
h1 = L * (sr - 1)/(sr**n - 1)
h2 = h1 * sr**(n-1)
print "+X : h1: %.6f h2: %.6f AR: %.1f" % (h1, h2, h2/h1)
f.write(" - end: %s\n" % str(d_trx))
f.write(" cells: %d\n" % n)
f.write(" stretchRatio: %s\n\n" % str(sr))
# domain description in the y-direction
ny = uny
f.write("- direction: y\n")
f.write(" start: %s\n" % str(d_bly))
f.write(" subDomains:\n")
n = 2
L = (u_bly - d_bly)
h1 = L * (sr - 1)/(sr**n - 1)
while h1>h:
n = n+1
h1 = L * (sr - 1)/(sr**n - 1)
n = n-1
ny = ny + n
h1 = L * (sr - 1)/(sr**n - 1)
h2 = h1 * sr**(n-1)
print "-Y : h1: %.6f h2: %.6f AR: %.1f" % (h1, h2, h2/h1)
f.write(" - end: %s\n" % str(u_bly))
f.write(" cells: %d\n" % n)
f.write(" stretchRatio: %s\n" % str(1.0/sr))
f.write(" - end: %s\n" % str(u_try))
f.write(" cells: %d\n" % uny)
f.write(" stretchRatio: 1.0\n")
n = 2
L = (d_try - u_try)  # length of the stretched region above the uniform region
h1 = L * (sr - 1)/(sr**n - 1)
while h1>h:
n = n+1
h1 = L * (sr - 1)/(sr**n - 1)
n = n-1
ny = ny + n
h1 = L * (sr - 1)/(sr**n - 1)
h2 = h1 * sr**(n-1)
print "+Y : h1: %.6f h2: %.6f AR: %.1f" % (h1, h2, h2/h1)
print ''
f.write(" - end: %s\n" % str(d_try))
f.write(" cells: %d\n" % n)
f.write(" stretchRatio: %s\n" % str(sr))
f.close()
print "h : " + str(h)
print "sr : " + str(sr) + '\n'
print "Mesh size : " + str(nx) + " x " + str(ny) + '\n'
print "Domain information written to file " + args.output
print '-'*120
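# Example input file (hypothetical values; by default the script reads
# ${CUIBM_DIR}/scripts/grid/gridOptions, or the file passed via --input):
#
#   DomainBottomLeft: [-15.0, -15.0]
#   DomainTopRight: [15.0, 15.0]
#   UniformRegionBottomLeft: [-0.6, -0.6]
#   UniformRegionTopRight: [0.6, 0.6]
#   FinestMeshSpacing: 0.01
#   StretchingRatio: 1.02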
|
python
|
"""
App entry point which register FastAPI routers.
More info about used app structure: https://fastapi.tiangolo.com/tutorial/bigger-applications/
If you want to create a new API endpoint add endpoint handler to existed router or
create a new module in `routes` directory.
"""
from fastapi import FastAPI
from fastapi.routing import APIRoute
import ppm_telegram_bot
from ppm_telegram_bot.routers import (
telegram,
triggers,
)
app = FastAPI(title="PPM Telegram Bot", version=ppm_telegram_bot.__version__)
app.include_router(telegram.router, prefix="/telegram", tags=["telegram"])
app.include_router(triggers.router, prefix="/triggers", tags=["triggers"])
# Simplify route names in OpenAPI. Idea from https://fastapi-utils.davidmontague.xyz/user-guide/openapi/
for route in app.routes:
if isinstance(route, APIRoute):
route.operation_id = route.name
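# --- Sketch of a new router module (hypothetical ppm_telegram_bot/routers/health.py),
# following the pattern described in the module docstring; it is not an existing
# module of this project:
#
#   from fastapi import APIRouter
#
#   router = APIRouter()
#
#   @router.get("/ping")
#   def ping() -> dict:
#       return {"status": "ok"}
#
# It would then be wired up in this file with:
#
#   app.include_router(health.router, prefix="/health", tags=["health"])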
|
python
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import os
from matplotlib import cm
from scipy.interpolate import interp1d
from scipy.spatial import distance
from scipy.optimize import differential_evolution
CELL_INFO_DIR = os.path.join('data-share', 'raw', 'cell_info')
def blend_electrodes(electrode_1, electrode_2_pos, electrode_2_neg, x_2):
"""
Inputs:
electrode_1: Primary material in electrode, typically Gr. DataFrame supplied with SOC evenly spaced and voltage.
electrode_2: Secondary material in electrode, typically Si. DataFrame supplied with SOC evenly spaced and
voltage as an additional column.
x_2: Fraction of electrode_2 material. Supplied as scalar value.
"""
if electrode_2_pos.empty:
df_blended = electrode_1
return df_blended
if electrode_2_neg.empty:
electrode_2 = electrode_2_pos
x_2 = np.abs(x_2)
elif x_2 > 0:
electrode_2 = electrode_2_pos
else:
electrode_2 = electrode_2_neg
x_2 = np.abs(x_2)
electrode_1_interper = interp1d(electrode_1['Voltage_aligned'], electrode_1['SOC_aligned'], bounds_error=False,
fill_value='extrapolate')
electrode_2_interper = interp1d(electrode_2['Voltage_aligned'], electrode_2['SOC_aligned'], bounds_error=False,
fill_value='extrapolate')
voltage_vec = np.linspace(np.min((np.min(electrode_1['Voltage_aligned']),
np.min(electrode_2['Voltage_aligned']))),
np.max((np.max(electrode_1['Voltage_aligned']),
np.max(electrode_2['Voltage_aligned']))),
1001)
electrode_1_voltage_aligned = pd.DataFrame(electrode_1_interper(voltage_vec), columns=['SOC'])
electrode_2_voltage_aligned = pd.DataFrame(electrode_2_interper(voltage_vec), columns=['SOC'])
electrode_1_voltage_aligned['Voltage'] = voltage_vec
electrode_2_voltage_aligned['Voltage'] = voltage_vec
df_blend_voltage_aligned = pd.DataFrame(
(1 - x_2) * electrode_1_voltage_aligned['SOC'] + x_2 * electrode_2_voltage_aligned['SOC'], columns=['SOC'])
df_blend_voltage_aligned['Voltage'] = electrode_1_voltage_aligned.merge(electrode_2_voltage_aligned,
on='Voltage')['Voltage']
df_blended_interper = interp1d(df_blend_voltage_aligned['SOC'], df_blend_voltage_aligned['Voltage'],
bounds_error=False)
soc_vec = np.linspace(0, 100, 1001)
df_blended = pd.DataFrame(df_blended_interper(soc_vec), columns=['Voltage_aligned'])
df_blended['SOC_aligned'] = soc_vec
# Modify NE to fully span 100% SOC within its valid voltage window
df_blended_soc_mod_interper = interp1d(df_blended['SOC_aligned'].loc[~df_blended['Voltage_aligned'].isna()],
df_blended['Voltage_aligned'].loc[~df_blended['Voltage_aligned'].isna()],
bounds_error=False)
soc_vec = np.linspace(np.min(df_blended['SOC_aligned'].loc[~df_blended['Voltage_aligned'].isna()]),
np.max(df_blended['SOC_aligned'].loc[~df_blended['Voltage_aligned'].isna()]),
1001)
df_blended_soc_mod = pd.DataFrame(df_blended_soc_mod_interper(soc_vec), columns=['Voltage_aligned'])
df_blended_soc_mod['SOC_aligned'] = soc_vec / np.max(soc_vec) * 100
return df_blended_soc_mod
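# --- Usage sketch for blend_electrodes (illustrative only; defined but never called here).
def _blend_electrodes_example():
    """
    The two half-cell curves below are synthetic, not measured data; the function only
    requires DataFrames with 'SOC_aligned' and 'Voltage_aligned' columns.
    """
    soc = np.linspace(0, 100, 1001)
    gr_like = pd.DataFrame({'SOC_aligned': soc, 'Voltage_aligned': 0.4 - 0.3 * soc / 100})
    si_like = pd.DataFrame({'SOC_aligned': soc, 'Voltage_aligned': 0.6 - 0.5 * soc / 100})
    # Blend 90% of the primary (graphite-like) curve with 10% of the secondary (silicon-like) one.
    return blend_electrodes(gr_like, si_like, pd.DataFrame(), 0.1)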
def get_halfcell_voltages(pe_out_centered, ne_out_centered):
pe_minus_ne_centered = pd.DataFrame(pe_out_centered['Voltage_aligned'] - ne_out_centered['Voltage_aligned'],
columns=['Voltage_aligned'])
pe_minus_ne_centered['SOC_aligned'] = pe_out_centered['SOC_aligned']
soc_upper = pe_minus_ne_centered.iloc[
np.argmin(np.abs(pe_minus_ne_centered.Voltage_aligned - 4.20))].SOC_aligned
soc_lower = pe_minus_ne_centered.iloc[
np.argmin(np.abs(pe_minus_ne_centered.Voltage_aligned - 2.70))].SOC_aligned
pe_upper_voltage = pe_out_centered.loc[pe_out_centered.SOC_aligned == soc_upper].Voltage_aligned.values[0]
pe_lower_voltage = pe_out_centered.loc[pe_out_centered.SOC_aligned == soc_lower].Voltage_aligned.values[0]
pe_upper_soc = ((pe_out_centered.loc[pe_out_centered.Voltage_aligned == pe_upper_voltage]['SOC_aligned'] -
np.min(pe_out_centered['SOC_aligned'].loc[~pe_out_centered['Voltage_aligned'].isna()])) / (
np.max(pe_out_centered['SOC_aligned'].loc[~pe_out_centered['Voltage_aligned'].isna()]) -
np.min(pe_out_centered['SOC_aligned'].loc[~pe_out_centered['Voltage_aligned'].isna()]))
).values[0] * 100
pe_lower_soc = ((pe_out_centered.loc[pe_out_centered.Voltage_aligned == pe_lower_voltage]['SOC_aligned'] -
np.min(pe_out_centered['SOC_aligned'].loc[~pe_out_centered['Voltage_aligned'].isna()])) / (
np.max(pe_out_centered['SOC_aligned'].loc[~pe_out_centered['Voltage_aligned'].isna()]) -
np.min(pe_out_centered['SOC_aligned'].loc[~pe_out_centered['Voltage_aligned'].isna()]))
).values[0] * 100
pe_mass = np.max(pe_out_centered['SOC_aligned'].loc[~pe_out_centered['Voltage_aligned'].isna()]) - np.min(
pe_out_centered['SOC_aligned'].loc[~pe_out_centered['Voltage_aligned'].isna()])
ne_upper_voltage = ne_out_centered.loc[ne_out_centered.SOC_aligned == soc_upper].Voltage_aligned.values[0]
ne_lower_voltage = ne_out_centered.loc[ne_out_centered.SOC_aligned == soc_lower].Voltage_aligned.values[0]
ne_upper_soc = ((ne_out_centered.loc[ne_out_centered.Voltage_aligned == ne_upper_voltage]['SOC_aligned'] -
np.min(ne_out_centered['SOC_aligned'].loc[~ne_out_centered['Voltage_aligned'].isna()])) / (
np.max(ne_out_centered['SOC_aligned'].loc[~ne_out_centered['Voltage_aligned'].isna()]) -
np.min(ne_out_centered['SOC_aligned'].loc[~ne_out_centered['Voltage_aligned'].isna()]))
).values[0] * 100
ne_lower_soc = ((ne_out_centered.loc[ne_out_centered.Voltage_aligned == ne_lower_voltage]['SOC_aligned'] -
np.min(ne_out_centered['SOC_aligned'].loc[~ne_out_centered['Voltage_aligned'].isna()])) / (
np.max(ne_out_centered['SOC_aligned'].loc[~ne_out_centered['Voltage_aligned'].isna()]) -
np.min(ne_out_centered['SOC_aligned'].loc[~ne_out_centered['Voltage_aligned'].isna()]))
).values[0] * 100
ne_mass = np.max(ne_out_centered['SOC_aligned'].loc[~ne_out_centered['Voltage_aligned'].isna()]) - np.min(
ne_out_centered['SOC_aligned'].loc[~ne_out_centered['Voltage_aligned'].isna()])
li_mass = np.max(pe_minus_ne_centered['SOC_aligned'].loc[~pe_minus_ne_centered.Voltage_aligned.isna()]) - np.min(
pe_minus_ne_centered['SOC_aligned'].loc[~pe_minus_ne_centered.Voltage_aligned.isna()])
return (pe_upper_voltage, pe_lower_voltage, pe_upper_soc, pe_lower_soc, pe_mass,
ne_upper_voltage, ne_lower_voltage, ne_upper_soc, ne_lower_soc, ne_mass,
soc_upper, soc_lower, li_mass)
def plot_voltage_curves_for_cell(processed_cycler_run,
cycle_type='rpt_0.2C',
x_var='capacity',
step_type=0,
fig_size_inches=(6, 4)):
# Plot a series of voltage profiles over cycles for this cell
fig = plt.figure()
# Filter down to only cycles of cycle_type
diag_type_cycles = processed_cycler_run.diagnostic_interpolated.loc[
processed_cycler_run.diagnostic_interpolated['cycle_type'] == cycle_type]
# Loop across cycles
cycle_indexes = diag_type_cycles['cycle_index'].unique()
step_type_list = ['charge', 'discharge']
for i in cycle_indexes:
diag_type_cycle_i = diag_type_cycles.loc[diag_type_cycles.cycle_index == i]
x_charge = diag_type_cycle_i[step_type_list[step_type] + '_' + x_var].loc[
diag_type_cycles['step_type'] == step_type]
y_charge = diag_type_cycle_i.voltage.loc[diag_type_cycles['step_type'] == step_type]
plt.plot(x_charge, y_charge,
label='cycle ' + str(i) + ' ' + step_type_list[step_type],
c=cm.winter((i - np.min(cycle_indexes)) / (np.max(cycle_indexes) - np.min(cycle_indexes))),
figure=fig)
fig.set_size_inches(fig_size_inches)
return fig
class IntracellAnalysis:
def __init__(self,
pe_pristine_file,
ne_pristine_file,
cycle_type='rpt_0.2C',
step_type=0,
ne_2pos_file=None,
ne_2neg_file=None):
"""
        Initializes the cell electrode analysis class. This class is designed to fit the cell and electrode
        parameters in order to determine changes of the electrodes within the full cell from full cell cycling data only.
Args:
pe_pristine_file (str): file name for the half cell data of the pristine (uncycled) positive
electrode
ne_pristine_file (str): file name for the half cell data of the pristine (uncycled) negative
electrode
cycle_type (str): type of diagnostic cycle for the fitting
step_type (int): charge or discharge (0 for charge, 1 for discharge)
ne_2neg_file (str): file name of the data for the negative component of the anode
ne_2pos_file (str): file name of the data for the positive component of the anode
"""
if not os.path.split(pe_pristine_file)[0]:
self.pe_pristine = pd.read_csv(os.path.join(os.environ.get("BEEP_PROCESSING_DIR", "/"),
CELL_INFO_DIR,
pe_pristine_file),
usecols=['SOC_aligned', 'Voltage_aligned'])
else:
self.pe_pristine = pd.read_csv(os.path.join(pe_pristine_file),
usecols=['SOC_aligned', 'Voltage_aligned'])
if not os.path.split(ne_pristine_file)[0]:
self.ne_1_pristine = pd.read_csv(os.path.join(os.environ.get("BEEP_PROCESSING_DIR", "/"),
CELL_INFO_DIR,
ne_pristine_file),
usecols=['SOC_aligned', 'Voltage_aligned'])
else:
self.ne_1_pristine = pd.read_csv(os.path.join(ne_pristine_file),
usecols=['SOC_aligned', 'Voltage_aligned'])
if ne_2neg_file and ne_2pos_file:
if not os.path.split(ne_2neg_file)[0] and not os.path.split(ne_2pos_file)[0]:
self.ne_2_pristine_pos = pd.read_csv(os.path.join(os.environ.get("BEEP_PROCESSING_DIR", "/"),
CELL_INFO_DIR,
ne_2pos_file))
self.ne_2_pristine_neg = pd.read_csv(os.path.join(os.environ.get("BEEP_PROCESSING_DIR", "/"),
CELL_INFO_DIR,
ne_2neg_file))
else:
self.ne_2_pristine_pos = pd.read_csv(ne_2pos_file)
self.ne_2_pristine_neg = pd.read_csv(ne_2neg_file)
else:
self.ne_2_pristine_pos = pd.DataFrame()
self.ne_2_pristine_neg = pd.DataFrame()
self.cycle_type = cycle_type
self.step_type = step_type
self.upper_voltage = 4.2
self.lower_voltage = 2.7
self.threshold = 4.84 * 0.8
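    # --- Instantiation sketch (illustrative; the csv file names are hypothetical).
    # File names given without a directory component are resolved under
    # $BEEP_PROCESSING_DIR/data-share/raw/cell_info:
    #
    #   ia = IntracellAnalysis("pe_pristine.csv", "ne_pristine.csv",
    #                          cycle_type="rpt_0.2C", step_type=0)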
def process_beep_cycle_data_for_candidate_halfcell_analysis(self,
cell_struct,
real_cell_initial_charge_profile_aligned,
real_cell_initial_charge_profile,
cycle_index):
"""
Inputs:
diag_type_cycles: beep cell_struct.diagnostic_interpolated filtered to one diagnostic type
real_cell_initial_charge_profile_aligned: dataframe containing SOC (equally spaced) and voltage columns
real_cell_initial_charge_profile: dataframe containing SOC and voltage columns
cycle_index: cycle number to evaluate at
Outputs
real_cell_candidate_charge_profile_aligned: a dataframe containing columns SOC_aligned (evenly spaced) and
Voltage_aligned
"""
diag_type_cycles = cell_struct.diagnostic_data.loc[cell_struct.diagnostic_data['cycle_type'] == self.cycle_type]
real_cell_candidate_charge_profile = diag_type_cycles.loc[
(diag_type_cycles.cycle_index == cycle_index)
& (diag_type_cycles.step_type == 0) # step_type = 0 is charge, 1 is discharge
& (diag_type_cycles.voltage < self.upper_voltage)
& (diag_type_cycles.voltage > self.lower_voltage)][['voltage', 'charge_capacity']]
real_cell_candidate_charge_profile['SOC'] = (
(real_cell_candidate_charge_profile['charge_capacity'] -
np.min(real_cell_initial_charge_profile['charge_capacity']))
/ (np.max(real_cell_initial_charge_profile['charge_capacity']) -
np.min(real_cell_initial_charge_profile['charge_capacity'])) * 100
)
real_cell_candidate_charge_profile['Voltage'] = real_cell_candidate_charge_profile['voltage']
real_cell_candidate_charge_profile.drop('voltage', axis=1, inplace=True)
SOC_vec = np.linspace(0, np.max(real_cell_candidate_charge_profile['SOC']),
1001) # 100 ; np.max(real_cell_candidate_charge_profile['SOC']
real_cell_candidate_charge_profile_aligned = pd.DataFrame()
real_cell_candidate_charge_profile_interper = interp1d(real_cell_candidate_charge_profile['SOC'],
real_cell_candidate_charge_profile['Voltage'],
bounds_error=False)
real_cell_candidate_charge_profile_aligned['Voltage_aligned'] = real_cell_candidate_charge_profile_interper(
SOC_vec)
real_cell_candidate_charge_profile_aligned['Voltage_aligned'].fillna(self.lower_voltage, inplace=True)
real_cell_candidate_charge_profile_aligned['SOC_aligned'] = SOC_vec / np.max(
real_cell_initial_charge_profile_aligned['SOC_aligned'].loc[
~real_cell_initial_charge_profile_aligned['Voltage_aligned'].isna()]) * 100
return real_cell_candidate_charge_profile_aligned
def process_beep_cycle_data_for_initial_halfcell_analysis(self,
cell_struct,
step_type=0):
"""
This function extracts the initial (non-degraded) voltage and soc profiles for the cell with columns
interpolated on voltage and soc.
        Inputs:
            cell_struct: beep structured cell object; its diagnostic data are filtered internally to
                self.cycle_type
            step_type: specifies whether the cell is charging or discharging. 0 is charge, 1 is discharge.
        Outputs:
            real_cell_initial_charge_profile_aligned: a dataframe containing columns SOC_aligned (evenly spaced)
                and Voltage_aligned
            real_cell_initial_charge_profile: a dataframe containing columns Voltage (evenly spaced), capacity, and SOC
"""
if step_type == 0:
capacity_col = 'charge_capacity'
else:
capacity_col = 'discharge_capacity'
diag_type_cycles = cell_struct.diagnostic_data.loc[cell_struct.diagnostic_data['cycle_type'] == self.cycle_type]
soc_vec = np.linspace(0, 100.0, 1001)
cycle_index_of_cycle_type = cell_struct.diagnostic_summary[
cell_struct.diagnostic_summary.cycle_type == self.cycle_type].cycle_index.iloc[0]
real_cell_initial_charge_profile = diag_type_cycles.loc[
(diag_type_cycles.cycle_index == cycle_index_of_cycle_type)
& (diag_type_cycles.step_type == step_type) # step_type = 0 is charge, 1 is discharge
& (diag_type_cycles.voltage < self.upper_voltage)
& (diag_type_cycles.voltage > self.lower_voltage)][['voltage', capacity_col]]
real_cell_initial_charge_profile['SOC'] = (
(
real_cell_initial_charge_profile[capacity_col] -
np.min(real_cell_initial_charge_profile[capacity_col])
) /
(
np.max(real_cell_initial_charge_profile[capacity_col]) -
np.min(real_cell_initial_charge_profile[capacity_col])
) * 100
)
real_cell_initial_charge_profile['Voltage'] = real_cell_initial_charge_profile['voltage']
real_cell_initial_charge_profile.drop('voltage', axis=1, inplace=True)
real_cell_initial_charge_profile_aligned = pd.DataFrame()
real_cell_initial_charge_profile_aligned['SOC_aligned'] = soc_vec
real_cell_initial_charge_profile_interper = interp1d(real_cell_initial_charge_profile['SOC'],
real_cell_initial_charge_profile['Voltage'],
bounds_error=False)
real_cell_initial_charge_profile_aligned['Voltage_aligned'] = real_cell_initial_charge_profile_interper(
real_cell_initial_charge_profile_aligned['SOC_aligned'])
return real_cell_initial_charge_profile_aligned, real_cell_initial_charge_profile
def get_dQdV_over_Q_from_halfcell_initial_matching(self, x, *params):
df_1, df_2, df_real_interped, emulated_full_cell_interped = self.halfcell_initial_matching_v2(x, *params)
# Calculate dQdV from full cell profiles
dq_dv_real = pd.DataFrame(np.gradient(df_real_interped['SOC_aligned'], df_real_interped['Voltage_aligned']),
columns=['dQdV'])
dq_dv_emulated = pd.DataFrame(
np.gradient(emulated_full_cell_interped['SOC_aligned'], emulated_full_cell_interped['Voltage_aligned']),
columns=['dQdV'])
# Include original data
dq_dv_real['SOC_aligned'] = df_real_interped['SOC_aligned']
dq_dv_real['Voltage_aligned'] = df_real_interped['Voltage_aligned']
dq_dv_emulated['SOC_aligned'] = emulated_full_cell_interped['SOC_aligned']
dq_dv_emulated['Voltage_aligned'] = emulated_full_cell_interped['Voltage_aligned']
# Interpolate over Q
# ^^ already done in this case as standard data template is over Q
return df_1, df_2, dq_dv_real, dq_dv_emulated, df_real_interped, emulated_full_cell_interped
def get_error_dQdV_over_Q_from_halfcell_initial_matching(self, x, *params):
df_1, df_2, dQdV_real, dQdV_emulated, df_real_interped, emulated_full_cell_interped = \
self.get_dQdV_over_Q_from_halfcell_initial_matching(x, *params)
# Calculate distance between lines
error = distance.euclidean(dQdV_real['dQdV'], dQdV_emulated['dQdV']) + 0.01 * len(
dQdV_emulated['dQdV'].loc[dQdV_emulated['dQdV'].isna()])
return error
def get_dQdV_over_V_from_halfcell_initial_matching(self, x, *params):
df_1, df_2, df_real_interped, emulated_full_cell_interped = self.halfcell_initial_matching_v2(x, *params)
# Calculate dQdV from full cell profiles
dq_dv_real = pd.DataFrame(np.gradient(df_real_interped['SOC_aligned'], df_real_interped['Voltage_aligned']),
columns=['dQdV']).ewm(alpha=x[-2]).mean()
dq_dv_emulated = pd.DataFrame(
np.gradient(emulated_full_cell_interped['SOC_aligned'], emulated_full_cell_interped['Voltage_aligned']),
columns=['dQdV']).ewm(alpha=x[-1]).mean()
# Include original data
dq_dv_real['SOC_aligned'] = df_real_interped['SOC_aligned']
dq_dv_real['Voltage_aligned'] = df_real_interped['Voltage_aligned']
dq_dv_emulated['SOC_aligned'] = emulated_full_cell_interped['SOC_aligned']
dq_dv_emulated['Voltage_aligned'] = emulated_full_cell_interped['Voltage_aligned']
# Interpolate over V
voltage_vec = np.linspace(2.7, 4.2, 1001)
v_dq_dv_interper_real = interp1d(dq_dv_real['Voltage_aligned'].loc[~dq_dv_real['Voltage_aligned'].isna()],
dq_dv_real['dQdV'].loc[~dq_dv_real['Voltage_aligned'].isna()],
bounds_error=False, fill_value=0)
v_soc_interper_real = interp1d(dq_dv_real['Voltage_aligned'].loc[~dq_dv_real['Voltage_aligned'].isna()],
dq_dv_real['SOC_aligned'].loc[~dq_dv_real['Voltage_aligned'].isna()],
bounds_error=False, fill_value=(0, 100))
v_dq_dv_interper_emulated = interp1d(dq_dv_emulated['Voltage_aligned'].loc[
~dq_dv_emulated['Voltage_aligned'].isna()],
dq_dv_emulated['dQdV'].loc[~dq_dv_emulated['Voltage_aligned'].isna()],
bounds_error=False, fill_value=0)
v_soc_interper_emulated = interp1d(dq_dv_emulated['Voltage_aligned'].loc[
~dq_dv_emulated['Voltage_aligned'].isna()],
dq_dv_emulated['SOC_aligned'].loc[~dq_dv_emulated['Voltage_aligned'].isna()],
bounds_error=False, fill_value=(0, 100))
dq_dv_over_v_real = pd.DataFrame(v_dq_dv_interper_real(voltage_vec), columns=['dQdV']).fillna(0)
dq_dv_over_v_real['SOC'] = v_soc_interper_real(voltage_vec)
dq_dv_over_v_real['Voltage'] = voltage_vec
dq_dv_over_v_emulated = pd.DataFrame(v_dq_dv_interper_emulated(voltage_vec), columns=['dQdV']).fillna(0)
dq_dv_over_v_emulated['SOC'] = v_soc_interper_emulated(voltage_vec)
dq_dv_over_v_emulated['Voltage'] = voltage_vec
return df_1, df_2, dq_dv_over_v_real, dq_dv_over_v_emulated, df_real_interped, emulated_full_cell_interped
def get_error_dQdV_over_V_from_halfcell_initial_matching(self, x, *params):
df_1, df_2, dq_dv_real, dq_dv_emulated, df_real_interped, emulated_full_cell_interped = \
self.get_dQdV_over_V_from_halfcell_initial_matching(x, *params)
# Calculate distance between lines
error = distance.euclidean(dq_dv_real['dQdV'], dq_dv_emulated['dQdV']) + 0.01 * len(
dq_dv_emulated['dQdV'].loc[dq_dv_emulated['dQdV'].isna()])
return error
def get_dVdQ_over_Q_from_halfcell_initial_matching(self, x, *params):
df_1, df_2, df_real_interped, emulated_full_cell_interped = self.halfcell_initial_matching_v2(x, *params)
# Calculate dVdQ from full cell profiles
dv_dq_real = pd.DataFrame(np.gradient(df_real_interped['Voltage_aligned'], df_real_interped['SOC_aligned']),
columns=['dVdQ'])
dv_dq_emulated = pd.DataFrame(
np.gradient(emulated_full_cell_interped['Voltage_aligned'], emulated_full_cell_interped['SOC_aligned']),
columns=['dVdQ'])
# Include original data
dv_dq_real['SOC_aligned'] = df_real_interped['SOC_aligned']
dv_dq_real['Voltage_aligned'] = df_real_interped['Voltage_aligned']
dv_dq_emulated['SOC_aligned'] = emulated_full_cell_interped['SOC_aligned']
dv_dq_emulated['Voltage_aligned'] = emulated_full_cell_interped['Voltage_aligned']
# Interpolate over Q
# ^^ already done in this case as standard data template is over Q
return df_1, df_2, dv_dq_real, dv_dq_emulated, df_real_interped, emulated_full_cell_interped
def get_error_dVdQ_over_Q_from_halfcell_initial_matching(self, x, *params):
(df_1,
df_2,
dVdQ_real,
dVdQ_emulated,
df_real_interped,
emulated_full_cell_interped) = self.get_dVdQ_over_Q_from_halfcell_initial_matching(x, *params)
# Calculate distance between lines
error = distance.euclidean(dVdQ_real['dVdQ'], dVdQ_emulated['dVdQ']) + 0.01 * len(
dVdQ_emulated['dVdQ'].loc[dVdQ_emulated['dVdQ'].isna()])
return error
def get_dVdQ_over_V_from_halfcell_initial_matching(self, x, *params):
(df_1,
df_2,
df_real_interped,
emulated_full_cell_interped) = self.halfcell_initial_matching_v2(x, *params)
# Calculate dVdQ from full cell profiles
dv_dq_real = pd.DataFrame(np.gradient(df_real_interped['SOC_aligned'], df_real_interped['Voltage_aligned']),
columns=['dVdQ'])
dv_dq_emulated = pd.DataFrame(
np.gradient(emulated_full_cell_interped['SOC_aligned'], emulated_full_cell_interped['Voltage_aligned']),
columns=['dVdQ'])
# Include original data
dv_dq_real['SOC_aligned'] = df_real_interped['SOC_aligned']
dv_dq_real['Voltage_aligned'] = df_real_interped['Voltage_aligned']
dv_dq_emulated['SOC_aligned'] = emulated_full_cell_interped['SOC_aligned']
dv_dq_emulated['Voltage_aligned'] = emulated_full_cell_interped['Voltage_aligned']
# Interpolate over V
voltage_vec = np.linspace(2.7, 4.2, 1001)
v_dv_dq_interper_real = interp1d(dv_dq_real['Voltage_aligned'].loc[~dv_dq_real['Voltage_aligned'].isna()],
dv_dq_real['dVdQ'].loc[~dv_dq_real['Voltage_aligned'].isna()],
bounds_error=False, fill_value=0)
v_soc_interper_real = interp1d(dv_dq_real['Voltage_aligned'].loc[~dv_dq_real['Voltage_aligned'].isna()],
dv_dq_real['SOC_aligned'].loc[~dv_dq_real['Voltage_aligned'].isna()],
bounds_error=False, fill_value=(0, 100))
v_dv_dq_interp_emulated = interp1d(dv_dq_emulated['Voltage_aligned'].loc[
~dv_dq_emulated['Voltage_aligned'].isna()],
dv_dq_emulated['dVdQ'].loc[~dv_dq_emulated['Voltage_aligned'].isna()],
bounds_error=False, fill_value=0)
v_soc_interper_emulated = interp1d(dv_dq_emulated['Voltage_aligned'].loc[
~dv_dq_emulated['Voltage_aligned'].isna()],
dv_dq_emulated['SOC_aligned'].loc[~dv_dq_emulated['Voltage_aligned'].isna()],
bounds_error=False, fill_value=(0, 100))
dv_dq_over_v_real = pd.DataFrame(v_dv_dq_interper_real(voltage_vec), columns=['dVdQ'])
dv_dq_over_v_real['SOC'] = v_soc_interper_real(voltage_vec)
dv_dq_over_v_real['Voltage'] = voltage_vec
dv_dq_over_v_emulated = pd.DataFrame(v_dv_dq_interp_emulated(voltage_vec), columns=['dVdQ'])
dv_dq_over_v_emulated['SOC'] = v_soc_interper_emulated(voltage_vec)
dv_dq_over_v_emulated['Voltage'] = voltage_vec
return df_1, df_2, dv_dq_over_v_real, dv_dq_over_v_emulated, df_real_interped, emulated_full_cell_interped
def get_error_dVdQ_over_V_from_halfcell_initial_matching(self, x, *params):
df_1, df_2, dv_dq_real, dv_dq_emulated, df_real_interped, emulated_full_cell_interped = \
self.get_dVdQ_over_V_from_halfcell_initial_matching(x, *params)
# Calculate distance between lines
error = distance.euclidean(dv_dq_real['dVdQ'], dv_dq_emulated['dVdQ']) + 0.01 * len(
dv_dq_emulated['dVdQ'].loc[dv_dq_emulated['dVdQ'].isna()])
return error
def blend_electrodes_robust(self, ne_pristine_matched, ne_2_pos, ne_2_neg, x_ne_2):
"""
"""
if ne_2_pos.empty:
df_blended = ne_pristine_matched
return df_blended
if ne_2_neg.empty:
df_blended = ne_pristine_matched
return df_blended
if x_ne_2 > 0:
ne_2_pristine = ne_2_pos
else:
ne_2_pristine = ne_2_neg
x_ne_2 = np.abs(x_ne_2)
# match the two NE materials by SOC
ne_pristine_matched_interper = interp1d(
ne_pristine_matched.loc[~ne_pristine_matched.Voltage_aligned.isna()]['SOC_aligned'],
ne_pristine_matched.loc[~ne_pristine_matched.Voltage_aligned.isna()]['Voltage_aligned'],
bounds_error=False)
ne_2_pristine_interper = interp1d(ne_2_pristine.loc[~ne_2_pristine.Voltage_aligned.isna()]['SOC_aligned'],
ne_2_pristine.loc[~ne_2_pristine.Voltage_aligned.isna()]['Voltage_aligned'],
bounds_error=False)
soc_vec = np.linspace(0, 100, 1001)
ne_pristine_matched_len1001 = pd.DataFrame(soc_vec, columns=['SOC_aligned'])
ne_pristine_matched_len1001['Voltage_aligned'] = ne_pristine_matched_interper(soc_vec)
ne_2_pristine_interper_len1001 = pd.DataFrame(soc_vec, columns=['SOC_aligned'])
ne_2_pristine_interper_len1001['Voltage_aligned'] = ne_2_pristine_interper(soc_vec)
df_ne = blend_electrodes(ne_pristine_matched_len1001,
ne_2_pristine_interper_len1001,
pd.DataFrame(),
x_ne_2)
return df_ne
def blend_electrodes_robust_v2(self, ne_pristine_matched, ne_2_pos, ne_2_neg, x_ne_2):
"""
"""
if ne_2_pos.empty:
df_blended = ne_pristine_matched
return df_blended
if ne_2_neg.empty:
df_blended = ne_pristine_matched
return df_blended
if x_ne_2 > 0:
ne_2_pristine = ne_2_pos
else:
ne_2_pristine = ne_2_neg
x_ne_2 = np.abs(x_ne_2)
# match the two NE materials by SOC
soc_vec = np.linspace(0, 100, 1001)
ne_pristine_matched_0to100 = ((ne_pristine_matched.loc[~ne_pristine_matched.Voltage_aligned.isna()][
'SOC_aligned'] - np.min(
ne_pristine_matched.loc[~ne_pristine_matched.Voltage_aligned.isna()]['SOC_aligned'])) /
(np.max(ne_pristine_matched.loc[~ne_pristine_matched.Voltage_aligned.isna()][
'SOC_aligned']) - np.min(
ne_pristine_matched.loc[~ne_pristine_matched.Voltage_aligned.isna()][
'SOC_aligned']))
) * 100
ne_pristine_matched_interper = interp1d(ne_pristine_matched_0to100,
ne_pristine_matched.loc[~ne_pristine_matched.Voltage_aligned.isna()][
'Voltage_aligned'],
bounds_error=False)
ne_2_pristine_interper = interp1d(ne_2_pristine.loc[~ne_2_pristine.Voltage_aligned.isna()]['SOC_aligned'],
ne_2_pristine.loc[~ne_2_pristine.Voltage_aligned.isna()]['Voltage_aligned'],
bounds_error=False)
ne_pristine_matched_len1001 = pd.DataFrame(soc_vec, columns=['SOC_aligned'])
ne_pristine_matched_len1001['Voltage_aligned'] = ne_pristine_matched_interper(soc_vec)
ne_2_pristine_interper_len1001 = pd.DataFrame(soc_vec, columns=['SOC_aligned'])
ne_2_pristine_interper_len1001['Voltage_aligned'] = ne_2_pristine_interper(soc_vec)
df_ne_blended = blend_electrodes(ne_pristine_matched_len1001,
ne_2_pristine_interper_len1001,
pd.DataFrame(),
x_ne_2)
# restore blend back to original SOC span of NE_pristine_matched
        df_ne_blended_interper = interp1d(df_ne_blended['SOC_aligned'], df_ne_blended['Voltage_aligned'],
                                          bounds_error=False)  # initializing interpolation across the blend
len_non_na_ne_pristine_matched = len(ne_pristine_matched.loc[
~ne_pristine_matched.Voltage_aligned.isna()]) # number of points
# applicable to NE in NE_pristine_matched
soc_vec_prematching = np.linspace(np.min(df_ne_blended['SOC_aligned']),
np.max(df_ne_blended['SOC_aligned']),
len_non_na_ne_pristine_matched) # vector across blended NE with same number
# of applicable points as original (NE_pristine_matched)
df_ne_blended_matched = ne_pristine_matched.copy()
df_ne_slice_for_matched = df_ne_blended_interper(soc_vec_prematching)
df_ne_blended_matched.at[(df_ne_blended_matched.loc[
(~df_ne_blended_matched['Voltage_aligned'].isna())]).index, 'Voltage_aligned'] = df_ne_slice_for_matched
return df_ne_blended_matched
def halfcell_initial_matching_v2(self, x, *params):
"""
Augments halfcell voltage profiles by scaling and translating them. Typically used in an optimization routine
to fit the emulated full cell profile to a real cell profile. Alternatively, this function can be used for
emulation of full cell voltage profiles from its electrode constituents with specified capacity ratio and
offset of the two electrodes.
Inputs:
x: an array of 2 or 3 parameters containing scale_ratio, offset, and optionally NE_2_x. scale_ratio is equal
to the capacity of the
cathode divided by the capacity of the anode. offset is defined as the SOC between the cathode at zero capacity
and the anode at zero
capacity. NE_2_x is the fraction of the secondary electrode material in the anode.
df_real: dataframe for the first diagnostic (pristine) of the real full cell. Columns for SOC (evenly spaced)
and Voltage.
df_pe: dataframe for the positive electrode. Columns for SOC (evenly spaced) and Voltage.
self.ne_1_pristine: dataframe for the primary material in the negative electrode. Columns for SOC
(evenly spaced) and Voltage.
df_ne_2: dataframe for the secondary material in the negative electrode. Columns for SOC (evenly spaced)
and Voltage. Supply empty DataFrame if not emulating a blend from two known elelctrodes.
"""
df_real, df_pe, df_ne_1, df_ne_2_pos, df_ne_2_neg = params
df_pe = self.pe_pristine
df_ne_1 = self.ne_1_pristine
df_ne_2_pos = self.ne_2_pristine_pos
df_ne_2_neg = self.ne_2_pristine_neg
scale_ratio = x[0]
offset = x[1]
if df_ne_2_pos.empty | df_ne_2_neg.empty:
# one-material anode
df_ne = pd.DataFrame()
df_ne['Voltage_aligned'] = df_ne_1['Voltage_aligned']
df_ne['SOC_aligned'] = df_ne_1['SOC_aligned']
else:
# blended anode
x_ne_2 = x[2]
df_ne = blend_electrodes(df_ne_1, df_ne_2_pos, df_ne_2_neg, x_ne_2) # _robust_v2
# shifted cathode
shifted_pe = df_pe.copy()
shifted_pe['SOC_aligned'] = shifted_pe['SOC_aligned'] * scale_ratio + offset
# shifted anode
shifted_ne = df_ne.copy()
# Interpolate across the max and min SOC of the half-cell dfs
df_1 = shifted_pe.copy()
df_2 = shifted_ne.copy()
min_soc = np.min((np.min(df_1['SOC_aligned']), np.min(df_2['SOC_aligned'])))
max_soc = np.max((np.max(df_1['SOC_aligned']), np.max(df_2['SOC_aligned'])))
soc_vec = np.linspace(min_soc, max_soc, 1001)
df_1_interper = interp1d(df_1['SOC_aligned'],
df_1['Voltage_aligned'],
bounds_error=False, fill_value=np.nan)
df_1['SOC_aligned'] = soc_vec.copy()
df_1['Voltage_aligned'] = df_1_interper(soc_vec)
df_2_interper = interp1d(df_2['SOC_aligned'],
df_2['Voltage_aligned'],
bounds_error=False, fill_value=np.nan)
df_2['SOC_aligned'] = soc_vec.copy()
df_2['Voltage_aligned'] = df_2_interper(soc_vec)
# Calculate the full-cell profile
df_3 = pd.DataFrame()
df_3['Voltage_aligned'] = df_1['Voltage_aligned'].subtract(df_2['Voltage_aligned'])
df_3['SOC_aligned'] = df_2['SOC_aligned']
# centering
centering_value = - np.min(df_3['SOC_aligned'].iloc[np.argmin(np.abs(df_3['Voltage_aligned'] - 2.7))])
emulated_full_cell_centered = df_3.copy()
emulated_full_cell_centered['SOC_aligned'] = df_3['SOC_aligned'] + centering_value
pe_out_centered = df_1.copy()
pe_out_centered['SOC_aligned'] = df_1['SOC_aligned'] + centering_value
ne_out_centered = df_2.copy()
ne_out_centered['SOC_aligned'] = df_2['SOC_aligned'] + centering_value
# Scaling
emulated_full_cell_centered.loc[(emulated_full_cell_centered['Voltage_aligned'] > self.upper_voltage) | (
emulated_full_cell_centered['Voltage_aligned'] < self.lower_voltage)] = np.nan
scaling_value = np.max(emulated_full_cell_centered['SOC_aligned'].loc[~emulated_full_cell_centered[
'Voltage_aligned'].isna()]) # value to scale emulated back to 100% SOC
emulated_full_cell_centered_scaled = emulated_full_cell_centered.copy()
emulated_full_cell_centered_scaled['SOC_aligned'] = \
emulated_full_cell_centered['SOC_aligned'] / scaling_value * 100
pe_out_centered_scaled = pe_out_centered.copy()
pe_out_centered_scaled['SOC_aligned'] = pe_out_centered['SOC_aligned'] / scaling_value * 100
ne_out_centered_scaled = ne_out_centered.copy()
ne_out_centered_scaled['SOC_aligned'] = ne_out_centered['SOC_aligned'] / scaling_value * 100
# Make new interpolation across SOC for full-cell error calculation
emulated_full_cell_interper = interp1d(
emulated_full_cell_centered_scaled.loc[
~emulated_full_cell_centered_scaled.Voltage_aligned.isna()].SOC_aligned,
emulated_full_cell_centered_scaled.loc[
~emulated_full_cell_centered_scaled.Voltage_aligned.isna()].Voltage_aligned,
bounds_error=False, fill_value='extrapolate', kind='quadratic')
real_full_cell_interper = interp1d(df_real.SOC_aligned,
df_real.Voltage_aligned,
bounds_error=False, fill_value=(self.lower_voltage, self.upper_voltage))
# Interpolate the emulated full-cell profile
SOC_vec_full_cell = np.linspace(0, 100.0, 1001)
emulated_full_cell_interped = pd.DataFrame()
emulated_full_cell_interped['SOC_aligned'] = SOC_vec_full_cell
emulated_full_cell_interped['Voltage_aligned'] = emulated_full_cell_interper(SOC_vec_full_cell)
# Interpolate the true full-cell profile
df_real_interped = emulated_full_cell_interped.copy()
df_real_interped['SOC_aligned'] = SOC_vec_full_cell
df_real_interped['Voltage_aligned'] = real_full_cell_interper(SOC_vec_full_cell)
return pe_out_centered_scaled, ne_out_centered_scaled, df_real_interped, emulated_full_cell_interped
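    # --- Usage sketch (illustrative): fitting the initial matching parameters for a
    # one-material anode with scipy's differential_evolution (imported above). The
    # bounds are hypothetical, `ia` is an IntracellAnalysis instance, and `real_df`
    # stands for the aligned profile returned by
    # process_beep_cycle_data_for_initial_halfcell_analysis:
    #
    #   params = (real_df, ia.pe_pristine, ia.ne_1_pristine,
    #             ia.ne_2_pristine_pos, ia.ne_2_pristine_neg)
    #   result = differential_evolution(ia._get_error_from_halfcell_initial_matching,
    #                                   bounds=[(0.8, 1.3), (-20.0, 20.0)], args=params)
    #   scale_ratio, offset = result.x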
def halfcell_initial_matching(self, x, *params):
"""
Augments halfcell voltage profiles by scaling and translating them. Typically used in an optimization
routine to fit the emulated full cell profile to a real cell profile.
Inputs:
            x: an array of 4 or 5 parameters containing scale_pe, offset_pe, scale_ne, offset_ne,
                and optionally ne_2_x
            df_real: dataframe for the first diagnostic (pristine) of the real full cell. Columns for SOC
                (evenly spaced) and Voltage.
            df_pe: dataframe for the positive electrode. Columns for SOC (evenly spaced) and Voltage.
            df_ne_1: dataframe for the primary material in the negative electrode. Columns for SOC (evenly spaced)
                and Voltage.
            df_ne_2: dataframe for the secondary material in the negative electrode. Columns for SOC (evenly spaced)
                and Voltage. Supply an empty DataFrame if not emulating a blend from two known electrodes.
"""
df_real, df_pe, df_ne_1, df_ne_2 = params
scale_pe = x[0]
offset_pe = x[1]
scale_ne = x[2]
offset_ne = x[3]
if df_ne_2.empty:
# one-material anode
df_ne = pd.DataFrame()
df_ne['Voltage_aligned'] = df_ne_1['Voltage_aligned']
df_ne['SOC_aligned'] = df_ne_1['SOC_aligned']
else:
# blended anode
ne_2_x = x[4] # fraction of NE_2
df_ne = pd.DataFrame()
df_ne['Voltage_aligned'] = (ne_2_x * df_ne_2['Voltage_aligned'] + (1 - ne_2_x) * df_ne_1['Voltage_aligned'])
df_ne['SOC_aligned'] = df_ne_1['SOC_aligned']
# shifted cathode
shifted_pe = df_pe.copy()
shifted_pe['SOC_aligned'] = shifted_pe['SOC_aligned'] * scale_pe + offset_pe
# shifted anode
shifted_ne = df_ne.copy()
shifted_ne['SOC_aligned'] = shifted_ne['SOC_aligned'] * scale_ne + offset_ne
# Interpolate across the max and min SOC of the half-cell dfs
df_1 = shifted_pe.copy()
df_2 = shifted_ne.copy()
min_soc = np.min((np.min(df_1['SOC_aligned']), np.min(df_2['SOC_aligned'])))
max_soc = np.max((np.max(df_1['SOC_aligned']), np.max(df_2['SOC_aligned'])))
soc_vec = np.linspace(min_soc, max_soc, 1001)
df_1_interper = interp1d(df_1['SOC_aligned'], df_1['Voltage_aligned'], bounds_error=False)
df_1['SOC_aligned'] = soc_vec.copy()
df_1['Voltage_aligned'] = df_1_interper(soc_vec)
df_2_interper = interp1d(df_2['SOC_aligned'], df_2['Voltage_aligned'], bounds_error=False)
df_2['SOC_aligned'] = soc_vec.copy()
df_2['Voltage_aligned'] = df_2_interper(soc_vec)
# Calculate the full-cell profile
df_3 = pd.DataFrame()
df_3['Voltage_aligned'] = df_1['Voltage_aligned'].subtract(df_2['Voltage_aligned'])
df_3.loc[(df_3['Voltage_aligned'] > 4.2) | (df_3['Voltage_aligned'] < 2.7)] = np.nan
df_3['SOC_aligned'] = df_2['SOC_aligned']
# centering
centering_value = (df_real['SOC_aligned'].loc[(np.argmin(np.abs(df_real['Voltage_aligned'] -
np.min(df_3['Voltage_aligned'].loc[
~df_3['Voltage_aligned'].isna()]))))]
- np.min(df_3['SOC_aligned'].loc[~df_3['Voltage_aligned'].isna()])
)
emulated_full_cell_centered = df_3.copy()
emulated_full_cell_centered['SOC_aligned'] = df_3['SOC_aligned'] + centering_value
PE_out_centered = df_1.copy()
PE_out_centered['SOC_aligned'] = df_1['SOC_aligned'] + centering_value
NE_out_centered = df_2.copy()
NE_out_centered['SOC_aligned'] = df_2['SOC_aligned'] + centering_value
# Make new interpolation across SOC for full-cell error calculation
min_soc_full_cell = np.min((np.min(df_3.loc[~df_3.Voltage_aligned.isna()].SOC_aligned),
np.min(df_real.loc[~df_real.Voltage_aligned.isna()].SOC_aligned)))
max_soc_full_cell = np.max((np.max(df_3.loc[~df_3.Voltage_aligned.isna()].SOC_aligned),
np.max(df_real.loc[~df_real.Voltage_aligned.isna()].SOC_aligned)))
soc_vec_full_cell = np.linspace(min_soc_full_cell, max_soc_full_cell, 1001)
emulated_full_cell_interper = interp1d(emulated_full_cell_centered.SOC_aligned,
emulated_full_cell_centered.Voltage_aligned, bounds_error=False)
real_full_cell_interper = interp1d(df_real.SOC_aligned,
df_real.Voltage_aligned, bounds_error=False)
# Interpolate the emulated full-cell profile
emulated_full_cell_interped = pd.DataFrame()
emulated_full_cell_interped['SOC_aligned'] = soc_vec_full_cell
emulated_full_cell_interped['Voltage_aligned'] = emulated_full_cell_interper(soc_vec_full_cell)
# Interpolate the true full-cell profile
df_real_interped = emulated_full_cell_interped.copy()
df_real_interped['SOC_aligned'] = soc_vec_full_cell
df_real_interped['Voltage_aligned'] = real_full_cell_interper(soc_vec_full_cell)
return df_1, df_2, df_real_interped, emulated_full_cell_interped
def _get_error_from_halfcell_initial_matching(self, x, *params):
df_1, df_2, df_real_interped, emulated_full_cell_interped = self.halfcell_initial_matching_v2(x, *params)
error = distance.euclidean(df_real_interped['Voltage_aligned'],
emulated_full_cell_interped['Voltage_aligned']) + 0.01 * len(
emulated_full_cell_interped['Voltage_aligned'].loc[emulated_full_cell_interped['Voltage_aligned'].isna()])
return error
def _impose_degradation(self,
pe_pristine=pd.DataFrame(),
ne_1_pristine=pd.DataFrame(),
ne_2_pristine_pos=pd.DataFrame(),
ne_2_pristine_neg=pd.DataFrame(),
lli=0.0, lam_pe=0.0, lam_ne=0.0, x_ne_2=0.0):
pe_translation = 0
pe_shrinkage = 0
ne_translation = 0
ne_shrinkage = 0
# Blend negative electrodes
ne_pristine = blend_electrodes(ne_1_pristine, ne_2_pristine_pos, ne_2_pristine_neg, x_ne_2)
# Update degradation shifts for LLI
ne_translation += lli
# Update degradation shifts for LAM_PE
upper_voltage_limit = self.upper_voltage
pe_soc_setpoint = pe_pristine['SOC_aligned'].loc[
np.argmin(np.abs(pe_pristine['Voltage_aligned']
- ne_pristine[
'Voltage_aligned'] - upper_voltage_limit))] # SOC at which the upper voltage
# limit is hit
pe_translation += lam_pe * pe_soc_setpoint / 100 # correction for shrinkage to ensure the locking of
# the upper voltage SOC
pe_shrinkage += lam_pe # shrinkage of PE capacity due to LAM_PE
# Update degradation shifts for LAM_NE
lower_voltage_limit = self.lower_voltage
ne_soc_setpoint = ne_pristine['SOC_aligned'].loc[
np.argmin((pe_pristine['Voltage_aligned']
- ne_pristine[
'Voltage_aligned'] - lower_voltage_limit))] # SOC at which the lower voltage limit is hit
ne_translation += lam_ne * ne_soc_setpoint / 100 # correction for shrinkage to ensure the locking of the
# lower voltage SOC
ne_shrinkage += lam_ne # shrinkage of NE capacity due to LAM_NE
# Update SOC vector for both electrodes according to their imposed degradation
pe_pristine_shifted_by_deg = pe_pristine.copy()
pe_pristine_shifted_by_deg['SOC_aligned'] = pe_pristine_shifted_by_deg['SOC_aligned'] * (
1 - pe_shrinkage / 100) + pe_translation
ne_pristine_shifted_by_deg = ne_pristine.copy()
ne_pristine_shifted_by_deg['SOC_aligned'] = ne_pristine_shifted_by_deg['SOC_aligned'] * (
1 - ne_shrinkage / 100) + ne_translation
# Re-interpolate to align dataframes for differencing
lower_soc = np.min((np.min(pe_pristine_shifted_by_deg['SOC_aligned']),
np.min(ne_pristine_shifted_by_deg['SOC_aligned'])))
upper_soc = np.max((np.max(pe_pristine_shifted_by_deg['SOC_aligned']),
np.max(ne_pristine_shifted_by_deg['SOC_aligned'])))
soc_vec = np.linspace(lower_soc, upper_soc, 1001)
pe_pristine_interper = interp1d(pe_pristine_shifted_by_deg['SOC_aligned'],
pe_pristine_shifted_by_deg['Voltage_aligned'], bounds_error=False)
pe_degraded = pe_pristine_shifted_by_deg.copy()
pe_degraded['SOC_aligned'] = soc_vec
pe_degraded['Voltage_aligned'] = pe_pristine_interper(soc_vec)
ne_pristine_interper = interp1d(ne_pristine_shifted_by_deg['SOC_aligned'],
ne_pristine_shifted_by_deg['Voltage_aligned'], bounds_error=False)
ne_degraded = ne_pristine_shifted_by_deg.copy()
ne_degraded['SOC_aligned'] = soc_vec
ne_degraded['Voltage_aligned'] = ne_pristine_interper(soc_vec)
return pe_degraded, ne_degraded
def get_dQdV_over_V_from_degradation_matching(self, x, *params):
pe_out_centered, ne_out_centered, df_real_interped, emulated_full_cell_interped = \
self.halfcell_degradation_matching_v3(x, *params)
# Calculate dQdV from full cell profiles
dq_dv_real = pd.DataFrame(np.gradient(df_real_interped['SOC_aligned'], df_real_interped['Voltage_aligned']),
columns=['dQdV']).ewm(alpha=x[-2]).mean()
dq_dv_emulated = pd.DataFrame(
np.gradient(emulated_full_cell_interped['SOC_aligned'], emulated_full_cell_interped['Voltage_aligned']),
columns=['dQdV']).ewm(alpha=x[-1]).mean()
# Include original data
dq_dv_real['SOC_aligned'] = df_real_interped['SOC_aligned']
dq_dv_real['Voltage_aligned'] = df_real_interped['Voltage_aligned']
dq_dv_emulated['SOC_aligned'] = emulated_full_cell_interped['SOC_aligned']
dq_dv_emulated['Voltage_aligned'] = emulated_full_cell_interped['Voltage_aligned']
# Interpolate over V
voltage_vec = np.linspace(2.7, 4.2, 1001)
v_dq_dv_interper_real = interp1d(dq_dv_real['Voltage_aligned'].loc[~dq_dv_real['Voltage_aligned'].isna()],
dq_dv_real['dQdV'].loc[~dq_dv_real['Voltage_aligned'].isna()],
bounds_error=False, fill_value=0)
v_soc_interper_real = interp1d(dq_dv_real['Voltage_aligned'].loc[~dq_dv_real['Voltage_aligned'].isna()],
dq_dv_real['SOC_aligned'].loc[~dq_dv_real['Voltage_aligned'].isna()],
bounds_error=False, fill_value=(0, 100))
v_dq_dv_interper_emulated = interp1d(dq_dv_emulated['Voltage_aligned'].loc[
~dq_dv_emulated['Voltage_aligned'].isna()],
dq_dv_emulated['dQdV'].loc[~dq_dv_emulated['Voltage_aligned'].isna()],
bounds_error=False, fill_value=0)
v_soc_interper_emulated = interp1d(dq_dv_emulated['Voltage_aligned'].loc[
~dq_dv_emulated['Voltage_aligned'].isna()],
dq_dv_emulated['SOC_aligned'].loc[~dq_dv_emulated['Voltage_aligned'].isna()],
bounds_error=False, fill_value=(0, 100))
dq_dv_over_v_real = pd.DataFrame(v_dq_dv_interper_real(voltage_vec), columns=['dQdV']).fillna(0)
dq_dv_over_v_real['SOC_aligned'] = v_soc_interper_real(voltage_vec)
dq_dv_over_v_real['Voltage_aligned'] = voltage_vec
dq_dv_over_v_emulated = pd.DataFrame(v_dq_dv_interper_emulated(voltage_vec), columns=['dQdV']).fillna(0)
dq_dv_over_v_emulated['SOC_aligned'] = v_soc_interper_emulated(voltage_vec)
dq_dv_over_v_emulated['Voltage_aligned'] = voltage_vec
return (pe_out_centered,
ne_out_centered,
dq_dv_over_v_real,
dq_dv_over_v_emulated,
df_real_interped,
emulated_full_cell_interped)
def get_error_dQdV_over_V_from_degradation_matching(self, x, *params):
try:
(pe_out_centered, ne_out_centered,
dq_dv_over_v_real, dq_dv_over_v_emulated,
df_real_interped, emulated_full_cell_interped) = self.get_dQdV_over_V_from_degradation_matching(x, *params)
error = distance.euclidean(dq_dv_over_v_real['dQdV'], dq_dv_over_v_emulated['dQdV']) + 0.01 * len(
dq_dv_over_v_emulated['dQdV'].loc[dq_dv_over_v_emulated['dQdV'].isna()])
except ValueError:
            error = 1000  # set an error for cases where calculation fails
return error
def get_dVdQ_over_SOC_from_degradation_matching(self, x, *params):
pe_out_centered, ne_out_centered, df_real_interped, emulated_full_cell_interped = \
self.halfcell_degradation_matching_v3(x, *params)
# Calculate dVdQ from full cell profiles
dv_dq_over_soc_real = pd.DataFrame(np.gradient(df_real_interped['Voltage_aligned'],
df_real_interped['SOC_aligned']),
columns=['dVdQ']).ewm(alpha=x[-2]).mean()
dv_dq_over_soc_emulated = pd.DataFrame(
np.gradient(emulated_full_cell_interped['Voltage_aligned'], emulated_full_cell_interped['SOC_aligned']),
columns=['dVdQ']).ewm(alpha=x[-1]).mean()
# Include original data
dv_dq_over_soc_real['SOC_aligned'] = df_real_interped['SOC_aligned']
dv_dq_over_soc_real['Voltage_aligned'] = df_real_interped['Voltage_aligned']
dv_dq_over_soc_emulated['SOC_aligned'] = emulated_full_cell_interped['SOC_aligned']
dv_dq_over_soc_emulated['Voltage_aligned'] = emulated_full_cell_interped['Voltage_aligned']
# Interpolate over Q
# ^^ already done in this case as standard data template is over Q
return (pe_out_centered,
ne_out_centered,
dv_dq_over_soc_real,
dv_dq_over_soc_emulated,
df_real_interped,
emulated_full_cell_interped)
def get_error_dVdQ_over_SOC_from_degradation_matching(self, x, *params):
try:
(pe_out_centered, ne_out_centered,
dv_dq_over_soc_real, dv_dq_over_soc_emulated,
df_real_interped, emulated_full_cell_interped) = \
self.get_dVdQ_over_SOC_from_degradation_matching(x, *params)
error = distance.euclidean(dv_dq_over_soc_real['dVdQ'], dv_dq_over_soc_emulated['dVdQ']) + 0.01 * len(
dv_dq_over_soc_emulated['dVdQ'].loc[dv_dq_over_soc_emulated['dVdQ'].isna()])
except ValueError:
            error = 1000  # set an error for cases where calculation fails
return error
def get_V_over_SOC_from_degradation_matching(self, x, *params):
(PE_out_centered, NE_out_centered, real_aligned, emulated_aligned) = \
self.halfcell_degradation_matching_v3(x, *params)
min_soc_full_cell = np.min(real_aligned.loc[~real_aligned.Voltage_aligned.isna()].SOC_aligned)
max_soc_full_cell = np.max(real_aligned.loc[~real_aligned.Voltage_aligned.isna()].SOC_aligned)
soc_vec_full_cell = np.linspace(min_soc_full_cell, max_soc_full_cell, 1001)
emulated_full_cell_interper = interp1d(
emulated_aligned.SOC_aligned.loc[~real_aligned.Voltage_aligned.isna()],
emulated_aligned.Voltage_aligned.loc[~real_aligned.Voltage_aligned.isna()],
bounds_error=False)
real_full_cell_interper = interp1d(real_aligned.SOC_aligned.loc[~real_aligned.Voltage_aligned.isna()],
real_aligned.Voltage_aligned.loc[~real_aligned.Voltage_aligned.isna()],
bounds_error=False)
# Interpolate the emulated full-cell profile
emulated_full_cell_interped = pd.DataFrame()
emulated_full_cell_interped['SOC_aligned'] = soc_vec_full_cell
emulated_full_cell_interped['Voltage_aligned'] = emulated_full_cell_interper(soc_vec_full_cell)
# Interpolate the true full-cell profile
df_real_interped = emulated_full_cell_interped.copy()
df_real_interped['SOC_aligned'] = soc_vec_full_cell
df_real_interped['Voltage_aligned'] = real_full_cell_interper(soc_vec_full_cell)
return PE_out_centered, NE_out_centered, df_real_interped, emulated_full_cell_interped
def _get_error_from_degradation_matching(self, x, *params):
try:
(pe_out_centered, ne_out_centered, real_aligned, emulated_aligned
) = self.get_V_over_SOC_from_degradation_matching(x, *params)
error = (distance.euclidean(real_aligned.loc[~emulated_aligned.Voltage_aligned.isna()].values.ravel(),
emulated_aligned.loc[~emulated_aligned.Voltage_aligned.isna()].values.ravel()) +
0.001 * len(emulated_aligned.loc[emulated_aligned.Voltage_aligned.isna()]))
except ValueError:
error = 100
return error
def halfcell_degradation_matching_v3(self, x, *params):
lli = x[0]
lam_pe = x[1]
lam_ne = x[2]
x_ne_2 = x[3]
pe_pristine, ne_1_pristine, ne_2_pristine_pos, ne_2_pristine_neg, real_full_cell_with_degradation = params
pe_out, ne_out = self._impose_degradation(pe_pristine, ne_1_pristine,
ne_2_pristine_pos, ne_2_pristine_neg,
lli, lam_pe,
lam_ne, x_ne_2)
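        # Build the emulated full-cell profile: at each aligned SOC point the full-cell
        # voltage is the difference between the degraded PE and NE half-cell potentials.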
emulated_full_cell_with_degradation = pd.DataFrame()
emulated_full_cell_with_degradation['SOC_aligned'] = pe_out['SOC_aligned'].copy()
emulated_full_cell_with_degradation['Voltage_aligned'] = pe_out['Voltage_aligned'] - ne_out['Voltage_aligned']
emulated_full_cell_with_degradation_centered = pd.DataFrame()
emulated_full_cell_with_degradation_centered['Voltage_aligned'] = emulated_full_cell_with_degradation[
'Voltage_aligned']
centering_value = - np.min(emulated_full_cell_with_degradation['SOC_aligned'].loc[
(~emulated_full_cell_with_degradation['Voltage_aligned'].isna())
])
emulated_full_cell_with_degradation_centered['SOC_aligned'] = \
(emulated_full_cell_with_degradation['SOC_aligned'] + centering_value)
pe_out_centered = pe_out.copy()
pe_out_centered['SOC_aligned'] = pe_out['SOC_aligned'] + centering_value
ne_out_centered = ne_out.copy()
ne_out_centered['SOC_aligned'] = ne_out['SOC_aligned'] + centering_value
# Interpolate full profiles across same SOC range
min_soc = np.min(
real_full_cell_with_degradation['SOC_aligned'].loc[
~real_full_cell_with_degradation['Voltage_aligned'].isna()])
max_soc = np.max(
real_full_cell_with_degradation['SOC_aligned'].loc[
~real_full_cell_with_degradation['Voltage_aligned'].isna()])
emulated_interper = interp1d(emulated_full_cell_with_degradation_centered['SOC_aligned'].loc[
~emulated_full_cell_with_degradation_centered['Voltage_aligned'].isna()],
emulated_full_cell_with_degradation_centered['Voltage_aligned'].loc[
~emulated_full_cell_with_degradation_centered['Voltage_aligned'].isna()],
bounds_error=False)
real_interper = interp1d(
real_full_cell_with_degradation['SOC_aligned'].loc[
~real_full_cell_with_degradation['Voltage_aligned'].isna()],
real_full_cell_with_degradation['Voltage_aligned'].loc[
~real_full_cell_with_degradation['Voltage_aligned'].isna()],
bounds_error=False)
soc_vec = np.linspace(min_soc, max_soc, 1001)
emulated_aligned = pd.DataFrame()
emulated_aligned['SOC_aligned'] = soc_vec
emulated_aligned['Voltage_aligned'] = emulated_interper(soc_vec)
real_aligned = pd.DataFrame()
real_aligned['SOC_aligned'] = soc_vec
real_aligned['Voltage_aligned'] = real_interper(soc_vec)
return pe_out_centered, ne_out_centered, real_aligned, emulated_aligned
def halfcell_degradation_matching_v2(self, x, *params):
lli = x[0]
lam_pe = x[1]
lam_ne = x[2]
pe_pristine, ne_pristine, real_full_cell_with_degradation = params
pe_out, ne_out = self._impose_degradation(pe_pristine, ne_pristine, lli, lam_pe, lam_ne)
emulated_full_cell_with_degradation = pd.DataFrame()
emulated_full_cell_with_degradation['SOC_aligned'] = pe_out['SOC_aligned'].copy()
emulated_full_cell_with_degradation['Voltage_aligned'] = pe_out['Voltage_aligned'] - ne_out['Voltage_aligned']
emulated_full_cell_with_degradation_centered = pd.DataFrame()
emulated_full_cell_with_degradation_centered['Voltage_aligned'] = emulated_full_cell_with_degradation[
'Voltage_aligned']
emulated_full_cell_with_degradation_centered['Voltage_aligned'].loc[
(emulated_full_cell_with_degradation_centered['Voltage_aligned'] > 4.2) |
(emulated_full_cell_with_degradation_centered['Voltage_aligned'] < 2.7)] = np.nan
centering_value = (
real_full_cell_with_degradation['SOC_aligned'].loc[
(np.argmin(np.abs(real_full_cell_with_degradation['Voltage_aligned']
- np.min(emulated_full_cell_with_degradation['Voltage_aligned'].loc[
~emulated_full_cell_with_degradation['Voltage_aligned'].isna()]))))
]
- np.min(emulated_full_cell_with_degradation['SOC_aligned'].loc[
~emulated_full_cell_with_degradation['Voltage_aligned'].isna()])
)
emulated_full_cell_with_degradation_centered['SOC_aligned'] = \
(emulated_full_cell_with_degradation['SOC_aligned'] + centering_value)
pe_out_centered = pe_out.copy()
pe_out_centered['SOC_aligned'] = pe_out['SOC_aligned'] + centering_value
ne_out_centered = ne_out.copy()
ne_out_centered['SOC_aligned'] = ne_out['SOC_aligned'] + centering_value
# Interpolate full profiles across same SOC range
min_soc = np.min(
(np.min(emulated_full_cell_with_degradation_centered['SOC_aligned'].loc[
~emulated_full_cell_with_degradation_centered['Voltage_aligned'].isna()]),
np.min(real_full_cell_with_degradation['SOC_aligned'].loc[
~real_full_cell_with_degradation['Voltage_aligned'].isna()])
)
)
max_soc = np.max(
(np.max(emulated_full_cell_with_degradation_centered['SOC_aligned'].loc[
~emulated_full_cell_with_degradation_centered['Voltage_aligned'].isna()]),
np.max(real_full_cell_with_degradation['SOC_aligned'].loc[
~real_full_cell_with_degradation['Voltage_aligned'].isna()]))
)
emulated_interper = interp1d(emulated_full_cell_with_degradation_centered['SOC_aligned'],
emulated_full_cell_with_degradation_centered['Voltage_aligned'],
bounds_error=False)
real_interper = interp1d(real_full_cell_with_degradation['SOC_aligned'],
real_full_cell_with_degradation['Voltage_aligned'],
bounds_error=False)
soc_vec = np.linspace(min_soc, max_soc, 1001)
emulated_aligned = pd.DataFrame()
emulated_aligned['SOC_aligned'] = soc_vec
emulated_aligned['Voltage_aligned'] = emulated_interper(soc_vec)
real_aligned = pd.DataFrame()
real_aligned['SOC_aligned'] = soc_vec
real_aligned['Voltage_aligned'] = real_interper(soc_vec)
return pe_out_centered, ne_out_centered, real_aligned, emulated_aligned
def get_voltage_curves_for_cell(self, processed_cycler_run, cycle_type='rpt_0.2C'):
# Filter down to only cycles of cycle_type
diag_type_cycles = processed_cycler_run.diagnostic_interpolated.loc[
processed_cycler_run.diagnostic_interpolated['cycle_type'] == cycle_type]
# Loop across cycles
cycle_indexes = diag_type_cycles['cycle_index'].unique()
for i in cycle_indexes:
diag_type_cycle_i = diag_type_cycles.loc[diag_type_cycles.cycle_index == i]
            x_charge = diag_type_cycle_i.charge_energy.loc[diag_type_cycle_i['step_type'] == 0]
            y_charge = diag_type_cycle_i.voltage.loc[diag_type_cycle_i['step_type'] == 0]
return x_charge, y_charge
def intracell_wrapper_init(self,
cell_struct,
initial_matching_bounds=None
):
"""
Wrapper function that calls all of the functions to get the initial values for the cell
before fitting all of the
Args:
cell_struct (beep.structure): dataframe to determine whether
charging or discharging
chg (bool): Charge state; True if charging
Returns:
(bool): True if step is the charge state specified.
"""
if initial_matching_bounds is None:
initial_matching_bounds = ((0.8, 1.2), (-20.0, 20.0), (1, 1), (0.1, 0.1), (0.1, 0.1))
real_cell_initial_charge_profile_aligned, real_cell_initial_charge_profile = \
self.process_beep_cycle_data_for_initial_halfcell_analysis(cell_struct)
opt_result_halfcell_initial_matching = differential_evolution(self._get_error_from_halfcell_initial_matching,
initial_matching_bounds,
args=(real_cell_initial_charge_profile_aligned,
self.pe_pristine, self.ne_1_pristine,
self.ne_2_pristine_pos,
self.ne_2_pristine_neg),
strategy='best1bin', maxiter=1000,
popsize=15, tol=0.001, mutation=0.5,
recombination=0.7, seed=1,
callback=None, disp=False, polish=True,
init='latinhypercube', atol=0,
updating='deferred', workers=-1, constraints=())
(PE_pristine_matched,
NE_pristine_matched,
df_real_interped,
emulated_full_cell_interped) = self.halfcell_initial_matching_v2(opt_result_halfcell_initial_matching.x,
real_cell_initial_charge_profile_aligned,
self.pe_pristine,
self.ne_1_pristine,
self.ne_2_pristine_pos,
self.ne_2_pristine_neg)
return (real_cell_initial_charge_profile_aligned,
real_cell_initial_charge_profile,
PE_pristine_matched,
NE_pristine_matched)
def intracell_values_wrapper(self,
cycle_index,
cell_struct,
real_cell_initial_charge_profile_aligned,
real_cell_initial_charge_profile,
PE_pristine_matched,
NE_pristine_matched,
degradation_bounds=None
):
if degradation_bounds is None:
degradation_bounds = ((-10, 50), # LLI
(-10, 50), # LAM_PE
(-10, 50), # LAM_NE
(1, 1), # (-1,1) x_NE_2
(0.1, 0.1), # (0.01,0.1)
(0.1, 0.1), # (0.01,0.1)
)
real_cell_candidate_charge_profile_aligned = self.process_beep_cycle_data_for_candidate_halfcell_analysis(
cell_struct,
real_cell_initial_charge_profile_aligned,
real_cell_initial_charge_profile,
cycle_index)
degradation_optimization_result = differential_evolution(self._get_error_from_degradation_matching,
degradation_bounds,
args=(PE_pristine_matched,
NE_pristine_matched,
self.ne_2_pristine_pos,
self.ne_2_pristine_neg,
real_cell_candidate_charge_profile_aligned
),
strategy='best1bin', maxiter=100000,
popsize=15, tol=0.001, mutation=0.5,
recombination=0.7,
seed=1,
callback=None, disp=False, polish=True,
init='latinhypercube',
atol=0, updating='deferred', workers=-1,
constraints=()
)
(PE_out_centered,
NE_out_centered,
         dQdV_over_V_real,
         dQdV_over_V_emulated,
df_real_interped,
emulated_full_cell_interped) = self.get_dQdV_over_V_from_degradation_matching(
degradation_optimization_result.x,
PE_pristine_matched,
NE_pristine_matched,
self.ne_2_pristine_pos,
self.ne_2_pristine_neg,
real_cell_candidate_charge_profile_aligned)
#
(PE_upper_voltage, PE_lower_voltage, PE_upper_SOC, PE_lower_SOC, PE_mass,
NE_upper_voltage, NE_lower_voltage, NE_upper_SOC, NE_lower_SOC, NE_mass,
SOC_upper, SOC_lower, Li_mass) = get_halfcell_voltages(PE_out_centered, NE_out_centered)
#
LLI = degradation_optimization_result.x[0]
LAM_PE = degradation_optimization_result.x[1]
LAM_NE = degradation_optimization_result.x[2]
x_NE_2 = degradation_optimization_result.x[3]
alpha_real = degradation_optimization_result.x[4]
alpha_emulated = degradation_optimization_result.x[5]
loss_dict = {cycle_index: [LLI, LAM_PE, LAM_NE, x_NE_2, alpha_real, alpha_emulated,
PE_upper_voltage, PE_lower_voltage, PE_upper_SOC, PE_lower_SOC, PE_mass,
NE_upper_voltage, NE_lower_voltage, NE_upper_SOC, NE_lower_SOC, NE_mass,
Li_mass
]
}
profiles_dict = {cycle_index: real_cell_candidate_charge_profile_aligned}
return loss_dict, profiles_dict
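# --- Hedged usage sketch (illustrative only, not part of the original module) ---
# Assuming an instance `analysis` of this class and a structured cycler run
# `cell_struct` from beep, per-cycle loss extraction might look like:
#
#     (aligned, profile, pe_matched, ne_matched) = analysis.intracell_wrapper_init(cell_struct)
#     losses = {}
#     for cyc in cycle_indices_of_interest:  # hypothetical iterable of diagnostic cycle indices
#         loss_dict, _ = analysis.intracell_values_wrapper(
#             cyc, cell_struct, aligned, profile, pe_matched, ne_matched)
#         losses.update(loss_dict)
#
# Each loss_dict entry maps a cycle index to [LLI, LAM_PE, LAM_NE, x_NE_2, ...] values.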
|
python
|
#!/usr/bin/env python
from distutils.core import setup
setup(name = "ciscotropo-webapi-python",
version = "0.1.4",
url = "http://github.com/tropo/tropo-webapi-python",
maintainer = "Cisco",
maintainer_email = "[email protected]",
description = "Python library for building voice/SMS/IM/Twitter apps at Tropo.com",
long_description = "This module implements a set of classes and methods for manipulating the Web API for the Tropo cloud communications service at http://www.tropo.com/",
platforms = ["Platform Independent"],
license = "MIT",
classifiers = [
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python"
],
py_modules = ['tropo'],
)
|
python
|
from heuslertools.tools.measurement import Measurement
import xrayutilities as xu
import warnings
import numpy as np
class RSMMeasurement(Measurement):
"""Object representing rsm measurement.
Parameters
----------
file : str
path of xrdml file.
Attributes
----------
en : str
Energy of xrays.
wavelength : type
wavelength of xrays.
resol : float
resolution in qz.
"""
def __init__(self, file, material=None, geometry='hi_lo', beam_direction=None, surface_normale=None, reflex=None):
self.material = material
self.geometry = geometry
self.beam_direction = beam_direction
self.surface_normale = surface_normale
self.reflex = reflex
if any(elem is not None for elem in [material, geometry, beam_direction, surface_normale, reflex]):
self._create_experiment()
super().__init__(file, "")
def _load_data(self):
data = {}
data['scanmot'], data['Omega'], data['2Theta'], data['Chi'], data['Phi'], data['psd'] = xu.io.getxrdml_scan(self.file, 'om', 'tt', 'c', 'p')
return data
def _generate_names(self):
for name in self.data:
self.names[name] = {"short_name": name, "unit": "a.u."}
def _create_experiment(self):
self.hxrd = xu.HXRD(self.material.Q(self.beam_direction),
self.material.Q(self.surface_normale),
geometry=self.geometry)
def _get_nominal_angle(self, axis):
angles = {}
[angles['Omega'],
angles['Chi'],
angles['Phi'],
angles['2Theta']] = self.hxrd.Q2Ang(self.material.Q(self.reflex))
return angles[axis]
def _get_substrate_peak(self):
# anggridder = xu.FuzzyGridder2D(500, 500)
# anggridder(self.data['Omega'], self.data['2Theta'], self.data['psd'])
# angINT = xu.maplog(anggridder.data.transpose(), 10, 0)
# return anggridder.xaxis[np.unravel_index(angINT.argmax(), angINT.shape)[0]], anggridder.yaxis[np.unravel_index(angINT.argmax(), angINT.shape)[1]]
threshold = 10**int(np.log10(self.data['psd'].max()))
max_values = np.where(self.data['psd'] > threshold)
return np.mean(self.data['Omega'][max_values]), np.mean(self.data['2Theta'][max_values])
def get_angle_data(self, size=300, dynamic_range=10):
        anggridder = xu.FuzzyGridder2D(size, size)
anggridder(self.data['Omega'], self.data['2Theta'], self.data['psd'])
angINT = xu.maplog(anggridder.data.transpose(), dynamic_range, 0)
ticks = []
for i in range(round(dynamic_range)+1):
ticks.append(i)
return anggridder, angINT, ticks
def get_q_data(self, size=300, dynamic_range=10, om_sub=None, tt_sub=None):
sub_peak = self._get_substrate_peak()
        if om_sub is None:
            om_sub = sub_peak[0]
        if tt_sub is None:
            tt_sub = sub_peak[1]
#om_sub, tt_sub = self._get_substrate_peak()
qx, qy, qz = self.hxrd.Ang2Q(self.data['Omega'],
self.data['2Theta'],
delta=[om_sub - self._get_nominal_angle('Omega'),
tt_sub - self._get_nominal_angle('2Theta')])
qgridder = xu.FuzzyGridder2D(size, size)
qgridder(qy, qz, self.data['psd'])
qINT = xu.maplog(qgridder.data.transpose(), dynamic_range, 0)
ticks = []
for i in range(round(dynamic_range)+1):
ticks.append(i)
return qgridder, qINT, ticks
def get_hkl_data(self, size=300, dynamic_range=10, om_sub=None, tt_sub=None):
sub_peak = self._get_substrate_peak()
        if om_sub is None:
            om_sub = sub_peak[0]
        if tt_sub is None:
            tt_sub = sub_peak[1]
#om_sub, tt_sub = self._get_substrate_peak()
h, k, l = self.hxrd.Ang2HKL(self.data['Omega'],
self.data['2Theta'],
delta=[om_sub - self._get_nominal_angle('Omega'),
tt_sub - self._get_nominal_angle('2Theta')],
mat=self.material)
hklgridder = xu.FuzzyGridder2D(size, size)
hklgridder(h, l, self.data['psd'])
hklINT = xu.maplog(hklgridder.data.transpose(), dynamic_range, 0)
ticks = []
for i in range(round(dynamic_range)+1):
ticks.append(i)
return hklgridder, hklINT, ticks
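# --- Hedged usage sketch (illustrative only) ---
# With a measured .xrdml file and a material object, a q-space map could be gridded
# and plotted roughly as follows (file name, material, and reflection are placeholders):
#
#     import matplotlib.pyplot as plt
#     rsm = RSMMeasurement("scan.xrdml", material=some_material, geometry='hi_lo',
#                          beam_direction=[1, 1, 0], surface_normale=[0, 0, 1],
#                          reflex=[0, 0, 4])
#     gridder, intensity, ticks = rsm.get_q_data(size=300, dynamic_range=6)
#     plt.pcolormesh(gridder.xaxis, gridder.yaxis, intensity, shading='auto')
#     plt.colorbar(ticks=ticks)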
|
python
|
import numpy as np
import numpy.linalg as la
def gaussian_estimate(data, label, alpha):
assert len(data) == len(label), "label length mismatch"
assert label.min() == 0, "label should start from 0"
assert label.max() != 0, "label should have multiple"
trim = np.sum(data, axis=0) > 0
data = data[:, trim]
dimension = len(data[0])
classified = [list() for _ in range(label.max() + 1)]
for i in range(len(label)):
classified[label[i]].append(data[i])
for i in range(len(classified)):
classified[i] = np.array(classified[i])
log_priors = np.array([np.log(1.0 * len(classified[i]) / len(label)) for i in range(len(classified))])
means = np.array([np.mean(points, axis=0) for points in classified])
variances = np.array([np.cov(np.transpose(points)) + alpha * np.eye(dimension) for points in classified])
return log_priors, means, variances, trim, dimension
class GaussianClassifier:
def __init__(self, data, label, alpha):
self.log_priors, self.means, self.variances, self.trim, self.d = gaussian_estimate(data, label, alpha)
self.num_classes = len(self.log_priors)
def classify(self, point):
raise Exception("unimplemented")
def classify_all(self, points):
return np.apply_along_axis(lambda p: self.classify(p), 1, points)
@staticmethod
def score(predictions, label):
return 1.0 * np.sum(np.equal(predictions, label)) / len(label)
class LDAClassifier(GaussianClassifier):
def __init__(self, data, label, alpha=1e-6):
GaussianClassifier.__init__(self, data, label, alpha)
self.variance = np.sum(self.variances, axis=0)
del self.variances
self.precisions = np.ndarray(shape=(self.num_classes, self.d), dtype=self.variance.dtype)
for i in range(self.num_classes):
self.precisions[i] = la.solve(self.variance, self.means[i])
del self.variance
def classify(self, point):
point = point[self.trim]
        discriminant = np.ndarray(shape=self.num_classes, dtype=float)
for i in range(self.num_classes):
discriminant[i] = np.dot(point - np.divide(self.means[i], 2),
self.precisions[i]) + self.log_priors[i]
return np.argmax(discriminant)
class QDAClassifier(GaussianClassifier):
def __init__(self, data, label, alpha=1e-6):
GaussianClassifier.__init__(self, data, label, alpha)
        self.log_determinants = np.ndarray(shape=self.num_classes, dtype=float)
for i in range(self.num_classes):
s, logdet = la.slogdet(self.variances[i])
if s == 0:
raise Exception("singular matrix")
self.log_determinants[i] = logdet
self.precisions = np.array([la.inv(self.variances[i]) for i in range(self.num_classes)])
del self.variances
    def classify(self, point):
        point = point[self.trim]  # apply the same feature trimming used during fitting
        discriminant = np.ndarray(shape=(self.num_classes,), dtype=float)
for i in range(self.num_classes):
normalized = point - self.means[i]
discriminant[i] = self.log_priors[i] - self.log_determinants[i] / 2 \
- np.dot(np.dot(normalized, self.precisions[i]), normalized) / 2
return np.argmax(discriminant)
__all__ = ["gaussian_estimate", "LDAClassifier", "QDAClassifier"]
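# --- Hedged usage sketch (illustrative only) ---
# Given a feature matrix X (n_samples x n_features) and integer labels y starting
# at 0, either classifier can be fit and scored like this:
#
#     lda = LDAClassifier(X_train, y_train, alpha=1e-6)
#     predictions = lda.classify_all(X_test)
#     accuracy = GaussianClassifier.score(predictions, y_test)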
|
python
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from ceilometerclient.tests import utils
from ceilometerclient.v2 import options
class BuildUrlTest(utils.BaseTestCase):
def test_one(self):
url = options.build_url('/', [{'field': 'this',
'op': 'gt',
'value': 43}])
self.assertEqual(url, '/?q.field=this&q.op=gt&q.type=&q.value=43')
def test_two(self):
url = options.build_url('/', [{'field': 'this',
'op': 'gt',
'value': 43},
{'field': 'that',
'op': 'lt',
'value': 88}])
ops = 'q.op=gt&q.op=lt'
vals = 'q.value=43&q.value=88'
types = 'q.type=&q.type='
fields = 'q.field=this&q.field=that'
self.assertEqual(url, '/?%s&%s&%s&%s' % (fields, ops, types, vals))
def test_default_op(self):
url = options.build_url('/', [{'field': 'this',
'value': 43}])
self.assertEqual(url, '/?q.field=this&q.op=&q.type=&q.value=43')
def test_one_param(self):
url = options.build_url('/', None, ['period=60'])
self.assertEqual(url, '/?period=60')
def test_two_params(self):
url = options.build_url('/', None, ['period=60',
'others=value'])
self.assertEqual(url, '/?period=60&others=value')
def test_with_data_type(self):
url = options.build_url('/', [{'field': 'f1',
'value': '10',
'type': 'integer'}])
self.assertEqual('/?q.field=f1&q.op=&q.type=integer&q.value=10', url)
class CliTest(utils.BaseTestCase):
def test_one(self):
ar = options.cli_to_array('this<=34')
self.assertEqual(ar, [{'field': 'this', 'op': 'le',
'value': '34', 'type': ''}])
def test_two(self):
ar = options.cli_to_array('this<=34;that!=foo')
self.assertEqual(ar, [{'field': 'this', 'op': 'le',
'value': '34', 'type': ''},
{'field': 'that', 'op': 'ne',
'value': 'foo', 'type': ''}])
def test_negative(self):
ar = options.cli_to_array('this>=-783')
self.assertEqual(ar, [{'field': 'this', 'op': 'ge',
'value': '-783', 'type': ''}])
def test_float(self):
ar = options.cli_to_array('this<=283.347')
self.assertEqual(ar, [{'field': 'this',
'op': 'le', 'value': '283.347',
'type': ''}])
def test_comma(self):
ar = options.cli_to_array('this=2.4,fooo=doof')
self.assertEqual([{'field': 'this',
'op': 'eq',
'value': '2.4,fooo=doof',
'type': ''}],
ar)
def test_special_character(self):
ar = options.cli_to_array('key~123=value!123')
self.assertEqual([{'field': 'key~123',
'op': 'eq',
'value': 'value!123',
'type': ''}],
ar)
def _do_test_typed_float_op(self, op, op_str):
ar = options.cli_to_array('that%sfloat::283.347' % op)
self.assertEqual([{'field': 'that',
'type': 'float',
'value': '283.347',
'op': op_str}],
ar)
def test_typed_float_eq(self):
self._do_test_typed_float_op('<', 'lt')
def test_typed_float_le(self):
self._do_test_typed_float_op('<=', 'le')
def test_typed_string_whitespace(self):
ar = options.cli_to_array('state=string::insufficient data')
self.assertEqual([{'field': 'state',
'op': 'eq',
'type': 'string',
'value': 'insufficient data'}],
ar)
def test_typed_string_whitespace_complex(self):
ar = options.cli_to_array(
'that>=float::99.9999;state=string::insufficient data'
)
self.assertEqual([{'field': 'that',
'op': 'ge',
'type': 'float',
'value': '99.9999'},
{'field': 'state',
'op': 'eq',
'type': 'string',
'value': 'insufficient data'}],
ar)
def test_invalid_operator(self):
self.assertRaises(ValueError, options.cli_to_array,
'this=2.4;fooo-doof')
def test_with_dot(self):
ar = options.cli_to_array('metadata.this<=34')
self.assertEqual(ar, [{'field': 'metadata.this',
'op': 'le', 'value': '34',
'type': ''}])
def test_single_char_field_or_value(self):
ar = options.cli_to_array('m<=34;large.thing>s;x!=y')
self.assertEqual([{'field': 'm',
'op': 'le',
'value': '34',
'type': ''},
{'field': 'large.thing',
'op': 'gt',
'value': 's',
'type': ''},
{'field': 'x',
'op': 'ne',
'value': 'y',
'type': ''}],
ar)
def test_without_data_type(self):
ar = options.cli_to_array('hostname=localhost')
self.assertEqual(ar, [{'field': 'hostname',
'op': 'eq',
'value': 'localhost',
'type': ''}])
def test_with_string_data_type(self):
ar = options.cli_to_array('hostname=string::localhost')
self.assertEqual(ar, [{'field': 'hostname',
'op': 'eq',
'type': 'string',
'value': 'localhost'}])
def test_with_int_data_type(self):
ar = options.cli_to_array('port=integer::1234')
self.assertEqual(ar, [{'field': 'port',
'op': 'eq',
'type': 'integer',
'value': '1234'}])
def test_with_bool_data_type(self):
ar = options.cli_to_array('port=boolean::true')
self.assertEqual(ar, [{'field': 'port',
'op': 'eq',
'type': 'boolean',
'value': 'true'}])
def test_with_float_data_type(self):
ar = options.cli_to_array('average=float::1234.5678')
self.assertEqual(ar, [{'field': 'average',
'op': 'eq',
'type': 'float',
'value': '1234.5678'}])
def test_with_datetime_data_type(self):
ar = options.cli_to_array('timestamp=datetime::sometimestamp')
self.assertEqual(ar, [{'field': 'timestamp',
'op': 'eq',
'type': 'datetime',
'value': 'sometimestamp'}])
def test_with_incorrect_type(self):
ar = options.cli_to_array('timestamp=invalid::sometimestamp')
self.assertEqual(ar, [{'field': 'timestamp',
'op': 'eq',
'type': '',
'value': 'invalid::sometimestamp'}])
def test_with_single_colon(self):
ar = options.cli_to_array('timestamp=datetime:sometimestamp')
self.assertEqual(ar, [{'field': 'timestamp',
'op': 'eq',
'type': '',
'value': 'datetime:sometimestamp'}])
def test_missing_key(self):
self.assertRaises(ValueError, options.cli_to_array,
'average=float::1234.0;>=string::hello')
def test_missing_value(self):
self.assertRaises(ValueError, options.cli_to_array,
'average=float::1234.0;house>=')
def test_timestamp_value(self):
ar = options.cli_to_array(
'project=cow;timestamp>=datetime::2014-03-11T16:02:58'
)
self.assertEqual([{'field': 'project',
'op': 'eq',
'type': '',
'value': 'cow'},
{'field': 'timestamp',
'op': 'ge',
'type': 'datetime',
'value': '2014-03-11T16:02:58'}],
ar)
|
python
|
# coding=utf-8
"""
Default Django settings for all Work4 projects.
Your own app should import all attributes from this file.
"""
from __future__ import unicode_literals
import os
import sys
from tzlocal import get_localzone
def env_to_bool(val):
"""Use this when parsing environment variables for booleans as it will properly consider 'FALSE' to be False."""
if isinstance(val, basestring):
return val.lower() in ("true", "yes", "1")
return bool(val)
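# Hedged example: env_to_bool(os.environ.get('DEBUG', 'false')) is False for
# 'false', 'FALSE' or '0', and True for 'true', 'yes' or '1'.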
def init_settings(app_name, debug):
"""
    Initializes default settings. Should be called in the project's settings.py, passing any custom parameters that
might be used.
"""
# Determine project's root directory based on the path to the module
# referenced by app_name, in order to handle both standalone and subtree
# scenarios for pywork4core
project_root_dir = os.path.dirname(__import__(app_name).__path__[0])
app_root_dir = os.path.join(project_root_dir, app_name)
temp_dir = os.path.join(project_root_dir, 'temp')
# Needs its own line to avoid pragma no cover bleeding into other statements
log_level = 'DEBUG' if debug else 'INFO' # pragma: no cover
return {
'APP_NAME': app_name,
'DEBUG': debug,
'INSTALLED_APPS': ('django.contrib.contenttypes', 'django_app', 'django_nose', 'django_extensions', app_name),
# Directories
'PROJECT_ROOT_DIR': project_root_dir,
'APP_ROOT_DIR': app_root_dir,
'TEMP_DIR': temp_dir,
'VENV_DIR': os.path.join(project_root_dir, 'venv'),
# Timezone management: Use operating system's timezone
'TIME_ZONE': get_localzone().zone,
'USE_TZ': True,
# Language
'LANGUAGE_CODE': 'en-us',
'USE_I18N': False,
'USE_L10N': False,
# Templating
'TEMPLATES': [{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'APP_DIRS': True,
'OPTIONS': {
'debug': debug,
'context_processors': []
}
}],
# Databases
'DATABASES': {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': temp_dir + '/db.sqlite'
}
},
'MONGOENGINE': os.environ.get('MONGOENGINE', {
'db': app_name,
'host': 'localhost',
'port': 27017,
'username': '',
'password': ''
}),
###########
# TESTING #
###########
'TESTING': len(sys.argv) > 1 and sys.argv[1] == 'test',
'TEST_RUNNER': 'django_nose.NoseTestSuiteRunner',
# During tests, Nose captures all logs during tests
'NOSE_ARGS': ['--logging-clear-handlers'],
###########
# LOGGING #
###########
'LOG_LEVEL': log_level,
'LOGGING': {
'version': 1,
'formatters': {
'standard': {
'format': "%(levelname)s [%(module)s] %(message)s"
},
},
'handlers': {
'console': {
'level': log_level,
'class': 'logging.StreamHandler',
'formatter': 'standard',
'stream': sys.stdout
}
},
'loggers': {
'django': {
'level': 'INFO', # We don't want Django's debug, even in development mode.
'handlers': [], # Don't handle logs at this level,
'propagate': True # But bubble up to the `root` logger.
},
},
'root': {
'handlers': ['console'],
'level': log_level
}
}
}
def init_web_settings( # pylint: disable=too-many-arguments
app_name, debug, sentry_dsn, early_middleware=(), late_middleware=(), context_processors=()):
"""
Appends extra Django settings useful specifically for web apps, such as static files handling, etc.
:param sentry_dsn: a string containing the DSN to configure the raven client. Projects defining
this parameter MUST add `raven` to their pip requirements.
    :return: dict of Django settings
"""
# Load settings dict into local scope for concision and coherence
settings = init_settings(app_name, debug)
staticfiles_dirs = (os.path.join(settings['APP_ROOT_DIR'], 'static'),)
# Needs its own line to avoid pragma no cover bleeding into other statements
staticfiles_storage = 'whitenoise.storage.CompressedManifestStaticFilesStorage'\
if not settings['TESTING'] \
else 'django.contrib.staticfiles.storage.StaticFilesStorage' # pragma: no cover
if context_processors:
settings['TEMPLATES'][0]['OPTIONS']['context_processors'].extend(context_processors)
if debug: # pragma: no cover
settings['TEMPLATES'][0]['OPTIONS']['context_processors'].append("django.template.context_processors.debug")
settings['TEMPLATES'][0]['OPTIONS']['context_processors'].extend([
"django.template.context_processors.i18n",
"django.template.context_processors.media",
"django.template.context_processors.static",
"django.template.context_processors.tz",
"django.contrib.messages.context_processors.messages",
"django.template.context_processors.request"])
settings.update({
# Installed apps
'INSTALLED_APPS': ('whitenoise.runserver_nostatic', 'django.contrib.staticfiles') + settings['INSTALLED_APPS'],
# Routing
'ROOT_URLCONF': app_name + '.urls',
# Static files
'STATIC_ROOT': 'staticfiles',
'STATIC_URL': '/static/',
'STATICFILES_DIRS': staticfiles_dirs,
# Don't use Whitenoise's static files storage in testing as it requires running collectstatic
'STATICFILES_STORAGE': staticfiles_storage,
'WHITENOISE_ROOT': staticfiles_dirs[0] + '/files',
'WHITENOISE_ALLOW_ALL_ORIGINS': False
})
##########
# SENTRY #
##########
settings['RAVEN_CONFIG'] = {} # To be extended by the parent project (May remain empty if Sentry is not enabled)
if sentry_dsn: # pragma: no cover
settings['INSTALLED_APPS'] = ('raven.contrib.django.raven_compat',) + settings['INSTALLED_APPS']
settings['LOGGING']['handlers']['sentry'] = {
'level': 'ERROR',
'class': 'raven.contrib.django.raven_compat.handlers.SentryHandler',
}
settings['LOGGING']['root']['handlers'].append('sentry')
# Raven shall only be a dependency of projects that define sentry_dsn
import raven
settings['RAVEN_CONFIG']['dsn'] = sentry_dsn
try:
settings['RAVEN_CONFIG']['release'] = raven.fetch_git_sha(settings['PROJECT_ROOT_DIR'])
except raven.exceptions.InvalidGitRepository:
# Probably not a git repo (on heroku?)
pass
settings['MIDDLEWARE'] = _compute_middleware_settings(
early_middleware, late_middleware, use_sentry=bool(sentry_dsn))
return settings
def _compute_middleware_settings(early=(), late=(), use_sentry=False):
"""This method takes care of inserting the app's middlewares without conflicting with Sentry's positioning"""
return (('raven.contrib.django.raven_compat.middleware.SentryResponseErrorIdMiddleware',) if use_sentry else ()) \
+ early + ('django.middleware.common.CommonMiddleware',) + late
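# --- Hedged usage sketch (illustrative only) ---
# A project's settings.py would typically load these defaults into its module
# globals; the module path below is a placeholder:
#
#     from work4core.settings import init_web_settings
#     globals().update(init_web_settings('myapp', debug=env_to_bool(os.environ.get('DEBUG', 'false')),
#                                        sentry_dsn=os.environ.get('SENTRY_DSN')))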
|
python
|
""" This will module contains the setup_flask function as well as registry functions """
__author__ = 'Ben Christenson'
__date__ = "9/15/15"
import os
import inspect
import sys
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
import flask_login
from flask_login import LoginManager
from flask.blueprints import Blueprint
import sqlalchemy
from sqlalchemy import create_engine
from seaborn.logger import log
from seaborn.flask.models import ApiModel
from seaborn.flask.blueprint import BlueprintBinding
from seaborn.flask.blueprint.python_bindings import create_python_blueprint_bindings
from seaborn.flask.blueprint.unity_bindings import create_unity_blueprint_bindings
from seaborn.flask import decorators
from seaborn.timestamp import set_timezone_aware
class SetupFlask(object):
def __init__(self, configuration):
self.db = None
self.configuration = configuration
self.endpoints = None
setattr(flask_login, 'COOKIE_NAME', 'token')
self.app = Flask(__name__, template_folder=self.configuration.TEMPLATE_FOLDER,
static_folder=self.configuration.STATIC_FOLDER)
self.app.config.from_object(self.configuration)
self.app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True
set_timezone_aware(self.configuration.timezone_aware)
self._setup_database()
self._setup_debug_toolbar()
decorators.register(self.db, self.configuration.debug, '%s/' % self.configuration.flask_folder)
def setup_run(self, endpoints):
"""
This will setup flask and all the internals
:return: obj of the flask run
"""
try:
self.setup_endpoints(endpoints)
self._setup_login_manager()
self._setup_proxy_conn()
self._test_database()
run = self._setup_gevent() or self._run_server
log.trace("Done with App Setup")
return run
except Exception as ex:
log.error("Exception:: %s" % ex)
raise
def _setup_database(self):
log.trace("Creating Database Connection %s" % self.app.config['SQLALCHEMY_DATABASE_URI'])
self.db = SQLAlchemy(self.app)
def _setup_gevent(self):
if self.configuration.gevent and sys.version_info[0] == 2:
log.trace("Setup Gevents for multithreading support")
from gevent import monkey
monkey.patch_all()
from gevent import wsgi
server = wsgi.WSGIServer((self.configuration.ip_address, self.configuration.SERVER_PORT), self.app)
return server.serve_forever
def _test_database(self):
User = self.endpoints.User
if self.configuration.debug:
log.trace("Inspected Database for tables")
engine = create_engine(self.configuration.SQLALCHEMY_DATABASE_URI)
inspector = sqlalchemy.inspect(engine)
if not inspector.get_table_names():
self.initialize_database()
self.initialize_users()
if User.query.all() == []:
self.initialize_database()
self.initialize_users()
def _setup_proxy_conn(self):
if self.configuration.setup_proxy_conn:
from seaborn.flask.blueprint import ProxyEndpoint
conn = ProxyEndpoint()
log.trace("Setup Proxy Connection for internal api calls %s" % id(conn))
blue_prints = [getattr(self.endpoints, name) for name in dir(self.endpoints) if
isinstance(getattr(self.endpoints, name), BlueprintBinding)]
for blue_print in blue_prints:
blue_print.add_proxy_route(conn, True)
def _setup_login_manager(self):
"""
:return: None
"""
log.trace("Setup Login Manager")
# cookies are handled in _load_user
login_manager = LoginManager()
login_manager.session_protection = 'strong'
login_manager.login_view = 'login'
login_manager.init_app(self.app)
@login_manager.user_loader
def load_user(user_id):
return self.endpoints.User.query.get(user_id)
def setup_endpoints(self, endpoints):
"""
:param endpoints:
:return: None
"""
self.endpoints = endpoints
log.trace("Registering Blueprint Endpoints")
for name in dir(self.endpoints):
blue_print = getattr(self.endpoints, name)
if isinstance(blue_print, Blueprint):
try:
self.app.register_blueprint(blue_print)
except Exception as e:
log.error("Problem with blueprint %s" % blue_print)
raise
def _setup_debug_toolbar(self):
if self.configuration.DEBUG_TOOLBAR:
log.trace("Setup Debug Toolbar")
from flask_debugtoolbar import DebugToolbarExtension
DebugToolbarExtension(self.app)
def _run_server(self):
"""
:return: None
"""
log.trace("Starting App Run")
self.app.run(host=self.configuration.ip_address, port=self.configuration.SERVER_PORT)
def initialize_database(self):
"""
WARNING: This will reinitialize the database by dropping all tables and re-creating them.
:return: None
"""
log.warning("Initializing Database")
try:
self.db.drop_all()
except:
pass
self.db.create_all()
def initialize_users(self, admin_password=None, super_password=None, demo_password=None, full_name=""):
"""
:param admin_password: str of the password for the admin account
:param super_password: str of the password for the super-user account
:param demo_password: str of the password for the demo account
:param full_name: str of full name to give to each of the admin, super, and demo accounts
:return: None
"""
admin_password = admin_password or self.configuration.admin_password
super_password = super_password or self.configuration.super_password
demo_password = demo_password or self.configuration.demo_password
admin_update = self.endpoints.user.views.admin_update
base_domain = '.'.join(self.configuration.domain.split('.')[-2:])
demo_user = admin_update._undecorated(username='Demo-User', email='demo@%s' % base_domain,
full_name=full_name, password=demo_password, auth_level='Demo')
super_user = admin_update._undecorated(username='Super-User', email='super@%s' % base_domain,
full_name=full_name, password=super_password, auth_level='Superuser')
admin_user = admin_update._undecorated(username='Admin-User', email='admin@%s' % base_domain,
full_name=full_name, password=admin_password, auth_level='Admin')
self.db.session.add_all([demo_user, super_user, admin_user])
self.db.session.commit()
def create_python_bindings(self):
"""
"""
log.warning('Creating python API bindings')
create_python_blueprint_bindings(
path='%s/bindings/python_bindings' % self.configuration.flask_folder,
blue_prints=[getattr(self.endpoints, name) for name in dir(self.endpoints) if
isinstance(getattr(self.endpoints, name), BlueprintBinding)],
models=[getattr(self.endpoints, name) for name in dir(self.endpoints) if
inspect.isclass(getattr(self.endpoints, name)) and
issubclass(getattr(self.endpoints, name), ApiModel)])
def create_unity_bindings(self):
"""
"""
log.warning('Creating unity API bindings')
vars = dir(self.endpoints)
for unity_path in self.configuration.unity_folder:
if os.path.exists(unity_path):
create_unity_blueprint_bindings(
path=unity_path,
blue_prints=[getattr(self.endpoints, name) for name in vars if
isinstance(getattr(self.endpoints, name), BlueprintBinding)],
models=[getattr(self.endpoints, name) for name in vars if
inspect.isclass(getattr(self.endpoints, name)) and
issubclass(getattr(self.endpoints, name), ApiModel)],
base_uri=self.configuration.domain,
clear=False,
class_members=[])
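# --- Hedged usage sketch (illustrative only) ---
# A launch script might wire this up roughly as follows; `configuration` and
# `endpoints` are project-specific modules and are placeholders here:
#
#     setup = SetupFlask(configuration)
#     run = setup.setup_run(endpoints)
#     run()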
|
python
|
import wasm3 as m3
import pytest
from helpers import wat2wasm
MV_SWAP_WASM = wat2wasm("""
(module
(func (export "swap") (param i32 i32) (result i32 i32)
(get_local 1)
(get_local 0)
)
)
""")
MV_IMPORT_WASM = wat2wasm("""
(module
(type $t0 (func (param i32 i64) (result i64 i32)))
(import "env" "swap" (func $env.swap (type $t0)))
(func (export "swap") (type $t0)
(get_local 0)
(get_local 1)
(call $env.swap)
)
)
""")
def test_multivalue_swap():
env = m3.Environment()
rt = env.new_runtime(64)
mod = env.parse_module(MV_SWAP_WASM)
rt.load(mod)
swap = rt.find_function('swap')
assert swap(1, 2) == (2, 1)
assert swap(2, 1) == (1, 2)
assert swap.call_argv('1', '2') == (2, 1)
assert swap.call_argv('2', '1') == (1, 2)
def test_multivalue_imported():
env = m3.Environment()
rt = env.new_runtime(64)
mod = env.parse_module(MV_IMPORT_WASM)
rt.load(mod)
mod.link_function("env", "swap", "Ii(iI)", lambda a,b: (b,a))
swap = rt.find_function('swap')
assert swap(1, 2) == (2, 1)
assert swap(2, 1) == (1, 2)
|
python
|
""" # lint-amnesty, pylint: disable=cyclic-import
XFields for video module.
"""
import datetime
from xblock.fields import Boolean, DateTime, Dict, Float, List, Scope, String
from xmodule.fields import RelativeTime
# Make '_' a no-op so we can scrape strings. Using lambda instead of
# `django.utils.translation.ugettext_noop` because Django cannot be imported in this file
_ = lambda text: text
class VideoFields:
"""Fields for `VideoBlock`."""
display_name = String(
help=_("The display name for this component."),
display_name=_("Component Display Name"),
default="Video",
scope=Scope.settings
)
saved_video_position = RelativeTime(
help=_("Current position in the video."),
scope=Scope.user_state,
default=datetime.timedelta(seconds=0)
)
# TODO: This should be moved to Scope.content, but this will
# require data migration to support the old video module.
youtube_id_1_0 = String(
help=_("Optional, for older browsers: the YouTube ID for the normal speed video."),
display_name=_("YouTube ID"),
scope=Scope.settings,
default="3_yD_cEKoCk"
)
youtube_id_0_75 = String(
help=_("Optional, for older browsers: the YouTube ID for the .75x speed video."),
display_name=_("YouTube ID for .75x speed"),
scope=Scope.settings,
default=""
)
youtube_id_1_25 = String(
help=_("Optional, for older browsers: the YouTube ID for the 1.25x speed video."),
display_name=_("YouTube ID for 1.25x speed"),
scope=Scope.settings,
default=""
)
youtube_id_1_5 = String(
help=_("Optional, for older browsers: the YouTube ID for the 1.5x speed video."),
display_name=_("YouTube ID for 1.5x speed"),
scope=Scope.settings,
default=""
)
start_time = RelativeTime( # datetime.timedelta object
help=_(
"Time you want the video to start if you don't want the entire video to play. "
"Not supported in the native mobile app: the full video file will play. "
"Formatted as HH:MM:SS. The maximum value is 23:59:59."
),
display_name=_("Video Start Time"),
scope=Scope.settings,
default=datetime.timedelta(seconds=0)
)
end_time = RelativeTime( # datetime.timedelta object
help=_(
"Time you want the video to stop if you don't want the entire video to play. "
"Not supported in the native mobile app: the full video file will play. "
"Formatted as HH:MM:SS. The maximum value is 23:59:59."
),
display_name=_("Video Stop Time"),
scope=Scope.settings,
default=datetime.timedelta(seconds=0)
)
#front-end code of video player checks logical validity of (start_time, end_time) pair.
download_video = Boolean(
help=_("Allow students to download versions of this video in different formats if they cannot use the edX video"
" player or do not have access to YouTube. You must add at least one non-YouTube URL "
"in the Video File URLs field."),
display_name=_("Video Download Allowed"),
scope=Scope.settings,
default=False
)
html5_sources = List(
help=_("The URL or URLs where you've posted non-YouTube versions of the video. Each URL must end in .mpeg,"
" .mp4, .ogg, or .webm and cannot be a YouTube URL. (For browser compatibility, we strongly recommend"
" .mp4 and .webm format.) Students will be able to view the first listed video that's compatible with"
" the student's computer. To allow students to download these videos, "
"set Video Download Allowed to True."),
display_name=_("Video File URLs"),
scope=Scope.settings,
)
track = String(
help=_("By default, students can download an .srt or .txt transcript when you set Download Transcript "
"Allowed to True. If you want to provide a downloadable transcript in a different format, we recommend "
"that you upload a handout by using the Upload a Handout field. If this isn't possible, you can post a "
"transcript file on the Files & Uploads page or on the Internet, and then add the URL for the "
"transcript here. Students see a link to download that transcript below the video."),
display_name=_("Downloadable Transcript URL"),
scope=Scope.settings,
default=''
)
download_track = Boolean(
help=_("Allow students to download the timed transcript. A link to download the file appears below the video."
" By default, the transcript is an .srt or .txt file. If you want to provide the transcript for "
"download in a different format, upload a file by using the Upload Handout field."),
display_name=_("Download Transcript Allowed"),
scope=Scope.settings,
default=False
)
# `sub` is deprecated field and should not be used in future. Now, transcripts are primarily handled in VAL and
# backward compatibility for the video modules already using this field has been ensured.
sub = String(
help=_("The default transcript for the video, from the Default Timed Transcript field on the Basic tab. "
"This transcript should be in English. You don't have to change this setting."),
display_name=_("Default Timed Transcript"),
scope=Scope.settings,
default=""
)
show_captions = Boolean(
help=_("Specify whether the transcripts appear with the video by default."),
display_name=_("Show Transcript"),
scope=Scope.settings,
default=True
)
# Data format: {'de': 'german_translation', 'uk': 'ukrainian_translation'}
transcripts = Dict(
help=_("Add transcripts in different languages."
" Click below to specify a language and upload an .srt transcript file for that language."),
display_name=_("Transcript Languages"),
scope=Scope.settings,
default={}
)
transcript_language = String(
help=_("Preferred language for transcript."),
display_name=_("Preferred language for transcript"),
scope=Scope.preferences,
default="en"
)
transcript_download_format = String(
help=_("Transcript file format to download by user."),
scope=Scope.preferences,
values=[
# Translators: This is a type of file used for captioning in the video player.
{"display_name": _("SubRip (.srt) file"), "value": "srt"},
{"display_name": _("Text (.txt) file"), "value": "txt"}
],
default='srt',
)
speed = Float(
help=_("The last speed that the user specified for the video."),
scope=Scope.user_state
)
global_speed = Float(
help=_("The default speed for the video."),
scope=Scope.preferences,
default=1.0
)
auto_advance = Boolean(
help=_("Specify whether to advance automatically to the next unit when the video ends."),
scope=Scope.preferences,
# The default is True because this field only has an effect when auto-advance controls are enabled
# (globally enabled through feature flag and locally enabled through course setting); in that case
# it's good to start auto-advancing and let the student disable it, instead of the other way around
# (requiring the user to enable it). When auto-advance controls are hidden, this field won't be used.
default=True,
)
youtube_is_available = Boolean(
help=_("Specify whether YouTube is available for the user."),
scope=Scope.user_info,
default=True
)
handout = String(
help=_("Upload a handout to accompany this video. Students can download the handout by "
"clicking Download Handout under the video."),
display_name=_("Upload Handout"),
scope=Scope.settings,
)
only_on_web = Boolean(
help=_(
"Specify whether access to this video is limited to browsers only, or if it can be "
"accessed from other applications including mobile apps."
),
display_name=_("Video Available on Web Only"),
scope=Scope.settings,
default=False
)
edx_video_id = String(
help=_("If you were assigned a Video ID by edX for the video to play in this component, enter the ID here."
" In this case, do not enter values in the Default Video URL, the Video File URLs, "
"and the YouTube ID fields. If you were not assigned a Video ID,"
" enter values in those other fields and ignore this field."),
display_name=_("Video ID"),
scope=Scope.settings,
default="",
)
bumper_last_view_date = DateTime(
display_name=_("Date of the last view of the bumper"),
scope=Scope.preferences,
)
bumper_do_not_show_again = Boolean(
display_name=_("Do not show bumper again"),
scope=Scope.preferences,
default=False,
)
|
python
|
"""Ingest by directly writing a ZIP file to archive storage.
"""
from __future__ import print_function
import sys
import os
import os.path
import re
import time
from threading import Thread
if sys.version_info < (3, 0):
import Queue as queue
else:
import queue
import zlib
import logging
import pytest
import icat
import icat.config
from icat.query import Query
from icat.ids import DataSelection
icat.hzb = pytest.importorskip("icat.hzb") # This test module is HZB specific
from icat.hzb.proposal import ProposalNo
from helper import MemorySpace, DatasetBase
from conftest import getConfig, callscript
log = logging.getLogger("test.%s" % __name__)
# ============================ testdata ============================
testProposalNo = "12100409-ST-1.1-P"
testDatasetPrefix = "test_ingestarchive"
# ============================= helper =============================
@pytest.fixture(scope="module")
def icatconfig(setupicat, testConfig, request):
client, conf, config = getConfig(ids="mandatory")
client.login(conf.auth, conf.credentials)
mainbase = request.config.getini('mainstoragebase')
archivebase = request.config.getini('archivestoragebase')
conf.cmdargs.append("--mainStorageBase=%s" % mainbase)
conf.cmdargs.append("--archiveStorageBase=%s" % archivebase)
incomingbase = request.config.getini('incomingbase')
proposaldir = os.path.join(incomingbase, testProposalNo.replace('/', '_'))
os.mkdir(proposaldir)
conf.proposaldir = proposaldir
def cleanup():
os.rmdir(proposaldir)
client.logout()
if testConfig.cleanup:
request.addfinalizer(cleanup)
return (client, conf, config)
@pytest.fixture(scope="function")
def dataset(request, icatconfig, testConfig):
client, conf, _ = icatconfig
assert request.node.name.startswith("test_")
name = testDatasetPrefix + request.node.name[4:]
name = re.sub(r'[\[\]]', '_', name)
dataset = Dataset(conf.proposaldir, testProposalNo, name)
def cleanup():
dataset.cleanup(client)
if testConfig.cleanup:
request.addfinalizer(cleanup)
return dataset
class Datafile(object):
def __init__(self, directory, fname, size):
self.directory = directory
self.fname = fname
self.size = size
self.path = os.path.join(self.directory, self.fname)
self.crc32 = 0
self._create()
def _create(self):
size = self.size
chunksize = 8192
with open("/dev/urandom", "rb") as infile:
with open(self.path, "wb") as outfile:
while size > 0:
if chunksize > size:
chunksize = size
chunk = infile.read(chunksize)
self.crc32 = zlib.crc32(chunk, self.crc32)
outfile.write(chunk)
size -= len(chunk)
def getcrc(self):
return "%x" % (self.crc32 & 0xffffffff)
def unlink(self):
os.unlink(self.path)
class Dataset(DatasetBase):
fileCount = 32
fileSize = MemorySpace("10 MiB")
def __init__(self, incomingdir, proposal, name):
self.proposal = ProposalNo.parse(proposal)
self.name = name
self.files = []
self.datasetdir = os.path.join(incomingdir, name)
os.mkdir(self.datasetdir)
for n in range(1,self.fileCount+1):
fname = "test_%05d.dat" % n
datafile = Datafile(self.datasetdir, fname, self.fileSize)
self.files.append(datafile)
self.size = self.fileCount*self.fileSize
self.dataset = None
def cleanup(self, client):
super(Dataset, self).cleanup(client)
for f in self.files:
f.unlink()
os.rmdir(self.datasetdir)
def uploadFiles(self, client):
raise RuntimeError("This Dataset class does not support upload.")
def ingest(self, conf):
args = conf.cmdargs + [str(self.proposal), self.name]
args.extend(f.path for f in self.files)
log.info("%s: call ingest script", self.name)
callscript("addfile-archive.py", args)
log.info("%s: ingest complete", self.name)
def search(self, client):
query = icat.query.Query(client, "Dataset",
conditions={"name":"= '%s'" % self.name},
includes=["datafiles"])
cond = self.proposal.as_conditions()
query.addConditions({ "investigation.%s" % a: c
for (a, c) in cond.items() })
return client.assertedSearch(query)[0]
def verify(self, client):
self.dataset = self.search(client)
assert len(self.dataset.datafiles) == len(self.files), \
"wrong number of datafiles"
dfidx = dict([ (df.name,df) for df in self.dataset.datafiles ])
for f in self.files:
assert f.fname in dfidx
df = dfidx[f.fname]
assert df.location
assert df.fileSize == f.size
assert df.checksum == f.getcrc()
# ============================= tests ==============================
def test_ingest(icatconfig, dataset):
"""Ingest a dataset by directly writing to archive storage.
"""
client, conf, _ = icatconfig
dataset.ingest(conf)
dataset.verify(client)
dataset.download(client)
def test_ingest_and_upload(icatconfig, dataset):
"""Try to upload a file to the same dataset while we are ingesting it.
"""
client, conf, config = icatconfig
resultQueue = queue.Queue()
def upload(conf, client_kwargs, dataset):
try:
# Note: I'm not sure whether Suds is thread safe. Therefore
# use a separate local client object in each thread.
client = icat.Client(conf.url, **client_kwargs)
client.login(conf.auth, conf.credentials)
name = dataset.name
datafileFormat = dataset.getDatafileFormat(client)
f = Datafile(dataset.datasetdir, "upload.dat", 32)
while True:
# Get the dataset object from ICAT, continue to retry
# while it does not exist yet.
try:
ds = dataset.search(client)
log.info("Upload: dataset %s found.", name)
break
except icat.SearchAssertionError:
log.info("Upload: dataset %s not found (yet).", name)
time.sleep(0.2)
continue
selection = DataSelection([ds])
datafile = client.new("datafile", name=f.fname,
dataset=ds, datafileFormat=datafileFormat)
while True:
# Do the upload. This may (or even should) fail due to
# the dataset not online. The error triggers a restore,
# so we continue to retry until the restore has been
# completed.
try:
df = client.putData(f.path, datafile)
log.info("Upload to dataset %s succeeded.", name)
break
except icat.IDSDataNotOnlineError:
status = client.ids.getStatus(selection)
log.info("Upload: dataset %s is %s.", name, status)
time.sleep(0.2)
continue
resultQueue.put(f)
client.logout()
except Exception as err:
resultQueue.put(err)
def ingest(conf, dataset):
try:
dataset.ingest(conf)
resultQueue.put("Ok")
except Exception as err:
resultQueue.put(err)
tul = Thread(target=upload, args=(conf, config.client_kwargs, dataset))
tin = Thread(target=ingest, args=(conf, dataset))
threads = [tul, tin]
for t in threads:
t.start()
for t in threads:
t.join()
c = 0
while True:
try:
r = resultQueue.get(block=False)
c += 1
if isinstance(r, Datafile):
dataset.files.append(r)
dataset.fileCount += 1
elif isinstance(r, str):
pass
else:
raise r
except queue.Empty:
break
assert c == 2
dataset.verify(client)
dataset.download(client)
@pytest.mark.parametrize("delay", [False, True])
def test_ingest_existing(icatconfig, dataset, delay):
"""Try to ingest a dataset that already exist. This must yield an
error and must not in any way damage the already existing dataset.
"""
client, conf, _ = icatconfig
query = icat.query.Query(client, "Investigation",
conditions=dataset.proposal.as_conditions())
investigation = client.assertedSearch(query)[0]
old_dataset = DatasetBase(client, investigation, dataset.name,
fileCount=4, fileSize=MemorySpace("10 KiB"))
old_dataset.uploadFiles(client)
if delay:
# Request archive of the old dataset and wait until it is
# written to archive storage and removed from main storage.
client.ids.archive(DataSelection([old_dataset.dataset]))
time.sleep(90)
# OSError is raised if the ZIP file in archive storage exists,
# RuntimeError is raised if the directory in main storage exists,
    # icat.ICATObjectExistsError is raised if neither file exists, but
    # the dataset is already present in ICAT.
with pytest.raises( (OSError, RuntimeError, icat.ICATObjectExistsError) ):
dataset.ingest(conf)
old_dataset.download(client)
old_dataset.cleanup()
|
python
|
from rest_framework import permissions
class UpdateOwnProfile(permissions.BasePermission):
    """Allow users to edit only their own profile."""
    def has_object_permission(self, request, view, obj):
        # Read-only methods are always allowed.
        if request.method in permissions.SAFE_METHODS:
            return True
        return obj.id == request.user.id


class UpdateStatus(permissions.BasePermission):
    """Allow users to update only their own status."""
    def has_object_permission(self, request, view, obj):
        if request.method in permissions.SAFE_METHODS:
            return True
        # Write access only for the author of the status.
        return obj.user_profile == request.user
|
python
|
import collections.abc
import logging
import string
from abc import abstractmethod
from typing import Callable, List
from flask_sqlalchemy import SQLAlchemy
from retrying import retry
from sqlalchemy.exc import IntegrityError
from sqlalchemy.orm.exc import FlushError, NoResultFound
from profiles.exceptions import OrcidTokenNotFound, ProfileNotFound
from profiles.models import EmailAddress, ID_LENGTH, OrcidToken, Profile
from profiles.types import CanBeCleared
from profiles.utilities import generate_random_string
LOGGER = logging.getLogger(__name__)
class OrcidTokens(CanBeCleared):
@abstractmethod
def add(self, orcid_token: OrcidToken) -> None:
raise NotImplementedError
@abstractmethod
def get(self, orcid: str) -> OrcidToken:
raise NotImplementedError
@abstractmethod
def remove(self, orcid: str) -> None:
raise NotImplementedError
class Profiles(CanBeCleared, collections.abc.Sized):
@abstractmethod
def add(self, profile: Profile) -> Profile:
raise NotImplementedError
@abstractmethod
def get(self, profile_id: str) -> Profile:
raise NotImplementedError
@abstractmethod
def get_by_orcid(self, orcid: str) -> Profile:
raise NotImplementedError
@abstractmethod
def get_by_email_address(self, *email_addresses: str) -> Profile:
raise NotImplementedError
@abstractmethod
def next_id(self) -> str:
raise NotImplementedError
@abstractmethod
def list(self, limit: int = None, offset: int = 0, desc: bool = False) -> List[Profile]:
raise NotImplementedError
class SQLAlchemyOrcidTokens(OrcidTokens):
def __init__(self, db: SQLAlchemy) -> None:
self.db = db
def add(self, orcid_token: OrcidToken) -> None:
self.db.session.add(orcid_token)
def get(self, orcid: str) -> OrcidToken:
try:
return self.db.session.query(OrcidToken).filter_by(orcid=orcid).one()
except NoResultFound as exception:
msg = 'ORCID token for ORCID {} not found'.format(orcid)
LOGGER.info(msg=msg)
raise OrcidTokenNotFound(msg) from exception
def clear(self) -> None:
self.db.session.query(OrcidToken).delete()
def remove(self, orcid: str) -> None:
try:
orcid_token = self.db.session.query(OrcidToken).filter_by(orcid=orcid).one()
self.db.session.delete(orcid_token)
except NoResultFound:
LOGGER.info('Unable to remove ORCID token for ORCID %s. Token not found', orcid)
class SQLAlchemyProfiles(Profiles):
def __init__(self, db: SQLAlchemy, next_id_generator: Callable[[], str] = None) -> None:
if next_id_generator is None:
def generate_id():
return generate_random_string(ID_LENGTH, string.ascii_lowercase + string.digits)
next_id_generator = generate_id
self.db = db
self._next_id_generator = next_id_generator
def add(self, profile: Profile) -> Profile:
self.db.session.begin_nested()
self.db.session.add(profile)
try:
self.db.session.commit()
except (FlushError, IntegrityError) as exception:
self.db.session.rollback()
message = exception.args[0]
log_msg = 'Unable to create profile with id {}. '.format(profile.id)
if 'orcid' in message and profile.orcid:
log_msg += 'Profile with ORCID {} already exists'.format(profile.orcid)
LOGGER.info(msg=log_msg)
return self.get_by_orcid(profile.orcid)
elif 'EmailAddress' in message and profile.email_addresses:
email_addresses = [x.email for x in profile.email_addresses]
log_msg += 'Profile with email address {} already exists'.format(email_addresses)
LOGGER.info(msg=log_msg)
return self.get_by_email_address(*email_addresses)
raise exception
return profile
def get(self, profile_id: str) -> Profile:
try:
return self.db.session.query(Profile).filter_by(id=profile_id).one()
except NoResultFound as exception:
msg = 'Profile with ID {} not found'.format(profile_id)
LOGGER.info(msg=msg)
raise ProfileNotFound(msg) from exception
def get_by_orcid(self, orcid: str) -> Profile:
try:
return self.db.session.query(Profile).filter_by(orcid=orcid).one()
except NoResultFound as exception:
msg = 'Profile with ORCID {} not found'.format(orcid)
LOGGER.info(msg=msg)
raise ProfileNotFound(msg) from exception
def get_by_email_address(self, *email_addresses: str) -> Profile:
if not email_addresses:
raise ProfileNotFound('No email address(es) provided')
try:
return self.db.session.query(Profile).join(EmailAddress) \
.filter(EmailAddress.email.in_(email_addresses)).one()
except NoResultFound as exception:
msg = 'Profile with email address(es) {} not found'.format(email_addresses)
LOGGER.info(msg=msg)
raise ProfileNotFound(msg) from exception
@retry(stop_max_attempt_number=10)
def next_id(self) -> str:
profile_id = self._next_id_generator()
if self.db.session.query(Profile.id).filter_by(id=profile_id).scalar() is not None:
raise RuntimeError('Generated ID already in use')
return profile_id
    def list(self, limit: int = None, offset: int = 0, desc: bool = True) -> List[Profile]:
        query = self.db.session.query(Profile)
        # Order by the primary key; the model class itself has no desc()/asc() methods.
        if desc:
            query = query.order_by(Profile.id.desc())
        else:
            query = query.order_by(Profile.id.asc())
        return query.limit(limit).offset(offset).all()
def clear(self) -> None:
self.db.session.query(Profile).delete()
def __len__(self) -> int:
return self.db.session.query(Profile).count()
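# Minimal wiring sketch; the surrounding Flask application setup is an
# assumption and not part of this module:
# db = SQLAlchemy(app)
# profiles = SQLAlchemyProfiles(db)
# orcid_tokens = SQLAlchemyOrcidTokens(db)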
|
python
|
from .common import EWSAccountService, create_attachment_ids_element
from ..properties import RootItemId
from ..util import create_element
class DeleteAttachment(EWSAccountService):
"""MSDN:
https://docs.microsoft.com/en-us/exchange/client-developer/web-service-reference/deleteattachment-operation
"""
SERVICE_NAME = 'DeleteAttachment'
def call(self, items):
return self._elems_to_objs(self._chunked_get_elements(self.get_payload, items=items))
def _elems_to_objs(self, elems):
for elem in elems:
if isinstance(elem, Exception):
yield elem
continue
yield RootItemId.from_xml(elem=elem, account=self.account)
@classmethod
def _get_elements_in_container(cls, container):
return container.findall(RootItemId.response_tag())
def get_payload(self, items):
payload = create_element('m:%s' % self.SERVICE_NAME)
attachment_ids = create_attachment_ids_element(items=items, version=self.account.version)
payload.append(attachment_ids)
return payload
|
python
|
"""
388. Longest Absolute File Path
Medium
Suppose we have a file system that stores both files and directories. An example of one system is represented in the following picture:
Here, we have dir as the only directory in the root. dir contains two subdirectories, subdir1 and subdir2. subdir1 contains a file file1.ext and subdirectory subsubdir1. subdir2 contains a subdirectory subsubdir2, which contains a file file2.ext.
In text form, it looks like this (with ⟶ representing the tab character):
dir
⟶ subdir1
⟶ ⟶ file1.ext
⟶ ⟶ subsubdir1
⟶ subdir2
⟶ ⟶ subsubdir2
⟶ ⟶ ⟶ file2.ext
If we were to write this representation in code, it will look like this: "dir\n\tsubdir1\n\t\tfile1.ext\n\t\tsubsubdir1\n\tsubdir2\n\t\tsubsubdir2\n\t\t\tfile2.ext". Note that the '\n' and '\t' are the new-line and tab characters.
Every file and directory has a unique absolute path in the file system, which is the order of directories that must be opened to reach the file/directory itself, all concatenated by '/'s. Using the above example, the absolute path to file2.ext is "dir/subdir2/subsubdir2/file2.ext". Each directory name consists of letters, digits, and/or spaces. Each file name is of the form name.extension, where name and extension consist of letters, digits, and/or spaces.
Given a string input representing the file system in the explained format, return the length of the longest absolute path to a file in the abstracted file system. If there is no file in the system, return 0.
Example 1:
Input: input = "dir\n\tsubdir1\n\tsubdir2\n\t\tfile.ext"
Output: 20
Explanation: We have only one file, and the absolute path is "dir/subdir2/file.ext" of length 20.
Example 2:
Input: input = "dir\n\tsubdir1\n\t\tfile1.ext\n\t\tsubsubdir1\n\tsubdir2\n\t\tsubsubdir2\n\t\t\tfile2.ext"
Output: 32
Explanation: We have two files:
"dir/subdir1/file1.ext" of length 21
"dir/subdir2/subsubdir2/file2.ext" of length 32.
We return 32 since it is the longest absolute path to a file.
Example 3:
Input: input = "a"
Output: 0
Explanation: We do not have any files, just a single directory named "a".
Example 4:
Input: input = "file1.txt\nfile2.txt\nlongfile.txt"
Output: 12
Explanation: There are 3 files at the root directory.
Since the absolute path for anything at the root directory is just the name itself, the answer is "longfile.txt" with length 12.
Constraints:
1 <= input.length <= 104
input may contain lowercase or uppercase English letters, a new line character '\n', a tab character '\t', a dot '.', a space ' ', and digits.
"""
# V0
# IDEA : dict + replace, split
class Solution(object):
def lengthLongestPath(self, input):
# NOTE : we maintain a dict for collecting key and the length till now
d={}
longest=0
fileList=input.split("\n")
for i in fileList:
# directory
if "." not in i:
key = i.count("\t") # level of directory
value = len(i.replace("\t","")) # length after removing '\t'
d[key]=value
# file
else:
key=i.count("\t")
                ### NOTE : length of the file path (sum of parent directory lengths + file name length + number of '/' separators)
length = sum([d[j] for j in d.keys() if j<key]) + len(i.replace("\t","")) + key
longest=max(longest,length)
print (d)
return longest
# V0'
# IDEA : HASH TABLE
class Solution(object):
def lengthLongestPath(self, input):
maxlen = 0
pathlen = {0: 0}
#for line in input.splitlines():
for line in input.split('\n'):
name = line.lstrip('\t')
depth = len(line) - len(name)
if '.' in name:
maxlen = max(maxlen, pathlen[depth] + len(name))
else:
pathlen[depth + 1] = pathlen[depth] + len(name) + 1
return maxlen
# V0''
# IDEA : stack + string op
class Solution:
def lengthLongestPath(self, input):
parts = input.split('\n')
ans = 0
stack = []
L = 0
for s in parts:
# strip '\t' on left end only
cs = s.lstrip('\t')
level = len(s) - len(cs)
while stack and stack[-1][1] >= level:
L -= len(stack.pop()[0])
stack.append((cs, level))
L += len(cs)
if "." in cs:
ans = max(ans, len(stack) + L - 1)
return ans
# V1
# IDEA :STACK
# https://leetcode.com/problems/longest-absolute-file-path/discuss/812407/Python-3-or-Stack-or-Explanation
class Solution:
def lengthLongestPath(self, s: str) -> int:
paths, stack, ans = s.split('\n'), [], 0
for path in paths:
p = path.split('\t')
depth, name = len(p) - 1, p[-1]
l = len(name)
while stack and stack[-1][1] >= depth:
stack.pop()
if not stack:
stack.append((l, depth))
else:
stack.append((l+stack[-1][0], depth))
if '.' in name:
ans = max(ans, stack[-1][0] + stack[-1][1])
return ans
# V1
# IDEA : dict + replace, split
# https://leetcode.com/problems/longest-absolute-file-path/discuss/86640/python-solution-easy-to-understand
class Solution(object):
def lengthLongestPath(self, input):
dict={}
longest=0
fileList=input.split("\n")
for i in fileList:
if "." not in i: # directory
key = i.count("\t") # level of directory
value = len(i.replace("\t","")) # length after removing '\t'
dict[key]=value
else: # doc
key=i.count("\t")
                # length of the file path (sum of parent directory lengths + file name length + number of '/' separators)
length = sum([dict[j] for j in dict.keys() if j<key]) + len(i.replace("\t","")) + key
longest=max(longest,length)
return longest
# V1'
# http://bookshadow.com/weblog/2016/08/21/leetcode-longest-absolute-file-path/
class Solution(object):
def lengthLongestPath(self, input):
"""
:type input: str
:rtype: int
"""
ans = lengthSum = 0
stack = [(-1, 0)]
for p in input.split('\n'):
depth = p.count('\t')
name = p.replace('\t', '')
topDepth, topLength = stack[-1]
while depth <= topDepth:
stack.pop()
lengthSum -= topLength
topDepth, topLength = stack[-1]
length = len(name) + (depth > 0)
lengthSum += length
stack.append((depth, length))
if name.count('.'):
ans = max(ans, lengthSum)
return ans
# V1''
# https://leetcode.com/problems/longest-absolute-file-path/discuss/86619/Simple-Python-solution
# IDEA : lstrip(chars) : removes the given leading characters (here '\t'), not just spaces, from the left of the string
# https://www.w3schools.com/python/ref_string_lstrip.asp
class Solution(object):
def lengthLongestPath(self, input):
maxlen = 0
pathlen = {0: 0}
for line in input.splitlines():
name = line.lstrip('\t')
depth = len(line) - len(name)
if '.' in name:
maxlen = max(maxlen, pathlen[depth] + len(name))
else:
pathlen[depth + 1] = pathlen[depth] + len(name) + 1
return maxlen
### Test case : dev
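# Minimal sanity checks for the hash-table solution defined just above,
# reusing the four examples from the problem statement; assumes this snippet
# is executed as a script.
if __name__ == '__main__':
    _s = Solution()
    assert _s.lengthLongestPath("dir\n\tsubdir1\n\tsubdir2\n\t\tfile.ext") == 20
    assert _s.lengthLongestPath(
        "dir\n\tsubdir1\n\t\tfile1.ext\n\t\tsubsubdir1\n\tsubdir2\n\t\tsubsubdir2\n\t\t\tfile2.ext") == 32
    assert _s.lengthLongestPath("a") == 0
    assert _s.lengthLongestPath("file1.txt\nfile2.txt\nlongfile.txt") == 12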
# V1'''
# http://bookshadow.com/weblog/2016/08/21/leetcode-longest-absolute-file-path/
class Solution(object):
def lengthLongestPath(self, input):
"""
:type input: str
:rtype: int
"""
maxlen = 0
pathlen = {0: 0}
for line in input.splitlines():
name = line.lstrip('\t')
depth = len(line) - len(name)
if '.' in name:
maxlen = max(maxlen, pathlen[depth] + len(name))
else:
pathlen[depth + 1] = pathlen[depth] + len(name) + 1
return maxlen
# V1''''
# https://www.jiuzhang.com/solution/longest-absolute-file-path/#tag-highlight-lang-python
import re, collections
class Solution:
# @param {string} input an abstract file system
# @return {int} return the length of the longest absolute path to file
def lengthLongestPath(self, input):
# Write your code here
dict = collections.defaultdict(lambda: "")
lines = input.split("\n")
n = len(lines)
result = 0
        for i in range(n):  # was xrange() in the original Python 2 snippet
count = lines[i].count("\t")
lines[i] = dict[count - 1] + re.sub("\\t+","/", lines[i])
if "." in lines[i]:
result = max(result, len(lines[i]))
dict[count] = lines[i]
return result
# V1'''''
# https://leetcode.com/problems/longest-absolute-file-path/discuss/266896/Simple-(Python)-solution-with-detailed-explanation
class Solution:
def lengthLongestPath(self, input: str) -> int:
# return value
maxlen = 0
# map each level (0-indexed) to the current
# maximum path length through that level;
#
# begin with a "-1 level" of 0 length, to simply
# index the "level before the 0th level", which
# is a common practice in dynamic programming
levels = {-1:0}
# split up the string so that each file/directory (token) is an
# entry in the list; this preserves the tabs in each entry
for token in input.split('\n'):
# the 0-indexed level of this token is the number of tabs
''' only tabs contribute to the level; spaces are regular characters'''
level = token.count('\t')
# update the current level to be the length of the path
# through the previous level + length of this token;
# subtracting the level removes the tabs from the count
#
# think of this like reading through the structure with your eyes
# from top to bottom; for each token you look at on a new line,
# you update the line's level to be the path through the current token,
# disregarding the remainder. we never "look back up" when reading
# the directory from top to bottom, and the string structure is
            # given to be consistent, so one pass updates the levels as desired
levels[level] = levels[level - 1] + len(token) - level
# if we just processed a filename, overwrite the maximum length
# if the current path + the number of backslashes is longer;
# we store the raw token lengths without any separator,
# so adding the 0-indexed level provides the correct number
# of separators; if there is 1 item, the level is 0 (so no '/'),
# if there are two items, the level is 1 (so one '/' between
# the items) and so on
if '.' in token:
maxlen = max(maxlen, levels[level] + level)
return maxlen
# V1'''''''
# https://leetcode.com/problems/longest-absolute-file-path/discuss/328592/python3-beats-99
class Solution:
def lengthLongestPath(self, input):
parts = input.split('\n')
ans = 0
stack = []
L = 0
for s in parts:
cs = s.lstrip('\t')
level = len(s) - len(cs)
while stack and stack[-1][1] >= level:
L -= len(stack.pop()[0])
stack.append((cs, level))
L += len(cs)
if "." in cs:
ans = max(ans, len(stack) + L - 1)
return ans
# V2
# Time: O(n)
# Space: O(d), d is the max depth of the paths
class Solution(object):
def lengthLongestPath(self, input):
"""
:type input: str
:rtype: int
"""
def split_iter(s, tok):
start = 0
for i in range(len(s)):
if s[i] == tok:
yield s[start:i]
start = i + 1
yield s[start:]
max_len = 0
path_len = {0: 0}
for line in split_iter(input, '\n'):
name = line.lstrip('\t')
depth = len(line) - len(name)
if '.' in name:
max_len = max(max_len, path_len[depth] + len(name))
else:
path_len[depth + 1] = path_len[depth] + len(name) + 1
return max_len
|
python
|
# -*- coding: utf-8 -*-
# DO NOT EDIT THIS FILE!
# This file has been autogenerated by dephell <3
# https://github.com/dephell/dephell
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
readme = ""
setup(
long_description=readme,
name="tgintegration",
version="0.4.0",
description="An Integration Test Library for Telegram Messenger Bots on top of Pyrogram.",
python_requires="==3.*,>=3.6.7",
author="JosXa",
author_email="[email protected]",
license="MIT",
packages=["tgintegration", "tgintegration.containers"],
package_dir={"": "."},
package_data={},
install_requires=["pyrogram", "typing-extensions==3.*,>=3.7.4"],
dependency_links=["git+https://github.com/pyrogram/pyrogram@asyncio#egg=pyrogram"],
extras_require={
"dev": [
"bumpversion==0.5.3",
"coverage==4.1",
"cryptography==1.7",
"flake8==2.6.0",
"pytest==2.9.2",
"pytest-runner==2.11.1",
"pyyaml==5.1",
"sphinx==2.*,>=2.3.0",
"sphinx-autodoc-typehints==1.*,>=1.10.3",
"tgcrypto==1.*,>=1.2.0",
"tox==2.3.1",
"watchdog==0.8.3",
"wheel==0.29.0",
]
},
)
|
python
|
import logging
from datetime import datetime
import tweepy
import creds
from main import make_covid_graph
logging.basicConfig(filename='nepal-covid.log',
level=logging.INFO,
format='%(asctime)s-%(name)s - %(levelname)s - %(message)s',
datefmt='%d-%b-%y %H:%M:%S')
today = datetime.now().date()
reported, recovered, deaths = make_covid_graph()
logging.info('Chart created')
auth = tweepy.OAuthHandler(creds.api_key, creds.api_secret)
auth.set_access_token(creds.access_token, creds.access_secret)
api = tweepy.API(auth)
logging.info('API Authenticated')
tweet = f"""
Nepal Covid graph for {today} with reported cases {reported}, deaths {deaths}, and recovered {recovered}.
Source code on https://github.com/upretip/nepal-covid"""
image_path = f'output/{today}.png'
try:
status = api.update_with_media(image_path, tweet)
logging.info('Tweet sent')
except Exception as e:
logging.error(f'{e}')
|
python
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
test_compat.py
Last updated: 2019-09-27
"""
from wz_core.reporting import Report
from test_core import testinit, runTests
if __name__ == '__main__':
testinit ()
# from wz_compat import migrate
# runTests (migrate)
# from wz_compat import import_pupils
# runTests (import_pupils)
from wz_compat import config
runTests (config)
from wz_compat import grades
runTests (grades)
|
python
|
import base64
import logging
from tornado.escape import json_decode
from tornado.web import Finish
from db import UserStatus
from emailer import send_verification_email, send_password_reset_email
from service.property import get_property, set_property
from service.user import (
get_user,
get_user_profile,
get_user_by_email,
get_user_verification,
get_user_verification_code,
update_user_password,
update_user_profile,
change_user_password,
add_location,
verify_user
)
from .base import CORSHandler, SecureHandler
class UserHandler(SecureHandler):
"""
Get user data for requesting user
"""
def get(self, path: str):
user_id = self.get_user_id()
user = get_user(user_id)
if not user:
logging.warning(f'User {user_id} has token but not found?')
self.write_error(400, f'User not found with id: {user_id}')
else:
self.success(200, user)
self.finish()
class UserProfileHandler(SecureHandler):
def get(self, path):
user_id = self.get_user_id()
user_profile = get_user_profile(user_id)
self.success(200, user_profile)
self.finish()
def post(self, path):
user_id = self.get_user_id()
data = self.get_data()
logging.info(f'Updating settings for user {user_id}, settings {data}')
food = data.get('food_preferences')
pantry = data.get('pantry')
eager = data.get('eagerness')
# validate data
if food is not None and not all([1 <= int(pref) <= 4 for pref in food]):
fields = ", ".join(set(food) - set(range(1, 5)))
self.write_error(400, f'Food preferences not found: {fields}')
raise Finish()
if pantry is not None and not isinstance(pantry, bool):
self.write_error(400, f'Pantry value must be true or false')
raise Finish()
if eager is not None and (not isinstance(eager, int) or not 1 <= eager <= 5):
self.write_error(400, f'Eagerness value must be from 1 to 5')
raise Finish()
update_user_profile(user_id, food, pantry, eager)
self.success(204)
self.finish()
class UserPasswordHandler(CORSHandler, SecureHandler):
required_fields = set(['old_password', 'new_password'])
def post(self):
user_id = self.get_user_id()
data = self.get_data()
old_pass = data.get('old_password')
new_pass = data.get('new_password')
if change_user_password(user_id, old_pass, new_pass):
self.success(status=200)
else:
self.write_error(400, 'Incorrect password')
self.finish()
class UserPasswordResetHandler(CORSHandler):
def initialize(self, token_service: 'JwtTokenService', executor: 'ThreadPoolExecutor'):
self.token_service = token_service
self.executor = executor
def post(self, path):
# user forgot password
# we need to generate a one-time use reset token
# and email them a password reset link
data = json_decode(self.request.body)
if 'email' in data:
# they are requesting the reset link
user = get_user_by_email(data['email'])
if user:
token = self.token_service.create_password_reset_token(user.id)
encoded = base64.b64encode(token).decode()
                logging.info('encoded: %s', encoded)
self.executor.submit(send_password_reset_email, user.email, encoded)
self.success(status=204)
else:
self.write_error(400, 'No user exists with that email address')
elif 'token' in data and 'password' in data:
# they are sending their token and new password
# check that the token is correct, then
# set them up with their new password
try:
token = base64.b64decode(data['token']).decode()
except:
logging.warning(f'Encountered invalid token: {data["token"]}')
self.write_error(400, 'Password reset failed, invalid token')
raise Finish()
owner = self.token_service.decode_password_token(token, False)['own']
user = get_user(owner)
if user is not None:
try:
if self.token_service.validate_token(token):
password = data['password']
if not update_user_password(owner, password):
                            logging.error(f'Failed password reset for user {owner}')
self.write_error(500, 'Password reset failed')
else:
self.success(status=204)
else:
self.write_error(400, 'Password reset failed, token is expired')
except Exception as e:
logging.error(e)
self.write_error(500, 'Password reset failed')
else:
logging.warning(f"User with id {owner} tried to reset password, but they don't exist")
self.write_error(400, 'No user exists with that id')
else:
self.write_error(400, 'Missing fields')
self.finish()
class UserLocationHandler(SecureHandler):
required_fields = set(['latitude', 'longitude'])
def post(self, path):
user_id = self.get_user_id()
data = self.get_data()
add_location(user_id, data['latitude'], data['longitude'], data.get('time'))
self.success(204)
self.finish()
class UserVerificationHandler(SecureHandler):
def get(self, path):
user_id = self.get_user_id()
try:
user = get_user(user_id)
threshold = int(get_property('user.threshold'))
if user.active:
logging.info(f"User {user_id} is already active")
self.write_error(400, "Error: user already active")
elif user.status == "REQUESTED" and (threshold > 0 or get_user_verification_code(user_id) is not None):
code = get_user_verification(user_id)
send_verification_email(to=user.email, code=code)
set_property('user.threshold', str(threshold-1))
self.success(status=204)
elif user.status == 'VERIFIED' or user.status == 'ACCEPTED':
self.write_error(400, "User is already verified")
else:
self.write_error(403, "User has not yet been permitted")
except Exception as e:
logging.error("Failed to send verification email")
logging.error(e)
self.write_error(500, "Error: failed to send verification email")
finally:
self.finish()
def post(self, path):
# decode json
user_id = self.get_user_id()
data = self.get_data()
code = data.get('code')
if not code:
self.write_error(400, 'Missing verification code')
else:
if verify_user(code, user_id):
self.success(status=204)
else:
self.write_error(400, 'Invalid verification code')
self.finish()
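# Sketch of how these handlers could be mounted in a tornado Application;
# the URL patterns and the token_service/executor objects are assumptions,
# not part of this module:
# app = tornado.web.Application([
#     (r'/users/(.*)', UserHandler),
#     (r'/users/profile/(.*)', UserProfileHandler),
#     (r'/users/password', UserPasswordHandler),
#     (r'/users/password/reset/(.*)', UserPasswordResetHandler,
#      dict(token_service=token_service, executor=executor)),
#     (r'/users/location/(.*)', UserLocationHandler),
#     (r'/users/verify/(.*)', UserVerificationHandler),
# ])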
|
python
|
from bs4 import BeautifulSoup
from collections import Counter
from string import punctuation
def word_frequencies(url):
"""
Downloads the content from the given URL and returns a dict {word -> frequency}
giving the count of each word on the page. Ignores HTML tags in the response.
:param url: Full URL of HTML page
:return: dict {word -> frequency}
"""
html = open(url, 'r', encoding='utf-8')
code = html.read()
soup = BeautifulSoup(code, features="html.parser")
    # We get the words inside <p> tags
text_p = (''.join(s.findAll(text=True))for s in soup.findAll('p'))
c_p = Counter((x.rstrip(punctuation).lower() for y in text_p for x in y.split()))
# We get the words in divs
text_div = (''.join(s.findAll(text=True))for s in soup.findAll('div'))
c_div = Counter((x.rstrip(punctuation).lower() for y in text_div for x in y.split()))
    # We sum the two counters and build a dict with word counts from most to least common
total = c_div + c_p
returnDict = {}
for k, v in total.most_common():
returnDict[k] = v
return returnDict
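# Example usage sketch; 'page.html' is a placeholder path for a local HTML
# file and is not part of the original snippet.
if __name__ == '__main__':
    freqs = word_frequencies('page.html')
    for word, count in list(freqs.items())[:10]:
        print(word, count)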
|
python
|
import numpy as np
array = np.array([['08', '02', '22', '97', '38', '15', '00', '40', '00', '75', '04', '05', '07', '78', '52', '12', '50', '77', '91', '08'],
['49', '49', '99', '40', '17', '81', '18', '57', '60', '87', '17', '40', '98', '43', '69', '48', '04', '56', '62', '00'],
['81', '49', '31', '73', '55', '79', '14', '29', '93', '71', '40', '67', '53', '88', '30', '03', '49', '13', '36', '65'],
['52', '70', '95', '23', '04', '60', '11', '42', '69', '24', '68', '56', '01', '32', '56', '71', '37', '02', '36', '91'],
['22', '31', '16', '71', '51', '67', '63', '89', '41', '92', '36', '54', '22', '40', '40', '28', '66', '33', '13', '80'],
['24', '47', '32', '60', '99', '03', '45', '02', '44', '75', '33', '53', '78', '36', '84', '20', '35', '17', '12', '50'],
['32', '98', '81', '28', '64', '23', '67', '10', '26', '38', '40', '67', '59', '54', '70', '66', '18', '38', '64', '70'],
['67', '26', '20', '68', '02', '62', '12', '20', '95', '63', '94', '39', '63', '08', '40', '91', '66', '49', '94', '21'],
['24', '55', '58', '05', '66', '73', '99', '26', '97', '17', '78', '78', '96', '83', '14', '88', '34', '89', '63', '72'],
['21', '36', '23', '09', '75', '00', '76', '44', '20', '45', '35', '14', '00', '61', '33', '97', '34', '31', '33', '95'],
['78', '17', '53', '28', '22', '75', '31', '67', '15', '94', '03', '80', '04', '62', '16', '14', '09', '53', '56', '92'],
['16', '39', '05', '42', '96', '35', '31', '47', '55', '58', '88', '24', '00', '17', '54', '24', '36', '29', '85', '57'],
['86', '56', '00', '48', '35', '71', '89', '07', '05', '44', '44', '37', '44', '60', '21', '58', '51', '54', '17', '58'],
['19', '80', '81', '68', '05', '94', '47', '69', '28', '73', '92', '13', '86', '52', '17', '77', '04', '89', '55', '40'],
['04', '52', '08', '83', '97', '35', '99', '16', '07', '97', '57', '32', '16', '26', '26', '79', '33', '27', '98', '66'],
['88', '36', '68', '87', '57', '62', '20', '72', '03', '46', '33', '67', '46', '55', '12', '32', '63', '93', '53', '69'],
['04', '42', '16', '73', '38', '25', '39', '11', '24', '94', '72', '18', '08', '46', '29', '32', '40', '62', '76', '36'],
['20', '69', '36', '41', '72', '30', '23', '88', '34', '62', '99', '69', '82', '67', '59', '85', '74', '04', '36', '16'],
['20', '73', '35', '29', '78', '31', '90', '01', '74', '31', '49', '71', '48', '86', '81', '16', '23', '57', '05', '54'],
['01', '70', '54', '71', '83', '51', '54', '69', '16', '92', '33', '48', '61', '43', '52', '01', '89', '19', '67', '48']])
array_aslist = array.astype('int64').tolist()
max_row = 20 - 3  # a window of 4 fits at start indices 0..16, i.e. range(0, 17)
max_col = 20 - 3
max_prod = 1
for offset_row in range(0,max_row):
for offset_col in range(0,max_col):
prod_row = array_aslist[offset_row][offset_col] * array_aslist[offset_row][offset_col + 1] * array_aslist[offset_row][offset_col + 2] * array_aslist[offset_row][offset_col + 3]
prod_col = array_aslist[offset_row][offset_col] * array_aslist[offset_row + 1][offset_col] * array_aslist[offset_row + 2][offset_col] * array_aslist[offset_row + 3][offset_col]
prod_diag = array_aslist[offset_row][offset_col] * array_aslist[offset_row + 1][offset_col + 1] * array_aslist[offset_row + 2][offset_col + 2] * array_aslist[offset_row + 3][offset_col + 3]
prod_antidiag = array_aslist[offset_row + 3][offset_col] * array_aslist[offset_row + 2][offset_col + 1] * array_aslist[offset_row + 1][offset_col + 2] * array_aslist[offset_row][offset_col + 3]
max_prod_tmp = max([prod_row,prod_col,prod_diag,prod_antidiag])
if max_prod_tmp > max_prod:
max_prod = max_prod_tmp
print(max_prod)
|
python
|
import os
import logging
import cfn_storage_gateway_provider
import cfn_cache_provider
import cfn_file_share_provider
log = logging.getLogger()
log.setLevel(os.environ.get("LOG_LEVEL", "INFO"))
def handler(request, context):
if request['ResourceType'] == 'Custom::StorageGatewayNfsFileShare':
return cfn_file_share_provider.handler(request, context)
elif request['ResourceType'] == 'Custom::StorageGatewayCache':
return cfn_cache_provider.handler(request, context)
else:
return cfn_storage_gateway_provider.handler(request, context)
|
python
|
import matplotlib.pyplot as plt
import numpy as np
import numpy.random as random
import re
import textstat
from scipy import stats
from scipy.stats import spearmanr
#coding:utf-8
###############
# YL (Yomiyasusa Level) readability values of the general books
x = [8, 6.6, 8.5, 6.5, 5, 7, 6, 5, 5, 5, 6.5, 6.5, 7, 8.2, 7.6, 7.5, 7.5, 7.3,
7, 8.2, 8, 8.5, 7, 6.6, 7.7, 7, 5, 8.5, 8.5, 7, 7, 8]
# list to hold the FRE (Flesch Reading Ease) scores
y=[]
number=2
while number < 36:
if (number != 22) and (number != 23):
        # read the text file into text_list as a list of lines
with open('book'+ str(number)+'.txt', 'r') as f:
            # splitlines() drops the newline ("\n") characters
text_list = f.read().splitlines()
list_suu =0
        # turn single blank lines into "\n" (runs of two or more blank lines are not all removed)
while list_suu < len(text_list):
if text_list[list_suu] == "":
text_list[list_suu] = "\n"
list_suu+=1
        # regular expressions
        # drop lines that are only illustration markers
text_list = [s for s in text_list if re.sub('.Illustration:\s\d+.', '', s)]
        # drop lines that are only page numbers
text_list = [s for s in text_list if re.sub('{\d+}', '', s)]
        # join the list entries into a single string
mojiretu = ''.join(text_list)
        # regular expressions
        # remove {digits} (probably page numbers)
mojiretu_p = re.sub('{\d+}', '', mojiretu)
        # remove [Illustration: 00] markers
mojiretu_p_ill = re.sub('.Illustration:\s\d+.', '', mojiretu_p)
        # append the FRE score to the list
y.append(textstat.flesch_reading_ease(mojiretu_p_ill))
number+=1
print(y)
# correlation computation
x_np = np.array(x)
y_np = np.array(y)
# check normality with the Shapiro-Wilk test
# W statistic and p-value
shap_w, shap_p_value_x = stats.shapiro(x)
shap_w, shap_p_value_y = stats.shapiro(y)
print(shap_p_value_x,"x_shapiro")
print(shap_p_value_y, "y_syapiro")
# if the p-value is >= 0.05, the null hypothesis is accepted -> the data can be regarded as normal
if shap_p_value_x >= 0.05 and shap_p_value_y >= 0.05 :
print("正規性があるといえる")
#ピアソンの相関係数をとる
# 相関行列を計算
coef = np.corrcoef(x_np, y_np)
soukan = coef[0][1]
# if the p-value is below 0.05, the null hypothesis is rejected -> the data cannot be regarded as normal
else:
print("正規性があるといえない")
#スピアマンの順位相関係数
correlation, pvalue = spearmanr(x, y)
soukan = correlation
##############################
print("一般図書のFRE")
print(soukan)
## plot the graph
# figure size
plt.figure(figsize=(5, 5))
# draw the scatter plot
plt.plot(x, y, 'o', label='Score')
plt.title('Correlation coefficient') # title
plt.xlabel('YL') # x-axis label
plt.ylabel('FRE_No.104') # y-axis label
plt.grid(True) # show grid
plt.legend() # show legend
plt.savefig("FRE_tamesi.png")
|
python
|
import asyncio
from collections import OrderedDict
import logging
import uuid
import msgpack
import re
import types
from wolverine.module import MicroModule
from wolverine.module.controller.zhelpers import unpackb, packb, dump
from wolverine.module.service import ServiceMessage
logger = logging.getLogger(__name__)
class MicroRouter(MicroModule):
def __init__(self):
super(MicroRouter, self).__init__()
self.name = 'router'
self.service_handlers = OrderedDict()
self.client_handlers = {}
self.clients = {}
self.servers = {}
self.async_req_queue = {}
def init(self):
pass
# self.sort_handlers()
def sort_handlers(self):
"""sort the service handlers by key length from shortest to longest"""
d = self.service_handlers.copy()
sorted_handlers = OrderedDict(
sorted(d.items(), key=lambda t: len(t[0])))
self.service_handlers = sorted_handlers
@asyncio.coroutine
def app_stop(self):
for service_name in list(self.servers.keys()):
self.remove_server(service_name)
for key in list(self.clients.keys()):
yield from self.remove_client(key)
self.service_handlers = {}
logger.info("router exited")
@asyncio.coroutine
def add_client(self, client, name, **options):
service_id = options.get('service_id', name)
if service_id not in self.clients.keys():
self.clients[service_id] = client
up = yield from self.app.registry.register(name,
register_type='service')
return up
else:
logger.warning('not overriding a client with route ' + name)
return True
def remove_client(self, name):
if name in self.clients.keys():
try:
self.clients[name].close()
except Exception:
logger.error('error closing client ' + name, exc_info=True)
try:
yield from \
self.app.registry.deregister(name, register_type='service')
except Exception:
logger.error('failed to deregister client' + name,
exc_info=True)
del self.clients[name]
def add_server(self, name, service):
if name not in self.servers.keys():
self.servers[name] = service
return True
else:
logger.warning('service ' + name + ' already registered')
return False
def remove_server(self, name):
if name in self.servers.keys():
self.servers[name].close()
del self.servers[name]
def add_service_handler(self, route, func):
if route not in self.service_handlers.keys():
self.service_handlers[route] = []
self.service_handlers[route].append(func)
def remove_service_handler(self, handler):
if handler in self.service_handlers.keys():
logger.info('removing all handlers for route ' + handler)
logger.info('removed ' + str(len(self.service_handlers[handler])) +
' handlers')
del self.service_handlers[handler]
def add_client_handler(self, route, func):
if route not in self.client_handlers.keys():
self.client_handlers[route] = []
self.client_handlers[route].append(func)
    def remove_client_handler(self, handler):
        if handler in self.client_handlers.keys():
            logger.info('removing all handlers for route ' + handler)
            logger.info('removed ' + str(len(self.client_handlers[handler])) +
                        ' handlers')
            del self.client_handlers[handler]
def handle_service(self, data):
route = data[-2]
logger.info('handling data for route ' + route.decode('utf-8'))
if logger.getEffectiveLevel() == logging.DEBUG:
dump(data)
return self._handle_service(route, data)
def _handle_service(self, route, data):
result = {'data': [], 'errors': []}
if isinstance(route, bytes):
route = route.decode('utf-8')
found = False
for key, handlers in self.service_handlers.items():
pattern = re.compile(key)
if pattern.match(route):
req = unpackb(data[-1])
found = True
logger.info('handler: ' + key)
for func in handlers:
try:
response = func(req)
if isinstance(response, types.GeneratorType):
response = yield from response
if isinstance(response, ServiceMessage):
if response.has_error():
result['errors'].append(response.errors)
response = response.data
if response is not None:
result['data'].append(response)
except Exception as ex:
logger.error('failed in handling data: ', exc_info=True)
logger.error('failed in data handling')
result['errors'].append("{0}".format(ex))
break
if not found:
logger.info('no matching route for ' + route)
packet = data[:-1] + [packb(result)]
return packet
def reply(self, data, name):
if name in self.servers.keys():
client = self.servers[name]
client.write(data)
yield from client.drain()
def _send(self, data, client):
client.write(data)
yield from client.drain()
data = yield from client.read()
return data
@asyncio.coroutine
def _send_async(self, data, client, correlation_id, future):
self.async_req_queue[correlation_id] = future
client.write(data)
yield from client.drain()
return future
def send(self, data, route='.*', version='1', **options):
client = None
future = options.pop('future', None)
service = route.split('/')[0]
if len(route.split('/')) < 2:
route += '/'
for c_client_id, c_client in self.clients.items():
c_service, c_service_id = c_client_id.split(':')
c_version = c_service_id.split('_')[1]
if service == c_service and version == c_version:
client = c_client
break
if client:
correlation_id = str(uuid.uuid1())[:8]
b_data = msgpack.packb(data, use_bin_type=True)
packet = (bytes(correlation_id, encoding='utf-8'),
bytes(route, encoding='utf-8'),
b_data)
if not future:
response = yield from self._send(packet, client)
response = msgpack.unpackb(response[-1])
else:
response = yield from self._send_async(packet, client,
correlation_id, future)
return response
else:
return None
|
python
|
"""
Class Encore CIS 211 w/ Joseph
contest.py
Author: Joseph Goh
Updated: 03/04/2021
"""
from typing import List, Dict
class Hometown:
def __init__(self, name: str):
self.name = name
self.champions = []
def get_total_wins(self) -> int:
total = 0
for champ in self.champions:
total += champ.wins
return total
def __str__(self):
return f"{self.name}: {len(self.champions)} champs with {self.get_total_wins()} wins"
class Contestant:
def __init__(self, name: str, height: float, weight: float, hometown: Hometown):
self.name = name
self.height = height
self.weight = weight
self.hometown = hometown
self.wins = 0
def notify_win(self):
self.wins += 1
if self not in self.hometown.champions:
self.hometown.champions.append(self)
def __str__(self):
return f"Contestant({self.name}, {self.height} in, {self.weight} lb)"
def __repr__(self):
return self.__str__()
def __lt__(self, other):
"""
Return True *only if* self is strictly lesser than other in either height or weight while
*also* being lesser or equal in the other category.
Invoked when you evaluate 'self < other'. (i.e. 'x < y' evaluates to x.__lt__(y).)
"""
loss_by_height = (self.height < other.height) and (self.weight <= other.weight)
loss_by_weight = (self.height <= other.height) and (self.weight < other.weight)
return loss_by_height or loss_by_weight
def __gt__(self, other):
"""
Return True *only if* self is strictly greater than other in either height or weight while
*also* being greater or equal in the other category.
Invoked when you evaluate 'self > other'. (i.e. 'x > y' evaluates to x.__gt__(y).)
"""
win_by_height = (self.height > other.height) and (self.weight >= other.weight)
win_by_weight = (self.height >= other.height) and (self.weight > other.weight)
return win_by_height or win_by_weight
class Contest:
def __init__(self, contestants: List[Contestant]):
self.contestants = contestants
def assign_rank(self) -> List[int]:
ranks_li = []
for entry in self.contestants:
rank = 1
for opponent in self.contestants:
if entry < opponent:
rank += 1
ranks_li.append(rank)
return ranks_li
def create_rank_dict(self) -> Dict[int, List[Contestant]]:
ranks_li = self.assign_rank()
# Build a dictionary with ranks as keys and empty lists as values
ranks_dict = {}
for i in range(1, len(self.contestants) + 1):
ranks_dict[i] = []
        # Append each Contestant object to the list for its computed rank
for i in range(len(self.contestants)):
ranks_dict[ranks_li[i]].append(self.contestants[i])
return ranks_dict
def announce_results(self):
ranks_dict = self.create_rank_dict()
for champ in ranks_dict[1]:
champ.notify_win()
dimmsdale = Hometown('Dimmsdale')
retroville = Hometown('Retroville')
celery0 = Contestant('Fibrous Fred', 21, 4, dimmsdale)
celery1 = Contestant('The Magic Cronch', 15, 6, dimmsdale)
celery2 = Contestant('Tony Stalk', 20, 5, retroville)
celery3 = Contestant('Pascal', 18, 3, retroville)
celery4 = Contestant('Salad Spice', 14, 2, retroville)
celerlympics = Contest([celery0, celery2])
celery_pageant = Contest([celery0, celery1, celery2, celery3, celery4])
celerlympics.announce_results()
celery_pageant.announce_results()
print(dimmsdale)
print(retroville)
|
python
|
#!/usr/bin/env python3
"""
Copyright (c) 2018 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import sys
import time
import argparse
import numpy as np
import cv2
import caffe
def parse_args():
"""
Parse input arguments
"""
    parser = argparse.ArgumentParser(description='Test an Age-gender model')
parser.add_argument('--gpu', help='GPU id to use', default=0, type=int)
parser.add_argument('--compute_mode', type=str, choices=['CPU', 'GPU'], default='GPU',
help='Caffe compute mode: CPU or GPU')
parser.add_argument('--def', dest='prototxt', help='prototxt file defining the network',
required=True, type=str)
parser.add_argument('--net', dest='caffemodel', help='model to test', required=True, type=str)
parser.add_argument('--gt', dest='gt', help='Path to groundtruth annotation txt file', type=str, required=True)
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
return parser.parse_args()
def preprocess_image(img, size):
"""Transforms input image into network-compatible format.
:param img: Input image
:param size: Target size of network input
:return: Network-compatible input blob
"""
img = cv2.resize(img, size)
return img.transpose((2, 0, 1)).astype(np.float32)
def main():
"""
"""
args = parse_args()
if args.compute_mode == 'GPU':
caffe.set_mode_gpu()
caffe.set_device(args.gpu)
if args.compute_mode == 'CPU':
caffe.set_mode_cpu()
print('Called with args:')
print(args)
assert os.path.exists(args.gt)
assert os.path.exists(args.prototxt)
assert os.path.exists(args.caffemodel)
net = caffe.Net(args.prototxt, args.caffemodel, caffe.TEST)
net.name = os.path.splitext(os.path.basename(args.caffemodel))[0]
data_dir = os.path.dirname(args.gt)
with open(args.gt) as f:
lines = f.readlines()
gender_positives = 0
gender_negatives = 0
age_dif = 0
num = 0
for line in lines:
path, l_gen, l_age = line.split()
gen = bool(float(l_gen))
age = float(l_age)
print('path = ', path, 'gen = ', gen, 'age = ', age)
# image reading
im_path = os.path.join(data_dir, path)
if not os.path.exists(im_path):
print("Can't load", im_path)
continue
img = cv2.imread(im_path)
if img is None:
print("Can't load", im_path)
continue
# image resizing
in_height, in_width = net.blobs['data'].data.shape[2:]
net.blobs['data'].data[...] = np.array(preprocess_image(img, (in_width, in_height)).astype(np.float32)/255.)
gender_out_layer = 'prob'
age_out_layer = 'fc3_a' #'age_conv3'
# forward pass
net.forward()
female_prob = net.blobs[gender_out_layer].data[0][0][0][0]
male_prob = net.blobs[gender_out_layer].data[0][1][0][0]
age_output = net.blobs[age_out_layer].data[0][0][0][0] * 100
print("maleProb:", male_prob, "age:", age_output)
if bool(male_prob > 0.5) == gen:
gender_positives += 1
else:
gender_negatives += 1
age_dif += abs(age_output - age*100)
num += 1
gender_accuracy = gender_positives / (gender_positives + gender_negatives)
print('gender_accuracy = ', gender_accuracy)
age_mae = age_dif/num
print('age mae = ', age_mae)
if __name__ == '__main__':
main()
|
python
|
'''Adapt the code from challenge #107, creating an additional function called moeda() that can
display the numbers as a formatted currency value.'''
import moeda
preco = float(input('Enter the price: R$ '))
print(f'Half of {moeda.moeda(preco)} is {moeda.moeda(moeda.metade(preco))}')
print(f'Double of {moeda.moeda(preco)} is {moeda.moeda(moeda.dobro(preco))}')
print(f'Increasing it by 10% gives {moeda.moeda(moeda.aumentar(preco, 10))}')
print(f'Decreasing it by 13% gives {moeda.moeda(moeda.diminuir(preco, 13))}')
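# NOTE: the companion 'moeda' module is not included in this snippet. A
# minimal sketch of what the calls above assume it provides (the exact
# formatting rules are an assumption):
#
#   def metade(preco):
#       return preco / 2
#
#   def dobro(preco):
#       return preco * 2
#
#   def aumentar(preco, taxa):
#       return preco + preco * taxa / 100
#
#   def diminuir(preco, taxa):
#       return preco - preco * taxa / 100
#
#   def moeda(preco, simbolo='R$'):
#       return f'{simbolo}{preco:.2f}'.replace('.', ',')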
|
python
|
input = open('/Users/krishjain/Desktop/AdventOfCode/Day 6/input.txt', "r").read()
input = input.split("\n\n")
countOfAnswersPerGroup = []
for i in input:
listOfAnswersToWhichAnsweredYes = []
for j in i.replace("\n", ""):
listOfAnswersToWhichAnsweredYes.append(j)
unorderedListOfAnswers = list(dict.fromkeys(listOfAnswersToWhichAnsweredYes))
orderedListOfAnswers = sorted(unorderedListOfAnswers)
countOfAnswersPerGroup.append(len(orderedListOfAnswers))
sumOfCounts = sum(countOfAnswersPerGroup)
print("Answer is: " + str(sumOfCounts))
|
python
|
import matplotlib.pyplot as plt
env_map = [(8.92, 283.86), (17.83, 283.44), (68.13, 720.79), (112.8, 892.9), (140.79, 888.92), (101.75, 533.38), (106.89, 478.2), (115.39, 449.42), (117.73, 405.24), (111.25, 342.38), (87.73, 243.69), (113.38, 286.37), (121.92, 281.75), (124.33, 264.21), (124.39, 244.14), (125.74, 228.72), (125.22, 211.74), (123.24, 194.2), (122.53, 180.3), (125.2, 172.32), (122.58, 158.03), (123.02, 148.71), (124.99, 141.77), (122.53, 130.49), (124.45, 124.45), (130.49, 122.53), (135.77, 119.7), (145.63, 120.47), (156.45, 121.36), (168.28, 122.26), (172.03, 116.91), (173.09, 109.84), (166.98, 98.75), (170.88, 93.94), (174.64, 88.98), (164.68, 77.49), (137.66, 59.57), (135.75, 53.75), (123.26, 44.37), (122.69, 39.86), (122.92, 35.71), (123.01, 31.58), (122.97, 27.49), (122.79, 23.42), (126.42, 20.02), (141.87, 17.92), (142.37, 13.46), (144.71, 9.1), (147.93, 4.65), (163.0, 0.0), (171.92, -5.4), (170.66, -10.74), (175.22, -16.56), (177.59, -22.43), (160.99, -25.5), (157.17, -29.98), (153.22, -34.25), (151.1, -38.8), (151.73, -44.08), (143.61, -46.66), (132.66, -47.76), (130.17, -51.54), (128.49, -55.6), (125.77, -59.18), (126.52, -64.47), (126.19, -69.37), (126.53, -74.83), (133.4, -84.66), (172.03, -116.91), (165.85, -120.5), (169.09, -131.16), (169.51, -140.23), (157.52, -138.88), (152.35, -143.07), (144.96, -144.96), (136.22, -145.06), (128.29, -145.52), (120.47, -145.63), (112.77, -145.39), (105.21, -144.81), (97.8, -143.91), (90.55, -142.69), (85.52, -144.6), (79.01, -143.71), (73.55, -144.34), (67.27, -142.96), (41.7, -96.36), (57.8, -145.97), (45.05, -125.14), (46.35, -142.66), (34.04, -117.16), (36.06, -140.44), (32.72, -146.39), (27.92, -146.36), (23.31, -147.17), (18.55, -146.83), (13.93, -147.34), (9.29, -147.71), (4.65, -147.93), (-0.0, -148.0), (-4.65, -147.93),
(-9.29, -147.71), (-13.93, -147.34), (-18.67, -147.83), (-23.47, -148.15), (-28.29, -148.33), (-33.16, -148.34), (-38.05, -148.19), (-43.24, -148.85), (-48.52, -149.32), (-53.86, -149.6), (-59.27, -149.69), (-64.74, -149.59), (-70.68, -150.2), (-76.72, -150.58), (-79.97, -145.47), (-84.5, -142.88), (-93.23, -146.91), (-100.61, -148.05), (-90.52, -124.59), (-91.32, -117.73), (-93.7, -113.27), (-89.94, -102.02), (-89.68, -95.49), (-89.1, -89.1), (-89.66, -84.2), (-89.26, -78.7), (-89.38, -73.94), (-89.29, -69.26), (-88.99, -64.66), (-89.32, -60.71), (-88.65, -56.26), (-88.66, -52.43), (-88.51, -48.66), (-89.1, -45.4), (-88.67, -41.73), (-89.02, -38.52), (-88.33, -34.97), (-91.27, -32.86), (-98.91, -32.14), (-106.59, -30.97), (-118.17, -30.34), (-131.75, -29.45), (-118.86, -22.67), (-112.6, -17.83), (-129.97, -16.42), (-139.38, -13.18), (-135.73, -8.54), (-89.96, -2.83), (-38.0, 0.0), (-37.98, 1.19), (-63.87, 4.02), (-62.72, 5.93), (-67.46, 8.52), (-78.03, 12.36), (-82.51, 15.74), (-81.98, 18.32), (-82.33, 21.14), (-82.59, 23.99), (-81.79, 26.58), (-81.86, 29.47), (-82.75, 32.76), (-89.94, 38.92), (-136.63, 64.29), (-137.22, 69.91), (-144.59, 79.49), (-154.07, 91.12), (-154.51, 98.06), (-154.66, 105.11), (-155.33, 112.85), (-158.82, 123.19), (-198.79, 164.46), (-203.28, 179.22), (-193.91, 182.09), (-222.03, 222.03), (-219.74, 234.0), (-208.97, 237.04), (-217.36, 262.75), (-205.32, 264.7), (-196.32, 270.21), (-188.86, 277.9), (-165.57, 260.9), (-159.84, 270.27), (-152.72, 277.79), (-139.38, 273.54), (-129.86, 275.97), (-108.02, 249.63), (-110.81, 279.86), (-100.61, 279.44), (-89.61, 275.81), (-81.47, 280.41), (-68.89, 268.3), (-62.17, 278.14), (-53.78, 281.92), (-44.43, 280.5), (-34.72, 274.82), (-25.03, 264.82), (-17.83, 283.44), (-8.89, 282.86), (0.0, 280.0)]
x = [p[0] for p in env_map if -800 < p[0] < 800]
y = [p[1] for p in env_map if -800 < p[0] < 800]
plt.scatter(x, y)
plt.show()
|
python
|
#
# Copyright (c) 2013-2014 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
import cStringIO
import httplib2
import re
import sys
import fixtures
from inventoryclient import exc
from inventoryclient import shell as inventoryclient_shell
from inventoryclient.tests import utils
from testtools import matchers
FAKE_ENV = {'OS_USERNAME': 'username',
'OS_PASSWORD': 'password',
'OS_TENANT_NAME': 'tenant_name',
'OS_AUTH_URL': 'http://no.where'}
class ShellTest(utils.BaseTestCase):
re_options = re.DOTALL | re.MULTILINE
# Patch os.environ to avoid required auth info.
def make_env(self, exclude=None):
env = dict((k, v) for k, v in FAKE_ENV.items() if k != exclude)
self.useFixture(fixtures.MonkeyPatch('os.environ', env))
def setUp(self):
super(ShellTest, self).setUp()
def shell(self, argstr):
orig = sys.stdout
try:
sys.stdout = cStringIO.StringIO()
_shell = inventoryclient_shell.InventoryShell()
_shell.main(argstr.split())
except SystemExit:
exc_type, exc_value, exc_traceback = sys.exc_info()
self.assertEqual(exc_value.code, 0)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = orig
return out
def test_help_unknown_command(self):
self.assertRaises(exc.CommandError, self.shell, 'help foofoo')
def test_debug(self):
httplib2.debuglevel = 0
self.shell('--debug help')
self.assertEqual(httplib2.debuglevel, 1)
def test_help(self):
required = [
'.*?^usage: inventory',
'.*?^See "inventory help COMMAND" '
'for help on a specific command',
]
for argstr in ['--help', 'help']:
help_text = self.shell(argstr)
for r in required:
self.assertThat(help_text,
matchers.MatchesRegex(r,
self.re_options))
def test_help_on_subcommand(self):
required = [
'.*?^usage: inventory host-show <hostname or id>'
'',
".*?^Show host attributes.",
'',
".*?^Positional arguments:",
".*?^ <hostname or id> Name or ID of host",
]
argstrings = [
'help host-show',
]
for argstr in argstrings:
help_text = self.shell(argstr)
for r in required:
self.assertThat(help_text,
matchers.MatchesRegex(r, self.re_options))
def test_auth_param(self):
self.make_env(exclude='OS_USERNAME')
self.test_help()
|
python
|
# coding: utf-8
# http://gitlab.skoltech.ru/shapeev/mlip-dev/blob/master/src/external/python/mlippy/cfgs.py
from __future__ import print_function
import numpy as np
class Cfg:
pos = None
lat = None
types = None
energy = None
forces = None
stresses = None
desc = None
grade = None
def readcfg(f):
cfg = Cfg()
cfg.lat = np.zeros((3, 3))
size = -1
mode = -1
line = f.readline()
while line:
line = line.upper()
line = line.strip()
if mode == 0:
if line.startswith('SIZE'):
line = f.readline()
size = int(line.strip())
cfg.types = np.zeros(size)
cfg.pos = np.zeros((size, 3))
elif line.startswith('SUPERCELL'):
line = f.readline()
vals = line.strip().split()
cfg.lat[0, :] = vals[0:3]
line = f.readline()
vals = line.strip().split()
cfg.lat[1, :] = vals[0:3]
line = f.readline()
vals = line.strip().split()
cfg.lat[2, :] = vals[0:3]
elif line.startswith('ATOMDATA'):
if line.endswith('FZ'):
cfg.forces = np.zeros((size, 3))
for i in range(size):
line = f.readline()
vals = line.strip().split()
cfg.types[i] = vals[1]
cfg.pos[i, :] = vals[2:5]
if cfg.forces is not None:
cfg.forces[i, :] = vals[5:8]
elif line.startswith('ENERGY'):
line = f.readline()
cfg.energy = float(line.strip())
elif line.startswith('PLUSSTRESS'):
line = f.readline()
vals = line.strip().split()
cfg.stresses = np.zeros(6)
cfg.stresses[:] = vals[0:6]
elif line.startswith('FEATURE MV_GRADE'):
cfg.grade = float(line.split()[-1])
elif line.startswith('FEATURE PYIRON'):
cfg.desc = line.split()[-1]
if line.startswith('BEGIN_CFG'):
mode = 0
elif line.startswith('END_CFG'):
break
line = f.readline()
return cfg
def savecfg(f, cfg, desc=None):
atstr1 = 'AtomData: id type cartes_x cartes_y cartes_z fx fy fz'
atstr2 = 'AtomData: id type cartes_x cartes_y cartes_z'
size = len(cfg.types)
print('BEGIN_CFG', file=f)
print('Size', file=f)
print(' %-d' % size, file=f)
if cfg.lat is not None:
print('SuperCell', file=f)
for i in range(3):
print(' %14f%14f%14f'
% (cfg.lat[i, 0], cfg.lat[i, 1], cfg.lat[i, 2]), file=f)
if cfg.forces is not None:
print(atstr1, file=f)
else:
print(atstr2, file=f)
for i in range(size):
if cfg.forces is not None:
print(' %4d %4d %14f%14f%14f %16.8e %16.8e %16.8e' %
(i+1, cfg.types[i], cfg.pos[i, 0], cfg.pos[i, 1], cfg.pos[i, 2],
cfg.forces[i, 0], cfg.forces[i, 1], cfg.forces[i, 2]), file=f)
else:
print(' %4d %4d %14f%14f%14f' %
(i+1, cfg.types[i], cfg.pos[i, 0], cfg.pos[i, 1], cfg.pos[i, 2]),
file=f)
if cfg.energy is not None:
print('Energy\t%14f' % cfg.energy, file=f)
if cfg.stresses is not None:
print('PlusStress: xx yy zz yz xz xy', file=f)
print(' %14f%14f%14f%14f%14f%14f' %
(cfg.stresses[0], cfg.stresses[1], cfg.stresses[2],
cfg.stresses[3], cfg.stresses[4], cfg.stresses[5]), file=f)
if desc is not None:
print('Feature from %s' % desc, file=f)
if cfg.desc is not None:
print('Feature %s' % cfg.desc, file=f)
print('END_CFG', file=f)
class cfgparser:
def __init__(self, file, max_cfgs=None):
self.cfgs = []
self.file = file
self.max_cfgs = max_cfgs
def __enter__(self):
while True:
if self.max_cfgs is not None and len(self.cfgs) == self.max_cfgs:
break
cfg = readcfg(self.file)
if cfg.types is not None:
self.cfgs.append(cfg)
else:
break
return self.cfgs
def __exit__(self, *args):
self.cfgs = []
def printcfg(cfg):
savecfg(None, cfg)
def loadcfgs(filename, max_cfgs=None):
with open(filename, 'r') as file:
with cfgparser(file, max_cfgs) as cfgs:
return cfgs
def savecfgs(filename, cfgs, desc=None):
with open(filename, 'w') as file:
for cfg in cfgs:
savecfg(file, cfg, desc)
print("", file=file)
|
python
|
from bitrue.client import Client
if __name__ == '__main__':
client = Client(api_key='',
api_secret='',
)
trades = client.get_my_trades()
print(client._order_format_print(trades))
'''
symbol id orderId origClientOrderId price qty commission commissionAssert time isBuyer isMaker isBestMatch
-------- ------- --------- ------------------- ----------- ---------- ------------ ------------------ ------------- --------- --------- -------------
HOTXRP 1583958 53673021 0.004473 717 1559843532000 True True True
'''
|
python
|
# coding=utf-8
# Copyright 2022 The Balloon Learning Environment Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Calculates stable parameters for balloon initialization."""
import dataclasses
import datetime as dt
from balloon_learning_environment.env.balloon import balloon
from balloon_learning_environment.env.balloon import solar
from balloon_learning_environment.env.balloon import standard_atmosphere
from balloon_learning_environment.env.balloon import thermal
from balloon_learning_environment.utils import constants
import numpy as np
import s2sphere as s2
@dataclasses.dataclass
class StableParams:
ambient_temperature: float
internal_temperature: float
mols_air: float
envelope_volume: float
superpressure: float
def calculate_stable_params_for_pressure(
pressure: float, envelope_volume_base: float,
envelope_volume_dv_pressure: float, envelope_mass: float,
payload_mass: float, mols_lift_gas: float, latlng: s2.LatLng,
date_time: dt.datetime, upwelling_infrared: float,
atmosphere: standard_atmosphere.Atmosphere) -> StableParams:
"""Calculates stable parameter values for the ambient pressure.
This calculates the internal and external temperature for a balloon
at the specified pressure, as well as the mols air in the ballonet,
envelope volume, and superpressure required to float at the specified
ambient temperature.
Args:
pressure: Ambient pressure of the balloon [Pa].
envelope_volume_base: The y-intercept for the balloon envelope volume
model [m^3].
envelope_volume_dv_pressure: The slope for the balloon envelope volume
model.
envelope_mass: Mass of the balloon envelope [kg].
payload_mass: The mass of the payload. The term payload here refers to
all parts of the flight system other than the balloon envelope [kg].
mols_lift_gas: Mols of helium within the balloon envelope [mols].
latlng: The current latitude and longitude of the balloon.
date_time: The current date and time of the balloon.
upwelling_infrared: The upwelling infrared value.
atmosphere: The current atmosphere state.
Returns:
    A StableParams object with the ambient temperature [K], internal
    temperature [K], mols of air in the ballonet [mols], envelope volume
    [m^3], and superpressure [Pa].
"""
ambient_temperature = atmosphere.at_pressure(pressure).temperature
# ---- Cold start mols air in envelope ----
# Compute the mols gas in balloon that gives the desired pressure.
# This comes from rho * V = m, where:
#
#. ambient_pressure * air_molar_mass
# rho = ----------------------------------
# universal_gas_const * ambient_temp
#
# m = (mass_envelope + mass_payload +
# helium_molar_mass * mols_helium +
# air_molar_mass * mols_air)
#
# Then, you just solve for mols_air to get the following equation.
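  # Written out explicitly (a sketch of the algebra implied above; the symbols
  # correspond to the constants used in the expression just below):
  #
  #              P * M_air * V_base / (R * T_ambient)
  #                - m_envelope - m_payload - M_He * mols_helium
  #  mols_air = --------------------------------------------------
  #                                  M_air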
mols_air = (
(pressure * constants.DRY_AIR_MOLAR_MASS * envelope_volume_base /
(constants.UNIVERSAL_GAS_CONSTANT * ambient_temperature) -
envelope_mass - payload_mass - constants.HE_MOLAR_MASS * mols_lift_gas)
/ constants.DRY_AIR_MOLAR_MASS)
# TODO(joshgreaves): Warning or Exception for initializing out of range?
mols_air = np.clip(mols_air, 0.0, None)
# ---- Cold start internal temperature ----
internal_temperature = 206.0 # [K] pick an average value to start search.
solar_elevation, _, solar_flux = solar.solar_calculator(latlng, date_time)
# Apply a few iterations of Newton-Raphson to find where the rate of
# change of temperature is close to 0.
delta_temp = 0.01
for _ in range(10):
# Note: we use envelope_volume_base rather than envelope_volume, since
# empirically it doesn't make much of a difference, and the envelope
# volume isn't calculated until the temperature is calculated.
d_internal_temp1 = thermal.d_balloon_temperature_dt(
envelope_volume_base, envelope_mass,
internal_temperature - delta_temp / 2, ambient_temperature, pressure,
solar_elevation, solar_flux, upwelling_infrared)
d_internal_temp2 = thermal.d_balloon_temperature_dt(
envelope_volume_base, envelope_mass,
internal_temperature + delta_temp / 2, ambient_temperature, pressure,
solar_elevation, solar_flux, upwelling_infrared)
    # d2_internal_temp is the second derivative of temperature w.r.t. time.
d2_internal_temp = (d_internal_temp2 - d_internal_temp1) / delta_temp
mean_d_internal_temp = (d_internal_temp1 + d_internal_temp2) / 2.0
if abs(d2_internal_temp) > 0.0:
internal_temperature -= (mean_d_internal_temp / d2_internal_temp)
if abs(mean_d_internal_temp) < 1e-5:
break
# ---- Cold start superpressure ----
envelope_volume, superpressure = (
balloon.Balloon.calculate_superpressure_and_volume(
mols_lift_gas, mols_air, internal_temperature, pressure,
envelope_volume_base, envelope_volume_dv_pressure))
return StableParams(ambient_temperature, internal_temperature, mols_air,
envelope_volume, superpressure)
def cold_start_to_stable_params(
balloon_state: balloon.BalloonState,
atmosphere: standard_atmosphere.Atmosphere) -> None:
"""Sets parameters to stable values for the ambient pressure.
The pressure altitude of the balloon depends on a number of variables,
such as the number of mols of air in the ballonet, the temperature
of air and gas inside the envelope, and the superpressure. To have
a balloon float at a specific pressure level, these parameters should
be updated to match the specified ambient pressure.
Args:
balloon_state: The balloon state to update with stable params.
atmosphere: The current atmosphere the balloon is flying in.
"""
stable_params = calculate_stable_params_for_pressure(
balloon_state.pressure, balloon_state.envelope_volume_base,
balloon_state.envelope_volume_dv_pressure, balloon_state.envelope_mass,
balloon_state.payload_mass, balloon_state.mols_lift_gas,
balloon_state.latlng, balloon_state.date_time,
balloon_state.upwelling_infrared, atmosphere)
balloon_state.ambient_temperature = stable_params.ambient_temperature
balloon_state.internal_temperature = stable_params.internal_temperature
balloon_state.mols_air = stable_params.mols_air
balloon_state.envelope_volume = stable_params.envelope_volume
balloon_state.superpressure = stable_params.superpressure
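# Illustrative usage sketch (hypothetical names; the BalloonState and Atmosphere
# instances are assumed to have been constructed elsewhere, e.g. by the
# environment's reset logic):
# cold_start_to_stable_params(balloon_state, atmosphere)
# # balloon_state now holds values that float stably at balloon_state.pressure.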
|
python
|
liste = ["ali", "veli"]
print(liste)
if "al" not in liste:
print("var")
|
python
|
from .optim_low import *
__all__ = ["OptimMP"]
|
python
|
"""empty message
Revision ID: dcbee03e3639
Revises:
Create Date: 2018-07-10 04:25:35.968792
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'dcbee03e3639'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('credentials',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('username', sa.String(length=50), nullable=True),
sa.Column('token_hash', sa.Text(), nullable=True),
sa.Column('salt', sa.Text(), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('username')
)
op.create_table('uploads',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('uploader', sa.Integer(), nullable=True),
sa.Column('ipfs_hash', sa.Text(), nullable=True),
sa.Column('original_name', sa.Text(), nullable=True),
sa.Column('date', sa.DateTime(), server_default=sa.text('now()'), nullable=True),
sa.ForeignKeyConstraint(['uploader'], ['credentials.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_unique_constraint("uploader_ipfs_constraint", "uploads", ["uploader", "ipfs_hash"])
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
    # Drop the extra unique constraint before its table is removed; doing it
    # after op.drop_table('uploads') would fail because the table is gone.
    op.drop_constraint("uploader_ipfs_constraint", "uploads", type_="unique")
    op.drop_table('uploads')
    op.drop_table('credentials')
# ### end Alembic commands ###
|
python
|
from trezor import wire
from trezor.messages import MessageType
def boot():
# only enable LoadDevice in debug builds
if __debug__:
wire.add(MessageType.LoadDevice, __name__, "load_device")
wire.add(MessageType.ResetDevice, __name__, "reset_device")
wire.add(MessageType.BackupDevice, __name__, "backup_device")
wire.add(MessageType.WipeDevice, __name__, "wipe_device")
wire.add(MessageType.RecoveryDevice, __name__, "recovery_device")
wire.add(MessageType.ApplySettings, __name__, "apply_settings")
wire.add(MessageType.ApplyFlags, __name__, "apply_flags")
wire.add(MessageType.ChangePin, __name__, "change_pin")
wire.add(MessageType.SetU2FCounter, __name__, "set_u2f_counter")
|
python
|
from __future__ import print_function
import sys
import os
import requests
from datetime import datetime
from random import randint
import re
class Facegrab:
def __init__(self):
self.base_url = "http://graph.facebook.com/picture?id={}&width=800"
self.sess = requests.Session()
self.sess.headers.update({
"User-Agent": "Facegrab v2"
})
@staticmethod
def create_dir(prefix):
dir_c = os.path.join(
os.getcwd(),
prefix,
# datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
"myfriends"
)
try:
os.makedirs(dir_c)
except OSError as e:
            # errno 17 (EEXIST) just means the directory already exists; any
            # other OSError is a real failure, so report it and abort.
            if e.errno == 17:
                pass
            else:
                print("Cannot create a folder.")
                sys.exit(1)
return dir_c
def getProfile(self, photoUrl, saveUrl):
print(f"Downloading {photoUrl}.")
response = self.sess.get(photoUrl)
if response.headers["Content-Type"] == "image/gif":
return
with open(saveUrl, "wb") as f:
f.write(response.content)
return True
def getImages(self, uids):
sizeDataset = len(uids)
_id = randint(1, int(1e4))
photoCount = 0
folder = self.create_dir("facegrab")
while photoCount < sizeDataset:
profile = self.getProfile(
self.base_url.format(uids[photoCount]),
f"{folder}/{_id}.jpg"
)
if profile:
photoCount += 1
_id += 1
else:
_id += 10 # Cannot understand the logic behind this.
print(
"\nFace Dataset created in facegrab folder."
f"\nSize: {photoCount}"
)
print()
return
def retrieve_uids():
pattern = re.compile(r"(\{id:\"\d*\",name)")
ids = set()
    for i in range(1):  # number of friendslist<N>.txt files to scan
        # Use a separate name for the line counter so it doesn't shadow the
        # file index `i` used to build the file name.
        for line_no, line in enumerate(open('friendslist' + str(i + 1) + '.txt', 'r')):
            for match in re.finditer(pattern, line):
                ids.add(match.groups()[0].split("\"")[1])
                # print('Found on line %s: %s' % (line_no + 1, match.groups()))
#
print("found " + str(len(ids)) + " ids")
return list(ids)
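# Example (illustrative only): a dump line containing '{id:"12345",name:"Jane"}'
# matches the pattern above as '{id:"12345",name', and splitting that match on
# '"' yields the uid '12345'.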
if __name__ == "__main__":
checks = [
len(sys.argv) == 1,
# sys.argv[1].isdigit()
# int(sys.argv[1]) < int(1e7)
]
# if not all(checks):
# print("\nIncorrect arguments.")
# print(
# "Usage: python facegrab.py"
# )
uids = retrieve_uids()
grabby = Facegrab()
# grabby.getImages(int(sys.argv[1]), uids)
grabby.getImages(uids)
|
python
|
from polygon import WebSocketClient
from polygon.websocket.models import WebSocketMessage
from typing import List
import asyncio
c = WebSocketClient(subscriptions=["T.*"])
async def handle_msg(msgs: List[WebSocketMessage]):
for m in msgs:
print(m)
async def timeout():
await asyncio.sleep(1)
print("unsubscribe_all")
c.unsubscribe_all()
await asyncio.sleep(1)
print("close")
await c.close()
async def main():
await asyncio.gather(c.connect(handle_msg), timeout())
asyncio.run(main())
|
python
|
import numpy as np
#import tensorflow as tf
from PIL import Image
from PIL import ImageDraw
from PIL import ImageFont
from tqdm import tqdm
import argparse
import sys
import cv2
import math
rgb = True
def draw_bboxes (img, boxes, outname):
for i in range(len(boxes)):
xmin,ymin,xmax,ymax,cat = boxes[i]
cv2.rectangle(img,(xmin,ymin),(xmax,ymax),(0,0,255),4)
font = cv2.FONT_HERSHEY_SIMPLEX
#cv2.putText(img, str(cat), (xmin,ymin-10), font, 0.75, (255,255,255), 2, cv2.LINE_AA)
print (outname)
cv2.imwrite(outname, img)
if __name__ == "__main__":
#print ("Usage: python show_regions.py image.jpg boxes.txt image_with_boxes.jpg confidence_threshold")
image = cv2.imread(sys.argv[1], int(rgb))
fboxes = open(sys.argv[2], 'r')
outname = sys.argv[3]
confidence = float(sys.argv[4])
boxes = []
for line in fboxes:
line = line.strip('\n')
fields = line.split(' ')
xmin = int(fields[0])
ymin = int(fields[1])
xmax = int(fields[2])
ymax = int(fields[3])
cat = int(fields[4])
score = float(fields[5])
#Showing only some categories:
not_show = [44,50,51,54,55,59,72,73,74,75,76,77,79,82,83,84,86,89,91,93,94]
if (score > confidence) and (cat not in not_show):
boxes.append((xmin,ymin,xmax,ymax,cat))
draw_bboxes(image, boxes, outname)
|
python
|
from pylama_dmypy import VERSION
from setuptools import setup, find_packages
# fmt: off
setup(
name = "pylama-dmypy"
, version = VERSION
    , packages = find_packages(include=["pylama_dmypy", "pylama_dmypy.*"], exclude=["tests*"])
, extras_require =
{ 'tests':
[ "pylama==8.3.8"
, "mypy==0.942"
]
}
, entry_points =
{ 'pylama.linter': ['dmypy = pylama_dmypy.linter:Linter']
}
# metadata
, url = "http://github.com/delfick/pylama_dmypy"
, author = "Stephen Moore"
, author_email = "[email protected]"
, description = "Linting plugin for pylama to see dmypy"
, long_description = open("README.rst").read()
, license = "MIT"
)
# fmt: on
|
python
|
# -*- coding: utf-8 -*-
__all__ = ('SlashCommandPermissionOverwriteWrapper', 'SlashCommandWrapper', )
from functools import partial as partial_func
from ...discord.guild import Guild
from ...discord.preconverters import preconvert_snowflake
from ...discord.interaction import ApplicationCommandPermissionOverwrite
UNLOADING_BEHAVIOUR_DELETE = 0
UNLOADING_BEHAVIOUR_KEEP = 1
UNLOADING_BEHAVIOUR_INHERIT = 2
SYNC_ID_GLOBAL = 0
SYNC_ID_MAIN = 1
SYNC_ID_NON_GLOBAL = 2
def raw_name_to_display(raw_name):
"""
Converts the given raw application command name to it's display name.
Parameters
----------
raw_name : `str`
The name to convert.
Returns
-------
display_name : `str`
The converted name.
"""
return '-'.join([w for w in raw_name.strip('_ ').lower().replace(' ', '-').replace('_', '-').split('-') if w])
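# For example (illustrative): raw_name_to_display('  My_Command  NAME ') returns
# 'my-command-name', and raw_name_to_display('ping') returns 'ping'.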
class SlashCommandWrapper:
"""
Wraps a slash command enabling the wrapper to postprocess the created slash command.
Attributes
----------
_wrapped : `Any`
The wrapped object.
"""
__slots__ = ('_wrapped',)
def __new__(cls):
"""
Creates a partial function to wrap a slash command.
Subclasses should overwrite this method.
Returns
-------
wrapper : `functools.partial` of ``SlashCommandWrapper._decorate``
Partial function to wrap a slash command.
"""
return partial_func(cls._decorate, cls)
def _decorate(cls, wrapped):
"""
Wraps the given command.
Subclasses should overwrite this method.
Parameters
----------
wrapped : `Any`
The slash command or other wrapper to wrap.
Returns
-------
self : ``SlashCommandWrapper``
The created instance.
"""
self = object.__new__(cls)
self._wrapped = wrapped
return self
def apply(self, slash_command):
"""
Applies the wrapper's changes on the respective slash command.
Subclasses should overwrite this method.
Parameters
----------
slash_command : ``SlashCommand``
"""
pass
def __repr__(self):
"""Returns the slash command wrapper's representation."""
return f'<{self.__class__.__name__} wrapped={self._wrapped!r}>'
def fetch_function_and_wrappers_back(self):
"""
Fetches back the source function and all the wrappers, the returns them.
Returns
-------
function : `Any`
The wrapped function.
wrappers : `list` of ``SlashCommandWrapper`` instances
The fetched back wrappers.
"""
wrappers = [self]
maybe_wrapper = self._wrapped
while True:
if isinstance(maybe_wrapper, SlashCommandWrapper):
wrappers.append(maybe_wrapper)
maybe_wrapper = maybe_wrapper._wrapped
else:
function = maybe_wrapper
break
wrappers.reverse()
return function, wrappers
class SlashCommandPermissionOverwriteWrapper(SlashCommandWrapper):
"""
    Wraps a slash command, allowing / disallowing it only for the given user or role inside of a guild.
Attributes
----------
_wrapped : `Any`
The wrapped object.
_guild_id : `int`
The guild id where the overwrites should be applied to.
_overwrite : ``ApplicationCommandPermissionOverwrite``
The permission overwrite to apply.
"""
__slots__ = ('_guild_id', '_overwrite')
def __new__(cls, guild, target, allow):
"""
Creates a partial function to wrap a slash command.
Parameters
----------
guild : ``Guild`` or `int`
The guild's identifier where the overwrite is applied.
target : ``User``, ``Client`` or ``Role``, `tuple` ((``User``, ``UserBase``, ``Role`` type) or \
`str` (`'Role'`, `'role'`, `'User'`, `'user'`), `int`)
The target entity of the overwrite
            The expected type & value might be pretty confusing, but the target is defined like this to allow
            relaxed creation.
            To avoid confusion, here is a list of the expected structures:
- ``Role`` instance
- ``User`` instance
- ``Client`` instance
- `tuple` (``Role`` type, `int`)
- `tuple` (``User`` type, `int`)
- `tuple` (``UserBase`` type, `int`)
- `tuple` (`'Role'`, `int`)
- `tuple` (`'role'`, `int`)
- `tuple` (`'User'`, `int`)
- `tuple` (`'user'`, `int`)
allow : `bool`
Whether the respective application command should be enabled for the respective entity.
Returns
-------
wrapper : `functools.partial` of ``SlashCommandWrapper._decorate``
Partial function to wrap a slash command.
"""
if isinstance(guild, Guild):
guild_id = guild.id
elif isinstance(guild, (int, str)):
guild_id = preconvert_snowflake(guild, 'guild')
else:
            raise TypeError(f'`guild` can be given neither as `{Guild.__name__}` nor as `int` instance, '
                f'got {guild.__class__.__name__}.')
overwrite = ApplicationCommandPermissionOverwrite(target, allow)
return partial_func(cls._decorate, cls, guild_id, overwrite)
def _decorate(cls, guild_id, overwrite, wrapped):
"""
Wraps given command.
Parameters
----------
guild_id : `int`
The guild id where the overwrites should be applied to.
overwrite : ``ApplicationCommandPermissionOverwrite``
The permission overwrite to apply.
wrapped : `Any`
The slash command or other wrapper to wrap.
Returns
-------
self : ``SlashCommandWrapper``
The created instance.
"""
self = object.__new__(cls)
self._guild_id = guild_id
self._overwrite = overwrite
self._wrapped = wrapped
return self
def apply(self, slash_command):
"""
Applies the wrapper's changes on the respective slash command.
Parameters
----------
slash_command : ``SlashCommand``
"""
slash_command.add_overwrite(self._guild_id, self._overwrite)
def __repr__(self):
"""Returns the slash command wrapper's representation."""
return f'<{self.__class__.__name__} wrapped={self._wrapped!r}, guild_id={self._guild_id!r}, ' \
f'overwrite={self._overwrite!r}>'
RUNTIME_SYNC_HOOKS = []
def runtime_sync_hook_is_client_running(client):
"""
Runtime sync hook to check whether a slash command should be registered and synced instantly when added or removed.
Parameters
----------
client : ``Client``
The respective client of the ``Slasher``.
"""
return client.running
RUNTIME_SYNC_HOOKS.append(runtime_sync_hook_is_client_running)
|
python
|
from collections import deque, defaultdict
import sys
N = int(input())
ab = []
for i in range(N - 1):
ab.append(tuple(map(int, input().split())))
c_list = list(map(int, input().split()))
c_list.sort()
c_que = deque(c_list)
cnt = defaultdict(int)
outs = defaultdict(list)
for a, b in ab:
cnt[a - 1] += 1
cnt[b - 1] += 1
outs[a - 1].append(b - 1)
outs[b - 1].append(a - 1)
cnt = sorted(cnt.items(), key=lambda x: -x[1])
top = cnt[0][0]
tar = defaultdict(lambda: 0)
sys.setrecursionlimit(10 ** 9)
visit = [False] * N
def dfs(start):
tmp = []
visit[start] = True
for v2 in outs[start]:
if visit[v2]:
continue
q = c_que.pop()
tar[v2] = q
tmp.append(v2)
for v2 in tmp:
if visit[v2]:
continue
dfs(v2)
tar[top] = c_que.pop()
dfs(top)
ans = 0
for a, b in ab:
a -= 1
b -= 1
ans += min(tar[a], tar[b])
print(ans)
for i in range(N):
print(tar[i], end=" ")
|
python
|
#################################################
# SN74HC595 with software SPI ESP12-E #
# Sums up 1 bit by 1 bit till 255 is reached #
# Author: Miguel Sama #
# Date: 19/06/2018 #
#################################################
from machine import Pin, SPI
import utime
#Software SCLK at PIN 0 (D3 dev board)
#Software MOSI at PIN 2 (D4 dev board)
#Used PIN 15 (D8 dev board) as latch
#~OE connected directly to GND
#~SRCLR connected directly to VCC
#LSB LED connected to Qa. MSB LED connected to Qh
latch = Pin(15,Pin.OUT)
spi = SPI(-1, baudrate=200000, polarity=0, phase=0, sck=Pin(0), mosi=Pin(2), miso=Pin(4))
a = 0
while True:
while a < 256:
latch.value(0)
spi.write(bytearray([a]))
latch.value(1)
a += 1
utime.sleep_ms(50)
a = 0
|
python
|
#!/usr/bin/env python
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2008,2009,2010,2011,2012,2013,2014,2015,2016 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for testing using the cluster command to move between clusters."""
import unittest
if __name__ == "__main__":
import utils
utils.import_depends()
from brokertest import TestBrokerCommand
# Note, this used to test the rebind_cluster, however I've
# deco'ed that command. I've kept this test here in order to preserve
# the state of all the previous and subsequent tests that assume the
# state of evh1 (it's a tangled web of statefulness going on here!)
class TestRebindCluster(TestBrokerCommand):
# Failure test is in add_virtual_hardware.
def test_100_rebind_evh1(self):
self.successtest(["cluster",
"--hostname", "evh1.aqd-unittest.ms.com",
"--cluster", "utecl2"])
def test_110_unbind_evh2(self):
# Let's see if we can put a node back after the cluster size has shrunk
command = ["uncluster", "--hostname", "evh2.aqd-unittest.ms.com",
"--personality", "generic", "--cluster", "utecl1"]
self.successtest(command)
def test_111_rebind_evh2(self):
command = ["cluster", "--hostname", "evh2.aqd-unittest.ms.com",
"--personality", "vulcan-10g-server-prod",
"--cluster", "utecl1"]
self.successtest(command)
def test_200_verifyrebindevh1(self):
command = "show host --hostname evh1.aqd-unittest.ms.com"
out = self.commandtest(command.split(" "))
self.matchoutput(out, "Primary Name: evh1.aqd-unittest.ms.com", command)
self.matchoutput(out, "Member of ESX Cluster: utecl2", command)
# FIXME: Also test plenary files.
def test_200_verify_evh2(self):
command = ["show", "cluster", "--cluster", "utecl1"]
out = self.commandtest(command)
self.matchoutput(out,
"Member: evh2.aqd-unittest.ms.com [node_index: 0]",
command)
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(TestRebindCluster)
unittest.TextTestRunner(verbosity=2).run(suite)
|
python
|
import clr
if clr.use35:
clr.AddReference("Microsoft.Scripting")
clr.AddReference("Microsoft.Dynamic")
clr.AddReference("Microsoft.Scripting.Core")
import Microsoft.Scripting.Ast as Exprs
from Microsoft.Scripting.ComInterop import ComBinder
from Microsoft.Scripting.Utils import (Action, Func)
else:
clr.AddReference("System.Core")
clr.AddReference("Microsoft.Dynamic")
import System.Linq.Expressions as Exprs
from Microsoft.Scripting.ComInterop import ComBinder
from System import (Action, Func)
from System.Runtime.CompilerServices import CallSite
from System.Dynamic import (ExpandoObject, InvokeBinder, DynamicMetaObject,
GetMemberBinder, SetMemberBinder, CallInfo,
BindingRestrictions, IDynamicMetaObjectProvider,
InvokeMemberBinder, CreateInstanceBinder,
GetIndexBinder, SetIndexBinder,
BinaryOperationBinder, UnaryOperationBinder)
from System import (MissingMemberException, ArgumentException,
                    InvalidOperationException, Boolean,
                    Type, Array, Delegate, Void)
import System.Reflection as refl
from System.IO import Path, File
### RuntimeHelpers is a collection of functions that perform operations at
### runtime of Sympl code, such as performing an import or fetching a global
### variable's value (depending on global look up semantics).
###
class RuntimeHelpers (object):
### SymplImport takes the runtime and module as context for the import.
### It takes a list of names, what, that either identify a (possibly dotted
### sequence) of names to fetch from Globals or a file name to load. Names
### is a list of names to fetch from the final object that what indicates
### and then set each name in module. Renames is a list of names to add to
### module instead of names. If names is empty, then the name set in
### module is the last name in what. If renames is not empty, it must have
### the same cardinality as names.
###
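    ### For example (a hedged illustration of the code below, not a spec of the
    ### Sympl surface syntax): SymplImport(runtime, module, ["system"], [], [])
    ### fetches Globals.system if it exists (otherwise loads "system.sympl"
    ### next to module.__file__) and binds the result as module.system.
    ###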
@staticmethod
def SymplImport (runtime, module, what, names, renames):
## Get object or file scope.
helpers = DynamicObjectHelpers
if len(what) == 1:
name = what[0]
if helpers.HasMember(runtime.Globals, name):
value = helpers.GetMember(runtime.Globals, name)
else:
f = DynamicObjectHelpers.GetMember(module, "__file__")
f = Path.Combine(Path.GetDirectoryName(f), name + ".sympl")
if File.Exists(f):
value = runtime.ExecuteFile(f)
else:
raise Exception("Import: can't find name in globals " +
"or as file to load -- " + name + ", " +
f)
else:
## What has more than one name, must be Globals access.
value = runtime.Globals
for name in what:
value = helpers.GetMember(value, name)
## Assign variables in module.
if len(names) == 0:
setattr(module, what[-1], value)
else:
for n, m in zip(names, renames or names):
setattr(module, m, getattr(value, n))
return None
@staticmethod
def SymplEq (x, y):
## Not that Sympl has other immediate values, but could add more branches
## for doubles or types that might flow in from .NET interop.
if type(x) is int and type(y) is int:
return x == y
else:
return x is y
### Hack until dynamic expr invoking Cons type is fixed in Ipy.
###
@staticmethod
def MakeCons (x, y):
return Cons(x, y)
@staticmethod
def GetConsElt (lst, i):
return RuntimeHelpers._nthcdr(lst, i).First
@staticmethod
def SetConsElt (lst, i, value):
lst = RuntimeHelpers._nthcdr(lst, i)
lst.First = value
return value
@staticmethod
def _nthcdr (lst, i):
while i > 0 and lst is not None:
lst = lst.Rest
i = i - 1
if i == 0 and lst is not None:
return lst
else:
raise Exception("List doesn't have " + repr(i) + " elements.")
### Don't need this in C# because we can create a Property MemberExpr. This
### works in IPy because our TMMO.BindGetMember falls back to Python's
### MO to fetch the member.
###
#@staticmethod
#def GetTypeModelReflType (typModel):
# return typModel.ReflType
########################
### Helpers for code gen
########################
### RunHelpersInvokeBinder is the binder that lets me invoke members of
### runtimeHelpers as DynamicExprs. In C#, we can create MethodCallExprs
### with the MethodInfos of my RuntimeHelpers members, so we don't need this.
###
class RunHelpersInvokeBinder (InvokeBinder):
#@property doesn't work
def get_CacheIdentity (self):
return self
def GetHashCode (self):
        ## Random sophomoric hash ...
return 197 ^ super(RunHelpersInvokeBinder, self).GetHashCode()
def Equals (self, obj):
return (isinstance(obj, RunHelpersInvokeBinder) and
super(RunHelpersInvokeBinder, self).Equals(obj))
    def FallbackInvoke(self, objMO, argMOs, errorSuggestionMO):
## Python handles the actual invoke in its callable MO
## When translated to C#, won't need DynExpr to call helper ... MCE.
pass
### MakeSymplImportCall gets called from analysis code that generates
### Expression Trees for Sympl 'import' expressions. Runtime and module
### are ParamExprs from the outer lambda wrapping a file's top-level exprs.
### What, names, and renames are lists (possibly empty) of IdTokens.
###
def MakeSymplImportCall (runtime, module, what, names, renames):
if not isinstance(names, list):
raise Exception("Internal: name is not list?")
return Exprs.Expression.Dynamic(
RunHelpersInvokeBinder(CallInfo(5)),
object, #ret type
Exprs.Expression.Constant(RuntimeHelpers.SymplImport),
runtime, module,
Exprs.Expression.Constant([x.Name for x in what]),
Exprs.Expression.Constant([x.Name for x in names]),
Exprs.Expression.Constant([x.Name for x in renames]))
def MakeSymplEqCall (left, right):
return Exprs.Expression.Convert(
Exprs.Expression.Dynamic(
RunHelpersInvokeBinder(CallInfo(2)),
object, #ret type
Exprs.Expression.Constant(RuntimeHelpers.SymplEq),
left, right),
bool) #clr.GetClrType(Boolean))
def MakeSymplConsCall (left, right):
return Exprs.Expression.Dynamic(
RunHelpersInvokeBinder(CallInfo(2)),
object, #ret type
Exprs.Expression.Constant(RuntimeHelpers.MakeCons),
left, right)
def MakeSymplListCall (args):
return Exprs.Expression.Dynamic(
RunHelpersInvokeBinder(CallInfo(len(args))),
object, #ret type
Exprs.Expression.Constant(Cons._List),
*args)
###############################
### Helpers for runtime binding
###############################
### GetTargetArgsRestrictions generates the restrictions needed for the
### MO resulting from binding an operation. This combines all existing
### restrictions and adds some for arg conversions. targetInst indicates
### whether to restrict the target to an instance (for operations on type
### objects) or to a type (for operations on an instance of that type).
###
### NOTE, this function should only be used when the caller is converting
### arguments to the same types as these restrictions. See ConvertArguments.
###
def GetTargetArgsRestrictions (targetMO, argMOs, targetInst):
## Important to add existing restriction first because the
## DynamicMetaObjects (and possibly values) we're looking at depend
## on the pre-existing restrictions holding true.
restrictions = targetMO.Restrictions.Merge(
BindingRestrictions.Combine(argMOs))
if targetInst:
restrictions = restrictions.Merge(
BindingRestrictions.GetInstanceRestriction(
targetMO.Expression,
targetMO.Value))
else:
restrictions = restrictions.Merge(
BindingRestrictions.GetTypeRestriction(
targetMO.Expression,
targetMO.LimitType))
for a in argMOs:
if a.HasValue and a.Value is None:
r = BindingRestrictions.GetInstanceRestriction(a.Expression,
None)
else:
r = BindingRestrictions.GetTypeRestriction(a.Expression,
a.LimitType)
restrictions = restrictions.Merge(r)
return restrictions
### ParamsMatchArgs returns whether the args are assignable to the parameters.
### We specially check for our TypeModel that wraps .NET's RuntimeType, and
### elsewhere we detect the same situation to convert the TypeModel for calls.
###
### IsAssignableFrom works except for value args that need to pass to reftype
### params. We could detect that to be smarter and then explicitly StrongBox
### the args.
###
### Could check for a.HasValue and a.Value is None and
### ((paramtype is class or interface) or (paramtype is generic and
### nullable<t>)) to support passing nil anywhere.
###
### Consider checking p.IsByRef and returning false since that's not CLS.
###
def ParamsMatchArgs (params, args):
for a,p in zip(args, params):
if (p.ParameterType is clr.GetClrType(Type) and #get past py type wrapping
type(a.Value) is TypeModel): #ok if no value, value = null
continue
if not p.ParameterType.IsAssignableFrom(a.LimitType):
## or p.IsByRef: punt for non CLS
return False
return True
### Returns a DynamicMetaObject with an expression that fishes the .NET
### RuntimeType object from the TypeModel MO.
###
def GetRuntimeTypeMoFromModel (typeMO):
if type(typeMO) is not TypeModelMetaObject:
raise Exception("Internal: Need TMMO to fish out ReflType.")
return DynamicMetaObject(
## In C# can use Expression.Call on methodinfo.
Exprs.Expression.Convert(
Exprs.Expression.Dynamic(
## This call site doesn't share any L2 caching
## since we don't call GetGetMemberBinder from Sympl.
## We aren't plumbed to get the runtime instance here.
SymplGetMemberBinder("ReflType"),
object,
typeMO.Expression),
Type),
#Exprs.Expression.Dynamic(
# RunHelpersInvokeBinder(CallInfo(1)),
# object,
# Exprs.Expression.Constant(
# RuntimeHelpers.GetTypeModelReflType),
# typeMO.Expression),
typeMO.Restrictions.Merge(
BindingRestrictions.GetTypeRestriction(
typeMO.Expression, TypeModel))) #,
## Must supply a value to prevent binder FallbackXXX methods
## from infinitely looping if they do not check this MO for
## HasValue == false and call Defer. After Sympl added Defer
## checks, we could verify, say, FallbackInvokeMember by no
## longer passing a value here.
#typeMO.ReflType)
### Returns list of Convert exprs converting args to param types. If an arg
### is a TypeModel, then we treat it special to perform the binding. We need
### to map from our runtime model to .NET's RuntimeType object to match.
###
### To call this function, args and pinfos must be the same length, and param
### types must be assignable from args.
###
### NOTE, if using this function, then need to use GetTargetArgsRestrictions
### and make sure you're performing the same conversions as restrictions.
###
def ConvertArguments (argMOs, pinfos):
res = []
for p,a in zip(pinfos, argMOs):
argExpr = a.Expression
if type(a.Value) is TypeModel and p.ParameterType is clr.GetClrType(Type):
argExpr = GetRuntimeTypeMoFromModel(a).Expression
res.append(Exprs.Expression.Convert(argExpr, p.ParameterType))
return res
###
### Note, callers must ensure the DynamicMetaObject that uses this expression
### has consistent restrictions for the conversion done on args and the target.
###
def GetIndexExpression (targetMO, indexMOs):
indexExprs = [Exprs.Expression.Convert(x.Expression, x.LimitType)
for x in indexMOs]
if type(targetMO.Value) is Cons: #Don't look at LimitType to compare py type objs.
## In C# can use Expression.Call on methodinfo.
return Exprs.Expression.Dynamic(
RunHelpersInvokeBinder(CallInfo(2)),
object,
Exprs.Expression.Constant(RuntimeHelpers.GetConsElt),
Exprs.Expression.Convert(targetMO.Expression,
targetMO.LimitType),
indexExprs[0])
elif targetMO.LimitType.IsArray:
return Exprs.Expression.ArrayAccess(
Exprs.Expression.Convert(targetMO.Expression,
targetMO.LimitType),
indexExprs)
else:
## Check for Item indexer.
props = targetMO.LimitType.GetProperties()
props = [x for x in props if len(x.GetIndexParameters()) == len(indexMOs)]
res = []
for p in props:
if ParamsMatchArgs(p.GetIndexParameters(), indexMOs):
res.append(p)
if len(res) == 0:
return Exprs.Expression.Throw(
Exprs.Expression.New(
MissingMemberException.GetConstructor(
Array[Type]([str])),
Exprs.Expression.Constant(
"Can't find matching indexer property.")))
return Exprs.Expression.MakeIndex(
Exprs.Expression.Convert(targetMO.Expression,
targetMO.LimitType),
res[0], indexExprs)
## CreateThrow takes arguments like fallback and bind methods, dynamic meta
## objects. It also takes restrictions to constrain when the throw rule is
## good. It takes an Exception type and arguments for the throw the resulting
## DynamicMetaObject represents. It returns a DynamicMetaObject whose expr
## throws the exception, and ensures the expr's type is object to satisfy
## the CallSite return type constraint.
##
def CreateThrow (target, args, moreTests, exception, *exceptionArgs):
argExprs = None
argTypes = Type.EmptyTypes
if exceptionArgs is not None:
argExprs = []
argTypes = []
for o in exceptionArgs:
e = Exprs.Expression.Constant(o)
argExprs.append(e)
argTypes.append(e.Type)
constructor = clr.GetClrType(exception).GetConstructor(Array[Type](argTypes))
if constructor is None:
raise ArgumentException(
"Type doesn't have constructor with a given signature")
return DynamicMetaObject(
Exprs.Expression.Throw(
Exprs.Expression.New(constructor, argExprs),
## Force expression to be type object so that DLR CallSite code
            ## thinks only type object flows out of the CallSite.
object),
target.Restrictions.Merge(BindingRestrictions.Combine(args))
.Merge(moreTests))
### EnsureObjectResult wraps expr if necessary so that any binder or
### DynamicMetaObject result expression returns object. This is required
### by CallSites.
###
def EnsureObjectResult (expr):
if not expr.Type.IsValueType:
return expr
if expr.Type is clr.GetClrType(Void):
return Exprs.Expression.Block(expr, Exprs.Expression.Default(object))
else:
return Exprs.Expression.Convert(expr, object)
##############################
### Type model IDynObj wrapper
##############################
### TypeModel wraps System.RuntimeType objects. When Sympl code encounters
### a type leaf node in Sympl.Globals and tries to invoke a member, wrapping
### the ReflectionTypes in TypeModels allows member access to get the type's
### members and not ReflectionType's members.
###
class TypeModel (object, IDynamicMetaObjectProvider):
def __init__ (self, typ):
## Note, need to check for initialized members in GetMetaObject so
        ## that creating TypeModels works without using our custom MO.
self.ReflType = typ
### GetMetaObject needs to wrap the base IDO due to Python's objects all
### being IDOs. While this GetMetaObject definition is on the stack IPy
### ensures Ipy uses its own MO for TypeModel instances. However, when
### this function is NOT pending on the stack, IPy calls this GetMetaObject
### to get the MO. TypeModelMetaObject needs to delegate to the Python IDO
### to dot into TypeModel instances for members, or capture all the
### TypeModel state in our MO. We pass ReflType here so that in
### TypeModelMetaObject BindXXX methods, we do not have to access TypeModel
### instance members. If we did access TypeModel instance members from
### TypeModelMetaObject, then that code would call this GetMetaObject and
### use the BindGetMember below, which would fail to access ReflType since
    ### the BindGetMember below looks for members on the type represented by
### ReflType.
###
### The C# implementation won't need to do this awkward workaround.
###
def GetMetaObject (self, objParam):
baseIdoMo = IDynamicMetaObjectProvider.GetMetaObject(self, objParam)
if hasattr(self, "ReflType"):
## If came through once and initialized this slot, then return
## my MO for accessing the members of the type represented by
## ReflType.
return TypeModelMetaObject(objParam, self, self.ReflType,
baseIdoMo)
return baseIdoMo
class TypeModelMetaObject (DynamicMetaObject):
### Constructor takes ParameterExpr to reference CallSite, a TypeModel
### that the new TypeModelMetaObject represents, and the base Python IDO MO
### handle for the TypeModel instance that Python uses. We need to
### delegate to this MO explicitly to get Python to do binding for us on
### TypeModel instances when it is NOT Sympl code that is trying to use
### the TypeModel to dot into members on the type represented by the
### TypeModel instance.
###
def __new__ (self, objParam, typModel, refltype, baseIdoMo):
mo = super(TypeModelMetaObject, self).__new__(
TypeModelMetaObject, objParam, BindingRestrictions.Empty,
typModel)
mo.TypModel = typModel
mo.ReflType = refltype
mo.ObjParamExpr = objParam
mo.BaseIDOMO = baseIdoMo
return mo
def BindGetMember (self, binder):
#debugprint("tmmo bindgetmember ...", binder.Name)
flags = (refl.BindingFlags.IgnoreCase | refl.BindingFlags.Static |
refl.BindingFlags.Public)
## consider BindingFlags.Instance if want to return wrapper for
## inst members that is callable.
members = self.ReflType.GetMember(binder.Name, flags)
if len(members) == 1:
return DynamicMetaObject(
## We always access static members for type model
## objects, so the first argument in MakeMemberAccess
## should be null (no instance).
EnsureObjectResult(
Exprs.Expression.MakeMemberAccess(
None, members[0])),
## Don't need restriction test for name since this
## rule is only used where binder is used, which is
## only used in sites with this binder.Name.
self.Restrictions.Merge(
BindingRestrictions.GetInstanceRestriction(
self.Expression,
self.Value)))
else:
## Defer to IPy binding to access TypeModel instance members. IPy
## will fallback to the binder as appropriate.
##return binder.FallbackGetMember(self)
return self.BaseIDOMO.BindGetMember(binder)
### Because we don't ComboBind over several MOs and operations, and no one
### is falling back to this function with MOs that have no values, we
### don't need to check HasValue. If we did check, and HasValue == False,
### then would defer to new InvokeMemberBinder.Defer().
###
def BindInvokeMember (self, binder, args):
debugprint("tmmo: bindinvokemember ...", binder.Name)
flags = (refl.BindingFlags.IgnoreCase | refl.BindingFlags.Static |
refl.BindingFlags.Public)
members = self.ReflType.GetMember(binder.Name, flags)
if (len(members) == 1 and
(isinstance(members[0], refl.PropertyInfo) or
isinstance(members[0], refl.FieldInfo))):
raise Exception("Haven't implemented invoking delegate values " +
"from properties or fields.")
## NOT TESTED, should check type for isinstance delegate
#return DynamicMetaObject(
# Exprs.Expression.Dynamic(
# SymplInvokeBinder(CallInfo(len(args))),
# object,
# ([Exprs.MakeMemberAccess(self.Expression, mem)] +
# (x.Expression for x in args))))
## Don't test for eventinfos since we do nothing with them now.
else:
## Get MethodInfos with right arg count.
debugprint("tmmo bind invoke mem ... searching ...", len(members))
mi_mems = [x for x in members if isinstance(x, refl.MethodInfo) and
len(x.GetParameters()) == len(args)]
debugprint("methodinfo members with same arg count: ", len(mi_mems))
debugprint(mi_mems)
res = []
for mem in mi_mems:
if ParamsMatchArgs(mem.GetParameters(), args):
res.append(mem)
if len(res) == 0:
## Sometimes when binding members on TypeModels the member
                ## is an instance member since the Type is an instance of Type.
## We fallback to the binder with the Type instance to see if
## it binds. The SymplInvokeMemberBinder does handle this.
refltypeMO = GetRuntimeTypeMoFromModel(self)
return binder.FallbackInvokeMember(refltypeMO, args, None)
## True means generate an instance restriction on the MO.
## We are only looking at the members defined in this Type instance.
restrictions = GetTargetArgsRestrictions(self, args, True)
## restrictions and conversion must be done consistently.
callArgs = ConvertArguments(args, res[0].GetParameters())
## Fix expr to satisfy object type required by CallSite.
return DynamicMetaObject(
EnsureObjectResult(Exprs.Expression.Call(res[0],
callArgs)),
restrictions)
## Could try just letting Expr.Call factory do the work, but if
## there is more than one applicable method using just
## assignablefrom, Expr.Call flames out. It does not pick a "most
## applicable" method.
## Defer to IPy binding to invoke TypeModel instance members. IPy
## will fallback to the binder as appropriate.
##return binder.FallbackInvokeMember(self)
##return self.BaseIDOMO.BindInvokeMember(binder, args)
def BindCreateInstance (self, binder, args):
ctors = self.ReflType.GetConstructors()
## Get constructors with right arg count.
ctors = [x for x in ctors
if len(x.GetParameters()) == len(args)]
res = []
for mem in ctors:
if ParamsMatchArgs(mem.GetParameters(), args):
res.append(mem)
if len(res) == 0:
refltypeMO = GetRuntimeTypeMoFromModel(self)
return binder.FallbackCreateInstance(refltypeMO, args)
## True means generate an instance restriction on the MO.
## We only have a rule to create this exact type.
restrictions = GetTargetArgsRestrictions(self, args, True)
## restrictions and conversion must be done consistently.
callArgs = ConvertArguments(args, res[0].GetParameters())
return DynamicMetaObject(
## Creating an object, so don't need EnsureObjectResult.
Exprs.Expression.New(res[0], callArgs),
restrictions)
###
    ### Bindings I don't care about, so defer to Python's IDO
###
def BindConvert (self, binder):
return self.BaseIDOMO.BindConvert(binder)
def BindSetMember (self, binder, valueMO):
return self.BaseIDOMO.BindSetMember(binder, valueMO)
def BindDeleteMember (self, binder):
return self.BaseIDOMO.BindDeleteMember(binder)
def BindGetIndex (self, binder, indexes):
return self.BaseIDOMO.BindGetIndex (binder, indexes)
def BindSetIndex (self, binder, indexes, value):
return self.BaseIDOMO.BindSetIndex (binder, indexes, value)
def BindDeleteIndex (self, binder, indexes):
return self.BaseIDOMO.BindDeleteIndex (binder, indexes)
def BindInvoke (self, binder, args):
return self.BaseIDOMO.BindInvoke (binder, args)
def BindUnaryOperation (self, binder):
return self.BaseIDOMO.BindUnaryOperation (binder)
def BindBinaryOperation (self, binder, arg):
return self.BaseIDOMO.BindBinaryOperation (binder, arg)
#######################################################
### Dynamic Helpers for HasMember, GetMember, SetMember
#######################################################
### DynamicObjectHelpers provides access to IDynObj members given names as
### data at runtime. When the names are known at compile time (o.foo), then
### they get baked into specific sites with specific binders that encapsulate
### the name. We need this in python because hasattr et al are case-sensitive.
###
### Currently Sympl only uses this on ExpandoObjects, but it works generally on
### IDOs.
###
class DynamicObjectHelpers (object):
Sentinel = object()
GetSites = {}
SetSites = {}
@staticmethod
def HasMember (dynObj, name):
if not isinstance(dynObj, IDynamicMetaObjectProvider):
raise Exception("DynamicObjectHelpers only works on IDOs for now.")
return (DynamicObjectHelpers.GetMember(dynObj, name) !=
DynamicObjectHelpers.Sentinel)
#Alternative impl used when EOs had bug and didn't call fallback ...
#mo = dynObj.GetMetaObject(Exprs.Expression.Parameter(object, "bogus"))
#for member in mo.GetDynamicMemberNames():
# if String.Equals(member, name,
# String.StringComparison.OrdinalIgnoreCase):
# return True
#return False
@staticmethod
def GetMember (dynObj, name):
if not isinstance(dynObj, IDynamicMetaObjectProvider):
raise Exception("DynamicObjectHelpers only works on IDOs for now.")
## Work around an IronPython 4.0 issue:
## http://ironpython.codeplex.com/WorkItem/View.aspx?WorkItemId=22735
if clr.use35:
func = Func
else:
func = clr.GetPythonType(Type.GetType("System.Func`3"))
site = DynamicObjectHelpers.GetSites.get(name)
if site is None:
site = CallSite[func[CallSite, object, object]].Create(
DOHelpersGetMemberBinder(name))
DynamicObjectHelpers.GetSites[name] = site
return site.Target(site, dynObj)
@staticmethod
def GetMemberNames (dynObj):
if not isinstance(dynObj, IDynamicMetaObjectProvider):
raise Exception("DynamicObjectHelpers only works on IDOs for now.")
return (dynObj.GetMetaObject(Exprs.Expression.Parameter(object, "bogus"))
.GetDynamicMemberNames())
@staticmethod
def SetMember(dynObj, name, value):
if not isinstance(dynObj, IDynamicMetaObjectProvider):
raise Exception("DynamicObjectHelpers only works on IDOs for now.")
## Work around an IronPython 4.0 issue:
## http://ironpython.codeplex.com/WorkItem/View.aspx?WorkItemId=22735
if clr.use35:
action = Action
else:
action = clr.GetPythonType(Type.GetType("System.Action`3"))
site = DynamicObjectHelpers.SetSites.get(name)
if site is None:
## For general usage ExpandoObject type param could be object.
site = CallSite[action[CallSite, ExpandoObject, object]].Create(
DOHelpersSetMemberBinder(name))
DynamicObjectHelpers.SetSites[name] = site
site.Target(site, dynObj, value)
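### Hypothetical usage sketch (names are illustrative; `eo` is any IDO such as
### an ExpandoObject). Lookups are routed through case-insensitive binders,
### which is the point of these helpers:
###   DynamicObjectHelpers.SetMember(eo, "Foo", 42)
###   DynamicObjectHelpers.HasMember(eo, "FOO")    # True
###   DynamicObjectHelpers.GetMember(eo, "foo")    # 42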
class DOHelpersGetMemberBinder (GetMemberBinder):
#def __init__ (self, name, ignoreCase):
# ## super(...) always works, even with multiple inheritance but
# ## GetMemberBinder.__init__(self, name, True) would work in this case.
# super(DOHelpersGetMemberBinder, self).__init__(name, ignoreCase)
def __new__ (cls, name):
return GetMemberBinder.__new__(cls, name, True)
def FallbackGetMember(self, targetMO, errorSuggestionMO):
## Don't add my own type restriction, target adds them.
return DynamicMetaObject(
Exprs.Expression.Constant(DynamicObjectHelpers.Sentinel),
targetMO.Restrictions)
## Don't need Equals override or GetHashCode because there is no more
## specific binding metadata in this binder than what the base methods
## already compare.
# def GetHashCode (self):
# pass
#
# def Equals (self, obj):
# return (isinstance(obj, DOHelpersGetMemberBinder) and
# super(DOHelpersGetMemberBinder, self).Equals(obj))
class DOHelpersSetMemberBinder (SetMemberBinder):
#def __init__ (self, name, ignoreCase):
# super(DOHelpersSetMemberBinder, self).__init__(name, ignoreCase)
def __new__ (cls, name):
return SetMemberBinder.__new__(cls, name, True)
def FallbackSetMember(self, targetMO, valueMO, errorSuggestionMO):
return (errorSuggestionMO or
CreateThrow(
targetMO, None, BindingRestrictions.Empty,
MissingMemberException,
## General msg: Sympl doesn't override IDOs to set members.
"If IDynObj doesn't support setting members, " +
"DOHelpers can't do it for the IDO."))
## Don't need Equals override or GetHashCode because there is no more
## specific binding metadata in this binder than what the base methods
## already compare.
#def GetHashCode (self):
# pass
#
#def Equals (self, obj):
# return (isinstance(obj, DOHelpersSetMemberBinder) and
# super(DOHelpersSetMemberBinder, self).Equals(obj))
###########################
### General Runtime Binders
###########################
### SymplGetMemberBinder is used for general dotted expressions for fetching
### members.
###
class SymplGetMemberBinder (GetMemberBinder):
#def __init__ (self, name, ignoreCase):
# ## super(...) always works, even with multiple inheritance but
# ## GetMemberBinder.__init__(self, name, True) would work in this case.
# super(DOHelpersGetMemberBinder, self).__init__(name, ignoreCase)
def __new__ (cls, name):
return GetMemberBinder.__new__(cls, name, True) # True = IgnoreCase
def FallbackGetMember(self, targetMO, errorSuggestionMO):
        ## Defer if any object has no value so that we evaluate their
## Expressions and nest a CallSite for the GetMember.
if not targetMO.HasValue:
return self.Defer(targetMO)
## Try COM binding first.
isCom, com = ComBinder.TryBindGetMember(self, targetMO, True)
if isCom:
return com
debugprint("symplgetmember ...", targetMO.Expression, self.Name)
## Find our own binding.
flags = (refl.BindingFlags.IgnoreCase | refl.BindingFlags.Static |
refl.BindingFlags.Instance | refl.BindingFlags.Public)
## bindingflags.flattenhierarchy? public and protected static members
members = targetMO.LimitType.GetMember(self.Name, flags)
if len(members) == 1:
return DynamicMetaObject(
EnsureObjectResult(
Exprs.Expression.MakeMemberAccess(
Exprs.Expression.Convert(targetMO.Expression,
members[0].DeclaringType),
members[0])),
## Don't need restriction test for name since this
## rule is only used where binder is used, which is
## only used in sites with this binder.Name.
BindingRestrictions.GetTypeRestriction(
targetMO.Expression, targetMO.LimitType))
else:
if errorSuggestionMO is not None:
return errorSuggestionMO
return CreateThrow(
targetMO, None,
BindingRestrictions.GetTypeRestriction(
targetMO.Expression, targetMO.LimitType),
MissingMemberException,
"Object " + str(targetMO.Value) +
" does not have member " + self.Name)
### SymplSetMemberBinder is used for general dotted expressions for setting
### members.
###
class SymplSetMemberBinder (SetMemberBinder):
#def __init__ (self, name, ignoreCase):
# ## super(...) always works, even with multiple inheritance but
# ## GetMemberBinder.__init__(self, name, True) would work in this case.
# super(DOHelpersGetMemberBinder, self).__init__(name, ignoreCase)
def __new__ (cls, name):
return SetMemberBinder.__new__(cls, name, True) # True = IgnoreCase
def FallbackSetMember(self, targetMO, valueMO, errorSuggestionMO):
debugprint("symplsetmember fallback ...", targetMO.Expression, self.Name,
" ..name now expr..", valueMO.Expression)
        ## Defer if any object has no value so that we evaluate their
## Expressions and nest a CallSite for the SetMember.
if not targetMO.HasValue:
return self.Defer(targetMO)
## Try COM object first.
isCom, com = ComBinder.TryBindSetMember(self, targetMO, valueMO)
if isCom:
return com
## Find our own binding.
flags = (refl.BindingFlags.IgnoreCase | refl.BindingFlags.Static |
refl.BindingFlags.Instance | refl.BindingFlags.Public)
members = targetMO.LimitType.GetMember(self.Name, flags)
if len(members) == 1:
mem = members[0]
val = None
## Should check for member domain type being Type and value being
## TypeModel, similar to ConvertArguments, and building an
## expression like GetRuntimeTypeMoFromModel.
if mem.MemberType == refl.MemberTypes.Property:
val = Exprs.Expression.Convert(valueMO.Expression,
mem.PropertyType)
elif mem.MemberType == refl.MemberTypes.Field:
val = Exprs.Expression.Convert(valueMO.Expression,
mem.FieldType)
else:
return (errorSuggestionMO or
CreateThrow(
targetMO, None,
BindingRestrictions.GetTypeRestriction(
targetMO.Expression, targetMO.LimitType),
InvalidOperationException,
"Sympl only support setting properties and " +
"fields at this time."))
return DynamicMetaObject(
EnsureObjectResult(
Exprs.Expression.Assign(
Exprs.Expression.MakeMemberAccess(
Exprs.Expression.Convert(
targetMO.Expression,
members[0].DeclaringType),
members[0]),
valueMO.Expression)),
## Don't need restriction test for name since this
## rule is only used where binder is used, which is
## only used in sites with this binder.Name.
BindingRestrictions.GetTypeRestriction(
targetMO.Expression, targetMO.LimitType))
else:
if errorSuggestionMO is not None:
return errorSuggestionMO
return CreateThrow(
targetMO, None,
BindingRestrictions.GetTypeRestriction(
targetMO.Expression, targetMO.LimitType),
MissingMemberException,
"IDynObj member name conflict.")
### SymplInvokeMemberBinder is used for general dotted expressions in function
### calls for invoking members.
###
class SymplInvokeMemberBinder (InvokeMemberBinder):
def __new__ (cls, name, callinfo):
return InvokeMemberBinder.__new__(cls, name, True, callinfo)
def FallbackInvokeMember (self, targetMO, argMOs, errorSuggestionMO):
        ## Defer if any object has no value so that we evaluate their
## Expressions and nest a CallSite for the InvokeMember.
if not targetMO.HasValue or not all(map(lambda x: x.HasValue, argMOs)):
return self.Defer((targetMO,) + tuple(argMOs))
## Try COM object first.
isCom, com = ComBinder.TryBindInvokeMember(self, targetMO, argMOs)
if isCom:
return com
## Find our own binding.
flags = (refl.BindingFlags.IgnoreCase | refl.BindingFlags.Instance |
refl.BindingFlags.Public)
members = targetMO.LimitType.GetMember(self.Name, flags)
if (len(members) == 1 and
(isinstance(members[0], refl.PropertyInfo) or
isinstance(members[0], refl.FieldInfo))):
raise Exception("Haven't implemented invoking delegate values " +
"from properties or fields.")
## NOT TESTED, should check type for isinstance delegate
#return DynamicMetaObject(
# Exprs.Expression.Dynamic(
# SymplInvokeBinder(CallInfo(len(args))),
# object,
# ([Exprs.MakeMemberAccess(self.Expression, mem)] +
# (x.Expression for x in args))))
## Don't test for eventinfos since we do nothing with them now.
else:
## Get MethodInfos with right arg count.
debugprint("tmmo bind invoke mem ... searching ...", len(members))
mi_mems = [x for x in members if isinstance(x, refl.MethodInfo) and
len(x.GetParameters()) == len(argMOs)]
debugprint("methodinfo members with same arg count: ", len(mi_mems))
debugprint(mi_mems)
res = []
for mem in mi_mems:
if ParamsMatchArgs(mem.GetParameters(), argMOs):
res.append(mem)
## False below means generate a type restriction on the MO.
## We are looking at the members targetMO's Type.
restrictions = GetTargetArgsRestrictions(targetMO, argMOs, False)
## See if we have a result and return an error MO.
if len(res) == 0:
return (errorSuggestionMO or
CreateThrow(
targetMO, argMOs, restrictions,
MissingMemberException,
"Cannot bind member invoke -- " + repr(argMOs)))
## restrictions and conversion must be done consistently.
callArgs = ConvertArguments(argMOs, res[0].GetParameters())
return DynamicMetaObject(
EnsureObjectResult(
Exprs.Expression.Call(
Exprs.Expression.Convert(targetMO.Expression,
targetMO.LimitType),
res[0], callArgs)),
restrictions)
def FallbackInvoke (self, targetMO, argMOs, errorSuggestionMO):
## Just "defer" since we have code in SymplInvokeBinder that knows
## what to do, and typically this fallback is from a language like Python
## that passes a DynamicMetaObject with HasValue == false.
return DynamicMetaObject(
Exprs.Expression.Dynamic(
## This call site doesn't share any L2 caching
## since we don't call GetInvokeBinder from Sympl.
## We aren't plumbed to get the runtime instance here.
SymplInvokeBinder(CallInfo(len(argMOs))),
object, #ret type
[targetMO.Expression] +
[x.Expression for x in argMOs]),
## No new restrictions since SymplInvokeBinder will handle it.
targetMO.Restrictions.Merge(
BindingRestrictions.Combine(argMOs)))
## This class is needed to canonicalize InvokeMemberBinders in Sympl. See
## the comment above the GetXXXBinder methods at the end of the Sympl class.
##
class InvokeMemberBinderKey (object):
def __init__ (self, name, info):
self._name = name
self._info = info
def _getName (self): return self._name
Name = property(_getName)
def _getInfo (self): return self._info
Info = property(_getInfo)
def __eq__ (self, obj): #def Equals (self, obj):
return ((obj is not None) and (obj.Name == self._name) and
obj.Info.Equals(self._info))
def __hash__ (self): #def GetHashCode (self):
return 0x28000000 ^ self._name.GetHashCode() ^ self._info.GetHashCode()
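### Hypothetical sketch of how the key canonicalizes binders: call sites that
### share a member name and CallInfo reuse one binder instance (and with it the
### DLR's L2 cache). `binder_cache` is an illustrative dict, not from this file:
###   key = InvokeMemberBinderKey("tostring", CallInfo(0))
###   binder = binder_cache.get(key)
###   if binder is None:
###       binder = binder_cache.setdefault(key, SymplInvokeMemberBinder(key.Name, key.Info))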
### Used for calling runtime helpers, delegate values, and callable IDOs (which
### really get handled by their MOs).
###
class SymplInvokeBinder (InvokeBinder):
def FallbackInvoke (self, targetMO, argMOs, errorSuggestionMO):
debugprint("symplinvokebinder fallback...", targetMO.Expression, "...",
[x.Expression for x in argMOs])
        ## Defer if any object has no value so that we evaluate their
## Expressions and nest a CallSite for the InvokeMember.
if not targetMO.HasValue or not all(map(lambda x: x.HasValue, argMOs)):
return self.Defer((targetMO,) + tuple(argMOs))
## Try COM object first.
isCom, com = ComBinder.TryBindInvoke(self, targetMO, argMOs)
if isCom:
return com
## Find our own binding.
if targetMO.LimitType.IsSubclassOf(Delegate):
params = targetMO.LimitType.GetMethod("Invoke").GetParameters()
if len(params) == len(argMOs):
debugprint("returning rule")
debugprint(" ... ", targetMO.LimitType, "...", targetMO.Expression.Type,
"...", targetMO.Value)
## Don't need to check if argument types match parameters.
## If they don't, users get an argument conversion error.
expression = Exprs.Expression.Invoke(
Exprs.Expression.Convert(targetMO.Expression,
targetMO.LimitType),
ConvertArguments(argMOs, params))
return DynamicMetaObject(
EnsureObjectResult(expression),
BindingRestrictions.GetTypeRestriction(
targetMO.Expression, targetMO.LimitType))
return (errorSuggestionMO or
CreateThrow(
targetMO, argMOs,
BindingRestrictions.GetTypeRestriction(
targetMO.Expression, targetMO.LimitType),
InvalidOperationException,
"Wrong number of arguments for function -- " +
                str(targetMO.LimitType) + " got " + repr(argMOs)))
### Used to instantiate types or IDOs that can be instantiated (though their MOs
### really do the work).
###
class SymplCreateInstanceBinder (CreateInstanceBinder):
def __new__ (cls, callinfo):
return CreateInstanceBinder.__new__(cls, callinfo)
def FallbackCreateInstance (self, targetMO, argMOs, errorSuggestionMO):
        ## Defer if any object has no value so that we evaluate their
## Expressions and nest a CallSite for the CreateInstance.
if not targetMO.HasValue or not all(map(lambda x: x.HasValue, argMOs)):
return self.Defer((targetMO,) + tuple(argMOs))
## Make sure target actually contains a Type.
if not (Type.IsAssignableFrom(Type, targetMO.LimitType)):
return (errorSuggestionMO or
CreateThrow(
targetMO, argMOs, BindingRestrictions.Empty,
InvalidOperationException,
("Type object must be used when creating instance -- " +
repr(targetMO))))
## Get constructors with right arg count.
ctors = [x for x in targetMO.Value.GetConstructors()
if len(x.GetParameters()) == len(argMOs)]
        ## Get ctors with param types that work for args.  This works
        ## except for value args that need to pass to reftype params.
## We could detect that to be smarter and then explicitly StrongBox
## the args.
res = []
for mem in ctors:
if ParamsMatchArgs(mem.GetParameters(), argMOs):
res.append(mem)
## True means generate an instance restriction on the MO.
## We are only looking at the members defined in this Type instance.
restrictions = GetTargetArgsRestrictions(targetMO, argMOs, True)
if len(res) == 0:
return (errorSuggestionMO or
CreateThrow(
targetMO, argMOs, restrictions,
MissingMemberException,
"Can't bind create instance -- " + repr(targetMO)))
## restrictions and conversion must be done consistently.
callArgs = ConvertArguments(argMOs, res[0].GetParameters())
return DynamicMetaObject(
## Creating an object, so don't need EnsureObjectResult.
Exprs.Expression.New(res[0], callArgs),
restrictions)
class SymplGetIndexBinder (GetIndexBinder):
#def __new__ (cls, callinfo):
# return GetIndexBinder.__new__(cls, callinfo)
def FallbackGetIndex (self, targetMO, argMOs, errorSuggestionMO):
        ## Defer if any object has no value so that we evaluate their
        ## Expressions and nest a CallSite for the GetIndex.
if not targetMO.HasValue or not all(map(lambda x: x.HasValue, argMOs)):
return self.Defer((targetMO,) + tuple(argMOs))
## Try COM object first.
isCom, com = ComBinder.TryBindGetIndex(self, targetMO, argMOs)
if isCom:
return com
## Give a good error for Cons.
if type(targetMO.Value) is Cons:
if len(argMOs) != 1:
return (errorSuggestionMO or
CreateThrow(
targetMO, argMOs, BindingRestrictions.Empty,
InvalidOperationException,
"Indexing Sympl list requires exactly one argument."))
## Find our own binding.
##
## Conversions created in GetIndexExpression must be consistent with
## restrictions made in GetTargetArgsRestrictions.
return DynamicMetaObject(
EnsureObjectResult(GetIndexExpression(targetMO, argMOs)),
## False means make type restriction on targetMO.LimitType
GetTargetArgsRestrictions(targetMO, argMOs, False))
class SymplSetIndexBinder (SetIndexBinder):
#def __new__ (cls, callinfo):
# return SetIndexBinder.__new__(cls, callinfo)
def FallbackSetIndex (self, targetMO, argMOs, valueMO, errorSuggestionMO):
        ## Defer if any object has no value so that we evaluate their
## Expressions and nest a CallSite for the SetIndex.
if (not targetMO.HasValue or not all(map(lambda x: x.HasValue, argMOs)) or
not valueMO.HasValue):
return self.Defer((targetMO,) + tuple(argMOs) + (valueMO,))
## Try COM object first.
isCom, com = ComBinder.TryBindSetIndex(self, targetMO, argMOs, valueMO)
if isCom:
return com
## Find our own binding. First setup value.
valueExpr = valueMO.Expression
if type(valueMO.Value) is TypeModel:
## Don't use LimitType to compare py type objs, use the value.
valueExpr = GetRuntimeTypeMoFromModel(valueMO).Expression
## Check Cons vs. normal
if type(targetMO.Value) is Cons:
## Don't use LimitType to compare py type objs, use the value.
if len(argMOs) != 1:
return (errorSuggestionMO or
CreateThrow(
targetMO, argMOs, BindingRestrictions.Empty,
InvalidOperationException,
"Indexing Sympl list requires exactly one argument."))
setIndexExpr = (
## In C# can use Expression.Call on methodinfo.
Exprs.Expression.Dynamic(
RunHelpersInvokeBinder(CallInfo(3)),
object,
Exprs.Expression.Constant(RuntimeHelpers.SetConsElt),
Exprs.Expression.Convert(targetMO.Expression,
targetMO.LimitType),
Exprs.Expression.Convert(argMOs[0].Expression,
argMOs[0].LimitType),
## Calling Py runtime helper doesn't need the type
## conversions, and it is unnecessarily boxing in python.
valueExpr))
else:
indexExpr = GetIndexExpression(targetMO, argMOs)
setIndexExpr = EnsureObjectResult(
Exprs.Expression.Assign(indexExpr, valueExpr))
## False means make type restriction on targetMO.LimitType
restrictions = GetTargetArgsRestrictions(targetMO, argMOs, False)
return DynamicMetaObject(setIndexExpr, restrictions)
class SymplBinaryOperationBinder (BinaryOperationBinder):
def FallbackBinaryOperation (self, leftMO, rightMO, errorSuggestionMO):
        ## Defer if any object has no value so that we evaluate their
        ## Expressions and nest a CallSite for the BinaryOperation.
if not leftMO.HasValue or not rightMO.HasValue:
            return self.Defer(leftMO, rightMO)
restrictions = (leftMO.Restrictions.Merge(rightMO.Restrictions)
.Merge(BindingRestrictions.GetTypeRestriction(
leftMO.Expression, leftMO.LimitType))
.Merge(BindingRestrictions.GetTypeRestriction(
rightMO.Expression, rightMO.LimitType)))
return DynamicMetaObject(
EnsureObjectResult(
Exprs.Expression.MakeBinary(
self.Operation,
Exprs.Expression.Convert(leftMO.Expression, leftMO.LimitType),
Exprs.Expression.Convert(rightMO.Expression, rightMO.LimitType))),
restrictions)
### This is mostly for example and plumbing in case anyone adds a dynamic unary
### operator.  The only unary Op Sympl supports is logical Not, which it handles
### without a dynamic node since everything that is not nil or false is true.
###
class SymplUnaryOperationBinder (UnaryOperationBinder):
def FallbackUnaryOperation (self, operandMO, errorSuggestionMO):
        ## Defer if any object has no value so that we evaluate their
        ## Expressions and nest a CallSite for the UnaryOperation.
if not operandMO.HasValue:
            return self.Defer(operandMO)
return DynamicMetaObject(
EnsureObjectResult(
Exprs.Expression.MakeUnary(
self.Operation,
Exprs.Expression.Convert(operandMO.Expression,
operandMO.LimitType),
operandMO.LimitType)),
operandMO.Restrictions.Merge(
BindingRestrictions.GetTypeRestriction(
operandMO.Expression,
operandMO.LimitType)))
###########################
### Cons Cells and Symbols
###########################
class Symbol (object):
def __init__ (self, name):
self._name = name
self._value = None
        self._plist = None
### Need __repr__ to just print name, not <Symbol name>, when Symbols are
### inside list structures.
###
def __repr__ (self):
return self._name
## IPy doesn't bind repr for Py printing, and ToString for .NET
## printing. Need to print here like we want for ToString.
#return "<Symbol " + self.Name + ">"
### Need ToString when Sympl program passing Symbol to Console.WriteLine.
### Otherwise, it prints as internal IPy constructed type.
###
def ToString (self):
return self._name
def _getName (self): return self._name
def _setName (self, value):
self._name = value
return value
Name = property(_getName, _setName)
def _getValue (self): return self._value
def _setValue (self, value):
self._value = value
return value
Value = property(_getValue, _setValue)
def _getPlist (self): return self._plist
def _setPlist (self, value):
self._plist = value
return value
PList = property(_getPlist, _setPlist)
class Cons (object):
def __init__ (self, first, rest):
self._first = first
self._rest = rest
### NOTE: does not handle circularities!
###
def __repr__ (self):
head = self
res = "("
while head is not None:
res = res + repr(head._first)
if head._rest is None:
head = None
elif type(head._rest) is Cons:
head = head._rest
res = res + " "
else:
res = res + " . " + repr(head._rest)
head = None
return res + ")"
def ToString (self):
return self.__repr__()
def _getFirst (self):
return self._first
def _setFirst (self, value):
self._first = value
return value
First = property(_getFirst, _setFirst)
def _getRest (self):
return self._rest
def _setRest (self, value):
self._rest = value
return value
Rest = property(_getRest, _setRest)
### In C# this will be internal to the Sympl Runtime, called only by the
### code emitted when analyzing a keyword form invocation for List.
###
@staticmethod
def _List (*elements):
if len(elements) == 0: return None
head = Cons(elements[0], None)
tail = head
for elt in elements[1:]:
tail.Rest = Cons(elt, None)
tail = tail.Rest
return head
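    ### A minimal usage sketch (illustrative only, not part of the runtime):
    ### Cons._List(1, 2, 3) builds Cons(1, Cons(2, Cons(3, None))), which
    ### __repr__ prints as "(1 2 3)"; an improper pair such as Cons(1, 2)
    ### prints as the dotted pair "(1 . 2)".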
##################
### Dev-time Utils
##################
_debug = False
def debugprint (*stuff):
if _debug:
for x in stuff:
print x,
print
|
python
|
import os
import json, boto3
def lambda_handler(event, context):
print("Trigger Event: ")
print(event)
region = os.environ['REGION']
elbv2_client = boto3.client('elbv2', region_name=region)
available_target_groups = os.environ['AVAILABLE_TARGET_GROUPS']
arr_available_target_groups = available_target_groups.split(',')
# Get HTTP Target Group.
http_listener_arn = os.environ['HTTP_LISTENER_ARN']
http_listener = elbv2_client.describe_rules( ListenerArn=http_listener_arn)
http_target_group_arn = get_current_http_target_group(http_listener['Rules'], arr_available_target_groups)
    if http_target_group_arn is False:
        print("Could not identify the current target group ARN")
return False
print("Current HTTP target group: ")
print(http_target_group_arn)
# Get HTTPS listener rules.
https_listener_arn = os.environ['SSL_LISTENER_ARN']
https_listener = elbv2_client.describe_rules(ListenerArn=https_listener_arn)
https_listener_rules = https_listener['Rules']
print("Current HTTPS target group: ")
https_target_group_arn = get_current_http_target_group(https_listener['Rules'], arr_available_target_groups)
print(https_target_group_arn)
results = {}
i = 0
while i < len(https_listener_rules):
# Skip default rule
if https_listener_rules[i]['IsDefault']==True:
i +=1
continue
actions = https_listener_rules[i]['Actions']
actions, modify = process_actions(actions, http_target_group_arn, arr_available_target_groups)
if modify==1:
print("Updating SSL listener rules..")
rule_arn = https_listener_rules[i]['RuleArn']
results[rule_arn] = modify_rules(elbv2_client, rule_arn, actions)
i +=1
# For ECS After Allow Test Traffic hook
print(results)
send_codedeploy_validation_status(event, results)
return results
# Returns the current B/G target group from a list of listener rules.
def get_current_http_target_group(http_listener_rules, arr_available_target_groups):
i=0
while i < len(http_listener_rules):
# Continue if default listener rule.
if http_listener_rules[i]['IsDefault']==True:
i +=1
continue
actions = http_listener_rules[i]['Actions']
n=0
while n<len(actions):
try:
for tg in actions[n]['ForwardConfig']['TargetGroups']:
                    if tg['TargetGroupArn'] in arr_available_target_groups and (tg['Weight'] == 100 or tg['Weight'] == 1):
return tg['TargetGroupArn']
except Exception as e:
print(e)
n +=1
i +=1
return False
def process_actions(actions, http_target_group_arn, arr_available_target_groups):
modify = 0
for ak, action in enumerate(actions):
try:
if action['Type'] == "forward" and check_target_update(action['TargetGroupArn'], arr_available_target_groups):
actions[ak]['TargetGroupArn']=http_target_group_arn
for tgk, target_group in enumerate(action['ForwardConfig']['TargetGroups']):
if check_target_update(target_group['TargetGroupArn'], arr_available_target_groups):
actions[ak]['ForwardConfig']['TargetGroups'][tgk]['TargetGroupArn']=http_target_group_arn
modify=1
except Exception as e:
print(e)
return (actions), modify
# Check whether the old target group is one of our available (blue/green) target groups.
# Be wary: I found it's possible the listener rule is already updated at the initial Ready stage.
# DO NOT TRY COMPARING OLD AND NEW; SIMPLY ALWAYS UPDATE TO MATCH HTTP IF IT IS ONE OF THE AVAILABLE TARGETS
def check_target_update(old_target_group, arr_available_target_groups):
return old_target_group in arr_available_target_groups
# Sends notification to CodeDeploy on hook status...
def send_codedeploy_validation_status(event, results):
region = os.environ['REGION']
codedeploy_client = boto3.client('codedeploy', region_name=region)
    status = 'Succeeded' if len(results) > 0 else 'Failed'
print(status)
try:
return codedeploy_client.put_lifecycle_event_hook_execution_status(
deploymentId=event['DeploymentId'],
lifecycleEventHookExecutionId=event['LifecycleEventHookExecutionId'],
status=status
)
except Exception as e:
print("Recoverable Exception: ")
print(e)
return False
def modify_rules(elbv2_client, arn, actions):
try:
return elbv2_client.modify_rule(
RuleArn=arn,
Actions=actions
)
except Exception as e:
print(e)
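# A minimal offline sketch (hypothetical ARNs, not a real deployment): shows how
# process_actions() rewrites a forward action so an HTTPS rule points at the same
# blue/green target group as the HTTP listener.
if __name__ == "__main__":
    _available = [
        "arn:aws:elasticloadbalancing:eu-west-1:123456789012:targetgroup/blue/1111111111111111",
        "arn:aws:elasticloadbalancing:eu-west-1:123456789012:targetgroup/green/2222222222222222",
    ]
    _actions = [{
        "Type": "forward",
        "TargetGroupArn": _available[1],
        "ForwardConfig": {"TargetGroups": [{"TargetGroupArn": _available[1], "Weight": 1}]},
    }]
    _rewritten, _modify = process_actions(_actions, _available[0], _available)
    print(_modify)     # 1 -> the rule would be updated
    print(_rewritten)  # forward action now points at the "blue" target group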
|
python
|
import RPi.GPIO as GPIO
import time
pinServo=40
GPIO.cleanup()
GPIO.setmode(GPIO.BOARD)
GPIO.setup(pinServo, GPIO.OUT)
servo=GPIO.PWM(pinServo, 50)
servo.start(0)
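# A hedged helper (assumption: a typical hobby servo expects roughly 2.5%-12.5%
# duty cycle at 50 Hz for 0-180 degrees; the loop below keeps its simpler 0-12%
# mapping, this is only an illustration of the angle-to-duty conversion).
def angle_to_duty(angle, min_duty=2.5, max_duty=12.5):
    return min_duty + (max_duty - min_duty) * angle / 180.0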
while True:
ang = int(input())
# if (ang>170 and ang < 0):
# print('angulo no posible')
# break
# else:
servo.ChangeDutyCycle((ang*12/180))
time.sleep(0.5)
|
python
|
from cdmanager import cd
import os
import shutil
from pathlib import Path
working_folder = '../data/tfs_rep_2/'
destination_folder = '../gemFiles/rep2/'
with cd(working_folder):
for root, dirs, files in os.walk('./'):
#print(dirs)
if 'GEMout' in dirs:
namedir = str(root.replace('./', ''))
originalfolder = os.path.join(
namedir,
'GEMout'
)
finalFolder = os.path.join(
destination_folder,
str(root).replace('/', '').replace('.', '')+'GEMout'
)
print(originalfolder, finalFolder)
shutil.copytree(originalfolder, finalFolder)
|
python
|
from __future__ import annotations
import decimal
import re
import pytest
from django.core.validators import MaxValueValidator, MinValueValidator, URLValidator
from rest_framework import serializers
from rest_framework.fields import (
BooleanField,
CharField,
DateField,
DateTimeField,
DecimalField,
DurationField,
EmailField,
Field,
FileField,
FloatField,
ImageField,
IntegerField,
IPAddressField,
JSONField,
ReadOnlyField,
RegexField,
SlugField,
TimeField,
URLField,
UUIDField,
)
from rest_framework_gis.fields import GeometryField
from restdoctor.rest_framework.schema import RestDoctorSchema
url_pattern = URLValidator.regex.pattern.replace('\\Z', '\\z')
@pytest.mark.parametrize(
('field', 'expected_schema'),
[
(Field(), {'type': 'string'}),
(ReadOnlyField(), {'readOnly': True, 'type': 'string'}),
(Field(write_only=True), {'writeOnly': True, 'type': 'string'}),
(Field(allow_null=True), {'nullable': True, 'type': 'string'}),
(Field(default='default'), {'default': 'default', 'type': 'string'}),
(Field(help_text='help_text'), {'description': 'help_text', 'type': 'string'}),
],
)
def test_basic_fields_schema(field, expected_schema):
schema = RestDoctorSchema()
result = schema._get_field_schema(field)
assert result == expected_schema
@pytest.mark.parametrize(
('field', 'expected_schema'),
[
(
# AutoField, BigIntegerField, IntegerField, PositiveIntegerField,
# PositiveSmallIntegerField, SmallIntegerField
IntegerField(validators=[MinValueValidator(10)]),
{'type': 'integer', 'minimum': 10},
),
(
# BigIntegerField
IntegerField(max_value=2 ** 32),
{'type': 'integer', 'maximum': 2 ** 32, 'format': 'int64'},
),
(
# PositiveIntegerField, PositiveSmallIntegerField
IntegerField(min_value=0),
{'type': 'integer', 'minimum': 0},
),
(
# BooleanField, NullBooleanField
BooleanField(),
{'type': 'boolean'},
),
(
# NullBooleanField
BooleanField(allow_null=True),
{'type': 'boolean', 'nullable': True},
),
(
# CharField, TextField
CharField(allow_blank=True, max_length=10, min_length=2),
{'type': 'string', 'maxLength': 10, 'minLength': 2},
),
(
# CharField, TextField
CharField(allow_blank=False),
{'type': 'string'},
),
(
# CharField, TextField
CharField(trim_whitespace=False),
{'type': 'string'},
),
(
# DateField
DateField(),
{'type': 'string', 'format': 'date'},
),
(
# DateTimeField
DateTimeField(),
{'type': 'string', 'format': 'date-time'},
),
(
# DecimalField
DecimalField(
max_digits=4,
decimal_places=2,
validators=[MinValueValidator(10), MaxValueValidator(20)],
),
{
'type': 'string',
'format': 'decimal',
'multipleOf': 0.01,
'maximum': 20,
'minimum': 10,
},
),
(
# DecimalField
DecimalField(max_digits=5, decimal_places=2, validators=[MinValueValidator(0)]),
{
'type': 'string',
'format': 'decimal',
'multipleOf': 0.01,
'maximum': 1000,
'minimum': 0,
},
),
(
# DecimalField decimal
DecimalField(
max_digits=5,
decimal_places=2,
validators=[
MinValueValidator(decimal.Decimal('0.1')),
MaxValueValidator(decimal.Decimal('2.1222')),
],
),
{
'type': 'string',
'format': 'decimal',
'multipleOf': 0.01,
'maximum': 2.1222,
'minimum': 0.1,
},
),
(
# DurationField
DurationField(max_value=300, min_value=100),
{'type': 'string', 'maximum': 300, 'minimum': 100},
),
(
# EmailField
EmailField(),
{'type': 'string', 'format': 'email'},
),
(
# JSONField
JSONField(),
{'type': 'object'},
),
(
# FileField
FileField(),
{'type': 'string', 'format': 'binary'},
),
(
# FloatField
FloatField(),
{'type': 'number'},
),
(
# ImageField
ImageField(),
{'type': 'string', 'format': 'binary'},
),
(
# SlugField
SlugField(),
{'type': 'string', 'pattern': '^[-a-zA-Z0-9_]+$'},
),
(
# TimeField
TimeField(),
{'type': 'string'},
),
(
# URLField
URLField(),
{'type': 'string', 'format': 'uri', 'pattern': url_pattern},
),
(
# UUIDField
UUIDField(),
{'type': 'string', 'format': 'uuid'},
),
(
# IPAddressField
IPAddressField(protocol='IPv6'),
{'type': 'string', 'format': 'ipv6'},
),
(
RegexField(re.compile(r'^[-a-zA-Z0-9_]+$')),
{'type': 'string', 'pattern': '^[-a-zA-Z0-9_]+$'},
),
(
# IPAddressField
IPAddressField(protocol='IPv4'),
{'type': 'string', 'format': 'ipv4'},
),
(
IntegerField(min_value=100, max_value=200),
{'type': 'integer', 'minimum': 100, 'maximum': 200},
),
(
IntegerField(max_value=2147483648),
{'type': 'integer', 'maximum': 2147483648, 'format': 'int64'},
),
(
CharField(min_length=5, max_length=10),
{'type': 'string', 'minLength': 5, 'maxLength': 10},
),
(
GeometryField(),
{
'type': 'object',
'required': ['type', 'coordinates'],
'properties': {
'type': {'type': 'string', 'enum': ['Point']},
'coordinates': {
'type': 'array',
'items': {'type': 'number', 'format': 'float'},
'example': [12.9721, 77.5933],
'minItems': 2,
'maxItems': 3,
},
},
},
),
],
)
def test_fields_validators_schema(field, expected_schema):
schema = RestDoctorSchema()
result = schema._get_field_schema(field)
assert result == expected_schema
|
python
|
from typing import Optional
from pydantic import BaseModel, Field
class ScopeCreate(BaseModel):
scope_name: str = Field(max_length=32)
description: str = Field(max_length=128)
default: Optional[bool] = Field(default=False)
class ScopeUpdate(BaseModel):
description: str = Field(max_length=128)
class Scope(ScopeCreate):
class Config:
orm_mode = True
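# A minimal usage sketch (hypothetical values): pydantic enforces the max_length
# constraints declared above and fills in the default for "default".
if __name__ == "__main__":
    scope = ScopeCreate(scope_name="read:items", description="Read access to items")
    print(scope.dict())  # {'scope_name': 'read:items', 'description': 'Read access to items', 'default': False}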
|
python
|
def day2_1():
total = 0
for line in open('day2input.txt'):
l, w, h = line.split('x')
l, w, h = int(l), int(w), int(h)
area = 2*l*w + 2*w*h + 2*h*l
slack = min(l*w, w*h, h*l)
total += area + slack
print total
def day2_2():
total = 0
for line in open('day2input.txt'):
l, w, h = line.split('x')
l, w, h = int(l), int(w), int(h)
ribbon = 2 * min(l+w, h+w, h+l)
bow = l*w*h
total += ribbon + bow
print total
|
python
|
# PROJECT DECEMBRE 2019
# PROJECT STAR MAP / DATABASE
# By Enguerran VIDAL
# This file contains the database handling functions.
###############################################################
# IMPORTS #
###############################################################
import csv
###############################################################
# FUNCTIONS #
###############################################################
def csv2txt(csv_file, txt_file):
''' Transforms a csv file into a txt file '''
    with open(txt_file, "w") as my_output_file:
        print(" Opened new txt file")
        with open(csv_file, "r") as my_input_file:
            print(" Opened old csv file")
            for row in csv.reader(my_input_file):
                my_output_file.write(",".join(row) + '\n')
def format_txt(txt_file, char1, char2):
''' Changes "char1" into "char2" throughout an entire .txt file.'''
    with open(txt_file, "r") as file:
        lines = file.readlines()
    with open(txt_file, "w") as file:
        n = len(lines)
        for i in range(n):
            lines[i] = lines[i].replace(char1, char2)
            file.write(lines[i])
def import_database(txt_file):
''' Returns the data from a .txt file transformed from a csv file'''
with open(txt_file, "r") as file:
lines = file.readlines()
n = len(lines)
for i in range(n):
lines[i] = lines[i].split(',')
m = len(lines[i])
for j in range(m):
if lines[i][j] == '' or lines[i][j] == '\n':
lines[i][j] = 'N'
labels = lines[0]
lines.pop(0)
return labels, lines
def dat2csv(dat_file, csv_file):
with open(dat_file) as infile, open(csv_file, "w") as outfile:
csv_writer = csv.writer(outfile)
prev = ''
csv_writer.writerow(['ID', 'PARENT_ID'])
for line in infile.read().splitlines():
csv_writer.writerow([line, prev])
prev = line
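# A minimal usage sketch (hypothetical file names): convert a catalogue exported
# as CSV into the txt format used above, normalise a separator, then load it.
if __name__ == "__main__":
    csv2txt("catalogue.csv", "catalogue.txt")
    format_txt("catalogue.txt", ";", ",")
    labels, rows = import_database("catalogue.txt")
    print(labels)
    print(len(rows), "rows loaded")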
|
python
|
from OpenGL.GL import *
from OpenGL.GLUT import *
from OpenGL.GLU import *
import pydart
import controller
import numpy as np
import pickle
import action as ac
from numpy.linalg import inv
import mmMath
state = {}
state['DrawAxis'] = True
state['DrawGrid'] = True
state['DrawDataAbs'] = True
state['DrawDataRelGlobal'] = True
state['DrawDataRelLocal'] = True
data_abs = None
data_rel_global = None
data_rel_local = None
dt = 1.0 / 1000.0
skel_file = '/home/jungdam/Research/AlphaCon/pydart/apps/turtle/data/skel/turtle.skel'
data_file = './data/warmup/5.0_100_10_torque.warmup'
pydart.init()
world = pydart.create_world(dt, skel_file)
skel = world.skels[0]
skel.controller = controller.Controller(world, skel)
def load_data(file_name, world, skel):
f = open(file_name, 'r')
data = pickle.load(f)
size = len(data)
data_abs = []
data_rel_local = []
data_rel_global = []
print len(data), 'data loaded'
cnt = 0
for d in data:
state_skel_init = d[0]
state_skel_term = d[1]
action = d[2]
#
world.reset()
skel.controller.reset()
skel.set_states(state_skel_init)
# world.step(False,False)
R_1, p_1 = mmMath.T2Rp(skel.body('trunk').T)
# p_1 = skel.body('trunk').world_com()
# skel.controller.add_action(action)
# while True:
# world.step()
# if skel.controller.is_new_wingbeat():
# break
#
world.reset()
skel.controller.reset()
skel.set_states(state_skel_term)
# world.step(False,False)
R_2, p_2 = mmMath.T2Rp(skel.body('trunk').T)
# p_2 = skel.body('trunk').world_com()
data_rel_global.append(p_2 - p_1)
data_rel_local.append(np.dot(inv(R_1), p_2 - p_1))
data_abs.append(p_1)
cnt += 1
if cnt % 1000 == 0:
print cnt, "data processed"
return data_abs, data_rel_global, data_rel_local
def render_callback():
global state
global data_rel_global
global data_rel_local
if state['DrawAxis']:
glLineWidth(5.0)
glBegin(GL_LINES)
glColor3d(1, 0, 0)
glVertex3d(0, 0, 0)
glVertex3d(1, 0, 0)
glColor3d(0, 1, 0)
glVertex3d(0, 0, 0)
glVertex3d(0, 1, 0)
glColor3d(0, 0, 1)
glVertex3d(0, 0, 0)
glVertex3d(0, 0, 1)
glEnd()
if state['DrawGrid']:
l = 2.0
dl = 0.1
n = int(l / dl)
glColor3d(0.5, 0.5, 0.5)
glLineWidth(0.5)
# xz plane
for i in range(2 * n + 1):
glBegin(GL_LINES)
glVertex3d(-l + i * dl, 0, -l)
glVertex3d(-l + i * dl, 0, l)
glEnd()
for i in range(2 * n + 1):
glBegin(GL_LINES)
glVertex3d(-l, 0, -l + i * dl)
glVertex3d(l, 0, -l + i * dl)
glEnd()
# xy plane
for i in range(2 * n + 1):
glBegin(GL_LINES)
glVertex3d(-l + i * dl, -l, 0)
glVertex3d(-l + i * dl, l, 0)
glEnd()
for i in range(2 * n + 1):
glBegin(GL_LINES)
glVertex3d(-l, -l + i * dl, 0)
glVertex3d(l, -l + i * dl, 0)
glEnd()
# yz plane
for i in range(2 * n + 1):
glBegin(GL_LINES)
glVertex3d(0, -l + i * dl, -l)
glVertex3d(0, -l + i * dl, l)
glEnd()
for i in range(2 * n + 1):
glBegin(GL_LINES)
glVertex3d(0, -l, -l + i * dl)
glVertex3d(0, l, -l + i * dl)
glEnd()
if state['DrawDataRelGlobal']:
glColor3d(1.0, 1.0, 0.0)
glBegin(GL_POINTS)
for d in data_rel_global:
glVertex3d(d[0], d[1], d[2])
glEnd()
if state['DrawDataRelLocal']:
glColor3d(0.0, 1.0, 1.0)
glBegin(GL_POINTS)
for d in data_rel_local:
glVertex3d(d[0], d[1], d[2])
glEnd()
if state['DrawDataAbs']:
glColor3d(1.0, 0.0, 1.0)
glBegin(GL_POINTS)
for d in data_abs:
glVertex3d(d[0], d[1], d[2])
glEnd()
def keyboard_callback(key):
""" Programmable interactions """
global state
if key == '1':
state['DrawDataAbs'] = not state['DrawDataAbs']
elif key == '2':
state['DrawDataRelGlobal'] = not state['DrawDataRelGlobal']
elif key == '3':
state['DrawDataRelLocal'] = not state['DrawDataRelLocal']
else:
return False
return True
data_abs, data_rel_global, data_rel_local = load_data(data_file, world, skel)
pydart.glutgui.glutgui_base.run(
title='Data Checker',
trans=[0, 0, -30],
keyboard_callback=keyboard_callback,
render_callback=render_callback)
|
python
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
from django.conf.urls import include, url
from django.contrib import admin
from gps.view.index import *
urlpatterns = [
url('^getPeople/$', getPeople),
url('^getGPS/$', getGPS),
url('^sendGPS/$', sendGPS),
]
|
python
|
import csv
import glob
import math
import os
import sys
from random import random, seed
from timeit import default_timer as timer
import time
from statistics import mean
from pathlib import Path
import networkx as nx
import numpy as np
from scapy.layers.inet import IP, UDP
from scapy.utils import PcapWriter, PcapReader
import tkinter as tk
from tkinter import filedialog
import zat
from zat.log_to_dataframe import LogToDataFrame
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.font_manager import FontProperties
from matplotlib.pyplot import cm
import matplotlib.transforms as mtrans
from scripts.dataset_analysis.filtered_dataset_analysis import Filtered_Dataset_Analysis
class Failed_Experiment_Analysis():
@staticmethod
def create_results_for_failed_experiments(path_to_results, path_to_for_malpaca_files, data_set_name):
for_malpaca_folders = [f.path for f in os.scandir(path_to_for_malpaca_files) if f.is_dir()]
for_malpaca_folders = [(x, os.path.basename(x)) for x in for_malpaca_folders]
results_folders = [f.path for f in os.scandir(path_to_results) if f.is_dir()]
results_folders = [os.path.basename(x) for x in results_folders]
failed_experiments = []
for path, for_malpaca_name in for_malpaca_folders:
if for_malpaca_name not in results_folders:
failed_experiments.append((path, for_malpaca_name))
for path, for_malpaca_name in failed_experiments:
csv_files = glob.glob(path + "/*.csv")
for csv_index, csv_file in enumerate(csv_files):
csv_df = pd.read_csv(csv_file)
if csv_index == 0:
combined_summary_df = csv_df
else:
combined_summary_df = combined_summary_df.append(csv_df)
new_results_path = path_to_results + "/" + for_malpaca_name + "_failed"
new_csv_path = new_results_path + "/combined_summary.csv"
path_detailed_label_csv = new_results_path + "/detailed_length_summary.csv"
path_detailed_label_table = new_results_path + "/detailed_length_summary.png"
shortened_summary_path = new_results_path + "/shortened_summary.csv"
overall_summary_path = new_results_path + "/overall_summary.csv"
os.mkdir(new_results_path)
combined_summary_df.to_csv(new_csv_path, index=False)
total_amount_connections = len(combined_summary_df.index)
dl_average_length_df = combined_summary_df.groupby("detailed_label")[
"connection_length"].mean().to_frame().reset_index()
dl_average_length_df = dl_average_length_df.rename(
columns={"connection_length": "avg_connection_length"})
dl_average_length_df["avg_connection_length"] = dl_average_length_df["avg_connection_length"].apply(
lambda x: round(x, 2))
dl_con_count_df = combined_summary_df.groupby("detailed_label")[
"connection_length"].count().to_frame().reset_index()
dl_con_count_df = dl_con_count_df.rename(columns={"connection_length": "connection_count"})
detailed_label_info_df = dl_average_length_df.merge(right=dl_con_count_df, on="detailed_label")
detailed_label_info_df["ratio"] = round(
(detailed_label_info_df["connection_count"] / total_amount_connections) * 100, 4)
detailed_label_info_df = detailed_label_info_df.sort_values(by="connection_count", ascending=False)
detailed_label_info_df.to_csv(path_detailed_label_csv, index=False)
fig, ax = plt.subplots()
fig.patch.set_visible(False)
ax.axis('off')
ax.axis('tight')
table = ax.table(cellText=detailed_label_info_df.values, colLabels=detailed_label_info_df.columns,
loc='center',
cellLoc='center')
table.auto_set_column_width(col=list(range(len(detailed_label_info_df.columns))))
for (row, col), cell in table.get_celld().items():
if (row == 0):
cell.set_text_props(fontproperties=FontProperties(weight='bold'))
fig.tight_layout(pad=3.0)
plt.savefig(path_detailed_label_table, dpi=1200, bbox_inches='tight')
plt.close()
plt.clf()
data_shortened = {
"validity_index": "nan",
"shilouette_score": "nan",
"noise_percentage": "nan",
"number_clusters": "nan",
"cohesion_score": "nan",
"purity_score": "nan",
"avg_cluster_probability": "nan",
"avg_clustering_error": "nan"}
shortened_summary = pd.DataFrame(data_shortened, index=[0])
shortened_summary.to_csv(shortened_summary_path, index=False)
data_overall = {
"total_time_processing" : "nan",
"validity_index" : "nan",
"shilouette_score" : "nan",
"total_number_connections" : "nan",
"total_number_packets" : "nan",
"total_number_clusters" : "nan",
"avg_cluster_size" : "nan",
"std_cluster_size" : "nan",
"noise_percentage" : "nan",
"avg_label_cohesion" : "nan",
"avg_detailed_label_cohesion" : "nan",
"avg_application_name_cohesion" : "nan",
"avg_application_category_name_cohesion" : "nan",
"avg_name_cohesion" : "nan",
"avg_label_purity" : "nan",
"avg_detailed_label_purity" : "nan",
"avg_application_name_purity" : "nan",
"avg_application_category_name_purity" : "nan",
"avg_name_purity" : "nan",
"avg_cluster_probability" : "nan",
"avg_clustering_error" : "nan"
}
overall_summary = pd.DataFrame(data_overall, index=[0])
overall_summary.to_csv(overall_summary_path, index=False)
|
python
|
import os
import sys
import glob
import pickle
import shutil
import argparse
import tensorflow as tf
def _int64_feature(value):
return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
def _bytes_feature(value):
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def read_pickle_from_file(filename):
""" function reads pickle file"""
with tf.io.gfile.GFile(filename, 'rb') as f:
if sys.version_info >= (3, 0):
data_dict = pickle.load(f, encoding='bytes')
else:
data_dict = pickle.load(f)
return data_dict
def convert_to_tfrecord(input_files, output_file):
"""Converts a file to TFRecords."""
print('Generating %s' % output_file)
with tf.io.TFRecordWriter(output_file) as record_writer:
for input_file in input_files:
data_dict = read_pickle_from_file(input_file)
data = data_dict[b'data']
labels = data_dict[b'labels']
num_entries_in_batch = len(labels)
for i in range(num_entries_in_batch):
example = tf.train.Example(features=tf.train.Features(
feature={
'image': _bytes_feature(data[i].tobytes()),
'label': _int64_feature(labels[i])
}))
record_writer.write(example.SerializeToString())
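# A minimal read-back sketch (not part of the original pipeline): mirrors the
# feature schema written by convert_to_tfrecord above. The image bytes are the
# raw 3*32*32 uint8 CIFAR-10 buffer produced by tobytes().
def parse_tfrecord_example(serialized_example):
    """ Parses one serialized Example back into an image tensor and a label. """
    features = tf.io.parse_single_example(
        serialized_example,
        features={
            'image': tf.io.FixedLenFeature([], tf.string),
            'label': tf.io.FixedLenFeature([], tf.int64),
        })
    image = tf.reshape(tf.io.decode_raw(features['image'], tf.uint8), [3, 32, 32])
    return image, features['label']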
def _get_file_names(eval_file_idx):
""" get the file names for eval """
file_names = {}
train_files_idx_list = [1, 2, 3, 4, 5]
train_files_idx_list.remove(eval_file_idx)
print('Setting idx \'{}\' as validation/eval data.'.format(eval_file_idx))
file_names['train'] = ['data_batch_%d' % i for i in train_files_idx_list]
file_names['eval'] = ['data_batch_'+str(eval_file_idx)]
file_names['test'] = ['test_batch']
return file_names
def create_tfrecords(cifar10_data_folder, validation_data_idx):
""" function to generate tfrecords
Creates three sub-folders, train, eval, test and put resp
tfr files. CIFAR10 데이터를 이용하여 FRecords를 생성해보자.
"""
batch_files = _get_file_names(validation_data_idx)
tfrecords_outdir = './tfrecords'
for data_type in ['train', 'eval', 'test']:
input_files = [os.path.join(cifar10_data_folder, i) \
for i in batch_files[data_type]]
resp_dir = tfrecords_outdir + '/' + data_type
shutil.rmtree(resp_dir, ignore_errors=True)
os.makedirs(resp_dir)
for ifile in input_files:
batch_file_name = os.path.basename(ifile).split('.')[0]
tfrecords_outfile_name = \
os.path.join(tfrecords_outdir, data_type, batch_file_name + '.tfr')
convert_to_tfrecord([ifile], tfrecords_outfile_name)
def main(argv):
""" main routing to use dump tfrecords for cifar10 data"""
if argv is not None:
print('argv: {}'.format(argv))
parser = argparse.ArgumentParser()
parser.add_argument('-vidx', '--validation_data_idx', type=int,
choices=[1, 2, 3, 4, 5],
help='Define model to run', default=5)
parser.add_argument('-cf', '--cifar10_data_folder', type=str,
required=True, help='Cifar10 data folder path')
args = parser.parse_args()
validation_data_idx = args.validation_data_idx
cifar10_data_folder = args.cifar10_data_folder
create_tfrecords(cifar10_data_folder, validation_data_idx)
if __name__ == '__main__':
main(sys.argv)
|
python
|
#!/usr/bin/python
import numpy as np
from matplotlib import pyplot as plt
from matplotlib import animation
import pdb
import os
directory=os.path.basename(__file__)[:-3]
if not os.path.exists('texFiles/'+str(directory)):
os.system('mkdir texFiles/'+str(directory))
path='texFiles/'+str(directory)
"""
Comparison of the implementation of the 1D elastic set of equations
in dynamics:
- with the USL
- with the GIMP
"""
def export2DTeXFile(fileName,xFields,xlabel,ylabel,time,yfields,*kwargs):
TeXFile=open(fileName,"w")
n_fields = np.shape(yfields)[0]
n_labels = np.shape(kwargs)[0]
# Define Paul Tol's colors (purple to red)
color=['Purple','Orange','Blue','Red','Duck','Green','Yellow']
col=['120,28,129','231,133,50','63,96,174','217,33,32','83,158,182','109,179,136','202,184,67']
marker=['*','x','triangle*','square*','+','star','pentagone*']
marker=['none','none','+','triangle','none','star','pentagone*']
style=['solid','dashed','solid','solid','solid','star','pentagone*']
thickness=['very thick','very thick','thin','thin','thick']
couleur=['Purple','Orange','Blue','Red','black','Duck','Green']
for i in range(len(col)):
TeXFile.write(r'\definecolor{'+color[i]+'}{RGB}{'+col[i]+'}')
TeXFile.write('\n')
TeXFile.write(r'\begin{tikzpicture}[scale=0.9]');TeXFile.write('\n')
TeXFile.write(r'\begin{axis}[xlabel='+str(xlabel)+',ylabel='+str(ylabel)+',ymajorgrids=true,xmajorgrids=true,title={() '+time+'}]');TeXFile.write('\n')
legend=''
for i in range(n_fields):
if i==0:
legend=legend+kwargs[0][i]
else:
legend=legend+','+kwargs[0][i]
TeXFile.write(r'\addplot['+str(couleur[i])+','+str(thickness[i])+',mark='+str(marker[i])+','+str(style[i])+'] coordinates {')
for j in range(np.shape(yfields[i])[0]):
TeXFile.write('('+str(xFields[i][j])+','+str(yfields[i][j])+') ')
TeXFile.write('};\n')
TeXFile.write(r'\legend{'+str(legend)+'}')
TeXFile.write('\n')
TeXFile.write(r'\end{axis}')
TeXFile.write('\n')
    TeXFile.write(r'\end{tikzpicture}')
TeXFile.write('\n')
TeXFile.close()
def export2pgfPlot(fileName,xfield,yfield,xlabel,ylabel):
#pdb.set_trace()
dataFile=open(fileName,"w")
dataFile.write('# Curve ('+str(xlabel)+';'+str(ylabel)+') '+str(len(xfield))+' points.\n')
for i,x in enumerate(xfield):
dataFile.write(str(x)+' '+str(yfield[i])+' i\n')
dataFile.close()
###Opening the files and computation of the solution by each method
###Parameters####
CFL=0.7
NTmaxi = 300
length = 6.0
ppc=1
Nelem = 50
E = 2.0e11
Sigy = 400.0e7
H = 10e9
rho = 7800.0
c=np.sqrt(E/rho)
sigd =0.# -0.25*Sigy
v0=0.5*Sigy/(2*rho*c)
factor=1.
timeOut = 0.36*length/(np.sqrt(E/rho))#0.002
t_order=1
timeUnload = 2*timeOut#2.e-4#2*timeOut
algo = 'USL'
# limit = 0 : minmod // limit = 1 : superbee // limit = 2 : MUSCL
limit=-1
update_position=False
mpm_mapping=True
hardening='isotropic'
parameters = {"CFL":CFL,"Nelem":Nelem,"NTmaxi":NTmaxi,"ppc":ppc,"length":length,"Young":E,"Sigy":Sigy, "H":H,"rho":rho,"sigd":sigd,"timeOut":timeOut,"timeUnload":timeUnload,"update_position":update_position,"v0":v0,"factor":factor,"limit":limit,"algo":algo,"t_order":t_order,"mpm_mapping":mpm_mapping,"hardening":hardening}
#################
##MPM: Material Point Method
USL = dict(parameters)
print 'Computing MPM'
execfile('mpm/elasticity.py', USL)
algo='USF'
parameters = {"CFL":CFL,"Nelem":Nelem,"NTmaxi":NTmaxi,"ppc":ppc,"length":length,"Young":E,"Sigy":Sigy, "H":H,"rho":rho,"sigd":sigd,"timeOut":timeOut,"timeUnload":timeUnload,"update_position":update_position,"v0":v0,"factor":factor,"limit":limit,"algo":algo,"t_order":t_order,"mpm_mapping":mpm_mapping,"hardening":hardening}
##MPM: Material Point Method
USF = dict(parameters)
print 'Computing modified MPM'
execfile('mpm/elasticity.py', USF)
algo='USL'
ppc=2
parameters = {"CFL":CFL,"Nelem":Nelem,"NTmaxi":NTmaxi,"ppc":ppc,"length":length,"Young":E,"Sigy":Sigy, "H":H,"rho":rho,"sigd":sigd,"timeOut":timeOut,"timeUnload":timeUnload,"update_position":update_position,"v0":v0,"factor":factor,"limit":limit,"algo":algo,"t_order":t_order,"mpm_mapping":mpm_mapping,"hardening":hardening}
print "=============== 2PPC COMPUTATIONS ===================="
##MPM: Material Point Method
USL2 = dict(parameters)
print 'Computing USL'
execfile('mpm/elasticity.py', USL2)
algo='USF'
parameters = {"CFL":CFL,"Nelem":Nelem,"NTmaxi":NTmaxi,"ppc":ppc,"length":length,"Young":E,"Sigy":Sigy, "H":H,"rho":rho,"sigd":sigd,"timeOut":timeOut,"timeUnload":timeUnload,"update_position":update_position,"v0":v0,"factor":factor,"limit":limit,"algo":algo,"t_order":t_order,"mpm_mapping":mpm_mapping,"hardening":hardening}
##MPM: Material Point Method
USF2 = dict(parameters)
print 'Computing modified MPM'
execfile('mpm/elasticity.py', USF2)
#############################################################################
######################### Comparison ######################################
#############################################################################
x = np.linspace(0.,length,Nelem+1)
dx = x[1]-x[0]
y=x+(dx/2.0)
#Centres of cells/Elements
y=y[:(len(y)-1)]
####Animated plot ###########################################################
from matplotlib import rcParams
rcParams['axes.labelsize'] = 16
rcParams['xtick.labelsize'] = 16
rcParams['ytick.labelsize'] = 16
rcParams['legend.fontsize'] = 16
#frames=[20,30,40,50,60,80,100,120,140]
frames=[10,25]
#pdb.set_trace()
c = np.sqrt(E/rho)
HT = (H*E)/(H+E) ; cp = np.sqrt(HT/rho)
## Middle point velocity plots
plt.plot(USL["time"][:-1],USL["velo"][24,:],label="USL 1ppc")
plt.plot(USL2["time"][:-1],USL2["velo"][49,:],label="USL 2ppc")
plt.plot(USF["time"][:-1],USF["velo"][24,:],label="USF 1ppc")
plt.plot(USF2["time"][:-1],USF2["velo"][49,:],label="USF 2ppc")
plt.grid()
plt.legend()
plt.show()
# plt.plot(USF["time"][:-1],USF["NRG"][:-1]/max(USF["NRG"][:-1]))
# plt.grid()
# plt.show()
plt.plot(USL["time"][:-1],USL["NRG"][:-1]/max(USL["NRG"][:-1]),'b-x',lw=2.,label='USL 1ppc')
plt.plot(USL2["time"][:-1],USL2["NRG"][:-1]/max(USL2["NRG"][:-1]),'r-x',lw=2.,label='USL 2ppc')
plt.plot(USF["time"][:-1],USF["NRG"][:-1]/max(USF["NRG"][:-1]),'bo',lw=2.,label='USF 1ppc')
plt.plot(USF2["time"][:-1],USF2["NRG"][:-1]/max(USF2["NRG"][:-1]),'ro',lw=2.,label='USF 2ppc')
plt.grid()
plt.legend(numpoints=1)
plt.show()
export2DTeXFile(str(path)+'/US_energies.tex',np.array([USL["time"][:-1],USL2["time"][:-1],USF["time"][:-1],USF2["time"][:-1]]),'$time (s)$',"$\frac{e}{e_{max}}$",'Evolution of total energy',np.array([USL["NRG"][:-1]/max(USL["NRG"][:-1]),USL2["NRG"][:-1]/max(USL2["NRG"][:-1]),USF["NRG"][:-1]/max(USF["NRG"][:-1]),USF2["NRG"][:-1]/max(USF2["NRG"][:-1])]),['USL 1ppc','USL 2ppc','USF 1ppc','USF 2ppc'])
N2=int(Nelem/2.)-40
N1=int(Nelem/2.)-50
for n1 in frames:
time = '%.2e' % USL["time"][n1]
plt.plot(USL["pos"][:,n1],USL["Sth"][:,n1],'k-',lw=2.,ms=8.,label='analytical')
plt.plot(USL["pos"][:,n1],USL["sig"][:,n1],'g-x',lw=2.,ms=8.,label='USL 1ppc')
plt.plot(USL2["pos"][:,n1],USL2["sig"][:,n1],'g-o',lw=2.,ms=8.,label='USL 2ppc')
plt.plot(USF["pos"][:,n1],USF["sig"][:,n1],'rx',lw=2.,ms=8.,label='USF 1ppc')
plt.plot(USF2["pos"][:,n1],USF2["sig"][:,n1],'ro',lw=2.,ms=8.,label='USF 2ppc')
plt.title('Contrainte longitudinale dans la barre au temps t='+str(time)+' s.',size=24.)
plt.xlabel('x (m)',size=24.)
plt.ylabel(r'$\sigma (Pa)$',size=28.)
plt.legend(numpoints=1)
plt.grid()
plt.show()
export2DTeXFile(str(path)+'/US_diffusion'+str(n1)+'.tex',np.array([USL["pos"][:,n1],USL2["pos"][:,n1],USF["pos"][:,n1],USF2["pos"][:,n1]]),'$x (m)$','$\sigma (Pa)$',str(time),np.array([USL["sig"][:,n1],USL2["sig"][:,n1],USF["sig"][:,n1],USF2["sig"][:,n1]]),['USL 1ppc','USL 2ppc','USF 1ppc','USF 2ppc'])
export2DTeXFile(str(path)+'/US_velo'+str(n1)+'.tex',np.array([USL["pos"][:,n1],USL2["pos"][:,n1],USF["pos"][:,n1],USF2["pos"][:,n1],USF2["pos"][:,n1]]),'$x (m)$','$v (m/s)$',str(time),np.array([USL["velo"][:,n1],USL2["velo"][:,n1],USF["velo"][:,n1],USF2["velo"][:,n1],USF2["Vth"][:,n1]]),['USL 1ppc','USL 2ppc','USF 1ppc','USF 2ppc','analytical'])
|
python
|
import logging
from contextlib import closing
from typing import Any, Dict, Optional
from flask_appbuilder.security.sqla.models import User
from flask_babel import gettext as _
from sqlalchemy.engine.url import make_url
from sqlalchemy.exc import DBAPIError, NoSuchModuleError
from rabbitai.commands.base import BaseCommand
from rabbitai.databases.commands.exceptions import (
DatabaseSecurityUnsafeError,
DatabaseTestConnectionDriverError,
DatabaseTestConnectionFailedError,
DatabaseTestConnectionUnexpectedError,
)
from rabbitai.databases.dao import DatabaseDAO
from rabbitai.exceptions import RabbitaiSecurityException
from rabbitai.extensions import event_logger
from rabbitai.models.core import Database
logger = logging.getLogger(__name__)
class TestConnectionDatabaseCommand(BaseCommand):
def __init__(self, user: User, data: Dict[str, Any]):
self._actor = user
self._properties = data.copy()
self._model: Optional[Database] = None
def run(self) -> None:
self.validate()
uri = self._properties.get("sqlalchemy_uri", "")
if self._model and uri == self._model.safe_sqlalchemy_uri():
uri = self._model.sqlalchemy_uri_decrypted
# context for error messages
url = make_url(uri)
context = {
"hostname": url.host,
"password": url.password,
"port": url.port,
"username": url.username,
"database": url.database,
}
try:
database = DatabaseDAO.build_db_for_connection_test(
server_cert=self._properties.get("server_cert", ""),
extra=self._properties.get("extra", "{}"),
impersonate_user=self._properties.get("impersonate_user", False),
encrypted_extra=self._properties.get("encrypted_extra", "{}"),
)
database.set_sqlalchemy_uri(uri)
database.db_engine_spec.mutate_db_for_connection_test(database)
username = self._actor.username if self._actor is not None else None
engine = database.get_sqla_engine(user_name=username)
with closing(engine.raw_connection()) as conn:
try:
alive = engine.dialect.do_ping(conn)
except Exception: # pylint: disable=broad-except
alive = False
if not alive:
raise DBAPIError(None, None, None)
            # Log successful connection test with engine
event_logger.log_with_context(
action="test_connection_success",
engine=database.db_engine_spec.__name__,
)
except (NoSuchModuleError, ModuleNotFoundError) as ex:
event_logger.log_with_context(
action=f"test_connection_error.{ex.__class__.__name__}",
engine=database.db_engine_spec.__name__,
)
raise DatabaseTestConnectionDriverError(
message=_("Could not load database driver: {}").format(
database.db_engine_spec.__name__
),
)
except DBAPIError as ex:
event_logger.log_with_context(
action=f"test_connection_error.{ex.__class__.__name__}",
engine=database.db_engine_spec.__name__,
)
# check for custom errors (wrong username, wrong password, etc)
errors = database.db_engine_spec.extract_errors(ex, context)
raise DatabaseTestConnectionFailedError(errors)
except RabbitaiSecurityException as ex:
event_logger.log_with_context(
action=f"test_connection_error.{ex.__class__.__name__}",
engine=database.db_engine_spec.__name__,
)
raise DatabaseSecurityUnsafeError(message=str(ex))
except Exception as ex: # pylint: disable=broad-except
event_logger.log_with_context(
action=f"test_connection_error.{ex.__class__.__name__}",
engine=database.db_engine_spec.__name__,
)
errors = database.db_engine_spec.extract_errors(ex, context)
raise DatabaseTestConnectionUnexpectedError(errors)
def validate(self) -> None:
database_name = self._properties.get("database_name")
if database_name is not None:
self._model = DatabaseDAO.get_database_by_name(database_name)
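# A minimal usage sketch (hypothetical URI and user object, not part of this module):
#
#   command = TestConnectionDatabaseCommand(
#       some_user, {"sqlalchemy_uri": "postgresql://user:pass@localhost/db"})
#   command.run()  # raises a DatabaseTestConnection* exception above on failure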
|
python
|
"""
Created on Mon Jan 18 12:34:06 2021
Note: this works with ETABS v19, comtypes v1.1.7. It is not working on my local
machine, only on the remote desktop
@author: aguter
"""
import numpy as np
import os
import sys
import comtypes.client
import matplotlib.pyplot as plt
ProgramPath = r"C:\Program Files\Computers and Structures\ETABS 19\ETABS.exe"
helper = comtypes.client.CreateObject('ETABSv1.Helper')
helper = helper.QueryInterface(comtypes.gen.ETABSv1.cHelper)
#create API helper object
ETABSObject = comtypes.client.GetActiveObject("CSI.ETABS.API.ETABSObject")
SapModel = ETABSObject.SapModel
# sets units to kip, ft, Fahrenheit
SapModel.SetPresentUnits(4)
#unlocks model so that sections cuts can be created
SapModel.SetModelIsLocked(False)
areas = SapModel.SelectObj.GetSelected()
area_obj = []
#filters out the floors
for type_obj , beam_num in zip(areas[1],areas[2]) :
if type_obj == 5 :
area_obj.append(beam_num)
else :
pass
AreaInfo = []
PointData = []
for area in area_obj:
AreaInfo.append(SapModel.AreaObj.GetPoints(area))
x = SapModel.AreaObj.GetPoints(area)[1]
for pnt in x:
PointData.append(SapModel.PointObj.GetCoordCartesian(pnt)[0:3])
x_areas = np.array(PointData)[:,0]
y_areas = np.array(PointData)[:,1]
print(AreaInfo)
print(PointData)
#sets the Global Coordinates and local coordinate system that points exist within
class Global_Coord :
def __init__(self, ref_pnt, vector):
self.ref_pnt = np.array(ref_pnt)
hyp = (vector[0]**2+vector[1]**2+vector[2]**2)**0.5
self.vector = np.array(vector)
self.R = np.array([[vector[0]/hyp, -vector[1]/hyp, 0],[vector[1]/hyp,vector[0]/hyp, 0], [0,0,1]])
self.R_inv = np.linalg.inv(self.R)
self.A = np.array(ref_pnt)
# should make this class accept *args, **kwargs
class Point():
def __init__(self, point, global_cord):
self.pnt_cord = np.array(point)
self.R = global_cord.R
self.R_inv = global_cord.R_inv
self.A = global_cord.A
#https://gamedev.stackexchange.com/questions/79765/how-do-i-convert-from-the-global-coordinate-space-to-a-local-space
def glo_to_loc(self):
temp = self.pnt_cord-self.A
loc_coord = np.matmul(self.R_inv,temp)
return loc_coord
#https://gamedev.stackexchange.com/questions/79765/how-do-i-convert-from-the-global-coordinate-space-to-a-local-space
def loc_to_glo(self, loc_coord):
loc_coord = np.array(loc_coord)
glo_coord = np.matmul(self.R,loc_coord) + self.A
return glo_coord
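# A quick sanity-check sketch (illustrative only, assumes the classes above): a
# point converted to local coordinates and back should reproduce the original.
_demo_sys = Global_Coord([0, 0, 0], [1, 0, 0])
_demo_pnt = Point([3.0, 2.0, 1.0], _demo_sys)
assert np.allclose(_demo_pnt.loc_to_glo(_demo_pnt.glo_to_loc()), [3.0, 2.0, 1.0])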
#UI
ref_pnt=[0,0,0]
#UI
vector = [1,0,0]
#set up global coordinate system
global_sys = Global_Coord(ref_pnt, vector)
print(global_sys.R)
local_coords = []
for area_pnts in PointData:
local_coords.append(Point(area_pnts,global_sys).glo_to_loc())
local_coords_trans = np.transpose(np.array(local_coords))
u_max = max(local_coords_trans[0])
u_min = min(local_coords_trans[0])
v_max = max(local_coords_trans[1])
v_min = min(local_coords_trans[1])
distance = u_max - u_min
# UI Number of slices to make along the diaphragm
n_cuts = 100
u_range = np.linspace(u_min+0.01, u_min+distance-0.01, n_cuts)
#UI
height = 10
#class to make the cutting plane
class four_points():
def __init__(self, u_range, v_min, v_max, z_coord):
self.u_range = u_range
self.v_min = v_min
self.v_max = v_max
self.z_coord = z_coord
def make_4_pnts(self):
cuts = []
for i in self.u_range:
cuts.append([[i,self.v_min, self.z_coord-0.5],[i,self.v_min,self.z_coord+0.5],[i,self.v_max,self.z_coord+0.5],[i,self.v_max,self.z_coord-0.5]])
return cuts
#Need to make height a variable, last values
sections = four_points(u_range, v_min, v_max, height).make_4_pnts()
global_quad = []
for quad in sections:
one_quad = []
for pnt in quad:
x = Point(pnt,global_sys).loc_to_glo(pnt)
one_quad.append(x)
global_quad.append(one_quad)
print(global_quad)
x_plt_pnts = []
y_plt_pnts = []
for a in global_quad:
x_plt_pnts.append(a[0][0])
x_plt_pnts.append(a[2][0])
y_plt_pnts.append(a[0][1])
y_plt_pnts.append(a[2][1])
plt.plot(x_plt_pnts,y_plt_pnts)
plt.plot(x_areas,y_areas, 'ro')
plt.show()
name = []
for i in range(n_cuts):
temp = "0000"
len_num = len(str(i))
len_rem = len(temp)-len_num
temp_2 = temp[:len_rem] + str(i)
name.append(temp_2)
print(name)
def make_quad_etabs(name_sect,point):
name = str(name_sect)
final = []
for i in range(4):
if i == 0:
test = [name, 'Quads', 'All', 'Analysis', 'Default', '', '', '', '0','0','0','','Top or Right or Positive3','1', '1', '1', str(point[0][0]), str(point[0][1]), str(point[0][2]), '1']
final.append(test)
elif i == 1:
test = [name, '', '','', '', '', '', '', '','','','','','', '1', '2', str(point[1][0]), str(point[1][1]), str(point[1][2]), '']
final.append(test)
elif i == 2:
test = [name, '', '','', '', '', '', '', '','','','','','', '1', '3', str(point[2][0]), str(point[2][1]), str(point[2][2]), '']
final.append(test)
elif i == 3:
test = [name, '', '','', '', '', '', '', '','','','','','', '1', '4', str(point[3][0]), str(point[3][1]), str(point[3][2]), '']
final.append(test)
return final
etabs_data_sect = []
for i,(etabs_quad,sec_name) in enumerate(zip(global_quad,name)):
etabs_data_sect.append(make_quad_etabs(sec_name, etabs_quad))
flat_etabs_data = []
for point in etabs_data_sect:
temp = []
for data in point:
for sing_data in data:
temp.append(sing_data)
flat_etabs_data.append(temp)
mega_data = []
for point in flat_etabs_data:
for ind_pnt in point:
mega_data.append(ind_pnt)
TableKey = 'Section Cut Definitions'
TableVersion = 1
FieldsKeysIncluded = ['Name', 'Defined By', 'Group','Result Type', 'Result Location', 'Location X', 'Location Y', 'Location Z', 'Rotation About Z','Rotation About Y', 'Rotation About X', 'Axis Angle', 'Element Side', 'Number of Quads', 'Quad Number', 'Point Number', 'Quad X', 'Quad Y', 'Quad Z', 'GUID']
NumberRecords = len(flat_etabs_data)
y = SapModel.DatabaseTables.SetTableForEditingArray(TableKey,TableVersion,FieldsKeysIncluded, NumberRecords,mega_data)
FillImport = True
z= SapModel.DatabaseTables.ApplyEditedTables(FillImport)
model_has_run = SapModel.Analyze.RunAnalysis()
# sets units to kip, ft, Fahrenheit
SapModel.SetPresentUnits(4)
SapModel.Results.Setup.SetCaseSelectedForOutput("EQY+")
NumberResults = 1
SCut = []
LoadCase = []
StepType = []
StepNum = []
F1 = []
F2 = []
F3 = []
M1 = []
M2 = []
M3 = []
test_cut = SapModel.Results.SectionCutAnalysis(NumberResults, SCut, LoadCase, StepType, StepNum, F1, F2, F3, M1, M2, M3)
location = []
for i in sections:
location.append(i[0][0])
shear = test_cut[6]
moment = test_cut[10]
#Plots shear results
plt.plot(location,shear, 'ro-')
plt.xlabel('location')
plt.ylabel('Shear (kips)')
plt.title('Shear of the Diaphragm')
plt.grid(True)
plt.show()
#Plots moment results
plt.plot(location,moment, 'ro-')
plt.xlabel('location')
plt.ylabel('Moment (kip*ft)')
plt.title('Moment of the Diaphragm')
plt.grid(True)
plt.show()
|
python
|
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import string
from copy import deepcopy
from typing import List
import wordninja
from nltk import word_tokenize
from tqdm import tqdm
from nemo.collections.nlp.data.text_normalization import constants
from nemo.utils import logging
__all__ = ['read_data_file', 'normalize_str', 'flatten', 'process_url']
def flatten(l):
""" flatten a list of lists """
return [item for sublist in l for item in sublist]
def input_preprocessing(sent: str, lang: str):
""" Function for preprocessing the input texts. The function first does
some basic tokenization. For English, it then also processes Greek letters
such as Δ or λ (if any).
Args:
sent: input text.
lang: language
Returns: preprocessed input text.
"""
# Basic Preprocessing and Tokenization
if lang == constants.ENGLISH:
sent = sent.replace('+', ' plus ')
sent = sent.replace('=', ' equals ')
sent = sent.replace('@', ' at ')
sent = sent.replace('*', ' times ')
# Greek letters processing
if lang == constants.ENGLISH:
for jx, tok in enumerate(sent):
if tok in constants.EN_GREEK_TO_SPOKEN:
sent = sent[:jx] + constants.EN_GREEK_TO_SPOKEN[tok] + sent[jx + 1 :]
return sent
def read_data_file(fp: str, lang: str, max_insts: int = -1):
""" Reading the raw data from a file of NeMo format
For more info about the data format, refer to the
`text_normalization doc <https://github.com/NVIDIA/NeMo/blob/main/docs/source/nlp/text_normalization.rst>`.
Args:
fp: file paths
lang: language
max_insts: Maximum number of instances (-1 means no limit)
Returns:
insts: List of sentences parsed as list of words
"""
insts, w_words, s_words, classes = [], [], [], []
# Read input file
with open(fp, 'r', encoding='utf-8') as f:
for line in tqdm(f):
es = [e.strip() for e in input_preprocessing(line.strip(), lang=lang).split('\t')]
if es[0] == '<eos>':
inst = (deepcopy(classes), deepcopy(w_words), deepcopy(s_words))
insts.append(inst)
# Reset
w_words, s_words, classes = [], [], []
if max_insts > 0 and len(insts) >= max_insts:
break
else:
classes.append(es[0])
w_words.append(es[1])
s_words.append(es[2])
return insts
def process_url(tokens: List[str], outputs: List[str], lang: str):
"""
The function is used to process the spoken form of every URL in an example.
E.g., "dot h_letter _letter t_letter _letter m_letter _letter l_letter" ->
"dot h t m l"
Args:
tokens: The tokens of the written form
outputs: The expected outputs for the spoken form
lang: Selected language.
Return:
outputs: The outputs for the spoken form with preprocessed URLs.
"""
if lang != constants.ENGLISH:
return outputs
for i in range(len(tokens)):
t, o = tokens[i], outputs[i]
if o != constants.SIL_WORD and '_letter' in o:
o_tokens = o.split(' ')
all_spans, cur_span = [], []
for j in range(len(o_tokens)):
if len(o_tokens[j]) == 0:
continue
if o_tokens[j] == '_letter':
all_spans.append(cur_span)
all_spans.append([' '])
cur_span = []
else:
o_tokens[j] = o_tokens[j].replace('_letter', '')
cur_span.append(o_tokens[j])
if len(cur_span) > 0:
all_spans.append(cur_span)
o_tokens = flatten(all_spans)
o = ''
for o_token in o_tokens:
if len(o_token) > 1:
o += ' ' + o_token + ' '
else:
o += o_token
o = o.strip()
o_tokens = wordninja.split(o)
o = ' '.join(o_tokens)
outputs[i] = o
return outputs
def normalize_str(input_str):
""" Normalize an input string """
    return input_str.strip().lower().replace("  ", " ")
def remove_puncts(input_str):
""" Remove punctuations from an input string """
return input_str.translate(str.maketrans('', '', string.punctuation))
def basic_tokenize(input_str, lang):
"""
The function is used to do some basic tokenization
Args:
input_str: The input string
lang: Language of the input string
Return: a list of tokens of the input string
"""
if lang == constants.ENGLISH:
return word_tokenize(input_str)
return input_str.strip().split(' ')
def post_process_punct(input: str, nn_output: str):
"""
Post-processing of the normalized output to match input in terms of spaces around punctuation marks.
After NN normalization, Moses detokenization puts a space after
punctuation marks, and attaches an opening quote "'" to the word to the right.
E.g., input to the TN NN model is "12 test' example",
after normalization and detokenization -> "twelve test 'example" (the quote is considered to be an opening quote,
but it doesn't match the input and can cause issues during TTS voice generation.)
The current function will match the punctuation and spaces of the normalized text with the input sequence.
"12 test' example" -> "twelve test 'example" -> "twelve test' example" (the quote was shifted to match the input).
Args:
input: input text (original input to the NN, before normalization or tokenization)
nn_output: output text (output of the TN NN model)
"""
input = [x for x in input]
nn_output = [x for x in nn_output]
punct_marks = string.punctuation
try:
for punct in punct_marks:
if input.count(punct) != nn_output.count(punct):
continue
idx_in, idx_out = 0, 0
while punct in input[idx_in:]:
idx_in = input.index(punct, idx_in)
idx_out = nn_output.index(punct, idx_out)
if idx_in > 0 and idx_out > 0:
if nn_output[idx_out - 1] == " " and input[idx_in - 1] != " ":
nn_output[idx_out - 1] = ""
elif nn_output[idx_out - 1] != " " and input[idx_in - 1] == " ":
nn_output[idx_out - 1] += " "
if idx_in < len(input) - 1 and idx_out < len(nn_output) - 1:
if nn_output[idx_out + 1] == " " and input[idx_in + 1] != " ":
nn_output[idx_out + 1] = ""
elif nn_output[idx_out + 1] != " " and input[idx_in + 1] == " ":
nn_output[idx_out] = nn_output[idx_out] + " "
idx_out += 1
idx_in += 1
except:
logging.warning(f"Skipping post-processing of {''.join(nn_output)}")
nn_output = "".join(nn_output)
return re.sub(r' +', ' ', nn_output)
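# Example (taken from the docstring above): the opening quote produced by detokenization
# is shifted back so that spacing matches the original input:
#   post_process_punct(input="12 test' example", nn_output="twelve test 'example")
#   # -> "twelve test' example"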
|
python
|
from unittest import mock
from src.video_player import VideoPlayer
def test_flag_video_with_reason(capfd):
player = VideoPlayer()
player.flag_video("amazing_cats_video_id", "dont_like_cats")
out, err = capfd.readouterr()
lines = out.splitlines()
assert len(lines) == 1
assert "Successfully flagged video: Amazing Cats (reason: dont_like_cats)" \
in lines[0]
def test_flag_video_without_reason(capfd):
player = VideoPlayer()
player.flag_video("another_cat_video_id")
out, err = capfd.readouterr()
lines = out.splitlines()
assert len(lines) == 1
assert "Successfully flagged video: Another Cat Video " \
"(reason: Not supplied)" in lines[0]
def test_flag_video_already_flagged(capfd):
player = VideoPlayer()
player.flag_video("amazing_cats_video_id", "dont_like_cats")
player.flag_video("amazing_cats_video_id", "dont_like_cats")
out, err = capfd.readouterr()
lines = out.splitlines()
assert len(lines) == 2
assert "Successfully flagged video: Amazing Cats (reason: dont_like_cats)" in \
lines[0]
assert "Cannot flag video: Video is already flagged" in lines[1]
def test_flag_video_nonexistent(capfd):
player = VideoPlayer()
player.flag_video("video_does_not_exist", "flag_video_reason")
out, err = capfd.readouterr()
lines = out.splitlines()
assert len(lines) == 1
assert "Cannot flag video: Video does not exist" in lines[0]
def test_flag_video_can_no_longer_play(capfd):
player = VideoPlayer()
player.flag_video("amazing_cats_video_id")
player.play_video("amazing_cats_video_id")
out, err = capfd.readouterr()
lines = out.splitlines()
assert len(lines) == 2
assert "Successfully flagged video: Amazing Cats " \
"(reason: Not supplied)" in lines[0]
assert "Cannot play video: Video is currently flagged " \
"(reason: Not supplied)" in lines[1]
def test_flag_videos_play_random(capfd):
player = VideoPlayer()
player.flag_video("funny_dogs_video_id")
player.flag_video("amazing_cats_video_id")
player.flag_video("another_cat_video_id")
player.flag_video("life_at_google_video_id")
player.flag_video("nothing_video_id")
player.play_random_video()
out, err = capfd.readouterr()
lines = out.splitlines()
assert len(lines) == 6
assert "Successfully flagged video: Funny Dogs " \
"(reason: Not supplied)" in lines[0]
assert "Successfully flagged video: Amazing Cats " \
"(reason: Not supplied)" in lines[1]
assert "Successfully flagged video: Another Cat Video " \
"(reason: Not supplied)" in lines[2]
assert "Successfully flagged video: Life at Google " \
"(reason: Not supplied)" in lines[3]
assert "Successfully flagged video: Video about nothing " \
"(reason: Not supplied)" in lines[4]
assert "No videos available" in lines[5]
def test_flag_video_add_to_playlist(capfd):
player = VideoPlayer()
player.flag_video("amazing_cats_video_id")
player.create_playlist("my_playlist")
player.add_to_playlist("my_playlist", "amazing_cats_video_id")
out, err = capfd.readouterr()
lines = out.splitlines()
assert len(lines) == 3
assert ("Successfully flagged video: Amazing Cats "
"(reason: Not supplied)") in lines[0]
assert "Successfully created new playlist: my_playlist" in lines[1]
assert ("Cannot add video to my_playlist: Video is currently "
"flagged (reason: Not supplied)") in lines[2]
def test_flag_video_show_playlist(capfd):
player = VideoPlayer()
player.create_playlist("my_playlist")
player.add_to_playlist("my_playlist", "amazing_cats_video_id")
player.flag_video("amazing_cats_video_id", "dont_like_cats")
player.show_playlist("my_playlist")
out, err = capfd.readouterr()
lines = out.splitlines()
assert len(lines) == 5
assert "Successfully created new playlist: my_playlist" in lines[0]
assert "Added video to my_playlist: Amazing Cats" in lines[1]
assert "Successfully flagged video: Amazing Cats " \
"(reason: dont_like_cats)" in lines[2]
assert "Showing playlist: my_playlist" in lines[3]
assert ("Amazing Cats (amazing_cats_video_id) [#cat #animal] - FLAGGED "
"(reason: dont_like_cats)") in lines[4]
def test_flag_video_show_all_videos(capfd):
player = VideoPlayer()
player.flag_video("amazing_cats_video_id", "dont_like_cats")
player.show_all_videos()
out, err = capfd.readouterr()
lines = out.splitlines()
assert len(lines) == 7
assert "Successfully flagged video: Amazing Cats " \
"(reason: dont_like_cats)" in lines[0]
assert "Here's a list of all available videos:" in lines[1]
assert ("Amazing Cats (amazing_cats_video_id) [#cat #animal] - FLAGGED "
"(reason: dont_like_cats)") in lines[2]
assert "Another Cat Video (another_cat_video_id) [#cat #animal]" in lines[3]
assert "Funny Dogs (funny_dogs_video_id) [#dog #animal]" in lines[4]
assert "Life at Google (life_at_google_video_id) [#google #career]" in \
lines[5]
assert "Video about nothing (nothing_video_id) []" in lines[6]
@mock.patch('builtins.input', lambda *args: 'No')
def test_flag_video_search_videos(capfd):
player = VideoPlayer()
player.flag_video("amazing_cats_video_id", "dont_like_cats")
player.search_videos("cat")
out, err = capfd.readouterr()
lines = out.splitlines()
assert len(lines) == 5
assert "Successfully flagged video: Amazing Cats " \
"(reason: dont_like_cats)" in lines[0]
assert "Here are the results for cat:" in lines[1]
assert "1) Another Cat Video (another_cat_video_id) [#cat #animal]" in \
lines[2]
assert ("Would you like to play any of the above? If yes, "
"specify the number of the video.") in lines[3]
assert ("If your answer is not a valid number, we will assume "
"it's a no.") in lines[4]
@mock.patch('builtins.input', lambda *args: 'No')
def test_flag_video_search_videos_with_tag(capfd):
player = VideoPlayer()
player.flag_video("amazing_cats_video_id", "dont_like_cats")
player.search_videos_tag("#cat")
out, err = capfd.readouterr()
lines = out.splitlines()
assert len(lines) == 5
assert "Successfully flagged video: Amazing Cats " \
"(reason: dont_like_cats)" in lines[0]
assert "Here are the results for #cat:" in lines[1]
assert "1) Another Cat Video (another_cat_video_id) [#cat #animal]" in \
lines[2]
assert ("Would you like to play any of the above? If yes, "
"specify the number of the video.") in lines[3]
assert ("If your answer is not a valid number, we will assume "
"it's a no.") in lines[4]
def test_flag_video_stops_playing_video(capfd):
player = VideoPlayer()
player.play_video("amazing_cats_video_id")
player.flag_video("amazing_cats_video_id", "dont_like_cats")
player.show_playing()
out, err = capfd.readouterr()
lines = out.splitlines()
assert len(lines) == 4
assert "Playing video: Amazing Cats" in lines[0]
assert "Stopping video: Amazing Cats" in lines[1]
assert "Successfully flagged video: Amazing Cats " \
"(reason: dont_like_cats)" in lines[2]
assert "No video is currently playing" in lines[3]
def test_flag_video_leaves_video_if_video_is_different(capfd):
player = VideoPlayer()
player.play_video("amazing_cats_video_id")
player.flag_video("another_cat_video_id", "dont_like_cats")
player.show_playing()
out, err = capfd.readouterr()
lines = out.splitlines()
assert len(lines) == 3
assert "Playing video: Amazing Cats" in lines[0]
assert "Successfully flagged video: Another Cat Video " \
"(reason: dont_like_cats)" in lines[1]
assert "Currently playing: Amazing Cats (amazing_cats_video_id) " \
"[#cat #animal]" in lines[2]
def test_flag_video_stops_paused_video(capfd):
player = VideoPlayer()
player.play_video("amazing_cats_video_id")
player.pause_video()
player.flag_video("amazing_cats_video_id", "dont_like_cats")
player.show_playing()
out, err = capfd.readouterr()
lines = out.splitlines()
assert len(lines) == 5
assert "Playing video: Amazing Cats" in lines[0]
assert "Pausing video: Amazing Cats" in lines[1]
assert "Stopping video: Amazing Cats" in lines[2]
assert "Successfully flagged video: Amazing Cats " \
"(reason: dont_like_cats)" in lines[3]
assert "No video is currently playing" in lines[4]
def test_allow_video(capfd):
player = VideoPlayer()
player.flag_video("amazing_cats_video_id", "dont_like_cats")
player.allow_video("amazing_cats_video_id")
out, err = capfd.readouterr()
lines = out.splitlines()
assert len(lines) == 2
assert "Successfully flagged video: Amazing Cats " \
"(reason: dont_like_cats)" in lines[0]
assert "Successfully removed flag from video: Amazing Cats" in lines[1]
def test_allow_video_not_flagged(capfd):
player = VideoPlayer()
player.allow_video("amazing_cats_video_id")
out, err = capfd.readouterr()
lines = out.splitlines()
assert len(lines) == 1
assert "Cannot remove flag from video: Video is not flagged" in lines[0]
def test_allow_video_nonexistent(capfd):
player = VideoPlayer()
player.allow_video("video_does_not_exist")
out, err = capfd.readouterr()
lines = out.splitlines()
assert len(lines) == 1
assert "Cannot remove flag from video: Video does not exist" in lines[0]
def test_allow_video_show_playlist(capfd):
player = VideoPlayer()
player.create_playlist("my_playlist")
player.add_to_playlist("my_playlist", "amazing_cats_video_id")
player.flag_video("amazing_cats_video_id", "dont_like_cats")
player.show_playlist("my_playlist")
player.allow_video("amazing_cats_video_id")
player.show_playlist("my_playlist")
out, err = capfd.readouterr()
lines = out.splitlines()
assert len(lines) == 8
assert "Successfully created new playlist: my_playlist" in lines[0]
assert "Added video to my_playlist: Amazing Cats" in lines[1]
assert ("Successfully flagged video: Amazing Cats "
"(reason: dont_like_cats)") in lines[2]
assert "Showing playlist: my_playlist" in lines[3]
assert ("Amazing Cats (amazing_cats_video_id) [#cat #animal] - FLAGGED "
"(reason: dont_like_cats)") in lines[4]
assert "Successfully removed flag from video: Amazing Cats" in lines[5]
assert "Showing playlist: my_playlist" in lines[6]
assert "Amazing Cats (amazing_cats_video_id) [#cat #animal]" in lines[7]
|
python
|
from app import db
from app import models
from flask import Blueprint
bp = Blueprint("setup", __name__)
@bp.route("/", methods=["POST"], strict_slashes=False)
def setup():
models.Base.metadata.create_all(db.get_engine())
return "<h1> Created DB successfully! </h1>"
|
python
|
try:
from setuptools import setup
except ImportError:
print("No setuptools package, using distutils.core")
from distutils.core import setup
import unittest
def my_test_suite():
test_loader = unittest.TestLoader()
test_suite = test_loader.discover('tests', pattern='test_*.py')
return test_suite
setup(name='kpp',
version='0.1',
author='Jamie Fairbrother',
author_email='[email protected]',
url='http://www.lancs.ac.uk/~fairbrot',
test_suite='setup.my_test_suite',
packages=['kpp'])
|
python
|
from .compare_gw import *
|
python
|
from django import forms
from django.forms import widgets
from django.utils.translation import ugettext as _
from .models import Organization
from .utils import get_data_source_model
class OrganizationForm(forms.ModelForm):
class Meta:
model = Organization
fields = (
'data_source', 'origin_id', 'classification',
'name', 'founding_date', 'dissolution_date',
'internal_type', 'parent', 'admin_users',
'regular_users', 'private_users', 'replaced_by',
)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
        # these fields can be excluded dynamically, for example when they are set to readonly in the admin
if 'replaced_by' in self.fields:
            # An organization cannot be replaced by an organization that has already been replaced
qs = Organization.objects.filter(replaced_by__isnull=True)
if self.instance.id:
# prevent self reference
qs = qs.exclude(id=self.instance.id)
self.fields['replaced_by'].queryset = qs
if 'data_source' in self.fields:
# Only allow selecting data source within editable sources
self.fields['data_source'].queryset = get_data_source_model().objects.filter(user_editable=True)
if 'parent' in self.fields and self.instance.id:
# prevent recursive reference
desc_ids = self.instance.get_descendants(include_self=True).values_list('id', flat=True)
self.fields['parent'].queryset = Organization.objects.exclude(id__in=desc_ids)
def clean(self):
cleaned_data = super().clean()
internal_type = cleaned_data.get('internal_type')
parent = cleaned_data.get('parent')
if internal_type == Organization.AFFILIATED and parent is None:
raise forms.ValidationError(_('Affiliated organization must have a parent organization'))
return cleaned_data
class SubOrganizationForm(forms.ModelForm):
default_internal_type = Organization.NORMAL
class Meta:
model = Organization
fields = (
'name', 'founding_date', 'classification', 'data_source', 'origin_id', 'internal_type'
)
widgets = {
'internal_type': widgets.HiddenInput,
}
def __init__(self, *args, **kwargs):
if 'initial' not in kwargs:
kwargs['initial'] = {}
kwargs['initial']['internal_type'] = self.default_internal_type
super().__init__(*args, **kwargs)
        # these fields can be excluded dynamically, for example when they are set to readonly in the admin
if 'data_source' in self.fields:
# Only allow selecting data source within editable sources
self.fields['data_source'].queryset = get_data_source_model().objects.filter(user_editable=True)
def clean_internal_type(self):
return self.initial['internal_type'] # do not allow changing internal_type
class AffiliatedOrganizationForm(SubOrganizationForm):
default_internal_type = Organization.AFFILIATED
|
python
|
import inspect
import sys
import threading
from typing import Callable, Union, TypeVar, Any, Generic, Optional, List, Iterable
class PromiseResolver:
__slots__ = ("_promise", )
def __init__(self, promise):
self._promise = promise
def resolve(self, val):
if self._promise is None:
return
self._promise._resolve(val)
self._promise = None
def reject(self, reason):
if self._promise is None:
return
self._promise._reject(reason)
self._promise = None
T = TypeVar('T')
X = TypeVar('X')
PromiseOrT = Union['Promise[T]', T]
PromiseOrX = Union['Promise[X]', X]
class PromiseEventLoop:
def __init__(self):
self._scheduled = []
self._scheduled_next = []
self._scheduled_offthread = []
self._cv = threading.Condition()
def schedule_thread(self, fn: Callable[[], None]):
"""
Executes fn the next time event loop is processed.
        It is safe to call from a different thread.
"""
with self._cv:
self._scheduled_offthread.append(fn)
self._cv.notify()
def schedule(self, fn: Callable[[], None]):
"""
Executes fn the next time event loop is processed.
"""
self._scheduled_next.append(fn)
def unhandled_rejection(self, promise: PromiseOrX, reason: Any) -> None:
raise RuntimeError(f"Unhandled rejection: {reason}")
def pull_from_offthread(self):
with self._cv:
self._scheduled_next.extend(self._scheduled_offthread)
self._scheduled_offthread.clear()
def process(self):
self.pull_from_offthread()
while self._scheduled_next:
while self._scheduled_next:
self._scheduled, self._scheduled_next = self._scheduled_next, self._scheduled
for fn in self._scheduled:
fn()
self._scheduled.clear()
self.pull_from_offthread()
def wait(self):
with self._cv:
self.pull_from_offthread()
if len(self._scheduled_next) == 0:
def has_new_elements():
return len(self._scheduled_offthread) > 0
self._cv.wait_for(has_new_elements)
self.process()
EventLoop = PromiseEventLoop()
STATE_PENDING = 0
STATE_RESOLVED = 1
STATE_REJECTED = 2
class Promise(Generic[T]):
__slots__ = ("_listeners", "_state", "_value")
def __init__(self, func: Callable[[Callable[[PromiseOrT], None], Callable[[Any], None]], None]):
self._listeners = None
self._state = STATE_PENDING
self._value = None
if func:
resolver = PromiseResolver(self)
def callable():
try:
func(resolver.resolve, resolver.reject)
except:
resolver.reject(sys.exc_info()[1])
EventLoop.schedule(callable)
@staticmethod
def resolve(value: PromiseOrT) -> 'Promise[T]':
if isinstance(value, Promise):
return value
return Promise(lambda res, _: res(value))
@staticmethod
def reject(reason: Any) -> 'Promise[Any]':
return Promise(lambda _, rej: rej(reason))
@staticmethod
def all(seq: Iterable['Promise[Any]']) -> 'Promise[List[Any]]':
promise_list = list(seq)
if len(promise_list) == 0:
return Promise.resolve([])
def worker(resolve, reject):
result = [None] * len(promise_list)
pending = len(promise_list)
for idx, promise in enumerate(promise_list):
def fn(succ, val, idx=idx):
if not succ:
return reject(val)
nonlocal pending
result[idx] = val
pending -= 1
if pending <= 0:
resolve(result)
Promise.resolve(promise)._listen(fn)
return Promise(worker)
def then(
self,
on_succ: Callable[[T], PromiseOrX],
on_fail: Optional[Callable[[Any], PromiseOrX]] = None
) -> 'Promise[X]':
def cont(succ, val):
if succ:
if on_succ is not None:
return on_succ(val)
else:
return val
else:
if on_fail is not None:
return on_fail(val)
else:
return self
return self._chain(cont)
def catch(self, on_fail: Callable[[Any], PromiseOrX]) -> 'Promise[X]':
def cont(succ, val):
if succ:
return val
else:
return on_fail(val)
return self._chain(cont)
def _chain(self, func: Callable[[bool, Union[T, Any]], PromiseOrX]) -> 'Promise[X]':
promise = Promise(lambda res, rej: None)
def cont(succ, val):
try:
promise._resolve(func(succ, val))
except:
promise._reject(sys.exc_info()[1])
self._listen(cont)
return promise
def _listen(self, func: Callable[[bool, Union[T, Any]], None]) -> None:
if self._listeners is None:
self._listeners = func
self._fire_if_settled()
else:
if not isinstance(self._listeners, list):
self._listeners = [self._listeners]
self._listeners.append(func)
if len(self._listeners) == 1:
self._fire_if_settled()
def _fire_if_settled(self):
if self._state != STATE_PENDING:
EventLoop.schedule(self._dispatch)
def _dispatch(self):
if self._listeners is None:
if self._state == STATE_REJECTED:
EventLoop.unhandled_rejection(self, self._value)
else:
to_dispatch = []
if isinstance(self._listeners, list):
to_dispatch.extend(self._listeners)
self._listeners.clear()
else:
to_dispatch.append(self._listeners)
self._listeners = None
for listener in to_dispatch:
listener(self._state == STATE_RESOLVED, self._value)
def always(self, func: Callable[[], None]) -> 'Promise[T]':
def cont(_, __):
func()
return self
return self._chain(cont)
def run_until_completion(self) -> T:
while self._state == STATE_PENDING:
EventLoop.wait()
if self._state == STATE_RESOLVED:
return self._value
else:
raise self._value
def _resolve(self, val: PromiseOrT) -> None:
self._fulfill(True, val)
def _reject(self, reason: Any) -> None:
self._fulfill(False, reason)
def _fulfill(self, succ: bool, val: Union[T, Any]) -> None:
if succ:
if isinstance(val, Promise):
val._listen(self._fulfill)
else:
self._settle(succ, val)
else:
self._settle(succ, val)
def _settle(self, succ: bool, val: Union[T, Any]) -> None:
if succ:
self._state = STATE_RESOLVED
else:
self._state = STATE_REJECTED
self._value = val
if self._listeners or self._state == STATE_REJECTED:
EventLoop.schedule(self._dispatch)
def asyncify(fn):
def wrapper(*args, **kwargs):
def worker(resolve, reject):
if not inspect.isgeneratorfunction(fn):
resolve(fn(*args, **kwargs))
else:
iter = fn(*args, **kwargs)
def spin_val(success, value):
try:
if success:
result = iter.send(value)
else:
result = iter.throw(value)
except StopIteration as stop:
return resolve(stop.value)
except:
return reject(sys.exc_info()[1])
if isinstance(result, Promise):
result.then(
lambda val: spin_val(True, val),
lambda reason: spin_val(False, reason)
)
else:
Promise.resolve(result).then(lambda val: spin_val(True, val))
spin_val(True, None)
return Promise(worker)
return wrapper
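# Minimal usage sketch (assumes the module-level EventLoop above drives execution):
if __name__ == "__main__":
    @asyncify
    def add_async():
        a = yield Promise.resolve(1)  # each yielded Promise is awaited by asyncify
        b = yield Promise.resolve(2)
        return a + b
    # run_until_completion() pumps the event loop until the promise settles
    print(add_async().run_until_completion())  # prints 3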
|
python
|
from __future__ import annotations
import multiprocessing
import queue
import threading
import time
from dataclasses import dataclass, field
from multiprocessing import Queue, Process
from queue import Empty
from threading import Thread
from typing import List, Dict
import psutil
import requests
from MHDDoS.attack import Attack, AttackState
from MHDDoS.utils.config_files import read_configuration_file_lines
from MHDDoS.utils.proxies import load_proxies
from MHDDoS.utils.targets import Target
from utils.input_args import Arguments
from utils.logs import get_logger_for_current_process
from utils.misc import TimeInterval
from utils.network import IPGeolocationData
@dataclass(slots=True, order=True, frozen=True)
class AttackSupervisorState:
is_fetching_targets: bool
is_fetching_proxies: bool
is_fetching_geolocation: bool
attack_states: List[AttackState]
targets_addresses: List[str]
proxies_addresses: List[str]
local_ip_geolocation: IPGeolocationData
timestamp: float = field(default_factory=time.time)
@property
def attack_processes_count(self) -> int:
return len(self.attack_states) if (self.attack_states is not None) else 0
@property
def proxies_count(self) -> int:
return len(self.proxies_addresses) if (self.proxies_addresses is not None) else 0
@property
def targets_count(self) -> int:
return len(self.targets_addresses) if (self.targets_addresses is not None) else 0
def __eq__(self, other):
if not isinstance(other, AttackSupervisorState):
return False
return self.timestamp == other.timestamp
class AttackSupervisor(Thread):
"""Thread which controls the state of the attack processes in Palyanytsya."""
_args: Arguments
_attacks_state_queue: multiprocessing.Queue
_supervisor_state_queue: queue.Queue
_logging_queue: multiprocessing.Queue
_targets: List[Target] = []
_proxies_addresses: List[str] = []
_attack_processes: List[Process] = []
_attack_states: Dict[int, AttackState] = {}
_targets_fetch_interval: TimeInterval
_proxies_fetch_interval: TimeInterval
_geolocation_fetch_interval: TimeInterval
_internal_loop_sleep_interval: float = 0.01
_state_publish_interval: float = 0.5
_is_fetching_proxies: bool = False
_is_fetching_targets: bool = False
_is_fetching_geolocation: bool = False
_last_geolocation: IPGeolocationData = None
_state_publisher_thread: Thread = None
exception: Exception = None
def __init__(self,
args: Arguments,
supervisor_state_queue: queue.Queue,
logging_queue: multiprocessing.Queue):
Thread.__init__(self, daemon=True, name="Supervisor")
self.logger = get_logger_for_current_process(logging_queue, "SUPERVISOR")
self._logging_queue = logging_queue
self._args = args
self._supervisor_state_queue = supervisor_state_queue
self._attacks_state_queue = Queue()
self._targets_fetch_interval = TimeInterval(args.config_fetch_interval)
self._proxies_fetch_interval = TimeInterval(args.proxies_fetch_interval)
self._geolocation_fetch_interval = TimeInterval(20)
        stop_event = threading.Event()
        stop_event.set()  # the event stays "set" while the supervisor should keep running; stop() clears it
        self._stop_event = stop_event
self.logger.info("Starting attack supervisor...")
def run(self) -> None:
logger = self.logger
self._state_publisher_thread = Thread(target=self._state_publisher, daemon=True)
self._state_publisher_thread.start()
try:
self.run_main_loop()
except Exception as e:
self.exception = e
logger.exception("Exception in Supervisor", exc_info=e)
except (KeyboardInterrupt, SystemExit):
pass
finally:
logger.info("Supervisor thread stopping...")
self.stop()
self.cleanup()
logger.info("Supervisor thread stopped.")
def run_main_loop(self):
while self._stop_event.is_set():
targets_changed = False
if self._targets_fetch_interval.check_if_has_passed():
self._is_fetching_targets = True
targets_changed = self._fetch_targets()
self._is_fetching_targets = False
proxies_changed = False
if self._proxies_fetch_interval.check_if_has_passed():
self._is_fetching_proxies = True
proxies_changed = self._fetch_proxies()
self._is_fetching_proxies = False
if self._geolocation_fetch_interval.check_if_has_passed():
self._is_fetching_geolocation = True
self._check_geolocation()
self._is_fetching_geolocation = False
if targets_changed or proxies_changed:
self._restart_attacks()
self._update_attack_states()
time.sleep(self._internal_loop_sleep_interval)
def cleanup(self):
logger = self.logger
logger.info("Stopping state publisher thread...")
self._state_publisher_thread.join()
logger.info("State publisher thread stopped.")
logger.info("Stopping attacks...")
self._stop_all_attacks(True)
self._attacks_state_queue.close()
logger.info("Attacks stopped.")
def stop(self):
self._stop_event.clear()
def _fetch_targets(self) -> bool:
"""
Fetches targets configuration.
Returns:
True if targets have changed, False otherwise.
"""
logger = self.logger
logger.info("Fetching targets...")
config = self._args.config
new_targets_strings: List[str] = []
new_targets_strings.extend(self._args.targets)
if config:
new_targets_strings.extend(read_configuration_file_lines(config))
new_targets: List[Target] = []
for target_string in new_targets_strings:
logger.info(f"parsing from '{target_string}'")
target = Target.parse_from_string(target_string)
if not target:
continue
elif target.is_valid:
new_targets.append(target)
logger.info(f"Fetched target: '{target}'")
else:
logger.error(f"Target '{target}' is not valid. Will not attack this one.")
# check if the targets changed
if self._compare_lists(new_targets_strings, self._targets):
logger.info("Targets have not changed.")
return False
self._targets = new_targets
logger.info(f"Targets updated. Attack processes will be re-initialized for {len(new_targets)} loaded targets.")
return True
def _fetch_proxies(self) -> bool:
"""
Fetches proxies configuration.
Returns:
True if proxies have changed, False otherwise.
"""
logger = self.logger
proxies_file_path = self._args.proxies
if proxies_file_path is None:
return False
logger.info("Fetching proxies...")
new_proxies = load_proxies(proxies_file_path)
if new_proxies is None:
logger.error(f"Could not load any proxies from the given path: '{proxies_file_path}'")
return False
# check if the proxies changed (we compare the addresses because Proxy objects do not have __eq__ method defined)
new_proxies_addresses = [f"{p}" for p in new_proxies]
if self._compare_lists(new_proxies_addresses, self._proxies_addresses):
logger.info("Proxies have not changed.")
return False
self._proxies_addresses = new_proxies_addresses
logger.info(f"Proxies updated. Attack processes will be re-initialized for {len(new_proxies_addresses)} loaded proxies.")
return True
def _check_geolocation(self) -> None:
"""
Checks local machine's IP geolocation and stops the attacks if it changed since the previous check.
"""
logger = self.logger
try:
new_geolocation = IPGeolocationData.get_for_my_ip(10)
except requests.ConnectionError:
logger.error(f"Failed to get geolocation data for the local IP (ConnectionError). Will retry in {self._geolocation_fetch_interval.time_left:.0f} seconds.")
return
geolocation_changed = (new_geolocation != self._last_geolocation) and (self._last_geolocation is not None)
if geolocation_changed and not self._args.ignore_geolocation_change:
# stop running attacks, if any
if self.running_attacks_count > 0:
logger.warning(f"Geolocation of the local machine's IP has changed!"
f" {self._last_geolocation} -> {new_geolocation}\n"
f" {self.running_attacks_count} running attack processes will be stopped.")
self._stop_all_attacks()
# wait for confirmation from the user to continue
# TODO: add arg to disable this check entirely?
if self._args.no_gui:
input(f"Geolocation of the local machine's IP has changed: {self._last_geolocation} -> {new_geolocation}.\n"
f"Press any key to restart the attacks, or Ctrl+C to exit.")
else:
# TODO: warn the user if geolocation has changed and stop the attacks
pass
self._last_geolocation = new_geolocation
logger.info(f"Checked geolocation: {new_geolocation}")
def _stop_all_attacks(self, blocking: bool = False) -> None:
# kill all existing attack processes
processes_to_kill = self._attack_processes.copy()
for process in processes_to_kill:
process.terminate()
if blocking:
for process in processes_to_kill:
process.join()
def _restart_attacks(self) -> None:
logger = self.logger
# kill all existing attack processes
self._stop_all_attacks()
# check if we have targets
targets_count = len(self._targets)
if targets_count <= 0:
logger.error("Attacks will not be started, as there are no valid targets.")
return
# TODO: apply CPU usage limit to attack processes?
cpu_count = psutil.cpu_count()
cpu_per_target = float(cpu_count) / targets_count
# launch attack process for every target
for i, target in enumerate(self._targets):
attack_process = Attack(
target=target,
attack_methods=self._args.attack_methods,
requests_per_connection=self._args.requests_per_connection,
proxies_file_path=self._args.proxies,
attack_state_queue=self._attacks_state_queue,
logging_queue=self._logging_queue,
)
attack_process.name = f"ATTACK_{i + 1}"
self._attack_processes.append(attack_process)
attack_process.start()
logger.info(f"Started attacks upon {len(self._targets)} targets.")
def _update_attack_states(self) -> None:
if len(self._attack_processes) <= 0:
return
# collect all that's available in the attack state queue
new_states: Dict[int, AttackState] = {}
for _ in range(100): # <- limit retries so that we don't run infinitely in that low-chance situation where our attack loops share state faster than the Supervisor updates
try:
new_state: AttackState = self._attacks_state_queue.get_nowait()
new_states[new_state.attack_pid] = new_state # <- overwrite every new state for the same attack PID; this way we will have only the most recent states left in the Dict after this loop
except Empty:
break
previous_attack_states = self._attack_states.copy()
previous_attack_states.update(new_states)
self._attack_states = previous_attack_states
def _restart_dead_attacks(self):
# TODO: do this
        raise NotImplementedError
def _state_publisher(self):
while self._stop_event.is_set():
sorted_attack_states = [self._attack_states[k] for k in sorted(self._attack_states.keys())]
state = AttackSupervisorState(
is_fetching_proxies=self._is_fetching_proxies,
is_fetching_targets=self._is_fetching_targets,
is_fetching_geolocation=self._is_fetching_geolocation,
attack_states=sorted_attack_states,
proxies_addresses=self._proxies_addresses,
local_ip_geolocation=self._last_geolocation,
targets_addresses=[f"{t}" for t in self._targets]
)
self._supervisor_state_queue.put(state)
time.sleep(self._state_publish_interval)
@staticmethod
def _compare_lists(list_a: List, list_b: List) -> bool:
"""
Compares if two lists have the same items.
The lists get sorted in-place before the comparison.
Args:
list_a: The 1st list.
list_b: The 2nd list.
Returns:
True if all items in the lists are identical, False otherwise.
"""
list_a.sort()
list_b.sort()
return list_a == list_b
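    # Example (sketch): _compare_lists(["b", "a"], ["a", "b"]) returns True,
    # but note that both argument lists are sorted in place as a side effect.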
@property
def running_attacks_count(self) -> int:
alive_processes = [p for p in self._attack_processes if p.is_alive()]
return len(alive_processes)
|
python
|
from .convscore_solver import SolverConvScore
import torch
from utils import to_var
from tqdm import tqdm
from math import isnan
import numpy as np
class SolverConvScoreSSREM(SolverConvScore):
def __init__(self, config, train_data_loader, eval_data_loader, is_train=True, model=None):
super(SolverConvScoreSSREM, self).__init__(config, train_data_loader, eval_data_loader, is_train, model)
def train(self):
epoch_loss_history = []
for epoch_i in range(self.epoch_i, self.config.n_epoch):
self.epoch_i = epoch_i
batch_loss_history = []
self.model.train()
for batch_i, (contexts, res_true, res_ns1, res_ns2, res_ns3, res_ns4) in \
enumerate(tqdm(self.train_data_loader, ncols=80)):
contexts = to_var(torch.FloatTensor(contexts))
res_trues = to_var(torch.FloatTensor(res_true))
res_ns1 = to_var(torch.FloatTensor(res_ns1))
res_ns2 = to_var(torch.FloatTensor(res_ns2))
res_ns3 = to_var(torch.FloatTensor(res_ns3))
res_ns4 = to_var(torch.FloatTensor(res_ns4))
self.optimizer.zero_grad()
# Call forward function
batch_loglikelihood = self.model(contexts, res_trues, res_ns1, res_ns2, res_ns3, res_ns4)
batch_loss = -torch.sum(batch_loglikelihood)
assert not isnan(batch_loss.item())
batch_loss_history.append(batch_loss.item())
if batch_i % self.config.print_every == 0:
tqdm.write(f'Epoch: {epoch_i+1}, iter {batch_i}: loss = {batch_loss.item():.3f}')
# Back-propagation
batch_loss.backward()
                # Gradient clipping
torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.config.clip)
# Run optimizer
self.optimizer.step()
epoch_loss = np.mean(batch_loss_history)
epoch_loss_history.append(epoch_loss)
self.epoch_loss = epoch_loss
if epoch_i % self.config.save_every_epoch == 0:
self.save_model(epoch_i + 1)
self.true_scores, self.false_scores, self.eval_epoch_loss = self.evaluate()
print_str = f'Epoch {epoch_i + 1} loss average: {epoch_loss:.3f}, ' \
f'True:{self.true_scores:.3f}, False:{self.false_scores:.3f}, ' \
f'Validation loss: {self.eval_epoch_loss:.3f}'
print(print_str)
if epoch_i % self.config.plot_every_epoch == 0:
self.write_summary(epoch_i)
self.save_model(self.config.n_epoch)
return epoch_loss_history
def evaluate(self):
self.model.eval()
true_scores_list = list()
false_scores_list = list()
batch_loss_history = list()
for batch_i, (contexts, res_true, res_ns1, res_ns2, res_ns3, res_ns4) in \
enumerate(tqdm(self.eval_data_loader, ncols=80)):
with torch.no_grad():
contexts = to_var(torch.FloatTensor(contexts))
res_trues = to_var(torch.FloatTensor(res_true))
res_ns1 = to_var(torch.FloatTensor(res_ns1))
res_ns2 = to_var(torch.FloatTensor(res_ns2))
res_ns3 = to_var(torch.FloatTensor(res_ns3))
res_ns4 = to_var(torch.FloatTensor(res_ns4))
# Call forward function
true_scores = self.model.score(contexts, res_trues)
false_scores = self.model.score(contexts, res_ns4)
true_scores_list += true_scores.data.cpu().numpy().tolist()
false_scores_list += false_scores.data.cpu().numpy().tolist()
# Call forward function
batch_loglikelihood = self.model(contexts, res_trues, res_ns1, res_ns2, res_ns3, res_ns4)
batch_loss = -torch.sum(batch_loglikelihood)
assert not isnan(batch_loss.item())
batch_loss_history.append(batch_loss.item())
epoch_loss = np.sum(batch_loss_history)
return np.mean(true_scores_list), np.mean(false_scores_list), epoch_loss
|
python
|
import numpy as np
import cv2 as cv
def cv_imshow(title='Res',img=None):
print(img.shape)
cv.imshow(title,img)
cv.waitKey(0)
cv.destroyAllWindows()
def image_normalization(img, img_min=0, img_max=255):
"""This is a typical image normalization function
    where the minimum and maximum of the image are used
source: https://en.wikipedia.org/wiki/Normalization_(image_processing)
:param img: an image could be gray scale or color
:param img_min: for default is 0
:param img_max: for default is 255
:return: a normalized image, if max is 255 the dtype is uint8
"""
img = np.float32(img)
    epsilon = 1e-12  # guards against division by zero for a constant (flat) image
img = (img-np.min(img))*(img_max-img_min)/((np.max(img)-np.min(img))+epsilon)+img_min
return img
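# Example (sketch): image_normalization(np.array([[0.0, 1.0], [2.0, 4.0]]))
# maps the values approximately to [[0.0, 63.75], [127.5, 255.0]] as float32.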
def restore_rgb(config,I):
"""
:param config: [args.channel_swap, args.mean_pixel_value]
:param I: and image or a set of images
:return: an image or a set of images restored
"""
if len(I)>3 and not type(I)==np.ndarray:
I =np.array(I)
I = I[:,:,:,0:3]
n = I.shape[0]
for i in range(n):
x = I[i,...]
x = np.array(x, dtype=np.float32)
x += config[1]
x = x[:, :, config[0]]
x = image_normalization(x)
I[i,:,:,:]=x
elif len(I.shape)==3 and I.shape[-1]==3:
I = np.array(I, dtype=np.float32)
I += config[1]
I = I[:, :, config[0]]
I = image_normalization(I)
else:
print("Sorry the input data size is out of our configuration")
# print("The enterely I data {} restored".format(I.shape))
return I
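# Example (sketch, the concrete values are assumptions): with config =
# [[2, 1, 0], [103.939, 116.779, 123.68]], restore_rgb adds the mean pixel values
# back, swaps the channel order (e.g. BGR -> RGB) and rescales each image to [0, 255].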
def visualize_result(imgs_list, arg):
"""
function for tensorflow results
:param imgs_list: a list of prediction, gt and input data
:param arg:
:return: one image with the whole of imgs_list data
"""
n_imgs = len(imgs_list)
data_list =[]
for i in range(n_imgs):
tmp = imgs_list[i]
if tmp.shape[1]==3:
tmp = np.transpose(np.squeeze(tmp),[1,2,0])
tmp=restore_rgb([arg.channel_swap,arg.mean_pixel_values[:3]],tmp)
tmp = np.uint8(image_normalization(tmp))
else:
tmp= np.squeeze(tmp)
if len(tmp.shape) == 2:
tmp = np.uint8(image_normalization(tmp))
tmp = cv.bitwise_not(tmp)
tmp = cv.cvtColor(tmp, cv.COLOR_GRAY2BGR)
else:
tmp = np.uint8(image_normalization(tmp))
data_list.append(tmp)
img = data_list[0]
if n_imgs % 2 == 0:
imgs = np.zeros((img.shape[0] * 2 + 10, img.shape[1] * (n_imgs // 2) + ((n_imgs // 2 - 1) * 5), 3))
else:
imgs = np.zeros((img.shape[0] * 2 + 10, img.shape[1] * ((1 + n_imgs) // 2) + ((n_imgs // 2) * 5), 3))
n_imgs += 1
k=0
imgs = np.uint8(imgs)
i_step = img.shape[0]+10
j_step = img.shape[1]+5
for i in range(2):
for j in range(n_imgs//2):
if k<len(data_list):
imgs[i*i_step:i*i_step+img.shape[0],j*j_step:j*j_step+img.shape[1],:]=data_list[k]
k+=1
else:
pass
return imgs
|
python
|
# Lint as: python3
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Chex: Testing made fun, in JAX!"""
from chex._src.asserts import assert_axis_dimension
from chex._src.asserts import assert_axis_dimension_gt
from chex._src.asserts import assert_devices_available
from chex._src.asserts import assert_equal
from chex._src.asserts import assert_equal_rank
from chex._src.asserts import assert_equal_shape
from chex._src.asserts import assert_equal_shape_prefix
from chex._src.asserts import assert_equal_shape_suffix
from chex._src.asserts import assert_exactly_one_is_none
from chex._src.asserts import assert_gpu_available
from chex._src.asserts import assert_is_broadcastable
from chex._src.asserts import assert_max_traces
from chex._src.asserts import assert_not_both_none
from chex._src.asserts import assert_numerical_grads
from chex._src.asserts import assert_rank
from chex._src.asserts import assert_scalar
from chex._src.asserts import assert_scalar_in
from chex._src.asserts import assert_scalar_negative
from chex._src.asserts import assert_scalar_non_negative
from chex._src.asserts import assert_scalar_positive
from chex._src.asserts import assert_shape
from chex._src.asserts import assert_tpu_available
from chex._src.asserts import assert_tree_all_close
from chex._src.asserts import assert_tree_all_equal_comparator
from chex._src.asserts import assert_tree_all_equal_shapes
from chex._src.asserts import assert_tree_all_equal_structs
from chex._src.asserts import assert_tree_all_finite
from chex._src.asserts import assert_tree_no_nones
from chex._src.asserts import assert_tree_shape_prefix
from chex._src.asserts import assert_type
from chex._src.asserts import clear_trace_counter
from chex._src.asserts import if_args_not_none
from chex._src.dataclass import dataclass
from chex._src.dataclass import mappable_dataclass
from chex._src.fake import fake_jit
from chex._src.fake import fake_pmap
from chex._src.fake import fake_pmap_and_jit
from chex._src.fake import set_n_cpu_devices
from chex._src.pytypes import Array
from chex._src.pytypes import ArrayBatched
from chex._src.pytypes import ArrayDevice
from chex._src.pytypes import ArrayNumpy
from chex._src.pytypes import ArraySharded
from chex._src.pytypes import ArrayTree
from chex._src.pytypes import CpuDevice
from chex._src.pytypes import Device
from chex._src.pytypes import GpuDevice
from chex._src.pytypes import Numeric
from chex._src.pytypes import PRNGKey
from chex._src.pytypes import Scalar
from chex._src.pytypes import Shape
from chex._src.pytypes import IpuDevice
from chex._src.variants import all_variants
from chex._src.variants import ChexVariantType
from chex._src.variants import params_product
from chex._src.variants import TestCase
from chex._src.variants import variants
__version__ = "0.0.5"
__all__ = (
"all_variants",
"Array",
"ArrayBatched",
"ArrayDevice",
"ArrayNumpy",
"ArraySharded",
"ArrayTree",
"assert_axis_dimension",
"assert_axis_dimension_gt",
"assert_devices_available",
"assert_equal",
"assert_equal_rank",
"assert_equal_shape",
"assert_equal_shape_prefix",
"assert_equal_shape_suffix",
"assert_exactly_one_is_none",
"assert_gpu_available",
"assert_is_broadcastable",
"assert_max_traces",
"assert_not_both_none",
"assert_numerical_grads",
"assert_rank",
"assert_scalar",
"assert_scalar_in",
"assert_scalar_negative",
"assert_scalar_non_negative",
"assert_scalar_positive",
"assert_shape",
"assert_tpu_available",
"assert_tree_all_close",
"assert_tree_all_equal_comparator",
"assert_tree_all_equal_shapes",
"assert_tree_all_equal_structs",
"assert_tree_all_finite",
"assert_tree_no_nones",
"assert_tree_shape_prefix",
"assert_type",
"ChexVariantType",
"clear_trace_counter",
"CpuDevice",
"dataclass",
"Device",
"fake_jit",
"fake_pmap",
"fake_pmap_and_jit",
"GpuDevice",
"if_args_not_none",
"mappable_dataclass",
"Numeric",
"params_product",
"PRNGKey",
"Scalar",
"set_n_cpu_devices",
"Shape",
"TestCase",
"IpuDevice",
"variants",
)
# _________________________________________
# / Please don't use symbols in `_src` they \
# \ are not part of the Chex public API. /
# -----------------------------------------
# \ ^__^
# \ (oo)\_______
# (__)\ )\/\
# ||----w |
# || ||
#
|
python
|
#!/usr/bin/env python
from __future__ import print_function
import math
import rospy
from geometry_msgs.msg import Twist
from nav_msgs.msg import Odometry
from b2_logic.nodes.pilot import PilotNode, PVelocityController
from b2.msg import Proximity
DEFAULT_NODE_NAME = "pilot_node"
# Subscribes
DEFAULT_PROXIMITY_TOPIC = "ir_sensors/proximity"
DEFAULT_ODOMETRY_TOPIC = "base_node/odom"
# Publishes
DEFAULT_CMD_TOPIC = "base_node/cmd_vel"
DEFAULT_LOOP_HZ = 5 # hertz
DEFAULT_MAX_FWD_SPEED = 0.5 # m/sec
DEFAULT_MIN_FWD_SPEED = 0.1 # m/sec
DEFAULT_MAX_TURN_SPEED = math.pi / 4 # radians/sec
DEFAULT_MIN_TURN_SPEED = 0.1 # radians/sec
DEFAULT_TURN_DEGREES = 90 # degrees, will be converted to radians
DEFAULT_TURN_DEGREE_TOLERANCE = 5 # degrees, will be converted to radians
DEFAULT_LINEAR_K = 1
DEFAULT_ANGULAR_K = 1 # K constant for angular P controller
if __name__ == "__main__":
rospy.init_node(DEFAULT_NODE_NAME, log_level=rospy.DEBUG)
node_name = rospy.get_name()
loophz = rospy.get_param("~loop_hz", DEFAULT_LOOP_HZ)
max_fwd_speed = rospy.get_param("~max_fwd_speed", DEFAULT_MAX_FWD_SPEED)
min_fwd_speed = rospy.get_param("~min_fwd_speed", DEFAULT_MIN_FWD_SPEED)
max_turn_speed = rospy.get_param("~max_turn_speed", DEFAULT_MAX_TURN_SPEED)
min_turn_speed = rospy.get_param("~min_turn_speed", DEFAULT_MIN_TURN_SPEED)
turn_radians = math.radians(rospy.get_param("~turn_degrees", DEFAULT_TURN_DEGREES))
turn_radians_tolerance = math.radians(
rospy.get_param("~turn_degree_tolerance", DEFAULT_TURN_DEGREE_TOLERANCE))
linear_k = rospy.get_param("~linear_k", DEFAULT_LINEAR_K)
angular_k = rospy.get_param("~angular_k", DEFAULT_ANGULAR_K)
# P-Controller
pcontroller = PVelocityController(
min_fwd_speed, max_fwd_speed,
min_turn_speed, max_turn_speed,
linear_k=linear_k, angular_k=angular_k
)
# Publishes
cmd_vel_pub = rospy.Publisher(
rospy.get_param('~cmd_topic', DEFAULT_CMD_TOPIC),
Twist,
queue_size=1
)
node = PilotNode(loophz, turn_radians, turn_radians_tolerance, cmd_vel_pub, pcontroller)
# Subscribes
rospy.Subscriber(
rospy.get_param("~proximity_topic", DEFAULT_PROXIMITY_TOPIC),
Proximity,
node.prox_callback
)
# Subscribes
rospy.Subscriber(
rospy.get_param("~odom_topic", DEFAULT_ODOMETRY_TOPIC),
Odometry,
node.odom_callback
)
node.run()
|
python
|
from aws_cdk import (
aws_s3 as s3,
core
)
class CdkExampleS3Stack(core.Stack):
def __init__(self, scope: core.Construct, construct_id: str, **kwargs) -> None:
super().__init__(scope, construct_id, **kwargs)
bucket = s3.Bucket(
self, "mytestbucket",
bucket_name="mytestbucket.mydomain.com"
)
bucket.node.find_child('Resource').cfn_options.deletion_policy = core.CfnDeletionPolicy.DELETE
|
python
|
"""
opencadd.io.core
Defines the base class for the io module.
"""
from pathlib import Path
class _Base:
"""
Base class for io classes.
"""
def __init__(self):
"""
This class contains no __init__ initialization.
"""
        raise RuntimeError("This class only supports initialization from classmethods.")
@classmethod
def from_file(cls, filepath, **kwargs):
"""
Parse a structure from a file into different output data types.
Parameters
----------
filepath : pathlib.Path or str
Path to file.
**kwargs
Arbitrary keyword arguments.
"""
raise NotImplementedError("Implement in your subclass!")
@classmethod
def from_text(cls, text, **kwargs):
"""
Parse a structure from a string (text) into different output data types.
Parameters
----------
text : str
Structure file text.
**kwargs
Arbitrary keyword arguments.
"""
raise NotImplementedError("Implement in your subclass!")
@classmethod
def _file_to_text(cls, filepath):
"""
Get content (text) from file.
Parameters
----------
filepath : pathlib.Path or str
Path to file.
Returns
-------
str
File content (text).
"""
filepath = cls._convert_filepath(filepath)
with open(filepath, "r") as f:
text = f.read()
return text
@classmethod
def _convert_filepath(cls, filepath):
"""
Convert a filepath.
Parameters
----------
filepath : pathlib.Path or str
Path to file.
Returns
-------
pathlib.Path
Path to file.
Raises
------
FileNotFoundError
Raised if file does not exist.
"""
filepath = Path(filepath)
if not filepath.exists():
raise FileNotFoundError(f"File {filepath} does not exist.")
return filepath
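# Minimal subclass sketch (hypothetical class, toy output type) illustrating the
# classmethod-only construction pattern enforced by _Base above:
class _PlainText(_Base):
    @classmethod
    def from_file(cls, filepath, **kwargs):
        # reuse the shared helper to read the file, then delegate to from_text
        return cls.from_text(cls._file_to_text(filepath), **kwargs)
    @classmethod
    def from_text(cls, text, **kwargs):
        # toy output data type: a list of lines
        return text.splitlines()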
|
python
|
from django.test import TestCase
from django.template import Template, Context
class TemplateTagTest(TestCase):
def setUp(self):
pass
def test_tag(self):
content = (
"{% load logical_rules_tags %}"
"This is a {% testrule test_is_pizza 'pizza' %}test{% endtestrule %}"
"{% testrule test_is_pizza 'calzone' %}So is this...{% endtestrule %}"
)
expected = "This is a test"
r = Template(content).render(Context({}))
self.assertEqual(expected, r)
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*
import urllib2
import urllib
import cookielib
import re
from config import WEIBO_ACCOUNT, WEIBO_PWD
class Fetcher(object):
def __init__(self, username=None, pwd=None, cookie_filename=None):
self.cj = cookielib.LWPCookieJar()
if cookie_filename is not None:
self.cj.load(cookie_filename)
self.cookie_processor = urllib2.HTTPCookieProcessor(self.cj)
self.opener = urllib2.build_opener(self.cookie_processor, urllib2.HTTPHandler)
urllib2.install_opener(self.opener)
self.username = username
self.pwd = pwd
self.headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; rv:14.0) Gecko/20100101 Firefox/14.0.1',
'Referer': '', 'Content-Type': 'application/x-www-form-urlencoded'}
def get_rand(self, url):
urlre = re.compile(r'form action="([^<>\/].+?)"')
vkre = re.compile(r'name="vk" value="([^<>\/].+?)"')
passwdre = re.compile(r'type="password" name="([^<>\/].+?)"')
headers = {'User-Agent': 'Mozilla/5.0 (Windows;U;Windows NT 5.1;zh-CN;rv:1.9.2.9)Gecko/20100824 Firefox/3.6.9', 'Referer': ''}
req = urllib2.Request(url, "", headers)
login_page = urllib2.urlopen(req).read()
url = urlre.findall(login_page)[0]
passwd = passwdre.findall(login_page)[0]
vk = vkre.findall(login_page)[0]
return url, passwd, vk
def login(self, username=None, pwd=None, cookie_filename=None, content=None):
if self.username is None or self.pwd is None:
self.username = username
self.pwd = pwd
url = 'http://login.weibo.cn/login/?ns=1&revalid=2&backURL=http%3A%2F%2Fweibo.cn%2F%3Ffrom%3Dindex%26rl%3D1%26luicode%3D20000173&backTitle=%CE%A2%B2%A9&vt=4'
        # Get the random value (rand), the password field name and the vk value from the login page
url, passwd, vk = self.get_rand(url)
url = 'http://login.weibo.cn/login/' + url
data = urllib.urlencode({'mobile': self.username,
passwd: self.pwd,
'backURL': 'http%3A%2F%2Fweibo.cn%2F%3Ffrom%3Dindex%26amp%3Brl%3D1%26amp%3Bluicode%3D20000173',
'backTitle': '微博',
'tryCount': '',
'vk': vk,
'submit': '登录', })
        # Simulate submitting the login form
self.fetch(url, data)
page = self.fetch("http://weibo.cn/", data)
linkre = re.compile(r'sendmblog([^<>\/].+?)"')
link = "http://weibo.cn/mblog/sendmblog" + linkre.findall(page)[0]
data = urllib.urlencode({'rl': '1,', 'content': content})
page = self.fetch(link, data)
return page.find('发布成功') > 0
def fetch(self, url, data):
req = urllib2.Request(url, data, headers=self.headers)
return urllib2.urlopen(req).read()
weibo = Fetcher()
def format(school, card_id, name, note=None, contact_name=None):
if int(school) == 1:
school_tag = "#南开大学失物招领#"
elif int(school) == "2":
school_tag = "#天津大学失物招领#"
elif int(school) == "4":
school_tag = "#河工大失物招领#"
else:
school_tag = ''
if contact_name:
picker = "{0}同学".format(contact_name.encode('utf8'))
else:
picker = '"雷锋同志"'
message = "#云印南天校园卡招领中心#{0}*{1}*同学卡号为{2}的校园卡已经被{3}捡到,凭学号进入find.yunyin.org查看详细招领内容".format(school_tag, name.encode('utf8'), card_id, picker)
if note:
message = "%s 补充:%s" % (message, note)
return message
def post(text):
return weibo.login(WEIBO_ACCOUNT, WEIBO_PWD, "", text)
|
python
|
"""
set.pop() removes and returns an arbitrary element,
since a set is an unordered collection
"""
from prescription_data import patients
trial_patients = {'Denise', 'Eddie', 'Frank', 'Georgia', 'Kenny'}
while trial_patients:
patient = trial_patients.pop()
print(patient, end=':')
prescription = patients[patient]
print(prescription)
|
python
|
# Copyright 2016-2017 SAP SE
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
from swift_health_statsd.ipc import check_output
from swift_health_statsd.collector import Collector
log = logging.getLogger(__name__)
class SwiftDispersionCollector(Collector):
def metric_name_prefix(self):
return "swift_dispersion"
def logger(self):
return log
def collector_steps(self):
return { "dispersion": self.__collect }
def __collect(self):
cmd = " ".join((self.config.dispersion_report_path, '-j'))
out = check_output(cmd, timeout=30)
# swift-dispersion-report on Liberty prints an initial line "Using
# storage policy: default", so look for the first line that contains
# the actual JSON
while "\n" in out and not out.lstrip().startswith('{'):
# remove first line
out = out.split("\n", 1).pop()
data = json.loads(out)
for server in ['object', 'container']:
counts = data.get(server, {})
expected = counts.get('copies_expected')
found = counts.get('copies_found')
if expected is None or found is None:
missing = None
else:
missing = expected - found
self.submit(server + '_copies_expected', expected)
self.submit(server + '_copies_found', found)
self.submit(server + '_copies_missing', missing)
self.submit(server + '_overlapping', counts.get('overlapping'))
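# Sketch of the JSON this collector expects from "swift-dispersion-report -j"
# (field names taken from the code above, the numbers are hypothetical):
#   {"object": {"copies_expected": 768, "copies_found": 767, "overlapping": 0},
#    "container": {"copies_expected": 120, "copies_found": 120, "overlapping": 0}}
# from which e.g. object_copies_missing is reported as expected - found = 1.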
|
python
|
import argparse
import pymongo
import uuid
import datetime
import pprint
import copy
from datetime import timedelta
from starflyer import ScriptBase
from camper.app import markdownify
from logbook import Logger
log = Logger('Migration')
def daterange(start_date, end_date):
for n in range(int ((end_date - start_date).days)+1):
yield start_date + timedelta(n)
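# Example: daterange(date(2013, 5, 1), date(2013, 5, 3)) yields May 1, May 2 and May 3;
# the end date is inclusive because of the "+1" above.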
class MigrateBarcamps(ScriptBase):
"""script for migrating barcamps to new event format"""
def fix_event(self, e):
"""add all the missing fields to the initial event"""
size = self.barcamp['size']
e = copy.copy(e) # better work with a copy
if not e.has_key("_id"):
e['_id'] = unicode(uuid.uuid4())
log.info("id added,")
if not e.has_key("size"):
e['size'] = size
log.info("size added,")
if not e.has_key("timetable"):
e['timetable'] = {}
log.info("timetable added")
if not e.has_key("maybe"):
e['maybe'] = []
log.info("maybe added",)
if not e.has_key("own_location"):
e['own_location'] = False
log.info("own location set")
e['description'] = markdownify(e['description'])
return e
def generate_events(self, e):
"""use the given event and generate copies of that for every day of the barcamp"""
if e['start_date'] is None:
events = {}
else:
events = {}
for single_date in daterange(e['start_date'], e['end_date']):
e = copy.copy(e)
e['_id'] = unicode(uuid.uuid4())
e['date'] = single_date
e['start_time'] = "8:00"
e['end_time'] = "18:00"
events[e['_id']] = e
self.barcamp['events'] = events
return events
def fix_location(self, e):
"""move the location from the event to the barcamp"""
self.barcamp['location'] = e['location']
self.barcamp['location']['country'] = self.barcamp['location']['country'].upper()
log.info("location copied")
def fix_design(self):
"""fix the barcamp design"""
self.barcamp.update({
'background_color' : '#fcfcfa',
'link_color' : '#337CBB',
'text_color' : '#333',
'header_color' : '#fff',
'navbar_link_color' : '#888',
'navbar_active_bg' : '#555',
'navbar_active_color' : '#eee',
'navbar_border_color' : '#f0f0f0',
'navbar_hover_bg' : '#f8f8f8'
})
def __call__(self):
barcamps = self.app.config.dbs.db.barcamps
# we loop over raw data because the format cannot be serialized yet
for b in barcamps.find():
self.barcamp = b
log.info("converting %s" %b['name'])
# fix the single event to contain all necessary new fields
e = b['events'][0]
e = self.fix_event(e)
self.fix_design()
# generate the events
self.generate_events(e)
# fix location (move it from event to barcamp)
self.fix_location(e)
# convert all markdown to html
b['description'] = markdownify(b['description'])
log.info("converted description")
pages = self.app.config.dbs.pages.for_slot("menu", barcamp=self.barcamp)
for page in pages:
page.content = markdownify(page.content)
page.save()
log.info("converted page %s" %page.title)
barcamps.save(b)
log.info("barcamp %s saved" %self.barcamp['name'])
def migrate_barcamps(*args, **kwargs):
s = MigrateBarcamps()
s()
|
python
|
import json
from datetime import datetime
import time
import PySimpleGUI as sg
import os
import keyring # for password storage
import hashlib
# import all the custom functions
from functionalities import *
# import all the window formats
from windows import *
if __name__ == "__main__" :
# check if the memoir file exists
# OR
# if the memoir file is deliberately deleted, create an empty list to store future data
if not os.path.exists("memoir.json") or os.path.getsize("memoir.json") == 0:
# if is does not exists, create a new one
with open("memoir.json", "w") as f :
f.write(json.dumps([]))
# read the settings file
settings = read_settings()
# check if there is any password set or not
password = keyring.get_password('e-memoir', 'user')
# if there is a password, run the login screen and get if the password was correct or not
# if there is no password, just input True in run_app variable
theme = settings[5]['theme']
run_app = loginWindow_fn(theme, password, settings) if password else True
# run the app only if the password was right or there was no password at all
if run_app :
# need the proper date format
temp_dic = {"28-11-1999": "%d-%m-%Y", "28-Nov-1999": "%d-%b-%Y", "28-November-1999": "%d-%B-%Y",
"11-28-1999": "%m-%d-%Y", "Nov-28-1999": "%b-%d-%Y", "November-28-1999": "%B-%d-%Y",
"1999-11-28": "%Y-%m-%d", "1999-Nov-28": "%Y-%b-%d", "1999-November-28": "%Y-%B-%d"}
# replace the date format
date_format = temp_dic[settings[3]['date_format']]
colors = ['Blue', 'Red', 'Yellow', 'Orange', 'Pink', 'Green', 'Violet']
sg.ChangeLookAndFeel(theme)
sg.SetOptions(element_padding=((4,4),2))
tabgrp = mainWindowLayout_fn(date_format, settings, temp_dic, have_password=True if password else False)
#Define Window
window =sg.Window("E-Memoir",tabgrp, icon=settings[6]['icon_path'])
while True: # Event Loop
event, values = window.Read()
if event in (None, "Close") :
break
if values['message'] :
write_functionality(window, event, values, settings, date_format)
            # if no message is entered but the Save or Show Preview button is clicked anyway
elif event in ("Save", "Show Preview") :
window['_output_'].update("No message entered.....")
# if quit after saving memoir is enabled, quit the program after a specified delay
if event == 'Save' and settings[1]['quit_after_saving_memoir'] :
                window['_output_'].update('Memoir saved. Quitting... See you tomorrow :)')
time.sleep(settings[1]['delay'])
break
# if the show memoirs button is clicked
if event == "Show Memoirs" :
# if the dates are entered
if values['-FROM-'] and values['-TO-'] :
read_functionality(window, values, date_format, int(settings[4]["max_records_to_display"]))
else :
window['_memoirs_'].update('Please select the From and To dates.......')
if event in ("Save settings", "Reset settings") :
update_settings(window, event, values, settings, temp_dic)
if event in ("Deletion Preview", "Delete Records") :
delete_functionality(window, event, values, date_format, int(settings[4]["max_records_to_display"]), delete_all=values['delete-all'])
if event == "Themes Preview" :
# open a window with the theme preview
themes_preview_fn(settings[6]['theme_path'])
if event == "Change Password" :
# open the change password window
if change_password_fn(theme, settings) :
window['_status_'].update('Password changed successfully')
if event == "Remove Password" :
# open the remove password window
if remove_password_fn(theme, settings) :
break
            # since the layout is not refreshed after setting a password,
            # the user could click this again and set another password,
            # which should not happen (that is the job of Change Password)
if event == "Set Password" :
if keyring.get_password('e-memoir', 'user'):
# the password has already been set
window['_status_'].update('Password has already been set.')
elif set_password_fn(theme, settings):
# open the set password window, set the password and show the message
window['_status_'].update("Password set successfully.")
# update the color of the button
window['-color-'].update(button_color=('white', values['color-text']))
#access all the values and if selected add them to a string
window.close()
|
python
|
#To test how fast rotations.py is
from rotations import *
import numpy as np
from timeit import default_timer as timer
ranx5 = np.random.randint(10,size=(100000,3))
ranx6 = np.random.randint(10,size=(1000000,3))
ranx7 = np.random.randint(10,size=(10000000,3))
ranx8 = np.random.randint(10,size=(100000000,3))
start = timer()
vrotate(ranx5, 40, [4,2,3])
end = timer()
timex5 = end-start
start = timer()
vrotate(ranx6, 40, [4,2,3])
end = timer()
timex6 = end-start
start = timer()
vrotate(ranx7, 40, [4,2,3])
end = timer()
timex7 = end-start
start = timer()
vrotate(ranx8, 40, [4,2,3])
end = timer()
timex8 = end-start
print(timex5, timex6, timex7, timex8)
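# A more compact way to run the same benchmark (illustrative sketch only, same behaviour):
#   for arr in (ranx5, ranx6, ranx7, ranx8):
#       start = timer()
#       vrotate(arr, 40, [4, 2, 3])
#       print(arr.shape[0], timer() - start)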
|
python
|
from os import getenv, getppid, kill, system
from configparser import ConfigParser
from typing import Union, Optional
from psutil import process_iter
from datetime import datetime
from subprocess import Popen
from signal import SIGHUP
from pathlib import Path
from sys import platform
from re import match
from typer import Abort, echo, style
from inquirer import prompt, List
from urllib.request import urlopen
from bs4 import BeautifulSoup
from appdirs import AppDirs
def print_error(error: str) -> None:
    echo(style(f'\nERROR:\n{error}', fg='red'), err=True)
def print_fatal(error: str) -> None:
    echo(style(f'\nFATAL:\n{error}', fg='red', bg='bright_white'), err=True)
def print_warning(msg: str) -> None:
    echo(style(f'\nWARNING:\n{msg}', fg='magenta'))
def print_prompt(msg: str) -> None:
    echo(style(f'\n{msg}', fg='bright_yellow'))
def print_success(msg: str) -> None:
    echo(style(f'\n{msg}', fg='bright_green'))
def open_and_write(path: str, content: str) -> None:
with open(path, 'w', encoding='UTF-8') as file:
file.write(content)
def check_and_fix_path(path: str) -> Union[str, bool]:
path_copy: str = path
if platform == 'linux' or platform == 'linux2':
if bool(match(r'[~](\/\w+)+', path)):
path: str = path[1:]
path: str = getenv('HOME') + path
elif bool(match(r'home\/\w+\/', path)):
if path[0] != '/':
path: str = '/' + path
if path.find(getenv('HOME'), 0) != 0:
return False
if Path(path).exists():
return path
return path_copy
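# Illustrative behaviour of check_and_fix_path on Linux (assumption, comment only):
#   check_and_fix_path('~/projects/app') -> '/home/<user>/projects/app' when that path exists;
#   otherwise the original string is returned, and paths under another user's home yield False.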
def open_vscode(path: str) -> None:
    options: tuple[str, ...] = (
'Nothing',
'Close terminal and open it in VSCode',
'Open VSCode'
)
answer: dict\
= prompt([List(name='action', message="What do you want to do with the project?", choices=options)])
if answer['action'] == options[1]:
system(f'code {path}')
kill(getppid(), SIGHUP)
elif answer['action'] == options[2]:
system(f'code {path}')
def check_and_put_singlequotes(value: str) -> str:
if value is not None:
if value.find('\'') != -1:
value: str = value.replace('\'', '\'\'')
return value
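# e.g. check_and_put_singlequotes("it's") -> "it''s" (SQL-style single-quote escaping; illustrative)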
def reload_tasker_process() -> None:
pid: int = int(user_config('processes', 'tasker'))
for proc in process_iter():
if proc.pid == pid:
proc.kill()
try:
user_config(
section='processes',
option='tasker',
element='%d' % Popen(['/usr/sbin/vert/tasker']).pid,
write=True
)
return
except Exception as error:
print_fatal(error)
print_warning('Tasker process was not found')
raise Abort()
def run_tasker_process() -> None:
pid: int = int(user_config('processes', 'tasker'))
for proc in process_iter():
if proc.pid == pid:
            print_warning('Tasker is already running; starting more processes would hurt performance')
raise Abort()
try:
user_config(
section='processes',
option='tasker',
element='%d' % Popen(['/usr/sbin/vert/tasker']).pid,
write=True
)
return
except Exception as error:
print_fatal(error)
def kill_tasker_process() -> None:
pid: int = int(user_config('processes', 'tasker'))
for proc in process_iter():
if proc.pid == pid:
proc.kill()
return
print_warning('Tasker process was not found')
raise Abort()
def html_title_extractor(url: str) -> Union[str, bool]:
try:
soup = BeautifulSoup(urlopen(url), features='html5lib')
return soup.title.get_text()
except Exception as _:
return False
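# e.g. html_title_extractor('https://example.com') may return 'Example Domain'
# (network-dependent; any failure returns False) -- illustrative only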
def user_config(section: str, option: str, element: Optional[str] = None, write: Optional[bool] = False) -> Union[str, None]:
config: object = ConfigParser()
appdir: object = AppDirs()
appdir.appname = 'vert'
ini_file: str = '%s/config.ini' % (appdir.user_config_dir)
if Path(appdir.user_config_dir).exists() and Path(ini_file).exists():
config.read(ini_file)
if not write:
return config[section][option]
if section not in config.sections():
config.add_section(section)
if element and option:
config.set(section, option, element)
with open(ini_file, 'w', encoding='UTF-8') as file:
config.write(file)
else:
if not Path(appdir.user_config_dir).exists():
Path(appdir.user_config_dir).mkdir()
Path(ini_file).touch()
        config.read(ini_file)
        config.add_section('database')
        config.set('database', 'dsn',
                   'dbname=<> user=<> password=<> host=<> port=<>')
        config.add_section('path')
        config.set('path', 'workspace', '/home/user/path/to/workspace')
        with open(ini_file, 'w', encoding='UTF-8') as file:
            config.write(file)
        # retry now that a default config file has been written
        return user_config(section, option, element, write)
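# Illustrative usage of user_config (assumption, comment only):
#   workspace = user_config('path', 'workspace')                  # read a value
#   user_config('path', 'workspace', '/home/me/dev', write=True)  # write a value
# On first use a default config.ini (typically ~/.config/vert/config.ini on Linux) is created
# and the call is retried.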
def date_format(dt: Union[datetime, str]) -> str:
if type(dt) is str:
return datetime.strptime(dt, '%Y-%m-%d %H:%M:%S.%f').strftime('%B %m, %Y')
return dt.strftime('%B %m, %Y')
def time_format(dt: Union[datetime, str]) -> str:
if type(dt) is str:
return datetime.strptime(dt, '%Y-%m-%d %H:%M:%S.%f').strftime('%H:%M')
return dt.strftime('%H:%M')
if __name__ == '__main__':
pass
|
python
|
import stanza
from stanza.protobuf import DependencyEnhancerRequest, Document, Language
from stanza.server.java_protobuf_requests import send_request, add_sentence, JavaProtobufContext
ENHANCER_JAVA = "edu.stanford.nlp.trees.ud.ProcessUniversalEnhancerRequest"
def build_enhancer_request(doc, language, pronouns_pattern):
if bool(language) == bool(pronouns_pattern):
raise ValueError("Should set exactly one of language and pronouns_pattern")
request = DependencyEnhancerRequest()
if pronouns_pattern:
request.setRelativePronouns(pronouns_pattern)
elif language.lower() in ("en", "english"):
request.language = Language.UniversalEnglish
elif language.lower() in ("zh", "zh-hans", "chinese"):
request.language = Language.UniversalChinese
else:
raise ValueError("Sorry, but language " + language + " is not supported yet. Either set a pronouns pattern or file an issue at https://stanfordnlp.github.io/stanza suggesting a mechanism for converting this language")
request_doc = request.document
request_doc.text = doc.text
num_tokens = 0
for sent_idx, sentence in enumerate(doc.sentences):
request_sentence = add_sentence(request_doc.sentence, sentence, num_tokens)
num_tokens = num_tokens + sum(len(token.words) for token in sentence.tokens)
graph = request_sentence.basicDependencies
nodes = []
word_index = 0
for token in sentence.tokens:
for word in token.words:
# TODO: refactor with the bit in java_protobuf_requests
word_index = word_index + 1
node = graph.node.add()
node.sentenceIndex = sent_idx
node.index = word_index
if word.head != 0:
edge = graph.edge.add()
edge.source = word.head
edge.target = word_index
edge.dep = word.deprel
return request
def process_doc(doc, language=None, pronouns_pattern=None):
request = build_enhancer_request(doc, language, pronouns_pattern)
return send_request(request, Document, ENHANCER_JAVA, "$CLASSPATH")
class UniversalEnhancer(JavaProtobufContext):
"""
UniversalEnhancer context window
This is a context window which keeps a process open. Should allow
for multiple requests without launching new java processes each time.
"""
def __init__(self, language=None, pronouns_pattern=None, classpath=None):
super(UniversalEnhancer, self).__init__(classpath, Document, ENHANCER_JAVA)
if bool(language) == bool(pronouns_pattern):
raise ValueError("Should set exactly one of language and pronouns_pattern")
self.language = language
self.pronouns_pattern = pronouns_pattern
def process(self, doc):
request = build_enhancer_request(doc, self.language, self.pronouns_pattern)
return self.process_request(request)
def main():
nlp = stanza.Pipeline('en',
processors='tokenize,pos,lemma,depparse')
with UniversalEnhancer(language="en") as enhancer:
doc = nlp("This is the car that I bought")
result = enhancer.process(doc)
print(result.sentence[0].enhancedDependencies)
if __name__ == '__main__':
main()
|
python
|
from .pygbe import *
|
python
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
__all__ = ['ServiceEndpointPolicyDefinitionInitArgs', 'ServiceEndpointPolicyDefinition']
@pulumi.input_type
class ServiceEndpointPolicyDefinitionInitArgs:
def __init__(__self__, *,
resource_group_name: pulumi.Input[str],
service_endpoint_policy_name: pulumi.Input[str],
description: Optional[pulumi.Input[str]] = None,
id: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
service: Optional[pulumi.Input[str]] = None,
service_endpoint_policy_definition_name: Optional[pulumi.Input[str]] = None,
service_resources: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
"""
The set of arguments for constructing a ServiceEndpointPolicyDefinition resource.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[str] service_endpoint_policy_name: The name of the service endpoint policy.
:param pulumi.Input[str] description: A description for this rule. Restricted to 140 chars.
:param pulumi.Input[str] id: Resource ID.
:param pulumi.Input[str] name: The name of the resource that is unique within a resource group. This name can be used to access the resource.
:param pulumi.Input[str] service: Service endpoint name.
:param pulumi.Input[str] service_endpoint_policy_definition_name: The name of the service endpoint policy definition name.
:param pulumi.Input[Sequence[pulumi.Input[str]]] service_resources: A list of service resources.
"""
pulumi.set(__self__, "resource_group_name", resource_group_name)
pulumi.set(__self__, "service_endpoint_policy_name", service_endpoint_policy_name)
if description is not None:
pulumi.set(__self__, "description", description)
if id is not None:
pulumi.set(__self__, "id", id)
if name is not None:
pulumi.set(__self__, "name", name)
if service is not None:
pulumi.set(__self__, "service", service)
if service_endpoint_policy_definition_name is not None:
pulumi.set(__self__, "service_endpoint_policy_definition_name", service_endpoint_policy_definition_name)
if service_resources is not None:
pulumi.set(__self__, "service_resources", service_resources)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The name of the resource group.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="serviceEndpointPolicyName")
def service_endpoint_policy_name(self) -> pulumi.Input[str]:
"""
The name of the service endpoint policy.
"""
return pulumi.get(self, "service_endpoint_policy_name")
@service_endpoint_policy_name.setter
def service_endpoint_policy_name(self, value: pulumi.Input[str]):
pulumi.set(self, "service_endpoint_policy_name", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
A description for this rule. Restricted to 140 chars.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter
def id(self) -> Optional[pulumi.Input[str]]:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@id.setter
def id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "id", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the resource that is unique within a resource group. This name can be used to access the resource.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def service(self) -> Optional[pulumi.Input[str]]:
"""
Service endpoint name.
"""
return pulumi.get(self, "service")
@service.setter
def service(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "service", value)
@property
@pulumi.getter(name="serviceEndpointPolicyDefinitionName")
def service_endpoint_policy_definition_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the service endpoint policy definition name.
"""
return pulumi.get(self, "service_endpoint_policy_definition_name")
@service_endpoint_policy_definition_name.setter
def service_endpoint_policy_definition_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "service_endpoint_policy_definition_name", value)
@property
@pulumi.getter(name="serviceResources")
def service_resources(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
A list of service resources.
"""
return pulumi.get(self, "service_resources")
@service_resources.setter
def service_resources(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "service_resources", value)
class ServiceEndpointPolicyDefinition(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
description: Optional[pulumi.Input[str]] = None,
id: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
service: Optional[pulumi.Input[str]] = None,
service_endpoint_policy_definition_name: Optional[pulumi.Input[str]] = None,
service_endpoint_policy_name: Optional[pulumi.Input[str]] = None,
service_resources: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
__props__=None):
"""
Service Endpoint policy definitions.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] description: A description for this rule. Restricted to 140 chars.
:param pulumi.Input[str] id: Resource ID.
:param pulumi.Input[str] name: The name of the resource that is unique within a resource group. This name can be used to access the resource.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[str] service: Service endpoint name.
:param pulumi.Input[str] service_endpoint_policy_definition_name: The name of the service endpoint policy definition name.
:param pulumi.Input[str] service_endpoint_policy_name: The name of the service endpoint policy.
:param pulumi.Input[Sequence[pulumi.Input[str]]] service_resources: A list of service resources.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: ServiceEndpointPolicyDefinitionInitArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Service Endpoint policy definitions.
:param str resource_name: The name of the resource.
:param ServiceEndpointPolicyDefinitionInitArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(ServiceEndpointPolicyDefinitionInitArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
description: Optional[pulumi.Input[str]] = None,
id: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
service: Optional[pulumi.Input[str]] = None,
service_endpoint_policy_definition_name: Optional[pulumi.Input[str]] = None,
service_endpoint_policy_name: Optional[pulumi.Input[str]] = None,
service_resources: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = ServiceEndpointPolicyDefinitionInitArgs.__new__(ServiceEndpointPolicyDefinitionInitArgs)
__props__.__dict__["description"] = description
__props__.__dict__["id"] = id
__props__.__dict__["name"] = name
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["service"] = service
__props__.__dict__["service_endpoint_policy_definition_name"] = service_endpoint_policy_definition_name
if service_endpoint_policy_name is None and not opts.urn:
raise TypeError("Missing required property 'service_endpoint_policy_name'")
__props__.__dict__["service_endpoint_policy_name"] = service_endpoint_policy_name
__props__.__dict__["service_resources"] = service_resources
__props__.__dict__["etag"] = None
__props__.__dict__["provisioning_state"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:network/v20200501:ServiceEndpointPolicyDefinition"), pulumi.Alias(type_="azure-native:network:ServiceEndpointPolicyDefinition"), pulumi.Alias(type_="azure-nextgen:network:ServiceEndpointPolicyDefinition"), pulumi.Alias(type_="azure-native:network/v20180701:ServiceEndpointPolicyDefinition"), pulumi.Alias(type_="azure-nextgen:network/v20180701:ServiceEndpointPolicyDefinition"), pulumi.Alias(type_="azure-native:network/v20180801:ServiceEndpointPolicyDefinition"), pulumi.Alias(type_="azure-nextgen:network/v20180801:ServiceEndpointPolicyDefinition"), pulumi.Alias(type_="azure-native:network/v20181001:ServiceEndpointPolicyDefinition"), pulumi.Alias(type_="azure-nextgen:network/v20181001:ServiceEndpointPolicyDefinition"), pulumi.Alias(type_="azure-native:network/v20181101:ServiceEndpointPolicyDefinition"), pulumi.Alias(type_="azure-nextgen:network/v20181101:ServiceEndpointPolicyDefinition"), pulumi.Alias(type_="azure-native:network/v20181201:ServiceEndpointPolicyDefinition"), pulumi.Alias(type_="azure-nextgen:network/v20181201:ServiceEndpointPolicyDefinition"), pulumi.Alias(type_="azure-native:network/v20190201:ServiceEndpointPolicyDefinition"), pulumi.Alias(type_="azure-nextgen:network/v20190201:ServiceEndpointPolicyDefinition"), pulumi.Alias(type_="azure-native:network/v20190401:ServiceEndpointPolicyDefinition"), pulumi.Alias(type_="azure-nextgen:network/v20190401:ServiceEndpointPolicyDefinition"), pulumi.Alias(type_="azure-native:network/v20190601:ServiceEndpointPolicyDefinition"), pulumi.Alias(type_="azure-nextgen:network/v20190601:ServiceEndpointPolicyDefinition"), pulumi.Alias(type_="azure-native:network/v20190701:ServiceEndpointPolicyDefinition"), pulumi.Alias(type_="azure-nextgen:network/v20190701:ServiceEndpointPolicyDefinition"), pulumi.Alias(type_="azure-native:network/v20190801:ServiceEndpointPolicyDefinition"), pulumi.Alias(type_="azure-nextgen:network/v20190801:ServiceEndpointPolicyDefinition"), pulumi.Alias(type_="azure-native:network/v20190901:ServiceEndpointPolicyDefinition"), pulumi.Alias(type_="azure-nextgen:network/v20190901:ServiceEndpointPolicyDefinition"), pulumi.Alias(type_="azure-native:network/v20191101:ServiceEndpointPolicyDefinition"), pulumi.Alias(type_="azure-nextgen:network/v20191101:ServiceEndpointPolicyDefinition"), pulumi.Alias(type_="azure-native:network/v20191201:ServiceEndpointPolicyDefinition"), pulumi.Alias(type_="azure-nextgen:network/v20191201:ServiceEndpointPolicyDefinition"), pulumi.Alias(type_="azure-native:network/v20200301:ServiceEndpointPolicyDefinition"), pulumi.Alias(type_="azure-nextgen:network/v20200301:ServiceEndpointPolicyDefinition"), pulumi.Alias(type_="azure-native:network/v20200401:ServiceEndpointPolicyDefinition"), pulumi.Alias(type_="azure-nextgen:network/v20200401:ServiceEndpointPolicyDefinition"), pulumi.Alias(type_="azure-native:network/v20200601:ServiceEndpointPolicyDefinition"), pulumi.Alias(type_="azure-nextgen:network/v20200601:ServiceEndpointPolicyDefinition"), pulumi.Alias(type_="azure-native:network/v20200701:ServiceEndpointPolicyDefinition"), pulumi.Alias(type_="azure-nextgen:network/v20200701:ServiceEndpointPolicyDefinition"), pulumi.Alias(type_="azure-native:network/v20200801:ServiceEndpointPolicyDefinition"), pulumi.Alias(type_="azure-nextgen:network/v20200801:ServiceEndpointPolicyDefinition"), pulumi.Alias(type_="azure-native:network/v20201101:ServiceEndpointPolicyDefinition"), 
pulumi.Alias(type_="azure-nextgen:network/v20201101:ServiceEndpointPolicyDefinition"), pulumi.Alias(type_="azure-native:network/v20210201:ServiceEndpointPolicyDefinition"), pulumi.Alias(type_="azure-nextgen:network/v20210201:ServiceEndpointPolicyDefinition"), pulumi.Alias(type_="azure-native:network/v20210301:ServiceEndpointPolicyDefinition"), pulumi.Alias(type_="azure-nextgen:network/v20210301:ServiceEndpointPolicyDefinition")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(ServiceEndpointPolicyDefinition, __self__).__init__(
'azure-native:network/v20200501:ServiceEndpointPolicyDefinition',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'ServiceEndpointPolicyDefinition':
"""
Get an existing ServiceEndpointPolicyDefinition resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = ServiceEndpointPolicyDefinitionInitArgs.__new__(ServiceEndpointPolicyDefinitionInitArgs)
__props__.__dict__["description"] = None
__props__.__dict__["etag"] = None
__props__.__dict__["name"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["service"] = None
__props__.__dict__["service_resources"] = None
return ServiceEndpointPolicyDefinition(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def description(self) -> pulumi.Output[Optional[str]]:
"""
A description for this rule. Restricted to 140 chars.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter
def etag(self) -> pulumi.Output[str]:
"""
A unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def name(self) -> pulumi.Output[Optional[str]]:
"""
The name of the resource that is unique within a resource group. This name can be used to access the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[str]:
"""
The provisioning state of the service endpoint policy definition resource.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def service(self) -> pulumi.Output[Optional[str]]:
"""
Service endpoint name.
"""
return pulumi.get(self, "service")
@property
@pulumi.getter(name="serviceResources")
def service_resources(self) -> pulumi.Output[Optional[Sequence[str]]]:
"""
A list of service resources.
"""
return pulumi.get(self, "service_resources")
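# Illustrative usage in a Pulumi program (assumption; the values below are placeholders,
# not part of the generated module):
#   policy_definition = ServiceEndpointPolicyDefinition(
#       "examplePolicyDefinition",
#       resource_group_name="example-rg",
#       service_endpoint_policy_name="example-policy",
#       service="Microsoft.Storage",
#       service_resources=["<subscription-or-resource-id>"],
#   )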
|
python
|
import carla
# explicit imports for names used below; the star import from map_utils may also provide them
import os
import random
import argparse
import numpy as np
import pygame
from leaderboard.envs.map_utils import *
def viz(args):
client = carla.Client('localhost', 2000)
client.set_timeout(10.0)
world = client.load_world(args.town_name)
carla_map = world.get_map()
spawn_points = carla_map.get_spawn_points()
blueprint_library = world.get_blueprint_library()
vehicle_bp = random.choice(blueprint_library.filter('vehicle.lincoln.mkz2017'))
vehicle_bp.set_attribute('role_name', 'hero')
start_pose = random.choice(spawn_points)
player = world.spawn_actor(vehicle_bp, start_pose)
player.set_autopilot(False)
# Setup pygame
os.environ["SDL_VIDEODRIVER"] = "dummy"
pygame.init()
display = pygame.display.set_mode((320, 320),0,32)
pygame.display.flip()
# Set map drawer module
input_module = ModuleInput(MODULE_INPUT)
hud_module = ModuleHUD(MODULE_HUD, 320, 320)
world_module = ModuleWorld(MODULE_WORLD, client, world, carla_map, player)
# Register Modules
module_manager.register_module(world_module)
module_manager.register_module(hud_module)
module_manager.register_module(input_module)
module_manager.start_modules()
# Get map surface
map_surface = world_module.map_image.big_map_surface
map_image = np.swapaxes(pygame.surfarray.array3d(map_surface),0,1)
print (map_image.shape)
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
from matplotlib.patches import Circle
f, ax = plt.subplots(1,1,figsize=(20,15))
ax.imshow(map_image)
for i,spawn_point in enumerate(spawn_points):
pixel_x, pixel_y = world_module.map_image.world_to_pixel(spawn_point.location)
circle = Circle((pixel_x,pixel_y),5,color='red')
ax.add_patch(circle)
ax.text(pixel_x,pixel_y,'%d'%i,fontsize=10,color='blue')
plt.savefig('%s.png'%args.town_name)
print('%s.png'%args.town_name)
player.destroy()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--town-name', default='Town01')
args = parser.parse_args()
viz(args)
|
python
|
import os
import os.path as path
import shutil
import unittest
from cache import Cache, format_as_timestamp
from pathlib import Path
from datetime import datetime, timedelta
test_folder = 'test-files'
db_file_name = path.join(test_folder, 'test.db')
table_name = 'cache'
folder_path = path.join(test_folder, 'cache-files')
def createTestFile():
new_file_name = 'test-file.txt'
new_file_path = path.join(test_folder, new_file_name)
open(new_file_path, 'w').close()
return [new_file_name, new_file_path]
class Test(unittest.TestCase):
def createCache(self):
self.cache = Cache(db_file_name, table_name, folder_path)
return self.cache
def setUp(self):
if path.exists(test_folder):
shutil.rmtree(test_folder)
Path(test_folder).mkdir(parents=True, exist_ok=True)
def test_initializes_db(self):
with self.createCache() as cache:
c = cache.connection.cursor()
c.execute("SELECT name FROM sqlite_master WHERE type='table' AND name=?", (cache.table_name,))
name = c.fetchone()[0]
c.close()
self.assertEqual(table_name, name)
def test_initializes_folder(self):
with self.createCache() as cache:
exists = os.path.exists(folder_path)
self.assertTrue(exists)
def test_file_name_exists(self):
with self.createCache() as cache:
open(path.join(folder_path, 'test-file.txt'), 'w').close()
self.assertTrue(cache.file_name_exists('test-file.txt'))
self.assertFalse(cache.file_name_exists('not-a-test-file.txt'))
def test_add_file(self):
with self.createCache() as cache:
[new_file_name, new_file_path] = createTestFile()
cache.add_file('1234', new_file_path, new_file_name, copy_file=False)
c = cache.connection.cursor()
rows = c.execute('select * from cache').fetchall()
c.close()
self.assertEqual(1, len(rows))
self.assertEqual('1234', rows[0][0])
self.assertEqual(new_file_name, rows[0][1])
self.assertFalse(path.exists(new_file_path))
self.assertTrue(path.exists(path.join(cache.folder_path, new_file_name)))
def test_add_file_copy(self):
with self.createCache() as cache:
[new_file_name, new_file_path] = createTestFile()
cache.add_file('1234', new_file_path, new_file_name, copy_file=True)
self.assertTrue(path.exists(new_file_path))
self.assertTrue(path.exists(path.join(cache.folder_path, new_file_name)))
def test_add_file_existing_names(self):
with self.createCache() as cache:
[new_file_name, new_file_path] = createTestFile()
cache.add_file('1', new_file_path, new_file_name, copy_file=True)
cache.add_file('2', new_file_path, new_file_name, copy_file=True)
cache.add_file('3', new_file_path, new_file_name, copy_file=True)
c = cache.connection.cursor()
rows = c.execute('select * from cache').fetchall()
c.close()
names = [x[1] for x in rows]
self.assertListEqual(
['test-file.txt', 'test-file (2).txt', 'test-file (3).txt'],
names
)
for name in names:
self.assertTrue(path.exists(path.join(folder_path, name)))
def test_add_file_existing_key(self):
with self.createCache() as cache:
[new_file_name, new_file_path] = createTestFile()
cache.add_file('1', new_file_path, new_file_name, copy_file=True)
try:
cache.add_file('1', new_file_path, new_file_name, copy_file=True)
self.fail()
except Exception:
pass
def test_get_file_path(self):
with self.createCache() as cache:
[new_file_name, new_file_path] = createTestFile()
cache.add_file('1234', new_file_path, new_file_name, copy_file=False)
file_path = cache.get_file_path('1234')
self.assertEqual(file_path, path.join(folder_path, new_file_name))
def test_delete_file(self):
with self.createCache() as cache:
[new_file_name, new_file_path] = createTestFile()
path1 = cache.add_file('1', new_file_path, new_file_name, copy_file=True)
path2 = cache.add_file('2', new_file_path, new_file_name, copy_file=True)
path3 = cache.add_file('3', new_file_path, new_file_name, copy_file=True)
errors = cache.delete_file('2')
self.assertEqual(0, len(errors))
self.assertTrue(path.exists(path1))
self.assertFalse(path.exists(path2))
self.assertTrue(path.exists(path3))
c = cache.connection.cursor()
rows = c.execute('select * from cache').fetchall()
self.assertEqual(2, len(rows))
self.assertListEqual(['1', '3'], [row[0] for row in rows])
c.close()
def test_clear(self):
with self.createCache() as cache:
[new_file_name, new_file_path] = createTestFile()
cache.add_file('1', new_file_path, new_file_name, copy_file=True)
cache.add_file('2', new_file_path, new_file_name, copy_file=True)
cache.add_file('3', new_file_path, new_file_name, copy_file=True)
# Delete one of the files to provoke error
os.remove(path.join(folder_path, new_file_name))
errors = cache.clear()
self.assertEqual(1, len(errors))
self.assertEqual('1', errors[0][0])
c = cache.connection.cursor()
rows = c.execute('select * from cache').fetchall()
self.assertEqual(1, len(rows))
self.assertEqual('1', rows[0][0])
c.close()
def test_delete_older_than(self):
with self.createCache() as cache:
[new_file_name, new_file_path] = createTestFile()
cache.add_file('1', new_file_path, new_file_name, copy_file=True)
cache.add_file('2', new_file_path, new_file_name, copy_file=True)
cache.add_file('3', new_file_path, new_file_name, copy_file=True)
# Manipulate times
c = cache.connection.cursor()
def update_minus_days(key, days):
dt = (datetime.now() - timedelta(days=days)).astimezone()
new_dt = format_as_timestamp(dt)
c.execute("update cache set timestamp = ? where key = ?", (new_dt, key))
update_minus_days('1', 31) # should delete
update_minus_days('2', 29) # should not delete
cache.connection.commit()
cache.delete_older_than(timedelta(days=30))
rows = c.execute('select * from cache').fetchall()
self.assertEqual(2, len(rows))
c.close()
if __name__ == '__main__':
unittest.main()
|
python
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.db.models.deletion
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('api', '0105_card_promo_link'),
]
operations = [
migrations.CreateModel(
name='ModerationReport',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('creation', models.DateTimeField(auto_now_add=True)),
('status', models.PositiveIntegerField(default=1, choices=[(0, 'Rejected'), (1, 'Pending'), (2, 'In Progress'), (3, 'Accepted')])),
('comment', models.TextField(help_text='If your report is accepted, the account will be marked as fake and will never appear in leaderboards. Provide proofs below.', null=True, verbose_name='Comment', blank=True)),
('moderation_comment', models.TextField(null=True, verbose_name='Comment', blank=True)),
('fake_account', models.ForeignKey(related_name='moderationreport', to='api.Account')),
('fake_eventparticipation', models.ForeignKey(related_name='moderationreport', to='api.EventParticipation')),
('images', models.ManyToManyField(related_name='report', to='api.UserImage')),
('moderated_by', models.ForeignKey(related_name='moderation_done', on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL, null=True)),
('reported_by', models.ForeignKey(related_name='reports_sent', on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL, null=True)),
],
options={
},
bases=(models.Model,),
),
]
|
python
|
import time
import torch
import numpy as np
import pandas as pd
import scipy
from h5py import File
import itertools, random
from tqdm import tqdm
from loguru import logger
import torch.utils.data as tdata
from typing import List, Dict
class TrainHDF5Dataset(tdata.Dataset):
"""
HDF5 dataset indexed by a labels dataframe.
Indexing is done via the dataframe since we want to preserve some storage
in cases where oversampling is needed ( pretty likely )
"""
def __init__(self,
h5filedict: Dict,
h5labeldict: Dict,
label_type='soft',
transform=None):
super(TrainHDF5Dataset, self).__init__()
self._h5filedict = h5filedict
self._h5labeldict = h5labeldict
self._datasetcache = {}
self._labelcache = {}
self._len = len(self._h5labeldict)
        # If None is passed, no transform is applied at all
self._transform = transform
assert label_type in ('soft', 'hard', 'softhard', 'hardnoise')
self._label_type = label_type
self.idx_to_item = {
idx: item
for idx, item in enumerate(self._h5labeldict.keys())
}
first_item = next(iter(self._h5filedict.keys()))
with File(self._h5filedict[first_item], 'r') as store:
self.datadim = store[first_item].shape[-1]
def __len__(self):
return self._len
def __del__(self):
for k, cache in self._datasetcache.items():
cache.close()
for k, cache in self._labelcache.items():
cache.close()
def __getitem__(self, index: int):
fname: str = self.idx_to_item[index]
h5file: str = self._h5filedict[fname]
labelh5file: str = self._h5labeldict[fname]
if not h5file in self._datasetcache:
self._datasetcache[h5file] = File(h5file, 'r')
if not labelh5file in self._labelcache:
self._labelcache[labelh5file] = File(labelh5file, 'r')
data = self._datasetcache[h5file][f"{fname}"][()]
speech_target = self._labelcache[labelh5file][f"{fname}/speech"][()]
noise_target = self._labelcache[labelh5file][f"{fname}/noise"][()]
speech_clip_target = self._labelcache[labelh5file][
f"{fname}/clipspeech"][()]
noise_clip_target = self._labelcache[labelh5file][
f"{fname}/clipnoise"][()]
noise_clip_target = np.max(noise_clip_target) # take max around axis
if self._label_type == 'hard':
noise_clip_target = noise_clip_target.round()
speech_target = speech_target.round()
noise_target = noise_target.round()
speech_clip_target = speech_clip_target.round()
elif self._label_type == 'hardnoise': # only noise yay
noise_clip_target = noise_clip_target.round()
noise_target = noise_target.round()
elif self._label_type == 'softhard':
r = np.random.permutation(noise_target.shape[0] // 4)
speech_target[r] = speech_target[r].round()
target_clip = torch.tensor((noise_clip_target, speech_clip_target))
data = torch.as_tensor(data).float()
target_time = torch.as_tensor(
np.stack((noise_target, speech_target), axis=-1)).float()
if self._transform:
data = self._transform(data)
return data, target_time, target_clip, fname
class HDF5Dataset(tdata.Dataset):
"""
HDF5 dataset indexed by a labels dataframe.
Indexing is done via the dataframe since we want to preserve some storage
in cases where oversampling is needed ( pretty likely )
"""
def __init__(self, h5file: File, h5label: File, fnames, transform=None):
super(HDF5Dataset, self).__init__()
self._h5file = h5file
self._h5label = h5label
self.fnames = fnames
self.dataset = None
self.label_dataset = None
self._len = len(fnames)
        # If None is passed, no transform is applied at all
self._transform = transform
with File(self._h5file, 'r') as store, File(self._h5label,
'r') as labelstore:
self.datadim = store[self.fnames[0]].shape[-1]
def __len__(self):
return self._len
def __getitem__(self, index):
if self.dataset is None:
self.dataset = File(self._h5file, 'r')
self.label_dataset = File(self._h5label, 'r')
fname = self.fnames[index]
data = self.dataset[fname][()]
speech_target = self.label_dataset[f"{fname}/speech"][()]
noise_target = self.label_dataset[f"{fname}/noise"][()]
speech_clip_target = self.label_dataset[f"{fname}/clipspeech"][()]
noise_clip_target = self.label_dataset[f"{fname}/clipnoise"][()]
noise_clip_target = np.max(noise_clip_target) # take max around axis
target_clip = torch.tensor((noise_clip_target, speech_clip_target))
data = torch.as_tensor(data).float()
target_time = torch.as_tensor(
np.stack((noise_target, speech_target), axis=-1)).float()
if self._transform:
data = self._transform(data)
return data, target_time, target_clip, fname
class EvalH5Dataset(tdata.Dataset):
"""
HDF5 dataset indexed by a labels dataframe.
Indexing is done via the dataframe since we want to preserve some storage
in cases where oversampling is needed ( pretty likely )
"""
def __init__(self, h5file: File, fnames=None):
super(EvalH5Dataset, self).__init__()
self._h5file = h5file
self._dataset = None
        # no transform is applied to evaluation data
with File(self._h5file, 'r') as store:
if fnames is None:
self.fnames = list(store.keys())
else:
self.fnames = fnames
self.datadim = store[self.fnames[0]].shape[-1]
self._len = len(store)
def __len__(self):
return self._len
def __getitem__(self, index):
if self._dataset is None:
self._dataset = File(self._h5file, 'r')
fname = self.fnames[index]
data = self._dataset[fname][()]
data = torch.as_tensor(data).float()
return data, fname
class MinimumOccupancySampler(tdata.Sampler):
"""
docstring for MinimumOccupancySampler
samples at least one instance from each class sequentially
"""
def __init__(self, labels, sampling_mode='same', random_state=None):
self.labels = labels
data_samples, n_labels = labels.shape
label_to_idx_list, label_to_length = [], []
self.random_state = np.random.RandomState(seed=random_state)
for lb_idx in range(n_labels):
label_selection = labels[:, lb_idx]
if scipy.sparse.issparse(label_selection):
label_selection = label_selection.toarray()
label_indexes = np.where(label_selection == 1)[0]
self.random_state.shuffle(label_indexes)
label_to_length.append(len(label_indexes))
label_to_idx_list.append(label_indexes)
self.longest_seq = max(label_to_length)
self.data_source = np.empty((self.longest_seq, len(label_to_length)),
dtype=np.uint32)
# Each column represents one "single instance per class" data piece
for ix, leng in enumerate(label_to_length):
# Fill first only "real" samples
self.data_source[:leng, ix] = label_to_idx_list[ix]
self.label_to_idx_list = label_to_idx_list
self.label_to_length = label_to_length
if sampling_mode == 'same':
self.data_length = data_samples
elif sampling_mode == 'over': # Sample all items
self.data_length = np.prod(self.data_source.shape)
def _reshuffle(self):
# Reshuffle
for ix, leng in enumerate(self.label_to_length):
leftover = self.longest_seq - leng
random_idxs = np.random.randint(leng, size=leftover)
self.data_source[leng:,
ix] = self.label_to_idx_list[ix][random_idxs]
def __iter__(self):
        # Before each epoch, reshuffle the random indices
self._reshuffle()
n_samples = len(self.data_source)
random_indices = self.random_state.permutation(n_samples)
data = np.concatenate(
self.data_source[random_indices])[:self.data_length]
return iter(data)
def __len__(self):
return self.data_length
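# Illustrative use of MinimumOccupancySampler (assumption, comment only):
#   labels: (num_samples, num_classes) multi-hot matrix, dense or scipy-sparse
#   sampler = MinimumOccupancySampler(labels, sampling_mode='over', random_state=0)
#   loader = tdata.DataLoader(train_dataset, batch_size=32, sampler=sampler)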
class MultiBalancedSampler(tdata.sampler.Sampler):
"""docstring for BalancedSampler
Samples for Multi-label training
    Sampling is not totally equal, but aims to be roughly equal
"""
def __init__(self, Y, replacement=False, num_samples=None):
assert Y.ndim == 2, "Y needs to be one hot encoded"
if scipy.sparse.issparse(Y):
raise ValueError("Not supporting sparse amtrices yet")
class_counts = np.sum(Y, axis=0)
class_weights = 1. / class_counts
class_weights = class_weights / class_weights.sum()
classes = np.arange(Y[0].shape[0])
        # Convert each multi-hot row back to a tuple of class indices
class_ids = [tuple(classes.compress(idx)) for idx in Y]
sample_weights = []
for i in range(len(Y)):
# Multiple classes were chosen, calculate average probability
weight = class_weights[np.array(class_ids[i])]
# Take the mean of the multiple classes and set as weight
weight = np.mean(weight)
sample_weights.append(weight)
self._weights = torch.as_tensor(sample_weights, dtype=torch.float)
self._len = num_samples if num_samples else len(Y)
self._replacement = replacement
def __len__(self):
return self._len
def __iter__(self):
return iter(
torch.multinomial(self._weights, self._len,
self._replacement).tolist())
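# Illustrative use of MultiBalancedSampler (assumption, comment only):
#   Y: dense (num_samples, num_classes) multi-hot numpy array
#   sampler = MultiBalancedSampler(Y, replacement=True)
#   loader = tdata.DataLoader(train_dataset, batch_size=32, sampler=sampler)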
def gettraindataloader(h5files,
h5labels,
label_type=False,
transform=None,
**dataloader_kwargs):
dset = TrainHDF5Dataset(h5files,
h5labels,
label_type=label_type,
transform=transform)
return tdata.DataLoader(dset,
collate_fn=sequential_collate,
**dataloader_kwargs)
def getdataloader(h5file, h5label, fnames, transform=None,
**dataloader_kwargs):
dset = HDF5Dataset(h5file, h5label, fnames, transform=transform)
return tdata.DataLoader(dset,
collate_fn=sequential_collate,
**dataloader_kwargs)
def pad(tensorlist, padding_value=0.):
lengths = [len(f) for f in tensorlist]
max_len = np.max(lengths)
# max_len = 2000
batch_dim = len(lengths)
data_dim = tensorlist[0].shape[-1]
out_tensor = torch.full((batch_dim, max_len, data_dim),
fill_value=padding_value,
dtype=torch.float32)
for i, tensor in enumerate(tensorlist):
length = tensor.shape[0]
out_tensor[i, :length, ...] = tensor[:length, ...]
return out_tensor, torch.tensor(lengths)
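# Example of pad() (illustrative): pad([torch.zeros(3, 64), torch.zeros(5, 64)]) returns a
# (2, 5, 64) zero-padded batch together with tensor([3, 5]) holding the original lengths.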
def sequential_collate(batches):
# sort length wise
data, targets_time, targets_clip, fnames = zip(*batches)
data, lengths_data = pad(data)
targets_time, lengths_tar = pad(targets_time, padding_value=0)
targets_clip = torch.stack(targets_clip)
assert lengths_data.shape == lengths_tar.shape
return data, targets_time, targets_clip, fnames, lengths_tar
if __name__ == '__main__':
import utils
label_df = pd.read_csv(
'data/csv_labels/unbalanced_from_unbalanced/unbalanced.csv', sep='\s+')
data_df = pd.read_csv("data/data_csv/unbalanced.csv", sep='\s+')
merged = data_df.merge(label_df, on='filename')
common_idxs = merged['filename']
data_df = data_df[data_df['filename'].isin(common_idxs)]
label_df = label_df[label_df['filename'].isin(common_idxs)]
label = utils.df_to_dict(label_df)
data = utils.df_to_dict(data_df)
trainloader = gettraindataloader(
h5files=data,
h5labels=label,
transform=None,
label_type='soft',
batch_size=64,
num_workers=3,
shuffle=False,
)
with tqdm(total=len(trainloader)) as pbar:
for batch in trainloader:
inputs, targets_time, targets_clip, filenames, lengths = batch
pbar.set_postfix(inp=inputs.shape)
pbar.update()
|
python
|
import utils.train as train
class DataSet:
def __init__(self, config, problem):
self.config = config
        self.problem = problem
self.update()
def update(self):
if not self.config.test:
y_, x_, y_val_, x_val_ = (
train.setup_input_sc(
self.config.test, self.problem, self.config.tbs, self.config.vbs, self.config.fixval,
self.config.supp_prob, self.config.SNR, self.config.magdist, **self.config.distargs))
self.x_ = x_
self.y_ = y_
self.x_val_ = x_val_
self.y_val_ = y_val_
else:
y_, x_ = (
train.setup_input_sc(
self.config.test, self.problem, self.config.tbs, self.config.vbs, self.config.fixval,
self.config.supp_prob, self.config.SNR, self.config.magdist, **self.config.distargs))
self.x_ = x_
self.y_ = y_
|
python
|
import json
from pathlib import Path
from typing import Dict
import pandas as pd
import sha_calc as sha_calc
from gmhazard_calc.im import IM
from gmhazard_calc import gm_data
class BranchUniGCIM(sha_calc.UniIMiDist, sha_calc.CondIMjDist):
"""Represents the GCIM for a specific IMi and branch
Parameters:
-----------
See Attributes
Attributes:
-----------
IMi: IM
IM Object of the IMi
IMj: IM
Conditioning IM
im_j: float
Value of the conditioning IM
branch: Branch
lnIMi_IMj_Rup: Uni_lnIMi_IMj_Rup
Conditional lnIMi|IMj,Rup distributions
lnIMi_IMj: Uni_lnIMi_IMj
Marginal lnIMi|IMj distribution
"""
VARIABLES_FN = "variables.json"
LNIMI_IMJ_RUP_MU_FN = "lnIMi_IMj_rup_mu_fn.csv"
LNIMI_IMJ_RUP_SIGMA_FN = "lnIMi_IMj_rup_sigma_fn.csv"
LNIMI_IMJ_CDF_FN = "lnIMi_IMj_fn.csv"
def __init__(
self,
IMi: IM,
IMj: IM,
im_j: float,
branch: gm_data.Branch,
lnIMi_IMj_Rup: sha_calc.Uni_lnIMi_IMj_Rup,
lnIMi_IMj: sha_calc.Uni_lnIMi_IMj,
):
sha_calc.UniIMiDist.__init__(self, IMi)
sha_calc.CondIMjDist.__init__(self, IMj, im_j)
self.branch = branch
self.lnIMi_IMj_Rup = lnIMi_IMj_Rup
self.lnIMi_IMj = lnIMi_IMj
def save(self, base_dir: Path):
save_dir = base_dir / self.branch.name
save_dir.mkdir(exist_ok=False)
with open(save_dir / self.VARIABLES_FN, "w") as f:
json.dump(
dict(
IMi=str(self.IMi),
IMj=str(self.IMj),
im_j=self.im_j,
branch_name=self.branch.name,
),
f,
)
self.lnIMi_IMj_Rup.mu.to_csv(save_dir / self.LNIMI_IMJ_RUP_MU_FN)
self.lnIMi_IMj_Rup.sigma.to_csv(save_dir / self.LNIMI_IMJ_RUP_SIGMA_FN)
self.lnIMi_IMj.cdf.to_csv(save_dir / self.LNIMI_IMJ_CDF_FN)
@classmethod
def load(cls, data_dir: Path, branch: gm_data.Branch):
with open(data_dir / f"{cls.VARIABLES_FN}", "r") as f:
variables_dict = json.load(f)
assert branch.name == variables_dict["branch_name"]
IMi = IM.from_str(variables_dict["IMi"])
IMj, im_j = IM.from_str(variables_dict["IMj"]), variables_dict["im_j"]
lnIMi_IMj_Rup = sha_calc.Uni_lnIMi_IMj_Rup(
pd.read_csv(data_dir / cls.LNIMI_IMJ_RUP_MU_FN, index_col=0).squeeze(),
pd.read_csv(data_dir / cls.LNIMI_IMJ_RUP_SIGMA_FN, index_col=0).squeeze(),
IMi,
IMj,
im_j,
)
lnIMi_IMj = sha_calc.Uni_lnIMi_IMj(
pd.read_csv(data_dir / cls.LNIMI_IMJ_CDF_FN, index_col=0).squeeze(),
IMi,
IMj,
im_j,
)
return cls(IMi, IMj, im_j, branch, lnIMi_IMj_Rup, lnIMi_IMj)
class IMEnsembleUniGCIM(sha_calc.UniIMiDist, sha_calc.CondIMjDist):
"""Represents the GCIM for a specific IMi and IMEnsemble
Parameters:
-----------
See Attributes
Attributes:
-----------
im_ensemble: IMEnsemble
IMi: IM
IM Object of the IMi
IMj: IM
Conditioning IM
im_j: float
Value of the conditioning IM
branch_uni_gcims: dictionary
Dictionary of the branch GCIM's that
make up this combined GCIM
"""
VARIABLES_FN = "variables.json"
LNIMI_IMJ_CDF_FN = "lnIMi_IMj_cdf.csv"
def __init__(
self,
im_ensemble: gm_data.IMEnsemble,
IMi: IM,
IMj: IM,
im_j: float,
ln_IMi_IMj: sha_calc.Uni_lnIMi_IMj,
branch_uni_gcims: Dict[str, BranchUniGCIM],
):
sha_calc.UniIMiDist.__init__(self, IMi)
sha_calc.CondIMjDist.__init__(self, IMj, im_j)
self.lnIMi_IMj = ln_IMi_IMj
self.im_ensemble = im_ensemble
self.branch_uni_gcims = branch_uni_gcims
def save(self, base_dir: Path):
save_dir = base_dir / f"{self.IMi}"
save_dir.mkdir(exist_ok=False)
with open(save_dir / self.VARIABLES_FN, "w") as f:
json.dump(dict(IMi=str(self.IMi), IMj=str(self.IMj), im_j=self.im_j), f)
self.lnIMi_IMj.cdf.to_csv(save_dir / self.LNIMI_IMJ_CDF_FN)
for cur_branch_name, branch_gcim in self.branch_uni_gcims.items():
branch_gcim.save(save_dir)
@classmethod
def load(cls, data_dir: Path, im_ensemble: gm_data.IMEnsemble):
with open(data_dir / cls.VARIABLES_FN, "r") as f:
variables_dict = json.load(f)
IMi = IM.from_str(variables_dict["IMi"])
IMj, im_j = IM.from_str(variables_dict["IMj"]), variables_dict["im_j"]
        # construct the marginal lnIMi|IMj distribution once and re-use it below
        lnIMi_IMj = sha_calc.Uni_lnIMi_IMj(
            pd.read_csv(data_dir / cls.LNIMI_IMJ_CDF_FN, index_col=0).squeeze(),
            IMi,
            IMj,
            im_j,
        )
        return cls(
            im_ensemble,
            IMi,
            IMj,
            im_j,
            lnIMi_IMj,
{
cur_branch_name: BranchUniGCIM.load(
data_dir / cur_branch_name, cur_branch
)
for cur_branch_name, cur_branch in im_ensemble.branches_dict.items()
},
)
|
python
|
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
from google.ads.google_ads.v4.proto.resources import campaign_bid_modifier_pb2 as google_dot_ads_dot_googleads__v4_dot_proto_dot_resources_dot_campaign__bid__modifier__pb2
from google.ads.google_ads.v4.proto.services import campaign_bid_modifier_service_pb2 as google_dot_ads_dot_googleads__v4_dot_proto_dot_services_dot_campaign__bid__modifier__service__pb2
class CampaignBidModifierServiceStub(object):
"""Proto file describing the Campaign Bid Modifier service.
Service to manage campaign bid modifiers.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.GetCampaignBidModifier = channel.unary_unary(
'/google.ads.googleads.v4.services.CampaignBidModifierService/GetCampaignBidModifier',
request_serializer=google_dot_ads_dot_googleads__v4_dot_proto_dot_services_dot_campaign__bid__modifier__service__pb2.GetCampaignBidModifierRequest.SerializeToString,
response_deserializer=google_dot_ads_dot_googleads__v4_dot_proto_dot_resources_dot_campaign__bid__modifier__pb2.CampaignBidModifier.FromString,
)
self.MutateCampaignBidModifiers = channel.unary_unary(
'/google.ads.googleads.v4.services.CampaignBidModifierService/MutateCampaignBidModifiers',
request_serializer=google_dot_ads_dot_googleads__v4_dot_proto_dot_services_dot_campaign__bid__modifier__service__pb2.MutateCampaignBidModifiersRequest.SerializeToString,
response_deserializer=google_dot_ads_dot_googleads__v4_dot_proto_dot_services_dot_campaign__bid__modifier__service__pb2.MutateCampaignBidModifiersResponse.FromString,
)
class CampaignBidModifierServiceServicer(object):
"""Proto file describing the Campaign Bid Modifier service.
Service to manage campaign bid modifiers.
"""
def GetCampaignBidModifier(self, request, context):
"""Returns the requested campaign bid modifier in full detail.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MutateCampaignBidModifiers(self, request, context):
"""Creates, updates, or removes campaign bid modifiers.
Operation statuses are returned.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_CampaignBidModifierServiceServicer_to_server(servicer, server):
rpc_method_handlers = {
'GetCampaignBidModifier': grpc.unary_unary_rpc_method_handler(
servicer.GetCampaignBidModifier,
request_deserializer=google_dot_ads_dot_googleads__v4_dot_proto_dot_services_dot_campaign__bid__modifier__service__pb2.GetCampaignBidModifierRequest.FromString,
response_serializer=google_dot_ads_dot_googleads__v4_dot_proto_dot_resources_dot_campaign__bid__modifier__pb2.CampaignBidModifier.SerializeToString,
),
'MutateCampaignBidModifiers': grpc.unary_unary_rpc_method_handler(
servicer.MutateCampaignBidModifiers,
request_deserializer=google_dot_ads_dot_googleads__v4_dot_proto_dot_services_dot_campaign__bid__modifier__service__pb2.MutateCampaignBidModifiersRequest.FromString,
response_serializer=google_dot_ads_dot_googleads__v4_dot_proto_dot_services_dot_campaign__bid__modifier__service__pb2.MutateCampaignBidModifiersResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'google.ads.googleads.v4.services.CampaignBidModifierService', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
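# Illustrative server wiring for these generated stubs (assumption, not part of the generated file;
# MyCampaignBidModifierServicer is a hypothetical subclass of CampaignBidModifierServiceServicer):
#   from concurrent import futures
#   server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
#   add_CampaignBidModifierServiceServicer_to_server(MyCampaignBidModifierServicer(), server)
#   server.add_insecure_port('[::]:50051')
#   server.start()
#   server.wait_for_termination()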
|
python
|