from rubicon_ml.client import Base
class Parameter(Base):
"""A client parameter.
A `parameter` is an input to an `experiment` (model run)
that depends on the type of model being used. It affects
the model's predictions.
For example, if you were using a random forest classifier,
'n_estimators' (the number of trees in the forest) could
be a parameter.
A `parameter` is logged to an `experiment`.
Parameters
----------
domain : rubicon.domain.Parameter
The parameter domain model.
config : rubicon.client.Config
The config, which specifies the underlying repository.
"""
def __init__(self, domain, config=None):
super().__init__(domain, config)
@property
def id(self):
"""Get the parameter's id."""
return self._domain.id
@property
def name(self):
"""Get the parameter's name."""
return self._domain.name
@property
def value(self):
"""Get the parameter's value."""
return self._domain.value
@property
def description(self):
"""Get the parameter's description."""
return self._domain.description
@property
def created_at(self):
"""Get the time the parameter was created."""
return self._domain.created_at
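# A minimal usage sketch (not part of rubicon_ml): assuming Base.__init__ simply
# stores the domain object, any stand-in exposing the attributes read by the
# properties above can be used to exercise this client class.
if __name__ == "__main__":
    from datetime import datetime
    from types import SimpleNamespace
    # Hypothetical domain stand-in; the real domain model lives in rubicon_ml.domain.
    fake_domain = SimpleNamespace(
        id="param-0001",
        name="n_estimators",
        value=200,
        description="number of trees in the forest",
        created_at=datetime.now(),
    )
    parameter = Parameter(fake_domain)
    print(parameter.name, parameter.value)  # -> n_estimators 200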
|
python
|
# Mirror data from source MSN's to destination MSN on same Mark6 unit
# Jan 29, 2017 Lindy Blackburn
import argparse
import subprocess
import stat
import os
parser = argparse.ArgumentParser(description="mount drives and create commands to copy data from source MSN(s) to destination MSN.")
parser.add_argument('source', metavar='source', type=str, nargs='+')
parser.add_argument('destination', metavar='destination', type=str)
args = parser.parse_args()
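# Example invocation (MSN labels and the script name are hypothetical); the script
# mounts the modules and then prints the mkdir/cp commands to stdout for review:
#   python mirror_msn.py SRC001 SRC002 DEST001 > copy_commands.sh
#   bash copy_commands.sh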
# unmount all disks
for a in open('/proc/mounts'):
if '/mnt/disks' in a:
dev = a.split()[0]
os.system('sudo umount %s' % dev)
# list of sas disks attached
res = subprocess.Popen(["lsscsi", "-t"], stdout=subprocess.PIPE)
out = res.communicate()[0]
disks = [a.split() for a in out.strip().split('\n') if "sas:" in a]
# get physical module number from a lsscsi -t line
# ['[0:0:17:0]', 'disk', 'sas:0x4433221100000000', '/dev/sdai']
def sasmod(disk):
host, channel, target, lun = map(int, disk[0][1:-1].split(':'))
mod = 1 + 2*(host > 0) + int(disk[2][-8:-6], 16) / 8
return mod
# get disk eMSN
diskinfo = dict()
for disk in disks:
(sasid, dev) = (disk[2], disk[3])
res = subprocess.Popen(["/usr/bin/sudo", "/sbin/parted", dev, "print"], stdout=subprocess.PIPE)
out = res.communicate()[0]
msn = out.strip().split('\n')[-2].strip().split()[5]
# '[0:0:17:0]' 'disk' 'sas:0x4433221100000000' '/dev/sdai' 1
# print " ".join(map(repr,disk + [sasmod(disk)]))
diskinfo[msn] = disk
# mount destination
for i in range(8):
disk = diskinfo["%s_%d" % (args.destination, i)]
dev = disk[3]
m = sasmod(disk)
os.system('sudo mount %s1 /mnt/disks/%d/%d' % (dev, m, i))
# mount sources read only
for sourcemsn in args.source:
for i in range(8):
disk = diskinfo["%s_%d" % (sourcemsn, i)]
dev = disk[3]
n = sasmod(disk)
os.system('sudo mount -o ro %s1 /mnt/disks/%d/%d' % (dev, n, i))
# create copy script
for sourcemsn in args.source:
for i in range(8):
disk_in = diskinfo["%s_%d" % (sourcemsn, i)]
disk_out = diskinfo["%s_%d" % (args.destination, i)]
n = sasmod(disk_in)
m = sasmod(disk_out)
print "mkdir -p /mnt/disks/%d/%d/%s" % (m, i, sourcemsn)
print "cp -u /mnt/disks/%d/%d/data/* /mnt/disks/%d/%d/%s/ &" % (n, i, m, i, sourcemsn)
print "wait"
|
python
|
# Generated by Django 2.2.10 on 2020-04-26 14:55
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('pacientes', '0013_consulta_turno'),
]
operations = [
migrations.CreateModel(
name='Empresa',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nombre', models.CharField(max_length=150)),
('direccion', models.CharField(blank=True, max_length=200, null=True)),
('cuit', models.CharField(blank=True, max_length=30, null=True)),
],
),
migrations.AddField(
model_name='consulta',
name='fecha',
field=models.DateTimeField(default=django.utils.timezone.now),
),
migrations.CreateModel(
name='EmpresaPaciente',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('ultimo_recibo_de_sueldo', models.DateTimeField(blank=True, null=True)),
('empresa', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='pacientes.Empresa')),
('paciente', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='pacientes.Paciente')),
],
),
]
|
python
|
"""Option handling polyfill for Flake8 2.x and 3.x."""
import optparse
import os
def register(parser, *args, **kwargs):
r"""Register an option for the Option Parser provided by Flake8.
:param parser:
The option parser being used by Flake8 to handle command-line options.
:param \*args:
Positional arguments that you might otherwise pass to ``add_option``.
:param \*\*kwargs:
Keyword arguments you might otherwise pass to ``add_option``.
"""
try:
# Flake8 3.x registration
parser.add_option(*args, **kwargs)
except (optparse.OptionError, TypeError):
# Flake8 2.x registration
# Pop Flake8 3 parameters out of the kwargs so they don't cause a
# conflict.
parse_from_config = kwargs.pop('parse_from_config', False)
comma_separated_list = kwargs.pop('comma_separated_list', False)
normalize_paths = kwargs.pop('normalize_paths', False)
# In the unlikely event that the developer has specified their own
# callback, let's pop that and deal with that as well.
preexisting_callback = kwargs.pop('callback', None)
callback = generate_callback_from(comma_separated_list,
normalize_paths,
preexisting_callback)
if callback:
kwargs['callback'] = callback
kwargs['action'] = 'callback'
# We've updated our args and kwargs and can now rather confidently
# call add_option.
option = parser.add_option(*args, **kwargs)
if parse_from_config:
parser.config_options.append(option.get_opt_string().lstrip('-'))
def parse_comma_separated_list(value):
"""Parse a comma-separated list.
:param value:
String or list of strings to be parsed and normalized.
:returns:
List of values with whitespace stripped.
:rtype:
list
"""
if not value:
return []
if not isinstance(value, (list, tuple)):
value = value.split(',')
return [item.strip() for item in value]
def normalize_path(path, parent=os.curdir):
"""Normalize a single-path.
:returns:
The normalized path.
:rtype:
str
"""
# NOTE(sigmavirus24): Using os.path.sep allows for Windows paths to
# be specified and work appropriately.
separator = os.path.sep
if separator in path:
path = os.path.abspath(os.path.join(parent, path))
return path.rstrip(separator)
def generate_callback_from(comma_separated_list, normalize_paths,
preexisting_callback):
"""Generate a callback from parameters provided for the option.
This uses composition to handle mixtures of the flags provided as well as
callbacks specified by the user.
"""
if comma_separated_list and normalize_paths:
callback_list = [comma_separated_callback,
normalize_paths_callback]
if preexisting_callback:
callback_list.append(preexisting_callback)
callback = compose_callbacks(*callback_list)
elif comma_separated_list:
callback = comma_separated_callback
if preexisting_callback:
callback = compose_callbacks(callback, preexisting_callback)
elif normalize_paths:
callback = normalize_paths_callback
if preexisting_callback:
callback = compose_callbacks(callback, preexisting_callback)
elif preexisting_callback:
callback = preexisting_callback
else:
callback = None
return callback
def compose_callbacks(*callback_functions):
"""Compose the callbacks provided as arguments."""
def _callback(option, opt_str, value, parser, *args, **kwargs):
"""Callback that encompasses the other callbacks."""
for callback in callback_functions:
callback(option, opt_str, value, parser, *args, **kwargs)
return _callback
def comma_separated_callback(option, opt_str, value, parser):
"""Parse the value into a comma-separated list."""
value = getattr(parser.values, option.dest, value)
comma_separated_list = parse_comma_separated_list(value)
setattr(parser.values, option.dest, comma_separated_list)
def normalize_paths_callback(option, opt_str, value, parser):
"""Normalize the path(s) value."""
value = getattr(parser.values, option.dest, value)
if isinstance(value, list):
normalized = [normalize_path(s) for s in value]
else:
normalized = normalize_path(value)
setattr(parser.values, option.dest, normalized)
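# A quick illustrative check of the pure helpers above (values are examples only;
# it does not exercise the Flake8 2.x/3.x registration paths).
if __name__ == "__main__":
    assert parse_comma_separated_list("foo, bar,baz") == ["foo", "bar", "baz"]
    assert parse_comma_separated_list([]) == []
    # No path separator present, so the value is returned untouched.
    assert normalize_path("plainname") == "plainname"
    # Contains a separator: resolved to an absolute path, trailing separator stripped.
    print(normalize_path("sub/dir/"))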
|
python
|
# -*- coding: utf-8 -*-
"""
Created on 2018/8/1
@author: xing yan
"""
from random import shuffle, sample, randint, choice
administrative_div_code = ['110101', '110102', '110105', '110106', '110107', '110108', '110109', '110111', '110112', '110113', '110114', '110115', '110116', '110117', '110118', '110119']
surname = ['赵', '钱', '孙', '李', '周', '吴', '郑', '王', '冯', '陈', '楮', '卫', '蒋', '沈', '韩', '杨', '朱', '秦', '尤', '许', '何', '吕', '施', '张', '孔', '曹', '严', '华', '金', '魏', '陶', '姜', '戚', '谢', '邹', '喻', '柏', '水', '窦', '章', '云', '苏', '潘', '葛', '奚', '范', '彭', '郎', '鲁', '韦', '昌', '马', '苗', '凤', '花', '方', '俞', '任', '袁', '柳', '酆', '鲍', '史', '唐', '费', '廉', '岑', '薛', '雷', '贺', '倪', '汤', '滕', '殷', '罗', '毕', '郝', '邬', '安', '常', '乐', '于', '时', '傅', '皮', '卞', '齐', '康', '伍', '余', '元', '卜', '顾', '孟', '平', '黄', '和', '穆', '萧', '尹', '姚', '邵', '湛', '汪', '祁', '毛', '禹', '狄', '米', '贝', '明', '臧', '计', '伏', '成', '戴', '谈', '宋', '茅', '庞', '熊', '纪', '舒', '屈', '项', '祝', '董', '梁', '杜', '阮', '蓝', '闽', '席', '季', '麻', '强', '贾', '路', '娄', '危', '江', '童', '颜', '郭', '梅', '盛', '林', '刁', '锺', '徐', '丘', '骆', '高', '夏', '蔡', '田', '樊', '胡', '凌', '霍', '虞', '万', '支', '柯', '昝', '管', '卢', '莫', '经', '房', '裘', '缪', '干', '解', '应', '宗', '丁', '宣', '贲', '邓', '郁', '单', '杭', '洪', '包', '诸', '左', '石', '崔', '吉', '钮', '龚', '程', '嵇', '邢', '滑', '裴', '陆', '荣', '翁', '荀', '羊', '於', '惠', '甄', '麹', '家', '封', '芮', '羿', '储', '靳', '汲', '邴', '糜', '松', '井', '段', '富', '巫', '乌', '焦', '巴', '弓', '牧', '隗', '山', '谷', '车', '侯', '宓', '蓬', '全', '郗', '班', '仰', '秋', '仲', '伊', '宫', '宁', '仇', '栾', '暴', '甘', '斜', '厉', '戎', '祖', '武', '符', '刘', '景', '詹', '束', '龙', '叶', '幸', '司', '韶', '郜', '黎', '蓟', '薄', '印', '宿', '白', '怀', '蒲', '邰', '从', '鄂', '索', '咸', '籍', '赖', '卓', '蔺', '屠', '蒙', '池', '乔', '阴', '郁', '胥', '能', '苍', '双', '闻', '莘', '党', '翟', '谭', '贡', '劳', '逄', '姬', '申', '扶', '堵', '冉', '宰', '郦', '雍', '郤', '璩', '桑', '桂', '濮', '牛', '寿', '通', '边', '扈', '燕', '冀', '郏', '浦', '尚', '农', '温', '别', '庄', '晏', '柴', '瞿', '阎', '充', '慕', '连', '茹', '习', '宦', '艾', '鱼', '容', '向', '古', '易', '慎', '戈', '廖', '庾', '终', '暨', '居', '衡', '步', '都', '耿', '满', '弘', '匡', '国', '文', '寇', '广', '禄', '阙', '东', '欧', '殳', '沃', '利', '蔚', '越', '夔', '隆', '师', '巩', '厍', '聂', '晁', '勾', '敖', '融', '冷', '訾', '辛', '阚', '那', '简', '饶', '空', '曾', '毋', '沙', '乜', '养', '鞠', '须', '丰', '巢', '关', '蒯', '相', '查', '后', '荆', '红', '游', '竺', '权', '逑', '盖', '益', '桓', '公', '晋', '楚', '阎', '法', '汝', '鄢', '涂', '钦', '岳', '帅', '缑', '亢', '况', '后', '有', '琴', '商', '牟', '佘', '佴', '伯', '赏', '墨', '哈', '谯', '笪', '年', '爱', '阳', '佟']
double_surname = ['万俟', '司马', '上官', '欧阳', '夏侯', '诸葛', '闻人', '东方', '赫连', '皇甫', '尉迟', '公羊', '澹台', '公冶', '宗政', '濮阳', '淳于', '单于', '太叔', '申屠', '公孙', '仲孙', '轩辕', '令狐', '锺离', '宇文', '长孙', '慕容', '鲜于', '闾丘', '司徒', '司空', '丌官', '司寇', '仉', '督', '子车', '颛孙', '端木', '巫马', '公西', '漆雕', '乐正', '壤驷', '公良', '拓拔', '夹谷', '宰父', '谷梁', '段干', '百里', '东郭', '南门', '呼延', '归', '海', '羊舌', '微生', '梁丘', '左丘', '东门', '西门', '南宫']
how_name = ['强', '志', '艺', '锦', '朗', '新', '义', '固', '毅', '鹏', '舒', '浩', '寒', '黛', '纯', '生', '成', '彪', '青', '有', '功', '鸣', '承', '哲', '士', '飘', '榕', '馨', '邦', '顺', '娥', '斌', '雪', '园', '竹', '明', '力', '维', '海', '滢', '树', '冰', '影', '荔', '荣', '裕', '昭', '仪', '盛', '克', '康', '香', '伯', '宁', '壮', '瑾', '利', '林', '振', '瑶', '枝', '琴', '岚', '彩', '萍', '凝', '仁', '融', '梁', '琰', '琬', '思', '军', '朋', '全', '贵', '先', '欢', '凡', '芳', '真', '德', '惠', '冠', '嘉', '旭', '露', '慧', '俊', '婕', '丹', '娣', '松', '梦', '卿', '勤', '钧', '磊', '瑞', '光', '华', '可', '澜', '琛', '春', '莲', '丽', '武', '宁', '时', '月', '晨', '达', '民', '超', '琳', '琼', '雁', '辉', '环', '福', '霄', '英', '红', '芸', '才', '佳', '爱', '苑', '芝', '馥', '素', '霞', '昌', '莉', '兰', '行', '波', '震', '广', '伟', '怡', '咏', '国', '莺', '婷', '纨', '家', '飞', '菁', '洁', '之', '刚', '绍', '珠', '学', '君', '诚', '芬', '思', '巧', '淑', '腾', '桂', '娟', '云', '涛', '良', '贞', '勇', '伊', '翰', '聪', '兴', '毓', '富', '山', '东', '群', '彬', '秋', '妹', '蕊', '媛', '言', '翠', '晶', '保', '星', '玲', '婉', '颖', '策', '欣', '会', '娴', '燕', '羽', '坚', '翔', '致', '珍', '龙', '宏', '奇', '艳', '希', '善', '筠', '亨', '峰', '进', '友', '弘', '亚', '凤', '茗', '娅', '秀', '航', '厚', '启', '建', '胜', '安', '柔', '祥', '玉', '敬', '梅', '河', '庆', '璐', '泰', '泽', '瑗', '心', '娜', '和', '栋', '健', '杰', '育', '平', '蓓', '子', '亮', '发', '博', '姬', '莎', '楠', '珊', '若', '静', '元', '轮', '信', '枫', '伦', '政', '豪', '天', '茂', '晓', '悦', '风', '永', '霭', '叶', '美', '姣', '宜', '世', '菲', '辰', '婵', '文', '炎', '雅', '璧', '菊', '雄', '江', '中', '薇', '妍', '谦', '韵', '茜', '清', '爽', '琦', '岩', '蓉', '乐', '倩', '以', '眉', '荷']
small_name = ['秋', '官', '佳', '尔', '天', '运', '公', '星', '晋', '西', '宛', '宗', '萌', '夜', '卷', '月', '珠', '豪', '易', '真', '若', '郑', '灿', '斌', '雅', '嘉', '奕', '一', '昕', '夏', '哲', '鸣', '宏', '蔚', '晶', '恒', '观', '盼', '榕', '锐', '寻', '睿', '晓', '莘', '紫', '渊', '子', '明', '思', '施', '添', '暄', '竹', '灵', '欣', '姚', '汝', '鹏', '玥', '浩', '彦', '德', '孔', '雨', '淑', '诗', '曼', '昔', '寒', '陀', '秦', '杰', '福', '润', '幻', '华', '顺', '笛', '又', '畅', '雁', '语', '瑜', '旭', '立', '尤', '璐', '舞', '言', '逸', '水', '承', '爱', '云', '强', '书', '玉', '丘', '晨', '安', '韵', '钟', '清', '州', '文', '泽', '辰', '瑞', '博']
name = ['茜', '画', '涵', '瓜', '蕊', '鸣', '园', '枫', '嘉', '莹', '文', '洋', '秀', '锐', '官', '佳', '政', '依', '雁', '祝', '杨', '悦', '丙', '璇', '顾', '林', '洁', '莎', '乐', '婉', '庆', '宛', '华', '彤', '昔', '合', '洪', '爱', '淼', '丽', '甜', '巧', '越', '闵', '果', '沛', '晶', '冉', '耿', '霞', '里', '怀', '易', '春', '汕', '远', '昊', '卉', '杰', '辰', '堂', '城', '池', '又', '光', '西', '谷', '豪', '赫', '璐', '松', '梅', '汝', '冰', '溶', '琪', '润', '如', '滢', '位', '醒', '尚', '霖', '泽', '俊', '溪', '鑫', '青', '子', '道', '国', '乙', '玉', '呈', '臻', '伽', '钰', '菡', '孔', '观', '罗', '馨', '惠', '轩', '栾', '翔', '暄', '建', '月', '言', '琴', '寒', '涛', '帆', '元', '晗', '东', '敏', '慧', '昆', '音', '珠', '祥', '苗', '栋', '宜', '君', '萌', '晴', '玥', '曼', '贤', '花', '美', '致', '钟', '源', '彦', '充', '落', '若', '守', '怡', '夏', '晨', '丘', '思', '齐', '屏', '安', '傲', '钗', '哲', '蓝', '天', '菲', '诗', '毅', '慈', '妍', '公']
full_surname = ['赵', '钱', '孙', '李', '周', '吴', '郑', '王', '冯', '陈', '楮', '卫', '蒋', '沈', '韩', '杨', '朱', '秦', '尤', '许', '何', '吕', '施', '张', '孔', '曹', '严', '华', '金', '魏', '陶', '姜', '戚', '谢', '邹', '喻', '柏', '水', '窦', '章', '云', '苏', '潘', '葛', '奚', '范', '彭', '郎', '鲁', '韦', '昌', '马', '苗', '凤', '花', '方', '俞', '任', '袁', '柳', '酆', '鲍', '史', '唐', '费', '廉', '岑', '薛', '雷', '贺', '倪', '汤', '滕', '殷', '罗', '毕', '郝', '邬', '安', '常', '乐', '于', '时', '傅', '皮', '卞', '齐', '康', '伍', '余', '元', '卜', '顾', '孟', '平', '黄', '和', '穆', '萧', '尹', '姚', '邵', '湛', '汪', '祁', '毛', '禹', '狄', '米', '贝', '明', '臧', '计', '伏', '成', '戴', '谈', '宋', '茅', '庞', '熊', '纪', '舒', '屈', '项', '祝', '董', '梁', '杜', '阮', '蓝', '闽', '席', '季', '麻', '强', '贾', '路', '娄', '危', '江', '童', '颜', '郭', '梅', '盛', '林', '刁', '锺', '徐', '丘', '骆', '高', '夏', '蔡', '田', '樊', '胡', '凌', '霍', '虞', '万', '支', '柯', '昝', '管', '卢', '莫', '经', '房', '裘', '缪', '干', '解', '应', '宗', '丁', '宣', '贲', '邓', '郁', '单', '杭', '洪', '包', '诸', '左', '石', '崔', '吉', '钮', '龚', '程', '嵇', '邢', '滑', '裴', '陆', '荣', '翁', '荀', '羊', '於', '惠', '甄', '麹', '家', '封', '芮', '羿', '储', '靳', '汲', '邴', '糜', '松', '井', '段', '富', '巫', '乌', '焦', '巴', '弓', '牧', '隗', '山', '谷', '车', '侯', '宓', '蓬', '全', '郗', '班', '仰', '秋', '仲', '伊', '宫', '宁', '仇', '栾', '暴', '甘', '斜', '厉', '戎', '祖', '武', '符', '刘', '景', '詹', '束', '龙', '叶', '幸', '司', '韶', '郜', '黎', '蓟', '薄', '印', '宿', '白', '怀', '蒲', '邰', '从', '鄂', '索', '咸', '籍', '赖', '卓', '蔺', '屠', '蒙', '池', '乔', '阴', '郁', '胥', '能', '苍', '双', '闻', '莘', '党', '翟', '谭', '贡', '劳', '逄', '姬', '申', '扶', '堵', '冉', '宰', '郦', '雍', '郤', '璩', '桑', '桂', '濮', '牛', '寿', '通', '边', '扈', '燕', '冀', '郏', '浦', '尚', '农', '温', '别', '庄', '晏', '柴', '瞿', '阎', '充', '慕', '连', '茹', '习', '宦', '艾', '鱼', '容', '向', '古', '易', '慎', '戈', '廖', '庾', '终', '暨', '居', '衡', '步', '都', '耿', '满', '弘', '匡', '国', '文', '寇', '广', '禄', '阙', '东', '欧', '殳', '沃', '利', '蔚', '越', '夔', '隆', '师', '巩', '厍', '聂', '晁', '勾', '敖', '融', '冷', '訾', '辛', '阚', '那', '简', '饶', '空', '曾', '毋', '沙', '乜', '养', '鞠', '须', '丰', '巢', '关', '蒯', '相', '查', '后', '荆', '红', '游', '竺', '权', '逑', '盖', '益', '桓', '公', '晋', '楚', '阎', '法', '汝', '鄢', '涂', '钦', '岳', '帅', '缑', '亢', '况', '后', '有', '琴', '商', '牟', '佘', '佴', '伯', '赏', '墨', '哈', '谯', '笪', '年', '爱', '阳', '佟', '万俟', '司马', '上官', '欧阳', '夏侯', '诸葛', '闻人', '东方', '赫连', '皇甫', '尉迟', '公羊', '澹台', '公冶', '宗政', '濮阳', '淳于', '单于', '太叔', '申屠', '公孙', '仲孙', '轩辕', '令狐', '锺离', '宇文', '长孙', '慕容', '鲜于', '闾丘', '司徒', '司空', '丌官', '司寇', '仉', '督', '子车', '颛孙', '端木', '巫马', '公西', '漆雕', '乐正', '壤驷', '公良', '拓拔', '夹谷', '宰父', '谷梁', '段干', '百里', '东郭', '南门', '呼延', '归', '海', '羊舌', '微生', '梁丘', '左丘', '东门', '西门', '南宫']
full_name = ['霖', '秦', '桂', '瑾', '奕', '茜', '永', '博', '贤', '洪', '官', '汝', '志', '舒', '瑶', '贵', '昆', '良', '冉', '闵', '光', '新', '义', '佳', '美', '秋', '婉', '屏', '仪', '全', '淼', '一', '亨', '孔', '勇', '浩', '又', '钧', '生', '珊', '菲', '伦', '可', '荣', '晓', '菡', '力', '露', '堂', '易', '滢', '有', '瑜', '琴', '树', '曼', '天', '言', '赫', '发', '馨', '中', '超', '琳', '琪', '健', '固', '瓜', '陀', '澜', '媛', '尔', '丽', '振', '刚', '尤', '山', '士', '旭', '园', '倩', '致', '栾', '行', '艺', '画', '落', '蔚', '芳', '榕', '河', '晴', '航', '观', '楠', '启', '承', '民', '灵', '鸣', '香', '梅', '嘉', '栋', '安', '林', '婕', '艳', '信', '时', '郑', '菊', '克', '语', '韵', '瑗', '敬', '燕', '傲', '策', '彤', '幻', '东', '溪', '娟', '薇', '星', '进', '枝', '卉', '豪', '灿', '裕', '冠', '呈', '元', '慈', '君', '里', '莉', '凝', '轮', '鹏', '茗', '杰', '雄', '婵', '巧', '保', '芸', '平', '娣', '子', '娜', '霭', '哲', '厚', '位', '纯', '寒', '静', '达', '风', '卿', '润', '腾', '莘', '夏', '渊', '波', '琰', '霄', '雅', '翔', '丹', '菁', '冰', '羽', '翠', '海', '强', '笛', '祥', '罗', '辰', '芝', '娥', '黛', '晶', '国', '岩', '妍', '盛', '道', '淑', '英', '甜', '勤', '如', '姚', '晋', '书', '苗', '鑫', '德', '慧', '添', '文', '玥', '兴', '姬', '震', '华', '雁', '政', '锐', '乙', '怀', '琦', '娴', '蓓', '莺', '诚', '若', '昔', '洋', '姣', '彩', '才', '睿', '合', '池', '莲', '荷', '龙', '仁', '源', '柔', '雨', '明', '卷', '玉', '坚', '成', '融', '花', '昊', '耿', '伟', '奇', '水', '颖', '梦', '朗', '悦', '琬', '思', '乐', '聪', '宛', '锦', '尚', '萍', '莎', '彪', '真', '雪', '舞', '蓝', '诗', '珍', '伽', '婷', '善', '蕊', '宁', '春', '芬', '毓', '充', '红', '炎', '珠', '莹', '环', '斌', '清', '胜', '壮', '彦', '影', '昭', '运', '友', '盼', '敏', '暄', '沛', '汕', '萌', '宜', '竹', '杨', '齐', '琛', '心', '彬', '宗', '越', '涵', '纨', '庆', '晗', '瑞', '醒', '俊', '秀', '谦', '顾', '磊', '泰', '依', '涛', '世', '枫', '臻', '果', '爽', '茂', '轩', '惠', '功', '璧', '丘', '远', '素', '州', '逸', '苑', '钰', '守', '弘', '维', '溶', '凡', '绍', '育', '亚', '富', '伯', '立', '毅', '先', '恒', '寻', '施', '爱', '丙', '荔', '晨', '和', '军', '钟', '璐', '妹', '朋', '邦', '希', '飘', '畅', '亮', '叶', '飞', '武', '咏', '梁', '洁', '公', '馥', '紫', '筠', '青', '祝', '松', '辉', '家', '康', '娅', '之', '翰', '钗', '璇', '以', '欣', '岚', '福', '蓉', '建', '宏', '泽', '利', '江', '兰', '谷', '群', '昕', '帆', '峰', '月', '城', '音', '广', '凤', '夜', '玲', '伊', '欢', '怡', '霞', '云', '琼', '西', '学', '昌', '会', '顺', '眉', '贞']
print(administrative_div_code)
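# A minimal sketch (not in the original script) showing one way the lists above
# could be combined into a random fake name: a surname from full_surname plus
# one or two given-name characters from full_name.
def random_fake_name():
    given = ''.join(choice(full_name) for _ in range(randint(1, 2)))
    return choice(full_surname) + given
print(random_fake_name())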
|
python
|
#! /usr/bin/env python
#S.rodney
# 2011.05.04
"""
Extrapolate the Hsiao SED down to 300 angstroms
to allow the W filter to reach out to z=2.5 smoothly
in the k-correction tables
"""
import os
from numpy import *
from pylab import *
sndataroot = os.environ['SNDATA_ROOT']
MINWAVE = 300 # min wavelength for extrapolation (Angstroms)
MAXWAVE = 18000 # max wavelength for extrapolation (Angstroms)
def mkSALT2_UV2IR( showplots=False ) :
""" do all the extrapolations needed to extend the SALT2
model deep into the UV and the IR (300 to 25000 angstroms)
and out to +100 days after peak
"""
import shutil
indir = os.path.join( sndataroot, 'models/SALT2/SALT2.Guy10_LAMOPEN' )
outdir = os.path.join( sndataroot, 'models/SALT2/SALT2.Guy10_UV2IR' )
indat = os.path.join(indir,'salt2_color_correction.dat')
outdat = os.path.join(outdir,'salt2_color_correction.dat')
if not os.path.isfile( outdat ) : shutil.copy( indat, outdat )
outinfo = os.path.join(outdir,'SALT2.INFO')
fout = open(outinfo,'w')
print >> fout, """
# open rest-lambda range WAAAY beyond nominal 2900-7000 A range.
RESTLAMBDA_RANGE: 300. 25000.
COLORLAW_VERSION: 1
COLORCOR_PARAMS: 2800 7000 4 -0.537186 0.894515 -0.513865 0.0891927
COLOR_OFFSET: 0.0
MAG_OFFSET: 0.27
SEDFLUX_INTERP_OPT: 1 # 1=>linear, 2=>spline
ERRMAP_INTERP_OPT: 1 # 0=snake off; 1=>linear 2=>spline
ERRMAP_KCOR_OPT: 1 # 1/0 => on/off
MAGERR_FLOOR: 0.005 # don't allow smaller error than this
MAGERR_LAMOBS: 0.1 2000 4000 # magerr minlam maxlam
MAGERR_LAMREST: 0.1 100 200 # magerr minlam maxlam
"""
extendSALT2_temp0( salt2dir = 'models/SALT2/SALT2.Guy10_UV2IR',
tailsedfile = 'snsed/Hsiao07.extrap.dat',
wjoinblue = 2800, wjoinred = 8500 ,
wmin = 300, wmax = 25000, tmin=-20, tmax=100,
showplots=showplots )
extendSALT2_temp1( salt2dir = 'models/SALT2/SALT2.Guy10_UV2IR',
wjoinblue = 2000, wjoinred = 8500 ,
wmin = 300, wmax = 25000, tmin=-20, tmax=100,
wstep = 10, showplots=showplots )
for sedfile in ['salt2_lc_dispersion_scaling.dat',
'salt2_lc_relative_covariance_01.dat',
'salt2_lc_relative_variance_0.dat',
'salt2_lc_relative_variance_1.dat',
'salt2_spec_covariance_01.dat',
'salt2_spec_variance_0.dat',
'salt2_spec_variance_1.dat' ] :
indat = os.path.join( indir, sedfile )
outdat = os.path.join( outdir, sedfile )
extrapolatesed_flatline( indat, outdat, showplots=showplots )
def getsed( sedfile = os.path.join( sndataroot, 'snsed/Hsiao07.dat') ) :
d,w,f = loadtxt( sedfile, unpack=True )
#d = d.astype(int)
days = unique( d )
dlist = [ d[ where( d == day ) ] for day in days ]
wlist = [ w[ where( d == day ) ] for day in days ]
flist = [ f[ where( d == day ) ] for day in days ]
return( dlist, wlist, flist )
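# getsed groups the SED file by epoch: dlist[i], wlist[i] and flist[i] are the
# day, wavelength and flux columns for the i-th unique day in the file.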
def plotsed( sedfile= os.path.join( sndataroot, 'snsed/Hsiao07.dat'),
day='all', normalize=False, **kwarg):
dlist,wlist,flist = getsed( sedfile )
#days = unique( dlist )
for i in range( len(wlist) ) :
thisday = dlist[i][0]
#defaults = { 'label':str(thisday) }
#plotarg = dict( kwarg.items() + defaults.items() )
if day!='all' :
if abs(thisday-day)>0.6 : continue
if normalize :
plot( wlist[i], flist[i]/flist[i].max()+thisday, **kwarg )
else :
plot( wlist[i], flist[i], label=str(thisday), **kwarg )
# user_in=raw_input('%i : return to continue'%i)
def extrapolatesed_linear(sedfile, newsedfile, minwave=MINWAVE, maxwave=MAXWAVE, Npt=2,tmin=-20, tmax=100, showplots=False ):
""" use a linear fit of the first/last Npt points on the SED
to extrapolate """
from scipy import interpolate as scint
from scipy import stats
import shutil
dlist,wlist,flist = getsed( sedfile )
dlistnew, wlistnew, flistnew = [],[],[]
fout = open( newsedfile, 'w' )
for i in range( len(dlist) ) :
d,w,f = dlist[i],wlist[i],flist[i]
wavestep = w[1] - w[0]
# blueward linear extrapolation from first N points
wN = w[:Npt]
fN = f[:Npt]
(a,b,rval,pval,stderr)=stats.linregress(wN,fN)
Nbluestep = len( arange( minwave, w[0], wavestep ) )
wextBlue = sorted( [ w[0] -(i+1)*wavestep for i in range(Nbluestep) ] )
fextBlue = array( [ max( 0, a * wave + b ) for wave in wextBlue ] )
        # redward linear extrapolation from the last N points
wN = w[-Npt:]
fN = f[-Npt:]
(a,b,rval,pval,stderr)=stats.linregress(wN,fN)
Nredstep = len( arange( w[-1], maxwave, wavestep ) )
wextRed = sorted( [ w[-1] + (i+1)*wavestep for i in range(Nredstep) ] )
fextRed = array( [ max( 0, a * wave + b ) for wave in wextRed ] )
wnew = append( append( wextBlue, w ), wextRed )
fnew = append( append( fextBlue, f ), fextRed )
# dnew = zeros( len(wnew) ) + d[0]
for i in range( len( wnew ) ) :
print >> fout, "%5.1f %10i %12.7e"%( d[0], wnew[i], fnew[i] )
fout.close()
return( newsedfile )
def extrapolatesed_flatline(sedfile, newsedfile, minwave=MINWAVE, maxwave=MAXWAVE, tmin=-20, tmax=100, showplots=False ):
""" use a linear fit of the first/last Npt points on the SED
to extrapolate """
from scipy import interpolate as scint
from scipy import stats
import shutil
dlist,wlist,flist = getsed( sedfile )
dlistnew, wlistnew, flistnew = [],[],[]
olddaylist = [ round(d) for d in unique(ravel(array(dlist))) ]
fout = open( newsedfile, 'w' )
newdaylist = range( tmin, tmax+1 )
fmed = []
for thisday in newdaylist :
if thisday in olddaylist :
iday = olddaylist.index( thisday )
d,w,f = dlist[iday],wlist[iday],flist[iday]
wavestep = w[1] - w[0]
# blueward flatline extrapolation from first point
Nbluestep = len( arange( minwave, w[0], wavestep ) )
wextBlue = sorted( [ w[0] -(i+1)*wavestep for i in range(Nbluestep) ] )
fextBlue = array( [ f[0] for wave in wextBlue ] )
# redward flatline extrapolation from last point
Nredstep = len( arange( w[-1], maxwave, wavestep ) )
wextRed = sorted( [ w[-1] + (i+1)*wavestep for i in range(Nredstep) ] )
fextRed = array( [ f[-1] for wave in wextRed ] )
wnew = append( append( wextBlue, w ), wextRed )
fnew = append( append( fextBlue, f ), fextRed )
fmed.append( median(f) )
else :
fscaleperday = median( array(fmed[-19:]) / array(fmed[-20:-1]) )
fnew = fnew * fscaleperday**(thisday-thisdaylast)
if showplots :
clf()
plot( w, f, 'r-' )
plot( wnew, fnew, 'k--' )
ax = gca()
rcParams['text.usetex']=False
text(0.95,0.95,'%s\nDay=%i'%(os.path.basename(newsedfile),thisday),ha='right',va='top',transform=ax.transAxes )
draw()
userin = raw_input('return to continue')
for i in range( len( wnew ) ) :
print >> fout, "%5.1f %10i %12.7e"%( thisday, wnew[i], fnew[i] )
thisdaylast = thisday
fout.close()
return( newsedfile )
def extendNon1a():
import glob
import shutil
sedlist = glob.glob("non1a/SED_NOEXTRAP/*.SED")
for sedfile in sedlist :
newsedfile = 'non1a/' + os.path.basename( sedfile )
print("EXTRAPOLATING %s"%sedfile)
extrapolatesed_linear(sedfile, newsedfile, minwave=MINWAVE, maxwave=MAXWAVE, tmin=-20, tmax=100, Npt=2 )
print(" Done with %s.\a\a\a"%sedfile)
def extendSALT2_temp0( salt2dir = 'models/SALT2/SALT2.Guy10_UV2IR',
tailsedfile = 'snsed/Hsiao07.extrap.dat',
wjoinblue = 2800, wjoinred = 8500 ,
wmin = 300, wmax = 25000, tmin=-20, tmax=100,
showplots=False ):
""" extend the salt2 Template_0 model component
by adopting the UV and IR tails from another SED model.
The default is to use SR's extrapolated modification
of the Hsiao 2007 sed model, scaled and joined at the
wjoin wavelengths, and extrapolated out to wmin and wmax.
"""
import shutil
sndataroot = os.environ['SNDATA_ROOT']
salt2dir = os.path.join( sndataroot, salt2dir )
temp0fileIN = os.path.join( salt2dir, '../SALT2.Guy10_LAMOPEN/salt2_template_0.dat' )
temp0fileOUT = os.path.join( salt2dir, 'salt2_template_0.dat' )
temp0dat = getsed( sedfile=temp0fileIN )
tailsedfile = os.path.join( sndataroot, tailsedfile )
taildat = getsed( sedfile=tailsedfile )
dt,wt,ft = loadtxt( tailsedfile, unpack=True )
taildays = unique( dt )
fscale = []
# build up modified template from day -20 to +100
outlines = []
daylist = range( tmin, tmax+1 )
for i in range( len(daylist) ) :
thisday = daylist[i]
if thisday < 50 :
# get the tail SED for this day from the Hsiao template
it = where( taildays == thisday )[0]
dt = taildat[0][it]
wt = taildat[1][it]
ft = taildat[2][it]
# get the SALT2 template SED for this day
d0 = temp0dat[0][i]
w0 = temp0dat[1][i]
f0 = temp0dat[2][i]
print( 'splicing tail onto template for day : %i'%thisday )
i0blue = argmin( abs(w0-wjoinblue) )
itblue = argmin( abs( wt-wjoinblue))
i0red = argmin( abs(w0-wjoinred) )
itred = argmin( abs( wt-wjoinred))
itmin = argmin( abs( wt-wmin))
itmax = argmin( abs( wt-wmax))
bluescale = f0[i0blue]/ft[itblue]
redscale = f0[i0red]/ft[itred]
d0new = dt.tolist()[itmin:itblue] + d0.tolist()[i0blue:i0red] + dt.tolist()[itred:itmax+1]
w0new = wt.tolist()[itmin:itblue] + w0.tolist()[i0blue:i0red] + wt.tolist()[itred:itmax+1]
f0newStage = (bluescale*ft).tolist()[itmin:itblue] + f0.tolist()[i0blue:i0red] + (redscale*ft).tolist()[itred:itmax+1]
# compute the flux scaling decrement from the last epoch (for extrapolation)
if i>1: fscale.append( np.where( np.array(f0newStage)<=0, 0, ( np.array(f0newStage) / np.array(f0new) ) ) )
f0new = f0newStage
# elif thisday < 85 :
# # get the full SED for this day from the Hsiao template
# it = where( taildays == thisday )[0]
# dt = taildat[0][it]
# wt = taildat[1][it]
# ft = taildat[2][it]
# d0new = dt
# w0new = wt
# f0new = ft * (bluescale+redscale)/2. * (fscaleperday**(thisday-50))
else :
print( 'scaling down last template to extrapolate to day : %i'%thisday )
# linearly scale down the last Hsiao template
d0new = zeros( len(dt) ) + thisday
w0new = wt
#f0new = f0new * (bluescale+redscale)/2. * (fscaleperday**(thisday-50))
f0new = np.array(f0new) * np.median( np.array(fscale[-20:]), axis=0 )
#f0new = np.array(f0new) * ( np.median(fscale[-20:])**(thisday-50))
if showplots:
# plot it
print( 'plotting modified template for day : %i'%thisday )
clf()
plot( w0, f0, ls='-',color='b', lw=1)
plot( wt, (bluescale+redscale)/2. * ft, ls=':',color='r', lw=1)
plot( w0new, f0new, ls='--',color='k', lw=2)
ax = gca()
ax.grid()
ax.set_xlim( 500, 13000 )
ax.set_ylim( -0.001, 0.02 )
draw()
raw_input('return to continue')
# append to the list of output data lines
for j in range( len( d0new ) ) :
outlines.append( "%6.2f %12i %12.7e\n"%(
d0new[j], w0new[j], f0new[j] ) )
# write it out to the new template sed .dat file
fout = open( temp0fileOUT, 'w' )
fout.writelines( outlines )
fout.close()
def extendSALT2_temp1( salt2dir = 'models/SALT2/SALT2.Guy10_UV2IR',
wjoinblue = 2000, wjoinred = 8500 ,
wmin = 300, wmax = 25000, tmin=-20, tmax=100,
wstep = 10, showplots=False ):
""" extend the salt2 Template_1 model component
with a flat line at 0 to the blue and to the red.
"""
import shutil
sndataroot = os.environ['SNDATA_ROOT']
salt2dir = os.path.join( sndataroot, salt2dir )
temp1fileIN = os.path.join( salt2dir, '../SALT2.Guy10_LAMOPEN/salt2_template_1.dat' )
temp1fileOUT = os.path.join( salt2dir, 'salt2_template_1.dat' )
temp1dat = getsed( sedfile=temp1fileIN )
# build up modified template from day -20 to +100
outlines = []
daylist = range( tmin, tmax+1 )
f1med = []
for i in range( len(daylist) ) :
thisday = daylist[i]
if thisday < 50 :
print( 'extrapolating with flatline onto template for day : %i'%thisday )
# get the SALT2 template SED for this day
d1 = temp1dat[0][i]
w1 = temp1dat[1][i]
f1 = temp1dat[2][i]
i1blue = argmin( abs(w1-wjoinblue) )
i1red = argmin( abs(w1-wjoinred) )
Nblue = (wjoinblue-wmin )/wstep + 1
Nred = (wmax -wjoinred )/wstep + 1
d1new = (ones(Nblue)*thisday).tolist() + d1.tolist()[i1blue+1:i1red] + (ones(Nred)*thisday).tolist()
w1new = range(wmin,wmin+Nblue*wstep,wstep) + w1.tolist()[i1blue+1:i1red] + range(wjoinred,wjoinred+Nred*wstep,wstep)
f1new = array( zeros(Nblue).tolist() + f1.tolist()[i1blue+1:i1red] + zeros(Nred).tolist() )
f1med.append( median( f1 ) )
else :
print( 'blind extrapolation for day : %i'%thisday )
d1new = zeros( len(d1) ) + thisday
f1scaleperday = median(array(f1med[-19:]) / array(f1med[-20:-1]) )
f1new = array( zeros(Nblue).tolist() + f1.tolist()[i1blue+1:i1red-1] + zeros(Nred).tolist() )
f1new = f1new * f1scaleperday**(thisday-50)
if showplots and thisday>45:
# plot it
clf()
plot( w1, f1, ls='-',color='r', lw=1)
plot( w1new, f1new, ls='--',color='k', lw=2)
draw()
raw_input('return to continue')
# append to the list of output data lines
for j in range( len( d1new ) ) :
outlines.append( "%6.2f %12i %12.7e\n"%(
d1new[j], w1new[j], f1new[j] ) )
# write it out to the new template sed .dat file
fout = open( temp1fileOUT, 'w' )
fout.writelines( outlines )
fout.close()
def extendSALT2_flatline( salt2dir = 'models/SALT2/SALT2.Guy10_UV2IR',
wjoinblue = 2000, wjoinred = 8500 ,
wmin = 300, wmax = 18000, tmin=-20, tmax=100,
wstep = 10, showplots=False ):
""" extrapolate the *lc* and *spec* .dat files for SALT2
using a flatline to the blue and red """
sndataroot = os.environ['SNDATA_ROOT']
salt2dir = os.path.join( sndataroot, salt2dir )
filelist = ['salt2_lc_dispersion_scaling.dat',
'salt2_lc_relative_covariance_01.dat',
'salt2_lc_relative_variance_0.dat',
'salt2_lc_relative_variance_1.dat',
'salt2_spec_covariance_01.dat',
'salt2_spec_variance_0.dat',
'salt2_spec_variance_1.dat']
#for filename in ['salt2_lc_dispersion_scaling.dat']:
#for filename in ['salt2_lc_relative_covariance_01.dat']:
for filename in filelist :
infile = os.path.join( salt2dir, 'NO_SED_EXTRAP/' + filename )
outfile = os.path.join( salt2dir, filename )
newsedfile = extrapolatesed_flatline( infile, outfile, minwave=wmin, maxwave=wmax, tmin=tmin, tmax=tmax )
# plot it
if showplots:
#for d in range(-20,50) :
for d in [-10,-5,0,5,10,15,20,25,30,35,40,45,50,60,70,80,90] :
clf()
plotsed( infile, day=d, ls='-',color='r', lw=1)
plotsed( outfile,day=d, ls='--',color='k', lw=2)
print( '%s : day %i'%(filename,d) )
draw()
# raw_input('%s : day %i. return to continue'%(filename,d))
|
python
|
# coding=utf8
# Copyright 2018 JDCLOUD.COM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE: This class is auto generated by the jdcloud code generator program.
class ProbeInfo(object):
def __init__(self, name=None, privateIp=None, probeResult=None, probeStatus=None, publicIp=None, targetStatus=None, uuid=None, vmStatus=None):
"""
        :param name: (Optional) hostname of the host the probe source runs on
        :param privateIp: (Optional) private IP of the probe source
        :param probeResult: (Optional) probe result; null when absent (shown as "--" in the frontend), 1: probe OK, 2: probe failed, 3: probe timed out
        :param probeStatus: (Optional) agent status, 1: normal, 2: abnormal
        :param publicIp: (Optional) public IP of the probe source
        :param targetStatus: (Optional) status of the probe target, 1: normal, 2: abnormal (probe failed or timed out); null when absent (shown as "--" in the frontend)
        :param uuid: (Optional) uuid of the probe source host
        :param vmStatus: (Optional) status of the cloud VM; "unExist" when the VM cannot be found
"""
self.name = name
self.privateIp = privateIp
self.probeResult = probeResult
self.probeStatus = probeStatus
self.publicIp = publicIp
self.targetStatus = targetStatus
self.uuid = uuid
self.vmStatus = vmStatus
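# A minimal usage sketch (field values below are made up for illustration):
if __name__ == "__main__":
    probe = ProbeInfo(name="probe-host-1", privateIp="10.0.0.12", probeResult=1,
                      probeStatus=1, publicIp=None, targetStatus=1,
                      uuid="example-uuid", vmStatus="running")
    print(probe.name, probe.probeResult)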
|
python
|
# === Start Python 2/3 compatibility
from __future__ import absolute_import, division, print_function, unicode_literals
from future.builtins import * # noqa pylint: disable=W0401, W0614
from future.builtins.disabled import * # noqa pylint: disable=W0401, W0614
# == End Python 2/3 compatibility
import mmap
import os
import posix_ipc
import pytest
import struct
import tempfile
from kotekan import runner, shared_memory_buffer
# use tempfile creation to get exclusive random strings
useless_file = tempfile.NamedTemporaryFile()
fname = "calBuffer_" + os.path.split(useless_file.name)[-1]
page_size = 4096
global_params = {
"num_elements": 4,
"num_ev": 2,
"total_frames": 10,
"cadence": 5.0,
"freq": [3, 777, 554],
"mode": "default",
"dataset_manager": {"use_dataset_broker": False},
}
params_fakevis = {
"freq_ids": global_params["freq"],
"num_frames": global_params["total_frames"],
"mode": global_params["mode"],
"cadence": global_params["cadence"],
}
params_fakevis_small = {
"freq_ids": global_params["freq"],
"num_frames": global_params["total_frames"] - 4,
"mode": global_params["mode"],
"cadence": global_params["cadence"],
}
params_fakevis_large = {
"freq_ids": global_params["freq"],
"num_frames": global_params["total_frames"] + 1,
"mode": global_params["mode"],
"cadence": global_params["cadence"],
}
global num_frames
params_writer_stage = {"num_samples": global_params["total_frames"], "name": fname}
size_of_uint64 = 8
num_structural_params = 6
pos_access_record = size_of_uint64 * num_structural_params
pos_ring_buffer = pos_access_record + size_of_uint64 * params_writer_stage[
"num_samples"
] * len(global_params["freq"])
@pytest.fixture(
scope="module", params=[params_fakevis, params_fakevis_small, params_fakevis_large]
)
def vis_data(tmpdir_factory, request):
global num_frames
# keeping all the data this test produced here (probably do not need it)
# using FakeVisBuffer to produce fake data
fakevis_buffer = runner.FakeVisBuffer(**request.param)
num_frames = request.param["num_frames"]
# KotekanStageTester is used to run kotekan with my config
test = runner.KotekanStageTester(
stage_type="VisSharedMemWriter",
stage_config=params_writer_stage,
buffers_in=fakevis_buffer,
buffers_out=None,
global_config=global_params,
)
test.run()
@pytest.fixture(scope="module")
def semaphore(vis_data):
sem = posix_ipc.Semaphore(fname)
yield sem
@pytest.fixture(scope="module")
def memory_map_buf(vis_data):
memory = posix_ipc.SharedMemory(fname)
mapfile = mmap.mmap(memory.fd, memory.size, prot=mmap.PROT_READ)
os.close(memory.fd)
yield mapfile
def test_structured_data(semaphore, memory_map_buf):
semaphore.acquire()
## Test Structured Data
num_writes = struct.unpack("<Q", memory_map_buf.read(8))[0]
num_time = struct.unpack("<Q", memory_map_buf.read(8))[0]
num_freq = struct.unpack("<Q", memory_map_buf.read(8))[0]
size_frame = struct.unpack("<Q", memory_map_buf.read(8))[0]
size_frame_meta = struct.unpack("<Q", memory_map_buf.read(8))[0]
size_frame_data = struct.unpack("<Q", memory_map_buf.read(8))[0]
assert num_writes == 0
assert num_time == params_writer_stage["num_samples"]
assert num_freq == len(params_fakevis["freq_ids"])
assert size_frame == page_size
print("TODO: test if frame metadata size should be {}".format(size_frame_meta))
print("TODO: test if frame data size should be {}".format(size_frame_data))
semaphore.release()
def test_access_record(semaphore, memory_map_buf):
global num_frames
semaphore.acquire()
num_time = params_writer_stage["num_samples"]
num_freq = len(global_params["freq"])
memory_map_buf.seek(pos_access_record)
fpga_seq = 0
if num_time == num_frames:
# if ring buffer is the same size as the number of frames
for t in range(num_time):
for f in range(num_freq):
access_record = struct.unpack("q", memory_map_buf.read(size_of_uint64))[
0
]
assert access_record == fpga_seq
fpga_seq += 800e6 / 2048 * global_params["cadence"]
elif num_time > num_frames:
# if ring buffer is larger than the number of frames
for t in range(num_time):
for f in range(num_freq):
access_record = struct.unpack("q", memory_map_buf.read(size_of_uint64))[
0
]
assert access_record == fpga_seq
if t + 1 < num_frames:
fpga_seq += 800e6 / 2048 * global_params["cadence"]
else:
fpga_seq = -1
elif num_time < num_frames:
# if ring buffer is smaller than number of frames
fpga_seqs = []
fpga_seqs.append(fpga_seq)
for t in range(1, num_frames):
fpga_seq += 800e6 / 2048 * global_params["cadence"]
fpga_seqs.append(fpga_seq)
for t in range(num_time):
for f in range(num_freq):
access_record = struct.unpack("q", memory_map_buf.read(size_of_uint64))[
0
]
if t == 0:
assert access_record == fpga_seqs[-1]
else:
assert access_record == fpga_seqs[t]
semaphore.release()
|
python
|
__version__ = "1.5.0"
default_app_config = "oauth2_provider.apps.DOTConfig"
|
python
|
import os, requests, json, redis
from flask import Flask
from openarticlegauge import config, licenses
from flask.ext.login import LoginManager, current_user
login_manager = LoginManager()
def create_app():
app = Flask(__name__)
configure_app(app)
if app.config['INITIALISE_INDEX']: initialise_index(app)
prep_redis(app)
setup_error_email(app)
login_manager.setup_app(app)
return app
def configure_app(app):
app.config.from_object(config)
# parent directory
here = os.path.dirname(os.path.abspath( __file__ ))
config_path = os.path.join(os.path.dirname(here), 'app.cfg') # this file will be in the package dir, app.cfg is at the root of the repo
if os.path.exists(config_path):
app.config.from_pyfile(config_path)
def prep_redis(app):
# wipe the redis temp cache (not the non-temp one)
client = redis.StrictRedis(host=app.config['REDIS_CACHE_HOST'], port=app.config['REDIS_CACHE_PORT'], db=app.config['REDIS_CACHE_DB'])
client.flushdb()
def initialise_index(app):
# refreshing the mappings and making all known licenses available
# in the index are split out since the latter can take quite a while
# but refreshing the mappings has to be done every time dao.DomainObject.delete_all() is called
refresh_mappings(app)
put_licenses_in_index(app)
def get_index_path(app):
i = str(app.config['ELASTIC_SEARCH_HOST']).rstrip('/')
i += '/' + app.config['ELASTIC_SEARCH_DB']
return i
def refresh_mappings(app):
i = get_index_path(app)
mappings = app.config["MAPPINGS"]
for key, mapping in mappings.iteritems():
im = i + '/' + key + '/_mapping'
exists = requests.get(im)
if exists.status_code != 200:
ri = requests.post(i)
r = requests.put(im, json.dumps(mapping))
print key, r.status_code
def put_licenses_in_index(app):
i = get_index_path(app)
# put the currently available licences into the licence index
for l in licenses.LICENSES:
r = requests.post(i + '/license/' + l, json.dumps(licenses.LICENSES[l]))
def setup_error_email(app):
ADMINS = app.config.get('ADMINS', '')
if not app.debug and ADMINS:
import logging
from logging.handlers import SMTPHandler
mail_handler = SMTPHandler('127.0.0.1',
'[email protected]',
ADMINS, 'error')
mail_handler.setLevel(logging.ERROR)
app.logger.addHandler(mail_handler)
app = create_app()
|
python
|
#!/usr/bin/env python3
import logging
import sys
import tempfile
import threading
import time
import unittest
import warnings
from os.path import dirname, realpath
sys.path.append(dirname(dirname(dirname(realpath(__file__)))))
from logger.utils import formats # noqa: E402
from logger.readers.text_file_reader import TextFileReader # noqa: E402
SAMPLE_DATA = {
'f1': ['f1 line 1',
'f1 line 2',
'f1 line 3'],
'f2': ['f2 line 1',
'f2 line 2',
'f2 line 3'],
'f3': ['f3 line 1',
'f3 line 2',
'f3 line 3']
}
def create_file(filename, lines, interval=0, pre_sleep_interval=0):
time.sleep(pre_sleep_interval)
logging.info('creating file "%s"', filename)
f = open(filename, 'w')
for line in lines:
time.sleep(interval)
f.write(line + '\n')
f.flush()
f.close()
class TestTextFileReader(unittest.TestCase):
############################
# To suppress resource warnings about unclosed files
def setUp(self):
warnings.simplefilter("ignore", ResourceWarning)
############################
def test_all_files(self):
with tempfile.TemporaryDirectory() as tmpdirname:
logging.info('created temporary directory "%s"', tmpdirname)
expected_lines = []
for f in sorted(SAMPLE_DATA):
create_file(tmpdirname + '/' + f, SAMPLE_DATA[f])
expected_lines.extend(SAMPLE_DATA[f])
reader = TextFileReader(tmpdirname + '/f*')
for line in expected_lines:
self.assertEqual(line, reader.read())
self.assertEqual(None, reader.read())
############################
def test_tail_false(self):
# Don't specify 'tail' and expect there to be no data
with tempfile.TemporaryDirectory() as tmpdirname:
logging.info('created temporary directory "%s"', tmpdirname)
# Create a file slowly, one line at a time
target = 'f1'
tmpfilename = tmpdirname + '/' + target
threading.Thread(target=create_file,
args=(tmpfilename, SAMPLE_DATA[target], 0.25)).start()
time.sleep(0.05) # let the thread get started
# Read, and wait for lines to come
reader = TextFileReader(tmpfilename, tail=False)
self.assertEqual(None, reader.read())
############################
def test_tail_true(self):
# Do the same thing as test_tail_false, but specify tail=True. We should
# now get all the lines that are eventually written to the file.
with tempfile.TemporaryDirectory() as tmpdirname:
logging.info('created temporary directory "%s"', tmpdirname)
# Create a file slowly, one line at a time
target = 'f1'
tmpfilename = tmpdirname + '/' + target
threading.Thread(target=create_file,
args=(tmpfilename, SAMPLE_DATA[target], 0.25)).start()
time.sleep(0.05) # let the thread get started
# Read, and wait for lines to come
reader = TextFileReader(tmpfilename, tail=True)
for line in SAMPLE_DATA[target]:
self.assertEqual(line, reader.read())
############################
def test_refresh_file_spec(self):
# Delay creation of the file, but tell reader to keep checking for
# new files.
with tempfile.TemporaryDirectory() as tmpdirname:
logging.info('created temporary directory "%s"', tmpdirname)
# Create a file slowly, one line at a time, and delay even
# creating the file so that when our TextFileReader starts, its
# file_spec matches nothing.
target = 'f1'
tmpfilename = tmpdirname + '/' + target
threading.Thread(target=create_file,
args=(tmpfilename, SAMPLE_DATA[target],
0.25, 0.5)).start()
time.sleep(0.05) # let the thread get started
with self.assertLogs(logging.getLogger(), logging.WARNING):
reader = TextFileReader(tmpfilename, refresh_file_spec=True)
for line in SAMPLE_DATA[target]:
self.assertEqual(line, reader.read())
############################
# Check that reader output_formats work the way we expect
def test_formats(self):
reader = TextFileReader(file_spec=None)
self.assertEqual(reader.output_format(), formats.Text)
self.assertEqual(reader.output_format(formats.NMEA), formats.NMEA)
self.assertEqual(reader.output_format(), formats.NMEA)
with self.assertRaises(TypeError):
reader.output_format('not a format')
############################
# Check some simple cases, forward movement only.
def test_seek_forward(self):
with tempfile.TemporaryDirectory() as tmpdirname:
logging.info('created temporary directory "%s"', tmpdirname)
expected_lines = []
for f in sorted(SAMPLE_DATA):
create_file(tmpdirname + '/' + f, SAMPLE_DATA[f])
expected_lines.extend(SAMPLE_DATA[f])
reader = TextFileReader(tmpdirname + '/f*')
self.assertEqual(2, reader.seek(2, 'start'))
self.assertEqual(expected_lines[2], reader.read())
self.assertEqual(expected_lines[3], reader.read())
self.assertEqual(9, reader.seek(0, 'end'))
self.assertEqual(None, reader.read())
self.assertEqual(1, reader.seek(1, 'start'))
self.assertEqual(expected_lines[1], reader.read())
self.assertEqual(3, reader.seek(1, 'current'))
self.assertEqual(expected_lines[3], reader.read())
self.assertEqual(4, reader.seek(0, 'current'))
self.assertEqual(expected_lines[4], reader.read())
self.assertEqual(7, reader.seek(2, 'current'))
self.assertEqual(expected_lines[7], reader.read())
self.assertEqual(expected_lines[8], reader.read())
self.assertEqual(None, reader.read())
############################
# Check special cases for origin.
def test_seek_origin(self):
with tempfile.TemporaryDirectory() as tmpdirname:
logging.info('created temporary directory "%s"', tmpdirname)
expected_lines = []
for f in sorted(SAMPLE_DATA):
create_file(tmpdirname + '/' + f, SAMPLE_DATA[f])
expected_lines.extend(SAMPLE_DATA[f])
reader = TextFileReader(tmpdirname + '/f*')
with self.assertRaises(ValueError):
reader.seek(0, 'xyz')
# Move to middle of file (so current position isn't start or end).
reader.seek(4, 'start')
# Check that seek with no origin is relative to the current location.
self.assertEqual(6, reader.seek(2))
self.assertEqual(expected_lines[6], reader.read())
############################
# Check seek with offset < 0 and origin = 'current'
def test_seek_current_negative_offset(self):
with tempfile.TemporaryDirectory() as tmpdirname:
logging.info('created temporary directory "%s"', tmpdirname)
expected_lines = []
for f in sorted(SAMPLE_DATA):
create_file(tmpdirname + '/' + f, SAMPLE_DATA[f])
expected_lines.extend(SAMPLE_DATA[f])
reader = TextFileReader(tmpdirname + '/f*')
for i in range(5):
reader.read()
self.assertEqual(3, reader.seek(-2, 'current'))
self.assertEqual(expected_lines[3], reader.read())
# Now try a bigger offset, so we have to go back a couple files.
reader.seek(8, 'start')
self.assertEqual(2, reader.seek(-6, 'current'))
self.assertEqual(expected_lines[2], reader.read())
############################
# Check seek with offset < 0 and origin = 'end'
def test_seek_end_negative_offset(self):
with tempfile.TemporaryDirectory() as tmpdirname:
logging.info('created temporary directory "%s"', tmpdirname)
expected_lines = []
for f in sorted(SAMPLE_DATA):
create_file(tmpdirname + '/' + f, SAMPLE_DATA[f])
expected_lines.extend(SAMPLE_DATA[f])
reader = TextFileReader(tmpdirname + '/f*')
self.assertEqual(7, reader.seek(-2, 'end'))
self.assertEqual(expected_lines[7], reader.read())
self.assertEqual(9, reader.seek(0, 'end'))
self.assertEqual(None, reader.read())
# Now try a bigger offset, so we have to go back a couple files.
self.assertEqual(2, reader.seek(-7, 'end'))
self.assertEqual(expected_lines[2], reader.read())
############################
# Check that seek with negative offset larger than current position
# results in a ValueError.
def test_seek_before_beginning(self):
with tempfile.TemporaryDirectory() as tmpdirname:
logging.info('created temporary directory "%s"', tmpdirname)
expected_lines = []
for f in sorted(SAMPLE_DATA):
create_file(tmpdirname + '/' + f, SAMPLE_DATA[f])
expected_lines.extend(SAMPLE_DATA[f])
reader = TextFileReader(tmpdirname + '/f*')
with self.assertRaises(ValueError):
reader.seek(-1, 'current')
with self.assertRaises(ValueError):
reader.seek(-10, 'end')
# check seek still works for in-bounds value
self.assertEqual(2, reader.seek(-7, 'end'))
self.assertEqual(expected_lines[2], reader.read())
############################
# Check that after an error due to a seek beyond the beginning,
# the state is unchanged, i.e. read() returns the record it would
# have before the seek.
def test_seek_position_unchanged_after_error(self):
with tempfile.TemporaryDirectory() as tmpdirname:
logging.info('created temporary directory "%s"', tmpdirname)
expected_lines = []
for f in sorted(SAMPLE_DATA):
create_file(tmpdirname + '/' + f, SAMPLE_DATA[f])
expected_lines.extend(SAMPLE_DATA[f])
reader = TextFileReader(tmpdirname + '/f*')
reader.seek(5, 'start')
with self.assertRaises(ValueError):
reader.seek(-8, 'current')
self.assertEqual(expected_lines[5], reader.read())
reader.seek(2, 'start')
with self.assertRaises(ValueError):
reader.seek(-1, 'start')
self.assertEqual(expected_lines[2], reader.read())
reader.seek(7, 'start')
with self.assertRaises(ValueError):
reader.seek(-10, 'end')
self.assertEqual(expected_lines[7], reader.read())
############################
# Check a sequence of seeks of different types.
def test_seek_multiple(self):
with tempfile.TemporaryDirectory() as tmpdirname:
logging.info('created temporary directory "%s"', tmpdirname)
expected_lines = []
for f in sorted(SAMPLE_DATA):
create_file(tmpdirname + '/' + f, SAMPLE_DATA[f])
expected_lines.extend(SAMPLE_DATA[f])
reader = TextFileReader(tmpdirname + '/f*')
self.assertEqual(8, reader.seek(8, 'start'))
self.assertEqual(expected_lines[8], reader.read())
self.assertEqual(5, reader.seek(-4, 'current'))
self.assertEqual(expected_lines[5], reader.read())
self.assertEqual(7, reader.seek(-2, 'end'))
self.assertEqual(expected_lines[7], reader.read())
self.assertEqual(expected_lines[8], reader.read())
self.assertEqual(None, reader.read())
self.assertEqual(2, reader.seek(-7, 'end'))
self.assertEqual(expected_lines[2], reader.read())
self.assertEqual(8, reader.seek(5, 'current'))
self.assertEqual(expected_lines[8], reader.read())
############################
# Check that read_range() returns the expected list of records
# for various values of start and stop.
def test_read_range(self):
with tempfile.TemporaryDirectory() as tmpdirname:
logging.info('created temporary directory "%s"', tmpdirname)
expected_lines = []
for f in sorted(SAMPLE_DATA):
create_file(tmpdirname + '/' + f, SAMPLE_DATA[f])
expected_lines.extend(SAMPLE_DATA[f])
reader = TextFileReader(tmpdirname + '/f*')
self.assertEqual(expected_lines[1:4], reader.read_range(1, 4))
self.assertEqual(expected_lines[0:9], reader.read_range(0, 9))
self.assertEqual(expected_lines[2:3], reader.read_range(start=2, stop=3))
self.assertEqual(expected_lines[2:], reader.read_range(start=2))
self.assertEqual(expected_lines[:3], reader.read_range(stop=3))
self.assertEqual(expected_lines[2:], reader.read_range(start=2, stop=40))
############################
# Check that after calling read_range(), the next read() returns
# the first record after the range.
def test_position_after_read_range(self):
with tempfile.TemporaryDirectory() as tmpdirname:
logging.info('created temporary directory "%s"', tmpdirname)
expected_lines = []
for f in sorted(SAMPLE_DATA):
create_file(tmpdirname + '/' + f, SAMPLE_DATA[f])
expected_lines.extend(SAMPLE_DATA[f])
reader = TextFileReader(tmpdirname + '/f*')
reader.read_range(1, 6)
self.assertEqual(expected_lines[6], reader.read())
reader.read_range(0, 4)
self.assertEqual(expected_lines[4], reader.read())
reader.read_range(7, 9)
self.assertEqual(None, reader.read())
############################
# Check that after reading some records, read_range() still works.
def test_read_range_after_read(self):
with tempfile.TemporaryDirectory() as tmpdirname:
logging.info('created temporary directory "%s"', tmpdirname)
expected_lines = []
for f in sorted(SAMPLE_DATA):
create_file(tmpdirname + '/' + f, SAMPLE_DATA[f])
expected_lines.extend(SAMPLE_DATA[f])
reader = TextFileReader(tmpdirname + '/f*')
for i in range(5):
reader.read()
self.assertEqual(expected_lines[1:4], reader.read_range(1, 4))
if __name__ == '__main__':
unittest.main()
|
python
|
# This file is part of Indico.
# Copyright (C) 2002 - 2021 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from __future__ import unicode_literals
from flask import session
from indico.core.db import db
def get_attached_folders(linked_object, include_empty=True, include_hidden=True, preload_event=False):
"""Return a list of all the folders linked to an object.
:param linked_object: The object whose attachments are to be returned
:param include_empty: Whether to return empty folders as well.
:param include_hidden: Include folders that the user can't see
:param preload_event: in the process, preload all objects tied to the
corresponding event and keep them in cache
"""
from indico.modules.attachments.models.folders import AttachmentFolder
folders = AttachmentFolder.get_for_linked_object(linked_object, preload_event=preload_event)
if not include_hidden:
folders = [f for f in folders if f.can_view(session.user)]
if not include_empty:
folders = [f for f in folders if f.attachments]
return folders
def get_attached_items(linked_object, include_empty=True, include_hidden=True, preload_event=False):
"""
Return a structured representation of all the attachments linked
to an object.
:param linked_object: The object whose attachments are to be returned
:param include_empty: Whether to return empty folders as well.
:param include_hidden: Include folders that the user can't see
:param preload_event: in the process, preload all objects tied to the
corresponding event and keep them in cache
"""
folders = get_attached_folders(linked_object, include_empty=include_empty,
include_hidden=include_hidden, preload_event=preload_event)
if not folders:
return {}
# the default folder is never shown as a folder. instead, its
# files are shown on the same level as other folders
files = folders.pop(0).attachments if folders[0].is_default else []
if not files and not folders:
return {}
return {
'folders': folders,
'files': files
}
def get_nested_attached_items(obj):
"""
Return a structured representation of all attachments linked to an object
and all its nested objects.
:param obj: A :class:`Event`, :class:`Session`, :class:`Contribution`
or :class:`SubContribution` object.
"""
attachments = get_attached_items(obj, include_empty=False, include_hidden=False)
nested_objects = []
if isinstance(obj, db.m.Event):
nested_objects = obj.sessions + obj.contributions
elif isinstance(obj, db.m.Session):
nested_objects = obj.contributions
elif isinstance(obj, db.m.Contribution):
nested_objects = obj.subcontributions
if nested_objects:
children = filter(None, map(get_nested_attached_items, nested_objects))
if children:
attachments['children'] = children
if attachments:
attachments['object'] = obj
return attachments
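# Illustrative shape of what get_nested_attached_items returns (it may be {} when
# nothing is attached at any level):
# {
#     'object': obj,                          # added whenever the dict is non-empty
#     'folders': [<AttachmentFolder>, ...],   # non-default folders
#     'files': [<Attachment>, ...],           # files of the default folder
#     'children': [ ... same structure for nested sessions/contributions ... ],
# }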
def can_manage_attachments(obj, user):
"""Check if a user can manage attachments for the object."""
if not user:
return False
if obj.can_manage(user):
return True
if isinstance(obj, db.m.Event) and obj.can_manage(user, 'submit'):
return True
if isinstance(obj, db.m.Contribution) and obj.can_manage(user, 'submit'):
return True
if isinstance(obj, db.m.SubContribution):
return can_manage_attachments(obj.contribution, user)
return False
def get_default_folder_names():
return [
'Agenda',
'Document',
'Drawings',
'List of Actions',
'Minutes',
'Notes',
'Paper',
'Pictures',
'Poster',
'Proceedings',
'Recording',
'Slides',
'Summary',
'Text',
'Video',
'Webcast',
]
def get_event(linked_object):
from indico.modules.categories import Category
if isinstance(linked_object, Category):
return None
else:
return linked_object.event
|
python
|
import math
import time
import copy
import random
import numpy as np
import eugene as eu
import pdb
#Parent Sensor
class VABSensor( object ):
def __init__(self, dynamic_range):
self._init_value = None
self._range = dynamic_range
def get_range(self):
return self._range
###############################################
###############################################
#Particular Sensors
#do we want Virtual and Real separate OR
# Virtual next to Real?
class VABTimeSensor(VABSensor):
def read(self, sys):
return sys.update_time()
class VABConcentrationSensor(VABSensor):
def __init__(self, dynamic_range, noise_stdev=0, proportional=False):
self._range = dynamic_range
self._noise_stdev = noise_stdev
self._proportional = proportional
def read(self, sys):
if len(self._range) != 2:
raise ValueError('No sensor range specified.')
else:
if self._noise_stdev == 0:
concentration = sys._x
elif self._proportional:
x = sys._x
noise = np.random.normal(0, self._noise_stdev * x)
concentration = x + noise
else:
concentration = sys._x + np.random.normal(0,
self._noise_stdev)
if concentration > self._range[1] or concentration < self._range[0]:
return 'outofrange'
else:
return concentration
class VABVoltageSensor(VABSensor):
    def __init__(self, dynamic_range, noise_stdev=0, proportional=False):
        self._range = dynamic_range
        self._noise_stdev = noise_stdev
        self._proportional = proportional
    def read(self, sys):
        if len(self._range) != 2:  # expects a (low, high) pair, e.g. (0, 10)
            raise ValueError('No sensor range specified.')
        else:
            # start with what our noise looks like.
            if self._noise_stdev == 0:
                voltage = sys._v
            elif self._proportional:  # if the noise is proportional...
                v = sys._v
                noise = np.random.normal(0, self._noise_stdev * v)
                voltage = v + noise
            else:
                voltage = sys._v + np.random.normal(0, self._noise_stdev)
            # now check that the voltage falls within sensor range
            if voltage > self._range[1] or voltage < self._range[0]:
                return 'Error, that\'s not in range!'
            else:
                return voltage
class PopulationSensor(VABSensor):
def __init__(self, dynamic_range, noise_stdev=0, proportional=False,
skew=0):
self._range = dynamic_range
self._noise_stdev = noise_stdev
self._proportional = proportional
self._skew = skew
def read(self, sys):
if len(self._range) != 2:
raise ValueError('No sensor range specified.')
else:
if self._noise_stdev == 0:
population = sys._x
elif self._proportional:
x = sys._x
if self._skew > 0:
s = self._noise_stdev * x / np.sqrt(1 - (2. * self._skew**2) /
(np.pi * (1. - self._skew**2)))
noise = eu.probability.SampleSkewNorm(0, s, self._skew)
population = x + noise
else:
x = sys._x
noise = np.random.normal(0, self._noise_stdev * x)
population = x + noise
else:
x = sys._x
if self._skew > 0:
s = self._noise_stdev / np.sqrt(1 - (2. * self._skew**2) /
(np.pi * (1. - self._skew**2)))
noise = eu.probability.SampleSkewNorm(0, s, self._skew)
population = x + noise
else:
population = sys._x + np.random.normal(0, self._noise_stdev)
if population > self._range[1] or population < self._range[0]:
return 'out of range'
else:
return population
class CCVoltageSensor(VABSensor):
""" For use with simulated chaotic circuits. The passed value deriv
indicates which derivative of voltage the sensor should measure.
"""
def __init__(self, dynamic_range, deriv, noise_stdev=0, proportional=False):
self._range = dynamic_range
self._noise_stdev = noise_stdev
self._proportional = proportional
self._deriv = deriv
def read(self, sys):
if len(self._range) != 2:
raise ValueError('No sensor range specified.')
else:
if self._noise_stdev == 0:
out = sys._x[self._deriv]
elif self._proportional:
x = sys._x[self._deriv]
noise = np.random.normal(0, self._noise_stdev * x)
out = x + noise
else:
out = sys._x[self._deriv] + np.random.normal(0,
self._noise_stdev)
if out > self._range[1] or out < self._range[0]:
return 'outofrange'
else:
return out
class LorenzSensor(VABSensor):
def __init__(self, variable, dynamic_range, noise_stdev=0, proportional=False,
skew=0):
        if variable not in set(['x', 'y', 'z']):
            raise ValueError('Incorrect variable specification. Must be string "x", "y", or "z".')
else:
self._variable = variable
self._range = dynamic_range
self._noise_stdev = noise_stdev
self._proportional = proportional
self._skew = skew
def read(self, sys):
if len(self._range) != 2:
raise ValueError('No sensor range specified.')
else:
            # use getattr instead of exec: exec() cannot bind new local
            # variables inside a function in Python 3
            if self._noise_stdev == 0:
                val = getattr(sys, '_' + self._variable)
            elif self._proportional:
                temp = getattr(sys, '_' + self._variable)
                if self._skew > 0:
                    noise = eu.probability.SampleSkewNorm(0, self._noise_stdev *
                                                          temp, self._skew)
                    val = temp + noise
                else:
                    noise = np.random.normal(0, self._noise_stdev * temp)
                    val = temp + noise
            else:
                temp = getattr(sys, '_' + self._variable)
                if self._skew > 0:
                    noise = eu.probability.SampleSkewNorm(0, self._noise_stdev,
                                                          self._skew)
                    val = temp + noise
                else:
                    val = temp + np.random.normal(0, self._noise_stdev)
if val > self._range[1] or val < self._range[0]:
return 'out of range'
else:
return val
class LotkaVolterra2DSensor(VABSensor):
def __init__(self, variable, dynamic_range, noise_stdev=0, proportional=False,
skew=0):
        if variable not in set([1, 2]):
            raise ValueError('Incorrect variable specification. Must be 1 or 2.')
else:
self._variable = variable
self._range = dynamic_range
self._noise_stdev = noise_stdev
self._proportional = proportional
self._skew = skew
def read(self, sys):
if len(self._range) != 2:
raise ValueError('No sensor range specified.')
else:
if self._noise_stdev == 0:
if self._variable == 1:
val = sys._x1
                elif self._variable == 2:
val = sys._x2
else:
                    raise ValueError('Incorrect variable specification. Must be 1 or 2.')
elif self._proportional:
if self._variable == 1:
temp = sys._x1
elif self._variable ==2:
temp = sys._x2
else:
                    raise ValueError('Incorrect variable specification. Must be 1 or 2.')
if self._skew > 0:
                    s = self._noise_stdev * temp / np.sqrt(1 - (2. * self._skew**2) /
                                                           (np.pi * (1. - self._skew**2)))
noise = eu.probability.SampleSkewNorm(0, s, self._skew)
val = temp + noise
else:
noise = np.random.normal(0, self._noise_stdev * temp)
val = temp + noise
else:
if self._variable == 1:
temp = sys._x1
elif self._variable ==2:
temp = sys._x2
else:
                    raise ValueError('Incorrect variable specification. Must be 1 or 2.')
if self._skew > 0:
s = self._noise_stdev / np.sqrt(1 - (2. * self._skew**2) /
(np.pi * (1. - self._skew**2)))
noise = eu.probability.SampleSkewNorm(0, s, self._skew)
val = temp + noise
else:
val = temp + np.random.normal(0, self._noise_stdev)
if val > self._range[1] or val < self._range[0]:
return 'out of range'
else:
return val
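# A minimal, hedged usage sketch (added for illustration; `_StubSystem` is a
# hypothetical stand-in, not part of eugene): it only provides the `_x`
# attribute that VABConcentrationSensor.read() looks up.
if __name__ == '__main__':
    class _StubSystem(object):
        _x = 0.5  # pretend concentration value
    sensor = VABConcentrationSensor(dynamic_range=(0.0, 1.0), noise_stdev=0.05,
                                    proportional=True)
    # prints either a noisy concentration near 0.5 or 'outofrange'
    print(sensor.read(_StubSystem()))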
|
python
|
#!/usr/bin/env python3
# _*_ coding: utf-8 _*_
"""Numbering formats for converted XML lists.
:author: Shay Hill
:created: 6/26/2019
I don't want to add non-ascii text to a potentially ascii-only file, so all bullets
are '--' and Roman numerals stop at 3999.
Doesn't capture formatting like 1.1.1 or b) or (ii). Only the six basic formats are
covered::
-- bullet
1 decimal
a lowerLetter
A upperLetter
i lowerRoman
I upperRoman
"""
from string import ascii_lowercase
"""Subs to convert any number of i's to a proper Roman numeral"""
# fmt=off
# noinspection SpellCheckingInspection
ROMAN_SUBS = [
("iiiii", "v"), # 1+1+1+1+1 -> 5
("vv", "x"), # 5+5 -> 10
("xxxxx", "l"), # 10+10+10+10 -> 50
("ll", "c"), # 50+50 -> 100
("ccccc", "d"), # 100+100+100+100+100 -> 500
("dd", "m"), # 500+500 -> 1000
("iiii", "iv"), # 1+1+1+1 -> 4
("viv", "ix"), # 5+4 -> 9
("xxxx", "xl"), # 10+10+10+10 -> 40
("lxl", "xc"), # 50+40 -> 90
("cccc", "cd"), # 100+100+100+100 -> 40
("dcd", "cm"), # 500+400 -> 900
]
# fmt=on
def lower_letter(n: int) -> str:
"""
Convert a positive integer to a string of letters representing base 26.
:param n: any positive integer
:return: the kind of "numbering" used for numbered lists and excel columns.
(a, b, c ... aa, ab ...) Zero is undefined.
>>> lower_letter(1)
'a'
>>> lower_letter(26)
'z'
>>> lower_letter(27)
'aa'
"""
if n < 1:
raise ValueError("0 and <1 are not defined for this numbering")
result = ""
while n:
n, remainder = divmod(n - 1, 26)
result = ascii_lowercase[remainder] + result
return result
def upper_letter(n: int) -> str:
return lower_letter(n).upper()
# noinspection SpellCheckingInspection
def lower_roman(n: int) -> str:
# noinspection SpellCheckingInspection
"""
Convert a positive integer to a lowercase Roman numeral
:param n: any positive integer
:return: Roman number equivalent of n
>>> lower_roman(1)
'i'
>>> lower_roman(9)
'ix'
>>> lower_roman(44)
'xliv'
Numbers greater than 3999 can be expressed with a bar over the number. The bar
means "times 1000" (e.g., iv with a bar over it would be 4000).
It'll never happen in this project, and I don't want to add non-ascii to what
    might be a pure ascii file, so this function will just keep adding 'm' for as
    many thousands as you'd like.
>>> lower_roman(10000)
'mmmmmmmmmm'
"""
if n < 1:
raise ValueError("the Romans hadn't figured out {}".format(n))
result = "i" * n
for pattern, replacement in ROMAN_SUBS:
result = result.replace(pattern, replacement)
return result
def upper_roman(n: int) -> str:
return lower_roman(n).upper()
def decimal(n: int) -> str:
return str(n)
def bullet(_: int = 0) -> str:
return "--"
|
python
|
import info
class subinfo(info.infoclass):
def setTargets(self):
self.versionInfo.setDefaultValues(gitUrl = "https://invent.kde.org/utilities/ktrip.git")
self.description = "Public transport assistant"
def setDependencies(self):
self.buildDependencies["virtual/base"] = None
self.buildDependencies["kde/frameworks/extra-cmake-modules"] = None
self.runtimeDependencies["libs/qt5/qtbase"] = None
self.runtimeDependencies["libs/qt5/qtdeclarative"] = None
self.runtimeDependencies["libs/qt5/qtquickcontrols2"] = None
self.runtimeDependencies["kde/frameworks/tier1/ki18n"] = None
self.runtimeDependencies["kde/frameworks/tier1/kirigami"] = None
self.runtimeDependencies["kde/frameworks/tier1/kitemmodels"] = None
self.runtimeDependencies["kde/frameworks/tier1/kcoreaddons"] = None
self.runtimeDependencies["kde/frameworks/tier2/kcontacts"] = None
self.runtimeDependencies["kde/libs/kpublictransport"] = None
self.runtimeDependencies["kde/unreleased/kirigami-addons"] = None
if not CraftCore.compiler.isAndroid:
self.runtimeDependencies["kde/frameworks/tier1/breeze-icons"] = None
self.runtimeDependencies["kde/frameworks/tier3/qqc2-desktop-style"] = None
self.runtimeDependencies["kde/plasma/breeze"] = None
else:
self.runtimeDependencies["libs/qt5/qtandroidextras"] = None
self.runtimeDependencies["kde/plasma/qqc2-breeze-style"] = None
from Package.CMakePackageBase import *
class Package(CMakePackageBase):
def __init__(self):
CMakePackageBase.__init__(self)
self.defines["executable"] = r"bin\ktrip.exe"
self.addExecutableFilter(r"(bin|libexec)/(?!(ktrip|update-mime-database)).*")
self.ignoredPackages.append("binary/mysql")
self.ignoredPackages.append("libs/dbus")
|
python
|
def Predict(X_user) :
# Libs
import pandas as pd
import numpy as np
from sklearn.preprocessing import OneHotEncoder
from sklearn.compose import ColumnTransformer
from catboost import CatBoostClassifier
X_user = np.array(X_user).reshape(1,-1)
print(X_user)
# Reading the dataset
dataset = pd.read_csv("modified.csv")
x = dataset.iloc[:,:-1].values
y = dataset.iloc[:,-1].values
x = np.concatenate((x, X_user))
ct = ColumnTransformer(
        [('one_hot_encoder', OneHotEncoder(categories='auto'), [4])],  # The column indices to be transformed (here column [4])
remainder='passthrough' # Leave the rest of the columns untouched
)
x = ct.fit_transform(x)
x = x.astype(np.float64)
X_train = x[:-1,:]
Y_train = y
X_test = x[-1:,:]
# Catboost
classifier1 = CatBoostClassifier()
classifier1.fit(X_train, Y_train)
pred1 = classifier1.predict(X_test)
return pred1
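# Hedged usage sketch (added for illustration): the feature order must match the
# columns of modified.csv, which isn't shown here, so the vector below is purely
# hypothetical; only the 5th element being categorical follows from the column-4
# one-hot encoding above.
if __name__ == '__main__':
    sample_features = [63, 1, 145, 233, 'typ_angina']  # hypothetical values
    print(Predict(sample_features))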
|
python
|
#!/usr/bin/env python3
def test_prime(test_value):
    if test_value < 2:
        return 0
    if test_value == 2:
        return 1
    if test_value % 2 == 0:
        return 0
    # only odd divisors up to the square root need to be checked
    for x in range(3, int(test_value ** 0.5) + 1, 2):
        if test_value % x == 0:
            return 0
    return 1
def main():
test_value = 179424673
if test_prime(test_value):
print(test_value, "is prime.")
else:
print(test_value, "is not prime.")
if __name__ == '__main__':
main()
|
python
|
from .color import *
from .display import *
from .patterns import *
|
python
|
from api import views
from django.contrib import admin
from django.urls import include, path
from rest_framework.routers import DefaultRouter
from rest_framework.schemas import get_schema_view
from rest_framework.documentation import include_docs_urls
router = DefaultRouter()
router.register(r'match', views.MatchViewSet)
schema_view = get_schema_view(title='Bookings API',
description='An API to book matches or update odds.')
urlpatterns = [
path('admin/', admin.site.urls),
path('api/', include(router.urls)),
path('schema/', schema_view),
path('docs/', include_docs_urls(title='Bookings API'))
]
|
python
|
# Generated by Django 2.1.5 on 2019-01-19 20:13
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('access', '0015_auto_20170416_2044'),
]
operations = [
migrations.AddField(
model_name='smtpserver',
name='password_file_path_on_server',
field=models.CharField(blank=True, max_length=255),
),
migrations.AddField(
model_name='smtpserver',
name='ssh_port',
field=models.IntegerField(default=22),
),
migrations.AddField(
model_name='smtpserver',
name='ssh_server',
field=models.CharField(blank=True, help_text='If set, whenever the SMTP passwords for this server are changed, Kompassi will SSH to the server and write the password file on the server.', max_length=255, verbose_name='SSH server'),
),
migrations.AddField(
model_name='smtpserver',
name='ssh_username',
field=models.CharField(blank=True, max_length=255),
),
migrations.AddField(
model_name='smtpserver',
name='trigger_file_path_on_server',
field=models.CharField(blank=True, max_length=255),
),
]
|
python
|
from earthnet_models_pytorch.setting.en21_data import EarthNet2021DataModule
from earthnet_models_pytorch.setting.en22_data import EarthNet2022DataModule
from earthnet_models_pytorch.setting.en21x_data import EarthNet2021XDataModule,EarthNet2021XpxDataModule
from earthnet_models_pytorch.setting.en21_std_metric import EarthNetScore
from earthnet_models_pytorch.setting.en21_veg_metric import RootMeanSquaredError as RMSEVegScore
from earthnet_models_pytorch.setting.en21_veg_metric import RMSE_ens21x, RMSE_ens22
SETTINGS = ["en21-std", "en21-veg", "europe-veg", "en21x","en21x-px", "en22"]
METRICS = {"en21-std": EarthNetScore, "en21-veg": RMSEVegScore, "en21x": RMSE_ens21x,"en21x-px": RMSE_ens21x, "en22": RMSE_ens22}
DATASETS = {"en21-std": EarthNet2021DataModule, "en21-veg": EarthNet2021DataModule, "en21x": EarthNet2021XDataModule, "en21x-px": EarthNet2021XpxDataModule, "en22": EarthNet2022DataModule}
METRIC_CHECKPOINT_INFO = {
"en21-std": {
"monitor": "EarthNetScore",
"filename": 'Epoch-{epoch:02d}-ENS-{EarthNetScore:.4f}',
"mode": 'max'
},
"en21-veg": {
"monitor": "RMSE_Veg",
"filename": 'Epoch-{epoch:02d}-RMSE (Vegetation)-{RMSE_Veg:.4f}',
"mode": 'min'
},
"en21x": {
"monitor": "RMSE_Veg",
"filename": 'Epoch-{epoch:02d}-RMSE (Vegetation)-{RMSE_Veg:.4f}',
"mode": 'min'
},
"en21x-px": {
"monitor": "RMSE_Veg",
"filename": 'Epoch-{epoch:02d}-RMSE (Vegetation)-{RMSE_Veg:.4f}',
"mode": 'min'
},
"en22": {
"monitor": "RMSE_Veg",
"filename": 'Epoch-{epoch:02d}-RMSE (Vegetation)-{RMSE_Veg:.4f}',
"mode": 'min'
}
}
|
python
|
#!/usr/bin/env python3
"""
This example shows how to generate basic plots and manipulate them with FEA.
In this case, we'll generate a 4D model and use FEA to find all possible
2D and 3D reduced models.
We'll then solve for the solution spaces by breaking up each solution and
finding the maximum and minimum values on a 10-step grid in each dimension.
Finally, we'll generate a graph of the reduced solution showing the nodes
we discovered.
Note: If you are using GLPK as your solver, your results may vary. I highly
recommend using CPLEX or Gurobi if you are licensed to do so.
"""
import subprocess
import numpy as np
from optlang import *
from itertools import combinations, chain
from fea import flux_envelope_analysis as fea
from fea.plot import generate_graphviz
#### EXAMPLE PARAMETERS ####
# how many dimensions and constraints we want in our original random model
original_dims=4
original_cons=original_dims*2
# upper and lower bounds for our variable
var_limit=10
#### MODEL FORMULATION AND HELPER FUNCTIONS ####
# Generate a Random Model
np.random.seed(1)
model=Model(name='Random')
original_vars=[]
for i in range(original_dims):
original_vars.append(Variable(chr(65+i),ub=var_limit,lb=-var_limit))
model.add(original_vars)
for i in range(original_cons):
val=np.random.rand(original_dims)
val=val/np.linalg.norm(val)
cns=(np.random.random()-.5)*var_limit
if cns>0:
model.add(Constraint(np.dot(original_vars,val),ub=cns,name='C'+str(i)))
else:
model.add(Constraint(np.dot(original_vars,val),lb=cns,name='C'+str(i)))
#### SOLVE AND GRAPH ####
# Use all possible 2d and 3d combos
for combo in chain(combinations(original_vars, 2),combinations(original_vars, 3)):
# Solve FEA model (if using GLPK, expect the 3d ones to be very noisy)
reduced = fea(model, combo)
# Get graphviz format
reduced_graph = generate_graphviz(reduced)
# Generate the image from the input
proc = subprocess.Popen(["dot","-Tpng","-o",'./graph_all_reduced_solutions/'+''.join([v.name for v in combo])+'.png'], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output = proc.communicate(input=reduced_graph.encode())[0]
|
python
|
#!/usr/bin/env python3
# Intro & methodology:
#
# Ever since my main account was permanently banned by Baidu a few months ago
# I haven't visited hhy much, and when I do I don't post or reply. Today I
# heard that someone had tallied each member's number of livestreams over the
# 5th general election period, so I took a look and found that, limited by the
# counting method, the original post's numbers are badly off (for example, the
# member that casual fans love best clearly streamed twice -- both at the
# request of outside engagements, admittedly -- yet the original post says she
# streamed zero times, which earned her plenty of snark, to my distress). So I
# decided to bring this alt account out of retirement, dig out the data I have
# been sitting on to set the record straight, and add a little detail along
# the way.
#
# As usual, I first describe the data source and counting method and point out
# their limitations; then I present the data with little editorializing and
# leave the interpretation to the reader.
#
# My data comes from crawling Pocket48's list of currently running livestreams
# every ten minutes on the dot, which deals nicely with deleted streams
# (streams that start and end entirely between two ten-minute marks cannot be
# counted, but such streams should be rare and too perfunctory to matter
# anyway). Yizhibo streams linked to Pocket48 are also counted (unlinked
# Yizhibo streams cannot be). Unfortunately I only started crawling on
# 2017-11-13, so I cannot cover the whole election period; this post therefore
# gives monthly figures from December 2017 through July 2018. Every number in
# the tables is a count of days with at least one stream, not a count of
# streams, so streaming several times in one day (usually due to technical
# problems) counts once; in addition, streams started before 5 a.m. are
# attributed to the previous day, so a stream that glitches and restarts after
# midnight is not counted twice. All raw data, processed data, and processing
# code are published at git.io/snh48g-la and can be checked independently.
#
# That covers the methodology; below are the figures for members in the
# election circle, one table per group. I am in a hurry, so the tables are
# ugly -- bear with them. Incidentally, I have recorded nearly twenty thousand
# livestream entries so far (all public -- see the previous paragraph).
import collections
import csv
import pathlib
import sys
HERE = pathlib.Path(__file__).resolve().parent
ROOT = HERE.parent
DATA_DIR = ROOT / 'data'
PROCESSED_DATA_DIR = DATA_DIR / 'processed'
PROCESSED_INDIVIDUAL_DATA_DIR = PROCESSED_DATA_DIR / 'members'
PROBE_DATA_DIR = HERE / 'monthly'
sys.path.insert(0, ROOT.as_posix())
from e5 import G1, G2, G3, G4
MONTHS = [
'2017-12',
'2018-01',
'2018-02',
'2018-03',
'2018-04',
'2018-05',
'2018-06',
'2018-07',
]
def extract_dates(path):
with path.open() as fp:
reader = csv.DictReader(fp)
return [row['date'] for row in reader]
def dump_frequency_csv(path, rows):
print(f'Dumping to {path}')
with path.open('w') as fp:
writer = csv.writer(fp)
title_row = ['成员'] + MONTHS + ['总天数']
writer.writerow(title_row)
for row in rows:
writer.writerow(row)
def main():
for member_specs, filename in [(G1, 'g1.csv'), (G2, 'g2.csv'), (G3, 'g3.csv'), (G4, 'g4.csv')]:
rows = []
for name, member_id in member_specs:
dates = extract_dates(PROCESSED_INDIVIDUAL_DATA_DIR.joinpath(f'{member_id}.csv'))
monthly = collections.Counter(d[:7] for d in set(dates))
fields = [name]
total = 0
for month in MONTHS:
count = monthly.get(month, 0)
total += count
fields.append(count)
fields.append(total)
rows.append(fields)
dump_frequency_csv(PROBE_DATA_DIR.joinpath(filename), rows)
if __name__ == '__main__':
main()
|
python
|
import multiprocessing
import os
import random
import time
import pytest
import requests
from finetuner.toydata import generate_fashion
from finetuner import __default_tag_key__
from jina.helper import random_port
os.environ['JINA_LOG_LEVEL'] = 'DEBUG'
all_test_losses = ['SiameseLoss', 'TripletLoss']
def _run(framework_name, loss, port_expose):
from finetuner import fit
import paddle
import tensorflow as tf
import torch
embed_models = {
'keras': lambda: tf.keras.Sequential(
[
tf.keras.layers.Flatten(input_shape=(28, 28)),
tf.keras.layers.Dense(128, activation='relu'),
tf.keras.layers.Dense(32),
]
),
'pytorch': lambda: torch.nn.Sequential(
torch.nn.Flatten(),
torch.nn.Linear(
in_features=28 * 28,
out_features=128,
),
torch.nn.ReLU(),
torch.nn.Linear(in_features=128, out_features=32),
),
'paddle': lambda: paddle.nn.Sequential(
paddle.nn.Flatten(),
paddle.nn.Linear(
in_features=28 * 28,
out_features=128,
),
paddle.nn.ReLU(),
paddle.nn.Linear(in_features=128, out_features=32),
),
}
rv1, rv2 = fit(
embed_models[framework_name](),
generate_fashion(num_total=10),
loss=loss,
interactive=True,
port_expose=port_expose,
)
assert rv1
assert not rv2
# 'keras' does not work under this test setup
# Exception ... ust be from the same graph as Tensor ...
# TODO: add keras backend back to the test
@pytest.mark.parametrize('framework', ['pytorch', 'paddle'])
@pytest.mark.parametrize('loss', all_test_losses)
def test_all_frameworks(framework, loss, tmpdir):
port = random_port()
p = multiprocessing.Process(
target=_run,
args=(
framework,
loss,
port,
),
)
p.start()
try:
while True:
try:
req = requests.post(
f'http://localhost:{port}/next',
json={
'data': [],
'parameters': {
'start': 0,
'end': 1,
'topk': 5,
'sample_size': 10,
},
},
)
assert req.status_code == 200
assert req.json()['data']['docs']
break
except:
print('wait for ready...')
time.sleep(2)
# mimic next page
req = requests.post(
f'http://localhost:{port}/next',
json={
'data': [],
'parameters': {'start': 0, 'end': 1, 'topk': 5, 'sample_size': 10},
},
)
assert req.status_code == 200
rj = req.json()
assert len(rj['data']['docs']) == 1
assert len(rj['data']['docs'][0]['matches']) >= 4
time.sleep(1)
print('test fit...')
# mimic label & fit
for lbl_doc in rj['data']['docs']:
for m in lbl_doc['matches']:
m[__default_tag_key__] = random.sample([-1, 1], 1)[0]
req = requests.post(
f'http://localhost:{port}/fit',
json={'data': rj['data']['docs'], 'parameters': {'epochs': 10}},
)
assert req.status_code == 200
model_path = os.path.join(tmpdir, 'model.train')
req = requests.post(
f'http://localhost:{port}/save',
json={
'data': [],
'parameters': {
'model_path': model_path,
},
},
)
assert req.status_code == 200
assert os.path.isfile(model_path)
req = requests.post(
f'http://localhost:{port}/terminate',
json={
'data': [],
'parameters': {},
},
)
assert req.status_code == 200
except:
raise
finally:
p.terminate()
|
python
|
from .Layer import *
class Slice(Layer):
def __init__(self, model, *args, **kwargs):
Layer.__init__(self, model, *args, **kwargs)
self.axis = kwargs.get("axis", 1)
if "slice_point" in kwargs:
self.slice_points = [kwargs["slice_point"]]
else:
self.slice_points = kwargs.get("slice_points", [])
self.set_output(len(self.slice_points) + 1)
def reshape(self):
self.slices = [[slice(None)] * self.X.ndim for _ in range(len(self.slice_points) + 1)]
last = None
for i, k in enumerate(self.slice_points):
s = slice(last, k)
self.slices[i][self.axis] = s
last = k
self.Y[i] = self.X[s]
s = slice(last, None)
self.slices[-1][self.axis] = s
self.slices = [tuple(s) for s in self.slices]
self.Y[-1] = self.X[s]
def forward(self):
self.Y = [self.X[s] for s in self.slices]
def backward(self):
self.dX = np.concatenate(self.dY, axis = self.axis)
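# A minimal sketch (added for illustration, assuming plain numpy arrays rather
# than whatever blob type Layer provides): how a list of per-output slice
# tuples splits an array along a chosen axis, mirroring reshape()/forward().
# Since this module uses a relative import, run it via `python -m` or copy the
# snippet into a scratch file.
if __name__ == "__main__":
    import numpy as np
    X = np.arange(24).reshape(2, 12)
    slice_points, axis = [4, 9], 1
    bounds = [None] + slice_points + [None]
    pieces = []
    for lo, hi in zip(bounds[:-1], bounds[1:]):
        idx = [slice(None)] * X.ndim
        idx[axis] = slice(lo, hi)
        pieces.append(X[tuple(idx)])
    print([p.shape for p in pieces])  # [(2, 4), (2, 5), (2, 3)]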
|
python
|
class NaturalNumbers:
def __init__(self):
pass
    def get_first_n_for(self, n):  # Example
        """
        Get the first n natural numbers in a list using a for loop
        """
        first_n = []  # Declare a list where the numbers will be stored
        for i in range(n):  # Iterate over range, which generates values from 0 to n-1
            first_n.append(i)  # Append the loop variable to the list
        print("FIRST n (n={}) FOR: {}".format(n, first_n))
        return first_n  # Return the list
    def get_first_n_while(self, n):  # Example
        """
        Get the first n natural numbers in a list using a while loop
        """
        first_n = []  # Declare a list where the numbers will be stored
        n_count = 0  # Initialise a counter to track the current iteration of the loop
        while n_count < n:  # Loop termination condition
            first_n.append(n_count)  # Append the counter (loop tracker) to the list
            n_count += 1  # Increment the counter; otherwise n_count never reaches n and the loop runs forever
        print(f"FIRST n (n={n}) WHILE: {first_n}")
        return first_n
    def get_first_n_pair_for(self, n):  # Exercise
        """
        Get the first n even numbers in a list using a for loop
        """
        return []
    def get_first_n_pair_while(self, n):  # Exercise
        """
        Get the first n even numbers in a list using a while loop
        """
        return []
    def get_factorial_for(self, n):  # Exercise
        """
        Get the factorial of n using a for loop, returns an int
        """
        return 0
    def get_factorial_while(self, n):  # Exercise
        """
        Get the factorial of n using a while loop, returns an int
        """
        return 0
    def get_factorial_recursive(self, n):  # Example
        """
        Get the factorial of n recursively, returns an int
        """
        if n <= 1:
            return 1
        return n * self.get_factorial_recursive(n-1)
    def get_n_pow_2_for(self, n):  # Example
        """
        Get the squares of the first n numbers using a for loop, returns a list
        """
        n_pow_2 = []
        for i in range(n):
            n_pow_2.append(
                i ** 2
            )
        print(f"FIRST n (n={n}) POW 2: {n_pow_2}")
        return n_pow_2
    def get_n_pow_2_while(self, n):  # Exercise
        """
        Get the squares of the first n numbers using a while loop, returns a list
        """
        return []
    def get_n_sum_recursive(self, n):  # Example
        """
        Get the sum of the first n numbers recursively, returns an int
        """
        if n <= 0:
            return 0
        return n + self.get_n_sum_recursive(n-1)
    def get_n_sum_for(self, n):  # Exercise
        """
        Get the sum of the first n numbers using a for loop, returns an int
        """
        return 0
    def get_n_sum_while(self, n):  # Exercise
        """
        Get the sum of the first n numbers using a while loop, returns an int
        """
        return 0
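# Quick demo (added for illustration) of the worked example methods above;
# the exercise stubs still return their placeholder values.
if __name__ == "__main__":
    nn = NaturalNumbers()
    nn.get_first_n_for(5)                  # FIRST n (n=5) FOR: [0, 1, 2, 3, 4]
    nn.get_first_n_while(5)                # FIRST n (n=5) WHILE: [0, 1, 2, 3, 4]
    print(nn.get_factorial_recursive(5))   # 120
    nn.get_n_pow_2_for(4)                  # FIRST n (n=4) POW 2: [0, 1, 4, 9]
    print(nn.get_n_sum_recursive(4))       # 10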
|
python
|
"""
MIT License
Copyright (c) 2021 Defxult#8269
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
class DiscordLevelingSystemError(Exception):
"""Base exception for :class:`DiscordLevelingSystem`"""
def __init__(self, message: str):
super().__init__(message)
class RoleAwardError(DiscordLevelingSystemError):
"""Base exception for :class:`DiscordLevelingSystem`"""
def __init__(self, message: str):
super().__init__(message)
class ConnectionFailure(DiscordLevelingSystemError):
"""Attempted to connect to the database file when the event loop is already running"""
def __init__(self):
super().__init__('Cannot connect to database file because the event loop is already running')
class NotConnected(DiscordLevelingSystemError):
"""Attempted to use a method that requires a connection to a database file"""
def __init__(self):
super().__init__('You attempted to use a method that requires a database connection. Did you forget to connect to the database file first using "DiscordLevelingSystem.connect_to_database_file()"?')
class DatabaseFileNotFound(DiscordLevelingSystemError):
"""The database file was not found"""
def __init__(self, message):
super().__init__(message)
class ImproperRoleAwardOrder(RoleAwardError):
"""When setting the awards list in the :class:`DiscordLevelingSystem` constructor, :attr:`RoleAward.level_requirement` was not greater than the last level"""
def __init__(self, message):
super().__init__(message)
class ImproperLeaderboard(DiscordLevelingSystemError):
"""Raised when the leaderboard table in the database file does not have the correct settings"""
def __init__(self):
super().__init__('It seems like the leaderboard table was altered. Components changed or deleted')
class LeaderboardNotFound(DiscordLevelingSystemError):
"""When accessing the "DiscordLevelingSystem.db" file, the table "leaderboard" was not found inside that file"""
def __init__(self):
super().__init__('When accessing the "DiscordLevelingSystem.db" file, the table "leaderboard" was not found inside that file. Use DiscordLevelingSystem.create_database_file() to create the file')
class FailSafe(DiscordLevelingSystemError):
"""Raised when the expected value for a method that can cause massive unwanted results, such as :meth:`DiscordLevelingSystem.wipe_database()`, was set to `False`"""
def __init__(self):
super().__init__('Failsafe condition raised due to default argument. "intentional" was set to False')
|
python
|
# Generated by Django 2.2.10 on 2020-04-06 06:28
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('items', '0002_item_whereabouts'),
]
operations = [
migrations.AddField(
model_name='item',
name='sale_type',
field=models.CharField(choices=[('SHIPPING', 'Ship the item to a provided address.'), ('MEETING', 'Deliver the item to an agreed upon public location.')], default='ship', max_length=20),
),
]
|
python
|
# Copyright 2021 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from cpo.utils.string import removeprefix, removesuffix
class TestStringUtilities(unittest.TestCase):
def test_removeprefix(self):
"""Tests cpo.utils.string.removeprefix()"""
self.assertEqual(removeprefix("XYZ", "X"), "YZ")
self.assertEqual(removeprefix("XYZ", "Z"), "XYZ")
self.assertEqual(removeprefix("", "X"), "")
self.assertEqual(removeprefix("", ""), "")
def test_removesuffix(self):
"""Tests cpo.utils.string.test_removesuffix()"""
self.assertEqual(removesuffix("XYZ", "Z"), "XY")
self.assertEqual(removesuffix("XYZ", "X"), "XYZ")
self.assertEqual(removesuffix("", "Z"), "")
self.assertEqual(removesuffix("", ""), "")
if __name__ == "__main__":
unittest.main()
|
python
|
import os
import shutil
import unittest
from xbrr.tdnet.client.document_client import DocumentClient
from xbrr.edinet.reader.reader import Reader
from xbrr.tdnet.reader.doc import Doc
import pandas as pd
class TestReader(unittest.TestCase):
@classmethod
def setUpClass(cls):
_dir = os.path.join(os.path.dirname(__file__), "../data")
client = DocumentClient()
# "081220210818487667" J-共和工業 2022年4月期 第1四半期決算短信〔日本基準〕(連結)
# root_dir = client.get_xbrl("081220210818487667", save_dir=_dir,
# expand_level="dir")
root_dir = os.path.join(_dir, "081220210818487667")
xbrl_doc = Doc(root_dir=root_dir, xbrl_kind="public")
cls.reader = Reader(xbrl_doc, save_dir=_dir)
cls._dir = _dir
@classmethod
def tearDownClass(cls):
# shutil.rmtree(cls.reader.xbrl_doc.root_dir)
if os.path.exists(cls.reader.taxonomies_root):
shutil.rmtree(cls.reader.taxonomies_root)
def test_custom_roles(self):
roles = self.reader.custom_roles
self.assertTrue(len(roles) > 0)
        self.assertIn('rol_QuarterlyConsolidatedBalanceSheet', roles) # 310030 quarterly consolidated balance sheet
        self.assertIn('rol_YearToQuarterEndConsolidatedStatementOfComprehensiveIncome', roles) # 322031 quarterly consolidated statement of comprehensive income, year-to-quarter-end cumulative period
        self.assertIn('rol_YearToQuarterEndConsolidatedStatementOfIncome', roles) # 321031 quarterly consolidated statement of income (and comprehensive income), year-to-quarter-end cumulative period
self.assertIn('RoleAttachedDocument', roles)
# for k,v in roles.items():
# print(v.label, k)
def test_namespaces(self):
namespaces = self.reader.namespaces
self.assertTrue(len(namespaces) > 0)
self.assertIn('jpdei_cor', namespaces)
self.assertIn('jppfs_cor', namespaces)
self.assertIn('jpcrp_cor', namespaces)
# self.assertIn('tse-qcedjpfr-15150', namespaces)
def test_read_value_by_role(self):
        # rol_QuarterlyConsolidatedBalanceSheet 310030 quarterly consolidated balance sheet
bro = self.reader.read_value_by_role('rol_YearToQuarterEndConsolidatedStatementOfIncome')
# bro.to_csv(os.path.join(self._dir, 'test_bro.csv'))
self.assertGreater(len(bro), 0)
|
python
|
from noise import pnoise2
from time import sleep
from threading import Thread
import os
import color
import platform
import socket
try: from pynput import keyboard
except ImportError: keyboard = None
# unused ports: 26490-26999
port = 26969
address = "192.168.50.172"
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
width, height = 80, 40
octaves = 1
frequency = 200 * octaves
sea_level = 15
coordinates = []
local_coordinates = (200,50)
x_change = 0
y_change = 0
direction = None
speed = 1
system = platform.system()
delay = 0.065 if "Windows" in system else 0.65
if keyboard:
keyboard_controller = keyboard.Controller()
def connect(address=socket.gethostbyname(socket.gethostname()), port=26490):
print("Connecting to \"{0}:{1}\"...".format(address,port))
s.connect((address, port))
print("Connected!")
def recv(buffer=1024, encoding="utf8"):
msg = s.recv(buffer).decode(encoding)
return msg
def send(msg, encoding="utf8"):
if msg.strip() != "":
s.send(msg.encode(encoding))
def compress_list(data):
compressed = f"{data[0]},{data[1]}"
return compressed
def recv_coordinates():
pairs = recv().split("|") # ['215,55', '235,52']
msg = []
for pair in pairs:
pair = pair.split(",") # ['215', '55']
coordinate_pair = []
for coordinate in pair:
coordinate_pair.append(int(coordinate))
msg.append(tuple(coordinate_pair))
print(msg)
sleep(0.5)
return msg
def on_release(key):
global direction
if key:
direction = key
def start():
keyboard_listener = keyboard.Listener(on_release=on_release)
keyboard_listener.start()
if keyboard:
threaded_function = Thread(target=start)
threaded_function.start()
connect(address=address, port=port)
for i in range(1000):
frame_buffer = []
coordinates = recv_coordinates()
print(f"Recieved {coordinates}")
x_change, y_change = local_coordinates
packet = compress_list(local_coordinates)
send(packet)
if direction:
direction = str(direction)
if "w" in direction:
y_change -= speed
elif "a" in direction:
x_change -= speed
elif "s" in direction:
y_change += speed
elif "d" in direction:
x_change += speed
direction = None
for y in range(height):
frame_buffer.append("|")
for x in range(width):
n = int(pnoise2(x/(frequency+2)+x_change/frequency, y/(frequency+1)+y_change/frequency, int(octaves))*sea_level)
n = str(n) if n >= 1 else " "
n = "9" if len(n) > 1 else n
n = color.highlight(" ", n)
for count, player in enumerate(coordinates):
if y+y_change == coordinates[count][1] and x+x_change == coordinates[count][0]:
n = "\033[1;31m!\033[1;m"
if y == int(height/2) and x == int(width/2):
frame_buffer.append("\033[1;31m!\033[1;m")
local_coordinates = (x_change, y_change)
else:
frame_buffer.append(n)
frame_buffer.append("|\n")
os.system("cls" if "Windows" in system else "clear")
print("".join(frame_buffer))
print(f"{coordinates}+{local_coordinates}")
#sleep(delay)
|
python
|
expected_output = {
"vrf": {
"User": {
"iid": 4100,
"number_of_entries": 2186,
"eid": {
"0.0.0.0/0": {
"uptime": "1w6d",
"expire": "never",
"via": [
"static-send-map-request"
],
"rloc": {
"status": "Negative cache entry",
"action": "send-map-request"
}
},
"0.0.0.0/5": {
"uptime": "4w6d",
"expire": "00:14:38",
"via": [
"map-reply",
"forward-native"
],
"rloc": {
"status": "Encapsulating to proxy ETR"
}
},
"10.64.0.0/7": {
"uptime": "07:18:55",
"expire": "00:07:19",
"via": [
"map-reply",
"forward-native"
],
"rloc": {
"status": "Encapsulating to proxy ETR"
}
},
"10.0.0.0/9": {
"uptime": "1w4d",
"expire": "00:00:13",
"via": [
"map-reply",
"forward-native"
],
"rloc": {
"status": "Encapsulating to proxy ETR"
}
},
"10.128.0.0/11": {
"uptime": "4w6d",
"expire": "00:04:45",
"via": [
"map-reply",
"forward-native"
],
"rloc": {
"status": "Encapsulating to proxy ETR"
}
},
"10.16.0.17/32": {
"uptime": "1w6d",
"expire": "13:36:24",
"via": [
"map-reply",
"self",
"complete"
],
"rloc": {
"ip": "10.8.129.94",
"uptime": "1w6d",
"state": "up",
"priority": 10,
"weight": 10,
"encap_iid": "-"
}
},
"10.16.0.18/32": {
"uptime": "1w6d",
"expire": "13:40:30",
"via": [
"map-reply",
"self",
"complete"
],
"rloc": {
"ip": "10.8.129.65",
"uptime": "1w6d",
"state": "up",
"priority": 10,
"weight": 10,
"encap_iid": "-"
}
},
"10.16.0.21/32": {
"uptime": "1w6d",
"expire": "12:55:06",
"via": [
"map-reply",
"self",
"complete"
],
"rloc": {
"ip": "10.8.129.124",
"uptime": "1w6d",
"state": "up",
"priority": 10,
"weight": 10,
"encap_iid": "-"
}
},
"10.16.0.22/32": {
"uptime": "1w6d",
"expire": "20:44:39",
"via": [
"map-reply",
"self",
"complete"
],
"rloc": {
"ip": "10.8.129.94",
"uptime": "1w6d",
"state": "up",
"priority": 10,
"weight": 10,
"encap_iid": "-"
}
},
"10.16.0.24/32": {
"uptime": "1w6d",
"expire": "20:44:39",
"via": [
"map-reply",
"self",
"complete"
],
"rloc": {
"ip": "10.8.129.94",
"uptime": "1w6d",
"state": "up",
"priority": 10,
"weight": 10,
"encap_iid": "-"
}
},
"10.16.0.25/32": {
"uptime": "1w6d",
"expire": "12:45:50",
"via": [
"map-reply",
"self",
"complete"
],
"rloc": {
"ip": "10.8.129.94",
"uptime": "1w6d",
"state": "up",
"priority": 10,
"weight": 10,
"encap_iid": "-"
}
},
"10.16.0.26/32": {
"uptime": "1w6d",
"expire": "12:52:40",
"via": [
"map-reply",
"self",
"complete"
],
"rloc": {
"ip": "10.8.129.124",
"uptime": "1w6d",
"state": "up",
"priority": 10,
"weight": 10,
"encap_iid": "-"
}
},
"10.16.0.27/32": {
"uptime": "00:54:02",
"expire": "23:06:55",
"via": [
"map-reply",
"self",
"complete"
],
"rloc": {
"ip": "10.8.129.65",
"uptime": "00:54:02",
"state": "up",
"priority": 10,
"weight": 10,
"encap_iid": "-"
}
},
"10.16.0.28/32": {
"uptime": "1w6d",
"expire": "20:44:39",
"via": [
"map-reply",
"self",
"complete"
],
"rloc": {
"ip": "10.8.129.112",
"uptime": "1w6d",
"state": "up",
"priority": 10,
"weight": 10,
"encap_iid": "-"
}
},
"10.16.0.29/32": {
"uptime": "1w6d",
"expire": "20:44:39",
"via": [
"map-reply",
"self",
"complete"
],
"rloc": {
"ip": "10.8.129.138",
"uptime": "1w6d",
"state": "up",
"priority": 10,
"weight": 10,
"encap_iid": "-"
}
},
"10.16.0.30/32": {
"uptime": "01:56:35",
"expire": "22:44:59",
"via": [
"map-reply",
"self",
"complete"
],
"rloc": {
"ip": "10.8.129.16",
"uptime": "01:56:35",
"state": "up",
"priority": 10,
"weight": 10,
"encap_iid": "-"
}
},
"10.16.0.31/32": {
"uptime": "03:12:53",
"expire": "20:47:07",
"via": [
"map-reply",
"self",
"complete"
],
"rloc": {
"ip": "10.8.129.17",
"uptime": "03:12:53",
"state": "up",
"priority": 10,
"weight": 10,
"encap_iid": "-"
}
},
"10.16.0.130/32": {
"uptime": "1w1d",
"expire": "20:26:54",
"via": [
"map-reply",
"self",
"complete"
],
"rloc": {
"ip": "10.8.128.173",
"uptime": "1w1d",
"state": "up",
"priority": 10,
"weight": 10,
"encap_iid": "-"
}
},
"10.16.0.131/32": {
"uptime": "1w6d",
"expire": "20:44:39",
"via": [
"map-reply",
"self",
"complete"
],
"rloc": {
"ip": "10.8.128.135",
"uptime": "1w6d",
"state": "up",
"priority": 10,
"weight": 10,
"encap_iid": "-"
}
},
"10.16.0.132/32": {
"uptime": "1w6d",
"expire": "22:07:00",
"via": [
"map-reply",
"self",
"complete"
],
"rloc": {
"ip": "10.8.128.147",
"uptime": "1w6d",
"state": "up",
"priority": 10,
"weight": 10,
"encap_iid": "-"
}
},
"10.16.0.133/32": {
"uptime": "5d21h",
"expire": "02:29:10",
"via": [
"map-reply",
"self",
"complete"
],
"rloc": {
"ip": "10.8.128.152",
"uptime": "5d21h",
"state": "up",
"priority": 10,
"weight": 10,
"encap_iid": "-"
}
},
"10.16.0.134/32": {
"uptime": "1w6d",
"expire": "22:04:32",
"via": [
"map-reply",
"self",
"complete"
],
"rloc": {
"ip": "10.8.128.141",
"uptime": "1w6d",
"state": "up",
"priority": 10,
"weight": 10,
"encap_iid": "-"
}
},
"10.16.0.135/32": {
"uptime": "08:39:11",
"expire": "15:20:49",
"via": [
"map-reply",
"self",
"complete"
],
"rloc": {
"ip": "10.8.128.153",
"uptime": "08:39:11",
"state": "up",
"priority": 10,
"weight": 10,
"encap_iid": "-"
}
},
"10.16.0.136/32": {
"uptime": "1d08h",
"expire": "15:20:44",
"via": [
"map-reply",
"self",
"complete"
],
"rloc": {
"ip": "10.8.128.141",
"uptime": "1d08h",
"state": "up",
"priority": 10,
"weight": 10,
"encap_iid": "-"
}
},
"10.16.0.137/32": {
"uptime": "1d08h",
"expire": "15:20:40",
"via": [
"map-reply",
"self",
"complete"
],
"rloc": {
"ip": "10.8.128.178",
"uptime": "1d08h",
"state": "up",
"priority": 10,
"weight": 10,
"encap_iid": "-"
}
},
"10.16.0.138/32": {
"uptime": "1d08h",
"expire": "15:20:46",
"via": [
"map-reply",
"self",
"complete"
],
"rloc": {
"ip": "10.8.128.152",
"uptime": "1d08h",
"state": "up",
"priority": 10,
"weight": 10,
"encap_iid": "-"
}
},
"10.16.0.139/32": {
"uptime": "1d08h",
"expire": "15:21:32",
"via": [
"map-reply",
"self",
"complete"
],
"rloc": {
"ip": "10.8.128.140",
"uptime": "1d08h",
"state": "up",
"priority": 10,
"weight": 10,
"encap_iid": "-"
}
},
"10.16.0.140/32": {
"uptime": "1d08h",
"expire": "15:20:46",
"via": [
"map-reply",
"self",
"complete"
],
"rloc": {
"ip": "10.8.128.140",
"uptime": "1d08h",
"state": "up",
"priority": 10,
"weight": 10,
"encap_iid": "-"
}
},
"10.16.0.141/32": {
"uptime": "1d08h",
"expire": "15:21:41",
"via": [
"map-reply",
"self",
"complete"
],
"rloc": {
"ip": "10.8.128.140",
"uptime": "1d08h",
"state": "up",
"priority": 10,
"weight": 10,
"encap_iid": "-"
}
},
"10.16.0.142/32": {
"uptime": "1d08h",
"expire": "15:21:29",
"via": [
"map-reply",
"self",
"complete"
],
"rloc": {
"ip": "10.8.128.147",
"uptime": "1d08h",
"state": "up",
"priority": 10,
"weight": 10,
"encap_iid": "-"
}
},
"10.16.0.143/32": {
"uptime": "1d08h",
"expire": "15:20:37",
"via": [
"map-reply",
"self",
"complete"
],
"rloc": {
"ip": "10.8.128.152",
"uptime": "1d08h",
"state": "up",
"priority": 10,
"weight": 10,
"encap_iid": "-"
}
},
"10.16.0.144/32": {
"uptime": "1d08h",
"expire": "15:21:28",
"via": [
"map-reply",
"self",
"complete"
],
"rloc": {
"ip": "10.8.128.178",
"uptime": "1d08h",
"state": "up",
"priority": 10,
"weight": 10,
"encap_iid": "-"
}
},
"10.16.0.145/32": {
"uptime": "08:39:13",
"expire": "15:20:46",
"via": [
"map-reply",
"self",
"complete"
],
"rloc": {
"ip": "10.8.128.178",
"uptime": "08:39:13",
"state": "up",
"priority": 10,
"weight": 10,
"encap_iid": "-"
}
}
}
}
}
}
|
python
|
import random
from environment import TicTacToe, X, O
class Agent:
def __init__(self, plays=X, episodes=10_000):
"""
Initialise the agent with all the possible board states in a look up table.
        Winning states have a value of 1, losing states -1; all other states start from a small random value.
"""
# initiate all possible board states
self.plays = plays
self.episodes = episodes
self.board_states = {}
tic_tac_toe = TicTacToe() # blank board to test against
        # Get all possible states of the board (set_board_value will skip duplicate permutations)
for board in self.fill_board([0 for x in range(9)]):
_, winner, terminated = tic_tac_toe.eval(board)
value = random.random() - 0.5
if terminated:
if self.plays == X:
if winner == X: value = 1
if winner == O: value = -1
else:
if winner == O: value = 1
if winner == X: value = -1
self.set_board_value(board, value)
# LEARN
self.optimise()
def optimise(self):
"""
        This function is called to optimise the agent itself and learn the values of different boards
"""
plays = {
"X Wins!": 0,
"Draw!": 0,
"O Wins!": 0
}
print("Training")
epsilon = 0.8
epsilon_decay = epsilon / self.episodes
for episode in range(1, self.episodes + 1):
epsilon -= epsilon_decay
ttt = TicTacToe()
epsiode_moves, episode_reward = [], 0
if episode % 2 == 1:
ttt.random_step(player=-self.plays)
terminated = False
while not terminated:
action = self.predict(ttt, epsilon)
state, reward, terminated, info = ttt.step(action, player=self.plays)
epsiode_moves.append(state)
if terminated:
episode_reward = reward
plays[info["status"]] += 1
break
state, reward, terminated, info = ttt.random_step(player=-self.plays)
if terminated:
plays[info["status"]] += 1
episode_reward = -reward
# train the results
self.train(epsiode_moves, episode_reward)
print("Done.")
print(plays)
def fill_board(self, initial_board, i=0):
"""
        This function gets all possible states of the board (with symmetry)
"""
boards = []
if i < 9:
x_board = [x for x in initial_board]
o_board = [x for x in initial_board]
b_board = [x for x in initial_board]
x_board[i], o_board[i], b_board[i] = X, O, 0
if abs(x_board.count(X) - x_board.count(O)) < 2: boards += [x_board]
if abs(o_board.count(X) - o_board.count(O)) < 2: boards += [o_board]
if abs(b_board.count(X) - b_board.count(O)) < 2: boards += [b_board]
boards += self.fill_board(x_board, i+1)
boards += self.fill_board(o_board, i+1)
boards += self.fill_board(b_board, i+1)
return boards
def board_to_string(self, board):
"""
Convert the given board to a string of the boad
"""
board_string = ""
for v in board:
if v == X: board_string += "X"
elif v == O: board_string += "O"
else: board_string += "_"
return board_string
def string_to_board(self, board_string):
"""
Convert the given string to the numeric board
"""
board = []
for v in board_string:
if v == "X": board += [X]
elif v == "O": board += [O]
else: board += [0]
return board
def train(self, games_states, game_reward, lr=0.1):
"""
Given all resulting states of game that the agent played and the reward for the
game, the agent will train using the function temporal difference function:
V(st) = V(st) + lr * (V(st+1) - V(st))
"""
games_states.reverse()
last_state = games_states[0]
self.set_board_value(last_state, game_reward)
for i in range(1, len(games_states)):
current_state = games_states[i]
next_value = self.get_board_value(last_state)
current_value = self.get_board_value(current_state)
new_value = current_value + lr * (next_value - current_value)
self.set_board_value(current_state, new_value)
last_state = current_state
def get_board_permutations(self, board):
"""
        Given a board, this function will get all possible permutations of the same board
"""
permutations = []
def flip_vert(board):
return [board[2], board[1], board[0], board[5], board[4], board[3], board[8], board[7], board[6]]
def flip_horz(board):
return [board[6], board[7], board[8], board[3], board[4], board[5], board[0], board[1], board[2]]
def flip_diag_a(board):
return [board[0], board[3], board[6], board[1], board[4], board[7], board[2], board[5], board[8]]
def flip_diag_b(board):
return [board[8], board[5], board[2], board[7], board[4], board[1], board[6], board[3], board[0]]
def rotate(board):
return [board[6], board[3], board[0], board[7], board[4], board[1], board[8], board[5], board[2]]
for rotation in range(4): # for rotation
for flipped_vert in [False, True]:
for flipped_horz in [False, True]:
for flipped_diag_a in [False, True]:
for flipped_diag_b in [False, True]:
board_copy = [x for x in board]
if flipped_vert: board_copy = flip_vert(board_copy)
if flipped_horz: board_copy = flip_horz(board_copy)
if flipped_diag_a: board_copy = flip_diag_a(board_copy)
if flipped_diag_b: board_copy = flip_diag_b(board_copy)
for _ in range(rotation): board_copy = rotate(board_copy)
permutations += [board_copy]
return permutations
def get_board_value(self, board):
"""
Get the value of the board
"""
for permutations in self.get_board_permutations(board):
board_string = self.board_to_string(permutations)
if board_string in self.board_states:
return self.board_states[board_string]
return
def set_board_value(self, board, value):
"""
Set the value of the board
"""
# check if it's permutation exists in the known states
for permutations in self.get_board_permutations(board):
board_string = self.board_to_string(permutations)
if board_string in self.board_states:
self.board_states[board_string] = value
return
self.board_states[board_string] = value
return
def predict(self, ttt, epsilon=0):
"""
Given the environment, get all moves and compare the value of the moves
        then choose the one with the best value. If random < epsilon, return a random action.
"""
possible_moves = ttt.moves(self.plays)
action, value = 0, -2
for move in possible_moves:
future_action, board = move
future_value = self.get_board_value(board)
if future_value > value:
action, value = future_action, future_value
if random.random() < epsilon:
action = random.choice(possible_moves)[0]
return action
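# Hedged usage sketch (added for illustration): pit a freshly trained agent
# against random play for one game, using only the TicTacToe methods already
# referenced above (step, random_step, predict). The episode count is reduced
# here just to keep the demo quick.
if __name__ == "__main__":
    agent = Agent(plays=X, episodes=1000)
    game = TicTacToe()
    terminated = False
    info = {}
    while not terminated:
        _, _, terminated, info = game.step(agent.predict(game), player=X)
        if not terminated:
            _, _, terminated, info = game.random_step(player=O)
    print(info["status"])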
|
python
|
import random
import uuid
LOGGED_IN_DATA = {
'host': 'https://app.valohai.com/',
'user': {'id': 'x'},
'token': 'x',
}
PROJECT_DATA = {
'id': '000',
'name': 'nyan',
'description': 'nyan',
'owner': 1,
'ctime': '2016-12-16T12:25:52.718310Z',
'mtime': '2017-01-20T14:35:02.196871Z',
'urls': {
'display': 'https://app.valohai.com/p/nyan/nyan/',
}
}
execution_id = str(uuid.uuid4())
EXECUTION_DATA = {
'counter': random.randint(1, 100),
'ctime': '2017-02-08T11:09:16.120102Z',
'id': execution_id,
'project': PROJECT_DATA,
'commit': {
'repository': 666,
'identifier': '000',
'ref': 'master',
'commit_time': '2017-02-15T08:46:58Z',
'url': 'https://app.valohai.com/api/v0/commits/7/'
},
'task': None,
'duration': 777,
'status': 'complete',
'step': 'run training',
'url': 'https://app.valohai.com/api/v0/executions/{id}/'.format(id=execution_id),
'urls': {
'copy': 'https://app.valohai.com/api/v0/executions/34/copy/',
'display': 'https://app.valohai.com/p/test/mnist/execution/34/',
'stop': 'https://app.valohai.com/api/v0/executions/34/stop/',
},
'parameters': {
'dropout': 0.9,
'learning_rate': 0.001,
'max_steps': 300,
},
'outputs': [
{
'id': '123',
'name': 'a.txt',
'ctime': '2017-02-16T15:25:59.304888Z',
'size': 120500,
'url': 'http://filestash.example.com/foo/a.txt',
},
{
'id': '456',
'name': 'b.txt',
'ctime': '2017-02-16T15:25:59.420535Z',
'size': 25000,
'url': 'http://filestash.example.com/foo/b.txt'
},
],
'environment': {
'id': '88888888-8888-8888-8888-888888888888',
'name': 'local',
'owner': None,
'unfinished_job_count': 0,
},
'cumulative_metadata': {
'oispa': 'beer',
},
}
EVENT_RESPONSE_DATA = {
'total': 5,
'truncated': False,
'events': [
{
'time': '2017-02-16T15:25:33.037000',
'stream': 'status',
'message': 'hOI!!! I\'m temmie!'
},
{
'time': '2017-02-16T15:25:33.037000',
'stream': 'stderr',
'message': 'oh no',
},
],
}
CONFIG_YAML = """
---
- step:
name: Train model
image: busybox
command: "false"
inputs:
- name: in1
default: http://example.com/
parameters:
- name: max_steps
pass-as: --max_steps={v}
description: Number of steps to run the trainer
type: integer
default: 300
"""
INVALID_CONFIG_YAML = """
---
- step:
image: 8
command:
foo: 6
bar: n
outputs: yes
parameters:
- name: a
type: integer
- 38
"""
BROKEN_CONFIG_YAML = """'"""
|
python
|
#!/usr/bin/env python
from distutils.core import setup
LONG_DESCRIPTION = \
'''Convert DNA structural variants in VCF files into BED format'''
setup(
name='svdistil',
version='0.1.0.0',
author='Bernie Pope',
author_email='[email protected]',
packages=['svdistil'],
package_dir={'svdistil': 'svdistil'},
entry_points={
'console_scripts': ['svdistil = svdistil.svdistil:main',
'svqualfilter = svdistil.svqualfilter:main',
'svmerge = svdistil.svmerge:main',
'svannotate = svdistil.svannotate:main',
]
},
url='https://github.com/bjpop/svdistil',
license='LICENSE',
description=('Convert DNA structural variants in VCF files into BED format'),
long_description=(LONG_DESCRIPTION),
install_requires=["cyvcf2", "networkx", "intervaltree"],
)
|
python
|
import h5py
import numpy as np
import pandas as pd
import lightgbm as lgb
class HDFSequence(lgb.Sequence):
def __init__(self, hdf_dataset, batch_size):
"""
Construct a sequence object from HDF5 with required interface.
Parameters
----------
hdf_dataset : h5py.Dataset
Dataset in HDF5 file.
batch_size : int
Size of a batch. When reading data to construct lightgbm Dataset, each read reads batch_size rows.
"""
        # We could also open the HDF5 file once here and keep a reference to the dataset.
self.data = hdf_dataset
self.batch_size = batch_size
def __getitem__(self, idx):
return self.data[idx]
def __len__(self):
return len(self.data)
def create_dataset_from_multiple_hdf(input_flist, batch_size):
data = []
ylist = []
for f in input_flist:
f = h5py.File(f, 'r')
data.append(HDFSequence(f['X'], batch_size))
ylist.append(f['Y'][:])
params = {
'bin_construct_sample_cnt': 200000,
'max_bin': 255,
}
y = np.concatenate(ylist)
dataset = lgb.Dataset(data, label=y, params=params)
# With binary dataset created, we can use either Python API or cmdline version to train.
#
# Note: in order to create exactly the same dataset with the one created in simple_example.py, we need
# to modify simple_example.py to pass numpy array instead of pandas DataFrame to Dataset constructor.
# The reason is that DataFrame column names will be used in Dataset. For a DataFrame with Int64Index
# as columns, Dataset will use column names like ["0", "1", "2", ...]. While for numpy array, column names
# are using the default one assigned in C++ code (dataset_loader.cpp), like ["Column_0", "Column_1", ...].
dataset.save_binary('regression.train.from_hdf.bin')
def save2hdf(input_data, fname, batch_size):
"""Store numpy array to HDF5 file.
Please note chunk size settings in the implementation for I/O performance optimization.
"""
with h5py.File(fname, 'w') as f:
for name, data in input_data.items():
nrow, ncol = data.shape
if ncol == 1:
# Y has a single column and we read it in single shot. So store it as an 1-d array.
chunk = (nrow,)
data = data.values.flatten()
else:
# We use random access for data sampling when creating LightGBM Dataset from Sequence.
# When accessing any element in a HDF5 chunk, it's read entirely.
# To save I/O for sampling, we should keep number of total chunks much larger than sample count.
# Here we are just creating a chunk size that matches with batch_size.
#
# Also note that the data is stored in row major order to avoid extra copy when passing to
# lightgbm Dataset.
chunk = (batch_size, ncol)
f.create_dataset(name, data=data, chunks=chunk, compression='lzf')
def generate_hdf(input_fname, output_basename, batch_size):
# Save to 2 HDF5 files for demonstration.
df = pd.read_csv(input_fname, header=None, sep='\t')
mid = len(df) // 2
df1 = df.iloc[:mid]
df2 = df.iloc[mid:]
# We can store multiple datasets inside a single HDF5 file.
# Separating X and Y for choosing best chunk size for data loading.
fname1 = f'{output_basename}1.h5'
fname2 = f'{output_basename}2.h5'
save2hdf({'Y': df1.iloc[:, :1], 'X': df1.iloc[:, 1:]}, fname1, batch_size)
save2hdf({'Y': df2.iloc[:, :1], 'X': df2.iloc[:, 1:]}, fname2, batch_size)
return [fname1, fname2]
def main():
batch_size = 64
output_basename = 'regression'
hdf_files = generate_hdf('../regression/regression.train', output_basename, batch_size)
create_dataset_from_multiple_hdf(hdf_files, batch_size=batch_size)
if __name__ == '__main__':
main()
|
python
|
import json
from src.main import ReadFile
if __name__ == '__main__':
path_to_annotell_annotation = 'annotell_1.json'
with open(path_to_annotell_annotation, 'r') as content:
json_body = json.load(content)
result = ReadFile().convert(json_body)
print(result)
|
python
|
import httpretty
import pytest
from h_matchers import Any
@pytest.fixture
def pyramid_settings():
return {
"client_embed_url": "http://hypothes.is/embed.js",
"nginx_server": "http://via.hypothes.is",
"via_html_url": "https://viahtml.hypothes.is/proxy",
"checkmate_url": "http://localhost:9099",
"nginx_secure_link_secret": "not_a_secret",
"via_secret": "not_a_secret",
"signed_urls_required": False,
"checkmate_api_key": "dev_api_key",
"checkmate_ignore_reasons": None,
"checkmate_allow_all": False,
"enable_front_page": True,
}
def assert_cache_control(headers, cache_parts):
"""Assert that all parts of the Cache-Control header are present."""
assert dict(headers) == Any.dict.containing({"Cache-Control": Any.string()})
assert (
headers["Cache-Control"].split(", ") == Any.list.containing(cache_parts).only()
)
@pytest.fixture(autouse=True)
def httpretty_():
"""Monkey-patch Python's socket core module to mock all HTTP responses.
We never want real HTTP requests to be sent by the tests so replace them
all with mock responses. This handles requests sent using the standard
urllib2 library and the third-party httplib2 and requests libraries.
"""
httpretty.enable(allow_net_connect=False)
yield
httpretty.disable()
httpretty.reset()
|
python
|
#! /usr/bin/env python
from metadata_file import MetadataFile
import json
class FalseJSONMetadataFile(MetadataFile):
"""A metadata file type for files that claim to be JSON files but aren't """
    category_name = 'FALSE_JSON'
def collect_metadata(self):
print('not parsing json from %s' % self.path)
with open(self.path, 'rU') as f:
md = {}
return md
|
python
|
"""
# FRACTION TO RECURRING DECIMAL
Given two integers representing the numerator and denominator of a fraction, return the fraction in string format.
If the fractional part is repeating, enclose the repeating part in parentheses.
If multiple answers are possible, return any of them.
It is guaranteed that the length of the answer string is less than 10^4 for all the given inputs.
Example 1:
Input: numerator = 1, denominator = 2
Output: "0.5"
Example 2:
Input: numerator = 2, denominator = 1
Output: "2"
Example 3:
Input: numerator = 2, denominator = 3
Output: "0.(6)"
Example 4:
Input: numerator = 4, denominator = 333
Output: "0.(012)"
Example 5:
Input: numerator = 1, denominator = 5
Output: "0.2"
Constraints:
-2^31 <= numerator, denominator <= 2^31 - 1
denominator != 0
"""
class Solution:
def fractionToDecimal(self, numerator: int, denominator: int) -> str:
pos = False
if numerator * denominator >= 0:
pos = True
numerator = abs(numerator)
denominator = abs(denominator)
quot = numerator // denominator
remainder = numerator % denominator
if remainder == 0:
if pos:
                return str(quot)
            else:
                return "-" + str(quot)
frac = ""
rem = []
i = -1
while True:
remainder *= 10
q = remainder // denominator
frac += str(q)
if remainder in rem:
i = rem.index(remainder)
break
rem.append(remainder)
remainder = remainder % denominator
if remainder == 0:
break
res = ""
if i == -1:
res = str(quot) + "." + frac
else:
res = str(quot) + "." + frac[:i] + "(" + frac[i:-1] + ")"
        if not pos:
res = "-" + res
return res
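# Illustrative sanity checks (not part of the original solution); expected values follow the
# examples in the docstring above.
if __name__ == "__main__":
    _sol = Solution()
    assert _sol.fractionToDecimal(1, 2) == "0.5"
    assert _sol.fractionToDecimal(2, 1) == "2"
    assert _sol.fractionToDecimal(2, 3) == "0.(6)"
    assert _sol.fractionToDecimal(4, 333) == "0.(012)"
    assert _sol.fractionToDecimal(-50, 8) == "-6.25"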
|
python
|
from youtube_search import YoutubeSearch as YS
from config import TOKEN
from aiogram import Bot,types,Dispatcher,utils
from aiogram.utils import executor
from aiogram.types import InputTextMessageContent,InlineQueryResultArticle, ReplyKeyboardMarkup,KeyboardButton
import hashlib
async def on_startup(_):
print("Bot is online.")
def searcher(text):
res = YS(text,max_results=20).to_dict()
return res
bot = Bot(token=TOKEN)
dp = Dispatcher(bot)
@dp.message_handler(commands=["start"])
async def send_welcome(message: types.Message):
username = message.from_user.username
#b1 = KeyboardButton("@ytlook_bot ")
#main = ReplyKeyboardMarkup(resize_keyboard=True, one_time_keyboard=True).add(b1)
await bot.send_message(message.from_user.id,f"Hi {username}")#, reply_markup=main
@dp.inline_handler()
async def inline_handler(query: types.InlineQuery):
text = query.query or "video"
links = searcher(text)
articles = [types.InlineQueryResultArticle(
id = hashlib.md5(f'{link["id"]}'.encode()).hexdigest(),
title = f'{link["title"]}',
url = f'https://www.youtube.com/watch?v={link["id"]}',
thumb_url = f'{link["thumbnails"][0]}',
input_message_content=types.InputTextMessageContent(
message_text=f'via @YTlook_BOT\nhttps://www.youtube.com/watch?v={link["id"]}')
) for link in links]
await query.answer(articles,cache_time=60,is_personal=True)
if __name__ == "__main__":
executor.start_polling(dp,skip_updates=True,on_startup=on_startup)
|
python
|
# do imports
import matplotlib.pyplot as plt
import logomaker as logomaker
# load ARS enrichment matrix
ars_df = logomaker.get_example_matrix('ars_enrichment_matrix',
print_description=False)
# load wild-type ARS1 sequence
with logomaker.open_example_datafile('ars_wt_sequence.txt',
print_description=False) as f:
lines = f.readlines()
lines = [l.strip() for l in lines if '#' not in l]
ars_seq = ''.join(lines)
# trim matrix and sequence
start = 10
stop = 100
ars_df = ars_df.iloc[start:stop, :]
ars_df.reset_index(inplace=True, drop=True)
ars_seq = ars_seq[start:stop]
# create Logo object
ars_logo = logomaker.Logo(ars_df,
color_scheme='dimgray',
font_name='Luxi Mono')
# color wild-type ARS1 sequence within logo
ars_logo.style_glyphs_in_sequence(sequence=ars_seq, color='darkorange')
# highlight functional regions of ARS1
ars_logo.highlight_position_range(pmin=7, pmax=22, color='lightcyan')
ars_logo.highlight_position_range(pmin=33, pmax=40, color='honeydew')
ars_logo.highlight_position_range(pmin=64, pmax=81, color='lavenderblush')
# additional styling using Logo methods
ars_logo.style_spines(visible=False)
# style using Axes methods
ars_logo.ax.set_ylim([-4, 4])
ars_logo.ax.set_ylabel(r'$\log_2$ enrichment', labelpad=0)
ars_logo.ax.set_yticks([-4, -2, 0, 2, 4])
ars_logo.ax.set_xticks([])
# show plot
ars_logo.fig.show()
|
python
|
"""Portfolio Helper"""
__docformat__ = "numpy"
from datetime import datetime
from dateutil.relativedelta import relativedelta
import yfinance as yf
import pandas as pd
# pylint: disable=too-many-return-statements
BENCHMARK_LIST = {
"SPDR S&P 500 ETF Trust (SPY)": "SPY",
"iShares Core S&P 500 ETF (IVV)": "IVV",
"Vanguard Total Stock Market ETF (VTI)": "VTI",
"Vanguard S&P 500 ETF (VOO)": "VOO",
"Invesco QQQ Trust (QQQ)": "QQQ",
"Vanguard Value ETF (VTV)": "VTV",
"Vanguard FTSE Developed Markets ETF (VEA)": "VEA",
"iShares Core MSCI EAFE ETF (IEFA)": "IEFA",
"iShares Core U.S. Aggregate Bond ETF (AGG)": "AGG",
"Vanguard Total Bond Market ETF (BND)": "BND",
"Vanguard FTSE Emerging Markets ETF (VWO)": "VWO",
"Vanguard Growth ETF (VUG)": "VUG",
"iShares Core MSCI Emerging Markets ETF (IEMG)": "IEMG",
"iShares Core S&P Small-Cap ETF (IJR)": "IJR",
"SPDR Gold Shares (GLD)": "GLD",
"iShares Russell 1000 Growth ETF (IWF)": "IWF",
"iShares Core S&P Mid-Cap ETF (IJH)": "IJH",
"Vanguard Dividend Appreciation ETF (VIG)": "VIG",
"iShares Russell 2000 ETF (IWM)": "IWM",
"iShares Russell 1000 Value ETF (IWD)": "IWD",
"Vanguard Mid-Cap ETF (VO)": "VO",
"iShares MSCI EAFE ETF (EFA)": "EFA",
"Vanguard Total International Stock ETF (VXUS)": "VXUS",
"Vanguard Information Technology ETF (VGT)": "VGT",
"Vanguard High Dividend Yield Index ETF (VYM)": "VYM",
"Vanguard Total International Bond ETF (BNDX)": "BNDX",
"Vanguard Real Estate ETF (VNQ)": "VNQ",
"Vanguard Small Cap ETF (VB)": "VB",
"Technology Select Sector SPDR Fund (XLK)": "XLK",
"iShares Core S&P Total U.S. Stock Market ETF (ITOT)": "ITOT",
"Vanguard Intermediate-Term Corporate Bond ETF (VCIT)": "VCIT",
"Vanguard Short-Term Corporate Bond ETF (VCSH)": "VCSH",
"Energy Select Sector SPDR Fund (XLE)": "XLE",
"Health Care Select Sector SPDR Fund (XLV)": "XLV",
"Vanguard Short-Term Bond ETF (BSV)": "BSV",
"Financial Select Sector SPDR Fund (XLF)": "XLF",
"Schwab US Dividend Equity ETF (SCHD)": "SCHD",
"Invesco S&P 500® Equal Weight ETF (RSP)": "RSP",
"iShares iBoxx $ Investment Grade Corporate Bond ETF (LQD)": "LQD",
"iShares S&P 500 Growth ETF (IVW)": "IVW",
"Vanguard FTSE All-World ex-US Index Fund (VEU)": "VEU",
"iShares TIPS Bond ETF (TIP)": "TIP",
"iShares Gold Trust (IAU)": "IAU",
"Schwab U.S. Large-Cap ETF (SCHX)": "SCHX",
"iShares Core MSCI Total International Stock ETF (IXUS)": "IXUS",
"iShares Russell Midcap ETF (IWR)": "IWR",
"iShares Russell 1000 ETF (IWB)": "IWB",
"SPDR Dow Jones Industrial Average ETF Trust (DIA)": "DIA",
"iShares MSCI Emerging Markets ETF (EEM)": "EEM",
"iShares MSCI USA Min Vol Factor ETF (USMV)": "USMV",
"Schwab International Equity ETF (SCHF)": "SCHF",
"iShares S&P 500 Value ETF (IVE)": "IVE",
"iShares National Muni Bond ETF (MUB)": "MUB",
"Vanguard Large Cap ETF (VV)": "VV",
"Vanguard Small Cap Value ETF (VBR)": "VBR",
"iShares ESG Aware MSCI USA ETF (ESGU)": "ESGU",
"Vanguard Total World Stock ETF (VT)": "VT",
"iShares Core Dividend Growth ETF (DGRO)": "DGRO",
"iShares 1-3 Year Treasury Bond ETF (SHY)": "SHY",
"iShares Select Dividend ETF (DVY)": "DVY",
"iShares MSCI USA Quality Factor ETF (QUAL)": "QUAL",
"Schwab U.S. Broad Market ETF (SCHB)": "SCHB",
"iShares MBS ETF (MBB)": "MBB",
"SPDR S&P Dividend ETF (SDY)": "SDY",
"iShares 1-5 Year Investment Grade Corporate Bond ETF (IGSB)": "IGSB",
"Vanguard Short-Term Inflation-Protected Securities ETF (VTIP)": "VTIP",
"JPMorgan Ultra-Short Income ETF (JPST)": "JPST",
"iShares 20+ Year Treasury Bond ETF (TLT)": "TLT",
"iShares MSCI ACWI ETF (ACWI)": "ACWI",
"SPDR S&P Midcap 400 ETF Trust (MDY)": "MDY",
"iShares Core Total USD Bond Market ETF (IUSB)": "IUSB",
"iShares Short Treasury Bond ETF (SHV)": "SHV",
"Vanguard FTSE Europe ETF (VGK)": "VGK",
"Consumer Discretionary Select Sector SPDR Fund (XLY)": "XLY",
"SPDR Bloomberg 1-3 Month T-Bill ETF (BIL)": "BIL",
"iShares U.S. Treasury Bond ETF (GOVT)": "GOVT",
"Vanguard Health Care ETF (VHT)": "VHT",
"Vanguard Mid-Cap Value ETF (VOE)": "VOE",
"Consumer Staples Select Sector SPDR Fund (XLP)": "XLP",
"Schwab U.S. TIPS ETF (SCHP)": "SCHP",
"iShares 7-10 Year Treasury Bond ETF (IEF)": "IEF",
"iShares Preferred & Income Securities ETF (PFF)": "PFF",
"Utilities Select Sector SPDR Fund (XLU)": "XLU",
"Vanguard Tax-Exempt Bond ETF (VTEB)": "VTEB",
"iShares MSCI EAFE Value ETF (EFV)": "EFV",
"Schwab U.S. Large-Cap Growth ETF (SCHG)": "SCHG",
"iShares J.P. Morgan USD Emerging Markets Bond ETF (EMB)": "EMB",
"Dimensional U.S. Core Equity 2 ETF (DFAC)": "DFAC",
"Schwab U.S. Small-Cap ETF (SCHA)": "SCHA",
"VanEck Gold Miners ETF (GDX)": "GDX",
"Vanguard Mortgage-Backed Securities ETF (VMBS)": "VMBS",
"ProShares UltraPro QQQ (TQQQ)": "TQQQ",
"Vanguard Short-Term Treasury ETF (VGSH)": "VGSH",
"iShares iBoxx $ High Yield Corporate Bond ETF (HYG)": "HYG",
"Industrial Select Sector SPDR Fund (XLI)": "XLI",
"iShares Russell Mid-Cap Value ETF (IWS)": "IWS",
"Vanguard Extended Market ETF (VXF)": "VXF",
"SPDR Portfolio S&P 500 ETF (SPLG)": "SPLG",
"SPDR Portfolio S&P 500 Value ETF (SPYV)": "SPYV",
"iShares Russell 2000 Value ETF (IWN)": "IWN",
}
PERIODS = ["mtd", "qtd", "ytd", "3m", "6m", "1y", "3y", "5y", "10y", "all"]
PERIODS_DAYS = {
"3m": 3 * 21,
"6m": 6 * 21,
"1y": 12 * 21,
"3y": 3 * 12 * 21,
"5y": 5 * 12 * 21,
"10y": 10 * 12 * 21,
}
def is_ticker(ticker: str) -> bool:
"""Determine whether a string is a valid ticker
Parameters
----------
ticker : str
The string to be tested
Returns
----------
bool
Whether the string is a ticker
"""
item = yf.Ticker(ticker)
return "previousClose" in item.info
def beta_word(beta: float) -> str:
"""Describe a beta
Parameters
----------
beta : float
The beta for a portfolio
Returns
----------
str
The description of the beta
"""
if abs(1 - beta) > 3:
part = "extremely "
elif abs(1 - beta) > 2:
part = "very "
elif abs(1 - beta) > 1:
part = ""
else:
part = "moderately "
    return part + ("high" if beta > 1 else "low")
def clean_name(name: str) -> str:
"""Clean a name to a ticker
Parameters
----------
name : str
The value to be cleaned
Returns
----------
str
A cleaned value
"""
return name.replace("beta_", "").upper()
def filter_df_by_period(df: pd.DataFrame, period: str = "all") -> pd.DataFrame:
"""Filter dataframe by selected period
Parameters
----------
df: pd.DataFrame
Dataframe to be filtered in terms of time
period : str
Period in which to filter dataframe.
Possible choices are: mtd, qtd, ytd, 3m, 6m, 1y, 3y, 5y, 10y, all
Returns
----------
    pd.DataFrame
        Dataframe filtered by the selected period
"""
if period == "mtd":
return df[df.index.strftime("%Y-%m") == datetime.now().strftime("%Y-%m")]
if period == "qtd":
if datetime.now().month < 4:
return df[
df.index.strftime("%Y-%m") < f"{datetime.now().strftime('%Y')}-04"
]
if datetime.now().month < 7:
return df[
(df.index.strftime("%Y-%m") >= f"{datetime.now().strftime('%Y')}-04")
& (df.index.strftime("%Y-%m") < f"{datetime.now().strftime('%Y')}-07")
]
if datetime.now().month < 10:
return df[
(df.index.strftime("%Y-%m") >= f"{datetime.now().strftime('%Y')}-07")
& (df.index.strftime("%Y-%m") < f"{datetime.now().strftime('%Y')}-10")
]
return df[df.index.strftime("%Y-%m") >= f"{datetime.now().strftime('%Y')}-10"]
if period == "ytd":
return df[df.index.strftime("%Y") == datetime.now().strftime("%Y")]
if period == "3m":
return df[df.index >= (datetime.now() - relativedelta(months=3))]
if period == "6m":
return df[df.index >= (datetime.now() - relativedelta(months=6))]
if period == "1y":
return df[df.index >= (datetime.now() - relativedelta(years=1))]
if period == "3y":
return df[df.index >= (datetime.now() - relativedelta(years=3))]
if period == "5y":
return df[df.index >= (datetime.now() - relativedelta(years=5))]
if period == "10y":
return df[df.index >= (datetime.now() - relativedelta(years=10))]
return df
def sharpe_ratio(return_series: pd.Series, risk_free_rate: float) -> float:
"""Get sharpe ratio
Parameters
----------
return_series : pd.Series
Returns of the portfolio
risk_free_rate: float
Value to use for risk free rate
Returns
-------
float
Sharpe ratio
"""
mean = return_series.mean() - risk_free_rate
sigma = return_series.std()
return mean / sigma
def sortino_ratio(return_series: pd.Series, risk_free_rate: float) -> float:
"""Get sortino ratio
Parameters
----------
return_series : pd.Series
Returns of the portfolio
risk_free_rate: float
Value to use for risk free rate
Returns
-------
float
Sortino ratio
"""
mean = return_series.mean() - risk_free_rate
std_neg = return_series[return_series < 0].std()
return mean / std_neg
def get_maximum_drawdown(return_series: pd.Series) -> float:
"""Get maximum drawdown
Parameters
----------
return_series : pd.Series
Returns of the portfolio
Returns
-------
float
maximum drawdown
"""
comp_ret = (return_series + 1).cumprod()
peak = comp_ret.expanding(min_periods=1).max()
dd = (comp_ret / peak) - 1
return dd.min()
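# Minimal usage sketch for the helpers above (assumption: the daily return values below are
# purely illustrative and not real portfolio data):
if __name__ == "__main__":
    _idx = pd.date_range(end=datetime.now(), periods=5, freq="D")
    _returns = pd.Series([0.01, -0.02, 0.005, 0.015, -0.01], index=_idx)
    print("Sharpe:", sharpe_ratio(_returns, risk_free_rate=0.0))
    print("Sortino:", sortino_ratio(_returns, risk_free_rate=0.0))
    print("Max drawdown:", get_maximum_drawdown(_returns))
    # filter_df_by_period expects a DataFrame with a DatetimeIndex
    print("Rows in last 3 months:", len(filter_df_by_period(_returns.to_frame("returns"), "3m")))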
|
python
|
import json
from testtools.matchers import raises, Not
import falcon.testing as testing
import falcon
class FaultyResource:
def on_get(self, req, resp):
status = req.get_header('X-Error-Status')
title = req.get_header('X-Error-Title')
description = req.get_header('X-Error-Description')
code = 10042
raise falcon.HTTPError(status, title, description, code=code)
def on_post(self, req, resp):
raise falcon.HTTPForbidden(
'Request denied',
'You do not have write permissions for this queue.',
href='http://example.com/api/rbac')
def on_put(self, req, resp):
raise falcon.HTTPError(
falcon.HTTP_792,
'Internet crashed',
'Catastrophic weather event due to climate change.',
href='http://example.com/api/climate',
href_text='Drill baby drill!')
def on_patch(self, req, resp):
raise falcon.HTTPError(falcon.HTTP_400, 'No-can-do')
class UnicodeFaultyResource(object):
def __init__(self):
self.called = False
def on_get(self, req, resp):
self.called = True
raise falcon.HTTPError(
falcon.HTTP_792,
u'Internet \xe7rashed!',
u'\xc7atastrophic weather event',
href=u'http://example.com/api/\xe7limate',
href_text=u'Drill b\xe1by drill!')
class MiscErrorsResource:
def __init__(self, exception, needs_title):
self.needs_title = needs_title
self._exception = exception
def on_get(self, req, resp):
if self.needs_title:
raise self._exception('Excuse Us', 'Something went boink!')
else:
raise self._exception('Something went boink!')
class UnauthorizedResource:
def on_get(self, req, resp):
raise falcon.HTTPUnauthorized('Authentication Required',
'Missing or invalid token header.',
'Token')
class UnauthorizedResourceSchemaless:
def on_get(self, req, resp):
raise falcon.HTTPUnauthorized('Authentication Required',
'Missing or invalid token header.')
class NotFoundResource:
def on_get(self, req, resp):
raise falcon.HTTPNotFound()
class MethodNotAllowedResource:
def on_get(self, req, resp):
raise falcon.HTTPMethodNotAllowed(['PUT'])
class LengthRequiredResource:
def on_get(self, req, resp):
raise falcon.HTTPLengthRequired('title', 'description')
class RangeNotSatisfiableResource:
def on_get(self, req, resp):
raise falcon.HTTPRangeNotSatisfiable(123456)
def on_put(self, req, resp):
raise falcon.HTTPRangeNotSatisfiable(123456, 'x-falcon/peregrine')
class ServiceUnavailableResource:
def on_get(self, req, resp):
raise falcon.HTTPServiceUnavailable('Oops', 'Stand by...', 60)
class TestHTTPError(testing.TestBase):
def before(self):
self.resource = FaultyResource()
self.api.add_route('/fail', self.resource)
def _misc_test(self, exception, status, needs_title=True):
self.api.add_route('/misc', MiscErrorsResource(exception, needs_title))
self.simulate_request('/misc')
self.assertEqual(self.srmock.status, status)
def test_base_class(self):
headers = {
'X-Error-Title': 'Storage service down',
'X-Error-Description': ('The configured storage service is not '
'responding to requests. Please contact '
'your service provider'),
'X-Error-Status': falcon.HTTP_503
}
expected_body = [
b'{\n'
b' "title": "Storage service down",\n'
b' "description": "The configured storage service is not '
b'responding to requests. Please contact your service provider",\n'
b' "code": 10042\n'
b'}'
]
# Try it with Accept: */*
headers['Accept'] = '*/*'
body = self.simulate_request('/fail', headers=headers)
self.assertEqual(self.srmock.status, headers['X-Error-Status'])
self.assertThat(lambda: json.loads(body[0]), Not(raises(ValueError)))
self.assertEqual(expected_body, body)
# Now try it with application/json
headers['Accept'] = 'application/json'
body = self.simulate_request('/fail', headers=headers)
self.assertEqual(self.srmock.status, headers['X-Error-Status'])
self.assertThat(lambda: json.loads(body[0]), Not(raises(ValueError)))
self.assertEqual(body, expected_body)
def test_no_description(self):
body = self.simulate_request('/fail', method='PATCH')
self.assertEqual(self.srmock.status, falcon.HTTP_400)
self.assertEqual(body, [b'{\n "title": "No-can-do"\n}'])
def test_client_does_not_accept_json(self):
headers = {
'Accept': 'application/soap+xml',
'X-Error-Title': 'Storage service down',
'X-Error-Description': ('The configured storage service is not '
'responding to requests. Please contact '
'your service provider'),
'X-Error-Status': falcon.HTTP_503
}
body = self.simulate_request('/fail', headers=headers)
self.assertEqual(self.srmock.status, headers['X-Error-Status'])
self.assertEqual(body, [])
def test_client_does_not_accept_anything(self):
headers = {
'Accept': '45087gigo;;;;',
'X-Error-Title': 'Storage service down',
'X-Error-Description': ('The configured storage service is not '
'responding to requests. Please contact '
'your service provider'),
'X-Error-Status': falcon.HTTP_503
}
body = self.simulate_request('/fail', headers=headers)
self.assertEqual(self.srmock.status, headers['X-Error-Status'])
self.assertEqual(body, [])
def test_forbidden(self):
headers = {
'Accept': 'application/json'
}
expected_body = [
b'{\n'
b' "title": "Request denied",\n'
b' "description": "You do not have write permissions for this '
b'queue.",\n'
b' "link": {\n'
b' "text": "API documention for this error",\n'
b' "href": "http://example.com/api/rbac",\n'
b' "rel": "help"\n'
b' }\n'
b'}'
]
body = self.simulate_request('/fail', headers=headers, method='POST')
self.assertEqual(self.srmock.status, falcon.HTTP_403)
self.assertThat(lambda: json.loads(body[0]), Not(raises(ValueError)))
self.assertEqual(body, expected_body)
def test_epic_fail(self):
headers = {
'Accept': 'application/json'
}
expected_body = [
b'{\n'
b' "title": "Internet crashed",\n'
b' "description": "Catastrophic weather event due to climate '
b'change.",\n'
b' "link": {\n'
b' "text": "Drill baby drill!",\n'
b' "href": "http://example.com/api/climate",\n'
b' "rel": "help"\n'
b' }\n'
b'}'
]
body = self.simulate_request('/fail', headers=headers, method='PUT')
self.assertEqual(self.srmock.status, falcon.HTTP_792)
self.assertThat(lambda: json.loads(body[0]), Not(raises(ValueError)))
self.assertEqual(body, expected_body)
def test_unicode(self):
unicode_resource = UnicodeFaultyResource()
expected_body = [
b'{\n'
b' "title": "Internet \xc3\xa7rashed!",\n'
b' "description": "\xc3\x87atastrophic weather event",\n'
b' "link": {\n'
b' "text": "Drill b\xc3\xa1by drill!",\n'
b' "href": "http://example.com/api/%C3%A7limate",\n'
b' "rel": "help"\n'
b' }\n'
b'}'
]
self.api.add_route('/unicode', unicode_resource)
body = self.simulate_request('/unicode')
self.assertTrue(unicode_resource.called)
#self.assertEqual(self.srmock.status, falcon.HTTP_792)
        self.assertEqual(expected_body, body)
def test_401(self):
self.api.add_route('/401', UnauthorizedResource())
self.simulate_request('/401')
self.assertEqual(self.srmock.status, falcon.HTTP_401)
self.assertIn(('WWW-Authenticate', 'Token'), self.srmock.headers)
def test_401_schemaless(self):
self.api.add_route('/401', UnauthorizedResourceSchemaless())
self.simulate_request('/401')
self.assertEqual(self.srmock.status, falcon.HTTP_401)
self.assertNotIn(('WWW-Authenticate', 'Token'), self.srmock.headers)
def test_404(self):
self.api.add_route('/404', NotFoundResource())
body = self.simulate_request('/404')
self.assertEqual(self.srmock.status, falcon.HTTP_404)
self.assertEqual(body, [])
def test_405(self):
self.api.add_route('/405', MethodNotAllowedResource())
body = self.simulate_request('/405')
self.assertEqual(self.srmock.status, falcon.HTTP_405)
self.assertEqual(body, [])
self.assertIn(('Allow', 'PUT'), self.srmock.headers)
def test_411(self):
self.api.add_route('/411', LengthRequiredResource())
body = self.simulate_request('/411')
parsed_body = json.loads(body[0].decode())
self.assertEqual(self.srmock.status, falcon.HTTP_411)
self.assertEqual(parsed_body['title'], 'title')
self.assertEqual(parsed_body['description'], 'description')
def test_416_default_media_type(self):
self.api = falcon.API('application/xml')
self.api.add_route('/416', RangeNotSatisfiableResource())
body = self.simulate_request('/416')
self.assertEqual(self.srmock.status, falcon.HTTP_416)
self.assertEqual(body, [])
self.assertIn(('Content-Range', 'bytes */123456'), self.srmock.headers)
self.assertIn(('Content-Type', 'application/xml'), self.srmock.headers)
self.assertNotIn(('Content-Length', '0'), self.srmock.headers)
def test_416_custom_media_type(self):
self.api.add_route('/416', RangeNotSatisfiableResource())
body = self.simulate_request('/416', method='PUT')
self.assertEqual(self.srmock.status, falcon.HTTP_416)
self.assertEqual(body, [])
self.assertIn(('Content-Range', 'bytes */123456'),
self.srmock.headers)
self.assertIn(('Content-Type', 'x-falcon/peregrine'),
self.srmock.headers)
def test_503(self):
self.api.add_route('/503', ServiceUnavailableResource())
body = self.simulate_request('/503')
expected_body = (b'{\n "title": "Oops",\n "description": '
b'"Stand by..."\n}')
self.assertEqual(self.srmock.status, falcon.HTTP_503)
self.assertEqual(body, [expected_body])
self.assertIn(('Retry-After', '60'), self.srmock.headers)
def test_misc(self):
self._misc_test(falcon.HTTPBadRequest, falcon.HTTP_400)
self._misc_test(falcon.HTTPNotAcceptable, falcon.HTTP_406,
needs_title=False)
self._misc_test(falcon.HTTPConflict, falcon.HTTP_409)
self._misc_test(falcon.HTTPPreconditionFailed, falcon.HTTP_412)
self._misc_test(falcon.HTTPUnsupportedMediaType, falcon.HTTP_415,
needs_title=False)
self._misc_test(falcon.HTTPUpgradeRequired, falcon.HTTP_426)
self._misc_test(falcon.HTTPInternalServerError, falcon.HTTP_500)
self._misc_test(falcon.HTTPBadGateway, falcon.HTTP_502)
|
python
|
from django import forms
from .models import AVAILABLE_SLOTS_LEVELS, Spell, Spellbook
class SpellbookForm(forms.ModelForm):
class Meta:
model = Spellbook
fields = ['name']
class SpellbookSlotsForm(forms.Form):
spellbook_pk = forms.IntegerField(label="spellbook")
slot_level_field_names = [
'spell_slots_level_%s' % spell_slot
for spell_slot in AVAILABLE_SLOTS_LEVELS
]
def __init__(self, *args, **kwargs):
super(SpellbookSlotsForm, self).__init__(*args, **kwargs)
for spell_slot_name in self.slot_level_field_names:
self.fields[spell_slot_name] = forms.IntegerField(
label=spell_slot_name,
min_value=0,
)
def clean_spellbook_pk(self):
pk = self.cleaned_data['spellbook_pk']
try:
self.cleaned_data['spellbook'] = Spellbook.objects.get(pk=pk)
except Spellbook.DoesNotExist:
raise forms.ValidationError("The spellbook does not exist")
return pk
def clean(self):
self.cleaned_data['slots'] = [{
'level': spell_slot,
'value': self.cleaned_data[spell_slot_name],
} for spell_slot_name, spell_slot in zip(self.slot_level_field_names,
AVAILABLE_SLOTS_LEVELS)]
class SpellPkForm(forms.Form):
spell_pk = forms.IntegerField(label="spell_pk")
def clean_spell_pk(self):
pk = self.cleaned_data['spell_pk']
try:
self.cleaned_data['spell'] = Spell.objects.get(pk=pk)
except Spell.DoesNotExist:
raise forms.ValidationError("The spell does not exist")
return pk
|
python
|
# terrascript/provider/vcd.py
# Automatically generated by tools/makecode.py (24-Sep-2021 15:30:19 UTC)
#
# For imports without namespace, e.g.
#
# >>> import terrascript.provider.vcd
#
# instead of
#
# >>> import terrascript.provider.vmware.vcd
#
# This is only available for 'official' and 'partner' providers.
from terrascript.provider.vmware.vcd import *
|
python
|
import re
import jsonschema
import json
import copy
from anchore_engine.services.policy_engine.engine.policy.exceptions import (
RequiredParameterNotSetError,
ValidationError,
ParameterValidationError,
)
from anchore_engine.subsys import logger
class InputValidator(object):
__validator_description__ = None
__validator_type__ = None
def validation_criteria(self):
"""
Returns a description of the validation criteria. May be a regex or similar executable type of validation. Returns an object with keys:
"validator_type": <id>
"validation_criteria": <obj>
:return:
"""
return None
def json(self):
"""
Returns a description of this validator as a json-serializable object
:return: dict
"""
return {
"description": self.__validator_description__,
"criteria": self.validation_criteria(),
"type": self.__validator_type__,
}
def validate(self, value):
"""
Returns true if the value passes validation, raises an exception otherwise with details on the failure.
Raises a ValidationError object
:param value:
:return:
"""
return True
def __call__(self, *args, **kwargs):
if args and len(args) > 0:
value = args[0]
else:
value = None
return self.validate(value)
class LinkedValidator(InputValidator):
__validator_type__ = "LinkedValidator"
__validator_description__ = "Validates a value based on the value of another parameter. If that parameter fails validation so will this one"
def __init__(self, discriminator_parameter, default_validator, value_map):
"""
:param discriminator_parameter: The parameter definition that will determine the validator behavior (TriggerParameter type)
:param default_validator: The validator to use if the value_map does not contain the value of the discriminator parameter
:param value_map: dict mapping values of the discriminator parameter to InputValidators to use for this parameter value.
"""
self.discriminator_name = discriminator_parameter
self.discriminator_value = None
self.mapper = value_map
self.default_validator = default_validator
def inject_discriminator(self, param):
self.discriminator_value = param
def validate(self, value):
try:
validator = self.mapper.get(self.discriminator_value)
if not validator:
validator = self.default_validator
logger.debug(
"Mapped discriminator param {} with value {} to validator {}".format(
self.discriminator_name, self.discriminator_value, validator
)
)
return validator.validate(value)
except ValidationError:
raise
except Exception as e:
raise e
def validation_criteria(self):
"""
Returns a json schema validation description
:return:
"""
options = set(
[json.dumps(x.validation_criteria()) for x in self.mapper.values()]
+ [json.dumps(self.default_validator.validation_criteria())]
)
return {"anyOf": [json.loads(x) for x in options]}
class JsonSchemaValidator(InputValidator):
__validator_type__ = "JSONSchemaValidator"
__validator_description__ = "Validates input against the specified json schema"
__validation_schema__ = {} # Will pass everything
def __init__(self):
self.validation_schema = copy.deepcopy(self.__validation_schema__)
def validate(self, value):
try:
jsonschema.validate(instance=value, schema=self.validation_schema)
return True
except jsonschema.ValidationError as e:
raise ValidationError(
"JSON Schema validation failed. Schema={}, Detail={}".format(
e.schema, e.message
)
)
def validation_criteria(self):
return self.validation_schema
class TypeValidator(JsonSchemaValidator):
"""
Validates the input against a specific python type: str, int, etc.
"""
__validator_description__ = "A single value of a basic json type: {}"
__validator_type__ = "type"
__json_types__ = [
"null",
"boolean",
"object",
"array",
"number",
"string",
"integer",
]
def __init__(self, expected_type):
"""
Configure validator with type name. Type names are the jsonschema type names
:param expected_type: str name of type
"""
super(TypeValidator, self).__init__()
if expected_type not in self.__json_types__:
raise ValueError(
"Not supported json type: {}. Must be one of: {}".format(
expected_type, self.__json_types__
)
)
self.expected_type = expected_type
self.validation_schema["type"] = self.expected_type
self.__validator_description__ = TypeValidator.__validator_description__.format(
self.expected_type
)
class BooleanStringValidator(JsonSchemaValidator):
__validator_description__ = 'Value must be string representation of a boolean. One of: ["true","false"], case insensitive'
__validator_type__ = "BooleanString"
__validation_schema__ = {"type": "string", "enum": ["true", "false"]}
def validate(self, value):
value = str(value).lower() # handle any weird unicode chars
return super(BooleanStringValidator, self).validate(value)
class RegexParamValidator(JsonSchemaValidator):
__regex__ = ".*"
__validator_type__ = "RegexValidator"
__validator_description__ = "Value must pass regex match"
__validation_schema__ = {"type": "string", "pattern": ".*"}
def __init__(self, regex=None):
super(RegexParamValidator, self).__init__()
if regex:
self.regex = regex
else:
self.regex = self.__regex__
# update with instance-value of subclassed, etc
self.validation_schema["pattern"] = self.regex
def legacy_call(self, value):
"""
Returns boolean True for pass, False for fail validation
:param args:
:param kwargs:
:return:
"""
        if not isinstance(value, str):
return False
return re.match(self.regex, value) is not None
class DelimitedStringValidator(RegexParamValidator):
__regex__ = r"^\s*(\s*({item})\s*{delim})*\s*({item}){mult}\s*$"
__validator_description__ = (
"A string of character delimited values validated by a regex"
)
__validator_type__ = "DelimitedString"
__item_regex__ = ".*"
__delim__ = ","
def __init__(self, item_regex=None, delim=None):
super(DelimitedStringValidator, self).__init__()
if item_regex:
self.item_regex = item_regex
else:
self.item_regex = self.__item_regex__
if delim:
self.delim = delim
else:
self.delim = self.__delim__
self.regex = self.__regex__
self.regex = self.regex.format(
item=self.item_regex, delim=self.delim, mult="{1}"
)
self.validation_schema["pattern"] = self.regex
class CommaDelimitedNumberListValidator(DelimitedStringValidator):
__item_regex__ = r"\d+"
__validator_type__ = "CommaDelimitedStringOfNumbers"
__validator_description__ = "Comma delimited list of numbers"
class NameVersionListValidator(DelimitedStringValidator):
__validator_description__ = (
"Comma delimited list of name/version strings of format: name|version."
)
__validator_type__ = "CommaDelimitedStringOfNameVersionPairs"
__item_regex__ = r"[^|,]+\|[^|,]+"
__delim__ = ","
class CommaDelimitedStringListValidator(DelimitedStringValidator):
__item_regex__ = r"[^,]+"
__delim__ = ","
__validator_type__ = "CommaDelimitedStringList"
__validator_description__ = "Comma delimited list of strings"
class PipeDelimitedStringListValidator(DelimitedStringValidator):
__item_regex__ = r"[^|]+"
__delim__ = r"\|"
__validator_type__ = "PipeDelimitedStringList"
__validator_description__ = "Pipe delimited list of strings"
class IntegerValidator(RegexParamValidator):
__regex__ = r"^\s*[\d]+\s*$"
__validator_type__ = "IntegerString"
__validator_description__ = "Single integer number as a string"
class FloatValidator(RegexParamValidator):
__regex__ = r"^\s*-*(\d)*(\.\d*)?\s*$"
__validator_type__ = "FloatString"
__validator_description__ = "Single float number as a string"
class EnumValidator(JsonSchemaValidator):
__enums__ = []
__validation_schema__ = {"type": "string", "enum": []}
__validator_type__ = "EnumString"
def __init__(self, enums):
super(EnumValidator, self).__init__()
if enums:
self.__enums__ = enums
self.validation_schema["enum"] = self.__enums__
self.__validator_description__ = "One of [{}]".format(self.__enums__)
class DelimitedEnumStringValidator(RegexParamValidator):
__enums__ = []
__regex__ = r"^\s*(({enums})\s*{delim}\s*)*({enums})\s*$"
__validator_type__ = "DelimitedEnumString"
def __init__(self, enum_choices, delimiter=","):
if enum_choices:
self.__enums__ = enum_choices
choice_regex = "|".join(self.__enums__)
self.delimiter = delimiter
regex = self.__regex__.format(enums=choice_regex, delim=delimiter)
super(DelimitedEnumStringValidator, self).__init__(regex=regex)
self.__validator_description__ = (
"Delimited (char={}) string where each item must be one of: [{}]".format(
self.delimiter, self.__enums__
)
)
def delim_parser(param_value, item_delimiter=","):
if param_value:
return [i.strip() for i in param_value.strip().split(item_delimiter)]
else:
return []
def nested_item_delim_parser(param_value, item_delimiter=",", item_splitter="|"):
"""
    A parser for lists of items with a delimiter where each item has a splitter (e.g. for name, version tuples)
e.g. a|b,c|d,e|f -> {'a':'b', 'c':'d', 'e':'f'}
:param param_value: the value to parse
:param item_delimiter: string to delimit items
:param item_splitter: string to split item key value pairs on
:return:
"""
matches = {}
if not param_value:
return matches
try:
for param in param_value.strip().split(item_delimiter):
param = param.strip()
            if param != "":
k, v = param.split(item_splitter)
matches[k.strip()] = v.strip()
except:
raise ValueError(param_value)
return matches
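# Usage sketch for the parsers above (values are illustrative):
#   delim_parser("a, b, c")             -> ["a", "b", "c"]
#   nested_item_delim_parser("a|1,b|2") -> {"a": "1", "b": "2"}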
class TriggerParameter(object):
"""
A generic trigger parameter and associated validation configuration to support self-describing triggers and validation functions.
To create a parameter for a trigger, instantiate this class with a validations function.
param = TriggerParameter('strname', description='a string', is_required=False, validator=lambda x: bool(str(x)))
In kwargs, options are:
sort_order: allows the trigger to define the output order of parameters in the policy spec display. It does not affect evaluation.
"""
# Optional class-level validator if it does not require instance-specific configuration
__validator__ = None
def __init__(
self,
name,
description=None,
is_required=False,
related_to=None,
validator=None,
example_str=None,
**kwargs
):
"""
:param name: The name to use for the parameter, will be matched and displayed in docs (converted to lower-case for comparisons)
:param validator: An InputValidator object to call against the input
:param is_required: Boolean, is this a required param or not
:param related_to: List of strings for other parameter names related to this parameter (primarily for user comprehension)
"""
self.name = name.lower() # Use lower case for comparisons
self.description = description
self.required = is_required
self.related_params = related_to
self._param_value = None
self.sort_order = kwargs.get("sort_order", 100)
self.aliases = kwargs.get("aliases", [])
self.example = example_str
if validator:
self.validator = validator
else:
self.validator = self.__validator__
def _output_value(self):
return self._param_value
def value(self, default_if_none=None):
if self._param_value is not None:
return self._output_value()
else:
return default_if_none
def set_value(self, input_value):
if input_value is None:
if self.required:
raise RequiredParameterNotSetError(parameter_name=self.name)
# Skip validation if None, no value set. This means no way to validate json 'null' cleanly but not really a use-case for that.
else:
try:
if not self.validator.validate(input_value):
raise ParameterValidationError(
parameter=self.name,
value=input_value,
expected=self.validator.validation_criteria(),
)
except ParameterValidationError:
raise
except Exception as e:
raise ParameterValidationError(
parameter=self.name,
value=input_value,
expected=self.validator.validation_criteria(),
                    message=str(e),
)
self._param_value = input_value
def schema_json(self):
"""
Return a json schema for this trigger parameter
:return:
"""
return {
"name": self.name,
"aliases": self.aliases,
"description": self.description,
"is_required": self.required,
"related_parameters": self.related_params,
"validator": self.validator.json(),
}
class CommaDelimitedStringListParameter(TriggerParameter):
"""
    Convenience class for parameters where the value is a string of comma-delimited strings, e.g. "a,b,c"
"""
__validator__ = CommaDelimitedStringListValidator()
def _output_value(self):
return delim_parser(self._param_value, ",")
class SimpleStringParameter(TriggerParameter):
"""
    Convenience class for parameters where the value is a simple string, e.g. "a"
"""
__validator__ = TypeValidator(expected_type="string")
class PipeDelimitedStringListParameter(TriggerParameter):
"""
    Convenience class for parameters where the value is a string of pipe-delimited strings, e.g. "a|b|c"
"""
__validator__ = PipeDelimitedStringListValidator()
def _output_value(self):
return delim_parser(self._param_value, "|")
class CommaDelimitedNumberListParameter(TriggerParameter):
"""
    Convenience class for parameters where the value is a string of comma-delimited numbers, e.g. "1,2,3"
"""
__validator__ = CommaDelimitedNumberListValidator()
def _output_value(self):
return [int(x.strip()) for x in delim_parser(self._param_value, ",")]
class NameVersionStringListParameter(TriggerParameter):
"""
    Convenience class for parameters where the value is a string of comma-delimited name|version pairs, e.g. "a|b,c|d,e|f"
"""
__validator__ = NameVersionListValidator()
def _output_value(self):
return nested_item_delim_parser(
self._param_value, item_delimiter=",", item_splitter="|"
)
class EnumStringParameter(TriggerParameter):
"""
Parameter that allows one of a list of values.
"""
__choices__ = None
__validator__ = None
def __init__(
self,
name,
description,
is_required=False,
related_to=None,
enum_values=None,
**kwargs
):
"""
:param name:
:param description:
:param is_required:
:param related_to:
:param enum_values: the list of acceptable strings
"""
if not enum_values:
enum_values = self.__choices__
super(EnumStringParameter, self).__init__(
name,
description,
is_required=is_required,
related_to=related_to,
validator=EnumValidator(enum_values),
**kwargs
)
class EnumCommaDelimStringListParameter(TriggerParameter):
"""
A parameter that is a string that is comma delimited list of other strings each of which must be one of a set of strings.
"""
__choices__ = None
__validator__ = None
def __init__(
self,
name,
description,
is_required=False,
related_to=None,
enum_values=None,
**kwargs
):
"""
:param name:
:param description:
:param is_required:
:param related_to:
:param enum_values: the list of acceptable strings
"""
if not enum_values:
enum_values = self.__choices__
super(EnumCommaDelimStringListParameter, self).__init__(
name,
description,
is_required=is_required,
related_to=related_to,
validator=DelimitedEnumStringValidator(enum_values, delimiter=","),
**kwargs
)
def _output_value(self):
return delim_parser(self._param_value, item_delimiter=",")
class BooleanStringParameter(TriggerParameter):
__validator__ = BooleanStringValidator()
def _output_value(self):
"""
Convert the string value into a python boolean
:return: boolean or None if not set
"""
return self._param_value.lower() == "true" if self._param_value else None
class IntegerStringParameter(TriggerParameter):
__validator__ = IntegerValidator()
def _output_value(self):
"""
Return a python int if set
:return: integer or None
"""
return int(self._param_value) if self._param_value is not None else None
class FloatStringParameter(TriggerParameter):
__validator__ = FloatValidator()
def _output_value(self):
"""
Return a python float if set
:return: float or None
"""
return float(self._param_value) if self._param_value is not None else None
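# Usage sketch for a concrete parameter type (illustrative; the parameter name "ports" is hypothetical):
#   p = CommaDelimitedNumberListParameter("ports", description="Ports to check")
#   p.set_value("80, 443, 8080")
#   p.value()  -> [80, 443, 8080]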
|
python
|
from django.http import HttpResponse
from django.db.models import Q
from apiApp.models import Profile, Comment, Tag, User, Group, Post
from apiApp.serializers import ProfileSerializer, CommentSerializer, UserSerializer, TagSerializer, GroupSerializer, PostSerializer
from apiApp.permissions import IsOwnerOrReadOnly
from rest_framework import generics
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import authentication, permissions, status
class UserList(generics.ListAPIView):
"""
Retrieve a list of users.
"""
queryset = User.objects.all()
serializer_class = UserSerializer
class UserDetail(generics.RetrieveAPIView):
"""
Retrieve information of a specific User.
"""
queryset = User.objects.all()
serializer_class = UserSerializer
class CommentList(generics.ListCreateAPIView):
"""
get:
Retrieve a list of Comments. Comments from private groups that the User has not joined, are hidden.
post:
Create a new Comment. Should have a parent post.
"""
serializer_class = CommentSerializer
def get_queryset(self):
user = self.request.user
comments = Comment.objects.filter(
Q(post__group__isPublic=True) | Q(post__group__members__id__exact=user.id)).distinct()
return comments
def perform_create(self, serializer):
serializer.save(author=self.request.user)
class CommentDetail(generics.RetrieveUpdateDestroyAPIView):
"""
get:
Get details of a specific Comment.
put:
Edit an existing Comment that belongs to the authenticated User.
patch:
Edit an existing Comment that belongs to the authenticated User.
delete:
Delete an existing Comment that belongs to the authenticated User.
"""
permission_classes = [
IsOwnerOrReadOnly
]
serializer_class = CommentSerializer
def get_queryset(self):
user = self.request.user
comments = Comment.objects.filter(
Q(post__group__isPublic=True) | Q(post__group__members__id__exact=user.id)).distinct()
return comments
class TagList(generics.ListCreateAPIView):
"""
get:
Retrieve a list of Tags.
post:
Create a new Tag.
"""
queryset = Tag.objects.all()
serializer_class = TagSerializer
class TagDetail(generics.RetrieveUpdateDestroyAPIView):
"""
get:
Get details of a specific Tag.
put:
Edit an existing Tag.
patch:
Edit an existing Tag.
delete:
Delete an existing Tag.
"""
queryset = Tag.objects.all()
serializer_class = TagSerializer
class GroupList(generics.ListCreateAPIView):
"""
get:
Retrieve a list of Groups. Groups that are private, are not visible.
post:
Create a new Group. Authenticated User will be the admin of the created group.
"""
permission_classes = [
permissions.IsAuthenticatedOrReadOnly
]
serializer_class = GroupSerializer
def get_queryset(self):
user = self.request.user
if user:
groups = Group.objects.filter(
Q(isPublic=True) | Q(members__id__exact=user.id)).distinct()
else:
groups = Group.objects.filter(
Q(isPublic=True)).distinct()
return groups
class GroupDetail(generics.RetrieveUpdateDestroyAPIView):
"""
get:
Get details of a specific Group. Groups that are private will return 404 Not Found.
put:
Edit an existing Group. The authenticated user should be the admin of the Group.
patch:
Edit an existing Group. The authenticated user should be the admin of the Group.
delete:
Delete an existing Group. The authenticated user should be the admin of the Group.
"""
permission_classes = [
IsOwnerOrReadOnly,
permissions.IsAuthenticatedOrReadOnly
]
serializer_class = GroupSerializer
def get_queryset(self):
user = self.request.user
if user:
groups = Group.objects.filter(
Q(isPublic=True) | Q(members__id__exact=user.id)).distinct()
else:
groups = Group.objects.filter(isPublic=True)
return groups
def perform_create(self, serializer):
serializer.save(admin=self.request.user)
class ProfileList(generics.ListAPIView):
"""
get:
Retrieve a list of Profiles.
"""
queryset = Profile.objects.all()
serializer_class = ProfileSerializer
class ProfileDetail(generics.RetrieveUpdateAPIView):
"""
get:
Retrieve a specific Profile.
put:
Edit an existing Profile.
patch:
Edit an existing Profile.
"""
queryset = Profile.objects.all()
serializer_class = ProfileSerializer
class PostList(generics.ListCreateAPIView):
"""
get:
Retrieve a list of Posts. Posts that are posted to a private
Group are not visible unless the authenticated User is a member
of the private Group.
post:
Create a Post on a Group. The authenticated User should be a
member of the Group.
"""
permission_classes = [
permissions.IsAuthenticatedOrReadOnly
]
serializer_class = PostSerializer
def get_queryset(self):
user = self.request.user
posts = Post.objects.filter(
Q(group__isPublic=True) | Q(group__members__id__exact=user.id)).distinct()
return posts
def perform_create(self, serializer):
serializer.save(author=self.request.user)
class PostDetail(generics.RetrieveUpdateDestroyAPIView):
"""
get:
Retrieve a specific Post. The Group of the Post must be public.
Or the authenticated User should be a member of this Group.
put:
Edit an existing Post. The authenticated User must be the
author of this Post.
patch:
Edit an existing Post. The authenticated User must be the
author of this Post.
delete:
Delete an existing Post. The authenticated User must be the
author of this Post.
"""
permission_classes = [
permissions.IsAuthenticatedOrReadOnly,
IsOwnerOrReadOnly
]
serializer_class = PostSerializer
def get_queryset(self):
user = self.request.user
posts = Post.objects.filter(
Q(group__isPublic=True) | Q(group__members__id__exact=user.id)).distinct()
return posts
class RegisterView(generics.CreateAPIView):
"""
post:
Registers a User. Allowed for non-authenticated Users, too.
"""
permission_classes = [
permissions.AllowAny
]
model = User
serializer_class = UserSerializer
def index(request):
return HttpResponse("Hello, group 5. This is our first version of API project with Django.")
|
python
|
from bson.objectid import ObjectId
from app.models.domain.training_data_set import TrainingDataSet
from tests.stubs.models.domain.feature_extraction_data import get_feature_extraction_data_stub_5_1, get_feature_extraction_data_stub_4_2
def get_training_data_set_stub():
return TrainingDataSet(
last_modified=1617981582111,
sample_list_file_ID=ObjectId("607070acc7559b9ccb3335fc"),
feature_extraction_cache={"5_1": get_feature_extraction_data_stub_5_1(), "4_2": get_feature_extraction_data_stub_4_2()}
)
|
python
|
"""Slack platform for notify component."""
import asyncio
import logging
import os
from urllib.parse import urlparse
from aiohttp import BasicAuth, FormData
from aiohttp.client_exceptions import ClientError
from slack import WebClient
from slack.errors import SlackApiError
import voluptuous as vol
from homeassistant.components.notify import (
ATTR_DATA,
ATTR_TARGET,
ATTR_TITLE,
PLATFORM_SCHEMA,
BaseNotificationService,
)
from homeassistant.const import CONF_API_KEY, CONF_ICON, CONF_USERNAME
from homeassistant.core import callback
from homeassistant.helpers import aiohttp_client, config_validation as cv
import homeassistant.helpers.template as template
_LOGGER = logging.getLogger(__name__)
ATTR_BLOCKS = "blocks"
ATTR_BLOCKS_TEMPLATE = "blocks_template"
ATTR_FILE = "file"
ATTR_PASSWORD = "password"
ATTR_PATH = "path"
ATTR_URL = "url"
ATTR_USERNAME = "username"
CONF_DEFAULT_CHANNEL = "default_channel"
DEFAULT_TIMEOUT_SECONDS = 15
FILE_PATH_SCHEMA = vol.Schema({vol.Required(ATTR_PATH): cv.isfile})
FILE_URL_SCHEMA = vol.Schema(
{
vol.Required(ATTR_URL): cv.url,
vol.Inclusive(ATTR_USERNAME, "credentials"): cv.string,
vol.Inclusive(ATTR_PASSWORD, "credentials"): cv.string,
}
)
DATA_FILE_SCHEMA = vol.Schema(
{vol.Required(ATTR_FILE): vol.Any(FILE_PATH_SCHEMA, FILE_URL_SCHEMA)}
)
DATA_TEXT_ONLY_SCHEMA = vol.Schema(
{vol.Optional(ATTR_BLOCKS): list, vol.Optional(ATTR_BLOCKS_TEMPLATE): list}
)
DATA_SCHEMA = vol.All(
cv.ensure_list, [vol.Any(DATA_FILE_SCHEMA, DATA_TEXT_ONLY_SCHEMA)]
)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_API_KEY): cv.string,
vol.Required(CONF_DEFAULT_CHANNEL): cv.string,
vol.Optional(CONF_ICON): cv.string,
vol.Optional(CONF_USERNAME): cv.string,
}
)
async def async_get_service(hass, config, discovery_info=None):
"""Set up the Slack notification service."""
session = aiohttp_client.async_get_clientsession(hass)
client = WebClient(token=config[CONF_API_KEY], run_async=True, session=session)
try:
await client.auth_test()
except SlackApiError as err:
_LOGGER.error("Error while setting up integration: %s", err)
return
return SlackNotificationService(
hass,
client,
config[CONF_DEFAULT_CHANNEL],
username=config.get(CONF_USERNAME),
icon=config.get(CONF_ICON),
)
@callback
def _async_get_filename_from_url(url):
"""Return the filename of a passed URL."""
parsed_url = urlparse(url)
return os.path.basename(parsed_url.path)
@callback
def _async_sanitize_channel_names(channel_list):
"""Remove any # symbols from a channel list."""
return [channel.lstrip("#") for channel in channel_list]
@callback
def _async_templatize_blocks(hass, value):
"""Recursive template creator helper function."""
if isinstance(value, list):
return [_async_templatize_blocks(hass, item) for item in value]
if isinstance(value, dict):
return {
key: _async_templatize_blocks(hass, item) for key, item in value.items()
}
tmpl = template.Template(value, hass=hass)
return tmpl.async_render()
class SlackNotificationService(BaseNotificationService):
"""Define the Slack notification logic."""
def __init__(self, hass, client, default_channel, username, icon):
"""Initialize."""
self._client = client
self._default_channel = default_channel
self._hass = hass
self._icon = icon
self._username = username
async def _async_send_local_file_message(self, path, targets, message, title):
"""Upload a local file (with message) to Slack."""
if not self._hass.config.is_allowed_path(path):
_LOGGER.error("Path does not exist or is not allowed: %s", path)
return
parsed_url = urlparse(path)
filename = os.path.basename(parsed_url.path)
try:
await self._client.files_upload(
channels=",".join(targets),
file=path,
filename=filename,
initial_comment=message,
title=title or filename,
)
except SlackApiError as err:
_LOGGER.error("Error while uploading file-based message: %s", err)
async def _async_send_remote_file_message(
self, url, targets, message, title, *, username=None, password=None
):
"""Upload a remote file (with message) to Slack.
Note that we bypass the python-slackclient WebClient and use aiohttp directly,
as the former would require us to download the entire remote file into memory
first before uploading it to Slack.
"""
if not self._hass.config.is_allowed_external_url(url):
_LOGGER.error("URL is not allowed: %s", url)
return
filename = _async_get_filename_from_url(url)
session = aiohttp_client.async_get_clientsession(self.hass)
kwargs = {}
if username and password is not None:
kwargs = {"auth": BasicAuth(username, password=password)}
resp = await session.request("get", url, **kwargs)
try:
resp.raise_for_status()
except ClientError as err:
_LOGGER.error("Error while retrieving %s: %s", url, err)
return
data = FormData(
{
"channels": ",".join(targets),
"filename": filename,
"initial_comment": message,
"title": title or filename,
"token": self._client.token,
},
charset="utf-8",
)
data.add_field("file", resp.content, filename=filename)
try:
await session.post("https://slack.com/api/files.upload", data=data)
except ClientError as err:
_LOGGER.error("Error while uploading file message: %s", err)
async def _async_send_text_only_message(self, targets, message, title, blocks):
"""Send a text-only message."""
tasks = {
target: self._client.chat_postMessage(
channel=target,
text=message,
blocks=blocks,
icon_emoji=self._icon,
link_names=True,
username=self._username,
)
for target in targets
}
results = await asyncio.gather(*tasks.values(), return_exceptions=True)
for target, result in zip(tasks, results):
if isinstance(result, SlackApiError):
_LOGGER.error(
"There was a Slack API error while sending to %s: %s",
target,
result,
)
async def async_send_message(self, message, **kwargs):
"""Send a message to Slack."""
data = kwargs.get(ATTR_DATA)
if data is None:
data = {}
try:
DATA_SCHEMA(data)
except vol.Invalid as err:
_LOGGER.error("Invalid message data: %s", err)
data = {}
title = kwargs.get(ATTR_TITLE)
targets = _async_sanitize_channel_names(
kwargs.get(ATTR_TARGET, [self._default_channel])
)
# Message Type 1: A text-only message
if ATTR_FILE not in data:
if ATTR_BLOCKS_TEMPLATE in data:
blocks = _async_templatize_blocks(self.hass, data[ATTR_BLOCKS_TEMPLATE])
elif ATTR_BLOCKS in data:
blocks = data[ATTR_BLOCKS]
else:
blocks = {}
return await self._async_send_text_only_message(
targets, message, title, blocks
)
# Message Type 2: A message that uploads a remote file
if ATTR_URL in data[ATTR_FILE]:
return await self._async_send_remote_file_message(
data[ATTR_FILE][ATTR_URL],
targets,
message,
title,
username=data[ATTR_FILE].get(ATTR_USERNAME),
password=data[ATTR_FILE].get(ATTR_PASSWORD),
)
# Message Type 3: A message that uploads a local file
return await self._async_send_local_file_message(
data[ATTR_FILE][ATTR_PATH], targets, message, title
)
|
python
|
from main import notion
from commands.run_daily_reset import run_daily_reset
from commands.run_update_duration import run_update_duration
if notion.UPDATE_DURATION:
print("UPDATE_DURATION : ", run_update_duration())
if notion.DAILY_RESET:
print("DAILY_REST : ", run_daily_reset())
|
python
|
{% extends "_common/main.py" %}
{% set typeStep = "Delete" %}
|
python
|
#! /usr/bin/python2
#
# Copyright (c) 2017 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
#
# Configuration file for ttbd (place in ~/.ttbd/) or run ttbd with
# `--config-file PATH/TO/THISFILE`
#
import ttbl.tt_qemu
def nw_default_targets_zephyr_add(letter, bsps = [ 'x86', 'arm', 'nios2',
'riscv32', 'xtensa' ]):
"""
Add the default Zephyr targets to a configuration
This adds a configuration which consists of a network and five
    QEMU Linux and five QEMU Zephyr (times available BSPs)
"""
assert isinstance(letter, basestring)
assert len(letter) == 1
nw_idx = ord(letter)
nw_name = "nw" + letter
# Add five QEMU Zephyr targets on the network, one of each architecture
#
# Numbering them sequentially so their IP address matches and does
# not conflict with the linux targets in the same network
base = 30
for bsp in bsps:
for count in range(base, base + 2):
ttbl.config.target_add(
tt_qemu_zephyr("qz%02d%s-%s" % (count, letter, bsp), [ bsp ]),
target_type = "qemu-zephyr-%s" % bsp,
tags = {
"interconnects": {
nw_name: dict(
ipv4_addr = "192.168.%d.%d" % (nw_idx, count),
ipv4_prefix_len = 24,
ipv6_addr = "fc00::%02x:%02x" % (nw_idx, count),
ipv6_prefix_len = 112,
ic_index = count,
mac_addr = "02:%02x:00:00:00:%02x" \
% (nw_idx, count),
),
}
}
)
base = count + 1
#
# Add QEMUs targets
#
#
# These are for a default example, you can add as many as you care and
# your server can execute concurrently.
if ttbl.config.defaults_enabled:
# Creates 10 QEMU targets of each BSP interconnected to networks nwa,
# and nwb (defined in conf_06_defaults)
for letter in [ 'a', 'b' ]:
nw_default_targets_zephyr_add(letter)
|
python
|
from collections import Counter
anzZweier, anzDreier = 0,0
with open('AdventOfCode_02_1_Input.txt') as f:
for zeile in f:
zweierGefunden, dreierGefunden = False, False
counter = Counter(zeile)
for key,value in counter.items():
if value == 3 and not dreierGefunden:
anzDreier += 1
dreierGefunden = True
if value == 2 and not zweierGefunden:
anzZweier += 1
zweierGefunden = True
print(anzDreier*anzZweier)
|
python
|
import pandas as pd
import numpy as np
from scipy.sparse import data
from sklearn.experimental import enable_iterative_imputer
from sklearn.impute import SimpleImputer
from sklearn.impute import IterativeImputer
from sklearn.impute import KNNImputer
class BasicImputation():
""" This class supports basic imputation methods.
"""
def __init__(self, data, method, max, parameter):
""" Set data, imputation method, max imputation limit value, imputation parameter
"""
self.method = method
self.data = data
self.max = max
self.columns = data.columns
self.index = data.index
self.parameter = parameter
def makeDF(self, series_result):
dfResult = pd.DataFrame(series_result, columns = self.columns, index = self.index)
return dfResult
def ScikitLearnMethod(self):
""" Get imputed data from scikit library methods. (KNN, MICE)
"""
data = self.data
# TODO Extend parameter
if self.method =='KNN':
n_neighbors = self.parameter['n_neighbors']
weights =self.parameter['weights']
metric = self.parameter['metric']
# https://scikit-learn.org/stable/modules/generated/sklearn.impute.KNNImputer.html
            # n_neighbors: number of neighbors used for imputation, int (default: 5)
            # weights: weight applied to neighbors during prediction, {'uniform', 'distance'} or callable (default: 'uniform')
            # metric: distance metric used to define neighbors, {'nan_euclidean'} or callable (default: 'nan_euclidean')
series_result = KNNImputer(n_neighbors=n_neighbors, weights = weights, metric = metric).fit_transform(data)
elif self.method =='MICE':
#{‘mean’, ‘median’, ‘most_frequent’, ‘constant’}, default=’mean’
# https://scikit-learn.org/stable/modules/generated/sklearn.impute.IterativeImputer.html#sklearn-impute-iterativeimputer
series_result = IterativeImputer(random_state=0, initial_strategy='mean', sample_posterior=True).fit_transform(data)
else:
series_result = data
result = self.makeDF(series_result)
return result
def simpleMethod(self):
""" Get imputed data from scikit SimpleImputer methods
"""
series_result = SimpleImputer(strategy=self.method, missing_values = np.nan).fit_transform(self.data)
result = self.makeDF(series_result)
return result
def fillNAMethod(self):
""" Get imputed data from fillNA methods
"""
result = self.data.fillna(method=self.method, limit=self.max)
return result
def simpleIntMethod(self):
""" Get imputed data from simple other methods
"""
result = self.data.interpolate(method=self.method, limit = self.max, limit_direction='both')
return result
def orderIntMethod(self):
""" Get imputed data from interpolation methods
"""
result = self.data.interpolate(method=self.method, limit = self.max, order = 2, limit_direction='both')
return result
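# Minimal usage sketch (illustrative only; the DataFrame and parameter values
# below are made up and are not part of the original module):
#
#   df = pd.DataFrame({"a": [1.0, np.nan, 3.0], "b": [np.nan, 2.0, 4.0]})
#   knn_params = {"n_neighbors": 2, "weights": "uniform", "metric": "nan_euclidean"}
#   knn_imputed = BasicImputation(data=df, method="KNN", max=3,
#                                 parameter=knn_params).ScikitLearnMethod()
#   ffilled = BasicImputation(data=df, method="ffill", max=3,
#                             parameter={}).fillNAMethod()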
|
python
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from tests.unit import utils
from tests.unit.hazmat import test_curve_helpers
@utils.needs_speedup
class Test_speedup_subdivide_nodes(test_curve_helpers.Test_subdivide_nodes):
@staticmethod
def _call_function_under_test(nodes):
from bezier import _speedup
return _speedup.subdivide_nodes_curve(nodes)
@utils.needs_speedup
class Test_speedup_evaluate_multi_barycentric(
test_curve_helpers.Test_evaluate_multi_barycentric
):
@staticmethod
def _call_function_under_test(nodes, lambda1, lambda2):
from bezier import _speedup
return _speedup.evaluate_multi_barycentric(nodes, lambda1, lambda2)
@utils.needs_speedup
class Test_speedup_evaluate_multi(test_curve_helpers.Test_evaluate_multi):
@staticmethod
def _call_function_under_test(nodes, s_vals):
from bezier import _speedup
return _speedup.evaluate_multi(nodes, s_vals)
@utils.needs_speedup
class Test_speedup_compute_length(test_curve_helpers.Test_compute_length):
@staticmethod
def _call_function_under_test(nodes):
from bezier import _speedup
return _speedup.compute_length(nodes)
def _scipy_skip(self):
# Fortran implementation directly includes QUADPACK, so the presence
# or absence of SciPy is irrelevant.
pass
def test_without_scipy(self):
# Fortran implementation directly includes QUADPACK, so the presence
# or absence of SciPy is irrelevant.
pass
@utils.needs_speedup
class Test_speedup_elevate_nodes(test_curve_helpers.Test_elevate_nodes):
@staticmethod
def _call_function_under_test(nodes):
from bezier import _speedup
return _speedup.elevate_nodes(nodes)
@utils.needs_speedup
class Test_speedup_specialize_curve(test_curve_helpers.Test_specialize_curve):
@staticmethod
def _call_function_under_test(nodes, start, end):
from bezier import _speedup
return _speedup.specialize_curve(nodes, start, end)
@utils.needs_speedup
class Test_speedup_evaluate_hodograph(
test_curve_helpers.Test_evaluate_hodograph
):
@staticmethod
def _call_function_under_test(s, nodes):
from bezier import _speedup
return _speedup.evaluate_hodograph(s, nodes)
@utils.needs_speedup
class Test_speedup_get_curvature(test_curve_helpers.Test_get_curvature):
@staticmethod
def _call_function_under_test(nodes, tangent_vec, s):
from bezier import _speedup
return _speedup.get_curvature(nodes, tangent_vec, s)
@utils.needs_speedup
class Test_speedup_newton_refine(test_curve_helpers.Test_newton_refine):
@staticmethod
def _call_function_under_test(nodes, point, s):
from bezier import _speedup
return _speedup.newton_refine_curve(nodes, point, s)
@utils.needs_speedup
class Test_speedup_locate_point(test_curve_helpers.Test_locate_point):
@staticmethod
def _call_function_under_test(nodes, point):
from bezier import _speedup
return _speedup.locate_point_curve(nodes, point)
@utils.needs_speedup
class Test_speedup_reduce_pseudo_inverse(
test_curve_helpers.Test_reduce_pseudo_inverse
):
@staticmethod
def _call_function_under_test(nodes):
from bezier import _speedup
return _speedup.reduce_pseudo_inverse(nodes)
@utils.needs_speedup
class Test_speedup_full_reduce(test_curve_helpers.Test_full_reduce):
@staticmethod
def _call_function_under_test(nodes):
from bezier import _speedup
return _speedup.full_reduce(nodes)
|
python
|
# https://leetcode.com/problems/pascals-triangle/description/
class Solution(object):
def generate(self, numRows):
if numRows == 0:
return []
elif numRows == 1:
return [[1]]
elif numRows == 2:
return [[1], [1, 1]]
res = [[1], [1, 1]]
for row in range(2, numRows):
level = [1]
for i in range(1, row):
tmp = res[row - 1][i - 1] + res[row - 1][i]
level.append(tmp)
level.append(1)
res.append(level)
return res
test = Solution()
print(test.generate(5))
|
python
|
from math import ceil
def to_bytes(n):
binary = '{:b}'.format(n)
width = int(ceil(len(binary) / 8.0) * 8)
padded = binary.zfill(width)
    return [padded[a:a + 8] for a in range(0, width, 8)]
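# Example (illustrative): to_bytes(1000) == ['00000011', '11101000'], i.e.
# 1000 = 0b1111101000, zero-padded to the next multiple of 8 bits and split
# into 8-bit chunks.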
|
python
|
#!python3
import numpy as np
from magLabUtilities.datafileutilities.timeDomain import importFromXlsx
from magLabUtilities.signalutilities.signals import SignalThread, Signal, SignalBundle
from magLabUtilities.signalutilities.hysteresis import XExpQA, HysteresisSignalBundle
from magLabUtilities.uiutilities.plotting.hysteresis import MofHPlotter, XofMPlotter, MofHXofMPlotter
if __name__=='__main__':
fp = './tests/workflowTests/datafiles/test21kLoop.xlsx'
refBundle = HysteresisSignalBundle(importFromXlsx(fp, '21k', 2, 'C,D', dataColumnNames=['H','M']))
mMatrix = refBundle.signals['M'].independentThread.data
tMatrix = refBundle.signals['M'].dependentThread.data
pMAmpIndex = np.argmax(mMatrix[0:int(mMatrix.shape[0]/2)])
nMAmpIndex = np.argmin(mMatrix)
virginGen = XExpQA(xInit=67.0, hCoercive=630.0, mSat=1.67e6, hCoop=1200.0, hAnh=3000.0, xcPow=4.0, mRev=0.0)
pRevGen = XExpQA(xInit=67.0, hCoercive=630.0, mSat=1.67e6, hCoop=1200.0, hAnh=3000.0, xcPow=4.0, mRev=mMatrix[pMAmpIndex])
nRevGen = XExpQA(xInit=67.0, hCoercive=630.0, mSat=1.67e6, hCoop=1200.0, hAnh=3000.0, xcPow=4.0, mRev=mMatrix[nMAmpIndex])
virginM = Signal.fromThreadPair(SignalThread(mMatrix[0:pMAmpIndex]), SignalThread(tMatrix[0:pMAmpIndex]))
virginX = virginGen.evaluate(mSignal=virginM)
pRevM = Signal.fromThreadPair(SignalThread(mMatrix[pMAmpIndex:nMAmpIndex]), SignalThread(tMatrix[pMAmpIndex:nMAmpIndex]))
pRevX = pRevGen.evaluate(mSignal=pRevM)
nRevM = Signal.fromThreadPair(SignalThread(mMatrix[nMAmpIndex:]), SignalThread(tMatrix[nMAmpIndex:]))
nRevX = nRevGen.evaluate(mSignal=nRevM)
testBundle = HysteresisSignalBundle.fromSignalBundleSequence([virginX, pRevX, nRevX])
plotter = MofHXofMPlotter()
plotter.addMofHPlot(refBundle, 'Data')
plotter.addXofMPlot(testBundle, 'Model')
# plotter.addXofMPlot(virginX, 'Virgin')
# plotter.addXofMPlot(pRevX, 'Positive Reversal')
# plotter.addXofMPlot(nRevX, 'Negative Reversal')
input('Press Return to exit...')
print('done')
|
python
|
#!/usr/bin/env python
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import argparse
import collections
import gzip
import json
import logging
import os
import re
import subprocess
import sys
import uuid
from rally_openstack import osclients
from rally import api
from rally.ui import utils
LOG = logging.getLogger("verify-job")
LOG.setLevel(logging.DEBUG)
# NOTE(andreykurilin): this variable is used to generate output file names
# with prefix ${CALL_COUNT}_ .
_call_count = 0
class Status(object):
PASS = "success"
ERROR = "error"
SKIPPED = "skip"
FAILURE = "fail"
class Step(object):
COMMAND = None
DEPENDS_ON = None
CALL_ARGS = {}
BASE_DIR = "rally-verify"
HTML_TEMPLATE = ("<span class=\"%(status)s\">[%(status)s]</span>\n"
"<a href=\"%(output_file)s\">%(doc)s</a>\n"
"<code>$ %(cmd)s</code>")
def __init__(self, args, rapi):
self.args = args
self.rapi = rapi
self.result = {"status": Status.PASS,
"doc": self.__doc__}
@property
def name(self):
return " ".join(re.findall("[A-Z][^A-Z]*",
self.__class__.__name__)).lower()
def check(self, results):
"""Check weather this step should be executed or skipped."""
if self.DEPENDS_ON is not None:
if results[self.DEPENDS_ON].result["status"] in (
Status.PASS, Status.FAILURE):
return True
else:
self.result["status"] = Status.SKIPPED
msg = ("Step '%s' is skipped, since depends on step '%s' is "
"skipped or finished with an error." %
(self.name, results[self.DEPENDS_ON].name))
stdout_file = self._generate_path(
"%s.txt.gz" % self.__class__.__name__)
self.result["output_file"] = self._write_file(
stdout_file, msg, compress=True)
return False
return True
def setUp(self):
"""Obtain variables required for execution"""
pass
def run(self):
"""Execute step. The default action - execute the command"""
self.setUp()
cmd = "rally --rally-debug %s" % (self.COMMAND % self.CALL_ARGS)
self.result["cmd"] = cmd
self.result["status"], self.result["output"] = self.call_rally(cmd)
stdout_file = self._generate_path("%s.txt.gz" % cmd)
self.result["output_file"] = self._write_file(
stdout_file, self.result["output"], compress=True)
@classmethod
def _generate_path(cls, root):
global _call_count
_call_count += 1
root = root.replace("<", "").replace(">", "").replace("/", "_")
parts = ["%s" % _call_count]
for path in root.split(" "):
if path.startswith(cls.BASE_DIR):
path = path[len(cls.BASE_DIR) + 1:]
parts.append(path)
return os.path.join(cls.BASE_DIR, "_".join(parts))
@classmethod
def _write_file(cls, path, data, compress=False):
"""Create a file and write some data to it."""
if compress:
with gzip.open(path, "wb") as f:
f.write(data)
else:
with open(path, "wb") as f:
f.write(data)
return path
@staticmethod
def call_rally(command):
"""Execute a Rally verify command."""
try:
LOG.info("Start `%s` command." % command)
stdout = subprocess.check_output(command.split(),
stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
LOG.error("Command `%s` failed." % command)
return Status.ERROR, e.output
else:
return Status.PASS, stdout
def to_html(self):
return self.HTML_TEMPLATE % self.result
class SetUpStep(Step):
"""Validate deployment, create required resources and directories."""
DEPLOYMENT_NAME = "devstack"
def run(self):
if not os.path.exists("%s/extra" % self.BASE_DIR):
os.makedirs("%s/extra" % self.BASE_DIR)
        # ensure that the deployment exists
deployment = self.rapi.deployment._get(self.DEPLOYMENT_NAME)
# check it
result = self.rapi.deployment.check(
deployment=self.DEPLOYMENT_NAME)["openstack"]
if "admin_error" in result[0] or "user_error" in result[0]:
self.result["status"] = Status.ERROR
return
try:
subprocess.check_call(["rally", "deployment", "use",
"--deployment", self.DEPLOYMENT_NAME],
stdout=sys.stdout)
except subprocess.CalledProcessError:
self.result["status"] = Status.ERROR
return
credentials = None
for platform, creds in deployment.to_dict()["credentials"].items():
if platform == "openstack":
credentials = creds[0]["admin"]
if credentials is None:
return Status.ERROR, "There is no openstack credentials."
clients = osclients.Clients(credentials)
if self.args.ctx_create_resources:
# If the 'ctx-create-resources' arg is provided, delete images and
# flavors, and also create a shared network to make Tempest context
# create needed resources.
LOG.info("The 'ctx-create-resources' arg is provided. Deleting "
"images and flavors, and also creating a shared network "
"to make Tempest context create needed resources.")
LOG.info("Deleting images.")
for image in clients.glance().images.list():
clients.glance().images.delete(image.id)
LOG.info("Deleting flavors.")
for flavor in clients.nova().flavors.list():
clients.nova().flavors.delete(flavor.id)
LOG.info("Creating a shared network.")
net_body = {
"network": {
"name": "shared-net-%s" % str(uuid.uuid4()),
"tenant_id": clients.keystone.auth_ref.project_id,
"shared": True
}
}
clients.neutron().create_network(net_body)
else:
# Otherwise, just in case create only flavors with the following
# properties: RAM = 64MB and 128MB, VCPUs = 1, disk = 0GB to make
# Tempest context discover them.
LOG.info("The 'ctx-create-resources' arg is not provided. "
"Creating flavors to make Tempest context discover them.")
for flv_ram in [64, 128]:
params = {
"name": "flavor-%s" % str(uuid.uuid4()),
"ram": flv_ram,
"vcpus": 1,
"disk": 0
}
LOG.info("Creating flavor '%s' with the following properties: "
"RAM = %dMB, VCPUs = 1, disk = 0GB" %
(params["name"], flv_ram))
clients.nova().flavors.create(**params)
def to_html(self):
return ""
class ListPlugins(Step):
"""List plugins for verifiers management."""
COMMAND = "verify list-plugins"
DEPENDS_ON = SetUpStep
class CreateVerifier(Step):
"""Create a Tempest verifier."""
COMMAND = ("verify create-verifier --type %(type)s --name %(name)s "
"--source %(source)s")
DEPENDS_ON = ListPlugins
CALL_ARGS = {"type": "tempest",
"name": "my-verifier",
"source": "https://git.openstack.org/openstack/tempest"}
class ShowVerifier(Step):
"""Show information about the created verifier."""
COMMAND = "verify show-verifier"
DEPENDS_ON = CreateVerifier
class ListVerifiers(Step):
"""List all installed verifiers."""
COMMAND = "verify list-verifiers"
DEPENDS_ON = CreateVerifier
class UpdateVerifier(Step):
"""Switch the verifier to the penultimate version."""
COMMAND = "verify update-verifier --version %(version)s --update-venv"
DEPENDS_ON = CreateVerifier
def setUp(self):
"""Obtain penultimate verifier commit for downgrading to it"""
verifier_id = self.rapi.verifier.list()[0]["uuid"]
verifications_dir = os.path.join(
os.path.expanduser("~"),
".rally/verification/verifier-%s/repo" % verifier_id)
# Get the penultimate verifier commit ID
p_commit_id = subprocess.check_output(
["git", "log", "-n", "1", "--pretty=format:%H"],
cwd=verifications_dir).strip()
self.CALL_ARGS = {"version": p_commit_id}
class ConfigureVerifier(Step):
"""Generate and show the verifier config file."""
COMMAND = "verify configure-verifier --show"
DEPENDS_ON = CreateVerifier
class ExtendVerifier(Step):
"""Extend verifier with keystone integration tests."""
COMMAND = "verify add-verifier-ext --source %(source)s"
DEPENDS_ON = CreateVerifier
CALL_ARGS = {"source": "https://git.openstack.org/openstack/"
"keystone-tempest-plugin"}
class ListVerifierExtensions(Step):
"""List all extensions of verifier."""
COMMAND = "verify list-verifier-exts"
DEPENDS_ON = ExtendVerifier
class ListVerifierTests(Step):
"""List all tests of specific verifier."""
COMMAND = "verify list-verifier-tests"
DEPENDS_ON = CreateVerifier
class RunVerification(Step):
"""Run a verification."""
DEPENDS_ON = ConfigureVerifier
COMMAND = ("verify start --pattern set=%(set)s --skip-list %(skip_tests)s "
"--xfail-list %(xfail_tests)s --tag %(tag)s %(set)s-set "
"--detailed")
SKIP_TESTS = {
"tempest.api.compute.flavors.test_flavors.FlavorsV2TestJSON."
"test_get_flavor[id-1f12046b-753d-40d2-abb6-d8eb8b30cb2f,smoke]":
"This test was skipped intentionally"}
XFAIL_TESTS = {
"tempest.api.compute.servers.test_server_actions."
"ServerActionsTestJSON.test_get_vnc_console"
"[id-c6bc11bf-592e-4015-9319-1c98dc64daf5]":
"This test fails because 'novnc' console type is unavailable"}
def setUp(self):
self.CALL_ARGS["tag"] = "tag-1 tag-2"
self.CALL_ARGS["set"] = "full" if self.args.mode == "full" else "smoke"
# Start a verification, show results and generate reports
skip_tests = json.dumps(self.SKIP_TESTS)
xfail_tests = json.dumps(self.XFAIL_TESTS)
self.CALL_ARGS["skip_tests"] = self._write_file(
self._generate_path("skip-list.json"), skip_tests)
self.CALL_ARGS["xfail_tests"] = self._write_file(
self._generate_path("xfail-list.json"), xfail_tests)
def run(self):
super(RunVerification, self).run()
if "Success: 0" in self.result["output"]:
self.result["status"] = Status.FAILURE
class ReRunVerification(RunVerification):
"""Re-Run previous verification."""
COMMAND = "verify rerun --tag one-more-attempt"
class ShowVerification(Step):
"""Show results of verification."""
COMMAND = "verify show"
DEPENDS_ON = RunVerification
class ShowSecondVerification(ShowVerification):
"""Show results of verification."""
DEPENDS_ON = ReRunVerification
class ShowDetailedVerification(Step):
"""Show detailed results of verification."""
COMMAND = "verify show --detailed"
DEPENDS_ON = RunVerification
class ShowDetailedSecondVerification(ShowDetailedVerification):
"""Show detailed results of verification."""
DEPENDS_ON = ReRunVerification
class ReportVerificationMixin(Step):
"""Mixin for obtaining reports of verifications."""
COMMAND = "verify report --uuid %(uuids)s --type %(type)s --to %(out)s"
HTML_TEMPLATE = ("<span class=\"%(status)s\">[%(status)s]</span>\n"
"<a href=\"%(out)s\">%(doc)s</a> "
"[<a href=\"%(output_file)s\">Output from CLI</a>]\n"
"<code>$ %(cmd)s</code>")
def setUp(self):
self.CALL_ARGS["out"] = "<path>"
self.CALL_ARGS["uuids"] = "<uuid-1> <uuid-2>"
cmd = self.COMMAND % self.CALL_ARGS
report = "%s.%s" % (cmd.replace("/", "_").replace(" ", "_"),
self.CALL_ARGS["type"])
print(report)
self.CALL_ARGS["out"] = self._generate_path(report)
self.CALL_ARGS["uuids"] = " ".join(
[v["uuid"] for v in self.rapi.verification.list()])
print(self.COMMAND % self.CALL_ARGS)
def run(self):
super(ReportVerificationMixin, self).run()
creport = "%s.gz" % self.CALL_ARGS["out"]
with open(self.CALL_ARGS["out"], "rb") as f_in:
with gzip.open(creport, "wb") as f_out:
f_out.writelines(f_in)
self.result["out"] = creport
class HtmlVerificationReport(ReportVerificationMixin):
"""Generate HTML report for verification(s)."""
CALL_ARGS = {"type": "html-static"}
DEPENDS_ON = RunVerification
def setUp(self):
super(HtmlVerificationReport, self).setUp()
self.CALL_ARGS["out"] = self.CALL_ARGS["out"][:-7]
class JsonVerificationReport(ReportVerificationMixin):
"""Generate JSON report for verification(s)."""
CALL_ARGS = {"type": "json"}
DEPENDS_ON = RunVerification
class JunitVerificationReport(ReportVerificationMixin):
"""Generate JUNIT report for verification(s)."""
CALL_ARGS = {"type": "junit-xml"}
DEPENDS_ON = RunVerification
class ListVerifications(Step):
"""List all verifications."""
COMMAND = "verify list"
DEPENDS_ON = CreateVerifier
class DeleteVerifierExtension(Step):
"""Delete keystone extension."""
COMMAND = "verify delete-verifier-ext --name %(name)s"
CALL_ARGS = {"name": "keystone_tests"}
DEPENDS_ON = ExtendVerifier
class DeleteVerifier(Step):
"""Delete only Tempest verifier.
all verifications will be delete when destroy deployment.
"""
COMMAND = "verify delete-verifier --id %(id)s --force"
CALL_ARGS = {"id": CreateVerifier.CALL_ARGS["name"]}
DEPENDS_ON = CreateVerifier
class DestroyDeployment(Step):
"""Delete the deployment, and verifications of this deployment."""
COMMAND = "deployment destroy --deployment %(id)s"
CALL_ARGS = {"id": SetUpStep.DEPLOYMENT_NAME}
DEPENDS_ON = SetUpStep
def run(args):
steps = [SetUpStep,
ListPlugins,
CreateVerifier,
ShowVerifier,
ListVerifiers,
UpdateVerifier,
ConfigureVerifier,
ExtendVerifier,
ListVerifierExtensions,
ListVerifierTests,
RunVerification,
ShowVerification,
ShowDetailedVerification,
HtmlVerificationReport,
JsonVerificationReport,
JunitVerificationReport,
ListVerifications,
DeleteVerifierExtension,
DestroyDeployment,
DeleteVerifier]
if args.compare:
# need to launch one more verification
place_to_insert = steps.index(ShowDetailedVerification) + 1
# insert steps in reverse order to be able to use the same index
steps.insert(place_to_insert, ShowDetailedSecondVerification)
steps.insert(place_to_insert, ShowSecondVerification)
steps.insert(place_to_insert, ReRunVerification)
results = collections.OrderedDict()
rapi = api.API()
for step_cls in steps:
step = step_cls(args, rapi=rapi)
if step.check(results):
step.run()
results[step_cls] = step
return results.values()
def main():
parser = argparse.ArgumentParser(description="Launch rally-verify job.")
parser.add_argument("--mode", type=str, default="light",
help="Mode of job. The 'full' mode corresponds to the "
"full set of verifier tests. The 'light' mode "
"corresponds to the smoke set of verifier tests.",
choices=["light", "full"])
parser.add_argument("--compare", action="store_true",
help="Start the second verification to generate a "
"trends report for two verifications.")
# TODO(ylobankov): Remove hard-coded Tempest related things and make it
# configurable.
parser.add_argument("--ctx-create-resources", action="store_true",
help="Make Tempest context create needed resources "
"for the tests.")
args = parser.parse_args()
steps = run(args)
results = [step.to_html() for step in steps]
template = utils.get_template("ci/index_verify.html")
with open(os.path.join(Step.BASE_DIR, "extra/index.html"), "w") as f:
f.write(template.render(steps=results))
if len([None for step in steps
if step.result["status"] == Status.PASS]) == len(steps):
return 0
return 1
if __name__ == "__main__":
sys.exit(main())
|
python
|
# place `import` statement at top of the program
import string
# don't modify this code or the variable may not be available
input_string = input()
# use capwords() here
print(string.capwords(input_string))
|
python
|
#!/usr/bin/env python
# Select sequences by give seqIDs
import sys
import os
import myfunc
usage="""
usage: selectfastaseq.py -f fastaseqfile
[ ID [ID ... ] ] [-l FILE]
[ [-mine FLOAT ] [-maxe FLOAT] ]
Description: select fasta sequences from the given sequence file, either by the
supplied seqIDs or by an evalue threshold
-l FILE Set the ID list file
-o FILE Output the result to file, (default: stdout)
-mine FLOAT Set the minimal evalue threshold, (default: 0)
-maxe FLOAT Set the maximal evalue threshold, (default: 1e9)
-h,--help Print this help message and exit
Created 2011-11-16, updated 2012-05-30, Nanjiang Shu
Examples:
selectfastaseq.py -f seq.fa -l idlist.txt
selectfastaseq.py -f seq.fa -maxe 1e-3
"""
def PrintHelp():
print(usage)
def main(g_params):#{{{
numArgv = len(sys.argv)
if numArgv < 2:
PrintHelp()
return 1
outFile=""
idList=[]
idListFile=""
fastaFile=""
i = 1
isNonOptionArg=False
while i < numArgv:
if isNonOptionArg == True:
idList.append(sys.argv[i])
isNonOptionArg=False
i = i + 1
elif sys.argv[i] == "--":
isNonOptionArg=True
i = i + 1
elif sys.argv[i][0] == "-":
if (sys.argv[i] in [ "-h", "--help"]):
PrintHelp()
return 1
elif (sys.argv[i] in [ "-l", "--l", "-list", "--list"]):
idListFile=sys.argv[i+1]
i = i + 2
elif (sys.argv[i] in [ "-f", "--f", "-fasta", "--fasta"]):
fastaFile=sys.argv[i+1]
i = i + 2
elif (sys.argv[i] in [ "-o", "--o", "-outfile", "--outfile"]):
outFile=sys.argv[i+1]
i = i + 2
elif (sys.argv[i] in [ "-mine", "--mine"]):
g_params['min_evalue']=float(sys.argv[i+1])
g_params['isEvalueSet'] = True
i = i + 2
elif (sys.argv[i] in [ "-maxe", "--maxe"]):
g_params['max_evalue']=float(sys.argv[i+1])
g_params['isEvalueSet'] = True
i = i + 2
else:
print(("Error! Wrong argument:%s" % sys.argv[i]), file=sys.stderr)
return 1
else:
idList.append(sys.argv[i])
i+=1
if fastaFile == "":
print("Fatal! fasta file not set. Exit.", file=sys.stderr)
return 1
elif not os.path.exists(fastaFile):
print("Fatal! fasta file %s does not exist. Exit."%(fastaFile), file=sys.stderr)
return 1
if os.path.exists(idListFile):
idList += myfunc.ReadIDList(idListFile)
if len(idList) > 0:
isIDSet = True
else:
isIDSet = False
if not g_params['isEvalueSet'] and not isIDSet:
print("Error! no ID nor evalue threshold is set. Eixt", file=sys.stderr)
return 1
idListSet = set(idList)
fpout = myfunc.myopen(filename= outFile, default_fp = sys.stdout, mode="w", isRaise=False);
fpin = open (fastaFile, "r")
if not fpin:
print("Failed to open fastafile %s"%(fastaFile), file=sys.stderr)
return -1
unprocessedBuffer=""
isEOFreached = False
BLOCK_SIZE = g_params['BLOCK_SIZE']
isEvalueSet = g_params['isEvalueSet']
min_evalue = g_params['min_evalue']
max_evalue = g_params['max_evalue']
while 1:
buff = fpin.read(BLOCK_SIZE)
if len(buff) < BLOCK_SIZE:
isEOFreached=True
buff = unprocessedBuffer + buff
recordList = []
unprocessedBuffer = myfunc.ReadFastaFromBuffer(buff,recordList, isEOFreached)
if len(recordList) > 0:
for r in recordList:
if ((not isIDSet) or (r[0] in idListSet)):
if (not isEvalueSet or r[1].lower().find('evalue') < 0):
fpout.write(">%s\n"%r[1])
fpout.write("%s\n"%r[2])
else:
evalue=myfunc.GetEvalueFromAnnotation(r[1])
if (evalue == None or (evalue >= min_evalue and
evalue <= max_evalue)):
fpout.write(">%s\n"%r[1])
fpout.write("%s\n"%r[2])
if isEOFreached == True:
break
fpin.close()
myfunc.myclose(fpout)
#}}}
if __name__ == '__main__' :
# Check argv
g_params = {}
g_params['BLOCK_SIZE'] = 100000
g_params['min_evalue'] = 0.0
g_params['max_evalue'] = 1e9
    g_params['isEvalueSet'] = False  # whether the evalue threshold is set as a criterion
sys.exit(main(g_params))
|
python
|
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 29 00:56:43 2017
@author: roshi
"""
import pandas as pd
import matplotlib.pyplot as plt
import dash
import dash_core_components as dcc
import dash_html_components as html
import plotly.graph_objs as go
from app import app
data = pd.read_csv('./data/youth_tobacco_analysis.csv')
"""Pandas DataFrame Implemented"""
final_data = pd.DataFrame(data.groupby(['LocationDesc','MeasureDesc']).count())
final_data.to_csv('./data/question1.csv', sep = ',', encoding='utf-8')
qn1data = pd.read_csv('./data/question1.csv')
state_names = list(qn1data['LocationDesc'].unique())
layout = html.Div(children=[
html.Div([
dcc.Dropdown(
id='state_names',
options=[{'label': i, 'value': i} for i in state_names],
value='Arizona'
),
dcc.Dropdown(
id='state_names2',
options=[{'label': i, 'value': i} for i in state_names],
value='Connecticut'
),
],
style={'width': '30%', 'display': 'inline-block'}),
html.Div([
dcc.Graph(id='simple-bar'),
],style={ 'width': '49%'}),
])
@app.callback(
dash.dependencies.Output('simple-bar', 'figure'),
[dash.dependencies.Input('state_names', 'value'),
dash.dependencies.Input('state_names2', 'value')])
def update_bar_chart(statename1,statename2):
"""
Forms a Stacked Bar Chart
Keyword Arguments:
statename1 -- Gets the first state name to compare
statename2 -- Gets the second state name to compare
The values of the states are fetched and compared using a stacked Bar chart
    Operations used: list operations, functions, pandas DataFrame, and dict.
"""
value_list = list(qn1data['YEAR'][(qn1data['LocationDesc'] == statename1)])
name_list = list(qn1data['MeasureDesc'][(qn1data['LocationDesc'] == statename1)])
value_list2 = list(qn1data['YEAR'][(qn1data['LocationDesc'] == statename2)])
name_list2 = list(qn1data['MeasureDesc'][(qn1data['LocationDesc'] == statename2)])
return {
'data': ([
{'x': name_list, 'y': value_list, 'type': 'bar', 'name': statename1},
{'x': name_list2, 'y': value_list2, 'type': 'bar', 'name': statename2},
]),
'layout': go.Layout(
title = "Smoking Status Comarison by States",
xaxis={'title': 'Somking Status of Youth'},
yaxis={'title': 'Count of Youth Over the Years'}),
}
|
python
|
import numpy as np
import matplotlib.pyplot as plt
from infinity import inf
from scipy.spatial import ConvexHull
from scipy.spatial.distance import euclidean
from shapely.geometry import LineString, Point
from sklearn.model_selection import KFold
from .metrics import MetaMetrics
from . import util
class Meta:
def __init__( self, M_train, M_predict, B_train
, B_predict, epsilons ):
self.M_train = M_train
self.M_predict = M_predict
self.B_train = B_train
self.B_predict = B_predict
self.epsilons = epsilons
self.epsilons_invert = [1 - e for e in epsilons]
self.Ts = [inf for _ in epsilons]
def train(self, X, y, k_folds, plot = False):
X, y = util.format(X, y)
kf = KFold(n_splits = k_folds, shuffle = True)
y_meta_ = np.array([], dtype=np.int64)
for idx_train, idx_test in kf.split(X):
X_train, y_train = X[idx_train], y[idx_train]
X_test, y_test = X[idx_test], y[idx_test]
self.B_train(X_train, y_train)
y_meta_ = np.append(
y_meta_,
np.array([1 if p == t else 0 for p, t \
in zip(self.B_predict(X_test),y_test)])
)
self.B_train(X, y)
ratios = np.array([])
y_meta = np.array([], dtype=np.int64)
for idx_train, idx_test in kf.split(X):
X_train = X[idx_train]
y_train = y_meta_[idx_train]
X_test = X[idx_test]
y_test = y_meta_[idx_test]
self.M_train(X_train, y_train)
P = self.M_predict(X_test)
ratios_ = [p[1] / p[0] for p in P]
ratios = np.append(ratios, np.array(ratios_))
y_meta = np.append(y_meta, y_test)
self.M_train(X, y_meta)
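        # Sweep a decision threshold over the sorted score ratios and record a
        # (TPR, FPR) point per cut; y_meta == 1 marks samples the base
        # classifier predicted correctly.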
S = sorted(zip(ratios, y_meta), reverse=True)
t = sum(y_meta)
above = 0
ROCPTS = []
for i, s in enumerate(S):
TP = above
FP = i - above
FN = t - above
TN = len(S) - FN - i
tpr_denom = TP + FN
fpr_denom = FP + TN
ROCPTS.append([TP / tpr_denom, FP / fpr_denom])
above += s[1]
ROCPTS = np.array(ROCPTS)
hull = ConvexHull(ROCPTS)
vs = []
for i in hull.vertices:
vs.append(i)
if sum(ROCPTS[i]) == 0.0: break
HULLPTS = ROCPTS[vs]
iso = lambda target, fpr: target / (1.0 - target) \
* (len(S) - t) / t * fpr
for ti, e in enumerate(self.epsilons_invert):
iso_line = LineString([
(0.0, iso(e, 0.0)),
(1.0, iso(e, 1.0))
])
for i in range(len(HULLPTS) - 1):
line = LineString([
tuple(HULLPTS[i]),
tuple(HULLPTS[i + 1])
])
inter = iso_line.intersection(line)
if type(inter) is Point:
inter = np.array(inter.coords)[0]
d_inter = euclidean(inter, HULLPTS[i])
d_hull = euclidean(
HULLPTS[i], HULLPTS[i + 1]
)
prc = d_inter / d_hull
j = hull.vertices[i]
j_ = hull.vertices[i + 1]
d_ratio = euclidean(
ratios[j], ratios[j_]
)
T = ratios[j] + prc * d_ratio
self.Ts[ti] = T
break
if plot:
iso_line = np.array(list(iso_line.coords))
plt.plot( iso_line[:,0], iso_line[:,1]
, color = "g" )
if plot:
plt.scatter(ROCPTS[:,0], ROCPTS[:,1])
plt.plot(HULLPTS[:,0], HULLPTS[:,1], color="r")
plt.show()
def predict(self, X):
X = util.format(X)
p_vals = self.M_predict(X)
pred = self.B_predict(X)
res = []
for pv, pr in zip(p_vals, pred):
predicted = {}
ratio = pv[1] / pv[0]
for i, e in enumerate(self.epsilons):
if self.Ts[i] < ratio:
predicted[e] = pr
else:
predicted[e] = -1
res.append(predicted)
return res
def score(self, X, y):
X, y = util.format(X, y)
res = MetaMetrics(self.epsilons)
predicted = self.predict(X)
for p_, y_ in zip(predicted, y):
res.update(p_, y_)
return res
|
python
|
"""Parameter layer in TensorFlow."""
import tensorflow as tf
from tensorflow.python.ops.gen_array_ops import broadcast_to
def parameter(input_var,
length,
initializer=tf.zeros_initializer(),
dtype=tf.float32,
trainable=True,
name='parameter'):
"""
Parameter layer.
    Used as a layer whose variable can be broadcast to a certain shape to
    match the input variable during training.
    Example: a trainable parameter variable with shape (2,) needs to be
    broadcast to (32, 2) when applied to a batch of size 32.
Args:
input_var (tf.Tensor): Input tf.Tensor.
length (int): Integer dimension of the variables.
initializer (callable): Initializer of the variables. The function
should return a tf.Tensor.
dtype: Data type of the variables (default is tf.float32).
trainable (bool): Whether these variables are trainable.
name (str): Variable scope of the variables.
Return:
A tensor of the broadcasted variables.
"""
with tf.variable_scope(name):
p = tf.get_variable(
'parameter',
shape=(length, ),
dtype=dtype,
initializer=initializer,
trainable=trainable)
broadcast_shape = tf.concat(
axis=0, values=[tf.shape(input_var)[:-1], [length]])
p_broadcast = broadcast_to(p, shape=broadcast_shape)
return p_broadcast
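# Minimal usage sketch (illustrative, TF1-style graph mode; the placeholder and
# variable names below are made up and not part of the original module):
#
#   obs = tf.placeholder(tf.float32, shape=(None, 4))
#   log_std = parameter(obs, length=2, name='log_std')
#   # log_std has shape (batch_size, 2): the (2,)-shaped variable is broadcast
#   # to match the leading dimensions of `obs`.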
|
python
|
import gym
import torch as th
from stable_baselines3.common.torch_layers import BaseFeaturesExtractor
from torch import nn
class CNNFeatureExtractor(BaseFeaturesExtractor):
def __init__(self, observation_space: gym.spaces.Box, features_dim: int = 128, **kwargs):
super().__init__(observation_space, features_dim)
channels, height, width = observation_space.shape
self.cnn = nn.Sequential(
nn.LayerNorm([channels, height, width]),
nn.Conv2d(channels, 32, kernel_size=8, stride=4, padding=0, bias=False),
nn.LayerNorm([32, 24, 39]), # TODO: find automatically the weights of the layer norm
nn.LeakyReLU(negative_slope=0.1),
nn.Conv2d(32, 64, kernel_size=4, stride=2, padding=0, bias=False),
nn.LayerNorm([64, 11, 18]),
nn.LeakyReLU(negative_slope=0.1),
nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=0, bias=False),
nn.LayerNorm([64, 9, 16]),
nn.LeakyReLU(negative_slope=0.1),
nn.Flatten(),
)
# Compute shape by doing one forward pass
with th.no_grad():
n_flatten = self.cnn(th.as_tensor(observation_space.sample()[None]).float()).shape[1]
self.linear = nn.Sequential(
nn.Linear(n_flatten, features_dim, bias=False),
nn.LayerNorm(features_dim),
nn.LeakyReLU(negative_slope=0.1),
)
def forward(self, observations: th.Tensor) -> th.Tensor:
return self.linear(self.cnn(observations))
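# Minimal usage sketch (illustrative; the algorithm, environment and
# hyperparameters are placeholders, not part of the original module):
#
#   from stable_baselines3 import PPO
#   policy_kwargs = dict(
#       features_extractor_class=CNNFeatureExtractor,
#       features_extractor_kwargs=dict(features_dim=128),
#   )
#   model = PPO("CnnPolicy", env, policy_kwargs=policy_kwargs)
#   # note: the hard-coded LayerNorm shapes above assume a specific
#   # observation size, so `env` must produce matching image observations.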
|
python
|
#!/usr/bin/env python
# coding: utf-8
import numpy as np
from wopsego import Optimization, ValidOptimumNotFoundError
# Objective
def G24(point):
"""
Function G24
1 global optimum y_opt = -5.5080 at x_opt =(2.3295, 3.1785)
"""
p = np.atleast_2d(point)
return -p[:, 0] - p[:, 1]
# Constraints < 0
def G24_c1(point):
p = np.atleast_2d(point)
return (
-2.0 * p[:, 0] ** 4.0
+ 8.0 * p[:, 0] ** 3.0
- 8.0 * p[:, 0] ** 2.0
+ p[:, 1]
- 2.0
)
def G24_c2(point):
p = np.atleast_2d(point)
return (
-4.0 * p[:, 0] ** 4.0
+ 32.0 * p[:, 0] ** 3.0
- 88.0 * p[:, 0] ** 2.0
+ 96.0 * p[:, 0]
+ p[:, 1]
- 36.0
)
# Grouped evaluation
def f_grouped(point):
p = np.atleast_2d(point)
return np.array([G24(p), G24_c1(p), G24_c2(p)]).T
xlimits = [[0, 3], [0, 4]]
cstr_specs = 2 * [{"type": "<", "bound": 0.0}]
optim = Optimization(xlimits, cstr_specs)
# from smt.sampling_methods import LHS
# lhs = LHS(xlimits=np.array(xlimits), criterion='ese', random_state=42)
# xdoe = lhs(5)
# ydoe = f_grouped(xdoe)
# print("Initial DOE")
# print("xdoe={}".format(xdoe))
# print("ydoe={}".format(ydoe))
xdoe = [
[1.29361118, 3.76645806],
[0.22472407, 3.09294092],
[1.83485017, 0.76057145],
[1.03919637, 1.72479562],
[2.76066901, 1.27892679],
]
ydoe = [
    [-5.06006925, 0.0964247, 2.76239458],
    [-3.31766499, 0.77462312, -15.4246689],
    [-2.59542161, -1.4230771, -3.0242084],
    [-2.76399198, -2.26906368, 1.70116801],
    [-4.03959579, -9.54069818, 0.5686734],
]
optim.tell_doe(xdoe, ydoe)
optim.run(f_grouped, n_iter=20)
|
python
|
document = ["Whip-Off World Championships - Gallery","ROWDY DH COURSE PREVIEW! 2019 iXS European Downhill Cup #5, Spicak","IXS Launch the Trigger FF, The Lightest Full Face Out There.","Watch: Breck Epic Day 2","Cam Zink Slams Hard on a 110 Foot Backflip Attempt.","Pivot’s All-New Mach 4 SL","Meet The Riders And Their Rides: Russell Finsterwald","Crankworx Roundup Vol. 2","iXS releases Trigger Lightweight Full-Face Helmet","Watch the Most Creative Riding at Whistler Bike Park","Trek Unveils All-New Fuel EX Lineup","iXS Introduces the World's Lightest Full Face Helmet - Trigger FF","Keystone Bike Park has Something for Every Mountain Biker - Singletracks Mountain Bike News","2019 Whip-Off World Champs Crankworx Whistler","Mavic Crossride Belt: A Simple and Comfortable Hip Pack [Review] - Singletracks Mountain Bike News","Watch: We Rode our Mountain Bikes With Wild Animals in Tanzania - Singletracks Mountain Bike News","Navigate with komoot – the app that helps you to discover new trails | ENDURO Mountainbike Magazine","[Photos] The Wildest Whips from Crankworx Whistler Whip-Off World Championships - Singletracks Mountain Bike News","Fox New Damper Update & Roller-Bearing Shock Hardware Kit | Mountain Bike Action Magazine","MBA Product Test: Thule Rail 12L Pro Hydration Pack | Mountain Bike Action Magazine","Best Bike Hacks with The Syndicate | Mountain Bike Action Magazine","New Product Discovery: 2020 Trek Fuel EX | Mountain Bike Action Magazine","Crankworx Dual Slalom Replay - Long Live Slalom","Gee Milner Dream Build - Pivot Firebird 29","First Ride: 2020 Trek Fuel EX 9.9 | BIKE Magazine","First ride review: the new Trek Fuel EX 9.9 2020 – longer, faster and even better? | ENDURO Mountainbike Magazine","Pro Skills: How to Start a Mountain Bike YouTube Channel, Featuring BKXC - Singletracks Mountain Bike News","Mountain Biking Bellingham, Washington: Loamy, Steep, and Beautiful - Singletracks Mountain Bike News","First Look | Maxxis Has A Brand New Tyre Called The Dissector","Watch: 5 Easy Jump Tricks // Mountain Bike Skills - Singletracks Mountain Bike News","Tune-In To Watch Live Mountain Bike Action at Crankworx | Mountain Bike Action Magazine","PIT BITS - Crankworx Whistler - New Five Ten Trailcross Shoes","All-New 2020 Knolly Warden","Mt. 
Bachelor's Much Anticipated New Downhill MTB Trail Is Open | Mountain Bike Action Magazine","Rocky Mountain's All-New Slayer | Mountain Bike Action Magazine","Video: Cam Zink's 110-Foot World Record Backflip Crash | Mountain Bike Action Magazine","New Product Discovery: Motool Slacker Digital Sag Scale | Mountain Bike Action Magazine","The TDS Enduro Race | Mountain Bike Action Magazine","Crankworx Roundup: The Sendy Addition","Loic Bruni Interviews Brendan Fairclough - Outspoken Episode 7","Fear-Less: 3 Things That Can Help You Control The Fears You Struggle With When Mountain Biking - Singletracks Mountain Bike News","Family-Owned Patrol Mountain Bikes Is Expanding from Indonesia to the Rest of the World - Singletracks Mountain Bike News","Watch: A Wet Finish to the 2019 Superenduro Series [Italy] - Singletracks Mountain Bike News","Race Face Keeps Wallets in Mind with new AEffect R Dropper and Crankset - Singletracks Mountain Bike News","Wise Words | Caroline Buchanan.","First Look: CUBE Stereo 170 – CUBE 2020 mountain bike news and highlights | ENDURO Mountainbike Magazine","Big, Bad Whistler - Enduro World Series Race Show","Rocky Mountain Slayer 29","Scott-Sports New Gambler Alloy Bike | Mountain Bike Action Magazine","2019 Enduro World Series | Round 6 | Crankworx Whistler Highlights.","Pivot Cycles Kurt Refsnider Wins Colorado Trail Race | Mountain Bike Action Magazine","Cam Zink Crashes Attempting 110-Foot Backflip While Working Up to 150!","POV: The Best Slopestyle Course Yet? | Mountain Bike Action Magazine","Photo of the Day | Mountain Bike Action Magazine","MBA Product Test: SDG Components JR Pro Kit | Mountain Bike Action Magazine","The 2020 Specialized Enduro Mountain Bike is Overhauled and Now Exclusively a 29er - Singletracks Mountain Bike News","Video: Set Up for Success with Kate Courtney | Mountain Bike Action Magazine","FOX Releases Updated FIT4 Damper and New Bearing Shock Hardware","Mountain Bike Packs and Our Packable Picks - Singletracks Mountain Bike News","Specialized Introduces the 2020 Enduro | BIKE Magazine","First Look | The 2020 Specialized Enduro Is Nothing Like The Old One","First Ride Review: All-new Specialized Enduro 29 | ENDURO Mountainbike Magazine","2020 Specialized Enduro | Everything You Need to Know.","Get in The Van! Mountain Bike Shuttling Tahoe's Best Trails - Singletracks Mountain Bike News","Watch: Urban Freeriding, New York City - Singletracks Mountain Bike News","First Impressions: 2020 Rocky Mountain Slayer 29 | BIKE Magazine","Full Details Released: Specialized's Brand New Enduro","New Rocky Mountain Slayer is a Bigger, Badder, and Heftier Mountain Bike Than Ever Before - Singletracks Mountain Bike News","First ride review: 2020 Rocky Mountain Slayer Carbon 90 – What a machine! | ENDURO Mountainbike Magazine","The All-New 2020 Rocky Mountain Slayer Unveiled","Vital RAW - 2019 ENDURO WORLD SERIES WHISTLER!","Vital MTB on Instagram: “Its GIVEAWAY TIME! 
In celebration of their 30th anniversary, Slime and Genuine Innovations’ are giving away an amazing tire repair package…”","Maxxis New Dissector Tire | Mountain Bike Action Magazine","MBA Bike Review: Scott Spark RC 900 World Cup | Mountain Bike Action Magazine","E*thirteen's New Cockpit | Mountain Bike Action Magazine","France threatens to block trade over Amazon fires","Trump says US firms 'hereby ordered' to quit China","EuroHockey Championships 2019: England women thrashed by Netherlands in semi-final","Former Houston police officer charged with murder over raid","Minnesota filmmakers' lawsuit over gay weddings reinstated","Deputies: Would-be robber high-fives clerk, leaves knife","Johnson and Trump speak ahead of G7 meeting","Teen in hospital after horror crash that killed her boyfriend still asks for him","Bury owner Steve Dale says club has been sold with EFL deadline approaching","Ex-Happy Mondays manager almost trapped in London tower block fire near Grenfell","Republicans battle for conservative support ahead of Mississippi runoff this month","Exhilarating close-up video captures a ULA rocket's final blast off","Klopp recalls his key Aubameyang decision ahead of Liverpool vs Arsenal","Unai Emery makes Arsenal vow ahead of latest Anfield clash with Liverpool","Boris Johnson urges Donald Trump to back George Osborne for New York bank job","Luke Campbell refuses to be awed by Vasyl Lomachenko ahead of world title fight","Pochettino takes brutal parting shot at Tottenham midfielder Wanyama","Anthony Yarde confident of beating Sergey Kovalev to win world title","Boris Johnson flies in to France for 'critical' first meeting with Donald Trump","Hodgson makes Aaron Wan-Bissaka prediction ahead of Man Utd vs Crystal Palace","Minneapolis City Council OKs settlement in police shooting","Jeremy Corbyn's son Tommy runs shop selling products made of cannabis","Texas woman finds 3.72-carat yellow diamond during visit to Arkansas park","Lawsuit: Ex-Citadel staffer drugged, sexually abused cadet","Music fans hit booze as party starts at Leeds and Reading Festivals","Taylor Swift hilariously mocked for song London Boy's rose-tinted view of city","Trump Slaps New Tariffs On Chinese Goods As Trade War Continues","Dad slams 'disgraceful' warden after getting £75 fine while visiting sick son","Justice Dept. 
Under Fire For Allegedly Sending Staff Link To White Nationalist Site","Boris tells migrants 'we will send you back' if they cross Channel","More millennial women choosing religious life: 'This is what I'm supposed to be doing'","Prem star Andre Gray says Boris Johnson 'will never understand' knife crime","Hong Kong's human chain of protest","Bury owner agrees deal to sell club hours before EFL deadline","Couple says hospital misplaced remains of miscarried baby","Protesters demonstrate outside Brazilian embassies over Amazon","Desperate search for mum and daughter, 2, missing after getting in taxi","Beverley Turner: Critics of Train Your Baby Like A Dog are barking mad","Disney makes 'shelter mutt' a Hollywood star in new version of Lady and the Tramp","Jeffrey Epstein 'madam' in secret Palace visits to see Prince Andrew says ex-cop","Jair Bolsonaro is forced into action over Amazon wildfire","EastEnders fans think Hunter 'died in mum's arms' after suffering from sepsis","Aston Villa 2-0 Everton: Wesley & Anwar El Ghazi on target for hosts","'Boris Johnson's ignorance means he's part of the problem as knife crimes soar'","Warrington boss Price likened to Klopp ahead of Challenge Cup final","Boy, 8, wrecks mum's car after joyriding it down motorway at 112mph","Rob Holding steps up Arsenal injury comeback by playing 90 minutes for U23s","Judge says she can't order new trial for convicted murderer","Candidate: Michigan city should be as white 'as possible'","Lindsey Vonn engaged to P.K. Subban","Colton Underwood on finding 'fulfillment' post-'Bachelor': I'm at 'an interesting point in my career'","Robert Downey Jr. And Tom Holland Reunite Amid Disney/Sony ‘Spider-Man’ Drama","Ring of steel: Biarritz is on lockdown ahead of G7 summit","Bette Midler lobs profane attack at leading conservative who honored David Koch","3 young Michigan boys convicted of raping brothers","Man gets life for 1998 killing after confessions met doubt","The Latest: Jury deliberating over parking lot shooting","Bernie Sanders indicates climate plan will require nationalization of US energy production","Prosecutor: Florida parking lot shooter was a 'vigilante'","Man accused of killing 2 found competent to stand trial","Doctors Remove Venomous Spider From Woman's Ear","Brazil Plans to Mobilize the Military to Fight Fires in the Amazon","Westlife heading on massive world tour after Mark Feehily's paternity leave","Evangelical Group: Indefinitely Detaining Migrant Children Is Wrong","US school spends $48 million on curved corridors and hiding places to foil mass shooters","Markets Right Now: Markets shudder as trade tensions flare","Kids TV star victim of seagull 'revenge' after failed sandwich swoop","Ruth Bader Ginsburg, 86, completed treatment for tumor on pancreas","Indigenous Amazon tribes vow to fight against loss of their home","Man charged in campus slaying of California co-worker","Marc Thiessen: Don't dismiss Trump's Greenland proposal. It's far from ridiculous","Awful Magaluf tattoo blunder after teenager stops halfway through due to pain","Camila Cabello Tells Fans About The 'Life Changing' Mental Health Tool She Swears By","England warned they 'must get better' after dismal second day in third Test","Archaeologists uncover ancient fort’s secrets before it falls into the sea"]
|
python
|
"""Test the Aurora ABB PowerOne Solar PV sensors."""
from datetime import timedelta
from unittest.mock import patch
from aurorapy.client import AuroraError
from homeassistant.components.aurora_abb_powerone.const import (
ATTR_DEVICE_NAME,
ATTR_FIRMWARE,
ATTR_MODEL,
ATTR_SERIAL_NUMBER,
DEFAULT_INTEGRATION_TITLE,
DOMAIN,
)
from homeassistant.const import CONF_ADDRESS, CONF_PORT
import homeassistant.util.dt as dt_util
from tests.common import MockConfigEntry, async_fire_time_changed
TEST_CONFIG = {
"sensor": {
"platform": "aurora_abb_powerone",
"device": "/dev/fakedevice0",
"address": 2,
}
}
def _simulated_returns(index, global_measure=None):
returns = {
3: 45.678, # power
21: 9.876, # temperature
5: 12345, # energy
}
return returns[index]
def _mock_config_entry():
return MockConfigEntry(
version=1,
domain=DOMAIN,
title=DEFAULT_INTEGRATION_TITLE,
data={
CONF_PORT: "/dev/usb999",
CONF_ADDRESS: 3,
ATTR_DEVICE_NAME: "mydevicename",
ATTR_MODEL: "mymodel",
ATTR_SERIAL_NUMBER: "123456",
ATTR_FIRMWARE: "1.2.3.4",
},
source="dummysource",
entry_id="13579",
unique_id="654321",
)
async def test_sensors(hass):
"""Test data coming back from inverter."""
mock_entry = _mock_config_entry()
with patch("aurorapy.client.AuroraSerialClient.connect", return_value=None), patch(
"aurorapy.client.AuroraSerialClient.measure",
side_effect=_simulated_returns,
), patch(
"aurorapy.client.AuroraSerialClient.serial_number",
return_value="9876543",
), patch(
"aurorapy.client.AuroraSerialClient.version",
return_value="9.8.7.6",
), patch(
"aurorapy.client.AuroraSerialClient.pn",
return_value="A.B.C",
), patch(
"aurorapy.client.AuroraSerialClient.firmware",
return_value="1.234",
), patch(
"aurorapy.client.AuroraSerialClient.cumulated_energy",
side_effect=_simulated_returns,
):
mock_entry.add_to_hass(hass)
await hass.config_entries.async_setup(mock_entry.entry_id)
await hass.async_block_till_done()
power = hass.states.get("sensor.power_output")
assert power
assert power.state == "45.7"
temperature = hass.states.get("sensor.temperature")
assert temperature
assert temperature.state == "9.9"
energy = hass.states.get("sensor.total_energy")
assert energy
assert energy.state == "12.35"
async def test_sensor_dark(hass):
"""Test that darkness (no comms) is handled correctly."""
mock_entry = _mock_config_entry()
utcnow = dt_util.utcnow()
# sun is up
with patch("aurorapy.client.AuroraSerialClient.connect", return_value=None), patch(
"aurorapy.client.AuroraSerialClient.measure", side_effect=_simulated_returns
), patch(
"aurorapy.client.AuroraSerialClient.serial_number",
return_value="9876543",
), patch(
"aurorapy.client.AuroraSerialClient.version",
return_value="9.8.7.6",
), patch(
"aurorapy.client.AuroraSerialClient.pn",
return_value="A.B.C",
), patch(
"aurorapy.client.AuroraSerialClient.firmware",
return_value="1.234",
):
mock_entry.add_to_hass(hass)
await hass.config_entries.async_setup(mock_entry.entry_id)
await hass.async_block_till_done()
power = hass.states.get("sensor.power_output")
assert power is not None
assert power.state == "45.7"
# sunset
with patch("aurorapy.client.AuroraSerialClient.connect", return_value=None), patch(
"aurorapy.client.AuroraSerialClient.measure",
side_effect=AuroraError("No response after 10 seconds"),
):
async_fire_time_changed(hass, utcnow + timedelta(seconds=60))
await hass.async_block_till_done()
power = hass.states.get("sensor.power_output")
assert power.state == "unknown"
# sun rose again
with patch("aurorapy.client.AuroraSerialClient.connect", return_value=None), patch(
"aurorapy.client.AuroraSerialClient.measure", side_effect=_simulated_returns
):
async_fire_time_changed(hass, utcnow + timedelta(seconds=60))
await hass.async_block_till_done()
power = hass.states.get("sensor.power_output")
assert power is not None
assert power.state == "45.7"
# sunset
with patch("aurorapy.client.AuroraSerialClient.connect", return_value=None), patch(
"aurorapy.client.AuroraSerialClient.measure",
side_effect=AuroraError("No response after 10 seconds"),
):
async_fire_time_changed(hass, utcnow + timedelta(seconds=60))
await hass.async_block_till_done()
power = hass.states.get("sensor.power_output")
assert power.state == "unknown" # should this be 'available'?
async def test_sensor_unknown_error(hass):
"""Test other comms error is handled correctly."""
mock_entry = _mock_config_entry()
with patch("aurorapy.client.AuroraSerialClient.connect", return_value=None), patch(
"aurorapy.client.AuroraSerialClient.measure",
side_effect=AuroraError("another error"),
):
mock_entry.add_to_hass(hass)
await hass.config_entries.async_setup(mock_entry.entry_id)
await hass.async_block_till_done()
power = hass.states.get("sensor.power_output")
assert power is None
|
python
|
from typing import Generic, List, Optional, TypeVar
from py_moysklad.entities.context import Context
from py_moysklad.entities.meta_entity import MetaEntity
T = TypeVar("T", bound=MetaEntity)
class ListEntity(MetaEntity, Generic[T]):
context: Optional[Context]
rows: Optional[List[T]]
|
python
|
from .script import test
def main():
test()
|
python
|
def rosenbrock_list(**kwargs):
num_fns = kwargs['functions']
# if num_fns > 1:
# least_sq_flag = true
# else:
# least_sq_flag = false
x = kwargs['cv']
ASV = kwargs['asv']
# get analysis components
#an_comps = kwargs['analysis_components']
#print an_comps
f0 = x[1]-x[0]*x[0]
f1 = 1-x[0]
retval = dict([])
if (ASV[0] & 1): # **** f:
f = [100*f0*f0+f1*f1]
retval['fns'] = f
if (ASV[0] & 2): # **** df/dx:
g = [ [-400*f0*x[0] - 2*f1, 200*f0] ]
retval['fnGrads'] = g
if (ASV[0] & 4): # **** d^2f/dx^2:
fx = x[1]-3*x[0]*x[0]
h = [
[ [-400*fx + 2, -400*x[0]],
[-400*x[0], 200 ] ]
]
retval['fnHessians'] = h
return(retval)
def rosenbrock_numpy(**kwargs):
from numpy import array
num_fns = kwargs['functions']
# if num_fns > 1:
# least_sq_flag = true
# else:
# least_sq_flag = false
x = kwargs['cv']
ASV = kwargs['asv']
f0 = x[1]-x[0]*x[0]
f1 = 1-x[0]
retval = dict([])
if (ASV[0] & 1): # **** f:
f = array([100*f0*f0+f1*f1])
retval['fns'] = f
if (ASV[0] & 2): # **** df/dx:
g = array([[-400*f0*x[0] - 2*f1, 200*f0]])
retval['fnGrads'] = g
if (ASV[0] & 4): # **** d^2f/dx^2:
fx = x[1]-3*x[0]*x[0]
h = array([ [ [-400*fx + 2, -400*x[0]],
[-400*x[0], 200 ] ] ] )
retval['fnHessians'] = h
return(retval)
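# Example (illustrative, using the Dakota-style kwargs this driver expects):
# value, gradient and Hessian of the Rosenbrock function at the classic
# starting point (-1.2, 1.0).
#
#   out = rosenbrock_list(functions=1, cv=[-1.2, 1.0], asv=[7])
#   # out['fns']     == [24.2]
#   # out['fnGrads'] == [[-215.6, -88.0]]
#   # out['fnHessians'] holds the 2x2 Hessian at that point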
|
python
|
from django.test import TestCase
from ..factories import UrlFactory
from ..helpers import BASE62IdConverter
from ..models import Url
class TestUrlManager(TestCase):
def setUp(self):
self.url_objs = [UrlFactory() for i in range(100)]
def test_get_by_shortcut(self):
received_url_obj = [
Url.object.get_by_shortcut(obj.url_shortcut) for obj in self.url_objs
]
for get_obj, expected_obj in zip(received_url_obj, self.url_objs):
self.assertEqual(expected_obj.id, get_obj.id)
def test_get_by_shortcut_return__none__when_obj_does_not_exist(self):
latest_id = Url.object.last().id
not_existed_id = latest_id + 1
not_existed_id_short_url = BASE62IdConverter.encode_id_to_string(not_existed_id)
self.assertIsNone(Url.object.get_by_shortcut(not_existed_id_short_url))
|
python
|
from Pluto.Libraries.Libraries.GLM import *
|
python
|
""" BOVI(n)E getsecuritygroup endpoint """
import json
import boto3
from botocore.exceptions import ClientError
from lib import rolesession
def get_instance_info(session, account, sg_id):
""" Get EC2 instance info for a specific security group.
:param session: AWS boto3 session
:param account: AWS account
:param sg_id: AWS Security group id
"""
ec2 = session.client('ec2')
instance_data = []
instance_info = ec2.describe_instances(Filters=[dict(
Name='instance.group-id',
Values=[sg_id])])
instances = instance_info['Reservations']
for res in instances:
instance_name = None
for instance in res['Instances']:
for tag in instance['Tags']:
if tag['Key'] == 'Name':
instance_name = tag.get('Value', None)
break
instance_data.append(dict(
Name=instance_name,
InstanceId=instance['InstanceId'],
AccountNum=account))
return instance_data
def format_outbound_rules(rules):
""" Format security group egress rules.
:param rules: Security group rules
"""
formatted_rules = []
for rule in rules:
print rule
from_port = rule.get('FromPort')
to_port = rule.get('ToPort')
if from_port and to_port:
ports = str(from_port) + "-" + str(to_port)
else:
ports = 'all'
if rule['IpProtocol'] == '-1':
protocol = 'all'
else:
protocol = rule['IpProtocol']
for ip_addr in rule['IpRanges']:
destination = ip_addr['CidrIp']
formatted_rules.append(
{"Destination": destination, "Ports": ports,
"Protocol": protocol})
prefixlist = rule.get('PrefixListIds')
if prefixlist:
for ip_prefix in prefixlist:
                destination = ip_prefix['PrefixListId']
formatted_rules.append(
{"Destination": destination, "Ports": ports,
"Protocol": protocol})
for sg_group in rule['UserIdGroupPairs']:
destination = sg_group['GroupId']
formatted_rules.append(
{"Destination": destination, "Ports": ports,
"Protocol": protocol})
print formatted_rules
return formatted_rules
def format_inbound_rules(rules):
""" Format security group ingress rules.
:param rules: security group rules
"""
formatted_rules = []
for rule in rules:
from_port = rule.get('FromPort')
to_port = rule.get('ToPort')
if from_port and to_port:
ports = str(from_port) + "-" + str(to_port)
else:
ports = 'all'
if rule['IpProtocol'] == '-1':
protocol = 'all'
else:
protocol = rule['IpProtocol']
for ip_addr in rule['IpRanges']:
source = ip_addr['CidrIp']
formatted_rules.append(
{"Source": source, "Ports": ports, "Protocol": protocol})
for ip_prefix in rule['PrefixListIds']:
            source = ip_prefix['PrefixListId']
formatted_rules.append(
{"Source": source, "Ports": ports, "Protocol": protocol})
for sg_group in rule['UserIdGroupPairs']:
source = sg_group['GroupId']
formatted_rules.append(
{"Source": source, "Ports": ports, "Protocol": protocol})
return formatted_rules
def get_security_group(account, group, region):
""" Get security group details.
:param account: AWS account
:param group: security group id
:param region: AWS region
"""
session = boto3.session.Session(region_name=region)
assume = rolesession.assume_crossact_audit_role(
session, account, region)
if assume:
ec2 = assume.client('ec2')
try:
sg_info = ec2.describe_security_groups(
GroupIds=[group])['SecurityGroups'][0]
except ClientError:
print "Security group not found"
return dict(Account=dict(accountNum=account),
Instance=dict(message='Security group not found'))
instance_data = get_instance_info(assume, account, sg_info['GroupId'])
client = assume.client('elb')
elb_data = []
for elb in client.describe_load_balancers()['LoadBalancerDescriptions']:
for sec_group in elb['SecurityGroups']:
if sec_group == sg_info['GroupId']:
elb_data.append(dict(
Name=elb['LoadBalancerName'],
AccountNum=account))
return dict(
Account=dict(accountNum=account),
SecurityGroup=dict(
GroupName=sg_info['GroupName'],
GroupId=sg_info['GroupId'],
Description=sg_info['Description'],
Tags=sg_info.get('Tags', None),
InboundRules=format_inbound_rules(sg_info['IpPermissions']),
OutboundRules=format_outbound_rules(sg_info['IpPermissionsEgress']),
VpcId=sg_info['VpcId'],
Region=region),
Instances=instance_data,
ELB=elb_data)
else:
print '{"message":"Account not assumable"}'
return dict(Account=dict(accountNum=account),
Message='Account not assumable')
def lambda_handler(*kwargs):
""" Lambda handler function.
:param event: Lambda event
:param context: Lambda context
"""
account = None
region = None
query_params = kwargs[0].get('queryStringParameters')
if query_params:
account = query_params.get('account')
group = query_params.get('group')
region = query_params.get('region')
results = get_security_group(account, group, region)
body = results
else:
body = {"Message": "Security Group not found."}
response = {
"statusCode": 200,
"body": json.dumps(body)
}
return response
|
python
|
from typing import Dict
def count_duplicates(sample_dict: Dict) -> int:
"""takes a single dictionary as an argument and returns the number of values that appear two or more times
>>> sample_dict = {'red': 1, 'green': 1, 'blue': 2, 'purple': 2, 'black': 3, 'magenta': 4}
>>> count_duplicates(sample_dict)
2
"""
seen_values = []
duplicate_values = 0
for k,v in sample_dict.items():
if v not in seen_values:
seen_values.append(v)
else:
duplicate_values += 1
return duplicate_values
if __name__ == "__main__":
import doctest
doctest.testmod()
|
python
|
#!/usr/bin/env python
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
"""Tests of statistical probability distribution integrals.
Currently using tests against calculations in R, spreadsheets being unreliable.
"""
from unittest import TestCase, main
import numpy as np
from skbio.maths.stats.distribution import (chi_high, z_high, zprob, f_high,
binomial_high, bdtrc, stdtr)
class DistributionsTests(TestCase):
"""Tests of particular statistical distributions."""
def setUp(self):
self.values = [0, 0.01, 0.1, 0.5, 1, 2, 5, 10, 20, 30, 50, 200]
self.negvalues = [-i for i in self.values]
self.df = [1, 10, 100]
def test_z_high(self):
"""z_high should match R's pnorm(lower.tail=FALSE) function"""
negprobs = [
0.5000000, 0.5039894, 0.5398278, 0.6914625, 0.8413447,
0.9772499, 0.9999997, 1.0000000, 1.0000000, 1.0000000,
1.0000000, 1.0000000,
]
probs = [
5.000000e-01, 4.960106e-01, 4.601722e-01, 3.085375e-01,
1.586553e-01, 2.275013e-02, 2.866516e-07, 7.619853e-24,
2.753624e-89, 4.906714e-198, 0.000000e+00, 0.000000e+00]
for z, p in zip(self.values, probs):
np.testing.assert_allclose(z_high(z), p, atol=10e-7)
for z, p in zip(self.negvalues, negprobs):
np.testing.assert_allclose(z_high(z), p)
def test_zprob(self):
"""zprob should match twice the z_high probability for abs(z)"""
probs = [2 * i for i in [
5.000000e-01, 4.960106e-01, 4.601722e-01, 3.085375e-01,
1.586553e-01, 2.275013e-02, 2.866516e-07, 7.619853e-24,
2.753624e-89, 4.906714e-198, 0.000000e+00, 0.000000e+00]]
for z, p in zip(self.values, probs):
np.testing.assert_allclose(zprob(z), p, atol=10e-7)
for z, p in zip(self.negvalues, probs):
np.testing.assert_allclose(zprob(z), p, atol=10e-7)
def test_chi_high(self):
"""chi_high should match R's pchisq(lower.tail=FALSE) function"""
probs = {
1: [1.000000e+00, 9.203443e-01, 7.518296e-01, 4.795001e-01,
3.173105e-01, 1.572992e-01, 2.534732e-02, 1.565402e-03,
7.744216e-06, 4.320463e-08, 1.537460e-12, 2.088488e-45,
],
10: [1.000000e+00, 1.000000e-00, 1.000000e-00, 9.999934e-01,
9.998279e-01, 9.963402e-01, 8.911780e-01, 4.404933e-01,
2.925269e-02, 8.566412e-04, 2.669083e-07, 1.613931e-37,
],
100: [1.00000e+00, 1.00000e+00, 1.00000e+00, 1.00000e+00,
1.00000e+00, 1.00000e+00, 1.00000e+00, 1.00000e+00,
1.00000e+00, 1.00000e+00, 9.99993e-01, 1.17845e-08,
],
}
for df in self.df:
for x, p in zip(self.values, probs[df]):
np.testing.assert_allclose(chi_high(x, df), p, atol=10e-7)
def test_binomial_high(self):
"""Binomial high should match values from R for integer successes"""
expected = {
(0, 1, 0.5): 0.5,
(1, 1, 0.5): 0,
(1, 1, 0.0000001): 0,
(1, 1, 0.9999999): 0,
(3, 5, 0.75): 0.6328125,
(0, 60, 0.5): 1,
(129, 130, 0.5): 7.34684e-40,
(299, 300, 0.099): 4.904089e-302,
(9, 27, 0.0003): 4.958496e-29,
(1032, 2050, 0.5): 0.3702155,
(-1, 3, 0.1): 1, # if successes less than 0, return 1
(-0.5, 3, 0.1): 1,
}
for (key, value) in expected.items():
np.testing.assert_allclose(binomial_high(*key), value, 1e-4)
# should reject if successes > trials or successes < -1
self.assertRaises(ValueError, binomial_high, 7, 5, 0.5)
def test_f_high(self):
"""F high should match values from R for integer successes"""
expected = {
(1, 1, 0): 1,
(1, 1, 1): 0.5,
(1, 1, 20): 0.1400487,
(1, 1, 1000000): 0.0006366196,
(1, 10, 0): 1,
(1, 10, 5): 0.0493322,
(1, 10, 20): 0.001193467,
(10, 1, 0): 1,
(10, 10, 14.7): 0.0001062585,
# test non-integer degrees of freedom
(13.7, 11.9, 3.8): 0.01340347,
# used following series to track down a bug after a failed test
# case
(28, 29, 2): 0.03424088,
(28, 29, 10): 1.053019e-08,
(28, 29, 20): 1.628245e-12,
(28, 29, 300): 5.038791e-29,
(28, 35, 1): 0.4946777,
(28, 37, 1): 0.4934486,
(28, 38, 1): 0.4928721,
(28, 38.001, 1): 0.4928716,
(28, 38.5, 1): 0.4925927,
(28, 39, 1): 0.492319,
(28, 39, 10): 1.431901e-10,
(28, 39, 20): 1.432014e-15,
(28, 39, 30): 1.059964e-18,
(28, 39, 50): 8.846678e-23,
(28, 39, 10): 1.431901e-10,
(28, 39, 300): 1.226935e-37,
(28, 39, 50): 8.846678e-23,
(28, 39, 304.7): 9.08154e-38,
(28.4, 39.2, 304.7): 5.573927e-38,
(1032, 2050, 0): 1,
(1032, 2050, 4.15): 1.23535e-165,
(1032, 2050, 0.5): 1,
(1032, 2050, 0.1): 1,
}
e = sorted(expected.items())
for (key, value) in e:
np.testing.assert_allclose(f_high(*key), value, atol=10e-7)
def test_bdtrc(self):
"""bdtrc should give same results as cephes"""
k_s = [0, 1, 2, 3, 5]
n_s = [5, 10, 1000]
p_s = [1e-10, .1, .5, .9, .999999]
exp = [
4.999999999e-10,
0.40951,
0.96875,
0.99999,
1.0,
9.9999999955e-10,
0.6513215599,
0.9990234375,
0.9999999999,
1.0,
9.9999995005e-08,
1.0,
1.0,
1.0,
1.0,
9.999999998e-20,
0.08146,
0.8125,
0.99954,
1.0,
4.4999999976e-19,
0.2639010709,
0.9892578125,
0.9999999909,
1.0,
4.99499966766e-15,
1.0,
1.0,
1.0,
1.0,
9.9999999985e-30,
0.00856,
0.5,
0.99144,
1.0,
1.19999999937e-28,
0.0701908264,
0.9453125,
0.9999996264,
1.0,
1.66166987575e-22,
1.0,
1.0,
1.0,
1.0,
4.9999999996e-40,
0.00046,
0.1875,
0.91854,
0.99999999999,
2.09999999899e-38,
0.0127951984,
0.828125,
0.9999908784,
1.0,
4.14171214499e-30,
1.0,
1.0,
1.0,
1.0,
0.0,
0.0,
0.0,
0.0,
0.0,
2.09999999928e-58,
0.0001469026,
0.376953125,
0.9983650626,
1.0,
1.36817318242e-45,
1.0,
1.0,
1.0,
1.0,
]
index = 0
for k in k_s:
for n in n_s:
for p in p_s:
np.testing.assert_allclose(bdtrc(k, n, p), exp[index])
index += 1
def test_stdtr(self):
"""stdtr should match cephes results"""
t = [-10, -3.1, -0.5, -0.01, 0, 1, 0.5, 10]
k = [2, 10, 100]
exp = [
0.00492622851166,
7.94776587798e-07,
4.9508444923e-17,
0.0451003650651,
0.00562532860804,
0.00125696358826,
0.333333333333,
0.313946802871,
0.309086782915,
0.496464554479,
0.496108987495,
0.496020605117,
0.5,
0.5,
0.5,
0.788675134595,
0.829553433849,
0.840137922108,
0.666666666667,
0.686053197129,
0.690913217085,
0.995073771488,
0.999999205223,
1.0,
]
index = 0
for i in t:
for j in k:
np.testing.assert_allclose(stdtr(j, i), exp[index])
index += 1
if __name__ == "__main__":
main()
|
python
|
# This file is part of BenchExec, a framework for reliable benchmarking:
# https://github.com/sosy-lab/benchexec
#
# SPDX-FileCopyrightText: 2007-2020 Dirk Beyer <https://www.sosy-lab.org>
#
# SPDX-License-Identifier: Apache-2.0
"""This module contains function declarations for several functions of libc
(based on ctypes) and constants relevant for these functions.
"""
import ctypes as _ctypes
from ctypes import c_int, c_uint32, c_long, c_ulong, c_size_t, c_char_p, c_void_p
import os as _os
_libc = _ctypes.CDLL("libc.so.6", use_errno=True)
"""Reference to standard C library."""
_libc_with_gil = _ctypes.PyDLL("libc.so.6", use_errno=True)
"""Reference to standard C library, and we hold the GIL during all function calls."""
def _check_errno(result, func, arguments):
assert func.restype in [c_int, c_void_p]
if (func.restype == c_int and result == -1) or (
func.restype == c_void_p and c_void_p(result).value == c_void_p(-1).value
):
errno = _ctypes.get_errno()
try:
func_name = func.__name__
except AttributeError:
func_name = "__unknown__"
msg = (
func_name
+ "("
+ ", ".join(map(str, arguments))
+ ") failed: "
+ _os.strerror(errno)
)
raise OSError(errno, msg)
return result
# off_t is a signed integer type required for mmap.
# In my tests it is equal to long on both 32bit and 64bit x86 Linux.
c_off_t = c_long
clone = _libc_with_gil.clone # Important to have GIL, cf. container.py!
"""Create copy of current process, similar to fork()."""
CLONE_CALLBACK = _ctypes.CFUNCTYPE(c_int, c_void_p)
"""Type use for callback functions of clone, can be used as decorator."""
clone.argtypes = [
CLONE_CALLBACK,
c_void_p,
c_int,
c_void_p,
] # fn, child_stack, flags, arg (varargs omitted)
clone.errcheck = _check_errno
# /usr/include/linux/sched.h
CLONE_NEWNS = 0x00020000
CLONE_NEWUTS = 0x04000000
CLONE_NEWIPC = 0x08000000
CLONE_NEWUSER = 0x10000000
CLONE_NEWPID = 0x20000000
CLONE_NEWNET = 0x40000000
unshare = _libc.unshare
"""Put current process into new namespace(s)."""
unshare.argtypes = [c_int]
unshare.errcheck = _check_errno
mmap = _libc.mmap
"""Map file into memory."""
mmap.argtypes = [
c_void_p,
c_size_t,
c_int,
c_int,
c_int,
c_off_t,
] # addr, length, prot, flags, fd, offset
mmap.restype = c_void_p
mmap.errcheck = _check_errno
def mmap_anonymous(length, prot, flags=0):
"""Allocate anonymous memory with mmap. Length must be multiple of page size."""
return mmap(None, length, prot, flags | MAP_ANONYMOUS | MAP_PRIVATE, -1, 0)
munmap = _libc.munmap
"""Free mmap()ed memory."""
munmap.argtypes = [c_void_p, c_size_t]
munmap.errcheck = _check_errno
mprotect = _libc.mprotect
"""Set protection on a region of memory."""
mprotect.argtypes = [c_void_p, c_size_t, c_int] # addr, length, prot
mprotect.errcheck = _check_errno
PROT_NONE = 0x0 # /usr/include/bits/mman-linux.h
MAP_GROWSDOWN = 0x00100 # /usr/include/bits/mman.h
MAP_STACK = 0x20000 # /usr/include/bits/mman.h
from mmap import ( # noqa: F401 E402
PROT_EXEC,
PROT_READ,
PROT_WRITE,
MAP_ANONYMOUS,
MAP_PRIVATE,
) # @UnusedImport imported for users of this module
mount = _libc.mount
"""Mount a filesystem."""
mount.argtypes = [
c_char_p,
c_char_p,
c_char_p,
c_ulong,
c_void_p,
] # source, target, fstype, mountflags, data
mount.errcheck = _check_errno
# /usr/include/sys/mount.h
MS_RDONLY = 1
MS_NOSUID = 2
MS_NODEV = 4
MS_NOEXEC = 8
MS_REMOUNT = 32
MS_BIND = 4096
MS_MOVE = 8192
MS_REC = 16384
MS_PRIVATE = 262144
MOUNT_FLAGS = {
b"ro": MS_RDONLY,
b"nosuid": MS_NOSUID,
b"nodev": MS_NODEV,
b"noexec": MS_NOEXEC,
}
umount = _libc.umount
"""Unmount a filesystem."""
umount.argtypes = [c_char_p] # target
umount.errcheck = _check_errno
umount2 = _libc.umount2
"""Unmount a filesystem."""
umount2.argtypes = [c_char_p, c_int] # target, flags
umount2.errcheck = _check_errno
# /usr/include/sys/mount.h
MNT_DETACH = 2
pivot_root = _libc.pivot_root
"""Replace root file system with a different directory."""
pivot_root.argtypes = [c_char_p, c_char_p]
pivot_root.errcheck = _check_errno
class CapHeader(_ctypes.Structure):
"""Structure for first parameter of capset()."""
_fields_ = ("version", c_uint32), ("pid", c_int)
class CapData(_ctypes.Structure):
"""Structure for second parameter of capset()."""
_fields_ = (
("effective", c_uint32),
("permitted", c_uint32),
("inheritable", c_uint32),
)
capset = _libc.capset
"""Configure the capabilities of the current thread."""
capset.errcheck = _check_errno
capset.argtypes = [
_ctypes.POINTER(CapHeader),
_ctypes.POINTER(CapData * 2), # pytype: disable=invalid-typevar
]
LINUX_CAPABILITY_VERSION_3 = 0x20080522 # /usr/include/linux/capability.h
CAP_SYS_ADMIN = 21 # /usr/include/linux/capability.h
prctl = _libc.prctl
"""Modify options of processes: http://man7.org/linux/man-pages/man2/prctl.2.html"""
prctl.errcheck = _check_errno
prctl.argtypes = [c_int, c_ulong, c_ulong, c_ulong, c_ulong]
# /usr/include/linux/prctl.h
PR_SET_DUMPABLE = 4
PR_GET_SECCOMP = 21
PR_SET_SECCOMP = 22
SUID_DUMP_DISABLE = 0
SUID_DUMP_USER = 1
|
python
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: mars/serialize/protos/chunk.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from mars.serialize.protos import value_pb2 as mars_dot_serialize_dot_protos_dot_value__pb2
from mars.serialize.protos import indexvalue_pb2 as mars_dot_serialize_dot_protos_dot_indexvalue__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='mars/serialize/protos/chunk.proto',
package='',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n!mars/serialize/protos/chunk.proto\x1a!mars/serialize/protos/value.proto\x1a&mars/serialize/protos/indexvalue.proto\"\xef\x01\n\x08\x43hunkDef\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x11\n\x05index\x18\x02 \x03(\rB\x02\x10\x01\x12\r\n\x05shape\x18\x03 \x03(\x03\x12\x12\n\x02op\x18\x04 \x01(\x0b\x32\x06.Value\x12\x0e\n\x06\x63\x61\x63hed\x18\x05 \x01(\x08\x12\x15\n\x05\x64type\x18\x06 \x01(\x0b\x32\x06.Value\x12\x16\n\x06\x64types\x18\n \x01(\x0b\x32\x06.Value\x12 \n\x0bindex_value\x18\x0b \x01(\x0b\x32\x0b.IndexValue\x12\x1b\n\x08\x63omposed\x18\x07 \x03(\x0b\x32\t.ChunkDef\x12\x16\n\x06params\x18\x08 \x01(\x0b\x32\x06.Value\x12\n\n\x02id\x18\t \x01(\tb\x06proto3')
,
dependencies=[mars_dot_serialize_dot_protos_dot_value__pb2.DESCRIPTOR,mars_dot_serialize_dot_protos_dot_indexvalue__pb2.DESCRIPTOR,])
_CHUNKDEF = _descriptor.Descriptor(
name='ChunkDef',
full_name='ChunkDef',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='ChunkDef.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='index', full_name='ChunkDef.index', index=1,
number=2, type=13, cpp_type=3, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=_b('\020\001'), file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='shape', full_name='ChunkDef.shape', index=2,
number=3, type=3, cpp_type=2, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='op', full_name='ChunkDef.op', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='cached', full_name='ChunkDef.cached', index=4,
number=5, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='dtype', full_name='ChunkDef.dtype', index=5,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='dtypes', full_name='ChunkDef.dtypes', index=6,
number=10, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='index_value', full_name='ChunkDef.index_value', index=7,
number=11, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='composed', full_name='ChunkDef.composed', index=8,
number=7, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='params', full_name='ChunkDef.params', index=9,
number=8, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='id', full_name='ChunkDef.id', index=10,
number=9, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=113,
serialized_end=352,
)
_CHUNKDEF.fields_by_name['op'].message_type = mars_dot_serialize_dot_protos_dot_value__pb2._VALUE
_CHUNKDEF.fields_by_name['dtype'].message_type = mars_dot_serialize_dot_protos_dot_value__pb2._VALUE
_CHUNKDEF.fields_by_name['dtypes'].message_type = mars_dot_serialize_dot_protos_dot_value__pb2._VALUE
_CHUNKDEF.fields_by_name['index_value'].message_type = mars_dot_serialize_dot_protos_dot_indexvalue__pb2._INDEXVALUE
_CHUNKDEF.fields_by_name['composed'].message_type = _CHUNKDEF
_CHUNKDEF.fields_by_name['params'].message_type = mars_dot_serialize_dot_protos_dot_value__pb2._VALUE
DESCRIPTOR.message_types_by_name['ChunkDef'] = _CHUNKDEF
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
ChunkDef = _reflection.GeneratedProtocolMessageType('ChunkDef', (_message.Message,), dict(
DESCRIPTOR = _CHUNKDEF,
__module__ = 'mars.serialize.protos.chunk_pb2'
# @@protoc_insertion_point(class_scope:ChunkDef)
))
_sym_db.RegisterMessage(ChunkDef)
_CHUNKDEF.fields_by_name['index']._options = None
# @@protoc_insertion_point(module_scope)
|
python
|
def subset(arr, ind, sub, ans):
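    # Recursively collect every non-empty subset of arr starting from index ind;
    # sub is the subset built so far and ans accumulates all subsets found.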
if ind == len(arr):
return ans
for i in range(ind, len(arr)):
temp = sub.copy()
temp.append(arr[i])
ans.append(temp)
subset(arr, i + 1, temp, ans)
return ans
arr = [1, 2, 3]
ans = []
ans = subset(arr, 0, [], ans)
for i in range(len(ans)):
print(ans[i])
|
python
|
import numpy as np
import os
from scipy.io import loadmat
from sklearn.datasets import load_svmlight_file
def gisette2npz():
data_path = "./dataset/GISETTE"
train_data = np.loadtxt(os.path.join(data_path, "GISETTE/gisette_train.data"))
train_labels = np.loadtxt(os.path.join(data_path, "GISETTE/gisette_train.labels"))
valid_data = np.loadtxt(os.path.join(data_path, "GISETTE/gisette_valid.data"))
valid_labels = np.loadtxt(os.path.join(data_path, "gisette_valid.labels"))
x = np.vstack((train_data, valid_data))
y = np.hstack((train_labels, valid_labels))
np.savez(os.path.join(data_path, "gisette.npz"), x=x, y=y)
def madelon2npz():
data_path = "./dataset/MADELON"
train_data = np.loadtxt(os.path.join(data_path, "MADELON/madelon_train.data"))
train_labels = np.loadtxt(os.path.join(data_path, "MADELON/madelon_train.labels"))
valid_data = np.loadtxt(os.path.join(data_path, "MADELON/madelon_valid.data"))
valid_labels = np.loadtxt(os.path.join(data_path, "madelon_valid.labels"))
x = np.vstack((train_data, valid_data))
y = np.hstack((train_labels, valid_labels))
np.savez(os.path.join(data_path, "madelon.npz"), x=x, y=y)
def load_basehock():
data_path = "../dataset/BASEHOCK.mat"
basehock = loadmat(data_path)
return basehock['X'], basehock['Y'].reshape(-1)
def load_madelon():
data_path = "../dataset/MADELON"
madelon = np.load(os.path.join(data_path, "madelon.npz"))
x, y = madelon['x'], madelon['y']
return x, y
def load_gisette():
data_path = "../dataset/GISETTE"
gisette = np.load(os.path.join(data_path, "gisette.npz"))
x, y = gisette['x'], gisette['y']
return x, y
def load_coil20():
data_path = "../dataset/COIL20.mat"
coil20 = loadmat(data_path)
return coil20['X'], coil20['Y'].reshape(-1)
def load_usps():
data_path = "../dataset/USPS.mat"
usps = loadmat(data_path)
return usps['X'], usps['Y'].reshape(-1)
def load_news20():
data_path = "../dataset/news20.binary"
news20 = load_svmlight_file(data_path)
x, y = news20[0].toarray(), news20[1].astype(int)
return x, y
def load_dataset(dataset):
x = None
y = None
if dataset == "madelon":
x, y = load_madelon()
if dataset == 'gisette':
x, y = load_gisette()
if dataset == "basehock":
x, y = load_basehock()
if dataset == "coil20":
x, y = load_coil20()
if dataset == "usps":
x, y = load_usps()
if dataset == "news20":
x, y = load_news20()
    if x is None and y is None:
        raise FileNotFoundError("No such dataset")
return x, y
def generate_synthetic_data(n=100, datatype=""):
"""
Generate data (X,y)
Args:
n(int): number of samples
datatype(string): The type of data
choices: 'orange_skin', 'XOR', 'regression'.
Return:
X(float): [n,d].
y(float): n dimensional array.
"""
if datatype == 'orange_skin':
X = []
i = 0
while i < n // 2:
x = np.random.randn(10)
if 9 < sum(x[:4] ** 2) < 16:
X.append(x)
i += 1
X = np.array(X)
X = np.concatenate((X, np.random.randn(n // 2, 10)))
y = np.concatenate((-np.ones(n // 2), np.ones(n // 2)))
perm_inds = np.random.permutation(n)
X, y = X[perm_inds], y[perm_inds]
elif datatype == 'XOR':
X = np.random.randn(n, 10)
y = np.zeros(n)
splits = np.linspace(0, n, num=8 + 1, dtype=int)
signals = [[1, 1, 1], [-1, -1, -1], [1, 1, -1], [-1, -1, 1], [1, -1, -1], [-1, 1, 1], [-1, 1, -1], [1, -1, 1]]
for i in range(8):
X[splits[i]:splits[i + 1], :3] += np.array([signals[i]])
y[splits[i]:splits[i + 1]] = i // 2
perm_inds = np.random.permutation(n)
X, y = X[perm_inds], y[perm_inds]
elif datatype == 'regression':
X = np.random.randn(n, 10)
y = -2 * np.sin(2 * X[:, 0]) + np.maximum(X[:, 1], 0) + X[:, 2] + np.exp(-X[:, 3]) + np.random.randn(n)
elif datatype == 'regression_approx':
X = np.random.randn(n, 10)
y = -2 * np.sin(2 * X[:, 0]) + np.maximum(X[:, 1], 0) + X[:, 2] + np.exp(-X[:, 3]) + np.random.randn(n)
else:
raise AttributeError("not such datatype")
return X, y
if __name__ == '__main__':
x, y = load_gisette()
print(x.shape, y.shape)
print(x.dtype, y.dtype)
|
python
|
""" Web application main script. """
import os
from typing import Any, Dict, List
import cfbd
import streamlit as st
import college_football_rankings as cfr
import ui
import ui.rankings
import ui.schedule
from college_football_rankings import iterative
FIRST_YEAR: int = 1869
LAST_YEAR: int = 2020
RANKINGS_LEN = 25
ALGORITHM_NAME = "Algorithm"
# @st.cache(show_spinner=False, ttl=10800)
def request_games(year: int, season_type: str) -> List[cfbd.Game]:
""" Request games and keep it in cache for an hour. """
return cfr.get_games(year=year, season_type=season_type)
# @st.cache(show_spinner=False, ttl=10800)
def request_teams() -> List[cfbd.Team]:
""" Request teams and keep it in cache. """
return cfr.get_teams()
# @st.cache(show_spinner=False, ttl=10800)
def create_polls(year: int, max_week: int) -> Dict[str, cfr.Ranking]:
""" Create polls and keep it in cache for an hour. """
return cfr.create_polls(year=year, max_week=max_week)
def create_teams(games: List[cfbd.Game]) -> Dict[str, cfr.Team]:
""" Create teams instances and fill schedules with games. """
teams = request_teams()
teams = cfr.create_teams_instances(teams=teams)
cfr.fill_schedules(games=games, teams=teams)
return cfr.clean_teams(teams=teams)
def main():
""" Web app main routine. """
st.set_page_config(
page_title="College Football Rankings",
page_icon=os.path.join("img", "favicon.png"),
)
st.title(":football: College Football Rankings")
# Year
year = ui.year_selector(first_year=FIRST_YEAR, last_year=LAST_YEAR)
# Games
games = request_games(year=year, season_type="both")
# Week
n_reg_weeks = cfr.last_played_week(games=games, season_type="regular")
reg_weeks = [f"{i + 1}" for i in range(n_reg_weeks)]
n_post_weeks = cfr.last_played_week(games=games, season_type="postseason")
post_weeks = [f"Post {i + 1}" for i in range(n_post_weeks)]
options = reg_weeks + post_weeks
week = st.select_slider(label="Select week", options=options, value=options[-1])
if "Post" in week:
week = int(week.replace("Post ", ""))
season_type = "postseason"
else:
week = int(week)
season_type = "regular"
# Polls
cached_rankings = create_polls(year=year, max_week=week)
rankings = cached_rankings.copy() # Avoid streamlit cached object mutation.
# Teams
teams = create_teams(games=games)
cfr.filter_schedules(teams=teams, max_week=week, season_type=season_type)
st.sidebar.title("Algorithm Settings")
margin = st.sidebar.checkbox("Consider margin")
post_win_prob = st.sidebar.checkbox("Consider post win probability")
# Evaluate rankings.
try:
rankings[ALGORITHM_NAME] = cfr.Ranking(name=ALGORITHM_NAME)
ranks = cfr.evaluate(
teams=teams,
func=iterative.power,
consider_margin=margin,
consider_post_win_prob=post_win_prob,
)
rankings[ALGORITHM_NAME].add_week(
week=week, rankings=[rank.name for rank in ranks]
)
except cfr.RankingError:
st.error("Could not find an equilibrium for algorithm rankings.")
# Rankings.
st.text("")
st.header(":trophy: Rankings")
# Filter week.
rankings_week = {
key: val.get_week(week) for key, val in rankings.items() if val.get_week(week)
}
# Rankings comparison.
ui.rankings.comparison(rankings=rankings_week, teams=teams, length=RANKINGS_LEN)
st.text("")
st.header(":date: Schedule")
ui.schedule.table(rankings=rankings_week, teams=teams)
if __name__ == "__main__":
main()
|
python
|
import app.api.point
import app.api.track
import app.api.tracker
import app.api.user
|
python
|
from functools import wraps
from flask import session, flash, redirect, url_for
from app.log import get_logger
logger = get_logger(__name__)
# Login required decorator
def login_required(f):
@wraps(f)
def wrap(*args, **kwargs):
if "logged_in" in session:
return f(*args, **kwargs)
else:
flash("You need to login first.")
return redirect(url_for("login"))
return wrap
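# Illustrative usage (route and view names are hypothetical):
# @app.route("/dashboard")
# @login_required
# def dashboard():
#     return "only visible when logged in"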
|
python
|
#!/usr/bin/env python
"""
Functions for building tutorial docs from
Libre Office .fodt files using PanDoc
Required formats are:
html
pdf
(eBook)
pdf will need to be generated by Sphinx
via LaTeX intermediate
"""
|
python
|
num1 = int(input('First value: '))
num2 = int(input('Second value: '))
num3 = int(input('Third value: '))
# find out which is the smallest
menor = num1
if num2 < num1 and num2 < num3:
    menor = num2
if num3 < num1 and num3 < num2:
    menor = num3
# find out which is the largest
maior = num1
if num2 > num1 and num2 > num3:
    maior = num2
if num3 > num1 and num3 > num2:
    maior = num3
print('The smallest value entered is {}.'.format(menor))
print('The largest value entered is {}.'.format(maior))
|
python
|
from collections import defaultdict
from dataclasses import dataclass
from typing import List
import torch
import torch.nn as nn
from luafun.model.actor_critic import ActorCritic
class TrainEngine:
def __init__(self, train, model, args):
self.engine = LocalTrainEngine(*args)
@property
def weights(self):
"""Returns new weights if ready, returns none if not ready"""
return None
def push(self, uid, state, reward, done, info, action, logprob, filter):
self.engine.push(uid, state, reward, done, info, action, logprob, filter)
def train(self):
self.engine.train()
def ready(self):
return self.engine.ready()
@dataclass
class Observation:
action: torch.Tensor
state: torch.Tensor
logprob: torch.Tensor
reward: torch.Tensor
is_terminal: torch.Tensor
filter: torch.Tensor
info: dict
class RolloutDataset:
def __init__(self, timestep):
# One time step is 4 frames (0.133 seconds)
self.timestep = timestep
# One sample is 16 time steps (2.1333 seconds)
self.sample = 16
# One episode is 16 Samples (34.1328 seconds)
self.episode = 16
self.memory = defaultdict(list)
self.size = 0
def push(self, uid, state, reward, done, info, action, logprob, filter):
memory = self.memory[uid]
self.size += 1
memory.append(Observation(
action,
state,
logprob,
reward,
done,
filter,
info
))
def reset(self):
self.memory = defaultdict(list)
self.size = 0
@property
def game_count(self) -> int:
return len(self.memory)
def game_size(self, uid) -> int:
uid = list(self.memory.keys())[uid]
return len(self.memory.get(uid, []))
    def __getitem__(self, item):
        # Called as dataset[uid, time]; unpack the (game index, end time) pair
        uid, time = item
        # Make sure our indices are correct: map the integer game index to its key
        uid = list(self.memory.keys())[uid % len(self.memory)]
        data = self.memory[uid]
        time = (time % len(data)) + self.timestep
        sample = data[time - self.timestep:time]
        return sample
class RolloutSampler:
def __init__(self, dataset, batch_size):
self.dataset = dataset
self.batch_size = batch_size
def new_sample(self):
# Make a tensor for a single game
uid = torch.randint(self.dataset.game_count, (1,)).item()
time = self.dataset.game_size(uid)
obs: List[Observation] = self.dataset[uid, time]
states = torch.stack([m.state for m in obs])
action = torch.stack([m.action for m in obs])
logprob = torch.stack([m.logprob for m in obs])
# Compute reward
done = torch.stack([m.is_terminal for m in obs])
reward = torch.stack([m.reward for m in obs])
return states, action, logprob, reward
class LocalTrainEngine:
def __init__(self, obssize, batch=10, timestep=16):
self.dataset = RolloutDataset(timestep)
self.sampler = RolloutSampler(self.dataset, batch)
self.actor_critic = ActorCritic(batch, timestep, obssize)
self.ppo_epochs = 10
self.loss = nn.MSELoss()
self.eps_clip = 1e-3
# self.optimizer = torch.optim.Adam(self.actor_critic.parameters(), lr=lr, betas=betas)
@property
def weights(self):
return None
def push(self, uid, state, reward, done, info, action, logprob, filter):
self.dataset.push(uid, state, reward, done, info, action, logprob, filter)
def ready(self):
return self.dataset.size > 16
def train(self):
# batch = self.sampler.new_sample()
# print(batch)
pass
# for _ in range(self.ppo_epochs):
# state, action, logprobs = memory.states, memory.action, action.logprobs
#
# action_logprobs, state_value, dist_entropy = self.actor_critic.evaluate(state, action)
#
# ratios = torch.exp(action_logprobs - logprobs.detach())
#
# advantages = rewards - state_value.detach()
#
# surr1 = ratios * advantages
# surr2 = torch.clamp(ratios, 1 - self.eps_clip, 1 + self.eps_clip) * advantages
# loss = -torch.min(surr1, surr2) + 0.5 * self.loss(state_value, rewards) - 0.01 * dist_entropy
#
# # take gradient step
# self.optimizer.zero_grad()
# loss.mean().backward()
# self.optimizer.step()
# Save Actor
# Save Policy
# Update Actor
|
python
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2019-01-09 09:04
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('submission', '0008_submission_sender'),
]
operations = [
migrations.AlterField(
model_name='sender',
name='is_blacklisted',
field=models.BooleanField(default=False),
),
migrations.AlterField(
model_name='sender',
name='is_whitelisted',
field=models.BooleanField(default=False),
),
]
|
python
|
from .core import Event, EventType
class JoyAxisMotion(Event):
__slots__ = ('axis', 'value', )
type = EventType.JOY_AXIS_MOTION
def __init__(self, source, axis, value):
super().__init__(source)
self.axis = axis
self.value = value
def __repr__(self):
return f'<{self.__class__.__name__} axis={self.axis}, value={self.value}>'
class JoyHatMotion(Event):
__slots__ = ('hat', 'value', )
type = EventType.JOY_HAT_MOTION
def __init__(self, source, hat, value):
super().__init__(source)
self.hat = hat
self.value = value
def __repr__(self):
return f'<{self.__class__.__name__} hat={self.hat}, value={self.value}>'
class JoyButtonEvent(Event):
__slots__ = ('button', )
def __init__(self, source, button):
super().__init__(source)
self.button = button
def __repr__(self):
return f'<{self.__class__.__name__} button={self.button}>'
class JoyButtonPress(JoyButtonEvent):
__slots__ = ()
type = EventType.JOY_BUTTON_PRESS
class JoyButtonUp(JoyButtonEvent):
__slots__ = ()
type = EventType.JOY_BUTTON_UP
|
python
|
import numpy as np
import pytest
from psydac.core.bsplines import make_knots
from psydac.fem.basic import FemField
from psydac.fem.splines import SplineSpace
from psydac.fem.tensor import TensorFemSpace
from psydac.fem.vector import ProductFemSpace
from psydac.feec.global_projectors import Projector_H1, Projector_L2
#==============================================================================
@pytest.mark.parametrize('domain', [(0, 2*np.pi)])
@pytest.mark.parametrize('ncells', [500])
@pytest.mark.parametrize('degree', [1, 2, 3, 4, 5, 6, 7])
@pytest.mark.parametrize('periodic', [False, True])
def test_H1_projector_1d(domain, ncells, degree, periodic):
breaks = np.linspace(*domain, num=ncells+1)
knots = make_knots(breaks, degree, periodic)
# H1 space (0-forms)
N = SplineSpace(degree=degree, knots=knots, periodic=periodic, basis='B')
V0 = TensorFemSpace(N)
# Projector onto H1 space (1D interpolation)
P0 = Projector_H1(V0)
# Function to project
f = lambda xi1 : np.sin( xi1 + 0.5 )
# Compute the projection
u0 = P0(f)
# Create evaluation grid, and check if u0(x) == f(x)
xgrid = np.linspace(*N.domain, num=101)
vals_u0 = np.array([u0(x) for x in xgrid])
vals_f = np.array([f(x) for x in xgrid])
# Test if max-norm of error is <= TOL
maxnorm_error = abs(vals_u0 - vals_f).max()
print(ncells, maxnorm_error)
# assert maxnorm_error <= 1e-14
#==============================================================================
@pytest.mark.parametrize('domain', [(0, 2*np.pi)])
@pytest.mark.parametrize('ncells', [100, 200, 300])
@pytest.mark.parametrize('degree', [2])
@pytest.mark.parametrize('periodic', [False])
@pytest.mark.parametrize('nquads', [100, 120, 140, 160])
def test_L2_projector_1d(domain, ncells, degree, periodic, nquads):
breaks = np.linspace(*domain, num=ncells+1)
knots = make_knots(breaks, degree, periodic)
# H1 space (0-forms)
N = SplineSpace(degree=degree, knots=knots, periodic=periodic, basis='B')
V0 = TensorFemSpace(N)
# L2 space (1-forms)
V1 = V0.reduce_degree(axes=[0], basis='M')
# Projector onto L2 space (1D histopolation)
P1 = Projector_L2(V1, nquads=[nquads])
# Function to project
f = lambda xi1 : np.sin( xi1 + 0.5 )
# Compute the projection
u1 = P1(f)
# Create evaluation grid, and check if u1(x) == f(x)
xgrid = np.linspace(*N.domain, num=11)
vals_u1 = np.array([u1(x) for x in xgrid])
vals_f = np.array([f(x) for x in xgrid])
# Test if max-norm of error is <= TOL
maxnorm_error = abs(vals_u1 - vals_f).max()
print(ncells, maxnorm_error)
# assert maxnorm_error <= 1e-14
#==============================================================================
if __name__ == '__main__':
domain = (0, 2*np.pi)
degree = 3
periodic = True
ncells = [10, 20, 40, 80, 160, 320, 640]
for nc in ncells:
test_H1_projector_1d(domain, nc, degree, periodic)
nquads = degree
for nc in ncells:
test_L2_projector_1d(domain, nc, degree, periodic, nquads)
|
python
|
from pyrules.dictobj import DictObject
class RuleContext(DictObject):
"""
Rule context to store values and attributes (or any object)
    The rule context is used to pass in attribute values
    for the rules to consider. A rule does not have access to
    any other data except that provided in this rule context.
"""
def __init__(self, initial=None):
super(RuleContext, self).__init__(initial=initial)
self._executed = []
def __setitem__(self, item, value):
self.__setattr__(item, value)
def __getitem__(self, item):
if item.startswith('_'):
raise KeyError('Key {} not found'.format(item))
else:
try:
return self.__getattr__(item)
except AttributeError:
raise KeyError('Key {} not found'.format(item))
@property
def as_dict(self):
return {'context' : self}
def to_dict(self):
# Return copy of context data to prevent later modification by
# caller
return dict(self._data)
def __unicode__(self):
return unicode(self.to_dict())
def __repr__(self):
return u'<RuleContext: {}>'.format(self.to_dict()).encode('utf-8')
class RuleEngine(object):
"""
This basic rule engine runs through all rules in the ruleset, then:
    1. call each rule's should_trigger method
2. if True, call the rule's perform method to evaluate it
3. then call the rule's record method, to record the evaluation's result
"""
def execute(self, ruleset, context, unsafe=True):
"""
Execute ruleset in given context
@param unsafe: enable unsafe evaluation using eval
"""
for rule in ruleset:
if rule.should_trigger(context):
result = rule.perform(context)
rule.record(context, result)
return context
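# Illustrative sketch (not part of the original module): a minimal hypothetical rule
# showing the should_trigger / perform / record protocol that RuleEngine.execute expects.
#
# class DiscountRule(object):
#     def should_trigger(self, context):
#         return context['total'] > 100
#     def perform(self, context):
#         context['discount'] = 10
#         return context['discount']
#     def record(self, context, result):
#         context['last_result'] = result
#
# engine = RuleEngine()
# engine.execute([DiscountRule()], RuleContext({'total': 150}))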
|
python
|
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import numpy as np
import tensorflow as tf
from ladder_network import tensor_utils
from ladder_network.wrappers import Registry
initializers = Registry('Initialization')
@initializers.register('glorot_uniform')
def glorot_uniform(weight_shape, dtype=tf.float32, partition_info=None, seed=42):
"""Glorot uniform weight initialization"""
n_inputs, n_outputs = tensor_utils.get_fans(weight_shape)
init_range = np.sqrt(6. / (n_inputs + n_outputs))
return tf.random_uniform(
weight_shape, -init_range, init_range, seed=seed, dtype=dtype)
@initializers.register('glorot_normal')
def glorot_normal(weight_shape, dtype=tf.float32, partition_info=None, seed=42):
"""Glorot normal weight initialization"""
n_inputs, n_outputs = tensor_utils.get_fans(weight_shape)
stddev = np.sqrt(2. / (n_inputs + n_outputs))
return tf.truncated_normal(
weight_shape, 0.0, stddev=stddev, seed=seed, dtype=dtype)
@initializers.register('he_uniform')
def he_uniform(weight_shape, dtype=tf.float32, partition_info=None, seed=42):
"""He uniform weight initialization."""
n_inputs, n_outputs = tensor_utils.get_fans(weight_shape)
init_range = np.sqrt(6. / n_inputs)
return tf.random_uniform(
weight_shape, -init_range, init_range, seed=seed, dtype=dtype)
@initializers.register('he_normal')
def he_normal(weight_shape, dtype=tf.float32, partition_info=None, seed=42):
"""He normal weight initialization."""
n_inputs, n_outputs = tensor_utils.get_fans(weight_shape)
stddev = np.sqrt(2. / n_inputs)
return tf.truncated_normal(
weight_shape, 0.0, stddev=stddev, seed=seed, dtype=dtype)
|
python
|
import netns
import pdb
fin = open("host.config", "r")
_ = fin.readline()
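# the next line of host.config holds the PID whose network namespace we enter below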
host_mnet_pid = int(fin.readline().split()[0])
with netns.NetNS(nspid=host_mnet_pid):
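    # scapy is imported inside the namespace context, presumably so that its
    # interface/route discovery (done at import time) sees that namespace's interfaces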
from scapy.all import *
class sync_header(Packet):
name = "sync_header"
fields_desc = [\
BitField("msg_type", 0, 8),\
BitField("stage", 0, 32),\
BitField("channel_id", 0, 8),\
BitField("message_id", 0, 8)
]
class payload_t(Packet):
name = "payload_t"
fields_desc = [\
BitField("value", 0, 32)
]
fin = open("host2.config", "r")
src, dst = fin.readline().split()
#pdb.set_trace()
pkt = Ether(type=0x88b5, src=src, dst=dst) / sync_header(msg_type=0, stage=1, message_id=1, channel_id=0)
fin = open("diameter.txt", "r")
switch, _ = fin.readline().split()
sendp(pkt, iface="h{}-eth0".format(switch))
|
python
|
import unittest
from cellular_automata_population_python import Model
class TestModel(unittest.TestCase):
def setUp(self):
"""Set up a model"""
self.model = Model.Model()
self.model.run()
def test_model(self):
"""Test that the model is not null"""
self.assertIsNotNone(self.model)
def test_cells(self):
"""Test that the cells exist"""
self.assertIsNotNone(self.model.cells)
def test_count(self):
"""Test that the count is not negative"""
model = Model.Model()
model.run()
for step, c in enumerate(model.count_alive):
with self.subTest(step=step):
                self.assertGreater(c, 0, "Count is not greater than zero at step = {}".format(step))
if __name__ == "__main__":
unittest.main()
|
python
|
from functools import cached_property
from typing import Tuple
from prompt_toolkit.completion import Completer
from .argument_base import ArgumentBase
from ..errors import FrozenAccessError
from ..utils import abbreviated
class FrozenArgument(ArgumentBase):
"""An encapsulation of an argument which can no longer be mutated.
    Setters for the various fields that can be mutated on a :class:`MutableCommand` will
raise :class:`~almanac.errors.argument_errors.FrozenAccessError`.
"""
def _abstract_display_name_setter(
self,
new_display_name: str
) -> None:
raise FrozenAccessError('Cannot change the display name of a FrozenArgument')
def _abstract_description_setter(
self,
new_description: str
) -> None:
raise FrozenAccessError('Cannot change the description of a FrozenArgument')
def _abstract_hidden_setter(
self,
new_value: bool
) -> None:
raise FrozenAccessError('Cannot change the hidden status of a FrozenArgument')
@cached_property
def abbreviated_description(
self
) -> str:
"""A shortened version of this arguments description."""
return abbreviated(self._description)
@property
def completers(
self
) -> Tuple[Completer, ...]:
return tuple(self._completers)
|
python
|
##################################################################################
#
# By Cascade Tuholske on 2019.12.31
# Updated 2020.02.23
#
# Modified from 7_make_pdays.py now in oldcode dir
#
# NOTE: Fully rewriten on 2021.02.01 see 'oldcode' for prior version / CPT
#
# These are the functions needed for 08_Exposure.py & 10_Trends.py
#
#################################################################################
#### Dependencies
import pandas as pd
import numpy as np
import geopandas as gpd
import statsmodels.api as sm
import warnings
#### Functions
def tot_days(df):
""" Calulates the total number of days per year when a heat threshold was met
"""
df_out = df[['ID_HDC_G0','year','duration']].groupby(['ID_HDC_G0','year']).sum().reset_index()
df_out.rename(columns={'duration':'tot_days'}, inplace=True)
return df_out
def make_pdays(df_stats, df_pop, scale):
""" Makes a dataframe with Tmax stats and population to calc people days.
Args:
df_stats = Tmax stats output
df_pop = interpolated GHS-UCDB population
scale = if you want to divide the data ... 10**9 is best for global scale
"""
# Make Population Long Format
pop_long = pd.wide_to_long(df_pop, stubnames = 'P', i = 'ID_HDC_G0', j = 'year')
pop_long.reset_index(level=0, inplace=True)
pop_long.reset_index(level=0, inplace=True)
pop_long = pop_long.drop('Unnamed: 0', axis = 1)
# Get Total Days
data = df_stats
pdays = pd.DataFrame()
pdays['ID_HDC_G0'] = data['ID_HDC_G0']
pdays['year'] = data['year']
pdays['tot_days'] = data['tot_days']
# Merge
pdays_merge = pdays.merge(pop_long, on=['ID_HDC_G0', 'year'], how = 'left')
# Now get people days from 1983 and change
p = pd.DataFrame()
p['ID_HDC_G0'] = df_pop['ID_HDC_G0']
p['P1983'] = df_pop['P1983']
p['P2016'] = df_pop['P2016']
pdays_merge = pdays_merge.merge(p ,on=['ID_HDC_G0'], how = 'left')
# Calc p days = total days i * pop i
pdays_merge['people_days'] = pdays_merge['tot_days'] * pdays_merge['P'] / scale # total people days
# Pdays due to heat increase = total days total days >40.6 / yr * Pop in 1983
pdays_merge['people_days_heat'] = pdays_merge['tot_days'] * pdays_merge['P1983'] / scale # people days w/ pop con
# Pdays due to pop increase = total days i * (pop i - pop 83)
pdays_merge['people_days_pop'] = pdays_merge['tot_days'] *(pdays_merge['P'] - pdays_merge['P1983']) / scale # dif
return pdays_merge
def add_years(df):
""" Function adds zero to people days for all missing years for each city
so that regressions aren't screwed up. New data points have NAN for P column.
If needed, look them up in interim/GHS-UCDB-Interp.csv"""
years = list(np.unique(df['year']))
row_list = []
for city in list(np.unique(df['ID_HDC_G0'])):
city_id = city # Get city Id
city_df = df.loc[df['ID_HDC_G0'] == city] # find the location
city_years = list(np.unique(city_df['year'])) # figure out the number of years
years_dif = list(set(years) - set(city_years)) # find the missing years
if len(years_dif) > 0: # add in the missing years
for year in years_dif: # add rows with dummy data and zeros
row = []
row.append(city)
row.append(year)
row.append(0) # tot_days = 0 days
row.append(np.nan) # population for that year is not needed
row.append(df[(df['ID_HDC_G0'] == city)]['P1983'].values[0]) # P 1983 should be 0
row.append(df[(df['ID_HDC_G0'] == city)]['P2016'].values[0]) # P 2016
row.append(0) # people_days = 0 days
row.append(0) # people_days_heat = 0 days
row.append(0) # people_days_pop = 0 days
row_list.append(row) # append row list
df_new = pd.DataFrame(row_list, columns= df.columns) # merge the new rows into a df
df_new = df.append(df_new) # add the rows back to the original data frame
# Drop any city with zero people in 1983 because it will screw up the OLS, plus we don't want cities that didn't exist
# in the record
df_new = df_new[df_new['P1983'] > 0]
return df_new
def OLS(df, geog, col, alpha):
"""Finds linear coef for increase in stat by a given geography from 1983 - 2016, as well
as the pct change in population of the cities within the given geography
    NOTE 2020.03.01 - This will throw a run time warning if all values of a col are zero (e.g. can't regress
a bunch of zeros) ... See note in run_OLS. CPT
NOTE 2020.03.01 - Later in the day this issue is resolved by removing the offending cities. See comments
in code. CPT
NOTE 2021.07.23 - Fixed RuntimeWarnings. Needed to deal with p value out puts and add warnings.
See addition of filterwarnings below.
Args:
df = HI stats dataframe
geog = subset geography to calc people days regression
col = col to regress on
alpha = ci alpha for coef
"""
# Get results
labels = []
coef_list = []
p_list = []
df_out = pd.DataFrame()
# turn warnings on
warnings.filterwarnings("error")
for label, df_geog in df.groupby(geog):
#print(label)
# Get Data
X_year = np.array(df_geog.groupby('year')['ID_HDC_G0'].mean().index).reshape((-1, 1))
Y_stats = np.array(df_geog.groupby('year')[col].sum()).reshape((-1, 1))
# Add Intercept
X_year_2 = sm.add_constant(X_year)
# Regress
try:
model = sm.OLS(Y_stats, X_year_2).fit()
except RuntimeWarning:
break
        # Get slope
        # first param is the intercept coef, second is the slope of the line; if only one
        # param was fit (slope = 0), it is the intercept
if len(model.params) == 2:
coef = model.params[1]
else:
coef = model.params[0]
#P value - added cpt July 2021
# deal with zero slope models
if (model.params[0] == 0) & (model.params[1] == 0):
p = np.nan
else:
p = model.pvalues[0]
# Make lists
labels.append(label)
coef_list.append(coef)
p_list.append(p)
# Make data frame
df_out[geog] = labels
df_out['coef'] = coef_list
df_out['p_value'] = [round(elem, 4) for elem in p_list]
return df_out
def run_OLS(stats, geog, alpha):
""" Function calculate OLS coef of people days due to pop and heat and the
attribution index for distribution plots.
NOTE 2020.03.01 - This will throw a run time warning if all values of a col are zero (e.g. can regress
a bunch of zeros, now can we). This will happen if people_days, people_days_pop, people_days_heat or
total_days is zero for all years for a given city. This is still OK for our analysis. What is happening is
that for some cities, the people-days due to heat is zero, meaning pday increases in only due to population.
This is because with the GHS-UCDB some city's population in 1983 is zero, which forces the pdays due to heat
to be zero.
NOTE 2020.03.01 - Later in the day this issue is resolved by removing the offending cities. See comments
in code.
-- CPT
Args:
stats = df to feed in
geog = geography level to conduct analysis (city-level is 'ID-HDC-G0')
alpha = alpha for CI coef
"""
# Get coef for people days
print('pdays ols')
out = OLS(stats, geog, 'people_days', alpha = alpha)
out.rename(columns={"coef": "coef_pdays"}, inplace = True)
out.rename(columns={"p_value": "p_value_pdays"}, inplace = True)
# Get people days due to heat coef
print('heat ols')
heat = OLS(stats, geog, 'people_days_heat', alpha = alpha) # get stats
heat.rename(columns={"coef": "coef_heat"}, inplace = True)
heat.rename(columns={"p_value": "p_value_heat"}, inplace = True)
out = out.merge(heat, on = geog, how = 'left') # merge
# Get people days due to pop
# CPT July 2021 ---- this throws an error
print('pop ols')
pop = OLS(stats, geog, 'people_days_pop', alpha = alpha) # get stats
pop.rename(columns={"coef": "coef_pop"}, inplace = True)
pop.rename(columns={"p_value": "p_value_pop"}, inplace = True)
out = out.merge(pop, on = geog, how = 'left') # merge
# Get total days
print('tot days ols')
totDays = OLS(stats, geog, 'tot_days', alpha = alpha) # get stats
totDays.rename(columns={"coef": "coef_totDays"}, inplace = True)
totDays.rename(columns={"p_value": "p_value_totDays"}, inplace = True)
out = out.merge(totDays, on = geog, how = 'left') # merge
# drop all neg or zero pday slopes (e.g. cooling cities)
out = out[out['coef_pdays'] > 0]
out = out[out['coef_heat'] > 0]
out = out[out['coef_pop'] > 0]
# attrib coef --- creates range -1 to 1 index of heat vs. population as a driver of total pdays increase
out['coef_attrib'] = (out['coef_pop'] - out['coef_heat']) / (out['coef_pop'] + out['coef_heat'])
# normalize coef of attribution
norm = out['coef_attrib']
out['coef_attrib_norm'] = (norm-min(norm))/(max(norm)-min(norm))
return out
|
python
|
#!/usr/bin/env python3
import os, logging
from argparse import ArgumentParser
from mg996r import MG996R
start_degree = 360
state_file = '.servo-state'
if __name__ == '__main__':
# set logging level
logging.basicConfig(level=logging.DEBUG)
# parse arguments
parser = ArgumentParser()
parser.add_argument('--deg', type=int, required=True)
parser.add_argument('--pin', type=str, default='PA6',
help='GPIO pin to use')
parser.add_argument('--reset', action='store_true',
help=f'Use clean default state (degree = {start_degree})')
args = parser.parse_args()
# restore previous degree from a file
if not args.reset:
try:
if os.path.exists(state_file):
with open(state_file, 'r') as f:
start_degree = int(f.read())
if not 0 <= start_degree <= 360:
raise ValueError(f'invalid degree value in {state_file}')
except (IOError, ValueError) as e:
logging.exception(e)
servo = MG996R(args.pin, start_degree)
servo.move(args.deg)
# save degree to a file
try:
with open(state_file, 'w') as f:
f.write(str(args.deg))
except IOError as e:
logging.exception(e)
|
python
|
import threading
import random
from time import sleep
from math import sqrt,cos,sin,pi
from functools import partial
from Tkinter import *
import tkMessageBox as messagebox
from ttk import *
import tkFont
from PIL import Image, ImageTk
import numpy as np
import networkx as nx
from event import myEvent
from Object import Object
win_size = '1100x600'
g_height = 600
g_width = 1100
fr_topo_height = 400
fr_topo_width = 400
qpktThreshold = 0
rpktThreshold = 0
class MyScrollbar(Scrollbar, object):
""" scrollbar management """
    def __init__(self, parent, canvas, nodes, node_size, event=object, l_shohid="", c_shohid="", orient="horizontal", command=None):
super(MyScrollbar, self).__init__(parent, orient=orient, command=command)
self.cv = canvas
self.nodes = nodes
self.node_size = node_size
self.event = event
self.l_shohid = l_shohid
self.c_shohid = c_shohid
self.orient = orient
def set(self, a, b, nodes={}, node_size=10, l_shohid="", c_shohid=""):
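        # Besides updating the scrollbar slider, re-place the overlay labels
        # (gateway, router, server, victim, controller) so they track their nodes
        # as the canvas is scrolled.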
super(MyScrollbar, self).set(a,b)
self.node_size = node_size
self.nodes = nodes
self.l_shohid = l_shohid
self.c_shohid = c_shohid
if self.cv.labelGw != None:
self.cv.labelGw.place_forget()
self.cv.labelRt.place_forget()
self.cv.labelSv.place_forget()
self.cv.labelVt.place_forget()
self.cv.labelCt.place_forget()
x0 = self.cv.canvasx(0)
y0 = self.cv.canvasy(0)
for node, pos in self.nodes.items():
wx = pos[0]-x0
wy = pos[1]-y0
if node[15:] == "00" :
if node[0:] == "00:00:00:04:15:00":
self.cv.labelGw.place(x=wx , y=wy+self.node_size)
self.cv.labelCt.place(x=wx+6*self.node_size, y=10*self.node_size+wy+sqrt(3)*self.node_size)
if node[0:] == "00:00:00:05:15:00":
self.cv.labelRt.place(x=wx , y=wy+self.node_size)
else:
if node[0:] == "00:00:00:00:03:03":
self.cv.labelSv.place(x=wx , y=wy+self.node_size)
if node[0:] == self.event.getVictim().mac:
self.cv.labelVt.place(x=wx , y=wy+self.node_size)
if self.l_shohid == "show":
self.cv.labelGw.place_forget()
self.cv.labelRt.place_forget()
self.cv.labelSv.place_forget()
self.cv.labelVt.place_forget()
self.cv.labelCt.place_forget()
if self.c_shohid == "show":
self.cv.labelCt.place_forget()
x = self.cv.ctrPos
self.cv.itemconfig(self.cv.controller, state="normal")
self.cv.coords(self.cv.controller, x[0], x[1], x[2], x[3], x[4], x[5], x[6], x[7], x[8], x[9], x[10], x[11])
self.cv.itemconfig(self.cv.controller, state="hidden")
for node, pos in self.nodes.items():
if node[15:] == "00":
if node[0:] != "00:00:00:02:15:00" and node[0:] != "00:00:00:03:15:00" and node[0:] != "00:00:00:05:15:00":
self.cv.itemconfig(self.cv.controllers[node], state="hidden")
class ControllerGui():
def __init__(self, event, sw_mac, h_mac, topology):
""" init
"""
self.event = event
self.root = Toplevel()
self.root.title("Controller GUI")
self.root.geometry(win_size)
self.sw_mac = sw_mac
self.h_mac = h_mac
self.topology = topology
self.initStyle()
self.fr_bg = Frame(self.root, height = g_height-100, width = g_width)
self.fr_tp = Frame(self.fr_bg, height = 100, width = g_width)
# self.fr_tb = Frame(self.fr_bg, height = g_height-100, width = g_width/2)
self.cv_tp = Canvas(self.fr_bg, height = 100, width = g_width,highlightthickness=0)
self.cv_tp.create_image(0,0, image=self.t_bgPhoto, anchor = "nw")
self.fr_topo = Frame(self.fr_bg, height = fr_topo_height, width = fr_topo_width)
self.cv_topo = Canvas(self.fr_topo,bg = self.bg, height = fr_topo_height, width = fr_topo_width, highlightthickness=0)
self.cv_topo.create_image(0,0, image=self.topo_bgPhoto, anchor="nw")
self.cv_topo.labelGw = None
self.cv_topo.labelRt = None
self.cv_topo.labelSv = None
self.cv_topo.labelVt = None
self.cv_topo.labelCt = None
self.fr_mid = Frame(self.fr_bg, height = 400, width = 300, style="TFrame")
self.fr_table = Frame(self.fr_bg, height = 400, width = 400)
self.cv_btm= Canvas(self.fr_tp, height = 100, width = g_width,highlightthickness=0)
self.cv_btm.create_image(0,0, image=self.b_bgPhoto, anchor = "nw")
self.var = StringVar()
self.L1 = Label(self.fr_mid, textvariable=self.var, width=30, anchor="center", background=self.bg)
self.thres = Label(self.fr_mid, text="Threshold:", anchor="center", background=self.bg)
self.zoom = Object(x1=0, y1=0, x2=0, y2=0, area=0,
rect=self.cv_topo.create_rectangle(0,0,0,0),
width = fr_topo_width,
height = fr_topo_height)
self.zoomState = "Not"
self.zoomIn = Button(self.fr_mid, style="in.zoom.TButton", command=partial(self.topoZoom, InOut="in"))
self.zoomOut = Button(self.fr_mid, style="out.zoom.TButton", command=partial(self.topoZoom, InOut="out"))
self.usrIn = StringVar()
self.usrIn.set("")
self.thresIn = Entry(self.fr_mid, textvariable=self.usrIn, width=8, font=self.fonts)
self.enter = Button(self.fr_mid, text="Enter", command=self.getThreshold, width=5)
self.tree = Treeview(self.fr_table, columns=('col1', 'col2', 'col3', 'col4') ,show='headings')
self.ybar = Scrollbar(self.fr_table, orient=VERTICAL, command=self.tree.yview)
self.tree.column('col1', width=100, anchor='center')
self.tree.column('col2', width=100, anchor='center')
self.tree.column('col3', width=92, anchor='center')
self.tree.column('col4', width=92, anchor='center')
self.tree.heading('col1', text='name')
self.tree.heading('col2', text='port')
self.tree.heading('col3', text='q_pkt')
self.tree.heading('col4', text='r_pkt')
self.tree.configure(yscrollcommand=self.ybar.set)
self.tree.bind("<Double-1>", self.dbClick2ShowNode)
self.ge_network()
self.create_node()
self.cv_topo.l_shohid = StringVar()
self.cv_topo.l_shohid.set("show")
self.cv_topo.c_shohid = StringVar()
self.cv_topo.c_shohid.set("hide")
self.cv_topo.ctrPos = []
self.cv_topo.ctrCenter = []
self.button_quit = Button(self.fr_mid, style="Q.TButton",command=self.quit)
self.button_refresh = Button(self.fr_mid, style="R.TButton", command=self.refresh_network)
self.topo_xscroll = MyScrollbar(self.fr_topo,
canvas = self.cv_topo, nodes = self.nodes,
node_size = self.node_size, event = self.event,
l_shohid=self.cv_topo.l_shohid.get(),
c_shohid=self.cv_topo.c_shohid.get(),
orient="horizontal", command=self.cv_topo.xview)
self.topo_yscroll = MyScrollbar(self.fr_topo,
canvas = self.cv_topo, nodes = self.nodes,
node_size = self.node_size, event = self.event,
l_shohid=self.cv_topo.l_shohid.get(),
c_shohid=self.cv_topo.c_shohid.get(),
orient="vertical", command=self.cv_topo.yview)
self.cv_topo.configure(
yscrollcommand=partial(self.topo_yscroll.set,
node_size=self.node_size,
l_shohid=self.cv_topo.l_shohid.get(),
c_shohid=self.cv_topo.c_shohid.get()),
xscrollcommand=partial(self.topo_xscroll.set,
node_size = self.node_size,
l_shohid=self.cv_topo.l_shohid.get(),
c_shohid=self.cv_topo.c_shohid.get()))
#self.cv.pack()
self.cv_topo.bind('<Motion>' , self.move_handler)
self.cv_topo.bind('<Button-1>', self.click_handler)
self.button_lShowHide = Button(self.fr_mid, style="h.label.TButton", textvariable=self.cv_topo.l_shohid, command=self.labelShowHide)
self.button_cShowHide = Button(self.fr_mid, style="v.controller.TButton", textvariable=self.cv_topo.c_shohid, command=self.controllerShowHide)
self.edgeWarn_th = threading.Thread(target=self.edge_traffic_warn, args=(self.event,self.topology, self.cv_topo))
self.edgeWarn_th.setDaemon(True)
self.edgeWarn_th.start()
self.v = StringVar()
self.on_off_xpos = 150
self.on_off_ypos = 500
# self.rate_set = []
# for text, mode in modes:
# self.rate_set.append(Radiobutton(self.fr_mid, text=text, variable=self.v, value=mode, command=self.mitigation))
# self.on_off_ypos += 25
self.rate_set = Checkbutton(self.fr_mid, text="Mitigation", variable=self.v, onvalue="On", offvalue="Off", command=self.mitigation)
self.typeSetting()
self.labelShowHide()
def typeSetting(self):
""" manage objects position """
self.fr_bg.pack()
self.fr_tp.pack(side="bottom")
# self.fr_tb.pack(side="right")
# self.L1.place(x=85, y=100)
# self.thres.place(x=480, y=420)
# self.thresIn.place(x=600, y=420)
# self.button_lShowHide.place(x=10, y=370)
# self.enter.place(x=655, y=420)
# self.button_quit.place(x=850, y=450)
# self.button_refresh.place(x=850, y=400)
self.L1.grid(row=0, column=0, pady=(0,20))
self.thres.grid(row=1, column=0, sticky="W")
self.thresIn.grid(row=1, column=0)
self.enter.grid(row=1, column=0, sticky="E")
self.zoomIn.grid(row=2, column=0)
self.zoomOut.grid(row=3, column=0)
self.button_lShowHide.grid(row=4, column=0)
self.button_cShowHide.grid(row=5, column=0)
self.rate_set.grid(row=6, column=0, pady=(10,0))
# self.rate_set[1].grid(row=6, column=0)
self.button_refresh.grid(row=8, column = 0, pady=(10,0))
self.button_quit.grid(row=9, column=0)
self.topo_xscroll.pack(side="bottom", fill="x", ipady=0)
self.topo_yscroll.pack(side="right", fill="y", ipadx=0)
self.cv_topo.pack(expand="Yes", anchor="center", side="left")
self.cv_tp.pack(expand="Yes", side="top", fill="both",ipadx=0,ipady=0,padx=0,pady=0)
self.fr_topo.pack(expand="Yes", anchor="center",side="left", fill="both")
self.fr_mid.pack(expand="Yes",side="left", anchor="center")
self.fr_table.pack(expand="Yes", side="right",anchor="center",fill="both")
self.cv_btm.pack(expand="Yes", side="bottom", fill="both")
def initStyle(self):
""" manage style """
self.node_size = 10
self.fonts = ("arial", 12)
# self.fonts = tkFont.Font(family="mono", size=12)
#################### Color ####################
self.bg_tp = "black"
self.bg = "white"
self.host_color = "white"
self.sw_color = "white"
self.r_color = "#ffcc66"
self.q_color = "#B585BE"
#self.ov_r_color = "red"
#self.ov_q_color = "yellow"
self.notice_color = "#5D5D5D"
self.ctrline_color = "#d7d7ff"
#################### Img ####################
quitImage = Image.open('Img/up_quit.png').resize((180,42), Image.ANTIALIAS)
refreshImage = Image.open('Img/up_refresh.png').resize((180,42), Image.ANTIALIAS)
b_quitImage = Image.open('Img/down_quit.png').resize((180,42), Image.ANTIALIAS)
b_refreshImage = Image.open('Img/down_refresh.png').resize((180,42), Image.ANTIALIAS)
self.quitPhoto = ImageTk.PhotoImage(quitImage)
self.refreshPhoto = ImageTk.PhotoImage(refreshImage)
self.b_quitPhoto = ImageTk.PhotoImage(b_quitImage)
self.b_refreshPhoto = ImageTk.PhotoImage(b_refreshImage)
TBgImage = Image.open('Img/top_bg.png').resize((1100,100), Image.ANTIALIAS)
BBgImage = Image.open('Img/bottom_bg.png').resize((1100,100), Image.ANTIALIAS)
TopoBgImage = Image.open('Img/gray_bg.png').resize((400,400), Image.ANTIALIAS)
self.t_bgPhoto = ImageTk.PhotoImage(TBgImage)
self.b_bgPhoto = ImageTk.PhotoImage(BBgImage)
self.topo_bgPhoto = ImageTk.PhotoImage(TopoBgImage)
upzinImage = Image.open('Img/up_zoomin.png').resize((180,42), Image.ANTIALIAS)
downzinImage = Image.open('Img/down_zoomin.png').resize((180,42), Image.ANTIALIAS)
actzinImage = Image.open('Img/active_zoomin.png').resize((180,42), Image.ANTIALIAS)
diszinImage = Image.open('Img/disable_zoomin.png').resize((180,42), Image.ANTIALIAS)
self.upzinPhoto = ImageTk.PhotoImage(upzinImage)
self.downzinPhoto = ImageTk.PhotoImage(downzinImage)
self.actzinPhoto = ImageTk.PhotoImage(actzinImage)
self.diszinPhoto = ImageTk.PhotoImage(diszinImage)
upzoutImage = Image.open('Img/up_zoomout.png').resize((180,42), Image.ANTIALIAS)
downzoutImage = Image.open('Img/down_zoomout.png').resize((180,42), Image.ANTIALIAS)
actzoutImage = Image.open('Img/active_zoomout.png').resize((180,42), Image.ANTIALIAS)
diszoutImage = Image.open('Img/disable_zoomout.png').resize((180,42), Image.ANTIALIAS)
self.upzoutPhoto = ImageTk.PhotoImage(upzoutImage)
self.downzoutPhoto = ImageTk.PhotoImage(downzoutImage)
self.actzoutPhoto = ImageTk.PhotoImage(actzoutImage)
self.diszoutPhoto = ImageTk.PhotoImage(diszoutImage)
upvlImage = Image.open('Img/up_vlabel.png').resize((180,42), Image.ANTIALIAS)
downvlImage = Image.open('Img/down_vlabel.png').resize((180,42), Image.ANTIALIAS)
uphlImage = Image.open('Img/up_hlabel.png').resize((180,42), Image.ANTIALIAS)
downhlImage = Image.open('Img/down_hlabel.png').resize((180,42), Image.ANTIALIAS)
upvcImage = Image.open('Img/up_vcontroller.png').resize((180,42), Image.ANTIALIAS)
downvcImage = Image.open('Img/down_vcontroller.png').resize((180,42), Image.ANTIALIAS)
uphcImage = Image.open('Img/up_hcontroller.png').resize((180,42), Image.ANTIALIAS)
downhcImage = Image.open('Img/down_hcontroller.png').resize((180,42), Image.ANTIALIAS)
self.upvlPhoto = ImageTk.PhotoImage(upvlImage)
self.downvlPhoto = ImageTk.PhotoImage(downvlImage)
self.uphlPhoto = ImageTk.PhotoImage(uphlImage)
self.downhlPhoto = ImageTk.PhotoImage(downhlImage)
self.upvcPhoto = ImageTk.PhotoImage(upvcImage)
self.downvcPhoto = ImageTk.PhotoImage(downvcImage)
self.uphcPhoto = ImageTk.PhotoImage(uphcImage)
self.downhcPhoto = ImageTk.PhotoImage(downhcImage)
#################### Style ####################
self.style = Style()
# self.style.configure("TButton",
# font=self.fonts, relief="flat")
# self.style.map("TButton",
# # background=[("active", self.bg), ("disabled", self.bg)],
# background=[("active", "pink"), ("disabled", "#f0f0f0")],
# foreground=[("active", "white"), ("disabled", "white")]
# )
self.style.map("Selected.TButton",
background=[("active", "pink"), ("disabled", "#f0f0f0")],
foreground=[("active", "white"), ("disabled", "white")]
)
self.style.configure("Q.TButton",
background=self.bg,
font=self.fonts, relief="flat",
image = self.quitPhoto, padding=0,
)
self.style.map("Q.TButton",
background=[("active",self.bg)],
image=[("active",self.b_quitPhoto)],
)
self.style.configure("R.TButton",
background=self.bg,
font=self.fonts, relief="flat",
image = self.refreshPhoto, padding=0)
self.style.map("R.TButton",
background=[("active",self.bg)],
image=[("active",self.b_refreshPhoto)],
)
self.style.configure("zoom.TButton",
font=self.fonts, relief="flat",
background=self.bg, padding=0)
self.style.map("zoom.TButton",
background=[("active", self.bg), ("disabled", self.bg)])
self.style.configure("in.zoom.TButton", image = self.upzinPhoto)
self.style.map("in.zoom.TButton",
image = [("active", self.actzinPhoto), ("disabled", self.diszinPhoto)])
self.style.configure("S.in.zoom.TButton", image = self.downzinPhoto)
self.style.configure("out.zoom.TButton", image = self.upzoutPhoto)
self.style.map("out.zoom.TButton",
image = [("active", self.actzoutPhoto), ("disabled", self.diszoutPhoto)])
self.style.configure("S.out.zoom.TButton", image = self.downzoutPhoto)
self.style.configure("label.TButton",
font=self.fonts, relief="flat",
background=self.bg, padding=0)
self.style.map("label.TButton",
background=[("active", self.bg)])
self.style.configure("v.label.TButton", image = self.upvlPhoto)
self.style.map("v.label.TButton",
image = [("active", self.downvlPhoto)])
self.style.configure("h.label.TButton", image = self.uphlPhoto)
self.style.map("h.label.TButton",
image = [("active", self.downhlPhoto)])
self.style.configure("controller.TButton",
font=self.fonts, relief="flat",
background=self.bg, padding=0)
self.style.map("controller.TButton",
background=[("active", self.bg)])
self.style.configure("v.controller.TButton", image = self.upvcPhoto)
self.style.map("v.controller.TButton",
image = [("active", self.downvcPhoto)])
self.style.configure("h.controller.TButton", image = self.uphcPhoto)
self.style.map("h.controller.TButton",
image = [("active", self.downhcPhoto)])
self.style.configure("TFrame",
background = self.bg,
font=self.fonts
)
self.style.configure("TLabel",
background = self.bg,
font=self.fonts
)
self.style.configure("TCheckbutton",
font=self.fonts,
background = self.bg)
def ge_network(self):
""" generate network """
self.G = nx.Graph()
pos = {}
fixed = []
connected_gw = []
for port, node in self.event.node_links["s4"]:
if node != "s5":
connected_gw.append(node)
myCos = lambda x: np.cos(np.deg2rad(x))
mySin = lambda x: np.sin(np.deg2rad(x))
for s, mac in sorted(self.sw_mac.items()):
self.G.add_node(mac.encode('utf-8'))
if s in connected_gw:
pos[mac] = (0.2+1.1*myCos(90+15.0*connected_gw.index(s)), -1.4+connected_gw.index(s)*0.225)
# pos[mac] = (-1, -1.2+connected_gw.index(s)*0.225)
for port, node in self.event.node_links[s]:
if node[0] == 's':
pos[self.sw_mac[node]] = (-1.2,pos[mac][1])
fixed.append(self.sw_mac[node])
for p,n in self.event.node_links[node]:
if n[0] == 'h':
pos[self.h_mac[n]] = (-1.7, pos[mac][1])
fixed.append(self.h_mac[n])
elif node[0] == 'h':
pos[self.h_mac[node]] = (-1.7, pos[mac][1])
fixed.append(self.h_mac[node])
fixed.append(mac)
for h, mac in sorted(self.h_mac.items()):
self.G.add_node(mac.encode('utf-8'))
# pos[mac] = (0,int(h[1:])/15)
# fixed.append(mac)
edge = []
for no, link in sorted(self.topology.items()):
keys = link.keys()
edge.append((keys[0],keys[1]))
self.G.add_edges_from(edge)
pos["00:00:00:04:15:00"] = (0.2,0)
pos["00:00:00:05:15:00"] = (0.7,0)
pos["00:00:00:00:03:03"] = (1.5,0.5)
pos["00:00:00:00:02:02"] = (1.5,-0.5)
pos["00:00:00:03:15:00"] = (1.1,0.25)
pos["00:00:00:02:15:00"] = (1.1,-0.25)
fixed.append("00:00:00:04:15:00")
fixed.append("00:00:00:05:15:00")
fixed.append("00:00:00:00:03:03")
fixed.append("00:00:00:00:02:02")
fixed.append("00:00:00:03:15:00")
fixed.append("00:00:00:02:15:00")
self.links = self.G.edges # [[mac1,mac2],[mac3,mac4],...]
self.nodes = nx.spring_layout(self.G, pos=pos, fixed=fixed) # {mac1:[x1,y1], mac2:[x2, y2]}
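        # Note: networkx's spring_layout keeps every node listed in `fixed` at the
        # coordinates supplied in `pos` and only places the remaining nodes with the
        # force-directed algorithm, so the hand-picked switches and hosts above stay
        # where they were put while the rest of the topology is laid out around them.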
def refresh_network(self):
""" refresh network """
self.node_size = 10
self.G.clear()
self.cv_topo.delete("all")
self.cv_topo.labelGw.destroy()
self.cv_topo.labelRt.destroy()
self.cv_topo.labelSv.destroy()
self.cv_topo.labelVt.destroy()
self.cv_topo.labelCt.destroy()
self.event.cleanObjID()
self.ge_network()
self.cv_topo.create_image(0,0, image=self.topo_bgPhoto, anchor="nw")
self.create_node()
self.zoom.width = fr_topo_width
self.zoom.height = fr_topo_height
self.cv_topo.configure(scrollregion=(0,0,self.zoom.width,self.zoom.height))
self.topoZoom(InOut = self.zoomState)
self.zoomIn.state(["!disabled"])
self.zoomOut.state(["!disabled"])
self.cv_topo.l_shohid.set("show")
self.cv_topo.c_shohid.set("show")
self.labelShowHide()
self.controllerShowHide()
def create_node(self):
""" create nodes and lines """
for node, pos in self.nodes.items():
pos[0] = (pos[0]+2)*100
pos[1] = (pos[1]+2)*100
for link in self.links:
if self.event.getQR(link[0], link[1], 1) == 'q':
# link[0] -> half : query
No = self.cv_topo.create_line(
self.nodes[link[0]][0]+self.node_size/2,
self.nodes[link[0]][1]+self.node_size/2,
(self.nodes[link[0]][0]+self.nodes[link[1]][0]+self.node_size)/2,
(self.nodes[link[0]][1]+self.nodes[link[1]][1]+self.node_size)/2,
fill=self.q_color, arrow=LAST, width=2)
self.event.putObjID(No, link[0], link[1])
# link[1] -> half : response
No = self.cv_topo.create_line(
self.nodes[link[1]][0]+self.node_size/2,
self.nodes[link[1]][1]+self.node_size/2,
(self.nodes[link[0]][0]+self.nodes[link[1]][0]+self.node_size)/2,
(self.nodes[link[0]][1]+self.nodes[link[1]][1]+self.node_size)/2,
fill=self.r_color, arrow=LAST, width=2)
self.event.putObjID(No, link[0], link[1])
elif self.event.getQR(link[0], link[1], 1) == 'r':
# link[1] -> half : query
No = self.cv_topo.create_line(
self.nodes[link[1]][0]+self.node_size/2,
self.nodes[link[1]][1]+self.node_size/2,
(self.nodes[link[0]][0]+self.nodes[link[1]][0]+self.node_size)/2,
(self.nodes[link[0]][1]+self.nodes[link[1]][1]+self.node_size)/2,
fill=self.q_color, arrow=LAST, width=2)
self.event.putObjID(No, link[0], link[1])
# link[0] -> half : response
No = self.cv_topo.create_line(
self.nodes[link[0]][0]+self.node_size/2,
self.nodes[link[0]][1]+self.node_size/2,
(self.nodes[link[0]][0]+self.nodes[link[1]][0]+self.node_size)/2,
(self.nodes[link[0]][1]+self.nodes[link[1]][1]+self.node_size)/2,
fill=self.r_color, arrow=LAST, width=2)
self.event.putObjID(No, link[0], link[1])
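        # The loop above drew each link as two arrows that meet at the link's midpoint:
        # the q_color half points along the DNS-query direction and the r_color half
        # along the response direction, so the two traffic directions can be styled
        # independently later (see edge_traffic_warn below).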
self.switches = {}
self.hosts = {}
self.cv_topo.controllers = {}
for node, pos in self.nodes.items():
if node[15:] == "00" :
# sw = self.cv.create_image(pos[0]+10, pos[1]+10, image=self.photo_sw)
sw = self.cv_topo.create_oval(pos[0], pos[1], pos[0]+self.node_size, pos[1]+self.node_size, fill=self.sw_color)
self.switches[node] = sw
if node[0:] == "00:00:00:04:15:00":
self.cv_topo.labelGw = Label(self.cv_topo, text="Gateway\n Switch", width=8, foreground="white", background="black", borderwidth=0, anchor="center", font=("arial", 10))
self.cv_topo.labelGw.place(x=pos[0] , y=pos[1]+self.node_size)
self.cv_topo.controller = self.cv_topo.create_polygon(
pos[0]+6*self.node_size, 10*self.node_size+pos[1],
pos[0]+7*self.node_size, 10*self.node_size+pos[1],
pos[0]+7.5*self.node_size, 10*self.node_size+pos[1]+sqrt(3)*self.node_size/2,
pos[0]+7*self.node_size, 10*self.node_size+pos[1]+sqrt(3)*self.node_size,
pos[0]+6*self.node_size, 10*self.node_size+pos[1]+sqrt(3)*self.node_size,
pos[0]+5.5*self.node_size, 10*self.node_size+pos[1]+sqrt(3)*self.node_size/2, fill="white", outline="black")
self.cv_topo.labelCt = Label(self.cv_topo, text="Controller", width=8, foreground="white", background="black", borderwidth=0, anchor="center", font=("arial", 10))
self.cv_topo.ctrPos = self.cv_topo.coords(self.cv_topo.controller)
self.cv_topo.ctrCenter = [(self.cv_topo.ctrPos[0]+self.cv_topo.ctrPos[2])/2, self.cv_topo.ctrPos[5]]
self.cv_topo.labelCt.place(x=pos[0]+6*self.node_size, y=10*self.node_size+pos[1]+sqrt(3)*self.node_size)
if node[0:] == "00:00:00:05:15:00":
self.cv_topo.labelRt = Label(self.cv_topo, text="Router", width=7, foreground="white", background="black", borderwidth=0, anchor="center", font=("arial", 10))
self.cv_topo.labelRt.place(x=pos[0] , y=pos[1]+self.node_size)
else:
host = self.cv_topo.create_rectangle(pos[0], pos[1], pos[0]+self.node_size, pos[1]+self.node_size, fill=self.host_color, outline="black")
# host = self.cv.create_image(pos[0]+10, pos[1]+10, image=self.photo_host)
self.hosts[node] = host
if node[0:] == "00:00:00:00:03:03":
self.cv_topo.labelSv = Label(self.cv_topo, text=" DNS\nServer", width=7, foreground="white", background="black", borderwidth=0, anchor="center", font=("arial", 10))
self.cv_topo.labelSv.place(x=pos[0] , y=pos[1]+self.node_size)
if node[0:] == self.event.getVictim().mac:
self.cv_topo.labelVt = Label(self.cv_topo, text="Victim", width=7, foreground="white", background="black", borderwidth=0, anchor="center", font=("arial", 10))
self.cv_topo.labelVt.place(x=pos[0] , y=pos[1]+self.node_size)
for node, pos in self.nodes.items():
ctrx = self.cv_topo.ctrCenter[0]
ctry = self.cv_topo.ctrCenter[1]
if node[15:] == "00":
if node[0:] != "00:00:00:02:15:00" and node[0:] != "00:00:00:03:15:00" and node[0:] != "00:00:00:05:15:00":
ct = self.cv_topo.create_line(pos[0]+self.node_size/2, pos[1]+self.node_size/2, ctrx, ctry, fill=self.ctrline_color, width=2)
self.cv_topo.controllers[node] = ct
for node, pos in self.nodes.items():
if node[15:] == "00":
self.cv_topo.tag_raise(self.switches[node])
self.cv_topo.tag_raise(self.cv_topo.controller)
self.overlaplist = []
self.comparelist = []
for no, link in sorted(self.topology.items()):
mac1 = link.keys()[0]
mac2 = link.keys()[1]
self.overlaplist.append(self.event.getObjID(mac1, mac2)[0])
self.overlaplist.append(self.event.getObjID(mac1, mac2)[1])
        self.comparelist = list(self.overlaplist)
        for Id in self.overlaplist:
            flag = 0
            if self.comparelist is None:
                break
            del self.comparelist[self.comparelist.index(Id)]
            x1, y1, x2, y2 = self.cv_topo.coords(Id)
            result = self.cv_topo.find_overlapping(x1, y1, x2, y2)
            for x in self.comparelist:
                # find_overlapping() returns canvas item ids, so compare ids directly
                if x in result:
self.refresh_network()
flag = 1
break
if flag == 1:
break
def edge_traffic_warn(self, event, topology, cv_topo):
""" detect which edge is busy, warn user via color changing """
while event.is_set() is True:
pktMax = 0
edgeWidth_q = 2
edgeWidth_r = 2
for no, link in sorted(topology.items()):
mac1 = link.keys()[0]
mac2 = link.keys()[1]
pktNum_q = event.getPktNum(mac1, mac2, 'q')
pktNum_r = event.getPktNum(mac1, mac2, 'r')
pktMax = pktNum_q if pktNum_q > pktMax else pktMax
pktMax = pktNum_r if pktNum_r > pktMax else pktMax
pktMax = 20 if pktMax < 20 else pktMax
if pktNum_q <= qpktThreshold:
edgeWidth_q = (pktNum_q%5)+2
edgeWidth_q = 2 if edgeWidth_q < 2 else edgeWidth_q
cv_topo.itemconfig(event.getObjID(mac1, mac2)[0], fill=self.q_color, width=edgeWidth_q)
elif pktNum_q > qpktThreshold:
edgeWidth_q = int(pktNum_q*20/pktMax)
edgeWidth_q = 7 if edgeWidth_q < 7 else edgeWidth_q
cv_topo.itemconfig(event.getObjID(mac1, mac2)[0], fill=self.edgeColorCtr(self.q_color, edgeWidth_q, "q"), width=edgeWidth_q)
if pktNum_r <= rpktThreshold:
edgeWidth_r = (pktNum_r%5)+2
edgeWidth_r = 2 if edgeWidth_r < 2 else edgeWidth_r
cv_topo.itemconfig(event.getObjID(mac1, mac2)[1], fill=self.r_color, width=edgeWidth_r)
elif pktNum_r > rpktThreshold:
edgeWidth_r = int(pktNum_r*20/pktMax)
edgeWidth_r = 7 if edgeWidth_r < 7 else edgeWidth_r
cv_topo.itemconfig(event.getObjID(mac1, mac2)[1], fill=self.edgeColorCtr(self.r_color, edgeWidth_r, "r"), width=edgeWidth_r)
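            # labelShowHide() is intentionally called twice: each call toggles the
            # show/hide state, so the pair repositions every label against the
            # current scroll offset while leaving the visible state unchanged.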
self.labelShowHide()
self.labelShowHide()
for i in range(0, 10):
if event.is_set() is False:
break
sleep(1)
def edgeColorCtr(self, color, width, pkttype="q"):
""" make line color change with its width """
r = int(color[1:3], 16)
g = int(color[3:5], 16)
b = int(color[5:7], 16)
if pkttype == "q":
while width > 6:
g -= 15
width -= 2
elif pkttype == "r":
while width > 6:
g -= 10
b -= 10
width -= 2
return "#{0:02x}{1:02x}{2:02x}".format(r,g,b)
def mitigation(self):
""" call conttoller to open defense system """
if self.v.get() == "On":
self.event.setMeterFlag(1)
# messagebox.showinfo("Mitigation is opened", "Our defense system is operating", parent=self.root)
print "Mitigation is opened"
elif self.v.get() == "Off":
self.event.setMeterFlag(0)
# messagebox.showinfo("Mitigation is closed", "Our defense system is stopped operating", parent=self.root)
print "Mitigation is closed"
def dbClick2ShowNode(self, event):
""" click one row to show node position """
for s_mac, pos in self.switches.items():
self.cv_topo.itemconfig(self.switches[s_mac], fill=self.sw_color)
for h_mac, pos in self.hosts.items():
self.cv_topo.itemconfig(self.hosts[h_mac], fill=self.host_color)
name = self.tree.item(self.tree.selection())['values'][0]
if name == "DNS Server":
name = "h3"
elif name == "victim":
name = self.event.getVictim().name
elif name == "gateway sw":
name = "s4"
elif name == "router":
name = "s5"
mac = self.event.name2mac(name)
x1, y1, x2, y2 = 0,0,0,0
if mac[15:] == "00":
self.cv_topo.itemconfig(self.switches[mac], fill=self.notice_color)
else:
self.cv_topo.itemconfig(self.hosts[mac], fill=self.notice_color)
x,y = self.nodes[mac]
        borderX1 = self.cv_topo.canvasx(fr_topo_width/2-self.node_size/2)
        borderY1 = self.cv_topo.canvasy(fr_topo_height/2-self.node_size/2)
        borderX2 = self.cv_topo.canvasx(fr_topo_width/2+self.node_size/2)
        borderY2 = self.cv_topo.canvasy(fr_topo_height/2+self.node_size/2)
while borderX1 > x and self.cv_topo.canvasx(0) > 0:
self.cv_topo.xview_scroll(-1,"unit")
borderX1 = self.cv_topo.canvasx(fr_topo_width/2-self.node_size/2)
borderX2 = self.cv_topo.canvasx(fr_topo_width/2+self.node_size/2)
while borderX2 < x and self.cv_topo.canvasx(fr_topo_width) < self.zoom.width:
self.cv_topo.xview_scroll(1,"unit")
borderX2 = self.cv_topo.canvasx(fr_topo_width/2+self.node_size/2)
while borderY1 > y and self.cv_topo.canvasy(0) > 0:
self.cv_topo.yview_scroll(-1,"unit")
            borderY1 = self.cv_topo.canvasy(fr_topo_height/2-self.node_size/2)
            borderY2 = self.cv_topo.canvasy(fr_topo_height/2+self.node_size/2)
while borderY2 < y and self.cv_topo.canvasy(fr_topo_height) < self.zoom.height:
self.cv_topo.yview_scroll(1,"unit")
            borderY2 = self.cv_topo.canvasy(fr_topo_height/2+self.node_size/2)
def quit(self):
""" end the controller gui """
self.G.clear()
self.cv_topo.delete("all")
#self.cv.delete("all")
self.root.destroy()
self.event.clear()
# exit()
def move_handler(self, event):
""" detect if mouse is in the zone of node, show information """
self.var.set('')
for node, pos in self.nodes.items():
if pos[0] < self.cv_topo.canvasx(event.x) < pos[0]+self.node_size and pos[1] < self.cv_topo.canvasy(event.y) < pos[1]+self.node_size:
name = self.event.mac2name(node)
if node[15:] == "00" :
self.var.set(name+" : "+node)
else:
self.var.set(name+" : "+node)
break
def click_handler(self, event):
""" click one node to show information """
if self.tree != None:
self.tree.pack_forget()
self.ybar.pack_forget()
x = self.tree.get_children()
for item in x:
self.tree.delete(item)
for node, pos in self.nodes.items():
if pos[0] < self.cv_topo.canvasx(event.x) < pos[0]+self.node_size and pos[1] < self.cv_topo.canvasy(event.y) < pos[1]+self.node_size:
for s_mac, pos in self.switches.items():
self.cv_topo.itemconfig(self.switches[s_mac], fill=self.sw_color)
for h_mac, pos in self.hosts.items():
self.cv_topo.itemconfig(self.hosts[h_mac], fill=self.host_color)
# self.tree = Treeview(self.fr_table, columns=('col1', 'col2', 'col3', 'col4') ,show='headings')
inf = self.event.getNodeInf(node)
for i in inf:
self.tree.insert('', 'end', values=i)
# self.tree.place(x=480, y=170)
# self.ybar.place(x=800, y=170, height=218)
self.tree.pack(side="left", fill='both', pady=60)
self.ybar.pack(side="left", fill='y', pady=60)
def getThreshold(self):
""" change the threshold of mitigation """
try:
int(self.usrIn.get())
except ValueError:
self.usrIn.set("")
messagebox.showerror("Error", "You enter the wrong type !!\nPlease enter a number with type \"int\"", parent=self.root)
else:
            if 0 <= int(self.usrIn.get()) <= 1000:
                # update the module-level thresholds used by edge_traffic_warn()
                global qpktThreshold, rpktThreshold
                self.event.thr_res_num = int(self.usrIn.get())
                qpktThreshold = int(self.usrIn.get())
                rpktThreshold = int(self.usrIn.get())
                print "You changed the threshold to " + str(self.event.thr_res_num)
else:
self.usrIn.set("")
messagebox.showwarning("Warning", "Please enter a number which value is between 0 to 1000 (both includiing) !!", parent=self.root)
def labelShowHide(self):
""" show and hide all labels on topology """
if self.cv_topo.l_shohid.get() == "show":
x0 = self.cv_topo.canvasx(0)
y0 = self.cv_topo.canvasy(0)
for node, pos in self.nodes.items():
wx = pos[0] - x0
wy = pos[1] - y0
if node[15:] == "00" :
if node[0:] == "00:00:00:04:15:00":
self.cv_topo.labelGw.place(x=wx , y=wy+self.node_size)
if self.cv_topo.c_shohid.get() == "hide":
self.cv_topo.labelCt.place(x=wx+6*self.node_size, y=10*self.node_size+wy+sqrt(3)*self.node_size)
elif self.cv_topo.c_shohid.get() == "show":
self.cv_topo.labelCt.place_forget()
if node[0:] == "00:00:00:05:15:00":
self.cv_topo.labelRt.place(x=wx , y=wy+self.node_size)
else:
if node[0:] == "00:00:00:00:03:03":
self.cv_topo.labelSv.place(x=wx , y=wy+self.node_size)
if node[0:] == self.event.getVictim().mac:
self.cv_topo.labelVt.place(x=wx , y=wy+self.node_size)
self.cv_topo.l_shohid.set("hide")
self.button_lShowHide.configure(style = "v.label.TButton")
self.cv_topo.configure(
yscrollcommand= partial(self.topo_yscroll.set, nodes=self.nodes, node_size=self.node_size, l_shohid=self.cv_topo.l_shohid.get(), c_shohid=self.cv_topo.c_shohid.get()),
xscrollcommand= partial(self.topo_xscroll.set, nodes=self.nodes, node_size = self.node_size, l_shohid=self.cv_topo.l_shohid.get(), c_shohid=self.cv_topo.c_shohid.get()))
elif self.cv_topo.l_shohid.get() == "hide":
self.cv_topo.labelGw.place_forget()
self.cv_topo.labelRt.place_forget()
self.cv_topo.labelSv.place_forget()
self.cv_topo.labelVt.place_forget()
self.cv_topo.labelCt.place_forget()
self.cv_topo.l_shohid.set("show")
self.button_lShowHide.configure(style = "h.label.TButton")
self.cv_topo.configure(
yscrollcommand= partial(self.topo_yscroll.set, nodes=self.nodes, node_size=self.node_size, l_shohid=self.cv_topo.l_shohid.get(), c_shohid=self.cv_topo.c_shohid.get()),
xscrollcommand= partial(self.topo_xscroll.set, nodes=self.nodes, node_size = self.node_size, l_shohid=self.cv_topo.l_shohid.get(), c_shohid=self.cv_topo.c_shohid.get()))
def controllerShowHide(self):
""" show and hide controller, its label and line which is connected with it """
if self.cv_topo.c_shohid.get() == "show":
x = self.cv_topo.ctrPos
self.cv_topo.ctrCenter = [(x[0]+x[2])/2, x[5]]
self.cv_topo.itemconfig(self.cv_topo.controller, state="normal")
self.cv_topo.coords(self.cv_topo.controller, x[0], x[1], x[2], x[3], x[4], x[5], x[6], x[7], x[8], x[9], x[10], x[11])
self.button_cShowHide.configure(style = "v.controller.TButton")
for node, pos in self.nodes.items():
if node[15:] == "00":
if node[0:] != "00:00:00:02:15:00" and node[0:] != "00:00:00:03:15:00" and node[0:] != "00:00:00:05:15:00":
self.cv_topo.itemconfig(self.cv_topo.controllers[node], state="normal")
self.cv_topo.coords(self.cv_topo.controllers[node], pos[0]+self.node_size/2, pos[1]+self.node_size/2, self.cv_topo.ctrCenter[0], self.cv_topo.ctrCenter[1])
self.cv_topo.c_shohid.set("hide")
self.cv_topo.configure(
yscrollcommand= partial(self.topo_yscroll.set, nodes=self.nodes, node_size=self.node_size, l_shohid=self.cv_topo.l_shohid.get(), c_shohid=self.cv_topo.c_shohid.get()),
xscrollcommand= partial(self.topo_xscroll.set, nodes=self.nodes, node_size = self.node_size, l_shohid=self.cv_topo.l_shohid.get(), c_shohid=self.cv_topo.c_shohid.get()))
self.cv_topo.ctrPos = self.cv_topo.coords(self.cv_topo.controller)
elif self.cv_topo.c_shohid.get() == "hide":
self.cv_topo.labelCt.place_forget()
self.button_cShowHide.configure(style = "h.controller.TButton")
self.cv_topo.c_shohid.set("show")
self.cv_topo.configure(
yscrollcommand= partial(self.topo_yscroll.set, nodes=self.nodes, node_size=self.node_size, l_shohid=self.cv_topo.l_shohid.get(), c_shohid=self.cv_topo.c_shohid.get()),
xscrollcommand= partial(self.topo_xscroll.set, nodes=self.nodes, node_size = self.node_size, l_shohid=self.cv_topo.l_shohid.get(), c_shohid=self.cv_topo.c_shohid.get()))
self.cv_topo.ctrPos = self.cv_topo.coords(self.cv_topo.controller)
self.cv_topo.ctrCenter = [(self.cv_topo.ctrPos[0]+self.cv_topo.ctrPos[2])/2, self.cv_topo.ctrPos[5]]
self.cv_topo.itemconfig(self.cv_topo.controller, state="hidden")
for node, pos in self.nodes.items():
if node[15:] == "00":
if node[0:] != "00:00:00:02:15:00" and node[0:] != "00:00:00:03:15:00" and node[0:] != "00:00:00:05:15:00":
self.cv_topo.coords(self.cv_topo.controllers[node], pos[0]+self.node_size/2, pos[1]+self.node_size/2, self.cv_topo.ctrCenter[0], self.cv_topo.ctrCenter[1])
self.cv_topo.itemconfig(self.cv_topo.controllers[node], state="hidden")
def zoomRecord(self, event):
""" record the mouse position you clicked """
self.zoom.x1 = self.cv_topo.canvasx(event.x)
self.zoom.y1 = self.cv_topo.canvasy(event.y)
def zoomCreate(self, event):
""" record the position in the rectangle area you chose """
self.cv_topo.delete(self.zoom.rect)
self.zoom.x2 = self.cv_topo.canvasx(event.x)
self.zoom.y2 = self.cv_topo.canvasy(event.y)
self.zoom.rect = self.cv_topo.create_rectangle(self.zoom.x1, self.zoom.y1, self.zoom.x2, self.zoom.y2)
self.zoom.area = abs(self.zoom.x2-self.zoom.x1)*abs(self.zoom.y2-self.zoom.y1)
def zoomRelease(self, event=None, InOut="in"):
""" topology zoom in and out """
op = "*" if InOut=="in" else "/"
if self.zoom.area < 1:
self.zoom.area = 1
mag = sqrt((400*400)/self.zoom.area)
if mag >= 8:
mag = 8
elif mag >= 4:
mag = 4
elif mag >= 2:
mag = 2
elif mag >= 0:
mag = 1.5
self.zoom.width = eval("self.zoom.width "+op+"mag")
self.zoom.height= eval("self.zoom.height"+op+"mag")
if fr_topo_width-50 < self.zoom.width < fr_topo_width+50:
self.zoom.width = fr_topo_width
self.zoom.height = fr_topo_height
if self.cv_topo.c_shohid.get() == "show":
y = self.cv_topo.ctrPos
x = [eval("i"+op+"mag") for i in y]
self.cv_topo.ctrCenter = [(x[0]+x[2])/2, x[5]]
self.cv_topo.itemconfig(self.cv_topo.controller, state="normal")
self.cv_topo.coords(self.cv_topo.controller, x[0], x[1], x[2], x[3], x[4], x[5], x[6], x[7], x[8], x[9], x[10], x[11])
for node, pos in self.nodes.items():
if node[15:] == "00":
if node[0:] != "00:00:00:02:15:00" and node[0:] != "00:00:00:03:15:00" and node[0:] != "00:00:00:05:15:00":
self.cv_topo.itemconfig(self.cv_topo.controllers[node], state="normal")
self.cv_topo.coords(self.cv_topo.controllers[node], pos[0]+self.node_size/2, pos[1]+self.node_size/2, self.cv_topo.ctrCenter[0], self.cv_topo.ctrCenter[1])
self.cv_topo.ctrPos = x
self.cv_topo.configure(scrollregion=(0,0,self.zoom.width,self.zoom.height))
self.cv_topo.yview_moveto(eval("self.zoom.y1"+op+"mag")/self.zoom.height)
self.cv_topo.xview_moveto(eval("self.zoom.x1"+op+"mag")/self.zoom.width)
self.node_size = eval("self.node_size "+op+" mag")
for node, pos in self.nodes.items():
self.nodes[node] = [eval("pos[0] "+op+" mag"), eval("pos[1] "+op+" mag")]
result = self.cv_topo.find_overlapping(0, 0, 10000, 10000)
for Id in result:
ords = self.cv_topo.coords(Id)
z = [eval("o"+op+"mag") for o in ords]
if len(ords) == 4:
self.cv_topo.coords(Id, z[0], z[1], z[2], z[3])
if len(ords) == 12:
self.cv_topo.coords(Id,
z[0], z[1], z[2], z[3],
z[4], z[5], z[6], z[7],
z[8], z[9], z[10], z[11])
self.labelShowHide()
self.labelShowHide()
self.controllerShowHide()
self.controllerShowHide()
self.cv_topo.delete(self.zoom.rect)
self.cv_topo.configure(
yscrollcommand= partial(self.topo_yscroll.set, nodes=self.nodes, node_size=self.node_size, l_shohid=self.cv_topo.l_shohid.get(), c_shohid=self.cv_topo.c_shohid.get()),
xscrollcommand= partial(self.topo_xscroll.set, nodes=self.nodes, node_size = self.node_size, l_shohid=self.cv_topo.l_shohid.get(), c_shohid=self.cv_topo.c_shohid.get()))
if self.cv_topo.c_shohid.get() == "show":
self.cv_topo.ctrPos = self.cv_topo.coords(self.cv_topo.controller)
self.cv_topo.itemconfig(self.cv_topo.controller, state="hidden")
for node, pos in self.nodes.items():
if node[15:] == "00":
if node[0:] != "00:00:00:02:15:00" and node[0:] != "00:00:00:03:15:00" and node[0:] != "00:00:00:05:15:00":
self.cv_topo.itemconfig(self.cv_topo.controllers[node], state="hidden")
tmp = self.zoomState
if self.zoom.width * 8 > 10000 and self.zoomState == "in":
self.zoomIn.state(["disabled"])
elif self.zoom.width / 8 < 50 and self.zoomState == "out":
self.zoomOut.state(["disabled"])
else:
self.zoomIn.state(["!disabled"])
self.zoomOut.state(["!disabled"])
self.zoomState = "Not"
self.topoZoom(InOut=tmp)
def topoZoom(self, InOut="in"):
""" check zoom state to decide to unbind or bind events """
self.cv_topo.unbind("<Button-1>")
self.cv_topo.unbind("<B1-Motion>")
self.cv_topo.unbind("<ButtonRelease-1>")
if self.zoomState == InOut:
self.zoomIn.configure(style="in.zoom.TButton")
self.zoomOut.configure(style="out.zoom.TButton")
self.zoomState = "Not"
self.cv_topo.bind('<Motion>' , self.move_handler)
self.cv_topo.bind('<Button-1>', self.click_handler)
else: # self.zoomState = "Not"
if InOut == "in":
self.zoomIn.configure(style="S.in.zoom.TButton")
self.zoomOut.configure(style="out.zoom.TButton")
elif InOut == "out":
self.zoomIn.configure(style="in.zoom.TButton")
self.zoomOut.configure(style="S.out.zoom.TButton")
self.zoomState = InOut
self.cv_topo.bind("<Button-1>", self.zoomRecord)
self.cv_topo.bind("<B1-Motion>", self.zoomCreate)
self.cv_topo.bind("<ButtonRelease-1>", partial(self.zoomRelease,InOut=InOut))
def main():
sw_mac = {'s16': '00:00:00:10:15:00', 's9': '00:00:00:09:15:00', 's8': '00:00:00:08:15:00', 's17': '00:00:00:11:15:00', 's3': '00:00:00:03:15:00', 's2': '00:00:00:02:15:00', 's1': '00:00:00:01:15:00', 's10': '00:00:00:0a:15:00', 's7': '00:00:00:07:15:00', 's6': '00:00:00:06:15:00', 's5': '00:00:00:05:15:00', 's4': '00:00:00:04:15:00', 's13': '00:00:00:0d:15:00', 's20': '00:00:00:14:15:00', 's18': '00:00:00:12:15:00', 's15': '00:00:00:0f:15:00', 's12': '00:00:00:0c:15:00', 's19': '00:00:00:13:15:00', 's21': '00:00:00:15:15:00', 's14': '00:00:00:0e:15:00', 's11': '00:00:00:0b:15:00'}
h_mac = {u'h8': u'00:00:00:00:0c:08', u'h9': u'00:00:00:00:0d:09', u'h7': u'00:00:00:00:0b:07', u'h1': u'00:00:00:00:01:01', u'h6': u'00:00:00:00:0a:06', u'h12': u'00:00:00:00:10:0c', u'h13': u'00:00:00:00:12:0d', u'h14': u'00:00:00:00:13:0e', u'h15': u'00:00:00:00:15:0f', u'h4': u'00:00:00:00:07:04', u'h5': u'00:00:00:00:08:05', u'h10': u'00:00:00:00:0e:0a', u'h2': u'00:00:00:00:02:02', u'h11': u'00:00:00:00:0f:0b', u'h3': u'00:00:00:00:03:03'}
topology = {'24': {'00:00:00:05:15:00': 3, '00:00:00:04:15:00': 10}, '25': {'00:00:00:0d:15:00': 2, '00:00:00:04:15:00': 3}, '26': {'00:00:00:0e:15:00': 2, '00:00:00:04:15:00': 4}, '27': {'00:00:00:11:15:00': 2, '00:00:00:04:15:00': 7}, '20': {'00:00:00:07:15:00': 2, '00:00:00:04:15:00': 12}, '21': {'00:00:00:06:15:00': 2, '00:00:00:04:15:00': 11}, '22': {'00:00:00:08:15:00': 2, '00:00:00:04:15:00': 13}, '23': {'00:00:00:09:15:00': 2, '00:00:00:04:15:00': 14}, '28': {'00:00:00:0f:15:00': 2, '00:00:00:04:15:00': 5}, '29': {'00:00:00:04:15:00': 9, '00:00:00:14:15:00': 2}, '1': {u'00:00:00:00:12:0d': 1, '00:00:00:12:15:00': 1}, '0': {'00:00:00:13:15:00': 1, u'00:00:00:00:13:0e': 1}, '3': {'00:00:00:0d:15:00': 1, u'00:00:00:00:0d:09': 1}, '2': {'00:00:00:08:15:00': 1, u'00:00:00:00:08:05': 1}, '5': {'00:00:00:01:15:00': 1, u'00:00:00:00:01:01': 1}, '4': {u'00:00:00:00:0c:08': 1, '00:00:00:0c:15:00': 1}, '7': {'00:00:00:07:15:00': 1, u'00:00:00:00:07:04': 1}, '6': {'00:00:00:0a:15:00': 1, u'00:00:00:00:0a:06': 1}, '9': {u'00:00:00:00:0f:0b': 1, '00:00:00:0f:15:00': 1}, '8': {u'00:00:00:00:10:0c': 1, '00:00:00:10:15:00': 1}, '11': {u'00:00:00:00:03:03': 1, '00:00:00:03:15:00': 1}, '10': {'00:00:00:0e:15:00': 1, u'00:00:00:00:0e:0a': 1}, '13': {u'00:00:00:00:02:02': 1, '00:00:00:02:15:00': 1}, '12': {u'00:00:00:00:15:0f': 1, '00:00:00:15:15:00': 1}, '15': {'00:00:00:01:15:00': 2, '00:00:00:06:15:00': 1}, '14': {u'00:00:00:00:0b:07': 1, '00:00:00:0b:15:00': 1}, '17': {'00:00:00:05:15:00': 2, '00:00:00:03:15:00': 2}, '16': {'00:00:00:05:15:00': 1, '00:00:00:02:15:00': 2}, '19': {'00:00:00:04:15:00': 1, '00:00:00:0b:15:00': 2}, '18': {'00:00:00:04:15:00': 2, '00:00:00:0c:15:00': 2}, '31': {'00:00:00:13:15:00': 2, '00:00:00:04:15:00': 8}, '30': {'00:00:00:10:15:00': 2, '00:00:00:04:15:00': 6}, '34': {'00:00:00:14:15:00': 1, '00:00:00:15:15:00': 2}, '33': {'00:00:00:11:15:00': 1, '00:00:00:12:15:00': 2}, '32': {'00:00:00:0a:15:00': 2, '00:00:00:09:15:00': 1}}
direction = {'24': {'00:00:00:05:15:00': 'r', '00:00:00:04:15:00': 'q'}, '25': {'00:00:00:0d:15:00': 'q', '00:00:00:04:15:00': 'r'}, '26': {'00:00:00:0e:15:00': 'q', '00:00:00:04:15:00': 'r'}, '27': {'00:00:00:11:15:00': 'q', '00:00:00:04:15:00': 'r'}, '20': {'00:00:00:07:15:00': 'q', '00:00:00:04:15:00': 'r'}, '21': {'00:00:00:06:15:00': 'q', '00:00:00:04:15:00': 'r'}, '22': {'00:00:00:08:15:00': 'q', '00:00:00:04:15:00': 'r'}, '23': {'00:00:00:09:15:00': 'q', '00:00:00:04:15:00': 'r'}, '28': {'00:00:00:0f:15:00': 'q', '00:00:00:04:15:00': 'r'}, '29': {'00:00:00:04:15:00': 'r', '00:00:00:14:15:00': 'q'}, '1': {'00:00:00:12:15:00': 'r', u'00:00:00:00:12:0d': 'q'}, '0': {'00:00:00:13:15:00': 'r', u'00:00:00:00:13:0e': 'q'}, '3': {'00:00:00:0d:15:00': 'r', u'00:00:00:00:0d:09': 'q'}, '2': {'00:00:00:08:15:00': 'r', u'00:00:00:00:08:05': 'q'}, '5': {'00:00:00:01:15:00': 'r', u'00:00:00:00:01:01': 'q'}, '4': {u'00:00:00:00:0c:08': 'q', '00:00:00:0c:15:00': 'r'}, '7': {'00:00:00:07:15:00': 'r', u'00:00:00:00:07:04': 'q'}, '6': {'00:00:00:0a:15:00': 'r', u'00:00:00:00:0a:06': 'q'}, '9': {u'00:00:00:00:0f:0b': 'q', '00:00:00:0f:15:00': 'r'}, '8': {u'00:00:00:00:10:0c': 'q', '00:00:00:10:15:00': 'r'}, '11': {u'00:00:00:00:03:03': 'r', '00:00:00:03:15:00': 'q'}, '10': {'00:00:00:0e:15:00': 'r', u'00:00:00:00:0e:0a': 'q'}, '13': {'00:00:00:02:15:00': 'r', u'00:00:00:00:02:02': 'q'}, '12': {'00:00:00:15:15:00': 'r', u'00:00:00:00:15:0f': 'q'}, '15': {'00:00:00:01:15:00': 'q', '00:00:00:06:15:00': 'r'}, '14': {u'00:00:00:00:0b:07': 'q', '00:00:00:0b:15:00': 'r'}, '17': {'00:00:00:05:15:00': 'q', '00:00:00:03:15:00': 'r'}, '16': {'00:00:00:05:15:00': 'r', '00:00:00:02:15:00': 'q'}, '19': {'00:00:00:04:15:00': 'r', '00:00:00:0b:15:00': 'q'}, '18': {'00:00:00:04:15:00': 'r', '00:00:00:0c:15:00': 'q'}, '31': {'00:00:00:13:15:00': 'q', '00:00:00:04:15:00': 'r'}, '30': {'00:00:00:10:15:00': 'q', '00:00:00:04:15:00': 'r'}, '34': {'00:00:00:14:15:00': 'r', '00:00:00:15:15:00': 'q'}, '33': {'00:00:00:11:15:00': 'r', '00:00:00:12:15:00': 'q'}, '32': {'00:00:00:0a:15:00': 'q', '00:00:00:09:15:00': 'r'}}
node_links = {u'h8': [[1, 's12']], u'h9': [[1, 's13']], u'h2': [[1, 's2']], u'h3': [[1, 's3']], u'h1': [[1, 's1']], u'h6': [[1, 's10']], u'h7': [[1, 's11']], u'h4': [[1, 's7']], u'h5': [[1, 's8']], 's9': [[2, 's4'], [1, 's10']], 's8': [[2, 's4'], [1, u'h5']], 's3': [[1, u'h3'], [2, 's5']], 's2': [[1, u'h2'], [2, 's5']], 's1': [[1, u'h1'], [2, 's6']], 's7': [[2, 's4'], [1, u'h4']], 's6': [[1, 's1'], [2, 's4']], 's5': [[2, 's3'], [1, 's2'], [3, 's4']], 's4': [[2, 's12'], [3, 's13'], [5, 's15'], [4, 's14'], [12, 's7'], [13, 's8'], [14, 's9'], [1, 's11'], [6, 's16'], [7, 's17'], [10, 's5'], [11, 's6'], [9, 's20'], [8, 's19']], 's19': [[1, u'h14'], [2, 's4']], 's18': [[1, u'h13'], [2, 's17']], 's13': [[2, 's4'], [1, u'h9']], 's12': [[2, 's4'], [1, u'h8']], 's11': [[2, 's4'], [1, u'h7']], 's10': [[1, u'h6'], [2, 's9']], 's17': [[2, 's4'], [1, 's18']], 's16': [[2, 's4'], [1, u'h12']], 's15': [[2, 's4'], [1, u'h11']], 's14': [[2, 's4'], [1, u'h10']], u'h10': [[1, 's14']], u'h11': [[1, 's15']], u'h12': [[1, 's16']], u'h13': [[1, 's18']], u'h14': [[1, 's19']], u'h15': [[1, 's21']], 's20': [[2, 's4'], [1, 's21']], 's21': [[1, u'h15'], [2, 's20']]}
event = myEvent()
event.init(topology, direction, node_links)
event.recordName(h_mac, sw_mac)
c = ControllerGui(event, sw_mac, h_mac, topology)
c.root.mainloop()
if __name__ == '__main__':
main()
|
python
|
# -*- coding: utf-8 -*-
"""
@author:XuMing([email protected])
@description: evaluate with mscc data set
The function create_mscc_dataset is Copyright 2016 Oren Melamud
Modifications copyright (C) 2018 Tatsuya Aoki
This code is based on https://github.com/orenmel/context2vec/blob/master/context2vec/eval/mscc_text_tokenize.py
Used to convert the Microsoft Sentence Completion Challenge (MSCC) learning corpus into a one-sentence-per-line format.
"""
import sys
sys.path.append('../..')
import glob
import os
from codecs import open
import numpy
import torch
from nltk.tokenize import word_tokenize, sent_tokenize
from pycorrector.deep_context import config
from pycorrector.deep_context.data_util import load_vocab
from pycorrector.deep_context.infer import read_model
def create_mscc_dataset(input_dir, output_filename, lowercase=True):
def write_paragraph_lines(paragraph_lines, file_obj):
paragraph_str = ' '.join(paragraph_lines)
for sent in sent_tokenize(paragraph_str):
if lowercase:
sent = sent.lower()
file_obj.write(' '.join(word_tokenize(sent)) + '\n')
if input_dir[-1] != '/':
input_dir += '/'
if not os.path.isdir(input_dir):
raise NotADirectoryError
print('Read files from', input_dir)
print('Creating dataset to', output_filename)
files = glob.glob(input_dir + '*.TXT')
with open(output_filename, mode='w', encoding='utf-8') as output_file:
for file in files:
with open(file, mode='r', errors='ignore', encoding='utf-8') as input_file:
paragraph_lines = []
count = 0
for i, line in enumerate(input_file):
if len(line.strip()) == 0 and len(paragraph_lines) > 0:
write_paragraph_lines(paragraph_lines, output_file)
paragraph_lines = []
else:
paragraph_lines.append(line)
count += 1
if len(paragraph_lines) > 0:
write_paragraph_lines(paragraph_lines, output_file)
print('Read {} lines'.format(count))
def read_mscc_questions(input_file, lower=True):
with open(input_file, mode='r', encoding='utf-8') as f:
questions = []
for line in f:
q_id, text = line.split(' ', 1)
if lower:
text = text.lower()
text = text.strip().split()
target_word = ''
for index, token in enumerate(text):
if token.startswith('[') and token.endswith(']'):
target_word = token[1:-1]
target_pos = index
if not target_word:
raise SyntaxError
questions.append([text, q_id, target_word, target_pos])
return questions
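# Note: each question line is assumed to start with its question id followed by the
# sentence, with exactly one token wrapped in square brackets marking the candidate
# word, e.g. "1a) I have seen it on [the] table." (the id shown here is only an
# illustration); read_mscc_questions() records that token and its position.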
def print_mscc_score(gold_q_id: list, q_id_and_sim: list):
assert len(q_id_and_sim) % 5 == 0
gold = numpy.array(gold_q_id)
answer = numpy.array([sorted(q_id_and_sim[5 * i:5 * (i + 1)], key=lambda x: x[1], reverse=True)
for i in range(int(len(q_id_and_sim) / 5))])[:, 0, 0]
correct_or_not = (gold == answer)
mid = int(len(correct_or_not) / 2)
dev = correct_or_not[:mid]
test = correct_or_not[mid:]
print('Overall', float(sum(correct_or_not)) / len(correct_or_not))
print('dev', float(sum(dev)) / len(dev))
print('test', float(sum(test)) / len(test))
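# print_mscc_score assumes the similarities arrive in groups of five (one per
# candidate answer): each group is sorted by similarity and the question id of the
# best-scoring candidate is compared with the gold id; the first half of the
# questions is reported as "dev" and the second half as "test".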
def mscc_evaluation(question_file,
answer_file,
output_file,
model,
stoi,
unk_token,
bos_token,
eos_token,
device):
questions = read_mscc_questions(question_file)
q_id_and_sim = []
with open(question_file, mode='r', encoding='utf-8') as f, open(output_file, mode='w', encoding='utf-8') as w:
for question, input_line in zip(questions, f):
tokens, q_id, target_word, target_pos = question
tokens[target_pos] = target_word
tokens = [bos_token] + tokens + [eos_token]
indexed_sentence = [stoi[token] if token in stoi else stoi[unk_token] for token in tokens]
input_tokens = torch.tensor(indexed_sentence, dtype=torch.long, device=device).unsqueeze(0)
indexed_target_word = input_tokens[0, target_pos + 1]
similarity = model.run_inference(input_tokens, indexed_target_word, target_pos)
q_id_and_sim.append((q_id, similarity))
w.write(input_line.strip() + '\t' + str(similarity) + '\n')
with open(answer_file, mode='r', encoding='utf-8') as f:
gold_q_id = [line.split(' ', 1)[0] for line in f]
print_mscc_score(gold_q_id, q_id_and_sim)
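# mscc_evaluation scores every candidate sentence by asking the model (via
# run_inference) for what is presumably the similarity of the bracketed target word
# given its context; note the +1 offset on target_pos because a bos_token is
# prepended. The (question id, similarity) pairs are then handed to
# print_mscc_score() above.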
if __name__ == '__main__':
if len(sys.argv) < 2:
        print('Please specify the input directory that contains the MSCC dataset.')
        print('(In most cases the directory is named `Holmes_Training_Data`.)')
print('sample usage: python src/eval/mscc.py ~/dataset/Holmes_Training_Data/')
quit()
create_mscc_dataset(sys.argv[1], 'dataset/mscc_train.txt')
gpu_id = config.gpu_id
model_path = config.model_path
emb_path = config.emb_path
# device
use_cuda = torch.cuda.is_available() and gpu_id > -1
if use_cuda:
device = torch.device('cuda:{}'.format(gpu_id))
torch.cuda.set_device(gpu_id)
else:
device = torch.device('cpu')
# load model
model, config_dict = read_model(model_path, device)
unk_token = config_dict['unk_token']
bos_token = config_dict['bos_token']
eos_token = config_dict['eos_token']
# read vocab from word_emb path
itos, stoi = load_vocab(emb_path)
mscc_evaluation(config.question_file,
config.answer_file,
'mscc.result',
model,
stoi,
unk_token=unk_token,
bos_token=bos_token,
eos_token=eos_token,
device=device)
|
python
|
# Source
# ======
# https://www.hackerrank.com/contests/projecteuler/challenges/euler011
#
# Problem
# =======
# In the 20x20 grid (see Source for grid), four numbers along a diagonal
# line have been marked in bold.
#
# The product of these numbers is 26 x 63 x 78 x 14 = 1788696.
#
# What is the greatest product of four adjacent numbers in the same direction
# (up, down, left, right, or diagonally) in the 20x20 grid?
#
# Input Format
# ============
# Input consists of 20 lines each containing 20 integers.
#
# Constraints
# ============
# 0 <= Each integer in the grid <= 100
#
# Output Format
# =============
# Print the required answer.
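# Approach used below: read the grid, then for every cell take the product of the
# four-cell run to its right, downward, and along both diagonals (whenever the run
# fits inside the grid), collecting every product and printing the maximum.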
grid_size = 20
grid = []
for grid_i in range(grid_size):
grid_t = [int(grid_temp) for grid_temp in input().strip().split(' ')]
grid.append(grid_t)
# Note: For this grid, we will assume that (0,0) is the location of the
# top-left corner and (grid_size-1, grid_size-1) is the bottom-right corner
product = []
for y in range(grid_size):
for x in range(grid_size):
# left-right
if x+3 < grid_size:
product.append(grid[y][x] * grid[y][x+1] * grid[y][x+2] * grid[y][x+3])
# up-down
if y+3 < grid_size:
product.append(grid[y][x] * grid[y+1][x] * grid[y+2][x] * grid[y+3][x])
# back-diagonal
if x+3 < grid_size and y+3 < grid_size:
product.append(grid[y][x] * grid[y+1][x+1] * grid[y+2][x+2] * grid[y+3][x+3])
# forward-diagonal
if x >= 3 and y+3 < grid_size:
product.append(grid[y][x] * grid[y+1][x-1] * grid[y+2][x-2] * grid[y+3][x-3])
print(max(product))
|
python
|