content (stringlengths 0–894k) | type (stringclasses, 2 values)
---|---|
# Generated by Django 2.1.3 on 2018-12-01 22:19
from django.db import migrations, models
class Migration(migrations.Migration):

    dependencies = [
        ('Eprint_users', '0011_auto_20181130_0119'),
    ]

    operations = [
        migrations.AlterField(
            model_name='profile',
            name='image',
            field=models.ImageField(default='default.png', upload_to='media/profilepics'),
        ),
    ]
|
python
|
#!/usr/bin/env python
"""
Loop over a list of blog post src filenames and
generate a blog index markdown file.
"""
import sys
import os.path
from datetime import datetime
from utils import parse_metadata
POST_TEMPLATE = """
---
## [{title}]({htmlname})
### {subtitle}
{description}
_{datestr}_ | [Read more...]({htmlname})
"""
def post_index(filenames):
    for file in sorted(filenames, reverse=True):
        path, name = os.path.split(file)
        htmlname = file[4:-3] + '.html'
        with open(file, 'r') as f:
            md = parse_metadata(f.read())
        # DATESTR
        md['datestr'] = str(datetime.strptime(name[:10], '%Y-%m-%d').date())
        if 'subtitle' not in md:
            md['subtitle'] = ''
        print(POST_TEMPLATE.format(htmlname=htmlname, **md))

if __name__ == '__main__':
    post_index(sys.argv[1:])
|
python
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.http import HttpResponse
from django.shortcuts import render
def home(request):
    # return HttpResponse('<h1>Hello, Welcome to this test</h1>')
    # The templates path is set in "DIRS" of "TEMPLATES" in settings.py,
    # so there is no need to give the absolute path here.
    return render(request, "index.html")
def us(request):
    return render(request, "us.html")

def algos(request):
    return render(request, "algos_explanation.html")

def breastCancer(request):
    return render(request, "breastCancer.html")

def handler404(request, exception):
    return render(request, "errors/404.html")

def handler500(request):
    return render(request, "errors/500.html")
|
python
|
#!/usr/bin/env python
# Copyright (c) 2013. Mark E. Madsen <[email protected]>
#
# This work is licensed under the terms of the Apache Software License, Version 2.0. See the file LICENSE for details.
"""
Description here
"""
import logging as log
import networkx as nx
import madsenlab.axelrod.utils.configuration
import numpy as np
import math as m
import pprint as pp
import matplotlib.pyplot as plt
from numpy.random import RandomState
###################################################################################
class BaseGraphPopulation(object):
"""
Base class for all Axelrod model populations that use a graph (NetworkX) representation to
store the relations between agents. Methods here need to be independent of the trait representation,
but can assume that the agents are nodes in a Graph. Thus, most of the "agent selection" and
"neighbor" methods are concentrated here.
"""
def __init__(self,simconfig,graph_factory,trait_factory):
self.simconfig = simconfig
self.interactions = 0
self.innovations = 0
self.losses = 0
self.time_step_last_interaction = 0
self.prng = RandomState() # allow the library to choose a seed via OS specific mechanism
self.graph_factory = graph_factory
self.trait_factory = trait_factory
# initialize the graph structure via the factory object
self.agentgraph = self.graph_factory.get_graph()
def get_agent_by_id(self, agent_id):
return (agent_id, self.agentgraph.node[agent_id]['traits'])
def get_random_agent(self):
"""
Returns a random agent chosen from the population, in the form of a tuple of two elements
(node_id, array_of_traits). This allows operations on the agent and its traits without additional calls.
To modify the traits, change one or more elements in the array, and then call set_agent_traits(agent_id, new_list)
"""
rand_agent_id = self.prng.randint(0, self.simconfig.popsize)
return self.get_agent_by_id(rand_agent_id)
def get_random_neighbor_for_agent(self, agent_id):
"""
Returns a random agent chosen from among the neighbors of agent_id. The format is the same as
get_random_agent -- a two element tuple with the neighbor's ID and their trait list.
"""
neighbor_list = self.agentgraph.neighbors(agent_id)
num_neighbors = len(neighbor_list)
rand_neighbor_id = neighbor_list[self.prng.randint(0,num_neighbors)]
return self.get_agent_by_id(rand_neighbor_id)
def get_all_neighbors_for_agent(self, agent_id):
return self.agentgraph.neighbors(agent_id)
def get_coordination_number(self):
return self.graph_factory.get_lattice_coordination_number()
def update_interactions(self, timestep):
self.interactions += 1
self.time_step_last_interaction = timestep
def update_innovations(self):
self.innovations += 1
def update_loss_events(self):
self.losses += 1
def get_time_last_interaction(self):
return self.time_step_last_interaction
def get_interactions(self):
return self.interactions
def get_innovations(self):
return self.innovations
def get_losses(self):
return self.losses
def initialize_population(self):
self.trait_factory.initialize_population(self.agentgraph)
### Abstract methods - derived classes need to override
def draw_network_colored_by_culture(self):
raise NotImplementedError
def get_traits_packed(self,agent_traits):
raise NotImplementedError
def set_agent_traits(self, agent_id, trait_list):
raise NotImplementedError
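# Illustrative sketch (not part of the original module): one Axelrod-style interaction
# step written against the population API above; assumes a concrete subclass plus
# configured graph and trait factories.
def _example_interaction_step(pop, timestep):
    focal_id, focal_traits = pop.get_random_agent()
    neighbor_id, neighbor_traits = pop.get_random_neighbor_for_agent(focal_id)
    # A real model would compare the two trait lists and possibly copy a trait via
    # pop.set_agent_traits(focal_id, new_traits); here we only record the interaction.
    pop.update_interactions(timestep)
    return focal_id, neighbor_id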
###################################################################################
class TreeTraitStructurePopulation(BaseGraphPopulation):
"""
Base class for all Axelrod models which feature a non-fixed number of features/traits per individual where
traits are encoded as paths in a tree.
"""
def __init__(self, simconfig,graph_factory,trait_factory):
super(TreeTraitStructurePopulation, self).__init__(simconfig,graph_factory,trait_factory)
def set_agent_traits(self, agent_id, trait_set):
self.agentgraph.node[agent_id]['traits'] = trait_set
def get_traits_packed(self,agent_traits):
hashable_set = frozenset(agent_traits)
return hash(hashable_set)
def draw_network_colored_by_culture(self):
nodes, traits = zip(*nx.get_node_attributes(self.agentgraph, 'traits').items())
nodes, pos = zip(*nx.get_node_attributes(self.agentgraph, 'pos').items())
color_tupled_compressed = [self.get_traits_packed(t) for t in traits]
nx.draw(self.agentgraph, pos=pos, nodelist=nodes, node_color=color_tupled_compressed)
plt.show()
# EXPLICIT OVERRIDE OF BASE CLASS METHOD!
def initialize_population(self):
"""
For semantically structured traits, since the traits are not just random integers,
we need to have a copy of the trait "universe" -- i.e., all possible traits and their
relations. So we initialize the trait universe first, and then allow the trait factory
to initialize our starting population on the chosen population structure.
"""
self.trait_universe = self.trait_factory.initialize_traits()
self.trait_factory.initialize_population(self.agentgraph)
def __repr__(self):
rep = 'TreeTraitStructurePopulation: ['
for nodename in self.agentgraph.nodes():
rep += "node %s: " % nodename
rep += pp.pformat(self.agentgraph.node[nodename]['traits'])
rep += ",\n"
rep += ' ]'
return rep
###################################################################################
class ExtensibleTraitStructurePopulation(BaseGraphPopulation):
"""
Base class for all Axelrod models which feature a non-fixed number of features/traits per individual.
"""
def __init__(self, simconfig,graph_factory,trait_factory):
super(ExtensibleTraitStructurePopulation, self).__init__(simconfig,graph_factory, trait_factory)
def set_agent_traits(self, agent_id, trait_set):
self.agentgraph.node[agent_id]['traits'] = trait_set
def get_traits_packed(self,agent_traits):
hashable_set = frozenset(agent_traits)
return hash(hashable_set)
def draw_network_colored_by_culture(self):
nodes, traits = zip(*nx.get_node_attributes(self.agentgraph, 'traits').items())
nodes, pos = zip(*nx.get_node_attributes(self.agentgraph, 'pos').items())
color_tupled_compressed = [self.get_traits_packed(t) for t in traits]
nx.draw(self.agentgraph, pos=pos, nodelist=nodes, node_color=color_tupled_compressed)
plt.show()
###################################################################################
class FixedTraitStructurePopulation(BaseGraphPopulation):
"""
Base class for all Axelrod models with a fixed number of features and number of traits per feature.
Specifies no specific graph, lattice, or network model,
but defines operations usable on any specific model as long as the graph is represented by the
NetworkX library and API. Agents are given by nodes, and edges define "neighbors".
Important operations on a model include choosing a random agent, finding a random neighbor,
updating an agent's traits, and updating statistics such as the time the last interaction occurred
(which is used to know when (or if) we've reached a fully absorbing state and can stop).
Subclasses should ONLY implement an __init__ method, in which self.model is assigned an
instance of a specific graph, lattice, or network model.
"""
def __init__(self, simconfig,graph_factory, trait_factory):
super(FixedTraitStructurePopulation, self).__init__(simconfig, graph_factory, trait_factory)
def draw_network_colored_by_culture(self):
nodes, colors = zip(*nx.get_node_attributes(self.agentgraph, 'traits').items())
nodes, pos = zip(*nx.get_node_attributes(self.agentgraph, 'pos').items())
color_tupled_compressed = [int(''.join(str(i) for i in t)) for t in colors]
nx.draw(self.agentgraph, pos=pos, nodelist=nodes, node_color=color_tupled_compressed)
plt.show()
def get_traits_packed(self,agent_traits):
return ''.join(str(i) for i in agent_traits)
def set_agent_traits(self, agent_id, trait_list):
"""
Stores a modified version of the trait list for an agent.
"""
#old_traits = self.model.node[agent_id]['traits']
self.agentgraph.node[agent_id]['traits'] = trait_list
#new_traits = self.model.node[agent_id]['traits']
#log.debug("setting agent %s: target traits: %s old: %s new: %s", agent_id, trait_list, old_traits, new_traits)
|
python
|
#!/usr/bin/env python3
# coding:utf-8
class Solution:
    def maxInWindows(self, num, size):
        if not num or size <= 0:
            return []
        if len(num) < size:
            return [max(num)]
        res = []
        queue = num[:size]
        res.append(max(queue))
        for i in range(size, len(num)):
            queue.pop(0)
            queue.append(num[i])
            res.append(max(queue))
        return res
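# Alternative sketch (not in the original): an O(n) sliding-window maximum using a
# monotonically decreasing deque of indices, instead of recomputing max() per window.
# Note it returns [] when size exceeds len(num), following the common convention.
def max_in_windows_deque(num, size):
    from collections import deque
    if not num or size <= 0 or size > len(num):
        return []
    dq, res = deque(), []
    for i, x in enumerate(num):
        while dq and num[dq[-1]] <= x:  # drop smaller elements; deque stays decreasing
            dq.pop()
        dq.append(i)
        if dq[0] <= i - size:           # front index has slid out of the window
            dq.popleft()
        if i >= size - 1:
            res.append(num[dq[0]])
    return res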
if __name__ == "__main__":
    nums = [2, 3, 4, 2, 6, 2, 5, 1]
    size = 3
    s = Solution()
    ans = s.maxInWindows(nums, size)
    print(ans)
|
python
|
__all__=["greeters"]
# ***
# *** Use __init__.py to expose different parts of the submodules in the desired namespace
# ***
# *** Define what can be seen in the main "skeleton." namespace (as this is skeleton/__init__.py) like this:
# from .greeters.fancy import * # now you can do: from skeleton import FancyHelloWorld
from valkka.skeleton.greeters.fancy import * # relative imports are evil, so use this instead
# *** Be aware that in "skeleton.greeters" a list __all__ has been defined. It declares what is exposed to the API user when calling "from skeleton.greeters.fancy import *"
# *** We could declare the API exposure here as well, by being more explicit:
# from skeleton.greeters.fancy import FancyHelloWorld
# *** If you want to keep FancyHelloWorld under the "greeters.fancy." namespace, don't add ".. import *" statements to this file
# *** The idea is that the submodules have "internal hierarchies" that the API user is not supposed to worry about,
# *** and they access them simply with "from skeleton import ClassName"
from valkka.skeleton.greeters.cool.cool1 import *
from valkka.skeleton.greeters.cool.cool2 import *
from valkka.skeleton.version import *
__version__=str(VERSION_MAJOR)+"."+str(VERSION_MINOR)+"."+str(VERSION_PATCH)
|
python
|
import iota_client
# client will connect to testnet by default
client = iota_client.Client()
print(client.get_info())
|
python
|
from django.apps import AppConfig
class SiteAdocaoConfig(AppConfig):
    name = 'site_adocao'
|
python
|
# Copyright 2019 Quantapix Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import os
import shutil as sh
import filecmp as fc
import pathlib as pth
import collections as co
from hashlib import blake2b
from .log import Logger
from .base import config
from .counter import counters
from .resource import Resource, resource, Names
log = Logger(__name__)
def calc_digest(path, *, base=None, **_):
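# Streams the file in 64 KiB chunks through a 20-byte BLAKE2b digest; returns
# (hexdigest, size) on success, or (None, None) when the path does not exist.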
p = base / path if base else pth.Path(path)
if p.exists():
d, s = blake2b(digest_size=20), 0
with open(p, 'rb') as f:
for b in iter(lambda: f.read(65536), b''):
s += len(b)
d.update(b)
assert s == p.stat().st_size
return d.hexdigest(), s
log.warning("Cant't digest nonexistent file {}", p)
return None, None
class Entry(co.namedtuple('Entry', 'path digest size')):
__slots__ = ()
def __new__(cls, path, digest=None, size=None, **kw):
if not digest:
digest, size = calc_digest(path, **kw)
return super().__new__(cls, path, digest, size)
def __bool__(self):
return bool(self.path and self.digest is not None
and self.size is not None)
__hash__ = None
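# Entries are deliberately unhashable; equality is defined purely by matching
# digest and size (see __eq__ below), independent of the path.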
def __eq__(self, other):
if isinstance(other, type(self)):
d = self.digest
return (d and d == other.digest and self.size == other.size)
return NotImplemented
def __repr__(self):
s = "{}({!r}".format(type(self).__name__, str(self.path))
d = self.digest
if d:
s += ", {!r}, {}".format(d, self.size)
s += ")"
return s
def relative_to(self, path, base, **_):
try:
(base / self.path).relative_to(base / path)
except ValueError:
return False
return True
def check(self, **kw):
d = self.digest
if d:
d2, s = calc_digest(self.path, **kw)
if d2 == d and s == self.size:
return True
m = 'Mismatched digest for {}'
else:
m = 'No digest for {}'
log.info(m, self.path)
return False
def prune_dir(path, cntr=None, **_):
with os.scandir(path) as es:
for e in es:
p = pth.Path(e.path)
j = None
if p.name.startswith('.'):
if e.is_dir(follow_symlinks=False):
sh.rmtree(str(p))
elif p.suffix != '.qnr':
p.unlink()
log.info('Deleted {}', p)
j = '-'
elif e.is_dir(follow_symlinks=False):
prune_dir(p, cntr)
continue
if cntr:
cntr.incr(j)
try:
path.rmdir()
log.info('Deleted {}', path)
j = '-'
except OSError:
j = None
if cntr:
cntr.incr(j)
class Roster(Resource):
_res_path = '.roster.qnr'
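# A Roster indexes every tracked file twice: by relative path and by its (digest, size)
# pair. The double mapping is what lets entry_adder detect duplicate content and queue
# it for expulsion.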
@classmethod
def globals(cls):
return globals()
def __init__(self, entries=None, **kw):
super().__init__(None, **kw)
self._expels = []
self._symlinks = []
if entries:
self.add_entry(entries)
def __repr__(self):
return '{}({!r})'.format(type(self).__name__, tuple(self.entries))
def __str__(self):
s = '{}:'.format(str(self.base))
for e in self.entries:
s += '\n{} {} {}'.format(str(e.path), str(e.digest), e.size)
return s
@property
def entries(self):
es = [e for e in self.values() if isinstance(e, Entry)]
return sorted(es, key=lambda x: x.path)
def adjust_kw(self, kw):
def _adjust(key, default):
v = kw.get(key)
v = pth.Path(v) if v else default
kw[key] = v
_adjust('base', self.base)
def entry_adder(self, entry, cntr, modify=False, expel=True, **kw):
if isinstance(entry, Entry):
assert entry
p, d, s = entry
k = d, s
if p in self:
ok = self[p]
if k != ok:
if modify:
log.info('Modifying digest for {}', p)
del self[ok]
self[p] = k
self[k] = entry
cntr.incr(modify)
return
else:
log.warning('Digest mismatch for {}', p)
cntr.incr()
else:
try:
o = self[k]
except KeyError:
self[p] = k
self[k] = entry
yield p
else:
log.info('Duplicates: {} and {}', o.path, p)
if expel:
self._expels.append((o, entry))
cntr.incr()
else:
for e in entry:
yield from self.entry_adder(e, cntr, modify, expel, **kw)
add_args = ((('scanned', '.'), ('added', '+')), 'Adding:')
def add_entry(self, entry, **kw):
with counters(self.add_args, kw) as cs:
for _ in self.entry_adder(entry, **kw):
cs.incr('+')
return cs
def path_adder(self, path, **kw):
self.adjust_kw(kw)
p = str(pth.Path(path).relative_to(kw['base']))
yield from self.entry_adder(Entry(p, **kw), **kw)
def walker(self, paths=(), **kw):
for e in self.entries:
if paths:
for p in paths:
if e.relative_to(p, **kw):
break
else:
continue
yield e
def scanner(self, root, cntr, **kw):
def _paths(path):
with os.scandir(path) as es:
for e in es:
p = pth.Path(e.path)
if not p.name.startswith('.'):
if e.is_dir(follow_symlinks=False):
yield from _paths(p)
continue
elif e.is_file(follow_symlinks=False):
yield p
continue
elif e.is_symlink():
log.info('Symlink {}', p)
self._symlinks.append(p)
else:
log.info('Ignoring dir entry {}', p)
cntr.incr()
if root.exists():
for p in _paths(root):
yield from self.path_adder(p, **kw, cntr=cntr)
scan_args = ((('scanned', '.'), ('added', '+')), 'Scanning:')
def scan(self, paths=(), **kw):
self.adjust_kw(kw)
b = kw['base']
with counters(self.scan_args, kw) as cs:
for p in paths or ('', ):
for _ in self.scanner(b / p, **kw):
cs.incr('+')
return cs
rescan_args = ((('scanned', '.'), ('added', '+'), ('removed', '-'),
('modified', 'm')), 'Rescanning:')
def rescanner(self, paths, cntr, **kw):
self.adjust_kw(kw)
b = kw['base']
es = [e for e in self.walker(paths, **kw) if not (b / e.path).exists()]
for p, d, s in es:
del self[p]
del self[(d, s)]
cntr.incr('-')
self._expels = []
for p in paths or ('', ):
for p in self.scanner(b / p, **kw, cntr=cntr, modify='m'):
yield p
def rescan(self, paths=(), **kw):
with counters(self.rescan_args, kw) as cs:
for _ in self.rescanner(paths, **kw):
cs.incr('+')
return cs
check_args = ((('passed', '.'), ('failed', 'F')), 'Checking:')
def check(self, paths=(), **kw):
self.adjust_kw(kw)
with counters(self.check_args, kw) as cs:
for e in self.walker(paths, **kw):
cs.incr('.' if e.check(**kw) else 'F')
return cs
def check_ok(self, paths=(), **kw):
return not self.check(paths, **kw)['F']
def rename_path(self, src, dst, cntr, cntr_key=None, **_):
if dst.exists():
log.warning("Can't move/rename, destination exists {}", dst)
cntr.incr('F')
else:
dst.parent.mkdir(parents=True, exist_ok=True)
src.rename(dst)
log.info('Moved/renamed {} to/as {}', src, dst)
cntr.incr(cntr_key)
expel_args = ((('scanned', '.'), ('expelled', 'e'), ('failed', 'F')),
'Expelling:')
def expel(self, ebase=None, **kw):
with counters(self.expel_args, kw) as cs:
self.adjust_kw(kw)
b = kw['base']
for o, d in self._expels:
op = b / o.path
dp = b / d.path
if fc.cmp(op, dp, shallow=False):
e = (ebase or (b.parent / 'expel')) / d.path
self.rename_path(dp, e, **kw, cntr_key='e')
else:
log.error('Duplicates compare failed {}, {}', op, dp)
cs.incr('F')
self._expels = []
return cs
def absorb_paths(self, paths=(), abase=None, **kw):
self.adjust_kw(kw)
b = kw['base']
ab = abase or (b.parent / 'absorb')
for p in paths or ('', ):
p = ab / p
if p.exists():
yield b, ab, p
absorb_args = ((('scanned', '.'), ('absorbed', 'a'), ('failed', 'F')),
'Absorbing:')
def absorb(self, paths=(), abase=None, **kw):
with counters(self.absorb_args, kw) as cs:
kw['expel'] = False
for b, ab, path in self.absorb_paths(paths, abase, **kw):
for p in [p for p in self.scanner(path, **kw, base=ab)]:
self.rename_path(ab / p, b / p, **kw, cntr_key='a')
prune_dir(path)
return cs
prune_args = ((('scanned', '.'), ('deleted', '-')), 'Pruning:')
def prune(self, paths=(), abase=None, **kw):
with counters(self.prune_args, kw) as cs:
for _, ab, p in self.absorb_paths(paths, abase, **kw):
prune_dir(p, **kw)
return cs
def namer(self, path, names, base, cntr, **_):
p = str(path)
if p not in names:
if (base / path).exists():
names[p] = np = p.lower().replace(' ', '-')
cntr.incr('.' if p == np else 'n')
path = path.parent
if path.name:
self.namer(path, names, base, cntr)
else:
cntr.incr('F')
names_args = ((('scanned', '.'), ('renamed', 'r'), ('normalized', 'n'),
('failed', 'F')), 'Naming:')
def names(self, paths=(), **kw):
with counters(self.names_args, kw) as cs:
self.adjust_kw(kw)
with resource(Names.create(kw['base'])) as ns:
ns.clear()
for e in self.walker(paths, **kw):
self.namer(pth.Path(e.path), ns, **kw)
return cs
rename_args = ((('scanned', '.'), ('added', '+'), ('removed', '-'),
('modified', 'm'), ('normalized', 'n'), ('renamed', 'r'),
('failed', 'F')), 'Renaming:')
def rename(self, paths=(), **kw):
with counters(self.rename_args, kw) as cs:
self.adjust_kw(kw)
b = kw['base']
with resource(Names.create(b)) as ns:
if ns:
for e in self.walker(paths, **kw):
p = e.path
try:
d = b / ns.pop(p)
except KeyError:
cs.incr()
continue
self.rename_path(b / p, d, **kw, cntr_key='r')
ps = paths or ('', )
for o in sorted(ns.keys(), reverse=True):
d = b / ns.pop(o)
o = b / o
if o.exists() and o.is_dir():
for p in ps:
try:
o.relative_to(b / p)
break
except ValueError:
continue
else:
cs.incr()
continue
self.rename_path(o, d, **kw, cntr_key='r')
else:
cs.incr()
for p in self.rescanner(paths, **kw):
self.namer(pth.Path(p), ns, **kw)
return cs
if __name__ == '__main__':
from .args import BArgs
a = BArgs()
a.add_argument('paths', nargs='*', help='Paths to follow')
a.add_argument('-u', '--prune', action=a.st, help='Prune absorb dir')
a.add_argument('-a', '--absorb', help='Path to absorb uniques from')
a.add_argument('-x', '--rename', action=a.st, help='Rename files')
a.add_argument('-R', '--rescan', action=a.st, help='Rescan base')
a.add_argument('-s', '--scan', action=a.st, help='Scan base')
a.add_argument('-e', '--expel', help='Path to expel duplicates to')
a.add_argument('-c', '--check', action=a.st, help='Check all digests')
a.add_argument('-n', '--names', action=a.st, help='Names of files')
a = a.parse_args()
r = Roster.create(a.base)
if a.prune:
abase = None if a.absorb is None or a.absorb == config.DEFAULT else a.absorb
r.prune(a.paths, abase=abase)
elif a.absorb:
abase = None if a.absorb == config.DEFAULT else a.absorb
r.absorb(a.paths, abase=abase)
elif a.rename:
r.rename(a.paths)
else:
if a.rescan:
r.rescan(a.paths)
elif a.scan:
r.scan(a.paths)
if a.expel:
ebase = None if a.expel == config.DEFAULT else a.expel
r.expel(ebase=ebase)
if a.check:
r.check_ok(a.paths)
if a.names:
r.names(a.paths)
r.save()
|
python
|
import aita
if __name__ == "__main__":
    # Development, Testing, Production
    app = aita.create_app('Development')
    app.run()
|
python
|
import json
from logging import root
import os
import warnings
from skimage.color import rgb2lab, gray2rgb, rgba2rgb
from skimage.util import img_as_float
import numpy as np
import numpy.typing as npt
import torch
from torch.utils.data import DataLoader
import torch.optim as optim
import torch.nn as nn
from torchvision.models import vgg16_bn
from torchvision.transforms import Resize
from sklearn.metrics import f1_score, precision_recall_fscore_support, cohen_kappa_score, confusion_matrix
from sklearn import svm
from sklearn.cluster import MiniBatchKMeans
from sklearn.model_selection import train_test_split
from scipy.spatial.distance import cdist
import joblib
from termcolor import colored
import math
from math import floor
from collections import OrderedDict
from skimage.color import lab2rgb
from ..models.lcn import LCNCreator, MarkerBasedNorm2d, MarkerBasedNorm3d, LIDSConvNet
from ._dataset import LIDSDataset
from PIL import Image
import nibabel as nib
import re
ift = None
try:
import pyift.pyift as ift
except ImportError:
warnings.warn("PyIFT is not installed.", ImportWarning)
def load_image(path: str, lab: bool=True) -> np.ndarray:
if path.endswith('.mimg'):
image = load_mimage(path)
elif path.endswith('.nii.gz') or path.endswith('.nii'):
image = np.asanyarray(nib.load(path).dataobj)
else:
image = np.asarray(Image.open(path))
if lab:
if image.ndim == 3 and image.shape[-1] == 4:
image = rgba2rgb(image)
elif image.ndim == 2 or image.shape[-1] == 1:
image = gray2rgb(image)
elif image.ndim == 3 and image.shape[-1] > 4:
image = gray2rgb(image)
elif image.ndim == 4 and image.shape[-1] == 4:
image = rgba2rgb(image)
image = rgb2lab(image)
if image.dtype != float:
image = img_as_float(image)
return image
def image_to_rgb(image):
warnings.warn("'image_to_rgb' will be remove due to its misleading name. " +
"Use 'from_lab_to_rgb' instead",
DeprecationWarning,
stacklevel=2
)
return from_lab_to_rgb(image)
def from_lab_to_rgb(image):
image = lab2rgb(image)
return image
def load_markers(markers_dir):
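# Marker-file layout assumed by the parser below (inferred from this code, not from a
# formal spec): the first line gives the image geometry (3 integers for 2-D, 4+ for 3-D);
# each following line describes one marker, with its coordinates and class label among
# the space-separated fields (field 2 is skipped in both the 2-D and 3-D branches).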
markers = []
lines = None
with open(markers_dir, 'r') as f:
lines = f.readlines()
label_infos = [int(info) for info in lines[0].split(" ")]
is_2d = len(label_infos) == 3
if is_2d:
image_shape = (label_infos[2], label_infos[1])
else:
image_shape = (label_infos[2], label_infos[1], label_infos[3])
markers = np.zeros(image_shape, dtype=int)
for line in lines[1:]:
split_line = line.split(" ")
if is_2d:
y, x, label = int(split_line[0]), int(split_line[1]), int(split_line[3])
markers[x][y] = label
else:
x, y, z, label = int(split_line[0]), int(split_line[1]), int(split_line[3]), int(split_line[4])
markers[x][y][z] = label
return markers
def load_images_and_markers(path):
dirs = os.listdir(path)
images_names = [filename for filename in dirs if not filename.endswith('.txt')]
makers_names = [filename for filename in dirs if filename.endswith('.txt')]
images_names.sort()
makers_names.sort()
images = []
images_markers = []
for image_name, marker_name in zip(images_names, makers_names):
if image_name.endswith('.npy'):
image = np.load(os.path.join(path, image_name))
else:
image = load_image(os.path.join(path, image_name))
markers = load_markers(os.path.join(path, marker_name))
images.append(image)
images_markers.append(markers)
return np.array(images), np.array(images_markers)
def _convert_arch_from_lids_format(arch):
stdev_factor = arch['stdev_factor']
n_layers = arch['nlayers']
n_arch = {
"type": "sequential",
"layers": {}
}
for i in range(1, n_layers + 1):
layer_name = f"layer{i}"
layer_params = arch[layer_name]
kernel_size = layer_params['conv']['kernel_size']
is3d = kernel_size[2] > 0
end = 3 if is3d else 2
dilation_rate = layer_params['conv']['dilation_rate'][:end]
kernel_size = kernel_size[:end]
m_norm_layer = {
"operation": "m_norm3d" if is3d else "m_norm2d",
"params": {
"kernel_size": kernel_size,
"dilation": dilation_rate,
"default_std": stdev_factor
}
}
conv_layer = {
"operation": "conv3d" if is3d else "conv2d",
"params": {
"kernel_size": kernel_size,
"dilation": dilation_rate,
"number_of_kernels_per_marker": layer_params['conv']['nkernels_per_image'],
"padding": [k_size // 2 for k_size in kernel_size],
"out_channels": layer_params['conv']['noutput_channels'],
"stride": 1
}
}
relu_layer = None
if layer_params['relu']:
relu_layer = {
"operation": "relu",
"params": {
"inplace": True
}
}
pool_type_mapping = {
"max_pool2d": "max_pool2d",
"avg_pool2d": "avg_pool2d",
"max_pool3d": "max_pool3d",
"avg_pool3d": "avg_pool3d",
"no_pool": None
}
pool_type = layer_params['pooling']['type']
if is3d and pool_type != "no_pool":
pool_type += "3d"
elif pool_type != "no_pool":
pool_type += "2d"
assert pool_type in pool_type_mapping, f"{pool_type} is not a supported pooling operation"
if pool_type == "no_pool":
pool_layer = None
else:
pool_kernel_size = layer_params['pooling']['size'][:end]
pool_layer = {
"operation": pool_type_mapping[pool_type],
"params": {
"kernel_size": pool_kernel_size,
"stride": layer_params['pooling']['stride'],
"padding": [k_size // 2 for k_size in pool_kernel_size]
}
}
n_arch['layers'][f'm-norm{i}'] = m_norm_layer
n_arch['layers'][f'conv{i}'] = conv_layer
if relu_layer:
n_arch['layers'][f'activation{i}'] = relu_layer
if pool_layer:
n_arch['layers'][f'pool{i}'] = pool_layer
return {
"features": n_arch
}
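# Illustrative sketch of the LIDS-format dict handled above (key names inferred from
# the code, values hypothetical):
# {
#     "stdev_factor": 0.001, "nlayers": 1,
#     "layer1": {
#         "conv": {"kernel_size": [3, 3, 0], "dilation_rate": [1, 1, 0],
#                  "nkernels_per_image": 8, "noutput_channels": 32},
#         "relu": 1,
#         "pooling": {"type": "max_pool", "size": [2, 2, 0], "stride": 2}
#     }
# }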
def load_architecture(architecture_dir):
path = architecture_dir
with open(path) as json_file:
architecture = json.load(json_file)
if 'nlayers' in architecture:
architecture = _convert_arch_from_lids_format(architecture)
return architecture
def configure_dataset(dataset_dir, split_dir, transform=None):
dataset = LIDSDataset(dataset_dir, split_dir, transform)
return dataset
def build_model(architecture,
images=None,
markers=None,
input_shape=None,
batch_size=32,
train_set=None,
remove_border=0,
relabel_markers=True,
default_std=1e-6,
device='cpu',
verbose=False):
creator = LCNCreator(architecture,
images=images,
markers=markers,
input_shape=input_shape,
batch_size=batch_size,
relabel_markers=relabel_markers,
remove_border=remove_border,
default_std=default_std,
device=device)
if verbose:
print("Building model...")
creator.build_model(verbose=verbose)
model = creator.get_LIDSConvNet()
if verbose:
print("Model ready.")
return model
def get_torchvision_model(model_name, number_classes, pretrained=True, device='cpu'):
model = None
if model_name == "vgg16_bn":
if pretrained:
model = vgg16_bn(pretrained=pretrained)
model.classifier = nn.Sequential(
nn.Linear(512 * 7 * 7, 4096),
nn.ReLU(True),
nn.Dropout(),
nn.Linear(4096, 4096),
nn.ReLU(True),
nn.Dropout(),
nn.Linear(4096, number_classes),
)
for m in model.classifier.modules():
if isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
nn.init.normal_(m.weight, 0, 0.01)
nn.init.constant_(m.bias, 0)
else:
model = vgg16_bn(num_classes=number_classes, init_weights=True)
model.to(device)
return model
def train_mlp(model,
train_set,
epochs=30,
batch_size=64,
lr=1e-3,
weight_decay=1e-3,
criterion=nn.CrossEntropyLoss(),
device='cpu'):
dataloader = DataLoader(train_set, batch_size=batch_size, shuffle=True, drop_last=False)
model.to(device)
model.feature_extractor.eval()
model.classifier.train()
#optimizer
optimizer = optim.Adam(model.classifier.parameters(),
lr=lr,
weight_decay=weight_decay)
#training
print(f"Training classifier for {epochs} epochs")
for epoch in range(0, epochs):
print('-' * 40)
print('Epoch {}/{}'.format(epoch, epochs - 1))
running_loss = 0.0
running_corrects = 0.0
for i, data in enumerate(dataloader, 0):
inputs, labels = data
inputs, labels = inputs.to(device), labels.to(device)
optimizer.zero_grad()
outputs = model(inputs)
loss = criterion(outputs, labels)
preds = torch.max(outputs, 1)[1]
loss.backward()
#clip_grad_norm_(self.mlp.parameters(), 1)
optimizer.step()
#print(outputs)
running_loss += loss.item()*inputs.size(0)/len(train_set)
running_corrects += torch.sum(preds == labels.data)
epoch_loss = running_loss
epoch_acc = running_corrects.double()/len(train_set)
print('Loss: {:.6f} Acc: {:.6f}'.format(epoch_loss, epoch_acc))
def train_model(model,
train_set,
epochs=30,
batch_size=64,
lr=1e-3,
weight_decay=1e-3,
step=0,
loss_function=nn.CrossEntropyLoss,
device='cpu',
ignore_label=-100,
only_classifier=False,
wandb=None):
#torch.manual_seed(42)
#np.random.seed(42)
#if device != 'cpu':
# torch.backends.cudnn.deterministic = True
dataloader = DataLoader(train_set, batch_size=batch_size, shuffle=True, drop_last=False)
model.to(device)
model.eval()
criterion = loss_function(ignore_index=ignore_label)
parameters = []
if not only_classifier:
model.feature_extractor.train()
parameters.append({
"params": model.feature_extractor.parameters(),
"lr": lr,
"weight_decay": weight_decay
})
model.classifier.train()
parameters.append({
"params": model.classifier.parameters(),
"lr": lr,
"weight_decay": weight_decay
})
#optimizer
optimizer = optim.Adam(parameters)
if step > 0:
scheduler = optim.lr_scheduler.StepLR(optimizer,
step_size=step,
gamma=0.1)
#training
print(f"Training classifier for {epochs} epochs")
for epoch in range(0, epochs):
print('-' * 40)
print('Epoch {}/{}'.format(epoch, epochs - 1))
running_loss = 0.0
running_corrects = 0.0
n = 0
for i, data in enumerate(dataloader, 0):
inputs, labels = data
inputs, labels = inputs.to(device), labels.to(device)
optimizer.zero_grad()
outputs = model(inputs)
loss = criterion(outputs, labels)
preds = torch.max(outputs, 1)[1]
loss.backward()
if epoch < 3:
nn.utils.clip_grad_norm_(model.parameters(), .1)
else:
nn.utils.clip_grad_norm_(model.parameters(), 1)
optimizer.step()
#print(outputs)
mask = labels != ignore_label
running_loss += loss.item()*(mask.sum())
running_corrects += torch.sum(preds[mask] == labels[mask].data)
n += (mask).sum()
if step > 0:
scheduler.step()
epoch_loss = running_loss/n
epoch_acc = (running_corrects.double())/n
if wandb:
wandb.log({"loss": epoch_loss, "train-acc": epoch_acc}, step=epoch)
print('Loss: {:.6f} Acc: {:.6f}'.format(epoch_loss, epoch_acc))
#if epoch_acc >= 0.9900000:
# break
def save_model(model, outputs_dir, model_filename):
if not os.path.exists(outputs_dir):
os.makedirs(outputs_dir)
dir_to_save = os.path.join(outputs_dir, model_filename)
print("Saving model...")
torch.save(model.state_dict(), dir_to_save)
def load_model(model_path, architecture, input_shape, remove_border=0, default_std=1e-6):
state_dict = torch.load(model_path, map_location=torch.device('cpu'))
creator = LCNCreator(architecture,
input_shape=input_shape,
remove_border=remove_border,
default_std=default_std,
relabel_markers=False)
print("Loading model...")
creator.load_model(state_dict)
model = creator.get_LIDSConvNet()
return model
def load_torchvision_model_weights(model, weigths_path):
state_dict = torch.load(weigths_path, map_location=torch.device('cpu'))
model.load_state_dict(state_dict)
return model
def load_weights_from_lids_model(model, lids_model_dir):
print("Loading LIDS model...")
for name, layer in model.named_children():
print(name)
if isinstance(layer, MarkerBasedNorm2d):
conv_name = name.replace('m-norm', 'conv')
with open(os.path.join(lids_model_dir,
f"{conv_name}-mean.txt")) as f:
lines = f.readlines()[0]
mean = np.array([float(line) for line in lines.split(' ') if len(line) > 0])
with open(os.path.join(lids_model_dir,
f"{conv_name}-stdev.txt")) as f:
lines = f.readlines()[0]
std = np.array([float(line) for line in lines.split(' ') if len(line) > 0])
layer.mean_by_channel = torch.from_numpy(mean).float()
layer.std_by_channel = torch.from_numpy(std).float()
if isinstance(layer, nn.Conv2d):
if os.path.exists(os.path.join(lids_model_dir, f"{name}-kernels.npy")):
weights = np.load(os.path.join(lids_model_dir,
f"{name}-kernels.npy"))
in_channels = layer.in_channels
out_channels = layer.out_channels
kernel_size = layer.kernel_size
weights = weights.transpose()
weights = weights.reshape(out_channels, kernel_size[1], kernel_size[0], in_channels)
weights = weights.transpose(0, 3, 2, 1)
layer.weight = nn.Parameter(torch.from_numpy(weights).float())
if isinstance(layer, nn.Conv3d):
if os.path.exists(os.path.join(lids_model_dir, f"{name}-kernels.npy")):
weights = np.load(os.path.join(lids_model_dir,
f"{name}-kernels.npy"))
in_channels = layer.in_channels
out_channels = layer.out_channels
kernel_size = layer.kernel_size
weights = weights.transpose()
weights = weights.reshape(out_channels, kernel_size[0], kernel_size[1], kernel_size[2], in_channels)
weights = weights.transpose(0, 4, 1, 2, 3)
layer.weight = nn.Parameter(torch.from_numpy(weights).float())
if isinstance(layer, MarkerBasedNorm3d):
conv_name = name.replace('m-norm', 'conv')
with open(os.path.join(lids_model_dir,
f"{conv_name}-mean.txt")) as f:
lines = f.readlines()[0]
mean = np.array([float(line) for line in lines.split(' ') if len(line) > 0])
with open(os.path.join(lids_model_dir,
f"{conv_name}-stdev.txt")) as f:
lines = f.readlines()[0]
std = np.array([float(line) for line in lines.split(' ') if len(line) > 0])
layer.mean_by_channel = nn.Parameter(torch.from_numpy(mean.reshape(1, -1, 1, 1, 1)).float())
layer.std_by_channel = nn.Parameter(torch.from_numpy(std.reshape(1, -1, 1, 1, 1)).float())
'''for name, layer in model.classifier.named_children():
print(name)
if isinstance(layer, SpecialLinearLayer):
if os.path.exists(os.path.join(lids_model_dir, f"{name}-weights.npy")):
weights = np.load(os.path.join(lids_model_dir,
f"split{split}-{name}-weights.npy"))
weights = weights.transpose()
with open(os.path.join(lids_model_dir,
f"{name}-mean.txt")) as f:
lines = f.readlines()
mean = np.array([float(line) for line in lines])
with open(os.path.join(lids_model_dir,
f"{name}-stdev.txt")) as f:
lines = f.readlines()
std = np.array([float(line) for line in lines])
layer.mean = torch.from_numpy(mean.reshape(1, -1)).float()
layer.std = torch.from_numpy(std.reshape(1, -1)).float()
layer._linear.weight = nn.Parameter(torch.from_numpy(weights).float())'''
print("Finish loading...")
return model
def save_lids_model(model, architecture, split, outputs_dir, model_name):
if not isinstance(model, LIDSConvNet):
pass
print("Saving model in LIDS format...")
if model_name.endswith('.pt'):
model_name = model_name.replace('.pt', '')
if not os.path.exists(os.path.join(outputs_dir, model_name)):
os.makedirs(os.path.join(outputs_dir, model_name))
if isinstance(split, str):
split_basename = os.path.basename(split)
split = re.findall(r'\d+', split_basename)
if len(split) == 0:
split = 1
else:
split = int(split[0])
layer_specs = get_arch_in_lids_format(architecture, split)
conv_count = 1
for _, layer in model.feature_extractor.named_children():
if isinstance(layer, SpecialConvLayer):
weights = layer.conv.weight.detach().cpu()
num_kernels = weights.size(0)
weights = weights.reshape(num_kernels, -1)
weights = weights.transpose(0, 1)
mean = layer.mean_by_channel.detach().cpu()
std = layer.std_by_channel.detach().cpu()
mean = mean.reshape(1, -1)
std = std.reshape(1, -1)
np.save(os.path.join(outputs_dir, model_name, f"conv{conv_count}-kernels.npy"), weights.float())
np.savetxt(os.path.join(outputs_dir, model_name, f"conv{conv_count}-mean.txt"), mean.float())
np.savetxt(os.path.join(outputs_dir, model_name, f"conv{conv_count}-stdev.txt"), std.float())
conv_count += 1
for i, layer_spec in enumerate(layer_specs, 1):
with open(os.path.join(outputs_dir, model_name, f"convlayerseeds-layer{i}.json"), 'w') as f:
json.dump(layer_spec, f, indent=4)
'''for name, layer in model.classifier.named_children():
if isinstance(layer, SpecialLinearLayer):
weights = layer._linear.weight.detach().cpu()
weights.transpose(0, 1)
mean = layer.mean.detach().cpu()
std = layer.std.detach().cpu()
mean = mean.reshape(-1)
std = std.reshape(-1)
np.save(os.path.join(outputs_dir, model_name, f"{name}-weights.npy"), weights.float())
np.savetxt(os.path.join(outputs_dir, model_name, f"{name}-mean.txt"), mean.float())
np.savetxt(os.path.join(outputs_dir, model_name, f"{name}-std.txt"), std.float())'''
def _calculate_metrics(true_labels, pred_labels):
average = 'binary' if np.unique(true_labels).shape[0] == 2 else 'weighted'
acc = 1.0*(true_labels == pred_labels).sum()/true_labels.shape[0]
precision, recall, f_score, support = precision_recall_fscore_support(true_labels, pred_labels, zero_division=0)
precision_w, recall_w, f_score_w, _ = precision_recall_fscore_support(true_labels, pred_labels, average=average, zero_division=0)
cm = confusion_matrix(true_labels, pred_labels)
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print("#" * 50)
print(colored("Acc", "yellow"),f': {colored(f"{acc:.6f}", "blue", attrs=["bold"])}')
print("-" * 50)
print(colored("F1-score", "yellow"), f': {colored(f"{f1_score(true_labels, pred_labels, average=average):.6f}", "blue", attrs=["bold"])}')
print("-" * 50)
print("Accuracy", *cm.diagonal())
print("-" * 50)
print("Precision:", *precision)
print("Recall:", *recall)
print("F-score:", *f_score)
print("-" * 50)
print("W-Precision:", precision_w)
print("W-Recall:", recall_w)
print("W-F-score:", f_score_w)
print("-" * 50)
print("Kappa {}".format(cohen_kappa_score(true_labels, pred_labels)))
print("-" * 50)
print("Suport", *support)
print("#" * 50)
def validate_model(model,
val_set,
criterion=nn.CrossEntropyLoss(),
batch_size=32,
device='cpu'):
dataloader = DataLoader(val_set,
batch_size=batch_size,
shuffle=True,
drop_last=False)
model.eval()
model.to(device)
running_loss = 0.0
running_corrects = 0.0
true_labels = torch.Tensor([]).long()
pred_labels = torch.Tensor([]).long()
print("Validating...")
for i, data in enumerate(dataloader, 0):
inputs, labels = data
inputs, labels = inputs.to(device), labels.to(device)
with torch.set_grad_enabled(False):
outputs = model(inputs)
loss = criterion(outputs, labels)
preds = torch.max(outputs, 1)[1]
running_loss += loss.item()*inputs.size(0)/len(val_set)
running_corrects += torch.sum(preds == labels.data)
true_labels = torch.cat((true_labels, labels.cpu()))
pred_labels = torch.cat((pred_labels, preds.cpu()))
print('Val - loss: {:.6f}'.format(running_loss))
print("Calculating metrics...")
_calculate_metrics(true_labels, pred_labels)
def train_svm(model, train_set, batch_size=32, max_iter=10000, device='cpu', C=100, degree=3):
print("Preparing to train SVM")
clf = svm.SVC(max_iter=max_iter, C=C, degree=degree, gamma='auto', coef0=0, decision_function_shape='ovo', kernel='linear')
dataloader = DataLoader(train_set, batch_size=batch_size, shuffle=False, drop_last=False)
model.eval()
model.to(device)
features = torch.Tensor([])
y = torch.Tensor([]).long()
for inputs, labels in dataloader:
inputs, labels = inputs.to(device), labels.to(device)
outputs = model(inputs).detach()
features = torch.cat((features, outputs.cpu()))
y = torch.cat((y, labels.cpu()))
print("Fitting SVM...")
clf.fit(features.flatten(start_dim=1), y)
print("Done")
return clf
def save_svm(clf, outputs_dir, svm_filename):
if not os.path.exists(outputs_dir):
os.makedirs(outputs_dir)
dir_to_save = os.path.join(outputs_dir, svm_filename)
print("Saving SVM...")
joblib.dump(clf, dir_to_save, compress=9)
def load_svm(svm_path):
print("Loading SVM...")
clf = joblib.load(svm_path)
return clf
def validate_svm(model, clf, val_set, batch_size=32, device='cpu'):
dataloader = DataLoader(val_set, batch_size=batch_size, shuffle=False, drop_last=False)
model.eval()
model.to(device)
true_labels = torch.Tensor([]).long()
pred_labels = torch.Tensor([]).long()
for i, data in enumerate(dataloader, 0):
inputs, labels = data
inputs, labels = inputs.to(device), labels.to(device)
if hasattr(model, "features"):
outputs = model.features(inputs).detach()
else:
outputs = model(inputs).detach()
preds = clf.predict(outputs.cpu().flatten(start_dim=1))
true_labels = torch.cat((true_labels, labels.cpu()))
pred_labels = torch.cat((pred_labels, torch.from_numpy(preds)))
print("Calculating metrics...")
_calculate_metrics(true_labels, pred_labels)
def _images_close_to_center(images, centers):
_images = []
for center in centers:
_center = np.expand_dims(center, 0)
dist = cdist(images, _center)
_images.append(images[np.argmin(dist)])
return np.array(_images)
def _find_elems_in_array(a, elems):
indices = []
for elem in elems:
_elem = np.expand_dims(elem, 0)
mask = np.all(a == _elem, axis=1)
indice = np.where(mask)[0][0:1].item()
indices.append(indice)
return indices
def select_images_to_put_markers(dataset, class_proportion=0.05):
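# Strategy: flatten every image, run MiniBatchKMeans per class with roughly
# class_proportion * (number of images in that class) clusters, and keep the image
# closest to each cluster centre as a representative on which markers can be placed.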
dataloader = DataLoader(dataset, batch_size=64, shuffle=False, drop_last=False)
all_images = None
all_labels = None
input_shape = dataset[0][0].shape
for images, labels in dataloader:
if all_images is None:
all_images = images
all_labels = labels
else:
all_images = torch.cat((all_images, images))
all_labels = torch.cat((all_labels, labels))
all_images = all_images.flatten(1).numpy()
all_labels = all_labels.numpy()
possible_labels = np.unique(all_labels)
images_names = []
roots = None
for label in possible_labels:
images_of_label = all_images[all_labels == label]
n_clusters = max(1, math.floor(images_of_label.shape[0]*class_proportion))
kmeans = MiniBatchKMeans(n_clusters=n_clusters, random_state=42)
kmeans.fit(images_of_label)
roots_of_label = _images_close_to_center(images_of_label, kmeans.cluster_centers_)
if roots is None:
roots = roots_of_label
else:
roots = np.concatenate((roots, roots_of_label))
indices = _find_elems_in_array(all_images, roots_of_label)
for indice in indices:
images_names.append(dataset.images_names[indice])
return roots.reshape(-1, *input_shape), images_names
def _label_of_image(image_name):
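# e.g. (hypothetical file name) "3_orchid_0021.png" -> label 2: the 1-based class id
# before the first underscore becomes a 0-based label.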
if not isinstance(image_name, str):
raise TypeError("Parameter image_name must be a string.")
i = image_name.index("_")
label = int(image_name[0:i]) - 1
return label
def split_dataset(dataset_dir, train_size, val_size=0, test_size=None, stratify=True):
if os.path.exists(os.path.join(dataset_dir, 'files.txt')):
with open(os.path.join(dataset_dir, 'files.txt'), 'r') as f:
filenames = f.read().split('\n')
filenames = [filename for filename in filenames if len(filename) > 0]
else:
filenames = os.listdir(dataset_dir)
filenames.sort()
labels = np.array([_label_of_image(filename) for filename in filenames])
if train_size > 1:
train_size = int(train_size)
train_split, test_split, _, test_labels = train_test_split(filenames,
labels,
train_size=train_size,
test_size=test_size,
stratify=labels)
val_size = 0 if val_size is None else val_size
val_split = []
if val_size > 0:
test_size = len(test_split) - val_size
test_size = int(test_size) if test_size > 0 else test_size
val_split, test_split = train_test_split(test_split,
test_size=test_size,
stratify=test_labels)
return train_split, val_split, test_split
def compute_grad_cam(model, image, target_layers, class_label=0, device="cpu"):
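# Grad-CAM sketch: forward pass while hooking the requested feature layers, backprop
# the score of class_label, average the gradients spatially to get one weight per
# channel, take the weighted sum of the feature maps, clamp negatives, then resize to
# the input resolution and normalise to [0, 1].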
model = model.to(device)
image = image.to(device)
model.eval()
gradients = []
features = []
if image.dim() == 3:
x = image.unsqueeze(0)
else:
x = image
for name, module in model._modules.items():
if name == "features" or name == "feature_extractor":
for layer_name, layer in module.named_children():
x = layer(x)
if layer_name in target_layers:
x.register_hook(lambda grad : gradients.append(grad))
features.append(x)
elif name == "classifier":
x = x.flatten(1)
x = module(x)
else:
x = module(x)
y = x
one_hot = torch.zeros_like(y, device=device)
one_hot[0][class_label] = 1
one_hot = torch.sum(one_hot * y)
model.zero_grad()
one_hot.backward()
weights = torch.mean(gradients[-1], axis=(2,3))[0, :]
target = features[-1][0].detach()
cam = torch.zeros_like(target[0])
for i, w in enumerate(weights):
cam += w * target[i, :, ]
cam[cam < 0] = 0.0
print(cam.shape)
print(image.shape)
resize = Resize(image.shape[1:])
cam = resize(cam.unsqueeze(0))
cam = cam - cam.min()
cam = cam/cam.max()
return cam.cpu().numpy()
def load_mimage(path):
assert ift is not None, "PyIFT is not available"
mimage = ift.ReadMImage(path)
return mimage.AsNumPy().squeeze()
def save_mimage(path, image):
assert ift is not None, "PyIFT is not available"
mimage = ift.CreateMImageFromNumPy(np.ascontiguousarray(image))
ift.WriteMImage(mimage, path)
def save_opf_dataset(path, opf_dataset):
assert ift is not None, "PyIFT is not available"
ift.WriteDataSet(opf_dataset, path)
def load_opf_dataset(path):
assert ift is not None, "PyIFT is not available"
opf_dataset = ift.ReadDataSet(path)
return opf_dataset
def save_intermediate_outputs(model, dataset, outputs_dir, batch_size=16, layers=None, only_features=True, format="mimg", remove_border=0, device='cpu'):
if only_features:
if hasattr(model, "features"):
_model = model.features
else:
_model = model.feature_extractor
else:
_model = model
last_layer = None
for layer_name in layers:
layer_dir = os.path.join(outputs_dir, 'intermediate-outputs', layer_name)
if not os.path.exists(layer_dir):
os.makedirs(layer_dir)
last_layer = layer_name
_model.eval()
_model.to(device)
dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=False, drop_last=False)
outputs = {}
outputs_count = {}
outputs_names = dataset.images_names
print("Saving intermediate outputs...")
for inputs, _ in dataloader:
inputs = inputs.to(device)
for layer_name, layer in _model.named_children():
_outputs = layer(inputs)
if layer_name == last_layer and remove_border > 0:
b = remove_border
_outputs = _outputs[:,:, b:-b, b:-b]
inputs = _outputs
if layer_name not in outputs_count:
outputs_count[layer_name] = 0
if layers is None or len(layers) == 0 or layer_name in layers:
if format == 'zip':
if layer_name not in outputs:
outputs[layer_name] = _outputs.detach().cpu()
else:
outputs[layer_name] = torch.cat((outputs[layer_name],_outputs.detach().cpu()))
elif format in ["mimg", "npy"]:
layer_dir = os.path.join(outputs_dir, 'intermediate-outputs', layer_name)
_outputs = _outputs.detach().cpu()
for _output in _outputs:
_output_dir = os.path.join(layer_dir, f"{outputs_names[outputs_count[layer_name]].split('.')[0]}.{format}")
if format == "npy":
np.save(_output_dir, _output)
else:
save_mimage(_output_dir, _output.permute(1, 2, 0).numpy())
outputs_count[layer_name] += 1
del _outputs
torch.cuda.empty_cache()
if format == 'zip':
for layer_name in outputs:
_outputs = outputs[layer_name]
_outputs = _outputs.permute(0, 2, 3, 1).numpy().reshape(_outputs.shape[0], -1)
labels = np.array([int(image_name[0:image_name.index("_")]) - 1 for image_name in outputs_names]).astype(np.int32)
opf_dataset = ift.CreateDataSetFromNumPy(_outputs, labels + 1)
opf_dataset.SetNClasses = labels.max() + 1
ift.SetStatus(opf_dataset, ift.IFT_TRAIN)
ift.AddStatus(opf_dataset, ift.IFT_SUPERVISED)
# opf_dataset.SetLabels(labels + 1)
_output_dir = os.path.join(layer_dir, "dataset.zip")
save_opf_dataset(_output_dir, opf_dataset)
def get_arch_in_lids_format(architecture, split):
layer_names = list(architecture['features']['layers'].keys())
layers = architecture['features']['layers']
operations = [layers[layer_name]['operation'] for layer_name in layer_names]
conv_layers_count = 1
lids_layer_specs = []
for i in range(len(layer_names)):
layer_spec = {}
if operations[i] == 'conv2d':
params = layers[layer_names[i]]['params']
kernel_size = params['kernel_size']
dilation = params['dilation']
number_of_kernels_per_markers = params['number_of_kernels_per_marker']
out_channels = params['out_channels']
layer_spec['layer'] = conv_layers_count
layer_spec['split'] = split
if isinstance(kernel_size, int):
layer_spec['kernelsize'] = [kernel_size, kernel_size, 0]
else:
layer_spec['kernelsize'] = [*kernel_size, 0]
if isinstance(dilation, int):
layer_spec['dilationrate'] = [dilation, dilation, 0]
else:
layer_spec['dilationrate'] = [*dilation, 0]
layer_spec['nkernelspermarker'] = number_of_kernels_per_markers
layer_spec['finalnkernels'] = out_channels
layer_spec['nkernelsperimage'] = 10000
if i + 1 < len(layer_names) and operations[i+1] == 'relu':
layer_spec['relu'] = 1
else:
layer_spec['relu'] = 0
conv_layers_count += 1
j = i + 1 if layer_spec['relu'] == 0 else i + 2
pool_spec = {}
if j < len(layer_names) and 'pool' in operations[j]:
if operations[j] == 'max_pool2d':
pool_spec['pool_type'] = 2
elif operations[j] == 'avg_pool2d':
pool_spec['pool_type'] = 1
pool_params = layers[layer_names[j]]['params']
kernel_size = pool_params['kernel_size']
stride = pool_params['stride']
if isinstance(kernel_size, int):
kernel_size = [kernel_size, kernel_size]
pool_spec['poolxsize'] = kernel_size[0]
pool_spec['poolysize'] = kernel_size[1]
pool_spec['poolzsize'] = 0
pool_spec['stride'] = stride
else:
pool_spec['pool_type'] = 0
layer_spec['pooling'] = pool_spec
lids_layer_specs.append(layer_spec)
return lids_layer_specs
def create_arch(layers_dir):
layers_info_files = [f for f in os.listdir(layers_dir) if f.endswith('.json')]
layers_info_files.sort()
arch = OrderedDict([('features', {'type': 'sequential', 'layers': OrderedDict()})])
layers = arch['features']['layers']
for i, layer_info_file in enumerate(layers_info_files, 1):
with open(os.path.join(layers_dir, layer_info_file), 'r') as f:
layer_info = json.load(f)
# print(layer_info)
conv_spec = {
'operation': 'conv2d',
'params': {
'kernel_size': layer_info['kernelsize'][:-1],
'number_of_kernels_per_marker': layer_info['nkernelspermarker'],
'dilation': layer_info['dilationrate'][:-1],
'out_channels': layer_info['finalnkernels'],
'padding': [floor((layer_info['kernelsize'][0] + (layer_info['kernelsize'][0] - 1) * (layer_info['dilationrate'][0] -1))/2),
floor((layer_info['kernelsize'][1] + (layer_info['kernelsize'][1] - 1) * (layer_info['dilationrate'][1] -1))/2)],
'stride': 1
}
}
if layer_info['relu'] == 1:
relu_spec = {
'operation': 'relu',
'params': {
'inplace': True
}
}
else:
relu_spec = None
pool_spec = None
if layer_info['pooling']['pooltype'] != 0:
pool_spec = {
'params': {
'kernel_size': [layer_info['pooling']['poolxsize'], layer_info['pooling']['poolysize']],
'stride': layer_info['pooling']['poolstride'],
'padding': [floor(layer_info['pooling']['poolxsize']/2), floor(layer_info['pooling']['poolysize']/2)]
}
}
if layer_info['pooling']['pooltype'] == 2:
pool_spec['operation'] = 'max_pool2d'
elif layer_info['pooling']['pooltype'] == 1:
pool_spec['operation'] = 'avg_pool2d'
layers[f'conv{i}'] = conv_spec
if relu_spec is not None:
layers[f'relu{i}'] = relu_spec
if pool_spec is not None:
layers[f'pool{i}'] = pool_spec
return arch
def save_arch(arch, output_path):
dirname = os.path.dirname(output_path)
if not os.path.exists(dirname) and dirname != '':
os.makedirs(os.path.dirname(output_path))
with open(output_path, 'w') as f:
json.dump(arch, f, indent=4)
|
python
|
from functools import partial
from PyQt5.QtCore import pyqtSignal, QTimer, Qt
from PyQt5.QtWidgets import QInputDialog, QLabel, QVBoxLayout, QLineEdit, QWidget, QPushButton
from electrum.i18n import _
from electrum.plugin import hook
from electrum.wallet import Standard_Wallet
from electrum.gui.qt.util import WindowModalDialog
from .ledger import LedgerPlugin, Ledger_Client, AtomicBoolean, AbstractTracker
from ..hw_wallet.qt import QtHandlerBase, QtPluginBase
from ..hw_wallet.plugin import only_hook_if_libraries_available
class Plugin(LedgerPlugin, QtPluginBase):
icon_unpaired = "ledger_unpaired.png"
icon_paired = "ledger.png"
def create_handler(self, window):
return Ledger_Handler(window)
@only_hook_if_libraries_available
@hook
def receive_menu(self, menu, addrs, wallet):
if type(wallet) is not Standard_Wallet:
return
keystore = wallet.get_keystore()
if type(keystore) == self.keystore_class and len(addrs) == 1:
def show_address():
keystore.thread.add(partial(self.show_address, wallet, addrs[0]))
menu.addAction(_("Show on Ledger"), show_address)
class Ledger_UI(WindowModalDialog):
def __init__(self, parse_data: AbstractTracker, atomic_b: AtomicBoolean, parent=None, title='Ledger UI'):
super().__init__(parent, title)
# self.setWindowModality(Qt.NonModal)
# Thread interrupter: when the user cancels, atomic_b is set to True so the worker thread can stop.
self.parse_data = parse_data
self.atomic_b = atomic_b
self.label = QLabel('')
self.label.setText(_("Generating Information..."))
layout = QVBoxLayout(self)
layout.addWidget(self.label)
self.cancel = QPushButton(_('Cancel'))
def end():
self.finished()
self.close()
self.atomic_b.set_true()
self.cancel.clicked.connect(end)
layout.addWidget(self.cancel)
self.setLayout(layout)
self.setWindowFlags(self.windowFlags() | Qt.CustomizeWindowHint)
self.setWindowFlags(self.windowFlags() & ~Qt.WindowCloseButtonHint)
self.timer = QTimer()
self.timer.timeout.connect(self.update_text)
def begin(self):
self.timer.start(500)
def finished(self):
self.timer.stop()
def update_text(self):
self.label.setText(self.parse_data.parsed_string())
class Ledger_Handler(QtHandlerBase):
setup_signal = pyqtSignal()
auth_signal = pyqtSignal(object, object)
ui_start_signal = pyqtSignal(object, object, object)
ui_stop_signal = pyqtSignal()
def __init__(self, win):
super(Ledger_Handler, self).__init__(win, 'Ledger')
self.setup_signal.connect(self.setup_dialog)
self.auth_signal.connect(self.auth_dialog)
self.ui_start_signal.connect(self.ui_dialog)
self.ui_stop_signal.connect(self.stop_ui_dialog)
def word_dialog(self, msg):
response = QInputDialog.getText(self.top_level_window(), "Ledger Wallet Authentication", msg,
QLineEdit.Password)
if not response[1]:
self.word = None
else:
self.word = str(response[0])
self.done.set()
def message_dialog(self, msg):
self.clear_dialog()
self.dialog = dialog = WindowModalDialog(self.top_level_window(), _("Ledger Status"))
l = QLabel(msg)
vbox = QVBoxLayout(dialog)
vbox.addWidget(l)
dialog.show()
def ui_dialog(self, title, stopped_boolean, parse_data):
self.clear_dialog()
self.dialog = Ledger_UI(parse_data, stopped_boolean, self.top_level_window(), title)
self.dialog.show()
self.dialog.begin()
def stop_ui_dialog(self):
if isinstance(self.dialog, Ledger_UI):
self.dialog.finished()
def auth_dialog(self, data, client: 'Ledger_Client'):
try:
from .auth2fa import LedgerAuthDialog
except ImportError as e:
self.message_dialog(repr(e))
return
dialog = LedgerAuthDialog(self, data, client=client)
dialog.exec_()
self.word = dialog.pin
self.done.set()
def get_auth(self, data, *, client: 'Ledger_Client'):
self.done.clear()
self.auth_signal.emit(data, client)
self.done.wait()
return self.word
def get_setup(self):
self.done.clear()
self.setup_signal.emit()
self.done.wait()
return
def get_ui(self, title, atomic_b, data):
self.ui_start_signal.emit(title, atomic_b, data)
def finished_ui(self):
self.ui_stop_signal.emit()
def setup_dialog(self):
self.show_error(_('Initialization of Ledger HW devices is currently disabled.'))
|
python
|
from data.scrapers import *
import pandas as pd
from wordcloud import WordCloud
import matplotlib.pyplot as plt
def model_run(model, freq='1111111', existing=None):
    # avoid appending to None when no accumulator list is supplied
    if existing is None:
        existing = []
    scraper = model(freq)
    dfs = scraper.run()
    for df in dfs:
        existing.append(df)
    return existing
def generate_wordcloud(text, year=None):
wordcloud = WordCloud().generate(text)
plt.imshow(wordcloud, interpolation='bilinear')
plt.axis("off")
if year:
plt.savefig("../assets/img/jellyfish_{}.png".format(str(year)), format="png")
plt.show()
def count_frequency(wordtxt):
    my_list = wordtxt.split()
    freq = {}
    for word in my_list:
        freq[word] = freq.get(word, 0) + 1
    # sort by ascending frequency
    freq = {k: v for k, v in sorted(freq.items(), key=lambda item: item[1])}
    return freq
if __name__ == "__main__":
dfs = list()
model_run(SmithsonianScraper, freq='1111111', existing=dfs)
model_run(FastCompanyScraper, freq='1111111', existing=dfs)
model_run(WorldEconomicForumScraper, freq='1111111', existing=dfs)
model_run(NewScientistScraper, freq='1111111', existing=dfs)
model_run(TimeScraper, freq='1111111', existing=dfs)
model_run(JStorScraper, freq='1111111', existing=dfs)
model_run(QuartzScraper, freq='1111111', existing=dfs)
model_run(MarineScienceScraper, freq='1111111', existing=dfs)
model_run(BBCEarthScraper, freq='1111111', existing=dfs)
model_run(BBCNewsScraper, freq='1111111', existing=dfs)
model_run(TheGuardianScraper, freq='1111111', existing=dfs)
dfs = pd.DataFrame(dfs).sort_values(by="date")
grouped_df = dfs.groupby(dfs['date'].dt.year)['words'].agg(['sum', 'count']).reset_index()
    # iterrows() yields copies of each row, so build the column with apply instead
    grouped_df['freq'] = grouped_df['sum'].apply(count_frequency)
print(grouped_df)
# for index, row in grouped_df.iterrows():
# generate_wordcloud(row['sum'], row['date'])
|
python
|
import numpy as np
from ..pakbase import Package
class ModflowFlwob(Package):
"""
Head-dependent flow boundary Observation package class. Minimal working
example that will be refactored in a future version.
Parameters
----------
nqfb : int
Number of cell groups for the head-dependent flow boundary
observations
nqcfb : int
Greater than or equal to the total number of cells in all cell groups
nqtfb : int
Total number of head-dependent flow boundary observations for all cell
groups
iufbobsv : int
unit number where output is saved
tomultfb : float
Time-offset multiplier for head-dependent flow boundary observations.
The product of tomultfb and toffset must produce a time value in units
consistent with other model input. tomultfb can be dimensionless or
can be used to convert the units of toffset to the time unit used in
the simulation.
nqobfb : int list of length nqfb
The number of times at which flows are observed for the group of cells
nqclfb : int list of length nqfb
Is a flag, and the absolute value of nqclfb is the number of cells in
the group. If nqclfb is less than zero, factor = 1.0 for all cells in
the group.
obsnam : string list of length nqtfb
Observation name
irefsp : int of length nqtfb
Stress period to which the observation time is referenced.
The reference point is the beginning of the specified stress period.
toffset : float list of length nqtfb
Is the time from the beginning of the stress period irefsp to the time
of the observation. toffset must be in units such that the product of
toffset and tomultfb are consistent with other model input. For
steady state observations, specify irefsp as the steady state stress
period and toffset less than or equal to perlen of the stress period.
        If perlen is zero, set toffset to zero. If the observation falls
        within a time step, linear interpolation is used between values at
        the beginning and end of the time step.
flwobs : float list of length nqtfb
Observed flow value from the head-dependent flow boundary into the
aquifer (+) or the flow from the aquifer into the boundary (-)
layer : int list of length(nqfb, nqclfb)
layer index for the cell included in the cell group
row : int list of length(nqfb, nqclfb)
row index for the cell included in the cell group
column : int list of length(nqfb, nqclfb)
column index of the cell included in the cell group
factor : float list of length(nqfb, nqclfb)
Is the portion of the simulated gain or loss in the cell that is
included in the total gain or loss for this cell group (fn of eq. 5).
flowtype : string
String that corresponds to the head-dependent flow boundary condition
type (CHD, GHB, DRN, RIV)
extension : list of string
Filename extension. If extension is None, extension is set to
['chob','obc','gbob','obg','drob','obd', 'rvob','obr']
(default is None).
no_print : boolean
When True or 1, a list of flow observations will not be
written to the Listing File (default is False)
options : list of strings
Package options (default is None).
unitnumber : list of int
File unit number. If unitnumber is None, unitnumber is set to
[40, 140, 41, 141, 42, 142, 43, 143] (default is None).
filenames : str or list of str
Filenames to use for the package and the output files. If
filenames=None the package name will be created using the model name
and package extension and the flwob output name will be created using
the model name and .out extension (for example,
modflowtest.out), if iufbobsv is a number greater than zero.
If a single string is passed the package will be set to the string
and flwob output name will be created using the model name and .out
extension, if iufbobsv is a number greater than zero. To define the
names for all package files (input and output) the length of the list
of strings should be 2. Default is None.
Attributes
----------
Methods
-------
See Also
--------
Notes
-----
This represents a minimal working example that will be refactored in a
future version.
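
    Examples
    --------
    A minimal, hypothetical construction (the model and observation values
    below are illustrative placeholders rather than data from a real run):

    >>> import flopy
    >>> ml = flopy.modflow.Modflow()
    >>> fbob = flopy.modflow.ModflowFlwob(ml, nqfb=1, nqcfb=1, nqtfb=1,
    ...                                   nqobfb=[1], nqclfb=[1],
    ...                                   obsnam=['obs1'], irefsp=[1],
    ...                                   toffset=[0.0], flwobs=[-1.5],
    ...                                   layer=[[1]], row=[[2]],
    ...                                   column=[[3]], factor=[[1.0]],
    ...                                   flowtype='GHB')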
"""
def __init__(self, model, nqfb=0, nqcfb=0, nqtfb=0, iufbobsv=0,
tomultfb=1.0, nqobfb=None, nqclfb=None, obsnam=None,
irefsp=None, toffset=None, flwobs=None, layer=None,
row=None, column=None, factor=None, flowtype=None,
extension=None, no_print=False, options=None,
filenames=None, unitnumber=None):
"""
Package constructor
"""
if nqobfb is None:
nqobfb = []
if nqclfb is None:
nqclfb = []
if obsnam is None:
obsnam = []
if irefsp is None:
irefsp = []
if toffset is None:
toffset = []
if flwobs is None:
flwobs = []
if layer is None:
layer = []
if row is None:
row = []
if column is None:
column = []
if factor is None:
factor = []
if extension is None:
extension = ['chob', 'obc', 'gbob', 'obg', 'drob', 'obd',
'rvob', 'obr']
if unitnumber is None:
unitnumber = [40, 140, 41, 141, 42, 142, 43, 143]
if flowtype.upper().strip() == 'CHD':
name = ['CHOB', 'DATA']
extension = extension[0:2]
unitnumber = unitnumber[0:2]
iufbobsv = unitnumber[1]
self.url = 'chob.htm'
self.heading = '# CHOB for MODFLOW, generated by Flopy.'
elif flowtype.upper().strip() == 'GHB':
name = ['GBOB', 'DATA']
extension = extension[2:4]
unitnumber = unitnumber[2:4]
iufbobsv = unitnumber[1]
self.url = 'gbob.htm'
self.heading = '# GBOB for MODFLOW, generated by Flopy.'
elif flowtype.upper().strip() == 'DRN':
name = ['DROB', 'DATA']
extension = extension[4:6]
unitnumber = unitnumber[4:6]
iufbobsv = unitnumber[1]
self.url = 'drob.htm'
self.heading = '# DROB for MODFLOW, generated by Flopy.'
elif flowtype.upper().strip() == 'RIV':
name = ['RVOB', 'DATA']
extension = extension[6:8]
unitnumber = unitnumber[6:8]
iufbobsv = unitnumber[1]
self.url = 'rvob.htm'
self.heading = '# RVOB for MODFLOW, generated by Flopy.'
else:
msg = 'ModflowFlwob: flowtype must be CHD, GHB, DRN, or RIV'
raise KeyError(msg)
# set filenames
if filenames is None:
filenames = [None, None]
elif isinstance(filenames, str):
filenames = [filenames, None]
elif isinstance(filenames, list):
if len(filenames) < 2:
filenames.append(None)
# call base package constructor
Package.__init__(self, model, extension=extension, name=name,
unit_number=unitnumber,
allowDuplicates=True, filenames=filenames)
self.nqfb = nqfb
self.nqcfb = nqcfb
self.nqtfb = nqtfb
self.iufbobsv = iufbobsv
self.tomultfb = tomultfb
self.nqobfb = nqobfb
self.nqclfb = nqclfb
self.obsnam = obsnam
self.irefsp = irefsp
self.toffset = toffset
self.flwobs = flwobs
self.layer = layer
self.row = row
self.column = column
self.factor = factor
# -create empty arrays of the correct size
self.layer = np.zeros((self.nqfb, max(self.nqclfb)), dtype='int32')
self.row = np.zeros((self.nqfb, max(self.nqclfb)), dtype='int32')
self.column = np.zeros((self.nqfb, max(self.nqclfb)), dtype='int32')
self.factor = np.zeros((self.nqfb, max(self.nqclfb)), dtype='float32')
self.nqobfb = np.zeros((self.nqfb), dtype='int32')
self.nqclfb = np.zeros((self.nqfb), dtype='int32')
self.irefsp = np.zeros((self.nqtfb), dtype='int32')
self.toffset = np.zeros((self.nqtfb), dtype='float32')
self.flwobs = np.zeros((self.nqtfb), dtype='float32')
# -assign values to arrays
self.nqobfb[:] = nqobfb
self.nqclfb[:] = nqclfb
self.obsnam[:] = obsnam
self.irefsp[:] = irefsp
self.toffset[:] = toffset
self.flwobs[:] = flwobs
for i in range(self.nqfb):
self.layer[i, :len(layer[i])] = layer[i]
self.row[i, :len(row[i])] = row[i]
self.column[i, :len(column[i])] = column[i]
self.factor[i, :len(factor[i])] = factor[i]
# add more checks here
self.no_print = no_print
self.np = 0
if options is None:
options = []
if self.no_print:
options.append('NOPRINT')
self.options = options
# add checks for input compliance (obsnam length, etc.)
self.parent.add_package(self)
def write_file(self):
"""
Write the package file
Returns
-------
None
"""
# open file for writing
f_fbob = open(self.fn_path, 'w')
# write header
f_fbob.write('{}\n'.format(self.heading))
        # write sections 1 and 2 (the optional NOPRINT flag is appended below)
line = '{:10d}'.format(self.nqfb)
line += '{:10d}'.format(self.nqcfb)
line += '{:10d}'.format(self.nqtfb)
line += '{:10d}'.format(self.iufbobsv)
if self.no_print or 'NOPRINT' in self.options:
line += '{: >10}'.format('NOPRINT')
line += '\n'
f_fbob.write(line)
f_fbob.write('{:10e}\n'.format(self.tomultfb))
# write sections 3-5 looping through observations groups
c = 0
for i in range(self.nqfb):
# while (i < self.nqfb):
# write section 3
f_fbob.write('{:10d}{:10d}\n'.format(self.nqobfb[i],
self.nqclfb[i]))
# Loop through observation times for the groups
for j in range(self.nqobfb[i]):
# write section 4
line = '{}{:10d}{:10.4g} {:10.4g}\n'.format(self.obsnam[c],
self.irefsp[c],
self.toffset[c],
self.flwobs[c])
f_fbob.write(line)
c += 1 # index variable
# write section 5 - NOTE- need to adjust factor for multiple
# observations in the same cell
for j in range(abs(self.nqclfb[i])):
# set factor to 1.0 for all cells in group
if self.nqclfb[i] < 0:
self.factor[i, :] = 1.0
line = '{:10d}'.format(self.layer[i, j])
line += '{:10d}'.format(self.row[i, j])
line += '{:10d}'.format(self.column[i, j])
                line += ' '
                # note: is 10f precision good enough here?
                line += '{:10f}\n'.format(self.factor[i, j])
f_fbob.write(line)
f_fbob.close()
#
# swm: BEGIN hack for writing standard file
sfname = self.fn_path
sfname += '_ins'
# write header
f_ins = open(sfname, 'w')
f_ins.write('jif @\n')
f_ins.write('StandardFile 0 1 {}\n'.format(self.nqtfb))
for i in range(0, self.nqtfb):
f_ins.write('{}\n'.format(self.obsnam[i]))
f_ins.close()
# swm: END hack for writing standard file
return
|
python
|
# globals.py
# Logic to get a list of the DBS instances available on DAS.
# Currently hardcoding. There's probably a better way!
instances = ['prod/global', 'prod/phys01', 'prod/phys02', 'prod/phys03', 'prod/caf']
|
python
|
from hashlib import sha1
from multiprocessing.dummy import Lock
m_lock = Lock()
z_lock = Lock()
print(f"是否相等:{m_lock==z_lock}\n{m_lock}\n{z_lock}") # 地址不一样
m_code = hash(m_lock)
z_code = hash(z_lock)
print(f"是否相等:{m_code==z_code}\n{m_code}\n{z_code}") # 值一样
# Java可以使用:identityhashcode
m_code = sha1(str(m_lock).encode("utf-8")).hexdigest()
z_code = sha1(str(z_code).encode("utf-8")).hexdigest()
print(f"是否相等:{m_code==z_code}\n{m_code}\n{z_code}") # 不相等
m_code = id(m_lock)
z_code = id(z_lock)
print(f"是否相等:{m_code==z_code}\n{m_code}\n{z_code}") # 不相等
|
python
|
import codecs
import csv
import json
import os
import random
import sys
directory = str(os.getcwd())
final_data = {"url": "http://10.10.0.112"}
def getNumberRecords():
'''
    Counts the number of username-password records in the admin.csv file
Arguments:
None
Returns:
number of username-password records in admin.csv file
'''
fileDirectory = directory + "/config/admin.csv"
readFile=csv.reader(codecs.open(fileDirectory, encoding='utf-8'),delimiter=",")
number = 0
for x in readFile:
number += 1
return number
def checkFilesExist(botNumber):
'''
Checks if the csv files to be generated already exist
Arguments:
botNumber (int): Number of admin bot concurrently running
Returns:
True, if the files already exist. Else, False
'''
fileNumber = botNumber + 20
number = 0
while (number <= fileNumber):
outputFileDirectory = directory + "/config/admin/adminLogin" + str(number) + ".csv"
if os.path.exists(outputFileDirectory):
number += 1
continue
else:
return False
return True
def genAdminFiles(botNumbers):
'''
Generate csv files for different usernames-passwords according to the number of bots
Arguments:
        botNumbers (int): Number of admin bots running concurrently
Returns:
None
'''
fileNumber = botNumbers + 20
    recordsPerFile = int(getNumberRecords() / fileNumber)
print(recordsPerFile)
adminFileDirectory = directory + "/config/admin.csv"
readFile=csv.reader(codecs.open(adminFileDirectory, encoding='utf-8'),delimiter=",")
number = 0
    for row in readFile:
        outputFileDirectory = directory + "/config/admin/adminLogin" + str(number) + ".csv"
        # append the record, closing the file handle each time
        with open(outputFileDirectory, mode='a', newline='') as writeFile:
            writer = csv.writer(writeFile, delimiter=',')
            writer.writerow(row)
        # round-robin over files 0..fileNumber so every file receives records evenly
        number = (number + 1) % (fileNumber + 1)
def getCredentials(botNumbers):
'''
    Obtain credentials for the bot to log in with
    Arguments:
        botNumbers (int): Number of admin bots running concurrently
    Returns:
        list: [username, password, fileNumber] for the selected credentials file
'''
trackNumber = 0
newRecords = []
credentials = []
number = ((random.randint(1,2000)%23) * (random.randint(1,2000)%17) * (random.randint(1000,2000)%13)) % (botNumbers + 20)
fileDirectory = directory + "/config/admin/adminLogin" + str(number) + ".csv"
print("Reading from " + str(fileDirectory))
readFile=csv.reader(codecs.open(fileDirectory, encoding='utf-8'),delimiter=",")
for rows in readFile:
if (trackNumber == 0):
credentials.append(rows[0])
credentials.append(rows[1])
trackNumber += 1
else:
newRecords.append(rows)
credentials.append(number)
    with open(fileDirectory, mode='w', newline='') as writeFile:
        for record in newRecords:
            writeFile.write(record[0] + ',' + record[1])
            writeFile.write('\n')
return credentials
def writeBack(username, password, fileNumber):
'''
Writes back the credentials to the csv file after all admin actions have been completed
Arguments:
username (str): Username that the bot is logging in with
password (str): Password that the bot is logging in with
fileNumber (int): file number for the csv file that the bot is going to open to read the credentials
Returns:
None
'''
fileDirectory = directory + "/config/admin/adminLogin" + str(fileNumber) + ".csv"
    with open(fileDirectory, mode='a', newline='') as writeFile:
        writer = csv.writer(writeFile, delimiter=',')
        writer.writerow([username, password])
def getUrl():
'''
Obtain the url that the bot is logging into
Arguments:
None
Returns:
        str: the URL that the bot logs in to
'''
return "https://10.10.0.112"
|
python
|
# Copyright 2020 Yuhao Zhang and Arun Kumar. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import glob
SEED = 2018
INPUT_SHAPE = (112, 112, 3)
NUM_CLASSES = 1000
TOP_5 = 'top_k_categorical_accuracy'
TOP_1 = 'categorical_accuracy'
MODEL_ARCH_TABLE = 'model_arch_library'
MODEL_SELECTION_TABLE = 'mst_table'
MODEL_SELECTION_SUMMARY_TABLE = 'mst_table_summary'
class spark_imagenet_cat:
valid_list = [
"hdfs://master:9000/imagenet_parquet/valid/valid_{}.parquet".format(i) for i in range(8)]
train_list = [
"hdfs://master:9000/imagenet_parquet/train/train_{}.parquet".format(i) for i in range(8)]
class spark_imagenet_cat_nfs:
valid_list = [
"/mnt/nfs/hdd/imagenet/valid/valid_{}.parquet".format(i)
for i in range(8)
]
train_list = [
"/mnt/nfs/hdd/imagenet/train/train_{}.parquet".format(i)
for i in range(8)
]
param_grid = {
"learning_rate": [1e-4, 1e-6],
"lambda_value": [1e-4, 1e-6],
"batch_size": [32, 256],
"model": ["vgg16", "resnet50"]
}
param_grid_hetro = {
"learning_rate": [1e-4, 1e-4],
"lambda_value": [1e-4, 1e-4],
"batch_size": [4, 128],
"model": ["nasnetmobile", "mobilenetv2"],
'p': 0.8,
'hetro': True,
'fast': 38,
'slow': 10,
'total': 48
}
param_grid_scalability = {
"learning_rate": [1e-3, 1e-4, 1e-5, 1e-6],
"lambda_value": [1e-4, 1e-6],
"batch_size": [32],
"model": ["resnet50"]
}
param_grid_model_size = {
's': {
"learning_rate": [1e-4, 1e-6],
"lambda_value": [1e-3, 1e-4, 1e-5, 1e-6],
"batch_size": [32],
"model": ["mobilenetv2"]
},
'm': {
"learning_rate": [1e-4, 1e-6],
"lambda_value": [1e-3, 1e-4, 1e-5, 1e-6],
"batch_size": [32],
"model": ["resnet50"]
},
'l': {
"learning_rate": [1e-4, 1e-6],
"lambda_value": [1e-3, 1e-4, 1e-5, 1e-6],
"batch_size": [32],
"model": ["resnet152"]
},
'x': {
"learning_rate": [1e-4, 1e-6],
"lambda_value": [1e-3, 1e-4, 1e-5, 1e-6],
"batch_size": [32],
"model": ["vgg16"]
},
}
param_grid_best_model = {
"learning_rate": [1e-4],
"lambda_value": [1e-4],
"batch_size": [32],
"model": ["resnet50"]
}
param_grid_hyperopt = {
"learning_rate": [0.00001, 0.1],
"lambda_value": [1e-4, 1e-6],
"batch_size": [16, 256],
"model": ["resnet18", "resnet34"]
}
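
# Hedged sketch (not part of the original config): a list-valued grid such as
# `param_grid` is typically expanded into the cross-product of model-selection
# configurations, e.g. the grid above yields 2 * 2 * 2 * 2 = 16 combinations.
from itertools import product

def expand_grid(grid):
    """Return every combination of the list-valued hyperparameters in `grid`."""
    keys = sorted(grid)
    return [dict(zip(keys, combo)) for combo in product(*(grid[k] for k in keys))]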
|
python
|
def ejercicio01MCM():
    # Define variables and other setup
    print("--> EXERCISE 01 <--")
    notaFinal = round(0.0)
    # Input data
    n1 = float(input("Enter the 1st grade: "))
    n2 = float(input("Enter the 2nd grade: "))
    n3 = float(input("Enter the 3rd grade: "))
    n4 = float(input("Enter the 4th grade: "))
    # Processing
    notaFinal = (n1*0.2 + n2*0.15 + n3*0.15 + n4*0.5)
    # Output
    print("The final grade for the course is:", notaFinal)
ejercicio01MCM()
print("")
def ejercicio02MCM():
    # Define variables and other setup
    print("--> EXERCISE 02 <--")
    puntos = 0
    salariomin = 0
    bono = 0
    # Input data
    puntos = int(input("Enter the points: "))
    salariomin = int(input("Enter the minimum salary: "))
    # Processing
    if puntos >= 50 and puntos <= 100:
        bono = (salariomin * 0.10)
    else:
        bono = ("Nothing, maybe next time")
    if puntos >= 101 and puntos <= 150:
        bono = (salariomin * 0.40)
    elif puntos >= 151:
        bono = (salariomin * 0.70)
    # Output
    print("The bonus you will receive is:", bono)
ejercicio02MCM()
print("")
def ejercicio03MCM():
    # Define variables and other setup
    print("--> EXERCISE 03 <--")
    edad = 0
    sexo = 0
    vacuna = ""
    # Input data
    edad = int(input("Enter the age: "))
    sexo = input("Enter sex (male/female): ")
    # Processing (parentheses added: 'and' binds tighter than 'or')
    if (sexo == "female" or sexo == "male") and edad > 70:
        vacuna = ("Type C")
    if sexo == "female" and edad >= 16 and edad <= 69:
        vacuna = ("Type B")
    elif sexo == "male" and edad >= 16 and edad <= 69:
        vacuna = ("Type A")
    if (sexo == "female" or sexo == "male") and edad < 16:
        vacuna = ("Type A")
    # Output
    print("You will receive vaccine:", vacuna)
ejercicio03MCM()
print("")
def ejercicio04MCM():
    # Define variables and other setup
    print("--> EXERCISE 04 <--")
    operador = 0
    resultado = 0
    # Input data
    operador = input("Enter the arithmetic operator: ")
    n1 = int(input("Enter the 1st number: "))
    n2 = int(input("Enter the 2nd number: "))
    # Processing
    if operador == "sum" or operador == "+":
        resultado = n1 + n2
    if operador == "subtraction" or operador == "-":
        resultado = n1 - n2
    elif operador == "division" or operador == "/":
        resultado = n1 / n2
    if operador == "multiplication" or operador == "*":
        resultado = n1 * n2
    elif operador == "power" or operador == "^":
        resultado = n1 ** n2
    # Output
    print("The result is:", resultado)
ejercicio04MCM()
print("")
|
python
|
from __future__ import absolute_import
from django.test import RequestFactory
from exam import fixture
from mock import patch
from sentry.middleware.stats import RequestTimingMiddleware, add_request_metric_tags
from sentry.testutils import TestCase
from sentry.testutils.helpers.faux import Mock
class RequestTimingMiddlewareTest(TestCase):
middleware = fixture(RequestTimingMiddleware)
factory = fixture(RequestFactory)
@patch('sentry.utils.metrics.incr')
def test_records_default_api_metrics(self, incr):
request = self.factory.get('/')
request._view_path = '/'
response = Mock(status_code=200)
self.middleware.process_response(request, response)
incr.assert_called_with(
'view.response',
instance=request._view_path,
tags={
'method': 'GET',
'status_code': 200,
},
skip_internal=False,
)
@patch('sentry.utils.metrics.incr')
def test_records_endpoint_specific_metrics(self, incr):
request = self.factory.get('/')
request._view_path = '/'
request._metric_tags = {'a': 'b'}
response = Mock(status_code=200)
self.middleware.process_response(request, response)
incr.assert_called_with(
'view.response',
instance=request._view_path,
tags={
'method': 'GET',
'status_code': 200,
'a': 'b',
},
skip_internal=False,
)
@patch('sentry.utils.metrics.incr')
def test_add_request_metric_tags(self, incr):
request = self.factory.get('/')
request._view_path = '/'
add_request_metric_tags(request, foo='bar')
response = Mock(status_code=200)
self.middleware.process_response(request, response)
incr.assert_called_with(
'view.response',
instance=request._view_path,
tags={
'method': 'GET',
'status_code': 200,
'foo': 'bar',
},
skip_internal=False,
)
|
python
|
from django.conf.urls import url
from django.urls import path
from rest.quiklash import views
from rest.push_the_buttons.views import PushTheButtonView
urlpatterns = [
path('api/qa/game/start', views.QuicklashMainGame.as_view()),
path('api/qa/question/new', views.QuiklashQuestionListView.as_view()),
path('api/qa/question/answer', views.QuiklashQuestionAnswer.as_view()),
# path('api/qa/voting', PushTheButtonView.as_view()),
# path('api/qa/vote', views.PlayerView.as_view()),
]
|
python
|
# coding: utf-8
from __future__ import annotations
from datetime import date, datetime # noqa: F401
import re # noqa: F401
from typing import Any, Dict, List, Optional # noqa: F401
from pydantic import AnyUrl, BaseModel, EmailStr, validator # noqa: F401
from acapy_wrapper.models.indy_proof_requested_proof_predicate import (
IndyProofRequestedProofPredicate,
)
from acapy_wrapper.models.indy_proof_requested_proof_revealed_attr import (
IndyProofRequestedProofRevealedAttr,
)
from acapy_wrapper.models.indy_proof_requested_proof_revealed_attr_group import (
IndyProofRequestedProofRevealedAttrGroup,
)
class IndyProofRequestedProof(BaseModel):
"""NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
Do not edit the class manually.
IndyProofRequestedProof - a model defined in OpenAPI
predicates: The predicates of this IndyProofRequestedProof [Optional].
revealed_attr_groups: The revealed_attr_groups of this IndyProofRequestedProof [Optional].
revealed_attrs: The revealed_attrs of this IndyProofRequestedProof [Optional].
self_attested_attrs: The self_attested_attrs of this IndyProofRequestedProof [Optional].
unrevealed_attrs: The unrevealed_attrs of this IndyProofRequestedProof [Optional].
"""
predicates: Optional[Dict[str, IndyProofRequestedProofPredicate]] = None
revealed_attr_groups: Optional[
Dict[str, IndyProofRequestedProofRevealedAttrGroup]
] = None
revealed_attrs: Optional[Dict[str, IndyProofRequestedProofRevealedAttr]] = None
self_attested_attrs: Optional[Dict[str, Any]] = None
unrevealed_attrs: Optional[Dict[str, Any]] = None
IndyProofRequestedProof.update_forward_refs()
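
# Hedged usage sketch (illustrative values, not from the generator output): the
# model can be instantiated with any subset of its optional fields, for example
# carrying only self-attested attributes.
if __name__ == "__main__":
    example = IndyProofRequestedProof(
        self_attested_attrs={"email": "alice@example.com"},  # hypothetical attribute
        unrevealed_attrs={},
    )
    print(example.json())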
|
python
|
# Basic training configuration file
from pathlib import Path
from torchvision.transforms import RandomVerticalFlip, RandomHorizontalFlip, CenterCrop
from torchvision.transforms import RandomApply, RandomAffine
from torchvision.transforms import ToTensor, Normalize
from common.dataset import get_test_data_loader
SEED = 12345
DEBUG = True
OUTPUT_PATH = "output"
dataset_path = Path("/home/fast_storage/imaterialist-challenge-furniture-2018/")
SAMPLE_SUBMISSION_PATH = dataset_path / "sample_submission_randomlabel.csv"
TEST_TRANSFORMS = [
RandomApply(
[RandomAffine(degrees=45, translate=(0.1, 0.1), scale=(0.7, 1.2), resample=2), ],
p=0.5
),
CenterCrop(size=350),
RandomHorizontalFlip(p=0.5),
RandomVerticalFlip(p=0.5),
ToTensor(),
Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
]
N_CLASSES = 128
BATCH_SIZE = 32
NUM_WORKERS = 8
TEST_LOADER = get_test_data_loader(
dataset_path=dataset_path / "test_400x400",
test_data_transform=TEST_TRANSFORMS,
batch_size=BATCH_SIZE,
num_workers=NUM_WORKERS,
pin_memory=True)
MODEL = (Path(OUTPUT_PATH) / "training_FurnitureSqueezeNet350_20180414_1610" /
"model_FurnitureSqueezeNet350_47_val_loss=0.8795085.pth").as_posix()
N_TTA = 10
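
# Hedged sketch (not part of the original config): the stochastic TEST_TRANSFORMS
# combined with N_TTA imply test-time augmentation, i.e. averaging predictions
# over N_TTA randomized passes of TEST_LOADER. A typical loop, assuming `model`
# is the network loaded from MODEL and the loader yields (images, ids):
#
#   probs = None
#   for _ in range(N_TTA):
#       run = torch.cat([torch.softmax(model(x), dim=1) for x, _ in TEST_LOADER])
#       probs = run if probs is None else probs + run
#   probs /= N_TTA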
|
python
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2021 Red Hat
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
"""
The module file for nxos_bgp_global
"""
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = """
module: nxos_bgp_global
short_description: BGP Global resource module.
description:
- This module manages global BGP configuration on devices running Cisco NX-OS.
version_added: 1.4.0
notes:
- Tested against NX-OS 9.3.6.
- Unsupported for Cisco MDS
- This module works with connection C(network_cli) and C(httpapi).
author: Nilashish Chakraborty (@NilashishC)
options:
running_config:
description:
- This option is used only with state I(parsed).
- The value of this option should be the output received from the NX-OS device
by executing the command B(show running-config | section '^router bgp').
- The state I(parsed) reads the configuration from C(running_config) option and
transforms it into Ansible structured data as per the resource module's argspec
and the value is then returned in the I(parsed) key within the result.
type: str
config:
description: A list of BGP process configuration.
type: dict
suboptions:
as_number:
description: Autonomous System Number of the router.
type: str
affinity_group:
description: Configure an affinity group.
type: dict
suboptions:
group_id:
description: Affinity Group ID.
type: int
bestpath: &bestpath
description: Define the default bestpath selection algorithm.
type: dict
suboptions:
always_compare_med:
description: Compare MED on paths from different AS.
type: bool
as_path:
description: AS-Path.
type: dict
suboptions:
ignore:
description: Ignore AS-Path during bestpath selection.
type: bool
multipath_relax:
description: Relax AS-Path restriction when choosing multipaths.
type: bool
compare_neighborid:
description: When more paths are available than max path config, use neighborid as tie-breaker.
type: bool
compare_routerid:
description: Compare router-id for identical EBGP paths.
type: bool
cost_community_ignore:
description: Ignore cost communities in bestpath selection.
type: bool
igp_metric_ignore:
description: Ignore IGP metric for next-hop during bestpath selection.
type: bool
med:
description: MED
type: dict
suboptions:
confed:
description: Compare MED only from paths originated from within a confederation.
type: bool
missing_as_worst:
description: Treat missing MED as highest MED.
type: bool
non_deterministic:
                description: Do not always pick the best-MED path among paths from the same AS.
type: bool
cluster_id: &cluster_id
description: Configure Route Reflector Cluster-ID.
type: str
confederation: &confederation
description: AS confederation parameters.
type: dict
suboptions:
identifier:
description: Set routing domain confederation AS.
type: str
peers:
description: Peer ASs in BGP confederation.
type: list
elements: str
disable_policy_batching:
description: Disable batching evaluation of outbound policy for a peer.
type: dict
suboptions:
set:
description: Set policy batching.
type: bool
ipv4:
description: IPv4 address-family settings.
type: dict
suboptions:
prefix_list:
description: Name of prefix-list to apply.
type: str
ipv6:
description: IPv6 address-family settings.
type: dict
suboptions:
prefix_list:
description: Name of prefix-list to apply.
type: str
nexthop:
description: Batching based on nexthop.
type: bool
dynamic_med_interval:
description: Sets the interval for dampening of med changes.
type: int
enforce_first_as:
description: Enforce neighbor AS is the first AS in AS-PATH attribute (EBGP).
type: bool
enhanced_error:
description: Enable BGP Enhanced error handling.
type: bool
fabric_soo:
description: Fabric site of origin.
type: str
fast_external_fallover:
description: Immediately reset the session if the link to a directly connected BGP peer goes down.
type: bool
flush_routes:
description: Flush routes in RIB upon controlled restart.
type: bool
graceful_restart: &graceful_restart
description: Configure Graceful Restart functionality.
type: dict
suboptions:
set:
description: Enable graceful-restart.
type: bool
restart_time:
description: Maximum time for restart advertised to peers.
type: int
stalepath_time:
description: Maximum time to keep a restarting peer's stale routes.
type: int
helper:
description: Configure Graceful Restart Helper mode functionality.
type: bool
graceful_shutdown:
description: Graceful-shutdown for BGP protocol.
type: dict
suboptions:
activate:
description: Send graceful-shutdown community on all routes.
type: dict
suboptions:
set:
                description: Activate graceful-shutdown.
type: bool
route_map:
description: Apply route-map to modify attributes for outbound.
type: str
aware:
description: Lower preference of routes carrying graceful-shutdown community.
type: bool
isolate:
description: Isolate this router from BGP perspective.
type: dict
suboptions:
set:
description: Withdraw remote BGP routes to isolate this router.
type: bool
include_local:
description: Withdraw both local and remote BGP routes.
type: bool
log_neighbor_changes: &log_nbr
description: Log a message for neighbor up/down event.
type: bool
maxas_limit: &maxas_limit
description: Allow AS-PATH attribute from EBGP neighbor imposing a limit on number of ASes.
type: int
neighbors: &nbr
description: Configure BGP neighbors.
type: list
elements: dict
suboptions:
neighbor_address:
description: IP address/Prefix of the neighbor or interface.
type: str
required: True
bfd:
description: Bidirectional Fast Detection for the neighbor.
type: dict
suboptions:
set:
description: Set BFD for this neighbor.
type: bool
singlehop:
description: Single-hop session.
type: bool
multihop:
description: Multihop session.
type: dict
suboptions:
set:
description: Set BFD multihop.
type: bool
interval:
description: Configure BFD session interval parameters.
type: dict
suboptions:
tx_interval:
description: TX interval in milliseconds.
type: int
min_rx_interval:
description: Minimum RX interval.
type: int
multiplier:
description: Detect Multiplier.
type: int
neighbor_affinity_group:
description: Configure an affinity group.
type: dict
suboptions:
group_id:
description: Affinity Group ID.
type: int
bmp_activate_server:
description: Specify server ID for activating BMP monitoring for the peer.
type: int
capability:
description: Capability.
type: dict
suboptions:
suppress_4_byte_as:
description: Suppress 4-byte AS Capability.
type: bool
description:
            description: Neighbor-specific description.
type: str
disable_connected_check:
description: Disable check for directly connected peer.
type: bool
dont_capability_negotiate:
description: Don't negotiate capability with this neighbor.
type: bool
dscp:
description: Set dscp value for tcp transport.
type: str
dynamic_capability:
description: Dynamic Capability
type: bool
ebgp_multihop:
description: Specify multihop TTL for remote peer.
type: int
graceful_shutdown:
description: Graceful-shutdown for this neighbor.
type: dict
suboptions:
activate:
description: Send graceful-shutdown community.
type: dict
suboptions:
set:
description: Set activate.
type: bool
route_map:
description: Apply route-map to modify attributes for outbound.
type: str
inherit:
description: Inherit a template.
type: dict
suboptions:
peer:
description: Peer template to inherit.
type: str
peer_session:
description: Peer-session template to inherit.
type: str
local_as:
description: Specify the local-as number for the eBGP neighbor.
type: str
log_neighbor_changes:
description: Log message for neighbor up/down event.
type: dict
suboptions:
set:
description:
- Set log-neighbor-changes.
type: bool
disable:
description:
- Disable logging of neighbor up/down event.
type: bool
low_memory:
description: Behaviour in low memory situations.
type: dict
suboptions:
exempt:
description: Do not shutdown this peer when under memory pressure.
type: bool
password:
description: Configure a password for neighbor.
type: dict
suboptions:
encryption:
description:
- 0 specifies an UNENCRYPTED neighbor password.
- 3 specifies an 3DES ENCRYPTED neighbor password will follow.
- 7 specifies a Cisco type 7 ENCRYPTED neighbor password will follow.
type: int
key:
description: Authentication password.
type: str
path_attribute:
description: BGP path attribute optional filtering.
type: list
elements: dict
suboptions:
action:
description: Action.
type: str
choices: ["discard", "treat-as-withdraw"]
type:
description: Path attribute type
type: int
range:
description: Path attribute range.
type: dict
suboptions:
start:
description: Path attribute range start value.
type: int
end:
description: Path attribute range end value.
type: int
peer_type:
description: Neighbor facing
type: str
choices: ["fabric-border-leaf", "fabric-external"]
remote_as:
description: Specify Autonomous System Number of the neighbor.
type: str
remove_private_as:
description: Remove private AS number from outbound updates.
type: dict
suboptions:
set:
description: Remove private AS.
type: bool
replace_as:
description: Replace.
type: bool
all:
description: All.
type: bool
shutdown:
description: Administratively shutdown this neighbor.
type: bool
timers:
description: Configure keepalive and hold timers.
type: dict
suboptions:
keepalive:
description: Keepalive interval (seconds).
type: int
holdtime:
description: Holdtime (seconds).
type: int
transport:
description: BGP transport connection.
type: dict
suboptions:
connection_mode:
description: Specify type of connection.
type: dict
suboptions:
passive:
description: Allow passive connection setup only.
type: bool
ttl_security:
description: Enable TTL Security Mechanism.
type: dict
suboptions:
hops:
description: Specify hop count for remote peer.
type: int
update_source:
description: Specify source of BGP session and updates.
type: str
neighbor_down: &nbr_down
description: Handle BGP neighbor down event, due to various reasons.
type: dict
suboptions:
fib_accelerate:
description: Accelerate the hardware updates for IP/IPv6 adjacencies for neighbor.
type: bool
nexthop:
description: Nexthop resolution options.
type: dict
suboptions:
suppress_default_resolution:
description: Prohibit use of default route for nexthop address resolution.
type: bool
rd:
description: Secondary Route Distinguisher for vxlan multisite border gateway.
type: dict
suboptions:
dual:
description: Generate Secondary RD for all VRFs and L2VNIs.
type: bool
id:
description: Specify 2 byte value for ID.
type: int
reconnect_interval: &reconn_intv
description: Configure connection reconnect interval.
type: int
router_id: &rtr_id
description: Specify the IP address to use as router-id.
type: str
shutdown: &shtdwn
description: Administratively shutdown BGP protocol.
type: bool
suppress_fib_pending: &suppr
description: Advertise only routes that are programmed in hardware to peers.
type: bool
timers: &timers
description: Configure bgp related timers.
type: dict
suboptions:
bestpath_limit:
description: Configure timeout for first bestpath after restart.
type: dict
suboptions:
timeout:
description: Bestpath timeout (seconds).
type: int
always:
description: Configure update-delay-always option.
type: bool
bgp:
description: Configure different bgp keepalive and holdtimes.
type: dict
suboptions:
keepalive:
description: Keepalive interval (seconds).
type: int
holdtime:
description: Holdtime (seconds).
type: int
prefix_peer_timeout:
description: Prefix Peer timeout (seconds).
type: int
prefix_peer_wait:
description: Configure wait timer for a prefix peer.
type: int
vrfs:
description: Virtual Router Context configurations.
type: list
elements: dict
suboptions:
vrf:
description: VRF name.
type: str
allocate_index:
description: Configure allocate-index.
type: int
bestpath: *bestpath
cluster_id: *cluster_id
confederation: *confederation
graceful_restart: *graceful_restart
local_as:
description: Specify the local-as for this vrf.
type: str
log_neighbor_changes: *log_nbr
maxas_limit: *maxas_limit
neighbors: *nbr
neighbor_down: *nbr_down
reconnect_interval: *reconn_intv
router_id: *rtr_id
timers: *timers
state:
description:
- The state the configuration should be left in.
- State I(purged) removes all the BGP configurations from the
target device. Use caution with this state.
    - State I(deleted) only removes BGP attributes that this module
      manages and does not negate the BGP process completely, thereby preserving
      address-family related configurations under the BGP context.
- Running states I(deleted) and I(replaced) will result in an error if there
are address-family configuration lines present under a neighbor,
or a vrf context that is to be removed. Please use the
M(cisco.nxos.nxos_bgp_af) or M(cisco.nxos.nxos_bgp_neighbor_af)
modules for prior cleanup.
- States I(merged) and I(replaced) will result in a failure if BGP is already configured
with a different ASN than what is provided in the task. In such cases, please use
state I(purged) to remove the existing BGP process and proceed further.
- Refer to examples for more details.
type: str
choices:
- merged
- replaced
- deleted
- purged
- parsed
- gathered
- rendered
default: merged
"""
EXAMPLES = """
# Using merged
# Before state:
# -------------
# Nexus9000v# show running-config | section "^router bgp"
# Nexus9000v#
- name: Merge the provided configuration with the existing running configuration
cisco.nxos.nxos_bgp_global:
config:
as_number: 65563
router_id: 192.168.1.1
bestpath:
as_path:
multipath_relax: True
compare_neighborid: True
cost_community_ignore: True
confederation:
identifier: 42
peers:
- 65020
- 65030
- 65040
log_neighbor_changes: True
maxas_limit: 20
neighbors:
- neighbor_address: 192.168.1.100
neighbor_affinity_group:
group_id: 160
bmp_activate_server: 1
remote_as: 65563
description: NBR-1
low_memory:
exempt: True
- neighbor_address: 192.168.1.101
remote_as: 65563
password:
encryption: 7
key: 12090404011C03162E
neighbor_down:
fib_accelerate: True
vrfs:
- vrf: site-1
allocate_index: 5000
local_as: 200
log_neighbor_changes: True
neighbors:
- neighbor_address: 198.51.100.1
description: site-1-nbr-1
password:
encryption: 3
key: 13D4D3549493D2877B1DC116EE27A6BE
remote_as: 65562
- neighbor_address: 198.51.100.2
remote_as: 65562
description: site-1-nbr-2
- vrf: site-2
local_as: 300
log_neighbor_changes: True
neighbors:
- neighbor_address: 203.0.113.2
description: site-2-nbr-1
password:
encryption: 3
key: AF92F4C16A0A0EC5BDF56CF58BC030F6
remote_as: 65568
neighbor_down:
fib_accelerate: True
# Task output
# -------------
# before: {}
#
# commands:
# - router bgp 65563
# - bestpath as-path multipath-relax
# - bestpath compare-neighborid
# - bestpath cost-community ignore
# - confederation identifier 42
# - log-neighbor-changes
# - maxas-limit 20
# - neighbor-down fib-accelerate
# - router-id 192.168.1.1
# - confederation peers 65020 65030 65040
# - neighbor 192.168.1.100
# - remote-as 65563
# - affinity-group 160
# - bmp-activate-server 1
# - description NBR-1
# - low-memory exempt
# - neighbor 192.168.1.101
# - remote-as 65563
# - password 7 12090404011C03162E
# - vrf site-1
# - allocate-index 5000
# - local-as 200
# - log-neighbor-changes
# - neighbor 198.51.100.1
# - remote-as 65562
# - description site-1-nbr-1
# - password 3 13D4D3549493D2877B1DC116EE27A6BE
# - neighbor 198.51.100.2
# - remote-as 65562
# - description site-1-nbr-2
# - vrf site-2
# - local-as 300
# - log-neighbor-changes
# - neighbor-down fib-accelerate
# - neighbor 203.0.113.2
# - remote-as 65568
# - description site-2-nbr-1
# - password 3 AF92F4C16A0A0EC5BDF56CF58BC030F6
#
# after:
# as_number: '65563'
# bestpath:
# as_path:
# multipath_relax: true
# compare_neighborid: true
# cost_community_ignore: true
# confederation:
# identifier: '42'
# peers:
# - '65020'
# - '65030'
# - '65040'
# log_neighbor_changes: true
# maxas_limit: 20
# neighbor_down:
# fib_accelerate: true
# neighbors:
# - bmp_activate_server: 1
# description: NBR-1
# low_memory:
# exempt: true
# neighbor_address: 192.168.1.100
# neighbor_affinity_group:
# group_id: 160
# remote_as: '65563'
# - neighbor_address: 192.168.1.101
# password:
# encryption: 7
# key: 12090404011C03162E
# remote_as: '65563'
# router_id: 192.168.1.1
# vrfs:
# - allocate_index: 5000
# local_as: '200'
# log_neighbor_changes: true
# neighbors:
# - description: site-1-nbr-1
# neighbor_address: 198.51.100.1
# password:
# encryption: 3
# key: 13D4D3549493D2877B1DC116EE27A6BE
# remote_as: '65562'
# - description: site-1-nbr-2
# neighbor_address: 198.51.100.2
# remote_as: '65562'
# vrf: site-1
# - local_as: '300'
# log_neighbor_changes: true
# neighbor_down:
# fib_accelerate: true
# neighbors:
# - description: site-2-nbr-1
# neighbor_address: 203.0.113.2
# password:
# encryption: 3
# key: AF92F4C16A0A0EC5BDF56CF58BC030F6
# remote_as: '65568'
# vrf: site-2
# After state:
# -------------
# Nexus9000v# show running-config | section "^router bgp"
# router bgp 65563
# router-id 192.168.1.1
# confederation identifier 42
# confederation peers 65020 65030 65040
# bestpath as-path multipath-relax
# bestpath cost-community ignore
# bestpath compare-neighborid
# neighbor-down fib-accelerate
# maxas-limit 20
# log-neighbor-changes
# neighbor 192.168.1.100
# low-memory exempt
# bmp-activate-server 1
# remote-as 65563
# description NBR-1
# affinity-group 160
# neighbor 192.168.1.101
# remote-as 65563
# password 7 12090404011C03162E
# vrf site-1
# local-as 200
# log-neighbor-changes
# allocate-index 5000
# neighbor 198.51.100.1
# remote-as 65562
# description site-1-nbr-1
# password 3 13D4D3549493D2877B1DC116EE27A6BE
# neighbor 198.51.100.2
# remote-as 65562
# description site-1-nbr-2
# vrf site-2
# local-as 300
# neighbor-down fib-accelerate
# log-neighbor-changes
# neighbor 203.0.113.2
# remote-as 65568
# description site-2-nbr-1
# password 3 AF92F4C16A0A0EC5BDF56CF58BC030F6
# Using replaced
# Before state:
# -------------
# Nexus9000v# show running-config | section "^router bgp"
# router bgp 65563
# router-id 192.168.1.1
# confederation identifier 42
# confederation peers 65020 65030 65040
# bestpath as-path multipath-relax
# bestpath cost-community ignore
# bestpath compare-neighborid
# neighbor-down fib-accelerate
# maxas-limit 20
# log-neighbor-changes
# neighbor 192.168.1.100
# low-memory exempt
# bmp-activate-server 1
# remote-as 65563
# description NBR-1
# affinity-group 160
# neighbor 192.168.1.101
# remote-as 65563
# password 7 12090404011C03162E
# vrf site-1
# local-as 200
# log-neighbor-changes
# allocate-index 5000
# neighbor 198.51.100.1
# remote-as 65562
# description site-1-nbr-1
# password 3 13D4D3549493D2877B1DC116EE27A6BE
# neighbor 198.51.100.2
# remote-as 65562
# description site-1-nbr-2
# vrf site-2
# local-as 300
# neighbor-down fib-accelerate
# log-neighbor-changes
# neighbor 203.0.113.2
# remote-as 65568
# description site-2-nbr-1
# password 3 AF92F4C16A0A0EC5BDF56CF58BC030F6
- name: Replace BGP configuration with provided configuration
cisco.nxos.nxos_bgp_global:
config:
as_number: 65563
router_id: 192.168.1.1
bestpath:
compare_neighborid: True
cost_community_ignore: True
confederation:
identifier: 42
peers:
- 65020
- 65030
- 65050
maxas_limit: 40
neighbors:
- neighbor_address: 192.168.1.100
neighbor_affinity_group:
group_id: 160
bmp_activate_server: 1
remote_as: 65563
description: NBR-1
low_memory:
exempt: True
neighbor_down:
fib_accelerate: True
vrfs:
- vrf: site-2
local_as: 300
log_neighbor_changes: True
neighbors:
- neighbor_address: 203.0.113.2
password:
encryption: 7
key: 12090404011C03162E
neighbor_down:
fib_accelerate: True
state: replaced
# Task output
# -------------
# before:
# as_number: '65563'
# bestpath:
# as_path:
# multipath_relax: true
# compare_neighborid: true
# cost_community_ignore: true
# confederation:
# identifier: '42'
# peers:
# - '65020'
# - '65030'
# - '65040'
# log_neighbor_changes: true
# maxas_limit: 20
# neighbor_down:
# fib_accelerate: true
# neighbors:
# - bmp_activate_server: 1
# description: NBR-1
# low_memory:
# exempt: true
# neighbor_address: 192.168.1.100
# neighbor_affinity_group:
# group_id: 160
# remote_as: '65563'
# - neighbor_address: 192.168.1.101
# password:
# encryption: 7
# key: 12090404011C03162E
# remote_as: '65563'
# router_id: 192.168.1.1
# vrfs:
# - allocate_index: 5000
# local_as: '200'
# log_neighbor_changes: true
# neighbors:
# - description: site-1-nbr-1
# neighbor_address: 198.51.100.1
# password:
# encryption: 3
# key: 13D4D3549493D2877B1DC116EE27A6BE
# remote_as: '65562'
# - description: site-1-nbr-2
# neighbor_address: 198.51.100.2
# remote_as: '65562'
# vrf: site-1
# - local_as: '300'
# log_neighbor_changes: true
# neighbor_down:
# fib_accelerate: true
# neighbors:
# - description: site-2-nbr-1
# neighbor_address: 203.0.113.2
# password:
# encryption: 3
# key: AF92F4C16A0A0EC5BDF56CF58BC030F6
# remote_as: '65568'
# vrf: site-2
#
# commands:
# - router bgp 65563
# - no bestpath as-path multipath-relax
# - no log-neighbor-changes
# - maxas-limit 40
# - no confederation peers 65020 65030 65040
# - confederation peers 65020 65030 65050
# - no neighbor 192.168.1.101
# - vrf site-2
# - neighbor 203.0.113.2
# - no remote-as 65568
# - no description site-2-nbr-1
# - password 7 12090404011C03162E
# - no vrf site-1
# after:
# as_number: '65563'
# bestpath:
# compare_neighborid: true
# cost_community_ignore: true
# confederation:
# identifier: '42'
# peers:
# - '65020'
# - '65030'
# - '65050'
# maxas_limit: 40
# neighbor_down:
# fib_accelerate: true
# neighbors:
# - bmp_activate_server: 1
# description: NBR-1
# low_memory:
# exempt: true
# neighbor_address: 192.168.1.100
# neighbor_affinity_group:
# group_id: 160
# remote_as: '65563'
# router_id: 192.168.1.1
# vrfs:
# - local_as: '300'
# log_neighbor_changes: true
# neighbor_down:
# fib_accelerate: true
# neighbors:
# - neighbor_address: 203.0.113.2
# password:
# encryption: 7
# key: 12090404011C03162E
# vrf: site-2
#
# After state:
# -------------
# Nexus9000v# show running-config | section "^router bgp"
# router bgp 65563
# router-id 192.168.1.1
# confederation identifier 42
# confederation peers 65020 65030 65050
# bestpath cost-community ignore
# bestpath compare-neighborid
# neighbor-down fib-accelerate
# maxas-limit 40
# neighbor 192.168.1.100
# low-memory exempt
# bmp-activate-server 1
# remote-as 65563
# description NBR-1
# affinity-group 160
# vrf site-2
# local-as 300
# neighbor-down fib-accelerate
# log-neighbor-changes
# neighbor 203.0.113.2
# password 7 12090404011C03162E
# Using deleted
# Before state:
# -------------
# Nexus9000v# show running-config | section "^router bgp"
# router bgp 65563
# router-id 192.168.1.1
# confederation identifier 42
# confederation peers 65020 65030 65040
# bestpath as-path multipath-relax
# bestpath cost-community ignore
# bestpath compare-neighborid
# neighbor-down fib-accelerate
# maxas-limit 20
# log-neighbor-changes
# address-family ipv4 unicast
# default-metric 400
# suppress-inactive
# default-information originate
# address-family ipv6 multicast
# wait-igp-convergence
# redistribute eigrp eigrp-1 route-map site-1-rmap
# neighbor 192.168.1.100
# low-memory exempt
# bmp-activate-server 1
# remote-as 65563
# description NBR-1
# affinity-group 160
# neighbor 192.168.1.101
# remote-as 65563
# password 7 12090404011C03162E
# vrf site-1
# local-as 200
# log-neighbor-changes
# allocate-index 5000
# address-family ipv4 multicast
# maximum-paths 40
# dampen-igp-metric 1200
# neighbor 198.51.100.1
# remote-as 65562
# description site-1-nbr-1
# password 3 13D4D3549493D2877B1DC116EE27A6BE
# neighbor 198.51.100.2
# remote-as 65562
# description site-1-nbr-2
# vrf site-2
# local-as 300
# neighbor-down fib-accelerate
# log-neighbor-changes
# neighbor 203.0.113.2
# remote-as 65568
# description site-1-nbr-1
# password 3 AF92F4C16A0A0EC5BDF56CF58BC030F6
- name: Delete BGP configurations handled by this module
cisco.nxos.nxos_bgp_global:
state: deleted
# Task output
# -------------
# before:
# as_number: '65563'
# bestpath:
# as_path:
# multipath_relax: true
# compare_neighborid: true
# cost_community_ignore: true
# confederation:
# identifier: '42'
# peers:
# - '65020'
# - '65030'
# - '65040'
# log_neighbor_changes: true
# maxas_limit: 20
# neighbor_down:
# fib_accelerate: true
# neighbors:
# - bmp_activate_server: 1
# description: NBR-1
# low_memory:
# exempt: true
# neighbor_address: 192.168.1.100
# neighbor_affinity_group:
# group_id: 160
# remote_as: '65563'
# - neighbor_address: 192.168.1.101
# password:
# encryption: 7
# key: 12090404011C03162E
# remote_as: '65563'
# router_id: 192.168.1.1
# vrfs:
# - allocate_index: 5000
# local_as: '200'
# log_neighbor_changes: true
# neighbors:
# - description: site-1-nbr-1
# neighbor_address: 198.51.100.1
# password:
# encryption: 3
# key: 13D4D3549493D2877B1DC116EE27A6BE
# remote_as: '65562'
# - description: site-1-nbr-2
# neighbor_address: 198.51.100.2
# remote_as: '65562'
# vrf: site-1
# - local_as: '300'
# log_neighbor_changes: true
# neighbor_down:
# fib_accelerate: true
# neighbors:
# - description: site-1-nbr-1
# neighbor_address: 203.0.113.2
# password:
# encryption: 3
# key: AF92F4C16A0A0EC5BDF56CF58BC030F6
# remote_as: '65568'
# vrf: site-2
#
# commands:
# - router bgp 65563
# - no bestpath as-path multipath-relax
# - no bestpath compare-neighborid
# - no bestpath cost-community ignore
# - no confederation identifier 42
# - no log-neighbor-changes
# - no maxas-limit 20
# - no neighbor-down fib-accelerate
# - no router-id 192.168.1.1
# - no confederation peers 65020 65030 65040
# - no neighbor 192.168.1.100
# - no neighbor 192.168.1.101
# - no vrf site-1
# - no vrf site-2
#
# after:
# as_number: '65563'
#
# After state:
# -------------
# Nexus9000v# show running-config | section "^router bgp"
# router bgp 65563
# address-family ipv4 unicast
# default-metric 400
# suppress-inactive
# default-information originate
# address-family ipv6 multicast
# wait-igp-convergence
# redistribute eigrp eigrp-1 route-map site-1-rmap
#
# Using purged
# Before state:
# -------------
# Nexus9000v# show running-config | section "^router bgp"
# router bgp 65563
# router-id 192.168.1.1
# confederation identifier 42
# confederation peers 65020 65030 65040
# bestpath as-path multipath-relax
# bestpath cost-community ignore
# bestpath compare-neighborid
# neighbor-down fib-accelerate
# maxas-limit 20
# log-neighbor-changes
# address-family ipv4 unicast
# default-metric 400
# suppress-inactive
# default-information originate
# address-family ipv6 multicast
# wait-igp-convergence
# redistribute eigrp eigrp-1 route-map site-1-rmap
# neighbor 192.168.1.100
# low-memory exempt
# bmp-activate-server 1
# remote-as 65563
# description NBR-1
# affinity-group 160
# neighbor 192.168.1.101
# remote-as 65563
# password 7 12090404011C03162E
# vrf site-1
# local-as 200
# log-neighbor-changes
# allocate-index 5000
# address-family ipv4 multicast
# maximum-paths 40
# dampen-igp-metric 1200
# neighbor 198.51.100.1
# remote-as 65562
# description site-1-nbr-1
# password 3 13D4D3549493D2877B1DC116EE27A6BE
# neighbor 198.51.100.2
# remote-as 65562
# description site-1-nbr-2
# vrf site-2
# local-as 300
# neighbor-down fib-accelerate
# log-neighbor-changes
# neighbor 203.0.113.2
# remote-as 65568
# description site-1-nbr-1
# password 3 AF92F4C16A0A0EC5BDF56CF58BC030F6
- name: Purge all BGP configurations from the device
cisco.nxos.nxos_bgp_global:
state: purged
# Task output
# -------------
# before:
# as_number: '65563'
# bestpath:
# as_path:
# multipath_relax: true
# compare_neighborid: true
# cost_community_ignore: true
# confederation:
# identifier: '42'
# peers:
# - '65020'
# - '65030'
# - '65040'
# log_neighbor_changes: true
# maxas_limit: 20
# neighbor_down:
# fib_accelerate: true
# neighbors:
# - bmp_activate_server: 1
# description: NBR-1
# low_memory:
# exempt: true
# neighbor_address: 192.168.1.100
# neighbor_affinity_group:
# group_id: 160
# remote_as: '65563'
# - neighbor_address: 192.168.1.101
# password:
# encryption: 7
# key: 12090404011C03162E
# remote_as: '65563'
# router_id: 192.168.1.1
# vrfs:
# - allocate_index: 5000
# local_as: '200'
# log_neighbor_changes: true
# neighbors:
# - description: site-1-nbr-1
# neighbor_address: 198.51.100.1
# password:
# encryption: 3
# key: 13D4D3549493D2877B1DC116EE27A6BE
# remote_as: '65562'
# - description: site-1-nbr-2
# neighbor_address: 198.51.100.2
# remote_as: '65562'
# vrf: site-1
# - local_as: '300'
# log_neighbor_changes: true
# neighbor_down:
# fib_accelerate: true
# neighbors:
# - description: site-1-nbr-1
# neighbor_address: 203.0.113.2
# password:
# encryption: 3
# key: AF92F4C16A0A0EC5BDF56CF58BC030F6
# remote_as: '65568'
# vrf: site-2
#
# commands:
# - no router bgp 65563
#
# after: {}
#
# After state:
# -------------
# Nexus9000v# show running-config | section "^router bgp"
# Nexus9000v#
# Using rendered
- name: Render platform specific configuration lines (without connecting to the device)
cisco.nxos.nxos_bgp_global:
config:
as_number: 65563
router_id: 192.168.1.1
bestpath:
as_path:
multipath_relax: True
compare_neighborid: True
cost_community_ignore: True
confederation:
identifier: 42
peers:
- 65020
- 65030
- 65040
log_neighbor_changes: True
maxas_limit: 20
neighbors:
- neighbor_address: 192.168.1.100
neighbor_affinity_group:
group_id: 160
bmp_activate_server: 1
remote_as: 65563
description: NBR-1
low_memory:
exempt: True
- neighbor_address: 192.168.1.101
remote_as: 65563
password:
encryption: 7
key: 12090404011C03162E
neighbor_down:
fib_accelerate: True
vrfs:
- vrf: site-1
allocate_index: 5000
local_as: 200
log_neighbor_changes: True
neighbors:
- neighbor_address: 198.51.100.1
description: site-1-nbr-1
password:
encryption: 3
key: 13D4D3549493D2877B1DC116EE27A6BE
remote_as: 65562
- neighbor_address: 198.51.100.2
remote_as: 65562
description: site-1-nbr-2
- vrf: site-2
local_as: 300
log_neighbor_changes: True
neighbors:
- neighbor_address: 203.0.113.2
description: site-1-nbr-1
password:
encryption: 3
key: AF92F4C16A0A0EC5BDF56CF58BC030F6
remote_as: 65568
neighbor_down:
fib_accelerate: True
# Task Output (redacted)
# -----------------------
# rendered:
# - router bgp 65563
# - bestpath as-path multipath-relax
# - bestpath compare-neighborid
# - bestpath cost-community ignore
# - confederation identifier 42
# - log-neighbor-changes
# - maxas-limit 20
# - neighbor-down fib-accelerate
# - router-id 192.168.1.1
# - confederation peers 65020 65030 65040
# - neighbor 192.168.1.100
# - remote-as 65563
# - affinity-group 160
# - bmp-activate-server 1
# - description NBR-1
# - low-memory exempt
# - neighbor 192.168.1.101
# - remote-as 65563
# - password 7 12090404011C03162E
# - vrf site-1
# - allocate-index 5000
# - local-as 200
# - log-neighbor-changes
# - neighbor 198.51.100.1
# - remote-as 65562
# - description site-1-nbr-1
# - password 3 13D4D3549493D2877B1DC116EE27A6BE
# - neighbor 198.51.100.2
# - remote-as 65562
# - description site-1-nbr-2
# - vrf site-2
# - local-as 300
# - log-neighbor-changes
# - neighbor-down fib-accelerate
# - neighbor 203.0.113.2
# - remote-as 65568
# - description site-1-nbr-1
# - password 3 AF92F4C16A0A0EC5BDF56CF58BC030F6
# Using parsed
# parsed.cfg
# ------------
# router bgp 65563
# router-id 192.168.1.1
# confederation identifier 42
# confederation peers 65020 65030 65040
# bestpath as-path multipath-relax
# bestpath cost-community ignore
# bestpath compare-neighborid
# neighbor-down fib-accelerate
# maxas-limit 20
# log-neighbor-changes
# neighbor 192.168.1.100
# low-memory exempt
# bmp-activate-server 1
# remote-as 65563
# description NBR-1
# affinity-group 160
# neighbor 192.168.1.101
# remote-as 65563
# password 7 12090404011C03162E
# vrf site-1
# local-as 200
# log-neighbor-changes
# allocate-index 5000
# neighbor 198.51.100.1
# remote-as 65562
# description site-1-nbr-1
# password 3 13D4D3549493D2877B1DC116EE27A6BE
# neighbor 198.51.100.2
# remote-as 65562
# description site-1-nbr-2
# vrf site-2
# local-as 300
# neighbor-down fib-accelerate
# log-neighbor-changes
# neighbor 203.0.113.2
# remote-as 65568
# description site-1-nbr-1
# password 3 AF92F4C16A0A0EC5BDF56CF58BC030F6
- name: Parse externally provided BGP config
cisco.nxos.nxos_bgp_global:
running_config: "{{ lookup('file', 'parsed.cfg') }}"
state: parsed
# Task output (redacted)
# -----------------------
# parsed:
# as_number: '65563'
# bestpath:
# as_path:
# multipath_relax: true
# compare_neighborid: true
# cost_community_ignore: true
# confederation:
# identifier: '42'
# peers:
# - '65020'
# - '65030'
# - '65040'
# log_neighbor_changes: true
# maxas_limit: 20
# neighbor_down:
# fib_accelerate: true
# neighbors:
# - bmp_activate_server: 1
# description: NBR-1
# low_memory:
# exempt: true
# neighbor_address: 192.168.1.100
# neighbor_affinity_group:
# group_id: 160
# remote_as: '65563'
# - neighbor_address: 192.168.1.101
# password:
# encryption: 7
# key: 12090404011C03162E
# remote_as: '65563'
# router_id: 192.168.1.1
# vrfs:
# - allocate_index: 5000
# local_as: '200'
# log_neighbor_changes: true
# neighbors:
# - description: site-1-nbr-1
# neighbor_address: 198.51.100.1
# password:
# encryption: 3
# key: 13D4D3549493D2877B1DC116EE27A6BE
# remote_as: '65562'
# - description: site-1-nbr-2
# neighbor_address: 198.51.100.2
# remote_as: '65562'
# vrf: site-1
# - local_as: '300'
# log_neighbor_changes: true
# neighbor_down:
# fib_accelerate: true
# neighbors:
# - description: site-1-nbr-1
# neighbor_address: 203.0.113.2
# password:
# encryption: 3
# key: AF92F4C16A0A0EC5BDF56CF58BC030F6
# remote_as: '65568'
# vrf: site-2
# Using gathered
# existing config
#
# Nexus9000v# show running-config | section "^router bgp"
# router bgp 65563
# router-id 192.168.1.1
# confederation identifier 42
# confederation peers 65020 65030 65050
# bestpath cost-community ignore
# bestpath compare-neighborid
# neighbor-down fib-accelerate
# maxas-limit 40
# neighbor 192.168.1.100
# low-memory exempt
# bmp-activate-server 1
# remote-as 65563
# description NBR-1
# affinity-group 160
# vrf site-1
# vrf site-2
# local-as 300
# neighbor-down fib-accelerate
# log-neighbor-changes
# neighbor 203.0.113.2
# password 7 12090404011C03162E
- name: Gather BGP facts using gathered
cisco.nxos.nxos_bgp_global:
state: gathered
# Task output (redacted)
# -----------------------
# gathered:
# as_number: '65563'
# bestpath:
# compare_neighborid: true
# cost_community_ignore: true
# confederation:
# identifier: '42'
# peers:
# - '65020'
# - '65030'
# - '65050'
# maxas_limit: 40
# neighbor_down:
# fib_accelerate: true
# neighbors:
# - bmp_activate_server: 1
# description: NBR-1
# low_memory:
# exempt: true
# neighbor_address: 192.168.1.100
# neighbor_affinity_group:
# group_id: 160
# remote_as: '65563'
# router_id: 192.168.1.1
# vrfs:
# - vrf: site-1
# - local_as: '300'
# log_neighbor_changes: true
# neighbor_down:
# fib_accelerate: true
# neighbors:
# - neighbor_address: 203.0.113.2
# password:
# encryption: 7
# key: 12090404011C03162E
# vrf: site-2
# Remove a neighbor having AF configurations with state replaced (will fail)
# Before state:
# -------------
# Nexus9000v# show running-config | section "^router bgp"
# router bgp 65536
# log-neighbor-changes
# maxas-limit 20
# router-id 198.51.100.2
# neighbor 203.0.113.2
# address-family ipv4 unicast
# next-hop-self
# remote-as 65538
# affinity-group 160
# description NBR-1
# low-memory exempt
# neighbor 192.0.2.1
# remote-as 65537
# password 7 12090404011C03162E
- name: Remove a neighbor having AF configurations (should fail)
cisco.nxos.nxos_bgp_global:
config:
as_number: 65536
router_id: 198.51.100.2
maxas_limit: 20
log_neighbor_changes: True
neighbors:
- neighbor_address: 192.0.2.1
remote_as: 65537
password:
encryption: 7
key: 12090404011C03162E
state: replaced
# Task output (redacted)
# -----------------------
# fatal: [Nexus9000v]: FAILED! => changed=false
# msg: Neighbor 203.0.113.2 has address-family configurations.
# Please use the nxos_bgp_neighbor_af module to remove those first.
# Remove a VRF having AF configurations with state replaced (will fail)
# Before state:
# -------------
# Nexus9000v# show running-config | section "^router bgp"
# router bgp 65536
# log-neighbor-changes
# maxas-limit 20
# router-id 198.51.100.2
# neighbor 192.0.2.1
# remote-as 65537
# password 7 12090404011C03162E
# vrf site-1
# address-family ipv4 unicast
# default-information originate
# neighbor 203.0.113.2
# remote-as 65538
# affinity-group 160
# description NBR-1
# low-memory exempt
# vrf site-2
# neighbor-down fib-accelerate
- name: Remove a VRF having AF configurations (should fail)
cisco.nxos.nxos_bgp_global:
config:
as_number: 65536
router_id: 198.51.100.2
maxas_limit: 20
log_neighbor_changes: True
neighbors:
- neighbor_address: 192.0.2.1
remote_as: 65537
password:
encryption: 7
key: 12090404011C03162E
vrfs:
- vrf: site-2
neighbor_down:
fib_accelerate: True
state: replaced
# Task output (redacted)
# -----------------------
# fatal: [Nexus9000v]: FAILED! => changed=false
# msg: VRF site-1 has address-family configurations.
# Please use the nxos_bgp_af module to remove those first.
"""
RETURN = """
before:
description: The configuration prior to the module invocation.
returned: always
type: dict
sample: >
The configuration returned will always be in the same format
as the parameters above.
after:
description: The resulting configuration after module invocation.
returned: when changed
type: dict
sample: >
The configuration returned will always be in the same format
as the parameters above.
commands:
description: The set of commands pushed to the remote device.
returned: always
type: list
sample:
- router bgp 65563
- maxas-limit 20
- router-id 192.168.1.1
- confederation peers 65020 65030 65040
- neighbor 192.168.1.100
- remote-as 65563
- affinity-group 160
- bmp-activate-server 1
- description NBR-1
- low-memory exempt
- vrf site-1
- log-neighbor-changes
- neighbor 198.51.100.1
- remote-as 65562
- description site-1-nbr-1
- password 3 13D4D3549493D2877B1DC116EE27A6BE
"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.argspec.bgp_global.bgp_global import (
Bgp_globalArgs,
)
from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.config.bgp_global.bgp_global import (
Bgp_global,
)
def main():
"""
Main entry point for module execution
:returns: the result from module invocation
"""
module = AnsibleModule(
argument_spec=Bgp_globalArgs.argument_spec,
mutually_exclusive=[["config", "running_config"]],
required_if=[
["state", "merged", ["config"]],
["state", "replaced", ["config"]],
["state", "rendered", ["config"]],
["state", "parsed", ["running_config"]],
],
supports_check_mode=True,
)
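    # Bgp_global consumes the validated parameters and produces the result
    # dictionary (``before``, ``commands``, ``changed`` and, when changed,
    # ``after``; or ``rendered``/``parsed``/``gathered`` for the read-only
    # states) documented in the RETURN block above.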
result = Bgp_global(module).execute_module()
module.exit_json(**result)
if __name__ == "__main__":
main()
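# A minimal, hypothetical sketch (plain Python, not part of the nxos_bgp_global
# module above) mirroring the argument constraints wired into main(): ``config``
# and ``running_config`` are mutually exclusive, the merged/replaced/rendered
# states require ``config``, and the parsed state requires ``running_config``.
def check_bgp_global_args(state, config=None, running_config=None):
    """Return an error message for an invalid argument combination, else None."""
    if config is not None and running_config is not None:
        return "parameters are mutually exclusive: config|running_config"
    if state in ("merged", "replaced", "rendered") and config is None:
        return "state is %s but all of the following are missing: config" % state
    if state == "parsed" and running_config is None:
        return "state is parsed but all of the following are missing: running_config"
    return None
# For example, check_bgp_global_args("parsed") reports the missing running_config.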
|
python
|
print("merhaba")
print("merhaba")
print("merhaba")
print("merhaba")
|
python
|
r"""
Base class for polyhedra, part 6
Define methods related to plotting including affine hull projection.
"""
# ****************************************************************************
# Copyright (C) 2008-2012 Marshall Hampton <[email protected]>
# Copyright (C) 2011-2015 Volker Braun <[email protected]>
# Copyright (C) 2012-2018 Frederic Chapoton
# Copyright (C) 2013 Andrey Novoseltsev
# Copyright (C) 2014-2017 Moritz Firsching
# Copyright (C) 2014-2019 Thierry Monteil
# Copyright (C) 2015 Nathann Cohen
# Copyright (C) 2015-2017 Jeroen Demeyer
# Copyright (C) 2015-2017 Vincent Delecroix
# Copyright (C) 2015-2018 Dima Pasechnik
# Copyright (C) 2015-2020 Jean-Philippe Labbe <labbe at math.huji.ac.il>
# Copyright (C) 2015-2021 Matthias Koeppe
# Copyright (C) 2016-2019 Daniel Krenn
# Copyright (C) 2017 Marcelo Forets
# Copyright (C) 2017-2018 Mark Bell
# Copyright (C) 2019 Julian Ritter
# Copyright (C) 2019-2020 Laith Rastanawi
# Copyright (C) 2019-2020 Sophia Elia
# Copyright (C) 2019-2021 Jonathan Kliem <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
# https://www.gnu.org/licenses/
# ****************************************************************************
from sage.misc.cachefunc import cached_method
from sage.modules.vector_space_morphism import linear_transformation
from sage.matrix.constructor import matrix
from sage.modules.free_module_element import vector
from sage.rings.qqbar import AA
from sage.geometry.convex_set import AffineHullProjectionData
from .base5 import Polyhedron_base5
class Polyhedron_base6(Polyhedron_base5):
r"""
Methods related to plotting including affine hull projection.
TESTS::
sage: from sage.geometry.polyhedron.base6 import Polyhedron_base6
sage: P = polytopes.cube()
sage: Polyhedron_base6.plot(P)
Graphics3d Object
sage: Polyhedron_base6.tikz(P)
\begin{tikzpicture}%
[x={(1.000000cm, 0.000000cm)},
y={(-0.000000cm, 1.000000cm)},
z={(0.000000cm, -0.000000cm)},
scale=1.000000,
back/.style={loosely dotted, thin},
edge/.style={color=blue!95!black, thick},
facet/.style={fill=blue!95!black,fill opacity=0.800000},
vertex/.style={inner sep=1pt,circle,draw=green!25!black,fill=green!75!black,thick}]
%
%
%% This TikZ-picture was produced with Sagemath version ...
%% with the command: ._tikz_3d_in_3d and parameters:
%% view = [0, 0, 1]
%% angle = 0
%% scale = 1
%% edge_color = blue!95!black
%% facet_color = blue!95!black
%% opacity = 0.8
%% vertex_color = green
%% axis = False
<BLANKLINE>
%% Coordinate of the vertices:
%%
\coordinate (1.00000, -1.00000, -1.00000) at (1.00000, -1.00000, -1.00000);
\coordinate (1.00000, 1.00000, -1.00000) at (1.00000, 1.00000, -1.00000);
\coordinate (1.00000, 1.00000, 1.00000) at (1.00000, 1.00000, 1.00000);
\coordinate (1.00000, -1.00000, 1.00000) at (1.00000, -1.00000, 1.00000);
\coordinate (-1.00000, -1.00000, 1.00000) at (-1.00000, -1.00000, 1.00000);
\coordinate (-1.00000, -1.00000, -1.00000) at (-1.00000, -1.00000, -1.00000);
\coordinate (-1.00000, 1.00000, -1.00000) at (-1.00000, 1.00000, -1.00000);
\coordinate (-1.00000, 1.00000, 1.00000) at (-1.00000, 1.00000, 1.00000);
%%
%%
%% Drawing edges in the back
%%
\draw[edge,back] (1.00000, -1.00000, -1.00000) -- (1.00000, 1.00000, -1.00000);
\draw[edge,back] (1.00000, -1.00000, -1.00000) -- (1.00000, -1.00000, 1.00000);
\draw[edge,back] (1.00000, -1.00000, -1.00000) -- (-1.00000, -1.00000, -1.00000);
\draw[edge,back] (1.00000, 1.00000, -1.00000) -- (1.00000, 1.00000, 1.00000);
\draw[edge,back] (1.00000, 1.00000, -1.00000) -- (-1.00000, 1.00000, -1.00000);
\draw[edge,back] (-1.00000, -1.00000, 1.00000) -- (-1.00000, -1.00000, -1.00000);
\draw[edge,back] (-1.00000, -1.00000, -1.00000) -- (-1.00000, 1.00000, -1.00000);
\draw[edge,back] (-1.00000, 1.00000, -1.00000) -- (-1.00000, 1.00000, 1.00000);
%%
%%
%% Drawing vertices in the back
%%
\node[vertex] at (1.00000, -1.00000, -1.00000) {};
\node[vertex] at (1.00000, 1.00000, -1.00000) {};
\node[vertex] at (-1.00000, 1.00000, -1.00000) {};
\node[vertex] at (-1.00000, -1.00000, -1.00000) {};
%%
%%
%% Drawing the facets
%%
\fill[facet] (-1.00000, 1.00000, 1.00000) -- (1.00000, 1.00000, 1.00000) -- (1.00000, -1.00000, 1.00000) -- (-1.00000, -1.00000, 1.00000) -- cycle {};
%%
%%
%% Drawing edges in the front
%%
\draw[edge] (1.00000, 1.00000, 1.00000) -- (1.00000, -1.00000, 1.00000);
\draw[edge] (1.00000, 1.00000, 1.00000) -- (-1.00000, 1.00000, 1.00000);
\draw[edge] (1.00000, -1.00000, 1.00000) -- (-1.00000, -1.00000, 1.00000);
\draw[edge] (-1.00000, -1.00000, 1.00000) -- (-1.00000, 1.00000, 1.00000);
%%
%%
%% Drawing the vertices in the front
%%
\node[vertex] at (1.00000, 1.00000, 1.00000) {};
\node[vertex] at (1.00000, -1.00000, 1.00000) {};
\node[vertex] at (-1.00000, -1.00000, 1.00000) {};
\node[vertex] at (-1.00000, 1.00000, 1.00000) {};
%%
%%
\end{tikzpicture}
sage: Q = polytopes.hypercube(4)
sage: Polyhedron_base6.show(Q)
sage: Polyhedron_base6.schlegel_projection(Q)
The projection of a polyhedron into 3 dimensions
sage: R = polytopes.simplex(5)
sage: Polyhedron_base6.affine_hull(R)
A 5-dimensional polyhedron in ZZ^6 defined as the convex hull of 1 vertex and 5 lines
sage: Polyhedron_base6.affine_hull_projection(R)
A 5-dimensional polyhedron in ZZ^5 defined as the convex hull of 6 vertices
"""
def plot(self,
point=None, line=None, polygon=None, # None means unspecified by the user
wireframe='blue', fill='green',
position=None,
orthonormal=True, # whether to use orthonormal projections
**kwds):
r"""
Return a graphical representation.
INPUT:
- ``point``, ``line``, ``polygon`` -- Parameters to pass to
point (0d), line (1d), and polygon (2d) plot commands.
Allowed values are:
* A Python dictionary to be passed as keywords to the plot
commands.
* A string or triple of numbers: The color. This is
equivalent to passing the dictionary ``{'color':...}``.
* ``False``: Switches off the drawing of the corresponding
graphics object
- ``wireframe``, ``fill`` -- Similar to ``point``, ``line``,
and ``polygon``, but ``fill`` is used for the graphics
objects in the dimension of the polytope (or of dimension 2
for higher dimensional polytopes) and ``wireframe`` is used
for all lower-dimensional graphics objects
(default: 'green' for ``fill`` and 'blue' for ``wireframe``)
- ``position`` -- positive number; the position to take the projection
point in Schlegel diagrams.
- ``orthonormal`` -- Boolean (default: True); whether to use
orthonormal projections.
- ``**kwds`` -- optional keyword parameters that are passed to
all graphics objects.
OUTPUT:
A (multipart) graphics object.
EXAMPLES::
sage: square = polytopes.hypercube(2)
sage: point = Polyhedron([[1,1]])
sage: line = Polyhedron([[1,1],[2,1]])
sage: cube = polytopes.hypercube(3)
sage: hypercube = polytopes.hypercube(4)
By default, the wireframe is rendered in blue and the fill in green::
sage: square.plot() # optional - sage.plot
Graphics object consisting of 6 graphics primitives
sage: point.plot() # optional - sage.plot
Graphics object consisting of 1 graphics primitive
sage: line.plot() # optional - sage.plot
Graphics object consisting of 2 graphics primitives
sage: cube.plot() # optional - sage.plot
Graphics3d Object
sage: hypercube.plot() # optional - sage.plot
Graphics3d Object
Draw the lines in red and nothing else::
sage: square.plot(point=False, line='red', polygon=False) # optional - sage.plot
Graphics object consisting of 4 graphics primitives
sage: point.plot(point=False, line='red', polygon=False) # optional - sage.plot
Graphics object consisting of 0 graphics primitives
sage: line.plot(point=False, line='red', polygon=False) # optional - sage.plot
Graphics object consisting of 1 graphics primitive
sage: cube.plot(point=False, line='red', polygon=False) # optional - sage.plot
Graphics3d Object
sage: hypercube.plot(point=False, line='red', polygon=False) # optional - sage.plot
Graphics3d Object
Draw points in red, no lines, and a blue polygon::
sage: square.plot(point={'color':'red'}, line=False, polygon=(0,0,1)) # optional - sage.plot
Graphics object consisting of 2 graphics primitives
sage: point.plot(point={'color':'red'}, line=False, polygon=(0,0,1)) # optional - sage.plot
Graphics object consisting of 1 graphics primitive
sage: line.plot(point={'color':'red'}, line=False, polygon=(0,0,1)) # optional - sage.plot
Graphics object consisting of 1 graphics primitive
sage: cube.plot(point={'color':'red'}, line=False, polygon=(0,0,1)) # optional - sage.plot
Graphics3d Object
sage: hypercube.plot(point={'color':'red'}, line=False, polygon=(0,0,1)) # optional - sage.plot
Graphics3d Object
If we instead use the ``fill`` and ``wireframe`` options, the
coloring depends on the dimension of the object::
sage: square.plot(fill='green', wireframe='red') # optional - sage.plot
Graphics object consisting of 6 graphics primitives
sage: point.plot(fill='green', wireframe='red') # optional - sage.plot
Graphics object consisting of 1 graphics primitive
sage: line.plot(fill='green', wireframe='red') # optional - sage.plot
Graphics object consisting of 2 graphics primitives
sage: cube.plot(fill='green', wireframe='red') # optional - sage.plot
Graphics3d Object
sage: hypercube.plot(fill='green', wireframe='red') # optional - sage.plot
Graphics3d Object
It is possible to draw polyhedra up to dimension 4, no matter what the
ambient dimension is::
sage: hcube = polytopes.hypercube(5)
sage: facet = hcube.facets()[0].as_polyhedron();facet
A 4-dimensional polyhedron in ZZ^5 defined as the convex hull of 16 vertices
sage: facet.plot() # optional - sage.plot
Graphics3d Object
TESTS::
sage: for p in square.plot(): # optional - sage.plot
....: print("{} {}".format(p.options()['rgbcolor'], p))
blue Point set defined by 4 point(s)
blue Line defined by 2 points
blue Line defined by 2 points
blue Line defined by 2 points
blue Line defined by 2 points
green Polygon defined by 4 points
sage: for p in line.plot(): # optional - sage.plot
....: print("{} {}".format(p.options()['rgbcolor'], p))
blue Point set defined by 2 point(s)
green Line defined by 2 points
sage: for p in point.plot(): # optional - sage.plot
....: print("{} {}".format(p.options()['rgbcolor'], p))
green Point set defined by 1 point(s)
Draw the lines in red and nothing else::
sage: for p in square.plot(point=False, line='red', polygon=False): # optional - sage.plot
....: print("{} {}".format(p.options()['rgbcolor'], p))
red Line defined by 2 points
red Line defined by 2 points
red Line defined by 2 points
red Line defined by 2 points
Draw vertices in red, no lines, and a blue polygon::
sage: for p in square.plot(point={'color':'red'}, line=False, polygon=(0,0,1)): # optional - sage.plot
....: print("{} {}".format(p.options()['rgbcolor'], p))
red Point set defined by 4 point(s)
(0, 0, 1) Polygon defined by 4 points
sage: for p in line.plot(point={'color':'red'}, line=False, polygon=(0,0,1)): # optional - sage.plot
....: print("{} {}".format(p.options()['rgbcolor'], p))
red Point set defined by 2 point(s)
sage: for p in point.plot(point={'color':'red'}, line=False, polygon=(0,0,1)): # optional - sage.plot
....: print("{} {}".format(p.options()['rgbcolor'], p))
red Point set defined by 1 point(s)
Draw in red without wireframe::
sage: for p in square.plot(wireframe=False, fill="red"): # optional - sage.plot
....: print("{} {}".format(p.options()['rgbcolor'], p))
red Polygon defined by 4 points
sage: for p in line.plot(wireframe=False, fill="red"): # optional - sage.plot
....: print("{} {}".format(p.options()['rgbcolor'], p))
red Line defined by 2 points
sage: for p in point.plot(wireframe=False, fill="red"): # optional - sage.plot
....: print("{} {}".format(p.options()['rgbcolor'], p))
red Point set defined by 1 point(s)
We try to draw the polytope in 2 or 3 dimensions::
sage: type(Polyhedron(ieqs=[(1,)]).plot()) # optional - sage.plot
<class 'sage.plot.graphics.Graphics'>
sage: type(polytopes.hypercube(1).plot()) # optional - sage.plot
<class 'sage.plot.graphics.Graphics'>
sage: type(polytopes.hypercube(2).plot()) # optional - sage.plot
<class 'sage.plot.graphics.Graphics'>
sage: type(polytopes.hypercube(3).plot()) # optional - sage.plot
<class 'sage.plot.plot3d.base.Graphics3dGroup'>
In 4d a projection to 3d is used::
sage: type(polytopes.hypercube(4).plot()) # optional - sage.plot
<class 'sage.plot.plot3d.base.Graphics3dGroup'>
sage: type(polytopes.hypercube(5).plot()) # optional - sage.plot
Traceback (most recent call last):
...
NotImplementedError: plotting of 5-dimensional polyhedra not implemented
If the polyhedron is not full-dimensional, the :meth:`affine_hull_projection` is used if necessary::
sage: type(Polyhedron([(0,), (1,)]).plot()) # optional - sage.plot
<class 'sage.plot.graphics.Graphics'>
sage: type(Polyhedron([(0,0), (1,1)]).plot()) # optional - sage.plot
<class 'sage.plot.graphics.Graphics'>
sage: type(Polyhedron([(0,0,0), (1,1,1)]).plot()) # optional - sage.plot
<class 'sage.plot.plot3d.base.Graphics3dGroup'>
sage: type(Polyhedron([(0,0,0,0), (1,1,1,1)]).plot()) # optional - sage.plot
<class 'sage.plot.graphics.Graphics'>
sage: type(Polyhedron([(0,0,0,0,0), (1,1,1,1,1)]).plot()) # optional - sage.plot
<class 'sage.plot.graphics.Graphics'>
sage: type(Polyhedron([(0,0,0,0), (1,1,1,1), (1,0,0,0)]).plot()) # optional - sage.plot
<class 'sage.plot.graphics.Graphics'>
TESTS:
Check that :trac:`30015` is fixed::
sage: fcube = polytopes.hypercube(4)
sage: tfcube = fcube.face_truncation(fcube.faces(0)[0])
sage: sp = tfcube.schlegel_projection()
sage: for face in tfcube.faces(2):
....: vertices = face.ambient_Vrepresentation()
....: indices = [sp.coord_index_of(vector(x)) for x in vertices]
....: projected_vertices = [sp.transformed_coords[i] for i in indices]
....: assert Polyhedron(projected_vertices).dim() == 2
"""
def merge_options(*opts):
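            # Merge plot options in call order (wireframe/fill first, then
            # point/line/polygon, then **kwds, so later entries win): ``False``
            # anywhere switches the graphics object off, a string or tuple is
            # shorthand for ``{'color': ...}``, and a dict is merged as keywords.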
merged = dict()
for i in range(len(opts)):
opt = opts[i]
if opt is None:
continue
elif opt is False:
return False
elif isinstance(opt, (str, list, tuple)):
merged['color'] = opt
else:
merged.update(opt)
return merged
d = min(self.dim(), 2)
opts = [wireframe] * d + [fill] + [False] * (2-d)
# The point/line/polygon options take precedence over wireframe/fill
opts = [merge_options(opt1, opt2, kwds)
for opt1, opt2 in zip(opts, [point, line, polygon])]
def project(polyhedron, ortho):
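            # Dispatch: ambient dimension <= 3 plots via the identity projection;
            # polytopes of dimension <= 3 living in a higher-dimensional space are
            # first sent to their affine hull; 4-dimensional polytopes use a
            # Schlegel projection; anything else returns the identity projection,
            # whose missing ``plot`` method triggers the NotImplementedError below.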
if polyhedron.ambient_dim() <= 3:
return polyhedron.projection()
elif polyhedron.dim() <= 3:
if ortho:
return polyhedron.affine_hull_projection(orthonormal=True, extend=True).projection()
else:
return polyhedron.affine_hull_projection().projection()
elif polyhedron.dimension() == 4:
# For 4d-polyhedron, we can use schlegel projections:
return polyhedron.schlegel_projection(position=position)
else:
return polyhedron.projection()
projection = project(self, orthonormal)
try:
plot_method = projection.plot
except AttributeError:
raise NotImplementedError('plotting of {0}-dimensional polyhedra not implemented'
.format(self.ambient_dim()))
return plot_method(*opts)
def show(self, **kwds):
r"""
Display graphics immediately
This method attempts to display the graphics immediately,
without waiting for the currently running code (if any) to
return to the command line. Be careful, calling it from within
a loop will potentially launch a large number of external
viewer programs.
INPUT:
- ``kwds`` -- optional keyword arguments. See :meth:`plot` for
the description of available options.
OUTPUT:
This method does not return anything. Use :meth:`plot` if you
want to generate a graphics object that can be saved or
further transformed.
EXAMPLES::
sage: square = polytopes.hypercube(2)
sage: square.show(point='red') # optional - sage.plot
"""
self.plot(**kwds).show()
def tikz(self, view=[0, 0, 1], angle=0, scale=1,
edge_color='blue!95!black', facet_color='blue!95!black',
opacity=0.8, vertex_color='green', axis=False):
r"""
Return a string ``tikz_pic`` consisting of a tikz picture of ``self``
according to a projection ``view`` and an angle ``angle``
obtained via the threejs viewer.
INPUT:
- ``view`` - list (default: [0,0,1]) representing the rotation axis (see note below).
- ``angle`` - integer (default: 0) angle of rotation in degrees from 0 to 360 (see note
below).
- ``scale`` - integer (default: 1) specifying the scaling of the tikz picture.
- ``edge_color`` - string (default: 'blue!95!black') representing colors which tikz
recognizes.
- ``facet_color`` - string (default: 'blue!95!black') representing colors which tikz
recognizes.
- ``vertex_color`` - string (default: 'green') representing colors which tikz
recognizes.
- ``opacity`` - real number (default: 0.8) between 0 and 1 giving the opacity of
the front facets.
- ``axis`` - Boolean (default: False) whether to draw the axes at the origin.
OUTPUT:
- LatexExpr -- containing the TikZ picture.
.. NOTE::
This is a wrapper of a method of the projection object
`self.projection()`. See :meth:`~sage.geometry.polyhedron.plot.Projection.tikz`
for more detail.
The inputs ``view`` and ``angle`` can be obtained by visualizing it
using ``.show(aspect_ratio=1)``. This will open an interactive view
in your default browser, where you can rotate the polytope. Once
the desired view angle is found, click on the information icon in
the lower right-hand corner and select *Get Viewpoint*. This will
copy a string of the form '[x,y,z],angle' to your local clipboard.
Go back to Sage and type ``Img = P.tikz([x,y,z],angle)``.
The inputs ``view`` and ``angle`` can also be obtained from the
viewer Jmol::
1) Right click on the image
2) Select ``Console``
3) Select the tab ``State``
4) Scroll to the line ``moveto``
It reads something like::
moveto 0.0 {x y z angle} Scale
The ``view`` is then ``[x,y,z]``, ``angle`` is the given angle, and the
following number is the scale.
Jmol performs a rotation of ``angle`` degrees along the
vector [x,y,z] and shows the result from the z-axis.
EXAMPLES::
sage: co = polytopes.cuboctahedron()
sage: Img = co.tikz([0,0,1], 0)
sage: print('\n'.join(Img.splitlines()[:9]))
\begin{tikzpicture}%
[x={(1.000000cm, 0.000000cm)},
y={(0.000000cm, 1.000000cm)},
z={(0.000000cm, 0.000000cm)},
scale=1.000000,
back/.style={loosely dotted, thin},
edge/.style={color=blue!95!black, thick},
facet/.style={fill=blue!95!black,fill opacity=0.800000},
vertex/.style={inner sep=1pt,circle,draw=green!25!black,fill=green!75!black,thick}]
sage: print('\n'.join(Img.splitlines()[12:21]))
%% with the command: ._tikz_3d_in_3d and parameters:
%% view = [0, 0, 1]
%% angle = 0
%% scale = 1
%% edge_color = blue!95!black
%% facet_color = blue!95!black
%% opacity = 0.8
%% vertex_color = green
%% axis = False
sage: print('\n'.join(Img.splitlines()[22:26]))
%% Coordinate of the vertices:
%%
\coordinate (-1.00000, -1.00000, 0.00000) at (-1.00000, -1.00000, 0.00000);
\coordinate (-1.00000, 0.00000, -1.00000) at (-1.00000, 0.00000, -1.00000);
"""
return self.projection().tikz(view, angle, scale,
edge_color, facet_color,
opacity, vertex_color, axis)
def _rich_repr_(self, display_manager, **kwds):
r"""
Rich Output Magic Method
See :mod:`sage.repl.rich_output` for details.
EXAMPLES::
sage: from sage.repl.rich_output import get_display_manager
sage: dm = get_display_manager()
sage: polytopes.hypercube(2)._rich_repr_(dm)
OutputPlainText container
The ``supplemental_plot`` preference lets us control whether
this object is shown as text or picture+text::
sage: dm.preferences.supplemental_plot
'never'
sage: del dm.preferences.supplemental_plot
sage: polytopes.hypercube(3)
A 3-dimensional polyhedron in ZZ^3 defined as the convex hull of 8 vertices (use the .plot() method to plot)
sage: dm.preferences.supplemental_plot = 'never'
"""
prefs = display_manager.preferences
is_small = (self.ambient_dim() <= 2)
can_plot = (prefs.supplemental_plot != 'never')
plot_graph = can_plot and (prefs.supplemental_plot == 'always' or is_small)
# Under certain circumstances we display the plot as graphics
if plot_graph:
plot_kwds = dict(kwds)
plot_kwds.setdefault('title', repr(self))
output = self.plot(**plot_kwds)._rich_repr_(display_manager)
if output is not None:
return output
# create text for non-graphical output
if can_plot:
text = '{0} (use the .plot() method to plot)'.format(repr(self))
else:
text = repr(self)
# latex() produces huge tikz environment, override
tp = display_manager.types
if (prefs.text == 'latex' and tp.OutputLatex in display_manager.supported_output()):
return tp.OutputLatex(r'\text{{{0}}}'.format(text))
return tp.OutputPlainText(text)
@cached_method
def gale_transform(self):
r"""
Return the Gale transform of a polytope as described in the
reference below.
OUTPUT:
A list of vectors, the Gale transform. The dimension is the
dimension of the affine dependencies of the vertices of the
polytope.
EXAMPLES:
This is from the reference, for a triangular prism::
sage: p = Polyhedron(vertices = [[0,0],[0,1],[1,0]])
sage: p2 = p.prism()
sage: p2.gale_transform()
((-1, 0), (0, -1), (1, 1), (-1, -1), (1, 0), (0, 1))
REFERENCES:
Lectures in Geometric Combinatorics, R.R.Thomas, 2006, AMS Press.
.. SEEALSO::
:func:`~sage.geometry.polyhedron.library.gale_transform_to_polyhedron`.
TESTS::
sage: P = Polyhedron(rays=[[1,0,0]])
sage: P.gale_transform()
Traceback (most recent call last):
...
ValueError: not a polytope
Check that :trac:`29073` is fixed::
sage: P = polytopes.icosahedron(exact=False)
sage: sum(P.gale_transform()).norm() < 1e-15
True
"""
if not self.is_compact():
raise ValueError('not a polytope')
A = matrix(self.n_vertices(),
[[1]+x for x in self.vertex_generator()])
A = A.transpose()
A_ker = A.right_kernel_matrix(basis='computed')
return tuple(A_ker.columns())
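    # A short sketch (comment only, mirroring the implementation above): the Gale
    # transform can be read off from the right kernel of the homogenized vertex
    # matrix, e.g. for the triangular prism from the docstring:
    #
    #   sage: P = Polyhedron(vertices=[[0, 0], [0, 1], [1, 0]]).prism()
    #   sage: A = matrix(P.n_vertices(), [[1] + x for x in P.vertex_generator()]).transpose()
    #   sage: tuple(A.right_kernel_matrix(basis='computed').columns()) == P.gale_transform()
    #   True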
def _test_gale_transform(self, tester=None, **options):
r"""
Run tests on the method :meth:`.gale_transform` and its inverse
:meth:`~sage.geometry.polyhedron.library.gale_transform_to_polytope`.
TESTS::
sage: polytopes.cross_polytope(3)._test_gale_transform()
"""
if tester is None:
tester = self._tester(**options)
if not self.is_compact():
with tester.assertRaises(ValueError):
self.gale_transform()
return
# Check :trac:`29073`.
if not self.base_ring().is_exact() and self.ambient_dim() > 0:
g = self.gale_transform()
tester.assertTrue(sum(g).norm() < 1e-10 or sum(g).norm()/matrix(g).norm() < 1e-13)
return
# Prevent very long doctests.
if self.n_vertices() + self.n_rays() > 50 or self.n_facets() > 50:
return
if not self.is_empty():
# ``gale_transform_to_polytope`` needs at least one vertex to work.
from sage.geometry.polyhedron.library import gale_transform_to_polytope
g = self.gale_transform()
P = gale_transform_to_polytope(g, base_ring=self.base_ring(), backend=self.backend())
try:
import sage.graphs.graph
except ImportError:
pass
else:
tester.assertTrue(self.is_combinatorially_isomorphic(P))
def projection(self, projection=None):
r"""
Return a projection object.
INPUT:
- ``projection`` -- a projection function
OUTPUT:
The identity projection. This is useful for plotting
polyhedra.
.. SEEALSO::
:meth:`~sage.geometry.polyhedron.base.Polyhedron_base.schlegel_projection` for a more interesting projection.
EXAMPLES::
sage: p = polytopes.hypercube(3)
sage: proj = p.projection()
sage: proj
The projection of a polyhedron into 3 dimensions
"""
from .plot import Projection
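        # Note: the assignment below stores the Projection object as an instance
        # attribute named ``projection``, which from then on shadows this method
        # on that particular instance.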
if projection is not None:
self.projection = Projection(self, projection)
else:
self.projection = Projection(self)
return self.projection
def render_solid(self, **kwds):
r"""
Return a solid rendering of a 2- or 3-d polytope.
EXAMPLES::
sage: p = polytopes.hypercube(3)
sage: p_solid = p.render_solid(opacity = .7)
sage: type(p_solid)
<class 'sage.plot.plot3d.index_face_set.IndexFaceSet'>
"""
proj = self.projection()
if self.ambient_dim() == 3:
return proj.render_solid_3d(**kwds)
if self.ambient_dim() == 2:
return proj.render_fill_2d(**kwds)
raise ValueError("render_solid is only defined for 2 and 3 dimensional polyhedra")
def render_wireframe(self, **kwds):
r"""
For polytopes in 2 or 3 dimensions, return the edges
as a list of lines.
EXAMPLES::
sage: p = Polyhedron([[1,2,],[1,1],[0,0]])
sage: p_wireframe = p.render_wireframe()
sage: p_wireframe._objects
[Line defined by 2 points, Line defined by 2 points, Line defined by 2 points]
"""
proj = self.projection()
if self.ambient_dim() == 3:
return proj.render_wireframe_3d(**kwds)
if self.ambient_dim() == 2:
return proj.render_outline_2d(**kwds)
raise ValueError("render_wireframe is only defined for 2 and 3 dimensional polyhedra")
def schlegel_projection(self, facet=None, position=None):
r"""
Return the Schlegel projection.
* The facet is orthonormally transformed into its affine hull.
* The position specifies a point coming out of the barycenter of the
facet from which the other vertices will be projected into the facet.
INPUT:
- ``facet`` -- a PolyhedronFace. The facet into which the Schlegel
diagram is created. The default is the first facet.
- ``position`` -- a positive number. Determines a relative distance
from the barycenter of ``facet``. A value close to 0 will place the
projection point close to the facet and a large value further away.
Default is `1`. If the given value is too large, an error is raised.
OUTPUT:
A :class:`~sage.geometry.polyhedron.plot.Projection` object.
EXAMPLES::
sage: p = polytopes.hypercube(3)
sage: sch_proj = p.schlegel_projection()
sage: schlegel_edge_indices = sch_proj.lines
sage: schlegel_edges = [sch_proj.coordinates_of(x) for x in schlegel_edge_indices]
sage: len([x for x in schlegel_edges if x[0][0] > 0])
8
The Schlegel projection preserves the convexity of facets, see :trac:`30015`::
sage: fcube = polytopes.hypercube(4)
sage: tfcube = fcube.face_truncation(fcube.faces(0)[0])
sage: tfcube.facets()[-1]
A 3-dimensional face of a Polyhedron in QQ^4 defined as the convex hull of 8 vertices
sage: sp = tfcube.schlegel_projection(tfcube.facets()[-1])
sage: sp.plot() # optional - sage.plot
Graphics3d Object
The same truncated cube, but projected into the tetrahedral facet::
sage: tfcube.facets()[4]
A 3-dimensional face of a Polyhedron in QQ^4 defined as the convex hull of 4 vertices
sage: sp = tfcube.schlegel_projection(tfcube.facets()[4])
sage: sp.plot() # optional - sage.plot
Graphics3d Object
Different values of ``position`` change the projection::
sage: sp = tfcube.schlegel_projection(tfcube.facets()[4],1/2)
sage: sp.plot() # optional - sage.plot
Graphics3d Object
sage: sp = tfcube.schlegel_projection(tfcube.facets()[4],4)
sage: sp.plot() # optional - sage.plot
Graphics3d Object
A value which is too large gives a projection point that sees more than
one facet, resulting in an error::
sage: sp = tfcube.schlegel_projection(tfcube.facets()[4],5)
Traceback (most recent call last):
...
ValueError: the chosen position is too large
"""
proj = self.projection()
return proj.schlegel(facet, position)
def affine_hull(self, *args, **kwds):
r"""
Return the affine hull of ``self`` as a polyhedron.
EXAMPLES::
sage: half_plane_in_space = Polyhedron(ieqs=[(0,1,0,0)], eqns=[(0,0,0,1)])
sage: half_plane_in_space.affine_hull().Hrepresentation()
(An equation (0, 0, 1) x + 0 == 0,)
sage: polytopes.cube().affine_hull().is_universe()
True
"""
if args or kwds:
raise TypeError("the method 'affine_hull' does not take any parameters; perhaps you meant 'affine_hull_projection'")
if not self.inequalities():
return self
self_as_face = self.faces(self.dimension())[0]
return self_as_face.affine_tangent_cone()
@cached_method
def _affine_hull_projection(self, *,
as_convex_set=True, as_affine_map=True, as_section_map=True,
orthogonal=False, orthonormal=False,
extend=False, minimal=False):
r"""
Return ``self`` projected into its affine hull.
INPUT:
See :meth:`affine_hull_projection`.
OUTPUT:
An instance of :class:`~sage.geometry.convex_set.AffineHullProjectionData`.
See :meth:`affine_hull_projection` for details.
TESTS:
Check that :trac:`23355` is fixed::
sage: P = Polyhedron([[7]]); P
A 0-dimensional polyhedron in ZZ^1 defined as the convex hull of 1 vertex
sage: P.affine_hull_projection()
A 0-dimensional polyhedron in ZZ^0 defined as the convex hull of 1 vertex
sage: P.affine_hull_projection(orthonormal='True')
A 0-dimensional polyhedron in QQ^0 defined as the convex hull of 1 vertex
sage: P.affine_hull_projection(orthogonal='True')
A 0-dimensional polyhedron in QQ^0 defined as the convex hull of 1 vertex
Check that :trac:`24047` is fixed::
sage: P1 = Polyhedron(vertices=([[-1, 1], [0, -1], [0, 0], [-1, -1]]))
sage: P2 = Polyhedron(vertices=[[1, 1], [1, -1], [0, -1], [0, 0]])
sage: P = P1.intersection(P2)
sage: A, b = P.affine_hull_projection(as_affine_map=True, orthonormal=True, extend=True) # optional - sage.rings.number_field
sage: Polyhedron([(2,3,4)]).affine_hull_projection()
A 0-dimensional polyhedron in ZZ^0 defined as the convex hull of 1 vertex
Check that backend is preserved::
sage: polytopes.simplex(backend='field').affine_hull_projection().backend()
'field'
sage: P = Polyhedron(vertices=[[0,0], [1,0]], backend='field')
sage: P.affine_hull_projection(orthogonal=True, orthonormal=True, extend=True).backend() # optional - sage.rings.number_field
'field'
Check that :trac:`29116` is fixed::
sage: V =[
....: [1, 0, -1, 0, 0],
....: [1, 0, 0, -1, 0],
....: [1, 0, 0, 0, -1],
....: [1, 0, 0, +1, 0],
....: [1, 0, 0, 0, +1],
....: [1, +1, 0, 0, 0]
....: ]
sage: P = Polyhedron(V)
sage: P.affine_hull_projection()
A 4-dimensional polyhedron in ZZ^4 defined as the convex hull of 6 vertices
sage: P.affine_hull_projection(orthonormal=True)
Traceback (most recent call last):
...
ValueError: the base ring needs to be extended; try with "extend=True"
sage: P.affine_hull_projection(orthonormal=True, extend=True) # optional - sage.rings.number_field
A 4-dimensional polyhedron in AA^4 defined as the convex hull of 6 vertices
"""
result = AffineHullProjectionData()
if self.is_empty():
raise ValueError('affine hull projection of an empty polyhedron is undefined')
# handle trivial full-dimensional case
if self.ambient_dim() == self.dim():
if as_convex_set:
result.image = self
if as_affine_map:
identity = linear_transformation(matrix(self.base_ring(),
self.dim(),
self.dim(),
self.base_ring().one()))
result.projection_linear_map = result.section_linear_map = identity
result.projection_translation = result.section_translation = self.ambient_space().zero()
elif orthogonal or orthonormal:
# see TODO
if not self.is_compact():
raise NotImplementedError('"orthogonal=True" and "orthonormal=True" work only for compact polyhedra')
affine_basis = self.an_affine_basis()
v0 = affine_basis[0].vector()
# We implicitly translate the first vertex of the affine basis to zero.
vi = tuple(v.vector() - v0 for v in affine_basis[1:])
M = matrix(self.base_ring(), self.dim(), self.ambient_dim(), vi)
# Switch base_ring to AA if necessary,
# since gram_schmidt needs to be able to take square roots.
# Pick orthonormal basis and transform all vertices accordingly
# if the orthonormal transform makes it necessary, change base ring.
try:
A, G = M.gram_schmidt(orthonormal=orthonormal)
except TypeError:
if not extend:
raise ValueError('the base ring needs to be extended; try with "extend=True"')
M = matrix(AA, M)
A = M.gram_schmidt(orthonormal=orthonormal)[0]
if minimal:
from sage.rings.qqbar import number_field_elements_from_algebraics
new_ring = number_field_elements_from_algebraics(A.list(), embedded=True, minimal=True)[0]
A = A.change_ring(new_ring)
L = linear_transformation(A, side='right')
ambient_translation = -vector(A.base_ring(), affine_basis[0])
image_translation = A * ambient_translation
# Note the order. We compute ``A*self`` and then translate the image.
# ``A*self`` uses the incidence matrix and we avoid recomputation.
# Also, if the new base ring is ``AA``, we want to avoid computing the incidence matrix in that ring.
# ``convert=True`` takes care of the case, where there might be no coercion (``AA`` and quadratic field).
if as_convex_set:
result.image = self.linear_transformation(A, new_base_ring=A.base_ring()) + image_translation
if as_affine_map:
result.projection_linear_map = L
result.projection_translation = image_translation
if as_section_map:
L_dagger = linear_transformation(A.transpose() * (A * A.transpose()).inverse(), side='right')
result.section_linear_map = L_dagger
result.section_translation = v0.change_ring(A.base_ring())
else:
# translate one vertex to the origin
v0 = self.vertices()[0].vector()
gens = []
for v in self.vertices()[1:]:
gens.append(v.vector() - v0)
for r in self.rays():
gens.append(r.vector())
for l in self.lines():
gens.append(l.vector())
# Pick subset of coordinates to coordinatize the affine span
M = matrix(gens)
pivots = M.pivots()
A = matrix(self.base_ring(), len(pivots), self.ambient_dim(),
[[1 if j == i else 0 for j in range(self.ambient_dim())] for i in pivots])
if as_affine_map:
image_translation = vector(self.base_ring(), self.dim())
L = linear_transformation(A, side='right')
result.projection_linear_map = L
result.projection_translation = image_translation
if as_convex_set:
result.image = A*self
if as_section_map:
if self.dim():
B = M.transpose()/(A*M.transpose())
else:
B = matrix(self.ambient_dim(), 0)
L_section = linear_transformation(B, side='right')
result.section_linear_map = L_section
result.section_translation = v0 - L_section(L(v0) + image_translation)
return result
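    # A short sketch (comment only, not an executed doctest) of the orthogonal/
    # orthonormal branch above for the segment from (1, 0) to (0, 1): translate
    # one point of an affine basis to the origin, Gram-Schmidt the spanning
    # vectors over ``AA``, and apply the resulting matrix and induced translation.
    #
    #   sage: L = Polyhedron([[1, 0], [0, 1]])
    #   sage: basis = L.an_affine_basis()
    #   sage: v0 = basis[0].vector()
    #   sage: M = matrix(AA, [v.vector() - v0 for v in basis[1:]])
    #   sage: A = M.gram_schmidt(orthonormal=True)[0]
    #   sage: L.linear_transformation(A, new_base_ring=AA) + A * (-v0)
    #   A 1-dimensional polyhedron in AA^1 defined as the convex hull of 2 vertices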
def affine_hull_projection(self,
as_polyhedron=None, as_affine_map=False,
orthogonal=False, orthonormal=False,
extend=False, minimal=False,
return_all_data=False,
*, as_convex_set=None):
r"""
Return the polyhedron projected into its affine hull.
Each polyhedron is contained in some smallest affine subspace
(possibly the entire ambient space) -- its affine hull. We
provide an affine linear map that projects the ambient space of
the polyhedron to the standard Euclidean space of the dimension of the
the polyhedron, which restricts to a bijection from the affine
hull.
The projection map is not unique; some parameters control the
choice of the map. Other parameters control the output of the
function.
INPUT:
- ``as_polyhedron`` (or ``as_convex_set``) -- (boolean or the default
``None``) and
- ``as_affine_map`` -- (boolean, default ``False``) control the output
The default ``as_polyhedron=None`` translates to
``as_polyhedron=not as_affine_map``,
therefore to ``as_polyhedron=True`` if nothing is specified.
If exactly one of either ``as_polyhedron`` or ``as_affine_map`` is
set, then either a polyhedron or the affine transformation
is returned. The affine transformation
sends the embedded polytope to a full-dimensional one.
It is given as a pair ``(A, b)``, where A is a linear transformation
and `b` is a vector, and the affine transformation sends ``v`` to
``A(v)+b``.
If both ``as_polyhedron`` and ``as_affine_map`` are set, then
both are returned, encapsulated in an instance of
:class:`~sage.geometry.convex_set.AffineHullProjectionData`.
- ``return_all_data`` -- (boolean, default ``False``)
If set, then ``as_polyhedron`` and ``as_affine_map`` will be set
(possibly overridden), and additional (internal) data concerning
the transformation is returned. Everything is encapsulated
in an instance of
:class:`~sage.geometry.convex_set.AffineHullProjectionData` in
this case.
- ``orthogonal`` -- boolean (default: ``False``); if ``True``,
provide an orthogonal transformation.
- ``orthonormal`` -- boolean (default: ``False``); if ``True``,
provide an orthonormal transformation. If the base ring does not
provide the necessary square roots, the extend parameter
needs to be set to ``True``.
- ``extend`` -- boolean (default: ``False``); if ``True``,
allow base ring to be extended if necessary. This becomes
relevant when requiring an orthonormal transformation.
- ``minimal`` -- boolean (default: ``False``); if ``True``,
when doing an extension, it computes the minimal base ring of the
extension, otherwise the base ring is ``AA``.
OUTPUT:
A full-dimensional polyhedron or an affine transformation,
depending on the parameters ``as_polyhedron`` and ``as_affine_map``,
or an instance of :class:`~sage.geometry.convex_set.AffineHullProjectionData`
containing all data (parameter ``return_all_data``).
If the output is an instance of
:class:`~sage.geometry.convex_set.AffineHullProjectionData`, the
following fields may be set:
- ``image`` -- the projection of the original polyhedron
- ``projection_map`` -- the affine map as a pair whose first component
is a linear transformation and its second component a shift;
see above.
- ``section_map`` -- an affine map as a pair whose first component
is a linear transformation and its second component a shift.
It maps the codomain of ``affine_map`` to the affine hull of
``self``. It is a right inverse of ``projection_map``.
Note that all of these data are compatible.
.. TODO::
- make the parameters ``orthogonal`` and ``orthonormal`` work
with unbounded polyhedra.
EXAMPLES::
sage: triangle = Polyhedron([(1,0,0), (0,1,0), (0,0,1)]); triangle
A 2-dimensional polyhedron in ZZ^3 defined as the convex hull of 3 vertices
sage: triangle.affine_hull_projection()
A 2-dimensional polyhedron in ZZ^2 defined as the convex hull of 3 vertices
sage: half3d = Polyhedron(vertices=[(3,2,1)], rays=[(1,0,0)])
sage: half3d.affine_hull_projection().Vrepresentation()
(A ray in the direction (1), A vertex at (3))
The resulting affine hulls depend on the parameters ``orthogonal`` and ``orthonormal``::
sage: L = Polyhedron([[1,0],[0,1]]); L
A 1-dimensional polyhedron in ZZ^2 defined as the convex hull of 2 vertices
sage: A = L.affine_hull_projection(); A
A 1-dimensional polyhedron in ZZ^1 defined as the convex hull of 2 vertices
sage: A.vertices()
(A vertex at (0), A vertex at (1))
sage: A = L.affine_hull_projection(orthogonal=True); A
A 1-dimensional polyhedron in QQ^1 defined as the convex hull of 2 vertices
sage: A.vertices()
(A vertex at (0), A vertex at (2))
sage: A = L.affine_hull_projection(orthonormal=True) # optional - sage.rings.number_field
Traceback (most recent call last):
...
ValueError: the base ring needs to be extended; try with "extend=True"
sage: A = L.affine_hull_projection(orthonormal=True, extend=True); A # optional - sage.rings.number_field
A 1-dimensional polyhedron in AA^1 defined as the convex hull of 2 vertices
sage: A.vertices() # optional - sage.rings.number_field
(A vertex at (1.414213562373095?), A vertex at (0.?e-18))
More generally::
sage: S = polytopes.simplex(); S
A 3-dimensional polyhedron in ZZ^4 defined as the convex hull of 4 vertices
sage: S.vertices()
(A vertex at (0, 0, 0, 1),
A vertex at (0, 0, 1, 0),
A vertex at (0, 1, 0, 0),
A vertex at (1, 0, 0, 0))
sage: A = S.affine_hull_projection(); A
A 3-dimensional polyhedron in ZZ^3 defined as the convex hull of 4 vertices
sage: A.vertices()
(A vertex at (0, 0, 0),
A vertex at (0, 0, 1),
A vertex at (0, 1, 0),
A vertex at (1, 0, 0))
sage: A = S.affine_hull_projection(orthogonal=True); A
A 3-dimensional polyhedron in QQ^3 defined as the convex hull of 4 vertices
sage: A.vertices()
(A vertex at (0, 0, 0),
A vertex at (2, 0, 0),
A vertex at (1, 3/2, 0),
A vertex at (1, 1/2, 4/3))
sage: A = S.affine_hull_projection(orthonormal=True, extend=True); A
A 3-dimensional polyhedron in AA^3 defined as the convex hull of 4 vertices
sage: A.vertices()
(A vertex at (0.7071067811865475?, 0.4082482904638630?, 1.154700538379252?),
A vertex at (0.7071067811865475?, 1.224744871391589?, 0.?e-18),
A vertex at (1.414213562373095?, 0.?e-18, 0.?e-18),
A vertex at (0.?e-18, 0.?e-18, 0.?e-18))
With the parameter ``minimal`` one can get a minimal base ring::
sage: s = polytopes.simplex(3)
sage: s_AA = s.affine_hull_projection(orthonormal=True, extend=True)
sage: s_AA.base_ring()
Algebraic Real Field
sage: s_full = s.affine_hull_projection(orthonormal=True, extend=True, minimal=True)
sage: s_full.base_ring()
Number Field in a with defining polynomial y^4 - 4*y^2 + 1 with a = 0.5176380902050415?
More examples with the ``orthonormal`` parameter::
sage: P = polytopes.permutahedron(3); P # optional - sage.combinat # optional - sage.rings.number_field
A 2-dimensional polyhedron in ZZ^3 defined as the convex hull of 6 vertices
sage: set([F.as_polyhedron().affine_hull_projection(orthonormal=True, extend=True).volume() for F in P.affine_hull_projection().faces(1)]) == {1, sqrt(AA(2))} # optional - sage.combinat # optional - sage.rings.number_field
True
sage: set([F.as_polyhedron().affine_hull_projection(orthonormal=True, extend=True).volume() for F in P.affine_hull_projection(orthonormal=True, extend=True).faces(1)]) == {sqrt(AA(2))} # optional - sage.combinat # optional - sage.rings.number_field
True
sage: D = polytopes.dodecahedron() # optional - sage.rings.number_field
sage: F = D.faces(2)[0].as_polyhedron() # optional - sage.rings.number_field
sage: F.affine_hull_projection(orthogonal=True) # optional - sage.rings.number_field
A 2-dimensional polyhedron in (Number Field in sqrt5 with defining polynomial x^2 - 5 with sqrt5 = 2.236067977499790?)^2 defined as the convex hull of 5 vertices
sage: F.affine_hull_projection(orthonormal=True, extend=True) # optional - sage.rings.number_field
A 2-dimensional polyhedron in AA^2 defined as the convex hull of 5 vertices
sage: K.<sqrt2> = QuadraticField(2) # optional - sage.rings.number_field
sage: P = Polyhedron([2*[K.zero()],2*[sqrt2]]); P # optional - sage.rings.number_field
A 1-dimensional polyhedron in (Number Field in sqrt2 with defining polynomial x^2 - 2 with sqrt2 = 1.414213562373095?)^2 defined as the convex hull of 2 vertices
sage: P.vertices() # optional - sage.rings.number_field
(A vertex at (0, 0), A vertex at (sqrt2, sqrt2))
sage: A = P.affine_hull_projection(orthonormal=True); A # optional - sage.rings.number_field
A 1-dimensional polyhedron in (Number Field in sqrt2 with defining polynomial x^2 - 2 with sqrt2 = 1.414213562373095?)^1 defined as the convex hull of 2 vertices
sage: A.vertices() # optional - sage.rings.number_field
(A vertex at (0), A vertex at (2))
sage: K.<sqrt3> = QuadraticField(3) # optional - sage.rings.number_field
sage: P = Polyhedron([2*[K.zero()],2*[sqrt3]]); P # optional - sage.rings.number_field
A 1-dimensional polyhedron in (Number Field in sqrt3 with defining polynomial x^2 - 3 with sqrt3 = 1.732050807568878?)^2 defined as the convex hull of 2 vertices
sage: P.vertices() # optional - sage.rings.number_field
(A vertex at (0, 0), A vertex at (sqrt3, sqrt3))
sage: A = P.affine_hull_projection(orthonormal=True) # optional - sage.rings.number_field
Traceback (most recent call last):
...
ValueError: the base ring needs to be extended; try with "extend=True"
sage: A = P.affine_hull_projection(orthonormal=True, extend=True); A # optional - sage.rings.number_field
A 1-dimensional polyhedron in AA^1 defined as the convex hull of 2 vertices
sage: A.vertices() # optional - sage.rings.number_field
(A vertex at (0), A vertex at (2.449489742783178?))
sage: sqrt(6).n() # optional - sage.rings.number_field
2.44948974278318
The affine hull is combinatorially equivalent to the input::
sage: P.is_combinatorially_isomorphic(P.affine_hull_projection()) # optional - sage.rings.number_field
True
sage: P.is_combinatorially_isomorphic(P.affine_hull_projection(orthogonal=True)) # optional - sage.rings.number_field
True
sage: P.is_combinatorially_isomorphic(P.affine_hull_projection(orthonormal=True, extend=True)) # optional - sage.rings.number_field
True
The ``orthonormal=True`` parameter preserves volumes;
it provides an isometric copy of the polyhedron::
sage: Pentagon = polytopes.dodecahedron().faces(2)[0].as_polyhedron() # optional - sage.rings.number_field
sage: P = Pentagon.affine_hull_projection(orthonormal=True, extend=True) # optional - sage.rings.number_field
sage: _, c= P.is_inscribed(certificate=True) # optional - sage.rings.number_field
sage: c # optional - sage.rings.number_field
(0.4721359549995794?, 0.6498393924658126?)
sage: circumradius = (c-vector(P.vertices()[0])).norm() # optional - sage.rings.number_field
sage: p = polytopes.regular_polygon(5) # optional - sage.rings.number_field
sage: p.volume() # optional - sage.rings.number_field
2.377641290737884?
sage: P.volume() # optional - sage.rings.number_field
1.53406271079097?
sage: p.volume()*circumradius^2 # optional - sage.rings.number_field
1.534062710790965?
sage: P.volume() == p.volume()*circumradius^2 # optional - sage.rings.number_field
True
One can also use the ``orthogonal`` parameter to calculate volumes;
in this case we don't need to switch base rings. One has to divide
by the square root of the determinant of the linear part of the
affine transformation times its transpose::
sage: Pentagon = polytopes.dodecahedron().faces(2)[0].as_polyhedron() # optional - sage.rings.number_field
sage: Pnormal = Pentagon.affine_hull_projection(orthonormal=True, extend=True) # optional - sage.rings.number_field
sage: Pgonal = Pentagon.affine_hull_projection(orthogonal=True) # optional - sage.rings.number_field
sage: A, b = Pentagon.affine_hull_projection(orthogonal=True, as_affine_map=True) # optional - sage.rings.number_field
sage: Adet = (A.matrix().transpose()*A.matrix()).det() # optional - sage.rings.number_field
sage: Pnormal.volume() # optional - sage.rings.number_field
1.53406271079097?
sage: Pgonal.volume()/Adet.sqrt(extend=True) # optional - sage.rings.number_field
-80*(55*sqrt(5) - 123)/sqrt(-6368*sqrt(5) + 14240)
sage: Pgonal.volume()/AA(Adet).sqrt().n(digits=20) # optional - sage.rings.number_field
1.5340627107909646813
sage: AA(Pgonal.volume()^2) == (Pnormal.volume()^2)*AA(Adet) # optional - sage.rings.number_field
True
Another example with ``as_affine_map=True``::
sage: P = polytopes.permutahedron(4) # optional - sage.combinat # optional - sage.rings.number_field
sage: A, b = P.affine_hull_projection(orthonormal=True, as_affine_map=True, extend=True) # optional - sage.combinat # optional - sage.rings.number_field
sage: Q = P.affine_hull_projection(orthonormal=True, extend=True) # optional - sage.combinat # optional - sage.rings.number_field
sage: Q.center() # optional - sage.combinat # optional - sage.rings.number_field
(0.7071067811865475?, 1.224744871391589?, 1.732050807568878?)
sage: A(P.center()) + b == Q.center() # optional - sage.combinat # optional - sage.rings.number_field
True
For unbounded, non-full-dimensional polyhedra, the options ``orthogonal=True`` and ``orthonormal=True``
are not implemented::
sage: P = Polyhedron(ieqs=[[0, 1, 0], [0, 0, 1], [0, 0, -1]]); P
A 1-dimensional polyhedron in QQ^2 defined as the convex hull of 1 vertex and 1 ray
sage: P.is_compact()
False
sage: P.is_full_dimensional()
False
sage: P.affine_hull_projection(orthogonal=True)
Traceback (most recent call last):
...
NotImplementedError: "orthogonal=True" and "orthonormal=True" work only for compact polyhedra
sage: P.affine_hull_projection(orthonormal=True)
Traceback (most recent call last):
...
NotImplementedError: "orthogonal=True" and "orthonormal=True" work only for compact polyhedra
Setting ``as_affine_map`` to ``True``
without ``orthogonal`` or ``orthonormal`` set to ``True``::
sage: S = polytopes.simplex()
sage: S.affine_hull_projection(as_affine_map=True)
(Vector space morphism represented by the matrix:
[1 0 0]
[0 1 0]
[0 0 1]
[0 0 0]
Domain: Vector space of dimension 4 over Rational Field
Codomain: Vector space of dimension 3 over Rational Field,
(0, 0, 0))
If the polyhedron is full-dimensional, it is returned::
sage: polytopes.cube().affine_hull_projection()
A 3-dimensional polyhedron in ZZ^3 defined as the convex hull of 8 vertices
sage: polytopes.cube().affine_hull_projection(as_affine_map=True)
(Vector space morphism represented by the matrix:
[1 0 0]
[0 1 0]
[0 0 1]
Domain: Vector space of dimension 3 over Rational Field
Codomain: Vector space of dimension 3 over Rational Field,
(0, 0, 0))
Return polyhedron and affine map::
sage: S = polytopes.simplex(2)
sage: data = S.affine_hull_projection(orthogonal=True,
....: as_polyhedron=True,
....: as_affine_map=True); data
AffineHullProjectionData(image=A 2-dimensional polyhedron in QQ^2
defined as the convex hull of 3 vertices,
projection_linear_map=Vector space morphism represented by the matrix:
[ -1 -1/2]
[ 1 -1/2]
[ 0 1]
Domain: Vector space of dimension 3 over Rational Field
Codomain: Vector space of dimension 2 over Rational Field,
projection_translation=(1, 1/2),
section_linear_map=None,
section_translation=None)
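        As a worked check of the ``orthogonal`` volume recipe above: the linear
        part displayed here, viewed as the matrix `M`, satisfies
        ``M.transpose()*M == diagonal_matrix([2, 3/2])`` with determinant `3`,
        so the correction factor is `\sqrt{3}`; the projected triangle has area
        `3/2`, and dividing by `\sqrt{3}` recovers the intrinsic area
        `\sqrt{3}/2` of the standard `2`-simplex.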
Return all data::
sage: data = S.affine_hull_projection(orthogonal=True, return_all_data=True); data
AffineHullProjectionData(image=A 2-dimensional polyhedron in QQ^2
defined as the convex hull of 3 vertices,
projection_linear_map=Vector space morphism represented by the matrix:
[ -1 -1/2]
[ 1 -1/2]
[ 0 1]
Domain: Vector space of dimension 3 over Rational Field
Codomain: Vector space of dimension 2 over Rational Field,
projection_translation=(1, 1/2),
section_linear_map=Vector space morphism represented by the matrix:
[-1/2 1/2 0]
[-1/3 -1/3 2/3]
Domain: Vector space of dimension 2 over Rational Field
Codomain: Vector space of dimension 3 over Rational Field, section_translation=(1, 0, 0))
The section map is a right inverse of the projection map::
sage: data.image.linear_transformation(data.section_linear_map.matrix().transpose()) + data.section_translation == S
True
Same without ``orthogonal=True``::
sage: data = S.affine_hull_projection(return_all_data=True); data
AffineHullProjectionData(image=A 2-dimensional polyhedron in ZZ^2
defined as the convex hull of 3 vertices,
projection_linear_map=Vector space morphism represented by the matrix:
[1 0]
[0 1]
[0 0]
Domain: Vector space of dimension 3 over Rational Field
Codomain: Vector space of dimension 2 over Rational Field, projection_translation=(0, 0),
section_linear_map=Vector space morphism represented by the matrix:
[ 1 0 -1]
[ 0 1 -1]
Domain: Vector space of dimension 2 over Rational Field
Codomain: Vector space of dimension 3 over Rational Field, section_translation=(0, 0, 1))
sage: data.image.linear_transformation(data.section_linear_map.matrix().transpose()) + data.section_translation == S
True
::
sage: P0 = Polyhedron(
....: ieqs=[(0, -1, 0, 1, 1, 1), (0, 1, 1, 0, -1, -1), (0, -1, 1, 1, 0, 0),
....: (0, 1, 0, 0, 0, 0), (0, 0, 1, 1, -1, -1), (0, 0, 0, 0, 0, 1),
....: (0, 0, 0, 0, 1, 0), (0, 0, 0, 1, 0, -1), (0, 0, 1, 0, 0, 0)])
sage: P = P0.intersection(Polyhedron(eqns=[(-1, 1, 1, 1, 1, 1)]))
sage: P.dim()
4
sage: P.affine_hull_projection(orthogonal=True, as_affine_map=True)[0]
Vector space morphism represented by the matrix:
[ 0 0 0 1/3]
[ -2/3 -1/6 0 -1/12]
[ 1/3 -1/6 1/2 -1/12]
[ 0 1/2 0 -1/12]
[ 1/3 -1/6 -1/2 -1/12]
Domain: Vector space of dimension 5 over Rational Field
Codomain: Vector space of dimension 4 over Rational Field
"""
if as_polyhedron is not None:
as_convex_set = as_polyhedron
return super().affine_hull_projection(
as_convex_set=as_convex_set, as_affine_map=as_affine_map,
orthogonal=orthogonal, orthonormal=orthonormal,
extend=extend, minimal=minimal,
return_all_data=return_all_data)
def _test_affine_hull_projection(self, tester=None, verbose=False, **options):
r"""
Run tests on the method :meth:`.affine_hull_projection`.
TESTS::
sage: D = polytopes.dodecahedron() # optional - sage.rings.number_field
sage: D.facets()[0].as_polyhedron()._test_affine_hull_projection() # optional - sage.rings.number_field
"""
if tester is None:
tester = self._tester(**options)
if self.is_empty():
# Undefined, nothing to test
return
if self.n_vertices() > 30 or self.n_facets() > 30 or self.dim() > 6:
# Avoid very long doctests.
return
data_sets = [None]*4
data_sets[0] = self.affine_hull_projection(return_all_data=True)
if self.is_compact():
data_sets[1] = self.affine_hull_projection(return_all_data=True,
orthogonal=True,
extend=True)
data_sets[2] = self.affine_hull_projection(return_all_data=True,
orthonormal=True,
extend=True)
data_sets[3] = self.affine_hull_projection(return_all_data=True,
orthonormal=True,
extend=True,
minimal=True)
else:
data_sets = data_sets[:1]
for i, data in enumerate(data_sets):
if verbose:
print("Running test number {}".format(i))
M = data.projection_linear_map.matrix().transpose()
tester.assertEqual(self.linear_transformation(M, new_base_ring=M.base_ring())
+ data.projection_translation,
data.image)
M = data.section_linear_map.matrix().transpose()
if M.base_ring() is AA:
self_extend = self.change_ring(AA)
else:
self_extend = self
tester.assertEqual(data.image.linear_transformation(M)
+ data.section_translation,
self_extend)
if i == 0:
tester.assertEqual(data.image.base_ring(), self.base_ring())
else:
# Test whether the map is orthogonal.
M = data.projection_linear_map.matrix()
tester.assertTrue((M.transpose() * M).is_diagonal())
if i > 1:
# Test whether the map is orthonormal.
tester.assertTrue((M.transpose() * M).is_one())
if i == 3:
# Test that the extension is indeed minimal.
if self.base_ring() is not AA:
tester.assertIsNot(data.image.base_ring(), AA)
def affine_hull_manifold(self, name=None, latex_name=None, start_index=0, ambient_space=None,
ambient_chart=None, names=None, **kwds):
r"""
Return the affine hull of ``self`` as a manifold.
If ``self`` is full-dimensional, it is just the ambient Euclidean space.
Otherwise, it is a Riemannian submanifold of the ambient Euclidean space.
INPUT:
- ``ambient_space`` -- a :class:`~sage.manifolds.differentiable.examples.euclidean.EuclideanSpace`
of the ambient dimension (default: the manifold of ``ambient_chart``, if provided;
otherwise, a new instance of ``EuclideanSpace``).
- ``ambient_chart`` -- a chart on ``ambient_space``.
- ``names`` -- names for the coordinates on the affine hull.
- optional arguments accepted by :meth:`affine_hull_projection`.
The default chart is determined by the optional arguments of
:meth:`affine_hull_projection`.
EXAMPLES::
sage: triangle = Polyhedron([(1,0,0), (0,1,0), (0,0,1)]); triangle
A 2-dimensional polyhedron in ZZ^3 defined as the convex hull of 3 vertices
sage: A = triangle.affine_hull_manifold(name='A'); A
2-dimensional Riemannian submanifold A embedded in the Euclidean space E^3
sage: A.embedding().display()
A → E^3
(x0, x1) ↦ (x, y, z) = (t0 + x0, t0 + x1, t0 - x0 - x1 + 1)
sage: A.embedding().inverse().display()
E^3 → A
(x, y, z) ↦ (x0, x1) = (x, y)
sage: A.adapted_chart()
[Chart (E^3, (x0_E3, x1_E3, t0_E3))]
sage: A.normal().display()
n = 1/3*sqrt(3) e_x + 1/3*sqrt(3) e_y + 1/3*sqrt(3) e_z
sage: A.induced_metric() # Need to call this before volume_form
Riemannian metric gamma on the 2-dimensional Riemannian submanifold A embedded in the Euclidean space E^3
sage: A.volume_form()
2-form eps_gamma on the 2-dimensional Riemannian submanifold A embedded in the Euclidean space E^3
Orthogonal version::
sage: A = triangle.affine_hull_manifold(name='A', orthogonal=True); A
2-dimensional Riemannian submanifold A embedded in the Euclidean space E^3
sage: A.embedding().display()
A → E^3
(x0, x1) ↦ (x, y, z) = (t0 - 1/2*x0 - 1/3*x1 + 1, t0 + 1/2*x0 - 1/3*x1, t0 + 2/3*x1)
sage: A.embedding().inverse().display()
E^3 → A
(x, y, z) ↦ (x0, x1) = (-x + y + 1, -1/2*x - 1/2*y + z + 1/2)
Arrangement of affine hull of facets::
sage: D = polytopes.dodecahedron() # optional - sage.rings.number_field
sage: E3 = EuclideanSpace(3) # optional - sage.rings.number_field
sage: submanifolds = [ # optional - sage.rings.number_field
....: F.as_polyhedron().affine_hull_manifold(name=f'F{i}', orthogonal=True, ambient_space=E3)
....: for i, F in enumerate(D.facets())]
sage: sum(FM.plot({}, srange(-2, 2, 0.1), srange(-2, 2, 0.1), opacity=0.2) # not tested # optional - sage.plot # optional - sage.rings.number_field
....: for FM in submanifolds) + D.plot()
Graphics3d Object
Full-dimensional case::
sage: cube = polytopes.cube(); cube
A 3-dimensional polyhedron in ZZ^3 defined as the convex hull of 8 vertices
sage: cube.affine_hull_manifold()
Euclidean space E^3
"""
if ambient_space is None:
if ambient_chart is not None:
ambient_space = ambient_chart.manifold()
else:
from sage.manifolds.differentiable.examples.euclidean import EuclideanSpace
ambient_space = EuclideanSpace(self.ambient_dim(), start_index=start_index)
if ambient_space.dimension() != self.ambient_dim():
raise ValueError('ambient_space and ambient_chart must match the ambient dimension')
if self.is_full_dimensional():
return ambient_space
if ambient_chart is None:
ambient_chart = ambient_space.default_chart()
CE = ambient_chart
from sage.manifolds.manifold import Manifold
if name is None:
name, latex_name = self._affine_hull_name_latex_name()
H = Manifold(self.dim(), name, ambient=ambient_space, structure="Riemannian",
latex_name=latex_name, start_index=start_index)
if names is None:
names = tuple(f'x{i}' for i in range(self.dim()))
CH = H.chart(names=names)
data = self.affine_hull_projection(return_all_data=True, **kwds)
projection_matrix = data.projection_linear_map.matrix().transpose()
projection_translation_vector = data.projection_translation
section_matrix = data.section_linear_map.matrix().transpose()
section_translation_vector = data.section_translation
from sage.symbolic.ring import SR
        # We use the slacks of the (linearly independent) equations as the foliation parameters
foliation_parameters = vector(SR.var(f't{i}') for i in range(self.ambient_dim() - self.dim()))
normal_matrix = matrix(equation.A() for equation in self.equation_generator()).transpose()
slack_matrix = normal_matrix.pseudoinverse()
phi = H.diff_map(ambient_space, {(CH, CE):
(section_matrix * vector(CH._xx) + section_translation_vector
+ normal_matrix * foliation_parameters).list()})
phi_inv = ambient_space.diff_map(H, {(CE, CH):
(projection_matrix * vector(CE._xx) + projection_translation_vector).list()})
foliation_scalar_fields = {parameter:
ambient_space.scalar_field({CE: slack_matrix.row(i) * (vector(CE._xx) - section_translation_vector)})
for i, parameter in enumerate(foliation_parameters)}
H.set_embedding(phi, inverse=phi_inv,
var=list(foliation_parameters), t_inverse=foliation_scalar_fields)
return H
def _affine_hull_name_latex_name(self, name=None, latex_name=None):
r"""
Return the default name of the affine hull.
EXAMPLES::
sage: polytopes.cube()._affine_hull_name_latex_name('C', r'\square')
('aff_C', '\\mathop{\\mathrm{aff}}(\\square)')
sage: Polyhedron(vertices=[[0, 1], [1, 0]])._affine_hull_name_latex_name()
('aff_P', '\\mathop{\\mathrm{aff}}(P)')
"""
if name is None:
name = 'P'
if latex_name is None:
latex_name = name
operator = 'aff'
aff_name = f'{operator}_{name}'
aff_latex_name = r'\mathop{\mathrm{' + operator + '}}(' + latex_name + ')'
return aff_name, aff_latex_name
|
python
|
import discord
from discord.ext import commands
import traceback
import sys
import datetime
import asyncio
import random
from datetime import datetime
from storage import *
pat_gifs = [
"https://cdn.discordapp.com/attachments/670153232039018516/674299983117156362/1edd1db645f55aa7f2923838b5afabfc863fc109_hq.gif",
"https://cdn.discordapp.com/attachments/670153232039018516/674299989152890881/7MPC.gif",
"https://cdn.discordapp.com/attachments/670153232039018516/674299989559738378/2e27d5d124bc2a62ddeb5dc9e7a73dd8.gif",
"https://cdn.discordapp.com/attachments/670153232039018516/674299990386016257/48f70b7f0f0858254d0e50d68ef4bc4f443b74a7_hq.gif",
"https://cdn.discordapp.com/attachments/670153232039018516/674299995922628628/anime-head-pat-gif.gif",
"https://cdn.discordapp.com/attachments/670153232039018516/674299997248028712/a.gif",
"https://cdn.discordapp.com/attachments/670153232039018516/674300008031322114/e3e2588fbae9422f2bd4813c324b1298.gif",
"https://cdn.discordapp.com/attachments/670153232039018516/674300013492437014/giphy_1.gif",
"https://cdn.discordapp.com/attachments/670153232039018516/674300014427766801/FlimsyDeafeningGrassspider-small.gif",
"https://cdn.discordapp.com/attachments/670153232039018516/674300013509214228/giphy.gif",
"https://cdn.discordapp.com/attachments/670153232039018516/674300026150977563/tenor_1.gif",
"https://cdn.discordapp.com/attachments/670153232039018516/674300032303759360/tenor.gif",
"https://cdn.discordapp.com/attachments/670153232039018516/674300033440415754/unnamed.gif",
"https://cdn.discordapp.com/attachments/670153232039018516/674300032366804992/giphy_2.gif",
"https://cdn.discordapp.com/attachments/670153232039018516/674300037924126743/tumblr_n9g05o77tU1ttu8odo1_500.gif",
"https://cdn.discordapp.com/attachments/670153232039018516/674300047004925952/c0c1c5d15f8ad65a9f0aaf6c91a3811e.gif",
"https://cdn.discordapp.com/attachments/670153232039018516/674300051438305368/giphy_3.gif",
"https://cdn.discordapp.com/attachments/670153232039018516/674300056601362454/tenor_2.gif",
"https://cdn.discordapp.com/attachments/670153232039018516/674300062024597514/B7g8Vh.gif",
"https://cdn.discordapp.com/attachments/670153232039018516/674300069696241684/source_1.gif",
"https://cdn.discordapp.com/attachments/670153232039018516/674300074557177892/source.gif"
]
@bot.command(aliases=["pet"])
async def pat(ctx, user: discord.Member):
embed = discord.Embed(description="**{.message.author.display_name}** pats **{.display_name}**. <a:pat:691589024774750228>".format(ctx, user), color=0xFFFFFF, timestamp=datetime.utcnow())
embed.set_image(url=random.choice(pat_gifs))
embed.set_footer(text="© MommyBot by Shiki.", icon_url=bot.user.avatar_url)
await ctx.send(embed=embed)
@pat.error
async def pat_error(ctx, error):
if isinstance(error, commands.MissingRequiredArgument):
embed = discord.Embed(description="**babi** pats **{.message.author.display_name}**. <a:pat:691589024774750228>".format(ctx), color=0xFFFFFF, timestamp=datetime.utcnow())
embed.set_image(url=random.choice(pat_gifs))
embed.set_footer(text="© MommyBot by Shiki.", icon_url=bot.user.avatar_url)
await ctx.send(embed=embed)
elif isinstance(error, commands.BadArgument):
embed = discord.Embed(description="**babi** pats **{.message.author.display_name}**. <a:pat:691589024774750228>".format(ctx), color=0xFFFFFF, timestamp=datetime.utcnow())
embed.set_image(url=random.choice(pat_gifs))
embed.set_footer(text="© MommyBot by Shiki.", icon_url=bot.user.avatar_url)
await ctx.send(f"**{ctx.message.author.display_name}** member not found, I patted you instead", embed=embed)
else:
print('Ignoring exception in command pat:', file=sys.stderr)
traceback.print_exception(type(error), error, error.__traceback__, file=sys.stderr)
embed = discord.Embed(description="{}".format(error), color=0x000000)
embed.set_footer(text="© MommyBot by Shiki.", icon_url=bot.user.avatar_url)
await ctx.send("An error has occured. Detailed information below:", embed=embed)
|
python
|
import Tkinter as tk
import warnings
VAR_TYPES = {
int: tk.IntVar,
float: tk.DoubleVar,
str: tk.StringVar
}
class ParameterController(tk.Frame):
def __init__(self,parent, key, value):
tk.Frame.__init__(self, parent)
self.value_type = type(value)
self._var = VAR_TYPES[self.value_type]()
self._var.set(value)
self._label = tk.Label(self,text=key,justify=tk.LEFT,width=20)
self._label.pack(side=tk.LEFT,padx=5,anchor="e",fill=tk.BOTH)
validator = self.register(self.validator)
self._entry = tk.Entry(self,textvariable=self._var, validate='all',
validatecommand=(validator, '%P', '%s'))
self._entry.pack(side=tk.RIGHT,expand=1)
def set_bg(self,colour):
try:
self._entry.config(bg=colour)
except:
pass
def validator(self, value, last_value=None):
if not value.strip() and not self.value_type == str:
self.set_bg('red')
self.bell()
return True
else:
try:
self.value_type(value)
except Exception as error:
return False
else:
self.set_bg('white')
return True
def get(self):
return self._var.get()
def set(self,value):
if self.validator(value):
self._var.set(self.value_type(value))
class DictController(tk.Frame):
def __init__(self, parent, dict_):
tk.Frame.__init__(self, parent)
self._dict = {}
self.update(dict_)
def update(self,new_dict):
self._dict.update(new_dict)
for key,val in sorted(self._dict.items()):
controller = ParameterController(self,key,val)
controller.pack()
self._dict[key] = controller
def __getitem__(self,key):
return self._dict[key].get()
def __setitem__(self,key,value):
self._dict[key].set(value)
def as_dict(self):
output = {}
for key,val in self._dict.items():
try:
output[key] = val.get()
except ValueError:
raise ValueError("Invalid value for key '%s'"%key)
return output
if __name__ == "__main__":
test_dict = {
"Test1":"node name",
"Test2":90,
"Test3":123.
}
root = tk.Tk()
c = DictController(root,test_dict)
c.pack()
def print_vals():
for key in test_dict:
try:
print c.as_dict()
except ValueError as error:
warnings.warn(repr(error))
root.after(1000,print_vals)
root.after(4000,print_vals)
root.mainloop()
|
python
|
import factory
import factory.fuzzy
from user.models import User
from company.tests.factories import CompanyFactory
class UserFactory(factory.django.DjangoModelFactory):
sso_id = factory.Iterator(range(99999999))
name = factory.fuzzy.FuzzyText(length=12)
company_email = factory.LazyAttribute(
lambda supplier: '%[email protected]' % supplier.name)
company = factory.SubFactory(CompanyFactory)
is_company_owner = True
class Meta:
model = User
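# --- Hedged usage note (illustration only; not part of the original factories) ---
# In a test with a configured Django database this is typically used as
#     user = UserFactory()            # create and persist a User (and its Company)
#     user = UserFactory.build()      # build an unsaved instance
# optionally overriding fields, e.g. UserFactory(is_company_owner=False).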
|
python
|
from typing import List
class Solution:
def combine(self, n: int, k: int) -> List[List[int]]:
# Using dfs to record all possible
def dfs(nums, path=None, res=[]):
if path is None:
path = []
if len(path) == k:
res += [path]
return res
for idx, num in enumerate(nums):
dfs(nums[idx+1:], path+[num], res)
return res
res = dfs(range(1, n+1))
return res
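# --- Hedged usage sketch (added for illustration; not part of the original solution) ---
# Quick check of the enumeration order for a small input.
if __name__ == "__main__":
    print(Solution().combine(4, 2))
    # [[1, 2], [1, 3], [1, 4], [2, 3], [2, 4], [3, 4]]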
|
python
|
from typing import Optional
import requests
from libgravatar import Gravatar
from bs4 import BeautifulSoup
def get_gravatar_image(email) -> Optional[str]:
"""Only will return a url if the user exists and is correct on gravatar, otherwise None"""
g = Gravatar(email)
profile_url = g.get_profile()
res = requests.get(profile_url)
if res.status_code == 200:
return g.get_image()
return None
def get_github_repositories(github_username):
"""Only will return a url if the user exists and will return the number of repositories,
even if there are none will return 0"""
url = f'https://github.com/{github_username}'
response = requests.get(url)
if response.status_code == 200:
soup = BeautifulSoup(response.content, 'html.parser')
css_selector = 'div.UnderlineNav > nav > a:nth-child(2) > span'
try:
repositories_info = soup.select_one(css_selector)
return int(repositories_info.text)
except AttributeError:
pass
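# --- Hedged usage sketch (illustration only; not part of the original helpers) ---
# Both helpers hit the network, so the demo is guarded; the address and username below
# are placeholders, not accounts the original project refers to.
if __name__ == "__main__":
    print(get_gravatar_image("[email protected]"))   # URL of the avatar, or None
    print(get_github_repositories("octocat"))       # number of public repos, or None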
|
python
|
patterns = ['you cannot perform this operation as root']
def match(command):
if command.script_parts and command.script_parts[0] != 'sudo':
return False
for pattern in patterns:
if pattern in command.output.lower():
return True
return False
def get_new_command(command):
return ' '.join(command.script_parts[1:])
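# --- Hedged illustration (not part of the original rule) ---
# FakeCommand is a minimal stand-in for thefuck's Command object, which exposes
# script_parts and output; it exists here only to show the rule's behaviour.
if __name__ == "__main__":
    from collections import namedtuple
    FakeCommand = namedtuple("FakeCommand", ["script_parts", "output"])
    cmd = FakeCommand(["sudo", "npm", "install"],
                      "You cannot perform this operation as root")
    print(match(cmd))            # True: output matches and the script starts with sudo
    print(get_new_command(cmd))  # "npm install"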
|
python
|
import json
import os
from py2neo import Graph
class GraphInstanceFactory:
def __init__(self, config_file_path):
"""
Initialize the graph factory from a config file path.
the config json file format example:
[
{
"server_name": "LocalHostServer",
"server_id": 1,
"host": "localhost",
"user": "neo4j",
"password": "123456",
"http_port": 7474,
"https_port": 7473,
"bolt_port": 7687
},
...
]
:param config_file_path: the config file path
"""
if not os.path.exists(config_file_path):
raise IOError("Neo4j config file not exist")
if not os.path.isfile(config_file_path):
raise IOError("Neo4j config path is not file")
if not config_file_path.endswith(".json"):
raise IOError("Neo4j config file is not json")
self.config_file_path = config_file_path
with open(self.config_file_path, 'r') as f:
self.configs = json.load(f)
## TODO: add more JSON format checks; raise an exception on duplicate server_name or server_id entries
def create_py2neo_graph_by_server_name(self, server_name):
"""
:param server_name: the server name in config file, can be used to find a unique neo4j graph instance location
:return: the Graph object in py2neo, None if create fail
"""
for config in self.configs:
if config["server_name"] == server_name:
return self.__create_py2neo_graph_by_config(config)
return None
def create_py2neo_graph_by_server_id(self, server_id):
"""
:param server_id: the server id in config file, can be used to find a unique neo4j graph instance location
:return: the Graph object in py2neo, None if create fail
"""
for config in self.configs:
if config["server_id"] == server_id:
return self.__create_py2neo_graph_by_config(config)
return None
def get_configs(self):
"""
get the config server list
:return: a list of config
"""
return self.configs
def get_config_file_path(self):
"""
get the config file path
:return: a string for config file path
"""
return self.config_file_path
def __create_py2neo_graph_by_config(self, config):
try:
return Graph(host=config['host'],
port=config['bolt_port'],
scheme="bolt",
user=config['user'],
password=config['password'])
except BaseException:
return Graph('bolt' + ':' + '//' + config['host'] + ':' + str(config['bolt_port']),
auth=(config['user'], config['password']))
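# --- Hedged usage sketch (illustration only; not part of the original module) ---
# Assumes a JSON config shaped like the docstring example exists at the (hypothetical)
# path below and that the named Neo4j server is running and reachable.
if __name__ == "__main__":
    factory = GraphInstanceFactory("neo4j_config.json")
    graph = factory.create_py2neo_graph_by_server_name("LocalHostServer")
    if graph is not None:
        print(graph.run("RETURN 1 AS ok").data())  # simple connectivity check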
|
python
|
from datetime import datetime
class mentions_self:
nom = 'я'; gen = ['меня', 'себя']; dat = ['мне', 'себе']
acc = ['меня', 'себя']; ins = ['мной', 'собой']; abl = ['мне','себе']
class mentions_unknown:
all = 'всех'
him = 'его'; her = 'её'; it = 'это'
they = 'их'; them = 'их'; us = 'нас'
name_cases = ['nom', 'gen', 'dat', 'acc', 'ins', 'abl']
everyone = ['@everyone', '@all', '@все']
def getDate(time=None) -> str:
    # Evaluate "now" at call time; a default of datetime.now() would be frozen at import time.
    if time is None:
        time = datetime.now()
    return f'{time.day:02d}.{time.month:02d}.{time.year}'
def getTime(time=None) -> str:
    if time is None:
        time = datetime.now()
    return f'{time.hour:02d}:{time.minute:02d}:{time.second:02d}.{time.microsecond}'
def getDateTime(time=None) -> str:
    return getDate(time) + ' ' + getTime(time)
def ischecktype(checklist, checktype) -> bool:
for i in checklist:
if isinstance(checktype, list) and type(i) in checktype:
return True
elif isinstance(checktype, type) and isinstance(i, checktype):
return True
return False
|
python
|
from flask import (
Blueprint,
render_template,
)
from sqlalchemy import desc, func, or_, text
from .. import db
from ..models import (
Video,
Vote,
GamePeriod,
Reward,
)
game = Blueprint(
'game',
__name__,
template_folder='templates'
)
@game.route('/')
def index():
q = """
SELECT *, rewards/videos AS rpv
FROM top_creators_30_days
ORDER BY rewards DESC
LIMIT :limit;
"""
rs = db.session.execute(q, {
"limit": 10,
})
leaderboard = [dict(zip(rs.keys(), item)) for item in rs.fetchall()]
return render_template(
'index.html',
leaderboard=leaderboard
)
@game.route('/periods')
def list_periods():
periods = \
(db.session.query(GamePeriod)
.order_by(desc(GamePeriod.end))
.limit(1000)
.all())
return render_template(
'periods.html',
periods=periods,
)
@game.route('/rewards')
def list_rewards():
rewards = \
(db.session.query(Reward)
.filter_by(creator_payable=True)
.order_by(desc(Reward.period_id))
.limit(1000)
.all())
return render_template(
'rewards.html',
rewards=rewards,
)
@game.route('/period/<int:period_id>')
def period_rewards(period_id):
period = db.session.query(GamePeriod).filter_by(id=period_id).one()
rewards_summary = \
(db.session.query(
Reward.video_id,
func.count(Reward.id),
func.sum(Reward.creator_reward).label('creator_rewards'),
func.sum(Reward.voter_reward))
.filter_by(period_id=period_id)
.group_by(Reward.video_id)
.order_by(text("creator_rewards desc"))
.all())
rewards = \
(db.session.query(Reward, Vote)
.filter_by(period_id=period_id)
.from_self()
.join(Vote, Vote.id == Reward.vote_id)
.order_by(desc(Reward.creator_reward))
.all())
return render_template(
'period_rewards.html',
period=period,
rewards=rewards,
rewards_summary=rewards_summary,
)
@game.route('/payment/<string:txid>')
def explain_payment(txid):
rewards = \
(db.session.query(Reward)
.filter(or_(Reward.creator_txid == txid, Reward.voter_txid == txid))
.order_by(desc(Reward.period_id))
.all())
return render_template(
'payment.html',
txid=txid,
rewards=rewards,
)
@game.route('/votes/<string:video_id>')
def video_votes(video_id: str):
video = db.session.query(Video).filter_by(id=video_id).one()
votes = \
(db.session.query(Vote)
.filter_by(video_id=video_id)
.order_by(desc(Vote.token_amount))
.all())
rewards = \
(db.session.query(Reward, Vote)
.filter_by(video_id=video_id)
.join(Vote)
.order_by(desc(Reward.creator_reward))
.all())
period = None
summary = None
if rewards:
period_id = rewards[0][0].period_id
period = db.session.query(GamePeriod).filter_by(id=period_id).one()
summary = \
(db.session.query(
func.count(Reward.id).label('rewards_count'),
func.sum(Reward.creator_reward).label('creator_rewards'),
func.sum(Reward.voter_reward).label('voter_rewards'))
.filter_by(video_id=video_id, creator_payable=True)
.one())
return render_template(
'video_votes.html',
video=video,
votes=votes,
rewards=rewards,
period=period,
summary=summary,
)
@game.route('/voter/<string:eth_address>')
def voter_activity(eth_address: str):
votes = \
(db.session.query(Vote)
.filter_by(eth_address=eth_address)
.order_by(desc(Vote.created_at))
.limit(100)
.all())
return render_template(
'voter.html',
eth_address=eth_address,
votes=votes,
)
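# --- Hedged usage note (illustration only; not part of the original blueprint) ---
# The blueprint is presumably registered on the Flask app elsewhere in the project,
# along the lines of:
#     app.register_blueprint(game)
# after which the routes above are served from the application root.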
|
python
|
import torch
import torch.nn.utils.rnn as rnn
import numpy as np
import pandas
from torch.utils.data import Dataset
from sklearn.preprocessing import LabelEncoder
from parsers.spacy_wrapper import spacy_whitespace_parser as spacy_ws
from common.symbols import SPACY_POS_TAGS
import json
import transformers
from transformers import BertForTokenClassification, BertConfig, BertTokenizer
class OpenIE_CONLL_Dataset(Dataset):
def __init__(self, file_path, emb, sep='\t', sent_maxlen=300, label_map=None):
'''
data is a list of triples (according to data keys)
label is a list of int
'''
self.file_path = file_path
self.sep = sep
self.emb = emb
self.sent_maxlen = sent_maxlen
self.label_map = label_map
if label_map is None:
self.label_map = LabelEncoder()
self.classes = set()
self.data = []
self.labels = []
self.data_keys = ["word_inputs", "predicate_inputs", "postags_inputs"]
self.build()
def __getitem__(self, i):
x = []
for key in self.data_keys:
datum = self.data[key][i]
x.append(datum)
return x, self.labels[i]
def __len__(self):
return len(self.labels)
def collate(self, data):
x = [[],[],[]]
y = []
for i in data:
for j in range(len(i[0])):
x[j].append(torch.LongTensor(i[0][j]))
y.append(torch.LongTensor(i[1]))
return x, y
def build(self):
"""
Load a supervised OIE dataset from file
"""
df = pandas.read_csv(self.file_path,
sep = self.sep,
header = 0,
keep_default_na = False)
self.label_map.fit(df.label.values)
# Split according to sentences and encode
sents = self.get_sents_from_df(df)
self.data = self.encode_inputs(sents)
self.labels = self.encode_outputs(sents)
def get_sents_from_df(self, df):
"""
Split a data frame by rows according to the sentences
"""
return [df[df.run_id == run_id]
for run_id
in sorted(set(df.run_id.values))]
def encode_inputs(self, sents):
"""
Given a dataframe which is already split to sentences,
encode inputs for rnn classification.
Should return a dictionary of sequences of sample of length maxlen.
"""
word_inputs = []
pred_inputs = []
pos_inputs = []
# Preproc to get all preds per run_id
# Sanity check - make sure that all sents agree on run_id
assert(all([len(set(sent.run_id.values)) == 1
for sent in sents]))
run_id_to_pred = dict([(int(sent.run_id.values[0]),
self.get_head_pred_word(sent))
for sent in sents])
# Construct a mapping from running word index to pos
word_id_to_pos = {}
for sent in sents:
indices = sent.index.values
words = sent.word.values
for index, word in zip(indices,
spacy_ws(" ".join(words))):
word_id_to_pos[index] = word.tag_
fixed_size_sents = sents # removed
for sent in fixed_size_sents:
assert(len(set(sent.run_id.values)) == 1)
word_indices = sent.index.values
sent_words = sent.word.values
sent_str = " ".join(sent_words)
pos_tags_encodings = [(SPACY_POS_TAGS.index(word_id_to_pos[word_ind]) \
if word_id_to_pos[word_ind] in SPACY_POS_TAGS \
else 0)
for word_ind
in word_indices]
for hh in pos_tags_encodings:
if hh > 55:
print(pos_tags_encodings)
word_encodings = [self.emb.get_word_index(w)
for w in sent_words]
# Same pred word encodings for all words in the sentence
pred_word = run_id_to_pred[int(sent.run_id.values[0])]
pred_word_encodings = [self.emb.get_word_index(pred_word)
for _ in sent_words]
word_inputs.append(word_encodings)
pred_inputs.append(pred_word_encodings)
pos_inputs.append(pos_tags_encodings)
# Pad / truncate to desired maximum length
# NOTE: removed pad in reimplementation
ret = {}
for name, sequence in zip(["word_inputs", "predicate_inputs", "postags_inputs"],
[word_inputs, pred_inputs, pos_inputs]):
ret[name] = []
for samples in truncate_sequences(sequence,
maxlen = self.sent_maxlen):
ret[name].append(samples)
return {k: np.array(v) for k, v in ret.items()}
def encode_outputs(self, sents):
"""
Given a dataframe split to sentences, encode outputs for rnn classification.
Should return a list sequence of sample of length maxlen.
"""
output_encodings = []
# Encode outputs
for sent in sents:
output_encodings.append(list(self.transform_labels(sent.label.values)))
return truncate_sequences(output_encodings, maxlen=self.sent_maxlen)
def transform_labels(self, labels):
"""
Encode a list of textual labels
"""
# Fallback:
return self.label_map.transform(labels)
def num_of_classes(self):
if self.label_map is not None:
return len(self.label_map.classes_)
else:
print("encoder not instantiated for num of classes")
return 0
def get_head_pred_word(self, full_sent):
"""
Get the head predicate word from a full sentence conll.
"""
assert(len(set(full_sent.head_pred_id.values)) == 1) # Sanity check
pred_ind = full_sent.head_pred_id.values[0]
return full_sent.word.values[pred_ind] \
if pred_ind != -1 \
else full_sent.pred.values[0].split(" ")[0]
class OIE_BERT_Dataset(Dataset):
def __init__(self, file_path, sep='\t', sent_maxlen=300, label_map=None, bert_model='bert-base-uncased'):
'''
data is a list of triples (according to data keys)
label is a list of int
'''
self.file_path = file_path
self.sep = sep
self.sent_maxlen = sent_maxlen
self.label_map = label_map
self.bert_model = bert_model
self.tokenizer = BertTokenizer.from_pretrained(self.bert_model)
if label_map is None:
self.label_map = LabelEncoder()
self.classes = set()
self.data = []
self.labels = []
self.data_keys = ["word_inputs", "predicate_inputs", "postags_inputs"]
self.build()
def __getitem__(self, i):
x = {}
for key in self.data.keys():
x[key] = self.data[key][i]
return x, self.labels[i]
def __len__(self):
return len(self.labels)
def collate(self, data):
x = {}
y = []
batch_max_len = 0
for i in data:
for key in self.data.keys():
x[key] = x.get(key, [])
if key == 'word_inputs':
x[key].append(i[0][key])
batch_max_len = max(batch_max_len, len(i[0][key]))
else:
x[key].append(torch.LongTensor(i[0][key]))
y.append(torch.LongTensor(i[1]))
x['predicate_inputs'] = torch.LongTensor(x['predicate_inputs'])
bert_in = self.tokenizer.batch_encode_plus(x['word_inputs'],
return_tensors='pt', pad_to_max_length=True,
max_length=batch_max_len, return_lengths=True,
add_special_tokens = False)
x['bert_inputs'] = bert_in
return x, y
def build(self):
"""
Load a supervised OIE dataset from file
"""
df = pandas.read_csv(self.file_path,
sep = self.sep,
header = 0,
keep_default_na = False)
self.label_map.fit(df.label.values)
# Split according to sentences and encode
sents = self.get_sents_from_df(df)
data, labels = self.encode_data(sents)
self.data = data
self.labels = labels
def get_sents_from_df(self, df):
"""
Split a data frame by rows according to the sentences
"""
return [df[df.run_id == run_id]
for run_id in sorted(set(df.run_id.values))]
def encode_data(self, sents):
"""
Given a dataframe which is already split to sentences,
Should return a tuple of (sequences of samples of length maxlen, sequences of labels).
"""
word_inputs = []
pred_inputs = []
pos_inputs = []
output_encodings = []
# Preproc to get all preds per run_id
# Sanity check - make sure that all sents agree on run_id
assert(all([len(set(sent.run_id.values)) == 1
for sent in sents]))
run_id_to_pred = dict([(int(sent.run_id.values[0]),
self.get_head_pred_id(sent))
for sent in sents])
# Construct a mapping from running word index to pos
word_id_to_pos = {}
for sent in sents:
indices = sent.index.values
words = sent.word.values
for index, word in zip(indices, spacy_ws(" ".join(words))):
word_id_to_pos[index] = word.tag_
for sent in sents:
assert(len(set(sent.run_id.values)) == 1)
word_indices = sent.index.values
sent_words = sent.word.values
pos_tags_encodings = [(SPACY_POS_TAGS.index(word_id_to_pos[word_ind]) \
if word_id_to_pos[word_ind] in SPACY_POS_TAGS \
else 0)
for word_ind in word_indices]
# Same pred word encodings for all words in the sentence
word_encodings = sent_words.tolist()
pred_id = run_id_to_pred[int(sent.run_id.values[0])]
pred_word_encodings = [pred_id]
if pred_id != -1:
word_inputs.append(word_encodings)
pred_inputs.append(pred_word_encodings)
pos_inputs.append(pos_tags_encodings)
output_encodings.append(list(self.transform_labels(sent.label.values)))
x = {}
for name, sequence in zip(self.data_keys,
[word_inputs, pred_inputs, pos_inputs]):
x[name] = []
for samples in truncate_sequences(sequence, maxlen = self.sent_maxlen):
x[name].append(samples)
y = truncate_sequences(output_encodings, maxlen=self.sent_maxlen)
return x, y
def transform_labels(self, labels):
"""
Encode a list of textual labels
"""
# Fallback:
return self.label_map.transform(labels)
def num_of_classes(self):
if self.label_map is not None:
return len(self.label_map.classes_)
else:
print("encoder not instantiated for num of classes")
return 0
def get_head_pred_word(self, full_sent):
"""
Get the head predicate word from a full sentence conll.
"""
assert(len(set(full_sent.head_pred_id.values)) == 1) # Sanity check
pred_ind = full_sent.head_pred_id.values[0]
return full_sent.word.values[pred_ind] \
if pred_ind != -1 \
else full_sent.pred.values[0].split(" ")[0]
def get_head_pred_id(self, full_sent):
# only get the id
assert(len(set(full_sent.head_pred_id.values)) == 1) # Sanity check
pred_ind = full_sent.head_pred_id.values[0]
if pred_ind == -1:
pred_word = full_sent.pred.values[0].split(" ")[0]
words = full_sent.word.values.tolist()
if pred_word in words:
pred_ind = words.index(pred_word) # might not capture the second or later occurrence
else:
pred_ind = -1 # will be filtered out
return pred_ind
def truncate_sequences(sequences, maxlen=None):
    """
    Truncate each sequence to at most ``maxlen`` items; if ``maxlen`` is None, return the sequences unchanged.
    """
    if maxlen is None:
        return list(sequences)
    return [seq[:maxlen] for seq in sequences]
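# --- Hedged usage sketch (illustration only; not part of the original module) ---
# Shows how OpenIE_CONLL_Dataset might be fed to a PyTorch DataLoader with its own
# collate method. The CoNLL file path is a placeholder, and _StubEmb stands in for the
# embedding wrapper the dataset expects (anything exposing get_word_index(word) -> int).
if __name__ == "__main__":
    from torch.utils.data import DataLoader

    class _StubEmb:
        def get_word_index(self, word):
            return 0  # map every word to index 0; a real embedding table would look it up

    dataset = OpenIE_CONLL_Dataset("train.oie.conll", emb=_StubEmb(), sep="\t")
    loader = DataLoader(dataset, batch_size=8, shuffle=True, collate_fn=dataset.collate)
    x, y = next(iter(loader))
    # x == [word_inputs, predicate_inputs, postags_inputs]; y is a list of label tensors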
|
python
|
# coding=utf-8
from selenium.webdriver.common.by import By
from view_models import certification_services, sidebar, ss_system_parameters
import re
import time
def test_ca_cs_details_view_cert(case, profile_class=None):
'''
:param case: MainController object
:param profile_class: string The fully qualified name of the Java class
:return:
'''
self = case
def view_cert():
'''Open "Certification services"'''
self.wait_until_visible(self.by_css(sidebar.CERTIFICATION_SERVICES_CSS)).click()
self.wait_jquery()
view_cert_data(self, profile_class=profile_class)
return view_cert
def view_cert_data(self, profile_class=None):
'''Get approved CA row'''
service_row = self.wait_until_visible(type=By.XPATH, element=certification_services.LAST_ADDED_CERT_XPATH)
'''Double click on approved CA row'''
self.double_click(service_row)
'''Click on "Edit button"'''
self.by_id(certification_services.DETAILS_BTN_ID).click()
self.log('UC TRUST_04 1.CS administrator selects to view the settings of a certification service.')
self.wait_until_visible(type=By.XPATH, element=certification_services.CA_SETTINGS_TAB_XPATH).click()
self.wait_jquery()
self.log(
'UC TRUST_04: 2.System displays the following settings. Usage restrictions for the certificates issued by the certification service.')
auth_checkbox = self.wait_until_visible(certification_services.EDIT_CA_AUTH_ONLY_CHECKBOX_XPATH,
By.XPATH).is_enabled()
self.is_true(auth_checkbox, msg='Authentication checkbox not found')
'''Click on authentication checkbox'''
self.wait_until_visible(certification_services.EDIT_CA_AUTH_ONLY_CHECKBOX_XPATH, By.XPATH).click()
self.log(
'UC TRUST_04: 2.System displays the following settings. The fully qualified name of the Java class that describes the certificate profile for certificates issued by the certification service.')
'''Get profile info'''
profile_info_area = self.wait_until_visible(type=By.XPATH,
element=certification_services.EDIT_CERTIFICATE_PROFILE_INFO_AREA_XPATH)
profile_info = profile_info_area.get_attribute("value")
'''Verify profile info'''
self.is_equal(profile_info, profile_class,
msg='The name of the Java class that describes the certificate profile is wrong')
self.log(
'UC TRUST_04: 2. The following user action options are displayed:edit the settings of the certification service')
'''Verify "Save" button'''
save_button_id = self.wait_until_visible(type=By.ID,
element=certification_services.SAVE_CA_SETTINGS_BTN_ID).is_enabled()
self.is_true(save_button_id, msg='"Save" button not found')
|
python
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# @Author: Niccolò Bonacchi
# @Date: Thursday, January 31st 2019, 1:15:46 pm
from pathlib import Path
import argparse
import ibllib.io.params as params
import oneibl.params
from alf.one_iblrig import create
from poop_count import main as poop
IBLRIG_DATA = Path().cwd().parent.parent.parent.parent / 'iblrig_data' / 'Subjects' # noqa
def main():
pfile = Path(params.getfile('one_params'))
if not pfile.exists():
oneibl.params.setup_alyx_params()
create(IBLRIG_DATA, dry=False)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Create session in Alyx')
parser.add_argument(
'--patch', help='Ask for a poop count before registering',
required=False, default=True, type=bool)
args = parser.parse_args()
if args.patch:
poop()
main()
else:
main()
print('done')
|
python
|
"""
Flask-Limiter extension for rate limiting
"""
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
from .errors import ConfigurationError, RateLimitExceeded
from .extension import Limiter, HEADERS
|
python
|
from foldrm import Classifier
import numpy as np
def acute():
attrs = ['a1', 'a2', 'a3', 'a4', 'a5', 'a6']
nums = ['a1']
model = Classifier(attrs=attrs, numeric=nums, label='label')
data = model.load_data('data/acute/acute.csv')
print('\n% acute dataset', np.shape(data))
return model, data
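# --- Hedged usage note (illustration only; not part of the original loaders) ---
# Every loader in this module follows the same pattern as acute() above: configure a
# Classifier with the dataset's attributes (numeric columns listed separately) and load
# its CSV, e.g.
#     model, data = acute()   # assumes data/acute/acute.csv is present locally
#     print(np.shape(data))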
def exercise():
attrs = ["age","gender","height_cm","weight_kg","body fat_%","diastolic","systolic","gripForce","sit and bend forward_cm","sit-ups counts","broad jump_cm"]
nums = ["age","height_cm","weight_kg","body fat_%","diastolic","systolic","gripForce","sit and bend forward_cm","sit-ups counts","broad jump_cm"]
model = Classifier(attrs=attrs, numeric=nums, label='class')
data = model.load_data('data/exercise/exercise.csv')
print('\n% exercise dataset', np.shape(data))
return model, data
def data_science():
attrs = ["HOURS_DATASCIENCE","HOURS_BACKEND","HOURS_FRONTEND","NUM_COURSES_BEGINNER_DATASCIENCE","NUM_COURSES_BEGINNER_BACKEND","NUM_COURSES_BEGINNER_FRONTEND","NUM_COURSES_ADVANCED_DATASCIENCE","NUM_COURSES_ADVANCED_BACKEND","NUM_COURSES_ADVANCED_FRONTEND","AVG_SCORE_DATASCIENCE","AVG_SCORE_BACKEND","AVG_SCORE_FRONTEND"]
nums = ["HOURS_DATASCIENCE","HOURS_BACKEND","HOURS_FRONTEND","NUM_COURSES_BEGINNER_DATASCIENCE","NUM_COURSES_BEGINNER_BACKEND","NUM_COURSES_BEGINNER_FRONTEND","NUM_COURSES_ADVANCED_DATASCIENCE","NUM_COURSES_ADVANCED_BACKEND","NUM_COURSES_ADVANCED_FRONTEND","AVG_SCORE_DATASCIENCE","AVG_SCORE_BACKEND","AVG_SCORE_FRONTEND"]
model = Classifier(attrs=attrs, numeric=nums, label='PROFILE')
data = model.load_data('data/data_science/data_science.csv')
print('\n% data_science dataset', np.shape(data))
return model, data
def air():
attrs = ["year","month","day","hour","PM2.5","PM10","SO2","NO2","CO","O3","TEMP","PRES","DEWP","RAIN","wd","WSPM"]
nums = ["year","month","day","hour","PM2.5","PM10","SO2","NO2","CO","O3","TEMP","PRES","DEWP","RAIN","WSPM"]
model = Classifier(attrs=attrs, numeric=nums, label='station')
data = model.load_data('data/air/air3.csv')
print('\n% air dataset', np.shape(data))
return model, data
def adult():
attrs = ['age','workclass','fnlwgt','education','education_num','marital_status','occupation','relationship',
'race','sex','capital_gain','capital_loss','hours_per_week','native_country']
nums = ['age','fnlwgt','education_num','capital_gain','capital_loss','hours_per_week']
model = Classifier(attrs=attrs, numeric=nums, label='label')
data = model.load_data('data/adult/adult.csv')
print('\n% adult dataset', np.shape(data))
return model, data
def autism():
attrs = ['a1', 'a2', 'a3', 'a4', 'a5', 'a6', 'a7', 'a8', 'a9', 'a10', 'age', 'gender', 'ethnicity', 'jaundice',
'pdd', 'used_app_before', 'relation']
nums = ['age']
model = Classifier(attrs=attrs, numeric=nums, label='label')
data = model.load_data('data/autism/autism.csv')
print('\n% autism dataset', np.shape(data))
return model, data
def breastw():
attrs = ['clump_thickness', 'cell_size_uniformity', 'cell_shape_uniformity', 'marginal_adhesion',
'single_epi_cell_size', 'bare_nuclei', 'bland_chromatin', 'normal_nucleoli', 'mitoses']
nums = attrs
model = Classifier(attrs=attrs, numeric=nums, label='label')
data = model.load_data('data/breastw/breastw.csv')
print('\n% breastw dataset', np.shape(data))
return model, data
def cars():
attrs = ['buying', 'maint', 'doors', 'persons', 'lugboot', 'safety']
model = Classifier(attrs=attrs, numeric=[], label='label')
data = model.load_data('data/cars/cars.csv')
print('\n% cars dataset', np.shape(data))
return model, data
def credit():
attrs = ['a1', 'a2', 'a3', 'a4', 'a5', 'a6', 'a7', 'a8', 'a9', 'a10', 'a11', 'a12', 'a13', 'a14', 'a15']
nums = ['a2', 'a3', 'a8', 'a11', 'a14', 'a15']
model = Classifier(attrs=attrs, numeric=nums, label='label')
data = model.load_data('data/credit/credit.csv')
print('\n% credit dataset', np.shape(data))
return model, data
def heart():
attrs = ['age', 'sex', 'chest_pain', 'blood_pressure', 'serum_cholestoral', 'fasting_blood_sugar',
'resting_electrocardiographic_results', 'maximum_heart_rate_achieved', 'exercise_induced_angina', 'oldpeak',
'slope', 'major_vessels', 'thal']
nums = ['age', 'blood_pressure', 'serum_cholestoral', 'maximum_heart_rate_achieved', 'oldpeak']
model = Classifier(attrs=attrs, numeric=nums, label='label')
data = model.load_data('data/heart/heart.csv')
print('\n% heart dataset', np.shape(data))
return model, data
def kidney():
attrs = ['age', 'bp', 'sg', 'al', 'su', 'rbc', 'pc', 'pcc', 'ba', 'bgr', 'bu', 'sc', 'sod', 'pot', 'hemo', 'pcv',
'wbcc', 'rbcc', 'htn', 'dm', 'cad', 'appet', 'pe', 'ane']
nums = ['age', 'bp', 'sg', 'bgr', 'bu', 'sc', 'sod', 'pot', 'hemo', 'pcv', 'wbcc', 'rbcc']
model = Classifier(attrs=attrs, numeric=nums, label='label')
data = model.load_data('data/kidney/kidney.csv')
print('\n% kidney dataset', np.shape(data))
return model, data
def krkp():
attrs = ['a1', 'a2', 'a3', 'a4', 'a5', 'a6', 'a7', 'a8', 'a9', 'a10', 'a11', 'a12', 'a13', 'a14', 'a15', 'a16',
'a17', 'a18', 'a19', 'a20', 'a21', 'a22', 'a23', 'a24', 'a25', 'a26', 'a27', 'a28', 'a29', 'a30', 'a31', 'a32',
'a33', 'a34', 'a35', 'a36']
model = Classifier(attrs=attrs, numeric=[], label='label')
data = model.load_data('data/krkp/krkp.csv')
print('\n% krkp dataset', np.shape(data))
return model, data
def mushroom():
attrs = ['cap_shape', 'cap_surface', 'cap_color', 'bruises', 'odor', 'gill_attachment', 'gill_spacing',
'gill_size', 'gill_color', 'stalk_shape', 'stalk_root', 'stalk_surface_above_ring', 'stalk_surface_below_ring',
'stalk_color_above_ring', 'stalk_color_below_ring', 'veil_type', 'veil_color', 'ring_number', 'ring_type',
'spore_print_color', 'population', 'habitat']
model = Classifier(attrs=attrs, numeric=[], label='label')
data = model.load_data('data/mushroom/mushroom.csv')
print('\n% mushroom dataset', np.shape(data))
return model, data
def sonar():
attrs = ['a1', 'a2', 'a3', 'a4', 'a5', 'a6', 'a7', 'a8', 'a9', 'a10', 'a11', 'a12', 'a13', 'a14', 'a15', 'a16',
'a17', 'a18', 'a19', 'a20', 'a21', 'a22', 'a23', 'a24', 'a25', 'a26', 'a27', 'a28', 'a29', 'a30', 'a31', 'a32',
'a33', 'a34', 'a35', 'a36', 'a37', 'a38', 'a39', 'a40', 'a41', 'a42', 'a43', 'a44', 'a45', 'a46', 'a47', 'a48',
'a49', 'a50', 'a51', 'a52', 'a53', 'a54', 'a55', 'a56', 'a57', 'a58', 'a59', 'a60']
nums = attrs
model = Classifier(attrs=attrs, numeric=nums, label='label')
data = model.load_data('data/sonar/sonar.csv')
print('\n% sonar dataset', np.shape(data))
return model, data
def voting():
attrs = ['handicapped_infants', 'water_project_cost_sharing', 'budget_resolution', 'physician_fee_freeze',
'el_salvador_aid', 'religious_groups_in_schools', 'anti_satellite_test_ban', 'aid_to_nicaraguan_contras',
'mx_missile', 'immigration', 'synfuels_corporation_cutback', 'education_spending', 'superfund_right_to_sue',
'crime', 'duty_free_exports', 'export_administration_act_south_africa']
model = Classifier(attrs=attrs, numeric=[], label='label')
data = model.load_data('data/voting/voting.csv')
print('\n% voting dataset', np.shape(data))
return model, data
def ecoli():
attrs = ['sn','mcg','gvh','lip','chg','aac','alm1','alm2']
nums = ['mcg','gvh','lip','chg','aac','alm1','alm2']
model = Classifier(attrs=attrs, numeric=nums, label='label')
data = model.load_data('data/ecoli/ecoli.csv')
print('\n% ecoli dataset', np.shape(data))
return model, data
def ionosphere():
attrs = ['c1','c2','c3','c4','c5','c6','c7','c8','c9','c10','c11','c12','c13','c14','c15','c16','c17','c18','c19',
'c20','c21','c22','c23','c24','c25','c26','c27','c28','c29','c30','c31','c32','c33','c34']
model = Classifier(attrs=attrs, numeric=attrs, label='label')
data = model.load_data('data/ionosphere/ionosphere.csv')
print('\n% ionosphere dataset', np.shape(data))
return model, data
def wine():
attrs = ['alcohol','malic_acid','ash','alcalinity_of_ash','magnesium','tot_phenols','flavanoids',
'nonflavanoid_phenols','proanthocyanins','color_intensity','hue','OD_of_diluted','proline']
model = Classifier(attrs=attrs, numeric=attrs, label='label')
data = model.load_data('data/wine/wine.csv')
print('\n% wine dataset', np.shape(data))
return model, data
def credit_card():
attrs = ['LIMIT_BAL','SEX','EDUCATION','MARRIAGE','AGE','PAY_0','PAY_2','PAY_3','PAY_4','PAY_5','PAY_6',
'BILL_AMT1','BILL_AMT2','BILL_AMT3','BILL_AMT4','BILL_AMT5','BILL_AMT6','PAY_AMT1','PAY_AMT2','PAY_AMT3','PAY_AMT4',
'PAY_AMT5','PAY_AMT6']
nums = ['LIMIT_BAL','AGE','BILL_AMT1','BILL_AMT2','BILL_AMT3','BILL_AMT4','BILL_AMT5','BILL_AMT6','PAY_AMT1',
'PAY_AMT2','PAY_AMT3','PAY_AMT4','PAY_AMT5','PAY_AMT6']
model = Classifier(attrs=attrs, numeric=nums, label='DEFAULT_PAYMENT')
data = model.load_data('data/credit_card/credit_card.csv')
print('\n% credit card dataset', np.shape(data))
return model, data
def rain():
attrs = ['Month','Day','Location','MinTemp','MaxTemp','Rainfall','Evaporation','Sunshine','WindGustDir','WindGustSpeed','WindDir9am','WindDir3pm','WindSpeed9am','WindSpeed3pm','Humidity9am','Humidity3pm','Pressure9am','Pressure3pm','Cloud9am','Cloud3pm','Temp9am','Temp3pm','RainToday']
nums = ['Month','Day','MinTemp','MaxTemp','Rainfall','WindDir9am','WindDir3pm','WindSpeed9am','WindSpeed3pm','Humidity9am','Humidity3pm','Pressure9am','Pressure3pm','Temp9am','Temp3pm']
model = Classifier(attrs=attrs, numeric=nums, label='RainTomorrow')
data = model.load_data('data/rain/rain.csv')
print('\n% rain dataset', np.shape(data))
return model, data
def heloc():
attrs = ['ExternalRiskEstimate','MSinceOldestTradeOpen','MSinceMostRecentTradeOpen','AverageMInFile','NumSatisfactoryTrades','NumTrades60Ever2DerogPubRec','NumTrades90Ever2DerogPubRec','PercentTradesNeverDelq','MSinceMostRecentDelq','MaxDelq2PublicRecLast12M','MaxDelqEver','NumTotalTrades','NumTradesOpeninLast12M','PercentInstallTrades','MSinceMostRecentInqexcl7days','NumInqLast6M','NumInqLast6Mexcl7days','NetFractionRevolvingBurden','NetFractionInstallBurden','NumRevolvingTradesWBalance','NumInstallTradesWBalance','NumBank2NatlTradesWHighUtilization','PercentTradesWBalance']
nums = attrs
model = Classifier(attrs=attrs, numeric=nums, label='RiskPerformance')
data = model.load_data('data/heloc/heloc_dataset_v1.csv')
print('\n% heloc dataset', np.shape(data))
return model, data
def avila():
attrs = ['f1','f2','f3','f4','f5','f6','f7','f8','f9','f10']
nums = ['f1','f2','f3','f4','f5','f6','f7','f8','f9','f10']
model = Classifier(attrs=attrs, numeric=nums, label='class')
data_train = model.load_data('data/avila/train.csv')
data_test = model.load_data('data/avila/test.csv')
print('\n% avila dataset train', np.shape(data_train), 'test', np.shape(data_test))
return model, data_train, data_test
def titanic():
attrs = ['Sex', 'Age', 'Number_of_Siblings_Spouses', 'Number_Of_Parents_Children', 'Fare', 'Class', 'Embarked']
nums = ['Age', 'Number_of_Siblings_Spouses', 'Number_Of_Parents_Children', 'Fare']
model = Classifier(attrs=attrs, numeric=nums, label='Survived')
data_train = model.load_data('data/titanic/train.csv')
data_test = model.load_data('data/titanic/test.csv')
print('\n% titanic dataset train', np.shape(data_train), 'test', np.shape(data_test))
return model, data_train, data_test
def anneal():
attrs = ['family', 'product_type', 'steel', 'carbon', 'hardness', 'temper_rolling', 'condition', 'formability',
'strength', 'non_ageing', 'surface_finish', 'surface_quality', 'enamelability', 'bc', 'bf', 'bt', 'bw_me', 'bl',
'm', 'chrom', 'phos', 'cbond', 'marvi', 'exptl', 'ferro', 'corr', 'blue_bright_varn_clean', 'lustre', 'jurofm',
's', 'p', 'shape', 'thick', 'width', 'len', 'oil', 'bore', 'packing']
nums = ['thick', 'width', 'len']
model = Classifier(attrs=attrs, numeric=nums, label='classes')
data_train = model.load_data('data/anneal/anneal_train.csv')
data_test = model.load_data('data/anneal/anneal_test.csv')
print('\n% anneal dataset train', np.shape(data_train), 'test', np.shape(data_test))
return model, data_train, data_test
def weight_lifting():
attrs = ['new_window','num_window','roll_belt','pitch_belt','yaw_belt','total_accel_belt','kurtosis_roll_belt','kurtosis_picth_belt','kurtosis_yaw_belt','skewness_roll_belt','skewness_roll_belt','skewness_yaw_belt','max_roll_belt','max_picth_belt','max_yaw_belt','min_roll_belt','min_pitch_belt','min_yaw_belt','amplitude_roll_belt','amplitude_pitch_belt','amplitude_yaw_belt','var_total_accel_belt','avg_roll_belt','stddev_roll_belt','var_roll_belt','avg_pitch_belt','stddev_pitch_belt','var_pitch_belt','avg_yaw_belt','stddev_yaw_belt','var_yaw_belt','gyros_belt_x','gyros_belt_y','gyros_belt_z','accel_belt_x','accel_belt_y','accel_belt_z','magnet_belt_x','magnet_belt_y','magnet_belt_z','roll_arm','pitch_arm','yaw_arm','total_accel_arm','var_accel_arm','avg_roll_arm','stddev_roll_arm','var_roll_arm','avg_pitch_arm','stddev_pitch_arm','var_pitch_arm','avg_yaw_arm','stddev_yaw_arm','var_yaw_arm','gyros_arm_x','gyros_arm_y','gyros_arm_z','accel_arm_x','accel_arm_y','accel_arm_z','magnet_arm_x','magnet_arm_y','magnet_arm_z','kurtosis_roll_arm','kurtosis_picth_arm','kurtosis_yaw_arm','skewness_roll_arm','skewness_pitch_arm','skewness_yaw_arm','max_roll_arm','max_picth_arm','max_yaw_arm','min_roll_arm','min_pitch_arm','min_yaw_arm','amplitude_roll_arm','amplitude_pitch_arm','amplitude_yaw_arm','roll_dumbbell','pitch_dumbbell','yaw_dumbbell','kurtosis_roll_dumbbell','kurtosis_picth_dumbbell','kurtosis_yaw_dumbbell','skewness_roll_dumbbell','skewness_pitch_dumbbell','skewness_yaw_dumbbell','max_roll_dumbbell','max_picth_dumbbell','max_yaw_dumbbell','min_roll_dumbbell','min_pitch_dumbbell','min_yaw_dumbbell','amplitude_roll_dumbbell','amplitude_pitch_dumbbell','amplitude_yaw_dumbbell','total_accel_dumbbell','var_accel_dumbbell','avg_roll_dumbbell','stddev_roll_dumbbell','var_roll_dumbbell','avg_pitch_dumbbell','stddev_pitch_dumbbell','var_pitch_dumbbell','avg_yaw_dumbbell','stddev_yaw_dumbbell','var_yaw_dumbbell','gyros_dumbbell_x','gyros_dumbbell_y','gyros_dumbbell_z','accel_dumbbell_x','accel_dumbbell_y','accel_dumbbell_z','magnet_dumbbell_x','magnet_dumbbell_y','magnet_dumbbell_z','roll_forearm','pitch_forearm','yaw_forearm','kurtosis_roll_forearm','kurtosis_picth_forearm','kurtosis_yaw_forearm','skewness_roll_forearm','skewness_pitch_forearm','skewness_yaw_forearm','max_roll_forearm','max_picth_forearm','max_yaw_forearm','min_roll_forearm','min_pitch_forearm','min_yaw_forearm','amplitude_roll_forearm','amplitude_pitch_forearm','amplitude_yaw_forearm','total_accel_forearm','var_accel_forearm','avg_roll_forearm','stddev_roll_forearm','var_roll_forearm','avg_pitch_forearm','stddev_pitch_forearm','var_pitch_forearm','avg_yaw_forearm','stddev_yaw_forearm','var_yaw_forearm','gyros_forearm_x','gyros_forearm_y','gyros_forearm_z','accel_forearm_x','accel_forearm_y','accel_forearm_z','magnet_forearm_x','magnet_forearm_y','magnet_forearm_z']
nums = ['num_window','roll_belt','pitch_belt','yaw_belt','total_accel_belt','kurtosis_roll_belt','kurtosis_picth_belt','kurtosis_yaw_belt','skewness_roll_belt','skewness_roll_belt','skewness_yaw_belt','max_roll_belt','max_picth_belt','max_yaw_belt','min_roll_belt','min_pitch_belt','min_yaw_belt','amplitude_roll_belt','amplitude_pitch_belt','amplitude_yaw_belt','var_total_accel_belt','avg_roll_belt','stddev_roll_belt','var_roll_belt','avg_pitch_belt','stddev_pitch_belt','var_pitch_belt','avg_yaw_belt','stddev_yaw_belt','var_yaw_belt','gyros_belt_x','gyros_belt_y','gyros_belt_z','accel_belt_x','accel_belt_y','accel_belt_z','magnet_belt_x','magnet_belt_y','magnet_belt_z','roll_arm','pitch_arm','yaw_arm','total_accel_arm','var_accel_arm','avg_roll_arm','stddev_roll_arm','var_roll_arm','avg_pitch_arm','stddev_pitch_arm','var_pitch_arm','avg_yaw_arm','stddev_yaw_arm','var_yaw_arm','gyros_arm_x','gyros_arm_y','gyros_arm_z','accel_arm_x','accel_arm_y','accel_arm_z','magnet_arm_x','magnet_arm_y','magnet_arm_z','kurtosis_roll_arm','kurtosis_picth_arm','kurtosis_yaw_arm','skewness_roll_arm','skewness_pitch_arm','skewness_yaw_arm','max_roll_arm','max_picth_arm','max_yaw_arm','min_roll_arm','min_pitch_arm','min_yaw_arm','amplitude_roll_arm','amplitude_pitch_arm','amplitude_yaw_arm','roll_dumbbell','pitch_dumbbell','yaw_dumbbell','kurtosis_roll_dumbbell','kurtosis_picth_dumbbell','kurtosis_yaw_dumbbell','skewness_roll_dumbbell','skewness_pitch_dumbbell','skewness_yaw_dumbbell','max_roll_dumbbell','max_picth_dumbbell','max_yaw_dumbbell','min_roll_dumbbell','min_pitch_dumbbell','min_yaw_dumbbell','amplitude_roll_dumbbell','amplitude_pitch_dumbbell','amplitude_yaw_dumbbell','total_accel_dumbbell','var_accel_dumbbell','avg_roll_dumbbell','stddev_roll_dumbbell','var_roll_dumbbell','avg_pitch_dumbbell','stddev_pitch_dumbbell','var_pitch_dumbbell','avg_yaw_dumbbell','stddev_yaw_dumbbell','var_yaw_dumbbell','gyros_dumbbell_x','gyros_dumbbell_y','gyros_dumbbell_z','accel_dumbbell_x','accel_dumbbell_y','accel_dumbbell_z','magnet_dumbbell_x','magnet_dumbbell_y','magnet_dumbbell_z','roll_forearm','pitch_forearm','yaw_forearm','kurtosis_roll_forearm','kurtosis_picth_forearm','kurtosis_yaw_forearm','skewness_roll_forearm','skewness_pitch_forearm','skewness_yaw_forearm','max_roll_forearm','max_picth_forearm','max_yaw_forearm','min_roll_forearm','min_pitch_forearm','min_yaw_forearm','amplitude_roll_forearm','amplitude_pitch_forearm','amplitude_yaw_forearm','total_accel_forearm','var_accel_forearm','avg_roll_forearm','stddev_roll_forearm','var_roll_forearm','avg_pitch_forearm','stddev_pitch_forearm','var_pitch_forearm','avg_yaw_forearm','stddev_yaw_forearm','var_yaw_forearm','gyros_forearm_x','gyros_forearm_y','gyros_forearm_z','accel_forearm_x','accel_forearm_y','accel_forearm_z','magnet_forearm_x','magnet_forearm_y','magnet_forearm_z']
model = Classifier(attrs=attrs, numeric=nums, label='classe')
data = model.load_data('data/weight_lifting/weight_lifting.csv')
print('\n% weight lifting dataset', np.shape(data))
return model, data
def yeast():
attrs = ['sequence','mcg','gvh','alm','mit','erl','pox','vac','nuc']
nums = ['mcg','gvh','alm','mit','erl','pox','vac','nuc']
model = Classifier(attrs=attrs, numeric=nums, label='class')
data = model.load_data('data/yeast/yeast.csv')
print('\n% yeast dataset', np.shape(data))
return model, data
def drug():
attrs = ['Age','Gender','Education','Country','Ethnicity','Nscore','Escore','Oscore','Ascore','Cscore','Impulsive','SS']
nums = attrs
output = ['Alcohol','Amphet','Amyl','Benzos','Caff','Cannabis','Choc','Code','Crack','Ecstasy','Heroin','Ketamine','Legalh','LSD','Meth','Mushrooms','Nicotine','Semer','VSA']
model = Classifier(attrs=attrs, numeric=nums, label=output[17])
data = model.load_data('data/drug/drug.csv')
print('\n% drug consumption dataset', np.shape(data))
return model, data
def dry_bean():
attrs = ['Area','Perimeter','MajorAxisLength','MinorAxisLength','AspectRation','Eccentricity','ConvexArea','EquivDiameter','Extent','Solidity','roundness','Compactness','ShapeFactor1','ShapeFactor2','ShapeFactor3','ShapeFactor4']
nums = attrs
model = Classifier(attrs=attrs, numeric=nums, label='Class')
data = model.load_data('data/dry_bean/dry_bean.csv')
print('\n% dry bean dataset', np.shape(data))
return model, data
def eeg():
attrs = ['AF3','F7','F3','FC5','T7','P7','O1','O2','P8','T8','FC6','F4','F8','AF4']
nums = attrs
model = Classifier(attrs=attrs, numeric=nums, label='eyeDetection')
data = model.load_data('data/eeg/eeg.csv')
print('\n% eeg dataset', np.shape(data))
return model, data
def nursery():
attrs = ['parents','has_nurs','form','children','housing','finance','social','health']
nums = []
model = Classifier(attrs=attrs, numeric=nums, label='class')
data = model.load_data('data/nursery/nursery.csv')
print('\n% nursery dataset', np.shape(data))
return model, data
def intention():
attrs = ['Administrative','Administrative_Duration','Informational','Informational_Duration','ProductRelated','ProductRelated_Duration','BounceRates','ExitRates','PageValues','SpecialDay','Month','OperatingSystems','Browser','Region','TrafficType','VisitorType','Weekend']
nums = ['Administrative','Administrative_Duration','Informational','Informational_Duration','ProductRelated','ProductRelated_Duration','BounceRates','ExitRates','PageValues','SpecialDay']
model = Classifier(attrs=attrs, numeric=nums, label='Revenue')
data = model.load_data('data/intention/intention.csv')
print('\n% online shoppers intention dataset', np.shape(data))
return model, data
def page_blocks():
attrs = ['height','lenght','area','eccen','p_black','p_and','mean_tr','blackpix','blackand','wb_trans']
nums = attrs
model = Classifier(attrs=attrs, numeric=nums, label='class')
data = model.load_data('data/page_blocks/page_blocks.csv')
print('\n% page blocks dataset', np.shape(data))
return model, data
def parkison():
attrs = ['gender','PPE','DFA','RPDE','numPulses','numPeriodsPulses','meanPeriodPulses','stdDevPeriodPulses','locPctJitter','locAbsJitter','rapJitter','ppq5Jitter','ddpJitter','locShimmer','locDbShimmer','apq3Shimmer','apq5Shimmer','apq11Shimmer','ddaShimmer','meanAutoCorrHarmonicity','meanNoiseToHarmHarmonicity','meanHarmToNoiseHarmonicity','minIntensity','maxIntensity','meanIntensity','f1','f2','f3','f4','b1','b2','b3','b4','GQ_prc5_95','GQ_std_cycle_open','GQ_std_cycle_closed','GNE_mean','GNE_std','GNE_SNR_TKEO','GNE_SNR_SEO','GNE_NSR_TKEO','GNE_NSR_SEO','VFER_mean','VFER_std','VFER_entropy','VFER_SNR_TKEO','VFER_SNR_SEO','VFER_NSR_TKEO','VFER_NSR_SEO','IMF_SNR_SEO','IMF_SNR_TKEO','IMF_SNR_entropy','IMF_NSR_SEO','IMF_NSR_TKEO','IMF_NSR_entropy','mean_Log_energy','mean_MFCC_0th_coef','mean_MFCC_1st_coef','mean_MFCC_2nd_coef','mean_MFCC_3rd_coef','mean_MFCC_4th_coef','mean_MFCC_5th_coef','mean_MFCC_6th_coef','mean_MFCC_7th_coef','mean_MFCC_8th_coef','mean_MFCC_9th_coef','mean_MFCC_10th_coef','mean_MFCC_11th_coef','mean_MFCC_12th_coef','mean_delta_log_energy','mean_0th_delta','mean_1st_delta','mean_2nd_delta','mean_3rd_delta','mean_4th_delta','mean_5th_delta','mean_6th_delta','mean_7th_delta','mean_8th_delta','mean_9th_delta','mean_10th_delta','mean_11th_delta','mean_12th_delta','mean_delta_delta_log_energy','mean_delta_delta_0th','mean_1st_delta_delta','mean_2nd_delta_delta','mean_3rd_delta_delta','mean_4th_delta_delta','mean_5th_delta_delta','mean_6th_delta_delta','mean_7th_delta_delta','mean_8th_delta_delta','mean_9th_delta_delta','mean_10th_delta_delta','mean_11th_delta_delta','mean_12th_delta_delta','std_Log_energy','std_MFCC_0th_coef','std_MFCC_1st_coef','std_MFCC_2nd_coef','std_MFCC_3rd_coef','std_MFCC_4th_coef','std_MFCC_5th_coef','std_MFCC_6th_coef','std_MFCC_7th_coef','std_MFCC_8th_coef','std_MFCC_9th_coef','std_MFCC_10th_coef','std_MFCC_11th_coef','std_MFCC_12th_coef','std_delta_log_energy','std_0th_delta','std_1st_delta','std_2nd_delta','std_3rd_delta','std_4th_delta','std_5th_delta','std_6th_delta','std_7th_delta','std_8th_delta','std_9th_delta','std_10th_delta','std_11th_delta','std_12th_delta','std_delta_delta_log_energy','std_delta_delta_0th','std_1st_delta_delta','std_2nd_delta_delta','std_3rd_delta_delta','std_4th_delta_delta','std_5th_delta_delta','std_6th_delta_delta','std_7th_delta_delta','std_8th_delta_delta','std_9th_delta_delta','std_10th_delta_delta','std_11th_delta_delta','std_12th_delta_delta','Ea','Ed_1_coef','Ed_2_coef','Ed_3_coef','Ed_4_coef','Ed_5_coef','Ed_6_coef','Ed_7_coef','Ed_8_coef','Ed_9_coef','Ed_10_coef','det_entropy_shannon_1_coef','det_entropy_shannon_2_coef','det_entropy_shannon_3_coef','det_entropy_shannon_4_coef','det_entropy_shannon_5_coef','det_entropy_shannon_6_coef','det_entropy_shannon_7_coef','det_entropy_shannon_8_coef','det_entropy_shannon_9_coef','det_entropy_shannon_10_coef','det_entropy_log_1_coef','det_entropy_log_2_coef','det_entropy_log_3_coef','det_entropy_log_4_coef','det_entropy_log_5_coef','det_entropy_log_6_coef','det_entropy_log_7_coef','det_entropy_log_8_coef','det_entropy_log_9_coef','det_entropy_log_10_coef','det_TKEO_mean_1_coef','det_TKEO_mean_2_coef','det_TKEO_mean_3_coef','det_TKEO_mean_4_coef','det_TKEO_mean_5_coef','det_TKEO_mean_6_coef','det_TKEO_mean_7_coef','det_TKEO_mean_8_coef','det_TKEO_mean_9_coef','det_TKEO_mean_10_coef','det_TKEO_std_1_coef','det_TKEO_std_2_coef','det_TKEO_std_3_coef','det_TKEO_std_4_coef','det_TKEO_std_5_coef','det_TKEO_std_6_coef','det_TKEO_std_7_coef','det_TKEO_std_8_coef','det_TKEO_std_9_c
oef','det_TKEO_std_10_coef','app_entropy_shannon_1_coef','app_entropy_shannon_2_coef','app_entropy_shannon_3_coef','app_entropy_shannon_4_coef','app_entropy_shannon_5_coef','app_entropy_shannon_6_coef','app_entropy_shannon_7_coef','app_entropy_shannon_8_coef','app_entropy_shannon_9_coef','app_entropy_shannon_10_coef','app_entropy_log_1_coef','app_entropy_log_2_coef','app_entropy_log_3_coef','app_entropy_log_4_coef','app_entropy_log_5_coef','app_entropy_log_6_coef','app_entropy_log_7_coef','app_entropy_log_8_coef','app_entropy_log_9_coef','app_entropy_log_10_coef','app_det_TKEO_mean_1_coef','app_det_TKEO_mean_2_coef','app_det_TKEO_mean_3_coef','app_det_TKEO_mean_4_coef','app_det_TKEO_mean_5_coef','app_det_TKEO_mean_6_coef','app_det_TKEO_mean_7_coef','app_det_TKEO_mean_8_coef','app_det_TKEO_mean_9_coef','app_det_TKEO_mean_10_coef','app_TKEO_std_1_coef','app_TKEO_std_2_coef','app_TKEO_std_3_coef','app_TKEO_std_4_coef','app_TKEO_std_5_coef','app_TKEO_std_6_coef','app_TKEO_std_7_coef','app_TKEO_std_8_coef','app_TKEO_std_9_coef','app_TKEO_std_10_coef','Ea2','Ed2_1_coef','Ed2_2_coef','Ed2_3_coef','Ed2_4_coef','Ed2_5_coef','Ed2_6_coef','Ed2_7_coef','Ed2_8_coef','Ed2_9_coef','Ed2_10_coef','det_LT_entropy_shannon_1_coef','det_LT_entropy_shannon_2_coef','det_LT_entropy_shannon_3_coef','det_LT_entropy_shannon_4_coef','det_LT_entropy_shannon_5_coef','det_LT_entropy_shannon_6_coef','det_LT_entropy_shannon_7_coef','det_LT_entropy_shannon_8_coef','det_LT_entropy_shannon_9_coef','det_LT_entropy_shannon_10_coef','det_LT_entropy_log_1_coef','det_LT_entropy_log_2_coef','det_LT_entropy_log_3_coef','det_LT_entropy_log_4_coef','det_LT_entropy_log_5_coef','det_LT_entropy_log_6_coef','det_LT_entropy_log_7_coef','det_LT_entropy_log_8_coef','det_LT_entropy_log_9_coef','det_LT_entropy_log_10_coef','det_LT_TKEO_mean_1_coef','det_LT_TKEO_mean_2_coef','det_LT_TKEO_mean_3_coef','det_LT_TKEO_mean_4_coef','det_LT_TKEO_mean_5_coef','det_LT_TKEO_mean_6_coef','det_LT_TKEO_mean_7_coef','det_LT_TKEO_mean_8_coef','det_LT_TKEO_mean_9_coef','det_LT_TKEO_mean_10_coef','det_LT_TKEO_std_1_coef','det_LT_TKEO_std_2_coef','det_LT_TKEO_std_3_coef','det_LT_TKEO_std_4_coef','det_LT_TKEO_std_5_coef','det_LT_TKEO_std_6_coef','det_LT_TKEO_std_7_coef','det_LT_TKEO_std_8_coef','det_LT_TKEO_std_9_coef','det_LT_TKEO_std_10_coef','app_LT_entropy_shannon_1_coef','app_LT_entropy_shannon_2_coef','app_LT_entropy_shannon_3_coef','app_LT_entropy_shannon_4_coef','app_LT_entropy_shannon_5_coef','app_LT_entropy_shannon_6_coef','app_LT_entropy_shannon_7_coef','app_LT_entropy_shannon_8_coef','app_LT_entropy_shannon_9_coef','app_LT_entropy_shannon_10_coef','app_LT_entropy_log_1_coef','app_LT_entropy_log_2_coef','app_LT_entropy_log_3_coef','app_LT_entropy_log_4_coef','app_LT_entropy_log_5_coef','app_LT_entropy_log_6_coef','app_LT_entropy_log_7_coef','app_LT_entropy_log_8_coef','app_LT_entropy_log_9_coef','app_LT_entropy_log_10_coef','app_LT_TKEO_mean_1_coef','app_LT_TKEO_mean_2_coef','app_LT_TKEO_mean_3_coef','app_LT_TKEO_mean_4_coef','app_LT_TKEO_mean_5_coef','app_LT_TKEO_mean_6_coef','app_LT_TKEO_mean_7_coef','app_LT_TKEO_mean_8_coef','app_LT_TKEO_mean_9_coef','app_LT_TKEO_mean_10_coef','app_LT_TKEO_std_1_coef','app_LT_TKEO_std_2_coef','app_LT_TKEO_std_3_coef','app_LT_TKEO_std_4_coef','app_LT_TKEO_std_5_coef','app_LT_TKEO_std_6_coef','app_LT_TKEO_std_7_coef','app_LT_TKEO_std_8_coef','app_LT_TKEO_std_9_coef','app_LT_TKEO_std_10_coef','tqwt_energy_dec_1','tqwt_energy_dec_2','tqwt_energy_dec_3','tqwt_energy_dec_4','tqwt_energy_dec_5','tqwt_energy_dec_6','tqwt_en
ergy_dec_7','tqwt_energy_dec_8','tqwt_energy_dec_9','tqwt_energy_dec_10','tqwt_energy_dec_11','tqwt_energy_dec_12','tqwt_energy_dec_13','tqwt_energy_dec_14','tqwt_energy_dec_15','tqwt_energy_dec_16','tqwt_energy_dec_17','tqwt_energy_dec_18','tqwt_energy_dec_19','tqwt_energy_dec_20','tqwt_energy_dec_21','tqwt_energy_dec_22','tqwt_energy_dec_23','tqwt_energy_dec_24','tqwt_energy_dec_25','tqwt_energy_dec_26','tqwt_energy_dec_27','tqwt_energy_dec_28','tqwt_energy_dec_29','tqwt_energy_dec_30','tqwt_energy_dec_31','tqwt_energy_dec_32','tqwt_energy_dec_33','tqwt_energy_dec_34','tqwt_energy_dec_35','tqwt_energy_dec_36','tqwt_entropy_shannon_dec_1','tqwt_entropy_shannon_dec_2','tqwt_entropy_shannon_dec_3','tqwt_entropy_shannon_dec_4','tqwt_entropy_shannon_dec_5','tqwt_entropy_shannon_dec_6','tqwt_entropy_shannon_dec_7','tqwt_entropy_shannon_dec_8','tqwt_entropy_shannon_dec_9','tqwt_entropy_shannon_dec_10','tqwt_entropy_shannon_dec_11','tqwt_entropy_shannon_dec_12','tqwt_entropy_shannon_dec_13','tqwt_entropy_shannon_dec_14','tqwt_entropy_shannon_dec_15','tqwt_entropy_shannon_dec_16','tqwt_entropy_shannon_dec_17','tqwt_entropy_shannon_dec_18','tqwt_entropy_shannon_dec_19','tqwt_entropy_shannon_dec_20','tqwt_entropy_shannon_dec_21','tqwt_entropy_shannon_dec_22','tqwt_entropy_shannon_dec_23','tqwt_entropy_shannon_dec_24','tqwt_entropy_shannon_dec_25','tqwt_entropy_shannon_dec_26','tqwt_entropy_shannon_dec_27','tqwt_entropy_shannon_dec_28','tqwt_entropy_shannon_dec_29','tqwt_entropy_shannon_dec_30','tqwt_entropy_shannon_dec_31','tqwt_entropy_shannon_dec_32','tqwt_entropy_shannon_dec_33','tqwt_entropy_shannon_dec_34','tqwt_entropy_shannon_dec_35','tqwt_entropy_shannon_dec_36','tqwt_entropy_log_dec_1','tqwt_entropy_log_dec_2','tqwt_entropy_log_dec_3','tqwt_entropy_log_dec_4','tqwt_entropy_log_dec_5','tqwt_entropy_log_dec_6','tqwt_entropy_log_dec_7','tqwt_entropy_log_dec_8','tqwt_entropy_log_dec_9','tqwt_entropy_log_dec_10','tqwt_entropy_log_dec_11','tqwt_entropy_log_dec_12','tqwt_entropy_log_dec_13','tqwt_entropy_log_dec_14','tqwt_entropy_log_dec_15','tqwt_entropy_log_dec_16','tqwt_entropy_log_dec_17','tqwt_entropy_log_dec_18','tqwt_entropy_log_dec_19','tqwt_entropy_log_dec_20','tqwt_entropy_log_dec_21','tqwt_entropy_log_dec_22','tqwt_entropy_log_dec_23','tqwt_entropy_log_dec_24','tqwt_entropy_log_dec_25','tqwt_entropy_log_dec_26','tqwt_entropy_log_dec_27','tqwt_entropy_log_dec_28','tqwt_entropy_log_dec_29','tqwt_entropy_log_dec_30','tqwt_entropy_log_dec_31','tqwt_entropy_log_dec_32','tqwt_entropy_log_dec_33','tqwt_entropy_log_dec_34','tqwt_entropy_log_dec_35','tqwt_entropy_log_dec_36','tqwt_TKEO_mean_dec_1','tqwt_TKEO_mean_dec_2','tqwt_TKEO_mean_dec_3','tqwt_TKEO_mean_dec_4','tqwt_TKEO_mean_dec_5','tqwt_TKEO_mean_dec_6','tqwt_TKEO_mean_dec_7','tqwt_TKEO_mean_dec_8','tqwt_TKEO_mean_dec_9','tqwt_TKEO_mean_dec_10','tqwt_TKEO_mean_dec_11','tqwt_TKEO_mean_dec_12','tqwt_TKEO_mean_dec_13','tqwt_TKEO_mean_dec_14','tqwt_TKEO_mean_dec_15','tqwt_TKEO_mean_dec_16','tqwt_TKEO_mean_dec_17','tqwt_TKEO_mean_dec_18','tqwt_TKEO_mean_dec_19','tqwt_TKEO_mean_dec_20','tqwt_TKEO_mean_dec_21','tqwt_TKEO_mean_dec_22','tqwt_TKEO_mean_dec_23','tqwt_TKEO_mean_dec_24','tqwt_TKEO_mean_dec_25','tqwt_TKEO_mean_dec_26','tqwt_TKEO_mean_dec_27','tqwt_TKEO_mean_dec_28','tqwt_TKEO_mean_dec_29','tqwt_TKEO_mean_dec_30','tqwt_TKEO_mean_dec_31','tqwt_TKEO_mean_dec_32','tqwt_TKEO_mean_dec_33','tqwt_TKEO_mean_dec_34','tqwt_TKEO_mean_dec_35','tqwt_TKEO_mean_dec_36','tqwt_TKEO_std_dec_1','tqwt_TKEO_std_dec_2','tqwt_TKEO_std_dec_3','tqwt_TKEO_std_de
c_4','tqwt_TKEO_std_dec_5','tqwt_TKEO_std_dec_6','tqwt_TKEO_std_dec_7','tqwt_TKEO_std_dec_8','tqwt_TKEO_std_dec_9','tqwt_TKEO_std_dec_10','tqwt_TKEO_std_dec_11','tqwt_TKEO_std_dec_12','tqwt_TKEO_std_dec_13','tqwt_TKEO_std_dec_14','tqwt_TKEO_std_dec_15','tqwt_TKEO_std_dec_16','tqwt_TKEO_std_dec_17','tqwt_TKEO_std_dec_18','tqwt_TKEO_std_dec_19','tqwt_TKEO_std_dec_20','tqwt_TKEO_std_dec_21','tqwt_TKEO_std_dec_22','tqwt_TKEO_std_dec_23','tqwt_TKEO_std_dec_24','tqwt_TKEO_std_dec_25','tqwt_TKEO_std_dec_26','tqwt_TKEO_std_dec_27','tqwt_TKEO_std_dec_28','tqwt_TKEO_std_dec_29','tqwt_TKEO_std_dec_30','tqwt_TKEO_std_dec_31','tqwt_TKEO_std_dec_32','tqwt_TKEO_std_dec_33','tqwt_TKEO_std_dec_34','tqwt_TKEO_std_dec_35','tqwt_TKEO_std_dec_36','tqwt_medianValue_dec_1','tqwt_medianValue_dec_2','tqwt_medianValue_dec_3','tqwt_medianValue_dec_4','tqwt_medianValue_dec_5','tqwt_medianValue_dec_6','tqwt_medianValue_dec_7','tqwt_medianValue_dec_8','tqwt_medianValue_dec_9','tqwt_medianValue_dec_10','tqwt_medianValue_dec_11','tqwt_medianValue_dec_12','tqwt_medianValue_dec_13','tqwt_medianValue_dec_14','tqwt_medianValue_dec_15','tqwt_medianValue_dec_16','tqwt_medianValue_dec_17','tqwt_medianValue_dec_18','tqwt_medianValue_dec_19','tqwt_medianValue_dec_20','tqwt_medianValue_dec_21','tqwt_medianValue_dec_22','tqwt_medianValue_dec_23','tqwt_medianValue_dec_24','tqwt_medianValue_dec_25','tqwt_medianValue_dec_26','tqwt_medianValue_dec_27','tqwt_medianValue_dec_28','tqwt_medianValue_dec_29','tqwt_medianValue_dec_30','tqwt_medianValue_dec_31','tqwt_medianValue_dec_32','tqwt_medianValue_dec_33','tqwt_medianValue_dec_34','tqwt_medianValue_dec_35','tqwt_medianValue_dec_36','tqwt_meanValue_dec_1','tqwt_meanValue_dec_2','tqwt_meanValue_dec_3','tqwt_meanValue_dec_4','tqwt_meanValue_dec_5','tqwt_meanValue_dec_6','tqwt_meanValue_dec_7','tqwt_meanValue_dec_8','tqwt_meanValue_dec_9','tqwt_meanValue_dec_10','tqwt_meanValue_dec_11','tqwt_meanValue_dec_12','tqwt_meanValue_dec_13','tqwt_meanValue_dec_14','tqwt_meanValue_dec_15','tqwt_meanValue_dec_16','tqwt_meanValue_dec_17','tqwt_meanValue_dec_18','tqwt_meanValue_dec_19','tqwt_meanValue_dec_20','tqwt_meanValue_dec_21','tqwt_meanValue_dec_22','tqwt_meanValue_dec_23','tqwt_meanValue_dec_24','tqwt_meanValue_dec_25','tqwt_meanValue_dec_26','tqwt_meanValue_dec_27','tqwt_meanValue_dec_28','tqwt_meanValue_dec_29','tqwt_meanValue_dec_30','tqwt_meanValue_dec_31','tqwt_meanValue_dec_32','tqwt_meanValue_dec_33','tqwt_meanValue_dec_34','tqwt_meanValue_dec_35','tqwt_meanValue_dec_36','tqwt_stdValue_dec_1','tqwt_stdValue_dec_2','tqwt_stdValue_dec_3','tqwt_stdValue_dec_4','tqwt_stdValue_dec_5','tqwt_stdValue_dec_6','tqwt_stdValue_dec_7','tqwt_stdValue_dec_8','tqwt_stdValue_dec_9','tqwt_stdValue_dec_10','tqwt_stdValue_dec_11','tqwt_stdValue_dec_12','tqwt_stdValue_dec_13','tqwt_stdValue_dec_14','tqwt_stdValue_dec_15','tqwt_stdValue_dec_16','tqwt_stdValue_dec_17','tqwt_stdValue_dec_18','tqwt_stdValue_dec_19','tqwt_stdValue_dec_20','tqwt_stdValue_dec_21','tqwt_stdValue_dec_22','tqwt_stdValue_dec_23','tqwt_stdValue_dec_24','tqwt_stdValue_dec_25','tqwt_stdValue_dec_26','tqwt_stdValue_dec_27','tqwt_stdValue_dec_28','tqwt_stdValue_dec_29','tqwt_stdValue_dec_30','tqwt_stdValue_dec_31','tqwt_stdValue_dec_32','tqwt_stdValue_dec_33','tqwt_stdValue_dec_34','tqwt_stdValue_dec_35','tqwt_stdValue_dec_36','tqwt_minValue_dec_1','tqwt_minValue_dec_2','tqwt_minValue_dec_3','tqwt_minValue_dec_4','tqwt_minValue_dec_5','tqwt_minValue_dec_6','tqwt_minValue_dec_7','tqwt_minValue_dec_8','tqwt_minValue_dec_9','tqwt_minValue_dec_1
0','tqwt_minValue_dec_11','tqwt_minValue_dec_12','tqwt_minValue_dec_13','tqwt_minValue_dec_14','tqwt_minValue_dec_15','tqwt_minValue_dec_16','tqwt_minValue_dec_17','tqwt_minValue_dec_18','tqwt_minValue_dec_19','tqwt_minValue_dec_20','tqwt_minValue_dec_21','tqwt_minValue_dec_22','tqwt_minValue_dec_23','tqwt_minValue_dec_24','tqwt_minValue_dec_25','tqwt_minValue_dec_26','tqwt_minValue_dec_27','tqwt_minValue_dec_28','tqwt_minValue_dec_29','tqwt_minValue_dec_30','tqwt_minValue_dec_31','tqwt_minValue_dec_32','tqwt_minValue_dec_33','tqwt_minValue_dec_34','tqwt_minValue_dec_35','tqwt_minValue_dec_36','tqwt_maxValue_dec_1','tqwt_maxValue_dec_2','tqwt_maxValue_dec_3','tqwt_maxValue_dec_4','tqwt_maxValue_dec_5','tqwt_maxValue_dec_6','tqwt_maxValue_dec_7','tqwt_maxValue_dec_8','tqwt_maxValue_dec_9','tqwt_maxValue_dec_10','tqwt_maxValue_dec_11','tqwt_maxValue_dec_12','tqwt_maxValue_dec_13','tqwt_maxValue_dec_14','tqwt_maxValue_dec_15','tqwt_maxValue_dec_16','tqwt_maxValue_dec_17','tqwt_maxValue_dec_18','tqwt_maxValue_dec_19','tqwt_maxValue_dec_20','tqwt_maxValue_dec_21','tqwt_maxValue_dec_22','tqwt_maxValue_dec_23','tqwt_maxValue_dec_24','tqwt_maxValue_dec_25','tqwt_maxValue_dec_26','tqwt_maxValue_dec_27','tqwt_maxValue_dec_28','tqwt_maxValue_dec_29','tqwt_maxValue_dec_30','tqwt_maxValue_dec_31','tqwt_maxValue_dec_32','tqwt_maxValue_dec_33','tqwt_maxValue_dec_34','tqwt_maxValue_dec_35','tqwt_maxValue_dec_36','tqwt_skewnessValue_dec_1','tqwt_skewnessValue_dec_2','tqwt_skewnessValue_dec_3','tqwt_skewnessValue_dec_4','tqwt_skewnessValue_dec_5','tqwt_skewnessValue_dec_6','tqwt_skewnessValue_dec_7','tqwt_skewnessValue_dec_8','tqwt_skewnessValue_dec_9','tqwt_skewnessValue_dec_10','tqwt_skewnessValue_dec_11','tqwt_skewnessValue_dec_12','tqwt_skewnessValue_dec_13','tqwt_skewnessValue_dec_14','tqwt_skewnessValue_dec_15','tqwt_skewnessValue_dec_16','tqwt_skewnessValue_dec_17','tqwt_skewnessValue_dec_18','tqwt_skewnessValue_dec_19','tqwt_skewnessValue_dec_20','tqwt_skewnessValue_dec_21','tqwt_skewnessValue_dec_22','tqwt_skewnessValue_dec_23','tqwt_skewnessValue_dec_24','tqwt_skewnessValue_dec_25','tqwt_skewnessValue_dec_26','tqwt_skewnessValue_dec_27','tqwt_skewnessValue_dec_28','tqwt_skewnessValue_dec_29','tqwt_skewnessValue_dec_30','tqwt_skewnessValue_dec_31','tqwt_skewnessValue_dec_32','tqwt_skewnessValue_dec_33','tqwt_skewnessValue_dec_34','tqwt_skewnessValue_dec_35','tqwt_skewnessValue_dec_36','tqwt_kurtosisValue_dec_1','tqwt_kurtosisValue_dec_2','tqwt_kurtosisValue_dec_3','tqwt_kurtosisValue_dec_4','tqwt_kurtosisValue_dec_5','tqwt_kurtosisValue_dec_6','tqwt_kurtosisValue_dec_7','tqwt_kurtosisValue_dec_8','tqwt_kurtosisValue_dec_9','tqwt_kurtosisValue_dec_10','tqwt_kurtosisValue_dec_11','tqwt_kurtosisValue_dec_12','tqwt_kurtosisValue_dec_13','tqwt_kurtosisValue_dec_14','tqwt_kurtosisValue_dec_15','tqwt_kurtosisValue_dec_16','tqwt_kurtosisValue_dec_17','tqwt_kurtosisValue_dec_18','tqwt_kurtosisValue_dec_19','tqwt_kurtosisValue_dec_20','tqwt_kurtosisValue_dec_21','tqwt_kurtosisValue_dec_22','tqwt_kurtosisValue_dec_23','tqwt_kurtosisValue_dec_24','tqwt_kurtosisValue_dec_25','tqwt_kurtosisValue_dec_26','tqwt_kurtosisValue_dec_27','tqwt_kurtosisValue_dec_28','tqwt_kurtosisValue_dec_29','tqwt_kurtosisValue_dec_30','tqwt_kurtosisValue_dec_31','tqwt_kurtosisValue_dec_32','tqwt_kurtosisValue_dec_33','tqwt_kurtosisValue_dec_34','tqwt_kurtosisValue_dec_35','tqwt_kurtosisValue_dec_36']
nums = attrs[1:]
model = Classifier(attrs=attrs, numeric=nums, label='class')
data = model.load_data('data/parkison_disease/parkison_disease.csv')
print('\n% parkison disease dataset', np.shape(data))
return model, data
def pendigits():
attrs = ['a1','a2','a3','a4','a5','a6','a7','a8','a9','a10','a11','a12','a13','a14','a15','a16']
nums = attrs
model = Classifier(attrs=attrs, numeric=nums, label='class')
data_train = model.load_data('data/pendigits/train.csv')
data_test = model.load_data('data/pendigits/test.csv')
print('\n% pendigits train dataset', np.shape(data_train), 'test', np.shape(data_test))
return model, data_train, data_test
def wall_robot():
attrs = ['US1','US2','US3','US4','US5','US6','US7','US8','US9','US10','US11','US12','US13','US14','US15','US16','US17','US18','US19','US20','US21','US22','US23','US24']
nums = attrs
model = Classifier(attrs=attrs, numeric=nums, label='Class')
data = model.load_data('data/wall_following_robot/wall_following_robot.csv')
print('\n% wall_following_robot dataset', np.shape(data))
return model, data
def glass():
attrs = ['RI','Na','Mg','Al','Si','K','Ca','Ba','Fe']
nums = attrs
model = Classifier(attrs=attrs, numeric=nums, label='Type')
data = model.load_data('data/glass/glass.csv')
print('\n% glass dataset', np.shape(data))
return model, data
def flags():
attrs = ['name','landmass','zone','area','population','language','bars','stripes','colours','red','green','blue','gold','white','black','orange','mainhue','circles','crosses','saltires','quarters','sunstars','crescent','triangle','icon','animate','text','topleft','botright']
nums = ['area','population','stripes','colours','sunstars']
model = Classifier(attrs=attrs, numeric=nums, label='religion')
data = model.load_data('data/flags/flags.csv')
print('\n% flags dataset', np.shape(data))
return model, data
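# Usage sketch (assumes the CSV files referenced above are present on disk; shows only
# how the loader functions defined in this module are meant to be called):
if __name__ == '__main__':
    model, data = glass()                        # single-file loaders return (model, data)
    model, data_train, data_test = pendigits()   # pre-split loaders return (model, train, test)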
```

```python
import tensorflow as tf
# This section mainly covers placeholder
input1 = tf.placeholder(tf.float32)
input2 = tf.placeholder(tf.float32)
# The original tutorial used mul; the version I am using provides multiply
output = tf.multiply(input1, input2)
with tf.Session() as sess:
print(sess.run(output, feed_dict={input1: [7.], input2: [2.]}))
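# Note (not from the original tutorial): tf.placeholder and tf.Session were removed from
# the default API in TensorFlow 2.x (they remain available under tf.compat.v1). A rough
# eager-mode equivalent of the computation above would be:
#   print(tf.multiply(tf.constant([7.]), tf.constant([2.])).numpy())  # -> [14.]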
```

```python
'''Statistical tests for NDVars
Common Attributes
-----------------
The following attributes are always present. For ANOVA, they are lists with the
corresponding items for different effects.
t/f/... : NDVar
Map of the statistical parameter.
p_uncorrected : NDVar
Map of uncorrected p values.
p : NDVar | None
Map of corrected p values (None if no correction was applied).
clusters : Dataset | None
Table of all the clusters found (None if no clusters were found, or if no
clustering was performed).
n_samples : None | int
The actual number of permutations. If ``samples = -1``, i.e. a complete set
of permutations is performed, then ``n_samples`` indicates the actual
number of permutations that constitute the complete set.
'''
from datetime import datetime, timedelta
from functools import reduce, partial
from itertools import chain, repeat
from math import ceil
from multiprocessing import Process, Event, SimpleQueue
from multiprocessing.sharedctypes import RawArray
import logging
import operator
import os
import re
import socket
from time import time as current_time
from typing import Union
import numpy as np
import scipy.stats
from scipy import ndimage
from tqdm import trange
from .. import fmtxt, _info, _text
from ..fmtxt import FMText
from .._celltable import Celltable
from .._config import CONFIG
from .._data_obj import (
CategorialArg, CellArg, IndexArg, ModelArg, NDVarArg, VarArg,
Dataset, Var, Factor, Interaction, NestedEffect,
NDVar, Categorial, UTS,
ascategorial, asmodel, asndvar, asvar, assub,
cellname, combine, dataobj_repr)
from .._exceptions import OldVersionError, WrongDimension, ZeroVariance
from .._utils import LazyProperty, user_activity
from .._utils.numpy_utils import FULL_AXIS_SLICE
from . import opt, stats, vector
from .connectivity import Connectivity, find_peaks
from .connectivity_opt import merge_labels, tfce_increment
from .glm import _nd_anova
from .permutation import (
_resample_params, permute_order, permute_sign_flip, random_seeds,
rand_rotation_matrices)
from .t_contrast import TContrastRel
from .test import star, star_factor
__test__ = False
def check_for_vector_dim(y: NDVar) -> None:
for dim in y.dims:
if dim._connectivity_type == 'vector':
raise WrongDimension(f"{dim}: mass-univariate methods are not suitable for vectors. Consider using vector norm as test statistic, or using a testnd.Vector test function.")
def check_variance(x):
if x.ndim != 2:
x = x.reshape((len(x), -1))
if opt.has_zero_variance(x):
raise ZeroVariance("y contains data column with zero variance")
class NDTest:
"""Baseclass for testnd test results
Attributes
----------
p : NDVar | None
Map of p-values corrected for multiple comparison (or None if no
correction was performed).
tfce_map : NDVar | None
Map of the test statistic processed with the threshold-free cluster
enhancement algorithm (or None if no TFCE was performed).
"""
_state_common = ('y', 'match', 'sub', 'samples', 'tfce', 'pmin', '_cdist',
'tstart', 'tstop', '_dims')
_state_specific = ()
_statistic = None
_statistic_tail = 0
@property
def _attributes(self):
return self._state_common + self._state_specific
def __init__(self, y, match, sub, samples, tfce, pmin, cdist, tstart, tstop):
self.y = y.name
self.match = dataobj_repr(match) if match else match
self.sub = sub
self.samples = samples
self.tfce = tfce
self.pmin = pmin
self._cdist = cdist
self.tstart = tstart
self.tstop = tstop
self._dims = y.dims[1:]
def __getstate__(self):
return {name: getattr(self, name, None) for name in self._attributes}
def __setstate__(self, state):
# backwards compatibility:
if 'Y' in state:
state['y'] = state.pop('Y')
if 'X' in state:
state['x'] = state.pop('X')
for k, v in state.items():
setattr(self, k, v)
# backwards compatibility:
if 'tstart' not in state:
cdist = self._first_cdist
self.tstart = cdist.tstart
self.tstop = cdist.tstop
if '_dims' not in state: # 0.17
if 't' in state:
self._dims = state['t'].dims
elif 'r' in state:
self._dims = state['r'].dims
elif 'f' in state:
self._dims = state['f'][0].dims
else:
raise RuntimeError("Error recovering old test results dims")
self._expand_state()
def __repr__(self):
args = self._repr_test_args()
if self.sub is not None:
if isinstance(self.sub, np.ndarray):
sub_repr = '<array>'
else:
sub_repr = repr(self.sub)
args.append(f'sub={sub_repr}')
if self._cdist:
args += self._repr_cdist()
else:
args.append('samples=0')
return f"<{self.__class__.__name__} {', '.join(args)}>"
def _repr_test_args(self):
"""List of strings describing parameters unique to the test
Will be joined with ``", ".join(repr_args)``
"""
raise NotImplementedError()
def _repr_cdist(self):
"""List of results (override for MultiEffectResult)"""
return (self._cdist._repr_test_args(self.pmin) +
self._cdist._repr_clusters())
def _expand_state(self):
"Override to create secondary results"
cdist = self._cdist
if cdist is None:
self.tfce_map = None
self.p = None
self._kind = None
else:
self.tfce_map = cdist.tfce_map
self.p = cdist.probability_map
self._kind = cdist.kind
def _desc_samples(self):
if self.samples == -1:
return f"a complete set of {self.n_samples} permutations"
elif self.samples is None:
return "no permutations"
else:
return f"{self.n_samples} random permutations"
def _desc_timewindow(self):
tstart = self._time_dim.tmin if self.tstart is None else self.tstart
tstop = self._time_dim.tstop if self.tstop is None else self.tstop
return f"{_text.ms(tstart)} - {_text.ms(tstop)} ms"
def _asfmtext(self):
p = self.p.min()
max_stat = self._max_statistic()
return FMText((fmtxt.eq(self._statistic, max_stat, 'max', stars=p), ', ', fmtxt.peq(p)))
def _default_plot_obj(self):
raise NotImplementedError
def _iter_cdists(self):
yield (None, self._cdist)
@property
def _first_cdist(self):
return self._cdist
def _plot_model(self):
"Determine x for plotting categories"
return None
def _plot_sub(self):
if isinstance(self.sub, str) and self.sub == "<unsaved array>":
raise RuntimeError("The sub parameter was not saved for previous "
"versions of Eelbrain. Please recompute this "
"result with the current version.")
return self.sub
def _assert_has_cdist(self):
if self._cdist is None:
raise RuntimeError("This method only applies to results of tests "
"with threshold-based clustering and tests with "
"a permutation distribution (samples > 0)")
def masked_parameter_map(self, pmin=0.05, **sub):
"""Create a copy of the parameter map masked by significance
Parameters
----------
pmin : scalar
Threshold p-value for masking (default 0.05). For threshold-based
cluster tests, ``pmin=1`` includes all clusters regardless of their
p-value.
Returns
-------
masked_map : NDVar
NDVar with data from the original parameter map wherever p <= pmin
and 0 everywhere else.
"""
self._assert_has_cdist()
return self._cdist.masked_parameter_map(pmin, **sub)
def cluster(self, cluster_id):
"""Retrieve a specific cluster as NDVar
Parameters
----------
cluster_id : int
Cluster id.
Returns
-------
cluster : NDVar
NDVar of the cluster, 0 outside the cluster.
Notes
-----
Clusters only have stable ids for thresholded cluster distributions.
"""
self._assert_has_cdist()
return self._cdist.cluster(cluster_id)
@LazyProperty
def clusters(self):
if self._cdist is None:
return None
else:
return self.find_clusters(None, True)
def find_clusters(self, pmin=None, maps=False, **sub):
"""Find significant regions or clusters
Parameters
----------
pmin : None | scalar, 1 >= p >= 0
Threshold p-value. For threshold-based tests, all clusters with a
p-value smaller than ``pmin`` are included (default 1);
for other tests, find contiguous regions with ``p ≤ pmin`` (default
0.05).
maps : bool
Include in the output a map of every cluster (can be memory
intensive if there are large statistical maps and/or many
clusters; default ``False``).
Returns
-------
ds : Dataset
Dataset with information about the clusters.
"""
self._assert_has_cdist()
return self._cdist.clusters(pmin, maps, **sub)
def find_peaks(self):
"""Find peaks in a threshold-free cluster distribution
Returns
-------
ds : Dataset
Dataset with information about the peaks.
"""
self._assert_has_cdist()
return self._cdist.find_peaks()
def compute_probability_map(self, **sub):
"""Compute a probability map
Returns
-------
probability : NDVar
Map of p-values.
"""
self._assert_has_cdist()
return self._cdist.compute_probability_map(**sub)
def info_list(self, computation=True):
"List with information about the test"
out = fmtxt.List("Mass-univariate statistics:")
out.add_item(self._name())
dimnames = [dim.name for dim in self._dims]
dimlist = out.add_sublist(f"Over {_text.enumeration(dimnames)}")
if 'time' in dimnames:
dimlist.add_item(f"Time interval: {self._desc_timewindow()}.")
cdist = self._first_cdist
if cdist is None:
out.add_item("No inferential statistics")
return out
# inference
l = out.add_sublist("Inference:")
if cdist.kind == 'raw':
l.add_item("Based on maximum statistic")
elif cdist.kind == 'tfce':
l.add_item("Based on maximum statistic with threshold-"
"free cluster enhancement (Smith & Nichols, 2009)")
elif cdist.kind == 'cluster':
l.add_item("Based on maximum cluster mass statistic")
sl = l.add_sublist("Cluster criteria:")
for dim in dimnames:
if dim == 'time':
sl.add_item(f"Minimum cluster duration {_text.ms(cdist.criteria.get('mintime', 0))} ms")
elif dim == 'source':
sl.add_item(f"At least {cdist.criteria.get('minsource', 0)} contiguous sources.")
elif dim == 'sensor':
sl.add_item(f"At least {cdist.criteria.get('minsensor', 0)} contiguous sensors.")
else:
value = cdist.criteria.get(f'min{dim}', 0)
sl.add_item(f"Minimum number of contiguous elements in {dim}: {value}")
# n samples
l.add_item(f"In {self._desc_samples()}")
# computation
if computation:
out.add_item(cdist.info_list())
return out
@property
def _statistic_map(self):
return getattr(self, self._statistic)
def _max_statistic(self):
tail = getattr(self, 'tail', self._statistic_tail)
return self._max_statistic_from_map(self._statistic_map, self.p, tail)
@staticmethod
def _max_statistic_from_map(stat_map: NDVar, p_map: NDVar, tail: int):
if tail == 0:
func = stat_map.extrema
elif tail == 1:
func = stat_map.max
else:
func = stat_map.min
if p_map:
mask = p_map <= .05 if p_map.min() <= .05 else None
else:
mask = None
return func() if mask is None else func(mask)
@property
def n_samples(self):
if self.samples == -1:
return self._first_cdist.samples
else:
return self.samples
@property
def _time_dim(self):
for dim in self._first_cdist.dims:
if isinstance(dim, UTS):
return dim
return None
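# Illustrative sketch (not part of the module): every NDTest subclass defined below shares
# this result interface; for a hypothetical result object ``res``:
#   res.p                           # corrected p-value map (None without permutations)
#   res.find_clusters(0.05)         # Dataset of significant clusters/regions
#   res.masked_parameter_map(0.05)  # statistic map masked by significance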
class t_contrast_rel(NDTest):
"""Mass-univariate contrast based on t-values
Parameters
----------
y : NDVar
Dependent variable.
x : categorial
Model containing the cells which are compared with the contrast.
contrast : str
Contrast specification: see Notes.
match : Factor
Match cases for a repeated measures test.
sub : index
Perform the test with a subset of the data.
ds : None | Dataset
If a Dataset is specified, all data-objects can be specified as
names of Dataset variables.
tail : 0 | 1 | -1
Which tail of the t-distribution to consider:
0: both (two-tailed);
1: upper tail (one-tailed);
-1: lower tail (one-tailed).
samples : int
Number of samples for permutation test (default 10,000).
pmin : None | scalar (0 < pmin < 1)
Threshold for forming clusters: use a t-value equivalent to an
uncorrected p-value for a related samples t-test (with df =
len(match.cells) - 1).
tmin : scalar
Threshold for forming clusters as t-value.
tfce : bool | scalar
Use threshold-free cluster enhancement. Use a scalar to specify the
step of TFCE levels (for ``tfce is True``, 0.1 is used).
tstart : scalar
Start of the time window for the permutation test (default is the
beginning of ``y``).
tstop : scalar
Stop of the time window for the permutation test (default is the
end of ``y``).
parc : str
Collect permutation statistics for all regions of the parcellation of
this dimension. For threshold-based test, the regions are disconnected.
force_permutation: bool
Conduct permutations regardless of whether there are any clusters.
mintime : scalar
Minimum duration for clusters (in seconds).
minsource : int
Minimum number of sources per cluster.
Notes
-----
A contrast specifies the steps to calculate a map based on *t*-values.
Contrast definitions can contain:
- Comparisons using ``>`` or ``<`` and data cells to compute *t*-maps.
For example, ``"cell1 > cell0"`` will compute a *t*-map of the comparison
of ``cell1`` and ``cell0``, being positive where ``cell1`` is greater than
``cell0`` and negative where ``cell0`` is greater than ``cell1``.
If the data is defined based on an interaction, cells are specified with
``|``, e.g. ``"a1 | b1 > a0 | b0"``. Cells can contain ``*`` to average
multiple cells. Thus, if the second factor in the model has cells ``b1``
and ``b0``, ``"a1 | * > a0 | *"`` would compare ``a1`` to ``a0``
while averaging ``b1`` and ``b0`` within ``a1`` and ``a0``.
- Unary numpy functions ``abs`` and ``negative``, e.g.
``"abs(cell1 > cell0)"``.
- Binary numpy functions ``subtract`` and ``add``, e.g.
``"add(a>b, a>c)"``.
- Numpy functions for multiple arrays ``min``, ``max`` and ``sum``,
e.g. ``min(a>d, b>d, c>d)``.
Cases with zero variance are set to t=0.
Examples
--------
To find cluster where both of two pairwise comparisons are reliable,
i.e. an intersection of two effects, one could use
``"min(a > c, b > c)"``.
To find a specific kind of interaction, where a is greater than b, and
this difference is greater than the difference between c and d, one
could use ``"(a > b) - abs(c > d)"``.
"""
_state_specific = ('x', 'contrast', 't', 'tail')
_statistic = 't'
@user_activity
def __init__(
self,
y: NDVarArg,
x: CategorialArg,
contrast: str,
match: CategorialArg = None,
sub: CategorialArg = None,
ds: Dataset = None,
tail: int = 0,
samples: int = 10000,
pmin: float = None,
tmin: float = None,
tfce: Union[float, bool] = False,
tstart: float = None,
tstop: float = None,
parc: str = None,
force_permutation: bool = False,
**criteria):
if match is None:
raise TypeError("The `match` parameter needs to be specified for repeated measures test t_contrast_rel")
ct = Celltable(y, x, match, sub, ds=ds, coercion=asndvar, dtype=np.float64)
check_for_vector_dim(ct.y)
check_variance(ct.y.x)
# setup contrast
t_contrast = TContrastRel(contrast, ct.cells, ct.data_indexes)
# original data
tmap = t_contrast.map(ct.y.x)
n_threshold_params = sum((pmin is not None, tmin is not None, bool(tfce)))
if n_threshold_params == 0 and not samples:
threshold = cdist = None
elif n_threshold_params > 1:
raise ValueError("Only one of pmin, tmin and tfce can be specified")
else:
if pmin is not None:
df = len(ct.match.cells) - 1
threshold = stats.ttest_t(pmin, df, tail)
elif tmin is not None:
threshold = abs(tmin)
else:
threshold = None
cdist = NDPermutationDistribution(
ct.y, samples, threshold, tfce, tail, 't', "t-contrast",
tstart, tstop, criteria, parc, force_permutation)
cdist.add_original(tmap)
if cdist.do_permutation:
iterator = permute_order(len(ct.y), samples, unit=ct.match)
run_permutation(t_contrast, cdist, iterator)
# NDVar map of t-values
info = _info.for_stat_map('t', threshold, tail=tail, old=ct.y.info)
t = NDVar(tmap, ct.y.dims[1:], info, 't')
# store attributes
NDTest.__init__(self, ct.y, ct.match, sub, samples, tfce, pmin, cdist,
tstart, tstop)
self.x = ('%'.join(ct.x.base_names) if isinstance(ct.x, Interaction) else
ct.x.name)
self.contrast = contrast
self.tail = tail
self.tmin = tmin
self.t = t
self._expand_state()
def _name(self):
if self.y:
return "T-Contrast: %s ~ %s" % (self.y, self.contrast)
else:
return "T-Contrast: %s" % self.contrast
def _plot_model(self):
return self.x
def _repr_test_args(self):
args = [repr(self.y), repr(self.x), repr(self.contrast)]
if self.tail:
args.append("tail=%r" % self.tail)
if self.match:
args.append('match=%r' % self.match)
return args
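# Illustrative sketch (assumes a Dataset ``ds`` with an NDVar 'uts', a Factor 'A' with
# cells 'a', 'b', 'c', and a 'subject' Factor; mirrors the contrast syntax documented above):
#   res = t_contrast_rel('uts', 'A', "min(a > c, b > c)", match='subject', ds=ds,
#                        samples=1000, pmin=0.05)
#   res.clusters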
class corr(NDTest):
"""Mass-univariate correlation
Parameters
----------
y : NDVar
Dependent variable.
x : continuous
The continuous predictor variable.
norm : None | categorial
Categories in which to normalize (z-score) x.
sub : index
Perform the test with a subset of the data.
ds : None | Dataset
If a Dataset is specified, all data-objects can be specified as
names of Dataset variables.
samples : int
Number of samples for permutation test (default 10,000).
pmin : None | scalar (0 < pmin < 1)
Threshold for forming clusters: use an r-value equivalent to an
uncorrected p-value.
rmin : None | scalar
Threshold for forming clusters.
tfce : bool | scalar
Use threshold-free cluster enhancement. Use a scalar to specify the
step of TFCE levels (for ``tfce is True``, 0.1 is used).
tstart : scalar
Start of the time window for the permutation test (default is the
beginning of ``y``).
tstop : scalar
Stop of the time window for the permutation test (default is the
end of ``y``).
match : None | categorial
When permuting data, only shuffle the cases within the categories
of match.
parc : str
Collect permutation statistics for all regions of the parcellation of
this dimension. For threshold-based test, the regions are disconnected.
mintime : scalar
Minimum duration for clusters (in seconds).
minsource : int
Minimum number of sources per cluster.
Attributes
----------
clusters : None | Dataset
For cluster-based tests, a table of all clusters. Otherwise a table of
all significant regions (or ``None`` if permutations were omitted).
See also the :meth:`.find_clusters` method.
p : NDVar | None
Map of p-values corrected for multiple comparison (or None if no
correction was performed).
p_uncorrected : NDVar
Map of p-values uncorrected for multiple comparison.
r : NDVar
Map of correlation values (with threshold contours).
tfce_map : NDVar | None
Map of the test statistic processed with the threshold-free cluster
enhancement algorithm (or None if no TFCE was performed).
"""
_state_specific = ('x', 'norm', 'n', 'df', 'r')
_statistic = 'r'
@user_activity
def __init__(
self,
y: NDVarArg,
x: VarArg,
norm: CategorialArg = None,
sub: IndexArg = None,
ds: Dataset = None,
samples: int = 10000,
pmin: float = None,
rmin: float = None,
tfce: Union[float, bool] = False,
tstart: float = None,
tstop: float = None,
match: CategorialArg = None,
parc: str = None,
**criteria):
sub = assub(sub, ds)
y = asndvar(y, sub=sub, ds=ds, dtype=np.float64)
check_for_vector_dim(y)
if not y.has_case:
raise ValueError("Dependent variable needs case dimension")
x = asvar(x, sub=sub, ds=ds)
if norm is not None:
norm = ascategorial(norm, sub, ds)
if match is not None:
match = ascategorial(match, sub, ds)
name = "%s corr %s" % (y.name, x.name)
# Normalize by z-scoring the data for each subject
# normalization is done before the permutation b/c we are interested in
# the variance associated with each subject for the z-scoring.
y = y.copy()
if norm is not None:
for cell in norm.cells:
idx = (norm == cell)
y.x[idx] = scipy.stats.zscore(y.x[idx], None)
# subtract the mean from y and x so that this can be omitted during
# permutation
y -= y.summary('case')
x = x - x.mean()
n = len(y)
df = n - 2
rmap = stats.corr(y.x, x.x)
n_threshold_params = sum((pmin is not None, rmin is not None, bool(tfce)))
if n_threshold_params == 0 and not samples:
threshold = cdist = None
elif n_threshold_params > 1:
raise ValueError("Only one of pmin, rmin and tfce can be specified")
else:
if pmin is not None:
threshold = stats.rtest_r(pmin, df)
elif rmin is not None:
threshold = abs(rmin)
else:
threshold = None
cdist = NDPermutationDistribution(
y, samples, threshold, tfce, 0, 'r', name,
tstart, tstop, criteria, parc)
cdist.add_original(rmap)
if cdist.do_permutation:
iterator = permute_order(n, samples, unit=match)
run_permutation(stats.corr, cdist, iterator, x.x)
# compile results
info = _info.for_stat_map('r', threshold)
r = NDVar(rmap, y.dims[1:], info, name)
# store attributes
NDTest.__init__(self, y, match, sub, samples, tfce, pmin, cdist,
tstart, tstop)
self.x = x.name
self.norm = None if norm is None else norm.name
self.rmin = rmin
self.n = n
self.df = df
self.r = r
self._expand_state()
def _expand_state(self):
NDTest._expand_state(self)
r = self.r
# uncorrected probability
pmap = stats.rtest_p(r.x, self.df)
info = _info.for_p_map()
p_uncorrected = NDVar(pmap, r.dims, info, 'p_uncorrected')
self.p_uncorrected = p_uncorrected
self.r_p = [[r, self.p]] if self.samples else None
def _name(self):
if self.y and self.x:
return "Correlation: %s ~ %s" % (self.y, self.x)
else:
return "Correlation"
def _repr_test_args(self):
args = [repr(self.y), repr(self.x)]
if self.norm:
args.append('norm=%r' % self.norm)
return args
def _default_plot_obj(self):
if self.samples:
return self.masked_parameter_map()
else:
return self.r
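# Illustrative sketch (assumes a Dataset ``ds`` with an NDVar 'uts' and a continuous
# predictor Var 'x'):
#   res = corr('uts', 'x', ds=ds, samples=1000, pmin=0.05)
#   res.r              # correlation map
#   res.p_uncorrected  # uncorrected p-values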
class NDDifferenceTest(NDTest):
difference = None
def _get_mask(self, p=0.05):
self._assert_has_cdist()
if not 1 >= p > 0:
raise ValueError(f"p={p}: needs to be between 1 and 0")
if p == 1:
if self._cdist.kind != 'cluster':
raise ValueError("p=1 is only a valid mask for threshold-based cluster tests")
mask = self._cdist.cluster_map == 0
else:
mask = self.p > p
return self._cdist.uncrop(mask, self.difference, True)
def masked_difference(self, p=0.05):
"""Difference map masked by significance
Parameters
----------
p : scalar
Threshold p-value for masking (default 0.05). For threshold-based
cluster tests, ``pmin=1`` includes all clusters regardless of their
p-value.
"""
mask = self._get_mask(p)
return self.difference.mask(mask)
class NDMaskedC1Mixin:
def masked_c1(self, p=0.05):
"""``c1`` map masked by significance of the ``c1``-``c0`` difference
Parameters
----------
p : scalar
Threshold p-value for masking (default 0.05). For threshold-based
cluster tests, ``pmin=1`` includes all clusters regardless of their
p-value.
"""
mask = self._get_mask(p)
return self.c1_mean.mask(mask)
class ttest_1samp(NDDifferenceTest):
"""Mass-univariate one sample t-test
Parameters
----------
y : NDVar
Dependent variable.
popmean : scalar
Value to compare y against (default is 0).
match : None | categorial
Combine data for these categories before testing.
sub : index
Perform test with a subset of the data.
ds : None | Dataset
If a Dataset is specified, all data-objects can be specified as
names of Dataset variables
tail : 0 | 1 | -1
Which tail of the t-distribution to consider:
0: both (two-tailed);
1: upper tail (one-tailed);
-1: lower tail (one-tailed).
samples : int
Number of samples for permutation test (default 10,000).
pmin : None | scalar (0 < pmin < 1)
Threshold for forming clusters: use a t-value equivalent to an
uncorrected p-value.
tmin : scalar
Threshold for forming clusters as t-value.
tfce : bool | scalar
Use threshold-free cluster enhancement. Use a scalar to specify the
step of TFCE levels (for ``tfce is True``, 0.1 is used).
tstart : scalar
Start of the time window for the permutation test (default is the
beginning of ``y``).
tstop : scalar
Stop of the time window for the permutation test (default is the
end of ``y``).
parc : str
Collect permutation statistics for all regions of the parcellation of
this dimension. For threshold-based test, the regions are disconnected.
force_permutation: bool
Conduct permutations regardless of whether there are any clusters.
mintime : scalar
Minimum duration for clusters (in seconds).
minsource : int
Minimum number of sources per cluster.
Attributes
----------
clusters : None | Dataset
For cluster-based tests, a table of all clusters. Otherwise a table of
all significant regions (or ``None`` if permutations were omitted).
See also the :meth:`.find_clusters` method.
difference : NDVar
The difference value entering the test (``y`` if popmean is 0).
n : int
Number of cases.
p : NDVar | None
Map of p-values corrected for multiple comparison (or None if no
correction was performed).
p_uncorrected : NDVar
Map of p-values uncorrected for multiple comparison.
t : NDVar
Map of t-values.
tfce_map : NDVar | None
Map of the test statistic processed with the threshold-free cluster
enhancement algorithm (or None if no TFCE was performed).
Notes
-----
Data points with zero variance are set to t=0.
"""
_state_specific = ('popmean', 'tail', 'n', 'df', 't', 'difference')
_statistic = 't'
@user_activity
def __init__(
self,
y: NDVarArg,
popmean: float = 0,
match: CategorialArg = None,
sub: IndexArg = None,
ds: Dataset = None,
tail: int = 0,
samples: int = 10000,
pmin: float = None,
tmin: float = None,
tfce: Union[float, bool] = False,
tstart: float = None,
tstop: float = None,
parc: str = None,
force_permutation: bool = False,
**criteria):
ct = Celltable(y, match=match, sub=sub, ds=ds, coercion=asndvar, dtype=np.float64)
check_for_vector_dim(ct.y)
n = len(ct.y)
df = n - 1
y = ct.y.summary()
tmap = stats.t_1samp(ct.y.x)
if popmean:
raise NotImplementedError("popmean != 0")
diff = y - popmean
if np.any(diff < 0):
diff.info['cmap'] = 'xpolar'
else:
diff = y
n_threshold_params = sum((pmin is not None, tmin is not None, bool(tfce)))
if n_threshold_params == 0 and not samples:
threshold = cdist = None
elif n_threshold_params > 1:
raise ValueError("Only one of pmin, tmin and tfce can be specified")
else:
if pmin is not None:
threshold = stats.ttest_t(pmin, df, tail)
elif tmin is not None:
threshold = abs(tmin)
else:
threshold = None
if popmean:
y_perm = ct.y - popmean
else:
y_perm = ct.y
n_samples, samples = _resample_params(len(y_perm), samples)
cdist = NDPermutationDistribution(
y_perm, n_samples, threshold, tfce, tail, 't', '1-Sample t-Test',
tstart, tstop, criteria, parc, force_permutation)
cdist.add_original(tmap)
if cdist.do_permutation:
iterator = permute_sign_flip(n, samples)
run_permutation(opt.t_1samp_perm, cdist, iterator)
# NDVar map of t-values
info = _info.for_stat_map('t', threshold, tail=tail, old=ct.y.info)
t = NDVar(tmap, ct.y.dims[1:], info, 't')
# store attributes
NDDifferenceTest.__init__(self, ct.y, ct.match, sub, samples, tfce, pmin, cdist, tstart, tstop)
self.popmean = popmean
self.n = n
self.df = df
self.tail = tail
self.t = t
self.tmin = tmin
self.difference = diff
self._expand_state()
def __setstate__(self, state):
if 'diff' in state:
state['difference'] = state.pop('diff')
NDTest.__setstate__(self, state)
def _expand_state(self):
NDTest._expand_state(self)
t = self.t
pmap = stats.ttest_p(t.x, self.df, self.tail)
info = _info.for_p_map(t.info)
p_uncorr = NDVar(pmap, t.dims, info, 'p')
self.p_uncorrected = p_uncorr
def _name(self):
if self.y:
return "One-Sample T-Test: %s" % self.y
else:
return "One-Sample T-Test"
def _repr_test_args(self):
args = [repr(self.y)]
if self.popmean:
args.append(repr(self.popmean))
if self.match:
args.append('match=%r' % self.match)
if self.tail:
args.append("tail=%i" % self.tail)
return args
def _default_plot_obj(self):
if self.samples:
return self.masked_difference()
else:
return self.difference
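# Illustrative sketch (assumes a Dataset ``ds`` with an NDVar 'uts'):
#   res = ttest_1samp('uts', ds=ds, tail=1, samples=1000, tfce=True)
#   res.masked_difference(0.05)  # difference map masked by corrected significance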
def _independent_measures_args(y, x, c1, c0, match, ds, sub):
"Interpret parameters for independent measures tests (2 different argspecs)"
if isinstance(x, str):
x = ds.eval(x)
if isinstance(x, NDVar):
assert c1 is None
assert c0 is None
assert match is None
y1 = asndvar(y, sub, ds)
y0 = asndvar(x, sub, ds)
y = combine((y1, y0))
c1_name = y1.name
c0_name = y0.name
x_name = y0.name
else:
ct = Celltable(y, x, match, sub, cat=(c1, c0), ds=ds, coercion=asndvar, dtype=np.float64)
c1, c0 = ct.cat
c1_name = c1
c0_name = c0
x_name = ct.x.name
match = ct.match
y = ct.y
y1 = ct.data[c1]
y0 = ct.data[c0]
return y, y1, y0, c1, c0, match, x_name, c1_name, c0_name
class ttest_ind(NDDifferenceTest):
"""Mass-univariate independent samples t-test
Parameters
----------
y : NDVar
Dependent variable.
x : categorial | NDVar
Model containing the cells which should be compared, or NDVar to which
``y`` should be compared. In the latter case, the next three parameters
are ignored.
c1 : str | tuple | None
Test condition (cell of ``x``). ``c1`` and ``c0`` can be omitted if
``x`` only contains two cells, in which case cells will be used in
alphabetical order.
c0 : str | tuple | None
Control condition (cell of ``x``).
match : categorial
Combine cases with the same cell on ``x % match``.
sub : index
Perform the test with a subset of the data.
ds : None | Dataset
If a Dataset is specified, all data-objects can be specified as
names of Dataset variables.
tail : 0 | 1 | -1
Which tail of the t-distribution to consider:
0: both (two-tailed);
1: upper tail (one-tailed);
-1: lower tail (one-tailed).
samples : int
Number of samples for permutation test (default 10,000).
pmin : None | scalar (0 < pmin < 1)
Threshold p value for forming clusters. None for threshold-free
cluster enhancement.
tmin : scalar
Threshold for forming clusters as t-value.
tfce : bool | scalar
Use threshold-free cluster enhancement. Use a scalar to specify the
step of TFCE levels (for ``tfce is True``, 0.1 is used).
tstart : scalar
Start of the time window for the permutation test (default is the
beginning of ``y``).
tstop : scalar
Stop of the time window for the permutation test (default is the
end of ``y``).
parc : str
Collect permutation statistics for all regions of the parcellation of
this dimension. For threshold-based test, the regions are disconnected.
force_permutation: bool
Conduct permutations regardless of whether there are any clusters.
mintime : scalar
Minimum duration for clusters (in seconds).
minsource : int
Minimum number of sources per cluster.
Attributes
----------
c1_mean : NDVar
Mean in the c1 condition.
c0_mean : NDVar
Mean in the c0 condition.
clusters : None | Dataset
For cluster-based tests, a table of all clusters. Otherwise a table of
all significant regions (or ``None`` if permutations were omitted).
See also the :meth:`.find_clusters` method.
difference : NDVar
Difference between the mean in condition c1 and condition c0.
p : NDVar | None
Map of p-values corrected for multiple comparison (or None if no
correction was performed).
p_uncorrected : NDVar
Map of p-values uncorrected for multiple comparison.
t : NDVar
Map of t-values.
tfce_map : NDVar | None
Map of the test statistic processed with the threshold-free cluster
enhancement algorithm (or None if no TFCE was performed).
Notes
-----
Cases with zero variance are set to t=0.
"""
_state_specific = ('x', 'c1', 'c0', 'tail', 't', 'n1', 'n0', 'df', 'c1_mean',
'c0_mean')
_statistic = 't'
@user_activity
def __init__(
self,
y: NDVarArg,
x: Union[CategorialArg, NDVarArg],
c1: CellArg = None,
c0: CellArg = None,
match: CategorialArg = None,
sub: IndexArg = None,
ds: Dataset = None,
tail: int = 0,
samples: int = 10000,
pmin: float = None,
tmin: float = None,
tfce: Union[float, bool] = False,
tstart: float = None,
tstop: float = None,
parc: str = None,
force_permutation: bool = False,
**criteria):
y, y1, y0, c1, c0, match, x_name, c1_name, c0_name = _independent_measures_args(y, x, c1, c0, match, ds, sub)
check_for_vector_dim(y)
n1 = len(y1)
n = len(y)
n0 = n - n1
df = n - 2
groups = np.arange(n) < n1
groups.dtype = np.int8
tmap = stats.t_ind(y.x, groups)
n_threshold_params = sum((pmin is not None, tmin is not None, bool(tfce)))
if n_threshold_params == 0 and not samples:
threshold = cdist = None
elif n_threshold_params > 1:
raise ValueError("Only one of pmin, tmin and tfce can be specified")
else:
if pmin is not None:
threshold = stats.ttest_t(pmin, df, tail)
elif tmin is not None:
threshold = abs(tmin)
else:
threshold = None
cdist = NDPermutationDistribution(y, samples, threshold, tfce, tail, 't', 'Independent Samples t-Test', tstart, tstop, criteria, parc, force_permutation)
cdist.add_original(tmap)
if cdist.do_permutation:
iterator = permute_order(n, samples)
run_permutation(stats.t_ind, cdist, iterator, groups)
# store attributes
NDDifferenceTest.__init__(self, y, match, sub, samples, tfce, pmin, cdist, tstart, tstop)
self.x = x_name
self.c0 = c0
self.c1 = c1
self.n1 = n1
self.n0 = n0
self.df = df
self.tail = tail
info = _info.for_stat_map('t', threshold, tail=tail, old=y.info)
self.t = NDVar(tmap, y.dims[1:], info, 't')
self.tmin = tmin
self.c1_mean = y1.mean('case', name=cellname(c1_name))
self.c0_mean = y0.mean('case', name=cellname(c0_name))
self._expand_state()
def _expand_state(self):
NDTest._expand_state(self)
# difference
diff = self.c1_mean - self.c0_mean
if np.any(diff.x < 0):
diff.info['cmap'] = 'xpolar'
diff.name = 'difference'
self.difference = diff
# uncorrected p
pmap = stats.ttest_p(self.t.x, self.df, self.tail)
info = _info.for_p_map(self.t.info)
p_uncorr = NDVar(pmap, self.t.dims, info, 'p')
self.p_uncorrected = p_uncorr
# composites
if self.samples:
diff_p = self.masked_difference()
else:
diff_p = self.difference
self.all = [self.c1_mean, self.c0_mean, diff_p]
def _name(self):
if self.tail == 0:
comp = "%s == %s" % (self.c1, self.c0)
elif self.tail > 0:
comp = "%s > %s" % (self.c1, self.c0)
else:
comp = "%s < %s" % (self.c1, self.c0)
if self.y:
return "Independent-Samples T-Test: %s ~ %s" % (self.y, comp)
else:
return "Independent-Samples T-Test: %s" % comp
def _plot_model(self):
return self.x
def _plot_sub(self):
return "(%s).isin(%s)" % (self.x, (self.c1, self.c0))
def _repr_test_args(self):
if self.c1 is None:
args = [f'{self.y!r} (n={self.n1})', f'{self.x!r} (n={self.n0})']
else:
args = [f'{self.y!r}', f'{self.x!r}', f'{self.c1!r} (n={self.n1})', f'{self.c0!r} (n={self.n0})']
if self.match:
args.append(f'match={self.match!r}')
if self.tail:
args.append(f'tail={self.tail}')
return args
def _default_plot_obj(self):
if self.samples:
diff = self.masked_difference()
else:
diff = self.difference
return [self.c1_mean, self.c0_mean, diff]
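# Illustrative sketch (assumes a Dataset ``ds`` with an NDVar 'uts' and a Factor 'A'
# with cells 'a1' and 'a0'):
#   res = ttest_ind('uts', 'A', 'a1', 'a0', ds=ds, samples=1000, pmin=0.05)
#   res.difference           # mean(a1) - mean(a0)
#   res.find_clusters(0.05)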
def _related_measures_args(y, x, c1, c0, match, ds, sub):
"Interpret parameters for related measures tests (2 different argspecs)"
if isinstance(x, str):
if ds is None:
raise TypeError(f"x={x!r} specified as str without specifying ds")
x = ds.eval(x)
if isinstance(x, NDVar):
assert c1 is None
assert c0 is None
assert match is None
y1 = asndvar(y, sub, ds)
n = len(y1)
y0 = asndvar(x, sub, ds, n)
c1_name = y1.name
c0_name = y0.name
x_name = y0.name
elif match is None:
raise TypeError("The `match` argument needs to be specified for related measures tests")
else:
ct = Celltable(y, x, match, sub, cat=(c1, c0), ds=ds, coercion=asndvar,
dtype=np.float64)
c1, c0 = ct.cat
c1_name = c1
c0_name = c0
if not ct.all_within:
raise ValueError(f"conditions {c1!r} and {c0!r} do not have the same values on {dataobj_repr(ct.match)}")
n = len(ct.y) // 2
y1 = ct.y[:n]
y0 = ct.y[n:]
x_name = ct.x.name
match = ct.match
return y1, y0, c1, c0, match, n, x_name, c1, c1_name, c0, c0_name
class ttest_rel(NDMaskedC1Mixin, NDDifferenceTest):
"""Mass-univariate related samples t-test
Parameters
----------
y : NDVar
Dependent variable.
x : categorial | NDVar
Model containing the cells which should be compared, or NDVar to which
``y`` should be compared. In the latter case, the next three parameters
are ignored.
c1 : str | tuple | None
Test condition (cell of ``x``). ``c1`` and ``c0`` can be omitted if
``x`` only contains two cells, in which case cells will be used in
alphabetical order.
c0 : str | tuple | None
Control condition (cell of ``x``).
match : categorial
Units within which measurements are related (e.g. 'subject' in a
within-subject comparison).
sub : index
Perform the test with a subset of the data.
ds : None | Dataset
If a Dataset is specified, all data-objects can be specified as
names of Dataset variables.
tail : 0 | 1 | -1
Which tail of the t-distribution to consider:
0: both (two-tailed, default);
1: upper tail (one-tailed);
-1: lower tail (one-tailed).
samples : int
Number of samples for permutation test (default 10,000).
pmin : None | scalar (0 < pmin < 1)
Threshold for forming clusters: use a t-value equivalent to an
uncorrected p-value.
tmin : scalar
Threshold for forming clusters as t-value.
tfce : bool | scalar
Use threshold-free cluster enhancement. Use a scalar to specify the
step of TFCE levels (for ``tfce is True``, 0.1 is used).
tstart : scalar
Start of the time window for the permutation test (default is the
beginning of ``y``).
tstop : scalar
Stop of the time window for the permutation test (default is the
end of ``y``).
parc : str
Collect permutation statistics for all regions of the parcellation of
this dimension. For threshold-based test, the regions are disconnected.
force_permutation: bool
Conduct permutations regardless of whether there are any clusters.
mintime : scalar
Minimum duration for clusters (in seconds).
minsource : int
Minimum number of sources per cluster.
Attributes
----------
c1_mean : NDVar
Mean in the c1 condition.
c0_mean : NDVar
Mean in the c0 condition.
clusters : None | Dataset
For cluster-based tests, a table of all clusters. Otherwise a table of
all significant regions (or ``None`` if permutations were omitted).
See also the :meth:`.find_clusters` method.
difference : NDVar
Difference between the mean in condition c1 and condition c0.
p : NDVar | None
Map of p-values corrected for multiple comparison (or None if no
correction was performed).
p_uncorrected : NDVar
Map of p-values uncorrected for multiple comparison.
t : NDVar
Map of t-values.
tfce_map : NDVar | None
Map of the test statistic processed with the threshold-free cluster
enhancement algorithm (or None if no TFCE was performed).
n : int
Number of cases.
Notes
-----
In the permutation cluster test, permutations are done within the
categories of ``match``.
Cases with zero variance are set to t=0.
"""
_state_specific = ('x', 'c1', 'c0', 'tail', 't', 'n', 'df', 'c1_mean',
'c0_mean')
_statistic = 't'
@user_activity
def __init__(
self,
y: NDVarArg,
x: Union[CategorialArg, NDVarArg],
c1: CellArg = None,
c0: CellArg = None,
match: CategorialArg = None,
sub: IndexArg = None,
ds: Dataset = None,
tail: int = 0,
samples: int = 10000,
pmin: float = None,
tmin: float = None,
tfce: Union[float, bool] = False,
tstart: float = None,
tstop: float = None,
parc: str = None,
force_permutation: bool = False,
**criteria):
y1, y0, c1, c0, match, n, x_name, c1, c1_name, c0, c0_name = _related_measures_args(y, x, c1, c0, match, ds, sub)
check_for_vector_dim(y1)
if n <= 2:
raise ValueError("Not enough observations for t-test (n=%i)" % n)
df = n - 1
diff = y1 - y0
tmap = stats.t_1samp(diff.x)
n_threshold_params = sum((pmin is not None, tmin is not None, bool(tfce)))
if n_threshold_params == 0 and not samples:
threshold = cdist = None
elif n_threshold_params > 1:
raise ValueError("Only one of pmin, tmin and tfce can be specified")
else:
if pmin is not None:
threshold = stats.ttest_t(pmin, df, tail)
elif tmin is not None:
threshold = abs(tmin)
else:
threshold = None
n_samples, samples = _resample_params(len(diff), samples)
cdist = NDPermutationDistribution(
diff, n_samples, threshold, tfce, tail, 't', 'Related Samples t-Test',
tstart, tstop, criteria, parc, force_permutation)
cdist.add_original(tmap)
if cdist.do_permutation:
iterator = permute_sign_flip(n, samples)
run_permutation(opt.t_1samp_perm, cdist, iterator)
# NDVar map of t-values
info = _info.for_stat_map('t', threshold, tail=tail, old=y1.info)
t = NDVar(tmap, y1.dims[1:], info, 't')
# store attributes
NDDifferenceTest.__init__(self, y1, match, sub, samples, tfce, pmin, cdist, tstart, tstop)
self.x = x_name
self.c0 = c0
self.c1 = c1
self.n = n
self.df = df
self.tail = tail
self.t = t
self.tmin = tmin
self.c1_mean = y1.mean('case', name=cellname(c1_name))
self.c0_mean = y0.mean('case', name=cellname(c0_name))
self._expand_state()
def _expand_state(self):
NDTest._expand_state(self)
cdist = self._cdist
t = self.t
# difference
diff = self.c1_mean - self.c0_mean
if np.any(diff.x < 0):
diff.info['cmap'] = 'xpolar'
diff.name = 'difference'
self.difference = diff
# uncorrected p
pmap = stats.ttest_p(t.x, self.df, self.tail)
info = _info.for_p_map()
self.p_uncorrected = NDVar(pmap, t.dims, info, 'p')
# composites
if self.samples:
diff_p = self.masked_difference()
else:
diff_p = self.difference
self.all = [self.c1_mean, self.c0_mean, diff_p]
def _name(self):
if self.tail == 0:
comp = "%s == %s" % (self.c1, self.c0)
elif self.tail > 0:
comp = "%s > %s" % (self.c1, self.c0)
else:
comp = "%s < %s" % (self.c1, self.c0)
if self.y:
return "Related-Samples T-Test: %s ~ %s" % (self.y, comp)
else:
return "Related-Samples T-Test: %s" % comp
def _plot_model(self):
return self.x
def _plot_sub(self):
return "(%s).isin(%s)" % (self.x, (self.c1, self.c0))
def _repr_test_args(self):
args = [repr(self.y), repr(self.x)]
if self.c1 is not None:
args.extend((repr(self.c1), repr(self.c0), repr(self.match)))
args[-1] += " (n=%i)" % self.n
if self.tail:
args.append("tail=%i" % self.tail)
return args
def _default_plot_obj(self):
if self.samples:
diff = self.masked_difference()
else:
diff = self.difference
return [self.c1_mean, self.c0_mean, diff]
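# Illustrative sketch (assumes a Dataset ``ds`` with an NDVar 'uts', a Factor 'A' with
# cells 'a1'/'a0', and a 'subject' Factor as the repeated-measures unit):
#   res = ttest_rel('uts', 'A', 'a1', 'a0', match='subject', ds=ds, samples=1000, tfce=True)
#   res.masked_c1(0.05)  # c1 mean masked by significance of the a1 - a0 difference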
class MultiEffectNDTest(NDTest):
def _repr_test_args(self):
args = [repr(self.y), repr(self.x)]
if self.match is not None:
args.append('match=%r' % self.match)
return args
def _repr_cdist(self):
args = self._cdist[0]._repr_test_args(self.pmin)
for cdist in self._cdist:
effect_args = cdist._repr_clusters()
args.append("%r: %s" % (cdist.name, ', '.join(effect_args)))
return args
def _asfmtext(self):
table = fmtxt.Table('llll')
table.cells('Effect', fmtxt.symbol(self._statistic, 'max'), fmtxt.symbol('p'), 'sig')
table.midrule()
for i, effect in enumerate(self.effects):
table.cell(effect)
table.cell(fmtxt.stat(self._max_statistic(i)))
pmin = self.p[i].min()
table.cell(fmtxt.p(pmin))
table.cell(star(pmin))
return table
def _expand_state(self):
self.effects = tuple(e.name for e in self._effects)
# clusters
cdists = self._cdist
if cdists is None:
self._kind = None
else:
self.tfce_maps = [cdist.tfce_map for cdist in cdists]
self.p = [cdist.probability_map for cdist in cdists]
self._kind = cdists[0].kind
def _effect_index(self, effect: Union[int, str]):
if isinstance(effect, str):
return self.effects.index(effect)
else:
return effect
def _iter_cdists(self):
for cdist in self._cdist:
yield cdist.name.capitalize(), cdist
@property
def _first_cdist(self):
if self._cdist is None:
return None
else:
return self._cdist[0]
def _max_statistic(self, effect: Union[str, int]):
i = self._effect_index(effect)
stat_map = self._statistic_map[i]
tail = getattr(self, 'tail', self._statistic_tail)
return self._max_statistic_from_map(stat_map, self.p[i], tail)
def cluster(self, cluster_id, effect=0):
"""Retrieve a specific cluster as NDVar
Parameters
----------
cluster_id : int
Cluster id.
effect : int | str
Index or name of the effect from which to retrieve a cluster
(default is the first effect).
Returns
-------
cluster : NDVar
NDVar of the cluster, 0 outside the cluster.
Notes
-----
Clusters only have stable ids for thresholded cluster distributions.
"""
self._assert_has_cdist()
i = self._effect_index(effect)
return self._cdist[i].cluster(cluster_id)
def compute_probability_map(self, effect=0, **sub):
"""Compute a probability map
Parameters
----------
effect : int | str
Index or name of the effect from which to use the parameter map
(default is the first effect).
Returns
-------
probability : NDVar
Map of p-values.
"""
self._assert_has_cdist()
i = self._effect_index(effect)
return self._cdist[i].compute_probability_map(**sub)
def masked_parameter_map(self, effect=0, pmin=0.05, **sub):
"""Create a copy of the parameter map masked by significance
Parameters
----------
effect : int | str
Index or name of the effect from which to use the parameter map.
pmin : scalar
Threshold p-value for masking (default 0.05). For threshold-based
cluster tests, ``pmin=1`` includes all clusters regardless of their
p-value.
Returns
-------
masked_map : NDVar
NDVar with data from the original parameter map wherever p <= pmin
and 0 everywhere else.
"""
self._assert_has_cdist()
i = self._effect_index(effect)
return self._cdist[i].masked_parameter_map(pmin, **sub)
def find_clusters(self, pmin=None, maps=False, effect=None, **sub):
"""Find significant regions or clusters
Parameters
----------
pmin : None | scalar, 1 >= p >= 0
Threshold p-value. For threshold-based tests, all clusters with a
p-value smaller than ``pmin`` are included (default 1);
for other tests, find contiguous regions with ``p ≤ pmin`` (default
0.05).
maps : bool
Include in the output a map of every cluster (can be memory
intensive if there are large statistical maps and/or many
clusters; default ``False``).
effect : int | str
Index or name of the effect from which to find clusters (default is
all effects).
Returns
-------
ds : Dataset
Dataset with information about the clusters.
"""
self._assert_has_cdist()
if effect is not None:
i = self._effect_index(effect)
return self._cdist[i].clusters(pmin, maps, **sub)
dss = []
info = {}
for cdist in self._cdist:
ds = cdist.clusters(pmin, maps, **sub)
ds[:, 'effect'] = cdist.name
if 'clusters' in ds.info:
info['%s clusters' % cdist.name] = ds.info.pop('clusters')
dss.append(ds)
out = combine(dss)
out.info.update(info)
return out
def find_peaks(self):
"""Find peaks in a TFCE distribution
Returns
-------
ds : Dataset
Dataset with information about the peaks.
"""
self._assert_has_cdist()
dss = []
for cdist in self._cdist:
ds = cdist.find_peaks()
ds[:, 'effect'] = cdist.name
dss.append(ds)
return combine(dss)
class anova(MultiEffectNDTest):
"""Mass-univariate ANOVA
Parameters
----------
y : NDVar
Dependent variable.
x : Model
Independent variables.
sub : index
Perform the test with a subset of the data.
ds : None | Dataset
If a Dataset is specified, all data-objects can be specified as
names of Dataset variables.
samples : int
Number of samples for permutation test (default 10,000).
pmin : None | scalar (0 < pmin < 1)
Threshold for forming clusters: use an f-value equivalent to an
uncorrected p-value.
fmin : scalar
Threshold for forming clusters as f-value.
tfce : bool | scalar
Use threshold-free cluster enhancement. Use a scalar to specify the
step of TFCE levels (for ``tfce is True``, 0.1 is used).
tstart : scalar
Start of the time window for the permutation test (default is the
beginning of ``y``).
tstop : scalar
Stop of the time window for the permutation test (default is the
end of ``y``).
match : categorial | False
When permuting data, only shuffle the cases within the categories
        of ``match``. By default, ``match`` is determined automatically based on
        the random effects structure of ``x``.
parc : str
Collect permutation statistics for all regions of the parcellation of
this dimension. For threshold-based test, the regions are disconnected.
force_permutation: bool
Conduct permutations regardless of whether there are any clusters.
mintime : scalar
Minimum duration for clusters (in seconds).
minsource : int
Minimum number of sources per cluster.
Attributes
----------
effects : tuple of str
Names of the tested effects, in the same order as in other attributes.
clusters : None | Dataset
For cluster-based tests, a table of all clusters. Otherwise a table of
all significant regions (or ``None`` if permutations were omitted).
See also the :meth:`.find_clusters` method.
f : list of NDVar
Maps of F values.
p : list of NDVar | None
Maps of p-values corrected for multiple comparison (or None if no
correction was performed).
p_uncorrected : list of NDVar
Maps of p-values uncorrected for multiple comparison.
tfce_maps : list of NDVar | None
Maps of the test statistic processed with the threshold-free cluster
enhancement algorithm (or None if no TFCE was performed).
Examples
--------
For information on model specification see the univariate
:func:`~eelbrain.test.anova` examples.
"""
_state_specific = ('x', 'pmin', '_effects', '_dfs_denom', 'f')
_statistic = 'f'
_statistic_tail = 1
@user_activity
def __init__(
self,
y: NDVarArg,
x: ModelArg,
sub: IndexArg = None,
ds: Dataset = None,
samples: int = 10000,
pmin: float = None,
fmin: float = None,
tfce: Union[float, bool] = False,
tstart: float = None,
tstop: float = None,
match: Union[CategorialArg, bool] = None,
parc: str = None,
force_permutation: bool = False,
**criteria):
x_arg = x
sub_arg = sub
sub = assub(sub, ds)
y = asndvar(y, sub, ds, dtype=np.float64)
check_for_vector_dim(y)
x = asmodel(x, sub, ds)
if match is None:
random_effects = [e for e in x.effects if e.random]
if not random_effects:
match = None
elif len(random_effects) > 1:
raise NotImplementedError(
"Automatic match parameter for model with more than one "
"random effect. Set match manually.")
else:
match = random_effects[0]
elif match is not False:
match = ascategorial(match, sub, ds)
lm = _nd_anova(x)
effects = lm.effects
dfs_denom = lm.dfs_denom
fmaps = lm.map(y.x)
n_threshold_params = sum((pmin is not None, fmin is not None, bool(tfce)))
if n_threshold_params == 0 and not samples:
cdists = None
thresholds = tuple(repeat(None, len(effects)))
elif n_threshold_params > 1:
raise ValueError("Only one of pmin, fmin and tfce can be specified")
else:
if pmin is not None:
thresholds = tuple(stats.ftest_f(pmin, e.df, df_den) for e, df_den in zip(effects, dfs_denom))
elif fmin is not None:
thresholds = tuple(repeat(abs(fmin), len(effects)))
else:
thresholds = tuple(repeat(None, len(effects)))
cdists = [
NDPermutationDistribution(
y, samples, thresh, tfce, 1, 'f', e.name,
tstart, tstop, criteria, parc, force_permutation)
for e, thresh in zip(effects, thresholds)]
# Find clusters in the actual data
do_permutation = 0
for cdist, fmap in zip(cdists, fmaps):
cdist.add_original(fmap)
do_permutation += cdist.do_permutation
if do_permutation:
iterator = permute_order(len(y), samples, unit=match)
run_permutation_me(lm, cdists, iterator)
# create ndvars
dims = y.dims[1:]
f = []
for e, fmap, df_den, f_threshold in zip(effects, fmaps, dfs_denom, thresholds):
info = _info.for_stat_map('f', f_threshold, tail=1, old=y.info)
f.append(NDVar(fmap, dims, info, e.name))
# store attributes
MultiEffectNDTest.__init__(self, y, match, sub_arg, samples, tfce, pmin,
cdists, tstart, tstop)
self.x = x_arg if isinstance(x_arg, str) else x.name
self._effects = effects
self._dfs_denom = dfs_denom
self.f = f
self._expand_state()
def _expand_state(self):
# backwards compatibility
if hasattr(self, 'effects'):
self._effects = self.effects
MultiEffectNDTest._expand_state(self)
# backwards compatibility
if hasattr(self, 'df_den'):
df_den_temp = {e.name: df for e, df in self.df_den.items()}
del self.df_den
self._dfs_denom = tuple(df_den_temp[e] for e in self.effects)
# f-maps with clusters
pmin = self.pmin or 0.05
if self.samples:
f_and_clusters = []
for e, fmap, df_den, cdist in zip(self._effects, self.f,
self._dfs_denom, self._cdist):
# create f-map with cluster threshold
f0 = stats.ftest_f(pmin, e.df, df_den)
info = _info.for_stat_map('f', f0)
f_ = NDVar(fmap.x, fmap.dims, info, e.name)
# add overlay with cluster
if cdist.probability_map is not None:
f_and_clusters.append([f_, cdist.probability_map])
else:
f_and_clusters.append([f_])
self.f_probability = f_and_clusters
# uncorrected probability
p_uncorr = []
for e, f, df_den in zip(self._effects, self.f, self._dfs_denom):
info = _info.for_p_map()
pmap = stats.ftest_p(f.x, e.df, df_den)
p_ = NDVar(pmap, f.dims, info, e.name)
p_uncorr.append(p_)
self.p_uncorrected = p_uncorr
def _name(self):
if self.y:
return "ANOVA: %s ~ %s" % (self.y, self.x)
else:
return "ANOVA: %s" % self.x
def _plot_model(self):
return '%'.join(e.name for e in self._effects if isinstance(e, Factor) or
(isinstance(e, NestedEffect) and isinstance(e.effect, Factor)))
def _plot_sub(self):
return super(anova, self)._plot_sub()
def _default_plot_obj(self):
if self.samples:
return [self.masked_parameter_map(e) for e in self.effects]
else:
return self._statistic_map
def table(self):
"""Table with effects and smallest p-value"""
table = fmtxt.Table('rlr' + ('' if self.p is None else 'rl'))
table.cells('#', 'Effect', 'f_max')
if self.p is not None:
table.cells('p', 'sig')
table.midrule()
for i in range(len(self.effects)):
table.cell(i)
table.cell(self.effects[i])
table.cell(fmtxt.stat(self.f[i].max()))
if self.p is not None:
pmin = self.p[i].min()
table.cell(fmtxt.p(pmin))
table.cell(star(pmin))
return table
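# Illustrative sketch of using the ``anova`` class above, assuming ``ds`` is a
# Dataset with an NDVar 'uts', fixed-effects Factors 'A' and 'B', and a
# random-effects 'subject' Factor (all names hypothetical):
#
#     >>> res = anova('uts', 'A * B * subject', ds=ds, samples=1000, pmin=0.05)
#     >>> res.effects                      # names of the tested effects
#     >>> res.find_clusters(0.05)          # clusters for all effects
#     >>> res.masked_parameter_map('A')    # F-map of 'A', masked at p <= 0.05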
class Vector(NDDifferenceTest):
"""Test a vector field for vectors with non-random direction
Parameters
----------
y : NDVar
Dependent variable (needs to include one vector dimension).
match : None | categorial
Combine data for these categories before testing.
sub : index
Perform test with a subset of the data.
ds : None | Dataset
If a Dataset is specified, all data-objects can be specified as
names of Dataset variables
samples : int
Number of samples for permutation test (default 10000).
tmin : scalar
Threshold value for forming clusters.
tfce : bool | scalar
Use threshold-free cluster enhancement. Use a scalar to specify the
step of TFCE levels (for ``tfce is True``, 0.1 is used).
tstart : scalar
Start of the time window for the permutation test (default is the
beginning of ``y``).
tstop : scalar
Stop of the time window for the permutation test (default is the
end of ``y``).
parc : str
Collect permutation statistics for all regions of the parcellation of
this dimension. For threshold-based test, the regions are disconnected.
force_permutation: bool
Conduct permutations regardless of whether there are any clusters.
norm : bool
Use the vector norm as univariate test statistic (instead of Hotelling’s
T-Square statistic).
mintime : scalar
Minimum duration for clusters (in seconds).
minsource : int
Minimum number of sources per cluster.
Attributes
----------
n : int
Number of cases.
difference : NDVar
The vector field averaged across cases.
t2 : NDVar | None
Hotelling T-Square map; ``None`` if the test used ``norm=True``.
p : NDVar | None
Map of p-values corrected for multiple comparison (or ``None`` if no
correction was performed).
tfce_map : NDVar | None
Map of the test statistic processed with the threshold-free cluster
enhancement algorithm (or None if no TFCE was performed).
clusters : None | Dataset
For cluster-based tests, a table of all clusters. Otherwise a table of
all significant regions (or ``None`` if permutations were omitted).
See also the :meth:`.find_clusters` method.
Notes
-----
Vector tests are based on the Hotelling T-Square statistic. Computation of
the T-Square statistic relies on [1]_.
References
----------
.. [1] Kopp, J. (2008). Efficient numerical diagonalization of hermitian 3 x
3 matrices. International Journal of Modern Physics C, 19(3), 523-548.
`10.1142/S0129183108012303 <https://doi.org/10.1142/S0129183108012303>`_
"""
_state_specific = ('difference', 'n', '_v_dim', 't2')
@user_activity
def __init__(
self,
y: NDVarArg,
match: CategorialArg = None,
sub: IndexArg = None,
ds: Dataset = None,
samples: int = 10000,
tmin: float = None,
tfce: Union[float, bool] = False,
tstart: float = None,
tstop: float = None,
parc: str = None,
force_permutation: bool = False,
norm: bool = False,
**criteria):
use_norm = bool(norm)
ct = Celltable(y, match=match, sub=sub, ds=ds, coercion=asndvar, dtype=np.float64)
n = len(ct.y)
cdist = NDPermutationDistribution(ct.y, samples, tmin, tfce, 1, 'norm', 'Vector test', tstart, tstop, criteria, parc, force_permutation)
v_dim = ct.y.dimnames[cdist._vector_ax + 1]
v_mean = ct.y.mean('case')
v_mean_norm = v_mean.norm(v_dim)
if not use_norm:
t2_map = self._vector_t2_map(ct.y)
cdist.add_original(t2_map.x if v_mean.ndim > 1 else t2_map)
if v_mean.ndim == 1:
self.t2 = t2_map
else:
self.t2 = NDVar(t2_map, v_mean_norm.dims, _info.for_stat_map('t2'), 't2')
else:
cdist.add_original(v_mean_norm.x if v_mean.ndim > 1 else v_mean_norm)
self.t2 = None
if cdist.do_permutation:
iterator = random_seeds(samples)
vector_perm = partial(self._vector_perm, use_norm=use_norm)
run_permutation(vector_perm, cdist, iterator)
# store attributes
NDTest.__init__(self, ct.y, ct.match, sub, samples, tfce, None, cdist, tstart, tstop)
self.difference = v_mean
self._v_dim = v_dim
self.n = n
self._expand_state()
def __setstate__(self, state):
if 'diff' in state:
state['difference'] = state.pop('diff')
NDTest.__setstate__(self, state)
@property
def _statistic(self):
return 'norm' if self.t2 is None else 't2'
def _name(self):
if self.y:
return f"Vector test: {self.y}"
else:
return "Vector test"
def _repr_test_args(self):
args = []
if self.y:
args.append(repr(self.y))
if self.match:
args.append(f'match={self.match!r}')
return args
@staticmethod
def _vector_perm(y, out, seed, use_norm):
n_cases, n_dims, n_tests = y.shape
assert n_dims == 3
rotation = rand_rotation_matrices(n_cases, seed)
if use_norm:
return vector.mean_norm_rotated(y, rotation, out)
else:
return vector.t2_stat_rotated(y, rotation, out)
@staticmethod
def _vector_t2_map(y):
dimnames = y.get_dimnames(first=('case', 'space'))
x = y.get_data(dimnames)
t2_map = stats.t2_1samp(x)
if y.ndim == 2:
return np.float64(t2_map)
else:
dims = y.get_dims(dimnames[2:])
return NDVar(t2_map, dims)
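# Illustrative sketch of the ``Vector`` test above, assuming ``ds`` contains an
# NDVar 'v' with a vector ("space") dimension and a 'subject' Factor (names
# hypothetical):
#
#     >>> res = Vector('v', match='subject', ds=ds, samples=1000, tfce=True)
#     >>> res.difference    # vector field averaged across cases
#     >>> res.p.min()       # smallest corrected p-value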
class VectorDifferenceIndependent(Vector):
"""Test difference between two vector fields for non-random direction
Parameters
----------
y : NDVar
Dependent variable.
x : categorial | NDVar
Model containing the cells which should be compared, or NDVar to which
``y`` should be compared. In the latter case, the next three parameters
are ignored.
c1 : str | tuple | None
Test condition (cell of ``x``). ``c1`` and ``c0`` can be omitted if
``x`` only contains two cells, in which case cells will be used in
alphabetical order.
c0 : str | tuple | None
Control condition (cell of ``x``).
match : categorial
Combine cases with the same cell on ``x % match``.
sub : index
Perform the test with a subset of the data.
ds : None | Dataset
If a Dataset is specified, all data-objects can be specified as
names of Dataset variables.
samples : int
Number of samples for permutation test (default 10000).
tmin : scalar
Threshold value for forming clusters.
tfce : bool | scalar
Use threshold-free cluster enhancement. Use a scalar to specify the
step of TFCE levels (for ``tfce is True``, 0.1 is used).
tstart : scalar
Start of the time window for the permutation test (default is the
beginning of ``y``).
tstop : scalar
Stop of the time window for the permutation test (default is the
end of ``y``).
parc : str
Collect permutation statistics for all regions of the parcellation of
this dimension. For threshold-based test, the regions are disconnected.
force_permutation: bool
Conduct permutations regardless of whether there are any clusters.
norm : bool
Use the vector norm as univariate test statistic (instead of Hotelling’s
T-Square statistic).
mintime : scalar
Minimum duration for clusters (in seconds).
minsource : int
Minimum number of sources per cluster.
Attributes
----------
n : int
Total number of cases.
n1 : int
Number of cases in ``c1``.
n0 : int
Number of cases in ``c0``.
c1_mean : NDVar
Mean in the c1 condition.
c0_mean : NDVar
Mean in the c0 condition.
difference : NDVar
Difference between the mean in condition c1 and condition c0.
t2 : NDVar | None
Hotelling T-Square map; ``None`` if the test used ``norm=True``.
p : NDVar | None
Map of p-values corrected for multiple comparison (or None if no
correction was performed).
tfce_map : NDVar | None
Map of the test statistic processed with the threshold-free cluster
enhancement algorithm (or None if no TFCE was performed).
clusters : None | Dataset
For cluster-based tests, a table of all clusters. Otherwise a table of
all significant regions (or ``None`` if permutations were omitted).
See also the :meth:`.find_clusters` method.
"""
    _state_specific = ('difference', 'c1_mean', 'c0_mean', 'n', '_v_dim', 't2')
_statistic = 'norm'
@user_activity
def __init__(
self,
y: NDVarArg,
x: Union[CategorialArg, NDVarArg],
c1: str = None,
c0: str = None,
match: CategorialArg = None,
sub: IndexArg = None,
ds: Dataset = None,
samples: int = 10000,
tmin: float = None,
tfce: bool = False,
tstart: float = None,
tstop: float = None,
parc: str = None,
force_permutation: bool = False,
norm: bool = False,
**criteria):
use_norm = bool(norm)
y, y1, y0, c1, c0, match, x_name, c1_name, c0_name = _independent_measures_args(y, x, c1, c0, match, ds, sub)
self.n1 = len(y1)
self.n0 = len(y0)
self.n = len(y)
cdist = NDPermutationDistribution(y, samples, tmin, tfce, 1, 'norm', 'Vector test (independent)', tstart, tstop, criteria, parc, force_permutation)
self._v_dim = v_dim = y.dimnames[cdist._vector_ax + 1]
self.c1_mean = y1.mean('case', name=cellname(c1_name))
self.c0_mean = y0.mean('case', name=cellname(c0_name))
self.difference = self.c1_mean - self.c0_mean
self.difference.name = 'difference'
v_mean_norm = self.difference.norm(v_dim)
if not use_norm:
raise NotImplementedError("t2 statistic not implemented for VectorDifferenceIndependent")
else:
cdist.add_original(v_mean_norm.x if self.difference.ndim > 1 else v_mean_norm)
self.t2 = None
if cdist.do_permutation:
iterator = random_seeds(samples)
vector_perm = partial(self._vector_perm, use_norm=use_norm)
run_permutation(vector_perm, cdist, iterator, self.n1)
NDTest.__init__(self, y, match, sub, samples, tfce, None, cdist, tstart, tstop)
self._expand_state()
def _name(self):
if self.y:
return f"Vector test (independent): {self.y}"
else:
return "Vector test (independent)"
@staticmethod
def _vector_perm(y, n1, out, seed, use_norm):
assert use_norm
n_cases, n_dims, n_tests = y.shape
assert n_dims == 3
# randomize directions
rotation = rand_rotation_matrices(n_cases, seed)
# randomize groups
cases = np.arange(n_cases)
np.random.shuffle(cases)
# group 1
mean_1 = np.zeros((n_dims, n_tests))
for case in cases[:n1]:
mean_1 += np.tensordot(rotation[case], y[case], ((1,), (0,)))
mean_1 /= n1
# group 0
mean_0 = np.zeros((n_dims, n_tests))
for case in cases[n1:]:
mean_0 += np.tensordot(rotation[case], y[case], ((1,), (0,)))
mean_0 /= (n_cases - n1)
# difference
mean_1 -= mean_0
norm = scipy.linalg.norm(mean_1, 2, axis=0)
if out is not None:
out[:] = norm
return norm
class VectorDifferenceRelated(NDMaskedC1Mixin, Vector):
"""Test difference between two vector fields for non-random direction
Parameters
----------
y : NDVar
Dependent variable.
x : categorial | NDVar
Model containing the cells which should be compared, or NDVar to which
``y`` should be compared. In the latter case, the next three parameters
are ignored.
c1 : str | tuple | None
Test condition (cell of ``x``). ``c1`` and ``c0`` can be omitted if
``x`` only contains two cells, in which case cells will be used in
alphabetical order.
c0 : str | tuple | None
Control condition (cell of ``x``).
match : categorial
Units within which measurements are related (e.g. 'subject' in a
within-subject comparison).
sub : index
Perform the test with a subset of the data.
ds : None | Dataset
If a Dataset is specified, all data-objects can be specified as
names of Dataset variables.
samples : int
Number of samples for permutation test (default 10000).
tmin : scalar
Threshold value for forming clusters.
tfce : bool | scalar
Use threshold-free cluster enhancement. Use a scalar to specify the
step of TFCE levels (for ``tfce is True``, 0.1 is used).
tstart : scalar
Start of the time window for the permutation test (default is the
beginning of ``y``).
tstop : scalar
Stop of the time window for the permutation test (default is the
end of ``y``).
parc : str
Collect permutation statistics for all regions of the parcellation of
this dimension. For threshold-based test, the regions are disconnected.
force_permutation: bool
Conduct permutations regardless of whether there are any clusters.
norm : bool
Use the vector norm as univariate test statistic (instead of Hotelling’s
T-Square statistic).
mintime : scalar
Minimum duration for clusters (in seconds).
minsource : int
Minimum number of sources per cluster.
Attributes
----------
n : int
Number of cases.
c1_mean : NDVar
Mean in the ``c1`` condition.
c0_mean : NDVar
Mean in the ``c0`` condition.
difference : NDVar
Difference between the mean in condition ``c1`` and condition ``c0``.
t2 : NDVar | None
Hotelling T-Square map; ``None`` if the test used ``norm=True``.
p : NDVar | None
Map of p-values corrected for multiple comparison (or ``None`` if no
correction was performed).
tfce_map : NDVar | None
Map of the test statistic processed with the threshold-free cluster
enhancement algorithm (or None if no TFCE was performed).
clusters : None | Dataset
For cluster-based tests, a table of all clusters. Otherwise a table of
all significant regions (or ``None`` if permutations were omitted).
See also the :meth:`.find_clusters` method.
See Also
--------
Vector : One-sample vector test, notes on vector test implementation
"""
    _state_specific = ('difference', 'c1_mean', 'c0_mean', 'n', '_v_dim', 't2')
@user_activity
def __init__(
self,
y: NDVarArg,
x: Union[CategorialArg, NDVarArg],
c1: str = None,
c0: str = None,
match: CategorialArg = None,
sub: IndexArg = None,
ds: Dataset = None,
samples: int = 10000,
tmin: float = None,
tfce: bool = False,
tstart: float = None,
tstop: float = None,
parc: str = None,
force_permutation: bool = False,
norm: bool = False,
**criteria):
use_norm = bool(norm)
y1, y0, c1, c0, match, n, x_name, c1, c1_name, c0, c0_name = _related_measures_args(y, x, c1, c0, match, ds, sub)
difference = y1 - y0
difference.name = 'difference'
n_samples, samples = _resample_params(n, samples)
cdist = NDPermutationDistribution(difference, n_samples, tmin, tfce, 1, 'norm', 'Vector test (related)', tstart, tstop, criteria, parc, force_permutation)
v_dim = difference.dimnames[cdist._vector_ax + 1]
v_mean = difference.mean('case')
v_mean_norm = v_mean.norm(v_dim)
if not use_norm:
t2_map = self._vector_t2_map(difference)
cdist.add_original(t2_map.x if v_mean.ndim > 1 else t2_map)
if v_mean.ndim == 1:
self.t2 = t2_map
else:
self.t2 = NDVar(t2_map, v_mean_norm.dims, _info.for_stat_map('t2'), 't2')
else:
cdist.add_original(v_mean_norm.x if v_mean.ndim > 1 else v_mean_norm)
self.t2 = None
if cdist.do_permutation:
iterator = random_seeds(n_samples)
vector_perm = partial(self._vector_perm, use_norm=use_norm)
run_permutation(vector_perm, cdist, iterator)
# store attributes
NDTest.__init__(self, difference, match, sub, samples, tfce, None, cdist, tstart, tstop)
self.difference = v_mean
self.c1_mean = y1.mean('case', name=cellname(c1_name))
self.c0_mean = y0.mean('case', name=cellname(c0_name))
self._v_dim = v_dim
self.n = n
self._expand_state()
def _name(self):
if self.y:
return f"Vector test (related): {self.y}"
else:
return "Vector test (related)"
def flatten(array, connectivity):
"""Reshape SPM buffer array to 2-dimensional map for connectivity processing
Parameters
----------
array : ndarray
N-dimensional array (with non-adjacent dimension at first position).
connectivity : Connectivity
N-dimensional connectivity.
Returns
-------
flat_array : ndarray
The input array reshaped if necessary, making sure that input and output
arrays share the same underlying data buffer.
"""
if array.ndim == 2 or not connectivity.custom:
return array
else:
out = array.reshape((array.shape[0], -1))
assert out.base is array
return out
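# Illustrative example of ``flatten``: with custom (non-adjacent) connectivity,
# only the trailing axes are collapsed, and the result shares its buffer with
# the input (array names are hypothetical):
#
#     >>> a = np.empty((5, 4, 3))          # (non-adjacent, ..., time)
#     >>> flat = flatten(a, connectivity)  # shape (5, 12) when connectivity.custom
#     >>> flat.base is a
#     True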
def flatten_1d(array):
if array.ndim == 1:
return array
else:
out = array.ravel()
assert out.base is array
return out
def label_clusters(stat_map, threshold, tail, connectivity, criteria):
"""Label clusters
Parameters
----------
stat_map : array
Statistical parameter map (non-adjacent dimension on the first
axis).
Returns
-------
cmap : np.ndarray of uint32
Array with clusters labelled as integers.
cluster_ids : np.ndarray of uint32
Identifiers of the clusters that survive the minimum duration
criterion.
"""
cmap = np.empty(stat_map.shape, np.uint32)
bin_buff = np.empty(stat_map.shape, np.bool8)
cmap_flat = flatten(cmap, connectivity)
if tail == 0:
int_buff = np.empty(stat_map.shape, np.uint32)
int_buff_flat = flatten(int_buff, connectivity)
else:
int_buff = int_buff_flat = None
cids = _label_clusters(stat_map, threshold, tail, connectivity, criteria,
cmap, cmap_flat, bin_buff, int_buff, int_buff_flat)
return cmap, cids
def _label_clusters(stat_map, threshold, tail, conn, criteria, cmap, cmap_flat,
bin_buff, int_buff, int_buff_flat):
"""Find clusters on a statistical parameter map
Parameters
----------
stat_map : array
Statistical parameter map (non-adjacent dimension on the first
axis).
cmap : array of int
Buffer for the cluster id map (will be modified).
Returns
-------
cluster_ids : np.ndarray of uint32
Identifiers of the clusters that survive the minimum duration
criterion.
"""
# compute clusters
if tail >= 0:
bin_map_above = np.greater(stat_map, threshold, bin_buff)
cids = _label_clusters_binary(bin_map_above, cmap, cmap_flat, conn,
criteria)
if tail <= 0:
bin_map_below = np.less(stat_map, -threshold, bin_buff)
if tail < 0:
cids = _label_clusters_binary(bin_map_below, cmap, cmap_flat, conn,
criteria)
else:
cids_l = _label_clusters_binary(bin_map_below, int_buff,
int_buff_flat, conn, criteria)
x = cmap.max()
int_buff[bin_map_below] += x
cids_l += x
cmap += int_buff
cids = np.concatenate((cids, cids_l))
return cids
def label_clusters_binary(bin_map, connectivity, criteria=None):
"""Label clusters in a boolean map
Parameters
----------
bin_map : numpy.ndarray
Binary map.
connectivity : Connectivity
Connectivity corresponding to ``bin_map``.
criteria : dict
Cluster criteria.
Returns
-------
cmap : numpy.ndarray of uint32
Array with clusters labelled as integers.
cluster_ids : numpy.ndarray of uint32
Sorted identifiers of the clusters that survive the selection criteria.
"""
cmap = np.empty(bin_map.shape, np.uint32)
cmap_flat = flatten(cmap, connectivity)
cids = _label_clusters_binary(bin_map, cmap, cmap_flat, connectivity, criteria)
return cmap, cids
def _label_clusters_binary(bin_map, cmap, cmap_flat, connectivity, criteria):
"""Label clusters in a binary array
Parameters
----------
bin_map : np.ndarray
Binary map of where the parameter map exceeds the threshold for a
cluster (non-adjacent dimension on the first axis).
cmap : np.ndarray
Array in which to label the clusters.
cmap_flat : np.ndarray
Flat copy of cmap (ndim=2, only used when all_adjacent==False)
connectivity : Connectivity
Connectivity.
criteria : None | list
Cluster size criteria, list of (axes, v) tuples. Collapse over axes
and apply v minimum length).
Returns
-------
cluster_ids : np.ndarray of uint32
Sorted identifiers of the clusters that survive the selection criteria.
"""
# find clusters
n = ndimage.label(bin_map, connectivity.struct, cmap)
if n <= 1:
# in older versions, n is 1 even when no cluster is found
if n == 0 or cmap.max() == 0:
return np.array((), np.uint32)
else:
cids = np.array((1,), np.uint32)
elif connectivity.custom:
cids = merge_labels(cmap_flat, n, *connectivity.custom[0])
else:
cids = np.arange(1, n + 1, 1, np.uint32)
# apply minimum cluster size criteria
if criteria and cids.size:
for axes, v in criteria:
cids = np.setdiff1d(cids,
[i for i in cids if np.count_nonzero(np.equal(cmap, i).any(axes)) < v],
True)
if cids.size == 0:
break
return cids
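# Illustrative note on ``criteria``: it is a list of ``(axes, v)`` tuples
# derived from keywords such as ``mintime``. For a (time, sensor) map with a
# 10 ms time step, ``mintime=0.02`` arrives here as ``[((1,), 2)]``, and a
# cluster with id ``i`` survives only if
#
#     np.count_nonzero(np.equal(cmap, i).any((1,))) >= 2
#
# i.e. it extends over at least 2 time samples.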
def tfce(stat_map, tail, connectivity, dh=0.1):
tfce_im = np.empty(stat_map.shape, np.float64)
tfce_im_1d = flatten_1d(tfce_im)
bin_buff = np.empty(stat_map.shape, np.bool8)
int_buff = np.empty(stat_map.shape, np.uint32)
int_buff_flat = flatten(int_buff, connectivity)
int_buff_1d = flatten_1d(int_buff)
return _tfce(stat_map, tail, connectivity, tfce_im, tfce_im_1d, bin_buff, int_buff,
int_buff_flat, int_buff_1d, dh)
def _tfce(stat_map, tail, conn, out, out_1d, bin_buff, int_buff,
int_buff_flat, int_buff_1d, dh=0.1, e=0.5, h=2.0):
"Threshold-free cluster enhancement"
out.fill(0)
# determine slices
if tail == 0:
hs = chain(np.arange(-dh, stat_map.min(), -dh),
np.arange(dh, stat_map.max(), dh))
elif tail < 0:
hs = np.arange(-dh, stat_map.min(), -dh)
else:
hs = np.arange(dh, stat_map.max(), dh)
# label clusters in slices at different heights
# fill each cluster with total section value
# each point's value is the vertical sum
for h_ in hs:
if h_ > 0:
np.greater_equal(stat_map, h_, bin_buff)
h_factor = h_ ** h
else:
np.less_equal(stat_map, h_, bin_buff)
h_factor = (-h_) ** h
c_ids = _label_clusters_binary(bin_buff, int_buff, int_buff_flat, conn, None)
tfce_increment(c_ids, int_buff_1d, out_1d, e, h_factor)
return out
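# Clarifying note: the loop above approximates the TFCE integral of Smith &
# Nichols (2009),
#
#     TFCE(p) = sum over heights h of extent(p, h)**E * h**H  (with step dh),
#
# where E=0.5 and H=2.0 correspond to the defaults ``e`` and ``h`` above. At
# each threshold ``h_`` every supra-threshold point receives an increment of
# ``h_factor``, weighted by its cluster's extent (presumably computed inside
# ``tfce_increment``).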
class StatMapProcessor:
def __init__(self, tail, max_axes, parc):
"""Reduce a statistical map to the relevant maximum statistic"""
self.tail = tail
self.max_axes = max_axes
self.parc = parc
def max_stat(self, stat_map):
if self.tail == 0:
v = np.abs(stat_map, stat_map).max(self.max_axes)
elif self.tail > 0:
v = stat_map.max(self.max_axes)
else:
v = -stat_map.min(self.max_axes)
if self.parc is None:
return v
else:
return [v[idx].max() for idx in self.parc]
class TFCEProcessor(StatMapProcessor):
def __init__(self, tail, max_axes, parc, shape, connectivity, dh):
StatMapProcessor.__init__(self, tail, max_axes, parc)
self.shape = shape
self.connectivity = connectivity
self.dh = dh
# Pre-allocate memory buffers used for cluster processing
self._bin_buff = np.empty(shape, np.bool8)
self._int_buff = np.empty(shape, np.uint32)
self._tfce_im = np.empty(shape, np.float64)
self._tfce_im_1d = flatten_1d(self._tfce_im)
self._int_buff_flat = flatten(self._int_buff, connectivity)
self._int_buff_1d = flatten_1d(self._int_buff)
def max_stat(self, stat_map):
v = _tfce(
stat_map, self.tail, self.connectivity, self._tfce_im, self._tfce_im_1d,
self._bin_buff, self._int_buff, self._int_buff_flat, self._int_buff_1d,
self.dh,
).max(self.max_axes)
if self.parc is None:
return v
else:
return [v[idx].max() for idx in self.parc]
class ClusterProcessor(StatMapProcessor):
def __init__(self, tail, max_axes, parc, shape, connectivity, threshold,
criteria):
StatMapProcessor.__init__(self, tail, max_axes, parc)
self.shape = shape
self.connectivity = connectivity
self.threshold = threshold
self.criteria = criteria
# Pre-allocate memory buffers used for cluster processing
self._bin_buff = np.empty(shape, np.bool8)
self._cmap = np.empty(shape, np.uint32)
self._cmap_flat = flatten(self._cmap, connectivity)
if tail == 0:
self._int_buff = np.empty(shape, np.uint32)
self._int_buff_flat = flatten(self._int_buff, connectivity)
else:
self._int_buff = self._int_buff_flat = None
def max_stat(self, stat_map, threshold=None):
if threshold is None:
threshold = self.threshold
cmap = self._cmap
cids = _label_clusters(stat_map, threshold, self.tail, self.connectivity,
self.criteria, cmap, self._cmap_flat,
self._bin_buff, self._int_buff,
self._int_buff_flat)
if self.parc is not None:
v = []
for idx in self.parc:
clusters_v = ndimage.sum(stat_map[idx], cmap[idx], cids)
if len(clusters_v):
if self.tail <= 0:
np.abs(clusters_v, clusters_v)
v.append(clusters_v.max())
else:
v.append(0)
return v
elif len(cids):
clusters_v = ndimage.sum(stat_map, cmap, cids)
if self.tail <= 0:
np.abs(clusters_v, clusters_v)
return clusters_v.max()
else:
return 0
def get_map_processor(kind, *args):
if kind == 'tfce':
return TFCEProcessor(*args)
elif kind == 'cluster':
return ClusterProcessor(*args)
elif kind == 'raw':
return StatMapProcessor(*args)
else:
raise ValueError("kind=%s" % repr(kind))
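# Illustrative sketch: permutation workers obtain a processor from this factory
# using the ``map_args`` tuple assembled in ``NDPermutationDistribution``; for
# a threshold-based cluster test this is roughly
#
#     >>> proc = get_map_processor('cluster', tail, max_axes, parc_indexes,
#     ...                          shape, connectivity, threshold, criteria_)
#     >>> proc.max_stat(stat_map)   # largest cluster mass for one permutation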
class NDPermutationDistribution:
"""Accumulate information on a cluster statistic.
Parameters
----------
y : NDVar
Dependent variable.
samples : int
Number of permutations.
threshold : scalar > 0
Threshold-based clustering.
tfce : bool | scalar
Threshold-free cluster enhancement.
tail : 1 | 0 | -1
Which tail(s) of the distribution to consider. 0 is two-tailed,
whereas 1 only considers positive values and -1 only considers
negative values.
meas : str
Label for the parameter measurement (e.g., 't' for t-values).
name : None | str
Name for the comparison.
tstart, tstop : None | scalar
Restrict the time window for finding clusters (None: use the whole
epoch).
criteria : dict
Dictionary with threshold criteria for cluster size: 'mintime'
(seconds) and 'minsource' (n_sources).
parc : str
Collect permutation statistics for all regions of the parcellation of
this dimension. For threshold-based test, the regions are disconnected.
force_permutation : bool
Conduct permutations regardless of whether there are any clusters.
Notes
-----
Use of the NDPermutationDistribution proceeds in 3 steps:
- initialize the NDPermutationDistribution object: ``cdist = NDPermutationDistribution(...)``
    - use a copy of y cropped to the time window of interest:
      ``y = cdist.y_perm``
- add the actual statistical map with ``cdist.add_original(pmap)``
- if any clusters are found (``if cdist.n_clusters``):
- proceed to add statistical maps from permuted data with
``cdist.add_perm(pmap)``.
Permutation data shape: case, [vector, ][non-adjacent, ] ...
internal shape: [non-adjacent, ] ...
"""
tfce_warning = None
def __init__(self, y, samples, threshold, tfce=False, tail=0, meas='?', name=None,
tstart=None, tstop=None, criteria={}, parc=None, force_permutation=False):
assert y.has_case
assert parc is None or isinstance(parc, str)
if tfce and threshold:
raise RuntimeError(f"threshold={threshold!r}, tfce={tfce!r}: mutually exclusive parameters")
elif tfce:
if tfce is not True:
tfce = abs(tfce)
kind = 'tfce'
elif threshold:
threshold = float(threshold)
kind = 'cluster'
assert threshold > 0
else:
kind = 'raw'
# vector: will be removed for stat_map
vector = [d._connectivity_type == 'vector' for d in y.dims[1:]]
has_vector_ax = any(vector)
if has_vector_ax:
vector_ax = vector.index(True)
else:
vector_ax = None
# prepare temporal cropping
if (tstart is None) and (tstop is None):
y_perm = y
self._crop_for_permutation = False
self._crop_idx = None
else:
t_ax = y.get_axis('time') - 1
y_perm = y.sub(time=(tstart, tstop))
# for stat-maps
if vector_ax is not None and vector_ax < t_ax:
t_ax -= 1
t_slice = y.time._array_index(slice(tstart, tstop))
self._crop_for_permutation = True
self._crop_idx = FULL_AXIS_SLICE * t_ax + (t_slice,)
dims = list(y_perm.dims[1:])
if has_vector_ax:
del dims[vector_ax]
# custom connectivity: move non-adjacent connectivity to first axis
custom = [d._connectivity_type == 'custom' for d in dims]
n_custom = sum(custom)
if n_custom > 1:
raise NotImplementedError("More than one axis with custom connectivity")
nad_ax = None if n_custom == 0 else custom.index(True)
if nad_ax:
swapped_dims = list(dims)
swapped_dims[0], swapped_dims[nad_ax] = dims[nad_ax], dims[0]
else:
swapped_dims = dims
connectivity = Connectivity(swapped_dims, parc)
assert connectivity.vector is None
# cluster map properties
ndim = len(dims)
# prepare cluster minimum size criteria
if criteria:
criteria_ = []
for k, v in criteria.items():
                m = re.match(r'min(\w+)', k)
if m:
dimname = m.group(1)
if not y.has_dim(dimname):
raise TypeError(
"%r is an invalid keyword argument for this testnd "
"function (no dimension named %r)" % (k, dimname))
ax = y.get_axis(dimname) - 1
if dimname == 'time':
v = int(ceil(v / y.time.tstep))
else:
raise TypeError("%r is an invalid keyword argument for this testnd function" % (k,))
if nad_ax:
if ax == 0:
ax = nad_ax
elif ax == nad_ax:
ax = 0
axes = tuple(i for i in range(ndim) if i != ax)
criteria_.append((axes, v))
if kind != 'cluster':
# here so that invalid keywords raise explicitly
err = ("Can not use cluster size criteria when doing "
"threshold free cluster evaluation")
raise ValueError(err)
else:
criteria_ = None
# prepare distribution
samples = int(samples)
if parc:
for parc_ax, parc_dim in enumerate(swapped_dims):
if parc_dim.name == parc:
break
else:
raise ValueError("parc=%r (no dimension named %r)" % (parc, parc))
if parc_dim._connectivity_type == 'none':
parc_indexes = np.arange(len(parc_dim))
elif kind == 'tfce':
raise NotImplementedError(
f"TFCE for parc={parc!r} ({parc_dim.__class__.__name__} dimension)")
elif parc_dim._connectivity_type == 'custom':
if not hasattr(parc_dim, 'parc'):
raise NotImplementedError(f"parc={parc!r}: dimension has no parcellation")
parc_indexes = tuple(np.flatnonzero(parc_dim.parc == cell) for
cell in parc_dim.parc.cells)
parc_dim = Categorial(parc, parc_dim.parc.cells)
else:
raise NotImplementedError(f"parc={parc!r}")
dist_shape = (samples, len(parc_dim))
dist_dims = ('case', parc_dim)
max_axes = tuple(chain(range(parc_ax), range(parc_ax + 1, ndim)))
else:
dist_shape = (samples,)
dist_dims = None
max_axes = None
parc_indexes = None
# arguments for the map processor
shape = tuple(map(len, swapped_dims))
if kind == 'raw':
map_args = (kind, tail, max_axes, parc_indexes)
elif kind == 'tfce':
dh = 0.1 if tfce is True else tfce
map_args = (kind, tail, max_axes, parc_indexes, shape, connectivity, dh)
else:
map_args = (kind, tail, max_axes, parc_indexes, shape, connectivity, threshold, criteria_)
self.kind = kind
self.y_perm = y_perm
self.dims = tuple(dims) # external stat map dims (cropped time)
self.shape = shape # internal stat map shape
self._connectivity = connectivity
self.samples = samples
self.dist_shape = dist_shape
self._dist_dims = dist_dims
self._max_axes = max_axes
self.dist = None
self.threshold = threshold
self.tfce = tfce
self.tail = tail
self._nad_ax = nad_ax
self._vector_ax = vector_ax
self.tstart = tstart
self.tstop = tstop
self.parc = parc
self.meas = meas
self.name = name
self._criteria = criteria_
self.criteria = criteria
self.map_args = map_args
self.has_original = False
self.do_permutation = False
self.dt_perm = None
self._finalized = False
self._init_time = current_time()
self._host = socket.gethostname()
self.force_permutation = force_permutation
from .. import __version__
self._version = __version__
def _crop(self, im):
"Crop an original stat_map"
if self._crop_for_permutation:
return im[self._crop_idx]
else:
return im
def uncrop(
self,
ndvar: NDVar, # NDVar to uncrop
to: NDVar, # NDVar that has the target time dimensions
default: float = 0, # value to fill in uncropped area
):
if self.tstart is None and self.tstop is None:
return ndvar
target_time = to.get_dim('time')
t_ax = ndvar.get_axis('time')
dims = list(ndvar.dims)
dims[t_ax] = target_time
shape = list(ndvar.shape)
shape[t_ax] = len(target_time)
t_slice = target_time._array_index(slice(self.tstart, self.tstop))
x = np.empty(shape, ndvar.x.dtype)
x.fill(default)
x[FULL_AXIS_SLICE * t_ax + (t_slice,)] = ndvar.x
return NDVar(x, dims, ndvar.info, ndvar.name)
def add_original(self, stat_map):
"""Add the original statistical parameter map.
Parameters
----------
stat_map : array
Parameter map of the statistic of interest (uncropped).
"""
if self.has_original:
raise RuntimeError("Original pmap already added")
logger = logging.getLogger(__name__)
logger.debug("Adding original parameter map...")
# crop/reshape stat_map
stat_map = self._crop(stat_map)
if self._nad_ax:
stat_map = stat_map.swapaxes(0, self._nad_ax)
# process map
if self.kind == 'tfce':
dh = 0.1 if self.tfce is True else self.tfce
self.tfce_warning = max(stat_map.max(), -stat_map.min()) < dh
cmap = tfce(stat_map, self.tail, self._connectivity, dh)
cids = None
n_clusters = cmap.max() > 0
elif self.kind == 'cluster':
cmap, cids = label_clusters(stat_map, self.threshold, self.tail,
self._connectivity, self._criteria)
n_clusters = len(cids)
# clean original cluster map
idx = np.in1d(cmap, cids, invert=True).reshape(self.shape)
cmap[idx] = 0
else:
cmap = stat_map
cids = None
n_clusters = True
self._t0 = current_time()
self._original_cluster_map = cmap
self._cids = cids
self.n_clusters = n_clusters
self.has_original = True
self.dt_original = self._t0 - self._init_time
self._original_param_map = stat_map
if self.force_permutation or (self.samples and n_clusters):
self._create_dist()
self.do_permutation = True
else:
self.dist_array = None
self.finalize()
def _create_dist(self):
"Create the distribution container"
if CONFIG['n_workers']:
n = reduce(operator.mul, self.dist_shape)
dist_array = RawArray('d', n)
dist = np.frombuffer(dist_array, np.float64, n)
dist.shape = self.dist_shape
else:
dist_array = None
dist = np.zeros(self.dist_shape)
self.dist_array = dist_array
self.dist = dist
def _aggregate_dist(self, **sub):
"""Aggregate permutation distribution to one value per permutation
Parameters
----------
[dimname] : index
Limit the data for the distribution.
Returns
-------
dist : array, shape = (samples,)
Maximum value for each permutation in the given region.
"""
dist = self.dist
if sub:
if self._dist_dims is None:
raise TypeError("NDPermutationDistribution does not have parcellation")
dist_ = NDVar(dist, self._dist_dims)
dist_sub = dist_.sub(**sub)
dist = dist_sub.x
if dist.ndim > 1:
axes = tuple(range(1, dist.ndim))
dist = dist.max(axes)
return dist
def __repr__(self):
items = []
if self.has_original:
dt = timedelta(seconds=round(self.dt_original))
items.append("%i clusters (%s)" % (self.n_clusters, dt))
if self.samples > 0 and self.n_clusters > 0:
if self.dt_perm is not None:
dt = timedelta(seconds=round(self.dt_perm))
items.append("%i permutations (%s)" % (self.samples, dt))
else:
items.append("no data")
return "<NDPermutationDistribution: %s>" % ', '.join(items)
def __getstate__(self):
if not self._finalized:
raise RuntimeError("Cannot pickle cluster distribution before all "
"permutations have been added.")
state = {
name: getattr(self, name) for name in (
'name', 'meas', '_version', '_host', '_init_time',
# settings ...
'kind', 'threshold', 'tfce', 'tail', 'criteria', 'samples', 'tstart', 'tstop', 'parc',
# data properties ...
'dims', 'shape', '_nad_ax', '_vector_ax', '_criteria', '_connectivity',
# results ...
'dt_original', 'dt_perm', 'n_clusters', '_dist_dims', 'dist', '_original_param_map', '_original_cluster_map', '_cids',
)}
state['version'] = 3
return state
def __setstate__(self, state):
# backwards compatibility
version = state.pop('version', 0)
if version == 0:
if '_connectivity_src' in state:
del state['_connectivity_src']
del state['_connectivity_dst']
if '_connectivity' in state:
del state['_connectivity']
if 'N' in state:
state['samples'] = state.pop('N')
if '_version' not in state:
state['_version'] = '< 0.11'
if '_host' not in state:
state['_host'] = 'unknown'
if '_init_time' not in state:
state['_init_time'] = None
if 'parc' not in state:
if state['_dist_dims'] is None:
state['parc'] = None
else:
raise OldVersionError("This pickled file is from a previous version of Eelbrain and is not compatible anymore. Please recompute this test.")
elif isinstance(state['parc'], tuple):
if len(state['parc']) == 0:
state['parc'] = None
elif len(state['parc']) == 1:
state['parc'] = state['parc'][0]
else:
raise OldVersionError("This pickled file is from a previous version of Eelbrain and is not compatible anymore. Please recompute this test.")
nad_ax = state['_nad_ax']
state['dims'] = dims = state['dims'][1:]
state['_connectivity'] = Connectivity(
(dims[nad_ax],) + dims[:nad_ax] + dims[nad_ax + 1:],
state['parc'])
if version < 2:
state['_vector_ax'] = None
if version < 3:
            state['tfce'] = state['kind'] == 'tfce'
for k, v in state.items():
setattr(self, k, v)
self.has_original = True
self.finalize()
def _repr_test_args(self, pmin):
"Argument representation for TestResult repr"
args = ['samples=%r' % self.samples]
if pmin is not None:
args.append(f"pmin={pmin!r}")
elif self.kind == 'tfce':
arg = f"tfce={self.tfce!r}"
if self.tfce_warning:
arg = f"{arg} [WARNING: The TFCE step is larger than the largest value in the data]"
args.append(arg)
if self.tstart is not None:
args.append(f"tstart={self.tstart!r}")
if self.tstop is not None:
args.append(f"tstop={self.tstop!r}")
for k, v in self.criteria.items():
args.append(f"{k}={v!r}")
return args
def _repr_clusters(self):
info = []
if self.kind == 'cluster':
if self.n_clusters == 0:
info.append("no clusters")
else:
info.append("%i clusters" % self.n_clusters)
if self.n_clusters and self.samples:
info.append(f"{fmtxt.peq(self.probability_map.min())}")
return info
def _package_ndvar(self, x, info=None, external_shape=False):
"Generate NDVar from map with internal shape"
if not self.dims:
if isinstance(x, np.ndarray):
return x.item()
return x
if not external_shape and self._nad_ax:
x = x.swapaxes(0, self._nad_ax)
if info is None:
info = {}
return NDVar(x, self.dims, info, self.name)
def finalize(self):
"Package results and delete temporary data"
if self.dt_perm is None:
self.dt_perm = current_time() - self._t0
# original parameter map
param_contours = {}
if self.kind == 'cluster':
if self.tail >= 0:
param_contours[self.threshold] = (0.7, 0.7, 0)
if self.tail <= 0:
param_contours[-self.threshold] = (0.7, 0, 0.7)
info = _info.for_stat_map(self.meas, contours=param_contours)
self.parameter_map = self._package_ndvar(self._original_param_map, info)
# TFCE map
if self.kind == 'tfce':
self.tfce_map = self._package_ndvar(self._original_cluster_map)
else:
self.tfce_map = None
# cluster map
if self.kind == 'cluster':
self.cluster_map = self._package_ndvar(self._original_cluster_map)
else:
self.cluster_map = None
self._finalized = True
def data_for_permutation(self, raw=True):
"""Retrieve data flattened for permutation
Parameters
----------
raw : bool
Return a RawArray and a shape tuple instead of a numpy array.
"""
# get data in the right shape
x = self.y_perm.x
if self._vector_ax:
x = np.moveaxis(x, self._vector_ax + 1, 1)
if self._nad_ax is not None:
dst = 1
src = 1 + self._nad_ax
if self._vector_ax is not None:
dst += 1
if self._vector_ax > self._nad_ax:
src += 1
if dst != src:
x = x.swapaxes(dst, src)
# flat y shape
ndims = 1 + (self._vector_ax is not None)
n_flat = 1 if x.ndim == ndims else reduce(operator.mul, x.shape[ndims:])
y_flat_shape = x.shape[:ndims] + (n_flat,)
if not raw:
return x.reshape(y_flat_shape)
n = reduce(operator.mul, y_flat_shape)
ra = RawArray('d', n)
ra[:] = x.ravel() # OPT: don't copy data
return ra, y_flat_shape, x.shape[ndims:]
def _cluster_properties(self, cluster_map, cids):
"""Create a Dataset with cluster properties
Parameters
----------
cluster_map : NDVar
NDVar in which clusters are marked by bearing the same number.
cids : array_like of int
Numbers specifying the clusters (must occur in cluster_map) which
should be analyzed.
Returns
-------
cluster_properties : Dataset
Cluster properties. Which properties are included depends on the
dimensions.
"""
ndim = cluster_map.ndim
n_clusters = len(cids)
# setup compression
compression = []
for ax, dim in enumerate(cluster_map.dims):
extents = np.empty((n_clusters, len(dim)), dtype=np.bool_)
axes = tuple(i for i in range(ndim) if i != ax)
compression.append((ax, dim, axes, extents))
# find extents for all clusters
c_mask = np.empty(cluster_map.shape, np.bool_)
for i, cid in enumerate(cids):
np.equal(cluster_map, cid, c_mask)
for ax, dim, axes, extents in compression:
np.any(c_mask, axes, extents[i])
# prepare Dataset
ds = Dataset()
ds['id'] = Var(cids)
for ax, dim, axes, extents in compression:
properties = dim._cluster_properties(extents)
if properties is not None:
ds.update(properties)
return ds
def cluster(self, cluster_id):
"""Retrieve a specific cluster as NDVar
Parameters
----------
cluster_id : int
Cluster id.
Returns
-------
cluster : NDVar
NDVar of the cluster, 0 outside the cluster.
Notes
-----
Clusters only have stable ids for thresholded cluster distributions.
"""
if self.kind != 'cluster':
raise RuntimeError(
f'Only cluster-based tests have clusters with stable ids, this '
f'is a {self.kind} distribution. Use the .find_clusters() '
f'method instead with maps=True.')
elif cluster_id not in self._cids:
raise ValueError(f'No cluster with id {cluster_id!r}')
out = self.parameter_map * (self.cluster_map == cluster_id)
properties = self._cluster_properties(self.cluster_map, (cluster_id,))
for k in properties:
out.info[k] = properties[0, k]
return out
def clusters(self, pmin=None, maps=True, **sub):
"""Find significant clusters
Parameters
----------
pmin : None | scalar, 1 >= p >= 0
Threshold p-value for clusters (for thresholded cluster tests the
default is 1, for others 0.05).
maps : bool
Include in the output a map of every cluster (can be memory
intensive if there are large statistical maps and/or many
clusters; default True).
[dimname] : index
Limit the data for the distribution.
Returns
-------
ds : Dataset
Dataset with information about the clusters.
"""
if pmin is None:
if self.samples > 0 and self.kind != 'cluster':
pmin = 0.05
elif self.samples == 0:
msg = ("Can not determine p values in distribution without "
"permutations.")
if self.kind == 'cluster':
msg += " Find clusters with pmin=None."
raise RuntimeError(msg)
if sub:
param_map = self.parameter_map.sub(**sub)
else:
param_map = self.parameter_map
if self.kind == 'cluster':
if sub:
cluster_map = self.cluster_map.sub(**sub)
cids = np.setdiff1d(cluster_map.x, [0])
else:
cluster_map = self.cluster_map
cids = np.array(self._cids)
if len(cids):
# measure original clusters
cluster_v = ndimage.sum(param_map.x, cluster_map.x, cids)
# p-values
if self.samples:
# p-values: "the proportion of random partitions that
# resulted in a larger test statistic than the observed
# one" (179)
dist = self._aggregate_dist(**sub)
n_larger = np.sum(dist > np.abs(cluster_v[:, None]), 1)
cluster_p = n_larger / self.samples
# select clusters
if pmin is not None:
idx = cluster_p <= pmin
cids = cids[idx]
cluster_p = cluster_p[idx]
cluster_v = cluster_v[idx]
# p-value corrected across parc
if sub:
dist = self._aggregate_dist()
n_larger = np.sum(dist > np.abs(cluster_v[:, None]), 1)
cluster_p_corr = n_larger / self.samples
else:
cluster_v = cluster_p = cluster_p_corr = []
ds = self._cluster_properties(cluster_map, cids)
ds['v'] = Var(cluster_v)
if self.samples:
ds['p'] = Var(cluster_p)
if sub:
ds['p_parc'] = Var(cluster_p_corr)
threshold = self.threshold
else:
p_map = self.compute_probability_map(**sub)
bin_map = np.less_equal(p_map.x, pmin)
# threshold for maps
if maps:
values = np.abs(param_map.x)[bin_map]
if len(values):
threshold = values.min() / 2
else:
threshold = 1.
# find clusters (reshape to internal shape for labelling)
if self._nad_ax:
bin_map = bin_map.swapaxes(0, self._nad_ax)
if sub:
raise NotImplementedError("sub")
# need to subset connectivity!
c_map, cids = label_clusters_binary(bin_map, self._connectivity)
if self._nad_ax:
c_map = c_map.swapaxes(0, self._nad_ax)
# Dataset with cluster info
cluster_map = NDVar(c_map, p_map.dims, {}, "clusters")
ds = self._cluster_properties(cluster_map, cids)
ds.info['clusters'] = cluster_map
min_pos = ndimage.minimum_position(p_map.x, c_map, cids)
ds['p'] = Var([p_map.x[pos] for pos in min_pos])
if 'p' in ds:
ds['sig'] = star_factor(ds['p'])
# expand clusters
if maps:
shape = (ds.n_cases,) + param_map.shape
c_maps = np.empty(shape, dtype=param_map.x.dtype)
c_mask = np.empty(param_map.shape, dtype=np.bool_)
for i, cid in enumerate(cids):
np.equal(cluster_map.x, cid, c_mask)
np.multiply(param_map.x, c_mask, c_maps[i])
# package ndvar
dims = ('case',) + param_map.dims
param_contours = {}
if self.tail >= 0:
param_contours[threshold] = (0.7, 0.7, 0)
if self.tail <= 0:
param_contours[-threshold] = (0.7, 0, 0.7)
info = _info.for_stat_map(self.meas, contours=param_contours)
info['summary_func'] = np.sum
ds['cluster'] = NDVar(c_maps, dims, info)
else:
ds.info['clusters'] = self.cluster_map
return ds
def find_peaks(self):
"""Find peaks in a TFCE distribution
Returns
-------
ds : Dataset
Dataset with information about the peaks.
"""
if self.kind == 'cluster':
raise RuntimeError("Not a threshold-free distribution")
param_map = self._original_param_map
probability_map = self.probability_map.x
if self._nad_ax:
probability_map = probability_map.swapaxes(0, self._nad_ax)
peaks = find_peaks(self._original_cluster_map, self._connectivity)
peak_map, peak_ids = label_clusters_binary(peaks, self._connectivity)
ds = Dataset()
ds['id'] = Var(peak_ids)
v = ds.add_empty_var('v')
if self.samples:
p = ds.add_empty_var('p')
bin_buff = np.empty(peak_map.shape, np.bool8)
for i, id_ in enumerate(peak_ids):
idx = np.equal(peak_map, id_, bin_buff)
v[i] = param_map[idx][0]
if self.samples:
p[i] = probability_map[idx][0]
return ds
def compute_probability_map(self, **sub):
"""Compute a probability map
Parameters
----------
[dimname] : index
Limit the data for the distribution.
Returns
-------
probability : NDVar
Map of p-values.
"""
if not self.samples:
raise RuntimeError("Can't compute probability without permutations")
if self.kind == 'cluster':
cpmap = np.ones(self.shape)
if self.n_clusters:
cids = self._cids
dist = self._aggregate_dist(**sub)
cluster_map = self._original_cluster_map
param_map = self._original_param_map
# measure clusters
cluster_v = ndimage.sum(param_map, cluster_map, cids)
# p-values: "the proportion of random partitions that resulted
# in a larger test statistic than the observed one" (179)
n_larger = np.sum(dist >= np.abs(cluster_v[:, None]), 1)
cluster_p = n_larger / self.samples
c_mask = np.empty(self.shape, dtype=np.bool8)
for i, cid in enumerate(cids):
np.equal(cluster_map, cid, c_mask)
cpmap[c_mask] = cluster_p[i]
# revert to original shape
if self._nad_ax:
cpmap = cpmap.swapaxes(0, self._nad_ax)
dims = self.dims
else:
if self.kind == 'tfce':
stat_map = self.tfce_map
else:
if self.tail == 0:
stat_map = self.parameter_map.abs()
elif self.tail < 0:
stat_map = -self.parameter_map
else:
stat_map = self.parameter_map
if sub:
stat_map = stat_map.sub(**sub)
dims = stat_map.dims if isinstance(stat_map, NDVar) else None
cpmap = np.zeros(stat_map.shape) if dims else 0.
if self.dist is None: # flat stat-map
cpmap += 1
else:
dist = self._aggregate_dist(**sub)
idx = np.empty(stat_map.shape, dtype=np.bool8)
actual = stat_map.x if self.dims else stat_map
for v in dist:
cpmap += np.greater_equal(v, actual, idx)
cpmap /= self.samples
if dims:
return NDVar(cpmap, dims, _info.for_cluster_pmap(), self.name)
else:
return cpmap
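    # Clarifying note: for non-cluster maps the corrected p-value at each point
    # is the fraction of permutations whose aggregated statistic reaches the
    # observed value; e.g. with ``samples=1000`` and 12 permutations >= the
    # observed statistic, p = 12 / 1000 = 0.012.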
def masked_parameter_map(self, pmin=0.05, name=None, **sub):
"""Parameter map masked by significance
Parameters
----------
pmin : scalar
Threshold p-value for masking (default 0.05). For threshold-based
cluster tests, ``pmin=1`` includes all clusters regardless of their
p-value.
Returns
-------
masked_map : NDVar
NDVar with data from the original parameter map, masked with
p <= pmin.
"""
if not 1 >= pmin > 0:
raise ValueError(f"pmin={pmin}: needs to be between 1 and 0")
if name is None:
name = self.parameter_map.name
if sub:
param_map = self.parameter_map.sub(**sub)
else:
param_map = self.parameter_map
if pmin == 1:
if self.kind != 'cluster':
raise ValueError(f"pmin=1 is only a valid mask for threshold-based cluster tests")
mask = self.cluster_map == 0
else:
probability_map = self.compute_probability_map(**sub)
mask = probability_map > pmin
return param_map.mask(mask, name)
@LazyProperty
def probability_map(self):
if self.samples:
return self.compute_probability_map()
else:
return None
@LazyProperty
def _default_plot_obj(self):
if self.samples:
return [[self.parameter_map, self.probability_map]]
else:
return [[self.parameter_map]]
def info_list(self, title="Computation Info"):
"List with information on computation"
l = fmtxt.List(title)
l.add_item("Eelbrain version: %s" % self._version)
l.add_item("Host Computer: %s" % self._host)
if self._init_time is not None:
l.add_item("Created: %s" % datetime.fromtimestamp(self._init_time)
.strftime('%y-%m-%d %H:%M'))
l.add_item("Original time: %s" % timedelta(seconds=round(self.dt_original)))
l.add_item("Permutation time: %s" % timedelta(seconds=round(self.dt_perm)))
return l
class _MergedTemporalClusterDist:
"""Merge permutation distributions from multiple tests"""
def __init__(self, cdists):
if isinstance(cdists[0], list):
self.effects = [d.name for d in cdists[0]]
self.samples = cdists[0][0].samples
dist = {}
for i, effect in enumerate(self.effects):
if any(d[i].n_clusters for d in cdists):
dist[effect] = np.column_stack([d[i].dist for d in cdists if d[i].dist is not None])
if len(dist):
dist = {c: d.max(1) for c, d in dist.items()}
else:
self.samples = cdists[0].samples
if any(d.n_clusters for d in cdists):
dist = np.column_stack([d.dist for d in cdists if d.dist is not None])
dist = dist.max(1)
else:
dist = None
self.dist = dist
def correct_cluster_p(self, res):
clusters = res.find_clusters()
keys = list(clusters.keys())
if not clusters.n_cases:
return clusters
if isinstance(res, MultiEffectNDTest):
keys.insert(-1, 'p_parc')
cluster_p_corr = []
for cl in clusters.itercases():
n_larger = np.sum(self.dist[cl['effect']] > np.abs(cl['v']))
cluster_p_corr.append(float(n_larger) / self.samples)
else:
keys.append('p_parc')
vs = np.array(clusters['v'])
n_larger = np.sum(self.dist > np.abs(vs[:, None]), 1)
cluster_p_corr = n_larger / self.samples
clusters['p_parc'] = Var(cluster_p_corr)
clusters = clusters[keys]
return clusters
def distribution_worker(dist_array, dist_shape, in_queue, kill_beacon):
"Worker that accumulates values and places them into the distribution"
n = reduce(operator.mul, dist_shape)
dist = np.frombuffer(dist_array, np.float64, n)
dist.shape = dist_shape
samples = dist_shape[0]
for i in trange(samples, desc="Permutation test", unit=' permutations',
disable=CONFIG['tqdm']):
dist[i] = in_queue.get()
if kill_beacon.is_set():
return
def permutation_worker(in_queue, out_queue, y, y_flat_shape, stat_map_shape,
test_func, args, map_args, kill_beacon):
"Worker for 1 sample t-test"
if CONFIG['nice']:
os.nice(CONFIG['nice'])
n = reduce(operator.mul, y_flat_shape)
y = np.frombuffer(y, np.float64, n).reshape(y_flat_shape)
stat_map = np.empty(stat_map_shape)
stat_map_flat = stat_map.ravel()
map_processor = get_map_processor(*map_args)
while not kill_beacon.is_set():
perm = in_queue.get()
if perm is None:
break
test_func(y, *args, stat_map_flat, perm)
max_v = map_processor.max_stat(stat_map)
out_queue.put(max_v)
def run_permutation(test_func, dist, iterator, *args):
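    "Run the permutation test over all permutations in iterator, using worker processes when CONFIG['n_workers'] is set, otherwise in the current process"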
if CONFIG['n_workers']:
workers, out_queue, kill_beacon = setup_workers(test_func, dist, args)
try:
for perm in iterator:
out_queue.put(perm)
for _ in range(len(workers) - 1):
out_queue.put(None)
logger = logging.getLogger(__name__)
for w in workers:
w.join()
logger.debug("worker joined")
except KeyboardInterrupt:
kill_beacon.set()
raise
else:
y = dist.data_for_permutation(False)
map_processor = get_map_processor(*dist.map_args)
stat_map = np.empty(dist.shape)
stat_map_flat = stat_map.ravel()
for i, perm in enumerate(iterator):
test_func(y, *args, stat_map_flat, perm)
dist.dist[i] = map_processor.max_stat(stat_map)
dist.finalize()
def setup_workers(test_func, dist, func_args):
"Initialize workers for permutation tests"
logger = logging.getLogger(__name__)
logger.debug("Setting up %i worker processes..." % CONFIG['n_workers'])
permutation_queue = SimpleQueue()
dist_queue = SimpleQueue()
kill_beacon = Event()
# permutation workers
y, y_flat_shape, stat_map_shape = dist.data_for_permutation()
args = (permutation_queue, dist_queue, y, y_flat_shape, stat_map_shape,
test_func, func_args, dist.map_args, kill_beacon)
workers = []
for _ in range(CONFIG['n_workers']):
w = Process(target=permutation_worker, args=args)
w.start()
workers.append(w)
# distribution worker
args = (dist.dist_array, dist.dist_shape, dist_queue, kill_beacon)
w = Process(target=distribution_worker, args=args)
w.start()
workers.append(w)
return workers, permutation_queue, kill_beacon
def run_permutation_me(test, dists, iterator):
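    "Run a multi-effect permutation test, filling the null distribution of each effect in dists"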
dist = dists[0]
if dist.kind == 'cluster':
thresholds = tuple(d.threshold for d in dists)
else:
thresholds = None
if CONFIG['n_workers']:
workers, out_queue, kill_beacon = setup_workers_me(test, dists, thresholds)
try:
for perm in iterator:
out_queue.put(perm)
for _ in range(len(workers) - 1):
out_queue.put(None)
logger = logging.getLogger(__name__)
for w in workers:
w.join()
logger.debug("worker joined")
except KeyboardInterrupt:
kill_beacon.set()
raise
else:
y = dist.data_for_permutation(False)
map_processor = get_map_processor(*dist.map_args)
stat_maps = test.preallocate(dist.shape)
if thresholds:
stat_maps_iter = tuple(zip(stat_maps, thresholds, dists))
else:
stat_maps_iter = tuple(zip(stat_maps, dists))
for i, perm in enumerate(iterator):
test.map(y, perm)
if thresholds:
for m, t, d in stat_maps_iter:
if d.do_permutation:
d.dist[i] = map_processor.max_stat(m, t)
else:
for m, d in stat_maps_iter:
if d.do_permutation:
d.dist[i] = map_processor.max_stat(m)
for d in dists:
if d.do_permutation:
d.finalize()
def setup_workers_me(test_func, dists, thresholds):
"Initialize workers for permutation tests"
logger = logging.getLogger(__name__)
logger.debug("Setting up %i worker processes..." % CONFIG['n_workers'])
permutation_queue = SimpleQueue()
dist_queue = SimpleQueue()
kill_beacon = Event()
# permutation workers
dist = dists[0]
y, y_flat_shape, stat_map_shape = dist.data_for_permutation()
args = (permutation_queue, dist_queue, y, y_flat_shape, stat_map_shape,
test_func, dist.map_args, thresholds, kill_beacon)
workers = []
for _ in range(CONFIG['n_workers']):
w = Process(target=permutation_worker_me, args=args)
w.start()
workers.append(w)
# distribution worker
args = ([d.dist_array for d in dists], dist.dist_shape, dist_queue, kill_beacon)
w = Process(target=distribution_worker_me, args=args)
w.start()
workers.append(w)
return workers, permutation_queue, kill_beacon
def permutation_worker_me(in_queue, out_queue, y, y_flat_shape, stat_map_shape,
test, map_args, thresholds, kill_beacon):
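    "Worker computing statistic maps for each permutation and queueing the per-effect maxima"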
if CONFIG['nice']:
os.nice(CONFIG['nice'])
n = reduce(operator.mul, y_flat_shape)
y = np.frombuffer(y, np.float64, n).reshape(y_flat_shape)
iterator = test.preallocate(stat_map_shape)
if thresholds:
iterator = tuple(zip(iterator, thresholds))
else:
iterator = tuple(iterator)
map_processor = get_map_processor(*map_args)
while not kill_beacon.is_set():
perm = in_queue.get()
if perm is None:
break
test.map(y, perm)
if thresholds:
max_v = [map_processor.max_stat(m, t) for m, t in iterator]
else:
max_v = [map_processor.max_stat(m) for m in iterator]
out_queue.put(max_v)
def distribution_worker_me(dist_arrays, dist_shape, in_queue, kill_beacon):
"Worker that accumulates values and places them into the distribution"
n = reduce(operator.mul, dist_shape)
dists = [d if d is None else np.frombuffer(d, np.float64, n).reshape(dist_shape)
for d in dist_arrays]
samples = dist_shape[0]
for i in trange(samples, desc="Permutation test", unit=' permutations',
disable=CONFIG['tqdm']):
for dist, v in zip(dists, in_queue.get()):
if dist is not None:
dist[i] = v
if kill_beacon.is_set():
return
# Backwards compatibility for pickling
_ClusterDist = NDPermutationDistribution
|
python
|
class SelectionSort:
@staticmethod
def sort(a):
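        """Sort list a in place using selection sort."""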
for i, v in enumerate(a):
minimum = i
j = i+1
while j < len(a):
if a[j] < a[minimum]:
minimum = j
j += 1
tmp = a[i]
a[i] = a[minimum]
a[minimum] = tmp
original = [
325432, 989, 547510, 3, -93, 189019, 5042, 123,
597, 42, 7506, 184, 184, 2409, 45, 824,
4, -2650, 9, 662, 3928, -170, 45358, 395,
842, 7697, 110, 14, 99, 221
]
selection = SelectionSort()
selection.sort(original)
sorted_ = [
-2650, -170, -93, 3, 4, 9, 14, 42, 45, 99, 110,
123, 184, 184, 221, 395, 597, 662, 824, 842, 989,
2409, 3928, 5042, 7506, 7697, 45358, 189019, 325432, 547510
]
for i, v in enumerate(original):
assert original[i] == sorted_[i]
|
python
|
# -*- coding: utf-8 -*-
# @Time : 2019/1/18 15:40
|
python
|
# Implement a queue that stores integers, then write a program that processes the commands given as input.
# There are six commands in total.
# push X: push the integer X onto the queue.
# pop: remove the frontmost integer from the queue and print it. If the queue is empty, print -1.
# size: print the number of integers stored in the queue.
# empty: print 1 if the queue is empty, 0 otherwise.
# front: print the frontmost integer in the queue. If the queue is empty, print -1.
# back: print the rearmost integer in the queue. If the queue is empty, print -1.
import sys
from collections import deque
t=int(input())
q=deque()
for _ in range(t):
ql=len(q)
s = sys.stdin.readline().rstrip().split()
if(len(s)==2):
q.append(s[1])
elif s[0]=='front':
if(ql!=0):
print(q[0])
else:
print(-1)
elif s[0]=='back':
if(ql!=0):
print(q[ql-1])
else:
print(-1)
elif s[0]=='size':
print(ql)
elif s[0]=='empty':
if(ql!=0):
print(0)
else:
print(1)
else:
if(ql!=0):
print(q.popleft())
else:
print(-1)
|
python
|
from clickhouse_orm import migrations
from ..test_migrations import *
operations = [migrations.AlterIndexes(ModelWithIndex2, reindex=True)]
|
python
|
from functools import partial
import pytest
from stp_core.loop.eventually import eventuallyAll
from plenum.test import waits
from plenum.test.helper import checkReqNack
whitelist = ['discarding message']
class TestVerifier:
@staticmethod
def verify(operation):
assert operation['amount'] <= 100, 'amount too high'
@pytest.fixture(scope="module")
def restrictiveVerifier(nodeSet):
for n in nodeSet:
n.opVerifiers = [TestVerifier()]
@pytest.fixture(scope="module")
def request1(wallet1):
op = {"type": "buy",
"amount": 999}
req = wallet1.signOp(op)
return req
@pytest.mark.skip(reason="old style plugin")
def testRequestFullRoundTrip(restrictiveVerifier,
client1,
sent1,
looper,
nodeSet):
update = {'reason': 'client request invalid: InvalidClientRequest() '
'[caused by amount too high\nassert 999 <= 100]'}
coros2 = [partial(checkReqNack, client1, node, sent1.identifier,
sent1.reqId, update)
for node in nodeSet]
timeout = waits.expectedReqAckQuorumTime()
looper.run(eventuallyAll(*coros2, totalTimeout=timeout))
|
python
|
import torch
from torch import autograd
def steptaker(data, critic, step, num_step = 1):
"""Applies gradient descent (GD) to data using critic
Inputs
- data; data to apply GD to
- critic; critic to compute gradients of
- step; how large of a step to take
- num_step; how finely to discretize flow. taken as 1 in TTC
Outputs
- data with gradient descent applied
"""
for j in range(num_step):
gradients = grad_calc(data, critic)
data = (data - (step/num_step)*gradients).detach()
return data.detach()
def rk4(data, critic, step, num_step = 1):
"""Assumes data is a batch of images, critic is a Kantorovich potential,
and step is desired step size. Applies fourth order Runge-Kutta to the data num_step times
with stepsize step/num_step. Unused in TTC"""
h = step/num_step
for j in range(num_step):
data_0 = data.detach().clone()
k = grad_calc(data_0, critic)
data += (h/6)*k
k = grad_calc(data_0 + (h/2)*k, critic)
data += (h/3)*k
k = grad_calc(data_0 + (h/2)*k, critic)
data += (h/3)*k
        k = grad_calc(data_0 + h*k, critic)
data += (h/6)*k
data = data.detach()
return data
def grad_calc(data, critic):
"""Returns the gradients of critic at data"""
data = data.detach().clone()
data.requires_grad = True
Dfake = critic(data)
gradients = autograd.grad(outputs = Dfake, inputs = data,
grad_outputs = torch.ones(Dfake.size()).cuda(), only_inputs=True)[0]
return gradients.detach()
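# Hedged usage sketch (not part of the original module): the critic below is a
# hypothetical stand-in for a trained Kantorovich potential, included only to show
# how steptaker is called on a batch of images; a CUDA device is required because
# grad_calc moves grad_outputs to the GPU.
if __name__ == "__main__":
    import torch.nn as nn
    critic = nn.Sequential(nn.Flatten(), nn.Linear(3 * 32 * 32, 1)).cuda()
    batch = torch.randn(4, 3, 32, 32).cuda()
    stepped = steptaker(batch, critic, step=0.1)  # one gradient step of size 0.1
    print(stepped.shape)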
|
python
|
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
import pickle
import pprint
import datefinder
# What the program can access within Calendar
# See more at https://developers.google.com/calendar/auth
scopes = ["https://www.googleapis.com/auth/calendar"]
flow = InstalledAppFlow.from_client_secrets_file("client_secret.json", scopes=scopes)
# Use this to pull the users credentials into a pickle file
#credentials = flow.run_console()
#pickle.dump(credentials, open("token.pkl", "wb"))
# Read the credentials from a saved pickle file
credentials = pickle.load(open("token.pkl", "rb"))
# Build the calendar resource
service = build("calendar", "v3", credentials=credentials)
# Store a list of Calendars on the account
result = service.calendarList().list().execute()
calendar_id = result["items"][0]["id"]
result = service.events().list(calendarId=calendar_id).execute()
def create_event(my_event):
"""
Create a Google Calendar Event
Args:
my_event: CalendarEvent object
"""
print("Created Event for " + str(my_event.date))
event = {
"summary": my_event.summary,
"location": my_event.location,
"description": my_event.description,
"start": {
"dateTime": my_event.start_date_time.strftime('%Y-%m-%dT%H:%M:%S'),
"timeZone": "Europe/London",
},
"end": {
"dateTime": my_event.end_date_time.strftime('%Y-%m-%dT%H:%M:%S'),
"timeZone": "Europe/London",
},
"reminders": {
"useDefault": False,
},
}
return service.events().insert(calendarId=calendar_id, body=event, sendNotifications=True).execute()
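# Hedged usage sketch (not part of the original script): CalendarEvent is not defined
# here, so SimpleNamespace stands in for any object exposing the attributes that
# create_event reads; the dates below are illustrative values.
if __name__ == "__main__":
    from types import SimpleNamespace
    from datetime import datetime, timedelta
    start = datetime(2021, 6, 1, 9, 0)
    demo_event = SimpleNamespace(
        summary="Demo meeting",
        location="Online",
        description="Created by the usage sketch",
        date=start.date(),
        start_date_time=start,
        end_date_time=start + timedelta(hours=1),
    )
    create_event(demo_event)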
|
python
|
# -*- coding: utf-8 -*-
"""binomial_mix.
Chen Qiao: [email protected]
"""
import sys
import warnings
import numpy as np
from scipy.special import gammaln, logsumexp
from scipy.stats import binom
from .model_base import ModelBase
class MixtureBinomial(ModelBase):
"""Mixture of Binomial Models
This class implements EM algorithm for parameter estimation of Mixture
of Binomial models.
Attributes:
n_components (int): number of mixtures.
tor (float): tolarance difference for earlier stop training.
params (numpy float array): parameters of the model, [p_1, p_2, ...,
p_K, pi_1, pi_2, ..., pi_K],
None before parameter estimation.
losses (list): list of negative loglikelihood losses of the training
process, None before parameter estimation.
model_scores (dict): scores for the model, including "BIC" and "ICL" scores
Notes
-----
Because M-step has analytical solution, parameter estimation is fast.
Usage:
em_mb = MixtureBinomial(
n_components=2,
tor=1e-6)
params = em_mb.EM((ys, ns), max_iters=250, early_stop=True)
Simulation experiment:
import numpy as np
from scipy.stats import bernoulli, binom
from bbmix.models import MixtureBinomial
n_samples = 2000
n_trials = 1000
pis = [0.6, 0.4]
p1, p2 = 0.4, 0.8
gammars = bernoulli.rvs(pis[0], size=n_samples)
n_pos_events = sum(gammars)
n_neg_events = n_samples - n_pos_events
ys_of_type1 = binom.rvs(n_trials, p1, size=n_pos_events)
ys_of_type2 = binom.rvs(n_trials, p2, size=n_neg_events)
ys = np.concatenate((ys_of_type1, ys_of_type2))
ns = np.ones(n_samples, dtype=np.int) * n_trials
em_mb = MixtureBinomial(
n_components=2,
tor=1e-20)
params = em_mb.fit((ys, ns), max_iters=250, early_stop=True)
print(params)
print(p1, p2, pis)
print(em_mb.model_scores)
"""
def __init__(self,
n_components=2,
tor=1e-6
):
"""Initialization method
Args:
n_components (int): number of mixtures. Defaults to 2.
tor (float): tolerance shreshold for early-stop training.
Defaults to 1e-6.
"""
super(MixtureBinomial, self).__init__()
self.n_components = n_components
self.tor = tor
def E_step(self, y, n, params):
"""Expectation step
Args:
y (np.array): number of positive events
n (np.array): number of total trials
params (np.array): model parameters
Returns:
np.array: expectation of the latent variables
"""
E_gammas = [None] * self.n_components
for k in range(self.n_components):
p_k, pi_k = params[k], params[k + self.n_components]
E_gammas[k] = y * np.log(p_k) + (n - y) * \
np.log(1 - p_k) + np.log(pi_k)
        # normalize as they haven't been normalized yet
E_gammas = E_gammas - logsumexp(E_gammas, axis=0)
return np.exp(E_gammas)
def M_step(self, y, n, E_gammas, params):
"""Maximization step
Args:
y (np.array): number of positive events
n (np.array): number of total trials
E_gammas (np.array): results of E step
params (np.array): model parameters
Returns:
np.array: updated model parameters
"""
N_samples = len(n)
for k in range(self.n_components):
E_gammas[k][E_gammas[k] == 0] = 1e-20
params[k] = np.sum(y * E_gammas[k]) / np.sum(n * E_gammas[k])
params[k + self.n_components] = np.sum(E_gammas[k]) / N_samples
return params
def log_likelihood_binomial(self, y, n, p, pi=1.0):
"""log likelihood of data under binomial distribution
Args:
y (np.array): number of positive events
n (np.array): number of total trials
p (float): probability of positive event
pi (float): weight of mixture component
Returns:
np.array: log likelihood of data
"""
return gammaln(n + 1) - (gammaln(y + 1) + gammaln(n - y + 1)) \
+ y * np.log(p) + (n - y) * np.log(1 - p) + np.log(pi)
def log_likelihood_mixture_bin(self, y, n, params):
"""log likelihood of dataset under mixture of binomial distribution
Args:
y (np.array): number of positive events
n (np.array): number of total trials
params (np.array): parameters of the model
Returns:
float: log likelihood of the dataset
"""
logLik_mat = np.zeros((len(n), self.n_components), dtype=np.float)
for k in range(self.n_components):
p_k, pi_k = params[k], params[k + self.n_components]
logLik_mat[:, k] = self.log_likelihood_binomial(y, n, p_k, pi_k)
return logsumexp(logLik_mat, axis=1).sum()
def EM(self, y, n, params, max_iters=250, early_stop=False, n_tolerance=10,
verbose=False):
"""EM algorithim
Args:
y (np.array): number of positive events
n (np.array): total number of trials respectively
params (list): init model params
max_iters (int, optional): maximum number of iterations for EM. Defaults to 250.
early_stop (bool, optional): whether early stop training. Defaults to False.
n_tolerance (int): the max number of violations to trigger early stop.
verbose (bool, optional): whether print training information. Defaults to False.
Returns:
np.array: trained parameters
"""
n_tol = n_tolerance
losses = [sys.maxsize]
for ith in range(max_iters):
# E step
E_gammas = self.E_step(y, n, params)
# M step
params = self.M_step(y, n, E_gammas, params)
# record current NLL loss
losses.append(-self.log_likelihood_mixture_bin(y, n, params))
if verbose:
print("=" * 10, "Iteration {}".format(ith + 1), "=" * 10)
print("Current params: {}".format(params))
print("Negative LogLikelihood Loss: {}".format(losses[-1]))
print("=" * 25)
improvement = losses[-2] - losses[-1]
if early_stop:
if improvement < self.tor:
n_tol -= 1
else:
n_tol = n_tolerance
if n_tol == 0:
if verbose:
print("Improvement halts, early stop training.")
break
self.score_model(len(params), len(y), losses[-1], E_gammas)
self.params = params
self.losses = losses[1:]
return params
def _param_init(self, y, n):
"""Initialziation of model parameters
Args:
y (np.array): number of positive events
n (np.array): number of total trials
Returns:
np.array: initialized model parameters
"""
return np.concatenate([np.random.uniform(0.49, 0.51, self.n_components),
np.random.uniform(0.4, 0.6, self.n_components)])
def fit(self, data, max_iters=250, early_stop=False, pseudocount=0.1,
n_tolerance=10, verbose=False):
"""Fit function
Args:
data (tuple of arrays): y, n: number of positive events and total number of trials respectively
max_iters (int, optional): maximum number of iterations for EM. Defaults to 250.
early_stop (bool, optional): whether early stop training. Defaults to False.
pseudocount (float) : add pseudocount if data is zero
n_tolerance (int): the max number of violations to trigger early stop.
verbose (bool, optional): whether print training information. Defaults to False.
Returns:
np.array: trained parameters
"""
y, n = data
self.nzero_prop = np.sum(y > 0)/np.shape(y)[0]
y, n = self._preprocess(data, pseudocount)
init_params = self._param_init(y, n)
if verbose:
print("=" * 25)
print("Init params: {}".format(init_params))
print("=" * 25)
params = self.EM(y, n, init_params, max_iters=max_iters,
early_stop=early_stop, verbose=verbose,
n_tolerance=n_tolerance)
if self.n_components == 2 and np.abs(params[0] - params[1]) < 1e-4 and verbose:
print("Colapsed to one component, please check proportion of non-zero counts.")
return params
def sample(self, n_trials):
"""Generate data from fitted parameters
        Args:
            n_trials (array_like): total number of trials
Returns:
np.array: ys generated from the fitted distribution
"""
        if not hasattr(self, 'params'):
raise Exception("Error: please fit the model or set params before sample()")
mus = self.params[:self.n_components]
pis = self.params[self.n_components: 2 * self.n_components]
labels = np.random.choice(self.n_components, size=n_trials.shape, p=pis)
ys_out = np.zeros(n_trials.shape, dtype=int)
for i in range(self.n_components):
_idx = np.where(labels == i)
ys_out[_idx] = binom.rvs(n_trials[_idx].astype(np.int32), mus[i])
return ys_out
if __name__ == "__main__":
import numpy as np
from scipy.stats import bernoulli, binom
from bbmix.models import MixtureBinomial
n_samples = 2000
n_trials = 1000
pis = [0.6, 0.4]
p1, p2 = 0.4, 0.8
gammars = bernoulli.rvs(pis[0], size=n_samples)
n_pos_events = sum(gammars)
n_neg_events = n_samples - n_pos_events
ys_of_type1 = binom.rvs(n_trials, p1, size=n_pos_events)
ys_of_type2 = binom.rvs(n_trials, p2, size=n_neg_events)
ys = np.concatenate((ys_of_type1, ys_of_type2))
ns = np.ones(n_samples, dtype=np.int) * n_trials
em_mb = MixtureBinomial(
n_components=2,
tor=1e-20)
params = em_mb.fit((ys, ns), max_iters=250, early_stop=True)
print(params)
print(p1, p2, pis)
print(em_mb.model_scores)
|
python
|
from keras.layers import Conv2D, SeparableConv2D, MaxPooling2D, Flatten, Dense
from keras.layers import Dropout, Input, BatchNormalization, Activation, add, GlobalAveragePooling2D
from keras.losses import categorical_crossentropy
from keras.optimizers import Adam
from keras.utils import plot_model
from keras import callbacks
from keras import models
from keras.applications import Xception
from utils_datagen import TrainValTensorBoard
from utils_basic import chk_n_mkdir
from models.base_model import BaseModel
class XCEPTION_APP(BaseModel):
def __init__(self, output_directory, input_shape, n_classes, verbose=False):
self.output_directory = output_directory + '/xception_kapp'
chk_n_mkdir(self.output_directory)
self.model = self.build_model(input_shape, n_classes)
if verbose:
self.model.summary()
self.verbose = verbose
self.model.save_weights(self.output_directory + '/model_init.hdf5')
def build_model(self, input_shape, n_classes):
        # Load the Xception model pre-trained on ImageNet
xception_conv = Xception(weights='imagenet', include_top=False, input_shape=input_shape)
# Freeze the layers except the last 4 layers
for layer in xception_conv.layers:
layer.trainable = False
# Create the model
model = models.Sequential()
        # Add the Xception convolutional base model
model.add(xception_conv)
# Add new layers
model.add(Flatten())
model.add(Dense(1024, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(n_classes, activation='softmax', name='predictions'))
# define the model with input layer and output layer
model.summary()
plot_model(model, to_file=self.output_directory + '/model_graph.png', show_shapes=True, show_layer_names=True)
model.compile(loss=categorical_crossentropy, optimizer=Adam(lr=0.01), metrics=['acc'])
# model save
file_path = self.output_directory + '/best_model.hdf5'
model_checkpoint = callbacks.ModelCheckpoint(filepath=file_path, monitor='loss', save_best_only=True)
# Tensorboard log
log_dir = self.output_directory + '/tf_logs'
chk_n_mkdir(log_dir)
tb_cb = TrainValTensorBoard(log_dir=log_dir)
self.callbacks = [model_checkpoint, tb_cb]
return model
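# Hedged usage sketch (not part of the original module): BaseModel and the data
# pipeline live elsewhere, so only the constructor call is illustrated; the shape
# and class count below are illustrative values.
#     clf = XCEPTION_APP(output_directory='results', input_shape=(299, 299, 3),
#                        n_classes=5, verbose=True)
#     clf.model can then be trained with clf.callbacks attached.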
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import json
from ..models import PermissionModel as Model
from ..models import GroupPermissionModel
from .base_dao import BaseDao
class PermissionDao(BaseDao):
def add_permission(self, permission):
        # If a permission with the same app and codename exists, update it; otherwise add a new permission
new = Model.from_dict(permission)
exiting = self.get_permission_by_app_and_codename(
new.app, new.codename)
if exiting:
new.id = exiting.id
self.session.merge(new)
self.session.commit()
else:
self.session.add(new)
self.session.commit()
def get_permission_list(self):
query = self.session.query(Model)
return [_.to_dict() for _ in query.all()]
def delete_permission_list(self):
query = self.session.query(Model)
query.delete()
self.session.commit()
def get_permission_by_app_and_codename(self, app, codename):
query = self.session.query(Model)
permission = query.filter(
Model.app == app, Model.codename == codename).first()
return permission
def delete_permission_by_app_and_codename(self, app, codename):
permission = self.get_permission_by_app_and_codename(app, codename)
if permission:
self.session.delete(permission)
self.session.commit()
def count(self):
query = self.session.query(Model)
return query.count()
class GroupPermissionDao(BaseDao):
def add_group_permission(self, group_id, codename, app='nebula', extra_settings=''):
        # Look up the permission by app and codename, then add the group permission
permission = PermissionDao().get_permission_by_app_and_codename(app, codename)
if permission:
group_permission = GroupPermissionModel.from_dict(dict(
group_id=group_id,
permission_id=permission.id,
extra_settings=extra_settings
))
self.session.add(group_permission)
self.session.commit()
def update_group_permission(self, group_id, codename, app='nebula', extra_settings=''):
        # Look up the permission by app and codename, then update the group permission
permission = PermissionDao().get_permission_by_app_and_codename(app, codename)
if permission:
group_permission = GroupPermissionModel.from_dict(dict(
group_id=group_id,
permission_id=permission.id,
extra_settings=extra_settings
))
query = self.session.query(GroupPermissionModel)
existing = query.filter(GroupPermissionModel.group_id == group_id,
GroupPermissionModel.permission_id == permission.id).first()
if existing:
group_permission.id = existing.id
self.session.merge(group_permission)
self.session.commit()
else:
self.session.add(group_permission)
self.session.commit()
def get_group_permission(self, group_id, codename, app='nebula'):
        # Look up the permission by app and codename, then query the group_permission
permission = PermissionDao().get_permission_by_app_and_codename(app, codename)
if permission:
query = self.session.query(GroupPermissionModel)
group_permission = query.filter(
GroupPermissionModel.group_id == group_id, GroupPermissionModel.permission_id == permission.id).first()
return group_permission
def add_group_strategy_block(self, be_blocked_id, blocked_id):
        # Save the strategy-view blacklist: be_blocked_id is the id of the group blocked from viewing, blocked_id is the id of the group that blocks other groups from viewing its strategies
group_permission = self.get_group_permission(
be_blocked_id, 'view_strategy')
if group_permission:
extra_settings = json.loads(group_permission.extra_settings)
be_blocked_settings = extra_settings.get('be_blocked', [])
if blocked_id not in be_blocked_settings:
be_blocked_settings.append(blocked_id)
extra_settings['be_blocked'] = be_blocked_settings
self.update_group_permission(
be_blocked_id, 'view_strategy', extra_settings=json.dumps(extra_settings))
else:
extra_settings = {'be_blocked': [blocked_id]}
self.add_group_permission(
be_blocked_id, 'view_strategy', extra_settings=json.dumps(extra_settings))
def delete_group_strategy_block(self, be_blocked_id, blocked_id):
        # Remove an entry from the strategy-view blacklist: be_blocked_id is the id of the group blocked from viewing, blocked_id is the id of the group that blocks other groups from viewing its strategies
group_permission = self.get_group_permission(
be_blocked_id, 'view_strategy')
if group_permission:
extra_settings = json.loads(group_permission.extra_settings)
be_blocked_settings = extra_settings.get('be_blocked', [])
if blocked_id in be_blocked_settings:
be_blocked_settings.remove(blocked_id)
extra_settings['be_blocked'] = be_blocked_settings
self.update_group_permission(
be_blocked_id, 'view_strategy', extra_settings=json.dumps(extra_settings))
def get_group_strategy_block(self, group_id):
        # Strategy-view blacklist of this group
view_strategy = self.get_group_extra_settings(
group_id, 'view_strategy', app='nebula')
return json.loads(view_strategy) if view_strategy else {}
def get_group_extra_settings(self, group_id, codename, app='nebula'):
permission = PermissionDao().get_permission_by_app_and_codename(app, codename)
if permission:
query = self.session.query(GroupPermissionModel)
group_permission = query.filter(
GroupPermissionModel.group_id == group_id, GroupPermissionModel.permission_id == permission.id).first()
if group_permission:
return group_permission.extra_settings
|
python
|
from itertools import permutations
with open("input.txt") as f:
data = [int(i) for i in f.read().split("\n")]
preamble = 25
for d in range(preamble, len(data)):
    numbers = data[d - preamble:d]
target = data[d]
sol = [nums for nums in permutations(numbers, 2) if sum(nums) == target]
if not sol:
print(f"Target is: {target}")
|
python
|
import collections
import pathlib
import time
from multiprocessing import Process
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
from omegaconf import OmegaConf
from gdsfactory import components
from gdsfactory.config import CONFIG, logger
from gdsfactory.doe import get_settings_list
from gdsfactory.placer import (
build_components,
doe_exists,
load_doe_component_names,
save_doe,
)
from gdsfactory.types import PathType
from gdsfactory.write_doe import write_doe_metadata
factory = {
i: getattr(components, i)
for i in dir(components)
if not i.startswith("_") and callable(getattr(components, i))
}
def separate_does_from_templates(dicts: Dict[str, Any]) -> Any:
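    """Split the loaded dicts into DOE definitions and template definitions (entries carrying a "type" key), grouped by type."""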
type_to_dict = {}
does = {}
for name, d in dicts.items():
if "type" in d.keys():
template_type = d.pop("type")
if template_type not in type_to_dict:
type_to_dict[template_type] = {}
type_to_dict[template_type][name] = d
else:
does[name] = d
return does, type_to_dict
def update_dicts_recurse(
target_dict: Dict[
str, Union[List[int], str, Dict[str, List[int]], Dict[str, str], bool]
],
default_dict: Dict[str, Union[bool, Dict[str, Union[int, str]], int, str]],
) -> Dict[str, Any]:
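    """Return a copy of target_dict with keys missing from it filled in recursively from default_dict."""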
target_dict = target_dict.copy()
default_dict = default_dict.copy()
for k, v in default_dict.items():
if k not in target_dict:
target_dict[k] = v
else:
if isinstance(target_dict[k], (dict, collections.OrderedDict)):
target_dict[k] = update_dicts_recurse(target_dict[k], default_dict[k])
return target_dict
def save_doe_use_template(doe, doe_root_path=None) -> None:
"""Write a "content.txt" pointing to the DOE used as a template"""
doe_name = doe["name"]
doe_template = doe["doe_template"]
doe_root_path = doe_root_path or CONFIG["cache_doe_directory"]
doe_dir = doe_root_path / doe_name
doe_dir.mkdir(exist_ok=True)
content_file = doe_dir / "content.txt"
with open(content_file, "w") as fw:
fw.write(f"TEMPLATE: {doe_template}")
def write_doe(
doe,
component_factory=factory,
doe_root_path: Optional[PathType] = None,
doe_metadata_path: Optional[PathType] = None,
overwrite: bool = False,
precision: float = 1e-9,
**kwargs,
) -> None:
doe_name = doe["name"]
list_settings = doe["list_settings"]
# Otherwise generate each component using the component library
component_type = doe["component"]
components = build_components(
component_type, list_settings, component_factory=component_factory
)
component_names = [c.name for c in components]
save_doe(doe_name, components, doe_root_path=doe_root_path, precision=precision)
write_doe_metadata(
doe_name=doe["name"],
cell_names=component_names,
list_settings=doe["list_settings"],
doe_settings=kwargs,
doe_metadata_path=doe_metadata_path,
)
def load_does(
filepath: PathType, defaults: Optional[Dict[str, bool]] = None
) -> Tuple[Any, Any]:
"""Load_does from file."""
does = {}
defaults = defaults or {"do_permutation": True, "settings": {}}
data = OmegaConf.load(filepath)
data = OmegaConf.to_container(data)
mask = data.pop("mask")
for doe_name, doe in data.items():
for k in defaults:
if k not in doe:
doe[k] = defaults[k]
does[doe_name] = doe
return does, mask
def generate_does(
filepath: PathType,
component_factory: Dict[str, Callable] = factory,
doe_root_path: PathType = CONFIG["cache_doe_directory"],
doe_metadata_path: PathType = CONFIG["doe_directory"],
n_cores: int = 8,
overwrite: bool = False,
precision: float = 1e-9,
cache: bool = False,
) -> None:
"""Generates a DOEs of components specified in a yaml file
allows for each DOE to have its own x and y spacing (more flexible than method1)
similar to write_doe
"""
doe_root_path = pathlib.Path(doe_root_path)
doe_metadata_path = pathlib.Path(doe_metadata_path)
doe_root_path.mkdir(parents=True, exist_ok=True)
doe_metadata_path.mkdir(parents=True, exist_ok=True)
dicts, mask_settings = load_does(filepath)
does, templates_by_type = separate_does_from_templates(dicts)
dict_templates = (
templates_by_type["template"] if "template" in templates_by_type else {}
)
default_use_cached_does = (
mask_settings["cache"] if "cache" in mask_settings else cache
)
list_args = []
for doe_name, doe in does.items():
doe["name"] = doe_name
component = doe["component"]
if component not in component_factory:
raise ValueError(f"{component} not in {component_factory.keys()}")
if "template" in doe:
# The keyword template is used to enrich the dictionary from the template
templates = doe["template"]
if not isinstance(templates, list):
templates = [templates]
for template in templates:
try:
doe = update_dicts_recurse(doe, dict_templates[template])
except Exception:
print(template, "does not exist")
raise
do_permutation = doe.pop("do_permutation")
settings = doe["settings"]
doe["list_settings"] = get_settings_list(do_permutation, **settings)
list_args += [doe]
does_running = []
start_times = {}
finish_times = {}
doe_name_to_process = {}
while list_args:
while len(does_running) < n_cores:
if not list_args:
break
doe = list_args.pop()
doe_name = doe["name"]
# Only launch a build process if we do not use the cache
# Or if the DOE is not built
list_settings = doe["list_settings"]
use_cached_does = (
default_use_cached_does if "cache" not in doe else doe["cache"]
)
_doe_exists = False
if "doe_template" in doe:
# this DOE points to another existing component
_doe_exists = True
logger.info("Using template - {}".format(doe_name))
save_doe_use_template(doe)
elif use_cached_does:
_doe_exists = doe_exists(doe_name, list_settings)
if _doe_exists:
logger.info("Cached - {}".format(doe_name))
if overwrite:
component_names = load_doe_component_names(doe_name)
write_doe_metadata(
doe_name=doe["name"],
cell_names=component_names,
list_settings=doe["list_settings"],
doe_metadata_path=doe_metadata_path,
)
if not _doe_exists:
start_times[doe_name] = time.time()
p = Process(
target=write_doe,
args=(doe, component_factory),
kwargs={
"doe_root_path": doe_root_path,
"doe_metadata_path": doe_metadata_path,
"overwrite": overwrite,
"precision": precision,
},
)
doe_name_to_process[doe_name] = p
does_running += [doe_name]
try:
p.start()
except Exception:
print("Issue starting process for {}".format(doe_name))
print(type(component_factory))
raise
to_rm = []
for i, doe_name in enumerate(does_running):
_p = doe_name_to_process[doe_name]
if not _p.is_alive():
to_rm += [i]
finish_times[doe_name] = time.time()
dt = finish_times[doe_name] - start_times[doe_name]
line = "Done - {} ({:.1f}s)".format(doe_name, dt)
logger.info(line)
for i in to_rm[::-1]:
does_running.pop(i)
time.sleep(0.001)
while does_running:
to_rm = []
for i, _doe_name in enumerate(does_running):
_p = doe_name_to_process[_doe_name]
if not _p.is_alive():
to_rm += [i]
for i in to_rm[::-1]:
does_running.pop(i)
time.sleep(0.05)
if __name__ == "__main__":
filepath = CONFIG["samples_path"] / "mask" / "does.yml"
generate_does(filepath, precision=2e-9)
|
python
|
#!/usr/bin/env python
import os
import sys
import logging
import requests
import time
from extensions import valid_tagging_extensions
from readSettings import ReadSettings
from autoprocess import plex
from tvdb_mp4 import Tvdb_mp4
from mkvtomp4 import MkvtoMp4
from post_processor import PostProcessor
from logging.config import fileConfig
logpath = '/var/log/sickbeard_mp4_automator'
if os.environ.get('sonarr_eventtype') == "Test":
sys.exit(0)
if os.name == 'nt':
logpath = os.path.dirname(sys.argv[0])
elif not os.path.isdir(logpath):
try:
os.mkdir(logpath)
except:
logpath = os.path.dirname(sys.argv[0])
configPath = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), 'logging.ini')).replace("\\", "\\\\")
logPath = os.path.abspath(os.path.join(logpath, 'index.log')).replace("\\", "\\\\")
fileConfig(configPath, defaults={'logfilename': logPath})
log = logging.getLogger("SonarrPostConversion")
log.info("Sonarr extra script post processing started.")
settings = ReadSettings(os.path.dirname(sys.argv[0]), "autoProcess.ini")
inputfile = os.environ.get('sonarr_episodefile_path')
original = os.environ.get('sonarr_episodefile_scenename')
tvdb_id = int(os.environ.get('sonarr_series_tvdbid'))
season = int(os.environ.get('sonarr_episodefile_seasonnumber'))
try:
episode = int(os.environ.get('sonarr_episodefile_episodenumbers'))
except:
episode = int(os.environ.get('sonarr_episodefile_episodenumbers').split(",")[0])
converter = MkvtoMp4(settings)
log.debug("Input file: %s." % inputfile)
log.debug("Original name: %s." % original)
log.debug("TVDB ID: %s." % tvdb_id)
log.debug("Season: %s episode: %s." % (season, episode))
if MkvtoMp4(settings).validSource(inputfile):
log.info("Processing %s." % inputfile)
output = converter.process(inputfile, original=original)
if output:
# Tag with metadata
if settings.tagfile and output['output_extension'] in valid_tagging_extensions:
log.info("Tagging %s with ID %s season %s episode %s." % (inputfile, tvdb_id, season, episode))
try:
tagmp4 = Tvdb_mp4(tvdb_id, season, episode, original, language=settings.taglanguage)
tagmp4.setHD(output['x'], output['y'])
tagmp4.writeTags(output['output'], settings.artwork, settings.thumbnail)
except:
log.error("Unable to tag file")
# Copy to additional locations
output_files = converter.replicate(output['output'])
# Update Sonarr to continue monitored status
try:
host = settings.Sonarr['host']
port = settings.Sonarr['port']
webroot = settings.Sonarr['web_root']
apikey = settings.Sonarr['apikey']
if apikey != '':
try:
ssl = int(settings.Sonarr['ssl'])
except:
ssl = 0
if ssl:
protocol = "https://"
else:
protocol = "http://"
seriesID = os.environ.get('sonarr_series_id')
log.debug("Sonarr host: %s." % host)
log.debug("Sonarr port: %s." % port)
log.debug("Sonarr webroot: %s." % webroot)
log.debug("Sonarr apikey: %s." % apikey)
log.debug("Sonarr protocol: %s." % protocol)
log.debug("Sonarr sonarr_series_id: %s." % seriesID)
headers = {'X-Api-Key': apikey}
# First trigger rescan
payload = {'name': 'RescanSeries', 'seriesId': seriesID}
url = protocol + host + ":" + port + webroot + "/api/command"
r = requests.post(url, json=payload, headers=headers)
rstate = r.json()
log.info("Sonarr response: ID %d %s." % (rstate['id'], rstate['state']))
log.info(str(rstate)) # debug
# Then wait for it to finish
url = protocol + host + ":" + port + webroot + "/api/command/" + str(rstate['id'])
log.info("Requesting episode information from Sonarr for series ID %s." % seriesID)
r = requests.get(url, headers=headers)
command = r.json()
attempts = 0
while command['state'].lower() not in ['complete', 'completed'] and attempts < 6:
log.info(str(command['state']))
time.sleep(10)
r = requests.get(url, headers=headers)
command = r.json()
attempts += 1
log.info("Command completed")
log.info(str(command))
# Then get episode information
url = protocol + host + ":" + port + webroot + "/api/episode?seriesId=" + seriesID
log.info("Requesting updated episode information from Sonarr for series ID %s." % seriesID)
r = requests.get(url, headers=headers)
payload = r.json()
sonarrepinfo = None
for ep in payload:
if int(ep['episodeNumber']) == episode and int(ep['seasonNumber']) == season:
sonarrepinfo = ep
break
sonarrepinfo['monitored'] = True
# Then set that episode to monitored
log.info("Sending PUT request with following payload:") # debug
log.info(str(sonarrepinfo)) # debug
url = protocol + host + ":" + port + webroot + "/api/episode/" + str(sonarrepinfo['id'])
r = requests.put(url, json=sonarrepinfo, headers=headers)
success = r.json()
log.info("PUT request returned:") # debug
log.info(str(success)) # debug
log.info("Sonarr monitoring information updated for episode %s." % success['title'])
else:
log.error("Your Sonarr API Key can not be blank. Update autoProcess.ini.")
except:
log.exception("Sonarr monitor status update failed.")
# Run any post process scripts
if settings.postprocess:
post_processor = PostProcessor(output_files, log)
post_processor.setTV(tvdb_id, season, episode)
post_processor.run_scripts()
plex.refreshPlex(settings, 'show', log)
sys.exit(0)
|
python
|
def do_print():
print "hello world"
def add(a, b):
return a + b
def names_of_three_people(a, b, c):
return a['name'] + " and " + b['name'] + " and " + c['name']
def divide(a, b):
return a / b
def float_divide(a, b):
return float(a) / float(b)
def func_return_struct(name, age, hobby1, hobby2):
return {
"name": name,
"age": age,
"hobby": [
hobby1,
hobby2
]
}
def first_param_and_other_params(first, **other):
total = other
total['first'] = first
return total
|
python
|
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
from marshmallow import fields, post_load, validate
from typing import Optional
from ..schema import PatchedSchemaMeta
from ..fields import ArmVersionedStr
from azure.ml.constants import AssetType
class InputEntry:
def __init__(self, *, mode: Optional[str] = None, data: str):
self.data = data
self.mode = mode
INPUT_MODE_MOUNT = "Mount"
INPUT_MODE_DOWNLOAD = "Download"
INPUT_MODES = [INPUT_MODE_MOUNT, INPUT_MODE_DOWNLOAD]
class InputEntrySchema(metaclass=PatchedSchemaMeta):
mode = fields.Str(validate=validate.OneOf(INPUT_MODES))
data = ArmVersionedStr(asset_type=AssetType.DATA)
@post_load
def make(self, data, **kwargs):
return InputEntry(**data)
|
python
|
from typing import Union, List, Dict
from py_expression_eval import Parser # type: ignore
import math
from . import error
def add(num1: Union[int, float], num2: Union[int, float], *args) -> Union[int, float]:
"""Adds given numbers"""
sum: Union[int, float] = num1 + num2
for num in args:
sum += num
return sum
def subtract(
num1: Union[int, float], num2: Union[int, float], *args
) -> Union[int, float]:
"""Subtracts given numbers"""
sub: Union[int, float] = num1 - num2
for num in args:
sub -= num
return sub
def multiply(num1: Union[int, float], *args) -> Union[int, float]:
"""Multiplies given numbers"""
product: Union[int, float] = num1
for num in args:
product = product * num
return product
def divide(
num1: Union[int, float], num2: Union[int, float], type: str
) -> Union[int, float]:
"""Divides given numbers"""
if type.lower() == "int":
int_quotient: Union[int, float] = num1 / num2
return int_quotient
if type.lower() == "float":
float_quotient: Union[int, float] = num1 // num2
return float_quotient
raise error.UnknownDivisionTypeError(type)
def floatDiv(num1: Union[int, float], num2: Union[int, float]) -> Union[int, float]:
"""Divides given numbers"""
quotient: Union[int, float] = num1 / num2
return quotient
def intDiv(num1: Union[int, float], num2: Union[int, float]) -> Union[int, float]:
"""Divides given numbers and returns rounded off integer as result"""
quotient: Union[int, float] = num1 // num2
return quotient
def expo(num1: Union[int, float], num2: Union[int, float]) -> Union[int, float]:
"""Raises given number to given power and returns result"""
expo: Union[int, float] = num1 ** num2
return expo
def mod(num1: Union[int, float], num2: Union[int, float]) -> Union[int, float]:
"""Returns remainder of a division"""
remain: Union[int, float] = num1 % num2
return remain
def evalExp(exp: str, vars_: Dict[str, int] = {}):
"""Evaluates given mathematical expression"""
parser = Parser()
solution: Union[int, float] = parser.parse(exp).evaluate(vars_)
return solution
def avg(listOfNos: Union[List[int], List[float]]) -> float:
"""Return average of given numbers"""
avg: float = 0.0
for num in listOfNos:
avg += num
avg /= len(listOfNos)
return avg
def factorial(num: int) -> int:
"""Returns factorial of a number"""
factorial: int = 1
    for i in range(2, num + 1):
factorial *= i
return factorial
def ceil(num: int) -> int:
"""Returns the number rounded up"""
ceil: int = math.ceil(num)
return ceil
def floor(num: int) -> int:
"""Returns the number rounded down"""
floor: int = math.floor(num)
return floor
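# Hedged usage sketch (not part of the original module; import these helpers from the
# package, since the relative "from . import error" prevents running this file directly):
#     evalExp("2 * x + y", {"x": 3, "y": 4})  # -> 10
#     avg([1, 2, 3, 4])                       # -> 2.5
#     divide(7, 2, "int")                     # -> 3 (floor division)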
|
python
|
#
# This file is part of DroneBridge: https://github.com/seeul8er/DroneBridge
#
# Copyright 2017 Wolfgang Christl
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import ctypes
import mmap
import time
from shmemctypes import ShmemRawArray
class wifi_adapter_rx_status_t(ctypes.Structure):
_fields_ = [
('received_packet_cnt', ctypes.c_uint32),
('wrong_crc_cnt', ctypes.c_uint32),
('current_signal_dbm', ctypes.c_int8),
('type', ctypes.c_int8)
]
class WBC_RX_Status(ctypes.Structure):
_fields_ = [
('last_update', ctypes.c_int32),
('received_block_cnt', ctypes.c_uint32),
('damaged_block_cnt', ctypes.c_uint32),
('lost_packet_cnt', ctypes.c_uint32),
('received_packet_cnt', ctypes.c_uint32),
('tx_restart_cnt', ctypes.c_uint32),
('kbitrate', ctypes.c_uint32),
('wifi_adapter_cnt', ctypes.c_uint32),
('adapter', wifi_adapter_rx_status_t * 8)
]
def open_shm():
f = open("/wifibroadcast_rx_status_0", "r+b")
return mmap.mmap(f.fileno(), 0)
def read_wbc_status(mapped_structure):
wbc_status = WBC_RX_Status.from_buffer(mapped_structure)
    print(str(wbc_status.kbitrate)+"kbit/s"+" "+str(wbc_status.damaged_block_cnt)+" damaged blocks")
def main():
print("DB_WBC_STATUSREADER: starting")
shared_data = ShmemRawArray(WBC_RX_Status, 0, "/wifibroadcast_rx_status_0", False)
#mymap = open_shm()
while(True):
for d in shared_data:
print(str(d.received_block_cnt))
time.sleep(1)
if __name__ == "__main__":
main()
|
python
|
import errno
import gc
# from collections import namedtuple
import math
import os
import os.path
import time
from functools import lru_cache
from pathlib import Path
import numpy as np
import pandas as pd
import artistools as at
@lru_cache(maxsize=8)
def get_modeldata(inputpath=Path(), dimensions=None, get_abundances=False, derived_cols=False):
"""
Read an artis model.txt file containing cell velocities, density, and abundances of radioactive nuclides.
Arguments:
- inputpath: either a path to model.txt file, or a folder containing model.txt
- dimensions: number of dimensions in input file, or None for automatic
- get_abundances: also read elemental abundances (abundances.txt) and
merge with the output DataFrame
Returns (dfmodel, t_model_init_days)
- dfmodel: a pandas DataFrame with a row for each model grid cell
- t_model_init_days: the time in days at which the snapshot is defined
"""
assert dimensions in [1, 3, None]
inputpath = Path(inputpath)
if os.path.isdir(inputpath):
modelpath = inputpath
filename = at.firstexisting(['model.txt.xz', 'model.txt.gz', 'model.txt'], path=inputpath)
elif os.path.isfile(inputpath): # passed in a filename instead of the modelpath
filename = inputpath
modelpath = Path(inputpath).parent
elif not inputpath.exists() and inputpath.parts[0] == 'codecomparison':
modelpath = inputpath
_, inputmodel, _ = modelpath.parts
filename = Path(at.config['codecomparisonmodelartismodelpath'], inputmodel, 'model.txt')
else:
raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), inputpath)
headerrows = 0
with at.misc.zopen(filename, 'rt') as fmodel:
gridcellcount = int(fmodel.readline())
t_model_init_days = float(fmodel.readline())
headerrows += 2
t_model_init_seconds = t_model_init_days * 24 * 60 * 60
filepos = fmodel.tell()
# if the next line is a single float then the model is 3D
try:
vmax_cmps = float(fmodel.readline()) # velocity max in cm/s
xmax_tmodel = vmax_cmps * t_model_init_seconds # xmax = ymax = zmax
headerrows += 1
if dimensions is None:
print("Detected 3D model file")
dimensions = 3
elif dimensions != 3:
print(f" {dimensions} were specified but file appears to be 3D")
assert False
except ValueError:
if dimensions is None:
print("Detected 1D model file")
dimensions = 1
elif dimensions != 1:
print(f" {dimensions} were specified but file appears to be 1D")
assert False
fmodel.seek(filepos) # undo the readline() and go back
columns = None
filepos = fmodel.tell()
line = fmodel.readline()
if line.startswith('#'):
headerrows += 1
columns = line.lstrip('#').split()
else:
fmodel.seek(filepos) # undo the readline() and go back
ncols_file = len(fmodel.readline().split())
if dimensions > 1:
# columns split over two lines
ncols_file += len(fmodel.readline().split())
if columns is not None:
assert ncols_file == len(columns)
elif dimensions == 1:
columns = ['inputcellid', 'velocity_outer', 'logrho', 'X_Fegroup', 'X_Ni56',
'X_Co56', 'X_Fe52', 'X_Cr48', 'X_Ni57', 'X_Co57'][:ncols_file]
elif dimensions == 3:
columns = ['inputcellid', 'inputpos_a', 'inputpos_b', 'inputpos_c', 'rho',
'X_Fegroup', 'X_Ni56', 'X_Co56', 'X_Fe52', 'X_Cr48', 'X_Ni57', 'X_Co57'][:ncols_file]
# number of grid cell steps along an axis (same for xyz)
ncoordgridx = int(round(gridcellcount ** (1. / 3.)))
ncoordgridy = int(round(gridcellcount ** (1. / 3.)))
ncoordgridz = int(round(gridcellcount ** (1. / 3.)))
assert (ncoordgridx * ncoordgridy * ncoordgridz) == gridcellcount
if dimensions == 1:
dfmodel = pd.read_csv(
filename, delim_whitespace=True, header=None, names=columns, skiprows=headerrows, nrows=gridcellcount)
else:
dfmodel = pd.read_csv(
filename, delim_whitespace=True, header=None,
skiprows=lambda x: x < headerrows or (x - headerrows - 1) % 2 == 0, names=columns[:5],
nrows=gridcellcount)
dfmodeloddlines = pd.read_csv(
filename, delim_whitespace=True, header=None,
skiprows=lambda x: x < headerrows or (x - headerrows - 1) % 2 == 1, names=columns[5:],
nrows=gridcellcount)
assert len(dfmodel) == len(dfmodeloddlines)
dfmodel = dfmodel.merge(dfmodeloddlines, left_index=True, right_index=True)
del dfmodeloddlines
if len(dfmodel) > gridcellcount:
dfmodel = dfmodel.iloc[:gridcellcount]
assert len(dfmodel) == gridcellcount
dfmodel.index.name = 'cellid'
# dfmodel.drop('inputcellid', axis=1, inplace=True)
if dimensions == 1:
dfmodel['velocity_inner'] = np.concatenate([[0.], dfmodel['velocity_outer'].values[:-1]])
dfmodel.eval(
'cellmass_grams = 10 ** logrho * 4. / 3. * 3.14159265 * (velocity_outer ** 3 - velocity_inner ** 3)'
'* (1e5 * @t_model_init_seconds) ** 3', inplace=True)
vmax_cmps = dfmodel.velocity_outer.max() * 1e5
elif dimensions == 3:
wid_init = at.misc.get_wid_init_at_tmodel(modelpath, gridcellcount, t_model_init_days, xmax_tmodel)
dfmodel.eval('cellmass_grams = rho * @wid_init ** 3', inplace=True)
dfmodel.rename(columns={
'pos_x_min': 'pos_x_min', 'pos_y_min': 'pos_y_min', 'pos_z_min': 'pos_z_min'
}, inplace=True)
if 'pos_x_min' in dfmodel.columns:
print("Cell positions in model.txt are defined in the header")
else:
cellid = dfmodel.index.values
xindex = cellid % ncoordgridx
yindex = (cellid // ncoordgridx) % ncoordgridy
zindex = (cellid // (ncoordgridx * ncoordgridy)) % ncoordgridz
dfmodel['pos_x_min'] = -xmax_tmodel + 2 * xindex * xmax_tmodel / ncoordgridx
dfmodel['pos_y_min'] = -xmax_tmodel + 2 * yindex * xmax_tmodel / ncoordgridy
dfmodel['pos_z_min'] = -xmax_tmodel + 2 * zindex * xmax_tmodel / ncoordgridz
def vectormatch(vec1, vec2):
xclose = np.isclose(vec1[0], vec2[0], atol=xmax_tmodel / ncoordgridx)
yclose = np.isclose(vec1[1], vec2[1], atol=xmax_tmodel / ncoordgridy)
zclose = np.isclose(vec1[2], vec2[2], atol=xmax_tmodel / ncoordgridz)
return all([xclose, yclose, zclose])
posmatch_xyz = True
posmatch_zyx = True
# important cell numbers to check for coordinate column order
indexlist = [0, ncoordgridx - 1, (ncoordgridx - 1) * (ncoordgridy - 1),
(ncoordgridx - 1) * (ncoordgridy - 1) * (ncoordgridz - 1)]
for index in indexlist:
cell = dfmodel.iloc[index]
if not vectormatch([cell.inputpos_a, cell.inputpos_b, cell.inputpos_c],
[cell.pos_x_min, cell.pos_y_min, cell.pos_z_min]):
posmatch_xyz = False
if not vectormatch([cell.inputpos_a, cell.inputpos_b, cell.inputpos_c],
[cell.pos_z_min, cell.pos_y_min, cell.pos_x_min]):
posmatch_zyx = False
assert posmatch_xyz != posmatch_zyx # one option must match
if posmatch_xyz:
print("Cell positions in model.txt are consistent with calculated values when x-y-z column order")
if posmatch_zyx:
print("Cell positions in model.txt are consistent with calculated values when z-y-x column order")
if get_abundances:
if dimensions == 3:
print('Getting abundances')
abundancedata = get_initialabundances(modelpath)
dfmodel = dfmodel.merge(abundancedata, how='inner', on='inputcellid')
if derived_cols:
add_derived_cols_to_modeldata(dfmodel, derived_cols, dimensions, t_model_init_seconds, wid_init, modelpath)
return dfmodel, t_model_init_days, vmax_cmps
def add_derived_cols_to_modeldata(dfmodel, derived_cols, dimensions=None, t_model_init_seconds=None, wid_init=None,
modelpath=None):
"""add columns to modeldata using e.g. derived_cols = ('velocity', 'Ye')"""
if dimensions is None:
dimensions = get_dfmodel_dimensions(dfmodel)
if dimensions == 3 and 'velocity' in derived_cols:
dfmodel['vel_x_min'] = dfmodel['pos_x_min'] / t_model_init_seconds
dfmodel['vel_y_min'] = dfmodel['pos_y_min'] / t_model_init_seconds
dfmodel['vel_z_min'] = dfmodel['pos_z_min'] / t_model_init_seconds
dfmodel['vel_x_max'] = (dfmodel['pos_x_min'] + wid_init) / t_model_init_seconds
dfmodel['vel_y_max'] = (dfmodel['pos_y_min'] + wid_init) / t_model_init_seconds
dfmodel['vel_z_max'] = (dfmodel['pos_z_min'] + wid_init) / t_model_init_seconds
dfmodel['vel_x_mid'] = (dfmodel['pos_x_min'] + (0.5 * wid_init)) / t_model_init_seconds
dfmodel['vel_y_mid'] = (dfmodel['pos_y_min'] + (0.5 * wid_init)) / t_model_init_seconds
dfmodel['vel_z_mid'] = (dfmodel['pos_z_min'] + (0.5 * wid_init)) / t_model_init_seconds
dfmodel.eval('vel_mid_radial = sqrt(vel_x_mid ** 2 + vel_y_mid ** 2 + vel_z_mid ** 2)', inplace=True)
if dimensions == 3 and 'pos_mid' in derived_cols or 'angle_bin' in derived_cols:
dfmodel['pos_x_mid'] = (dfmodel['pos_x_min'] + (0.5 * wid_init))
dfmodel['pos_y_mid'] = (dfmodel['pos_y_min'] + (0.5 * wid_init))
dfmodel['pos_z_mid'] = (dfmodel['pos_z_min'] + (0.5 * wid_init))
if 'angle_bin' in derived_cols:
get_cell_angle(dfmodel, modelpath)
if 'Ye' in derived_cols and os.path.isfile(modelpath / 'Ye.txt'):
dfmodel['Ye'] = at.inputmodel.opacityinputfile.get_Ye_from_file(modelpath)
if 'Q' in derived_cols and os.path.isfile(modelpath / 'Q_energy.txt'):
dfmodel['Q'] = at.inputmodel.energyinputfiles.get_Q_energy_from_file(modelpath)
return dfmodel
def get_cell_angle(dfmodel, modelpath):
"""get angle between cell midpoint and axis"""
syn_dir = at.get_syn_dir(modelpath)
cos_theta = np.zeros(len(dfmodel))
i = 0
for _, cell in dfmodel.iterrows():
mid_point = [cell['pos_x_mid'], cell['pos_y_mid'], cell['pos_z_mid']]
cos_theta[i] = (
at.dot(mid_point, syn_dir)) / (at.vec_len(mid_point) * at.vec_len(syn_dir))
i += 1
dfmodel['cos_theta'] = cos_theta
cos_bins = [-1, -0.8, -0.6, -0.4, -0.2, 0, 0.2, 0.4, 0.6, 0.8, 1] # including end bin
labels = [0, 10, 20, 30, 40, 50, 60, 70, 80, 90] # to agree with escaping packet bin numbers
dfmodel['cos_bin'] = pd.cut(dfmodel['cos_theta'], cos_bins, labels=labels)
# dfmodel['cos_bin'] = np.searchsorted(cos_bins, dfmodel['cos_theta'].values) -1
return dfmodel
def get_mean_cell_properties_of_angle_bin(dfmodeldata, vmax_cmps, modelpath=None):
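    """Return, for each angle bin, a DataFrame of mean rho (and Ye, Q when available) per radial-velocity shell."""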
if 'cos_bin' not in dfmodeldata:
get_cell_angle(dfmodeldata, modelpath)
dfmodeldata['rho'][dfmodeldata['rho'] == 0] = None
dfmodeldata['rho']
cell_velocities = np.unique(dfmodeldata['vel_x_min'].values)
cell_velocities = cell_velocities[cell_velocities >= 0]
velocity_bins = np.append(cell_velocities, vmax_cmps)
mid_velocities = np.unique(dfmodeldata['vel_x_mid'].values)
mid_velocities = mid_velocities[mid_velocities >= 0]
mean_bin_properties = {}
for bin_number in range(10):
mean_bin_properties[bin_number] = pd.DataFrame({'velocity': mid_velocities,
'mean_rho': np.zeros_like(mid_velocities, dtype=float),
'mean_Ye': np.zeros_like(mid_velocities, dtype=float),
'mean_Q': np.zeros_like(mid_velocities, dtype=float)})
# cos_bin_number = 90
for bin_number in range(10):
cos_bin_number = bin_number * 10
# get cells with bin number
dfanglebin = dfmodeldata.query('cos_bin == @cos_bin_number', inplace=False)
binned = pd.cut(dfanglebin['vel_mid_radial'], velocity_bins, labels=False, include_lowest=True)
i = 0
for binindex, mean_rho in dfanglebin.groupby(binned)['rho'].mean().items():
i += 1
mean_bin_properties[bin_number]['mean_rho'][binindex] += mean_rho
i = 0
if 'Ye' in dfmodeldata.keys():
for binindex, mean_Ye in dfanglebin.groupby(binned)['Ye'].mean().items():
i += 1
mean_bin_properties[bin_number]['mean_Ye'][binindex] += mean_Ye
if 'Q' in dfmodeldata.keys():
for binindex, mean_Q in dfanglebin.groupby(binned)['Q'].mean().items():
i += 1
mean_bin_properties[bin_number]['mean_Q'][binindex] += mean_Q
return mean_bin_properties
def get_2d_modeldata(modelpath):
filepath = os.path.join(modelpath, 'model.txt')
num_lines = sum(1 for line in open(filepath))
skiprowlist = [0, 1, 2]
skiprowlistodds = skiprowlist + [i for i in range(3, num_lines) if i % 2 == 1]
skiprowlistevens = skiprowlist + [i for i in range(3, num_lines) if i % 2 == 0]
model1stlines = pd.read_csv(filepath, delim_whitespace=True, header=None, skiprows=skiprowlistevens)
model2ndlines = pd.read_csv(filepath, delim_whitespace=True, header=None, skiprows=skiprowlistodds)
model = pd.concat([model1stlines, model2ndlines], axis=1)
column_names = ['inputcellid', 'cellpos_mid[r]', 'cellpos_mid[z]', 'rho_model',
'X_Fegroup', 'X_Ni56', 'X_Co56', 'X_Fe52', 'X_Cr48']
model.columns = column_names
return model
def get_3d_model_data_merged_model_and_abundances_minimal(args):
"""Get 3D data without generating all the extra columns in standard routine.
Needed for large (e.g. 200^3) models"""
model = get_3d_modeldata_minimal(args.modelpath)
abundances = get_initialabundances(args.modelpath[0])
with open(os.path.join(args.modelpath[0], 'model.txt'), 'r') as fmodelin:
fmodelin.readline() # npts_model3d
args.t_model = float(fmodelin.readline()) # days
args.vmax = float(fmodelin.readline()) # v_max in [cm/s]
print(model.keys())
merge_dfs = model.merge(abundances, how='inner', on='inputcellid')
del model
del abundances
gc.collect()
merge_dfs.info(verbose=False, memory_usage="deep")
return merge_dfs
def get_3d_modeldata_minimal(modelpath):
"""Read 3D model without generating all the extra columns in standard routine.
Needed for large (e.g. 200^3) models"""
model = pd.read_csv(os.path.join(modelpath[0], 'model.txt'),
delim_whitespace=True, header=None, skiprows=3, dtype=np.float64)
columns = ['inputcellid', 'cellpos_in[z]', 'cellpos_in[y]', 'cellpos_in[x]', 'rho_model',
'X_Fegroup', 'X_Ni56', 'X_Co56', 'X_Fe52', 'X_Cr48']
model = pd.DataFrame(model.values.reshape(-1, 10))
model.columns = columns
print('model.txt memory usage:')
model.info(verbose=False, memory_usage="deep")
return model
def save_modeldata(
dfmodel, t_model_init_days, filename=None, modelpath=None, vmax=None, dimensions=1, radioactives=True):
"""Save a pandas DataFrame and snapshot time into ARTIS model.txt"""
timestart = time.perf_counter()
assert dimensions in [1, 3, None]
if dimensions == 1:
standardcols = ['inputcellid', 'velocity_outer', 'logrho', 'X_Fegroup', 'X_Ni56', 'X_Co56', 'X_Fe52',
'X_Cr48']
elif dimensions == 3:
dfmodel.rename(columns={'gridindex': 'inputcellid'}, inplace=True)
griddimension = int(round(len(dfmodel) ** (1. / 3.)))
print(f' grid size: {len(dfmodel)} ({griddimension}^3)')
assert griddimension ** 3 == len(dfmodel)
standardcols = [
'inputcellid', 'pos_x_min', 'pos_y_min', 'pos_z_min', 'rho',
'X_Fegroup', 'X_Ni56', 'X_Co56', 'X_Fe52', 'X_Cr48']
# these two columns are optional, but position is important and they must appear before any other custom cols
if 'X_Ni57' in dfmodel.columns:
standardcols.append('X_Ni57')
if 'X_Co57' in dfmodel.columns:
standardcols.append('X_Co57')
dfmodel['inputcellid'] = dfmodel['inputcellid'].astype(int)
customcols = [col for col in dfmodel.columns if col not in standardcols and col.startswith('X_')]
customcols.sort(key=lambda col: at.get_z_a_nucname(col)) # sort columns by atomic number, mass number
# set missing radioabundance columns to zero
for col in standardcols:
if col not in dfmodel.columns and col.startswith('X_'):
dfmodel[col] = 0.0
assert modelpath is not None or filename is not None
if filename is None:
filename = 'model.txt'
if modelpath is not None:
modelfilepath = Path(modelpath, filename)
else:
modelfilepath = Path(filename)
with open(modelfilepath, 'w') as fmodel:
fmodel.write(f'{len(dfmodel)}\n')
fmodel.write(f'{t_model_init_days}\n')
if dimensions == 3:
fmodel.write(f'{vmax}\n')
if customcols:
fmodel.write(f'#{" ".join(standardcols)} {" ".join(customcols)}\n')
abundcols = [*[col for col in standardcols if col.startswith('X_')], *customcols]
# for cell in dfmodel.itertuples():
# if dimensions == 1:
# fmodel.write(f'{cell.inputcellid:6d} {cell.velocity_outer:9.2f} {cell.logrho:10.8f} ')
# elif dimensions == 3:
# fmodel.write(f"{cell.inputcellid:6d} {cell.posx} {cell.posy} {cell.posz} {cell.rho}\n")
#
# fmodel.write(" ".join([f'{getattr(cell, col)}' for col in abundcols]))
#
# fmodel.write('\n')
if dimensions == 1:
for cell in dfmodel.itertuples(index=False):
fmodel.write(f'{cell.inputcellid:6d} {cell.velocity_outer:9.2f} {cell.logrho:10.8f} ')
fmodel.write(" ".join([f'{getattr(cell, col)}' for col in abundcols]))
fmodel.write('\n')
elif dimensions == 3:
zeroabund = ' '.join(['0.0' for _ in abundcols])
for inputcellid, posxmin, posymin, poszmin, rho, *massfracs in dfmodel[
['inputcellid', 'pos_x_min', 'pos_y_min', 'pos_z_min', 'rho', *abundcols]
].itertuples(index=False, name=None):
fmodel.write(f"{inputcellid:6d} {posxmin} {posymin} {poszmin} {rho}\n")
fmodel.write(" ".join([f'{abund}' for abund in massfracs]) if rho > 0. else zeroabund)
fmodel.write('\n')
print(f'Saved {filename} (took {time.perf_counter() - timestart:.1f} seconds)')
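# Usage sketch (added for illustration, not part of the original module; every value
# below is invented): write a minimal two-shell 1D model with save_modeldata().
#
#   dfmodel_example = pd.DataFrame({
#       'inputcellid': [1, 2],
#       'velocity_outer': [5000., 10000.],   # km/s
#       'logrho': [-12.5, -13.0],
#       'X_Fegroup': [1.0, 1.0], 'X_Ni56': [0.5, 0.1], 'X_Co56': [0.0, 0.0],
#       'X_Fe52': [0.0, 0.0], 'X_Cr48': [0.0, 0.0],
#   })
#   save_modeldata(dfmodel_example, t_model_init_days=0.1, modelpath=Path('.'), dimensions=1)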
def get_mgi_of_velocity_kms(modelpath, velocity, mgilist=None):
"""Return the modelgridindex of the cell whose outer velocity is closest to velocity.
If mgilist is given, then chose from these cells only"""
modeldata, _, _ = get_modeldata(modelpath)
velocity = float(velocity)
if not mgilist:
mgilist = [mgi for mgi in modeldata.index]
arr_vouter = modeldata['velocity_outer'].values
else:
arr_vouter = np.array([modeldata['velocity_outer'][mgi] for mgi in mgilist])
index_closestvouter = np.abs(arr_vouter - velocity).argmin()
if velocity < arr_vouter[index_closestvouter] or index_closestvouter + 1 >= len(mgilist):
return mgilist[index_closestvouter]
elif velocity < arr_vouter[index_closestvouter + 1]:
return mgilist[index_closestvouter + 1]
elif np.isnan(velocity):
return float('nan')
else:
print(f"Can't find cell with velocity of {velocity}. Velocity list: {arr_vouter}")
assert(False)
@lru_cache(maxsize=8)
def get_initialabundances(modelpath):
"""Return a list of mass fractions."""
abundancefilepath = at.firstexisting(
['abundances.txt.xz', 'abundances.txt.gz', 'abundances.txt'], path=modelpath)
abundancedata = pd.read_csv(abundancefilepath, delim_whitespace=True, header=None)
abundancedata.index.name = 'modelgridindex'
abundancedata.columns = [
'inputcellid', *['X_' + at.get_elsymbol(x) for x in range(1, len(abundancedata.columns))]]
if len(abundancedata) > 100000:
print('abundancedata memory usage:')
abundancedata.info(verbose=False, memory_usage="deep")
return abundancedata
def save_initialabundances(dfelabundances, abundancefilename):
"""Save a DataFrame (same format as get_initialabundances) to abundances.txt.
columns must be:
- inputcellid: integer index to match model.txt (starting from 1)
- X_El: mass fraction of element with symbol 'El' (e.g., X_H, X_He, X_Li, ...)
"""
timestart = time.perf_counter()
if Path(abundancefilename).is_dir():
abundancefilename = Path(abundancefilename) / 'abundances.txt'
dfelabundances['inputcellid'] = dfelabundances['inputcellid'].astype(int)
atomic_numbers = [at.get_atomic_number(colname[2:])
for colname in dfelabundances.columns if colname.startswith('X_')]
elcolnames = [f'X_{at.get_elsymbol(Z)}' for Z in range(1, 1 + max(atomic_numbers))]
# set missing elemental abundance columns to zero
for col in elcolnames:
if col not in dfelabundances.columns:
dfelabundances[col] = 0.0
with open(abundancefilename, 'w') as fabund:
for row in dfelabundances.itertuples(index=False):
fabund.write(f' {row.inputcellid:6d} ')
fabund.write(" ".join([f'{getattr(row, colname, 0.)}' for colname in elcolnames]))
fabund.write("\n")
print(f'Saved {abundancefilename} (took {time.perf_counter() - timestart:.1f} seconds)')
def save_empty_abundance_file(ngrid, outputfilepath='.'):
"""Dummy abundance file with only zeros"""
Z_atomic = np.arange(1, 31)
abundancedata = {'cellid': range(1, ngrid + 1)}
for atomic_number in Z_atomic:
abundancedata[f'Z={atomic_number}'] = np.zeros(ngrid)
# abundancedata['Z=28'] = np.ones(ngrid)
abundancedata = pd.DataFrame(data=abundancedata)
abundancedata = abundancedata.round(decimals=5)
abundancedata.to_csv(Path(outputfilepath) / 'abundances.txt', header=False, sep='\t', index=False)
def get_dfmodel_dimensions(dfmodel):
if 'pos_x_min' in dfmodel.columns:
return 3
return 1
def sphericalaverage(dfmodel, t_model_init_days, vmax, dfelabundances=None, dfgridcontributions=None):
"""Convert 3D Cartesian grid model to 1D spherical"""
t_model_init_seconds = t_model_init_days * 24 * 60 * 60
xmax = vmax * t_model_init_seconds
ngridpoints = len(dfmodel)
ncoordgridx = round(ngridpoints ** (1. / 3.))
wid_init = 2 * xmax / ncoordgridx
print(f'Spherically averaging 3D model with {ngridpoints} cells...')
timestart = time.perf_counter()
# dfmodel = dfmodel.query('rho > 0.').copy()
dfmodel = dfmodel.copy()
celldensity = {cellindex: rho for cellindex, rho in dfmodel[['inputcellid', 'rho']].itertuples(index=False)}
dfmodel = add_derived_cols_to_modeldata(
dfmodel, ['velocity'], dimensions=3, t_model_init_seconds=t_model_init_seconds, wid_init=wid_init)
# print(dfmodel)
# print(dfelabundances)
km_to_cm = 1e5
velocity_bins = [vmax * n / ncoordgridx for n in range(ncoordgridx + 1)] # cm/s
outcells = []
outcellabundances = []
outgridcontributions = []
# cellidmap_3d_to_1d = {}
highest_active_radialcellid = -1
for radialcellid, (velocity_inner, velocity_outer) in enumerate(zip(velocity_bins[:-1], velocity_bins[1:]), 1):
assert velocity_outer > velocity_inner
matchedcells = dfmodel.query(
'vel_mid_radial > @velocity_inner and vel_mid_radial <= @velocity_outer')
matchedcellrhosum = matchedcells.rho.sum()
# cellidmap_3d_to_1d.update({cellid_3d: radialcellid for cellid_3d in matchedcells.inputcellid})
if len(matchedcells) == 0:
rhomean = 0.
else:
shell_volume = (4 * math.pi / 3) * (
(velocity_outer * t_model_init_seconds) ** 3 - (velocity_inner * t_model_init_seconds) ** 3)
rhomean = matchedcellrhosum * wid_init ** 3 / shell_volume
# volumecorrection = len(matchedcells) * wid_init ** 3 / shell_volume
# print(radialcellid, volumecorrection)
if rhomean > 0. and dfgridcontributions is not None:
dfcellcont = dfgridcontributions.query('cellindex in @matchedcells.inputcellid.values')
for particleid, dfparticlecontribs in dfcellcont.groupby('particleid'):
frac_of_cellmass_avg = sum([
(row.frac_of_cellmass *
celldensity[row.cellindex])
for row in dfparticlecontribs.itertuples(index=False)]) / matchedcellrhosum
frac_of_cellmass_includemissing_avg = sum([
(row.frac_of_cellmass_includemissing *
celldensity[row.cellindex])
for row in dfparticlecontribs.itertuples(index=False)]) / matchedcellrhosum
outgridcontributions.append({
'particleid': particleid,
'cellindex': radialcellid,
'frac_of_cellmass': frac_of_cellmass_avg,
'frac_of_cellmass_includemissing': frac_of_cellmass_includemissing_avg,
})
if rhomean > 0.:
highest_active_radialcellid = radialcellid
logrho = math.log10(max(1e-99, rhomean))
dictcell = {
'inputcellid': radialcellid,
'velocity_outer': velocity_outer / km_to_cm,
'logrho': logrho,
}
for column in matchedcells.columns:
if column.startswith('X_'):
if rhomean > 0.:
massfrac = np.dot(matchedcells[column], matchedcells.rho) / matchedcellrhosum
else:
massfrac = 0.
dictcell[column] = massfrac
outcells.append(dictcell)
if dfelabundances is not None:
if rhomean > 0.:
abund_matchedcells = dfelabundances.loc[matchedcells.index]
else:
abund_matchedcells = None
dictcellabundances = {'inputcellid': radialcellid}
for column in dfelabundances.columns:
if column.startswith('X_'):
if rhomean > 0.:
massfrac = np.dot(abund_matchedcells[column], matchedcells.rho) / matchedcellrhosum
else:
massfrac = 0.
dictcellabundances[column] = massfrac
outcellabundances.append(dictcellabundances)
dfmodel1d = pd.DataFrame(outcells[:highest_active_radialcellid])
dfabundances1d = (
pd.DataFrame(outcellabundances[:highest_active_radialcellid]) if outcellabundances else None)
dfgridcontributions1d = pd.DataFrame(outgridcontributions) if outgridcontributions else None
print(f' took {time.perf_counter() - timestart:.1f} seconds')
return dfmodel1d, dfabundances1d, dfgridcontributions1d
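# Usage sketch (illustrative only; assumes a 3D model.txt in the working directory):
#
#   dfmodel3d, t_model_init_days, vmax_cmps = get_modeldata('.')
#   dfmodel1d, dfabund1d, dfcontribs1d = sphericalaverage(dfmodel3d, t_model_init_days, vmax_cmps)
#   save_modeldata(dfmodel1d, t_model_init_days, modelpath=Path('.'), dimensions=1)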
|
python
|
#Codeacademy's Madlibs
from datetime import datetime
now = datetime.now()
print(now)
story = "%s wrote this story on a %s line train to test Python strings. Python is better than %s but worse than %s -------> written by %s on %02d/%02d/%02d at %02d:%02d"
story_name = raw_input("Enter a name: ")
story_line = raw_input("Enter a tube line: ")
story_programme_one = raw_input("Enter a programme: ")
story_programme_two = raw_input("Enter another programme: ")
print story % (story_name, story_line, story_programme_one, story_programme_two, story_name, now.day, now.month, now.year, now.hour, now.minute)
|
python
|
import logging
import os
import json
from pprint import pformat
import pysftp
from me4storage.common.exceptions import ApiError
logger = logging.getLogger(__name__)
def save_logs(host, port, username, password, output_file):
cnopts = pysftp.CnOpts(knownhosts=os.path.expanduser(os.path.join('~','.ssh','known_hosts')))
cnopts.hostkeys = None
logger.info(f"Downloading log bundle from {host} to "
f"{output_file} ... This can take a few minutes.")
with pysftp.Connection(host,
port=int(port),
username=username,
password=password,
cnopts=cnopts,
) as sftp:
sftp.get(remotepath='/logs', localpath=output_file)
return True
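# Usage sketch (host, credentials and paths are hypothetical):
#
#   save_logs(host='10.0.0.5', port=22, username='manage',
#             password='secret', output_file='/tmp/me4_logs.zip')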
|
python
|
import builtins
import traceback
from os.path import relpath
def dprint(*args, **kwargs):
"""Pre-pends the filename and linenumber to the print statement"""
stack = traceback.extract_stack()[:-1]
i = -1
last = stack[i]
if last.name in ('clearln', 'finish'):
return builtins.__dict__['oldprint'](*args, **kwargs)
# Handle print wrappers in pytorch_classification/utils/progress/progress/helpers.py
while last.name in ('writeln', 'write', 'update'):
i = i - 1
last = stack[i]
# Handle different versions of the traceback module
if hasattr(last, 'filename'):
out_str = "{}:{} ".format(relpath(last.filename), last.lineno)
else:
out_str = "{}:{} ".format(relpath(last[0]), last[1])
# Prepend the filename and linenumber
return builtins.__dict__['oldprint'](out_str, *args, **kwargs)
def enable():
if 'oldprint' not in builtins.__dict__:
builtins.__dict__['oldprint'] = builtins.__dict__['print']
builtins.__dict__['print'] = dprint
def disable():
if 'oldprint' in builtins.__dict__:
builtins.__dict__['print'] = builtins.__dict__['oldprint']
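# Minimal usage sketch (added for illustration): after enable(), every print() call is
# prefixed with "<relative path>:<lineno>"; disable() restores the builtin print.
if __name__ == '__main__':
    enable()
    print('hello from dprint')   # printed with a file:line prefix
    disable()
    print('plain builtin print again')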
|
python
|
import config_cosmos
import azure.cosmos.cosmos_client as cosmos_client
import json
from dateutil import parser
def post_speech(speech_details, category):
speech_details = speech_details.copy()
collection_link = "dbs/speakeasy/colls/" + category
speech_details["id"] = speech_details["user_name"] + "_" + speech_details["speech_name"]
client = cosmos_client.CosmosClient(url_connection=config_cosmos.COSMOSDB_HOST, auth={'masterKey': config_cosmos.COSMOSDB_KEY})
client.CreateItem(collection_link, speech_details)
return True
def get_speech_details(speech_name, user_name, category):
collection_link = "dbs/speakeasy/colls/" + category
client = cosmos_client.CosmosClient(url_connection=config_cosmos.COSMOSDB_HOST, auth={'masterKey': config_cosmos.COSMOSDB_KEY})
query = "SELECT * FROM %s WHERE %s.speech_name ='%s' AND %s.user_name='%s'" %(category, category, speech_name, category, user_name)
data = list(client.QueryItems(collection_link, query, config_cosmos.OPTIONS))
return data[0]
def get_all_speeches(user_name):
categories = ["gaze", "speech", "gestures"]
final = []
for category in categories:
collection_link = "dbs/speakeasy/colls/" + category
client = cosmos_client.CosmosClient(url_connection=config_cosmos.COSMOSDB_HOST, auth={'masterKey': config_cosmos.COSMOSDB_KEY})
query = "SELECT * FROM %s WHERE %s.user_name='%s'" %(category, category, user_name)
data = list(client.QueryItems(collection_link, query, config_cosmos.OPTIONS))
for item in data:
final.append({"speech_name": item["speech_name"], "timestamp": item["timestamp"], "category": category})
final = sorted(final, key=lambda x: parser.parse(" ".join(x["timestamp"].split(" ")[:-4])))[::-1]
return final
|
python
|
#!/usr/bin/env python
# Outputs the relative error in a particular stat for deg1 and deg2 FEM.
# Output columns:
# mesh_num medianEdgeLength deg1Error deg2Error
import sys, os, re, numpy as np
from numpy.linalg import norm
resultDir, stat = sys.argv[1:]
# Input data columns
meshInfo = ["mesh_num", "corner_angle", "medianEdgeLength"]
strains = ["strain"]
displacements = ["u_x", "u_y"] # per sample
numSamples = 3
columnNames = meshInfo
columnNames += strains
for s in range(numSamples):
columnNames += map(lambda n: "%s[%i]" % (n, s), displacements)
for s in range(numSamples):
columnNames += map(lambda n: "mathematica %s[%i]" % (n, s), displacements)
def read_table_sorted(path):
data = map(lambda s: s.strip().split('\t'), file(path))
return sorted(data, key=lambda r: int(r[0]))
def validateColumnCount(table, numColumns):
for row in table:
if (len(row) != numColumns):
raise Exception("Invalid number of columns: %i (expected %i)" % (len(row), numColumns))
deg1Table = read_table_sorted(resultDir + "/deg_1.txt")
deg2Table = read_table_sorted(resultDir + "/deg_2.txt")
validateColumnCount(deg1Table, len(columnNames))
validateColumnCount(deg2Table, len(columnNames))
if (len(deg1Table) != len(deg2Table)):
raise Exception("Data tables for deg1 and deg2 differ in length")
groundTruth = np.array(map(float, deg2Table[-1]))
for (d1, d2) in zip(deg1Table, deg2Table):
msh_num, medianEdgeLength = [d1[0], d1[2]];
relErrors = []
if stat in columnNames:
cidx = columnNames.index(stat)
relErrors = [ abs(float(d1[cidx]) - groundTruth[cidx]) / abs(groundTruth[cidx]),
abs(float(d2[cidx]) - groundTruth[cidx]) / abs(groundTruth[cidx])]
elif (stat.replace("norm", "x") in columnNames):
xidx = columnNames.index(stat.replace("norm", "x"))
yidx = columnNames.index(stat.replace("norm", "y"))
d1Vec = np.array(map(float, [d1[xidx], d1[yidx]]))
d2Vec = np.array(map(float, [d2[xidx], d2[yidx]]))
groundTruthVec = groundTruth[[xidx, yidx]]
relErrors = [ norm(d1Vec - groundTruthVec),
norm(d2Vec - groundTruthVec) ]
else: raise Exception("Unknown stat %s" % stat)
# mesh_num medianEdgeLength deg1Error deg2Error
print "\t".join([msh_num, medianEdgeLength] + map(str, relErrors))
|
python
|
"""
Here we implement some simple policies that
one can use directly in simple tasks.
More complicated policies can also be created
by inheriting from the Policy class
"""
import logging
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions import Categorical, Normal, Bernoulli
class Policy(nn.Module):
def __init__(self, fn_approximator):
super().__init__()
self.fn_approximator = fn_approximator
def forward(self, state):
raise NotImplementedError('Must be implemented.')
class RandomPolicy(Policy):
"""
A random policy that just takes one of output_dim actions randomly
"""
def __init__(self, output_dim=2):
super().__init__(None)
self.output_dim = output_dim
self.p = nn.Parameter(torch.IntTensor([0]), requires_grad=False)
def forward(self, state):
batch_size = state.size()[0]
probs = torch.ones(batch_size, self.output_dim) / self.output_dim
stochastic_policy = Categorical(probs)
actions = stochastic_policy.sample()
log_probs = stochastic_policy.log_prob(actions)
return actions, log_probs
class CategoricalPolicy(Policy):
"""
Used to pick from a range of actions.
```
fn_approximator = MLP_factory(input_size=4, output_size=3)
policy = policies.CategoricalPolicy(fn_approximator)
the actions will be a number in [0, 1, 2]
```
"""
def forward(self, state):
policy_log_probs = self.fn_approximator(state)
probs = F.softmax(policy_log_probs, dim=1)
stochastic_policy = Categorical(probs)
# sample discrete actions
actions = stochastic_policy.sample()
# get log probs
log_probs = stochastic_policy.log_prob(actions)
return actions, log_probs
def log_prob(self, state, action):
policy_log_probs = self.fn_approximator(state)
probs = F.softmax(policy_log_probs, dim=1)
stochastic_policy = Categorical(probs)
return stochastic_policy.log_prob(action)
class MultinomialPolicy(CategoricalPolicy):
def __init__(self, fn_approximator):
super().__init__(fn_approximator)
logging.warning('Use `CategoricalPolicy` since `MultinomialPolicy` will soon be deprecated.')
class GaussianPolicy(Policy):
"""
Used to take actions in continuous spaces
```
fn_approximator = MLP_factory(input_size=4, output_size=2)
policy = policies.GaussianPolicy(fn_approximator)
```
"""
def forward(self, state):
policy_mu, policy_sigma = self.fn_approximator(state)
policy_sigma = F.softplus(policy_sigma)
stochastic_policy = Normal(policy_mu, policy_sigma)
actions = stochastic_policy.sample()
log_probs = stochastic_policy.log_prob(actions)
return actions, log_probs
def log_prob(self, state, action):
raise NotImplementedError('Not implemented yet')
class BernoulliPolicy(Policy):
"""
Used to take binary actions.
This can also be used when each action consists of
a many binary actions, for example:
```
fn_approximator = MLP_factory(input_size=4, output_size=5)
policy = policies.BernoulliPolicy(fn_approximator)
```
this will result in each action being composed of 5 binary actions.
"""
def forward(self, state):
policy_p = self.fn_approximator(state)
policy_p = F.sigmoid(policy_p)
try:
stochastic_policy = Bernoulli(policy_p)
actions = stochastic_policy.sample()
log_probs = stochastic_policy.log_prob(actions)
except RuntimeError as e:
logging.debug('Runtime error occurred. policy_p was {}'.format(policy_p))
logging.debug('State was: {}'.format(state))
logging.debug('Function approximator return was: {}'.format(self.fn_approximator(state)))
logging.debug('This has occurred before when parameters of the network became NaNs.')
logging.debug('Check learning rate, or change eps in adaptive gradient descent methods.')
raise RuntimeError('BernoulliPolicy returned nan information. Logger level with DEBUG will have more '
'information')
return actions, log_probs
def log_prob(self, state, action):
policy_p = self.fn_approximator(state)
policy_p = F.sigmoid(policy_p)
stochastic_policy = Bernoulli(policy_p)
return stochastic_policy.log_prob(action)
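# Usage sketch (added for illustration; a plain linear layer stands in for the
# MLP_factory mentioned in the docstrings above):
if __name__ == '__main__':
    approximator = nn.Linear(4, 3)        # 4 state features -> 3 action logits
    policy = CategoricalPolicy(approximator)
    state = torch.randn(8, 4)             # batch of 8 states
    actions, log_probs = policy(state)    # actions in {0, 1, 2}, one log-prob per sample
    print(actions.shape, log_probs.shape)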
|
python
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('person', '__first__'),
]
operations = [
migrations.CreateModel(
name='Attendee',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('state', models.CharField(max_length=4, choices=[(b'yes', b'yes'), (b'no', b'no')])),
('event', models.ForeignKey(to='person.Person')),
],
),
migrations.CreateModel(
name='Event',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('from_dt', models.DateTimeField()),
('to_dt', models.DateTimeField()),
('title', models.CharField(max_length=128)),
('text', models.TextField()),
('price', models.IntegerField()),
],
),
]
|
python
|
import math
class GPSlocation:
"""used to translate the location system"""
_prop_ = 'GPSlocation'
pi = 3.1415926535897932384626
a = 6378245.0
ee = 0.00669342162296594323
def gcj02_to_wgs84(self, lng, lat):
"""GCJ02 system to WGS1984 system"""
dlat = self._transformlat(lng - 105.0, lat - 35.0)
dlng = self._transformlng(lng - 105.0, lat - 35.0)
radlat = lat / 180.0 * self.pi
magic = math.sin(radlat)
magic = 1 - self.ee * magic * magic
sqrtmagic = math.sqrt(magic)
dlat = (dlat * 180.0) / ((self.a * (1 - self.ee)) / (magic * sqrtmagic) * self.pi)
dlng = (dlng * 180.0) / (self.a / sqrtmagic * math.cos(radlat) * self.pi)
mglat = lat + dlat
mglng = lng + dlng
return [lng * 2 - mglng, lat * 2 - mglat]
def wgs84_to_gcj02(self, lng, lat):
"""WGS1984 system to GCJ02 system"""
dlat = self._transformlat(lng - 105.0, lat - 35.0)
dlng = self._transformlng(lng - 105.0, lat - 35.0)
radlat = lat / 180.0 * self.pi
magic = math.sin(radlat)
magic = 1 - self.ee * magic * magic
sqrtmagic = math.sqrt(magic)
dlat = (dlat * 180.0) / ((self.a * (1 - self.ee)) / (magic * sqrtmagic) * self.pi)
dlng = (dlng * 180.0) / (self.a / sqrtmagic * math.cos(radlat) * self.pi)
mglat = lat + dlat
mglng = lng + dlng
return [mglng, mglat]
def _transformlat(self, lng, lat):
ret = -100.0 + 2.0 * lng + 3.0 * lat + 0.2 * lat * lat + 0.1 * lng * lat + 0.2 * math.sqrt(math.fabs(lng))
ret += (20.0 * math.sin(6.0 * lng * self.pi) + 20.0 * math.sin(2.0 * lng * self.pi)) * 2.0 / 3.0
ret += (20.0 * math.sin(lat * self.pi) + 40.0 * math.sin(lat / 3.0 * self.pi)) * 2.0 / 3.0
ret += (160.0 * math.sin(lat / 12.0 * self.pi) + 320 * math.sin(lat * self.pi / 30.0)) * 2.0 / 3.0
return ret
def _transformlng(self, lng, lat):
ret = 300.0 + lng + 2.0 * lat + 0.1 * lng * lng + 0.1 * lng * lat + 0.1 * math.sqrt(math.fabs(lng))
ret += (20.0 * math.sin(6.0 * lng * self.pi) + 20.0 * math.sin(2.0 * lng * self.pi)) * 2.0 / 3.0
ret += (20.0 * math.sin(lng * self.pi) + 40.0 * math.sin(lng / 3.0 * self.pi)) * 2.0 / 3.0
ret += (150.0 * math.sin(lng / 12.0 * self.pi) + 300.0 * math.sin(lng / 30.0 * self.pi)) * 2.0 / 3.0
return ret
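# Usage sketch (added for illustration; the coordinates are arbitrary Beijing values):
if __name__ == '__main__':
    converter = GPSlocation()
    gcj_lng, gcj_lat = converter.wgs84_to_gcj02(116.3974, 39.9087)
    back_lng, back_lat = converter.gcj02_to_wgs84(gcj_lng, gcj_lat)
    print(gcj_lng, gcj_lat)     # offset coordinates in the GCJ-02 system
    print(back_lng, back_lat)   # approximately the original WGS-84 input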
|
python
|
# -*- coding: utf-8 -
import re
import copy
import urllib
import urllib3
import string
import dateutil.parser
from iso8601 import parse_date
from robot.libraries.BuiltIn import BuiltIn
from datetime import datetime, timedelta
import pytz
TZ = pytz.timezone('Europe/Kiev')
def get_library():
return BuiltIn().get_library_instance('Selenium2Library')
def get_webdriver_instance():
return get_library()._current_browser()
# returns True if the variable is None
def get_variable_is_none(variable):
if variable is None:
return True
return False
# run specified keyword if condition is not none type
def run_keyword_if_condition_is_not_none(condition, name, *args):
if get_variable_is_none(condition) == False:
BuiltIn().run_keyword(name, *args)
# run specified keyword if condition is none type
def run_keyword_if_condition_is_none(condition, name, *args):
if get_variable_is_none(condition) == True:
BuiltIn().run_keyword(name, *args)
# return value for *keys (nested) in `element` (dict).
def get_from_dictionary_by_keys(element, *keys):
if not isinstance(element, dict):
raise AttributeError('keys_exists() expects dict as first argument.')
if len(keys) == 0:
raise AttributeError('keys_exists() expects at least two arguments, one given.')
_element = element
for key in keys:
try:
_element = _element[key]
except KeyError:
return None
return _element
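# Example (illustrative): get_from_dictionary_by_keys({'a': {'b': {'c': 1}}}, 'a', 'b', 'c')
# returns 1, and returns None as soon as any key along the path is missing.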
# returns if element exists on page. optimization
def get_is_element_exist(locator):
jquery_locator = convert_locator_to_jquery(locator)
if get_variable_is_none(jquery_locator) == False:
jquery_locator = jquery_locator.replace('"', '\\"')
length = get_webdriver_instance().execute_script('return $("' + jquery_locator + '").length;')
return length > 0
try:
get_library()._element_find(locator, None, True)
except Exception:
return False
return True
# click
def js_click_element(locator):
element = get_library()._element_find(locator, None, True)
get_webdriver_instance().execute_script(
'var $el = jQuery(arguments[0]); if($el.length) $el.click();',
element
)
# convert locator to jquery locator
def convert_locator_to_jquery(locator):
locator_params = locator.split('=', 1)
if locator_params[0] == 'id':
return '#' + locator_params[1]
if locator_params[0] == 'jquery':
return locator_params[1]
if locator_params[0] == 'css':
return locator_params[1]
return None
# set scroll to element in view
def set_element_scroll_into_view(locator):
element = get_library()._element_find(locator, None, True)
get_webdriver_instance().execute_script(
'var $el = jQuery(arguments[0]); if($el.length) $el.get(0).scrollIntoView();',
element
)
# return text/value by specified locator
def get_value_by_locator(locator):
element = get_library()._element_find(locator, None, True)
text = get_webdriver_instance().execute_script(
'var $element = jQuery(arguments[0]);'
'if($element.is("input[type=checkbox]")) return $element.is(":checked") ? "1":"0";'
'if($element.is("input,textarea,select")) return $element.val();'
'return $element.text();',
element
)
return text
# input text to hidden input
def input_text_to_hidden_input(locator, text):
element = get_library()._element_find(locator, None, True)
get_webdriver_instance().execute_script(
'jQuery(arguments[0]).val("' + text.replace('"', '\\"') + '");',
element
)
# select option by label for hidden select
def select_from_hidden_list_by_label(locator, label):
element = get_library()._element_find(locator, None, True)
get_webdriver_instance().execute_script(
'var $option = jQuery("option:contains(' + label.replace('"', '\\"') + ')", arguments[0]);' +
'if($option.length) jQuery(arguments[0]).val($option.attr("value"));',
element
)
# trigger change event for input by locator
def trigger_input_change_event(locator):
element = get_library()._element_find(locator, None, True)
get_webdriver_instance().execute_script(
'var $el = jQuery(arguments[0]); if($el.length) $el.trigger("change");',
element
)
# convert numbers to string
def convert_float_to_string(number):
return repr(float(number))
def convert_esco__float_to_string(number):
return '{0:.5f}'.format(float(number))
def convert_float_to_string_3f(number):
return '{0:.3f}'.format(float(number))
# convert any variable to specified type
def convert_to_specified_type(value, type):
value = "%s" % (value)
if type == 'integer':
value = value.split()
value = ''.join(value)
print(value)
value = int(value)
if type == 'float':
value = value.split()
value = ''.join(value)
print(value)
value = float(value)
return value
# prepare isodate in needed format
def isodate_format(isodate, format):
iso_dt = parse_date(isodate)
return iso_dt.strftime(format)
def procuring_entity_name(tender_data):
tender_data.data.procuringEntity['name'] = u"ТОВ \"ПабликБид\""
tender_data.data.procuringEntity['name_en'] = u"TOV \"publicbid\""
tender_data.data.procuringEntity.identifier['id'] = u"1234567890-publicbid"
tender_data.data.procuringEntity.identifier['legalName'] = u"ТОВ \"ПабликБид\""
tender_data.data.procuringEntity.identifier['legalName_en'] = u"TOV \"publicbid\""
if 'address' in tender_data.data.procuringEntity:
tender_data.data.procuringEntity.address['region'] = u"м. Київ"
tender_data.data.procuringEntity.address['postalCode'] = u"123123"
tender_data.data.procuringEntity.address['locality'] = u"Київ"
tender_data.data.procuringEntity.address['streetAddress'] = u"address"
if 'contactPoint' in tender_data.data.procuringEntity:
tender_data.data.procuringEntity.contactPoint['name'] = u"Test ЗамовникОборони"
tender_data.data.procuringEntity.contactPoint['name_en'] = u"Test"
tender_data.data.procuringEntity.contactPoint['email'] = u"[email protected]"
tender_data.data.procuringEntity.contactPoint['telephone'] = u"+3801111111111"
tender_data.data.procuringEntity.contactPoint['url'] = u"https://public-bid.com.ua"
if 'buyers' in tender_data.data:
tender_data.data.buyers[0]['name'] = u"ТОВ \"ПабликБид\""
tender_data.data.buyers[0].identifier['id'] = u"1234567890-publicbid"
tender_data.data.buyers[0].identifier['legalName'] = u"ТОВ \"ПабликБид\""
return tender_data
# prepare data
def prepare_procuring_entity_data(data):
try:
data['name'] = u"publicbid"
data.identifier['id'] = u"publicbid"
data.identifier['legalName'] = u"publicbid"
data.identifier['scheme'] = u"UA-EDR"
if 'name_en' in data:
data['name_en'] = u"publicbid"
if 'legalName_en' in data.identifier:
data.identifier['legalName_en'] = u"publicbid"
if 'address' in data:
data.address['countryName'] = u"Україна"
data.address['locality'] = u"Київ"
data.address['postalCode'] = u"01111"
data.address['region'] = u"місто Київ"
data.address['streetAddress'] = u"вулиця Тестова, 220, 8"
if 'contactPoint' in data:
data.contactPoint['email'] = u"[email protected]"
data.contactPoint['faxNumber'] = u"+3801111111111"
data.contactPoint['telephone'] = u"+3801111111111"
data.contactPoint['name'] = u"Test"
if 'name_en' in data.contactPoint:
data.contactPoint['name_en'] = u"Test"
data.contactPoint['url'] = u"https://public-bid.com.ua"
except Exception:
raise Exception('data is not a dictionary')
# prepare data
def prepare_buyers_data(data):
if type(data) is not list:
raise Exception('data is not a list')
# preventing console errors about changing buyer data in cases
if len(data) != 1:
return
item = next(iter(data), None)
item['name'] = u"publicbid"
item.identifier['id'] = u"publicbid"
item.identifier['legalName'] = u"publicbid"
item.identifier['scheme'] = u"UA-EDR"
# prepare dictionary from field path + value
def generate_dictionary_from_field_path_and_value(path, value):
data = dict()
path_keys_list = path.split('.')
if len(path_keys_list) > 1:
key = path_keys_list.pop(0)
value = generate_dictionary_from_field_path_and_value('.'.join(path_keys_list), value)
indexRegex = re.compile(r'(\[(\d+)\]$)')
matchObj = indexRegex.search(key)
print(matchObj)
if matchObj:
key = indexRegex.sub('', key)
value['list_index'] = matchObj.group(2)
value = [value]
data[key] = value
else:
data = dict()
data[path] = value
return data
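# Example (illustrative): generate_dictionary_from_field_path_and_value('items[0].unit.name', 'kg')
# returns {'items': [{'unit': {'name': 'kg'}, 'list_index': '0'}]} by recursing over the dotted path.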
# Percentage conversion
def multiply_hundred(number):
return number * 100
# prepares data for filling form in easiest way
def prepare_tender_data(data_original):
# preventing change data in global view
data = copy.deepcopy(data_original)
# check if data is for multilot
if 'lots' not in data:
return data
# moves features to its related items
if 'features' in data:
i = 0
l = len(data['features'])
while i < l:
if data['features'][i]['featureOf'] == 'lot':
for lot in data['lots']:
if lot['id'] == data['features'][i]['relatedItem']:
if 'features' not in lot:
lot['features'] = []
lot['features'].append(data['features'].pop(i))
l = l - 1
i = i - 1
break
if data['features'][i]['featureOf'] == 'item':
for item in data['items']:
if item['id'] == data['features'][i]['relatedItem']:
if 'features' not in item:
item['features'] = []
item['features'].append(data['features'].pop(i))
l = l - 1
i = i - 1
break
i = i + 1
if 'features' in data:
if len(data['features']) == 0:
del data['features']
# moves items to its related lots
i = 0
l = len(data['items'])
while i < l:
for lot in data['lots']:
if lot['id'] == data['items'][i]['relatedLot']:
if 'items' not in lot:
lot['items'] = []
lot['items'].append(data['items'].pop(i))
l = l - 1
i = i - 1
break
i = i + 1
del data['items']
if 'milestones' not in data:
return data
# moves milestones to its related lots
i = 0
l = len(data['milestones'])
while i < l:
for lot in data['lots']:
if lot['id'] == data['milestones'][i]['relatedLot']:
if 'milestones' not in lot:
lot['milestones'] = []
lot['milestones'].append(data['milestones'].pop(i))
l = l - 1
i = i - 1
break
i = i + 1
del data['milestones']
return data
def split_agreementDuration(str, type):
if type in 'year':
year_temp = str.split('Y', 1)
value = year_temp[0].split('P', 1)
elif type in 'month':
month_temp = str.split('M', 1)
value = month_temp[0].split('Y', 1)
else:
day_temp = str.split('D', 1)
value = day_temp[0].split('M', 1)
return value[1]
def convert_date_to_string_contr(date):
date = dateutil.parser.parse(date)
date = date.strftime("%d.%m.%Y %H:%M:%S")
return date
def get_value_minimalStepPercentage(value):
value = value / 100
return value
def set_value_minimalStepPercentage(value):
value = value * 100
return value
def convert_esco__float_to_string(number):
return '{0:.5f}'.format(float(number))
def convert_string_to_float(number):
return float(number)
def download_file(url, file_name, output_dir):
urllib.urlretrieve(url, ('{}/{}'.format(output_dir, file_name)))
def parse_complaintPeriod_date(date_string):
date_str = datetime.strptime(date_string, "%d.%m.%Y %H:%M")
date = datetime(date_str.year, date_str.month, date_str.day, date_str.hour, date_str.minute, date_str.second,
date_str.microsecond)
date = TZ.localize(date).isoformat()
return date
def parse_deliveryPeriod_date1(date):
date = dateutil.parser.parse(date)
date = date.strftime("%d.%m.%Y")
return date
def parse_deliveryPeriod_date(date_string):
# date_str = datetime.strptime(date_string, "%Y-%m-%dT%H:%M:%S+03:00")
if '+03' in date_string:
date_str = datetime.strptime(date_string, "%Y-%m-%dT%H:%M:%S+03:00")
else:
date_str = datetime.strptime(date_string, "%Y-%m-%dT%H:%M:%S+02:00")
date = datetime(date_str.year, date_str.month, date_str.day)
date = date.strftime("%d.%m.%Y")
return date
def split_joinvalue(str_value):
str_value = str_value.split()
str_value = ''.join(str_value)
print(str_value)
str_value = str_value.replace(" ", "")
return str_value
|
python
|
import sys
sys.path.append("C:\Program Files\Vicon\Nexus2.1\SDK\Python")
import ViconNexus
import numpy as np
import smooth
vicon = ViconNexus.ViconNexus()
subject = vicon.GetSubjectNames()[0]
print 'Gap filling for subject ', subject
markers = vicon.GetMarkerNames(subject)
frames = vicon.GetFrameCount()
# Get data from nexus
print 'Populating data matrix'
rawData = np.zeros((frames,len(markers)*3))
for i in range(0,len(markers)):
rawData[:,3*i-3], rawData[:,3*i-2], rawData[:,3*i-1], E = vicon.GetTrajectory(subject,markers[i])
rawData[np.asarray(E)==0,3*i-3] = np.nan;
rawData[np.asarray(E)==0,3*i-2] = np.nan;
rawData[np.asarray(E)==0,3*i-1] = np.nan;
# Run low dimensional smoothing
Y = smooth.smooth(rawData,tol =1e-2,sigR=1e-3,keepOriginal=True)
print 'Writing new trajectories'
#Create new smoothed trjectories
for i in range(0,len(markers)):
E = np.ones((len(E),1)).tolist();
vicon.SetTrajectory(subject,markers[i],Y[:,3*i-3].tolist(),Y[:,3*i-2].tolist(),Y[:,3*i-1].tolist(),E)
print 'Done'
|
python
|
from jinja2 import DictLoader, Environment
import argparse
import json
import importlib
import random
import string
HEADER = """
#pragma once
#include <rapidjson/rapidjson.h>
#include <rapidjson/writer.h>
#include <rapidjson/reader.h>
#include <iostream>
#include <string>
#include <vector>
#include <map>
struct {{ schema["title"] }}
{
{{ schema["title"] }}()
{
{%- for property_name, property_dict in schema["properties"].items() %}
PropertyMap["{{ property_dict["title"] }}"] = &{{ property_dict["title"] }};
{%- endfor %}
}
template<typename OutputStream>
void Write(rapidjson::Writer<OutputStream>& writer)
{
writer.StartObject();
{%- for property_name, property_dict in schema["properties"].items() %}
writer.Key("{{ property_dict["title"] }}");
{{ get_writer_code(property_dict) }}
{%- endfor %}
writer.EndObject();
}
{%- for property_name, property_dict in schema["properties"].items() %}
{{ get_property_type(property_dict) }} {{ property_dict["title"] }};
{%- endfor %}
bool operator==(const {{ schema["title"] }}& rhs) const
{
bool equals = true;
{%- for property_name, property_dict in schema["properties"].items() %}
equals = equals && {{ property_dict["title"] }} == rhs.{{ property_dict["title"] }};
{%- endfor %}
return equals;
}
std::map<std::string, void*> PropertyMap;
};
struct {{ schema["title"] }}Handler
{
{{ schema["title"] }}Handler( {{ schema["title"] }}* ParseObject)
{
Object = ParseObject;
}
template<typename T>
void WriteProperty(const T& Value)
{
T& Property = *reinterpret_cast<T*>(CurrentProperty);
Property = Value;
CurrentProperty = nullptr;
CurrentPropertyName = "";
}
template<typename T>
void WriteArray(const T& Value)
{
std::vector<T>& PropertyArray = *reinterpret_cast<std::vector<T>*>(CurrentProperty);
PropertyArray.push_back(Value);
}
template<typename T>
bool WriteType(const T& Value)
{
if(!CurrentProperty)
{
std::cerr << "WriteType no CurrentProperty" << std::endl; return true;
return false;
}
if(CurrentArray)
{
WriteArray(Value);
return true;
}
else
{
WriteProperty(Value);
return true;
}
return false;
}
bool Null() { std::cout << "Null()" << std::endl; return true; }
bool Bool(bool b)
{
return WriteType(b);
}
bool Int(int i)
{
return WriteType(i);
}
bool Uint(unsigned u)
{
return WriteType(u);
}
bool Int64(int64_t i)
{
return WriteType(i);
}
bool Uint64(uint64_t u)
{
return WriteType(u);
}
bool Double(double d)
{
return WriteType(d);
}
bool RawNumber(const char* str, rapidjson::SizeType length, bool copy)
{
std::cout << "Number(" << str << ", " << length << ", " << "boolalpha" << copy << ")" << std::endl;
return true;
}
bool String(const char* str, rapidjson::SizeType length, bool copy)
{
if(!CurrentProperty)
{
std::cerr << "String no CurrentProperty" << std::endl; return true;
return false;
}
if(CurrentArray)
{
std::string value = std::string(str, length);
WriteArray(value);
return true;
}
else
{
std::string& PropertyString = *reinterpret_cast<std::string*>(CurrentProperty);
PropertyString = std::string(str, length);
CurrentProperty = nullptr;
CurrentPropertyName = "";
}
return true;
}
bool Key(const char* str, rapidjson::SizeType length, bool copy)
{
const auto it = Object->PropertyMap.find(str);
if(it != Object->PropertyMap.end())
{
CurrentProperty = it->second;
CurrentPropertyName = str;
return true;
}
else
{
std::cerr << "Key Property Not Found:" << str << std::endl; return true;
return false;
}
}
bool StartObject() { std::cout << "StartObject()" << std::endl; return true; }
bool EndObject(rapidjson::SizeType memberCount) { std::cout << "EndObject(" << memberCount << ")" << std::endl; return true; }
bool StartArray()
{
if(CurrentPropertyName.empty())
{
std::cerr << "StartArray Property " << CurrentPropertyName << " not found!" << std::endl;
return false;
}
const auto it = Object->PropertyMap.find(CurrentPropertyName);
if(it != Object->PropertyMap.end())
{
CurrentArray = it->second;
return true;
}
else
{
std::cerr << "StartArray Property " << CurrentPropertyName << " not found!" << std::endl;
return false;
}
}
bool EndArray(rapidjson::SizeType elementCount)
{
CurrentProperty = nullptr;
CurrentArray = nullptr;
return true;
}
{{ schema["title"] }}* Object = nullptr;
void* CurrentProperty = nullptr;
void* CurrentArray= nullptr;
std::string CurrentPropertyName;
};
"""
TEST = """
#include "Json{{ schema["title"] }}.h"
int main(int argc, char** argv)
{
{{ schema["title"] }} WriteObject;
{%- for property_name, property_dict in schema["properties"].items() %}
WriteObject.{{ property_dict["title"] }} = {{ get_random_property(property_dict) }};
{%- endfor %}
{{ schema["title"] }} ReadObject;
rapidjson::StringBuffer StringBuf;
rapidjson::Writer<rapidjson::StringBuffer> Writer(StringBuf);
WriteObject.Write(Writer);
{{ schema["title"] }}Handler Handler(&ReadObject);
rapidjson::Reader Reader;
rapidjson::StringStream StringStream(StringBuf.GetString());
Reader.Parse(StringStream, Handler);
bool Equals = WriteObject == ReadObject;
if(!Equals)
{
std::cerr << "Objects not equals." << std::endl;
return 1;
}
else
{
std::cout << "Objects are equals." << std::endl;
}
return 0;
}
"""
writer_function_map = {
"integer" : "Int",
"number" : "Double",
"boolean" : "Bool"
}
def get_writer_code(prop : dict, title = None):
type_name = prop["type"]
if title is None: title = prop["title"]
if type_name in writer_function_map:
return "writer." + writer_function_map[type_name] + "(" + title + ");"
elif type_name == "string":
return "writer.String("+ prop["title"] + ".c_str());"
elif type_name == "array":
write_array = "writer.StartArray();\n"
write_array += " for( auto it = " + title + ".begin(); it != " + title + ".end(); ++it)\n"
write_array += " {\n"
write_array += " " + get_writer_code(prop["items"], "(*it)") + "\n"
write_array += " }\n"
write_array += " writer.EndArray(" + title + ".size());"
return write_array
return None
# types
basic_type_map = {
"integer" : "int32_t",
"string" : "std::string",
"number" : "double",
"boolean" : "bool"
}
def get_property_type(prop : dict):
type_name = prop["type"]
if type_name in basic_type_map:
return basic_type_map[type_name]
if type_name == "array":
return "std::vector<" + get_property_type(prop["items"]) + ">"
return "void"
# test methods
def random_string(len=10):
letters = string.ascii_lowercase
s = ''.join(random.choice(letters) for i in range(len))
return "\"" + s + "\""
def random_int():
return random.randint(0,1024)
def random_double():
return random.randint(0,1024)
def random_bool():
return random.choice(["true", "false"])
random_function_map = {
"integer" : random_int,
"string" : random_string,
"number" : random_double,
"boolean" : random_bool
}
def get_random_property(prop):
type_name = prop["type"]
if type_name in random_function_map:
return random_function_map[type_name]()
if type_name == "array":
array =[str(get_random_property(prop["items"])) for i in range(10)]
return "{" +",".join(array) + "}"
return "void"
templates = Environment(loader=DictLoader(globals()))
def generate_header(schema_class):
print(schema_class.schema_json())
template = templates.get_template("HEADER")
schema = json.loads(schema_class.schema_json())
rendered = template.render(
{ "schema" : schema,
"get_property_type" : get_property_type,
"get_writer_code" : get_writer_code,
}
)
header = open("Json"+schema["title"]+".h", "w+")
header.write(rendered)
header.close()
def generate_test(schema_class):
template = templates.get_template("TEST")
schema = json.loads(schema_class.schema_json())
rendered = template.render(
{ "schema" : schema,
"get_property_type" : get_property_type,
"get_random_property" : get_random_property
}
)
test = open("Json"+schema["title"]+"Test.cpp", "w+")
test.write(rendered)
test.close()
if __name__== "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--package", help="Package that needs to be loaded to access your type")
parser.add_argument("--typename", help="Name of the type to generate code from.")
args = parser.parse_args();
module = None
if args.package != None:
print("Loading %s" %(args.package))
module = importlib.import_module(args.package)
if args.typename != None:
generate_header(getattr(module,args.typename))
generate_test(getattr(module,args.typename))
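# Usage sketch (module, class and script names are hypothetical): given a pydantic-style
# model class exposing schema_json(), e.g. `Vehicle` inside a package `my_models`, run
#
#   python json_codegen.py --package my_models --typename Vehicle
#
# which writes JsonVehicle.h and JsonVehicleTest.cpp into the current directory.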
|
python
|
def summary(p,c=10,x=5):
print('-' * 30)
print(f'Value Summary'.center(30))
print('-' * 30)
print(f'{"analyzed price:"} \t{coins(p)}')
print(f"{'Half-price: '} \t{half(p, True)}")
print(f'{"double the price: "}\t{double(p, True)}')
print(f'{c}% {"increase: ":} \t{increase(p, c, True)}')
print(f'{x}% {"reduction: "} \t{reduction(p, x, True)}')
print('-'*30)
def increase(p = 0, por= 0, formato=False):
# increase by the desired %
"""
=> Function that increases the price by the desired percentage
: param p: original price
: param por: desired percentage
: param format: formatting if desired
: return: returns the price to the variable
"""
p = ((p / 100) * por) + p
return p if formato is False else coins(p)
def reduction(p = 0, por= 0, formato=False):
"""
=> Function that decreases the price by the desired percentage
:param p: Original price
:param por: desired percentage
:param formato: formatting if desired
:return: returns the price to the variable
"""
p = p - ((p / 100) * por)
return p if not formato else coins(p)
# reduce by the desired %
def double(p = 0, formato=False):
"""
=> Function that doubles the price
:param p: Original price
:param formato: formatting if desired
:return: returns the price to the variable
"""
p = p * 2
return p if not formato else coins(p)
# doubles the price
def half(p = 0, formato=False):
"""
=> Function that cuts the price in half
:param p: Original price
:param formato: formatting if desired
:return: returns the price to the variable
"""
p = p / 2
# Half-Price
return p if formato is False else coins(p)
def coins(p = 0, moeda = 'R$'):
"""
=> Formatting function
:param p: Original price
:param moeda: currency
:return: returns the formatted price
"""
return f'{moeda}{p:>.2f}'.replace('.',',')
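# Usage sketch (added for illustration): print the formatted breakdown for an arbitrary
# price of R$10.00 with the default 10% increase and 5% reduction.
if __name__ == '__main__':
    summary(10.0)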
|
python
|
from ._sha512 import sha384
|
python
|
from app import app, api
from flask import request
from flask_restful import Resource
import json
import pprint
import os
import subprocess
import traceback
import logging
class WelcomeController(Resource):
def get(self):
return {'welcome': "welcome, stranger!"}
api.add_resource(WelcomeController, '/')
|
python
|
import os
from dotenv import dotenv_values
config = {
**dotenv_values(os.path.join(os.getcwd(), ".env")),
**os.environ
}
VERSION = "0.0.0-alfa"
APP_HOST = config['APP_HOST']
APP_PORT = int(config['APP_PORT'])
# bool() of a non-empty string is always True, so parse the debug flag explicitly
APP_DEBUG = str(config['APP_DEBUG']).strip().lower() in ('1', 'true', 'yes', 'on')
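# Example .env contents this module expects (values are illustrative):
#   APP_HOST=0.0.0.0
#   APP_PORT=8000
#   APP_DEBUG=true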
|
python
|
# -*- coding: utf-8 -*-
""" Views for the stats application. """
# standard library
# django
# models
from .models import Stat
# views
from base.views import BaseCreateView
from base.views import BaseDeleteView
from base.views import BaseDetailView
from base.views import BaseListView
from base.views import BaseUpdateView
# forms
from .forms import StatForm
class StatListView(BaseListView):
"""
View for displaying a list of stats.
"""
model = Stat
template_name = 'stats/list.pug'
permission_required = 'stats.view_stat'
class StatCreateView(BaseCreateView):
"""
A view for creating a single stat
"""
model = Stat
form_class = StatForm
template_name = 'stats/create.pug'
permission_required = 'stats.add_stat'
class StatDetailView(BaseDetailView):
"""
A view for displaying a single stat
"""
model = Stat
template_name = 'stats/detail.pug'
permission_required = 'stats.view_stat'
class StatUpdateView(BaseUpdateView):
"""
A view for editing a single stat
"""
model = Stat
form_class = StatForm
template_name = 'stats/update.pug'
permission_required = 'stats.change_stat'
class StatDeleteView(BaseDeleteView):
"""
A view for deleting a single stat
"""
model = Stat
permission_required = 'stats.delete_stat'
template_name = 'stats/delete.pug'
|
python
|
"""Session class and utility functions used in conjunction with the session."""
from .session import Session
from .session_manager import SessionManager
__all__ = ["Session", "SessionManager"]
|
python
|
''' 046 Write a program that shows a countdown on screen for the fireworks going off, going from 10 down to 0, with a 1-second pause between each number '''
from time import sleep
for c in range(10, -1, -1):
print(c)
sleep(1)
print('Fogos !!!!!')
|
python
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Author: Shun Arahata
"""
Imitation learning environment
"""
import pathlib
# import cupy as xp
import sys
import numpy as xp
current_dir = pathlib.Path(__file__).resolve().parent
sys.path.append(str(current_dir) + '/../mpc')
sys.path.append(str(current_dir) + '/../')
from box_ddp import BoxDDP
from pendulum import PendulumDx
from chainer import functions as F
from util import QuadCost, chainer_diag
class IL_Env:
"""
Imitation learning Environmn class
"""
def __init__(self, env, lqr_iter=500, mpc_T=20):
"""
:param env:
:param lqr_iter:
:param mpc_T:
"""
self.env = env
if self.env == 'pendulum':
self.true_dx = PendulumDx()
else:
assert False
self.lqr_iter = lqr_iter
self.mpc_T = mpc_T
self.train_data = None
self.val_data = None
self.test_data = None
@staticmethod
def sample_xinit(n_batch=1):
""" random sampling x_init
:param n_batch:
:return:
"""
def uniform(shape, low, high):
"""
:param shape:
:param low:
:param high:
:return:
"""
r = high - low
return xp.random.rand(shape) * r + low
th = uniform(n_batch, -(1 / 2) * xp.pi, (1 / 2) * xp.pi)
# th = uniform(n_batch, -xp.pi, xp.pi)
thdot = uniform(n_batch, -1., 1.)
xinit = xp.stack((xp.cos(th), xp.sin(th), thdot), axis=1)
return xinit
def populate_data(self, n_train, n_val, n_test, seed=0):
"""
:param n_train:
:param n_val:
:param n_test:
:param seed:
:return:
"""
xp.random.seed(seed)
n_data = n_train + n_val + n_test
xinit = self.sample_xinit(n_batch=n_data)
print(xinit.shape)
# for (1,0,0) into the dataset
'''
n_init_zero = int(n_train/4)
xinit[n_init_zero][0] = 1.0
xinit[n_init_zero][1] = 0.0
xinit[n_init_zero][2] = 0.0
'''
true_q, true_p = self.true_dx.get_true_obj()
# self.mpc defined later
true_x_mpc, true_u_mpc = self.mpc(self.true_dx, xinit, true_q, true_p, update_dynamics=True)
true_x_mpc = true_x_mpc.array
true_u_mpc = true_u_mpc.array
tau = xp.concatenate((true_x_mpc, true_u_mpc), axis=2)
tau = xp.transpose(tau, (1, 0, 2))
self.train_data = tau[:n_train]
self.val_data = tau[n_train:n_train + n_val]
self.test_data = tau[-n_test:]
def mpc(self, dx, xinit, q, p, u_init=None, eps_override=None,
lqr_iter_override=None, update_dynamics=False):
"""
:param dx:
:param xinit:
:param q:
:param p:
:param u_init:
:param eps_override:
:param lqr_iter_override:
:return:
"""
n_batch = xinit.shape[0]
n_sc = self.true_dx.n_state + self.true_dx.n_ctrl
Q = chainer_diag(q)
Q = F.expand_dims(Q, axis=0)
Q = F.expand_dims(Q, axis=0)
Q = F.repeat(Q, self.mpc_T, axis=0)
Q = F.repeat(Q, n_batch, axis=1)
p = F.expand_dims(p, axis=0)
p = F.expand_dims(p, axis=0)
p = F.repeat(p, self.mpc_T, axis=0)
p = F.repeat(p, n_batch, axis=1)
if eps_override:
eps = eps_override
else:
eps = self.true_dx.mpc_eps
if lqr_iter_override:
lqr_iter = lqr_iter_override
else:
lqr_iter = self.lqr_iter
assert len(Q.shape) == 4
assert len(p.shape) == 3
solver = BoxDDP(
T=self.mpc_T, u_lower=self.true_dx.lower, u_upper=self.true_dx.upper,
n_batch=n_batch, n_state=self.true_dx.n_state, n_ctrl=self.true_dx.n_ctrl,
u_init=u_init, eps=eps, max_iter=lqr_iter, verbose=False,
exit_unconverged=False, detach_unconverged=True,
line_search_decay=self.true_dx.linesearch_decay,
max_line_search_iter=self.true_dx.max_linesearch_iter,
update_dynamics=update_dynamics
)
x_mpc, u_mpc, objs_mpc = solver((xinit, QuadCost(Q, p), dx))
'''
g = c.build_computational_graph(u_mpc)
with open('graph.dot', 'w') as o:
o.write(g.dump())
assert False
'''
return x_mpc, u_mpc
def mpc_Q(self, dx, xinit, Q, p, u_init=None, eps_override=None,
lqr_iter_override=None, update_dynamics=False):
"""
:param dx:
:param xinit:
:param q:
:param p:
:param u_init:
:param eps_override:
:param lqr_iter_override:
:return:
"""
n_batch = xinit.shape[0]
n_sc = self.true_dx.n_state + self.true_dx.n_ctrl
Q = F.expand_dims(Q, axis=0)
Q = F.expand_dims(Q, axis=0)
Q = F.repeat(Q, self.mpc_T, axis=0)
Q = F.repeat(Q, n_batch, axis=1)
p = F.expand_dims(p, axis=0)
p = F.expand_dims(p, axis=0)
p = F.repeat(p, self.mpc_T, axis=0)
p = F.repeat(p, n_batch, axis=1)
if eps_override:
eps = eps_override
else:
eps = self.true_dx.mpc_eps
if lqr_iter_override:
lqr_iter = lqr_iter_override
else:
lqr_iter = self.lqr_iter
assert len(Q.shape) == 4
assert len(p.shape) == 3
solver = BoxDDP(
T=self.mpc_T, u_lower=self.true_dx.lower, u_upper=self.true_dx.upper,
n_batch=n_batch, n_state=self.true_dx.n_state, n_ctrl=self.true_dx.n_ctrl,
u_init=u_init, eps=eps, max_iter=lqr_iter, verbose=False,
exit_unconverged=False, detach_unconverged=True,
line_search_decay=self.true_dx.linesearch_decay,
max_line_search_iter=self.true_dx.max_linesearch_iter,
update_dynamics=update_dynamics
)
x_mpc, u_mpc, objs_mpc = solver((xinit, QuadCost(Q, p), dx))
'''
g = c.build_computational_graph(u_mpc)
with open('graph.dot', 'w') as o:
o.write(g.dump())
assert False
'''
return x_mpc, u_mpc
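# Usage sketch (illustrative; relies on the BoxDDP/PendulumDx modules imported above):
#
#   env = IL_Env('pendulum', lqr_iter=500, mpc_T=20)
#   env.populate_data(n_train=64, n_val=16, n_test=16, seed=0)
#   # env.train_data now holds (n_train, mpc_T, n_state + n_ctrl) expert trajectories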
|
python
|
# -*- Python -*-
# Copyright 2021 The Verible Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Bazel rule to wrap sh_test with a wrapper loading runfiles library prior to execution
"""
def sh_test_with_runfiles_lib(name, srcs, size, args, data, deps = []):
"""sh_test wrapper that loads bazel's runfiles library before calling the test.
This is necessary because on Windows, runfiles are not symlinked like on Unix and
are thus not available from the path returned by $(location Label). The runfiles
library provides the rlocation function, which converts a runfile path (from $location)
to the fullpath of the file.
Args:
name: sh_test's name
srcs: sh_test's srcs, must be an array of a single file
size: sh_test's size
args: sh_test's args
data: sh_test's data
deps: sh_test's deps
"""
if len(srcs) > 1:
fail("you must specify exactly one file in 'srcs'")
# Add the runfiles library to dependencies
if len(deps) == 0:
deps = ["@bazel_tools//tools/bash/runfiles"]
else:
deps.append("@bazel_tools//tools/bash/runfiles")
# Replace first arguments with location of the main script to run
# and add script to run to sh_test's data
args = ["$(location " + srcs[0] + ")"] + args
data += srcs
native.sh_test(
name = name,
srcs = ["//bazel:sh_test_with_runfiles_lib.sh"],
size = size,
args = args,
data = data,
deps = deps,
)
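# A minimal usage sketch (the load path, target and file names below are assumptions
# made for illustration, not part of this repository):
#
#   load("//bazel:sh_test_with_runfiles_lib.bzl", "sh_test_with_runfiles_lib")
#
#   sh_test_with_runfiles_lib(
#       name = "my_tool_test",
#       srcs = ["my_tool_test.sh"],
#       size = "small",
#       args = ["$(location :my_tool)"],
#       data = [":my_tool"],
#   )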
|
python
|
# https://github.com/ArtemNikolaev/gb-hw/issues/23
def run(array):
return [
array[i]
for i in range(1, len(array))
if array[i] > array[i-1]
]
test_input = [300, 2, 12, 44, 1, 1, 4, 10, 7, 1, 78, 123, 55]
print(run(test_input))
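# Expected output for the test input above: [12, 44, 4, 10, 78, 123]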
|
python
|
import glob
from hdf5_getters import *
import os
import numpy as np
from collections import Counter
from music_utils import *
tags_list = []
data_path = "/mnt/snap/data/"
count = 0
for root, dirs, files in os.walk(data_path):
files = glob.glob(os.path.join(root, '*h5'))
#if count > 1000: break
for f in files:
h5 = open_h5_file_read(f)
tags = get_artist_mbtags(h5).tolist()
tags_list += tags
#count += 1
h5.close()
print(Counter(tags_list).most_common(100))
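# Prints the 100 most common MusicBrainz artist tags found across the .h5 files.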
|
python
|
#!/usr/bin/env python3
import time
from data_output import DataOutput
from html_downloader import HtmlDownloader
from html_parser import HtmlParser
__author__ = 'Aollio Hou'
__email__ = '[email protected]'
class Spider:
def __init__(self):
self.downloader = HtmlDownloader()
self.parser = HtmlParser()
self.output = DataOutput()
def crawl(self, root_url):
content = self.downloader.download(root_url)
urls = self.parser.parse_url(root_url, content)
for url in urls:
try:
# http://service.library.mtime.com/Movie.api
# ?Ajax_CallBack=true
# &Ajax_CallBackType=Mtime.Library.Services
# &Ajax_CallBackMethod=GetMovieOverviewRating
# &Ajax_CrossDomain=1
# &Ajax_RequestUrl=http%3A%2F%2Fmovie.mtime.com%2F246526%2F&t=201710117174393728&Ajax_CallBackArgument0=246526
t = time.strftime('%Y%m%d%H%M%S3282', time.localtime())
rank_url = 'http://service.library.mtime.com/Movie.api' \
'?Ajax_CallBack=true' \
'&Ajax_CallBackType=Mtime.Library.Services' \
'&Ajax_CallBackMethod=GetMovieOverviewRating' \
'&Ajax_CrossDomain=1' \
'&Ajax_RequestUrl=%s' \
'&t=%s' \
'&Ajax_CallbackArgument0=%s' % (url[0].replace('://', '%3A%2F%2F')[:-1], t, url[1])
rank_content = self.downloader.download(rank_url)
if rank_content is None:
print('None')
data = self.parser.parse_json(rank_url, rank_content)
self.output.store_data(data)
except Exception as e:
raise e
# print(e)
# print('Crawl failed')
self.output.output_end()
print('Crawl finish')
def main():
spider = Spider()
spider.crawl('http://theater.mtime.com/China_Beijing/')
if __name__ == '__main__':
main()
|
python
|
from django import forms
from .models import Project
class ProjectForm(forms.ModelForm):
class Meta:
model = Project
fields = ["title", "describe", "technology"]
|
python
|
#!/usr/bin/env python
# encoding: utf-8
"""Terminal UI for histdata_downloader project."""
import os
import sys
import logging
import subprocess
from datetime import date
import time
import npyscreen
from histdata_downloader.logger import log_setup
from histdata_downloader.histdata_downloader import load_available_pairs
logger = logging.getLogger(__name__)
class TestApp(npyscreen.NPSAppManaged):
def onStart(self):
logger.debug("On start")
self.registerForm("MAIN", MainForm())
def onCleanExit(self):
logger.debug("onCleanExit called")
class MainForm(npyscreen.ActionFormV2):
def create(self):
logger.debug("main form method called.")
self.type = self.add(npyscreen.TitleSelectOne, name='type',
max_height=2, values=['M1', 'ticks'],
scroll_exit=True)
self.date_start = self.add(npyscreen.TitleDateCombo, name="Date start")
self.date_start.value = date(2019, 1, 1)
self.date_end = self.add(npyscreen.TitleDateCombo, name="Date end")
self.instruments = self.add(npyscreen.TitleMultiSelect,
name='instruments', max_height=5,
values=load_available_pairs(),
scroll_exit=True)
self.select_all = self.add(SelectAllButton,
name='select all', relx=20)
self.unselect_all = self.add(UnselectAllButton,
name='unselect all', relx=20)
self.output_path = self.add(npyscreen.TitleFilenameCombo,
name="Output path", label=True)
self.verbosity = self.add(npyscreen.TitleSelectOne, name='verbosity',
max_height=3, values=['DEBUG',
'INFO',
'WARNING'],
scroll_exit=True, value=1)
self.command = self.add(npyscreen.TitleFixedText, name="cmd",
editable=False,
value='histdata_downloader download')
self.launch_button = self.add(LauchButton, name='Run', relx=50)
self.log = self.add(Output, name='Output',
editable=True, scroll_exit=True,
values=['Waiting...'])
def while_editing(self, *args):
verb = self.selected_verbosity[0]
cmd = "histdata_downloader -v {} download".format(verb)
if self.type.value:
cmd += " -t %s " % self.selected_type[0]
if self.date_end.value:
cmd += " -ds {} -de {}".format(self.date_start.value,
self.date_end.value)
if self.output_path.value:
cmd += " -o {}".format(self.output_path.value)
if self.instruments.value:
sub_cmd = ' '.join(['-i %s' % i for i in self.selected_instruments])
cmd += ' ' + sub_cmd
self.command.value = cmd
self.command.update()
def afterEditing(self):
self.parentApp.setNextForm(None)
def return_as_config(self):
logger.debug('return_as_config method called.')
config = {'type' : self.type.values[self.type.value[0]],
'date_start': self.date_start.value,
'date_end': self.date_end.value,
'instruments': self.selected_instruments,
'output_path': self.output_path.value}
return config
@property
def selected_instruments(self):
name_field = lambda idx : self.instruments.values[idx]
return list(map(name_field, self.instruments.value))
@property
def selected_type(self):
name_field = lambda idx : self.type.values[idx]
return list(map(name_field, self.type.value))
@property
def selected_verbosity(self):
name_field = lambda idx : self.verbosity.values[idx]
return list(map(name_field, self.verbosity.value))
def perform(cmd, log):
with subprocess.Popen(cmd, shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE) as proc:
for line in iter(proc.stdout.readline, b''):
log.values.append(line.decode('ascii'))
log.display()
for line in iter(proc.stderr.readline, b''):
log.values.append(line.decode('ascii'))
log.display()
class LauchButton(npyscreen.ButtonPress):
def whenPressed(self):
self.parent.log.values = ['Executing %s.' % self.parent.command.value]
self.parent.log.display()
perform(self.parent.command.value, self.parent.log)
class SelectAllButton(npyscreen.ButtonPress):
def whenPressed(self):
instr = self.parent.instruments
instr.value = [x for x in range(len(instr.values))]
instr.display()
class UnselectAllButton(npyscreen.ButtonPress):
def whenPressed(self):
instr = self.parent.instruments
instr.value = []
        instr.display()
class Output(npyscreen.BoxTitle):
_contained_widget = npyscreen.MultiLine
if __name__ == "__main__":
App = TestApp()
App.run()
|
python
|
# Copyright (c) The PyAMF Project.
# See LICENSE.txt for details.
"""
Jython example AMF server and client with Swing interface.
@see: U{Jython<http://pyamf.org/wiki/JythonExample>} wiki page.
@since: 0.5
"""
import logging
from wsgiref.simple_server import WSGIServer, WSGIRequestHandler
from pyamf.remoting.gateway.wsgi import WSGIGateway
from pyamf.remoting.client import RemotingService
import java.lang as lang
import javax.swing as swing
import java.awt as awt
class AppGUI(object):
"""
Swing graphical user interface.
"""
def __init__(self, title, host, port, service):
# create window
win = swing.JFrame(title, size=(800, 480))
win.setDefaultCloseOperation(swing.JFrame.EXIT_ON_CLOSE)
win.contentPane.layout = awt.BorderLayout(10, 10)
# add scrollable textfield
status = swing.JTextPane(preferredSize=(780, 400))
status.setAutoscrolls(True)
status.setEditable(False)
status.setBorder(swing.BorderFactory.createEmptyBorder(20, 20, 20, 20))
paneScrollPane = swing.JScrollPane(status)
paneScrollPane.setVerticalScrollBarPolicy(
swing.JScrollPane.VERTICAL_SCROLLBAR_AS_NEEDED)
win.contentPane.add(paneScrollPane, awt.BorderLayout.CENTER)
# add server button
self.started = "Start Server"
self.stopped = "Stop Server"
self.serverButton = swing.JButton(self.started, preferredSize=(150, 20),
actionPerformed=self.controlServer)
# add client button
self.clientButton = swing.JButton("Invoke Method", preferredSize=(150, 20),
actionPerformed=self.runClient)
self.clientButton.enabled = False
# position buttons
buttonPane = swing.JPanel()
buttonPane.setLayout(swing.BoxLayout(buttonPane, swing.BoxLayout.X_AXIS))
buttonPane.setBorder(swing.BorderFactory.createEmptyBorder(0, 10, 10, 10))
buttonPane.add(swing.Box.createHorizontalGlue())
buttonPane.add(self.serverButton)
buttonPane.add(swing.Box.createRigidArea(awt.Dimension(10, 0)))
buttonPane.add(self.clientButton)
win.contentPane.add(buttonPane, awt.BorderLayout.SOUTH)
# add handler that writes log messages to the status textfield
txtHandler = TextFieldLogger(status)
logger = logging.getLogger("")
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(message)s')
txtHandler.setFormatter(formatter)
logger.addHandler(txtHandler)
# setup server
self.service_name = service
self.url = "http://%s:%d" % (host, port)
self.server = ThreadedAmfServer(host, port, self.service_name)
# center and display window on the screen
win.pack()
us = win.getSize()
them = awt.Toolkit.getDefaultToolkit().getScreenSize()
newX = (them.width - us.width) / 2
newY = (them.height - us.height) / 2
win.setLocation(newX, newY)
win.show()
def controlServer(self, event):
"""
Handler for server button clicks.
"""
if event.source.text == self.started:
logging.info("Created AMF gateway at %s" % self.url)
event.source.text = self.stopped
self.clientButton.enabled = True
self.server.start()
else:
logging.info("Terminated AMF gateway at %s\n" % self.url)
event.source.text = self.started
self.clientButton.enabled = False
self.server.stop()
def runClient(self, event):
"""
Invoke a method on the server using an AMF client.
"""
self.client = ThreadedAmfClient(self.url, self.service_name)
self.client.invokeMethod("Hello World!")
class ThreadedAmfClient(object):
"""
Threaded AMF client that doesn't block the Swing GUI.
"""
def __init__(self, url, serviceName):
self.gateway = RemotingService(url, logger=logging)
self.service = self.gateway.getService(serviceName)
def invokeMethod(self, param):
"""
Invoke a method on the AMF server.
"""
class ClientThread(lang.Runnable):
"""
Create a thread for the client.
"""
def run(this):
try:
self.service(param)
except lang.InterruptedException:
return
swing.SwingUtilities.invokeLater(ClientThread())
class ThreadedAmfServer(object):
"""
Threaded WSGI server that doesn't block the Swing GUI.
"""
def __init__(self, host, port, serviceName):
services = {serviceName: self.echo}
gw = WSGIGateway(services, logger=logging)
self.httpd = WSGIServer((host, port),
ServerRequestLogger)
self.httpd.set_app(gw)
def start(self):
"""
Start the server.
"""
class WSGIThread(lang.Runnable):
"""
Create a thread for the server.
"""
def run(this):
try:
self.httpd.serve_forever()
except lang.InterruptedException:
return
self.thread = lang.Thread(WSGIThread())
self.thread.start()
def stop(self):
"""
Stop the server.
"""
self.thread = None
def echo(self, data):
"""
Just return data back to the client.
"""
return data
class ServerRequestLogger(WSGIRequestHandler):
"""
Request handler that logs WSGI server messages.
"""
def log_message(self, format, *args):
"""
Log message with debug level.
"""
logging.debug("%s - %s" % (self.address_string(), format % args))
class TextFieldLogger(logging.Handler):
"""
Logging handler that displays PyAMF log messages in the status text field.
"""
def __init__(self, textfield, *args, **kwargs):
self.status = textfield
logging.Handler.__init__(self, *args, **kwargs)
def emit(self, record):
msg = '%s\n' % self.format(record)
doc = self.status.getStyledDocument()
doc.insertString(doc.getLength(), msg, doc.getStyle('regular'))
self.status.setCaretPosition(self.status.getStyledDocument().getLength())
host = "localhost"
port = 8000
service_name = "echo"
title = "PyAMF server/client using Jython with Swing"
if __name__ == "__main__":
from optparse import OptionParser
parser = OptionParser()
parser.add_option("-p", "--port", default=port,
dest="port", help="port number [default: %default]")
parser.add_option("--host", default=host,
dest="host", help="host address [default: %default]")
(opt, args) = parser.parse_args()
app = AppGUI(title, opt.host, int(opt.port), service_name)
|
python
|
from autodisc.systems.lenia.classifierstatistics import LeniaClassifierStatistics
from autodisc.systems.lenia.isleniaanimalclassifier import IsLeniaAnimalClassifier
from autodisc.systems.lenia.lenia import *
|
python
|
# MIT License
#
# Copyright (c) 2017 Anders Steen Christensen
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import print_function
import os
import numpy as np
import qml
import qml.data
from qml.ml.kernels import laplacian_kernel
from qml.ml.math import cho_solve
from qml.ml.representations import get_slatm_mbtypes
from qml.ml.kernels import get_local_kernels_gaussian
from qml.ml.kernels import get_local_kernels_laplacian
def get_energies(filename):
""" Returns a dictionary with heats of formation for each xyz-file.
"""
f = open(filename, "r")
lines = f.readlines()
f.close()
energies = dict()
for line in lines:
tokens = line.split()
xyz_name = tokens[0]
hof = float(tokens[1])
energies[xyz_name] = hof
return energies
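# Each line of hof_qm7.txt is expected to hold an xyz filename followed by a heat of
# formation, e.g. "qm7_0001.xyz  -123.45" (the example values here are illustrative).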
def test_krr_gaussian_local_cmat():
test_dir = os.path.dirname(os.path.realpath(__file__))
# Parse file containing PBE0/def2-TZVP heats of formation and xyz filenames
data = get_energies(test_dir + "/data/hof_qm7.txt")
    # Generate a list of qml.data.Compound() objects
mols = []
for xyz_file in sorted(data.keys())[:1000]:
# Initialize the qml.data.Compound() objects
mol = qml.data.Compound(xyz=test_dir + "/qm7/" + xyz_file)
# Associate a property (heat of formation) with the object
mol.properties = data[xyz_file]
# This is a Molecular Coulomb matrix sorted by row norm
mol.generate_atomic_coulomb_matrix(size=23, sorting="row-norm")
mols.append(mol)
# Shuffle molecules
np.random.seed(666)
np.random.shuffle(mols)
# Make training and test sets
n_test = 100
n_train = 200
training = mols[:n_train]
test = mols[-n_test:]
X = np.concatenate([mol.representation for mol in training])
Xs = np.concatenate([mol.representation for mol in test])
N = np.array([mol.natoms for mol in training])
Ns = np.array([mol.natoms for mol in test])
# List of properties
Y = np.array([mol.properties for mol in training])
Ys = np.array([mol.properties for mol in test])
# Set hyper-parameters
sigma = 724.0
llambda = 10**(-6.5)
K = get_local_kernels_gaussian(X, X, N, N, [sigma])[0]
assert np.allclose(K, K.T), "Error in local Gaussian kernel symmetry"
    # Test below will sometimes fail, since sorting occasionally differs due to close row-norms
# K_test = np.loadtxt(test_dir + "/data/K_local_gaussian.txt")
# assert np.allclose(K, K_test), "Error in local Gaussian kernel (vs. reference)"
# Solve alpha
K[np.diag_indices_from(K)] += llambda
alpha = cho_solve(K,Y)
# Calculate prediction kernel
Ks = get_local_kernels_gaussian(Xs, X, Ns, N, [sigma])[0]
    # Test below will sometimes fail, since sorting occasionally differs due to close row-norms
# Ks_test = np.loadtxt(test_dir + "/data/Ks_local_gaussian.txt")
# assert np.allclose(Ks, Ks_test), "Error in local Gaussian kernel (vs. reference)"
Yss = np.dot(Ks, alpha)
mae = np.mean(np.abs(Ys - Yss))
print(mae)
assert abs(19.0 - mae) < 1.0, "Error in local Gaussian kernel-ridge regression"
def test_krr_laplacian_local_cmat():
test_dir = os.path.dirname(os.path.realpath(__file__))
# Parse file containing PBE0/def2-TZVP heats of formation and xyz filenames
data = get_energies(test_dir + "/data/hof_qm7.txt")
    # Generate a list of qml.data.Compound() objects
mols = []
for xyz_file in sorted(data.keys())[:1000]:
# Initialize the qml.data.Compound() objects
mol = qml.data.Compound(xyz=test_dir + "/qm7/" + xyz_file)
# Associate a property (heat of formation) with the object
mol.properties = data[xyz_file]
# This is a Molecular Coulomb matrix sorted by row norm
mol.generate_atomic_coulomb_matrix(size=23, sorting="row-norm")
mols.append(mol)
# Shuffle molecules
np.random.seed(666)
np.random.shuffle(mols)
# Make training and test sets
n_test = 100
n_train = 200
training = mols[:n_train]
test = mols[-n_test:]
X = np.concatenate([mol.representation for mol in training])
Xs = np.concatenate([mol.representation for mol in test])
N = np.array([mol.natoms for mol in training])
Ns = np.array([mol.natoms for mol in test])
# List of properties
Y = np.array([mol.properties for mol in training])
Ys = np.array([mol.properties for mol in test])
# Set hyper-parameters
sigma = 10**(3.6)
llambda = 10**(-12.0)
K = get_local_kernels_laplacian(X, X, N, N, [sigma])[0]
assert np.allclose(K, K.T), "Error in local Laplacian kernel symmetry"
    # Test below will sometimes fail, since sorting occasionally differs due to close row-norms
# K_test = np.loadtxt(test_dir + "/data/K_local_laplacian.txt")
# assert np.allclose(K, K_test), "Error in local Laplacian kernel (vs. reference)"
# Solve alpha
K[np.diag_indices_from(K)] += llambda
alpha = cho_solve(K,Y)
# Calculate prediction kernel
Ks = get_local_kernels_laplacian(Xs, X, Ns, N, [sigma])[0]
    # Test below will sometimes fail, since sorting occasionally differs due to close row-norms
# Ks_test = np.loadtxt(test_dir + "/data/Ks_local_laplacian.txt")
# assert np.allclose(Ks, Ks_test), "Error in local Laplacian kernel (vs. reference)"
Yss = np.dot(Ks, alpha)
mae = np.mean(np.abs(Ys - Yss))
assert abs(8.7 - mae) < 1.0, "Error in local Laplacian kernel-ridge regression"
if __name__ == "__main__":
test_krr_gaussian_local_cmat()
test_krr_laplacian_local_cmat()
|
python
|
import dash, os, itertools, flask
from dash.dependencies import Input, Output, State
import dash_core_components as dcc
import dash_html_components as html
from pandas_datareader import data as web
from datetime import datetime as dt
import plotly.graph_objs as go
import pandas as pd
from random import randint
import plotly.plotly as py
server = flask.Flask(__name__)
server.secret_key = os.environ.get('secret_key', 'secret')
app = dash.Dash(name = __name__, server = server)
app.config.supress_callback_exceptions = True
#Data variables
cli = pd.read_pickle('Climate_full.p')
models_list = ['GFDL-CM3', 'GISS-E2-R', 'NCAR-CCSM4', 'IPSL-CM5A-LR','MRI-CGCM3']
web = 'https://www.snap.uaf.edu/webshared/jschroder/db/CSV/'
metrics = [ 'avg_fire_size','number_of_fires','total_area_burned']
#Function updating #1 plot => Alfresco plot
def get_data( models , scenarios, metric, domain, cumsum ) :
metric = str(metric)
domain = str(domain)
def _get_metric_cumsum(lnk , cumsum ):
#Extract, average and cumsum the raw data to a dataframe
_df = pd.read_csv(lnk, index_col=0)
_df = _df.ix[2006:].mean(axis = 1)
if 'cumsum' in cumsum :
_df = _df.cumsum(axis=0)
else : pass
return pd.Series.to_frame(_df)
#Build the models full name and the link towards the CSV <= todo build decent database but will do for now
selection = [a[0]+ '_' + a[1] for a in itertools.product(models,scenarios)]
if type(selection) is str : selection = [selection]
rmt = [os.path.join(web, metric, "_".join(['alfresco', metric.replace('_',''), domain.title(), model, '1902_2100.csv' ])) for model in selection]
#Extract dataframe and concat them together
df_list = [_get_metric_cumsum(lnk , cumsum) for lnk in rmt]
df = pd.concat(df_list,axis=1)
df.columns=selection
return df
#Functions used to update #2 and #3 with climate data
def get_cli_data(models, scenarios, dictionnary):
date = pd.date_range('2006','2101',freq='A-DEC')
def _get_climate_annual(_df) :
_df = _df[(_df.index.month >= 3 ) & (_df.index.month <= 9 )]
_df1 = _df.resample("A-DEC").mean()["Boreal"]
_df2 = pd.DataFrame(['NaN'] * len(date),date)
_df3 = pd.concat([_df1 , _df2],axis=1)["Boreal"]
return pd.Series.to_frame(_df3)
#Build the full models name and extract the dataframe
selection = [a[0]+ '_' + a[1] for a in itertools.product(models,scenarios)]
if type(selection) is str : selection = [selection]
df_list = [_get_climate_annual(dictionnary[model]) for model in selection]
df = pd.concat(df_list,axis=1)
df.columns=selection
return df
app.css.append_css({'external_url': 'https://cdn.rawgit.com/plotly/dash-app-stylesheets/2d266c578d2a6e8850ebce48fdb52759b2aef506/stylesheet-oil-and-gas.css'}) # noqa: E501
app.layout = html.Div(
[
html.Div(
[
html.H1(
'ALFRESCO Post Processing Outputs',
className='eight columns',
),
html.Img(
src="https://www.snap.uaf.edu/sites/all/themes/snap_bootstrap/logo.png",
className='one columns',
style={
'height': '80',
'width': '225',
'float': 'right',
'position': 'relative',
},
),
],
className='row'
),
html.Div(
[
html.Div(
[
html.P('Scenarios Selection :'),
dcc.Dropdown(
id='rcp',
options=[
{'label': 'RCP 45 ', 'value': 'rcp45'},
{'label': 'RCP 60 ', 'value': 'rcp60'},
{'label': 'RCP 85 ', 'value': 'rcp85'}
],
multi=True,
value=[]
),
html.P('Models Selection :'),
dcc.Dropdown(
id='model',
options=[{'label': a , 'value' : a} for a in models_list],
multi=True,
value=[]
),
dcc.Checklist(
id='cumsum',
options=[
{'label': 'Cumulative Sum', 'value': 'cumsum'}
],
values=[],
)
],
className='six columns'
),
html.Div(
[
html.P('Metric Selection:'),
dcc.Dropdown(
id='metric',
options=[{'label': a.replace('_',' ').title() , 'value' : a} for a in metrics],
value=None
),
html.P('Domains Selection :'),
dcc.Dropdown(
id='domains',
options=[
{'label': 'Boreal', 'value': 'boreal'},
{'label': 'Tundra', 'value': 'tundra'}
],
value=None
),
],
className='six columns'
),
],
className='row'
),
html.Div(
[
html.Div(
[
dcc.Graph(id='ALF')
],
className='eleven columns'
),
],
),
html.Div(
[
html.Div(
[
dcc.Graph(id='climate_tas')
],
className='eleven columns'
),
],
),
html.Div(
[
html.Div(
[
dcc.Graph(id='climate_pr')
],
className='eleven columns'
),
],
),
],
className='ten columns offset-by-one'
)
@app.callback(
Output('ALF', 'figure'),
[Input('model', 'value'),
Input('rcp', 'value'),
Input('metric', 'value'),
Input('domains', 'value'),
Input('cumsum', 'values')]
)
def update_graph(models, rcp, met_value, domain, cumsum):
if (len(models) > 0 and len(rcp) > 0 and domain is not None and met_value is not None):
df = get_data(models, rcp, met_value, domain, cumsum)
if str(met_value) in ['total_area_burned','avg_fire_size'] :
label = 'Area (km\u00b2)'
else : label = 'Number of fires'
return {
'data': [{
'x': df.index,
'y': df[col],
'name':col,
} for col in df.columns],
'layout' : go.Layout(
height=300,
margin= {'t': 20,'b':30 },
xaxis = {
'ticks' : 'outside',
'ticklen' : 5,
'showgrid' : False,
'linewidth' : 1,
'zeroline' : False,
'zerolinewidth' : 0
},
yaxis = {
'title' : label,
'ticks' : 'outside',
'ticklen' : 5,
'showgrid' : False,
'linewidth' : 1,
'zeroline' : False,
'zerolinewidth' : 0
},
showlegend=False)
}
@app.callback(
Output('climate_tas', 'figure'),
[Input('model', 'value'),
Input('rcp', 'value')
])
def update_tas(models, rcp):
if (len(models) > 0 and len(rcp) > 0):
df = get_cli_data(models, rcp, cli['tas'])
return {
'data': [{
'x': df.index,
'y': df[col],
'name':col,
} for col in df.columns],
'layout' : go.Layout(
height=200,
margin= {'t': 20,'b':30 },
xaxis = {
'ticks' : 'outside',
'ticklen' : 5,
'showgrid' : False,
'linewidth' : 1,
'zeroline' : False,
'zerolinewidth' : 0
},
yaxis = {
'title' : "Temperature (\xb0C)",
'ticks' : 'outside',
'ticklen' : 5,
'showgrid' : False,
'linewidth' : 1,
'zeroline' : False,
'zerolinewidth' : 0
},
showlegend=False)
}
@app.callback(
Output('climate_pr', 'figure'),
[Input('model', 'value'),
Input('rcp', 'value')
])
def update_pr(models, rcp):
if (len(models) > 0 and len(rcp) > 0):
df = get_cli_data(models, rcp, cli['pr'])
return {
'data': [{
'x': df.index,
'y': df[col],
'name':col
}
for col in df.columns],
'layout' : go.Layout(
height=200,
margin= {'t': 20,'b':30 },
xaxis = {
'ticks' : 'outside',
'ticklen' : 5,
'showgrid' : False,
'linewidth' : 1,
'zeroline' : False,
'zerolinewidth' : 0
},
yaxis = {
'title' : 'Precipitation (mm)',
'ticks' : 'outside',
'ticklen' : 5,
'showgrid' : False,
'linewidth' : 1,
'zeroline' : False
},
showlegend=False)
}
# Run the Dash app
if __name__ == '__main__':
app.server.run()
|
python
|
import tanjun
import typing
from hikari import Embed
from modules import package_fetcher
component = tanjun.Component()
@component.with_command
@tanjun.with_argument("repo_n", default="main")
@tanjun.with_argument("arch_n", default="aarch64")
@tanjun.with_argument("pkg_n", default=None)
@tanjun.with_parser
@tanjun.as_message_command("pkg", "apt")
async def pkg_msg(ctx: tanjun.abc.MessageContext, pkg_n: str, arch_n: str, repo_n: str) -> None:
if repo_n not in ["main", "root", "x11"] or arch_n not in ["aarch64", "arm", "i686", "x86_64"]:
await ctx.respond(embed=Embed(
description="the Arch or Repo name are Wrong!",
color="#ff0000"
))
return
await pkg(ctx, pkg_n, arch_n, repo_n)
@component.with_slash_command
@tanjun.with_str_slash_option("repo_name", "The repo name", choices=["main", "root", "x11"], default="main")
@tanjun.with_str_slash_option("arch", "The arch name", choices=["aarch64", "arm", "i686", "x86_64"], default="aarch64")
@tanjun.with_str_slash_option("package_name", "The package name", default=None)
@tanjun.as_slash_command("pkg", "show package details")
async def pkg_slash(ctx: tanjun.abc.SlashContext, package_name: typing.Optional[str], arch: typing.Optional[str], repo_name: typing.Optional[str]) -> None:
await pkg(ctx, package_name, arch, repo_name)
async def pkg(ctx: tanjun.abc.Context, pkg_n, arch_n, repo_n) -> None:
if pkg_n:
await ctx.respond(embed=Embed(
description="Connecting to the repository...",
color="#ffff00"
))
r = package_fetcher.fetch(arch_n, repo_n)
        ct = lambda x, y: x[:y-3] + "..." if len(x) > y else x  # truncate x to at most y characters
if not r:
await ctx.edit_last_response(embed=Embed(
description="Failed to connect to the repository!",
color="#ff0000"
))
elif pkg_n in r and pkg_n != "_host":
pkg_embed = Embed(color="#00ff00")
pkg_embed.add_field(name="Package name:", value=r[pkg_n]["Package"])
pkg_embed.add_field(name="Description:", value=ct(r[pkg_n]["Description"], 500))
pkg_embed.add_field(name="Version:", value=ct(r[pkg_n]["Version"], 200))
if "Depends" in r[pkg_n]:
pkg_embed.add_field(name="Dependencies:", value=ct(", ".join(f"`{x}`" for x in r[pkg_n]["Depends"].split(", ")), 2500))
pkg_embed.add_field(name="Size:", value=f"{int(r[pkg_n]['Size'])/1024/1024:.2f} MB")
pkg_embed.add_field(name="Maintainer:", value=ct(r[pkg_n]["Maintainer"], 300))
pkg_embed.add_field(name="Installation:", value=f"```\napt install {r[pkg_n]['Package']}\n```")
pkg_embed.add_field(name="Links:", value=f"[Homepage]({r[pkg_n]['Homepage']}) | [Download .deb]({r['_host']['url']}/{r[pkg_n]['Filename']})")
pkg_embed.set_footer(text=f"Connected to {r['_host']['host_name']}")
await ctx.edit_last_response(embed=pkg_embed)
else:
await ctx.edit_last_response(embed=Embed(
description=f"Unable to locate package `{pkg_n}`",
color="#ff0000"
))
else:
await ctx.respond(embed=Embed(
description="Please enter the package name!",
color="#ff0000"
))
load_command = component.make_loader()
|
python
|
# Copyright 2020 The FastEstimator Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import os
import unittest
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
import torch
import fastestimator as fe
from fastestimator.test.unittest_util import check_img_similar, fig_to_rgb_array, img_to_rgb_array
class TestShowImage(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.color_img_ans = img_to_rgb_array(
os.path.abspath(os.path.join(__file__, "..", "resources", "test_show_image_color.png")))
cls.hw_ratio_img_ans = img_to_rgb_array(
os.path.abspath(os.path.join(__file__, "..", "resources", "test_show_image_height_width.png")))
cls.bb_img_ans = img_to_rgb_array(
os.path.abspath(os.path.join(__file__, "..", "resources", "test_show_image_bounding_box.png")))
cls.mixed_img_ans = img_to_rgb_array(
os.path.abspath(os.path.join(__file__, "..", "resources", "test_show_image_mixed.png")))
cls.text_img_ans = img_to_rgb_array(
os.path.abspath(os.path.join(__file__, "..", "resources", "test_show_image_text.png")))
cls.title_img_ans = img_to_rgb_array(
os.path.abspath(os.path.join(__file__, "..", "resources", "test_show_image_title.png")))
cls.float_img_ans = img_to_rgb_array(
os.path.abspath(os.path.join(__file__, "..", "resources", "test_show_image_check_float.png")))
def setUp(self) -> None:
self.old_backend = matplotlib.get_backend()
matplotlib.use("Agg")
def tearDown(self) -> None:
matplotlib.use(self.old_backend)
def test_show_image_color_np(self):
img = np.zeros((90, 90, 3), dtype=np.uint8)
img[:, 0:30, :] = np.array([255, 0, 0])
img[:, 30:60, :] = np.array([0, 255, 0])
img[:, 60:90, :] = np.array([0, 0, 255])
fig, axis = plt.subplots(1, 1, figsize=(6.4, 4.8))
fe.util.show_image(img, fig=fig, axis=axis)
# Now we can save it to a numpy array.
obj1 = fig_to_rgb_array(fig)
obj2 = self.color_img_ans
self.assertTrue(check_img_similar(obj1, obj2))
def test_show_image_color_torch(self):
img = np.zeros((90, 90, 3), dtype=np.uint8)
img[:, 0:30, :] = np.array([255, 0, 0])
img[:, 30:60, :] = np.array([0, 255, 0])
img[:, 60:90, :] = np.array([0, 0, 255])
img = torch.from_numpy(img.transpose((2, 0, 1)))
fig, axis = plt.subplots(1, 1, figsize=(6.4, 4.8))
fe.util.show_image(img, fig=fig, axis=axis)
obj1 = fig_to_rgb_array(fig)
obj2 = self.color_img_ans
self.assertTrue(check_img_similar(obj1, obj2))
def test_show_image_color_tf(self):
img = np.zeros((90, 90, 3), dtype=np.uint8)
img[:, 0:30, :] = np.array([255, 0, 0])
img[:, 30:60, :] = np.array([0, 255, 0])
img[:, 60:90, :] = np.array([0, 0, 255])
img = tf.convert_to_tensor(img)
fig, axis = plt.subplots(1, 1, figsize=(6.4, 4.8))
fe.util.show_image(img, fig=fig, axis=axis)
obj1 = fig_to_rgb_array(fig)
obj2 = self.color_img_ans
self.assertTrue(check_img_similar(obj1, obj2))
def test_show_image_check_float_0_to_1_np(self):
img = np.zeros((256, 256, 3), dtype=np.float32)
for x in range(256):
img[x, :, :] = x / 255
fig, axis = plt.subplots(1, 1, figsize=(6.4, 4.8))
fe.util.show_image(img, fig=fig, axis=axis)
obj1 = fig_to_rgb_array(fig)
obj2 = self.float_img_ans
self.assertTrue(check_img_similar(obj1, obj2))
def test_show_image_check_float_neg_1_to_1_np(self):
img = np.zeros((256, 256, 3), dtype=np.float32)
for x in range(256):
img[x, :, :] = (x - 127.5) / 127.5
fig, axis = plt.subplots(1, 1, figsize=(6.4, 4.8))
fe.util.show_image(img, fig=fig, axis=axis)
obj1 = fig_to_rgb_array(fig)
obj2 = self.float_img_ans
self.assertTrue(check_img_similar(obj1, obj2))
def test_show_image_color_arbitrary_range_np(self):
img = np.zeros((256, 256, 3), dtype=np.float32)
for x in range(256):
img[x, :, :] = x * 0.2
fig, axis = plt.subplots(1, 1, figsize=(6.4, 4.8))
fe.util.show_image(img, fig=fig, axis=axis)
obj1 = fig_to_rgb_array(fig)
obj2 = self.float_img_ans
self.assertTrue(check_img_similar(obj1, obj2))
def test_show_image_height_width_np(self):
img = np.zeros((150, 100))
fig, axis = plt.subplots(1, 1, figsize=(6.4, 4.8))
fe.util.show_image(img, fig=fig, axis=axis)
obj1 = fig_to_rgb_array(fig)
obj2 = self.hw_ratio_img_ans
self.assertTrue(check_img_similar(obj1, obj2))
def test_show_image_text_np(self):
text = "apple"
fig, axis = plt.subplots(1, 1, figsize=(6.4, 4.8))
fe.util.show_image(text, fig=fig, axis=axis)
obj1 = fig_to_rgb_array(fig)
obj2 = self.text_img_ans
self.assertTrue(check_img_similar(obj1, obj2))
def test_show_image_bounding_box_np(self):
bg_img = np.zeros((150, 150))
boxes = np.array([[0, 0, 10, 20, "apple"], [10, 20, 30, 50, "dog"], [40, 70, 200, 200, "cat"],
[0, 0, 0, 0, "shouldn't shown"], [0, 0, -50, -30, "shouldn't shown2"]])
fig, axis = plt.subplots(1, 1, figsize=(6.4, 4.8))
fe.util.show_image(bg_img, fig=fig, axis=axis)
fe.util.show_image(boxes, fig=fig, axis=axis)
obj1 = fig_to_rgb_array(fig)
obj2 = self.bb_img_ans
self.assertTrue(check_img_similar(obj1, obj2))
def test_show_image_mixed_figure_layer_np(self):
bg_img = np.ones((150, 150, 3), dtype=np.uint8) * 255
boxes = np.array([[0, 0, 10, 20], [10, 20, 30, 50], [40, 70, 200, 200]])
fig, axis = plt.subplots(1, 1, figsize=(6.4, 4.8))
fe.util.show_image(bg_img, fig=fig, axis=axis)
fe.util.show_image(boxes, fig=fig, axis=axis)
fe.util.show_image("apple", fig=fig, axis=axis)
obj1 = fig_to_rgb_array(fig)
obj2 = self.mixed_img_ans
self.assertTrue(check_img_similar(obj1, obj2))
def test_show_image_title_np(self):
img = np.ones((150, 150), dtype=np.uint8) * 255
fig, axis = plt.subplots(1, 1, figsize=(6.4, 4.8))
fe.util.show_image(img, fig=fig, axis=axis, title="test title")
obj1 = fig_to_rgb_array(fig)
obj2 = self.title_img_ans
self.assertTrue(check_img_similar(obj1, obj2))
|
python
|
import data
from base import nbprint
from tokenizer.main import run_tokenizer
def check_requirements(info):
# Check if tokens file exists
if not data.tokenized_document_exists(info):
# Run Tokenizer
nbprint('Tokens missing.')
run_tokenizer(info)
        # Check if it was successful
return data.tokenized_document_exists(info)
return True
class VocabItem:
def __init__(self, token, total = 0, document = 0):
self.token = token
self.total = total
self.document = document
def increase_total(self, count = 1):
self.total += count
def increase_document(self, count = 1):
self.document += count
class Vectorizer:
def __init__(self, info):
self.info = info
def build_vocab(self):
self.counts = []
def get_vocab(self):
return [{'id': id, 'token': vi.token, 'total': vi.total, 'document': vi.document}
for id, vi in enumerate(self.counts)]
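# Note: get_vocab() yields one dict per vocabulary entry, for example
# {'id': 0, 'token': 'cat', 'total': 12, 'document': 7} (token and counts illustrative).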
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###########################################################
# WARNING: Generated code! #
# ************************** #
# Manual changes may get lost if file is generated again. #
# Only code inside the [MANUAL] tags will be kept. #
###########################################################
from flexbe_core import Behavior, Autonomy, OperatableStateMachine, ConcurrencyContainer, PriorityContainer, Logger
from robotender_flexbe_behaviors.multiple_cups_pour_behavior_using_containers_sm import multiplecupspourbehaviorusingcontainersSM
# Additional imports can be added inside the following tags
# [MANUAL_IMPORT]
# [/MANUAL_IMPORT]
'''
Created on Thu Nov 02 2017
@author: Davis Catherman, Shannon Enders
'''
class multiplecuponloopSM(Behavior):
'''
loooped
'''
def __init__(self):
super(multiplecuponloopSM, self).__init__()
self.name = 'multiple cup on loop'
# parameters of this behavior
# references to used behaviors
self.add_behavior(multiplecupspourbehaviorusingcontainersSM, 'multiple cups pour behavior using containers')
        # Additional initialization code can be added inside the following tags
        # [MANUAL_INIT]
        # [/MANUAL_INIT]
        # Behavior comments:
    def create(self):
        # x:947 y:100, x:618 y:382
        _state_machine = OperatableStateMachine(outcomes=['finished', 'failed'])
        _state_machine.userdata.joint_names = ["m1n6s200_joint_1", "m1n6s200_joint_2", "m1n6s200_joint_3", "m1n6s200_joint_4", "m1n6s200_joint_5", "m1n6s200_joint_6"]
        _state_machine.userdata.center_values = [4.825370393837993, 4.804768712277358, 1.7884682005958692, 2.781744729201632, 1.7624776125694588, 2.5668808924540394]
        _state_machine.userdata.prep_pour_to_left = [4.8484381625680415, 4.172889801498073, 1.372345285529353, 3.0126762157540004, 1.4690217615247554, 2.627620406383804]
        _state_machine.userdata.pour_to_left = [4.610045297589599, 4.293199701639057, 1.419019181003809, 3.012844793851002, 1.4674078859041673, 4.845438377916176]
        _state_machine.userdata.post_pour_to_left = [4.8484381625680415, 4.172889801498073, 1.372345285529353, 3.0126762157540004, 1.4690217615247554, 2.627620406383804]
        _state_machine.userdata.left_values = [4.501794723496712, 4.784133474886988, 1.6909002314255626, 2.766800400744653, 1.8037183931040444, 2.543646143523643]
        _state_machine.userdata.prep_pour_to_center = [4.4696588912549435, 4.2865780179046835, 1.371823705429861, 2.7555946178259263, 1.6906042210704002, 2.5960829864389763]
        _state_machine.userdata.pour_to_center = [4.700331784865464, 4.265325726089742, 1.4461706409493849, 2.7535296027166787, 1.4171899888090882, 0.5029200288136196]
        _state_machine.userdata.post_pour_to_center = [4.4696588912549435, 4.2865780179046835, 1.371823705429861, 2.7555946178259263, 1.6906042210704002, 2.5960829864389763]
        _state_machine.userdata.OPEN = [0,0]
        _state_machine.userdata.CLOSE = [5000,5000]
        _state_machine.userdata.pre_grab_left = [4.616985495390345, 4.361768642857545, 0.8309522662125534, 2.772490244413607, 1.7511775537481435, 2.6507113446153356]
        _state_machine.userdata.back_off_center = [4.8380550301100405, 4.49428940291265, 1.2147491327564424, 2.784340512316133, 1.7494544885228622, 2.530367888644617]
# Additional creation code can be added inside the following tags
# [MANUAL_CREATE]
# [/MANUAL_CREATE]
with _state_machine:
# x:212 y:48
OperatableStateMachine.add('multiple cups pour behavior using containers',
self.use_behavior(multiplecupspourbehaviorusingcontainersSM, 'multiple cups pour behavior using containers'),
transitions={'finished': 'finished', 'failed': 'failed'},
autonomy={'finished': Autonomy.Inherit, 'failed': Autonomy.Inherit})
return _state_machine
# Private functions can be added inside the following tags
# [MANUAL_FUNC]
# [/MANUAL_FUNC]
|
python
|
import numpy as np
a = input("enter the matrix with ; after each row : ")
m =np.matrix(a)
b = input("enter the matrix 2 with row matching with matrix 1 : ")
n =np.matrix(b)
print(m)
print(n)
m3 = np.dot(m,n)
print(m3)
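# Example session (illustrative): entering "1 2; 3 4" and "5 6; 7 8"
# prints the matrix product [[19 22] [43 50]].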
|
python
|
#//////////////#####///////////////
#
# ANU u6325688 Yangyang Xu
# Supervisor: Dr.Penny Kyburz
# The SPP used in this script adopts some methods from:
# https://github.com/yueruchen/sppnet-pytorch/blob/master/cnn_with_spp.py
#//////////////#####///////////////
"""
Policy Generator
"""
import torch.nn as nn
from GAIL.SPP import SPP
from commons.DataInfo import DataInfo
from torch.distributions import Categorical
import torch.nn.functional as F
import torch
from torch.distributions import Normal, Beta
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
class Generator(nn.Module):
def __init__(self, datainfo:DataInfo):
super(Generator, self).__init__()
self.inChannel = datainfo.generatorIn #state space size
self.outChannel = datainfo.generatorOut #action space size
self.maxAction = datainfo.maxAction
self.criticScore = 0
self.hidden = torch.nn.Linear(self.inChannel, self.inChannel*2)
        self.out = torch.nn.Linear(self.inChannel*2, self.outChannel)
        # softmax over the action dimension, used in forward()
        self.softmax = torch.nn.Softmax(dim=1)
def forward(self, input):
mid = self.hidden(input)
hOut = F.sigmoid(mid)
out = self.out(hOut)
# Critic's
criticFC = nn.Linear(self.outChannel, 1).to(device)
self.criticScore = criticFC(mid)
# Generator's
actionDistribution = self.softmax(out)
action = (actionDistribution).argmax(1)
for x in range(actionDistribution.shape[0]):
if sum(actionDistribution[x]) == 0:
actionDistribution[x]= actionDistribution[x] + 1e-8
tmp = Categorical(actionDistribution)
actionDistribution = tmp.log_prob(action)
entropy = tmp.entropy()
return actionDistribution, action.detach(), entropy
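# Note: forward() returns the log-probability of the greedy (argmax) action under the
# softmax policy, the detached action indices, and the per-sample policy entropy;
# self.criticScore is updated as a side effect on every call.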
|
python
|
#!/usr/bin/env python3
from __future__ import print_function
import sys
import os
import time
sys.path.append('..')
import childmgt.ChildMgt
def create_children(num_children=5):
for child_num in range(0, num_children):
child_pid = os.fork()
if child_pid == 0:
time.sleep(3)
sys.exit(0)
for child_num in range(0, num_children):
child_pid = os.fork()
if child_pid == 0:
time.sleep(3)
sys.exit(1)
def main():
result = 0
create_children()
yyy = childmgt.ChildMgt.ChildMgt()
print("Checking Count Zombies=",yyy.countZombiedChild())
print("Sleeping wait for children to exit.")
time.sleep(30)
print("back from sleep")
print("Count Zombies=",yyy.countZombiedChild())
print("Reaping Status.")
child_status = yyy.reapZombieChildStatus()
for key in child_status.keys():
if os.WIFEXITED(child_status[key]) is True:
print("pid:", key, "status:",os.WEXITSTATUS(child_status[key]))
else:
print("pid:", key, "status:",child_status[key])
print("Child status: ",child_status)
print("Sleeping for 120 seconds")
time.sleep(120)
return result
if __name__ == "__main__":
result = main()
sys.exit(result)
|
python
|
#!/usr/bin/env python
import rospy
from week2.srv import roboCmd, roboCmdResponse
import math as np
class Unicycle:
def __init__(self, x, y, theta, dt=0.05):
self.x = x
self.y = y
self.theta = theta
self.dt = dt
self.x_points = [self.x]
self.y_points = [self.y]
def step(self, v, w, n=50):
for i in range(n):
self.theta += w*self.dt # angle = angle + angular_velociy * delta
self.x += v*np.cos(self.theta)*self.dt # X = X + horizontal_velocity * delta
self.y += v*np.sin(self.theta)*self.dt # Y = Y + vertical_velocity * delta
self.x_points.append(self.x)
self.y_points.append(self.y)
return self.x_points, self.y_points
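# Minimal offline usage sketch (no ROS needed; the numbers are illustrative):
#   uni = Unicycle(x=0.0, y=0.0, theta=0.0)
#   xs, ys = uni.step(v=1.0, w=0.5)  # 50 Euler steps -> 51 sampled points of an arc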
def handle_return_traj(req):
uni = Unicycle(req.x, req.y, req.theta)
resp = uni.step(req.v, req.w)
return roboCmdResponse(resp[0], resp[1])
def return_traj_server():
rospy.init_node('return_traj_server')
s = rospy.Service('return_traj', roboCmd, handle_return_traj)
rospy.loginfo('Available to return Trajectory')
rospy.spin()
if __name__ == "__main__":
return_traj_server()
|
python
|
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
"""
Get Jc, RA, etc from measured parameter DB
BB, 2015
"""
import sqlite3
import numpy as np
import matplotlib.pyplot as plt
# display units
unit_i = 1e-6 # uA
unit_v = 1e-6 # uV
unit_r = 1 # Ohm
unit_i1 = 1e-3 # mA; control I
unit_v1 = 1e-3 # mV; control V
unit_h = 10 # mT
def setplotparams():
plt.rcParams['font.size'] = 12
plt.rcParams['axes.labelsize'] = 14
plt.rcParams['legend.fontsize'] = 12
plt.rcParams['legend.frameon'] = False
def plot_svjj(filenames, **kwargs):
    whichplot = kwargs.get('whichplot', 'hic')
if whichplot == 'hic': # H vs Ic
ix = 1; iy = 3
#for fn = in filenames:
# data = np.loadtxt(filename,
class LinFitSVJJ():
def __init__(self, filename='svjj.db'):
self.conn = sqlite3.connect(filename)
self.c = self.conn.cursor()
setplotparams()
def get_area(self, row):
if row[0] == 'circle':
return np.pi*row[1]**2/4
elif row[0] == 'ellipse':
return np.pi*row[1]*row[2]/4
elif row[0] == 'rectangle':
return row[1]*row[2]
def select_chip(self, wafer, chip):
self.c.execute('''
SELECT shape.shape, shape.dim1, shape.dim2,
josephson.ic_p, josephson.ic_ap, josephson.r_p, josephson.r_ap
FROM shape JOIN josephson
ON shape.wafer=josephson.wafer AND shape.chip=josephson.chip
AND shape.device=josephson.device''')
self.chipdata = self.c.fetchall()
self.areas = []
self.ic_p = []
self.ic_ap = []
self.r_p = []
for row in self.chipdata:
self.areas += [self.get_area(row)]
self.ic_p += [row[3]]
self.ic_ap += [row[4]]
self.r_p += [row[5]]
def print_chip(self):
print(self.chipdata)
def plot_chip(self):
fig = plt.figure(0, (12,6))
# plot Ic's
ax1 = fig.add_subplot(121)
ax1.plot(self.areas, self.ic_p, 's')
ax1.plot(self.areas, self.ic_ap, 'o')
# plot R's
ax2 = fig.add_subplot(122)
ax2.plot(self.areas, 1/np.array(self.r_p), 's')
print(self.ic_p)
plt.show()
# main shell interface (run SVJJDBInteract class)
def app(argv):
"""Execute in system shell
"""
if len(argv) < 2:
print("Usage: python %s <command> <table>\n"
" <command>: print, insert, delete, or edit\n"
" <table>: barrier, shape, or josephson\n" % argv[0])
sys.exit(0)
db = SVJJDBInteract()
methodname = argv[1]
print(argv[2:])
getattr(db, methodname)(*argv[2:])
db.close()
print('Bye!')
def test(argv):
lf = LinFitSVJJ()
lf.select_chip('B150323a', '56')
lf.print_chip()
lf.plot_chip()
if __name__ == '__main__':
import sys
print(sys.version)
test(sys.argv)
print('Bye!')
|
python
|
#
# PySNMP MIB module ZHONE-COM-IP-DHCP-SERVER-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/ZHONE-COM-IP-DHCP-SERVER-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 21:40:42 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
OctetString, Integer, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "OctetString", "Integer", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsIntersection, SingleValueConstraint, ConstraintsUnion, ValueRangeConstraint, ValueSizeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsIntersection", "SingleValueConstraint", "ConstraintsUnion", "ValueRangeConstraint", "ValueSizeConstraint")
InterfaceIndex, = mibBuilder.importSymbols("IF-MIB", "InterfaceIndex")
SnmpAdminString, = mibBuilder.importSymbols("SNMP-FRAMEWORK-MIB", "SnmpAdminString")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
sysObjectID, = mibBuilder.importSymbols("SNMPv2-MIB", "sysObjectID")
ModuleIdentity, Counter64, Counter32, IpAddress, ObjectIdentity, Integer32, Gauge32, MibScalar, MibTable, MibTableRow, MibTableColumn, TimeTicks, iso, Bits, MibIdentifier, Unsigned32, NotificationType = mibBuilder.importSymbols("SNMPv2-SMI", "ModuleIdentity", "Counter64", "Counter32", "IpAddress", "ObjectIdentity", "Integer32", "Gauge32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "TimeTicks", "iso", "Bits", "MibIdentifier", "Unsigned32", "NotificationType")
TruthValue, TextualConvention, DisplayString, PhysAddress = mibBuilder.importSymbols("SNMPv2-TC", "TruthValue", "TextualConvention", "DisplayString", "PhysAddress")
cardPostResults, cardMfgSerialNumber = mibBuilder.importSymbols("ZHONE-CARD-RESOURCES-MIB", "cardPostResults", "cardMfgSerialNumber")
ZhoneRDIndex, rdEntry = mibBuilder.importSymbols("ZHONE-COM-IP-RD-MIB", "ZhoneRDIndex", "rdEntry")
ipIfAddr, ipIfLgId, ipIfVpi, ipIfVci = mibBuilder.importSymbols("ZHONE-COM-IP-REC-MIB", "ipIfAddr", "ipIfLgId", "ipIfVpi", "ipIfVci")
zhoneShelfNumber, pportNumber, zhoneSlotNumber, subPortNumber = mibBuilder.importSymbols("ZHONE-INTERFACE-TRANSLATION-MIB", "zhoneShelfNumber", "pportNumber", "zhoneSlotNumber", "subPortNumber")
zhoneSysCardSwSpecificVers, = mibBuilder.importSymbols("ZHONE-SYSTEM-MIB", "zhoneSysCardSwSpecificVers")
zhoneModules, zhoneIp = mibBuilder.importSymbols("Zhone", "zhoneModules", "zhoneIp")
ZhoneShelfValue, ZhoneRowStatus, ZhoneFileName, ZhoneSlotValue, ZhoneAdminString = mibBuilder.importSymbols("Zhone-TC", "ZhoneShelfValue", "ZhoneRowStatus", "ZhoneFileName", "ZhoneSlotValue", "ZhoneAdminString")
comIpDhcpServer = ModuleIdentity((1, 3, 6, 1, 4, 1, 5504, 6, 61))
comIpDhcpServer.setRevisions(('2003-09-10 10:47', '2003-04-18 10:10', '2000-12-03 14:00', '2000-11-28 15:00', '2000-12-05 12:11', '2000-10-02 12:05', '2000-09-15 16:50', '2000-09-11 15:41',))
if mibBuilder.loadTexts: comIpDhcpServer.setLastUpdated('200309101500Z')
if mibBuilder.loadTexts: comIpDhcpServer.setOrganization('Zhone Technologies, Inc.')
dhcpServer = ObjectIdentity((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11))
if mibBuilder.loadTexts: dhcpServer.setStatus('current')
dhcpServerTraps = ObjectIdentity((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 0))
if mibBuilder.loadTexts: dhcpServerTraps.setStatus('current')
dhcpTrapZhoneCpeDetected = NotificationType((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 0, 1)).setObjects(("ZHONE-INTERFACE-TRANSLATION-MIB", "zhoneShelfNumber"), ("ZHONE-INTERFACE-TRANSLATION-MIB", "zhoneSlotNumber"), ("ZHONE-INTERFACE-TRANSLATION-MIB", "pportNumber"), ("ZHONE-INTERFACE-TRANSLATION-MIB", "subPortNumber"), ("ZHONE-COM-IP-REC-MIB", "ipIfVpi"), ("ZHONE-COM-IP-REC-MIB", "ipIfVci"), ("ZHONE-COM-IP-REC-MIB", "ipIfLgId"), ("ZHONE-COM-IP-DHCP-SERVER-MIB", "dhcpTrapZhoneCpeSysObjectID"), ("ZHONE-CARD-RESOURCES-MIB", "cardMfgSerialNumber"), ("ZHONE-CARD-RESOURCES-MIB", "cardPostResults"), ("ZHONE-SYSTEM-MIB", "zhoneSysCardSwSpecificVers"), ("ZHONE-COM-IP-REC-MIB", "ipIfAddr"))
if mibBuilder.loadTexts: dhcpTrapZhoneCpeDetected.setStatus('current')
dhcpTrapZhoneCpeSysObjectID = MibScalar((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 0, 2), ObjectIdentifier()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: dhcpTrapZhoneCpeSysObjectID.setStatus('current')
dhcpTrapZhoneIpAddressUpdate = NotificationType((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 0, 3)).setObjects(("ZHONE-COM-IP-DHCP-SERVER-MIB", "dhcpTrapZhoneIpInterfaceIndex"), ("ZHONE-COM-IP-REC-MIB", "ipIfAddr"))
if mibBuilder.loadTexts: dhcpTrapZhoneIpAddressUpdate.setStatus('current')
dhcpTrapZhoneIpInterfaceIndex = MibScalar((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 0, 4), InterfaceIndex()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: dhcpTrapZhoneIpInterfaceIndex.setStatus('current')
dhcpServerDefaultLeaseTime = MibScalar((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setUnits('seconds').setMaxAccess("readwrite")
if mibBuilder.loadTexts: dhcpServerDefaultLeaseTime.setStatus('current')
dhcpServerDefaultMinLeaseTime = MibScalar((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setUnits('seconds').setMaxAccess("readwrite")
if mibBuilder.loadTexts: dhcpServerDefaultMinLeaseTime.setStatus('current')
dhcpServerDefaultMaxLeaseTime = MibScalar((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setUnits('seconds').setMaxAccess("readwrite")
if mibBuilder.loadTexts: dhcpServerDefaultMaxLeaseTime.setStatus('current')
dhcpServerDefaultReserveStart = MibScalar((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dhcpServerDefaultReserveStart.setStatus('current')
dhcpServerDefaultReserveEnd = MibScalar((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dhcpServerDefaultReserveEnd.setStatus('current')
dhcpServerLeaseTable = MibTable((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 6), )
if mibBuilder.loadTexts: dhcpServerLeaseTable.setStatus('current')
dhcpServerLeaseEntry = MibTableRow((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 6, 1), ).setIndexNames((0, "ZHONE-COM-IP-DHCP-SERVER-MIB", "dhcpLeaseDomain"), (0, "ZHONE-COM-IP-DHCP-SERVER-MIB", "dhcpLeaseIpAddress"))
if mibBuilder.loadTexts: dhcpServerLeaseEntry.setStatus('current')
dhcpLeaseDomain = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 6, 1, 1), ZhoneRDIndex())
if mibBuilder.loadTexts: dhcpLeaseDomain.setStatus('current')
dhcpLeaseIpAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 6, 1, 2), IpAddress())
if mibBuilder.loadTexts: dhcpLeaseIpAddress.setStatus('current')
dhcpLeaseStarts = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 6, 1, 3), Unsigned32()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dhcpLeaseStarts.setStatus('current')
dhcpLeaseEnds = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 6, 1, 4), Unsigned32()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dhcpLeaseEnds.setStatus('current')
dhcpLeaseHardwareAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 6, 1, 5), PhysAddress().subtype(subtypeSpec=ValueSizeConstraint(0, 16)).clone(hexValue="0000")).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dhcpLeaseHardwareAddress.setStatus('current')
dhcpLeaseFlags = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 6, 1, 6), Bits().clone(namedValues=NamedValues(("static", 0), ("bootp", 1), ("unused2", 2), ("unused3", 3), ("abandoned", 4), ("zhoneCPE", 5)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dhcpLeaseFlags.setStatus('current')
dhcpLeaseClientId = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 6, 1, 7), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dhcpLeaseClientId.setStatus('current')
dhcpLeaseClientHostname = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 6, 1, 8), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(0, 127))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dhcpLeaseClientHostname.setStatus('current')
dhcpLeaseHostname = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 6, 1, 9), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(0, 127))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dhcpLeaseHostname.setStatus('current')
dhcpLeaseDDNSFwdName = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 6, 1, 10), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(0, 127))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dhcpLeaseDDNSFwdName.setStatus('current')
dhcpLeaseDDNSRevName = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 6, 1, 11), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(0, 127))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dhcpLeaseDDNSRevName.setStatus('current')
dhcpLeaseRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 6, 1, 12), ZhoneRowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dhcpLeaseRowStatus.setStatus('current')
dhcpServerNextGroupIndex = MibScalar((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 7), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dhcpServerNextGroupIndex.setStatus('current')
dhcpServerGroupTable = MibTable((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 8), )
if mibBuilder.loadTexts: dhcpServerGroupTable.setStatus('current')
dhcpServerGroupEntry = MibTableRow((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 8, 1), ).setIndexNames((0, "ZHONE-COM-IP-DHCP-SERVER-MIB", "dhcpGroupIndex"))
if mibBuilder.loadTexts: dhcpServerGroupEntry.setStatus('current')
dhcpGroupIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 8, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647)))
if mibBuilder.loadTexts: dhcpGroupIndex.setStatus('current')
dhcpGroupName = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 8, 1, 2), ZhoneAdminString()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dhcpGroupName.setStatus('current')
dhcpGroupDomain = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 8, 1, 3), ZhoneRDIndex()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dhcpGroupDomain.setStatus('current')
dhcpGroupVendorMatchString = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 8, 1, 4), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dhcpGroupVendorMatchString.setStatus('current')
dhcpGroupVendorMatchOffset = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 8, 1, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dhcpGroupVendorMatchOffset.setStatus('current')
dhcpGroupVendorMatchLength = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 8, 1, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-1, 255)).clone(-1)).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dhcpGroupVendorMatchLength.setStatus('current')
dhcpGroupClientMatchString = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 8, 1, 7), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dhcpGroupClientMatchString.setStatus('current')
dhcpGroupClientMatchOffset = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 8, 1, 8), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dhcpGroupClientMatchOffset.setStatus('current')
dhcpGroupClientMatchLength = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 8, 1, 9), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-1, 255)).clone(-1)).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dhcpGroupClientMatchLength.setStatus('current')
dhcpGroupRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 8, 1, 10), ZhoneRowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dhcpGroupRowStatus.setStatus('current')
dhcpServerGroupOptionTable = MibTable((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 9), )
if mibBuilder.loadTexts: dhcpServerGroupOptionTable.setStatus('current')
dhcpServerGroupOptionEntry = MibTableRow((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 9, 1), )
dhcpServerGroupEntry.registerAugmentions(("ZHONE-COM-IP-DHCP-SERVER-MIB", "dhcpServerGroupOptionEntry"))
dhcpServerGroupOptionEntry.setIndexNames(*dhcpServerGroupEntry.getIndexNames())
if mibBuilder.loadTexts: dhcpServerGroupOptionEntry.setStatus('current')
dhcpGroupOptionDefaultLeaseTime = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 9, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-1, 2147483647))).setUnits('seconds').setMaxAccess("readwrite")
if mibBuilder.loadTexts: dhcpGroupOptionDefaultLeaseTime.setStatus('current')
dhcpGroupOptionMinLeaseTime = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 9, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-1, 2147483647))).setUnits('seconds').setMaxAccess("readwrite")
if mibBuilder.loadTexts: dhcpGroupOptionMinLeaseTime.setStatus('current')
dhcpGroupOptionMaxLeaseTime = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 9, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-1, 2147483647))).setUnits('seconds').setMaxAccess("readwrite")
if mibBuilder.loadTexts: dhcpGroupOptionMaxLeaseTime.setStatus('current')
dhcpGroupOptionBootFile = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 9, 1, 4), ZhoneFileName()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dhcpGroupOptionBootFile.setStatus('current')
dhcpGroupOptionBootServer = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 9, 1, 5), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dhcpGroupOptionBootServer.setStatus('current')
dhcpGroupOptionDefaultRouter = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 9, 1, 6), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dhcpGroupOptionDefaultRouter.setStatus('current')
dhcpGroupOptionPrimaryNameServer = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 9, 1, 7), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dhcpGroupOptionPrimaryNameServer.setStatus('current')
dhcpGroupOptionSecondaryNameServer = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 9, 1, 8), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dhcpGroupOptionSecondaryNameServer.setStatus('current')
dhcpGroupOptionDomainName = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 9, 1, 9), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(0, 127))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dhcpGroupOptionDomainName.setStatus('current')
dhcpServerNextSubnetIndex = MibScalar((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 10), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dhcpServerNextSubnetIndex.setStatus('current')
dhcpServerSubnetTable = MibTable((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 11), )
if mibBuilder.loadTexts: dhcpServerSubnetTable.setStatus('current')
dhcpServerSubnetEntry = MibTableRow((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 11, 1), ).setIndexNames((0, "ZHONE-COM-IP-DHCP-SERVER-MIB", "dhcpSubnetIndex"))
if mibBuilder.loadTexts: dhcpServerSubnetEntry.setStatus('current')
dhcpSubnetIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 11, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647)))
if mibBuilder.loadTexts: dhcpSubnetIndex.setStatus('current')
dhcpSubnetNetwork = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 11, 1, 2), IpAddress()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dhcpSubnetNetwork.setStatus('current')
dhcpSubnetNetmask = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 11, 1, 3), IpAddress()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dhcpSubnetNetmask.setStatus('current')
dhcpSubnetDomain = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 11, 1, 4), ZhoneRDIndex()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dhcpSubnetDomain.setStatus('current')
dhcpSubnetRange1Start = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 11, 1, 5), IpAddress()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dhcpSubnetRange1Start.setStatus('current')
dhcpSubnetRange1End = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 11, 1, 6), IpAddress()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dhcpSubnetRange1End.setStatus('current')
dhcpSubnetRange2Start = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 11, 1, 7), IpAddress().clone(hexValue="00000000")).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dhcpSubnetRange2Start.setStatus('current')
dhcpSubnetRange2End = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 11, 1, 8), IpAddress().clone(hexValue="00000000")).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dhcpSubnetRange2End.setStatus('current')
dhcpSubnetRange3Start = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 11, 1, 9), IpAddress().clone(hexValue="00000000")).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dhcpSubnetRange3Start.setStatus('current')
dhcpSubnetRange3End = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 11, 1, 10), IpAddress().clone(hexValue="00000000")).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dhcpSubnetRange3End.setStatus('current')
dhcpSubnetRange4Start = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 11, 1, 11), IpAddress().clone(hexValue="00000000")).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dhcpSubnetRange4Start.setStatus('current')
dhcpSubnetRange4End = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 11, 1, 12), IpAddress().clone(hexValue="00000000")).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dhcpSubnetRange4End.setStatus('current')
dhcpSubnetRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 11, 1, 13), ZhoneRowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dhcpSubnetRowStatus.setStatus('current')
dhcpSubnetGroup2 = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 11, 1, 14), Integer32().clone(0)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dhcpSubnetGroup2.setStatus('current')
dhcpStickyAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 11, 1, 15), TruthValue().clone(1)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dhcpStickyAddr.setStatus('current')
dhcpSubnetExternalServer = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 11, 1, 16), IpAddress().clone(hexValue="00000000")).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dhcpSubnetExternalServer.setStatus('current')
dhcpSubnetExternalServerAlt = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 11, 1, 17), IpAddress().clone(hexValue="00000000")).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dhcpSubnetExternalServerAlt.setStatus('current')
dhcpServerSubnetOptionTable = MibTable((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 12), )
if mibBuilder.loadTexts: dhcpServerSubnetOptionTable.setStatus('current')
dhcpServerSubnetOptionEntry = MibTableRow((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 12, 1), )
dhcpServerSubnetEntry.registerAugmentions(("ZHONE-COM-IP-DHCP-SERVER-MIB", "dhcpServerSubnetOptionEntry"))
dhcpServerSubnetOptionEntry.setIndexNames(*dhcpServerSubnetEntry.getIndexNames())
if mibBuilder.loadTexts: dhcpServerSubnetOptionEntry.setStatus('current')
dhcpSubnetOptionDefaultLeaseTime = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 12, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-1, 2147483647))).setUnits('seconds').setMaxAccess("readwrite")
if mibBuilder.loadTexts: dhcpSubnetOptionDefaultLeaseTime.setStatus('current')
dhcpSubnetOptionMinLeaseTime = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 12, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-1, 2147483647))).setUnits('seconds').setMaxAccess("readwrite")
if mibBuilder.loadTexts: dhcpSubnetOptionMinLeaseTime.setStatus('current')
dhcpSubnetOptionMaxLeaseTime = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 12, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-1, 2147483647))).setUnits('seconds').setMaxAccess("readwrite")
if mibBuilder.loadTexts: dhcpSubnetOptionMaxLeaseTime.setStatus('current')
dhcpSubnetOptionBootFile = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 12, 1, 4), ZhoneFileName()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dhcpSubnetOptionBootFile.setStatus('current')
dhcpSubnetOptionBootServer = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 12, 1, 5), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dhcpSubnetOptionBootServer.setStatus('current')
dhcpSubnetOptionDefaultRouter = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 12, 1, 6), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dhcpSubnetOptionDefaultRouter.setStatus('current')
dhcpSubnetOptionPrimaryNameServer = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 12, 1, 7), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dhcpSubnetOptionPrimaryNameServer.setStatus('current')
dhcpSubnetOptionSecondaryNameServer = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 12, 1, 8), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dhcpSubnetOptionSecondaryNameServer.setStatus('current')
dhcpSubnetOptionDomainName = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 12, 1, 9), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(0, 127))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dhcpSubnetOptionDomainName.setStatus('current')
dhcpServerNextHostIndex = MibScalar((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 13), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dhcpServerNextHostIndex.setStatus('current')
dhcpServerHostTable = MibTable((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 14), )
if mibBuilder.loadTexts: dhcpServerHostTable.setStatus('current')
dhcpServerHostEntry = MibTableRow((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 14, 1), ).setIndexNames((0, "ZHONE-COM-IP-DHCP-SERVER-MIB", "dhcpHostIndex"))
if mibBuilder.loadTexts: dhcpServerHostEntry.setStatus('current')
dhcpHostIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 14, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647)))
if mibBuilder.loadTexts: dhcpHostIndex.setStatus('current')
dhcpHostHostname = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 14, 1, 2), ZhoneAdminString()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dhcpHostHostname.setStatus('current')
dhcpHostDomain = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 14, 1, 3), ZhoneRDIndex()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dhcpHostDomain.setStatus('current')
dhcpHostHardwareAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 14, 1, 4), PhysAddress().subtype(subtypeSpec=ValueSizeConstraint(0, 16)).clone(hexValue="0000")).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dhcpHostHardwareAddress.setStatus('current')
dhcpHostClientId = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 14, 1, 5), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 127))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dhcpHostClientId.setStatus('current')
dhcpHostIpAddress1 = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 14, 1, 6), IpAddress().clone(hexValue="00000000")).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dhcpHostIpAddress1.setStatus('current')
dhcpHostIpAddress2 = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 14, 1, 7), IpAddress().clone(hexValue="00000000")).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dhcpHostIpAddress2.setStatus('current')
dhcpHostIpAddress3 = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 14, 1, 8), IpAddress().clone(hexValue="00000000")).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dhcpHostIpAddress3.setStatus('current')
dhcpHostIpAddress4 = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 14, 1, 9), IpAddress().clone(hexValue="00000000")).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dhcpHostIpAddress4.setStatus('current')
dhcpHostRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 14, 1, 10), ZhoneRowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dhcpHostRowStatus.setStatus('current')
dhcpServerHostOptionTable = MibTable((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 15), )
if mibBuilder.loadTexts: dhcpServerHostOptionTable.setStatus('current')
dhcpServerHostOptionEntry = MibTableRow((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 15, 1), )
dhcpServerHostEntry.registerAugmentions(("ZHONE-COM-IP-DHCP-SERVER-MIB", "dhcpServerHostOptionEntry"))
dhcpServerHostOptionEntry.setIndexNames(*dhcpServerHostEntry.getIndexNames())
if mibBuilder.loadTexts: dhcpServerHostOptionEntry.setStatus('current')
dhcpHostOptionDefaultLeaseTime = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 15, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-1, 2147483647))).setUnits('seconds').setMaxAccess("readwrite")
if mibBuilder.loadTexts: dhcpHostOptionDefaultLeaseTime.setStatus('current')
dhcpHostOptionMinLeaseTime = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 15, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-1, 2147483647))).setUnits('seconds').setMaxAccess("readwrite")
if mibBuilder.loadTexts: dhcpHostOptionMinLeaseTime.setStatus('current')
dhcpHostOptionMaxLeaseTime = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 15, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-1, 2147483647))).setUnits('seconds').setMaxAccess("readwrite")
if mibBuilder.loadTexts: dhcpHostOptionMaxLeaseTime.setStatus('current')
dhcpHostOptionBootFile = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 15, 1, 4), ZhoneFileName()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dhcpHostOptionBootFile.setStatus('current')
dhcpHostOptionBootServer = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 15, 1, 5), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dhcpHostOptionBootServer.setStatus('current')
dhcpHostOptionDefaultRouter = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 15, 1, 6), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dhcpHostOptionDefaultRouter.setStatus('current')
dhcpHostOptionPrimaryNameServer = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 15, 1, 7), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dhcpHostOptionPrimaryNameServer.setStatus('current')
dhcpHostOptionSecondaryNameServer = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 15, 1, 8), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dhcpHostOptionSecondaryNameServer.setStatus('current')
dhcpHostOptionDomainName = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 15, 1, 9), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(0, 127))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dhcpHostOptionDomainName.setStatus('current')
dhcpServerStatistics = ObjectIdentity((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 16))
if mibBuilder.loadTexts: dhcpServerStatistics.setStatus('current')
serverSystem = ObjectIdentity((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 16, 1))
if mibBuilder.loadTexts: serverSystem.setStatus('current')
serverSystemDescr = MibScalar((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 16, 1, 1), ZhoneAdminString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: serverSystemDescr.setStatus('current')
serverSystemObjectID = MibScalar((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 16, 1, 2), ObjectIdentifier()).setMaxAccess("readonly")
if mibBuilder.loadTexts: serverSystemObjectID.setStatus('current')
serverUptime = MibScalar((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 16, 1, 3), TimeTicks()).setMaxAccess("readonly")
if mibBuilder.loadTexts: serverUptime.setStatus('current')
serverActiveShelf = MibScalar((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 16, 1, 4), ZhoneShelfValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: serverActiveShelf.setStatus('current')
serverActiveSlot = MibScalar((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 16, 1, 5), ZhoneSlotValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: serverActiveSlot.setStatus('current')
serverStandbyShelf = MibScalar((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 16, 1, 6), ZhoneShelfValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: serverStandbyShelf.setStatus('current')
serverStandbySlot = MibScalar((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 16, 1, 7), ZhoneSlotValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: serverStandbySlot.setStatus('current')
bootpCountersTable = MibTable((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 16, 2), )
if mibBuilder.loadTexts: bootpCountersTable.setStatus('current')
bootpCountersEntry = MibTableRow((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 16, 2, 1), )
rdEntry.registerAugmentions(("ZHONE-COM-IP-DHCP-SERVER-MIB", "bootpCountersEntry"))
bootpCountersEntry.setIndexNames(*rdEntry.getIndexNames())
if mibBuilder.loadTexts: bootpCountersEntry.setStatus('current')
bootpCountRequests = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 16, 2, 1, 1), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: bootpCountRequests.setStatus('current')
bootpCountInvalids = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 16, 2, 1, 2), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: bootpCountInvalids.setStatus('current')
bootpCountReplies = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 16, 2, 1, 3), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: bootpCountReplies.setStatus('current')
bootpCountDroppedUnknownClients = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 16, 2, 1, 4), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: bootpCountDroppedUnknownClients.setStatus('current')
bootpCountDroppedNotServingSubnet = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 16, 2, 1, 5), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: bootpCountDroppedNotServingSubnet.setStatus('current')
dhcpCountersTable = MibTable((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 16, 3), )
if mibBuilder.loadTexts: dhcpCountersTable.setStatus('current')
dhcpCountersEntry = MibTableRow((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 16, 3, 1), )
rdEntry.registerAugmentions(("ZHONE-COM-IP-DHCP-SERVER-MIB", "dhcpCountersEntry"))
dhcpCountersEntry.setIndexNames(*rdEntry.getIndexNames())
if mibBuilder.loadTexts: dhcpCountersEntry.setStatus('current')
dhcpCountDiscovers = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 16, 3, 1, 1), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dhcpCountDiscovers.setStatus('current')
dhcpCountRequests = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 16, 3, 1, 2), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dhcpCountRequests.setStatus('current')
dhcpCountReleases = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 16, 3, 1, 3), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dhcpCountReleases.setStatus('current')
dhcpCountDeclines = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 16, 3, 1, 4), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dhcpCountDeclines.setStatus('current')
dhcpCountInforms = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 16, 3, 1, 5), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dhcpCountInforms.setStatus('current')
dhcpCountInvalids = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 16, 3, 1, 6), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dhcpCountInvalids.setStatus('current')
dhcpCountOffers = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 16, 3, 1, 7), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dhcpCountOffers.setStatus('current')
dhcpCountAcks = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 16, 3, 1, 8), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dhcpCountAcks.setStatus('current')
dhcpCountNacks = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 16, 3, 1, 9), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dhcpCountNacks.setStatus('current')
dhcpCountDroppedUnknownClient = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 16, 3, 1, 10), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dhcpCountDroppedUnknownClient.setStatus('current')
dhcpCountDroppedNotServingSubnet = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 16, 3, 1, 11), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dhcpCountDroppedNotServingSubnet.setStatus('current')
dhcpServerConfigurationVersion = MibScalar((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 17), Unsigned32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dhcpServerConfigurationVersion.setStatus('deprecated')
dhcpServerRestart = MibScalar((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 18), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("true", 1), ("false", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dhcpServerRestart.setStatus('current')
mibBuilder.exportSymbols("ZHONE-COM-IP-DHCP-SERVER-MIB", dhcpGroupOptionMinLeaseTime=dhcpGroupOptionMinLeaseTime, dhcpSubnetRange3Start=dhcpSubnetRange3Start, dhcpSubnetExternalServer=dhcpSubnetExternalServer, dhcpGroupVendorMatchString=dhcpGroupVendorMatchString, dhcpSubnetOptionBootFile=dhcpSubnetOptionBootFile, dhcpSubnetRange1End=dhcpSubnetRange1End, dhcpServerGroupTable=dhcpServerGroupTable, serverActiveShelf=serverActiveShelf, dhcpServer=dhcpServer, bootpCountDroppedNotServingSubnet=bootpCountDroppedNotServingSubnet, dhcpGroupClientMatchString=dhcpGroupClientMatchString, dhcpGroupOptionSecondaryNameServer=dhcpGroupOptionSecondaryNameServer, dhcpSubnetRange1Start=dhcpSubnetRange1Start, dhcpHostOptionDefaultRouter=dhcpHostOptionDefaultRouter, serverSystemObjectID=serverSystemObjectID, dhcpSubnetNetmask=dhcpSubnetNetmask, dhcpGroupClientMatchOffset=dhcpGroupClientMatchOffset, dhcpGroupIndex=dhcpGroupIndex, dhcpServerDefaultReserveStart=dhcpServerDefaultReserveStart, dhcpGroupVendorMatchOffset=dhcpGroupVendorMatchOffset, dhcpCountersTable=dhcpCountersTable, dhcpServerNextGroupIndex=dhcpServerNextGroupIndex, dhcpHostOptionBootServer=dhcpHostOptionBootServer, dhcpHostOptionPrimaryNameServer=dhcpHostOptionPrimaryNameServer, dhcpTrapZhoneIpAddressUpdate=dhcpTrapZhoneIpAddressUpdate, dhcpServerTraps=dhcpServerTraps, dhcpLeaseIpAddress=dhcpLeaseIpAddress, dhcpSubnetRange4End=dhcpSubnetRange4End, dhcpSubnetRange2End=dhcpSubnetRange2End, dhcpHostDomain=dhcpHostDomain, dhcpLeaseHardwareAddress=dhcpLeaseHardwareAddress, dhcpLeaseRowStatus=dhcpLeaseRowStatus, bootpCountersEntry=bootpCountersEntry, dhcpHostOptionMinLeaseTime=dhcpHostOptionMinLeaseTime, PYSNMP_MODULE_ID=comIpDhcpServer, dhcpServerGroupOptionEntry=dhcpServerGroupOptionEntry, dhcpGroupRowStatus=dhcpGroupRowStatus, dhcpSubnetOptionSecondaryNameServer=dhcpSubnetOptionSecondaryNameServer, dhcpSubnetOptionDefaultLeaseTime=dhcpSubnetOptionDefaultLeaseTime, dhcpServerSubnetOptionEntry=dhcpServerSubnetOptionEntry, dhcpSubnetRange4Start=dhcpSubnetRange4Start, dhcpSubnetOptionBootServer=dhcpSubnetOptionBootServer, dhcpLeaseDDNSFwdName=dhcpLeaseDDNSFwdName, dhcpSubnetNetwork=dhcpSubnetNetwork, dhcpCountOffers=dhcpCountOffers, comIpDhcpServer=comIpDhcpServer, dhcpGroupVendorMatchLength=dhcpGroupVendorMatchLength, dhcpGroupOptionDefaultLeaseTime=dhcpGroupOptionDefaultLeaseTime, dhcpServerRestart=dhcpServerRestart, dhcpSubnetExternalServerAlt=dhcpSubnetExternalServerAlt, dhcpHostIpAddress4=dhcpHostIpAddress4, dhcpServerConfigurationVersion=dhcpServerConfigurationVersion, dhcpGroupName=dhcpGroupName, dhcpTrapZhoneCpeDetected=dhcpTrapZhoneCpeDetected, dhcpSubnetOptionMinLeaseTime=dhcpSubnetOptionMinLeaseTime, dhcpServerNextSubnetIndex=dhcpServerNextSubnetIndex, dhcpSubnetIndex=dhcpSubnetIndex, dhcpServerDefaultMinLeaseTime=dhcpServerDefaultMinLeaseTime, bootpCountDroppedUnknownClients=bootpCountDroppedUnknownClients, dhcpServerLeaseEntry=dhcpServerLeaseEntry, serverSystemDescr=serverSystemDescr, dhcpServerDefaultReserveEnd=dhcpServerDefaultReserveEnd, dhcpGroupOptionDomainName=dhcpGroupOptionDomainName, dhcpGroupOptionMaxLeaseTime=dhcpGroupOptionMaxLeaseTime, dhcpServerSubnetTable=dhcpServerSubnetTable, dhcpLeaseClientHostname=dhcpLeaseClientHostname, dhcpHostIpAddress2=dhcpHostIpAddress2, dhcpServerSubnetEntry=dhcpServerSubnetEntry, dhcpLeaseEnds=dhcpLeaseEnds, dhcpSubnetOptionMaxLeaseTime=dhcpSubnetOptionMaxLeaseTime, dhcpSubnetGroup2=dhcpSubnetGroup2, dhcpGroupClientMatchLength=dhcpGroupClientMatchLength, dhcpCountNacks=dhcpCountNacks, 
dhcpHostOptionDomainName=dhcpHostOptionDomainName, dhcpTrapZhoneCpeSysObjectID=dhcpTrapZhoneCpeSysObjectID, serverActiveSlot=serverActiveSlot, dhcpSubnetRowStatus=dhcpSubnetRowStatus, dhcpServerNextHostIndex=dhcpServerNextHostIndex, dhcpServerLeaseTable=dhcpServerLeaseTable, dhcpStickyAddr=dhcpStickyAddr, dhcpSubnetOptionPrimaryNameServer=dhcpSubnetOptionPrimaryNameServer, dhcpCountReleases=dhcpCountReleases, dhcpTrapZhoneIpInterfaceIndex=dhcpTrapZhoneIpInterfaceIndex, dhcpSubnetRange2Start=dhcpSubnetRange2Start, dhcpServerSubnetOptionTable=dhcpServerSubnetOptionTable, bootpCountInvalids=bootpCountInvalids, dhcpGroupOptionPrimaryNameServer=dhcpGroupOptionPrimaryNameServer, dhcpHostIndex=dhcpHostIndex, dhcpHostOptionBootFile=dhcpHostOptionBootFile, dhcpHostClientId=dhcpHostClientId, dhcpHostOptionMaxLeaseTime=dhcpHostOptionMaxLeaseTime, dhcpLeaseDDNSRevName=dhcpLeaseDDNSRevName, serverStandbySlot=serverStandbySlot, dhcpHostHostname=dhcpHostHostname, dhcpServerGroupEntry=dhcpServerGroupEntry, dhcpServerDefaultLeaseTime=dhcpServerDefaultLeaseTime, dhcpHostOptionSecondaryNameServer=dhcpHostOptionSecondaryNameServer, serverUptime=serverUptime, dhcpServerDefaultMaxLeaseTime=dhcpServerDefaultMaxLeaseTime, dhcpGroupOptionDefaultRouter=dhcpGroupOptionDefaultRouter, bootpCountReplies=bootpCountReplies, dhcpServerHostOptionTable=dhcpServerHostOptionTable, dhcpHostRowStatus=dhcpHostRowStatus, dhcpHostHardwareAddress=dhcpHostHardwareAddress, dhcpCountDroppedUnknownClient=dhcpCountDroppedUnknownClient, dhcpHostIpAddress1=dhcpHostIpAddress1, dhcpHostIpAddress3=dhcpHostIpAddress3, dhcpServerHostOptionEntry=dhcpServerHostOptionEntry, dhcpCountAcks=dhcpCountAcks, dhcpServerGroupOptionTable=dhcpServerGroupOptionTable, serverSystem=serverSystem, dhcpGroupOptionBootServer=dhcpGroupOptionBootServer, bootpCountRequests=bootpCountRequests, dhcpSubnetDomain=dhcpSubnetDomain, dhcpCountRequests=dhcpCountRequests, dhcpCountInvalids=dhcpCountInvalids, dhcpSubnetOptionDefaultRouter=dhcpSubnetOptionDefaultRouter, dhcpLeaseFlags=dhcpLeaseFlags, dhcpLeaseDomain=dhcpLeaseDomain, dhcpCountDeclines=dhcpCountDeclines, dhcpGroupOptionBootFile=dhcpGroupOptionBootFile, dhcpLeaseStarts=dhcpLeaseStarts, dhcpHostOptionDefaultLeaseTime=dhcpHostOptionDefaultLeaseTime, dhcpServerHostTable=dhcpServerHostTable, dhcpGroupDomain=dhcpGroupDomain, dhcpLeaseClientId=dhcpLeaseClientId, dhcpSubnetRange3End=dhcpSubnetRange3End, dhcpSubnetOptionDomainName=dhcpSubnetOptionDomainName, dhcpLeaseHostname=dhcpLeaseHostname, dhcpCountersEntry=dhcpCountersEntry, dhcpCountDroppedNotServingSubnet=dhcpCountDroppedNotServingSubnet, serverStandbyShelf=serverStandbyShelf, bootpCountersTable=bootpCountersTable, dhcpCountDiscovers=dhcpCountDiscovers, dhcpCountInforms=dhcpCountInforms, dhcpServerStatistics=dhcpServerStatistics, dhcpServerHostEntry=dhcpServerHostEntry)
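# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the generated module above): how a compiled
# pysnmp MIB module such as this one might be loaded and one of its symbols
# resolved to a numeric OID.  The './pysnmp_mibs' directory is a hypothetical
# location for the compiled .py file.
from pysnmp.smi import builder

mib_builder = builder.MibBuilder()
mib_builder.addMibSources(builder.DirMibSource('./pysnmp_mibs'))  # assumed path
mib_builder.loadModules('ZHONE-COM-IP-DHCP-SERVER-MIB')

# importSymbols returns the managed objects registered by exportSymbols above.
(dhcp_server_restart,) = mib_builder.importSymbols(
    'ZHONE-COM-IP-DHCP-SERVER-MIB', 'dhcpServerRestart')
print(dhcp_server_restart.getName())  # -> (1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 18)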
"""Implementation classes that are used as application configuration containers
parsed from files.
"""
__author__ = 'Paul Landes'
from typing import Dict, Set
import logging
import re
import collections
from zensols.persist import persisted
from . import Configurable, ConfigurableError
logger = logging.getLogger(__name__)
class StringConfig(Configurable):
"""A simple string based configuration. This takes a single comma delimited
key/value pair string in the format:
``<section>.<name>=<value>[,<section>.<name>=<value>,...]``
    A dot (``.``) is used to separate the section from the option instead of a
    colon (``:``), which is used by the more sophisticated interpolation in
    :class:`configparser.ExtendedInterpolation`.  The dot is used here to make
    interpolation across other sections easier.
"""
KEY_VAL_REGEX = re.compile(r'^(?:([^.]+?)\.)?([^=]+?)=(.+)$')
def __init__(self, config_str: str, option_sep: str = ',',
default_section: str = None):
"""Initialize with a string given as described in the class docs.
:param config_str: the configuration
        :param option_sep: the string used to delimit each key/value pair
        :param default_section: used as the default section when none is given
                                to get methods such as :meth:`get_option`
"""
super().__init__(default_section)
self.config_str = config_str
self.option_sep = option_sep
@persisted('_parsed_config')
def _get_parsed_config(self) -> Dict[str, str]:
"""Parse the configuration string given in the initializer (see class docs).
"""
conf = collections.defaultdict(lambda: {})
for kv in self.config_str.split(self.option_sep):
m = self.KEY_VAL_REGEX.match(kv)
if m is None:
raise ConfigurableError(f'unexpected format: {kv}')
sec, name, value = m.groups()
sec = self.default_section if sec is None else sec
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'section={sec}, name={name}, value={value}')
conf[sec][name] = value
return conf
@property
@persisted('_sections')
def sections(self) -> Set[str]:
return set(self._get_parsed_config().keys())
    def has_option(self, name: str, section: str = None) -> bool:
        section = self.default_section if section is None else section
        return name in self._get_parsed_config()[section]
    def get_options(self, section: str = None) -> Dict[str, str]:
        section = self.default_section if section is None else section
        conf = self._get_parsed_config()
        if section not in conf:
            raise ConfigurableError(f'no section: {section}')
        return conf[section]
def __str__(self) -> str:
return self.__class__.__name__ + ': config=' + self.config_str
def __repr__(self) -> str:
return f'<{self.__str__()}>'
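# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): one way StringConfig
# might be constructed and queried.  The section and option names below are
# made up purely for illustration.
if __name__ == '__main__':
    conf = StringConfig('model.epochs=10,model.lr=0.001,data.path=./corpus',
                        default_section='model')
    print(conf.sections)              # -> {'model', 'data'}
    print(conf.get_options('data'))   # -> {'path': './corpus'}
    print(conf.has_option('epochs'))  # -> True (uses the default section)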