import asyncio
import math
import os
import time
import unicodedata
from pyrogram.errors import MessageNotModified
from help.progress import humanbytes, TimeFormatter
import requests
from config import Config
from tqdm.utils import CallbackIOWrapper
from pathlib import Path
from tqdm.contrib.telegram import tqdm
CHUNK_SIZE = 1024  # base-1024 units so tqdm's unit_scale reports KiB/MiB correctly
TIMEOUT: float = 60
header = {
'Connection': 'keep-alive',
'sec-ch-ua': '"Google Chrome";v="89", "Chromium";v="89", ";Not A Brand";v="99"',
'Accept': 'application/json, text/plain, */*',
'requesttoken': '',
'sec-ch-ua-mobile': '?0',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4389.90 Safari/537.36',
'Content-Type': 'application/json;charset=UTF-8',
'Origin': 'https://nube.ucf.edu.cu',
'Sec-Fetch-Site': 'same-origin',
'Sec-Fetch-Mode': 'cors',
'Sec-Fetch-Dest': 'empty',
'Accept-Language': 'en-US,en;q=0.9,es;q=0.8'
}
#async def upload_file_old(file):
# print("Func. upload_file")
# with open(file, 'rb') as upload:
# with requests.Session() as request:
# request.auth = (Config.USER, Config.PASSWORD)
# conn = request.put('https://nube.ucf.edu.cu/remote.php/webdav/{}'.format(file), data=upload)
# print(conn.status_code)
# os.unlink(file)
# print('Upload Ok!')
async def upload_file(file, chat_id):
filename_path = Path(f"{file}")
print("Func. upload_file")
# with open(file, 'rb') as upload:
with requests.Session() as request:
request.auth = (Config.USER, Config.PASSWORD)
size = filename_path.stat().st_size if filename_path.exists() else 0
print(size)
with tqdm(token=Config.BOT,
chat_id=chat_id,
total=size,
desc="Subiendo... ",
mininterval=3.0,
unit="B",
unit_scale=True,
bar_format="{desc}{percentage:3.0f}% / {rate_fmt}{postfix}",
unit_divisor=CHUNK_SIZE,
) as t, open(filename_path, "rb") as fileobj:
wrapped_file = CallbackIOWrapper(t.update, fileobj, "read")
with request.put(
url="https://nube.ucf.edu.cu/remote.php/webdav/{}".format(file),
data=wrapped_file, # type: ignore
headers=header,
timeout=TIMEOUT,
stream=True,
) as resp:
print(resp.status_code)
resp.raise_for_status()
t.tgio.delete()
print("UPLOAD OK!")
async def get_share_link(full_name):
with requests.Session() as request:
request.auth = (Config.USER, Config.PASSWORD)
response = request.get('https://nube.ucf.edu.cu/index.php/apps/dashboard/')
i = response.content.index(b'token=')
tok = str(response.content[i + 7:i + 96])[2:-1]
header.update({'requesttoken': tok})
data = '{"path":"' + f'/{full_name}' + '","shareType":3, "password":"' + f'{Config.LINK_PASSWORD}' + '"}'
response = request.post('https://nube.ucf.edu.cu/ocs/v2.php/apps/files_sharing/api/v1/shares',
headers=header, cookies=response.cookies, data=data)
url = response.json()
try:
url = url['ocs']['data']['url']
url = url + "/download/" + full_name
except Exception as e:
print(f'Error getting share link: {e}')
url = "Error: {}".format(e)
return url
async def delete_file(filename):
with requests.Session() as request:
request.auth = (Config.USER, Config.PASSWORD)
url = "https://nube.ucf.edu.cu/remote.php/webdav{}".format(filename)
req = request.delete(url=url)
return req.status_code
async def filename_geturl(url, resp):
if url.find("heroku") != -1:
print("heroku")
return await get_heroku_bot(resp, url)
else:
file = url.split("/", -1)[-1]
if file.find("?") != -1:
file = file.split("?", -1)[0]
if file.find(".") == -1:
try:
file = resp.headers["Content-Disposition"].split("", 1)[1].split("=", 1)[1][1:-1]
except Exception as err:
print(err)
if url.find("checker") != -1:
file += ".mp4"
else:
file += ".ext"
return ["direct", file]
async def get_heroku_bot(resp, url):
print(resp.headers)
    try:
        file = resp.headers["Content-Disposition"].split(";", 1)[1].split("=", 1)[1][1:-1]
    except Exception as err:
        print(err)
        # Fall back to the last path segment of the URL; this lookup only
        # runs when the header is missing or malformed.
        try:
            # ext = resp.headers["Content-Type"]
            # file = "heroku_file.{}".format(ext.split("/", -1)[1])
            file = url.split("/")[-1]
        except Exception as error:
            print(error)
            file = "default_name.ext"
return ["heroku", file]
async def clean_name(name):
full_name = unicodedata.normalize("NFKD", name).encode("ascii", "ignore").decode("ascii")
full_name = full_name.replace(" ", "_")
full_name = full_name.replace("%20", "_")
full_name = full_name.replace("(", "")
full_name = full_name.replace(")", "")
full_name = full_name.replace("$", "")
full_name = full_name.replace("%", "_")
full_name = full_name.replace("@", "_")
full_name = full_name.replace("/", "")
full_name = full_name.replace("|", "")
full_name = full_name.replace("..", ".")
return full_name
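# Usage sketch for clean_name (hypothetical filename; called from a coroutine):
#     await clean_name("café (v2).mp4")
#     # NFKD strips the accent, spaces become "_", parentheses are removed:
#     # -> "cafe_v2.mp4"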
async def download_file(message, url, file_name):
start = time.time()
with open(file_name, mode='wb') as f:
with requests.Session() as session:
with session.get(url, stream=True) as r:
total_length = r.headers.get('content-length') or r.headers.get("Content-Length")
current = 0
if total_length is None:
await message.edit(f"Descargando archivo... \nArchivo: {file_name}\nTamaño: Desconocido")
f.write(r.content)
total_length = 0
else:
total = int(total_length)
                    for chunk in r.iter_content(1024 * 1024 * 15):  # 15 MiB chunks
now = time.time()
diff = now - start
current += len(chunk)
percentage = current * 100 / total
speed = current / diff
elapsed_time = round(diff) * 1000
time_to_completion = round((total - current) / speed) * 1000
estimated_total_time = elapsed_time + time_to_completion
elapsed_time = TimeFormatter(milliseconds=elapsed_time)
estimated_total_time = TimeFormatter(milliseconds=estimated_total_time)
progressed = "[{0}{1}] \n\nProgreso: {2}%\n".format(
''.join(["█" for i in range(math.floor(percentage / 5))]),
''.join(["░" for i in range(20 - math.floor(percentage / 5))]),
round(percentage, 2))
tmp = progressed + "Descargado: {0}\nTotal: {1}\nVelocidad: {2}/s\nFaltan: {3}\n".format(
humanbytes(current),
humanbytes(total),
humanbytes(speed),
# elapsed_time if elapsed_time != '' else "0 s",
estimated_total_time if estimated_total_time != '' else "0 s")
f.write(chunk)
try:
await message.edit("Descargando...\n{}".format(tmp))
                        except MessageNotModified:
                            # back off without blocking the event loop
                            await asyncio.sleep(5.0)
return file_name, int(total_length)
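# Worked example of the progress-bar arithmetic above: at percentage = 42.0,
# math.floor(42 / 5) = 8 filled blocks and 20 - 8 = 12 empty ones, i.e.
# [████████░░░░░░░░░░░░]  Progreso: 42.0%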
|
python
|
import click
from flask.cli import with_appcontext
from goslinks.db.factory import get_model
@click.command()
@with_appcontext
def migrate():
"""Creates and migrates database tables."""
for model_name in ("user", "link"):
model = get_model(model_name)
click.echo(f"Creating table {model.Meta.table_name}... ", nl=False)
try:
model.create_table()
except Exception:
click.echo(click.style("FAILED!", fg="red"))
raise
else:
click.echo(click.style("SUCCESS!", fg="green"))
|
python
|
from .nondet import Nondet
|
python
|
# test_file.py
import io
import pytest
import graspfile.torfile
test_file = "tests/test_data/tor_files/python-graspfile-example.tor"
"""TICRA Tools 10.0.1 GRASP .tor file"""
@pytest.fixture
def empty_tor_file():
"""Return an empty GraspTorFile instance."""
return graspfile.torfile.GraspTorFile()
@pytest.fixture
def input_file_object():
"""Return a file object pointing to the test file."""
return open(test_file)
@pytest.fixture
def filled_tor_file(empty_tor_file, input_file_object):
"""Return a GraspTorFile instance filled from the tor_file"""
empty_tor_file.read(input_file_object)
input_file_object.close()
return empty_tor_file
def test_loading_tor_file(filled_tor_file):
"""Test loading from a tor cutfile"""
# Check that something was loaded
assert len(filled_tor_file.keys()) > 0
# Check that the frequencies were loaded
assert len(filled_tor_file["single_frequencies"].keys()) > 0
def test_reloading_tor_file(filled_tor_file):
"""Test outputting the filled_tor_file to text and reloading it with StringIO"""
test_str = repr(filled_tor_file)
try:
test_io = io.StringIO(test_str)
except TypeError:
test_io = io.StringIO(unicode(test_str))
reload_tor_file = graspfile.torfile.GraspTorFile(test_io)
assert len(filled_tor_file.keys()) == len(reload_tor_file.keys())
|
python
|
"""Choose python classifiers with a curses frontend."""
from __future__ import unicode_literals
import os
import curses
from collections import namedtuple
from .constants import VERSION
from .constants import CHECKMARK
class BoxSelector(object): # pragma: no cover
"""Originally designed for accman.py.
    Display options built from a list of strings in a (unix) terminal.
    The user can browse through the textboxes and select one with enter.
Used in pypackage to display the python trove classifiers in a somewhat
logical/easy to navigate way. The unfortunate part is that this uses
curses to display this to the user. Ideally a cross-platform solution can
be found to replace this class.
Known issues:
curses incorrectly handles unicode, might look like crap, YMMV
curses uses (y,x) for coordinates because fuck your logic
        curses support on winderps is sketchy/non-existent
"""
# Author: Nikolai Tschacher
# Date: 02.06.2013
# adapted for use in pypackage by Adam Talsma in May 2015
def __init__(self, classifier, screen, choices=None, current=0):
"""Create a BoxSelector object.
Args:
classifier: the Classifier root to find choices inside of
screen: the curses screen object
choices: a list of values in the classifier that are selected
current: integer index of classifiers/values to start on
"""
self.stdscr = screen
choices = choices or []
self.current_selected = current
selections = []
if classifier.name != "__root__":
selections.append("..")
for group in classifier.classifiers:
selections.append("[+] {}".format(group.name))
for value in classifier.values:
selections.append(" {} {}".format(
CHECKMARK if value in choices else " ",
value,
))
# Element parameters. Change them here.
self.TEXTBOX_WIDTH = max(79, max([len(i) for i in selections]) + 2)
self.TEXTBOX_HEIGHT = 3
if classifier.name == "__root__":
selections.append("Done".center(self.TEXTBOX_WIDTH - 4, " "))
self.L = selections
self.PAD_WIDTH = 600
self.PAD_HEIGHT = 10000
def pick(self):
"""Runs the user selection proccess, returns their choice index."""
self._init_curses()
self._create_pad()
windows = self._make_textboxes()
picked = self._select_textbox(windows)
self._end_curses()
return picked
def _init_curses(self):
"""Initializes the curses appliation."""
# turn off automatic echoing of keys to the screen
curses.noecho()
# Enable non-blocking mode. Keys are read without hitting enter
curses.cbreak()
        # Hide the text cursor.
curses.curs_set(0)
self.stdscr.keypad(1)
        # Enable colored output.
curses.start_color()
curses.init_pair(1, curses.COLOR_BLACK, curses.COLOR_GREEN)
curses.init_pair(2, curses.COLOR_WHITE, curses.COLOR_BLACK)
self.stdscr.bkgd(curses.color_pair(2))
self.stdscr.refresh()
def _end_curses(self):
"""Terminates the curses application."""
curses.nocbreak()
self.stdscr.keypad(0)
curses.echo()
curses.endwin()
def _create_pad(self):
"""Creates a big self.pad to place the textboxes in."""
self.pad = curses.newpad(self.PAD_HEIGHT, self.PAD_WIDTH)
self.pad.box()
def _make_textboxes(self):
"""Build the textboxes in the center of the pad."""
# Get the actual screensize.
maxy, maxx = self.stdscr.getmaxyx()
banner = "{} -- choose python trove classifiers".format(VERSION)
self.stdscr.addstr(0, maxx // 2 - len(banner) // 2, banner)
windows = []
i = 2
for item in self.L:
pad = self.pad.derwin(
self.TEXTBOX_HEIGHT,
self.TEXTBOX_WIDTH,
i,
self.PAD_WIDTH // 2 - self.TEXTBOX_WIDTH // 2,
)
pad.box()
try:
pad.addstr(1, 2, item)
except UnicodeEncodeError:
# curses has fucked unicode support
item = item.replace(CHECKMARK, "*")
pad.addstr(1, 2, item)
windows.append(pad)
i += self.TEXTBOX_HEIGHT
return windows
def _center_view(self, window):
"""Centers and aligns the view according to the window argument given.
Returns:
the (y, x) coordinates of the centered window
"""
# The refresh() and noutrefresh() methods of a self.pad require 6 args
# to specify the part of self.pad to be displayed and the location on
# the screen to be used for the display. The arguments are pminrow,
# pmincol, sminrow, smincol, smaxrow, smaxcol; the p arguments refer
# to the top left corner of the self.pad region to be displayed and the
# s arguments define a clipping box on the screen within which the
# self.pad region is to be displayed.
cy, cx = window.getbegyx()
maxy, maxx = self.stdscr.getmaxyx()
self.pad.refresh(cy, cx, 1, maxx // 2 - self.TEXTBOX_WIDTH // 2,
maxy - 1, maxx - 1)
return (cy, cx)
def _select_textbox(self, windows):
"""Handles keypresses and user selection."""
# See at the root textbox.
topy, topx = self._center_view(windows[0])
last = self.current_selected - 1
top_textbox = windows[0]
while True:
# Highligth the selected one, the last selected textbox should
# become normal again.
windows[self.current_selected].bkgd(curses.color_pair(1))
windows[last].bkgd(curses.color_pair(2))
# While the textbox can be displayed on the page with the current
# top_textbox, don't alter the view. When this becomes impossible,
# center the view to last displayable textbox on the previous view.
maxy, maxx = self.stdscr.getmaxyx()
cy, cx = windows[self.current_selected].getbegyx()
            # The current window is too far down. Switch the top textbox.
if ((topy + maxy - self.TEXTBOX_HEIGHT) <= cy):
top_textbox = windows[self.current_selected]
            # The current window is too far up. There is a better way though...
if topy >= cy + self.TEXTBOX_HEIGHT:
top_textbox = windows[self.current_selected]
if last != self.current_selected:
last = self.current_selected
topy, topx = self._center_view(top_textbox)
c = self.stdscr.getch()
# Vim like KEY_UP/KEY_DOWN with j(DOWN) and k(UP).
if c in (106, curses.KEY_DOWN): # 106 == j
if self.current_selected >= len(windows) - 1:
self.current_selected = 0 # wrap around.
else:
self.current_selected += 1
elif c in (107, curses.KEY_UP): # 107 == k
if self.current_selected <= 0:
self.current_selected = len(windows) - 1 # wrap around.
else:
self.current_selected -= 1
elif c == 113: # 113 = q == Quit without selecting.
break
# At hitting enter, return the index of the selected list element.
elif c == curses.KEY_ENTER or c == 10:
return int(self.current_selected)
elif c == 27: # esc or alt, try to determine which
self.stdscr.nodelay(True)
n_seq = self.stdscr.getch()
self.stdscr.nodelay(False)
if n_seq == -1:
# Escape was pressed, check if the top option has .. in it
if ".." in str(windows[0].instr(1, 0)):
return 0 # backs up a level
else:
break # exits
Classifier = namedtuple("Classifier", ("name", "values", "classifiers"))
def _ensure_chain(top_level, sub_categories):
"""Ensure a chain of Classifiers from top_level through sub_categories."""
def _chain_in(level, item):
for sub_class in level.classifiers:
if sub_class.name == item:
return sub_class
else:
new_sub = Classifier(item, [], [])
level.classifiers.append(new_sub)
return new_sub
for sub_cat in sub_categories:
top_level = _chain_in(top_level, sub_cat)
return top_level
def read_classifiers():
"""Reads the trove file and returns a Classifier representing all."""
with open(os.path.join(os.path.dirname(os.path.abspath(__file__)),
"classifiers")) as openc:
classifiers = [c.strip() for c in openc.read().splitlines() if c]
all_classifiers = []
def _get_classifier(categories):
"""Find or create the classifier for categories."""
top_level = categories[0]
sub_categories = categories[1:]
for classifier in all_classifiers:
if classifier.name == top_level:
top_level = classifier
break
else:
top_level = Classifier(top_level, [], [])
all_classifiers.append(top_level)
return _ensure_chain(top_level, sub_categories)
for clsifier in classifiers:
_get_classifier(clsifier.split(" :: ")[:-1]).values.append(clsifier)
return Classifier("__root__", [], all_classifiers)
def back_it_up(current_level, all_classifiers, recursive=False):
"""Returns the classifier up a level from current."""
for classifier in all_classifiers.classifiers:
if current_level in classifier.classifiers:
return classifier
for classifier in all_classifiers.classifiers:
attempt = back_it_up(current_level, classifier, True)
if attempt:
return attempt
if not recursive:
return all_classifiers
def choose_classifiers(config):
"""Get some user input for the classifiers they'd like to use.
Returns:
list of valid classifier names
"""
all_classifiers = read_classifiers()
root_classifier = all_classifiers
old_delay = os.getenv("ESCDELAY")
os.environ["ESCDELAY"] = "25" # the default delay is a full second...
screen = curses.initscr()
choices = getattr(config, "classifiers", [])
choice = BoxSelector(root_classifier, screen, choices).pick()
while choice is not None:
init = 0
if choice == 0 and root_classifier.name != "__root__":
root_classifier = back_it_up(root_classifier, all_classifiers)
elif choice == 9 and root_classifier.name == "__root__":
break # the "done" box from the top level
elif choice > len(root_classifier.classifiers):
choice_index = (choice - len(root_classifier.classifiers) -
int(root_classifier.name != "__root__"))
choice_as_str = root_classifier.values[choice_index]
if choice_as_str not in choices:
choices.append(choice_as_str)
else:
choices.remove(choice_as_str)
init = choice
else:
choice_index = choice - int(root_classifier.name != "__root__")
root_classifier = root_classifier.classifiers[choice_index]
choice = BoxSelector(root_classifier, screen, choices, init).pick()
if old_delay:
os.environ["ESCDELAY"] = old_delay
else:
os.environ.pop("ESCDELAY")
return choices
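# Usage sketch (hypothetical config object; opens the curses UI, so it needs a
# real terminal):
#     class FakeConfig:
#         classifiers = []
#     chosen = choose_classifiers(FakeConfig())
#     # chosen is e.g. ["Programming Language :: Python :: 3"]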
|
python
|
'''
Created on 20/01/2014
@author: MMPE
See documentation of HTCFile below
'''
from __future__ import division
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import absolute_import
from builtins import zip
from builtins import int
from builtins import str
from future import standard_library
import os
standard_library.install_aliases()
class HTCDefaults(object):
empty_htc = """begin simulation;
time_stop 600;
solvertype 2; (newmark)
begin newmark;
deltat 0.02;
end newmark;
end simulation;
;
;----------------------------------------------------------------------------------------------------------------------------------------------------------------
;
begin wind ;
density 1.225 ;
wsp 10 ;
tint 1;
horizontal_input 1 ; 0=false, 1=true
windfield_rotations 0 0.0 0.0 ; yaw, tilt, rotation
center_pos0 0 0 -30 ; hub height
shear_format 1 0;0=none,1=constant,2=log,3=power,4=linear
turb_format 0 ; 0=none, 1=mann,2=flex
tower_shadow_method 0 ; 0=none, 1=potential flow, 2=jet
end wind;
;
;----------------------------------------------------------------------------------------------------------------------------------------------------------------
;
;
begin output;
filename ./tmp;
general time;
end output;
exit;"""
def add_mann_turbulence(self, L=29.4, ae23=1, Gamma=3.9, seed=1001, high_frq_compensation=True,
filenames=None,
no_grid_points=(16384, 32, 32), box_dimension=(6000, 100, 100),
dont_scale=False,
std_scaling=None):
wind = self.add_section('wind')
wind.turb_format = 1
mann = wind.add_section('mann')
if 'create_turb_parameters' in mann:
mann.create_turb_parameters.values = [L, ae23, Gamma, seed, int(high_frq_compensation)]
else:
mann.add_line('create_turb_parameters', [L, ae23, Gamma, seed, int(high_frq_compensation)],
"L, alfaeps, gamma, seed, highfrq compensation")
if filenames is None:
import numpy as np
dxyz = tuple(np.array(box_dimension) / no_grid_points)
from wetb.wind.turbulence import mann_turbulence
filenames = ["./turb/" + mann_turbulence.name_format %
((L, ae23, Gamma, high_frq_compensation) + no_grid_points + dxyz + (seed, uvw))
for uvw in ['u', 'v', 'w']]
if isinstance(filenames, str):
filenames = ["./turb/%s_s%04d%s.bin" % (filenames, seed, c) for c in ['u', 'v', 'w']]
for filename, c in zip(filenames, ['u', 'v', 'w']):
setattr(mann, 'filename_%s' % c, filename)
for c, n, dim in zip(['u', 'v', 'w'], no_grid_points, box_dimension):
setattr(mann, 'box_dim_%s' % c, "%d %.4f" % (n, dim / (n)))
if dont_scale:
mann.dont_scale = 1
else:
try:
del mann.dont_scale
except KeyError:
pass
if std_scaling is not None:
mann.std_scaling = "%f %f %f" % std_scaling
else:
try:
del mann.std_scaling
except KeyError:
pass
def add_turb_export(self, filename="export_%s.turb", samplefrq=None):
exp = self.wind.add_section('turb_export', allow_duplicate=True)
for uvw in 'uvw':
exp.add_line('filename_%s' % uvw, [filename % uvw])
sf = samplefrq or max(1, int(self.wind.mann.box_dim_u[1] / (self.wind.wsp[0] * self.deltat())))
exp.samplefrq = sf
if "time" in self.output:
exp.time_start = self.output.time[0]
else:
exp.time_start = 0
exp.nsteps = (self.simulation.time_stop[0] - exp.time_start[0]) / self.deltat()
for vw in 'vw':
exp.add_line('box_dim_%s' % vw, self.wind.mann['box_dim_%s' % vw].values)
def import_dtu_we_controller_input(self, filename):
dtu_we_controller = [dll for dll in self.dll if dll.name[0] == 'dtu_we_controller'][0]
with open(filename) as fid:
lines = fid.readlines()
K_r1 = float(lines[1].replace("K = ", '').replace("[Nm/(rad/s)^2]", ''))
Kp_r2 = float(lines[4].replace("Kp = ", '').replace("[Nm/(rad/s)]", ''))
Ki_r2 = float(lines[5].replace("Ki = ", '').replace("[Nm/rad]", ''))
Kp_r3 = float(lines[7].replace("Kp = ", '').replace("[rad/(rad/s)]", ''))
Ki_r3 = float(lines[8].replace("Ki = ", '').replace("[rad/rad]", ''))
KK = lines[9].split("]")
KK1 = float(KK[0].replace("K1 = ", '').replace("[deg", ''))
KK2 = float(KK[1].replace(", K2 = ", '').replace("[deg^2", ''))
cs = dtu_we_controller.init
cs.constant__11.values[1] = "%.6E" % K_r1
cs.constant__12.values[1] = "%.6E" % Kp_r2
cs.constant__13.values[1] = "%.6E" % Ki_r2
cs.constant__16.values[1] = "%.6E" % Kp_r3
cs.constant__17.values[1] = "%.6E" % Ki_r3
cs.constant__21.values[1] = "%.6E" % KK1
cs.constant__22.values[1] = "%.6E" % KK2
def add_hydro(self, mudlevel, mwl, gravity=9.81, rho=1027):
wp = self.add_section("hydro").add_section('water_properties')
wp.mudlevel = mudlevel
wp.mwl = mwl
wp.gravity = gravity
wp.rho = rho
class HTCExtensions(object):
def get_shear(self):
shear_type, parameter = self.wind.shear_format.values
z0 = -self.wind.center_pos0[2]
wsp = self.wind.wsp[0]
if shear_type == 1: # constant
return lambda z: wsp
elif shear_type == 3:
from wetb.wind.shear import power_shear
return power_shear(parameter, z0, wsp)
else:
raise NotImplementedError
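# Usage sketch (assumes an HTCFile class that mixes in HTCDefaults, as in wetb;
# the instantiation below is hypothetical):
#     htc = HTCFile()
#     htc.add_mann_turbulence(L=29.4, ae23=1, Gamma=3.9, seed=1001)
#     htc.add_hydro(mudlevel=0, mwl=30)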
|
python
|
from apscheduler.schedulers.blocking import BlockingScheduler
from server.sync import sync_blocks, sync_tokens
background = BlockingScheduler()
background.add_job(sync_tokens, "interval", seconds=30)
background.add_job(sync_blocks, "interval", seconds=5)
background.start()
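# Note: BlockingScheduler.start() blocks the calling thread, so this script is
# meant to run as its own process; with the intervals above, sync_tokens fires
# every 30 s and sync_blocks every 5 s.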
|
python
|
########################################################
# plot_norms.py #
# Matheus J. Castro #
# Version 1.2 #
# Last Modification: 11/11/2021 (month/day/year) #
# https://github.com/MatheusJCastro/spectra_comparator #
# Licensed under MIT License #
########################################################
from astropy.io import fits
import matplotlib.pyplot as plt
import numpy as np
import os
def open_spec(fl_name):
# Subroutine to open the .fits spectrum and read it
hdul = fits.open(fl_name) # open the file
spec_data = hdul[0].data # get the data
spec_header = hdul[0].header # get the header
if spec_data.shape != (2048,): # get only the actual spectrum (for multidimensional data)
spec_data = spec_data[1][0]
# Get the wavelength information from the header
# CDELT1 or CD1_1
wl = spec_header['CRVAL1'] + spec_header['CD1_1'] * np.arange(0, len(spec_data))
hdul.close() # close the file
return wl, spec_data, spec_header
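# Worked example of the dispersion relation above (hypothetical header values):
# CRVAL1 = 3000.0 and CD1_1 = 0.5 give wl = [3000.0, 3000.5, 3001.0, ...],
# one wavelength value per pixel of the spectrum.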
def finish_plot(show=False, save=False, fl1=None, fl2=None):
# End and save plot subroutine
if save:
plt.savefig("Plots_{}_{}".format(fl1, fl2))
if show:
plt.show()
plt.close()
def plot_spectra(spec, name=None):
# Subroutine to plot the spectrum
plt.plot(spec[0], spec[1], label=name)
def main():
# Main subroutine, find and plot the spectra
onlynorm = False # change to True to plot only the normalized spectrum
files = []
for i in os.listdir(): # search for all non-normalized files in the current directory
if "tha_" in i and "norm" not in i and "list" not in i:
files.append(i)
files_norm = []
for i in os.listdir(): # search for all normalized files in the current directory
if "norm_tha_" in i:
files_norm.append(i)
for i in range(len(files)): # for each tuple of files
fig = plt.figure(figsize=(21, 9))
fig.suptitle("Comparison of normalized and non normalized spectrum", fontsize=28)
if not onlynorm: # to plot non-normalized files as subplot
plt.subplot(121)
plt.title("Standard", fontsize=22)
plt.xlabel("Pixel", fontsize=20)
plt.ylabel("Intensity", fontsize=20)
plt.yscale("log")
plt.xticks(fontsize=16)
plt.yticks(fontsize=16)
plt.tick_params(axis='y', which='minor', labelsize=16)
spec_info = open_spec(files[i]) # open the current spectrum
plot_spectra(spec_info) # plot the spectrum
plt.grid(True, which="both", linewidth=1)
plt.subplot(122)
plt.title("Normalized", fontsize=22)
plt.xlabel("Pixel", fontsize=20)
if onlynorm:
plt.ylabel("Intensity", fontsize=20)
plt.yscale("log")
plt.xticks(fontsize=16)
plt.yticks(fontsize=16)
plt.tick_params(axis='y', which='minor', labelsize=16)
spec_info = open_spec(files_norm[i]) # open the current spectrum
plot_spectra(spec_info) # plot the spectrum
plt.grid(True, which="both", linewidth=1)
if files[i][-16:-12] == "3080": # there are two spectra of the 3080A, save both without erasing one
finish_plot(save=True, fl1="comp_norm", fl2=files[i][-16:-8])
else:
finish_plot(save=True, fl1="comp_norm", fl2=files[i][-16:-12])
if __name__ == '__main__':
main()
|
python
|
from django.shortcuts import render
from django.http import JsonResponse
import os
import json
import time
from .api import GoogleAPI
from threpose.settings import BASE_DIR
from src.caching.caching_gmap import APICaching
from decouple import config
gapi = GoogleAPI()
api_caching = APICaching()
PLACE_IMG_PATH = os.path.join(BASE_DIR, 'media', 'places_image')
# Place List page
def get_next_page_from_token(request): # pragma: no cover
"""Get places list data by next_page_token."""
# Check request
if request.method != 'POST':
return JsonResponse({"status": "INVALID METHOD"})
if 'token' not in request.POST:
return JsonResponse({"status": "INVALID PAYLOAD"})
# Get next page token from request
token = request.POST['token']
context = []
# Check next_page cache
if api_caching.get(f'{token[:30]}') is None:
for _ in range(6):
# Request data for 6 times, if response is not OK
# and reached maximum, it will return empty
data = json.loads(gapi.next_search_nearby(token))
if data['status'] == "OK":
context = restruct_nearby_place(data['results'])
break
time.sleep(0.2)
# write cache file
byte_context = json.dumps({"cache": context, "status": "OK"}, indent=3).encode()
api_caching.add(f'{token[:30]}', byte_context)
if len(context) > 0:
return JsonResponse({"places": context, "status": "OK"})
return JsonResponse({"places": context, "status": "NOT FOUND"})
else: # Have cache
# load cache
context = json.loads(api_caching.get(f'{token[:30]}'))
# check place images
context = check_downloaded_image(context['cache'])
return JsonResponse({"places": context, "status": "OK"})
def place_list(request, *args, **kwargs): # pragma: no cover
"""Place_list view for list place that nearby the user search input."""
data = request.GET # get lat and lng from url
# Our default search type
types = ['restaurant', 'zoo', 'tourist_attraction', 'museum', 'cafe', 'aquarium']
lat = data['lat']
lng = data['lng']
# Get place cache
if api_caching.get(f'{lat}{lng}searchresult'):
# data exists
data = json.loads(api_caching.get(f'{lat}{lng}searchresult'))
context = data['cache']
token = data['next_page_token']
else:
# data not exist
context, token = get_new_context(types, lat, lng)
context = check_downloaded_image(context)
# get all image file name in static/images/place_image
api_key = config('FRONTEND_API_KEY')
return render(request, "search/place_list.html", {'places': context, 'all_token': token, 'api_key': api_key})
# Helper function
def get_new_context(types: list, lat: int, lng: int) -> tuple:  # pragma: no cover
"""Cache new data and return the new data file
Args:
types: place type
        lat, lng: latitude and longitude of the user search input
Returns:
context: places nearby data
token: next page token
"""
token = {}
    # This list accumulates data from search nearby
    tempo_context = []
for type in types:
data = json.loads(gapi.search_nearby(lat, lng, type))
if 'next_page_token' in data:
token[type] = data['next_page_token']
places = data['results']
restructed_places = restruct_nearby_place(places)
tempo_context = add_more_place(tempo_context, restructed_places)
# Caching places nearby
cache = {'cache': tempo_context, 'next_page_token': token}
api_caching.add(f'{lat}{lng}searchresult', json.dumps(cache, indent=3).encode())
# Load data from cache
context = json.loads(api_caching.get(f'{lat}{lng}searchresult'))['cache']
return context, token
def restruct_nearby_place(places: list) -> list:
"""Process data for frontend
Args:
places: A place nearby data from google map api.
Returns:
context: A place data that place-list page needed.
Data struct:
[
{
# Essential key
'place_name': <name>,
'place_id': <place_id>,
                'photo_ref': [<photo_ref>],
'types': [],
# other...
}
. . .
]
"""
context = []
for place in places:
init_place = {
'place_name': None,
'place_id': None,
'photo_ref': [],
'types': [],
}
if 'photos' in place:
# Place have an image
photo_ref = place['photos'][0]['photo_reference']
init_place['photo_ref'].append(photo_ref)
else:
# Place don't have an image
continue
init_place['place_name'] = place['name']
init_place['place_id'] = place['place_id']
init_place['types'] = place['types']
context.append(init_place)
return context
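# Example of restruct_nearby_place on a trimmed, hypothetical API result:
#     restruct_nearby_place([{"name": "Cafe X", "place_id": "abc123",
#                             "types": ["cafe"],
#                             "photos": [{"photo_reference": "ref1"}]}])
#     # -> [{"place_name": "Cafe X", "place_id": "abc123",
#     #      "photo_ref": ["ref1"], "types": ["cafe"]}]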
def check_downloaded_image(context: list) -> list:
"""Check that image from static/images/place_image that is ready for frontend to display or not
Args:
context: place nearby data
Returns:
        context: place nearby data indicating whether the image of each place has been downloaded
"""
# Check places_image dir that is exists
if os.path.exists(PLACE_IMG_PATH):
# Get image file name from static/images/places_image
all_img_file = [f for f in os.listdir(PLACE_IMG_PATH)
if os.path.isfile(os.path.join(PLACE_IMG_PATH, f))]
for place in context:
        # A place with a photo_ref is assumed to have images
if 'photo_ref' in place:
place_id = place['place_id']
downloaded_img = f'{place_id}photo.jpeg' in all_img_file
            no_image = len(place['photo_ref']) == 0
            if downloaded_img or no_image:
place['downloaded'] = True
else:
place['downloaded'] = False
return context
def add_more_place(context: list, new: list):
"""Append places to context
Args:
context: total nearby palce data
new: new data by next page tokens
Returns:
context: total nearby place that append
new to is's with out duplicated place
"""
place_exist = [place['place_id'] for place in context]
for place in new:
# Check that place is exists or not
if place['place_id'] in place_exist:
continue
context.append(place)
return context
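# Example: merging a second result page without duplicating place_id "a"
# (hypothetical minimal dicts):
#     add_more_place([{"place_id": "a"}], [{"place_id": "a"}, {"place_id": "b"}])
#     # -> [{"place_id": "a"}, {"place_id": "b"}]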
|
python
|
import os
import math
import random
import argparse
def generate_entities(num_entities=100):
"""generate num_entities random entities for synthetic knowledge graph."""
i = 0
entity_list = []
    # 18 characters (not strict hexadecimal), matching the log base below
    hex_chars = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h']
l = int(math.log(num_entities, 18)+1)
# print l
while i < num_entities:
entity = "/entity_{}".format(''.join(random.sample(hex_chars, l)))
if entity not in entity_list:
entity_list.append(entity)
i += 1
return entity_list
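# Worked example of the id-length formula above: for num_entities = 100,
# l = int(log(100, 18) + 1) = int(1.59... + 1) = 2, so each entity gets a
# 2-character suffix from the 18-character alphabet (18**2 = 324 >= 100).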
parser = argparse.ArgumentParser()
parser.add_argument("--N", type=int)
args = parser.parse_args()
N = args.N
print(N)
entities = generate_entities(N)
entity_file = os.path.join(os.getcwd(), "data", "fake-420", "entities.txt")
with open(entity_file, 'w') as f:
    for e in entities:
        f.write("{}\n".format(e))
|
python
|
# Simple script for drawing the chi-squared density
#
from rpy import *
def draw(df, start=0, end=10):
grid = r.seq(start, end, by=0.1)
l = [r.dchisq(x, df) for x in grid]
r.par(ann=0, yaxt='n')
r.plot(grid, l, type='lines')
if __name__ == '__main__':
print "<Enter> to quit."
while 1:
try:
df = int(raw_input('Degrees of freedom> '))
draw(df)
except ValueError:
break
|
python
|
"""
queue.py
location queue implementation for ducky25, decides next location to travel to
"""
class DestinationQueue:
def __init__(self):
self.queue = []
self.position = 0
def add_to_queue(self, location):
self.queue.append(location)
def pop_next_destination(self):
if len(self.queue) > self.position:
result = self.queue[self.position]
self.position = self.position + 1
else:
result = -1
return result
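# Usage sketch (hypothetical locations):
if __name__ == "__main__":
    q = DestinationQueue()
    q.add_to_queue("kitchen")
    q.add_to_queue("lab")
    print(q.pop_next_destination())  # "kitchen"
    print(q.pop_next_destination())  # "lab"
    print(q.pop_next_destination())  # -1, queue exhausted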
|
python
|
from logging.config import dictConfig # type: ignore
import json
from config import CONFIG_DICT
done_setup = False
def setup_logging():
global done_setup
if not done_setup and CONFIG_DICT['LOGGING']:
try:
logging_config_file_path = CONFIG_DICT['LOGGING_CONFIG_FILE_PATH']
with open(logging_config_file_path, 'rt') as file:
config = json.load(file)
dictConfig(config)
done_setup = True
        except IOError as e:
            raise Exception('Failed to load logging configuration') from e
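# Minimal sketch of a JSON file that dictConfig() accepts (hypothetical
# LOGGING_CONFIG_FILE_PATH contents; "version": 1 is required by the schema):
# {
#     "version": 1,
#     "handlers": {"console": {"class": "logging.StreamHandler"}},
#     "root": {"level": "INFO", "handlers": ["console"]}
# }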
|
python
|
"""
MIT License
Copyright(c) 2021 Andy Zhou
"""
from flask import render_template, request, abort
from flask.json import jsonify
from flask_wtf.csrf import CSRFError
from flask_babel import _
def api_err_response(err_code: int, short_message: str, long_message: str = None):
if (
request.accept_mimetypes.accept_json
and not request.accept_mimetypes.accept_html
or request.blueprint == "api_v1"
or request.blueprint == "api_v2"
or request.blueprint == "api_v3"
):
response = {"error": short_message}
if long_message:
response["message"] = long_message
response = jsonify(response)
response.status_code = err_code
return response
return None # explicitly return None
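# Example of the content negotiation above: a request sent with
# "Accept: application/json" (and no text/html), or one routed through an
# api_v* blueprint, gets a JSON body such as
# {"error": "not found", "message": "404 Not Found"}; a plain browser request
# returns None here and falls through to the HTML template in err_handler.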
def err_handler(
err_code: int,
short_message: str,
long_message: str,
error_description: str,
template: str = "errors/error.html",
):
    json_response = api_err_response(err_code, short_message, long_message)
if json_response is not None:
return json_response
return (
render_template(
template, error_message=long_message, error_description=error_description
),
err_code,
)
def register_error_handlers(app): # noqa: C901
@app.errorhandler(400)
@app.errorhandler(CSRFError)
def bad_request(e):
return err_handler(
400,
"bad request",
"400 Bad Request",
"You have sent an invalid request. This can either be caused by a false CSRF-token or an invalid value of a form.",
)
@app.errorhandler(403)
def forbidden(e):
return err_handler(
403,
"forbidden",
"403 Forbidden",
"You do not have the permission to access this page. Maybe you are not signed in (viewing posts directly), or you tried to enter a page where you aren't allowed to enter.",
)
@app.errorhandler(404)
def page_not_found(e):
return err_handler(
404,
"not found",
"404 Not Found",
"The page you want is not here or deleted.",
"errors/404.html",
)
@app.errorhandler(405)
def method_not_allowed(e):
return err_handler(
405,
"method not allowed",
"405 Method Not Allowed",
"Your request has a wrong method. Maybe you entered some page without a form submission.",
)
@app.errorhandler(413)
    def payload_too_large(e):
return err_handler(
413,
"request entity too large",
"413 Request Entity Too Large",
"Things you upload is too large.",
)
@app.errorhandler(429) # handle when IP is limited
def too_many_requests(e):
return err_handler(
429,
"too many requests",
"429 Too Many Requests",
"You see 429 because you entered a page too many times and triggered our self-protection program. Usually you can wait for a while, in some cases it takes a day.",
)
@app.errorhandler(500)
def internal_server_error(e):
return err_handler(
500,
"internal server error",
"500 Internal Server Error",
"The server went wrong and returned 500. You can contact them to report this 500 error.",
)
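# Usage sketch (assumes an app factory):
#     from flask import Flask
#     app = Flask(__name__)
#     register_error_handlers(app)
#     # visiting an unknown URL now renders errors/404.html, or returns JSON
#     # for API clients.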
|
python
|
from tidyms import lcms
from tidyms import utils
import numpy as np
import pytest
from itertools import product
mz_list = [200, 250, 300, 420, 450]
@pytest.fixture
def simulated_experiment():
mz = np.array(mz_list)
rt = np.linspace(0, 100, 100)
# simulated features params
mz_params = np.array([mz_list,
[3, 10, 5, 31, 22]])
mz_params = mz_params.T
rt_params = np.array([[30, 40, 60, 80, 80],
[1, 2, 2, 3, 3],
[1, 1, 1, 1, 1]])
rt_params = rt_params.T
noise_level = 0.1
sim_exp = utils.SimulatedExperiment(mz, rt, mz_params, rt_params,
noise=noise_level, mode="centroid")
return sim_exp
# parameters of make_chromatograms are tested in the test_validation module
def test_make_chromatograms(simulated_experiment):
# test that the chromatograms generated are valid
# create chromatograms
n_sp = simulated_experiment.getNrSpectra()
n_mz = simulated_experiment.mz_params.shape[0]
rt = np.zeros(n_sp)
chromatogram = np.zeros((n_mz, n_sp))
for scan in range(n_sp):
sp = simulated_experiment.getSpectrum(scan)
rt[scan] = sp.getRT()
_, spint = sp.get_peaks()
chromatogram[:, scan] = spint
expected_chromatograms = [lcms.Chromatogram(rt, x) for x in chromatogram]
test_chromatograms = lcms.make_chromatograms(simulated_experiment, mz_list)
assert len(test_chromatograms) == len(expected_chromatograms)
for ec, tc in zip(expected_chromatograms, test_chromatograms):
assert np.array_equal(ec.rt, tc.rt)
assert np.array_equal(ec.spint, tc.spint)
def test_make_chromatograms_accumulator_mean(simulated_experiment):
lcms.make_chromatograms(simulated_experiment, mz_list, accumulator="mean")
assert True
def test_make_chromatograms_start(simulated_experiment):
n_sp = simulated_experiment.getNrSpectra()
start = 10
chromatogram_length = n_sp - start
chromatograms = lcms.make_chromatograms(simulated_experiment, mz_list,
start=start)
for c in chromatograms:
assert c.rt.size == chromatogram_length
assert c.rt[0] == simulated_experiment.getSpectrum(start).getRT()
def test_make_chromatograms_end(simulated_experiment):
end = 90
chromatograms = lcms.make_chromatograms(simulated_experiment, mz_list,
end=end)
for c in chromatograms:
assert c.rt.size == end
assert c.rt[-1] == simulated_experiment.getSpectrum(end - 1).getRT()
def test_make_chromatograms_outside_range_mz(simulated_experiment):
# the total intensity of the chromatogram should be equal to zero
chromatograms = lcms.make_chromatograms(simulated_experiment, [550])
assert np.isclose(chromatograms[0].spint.sum(), 0)
# def test_accumulate_spectra(simulated_experiment):
# lcms.accumulate_spectra_profile(simulated_experiment, start=10, end=20)
# assert True
#
#
# def test_accumulate_spectra_subtract(simulated_experiment):
# lcms.accumulate_spectra_profile(simulated_experiment, start=10, end=20,
# subtract_left=5, subtract_right=25)
# assert True
#
#
# def test_get_roi_params():
# func_params = [("uplc", "qtof"), ("uplc", "orbitrap"), ("hplc", "qtof"),
# ("hplc", "orbitrap")]
# n_sp = 100 # dummy value for the validator
# for separation, instrument in func_params:
# params = lcms.get_roi_params(separation, instrument)
# validation.validate_make_roi_params(n_sp, params)
# assert True
#
#
# def test_get_roi_params_bad_separation():
# with pytest.raises(ValueError):
# lcms.get_roi_params("bad-value", "qtof")
#
#
# def test_get_roi_params_bad_instrument():
# with pytest.raises(ValueError):
# lcms.get_roi_params("uplc", "bad-value")
#
#
# # Test Chromatogram object
#
@pytest.fixture
def chromatogram_data():
rt = np.arange(200)
spint = utils.gauss(rt, 50, 2, 100)
spint += np.random.normal(size=rt.size, scale=1.0)
return rt, spint
def test_chromatogram_creation(chromatogram_data):
# test building a chromatogram with default mode
rt, spint = chromatogram_data
chromatogram = lcms.Chromatogram(rt, spint)
assert chromatogram.mode == "uplc"
def test_chromatogram_creation_with_mode(chromatogram_data):
rt, spint = chromatogram_data
chromatogram = lcms.Chromatogram(rt, spint, mode="hplc")
assert chromatogram.mode == "hplc"
def test_chromatogram_creation_invalid_mode(chromatogram_data):
rt, spint = chromatogram_data
with pytest.raises(ValueError):
lcms.Chromatogram(rt, spint, mode="invalid-mode")
def test_chromatogram_find_peaks(chromatogram_data):
chromatogram = lcms.Chromatogram(*chromatogram_data)
chromatogram.find_peaks()
assert len(chromatogram.peaks) == 1
# Test MSSPectrum
@pytest.fixture
def ms_data():
mz = np.linspace(100, 110, 1000)
spint = utils.gauss(mz, 105, 0.005, 100)
    spint += np.random.normal(size=mz.size, scale=1.0)
return mz, spint
def test_ms_spectrum_creation(ms_data):
sp = lcms.MSSpectrum(*ms_data)
assert sp.instrument == "qtof"
def test_ms_spectrum_creation_with_instrument(ms_data):
instrument = "orbitrap"
sp = lcms.MSSpectrum(*ms_data, instrument=instrument)
assert sp.instrument == instrument
def test_ms_spectrum_creation_invalid_instrument(ms_data):
with pytest.raises(ValueError):
instrument = "invalid-mode"
lcms.MSSpectrum(*ms_data, instrument=instrument)
def test_find_centroids_qtof(ms_data):
sp = lcms.MSSpectrum(*ms_data)
# the algorithm is tested on test_peaks.py
sp.find_centroids()
assert True
# Test ROI
@pytest.fixture
def roi_data():
rt = np.arange(200)
spint = utils.gauss(rt, 50, 2, 100)
mz = np.random.normal(loc=150.0, scale=0.001, size=spint.size)
# add some nan values
nan_index = [0, 50, 100, 199]
spint[nan_index] = np.nan
mz[nan_index] = np.nan
return rt, mz, spint
def test_roi_creation(roi_data):
rt, mz, spint = roi_data
lcms.Roi(spint, mz, rt, rt)
assert True
def test_fill_nan(roi_data):
rt, mz, spint = roi_data
roi = lcms.Roi(spint, mz, rt, rt)
roi.fill_nan()
has_nan = np.any(np.isnan(roi.mz) & np.isnan(roi.spint))
assert not has_nan
# roi making tests
def test_match_mz_no_multiple_matches():
tolerance = 2
mz1 = np.array([50, 75, 100, 125, 150])
mz2 = np.array([40, 51, 78, 91, 99, 130, 150])
sp2 = np.array([100] * mz2.size)
# expected values for match/no match indices
mz1_match_index = np.array([0, 2, 4], dtype=int)
mz2_match_index = np.array([1, 4, 6], dtype=int)
mz2_no_match_index = np.array([0, 2, 3, 5], dtype=int)
mode = "closest"
test_mz1_index, mz2_match, sp2_match, mz2_no_match, sp2_no_match = \
lcms._match_mz(mz1, mz2, sp2, tolerance, mode, np.mean, np.mean)
# test match index
assert np.array_equal(mz1_match_index, test_mz1_index)
# test match mz and sp values
assert np.array_equal(mz2[mz2_match_index], mz2_match)
assert np.array_equal(sp2[mz2_match_index], sp2_match)
# test no match mz and sp values
assert np.array_equal(mz2[mz2_no_match_index], mz2_no_match)
assert np.array_equal(sp2[mz2_no_match_index], sp2_no_match)
def test_match_mz_no_matches():
tolerance = 2
mz1 = np.array([50, 75, 100, 125, 150])
mz2 = np.array([40, 53, 78, 91, 97, 130, 154])
sp2 = np.array([100] * mz2.size)
# expected values for match/no match indices
mz1_match_index = np.array([], dtype=int)
mz2_match_index = np.array([], dtype=int)
mz2_no_match_index = np.array([0, 1, 2, 3, 4, 5, 6], dtype=int)
mode = "closest"
test_mz1_index, mz2_match, sp2_match, mz2_no_match, sp2_no_match = \
lcms._match_mz(mz1, mz2, sp2, tolerance, mode, np.mean, np.mean)
# test match index
assert np.array_equal(mz1_match_index, test_mz1_index)
# test match mz and sp values
assert np.array_equal(mz2[mz2_match_index], mz2_match)
assert np.array_equal(sp2[mz2_match_index], sp2_match)
# test no match mz and sp values
assert np.array_equal(mz2[mz2_no_match_index], mz2_no_match)
assert np.array_equal(sp2[mz2_no_match_index], sp2_no_match)
def test_match_mz_all_match():
tolerance = 2
mz1 = np.array([50, 75, 100, 125, 150])
mz2 = np.array([51, 77, 99, 126, 150])
sp2 = np.array([100] * mz2.size)
# expected values for match/no match indices
mz1_match_index = np.array([0, 1, 2, 3, 4], dtype=int)
mz2_match_index = np.array([0, 1, 2, 3, 4], dtype=int)
mz2_no_match_index = np.array([], dtype=int)
mode = "closest"
test_mz1_index, mz2_match, sp2_match, mz2_no_match, sp2_no_match = \
lcms._match_mz(mz1, mz2, sp2, tolerance, mode, np.mean, np.mean)
# test match index
assert np.array_equal(mz1_match_index, test_mz1_index)
# test match mz and sp values
assert np.array_equal(mz2[mz2_match_index], mz2_match)
assert np.array_equal(sp2[mz2_match_index], sp2_match)
# test no match mz and sp values
assert np.array_equal(mz2[mz2_no_match_index], mz2_no_match)
assert np.array_equal(sp2[mz2_no_match_index], sp2_no_match)
def test_match_mz_multiple_matches_mode_closest():
tolerance = 2
mz1 = np.array([50, 75, 100, 125, 150])
mz2 = np.array([49, 51, 78, 99, 100, 101, 126, 150, 151])
sp2 = np.array([100] * mz2.size)
# expected values for match/no match indices
# in closest mode, argmin is used to select the closest value. If more
# than one value has the same difference, the first one in the array is
# going to be selected.
mz1_match_index = np.array([0, 2, 3, 4], dtype=int)
mz2_match_index = np.array([0, 4, 6, 7], dtype=int)
mz2_no_match_index = np.array([1, 2, 3, 5, 8], dtype=int)
mode = "closest"
test_mz1_index, mz2_match, sp2_match, mz2_no_match, sp2_no_match = \
lcms._match_mz(mz1, mz2, sp2, tolerance, mode, np.mean, np.mean)
# test match index
assert np.array_equal(mz1_match_index, test_mz1_index)
# test match mz and sp values
assert np.array_equal(mz2[mz2_match_index], mz2_match)
assert np.array_equal(sp2[mz2_match_index], sp2_match)
# test no match mz and sp values
assert np.array_equal(mz2[mz2_no_match_index], mz2_no_match)
assert np.array_equal(sp2[mz2_no_match_index], sp2_no_match)
def test_match_mz_multiple_matches_mode_reduce():
tolerance = 2
mz1 = np.array([50, 75, 100, 125, 150], dtype=float)
mz2 = np.array([49, 51, 78, 99, 100, 101, 126, 150, 151], dtype=float)
sp2 = np.array([100] * mz2.size, dtype=float)
# expected values for match/no match indices
# in closest mode, argmin is used to select the closest value. If more
# than one value has the same difference, the first one in the array is
# going to be selected.
mz1_match_index = np.array([0, 2, 3, 4], dtype=int)
mz2_match_index = np.array([0, 1, 3, 4, 5, 6, 7, 8], dtype=int)
mz2_no_match_index = np.array([2], dtype=int)
expected_mz2_match = [50.0, 100.0, 126.0, 150.5]
expected_sp2_match = [200, 300, 100, 200]
mode = "reduce"
test_mz1_index, mz2_match, sp2_match, mz2_no_match, sp2_no_match = \
lcms._match_mz(mz1, mz2, sp2, tolerance, mode, np.mean, np.sum)
# test match index
assert np.array_equal(mz1_match_index, test_mz1_index)
# test match mz and sp values
assert np.allclose(mz2_match, expected_mz2_match)
assert np.allclose(sp2_match, expected_sp2_match)
# test no match mz and sp values
assert np.array_equal(mz2[mz2_no_match_index], mz2_no_match)
assert np.array_equal(sp2[mz2_no_match_index], sp2_no_match)
def test_match_mz_invalid_mode():
tolerance = 2
mz1 = np.array([50, 75, 100, 125, 150])
mz2 = np.array([49, 51, 78, 99, 100, 101, 126, 150, 151])
sp2 = np.array([100] * mz2.size)
mode = "invalid-mode"
with pytest.raises(ValueError):
test_mz1_index, mz2_match, sp2_match, mz2_no_match, sp2_no_match = \
lcms._match_mz(mz1, mz2, sp2, tolerance, mode, np.mean, np.mean)
def test_make_roi(simulated_experiment):
roi_list = lcms.make_roi(simulated_experiment, tolerance=0.005,
max_missing=0, min_length=0, min_intensity=0,
multiple_match="reduce")
assert len(roi_list) == simulated_experiment.mz_params.shape[0]
def test_make_roi_targeted_mz(simulated_experiment):
# the first three m/z values generated by simulated experiment are used
targeted_mz = simulated_experiment.mz_params[:, 0][:3]
roi_list = lcms.make_roi(simulated_experiment, tolerance=0.005,
max_missing=0, min_length=0, min_intensity=0,
multiple_match="reduce", targeted_mz=targeted_mz)
assert len(roi_list) == targeted_mz.size
def test_make_roi_min_intensity(simulated_experiment):
min_intensity = 15
roi_list = lcms.make_roi(simulated_experiment, tolerance=0.005,
max_missing=0, min_length=0,
min_intensity=min_intensity,
multiple_match="reduce")
# only two roi should have intensities greater than 15
assert len(roi_list) == 2
def test_make_roi_start(simulated_experiment):
start = 10
roi_list = lcms.make_roi(simulated_experiment, tolerance=0.005,
max_missing=0, min_length=0, min_intensity=0,
multiple_match="reduce", start=start)
n_sp = simulated_experiment.getNrSpectra()
for r in roi_list:
assert r.mz.size == (n_sp - start)
def test_make_roi_end(simulated_experiment):
end = 10
roi_list = lcms.make_roi(simulated_experiment, tolerance=0.005,
max_missing=0, min_length=0, min_intensity=0,
multiple_match="reduce", end=end)
n_sp = simulated_experiment.getNrSpectra()
for r in roi_list:
assert r.mz.size == end
def test_make_roi_multiple_match_closest(simulated_experiment):
roi_list = lcms.make_roi(simulated_experiment, tolerance=0.005,
max_missing=0, min_length=0, min_intensity=0,
multiple_match="closest")
assert len(roi_list) == simulated_experiment.mz_params.shape[0]
def test_make_roi_multiple_match_reduce_merge(simulated_experiment):
# set a tolerance such that two mz values are merged
# test is done in targeted mode to force a multiple match by removing
# one of the mz values
targeted_mz = simulated_experiment.mz_params[:, 0]
targeted_mz = np.delete(targeted_mz, 3)
tolerance = 31
roi_list = lcms.make_roi(simulated_experiment, tolerance=tolerance,
max_missing=0, min_length=0, min_intensity=0,
multiple_match="reduce", targeted_mz=targeted_mz)
assert len(roi_list) == (simulated_experiment.mz_params.shape[0] - 1)
def test_make_roi_multiple_match_reduce_custom_mz_reduce(simulated_experiment):
roi_list = lcms.make_roi(simulated_experiment, tolerance=0.005,
max_missing=0, min_length=0, min_intensity=0,
multiple_match="reduce", mz_reduce=np.median)
assert len(roi_list) == simulated_experiment.mz_params.shape[0]
def test_make_roi_multiple_match_reduce_custom_sp_reduce(simulated_experiment):
sp_reduce = lambda x: 1
roi_list = lcms.make_roi(simulated_experiment, tolerance=0.005,
max_missing=0, min_length=0, min_intensity=0,
multiple_match="reduce", sp_reduce=sp_reduce)
assert len(roi_list) == simulated_experiment.mz_params.shape[0]
def test_make_roi_invalid_multiple_match(simulated_experiment):
with pytest.raises(ValueError):
lcms.make_roi(simulated_experiment, tolerance=0.005, max_missing=0,
min_length=0, min_intensity=0,
multiple_match="invalid-value")
# test accumulate spectra
def test_accumulate_spectra_centroid(simulated_experiment):
n_sp = simulated_experiment.getNrSpectra()
sp = lcms.accumulate_spectra_centroid(simulated_experiment, 0, n_sp - 1,
tolerance=0.005)
assert sp.mz.size == simulated_experiment.mz_params.shape[0]
def test_accumulate_spectra_centroid_subtract_left(simulated_experiment):
sp = lcms.accumulate_spectra_centroid(simulated_experiment, 70, 90,
subtract_left=20, tolerance=0.005)
# only two peaks at rt 80 should be present
assert sp.mz.size == 2
# test default parameter functions
def test_get_lc_filter_params_uplc():
lcms.get_lc_filter_peak_params("uplc")
assert True
def test_get_lc_filter_params_hplc():
lcms.get_lc_filter_peak_params("hplc")
assert True
def test_get_lc_filter_params_invalid_mode():
with pytest.raises(ValueError):
lcms.get_lc_filter_peak_params("invalid-mode")
@pytest.mark.parametrize("separation,instrument",
list(product(["hplc", "uplc"], ["qtof", "orbitrap"])))
def test_get_roi_params(separation, instrument):
lcms.get_roi_params(separation, instrument)
assert True
def test_get_roi_params_bad_separation():
with pytest.raises(ValueError):
lcms.get_roi_params("invalid-separation", "qtof")
def test_get_roi_params_bad_ms_mode():
with pytest.raises(ValueError):
lcms.get_roi_params("uplc", "invalid-ms-mode")
|
python
|
from flask_login import UserMixin
from datetime import datetime
from sqlalchemy import ForeignKey
from sqlalchemy.orm import backref
from app.extensions import db
ACCESS = {
'guest': 0,
'user': 1,
'admin': 2
}
class User(UserMixin, db.Model):
id = db.Column(db.Integer, primary_key=True, unique=True)
username = db.Column(db.String(64), index=True, unique=True)
email = db.Column(db.String(100), index=True, unique=True)
password = db.Column(db.String(50))
profile = db.relationship('Profile', back_populates='user', cascade='all,delete', uselist=False)
fantasy_team = db.relationship('FantasyTeam', back_populates='user', cascade='all,delete', uselist=False)
    is_admin = db.Column(db.Boolean, default=False)
def __repr__(self):
return '<User {}>'.format(self.username)
class Profile(db.Model):
id = db.Column(db.Integer, primary_key=True)
first_name = db.Column(db.String(255))
last_name = db.Column(db.String(255))
timestamp = db.Column(db.DateTime, index=True, default=datetime.utcnow)
user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
user = db.relationship('User', back_populates='profile')
def __repr__(self):
        return '<Profile {} {} {}>'.format(self.user_id, self.first_name, self.last_name)
# FANTASYTEAM and PLAYER relationship Many to Many
fantasy_teams_players = db.Table('association',
db.Column('fantasy_team_id', db.Integer, ForeignKey('fantasy_team.id')),
db.Column('player_id', db.Integer, ForeignKey('player.id'))
)
class FantasyTeam(db.Model):
__tablename__ = 'fantasy_team'
id = db.Column(db.Integer, primary_key=True)
players = db.relationship("Player",
secondary=fantasy_teams_players,
back_populates="fantasy_teams")
name = db.Column(db.String(255))
# USER relationship 1 to 1
user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
user = db.relationship('User', back_populates='fantasy_team')
overall_score = db.Column(db.Integer)
# ROUNDSCORE relationship (1) to Many
    round_scores = db.relationship('RoundScore', back_populates='fantasy_team', cascade='all,delete')
timestamp = db.Column(db.DateTime, index=True, default=datetime.utcnow)
def __repr__(self):
return '<FantasyTeam {}>'.format(self.name)
class Player(db.Model):
id = db.Column(db.Integer, primary_key=True)
fantasy_teams = db.relationship("FantasyTeam",
secondary=fantasy_teams_players,
back_populates="players")
# GOAL relationship (1) to Many
goals = db.relationship("Goal", back_populates="player", cascade='all,delete')
# TEAM relationship 1 to (Many)
team_id = db.Column(db.Integer, ForeignKey('team.id'))
team = db.relationship("Team", back_populates="players")
number = db.Column(db.Integer)
first_name = db.Column(db.String(255))
last_name = db.Column(db.String(255))
nickname = db.Column(db.String(255))
timestamp = db.Column(db.DateTime, index=True, default=datetime.utcnow)
def __repr__(self):
        return '<Player {} {}>'.format(self.id, self.number)
class Goal(db.Model):
id = db.Column(db.Integer, primary_key=True)
# PLAYER relationship 1 to (Many)
player_id = db.Column(db.Integer, ForeignKey('player.id'))
player = db.relationship("Player", back_populates="goals")
# MATCH relationship 1 to (Many)
match_id = db.Column(db.Integer, ForeignKey('match.id'))
match = db.relationship("Match", back_populates="goals")
timestamp = db.Column(db.DateTime, index=True, default=datetime.utcnow)
def __repr__(self):
        return '<Goal {}>'.format(self.id)
class Match(db.Model):
id = db.Column(db.Integer, primary_key=True)
match_id = db.Column(db.Integer)
# GOAL relationship (1) to Many
goals = db.relationship("Goal", back_populates="match", cascade='all,delete')
# TEAM relationship (1) to Many
team1_id = db.Column(db.Integer, ForeignKey("team.id"))
team2_id = db.Column(db.Integer, ForeignKey("team.id"))
team1 = db.relationship("Team", foreign_keys="Match.team1_id", back_populates="matches1")
team2 = db.relationship("Team", foreign_keys="Match.team2_id", back_populates="matches2")
timestamp = db.Column(db.DateTime, index=True, default=datetime.utcnow)
def __repr__(self):
return '<Match {}>'.format(self.id)
class Team(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(255))
# PLAYER relationship (1) to Many
players = db.relationship("Player", back_populates="team", cascade='all,delete')
# MATCH relationship 1 to (Many)
matches1 = db.relationship("Match", foreign_keys="Match.team1_id", back_populates="team1")
matches2 = db.relationship("Match", foreign_keys="Match.team2_id", back_populates="team2")
timestamp = db.Column(db.DateTime, index=True, default=datetime.utcnow)
def __repr__(self):
        return '<Team {} {}>'.format(self.id, self.name)
class RoundScore(db.Model):
__tablename__ = 'round_score'
id = db.Column(db.Integer, primary_key=True)
number = db.Column(db.Integer)
name = db.Column(db.String(255))
round_score = db.Column(db.Integer)
# FANTASYTEAM relationship 1 to (Many)
fantasy_team_id = db.Column(db.Integer, db.ForeignKey('fantasy_team.id'))
fantasy_team = db.relationship('FantasyTeam', back_populates='round_scores')
timestamp = db.Column(db.DateTime, index=True, default=datetime.utcnow)
def __repr__(self):
return '<RoundScore {}>'.format(self.fantasy_team_id)
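# Usage sketch (assumes an application context and an initialized db;
# values are hypothetical):
#     user = User(username="alice", email="alice@example.com", password="...")
#     team = FantasyTeam(name="Alice FC", user=user)
#     db.session.add_all([user, team])
#     db.session.commit()
#     user.fantasy_team  # -> <FantasyTeam Alice FC>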
|
python
|
# encoding: utf-8
"""
Enumerations related to tables in WordprocessingML files
"""
from __future__ import (
absolute_import, division, print_function, unicode_literals
)
from docxx.enum.base import (
alias, Enumeration, EnumMember, XmlEnumeration, XmlMappedEnumMember
)
@alias('WD_ALIGN_VERTICAL')
class WD_CELL_VERTICAL_ALIGNMENT(XmlEnumeration):
"""
alias: **WD_ALIGN_VERTICAL**
Specifies the vertical alignment of text in one or more cells of a table.
Example::
from docxx.enum.table import WD_ALIGN_VERTICAL
table = document.add_table(3, 3)
table.cell(0, 0).vertical_alignment = WD_ALIGN_VERTICAL.BOTTOM
"""
__ms_name__ = 'WdCellVerticalAlignment'
__url__ = 'https://msdn.microsoft.com/en-us/library/office/ff193345.aspx'
__members__ = (
XmlMappedEnumMember(
'TOP', 0, 'top', 'Text is aligned to the top border of the cell.'
),
XmlMappedEnumMember(
'CENTER', 1, 'center', 'Text is aligned to the center of the cel'
'l.'
),
XmlMappedEnumMember(
'BOTTOM', 3, 'bottom', 'Text is aligned to the bottom border of '
'the cell.'
),
XmlMappedEnumMember(
'BOTH', 101, 'both', 'This is an option in the OpenXml spec, but'
' not in Word itself. It\'s not clear what Word behavior this se'
'tting produces. If you find out please let us know and we\'ll u'
'pdate this documentation. Otherwise, probably best to avoid thi'
's option.'
),
)
@alias('WD_ROW_HEIGHT')
class WD_ROW_HEIGHT_RULE(XmlEnumeration):
"""
alias: **WD_ROW_HEIGHT**
Specifies the rule for determining the height of a table row
Example::
from docxx.enum.table import WD_ROW_HEIGHT_RULE
table = document.add_table(3, 3)
table.rows[0].height_rule = WD_ROW_HEIGHT_RULE.EXACTLY
"""
__ms_name__ = "WdRowHeightRule"
__url__ = 'https://msdn.microsoft.com/en-us/library/office/ff193620.aspx'
__members__ = (
XmlMappedEnumMember(
'AUTO', 0, 'auto', 'The row height is adjusted to accommodate th'
'e tallest value in the row.'
),
XmlMappedEnumMember(
'AT_LEAST', 1, 'atLeast', 'The row height is at least a minimum '
'specified value.'
),
XmlMappedEnumMember(
'EXACTLY', 2, 'exact', 'The row height is an exact value.'
),
)
class WD_TABLE_ALIGNMENT(XmlEnumeration):
"""
Specifies table justification type.
Example::
from docxx.enum.table import WD_TABLE_ALIGNMENT
table = document.add_table(3, 3)
table.alignment = WD_TABLE_ALIGNMENT.CENTER
"""
__ms_name__ = 'WdRowAlignment'
    __url__ = 'http://office.microsoft.com/en-us/word-help/HV080607259.aspx'
__members__ = (
XmlMappedEnumMember(
'LEFT', 0, 'left', 'Left-aligned'
),
XmlMappedEnumMember(
'CENTER', 1, 'center', 'Center-aligned.'
),
XmlMappedEnumMember(
'RIGHT', 2, 'right', 'Right-aligned.'
),
)
class WD_TABLE_DIRECTION(Enumeration):
"""
Specifies the direction in which an application orders cells in the
specified table or row.
Example::
from docxx.enum.table import WD_TABLE_DIRECTION
table = document.add_table(3, 3)
table.direction = WD_TABLE_DIRECTION.RTL
"""
__ms_name__ = 'WdTableDirection'
    __url__ = 'http://msdn.microsoft.com/en-us/library/ff835141.aspx'
__members__ = (
EnumMember(
'LTR', 0, 'The table or row is arranged with the first column '
'in the leftmost position.'
),
EnumMember(
'RTL', 1, 'The table or row is arranged with the first column '
'in the rightmost position.'
),
)
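# Usage sketch: XmlEnumeration members map to and from their WordprocessingML
# attribute values, assuming docxx keeps python-docx's to_xml/from_xml helpers:
#
# WD_ROW_HEIGHT_RULE.to_xml(WD_ROW_HEIGHT_RULE.EXACTLY)  # -> 'exact'
# WD_ROW_HEIGHT_RULE.from_xml('atLeast')                 # -> WD_ROW_HEIGHT_RULE.AT_LEAST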
|
python
|
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from novaclient import api_versions
from novaclient.tests.unit import utils
from novaclient.tests.unit.v2 import fakes
class QuotaClassSetsTest(utils.TestCase):
def setUp(self):
super(QuotaClassSetsTest, self).setUp()
self.cs = fakes.FakeClient(api_versions.APIVersion("2.0"))
def test_class_quotas_get(self):
class_name = 'test'
q = self.cs.quota_classes.get(class_name)
self.assert_request_id(q, fakes.FAKE_REQUEST_ID_LIST)
self.cs.assert_called('GET', '/os-quota-class-sets/%s' % class_name)
return q
def test_update_quota(self):
q = self.cs.quota_classes.get('test')
self.assert_request_id(q, fakes.FAKE_REQUEST_ID_LIST)
q.update(cores=2)
self.cs.assert_called('PUT', '/os-quota-class-sets/test')
return q
def test_refresh_quota(self):
q = self.cs.quota_classes.get('test')
q2 = self.cs.quota_classes.get('test')
self.assertEqual(q.cores, q2.cores)
q2.cores = 0
self.assertNotEqual(q.cores, q2.cores)
q2.get()
self.assertEqual(q.cores, q2.cores)
class QuotaClassSetsTest2_50(QuotaClassSetsTest):
"""Tests the quota classes API binding using the 2.50 microversion."""
api_version = '2.50'
invalid_resources = ['floating_ips', 'fixed_ips', 'networks',
'security_groups', 'security_group_rules']
def setUp(self):
super(QuotaClassSetsTest2_50, self).setUp()
self.cs = fakes.FakeClient(api_versions.APIVersion(self.api_version))
def test_class_quotas_get(self):
"""Tests that network-related resources aren't in a 2.50 response
and server group related resources are in the response.
"""
q = super(QuotaClassSetsTest2_50, self).test_class_quotas_get()
for invalid_resource in self.invalid_resources:
self.assertFalse(hasattr(q, invalid_resource),
'%s should not be in %s' % (invalid_resource, q))
# Also make sure server_groups and server_group_members are in the
# response.
for valid_resource in ('server_groups', 'server_group_members'):
            self.assertTrue(hasattr(q, valid_resource),
                            '%s should be in %s' % (valid_resource, q))
def test_update_quota(self):
"""Tests that network-related resources aren't in a 2.50 response
and server group related resources are in the response.
"""
q = super(QuotaClassSetsTest2_50, self).test_update_quota()
for invalid_resource in self.invalid_resources:
self.assertFalse(hasattr(q, invalid_resource),
'%s should not be in %s' % (invalid_resource, q))
# Also make sure server_groups and server_group_members are in the
# response.
for valid_resource in ('server_groups', 'server_group_members'):
            self.assertTrue(hasattr(q, valid_resource),
                            '%s should be in %s' % (valid_resource, q))
def test_update_quota_invalid_resources(self):
"""Tests trying to update quota class values for invalid resources.
This will fail with TypeError because the network-related resource
kwargs aren't defined.
"""
q = self.cs.quota_classes.get('test')
self.assertRaises(TypeError, q.update, floating_ips=1)
self.assertRaises(TypeError, q.update, fixed_ips=1)
self.assertRaises(TypeError, q.update, security_groups=1)
self.assertRaises(TypeError, q.update, security_group_rules=1)
self.assertRaises(TypeError, q.update, networks=1)
return q
class QuotaClassSetsTest2_57(QuotaClassSetsTest2_50):
"""Tests the quota classes API binding using the 2.57 microversion."""
api_version = '2.57'
def setUp(self):
super(QuotaClassSetsTest2_57, self).setUp()
self.invalid_resources.extend(['injected_files',
'injected_file_content_bytes',
'injected_file_path_bytes'])
def test_update_quota_invalid_resources(self):
"""Tests trying to update quota class values for invalid resources.
This will fail with TypeError because the file-related resource
kwargs aren't defined.
"""
q = super(
QuotaClassSetsTest2_57, self).test_update_quota_invalid_resources()
self.assertRaises(TypeError, q.update, injected_files=1)
self.assertRaises(TypeError, q.update, injected_file_content_bytes=1)
self.assertRaises(TypeError, q.update, injected_file_path_bytes=1)
|
python
|
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
import nlm_pb2 as nlm__pb2
class NLMStub(object):
# missing associated documentation comment in .proto file
pass
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.StrRecall = channel.unary_unary(
'/nlm.NLM/StrRecall',
request_serializer=nlm__pb2.RawString.SerializeToString,
response_deserializer=nlm__pb2.GraphOutput.FromString,
)
self.NLURecall = channel.unary_unary(
'/nlm.NLM/NLURecall',
request_serializer=nlm__pb2.NLMInput.SerializeToString,
response_deserializer=nlm__pb2.GraphOutput.FromString,
)
self.NodeRecall = channel.unary_unary(
'/nlm.NLM/NodeRecall',
request_serializer=nlm__pb2.GraphNode.SerializeToString,
response_deserializer=nlm__pb2.GraphNode.FromString,
)
self.RelationRecall = channel.unary_unary(
'/nlm.NLM/RelationRecall',
request_serializer=nlm__pb2.GraphRelation.SerializeToString,
response_deserializer=nlm__pb2.GraphRelation.FromString,
)
class NLMServicer(object):
# missing associated documentation comment in .proto file
pass
def StrRecall(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def NLURecall(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def NodeRecall(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def RelationRecall(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_NLMServicer_to_server(servicer, server):
rpc_method_handlers = {
'StrRecall': grpc.unary_unary_rpc_method_handler(
servicer.StrRecall,
request_deserializer=nlm__pb2.RawString.FromString,
response_serializer=nlm__pb2.GraphOutput.SerializeToString,
),
'NLURecall': grpc.unary_unary_rpc_method_handler(
servicer.NLURecall,
request_deserializer=nlm__pb2.NLMInput.FromString,
response_serializer=nlm__pb2.GraphOutput.SerializeToString,
),
'NodeRecall': grpc.unary_unary_rpc_method_handler(
servicer.NodeRecall,
request_deserializer=nlm__pb2.GraphNode.FromString,
response_serializer=nlm__pb2.GraphNode.SerializeToString,
),
'RelationRecall': grpc.unary_unary_rpc_method_handler(
servicer.RelationRecall,
request_deserializer=nlm__pb2.GraphRelation.FromString,
response_serializer=nlm__pb2.GraphRelation.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'nlm.NLM', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
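# Usage sketch (not generated code): minimal wiring of the bindings above; the
# port is arbitrary and the servicer methods still need real implementations.
#
# from concurrent import futures
# server = grpc.server(futures.ThreadPoolExecutor(max_workers=4))
# add_NLMServicer_to_server(NLMServicer(), server)
# server.add_insecure_port('[::]:50051')
# server.start()
# stub = NLMStub(grpc.insecure_channel('localhost:50051'))
# reply = stub.StrRecall(nlm__pb2.RawString())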
|
python
|
# -*- coding: utf-8 -*-
# Copyright (c) 2020, PibiCo and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
import datetime, time
from frappe.utils import cstr
from frappe import msgprint, _
from frappe.core.doctype.sms_settings.sms_settings import send_sms
from atvirtual.atvirtual.doctype.telegram_settings.telegram_settings import send_telegram
import paho.mqtt.client as mqtt
import os, ssl, urllib, json
from frappe.utils.password import get_decrypted_password
class pibiMessage(Document):
def validate(self):
if self.message_type == "IoT" and not self.std_message:
frappe.throw(_("Please fill the message content"))
if self.message_type == "IoT":
if not self.all_places and not self.all_roles:
if len(self.location_table) == 0 and len(self.device_table) == 0 and len(self.recipient_table) == 0 and len(self.participant_table) == 0:
frappe.throw(_("Please choose any destination recipient"))
def before_save(self):
if self.message_type == "IoT":
std_message = frappe.get_doc("Standard Message", self.std_message)
def before_submit(self):
## Prepare recipients list
sms_list = []
telegram_list = []
mqtt_list = []
email_list = []
str_attach = ''
recipients = []
str_message = ""
## Send E-mails
if self.message_type == "E-mail":
## Read message body
str_message = self.email_body
## Read Recipients Table
recipient_list = self.recipient_item
if len(recipient_list) > 0:
for item in recipient_list:
recipients.append(item.participant_email_id)
## Read and prepare message with Attachments
if len(self.message_item) > 0:
for idx, row in enumerate(self.message_item):
if "http" in row.attachment:
str_attach = str_attach + '<a href="' + row.attachment + '">Anexo ' +str(idx+1) + ': ' + row.description + '</a><br>'
else:
str_attach = str_attach + '<a href="' + frappe.utils.get_url() + urllib.parse.quote(row.attachment) + '">Anexo ' +str(idx+1) + ': ' + row.description + '</a><br>'
str_message = str_message + "<p>Con archivos anexos:</p><p>" + str_attach + "</p>"
## Finally Send message by Email
email_args = {
"sender": self.from_email_account,
"recipients": recipients,
"message": str_message,
"subject": self.subject,
"reference_doctype": self.doctype,
"reference_name": self.name
}
frappe.sendmail(**email_args)
## Send IoT messages
if self.message_type == "IoT":
## Read main message
dict_message = json.loads(self.message_text)
if "message" in dict_message:
str_message = dict_message["message"]["text"]
## Read and prepare message with attachments
if len(self.message_item) > 0 and str_message != '':
for idx, row in enumerate(self.message_item):
if "http" in row.attachment:
str_attach = str_attach + 'Anexo ' + str(idx+1) + ': ' + row.description + ' @ ' + row.attachment + '\n'
else:
str_attach = str_attach + 'Anexo ' + str(idx+1) + ': ' + row.description + ' @ ' + frappe.utils.get_url() + urllib.parse.quote(row.attachment) + '\n'
str_message = str_message + "\nCon archivos anexos:\n" + str_attach
dict_message["message"]["text"] = str_message
## Prepare location recipients
if len(self.location_table) > 0 and not self.all_places:
for loc in self.location_table:
""" Get from database devices assigned to locations in session """
locdev = frappe.db.sql("""SELECT device FROM `tabPlace Item` WHERE parent=%s AND place=%s and docstatus < 2""", (self.course, loc.place), True)
if len(locdev) > 0:
for plc in locdev:
if plc.device is not None:
sms_list, mqtt_list, telegram_list, email_list = append_recipients(plc.device, sms_list, mqtt_list, telegram_list, email_list)
        ## Prepare device recipients even when all places are selected
if len(self.device_table) > 0 and not self.all_places:
for dev in self.device_table:
if dev.device is not None:
sms_list, mqtt_list, telegram_list, email_list = append_recipients(dev.device, sms_list, mqtt_list, telegram_list, email_list)
## Prepare all devices
if self.all_places:
""" Get from database devices in session """
            locdev = frappe.db.sql("""SELECT device FROM `tabPlace Item` WHERE parent=%s and docstatus < 2""", (self.course,), True)
if len(locdev) > 0:
for plc in locdev:
if plc.device is not None:
sms_list, mqtt_list, telegram_list, email_list = append_recipients(plc.device, sms_list, mqtt_list, telegram_list, email_list)
""" Get from database devices in session in roles table """
            roldev = frappe.db.sql("""SELECT device FROM `tabSession Role Item` WHERE parent=%s and docstatus < 2""", (self.course,), True)
if len(roldev) > 0:
for itm in roldev:
if itm.device is not None:
sms_list, mqtt_list, telegram_list, email_list = append_recipients(itm.device, sms_list, mqtt_list, telegram_list, email_list)
## Prepare role recipients
if len(self.recipient_table) > 0 and not self.all_roles:
for rol in self.recipient_table:
frappe.msgprint(rol.participant_role)
""" Get from database devices ported in session """
roldev = frappe.db.sql("""SELECT device FROM `tabSession Role Item` WHERE parent=%s AND participant_role=%s and docstatus < 2""", (self.course, rol.participant_role), True)
if len(roldev) > 0:
for itm in roldev:
if itm.device is not None:
sms_list, mqtt_list, telegram_list, email_list = append_recipients(itm.device, sms_list, mqtt_list, telegram_list, email_list)
## Prepare participants
if len(self.participant_table) > 0 and not self.all_roles:
for per in self.participant_table:
frappe.msgprint(per.participant)
""" Get from database devices ported in session """
perdev = frappe.db.sql("""SELECT device FROM `tabSession Role Item` WHERE parent=%s AND participant=%s and docstatus < 2""", (self.course, per.participant), True)
if len(perdev) > 0:
                    for itm in perdev:
                        if itm.device is not None:
                            sms_list, mqtt_list, telegram_list, email_list = append_recipients(itm.device, sms_list, mqtt_list, telegram_list, email_list)
## Prepare all roles
if self.all_roles:
""" Get from database devices in session in roles table """
            roldev = frappe.db.sql("""SELECT device FROM `tabSession Role Item` WHERE parent=%s and docstatus < 2""", (self.course,), True)
if len(roldev) > 0:
for itm in roldev:
if itm.device is not None:
sms_list, mqtt_list, telegram_list, email_list = append_recipients(itm.device, sms_list, mqtt_list, telegram_list, email_list)
## Send message by MQTT
if len(mqtt_list) > 0:
path = frappe.utils.get_bench_path()
site_name = frappe.utils.get_url().replace("http://","").replace("https://","")
if ":" in site_name:
pos = site_name.find(":")
site_name = site_name[:pos]
client = frappe.get_doc('MQTT Settings', 'MQTT Settings')
server = client.broker_gateway
port = client.port
user = client.user
client.secret = get_decrypted_password('MQTT Settings', 'MQTT Settings', 'secret', False)
secret = client.secret
do_ssl = client.is_ssl
# connect to MQTT Broker to Publish Message
pid = os.getpid()
client_id = '{}:{}'.format('client', str(pid))
try:
backend = mqtt.Client(client_id=client_id, clean_session=True)
backend.username_pw_set(user, password=secret)
if do_ssl == True:
ca = os.path.join(path, "sites", site_name, frappe.get_site_path('private', 'files', client.ca)[1:])
client_crt = os.path.join(path, "sites", site_name, frappe.get_site_path('private', 'files', client.client_crt)[1:])
client_key = os.path.join(path, "sites", site_name, frappe.get_site_path('private', 'files', client.client_key)[1:])
port_ssl = client.ssl_port
## Prepare mqtt
backend.tls_set(ca_certs=ca, certfile=client_crt, keyfile=client_key, cert_reqs=ssl.CERT_REQUIRED, ciphers=None)
backend.tls_insecure_set(False)
time.sleep(.5)
backend.connect(server, port_ssl)
else:
backend.connect(server, port)
payload = frappe.safe_decode(json.dumps(dict_message)).encode('utf-8')
for dev in mqtt_list:
mqtt_topic = str(dev) + "/display/text"
backend.publish(mqtt_topic, cstr(payload))
backend.disconnect()
        except Exception:
            frappe.msgprint(_("Error in MQTT Broker sending to ") + str(mqtt_list))
## Send message by Email
if len(email_list) > 0 and "email" in dict_message:
try:
email_args = {
"sender": dict_message['email']['email_account'],
"recipients": email_list,
"message": str_message,
"subject": dict_message['email']['subject'],
"reference_doctype": self.doctype,
"reference_name": self.name
}
frappe.sendmail(**email_args)
        except Exception:
            frappe.throw(_("Error in sending mail"))
## Send message by Telegram
if len(telegram_list) > 0 and self.message_type == "IoT" and str_message != "":
try:
send_telegram(telegram_list, cstr(str_message))
        except Exception:
            frappe.throw(_("Error in sending Telegram"))
## Send message by SMS
if len(sms_list) > 0 and self.message_type == "IoT" and str_message != "":
try:
send_sms(sms_list, cstr(str_message))
        except Exception:
            frappe.throw(_("Error in sending SMS"))
## Final Message
frappe.msgprint(_("Actions Completed and Messages Sent"))
def append_recipients(device, sms_list, mqtt_list, telegram_list, email_list):
doc = frappe.get_doc('Device', device)
if not doc.disabled:
if doc.is_connected and doc.alerts_active:
if doc.by_sms:
if doc.sms_number != '':
if not doc.sms_number in sms_list:
sms_list.append(doc.sms_number)
#frappe.msgprint(_("Message by sms to ") + str(doc.sms_number))
if doc.by_text:
if doc.device_name != '' and doc.by_mqtt and not doc.device_name in mqtt_list:
mqtt_list.append(doc.device_name)
#frappe.msgprint(_("Message by mqtt to ") + str(doc.device_name))
if doc.by_email and doc.device_email != '' and not doc.device_email in email_list:
email_list.append(doc.device_email)
if doc.by_telegram:
if doc.telegram_number != '':
if not doc.telegram_number in telegram_list:
telegram_list.append(doc.telegram_number)
#frappe.msgprint(_("Message by sms to ") + str(doc.telegram_number))
return sms_list, mqtt_list, telegram_list, email_list
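# Usage sketch: append_recipients fans a single Device out to every channel it
# has enabled (SMS, MQTT, e-mail, Telegram), de-duplicating addresses; the
# device name below is hypothetical.
#
# sms, mqtt, telegram, email = append_recipients('DEV-001', [], [], [], [])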
|
python
|
from __future__ import division
import json
import time
import serial as _serial
import platform
import sys
if sys.version_info >= (3, 0):
import queue
else:
import Queue as queue
from threading import Event, Thread
from serial.tools.list_ports import comports
from . import IOHandler
try:
JSONDecodeError = json.decoder.JSONDecodeError
except AttributeError:
JSONDecodeError = ValueError
class Serial(IOHandler):
poll_frequency = 200
@classmethod
def available_hosts(cls):
devices = comports(include_links=True)
return [d.device for d in devices]
@classmethod
def is_host_compatible(cls, host):
return host in cls.available_hosts()
def __init__(self, host, baudrate=1000000):
self._serial = _serial.Serial(host, baudrate)
self._serial.flush()
self._msg = queue.Queue(100)
self._running = True
self._poll_loop = Thread(target=self._poll)
self._poll_loop.daemon = True
self._poll_loop.start()
def is_ready(self):
if self._serial.in_waiting == 0:
return False
try:
self.read()
return True
except (UnicodeDecodeError, JSONDecodeError):
return False
def recv(self):
return self._msg.get()
def write(self, data):
        self._serial.write(data + b'\r\n')
#print(data + '\r'.encode())
def close(self):
self._running = False
self._poll_loop.join()
self._serial.close()
def _poll(self):
def extract_line(s):
j = s.find(b'\n')
if j == -1:
return b'', s
            # The start of a serial read can contain garbage bytes; discard
            # everything before the first '{' of the JSON payload.
x = s.find(b'{')
if x == -1:
return b'', s[j + 1:]
return s[x:j], s[j + 1:]
period = 1 / self.poll_frequency
buff = b''
while self._running:
to_read = self._serial.in_waiting
if to_read == 0:
time.sleep(period)
continue
s = self._serial.read(to_read)
buff = buff + s
while self._running:
line, buff = extract_line(buff)
if not len(line):
break
if self._msg.full():
self._msg.get()
self._msg.put(line)
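# Usage sketch: the poll loop frames incoming data as newline-terminated JSON
# objects and drops noise before the first '{'. A hypothetical round-trip,
# assuming a device on /dev/ttyUSB0 that speaks this protocol:
#
# io = Serial('/dev/ttyUSB0')
# io.write(json.dumps({'ping': 1}).encode())
# reply = json.loads(io.recv())  # blocks until a complete line arrives
# io.close()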
|
python
|
#!/usr/bin/env python
from distutils.core import setup
setup(
name = 'elastico',
version = '0.6.3',
description = "Elasticsearch Companion - a commandline tool",
author = "Kay-Uwe (Kiwi) Lorenz",
author_email = "[email protected]",
url = 'https://github.com/klorenz/python-elastico',
license = "MIT",
install_requires=[
'elasticsearch',
'PyYAML',
'pyaml',
'requests',
'argdeco',
'markdown',
'jinja2',
'pytz',
'python-dateutil',
],
packages=[
'elastico', 'elastico.cli'
],
# package_data = {
# elastico: ['subfolder/*.x', ...]
# }
# include_package_data = True
entry_points={
'console_scripts': [
'elastico = elastico.cli:main',
]
}
)
|
python
|
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, BooleanField, SubmitField
from wtforms.validators import Required,Email, EqualTo
from ..models import User
from wtforms import ValidationError
class RegisterationForm(FlaskForm):
    email = StringField('Enter your email address', validators=[Required(), Email()])
    username = StringField('Enter your username', validators=[Required()])
    password = PasswordField('Password', validators=[Required(), EqualTo('password_confirm', message='Passwords must match')])
    password_confirm = PasswordField('Confirm password', validators=[Required()])
    submit = SubmitField('Sign Up')
def validate_email(self,data_field):
if User.query.filter_by(email =data_field.data).first():
raise ValidationError('There is an account with that email address')
def validate_username(self,data_field):
if User.query.filter_by(username = data_field.data).first():
raise ValidationError('The username is already taken')
class LoginForm(FlaskForm):
    email = StringField('Your email address', validators=[Required(), Email()])
    password = PasswordField('Password', validators=[Required()])
    remember = BooleanField('Remember me')
    submit = SubmitField('Sign In')
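# Usage sketch: a minimal login view built on LoginForm; the `auth` blueprint,
# template path and password check are hypothetical placeholders.
#
# @auth.route('/login', methods=['GET', 'POST'])
# def login():
#     form = LoginForm()
#     if form.validate_on_submit():
#         user = User.query.filter_by(email=form.email.data).first()
#         # ... verify the password and log the user in ...
#     return render_template('auth/login.html', form=form)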
|
python
|
import pytest
from nengo_os import SimpleProc, ConventScheduler
procs = [ # name id arrival1,+2, comp_t, needed_cores
SimpleProc("Cifar", 0, 0, 0, 10, 4038), # test single process
SimpleProc("SAR", 1, 0, 0, 100, 3038), #test two procs that can not run together
SimpleProc("SAR", 2, 0, 0, 100, 3038)
]
@pytest.fixture(params=["SINGLE","TWO_INT","ALL"])
def paper_procs(request):
pl = procs
if request.param == "SINGLE":
return [pl[0]], 1
if request.param == "TWO_INT":
return [pl[1],pl[2]], 2
if request.param == "ALL":
pl[1].arrival = 10
pl[2].arrival = 10
return pl, len(pl)
@pytest.fixture
def create_non_nengo_scheduler(paper_procs):
pl, num = paper_procs
    return ConventScheduler(simple_proc_list=pl, mode="RR", time_slice=5), num
class RR_Status:
def __init__(self, time=0):
self.time = time
def test_non_nengo_rr(create_non_nengo_scheduler):
sched,num = create_non_nengo_scheduler
end_time_est = sum(et.needed_time for et in sched.queue.wait_q)
quant = sched.time_slice
running_proc = 2
waiting_proc = 1
rtx = []
for i in range(end_time_est):
sched.scheduler_run_tick()
if num == 1:
if i == 0:
assert(sched.running_proc_size == 4038)
elif num == 2:
if i == 10:
print(i)
assert(sched.running_proc_size == 3038)
if i % sched.time_slice == 0:
#assert(sched.running_proc_list[0] == running_proc)
r = waiting_proc
running_proc = waiting_proc
waiting_proc = r
rtx.append(sched.running_proc_list[0])
elif i > 200:
assert(sched.waiting_proc_size < 1)
print(rtx)
def test_load_json():
from pathlib import Path
model_data_file = Path("/Users/plaggm/dev/nemo-codes/config/paper_models.json")
sched_type = "RR"
rr_ts = 100
scheduler = ConventScheduler(mode=sched_type, total_cores=4096, time_slice=rr_ts,
proc_js_file=str(model_data_file.absolute()))
|
python
|
from daq_server import DAQServer
import asyncio
server = DAQServer()
event_loop = asyncio.get_event_loop()
async def heartbeat():
while True:
print('lub-dub')
await asyncio.sleep(10)
def shutdown(server):
print('shutdown:')
# for controller in controller_list:
# # print(sensor)
# controller.stop()
server.shutdown()
task = asyncio.ensure_future(heartbeat())
tasks = asyncio.Task.all_tasks()
for t in tasks:
# print(t)
t.cancel()
print("Tasks canceled")
asyncio.get_event_loop().stop()
# start the heartbeat once its coroutine is defined, then collect the
# pending tasks for the main loop
task = asyncio.ensure_future(heartbeat())
task_list = asyncio.Task.all_tasks()
try:
event_loop.run_until_complete(asyncio.wait(task_list))
except KeyboardInterrupt:
print('closing client')
shutdown(server)
event_loop.run_forever()
finally:
print('closing event loop')
event_loop.close()
|
python
|
import pathlib
import numpy as np
import simscale_eba.HourlyContinuous as hc
epw = hc.HourlyContinuous()
# Put any path here
path = r'E:\Current Cases\SimScale Objects\examples\epw_to_stat\USA_MA_Boston-Logan.Intl.AP.725090_TMYx.2004-2018.epw'
epw.import_epw(pathlib.Path(path))
weather_stats = hc.WeatherStatistics()
weather_stats.set_directions(np.arange(0, 360, 10))
weather_stats.set_speeds(np.arange(0.5, 16, 1))
weather_stats.set_hourly_continuous(epw)
weather_stats.to_stat()
weather_stats.plot_cumulative_distributions()
|
python
|
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import pygsheets
def boxplot(models_list,
d_exp,
xlabel='Models',
ylabel='Mise Surv 0',
option=0):
n = len(models_list)
    if option == 0:
        data = [d_exp[f'mise_0_{model_name}'] for model_name in models_list]
    elif option == 1:
        data = [d_exp[f'mise_1_{model_name}'] for model_name in models_list]
    elif option == 2:
        data = [d_exp[f'CATE_{model_name}'] for model_name in models_list]
    else:
        raise ValueError('option must be 0 (mise_0), 1 (mise_1) or 2 (CATE)')
    fig, ax = plt.subplots()
    bp = ax.boxplot(data, widths=0.2, sym='', patch_artist=True)
# for each boxplot different color
plt.setp(bp['boxes'], color='black')
plt.setp(bp['whiskers'], color='black')
plt.setp(bp['medians'], color='white')
plt.setp(bp['fliers'], color='black')
plt.setp(bp['caps'], color='red')
ax.set_xticklabels(models_list)
ax.set_xticks(np.arange(1, n + 1))
ax.set_ylabel(ylabel)
ax.set_xlabel(xlabel)
ax.set_title('Boxplot of the Mise survenance')
plt.show()
# test
if __name__ == '__main__':
models_list = ['model_1', 'model_2', 'model_3']
d_exp = {'mise_0_model_1': np.random.randint(0, 100, size=10),
'mise_0_model_2': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
'mise_0_model_3': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
'mise_1_model_1': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
'mise_1_model_2': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
'mise_1_model_3': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
'CATE_model_1': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
'CATE_model_2': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
'CATE_model_3': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]}
boxplot(models_list, d_exp, option=0)
# plot norme (true quantiles - predicted quantiles) for each model as boxplot
def quantile_boxplot(models_list,d_exp):
n = len(models_list)
    data = [d_exp[f'norme_0_{model_name}'] for model_name in models_list]
    fig, ax = plt.subplots()
    bp = ax.boxplot(data, widths=0.2, sym='', patch_artist=True)
# for each boxplot different color
plt.setp(bp['boxes'], color='black')
plt.setp(bp['whiskers'], color='black')
plt.setp(bp['medians'], color='white')
plt.setp(bp['fliers'], color='black')
plt.setp(bp['caps'], color='black')
ax.set_xticklabels(models_list)
ax.set_xticks(np.arange(1, n + 1))
ax.set_ylabel('Norme')
ax.set_xlabel('Models')
ax.set_title('Boxplot of the Norme')
plt.show()
# construct table : rows = models, columns = scores (mise_0, mise_1, CATE)
def table(models_list, d_exp, WD):
n = len(models_list)
mise_0 = [np.mean(d_exp[f'mise_0_{model_name}']) for model_name in models_list]
mise_1 = [np.mean(d_exp[f'mise_1_{model_name}']) for model_name in models_list]
CATE = [np.mean(d_exp[f'CATE_{model_name}']) for model_name in models_list]
wd = [WD]*n
table = np.array([mise_0, mise_1, CATE, wd])
df_table = pd.DataFrame(table.T, index=models_list, columns=['mise_0', 'mise_1', 'CATE', 'WD'])
df_table.index.name = 'Models'
return df_table
# concatenate tables
def concatenate(df_table_list):
df_table_concat = pd.concat(df_table_list, axis=1)
df_table_concat.index.name = 'Models'
# send table to sheet drive using api
gc = pygsheets.authorize(service_file='./cred.json')
sh = gc.open('tables survcaus')
# with names of models as index
df_table_concat.to_csv('./table.csv')
wks = sh[0]
wks.set_dataframe(df_table_concat, (1, 1))
return df_table_concat
# test table
"""if __name__ == '__main__':
models_list = ['model_1', 'model_2', 'model_3']
d_exp = {'mise_0_model_1': np.random.randint(0, 100, size=10),
'mise_0_model_2': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
'mise_0_model_3': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
'mise_1_model_1': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
'mise_1_model_2': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
'mise_1_model_3': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
'CATE_model_1': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
'CATE_model_2': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
'CATE_model_3': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
'norme_0_model_1': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
'norme_0_model_2': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
'norme_0_model_3': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]}
list_df_table = []
for WD in [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1]:
df_table = table(models_list, d_exp, WD)
list_df_table.append(df_table)
df_table_concat = concatenate(list_df_table)
df_table_concat.to_csv('table.csv')
print(df_table_concat)
"""
|
python
|
#coding=UTF-8
import argparse, sys
from urllib.request import urlopen
parse = argparse.ArgumentParser(description="Check vaadin.com version lists")
parse.add_argument("version", help="Released Vaadin version number")
args = parse.parse_args()
if hasattr(args, "echo"):
print(args.echo)
sys.exit(1)
prerelease = None
(major, minor, maintenance) = args.version.split(".", 2)
if "." in maintenance:
(maintenance, prerelease) = maintenance.split(".", 1)
# Version without prerelease tag
version = "%s.%s.%s" % (major, minor, maintenance)
isPrerelease = prerelease is not None
failed = False
vaadin7Latest = "http://vaadin.com/download/LATEST7"
vaadin7Versions = "http://vaadin.com/download/VERSIONS_7"
vaadin6Latest = "http://vaadin.com/download/LATEST"
vaadinPrerelease = "http://vaadin.com/download/PRERELEASES"
try:
latest = urlopen(vaadin7Latest).read().decode().split("\n")
releaseRow = "release/%s.%s/%s" % (major, minor, version)
assert (version in latest[0]) ^ isPrerelease, "Latest version mismatch. %s: %s, was: %s" % ("should not be" if isPrerelease else "should be", args.version, latest[0])
assert (releaseRow in latest[1]) ^ isPrerelease, "Release row mismatch; %s: %s, was %s" % ("should not be" if isPrerelease else "should be", releaseRow, latest[1])
except Exception as e:
failed = True
print("Latest version was not correctly updated: %s" % (e))
try:
assert "%s," % (args.version) in urlopen(vaadin7Versions).read().decode().split("\n"), "Released version not in version list"
except Exception as e:
if isPrerelease:
print("Prerelease version needs to be added manually to versions!")
else:
failed = True
print(e)
try:
latest = urlopen(vaadin6Latest).read().decode().split("\n")
releaseRow = "release/6.8/6.8."
assert ("6.8." in latest[0]), "Latest version mismatch; should be: %sX, was: %s" % ("6.8.", latest[0])
assert (releaseRow in latest[1]), "Release row mismatch; should be: %sX, was %s" % (releaseRow, latest[1])
except Exception as e:
failed = True
print("Latest Vaadin 6 version was updated by release. %s" % (e))
try:
latest = urlopen(vaadinPrerelease).read().decode().split("\n")
assert (args.version in latest[0]) or not isPrerelease, "%s: %s, was: %s" % ("should be", args.version, latest[0])
except Exception as e:
print("Prerelease file was not correctly updated: %s" % (e))
sys.exit(1 if failed else 0)
|
python
|
'''
Created on May 2, 2018
@author: hwase0ng
'''
import settings as S
import requests
from BeautifulSoup import BeautifulSoup
from utils.fileutils import getStockCode
from common import loadMap
WSJSTOCKSURL = 'https://quotes.wsj.com/company-list/country/malaysia'
def connectStocksListing(url):
global soup
stocksListingUrl = url
try:
page = requests.get(stocksListingUrl, headers=S.HEADERS)
assert(page.status_code == 200)
html = page.content
soup = BeautifulSoup(html)
except Exception as e:
print(e)
soup = ''
return soup
def scrapeStocksListing(soup):
    if soup is None or len(soup) <= 0:
        print 'ERR: no result'
        return {}, None
stocks = {}
table = soup.find('table', {'class': 'cl-table'})
    # each <tr> is one listing row; header rows have no <td> cells and are skipped
for tr in table.findAll('tr'):
td = tr.findAll('td')
if len(td) == 0:
continue
# Sample stockLink: <a href="https://quotes.wsj.com/MY/XKLS/SEM">
stockLink = tr.find('a').get('href')
stockShortName = stockLink[31:]
# if any(stockShortName in s for s in klsefilter):
if stockShortName in klsefilter:
print "INFO:Ignored counter:", stockShortName
continue
stockName = tr.find('span', {'class': 'cl-name'}).text.upper().replace('AMP;', '')
try:
newname = wsjmap[stockShortName]
if S.DBG_ALL:
print "new name:", stockShortName, newname
stockShortName = newname
except KeyError:
pass
try:
stockCode = i3map[stockShortName]
except KeyError:
print "INFO:Unmatched stock:", stockShortName + ',', stockName
continue
'''
stockCode = getStockCode(stockShortName, '../i3investor/klse.txt', wsjmap)
if len(stockCode) == 0:
print "INFO:Skipped unmatched stock:", stockShortName + ',', stockName
'''
tds = [x.text.strip() for x in td]
xchange = tds[1]
sector = tds[2]
if len(sector) == 0:
sector = '-'
if S.DBG_ALL:
print stockShortName, stockCode, stockName, xchange, sector
stocks[stockShortName] = [stockCode, stockName, xchange, sector]
nextpg = getNextPage(soup)
return stocks, nextpg
def unpackListing(scode, sname, xchange, sector):
return scode, sname, xchange, sector
def getNextPage(soup):
pages = soup.find("div", "nav-right")
li = pages.find("li", "next")
nextpg = li.a.get('href')
if nextpg == '#':
return None
return nextpg
def writeStocksListing(outfile='klse.txt'):
global wsjmap, i3map, klsefilter
wsjmap = loadMap("klse.wsj", "=")
i3map = loadMap("../i3investor/klse.txt", ",")
with open('../klse.filter') as f:
klsefilter = f.read().splitlines()
if S.DBG_ALL:
print "Filter=", klsefilter
stocksListing = {}
nextpg = WSJSTOCKSURL
while(nextpg is not None):
listing, nextpg = scrapeStocksListing(connectStocksListing(nextpg))
stocksListing.update(listing)
fh = open(outfile, "w")
for key in sorted(stocksListing.iterkeys()):
listing = key + ',' + ','.join(map(str, unpackListing(*(stocksListing[key]))))
if S.DBG_ALL:
print listing
fh.write(listing + '\n')
fh.close()
if __name__ == '__main__':
S.DBG_ALL = False
writeStocksListing()
pass
|
python
|
#!/usr/bin/python3
# coding=utf-8
# Copyright 2021 getcarrier.io
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Module """
from pathlib import Path
import flask # pylint: disable=E0401
import jinja2 # pylint: disable=E0401
from flask import request, render_template, redirect, url_for
from pylon.core.tools import log # pylint: disable=E0611,E0401
from pylon.core.tools import module # pylint: disable=E0611,E0401
from .components.render_nikto import render_nikto_card
class Module(module.ModuleModel):
""" Galloper module """
def __init__(self, settings, root_path, context):
self.settings = settings
self.root_path = root_path
self.context = context
def init(self):
""" Init module """
log.info("Initializing module")
bp = flask.Blueprint(
"nikto", "plugins.security_scanner_nikto.plugin",
static_folder=str(Path(__file__).parents[0] / "static"),
static_url_path='/nikto/static/'
)
bp.jinja_loader = jinja2.ChoiceLoader([
jinja2.loaders.PackageLoader("plugins.security_scanner_nikto", "templates"),
])
# Register in app
self.context.app.register_blueprint(bp)
# Register template slot callback
self.context.slot_manager.register_callback("security_scanners", render_nikto_card)
from .rpc_worker import get_scanner_parameters
self.context.rpc_manager.register_function(get_scanner_parameters, name='nikto')
def deinit(self): # pylint: disable=R0201
""" De-init module """
log.info("De-initializing module")
|
python
|
# -*- coding: utf-8 -*-
# (C) Copyright 2017 Hewlett Packard Enterprise Development LP
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslotest import base
from monasca_persister.repositories.influxdb import line_utils
class TestInfluxdb(base.BaseTestCase):
def setUp(self):
super(TestInfluxdb, self).setUp()
def tearDown(self):
super(TestInfluxdb, self).tearDown()
def test_line_utils_handles_utf8(self):
utf8_name = u'name'
self.assertEqual(u'"' + utf8_name + u'"', line_utils.escape_value(utf8_name))
self.assertEqual(utf8_name, line_utils.escape_tag(utf8_name))
def test_line_utils_escape_tag(self):
simple = u"aaaaa"
self.assertEqual(simple, line_utils.escape_tag(simple))
complex = u"a\\ b,c="
self.assertEqual("a\\\\\\ b\\,c\\=", line_utils.escape_tag(complex))
def test_line_utils_escape_value(self):
simple = u"aaaaa"
self.assertEqual(u'"' + simple + u'"', line_utils.escape_value(simple))
complex = u"a\\b\"\n"
self.assertEqual(u"\"a\\\\b\\\"\\n\"", line_utils.escape_value(complex))
|
python
|
casa = float(input('What is the price of the house? R$'))
s = float(input('What is your salary? R$'))
a = int(input('Over how many years do you want to pay for the house? '))
parcela = casa / (a * 12)
if parcela <= 0.3 * s:
    print(f"Congratulations, you qualify to buy the house, let's close the deal! \nYou will pay {a*12} installments of R${parcela:.2f}")
else:
    print(f'Sorry, the installment of R${parcela:.2f} is too high for your salary; you would need a salary of R${parcela/0.3:.2f}')
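# Worked example: a R$120,000.00 house paid over 20 years gives 240 installments
# of R$500.00; with a salary of R$2,000.00 the 30% cap is R$600.00, so the
# purchase is approved.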
|
python
|
# -*- coding: utf-8 -*-
# Copyright (c) 2010-2016, MIT Probabilistic Computing Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import sys
import threading
import traceback
the_current_shell = threading.local()
the_current_shell.value = None
@contextlib.contextmanager
def set_current_shell(shell):
outer = the_current_shell.value
the_current_shell.value = shell
try:
yield
finally:
the_current_shell.value = outer
def current_shell():
assert the_current_shell.value is not None, 'No current shell!'
return the_current_shell.value
# make sure that the function that is hooked by the shell has the same
# __doc__
class bayesdb_shellhookexp(object):
def __init__(self, func):
self.func = func
fdoc = func.__doc__
if fdoc is None or len(fdoc.strip()) == 0:
fdoc = 'NO DOCUMENTATION...\n...\n'
if len(fdoc.split('\n')) == 1:
fdoc += '\n...\n'
self.__doc__ = fdoc
def __call__(self, *args):
try:
return self.func(*args)
except Exception as err:
sys.stderr.write(traceback.format_exc())
print err
def bayesdb_shell_cmd(name, autorehook=False):
def wrapper(func):
# because the cmd loop doesn't handle errors and just kicks people out
current_shell()._hook(name, bayesdb_shellhookexp(func),
autorehook=autorehook)
return wrapper
def bayesdb_shell_init(func):
func(current_shell())
return func
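# Usage sketch: registering a command through the decorator above; the command
# name and body are hypothetical, and the wrapped function receives whatever
# arguments the shell's cmd loop passes to its hooks (it must run while a
# shell is current, e.g. in a file the shell loads).
#
# @bayesdb_shell_cmd('hello')
# def hello_cmd(self, args):
#     '''Print a greeting.
#     USAGE: .hello
#     '''
#     print 'hello, %s' % (args,)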
|
python
|
from req import Service
from service.base import BaseService
import requests
import json
import config
import datetime
class DatetimeEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, datetime.datetime):
return obj.strftime('%Y-%m-%d %H:%M:%S')
elif isinstance(obj, datetime.date):
return obj.strftime('%Y-%m-%d')
else:
return json.JSONEncoder.default(self, obj)
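# Usage sketch for the encoder above (illustrative):
# json.dumps({'ts': datetime.datetime(2020, 1, 1, 12, 0)}, cls=DatetimeEncoder)
# -> '{"ts": "2020-01-01 12:00:00"}'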
class PayService(BaseService):
def __init__(self, db, rs):
self.db = db
self.rs = rs
PayService.inst = self
def payqr(self, data={}):
args = ['qrcode', 'id', 'token', 'latitude', 'longitude']
err = self.check_required_args(args, data)
if err: return (err, None)
err, user_info = yield from Service.User.get_user_info(data['token'], data['id'])
if err: return (err, None)
err, product_info = yield from Service.Product.get_product_by_qr(data)
if err: return (err, None)
err, resid = yield from self.pay({'product_id': product_info['id'], 'payee_account_id': product_info['account_id'], 'transaction_amount': product_info['price'], 'account_id': user_info['account_id'], 'token': data['token'], 'id_number': user_info['id_number'], 'longitude': data['longitude'], 'latitude': data['latitude']})
return (None, resid)
def pay(self, data={}):
args = ['payee_account_id', 'transaction_amount', 'account_id', 'id_number']
err = self.check_required_args(args, data)
if err: return (err, None)
token = data.pop('token')
url = self.add_client_id(config.BASE_URL + '/accounts/%s/in_house_transfer'%data.pop('account_id'))
r = requests.post(url, data=json.dumps(data), headers=self.headers(token))
meta = json.loads(r.text)
meta.update(data)
meta['product_id'] = data['product_id']
meta['longitude'] = data.get('longitude')
meta['latitude'] = data.get('latitude')
err, id = yield from self.update_db(meta)
if err: return (err, None)
err, product = yield from Service.Product.get_product_by_id({'id': data['product_id']})
err, record = yield from Service.Record.get_record_by_id({'id': id})
err, user_info = yield from Service.User.get_user_info(token, record['from_user_id'])
record.update(user_info)
record['price'] = product['price']
record['name'] = product['name']
record['description'] = product['description']
record['id'] = id
self.rs.publish('pay_list', json.dumps(record, cls=DatetimeEncoder))
return (None, id)
def update_db(self, data={}):
err, _from = yield from Service.User.get_user_id_by_account(data['account_id'])
err, _to = yield from Service.User.get_user_id_by_account(data['payee_account_id'])
meta = {
"from_user_id": _from,
"to_user_id": _to,
"product_id": data['product_id'],
'latitude': data['latitude'],
'longitude': data['longitude']
}
sql, param = self.gen_insert_sql('records', meta)
id, res_cnt = yield from self.db.execute(sql, param)
print(id, res_cnt)
id = id[0]['id']
return (None, id)
|
python
|
from .case import *
from .valkyrie_utils import *
from .section_model import *
from .cbr_cycle import *
from .functions import *
from .storage_unit import *
from .valkyrie_ner import *
|
python
|
import io
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.cm as cm
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from pandas.plotting import register_matplotlib_converters
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
from sklearn.metrics import roc_curve, roc_auc_score, precision_recall_curve, average_precision_score
sns.set_context('paper')
sns.set_style('white')
register_matplotlib_converters()
def _output_figure(filename):
out = None
if filename is None:
plt.show()
elif filename == '__qcml_export__':
binary = io.BytesIO()
plt.savefig(binary, format='png')
binary.seek(0) # rewind the data
out = binary.read()
else:
plt.savefig(filename)
plt.close()
return out
# Remember to use the Agg matplotlib backend for the heatmap annotation to work!
def plot_correlation_matrix(corr, filename=None):
plt.figure(figsize=(11, 10))
# generate a mask for the upper triangle
    mask = np.zeros_like(corr, dtype=bool)
mask[np.triu_indices_from(mask)] = True
sns.heatmap(corr, vmin=-1, vmax=1, linewidths=.5, square=True,
xticklabels=corr.columns.values[:-1], yticklabels=corr.columns.values[1:],
mask=mask, cbar_kws={'shrink': .75}, annot=True, fmt='.2f', annot_kws={'size': 4})
# rotate overly long tick labels
plt.xticks(rotation=90)
plt.yticks(rotation=0)
return _output_figure(filename)
def _classes_to_colors(df):
cmap = plt.cm.get_cmap('autumn')(np.linspace(0, 1, len(df.index.levels[0])))
class_colors = {}
color_idx = 0
for c, _ in df.index.values:
if class_colors.get(c) is None:
class_colors[c] = mpl.colors.rgb2hex(cmap[color_idx])
color_idx += 1
colors = []
for c, _ in df.index.values:
colors.append(class_colors[c])
return colors
def plot_timestamps(df, filename=None):
plt.figure()
plt.scatter(df.index.get_level_values(1), [0] * len(df.index.get_level_values(1)), 500, _classes_to_colors(df), '|')
sns.despine(left=True)
plt.tick_params(axis='y', which='both', left=False, right=False, labelleft=False)
return _output_figure(filename)
def _add_date_color_bar(df):
num_ticks = 5
ticker = mpl.ticker.MaxNLocator(num_ticks + 2, prune='both')
mappable = cm.ScalarMappable(cmap=plt.cm.get_cmap('autumn'))
mappable.set_array(range(num_ticks + 2))
cb = plt.colorbar(mappable, ticks=ticker, shrink=0.75)
cb.ax.set_yticklabels([df.index.values[i][1].strftime('%b %Y')
for i in range(0, len(df.index.values), len(df.index.values) // (num_ticks + 2))])
cb.outline.set_linewidth(0)
def _scatter_plot(scatter_data, df, filename=None):
plt.figure()
plt.scatter(scatter_data[:, 0], scatter_data[:, 1], c=_classes_to_colors(df))
sns.despine(left=True, bottom=True)
plt.tick_params(axis='x', which='both', bottom=False, top=False, labelbottom=False)
plt.tick_params(axis='y', which='both', left=False, right=False, labelleft=False)
_add_date_color_bar(df)
return _output_figure(filename)
def plot_pca(df, filename=None):
# transform data to lower dimension
pca_data = PCA(2).fit_transform(df.values)
# plot
return _scatter_plot(pca_data, df, filename)
def plot_tsne(df, filename=None):
# transform data to lower dimension
tsne_data = TSNE(2, init='pca').fit_transform(df.values)
# plot
return _scatter_plot(tsne_data, df, filename)
def scatter_outliers(scatter_data, df, outlier_scores, score_threshold, filename=None):
plt.figure()
colors = _classes_to_colors(df)
for i, d in enumerate(scatter_data):
if outlier_scores[i] > score_threshold:
size = 10 + (outlier_scores[i] - score_threshold) * 200
color = colors[i]
marker = 'o'
alpha = None
else:
size = 10
color = 'grey'
marker = '.'
alpha = 0.25
plt.scatter(d[0], d[1], s=size, c=color, marker=marker, alpha=alpha)
sns.despine(left=True, bottom=True)
plt.tick_params(axis='x', which='both', bottom=False, top=False, labelbottom=False)
plt.tick_params(axis='y', which='both', left=False, right=False, labelleft=False)
_add_date_color_bar(df)
return _output_figure(filename)
def plot_pca_outliers(df, outlier_scores, score_threshold, filename=None):
# transform data to lower dimension
pca_data = PCA(2).fit_transform(df.values)
# plot
return scatter_outliers(pca_data, df, outlier_scores, score_threshold, filename)
def plot_tsne_outliers(df, outlier_scores, score_threshold, filename=None):
# transform data to lower dimension
tsne_data = TSNE(2, init='pca').fit_transform(df.values)
# plot
return scatter_outliers(tsne_data, df, outlier_scores, score_threshold, filename)
def plot_outlier_score_hist(outlier_scores, num_bins, score_cutoff, filename=None):
plt.figure()
ax = sns.distplot(outlier_scores, bins=np.arange(0, 1.01, 1 / num_bins), kde=False, axlabel='Outlier score (%)',
hist_kws={'histtype': 'stepfilled'})
plt.ylabel('Number of experiments')
if score_cutoff is not None:
plt.axvline(score_cutoff, color=sns.color_palette()[0], linestyle='--')
# convert axis labels to percentages
ax.set_xticklabels([int(100 * float(label)) for label in ax.get_xticks()])
sns.despine()
return _output_figure(filename)
def plot_feature_importances(feature_importances, filename=None):
sorted_importances = feature_importances.sort_values(ascending=False)
with sns.axes_style('whitegrid'):
fig = plt.figure()
fig.set_tight_layout(True)
sns.barplot(x=sorted_importances.index.values, y=sorted_importances, palette='Blues_d')
plt.xticks(rotation='vertical', fontsize=5)
return _output_figure(filename)
def plot_subspace_boxplots(data, highlights=None, filename=None):
with sns.axes_style('whitegrid'):
fig = plt.figure()
fig.set_tight_layout(True)
sns.boxplot(data=data, orient='v', palette='Blues_d')
if highlights is not None:
for i in range(len(highlights)):
plt.plot(i, highlights[i], color='red', marker='d')
plt.xticks(rotation=30, fontsize=10)
return _output_figure(filename)
def plot_psm_boxplots(data, color_classes=None, filename=None, **kwargs):
with sns.axes_style('whitegrid'):
fig = plt.figure()
fig.set_tight_layout(True)
# add specific colors to the various box plots
if color_classes is not None:
kwargs['palette'] = [sns.color_palette()[c] for c in color_classes]
sns.boxplot(data=data, **kwargs)
if kwargs.get('orient') == 'h':
            plt.xlabel('Number of PSMs')
        else:
            plt.ylabel('Number of PSMs')
return _output_figure(filename)
def plot_aucs(aucs, k_range, filename=None):
max_auc = max(aucs)
max_k = [k for k, a in zip(k_range, aucs) if a == max_auc]
plt.figure()
# plot all auc's
plt.plot(k_range, aucs)
# highlight max auc
for k in max_k:
plt.scatter(k, max_auc, s=50, c=mpl.colors.rgb2hex(sns.color_palette()[0]), marker='D')
plt.xlim(left=0)
plt.ylim([0.5, 1.0])
plt.xlabel('Local neighborhood size')
plt.ylabel('AUC')
return _output_figure(filename)
def plot_outlier_classes_score_hist(classes_scores, num_bins, filename=None):
with sns.color_palette(sns.xkcd_palette(['medium green', 'orange yellow', 'faded red'])):
plt.figure()
# generate the histogram values for the three classes
bins = np.arange(0, 1.01, 1 / num_bins)
hist = pd.DataFrame({quality: np.histogram(classes_scores.loc[classes_scores['quality'] == quality]['score'],
bins=bins)[0] for quality in ['good', 'ok', 'poor']}, bins[:-1])
ax = hist.plot(kind='bar', position=0)
plt.xlabel('Outlier score (%)')
plt.ylabel('Number of experiments')
sns.despine(right=True, top=True)
# change the x-axis to not include each bin value and convert to percentages
ax.set_xticks(range(0, 21, 4))
ax.set_xticklabels(range(0, 101, 20), rotation=0)
return _output_figure(filename)
def plot_outlier_classes_score_kde(classes_scores, num_bins, filename=None):
with sns.color_palette(sns.xkcd_palette(['medium green', 'orange yellow', 'faded red'])):
plt.figure()
bins = np.arange(0, 1.01, 1 / num_bins)
for quality in ['good', 'ok', 'poor']:
sns.distplot(classes_scores.loc[classes_scores['quality'] == quality]['score'], bins=bins, hist=False,
kde=True, kde_kws={'label': quality, 'shade': True}, norm_hist=True)
plt.xlabel('Outlier score (%)')
plt.ylabel('Density')
sns.despine(right=True, top=True)
# convert the outlier score tick labels to percentages
ax = plt.gca()
ax.set_xlim(0, 1)
ax.set_xticklabels(range(0, 101, 20), rotation=0)
return _output_figure(filename)
def _to_binary_class_labels(quality_classes, pos_label=('good', 'ok')):
# convert quality classes: 1 -> poor, 0 -> good/ok
# requires that NO unvalidated samples are present
return np.array([0 if quality in pos_label else 1 for quality in quality_classes['quality']])
def plot_roc(classes_scores, filename=None):
plt.figure()
# convert ordinal class labels to binary labels
binary_classes = _to_binary_class_labels(classes_scores)
for zorder, ignore_quality in reversed(list(enumerate(['good', 'ok', None]))):
# ignore samples of the quality that's not considered
sample_weight = None if ignore_quality is None else _to_binary_class_labels(classes_scores, (ignore_quality, ))
# compute roc
fpr, tpr, _ = roc_curve(binary_classes, classes_scores['score'], sample_weight=sample_weight)
auc = roc_auc_score(binary_classes, classes_scores['score'], sample_weight=sample_weight)
# plot the ROC curve
alpha = 1 if ignore_quality is None else 1 / 3
label = 'all' if ignore_quality is None else "only '{}'".format('good' if ignore_quality == 'ok' else 'ok')
plt.plot(fpr, tpr, zorder=zorder, alpha=alpha, label='ROC curve {} (AUC = {:.2f})'.format(label, auc))
# plot the random ROC curve at 0.5
plt.plot([0, 1], [0, 1], 'k--')
plt.xlim([-0.05, 1.05])
plt.ylim([-0.05, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.legend(loc='lower right')
return _output_figure(filename)
def plot_precision_recall(classes_scores, filename=None):
# compute false positive rate and true positive rate
binary_classes = _to_binary_class_labels(classes_scores)
precision, recall, _ = precision_recall_curve(binary_classes, classes_scores['score'])
plt.figure()
# plot the ROC curve
plt.plot(recall, precision, label='Precision-recall curve (average precision = {:.2f})'
.format(average_precision_score(binary_classes, classes_scores['score'])))
plt.xlim([-0.05, 1.05])
plt.ylim([-0.05, 1.05])
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.legend(loc='lower right')
return _output_figure(filename)
def plot_score_sensitivity_specificity(classes_scores, filename=None):
# compute sensitivity and specificity
sorted_scores = classes_scores.sort_values('score', ascending=False)
sorted_binary_classes = _to_binary_class_labels(sorted_scores)
sensitivity, specificity = np.zeros(len(sorted_scores)), np.zeros(len(sorted_scores))
for i in range(len(sorted_binary_classes)):
# true positives or false positives based on predictions above score cut-off
tp = np.count_nonzero(sorted_binary_classes[:i + 1])
fp = (i + 1) - tp
# true negatives or false negatives based on predictions below score cut-off
fn = np.count_nonzero(sorted_binary_classes[i + 1:])
tn = len(sorted_binary_classes) - (i + 1) - fn
# sensitivity and specificity
sensitivity[i] = tp / (tp + fn)
specificity[i] = tn / (tn + fp)
plt.figure()
ax1 = plt.gca()
ax2 = plt.twinx()
# plot the sensitivity and specificity in function of the outlier score
p1 = ax1.plot(sorted_scores['score'], sensitivity, label='Sensitivity', color=sns.color_palette()[0])
# advance colors for the second axis
p2 = ax2.plot(sorted_scores['score'], specificity, label='Specificity', color=sns.color_palette()[1])
ax1.set_xlim([-0.05, 1.05])
ax1.set_ylim([-0.05, 1.05])
ax2.set_ylim([-0.05, 1.05])
ax1.set_xlabel('Outlier score')
ax1.set_ylabel('Sensitivity')
ax2.set_ylabel('Specificity')
plots = p1 + p2
ax1.legend(plots, [p.get_label() for p in plots], loc='center right')
return _output_figure(filename)
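# Usage sketch: a typical call sequence for this module, assuming a
# hypothetical `metrics_df` DataFrame of QC metrics per experiment:
#
# corr = metrics_df.corr()
# plot_correlation_matrix(corr, 'correlation.png')               # write a PNG file
# png_bytes = plot_correlation_matrix(corr, '__qcml_export__')   # raw PNG bytes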
|
python
|
#! /usr/bin/python2.4
#
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# parse_khhttpd_access_log.py
#
# Parses an input GEE server log, searching for imagery requests.
# Output is either:
# 1. a CSV containing lat/lon/level (and other info) for each imagery request, or
# 2. a KML file with placemarks at each imagery request location, where the name of
# the placemark is the number of requests seen at that location.
#
# TODO: separate out KML output code into routine like CSV already is.
# TODO: compile imagery-recognition regexp in KML routine like CSV does.
# TODO: read initially into quad_dict instead of making list and de-duplicating.
# TODO: remove IP logging so Earth users aren't concerned about watching their use.
# TODO: determine output type from extension on output file
# TODO: pass output file into KML class and just my_kml.openDoc() etc.
#
import re
import sys
def Usage():
'''Tell the user how the program should be invoked.'''
print 'Usage:\n'
print ' log_parser.py <input_file> <output_file> <file_type>\n'
print 'Example: log_parser.py khhttpd_access_log access_log.kml kml\n'
print ' or: log_parser.py khhttpd_access_log access_log.csv csv\n'
def main():
if len(sys.argv) < 4:
Usage()
sys.exit(1)
infile = sys.argv[1]
outfile = sys.argv[2]
filetype = sys.argv[3].lower()
input = open(infile).readlines()
output = open(outfile, 'w')
if filetype == 'csv':
MakeCSV(input, output)
sys.exit(0)  # CSV written successfully; exit with status 0
quad_addrs = []
for line in input:
quad_addr = ParseQuad(line)
if quad_addr:
quad_addrs.append(quad_addr)
quad_dict = DeDupeQuads(quad_addrs)
my_kml = KML()
output.write(my_kml.openDoc('0'))
output.write(my_kml.openFolder(infile, '1'))
for addr in quad_dict.keys():
xy_coords = ProcessQuad(addr)
count = quad_dict[addr]
output.write(my_kml.MakePoint(xy_coords, count))
output.write(my_kml.closeFolder())
output.write(my_kml.closeDoc())
#######################################################################################
def ParseQuad(line):
'''Check for imagery (q2) requests and parse out quad-tree address.'''
quad_regex = re.compile(r'.*q2-(.*)-')
quad_match = quad_regex.match(line)
if quad_match:
quad_addr = quad_match.group(1)
return quad_addr
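# hypothetical example (the real khhttpd log format may differ): a line
# containing '.../q2-0301-i.3 ...' yields quad_addr '0301'; note that the
# greedy match captures up to the LAST '-' after 'q2-', so a line with extra
# hyphens later on would capture more than the address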
def DeDupeQuads(quad_addrs):
'''Identify unique quad-tree addresses and keep track of their use.'''
quad_dict = {}
for address in quad_addrs:
if address not in quad_dict:
quad_dict[address] = 1
else:
quad_dict[address] += 1
return quad_dict
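# equivalent one-liner on Python >= 2.7 (kept as an explicit loop above for
# the python2.4 target noted in the shebang):
# quad_dict = dict(collections.Counter(quad_addrs))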
#######################################################################################
def MakeCSV(input, output):
'''Parse the input log file and create pipe-delimited "|" output.'''
header = 'ip|date|lon|lat|level|req_code|bytes\n'
output.write(header)
image_regex = re.compile(r'.*q2-.*-')
for line in input:
line_match = image_regex.match(line)
if line_match:
ip_match = re.match(r'^(.+?)\s-', line)
ip = ip_match.group(1)
date_match = re.match(r'.*\[(.+?)\]', line)
date = date_match.group(1)
quad_match = re.match(r'.*q2-(.*)-', line)
quad = quad_match.group(1)
xy_coords = ProcessQuad(quad_match.group(1))
lon = xy_coords[0]
lat = xy_coords[1]
level = len(quad_match.group(1))
apache_codes_match = re.match(r'.*\s(\d+?\s\d+?)$', line)
apache_codes = apache_codes_match.group(1)
req_code = apache_codes.split()[0]
bytes = apache_codes.split()[1]
csv_string = '%s|%s|%f|%f|%s|%s|%s\n' % (ip, date, lon, lat, level, req_code, bytes)
output.write(csv_string)
#######################################################################################
def ProcessQuad(addr):
'''Convert the quad address string into a list and send it off for coords.'''
tile_list = list(addr)
tile_list.reverse()
tile_list.pop()  # drop the root tile (the first character of the address)
xy_range = 180.0
x_coord = 0.0
y_coord = 0.0
new_coords = Quad2GCS(tile_list, x_coord, y_coord, xy_range)
return new_coords
def Quad2GCS(addr_list, x_coord, y_coord, xy_range):
'''Drill down through quad-tree to get final x,y coords.'''
if not addr_list:
new_coords = (x_coord, y_coord)
return new_coords
else:
tile_addr = addr_list.pop()
new_range = xy_range/2
if tile_addr == '0':
x_coord -= new_range
y_coord -= new_range
if tile_addr == '1':
x_coord += new_range
y_coord -= new_range
if tile_addr == '2':
x_coord += new_range
y_coord += new_range
if tile_addr == '3':
x_coord -= new_range
y_coord += new_range
return Quad2GCS(addr_list, x_coord, y_coord, new_range)
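# worked example (traced from the code above): ProcessQuad('03') strips the
# root '0', leaving digit '3'; the range halves from 180 to 90 and '3' shifts
# (x, y) by (-90, +90), returning (-90.0, 90.0) -- the centre of the
# north-west quadrant. Each further digit halves the range again.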
#######################################################################################
class KML:
'''builds kml objects'''
def openDoc(self, visibility):
'''Opens kml file, and creates root level document.
Takes visibility toggle of "0" or "1" as input and sets Document <open>
attribute accordingly.
'''
self.visibility = visibility
kml = ('<?xml version="1.0" encoding="UTF-8"?>\n'
'<kml xmlns="http://earth.google.com/kml/2.1">\n'
'<Document>\n'
'<open>%s</open>\n') % (self.visibility)
return kml
def openFolder(self, name, visibility):
'''Creates folder element and sets the "<name>" and "<open>" attributes.
Takes folder name string and visibility toggle of "0" or "1" as input
values.
'''
kml = '<Folder>\n<open>%s</open>\n<name>%s</name>\n' % (visibility, name)
return kml
def MakePoint(self, coords, name):
'''Create point placemark.'''
x = coords[0]
y = coords[1]
kml = ('<Placemark>\n'
'<name>%s</name>\n'
'<visibility>1</visibility>\n'
'<Point>\n'
'<coordinates>%f, %f</coordinates>\n'
'</Point>\n'
'</Placemark>\n\n') % (name, x, y)
return kml
def closeFolder(self):
'''Closes folder element.'''
kml = '</Folder>\n'
return kml
def closeDoc(self):
'''Closes KML document'''
kml = '</Document>\n</kml>\n'
return kml
#######################################################################################
if __name__ == '__main__':
main()
|
python
|
"""
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
GUI application for Forward modelling 2D potential field profiles.
Written by Brook Tozer, University of Oxford 2015-17. SIO 2018-19.
Includes ability to import seismic reflection, well, surface outcrop and xy points into the model frame.
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
**Dependencies**
SciPy
NumPy
Matplotlib
pylab
pickle
obspy
wxpython
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
**References**
***
Polygons from Fatiando a Terra.
Uieda, L., V. C. Oliveira Jr and V. C. F. Barbosa (2013), Modeling the Earth with Fatiando a Terra, Proceedings
of the 12th Python in Science Conference
www.fatiando.org/
***
***
Gravity algorithm written using NumPy by Brook Tozer (2015) (Modified from the Fatiando a Terra 2D gravity code).
CODE MODIFIED FROM: bott, M. H. P. (1969). GRAVN. Durham geophysical computer specification No. 1.
***
***
Magnetic algorithm written using NumPy by Brook Tozer (2015)
CODE MODIFIED FROM: Talwani, M., & Heirtzler, J. R. (1964). Computation of magnetic anomalies caused by two dimensional
structures of arbitrary shape, in Parks, G. A., Ed., Computers in the mineral industries, Part 1: Stanford Univ. Publ.,
Geological Sciences, 9, 464-480.
***
***
SEGY plotting is performed using ObsPy.
ObsPy; a python toolbox for seismology Seismological Research Letters(May 2010), 81(3):530-533
obspy.org
***
***
Icons were designed using the Free Icon Maker:
icons8.com
https://icons8.com/icon/set/circle/all
Pixel size: 24
Font: Roboto Slab
Font size: 200
Color: 3498db
***
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The documentation is created using Sphinx.
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
NB. If you created a separate conda env for gmg you must activate it before launching gmg. e.g.::
source activate py3-gmg
"""
# IMPORT MODULES~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
import wx
import matplotlib
matplotlib.use('WXAgg')
from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg as FigureCanvas
from matplotlib.backends.backend_wxagg import NavigationToolbar2WxAgg as NavigationToolbar
import matplotlib.cm as cm
from matplotlib.ticker import FormatStrFormatter
import matplotlib.colors as colors
import wx.adv
from wx.lib.agw import floatspin as fs
import wx.grid as gridlib
import wx.lib.agw.customtreectrl as ct
import wx.py as py
import wx.lib.agw.aui as aui
from wx.lib.buttons import GenBitmapButton
import wx.lib.agw.foldpanelbar as fpb
import pylab as plt
import numpy as np
import csv
import math as m
import os
import sys
from sys import platform
from obspy import read
import pickle as Pickle
from scipy import signal
from scipy import interpolate as ip
from polygon import Polygon
import plot_model
import bott
import talwani_and_heirtzler
from frames import *
from dialogs import *
from objects import *
import model_stats
import struct
import gc
import webbrowser
import time
# FUTURE
# import wx.lib.agw.ribbon as RB
# import wx.EnhancedStatusBar as ESB
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class Gmg(wx.Frame):
"""
Master Class for GMG GUI.
Most functions are contained in this Class.
Upon startup this sets the panels, sizers and event bindings.
Additional classes are used for handling "popout" windows (Dialog boxes).
Objects are passed between this "master" GUI class and the Dialog Boxes.
"""
def __init__(self, *args, **kwds):
wx.Frame.__init__(self, None, wx.ID_ANY, 'gmg: 2D Geophysical Modelling GUI', size=(1800, 1050))
# DEFINE ICONS DIRECTORY
self.gui_icons_dir = os.path.dirname(os.path.abspath(__file__)) + "/icons/"
# SET AND SHOW SPLASH SCREEN
bitmap = wx.Bitmap(self.gui_icons_dir + "gmg_logo_scaled.png")
splash = wx.adv.SplashScreen(bitmap, wx.adv.SPLASH_CENTER_ON_SCREEN | wx.adv.SPLASH_TIMEOUT, 3000,
self, id=wx.ID_ANY, size=(1, 1), style=wx.BORDER_SIMPLE | wx.FRAME_NO_TASKBAR)
splash.Show()
self.Show()
# START AUI WINDOW MANAGER
self.mgr = aui.AuiManager()
# TELL AUI WHICH FRAME TO USE
self.mgr.SetManagedWindow(self)
# SET AUI ICON SIZING AND STYLES (ARROWS)
images = wx.ImageList(16, 16)
top = wx.ArtProvider.GetBitmap(wx.ART_GO_UP, wx.ART_MENU, (16, 16))
bottom = wx.ArtProvider.GetBitmap(wx.ART_GO_DOWN, wx.ART_MENU, (16, 16))
images.Add(top)
images.Add(bottom)
# CREATE PANELS TO FILL WITH ATTRIBUTE CONTROLS, LAYER TREE CONTROL AND FAULT TREE CONTROL
self.leftPanel = wx.SplitterWindow(self, wx.ID_ANY, size=(200, 1000), style=wx.SP_NOBORDER)
self.leftPanel_b = wx.SplitterWindow(self.leftPanel, wx.ID_ANY, size=(200, 700),
style=wx.SP_NOBORDER)
# self.leftPanel.SetMinimumPaneSize(1)
# self.leftPanel_b.SetMinimumPaneSize(1)
# FIRST PANE; LEFT PANEL (=ATTRIBUTES)
self.splitter_left_panel_one = wx.ScrolledWindow(self.leftPanel, wx.ID_ANY, size=(200, 400),
style=wx.ALIGN_LEFT | wx.BORDER_RAISED)
self.controls_panel_bar_one = fpb.FoldPanelBar(self.splitter_left_panel_one, 1, size=(200, 400),
agwStyle=fpb.FPB_VERTICAL)
self.fold_panel_one = self.controls_panel_bar_one.AddFoldPanel("Layer Attributes", collapsed=True,
foldIcons=images)
self.controls_panel_bar_one.Expand(self.fold_panel_one) # ENSURES FOLD PANEL IS VISIBLE
# SECOND PANE; LEFT PANEL (=LAYERS)
# GREY wx PANEL
self.splitter_left_panel_two = wx.ScrolledWindow(self.leftPanel_b, wx.ID_ANY, size=(200, 300),
style=wx.ALIGN_LEFT | wx.BORDER_RAISED | wx.EXPAND)
# THE LAYER TREE SCROLL BAR GOES IN HERE
self.controls_panel_bar_two = fpb.FoldPanelBar(self.splitter_left_panel_two, 1, size=(200, 300),
agwStyle=fpb.FPB_VERTICAL)
self.fold_panel_two = self.controls_panel_bar_two.AddFoldPanel("Layers", collapsed=True, foldIcons=images)
self.controls_panel_bar_two.Expand(self.fold_panel_two) # ENSURES FOLD PANEL IS VISIBLE
self.fold_panel_two.SetSize(200, 300)
# THIRD PANE; LEFT PANEL (=FAULTS)
# GREY wx PANEL
self.splitter_left_panel_three = wx.ScrolledWindow(self.leftPanel_b, wx.ID_ANY, size=(200, 300),
style=wx.ALIGN_LEFT | wx.BORDER_RAISED | wx.EXPAND)
# THE FAULT TREE SCROLL BAR GOES IN HERE
self.controls_panel_bar_three = fpb.FoldPanelBar(self.splitter_left_panel_three, 1, size=(200, 300),
agwStyle=fpb.FPB_VERTICAL)
self.fold_panel_three = self.controls_panel_bar_three.AddFoldPanel("Faults", collapsed=True, foldIcons=images)
self.controls_panel_bar_three.Expand(self.fold_panel_three) # ENSURES FOLD PANEL IS VISIBLE
self.fold_panel_three.SetSize(200, 300)
# SET SPLITTERS
self.leftPanel_b.SplitHorizontally(self.splitter_left_panel_two, self.splitter_left_panel_three)
self.leftPanel.SplitHorizontally(self.splitter_left_panel_one, self.leftPanel_b)
self.splitter_left_panel_sizer = wx.BoxSizer(wx.VERTICAL)
self.splitter_left_panel_sizer.Add(self.leftPanel, 1, wx.EXPAND)
self.splitter_left_panel_one.SetScrollbar(1, 1, 10, 10)
self.splitter_left_panel_two.SetScrollbar(1, 1, 10, 10)
self.splitter_left_panel_three.SetScrollbar(1, 1, 10, 10)
# CREATE PANEL TO FILL WITH MATPLOTLIB INTERACTIVE FIGURE (MAIN GUI MODELLING FRAME)
self.rightPanel = wx.Panel(self, -1, size=(1700, 1100), style=wx.ALIGN_RIGHT | wx.BORDER_RAISED | wx.EXPAND)
# CREATE PANEL FOR PYTHON CONSOLE (USED FOR DEBUGGING AND CUSTOM USAGES)
self.ConsolePanel = wx.Panel(self, -1, size=(1700, 100), style=wx.ALIGN_LEFT | wx.BORDER_RAISED | wx.EXPAND)
intro = "###############################################################\r" \
"!USE import sys; then sys.Gmg.OBJECT TO ACCESS PROGRAM OBJECTS \r" \
"ctrl+up FOR COMMAND HISTORY \r" \
"###############################################################"
py_local = {'__app__': 'gmg Application'}
sys.gmg = self
self.win = py.shell.Shell(self.ConsolePanel, -1, size=(2200, 1100), locals=py_local, introText=intro)
# ADD THE PANES TO THE AUI MANAGER
self.mgr.AddPane(self.leftPanel, aui.AuiPaneInfo().Name('left').Left().Caption("Controls"))
self.mgr.AddPane(self.rightPanel, aui.AuiPaneInfo().Name('right').CenterPane())
self.mgr.AddPane(self.ConsolePanel, aui.AuiPaneInfo().Name('console').Bottom().Caption("Console"))
self.mgr.GetPaneByName('console').Hide() # HIDE PYTHON CONSOLE BY DEFAULT
self.mgr.Update()
# CREATE PROGRAM MENUBAR & TOOLBAR (PLACED AT TOP OF FRAME)
self.create_menu()
# CREATE STATUS BAR
self.statusbar = self.CreateStatusBar(3)
self.controls_button = GenBitmapButton(self.statusbar, -1, wx.Bitmap(self.gui_icons_dir + 'large_up_16.png'),
pos=(0, -4), style=wx.NO_BORDER)
self.Bind(wx.EVT_BUTTON, self.show_controls, self.controls_button)
# PYTHON CONSOLE
self.console_button = GenBitmapButton(self.statusbar, -1, wx.Bitmap(self.gui_icons_dir + 'python_16.png'),
pos=(24, -4), style=wx.NO_BORDER)
self.Bind(wx.EVT_BUTTON, self.show_console, self.console_button)
# TOPOGRAPHY TOGGLE BUTTON
self.topography_button = GenBitmapButton(self.statusbar, 601, wx.Bitmap(self.gui_icons_dir + 'T_16.png'),
pos=(48, -4), style=wx.NO_BORDER)
self.Bind(wx.EVT_BUTTON, self.frame_adjustment, self.topography_button)
# GRAVITY TOGGLE BUTTON
self.gravity_button = GenBitmapButton(self.statusbar, 602, wx.Bitmap(self.gui_icons_dir + 'G_16.png'),
pos=(72, -4), style=wx.NO_BORDER)
self.Bind(wx.EVT_BUTTON, self.frame_adjustment, self.gravity_button)
# MAGNETIC TOGGLE BUTTON
self.magnetic_button = GenBitmapButton(self.statusbar, 603, wx.Bitmap(self.gui_icons_dir + 'M_16.png'),
pos=(96, -4), style=wx.NO_BORDER)
self.Bind(wx.EVT_BUTTON, self.frame_adjustment, self.magnetic_button)
self.status_text = " "
self.statusbar.SetStatusWidths([-1, -1, 1700])
self.statusbar.SetStatusText(self.status_text, 2)
self.statusbar.SetSize((1800, 24))
# SET PROGRAM STATUS
self.model_saved = False
self.newmodel = False
# BIND PROGRAM EXIT BUTTON WITH EXIT FUNCTION
self.Bind(wx.EVT_CLOSE, self.on_close_button)
# MAXIMIZE FRAME
self.Maximize(True)
def create_menu(self):
"""CREATE GUI MENUBAR"""
self.menubar = wx.MenuBar()
menu_file = wx.Menu()
m_new_model = menu_file.Append(-1, "New Model...", "New Model...")
self.Bind(wx.EVT_MENU, self.new_model, m_new_model)
m_load_model = menu_file.Append(-1, "Load Model...", "Load Model...")
self.Bind(wx.EVT_MENU, self.load_model, m_load_model)
m_save_model = menu_file.Append(-1, "Save Model...", "Save Model...")
self.Bind(wx.EVT_MENU, self.save_model, m_save_model)
menu_file.AppendSeparator()
m_save_xy = menu_file.Append(-1, "Save Layers As ASCII .xy File ...",
"Save Layers as ASCII .xy...")
self.Bind(wx.EVT_MENU, self.write_layers_xy, m_save_xy)
m_save_c = menu_file.Append(-1, "Save Model As RayInvr c.in File...", "Save RayInvr c.in File...")
self.Bind(wx.EVT_MENU, self.write_c_xy, m_save_c)
m_save_fig = menu_file.Append(-1, "Save Figure...", "Save model Figure...")
self.Bind(wx.EVT_MENU, self.plot_model, m_save_fig)
menu_file.AppendSeparator()
m_exit = menu_file.Append(-1, "Exit...", "Exit...")
self.Bind(wx.EVT_MENU, self.exit, m_exit)
# DRAW MENU
self.menubar.Append(menu_file, "&File")
# MODEL VIEW MENU
model_view_file = wx.Menu()
m_modify_model_dimensions = model_view_file.Append(-1, "Modify Current Model Dimensions...",
"Modify Current Model Dimensions...")
self.Bind(wx.EVT_MENU, self.modify_model_dimensions, m_modify_model_dimensions)
model_view_file.AppendSeparator()
# PROGRAM FRAME WINDOW SWITCHES
self.topo_frame_switch = True
self.gravity_frame_switch = True
self.magnetic_frame_switch = True
self.m_model_frames_submenu = wx.Menu()
model_view_file.AppendSubMenu(self.m_model_frames_submenu, "Toggle Model Frames")
self.m_model_frames_submenu.Append(601, 'Topography')
self.Bind(wx.EVT_MENU, self.frame_adjustment, id=601)
self.m_model_frames_submenu.Append(602, 'Gravity')
self.Bind(wx.EVT_MENU, self.frame_adjustment, id=602)
self.m_model_frames_submenu.Append(603, 'Magnetics')
self.Bind(wx.EVT_MENU, self.frame_adjustment, id=603)
model_view_file.AppendSeparator()
m_aspect_increase = model_view_file.Append(-1, "&Increase Aspect Ratio...",
"Increase the aspect ratio of the model plot...")
self.Bind(wx.EVT_MENU, self.aspect_increase, m_aspect_increase)
m_aspect_decrease = model_view_file.Append(-1, "&Decrease Aspect Ratio...",
"Decrease the aspect ratio of the model plot...")
self.Bind(wx.EVT_MENU, self.aspect_decrease, m_aspect_decrease)
self.menubar.Append(model_view_file, "&Model View")
# GRAVITY DATA MENU --------------------------------------------------------------------------------------------
self.gravity_data = wx.Menu()
# LOAD OBSERVED GRAVITY DATA
m_load_obs_g = self.gravity_data.Append(-1, "&Load Gravity Anomaly...", "Load Observed Gravity Data...")
self.Bind(wx.EVT_MENU, self.load_obs_g, m_load_obs_g)
# EDIT
self.m_obs_g_submenu = wx.Menu()
self.gravity_data.AppendSubMenu(self.m_obs_g_submenu, "Gravity Data...")
# FILTER MENU
grav_m_filter_observed = self.gravity_data.Append(-1, "Median Filter...", "Filter Observed Anomaly")
self.Bind(wx.EVT_MENU, self.filter_observed_gravity, grav_m_filter_observed)
# HORIZONTAL DERIVATIVE
grav_m_horizontal_derivative = self.gravity_data.Append(-1, "Take Horizontal Derivative...",
"Take Horizontal Derivative")
self.Bind(wx.EVT_MENU, self.take_gravity_horizontal_derivative, grav_m_horizontal_derivative)
# SET RMS OBS ARRAYS
grav_m_set_rms = self.gravity_data.Append(-1, "Set RMS Input...", "Set RMS Input...")
self.Bind(wx.EVT_MENU, self.set_obs_grav_rms, grav_m_set_rms)
# SET ELEVATION FOR CALCULATIONS
m_set_grav_elv = self.gravity_data.Append(-1, "&Set Calculation Elevation...",
"Set Calculation Elevation...")
self.Bind(wx.EVT_MENU, self.set_gravity_elv, m_set_grav_elv)
# SAVE PREDICTED ANOMALY TO DISC
m_save_g_submenu = self.gravity_data.Append(-1, "&Save Predicted Anomaly...",
"Save Predicted Anomaly to Disc...")
self.Bind(wx.EVT_MENU, self.save_modelled_grav, m_save_g_submenu)
# DRAW MENU
self.menubar.Append(self.gravity_data, "&Gravity")
# --------------------------------------------------------------------------------------------------------------
# MAGNETIC DATA MENU -------------------------------------------------------------------------------------------
self.magnetic_data = wx.Menu()
# LOAD OBSERVED MAGNETIC DATA
m_load_obs_m = self.magnetic_data.Append(-1, "&Load Magnetic Anomaly...",
"Load Observed Magnetic Data...")
self.Bind(wx.EVT_MENU, self.load_obs_m, m_load_obs_m)
# EDIT
self.m_obs_mag_submenu = wx.Menu()
self.magnetic_data.AppendSubMenu(self.m_obs_mag_submenu, "Magnetic Anomalies...")
# FILTER MENU
mag_m_filter_observed = self.magnetic_data.Append(-1, "Median Filter...", "Filter Observed Anomaly")
self.Bind(wx.EVT_MENU, self.filter_observed_magnetic, mag_m_filter_observed)
# HORIZONTAL DERIVATIVE
mag_m_horizontal_derivative = self.magnetic_data.Append(-1, "Take Horizontal Derivative...",
"Take Horizontal Derivative")
self.Bind(wx.EVT_MENU, self.take_magnetic_horizontal_derivative, mag_m_horizontal_derivative)
# SET RMS OBS ARRAYS
mag_m_set_rms = self.magnetic_data.Append(-1, "Set RMS Input..", "Set RMS input..")
self.Bind(wx.EVT_MENU, self.set_obs_mag_rms, mag_m_set_rms)
# SET MAG
m_set_mag_variables = self.magnetic_data.Append(-1, "&Set Calculation Elevation...",
"Set Calculation Elevation...")
self.Bind(wx.EVT_MENU, self.set_mag_variables, m_set_mag_variables)
# SAVE PREDICTED ANOMALY TO DISC
m_save_mag_submenu = self.magnetic_data.Append(-1, "&Save Predicted Anomaly...",
"Save Predicted Anomaly to Disc...")
self.Bind(wx.EVT_MENU, self.save_modelled_mag, m_save_mag_submenu)
# DRAW MENU
self.menubar.Append(self.magnetic_data, "&Magnetics")
# --------------------------------------------------------------------------------------------------------------
# TOPOGRAPHY DATA MENU -----------------------------------------------------------------------------------------
self.topography_data = wx.Menu()
# LOAD OBSERVED TOPO DATA
m_load_topo = self.topography_data.Append(-1, "&Load Topography...", "Load Observed Topography...")
self.Bind(wx.EVT_MENU, self.load_topo, m_load_topo)
# EDIT
self.m_topo_submenu = wx.Menu()
self.topography_data.AppendSubMenu(self.m_topo_submenu, "Topography Data")
# FILTER MENU
topo_m_filter_observed = self.topography_data.Append(-1, "Median Filter...", "Filter Observed Anomaly")
self.Bind(wx.EVT_MENU, self.filter_observed_topography, topo_m_filter_observed)
# HORIZONTAL DERIVATIVE
topo_m_horizontal_derivative = self.topography_data.Append(-1, "Take Horizontal Derivative...",
"Take Horizontal Derivative")
self.Bind(wx.EVT_MENU, self.take_topography_horizontal_derivative, topo_m_horizontal_derivative)
# DRAW MENU
self.menubar.Append(self.topography_data, "&Topography")
# --------------------------------------------------------------------------------------------------------------
# XY DATA MENU -------------------------------------------------------------------------------------------------
self.xy_data = wx.Menu()
# LOAD XY DATA
m_load_xy = self.xy_data.Append(-1, "&Load XY Points...", "Load XY Points...")
self.Bind(wx.EVT_MENU, self.load_xy, m_load_xy)
self.m_xy_submenu = wx.Menu()
self.xy_data.AppendSubMenu(self.m_xy_submenu, "XY Data...")
# DRAW MENU
self.menubar.Append(self.xy_data, "&XY Data")
# --------------------------------------------------------------------------------------------------------------
# SEISMIC DATA -------------------------------------------------------------------------------------------------
self.seismic_data = wx.Menu()
# SEGY LOAD
self.m_load_segy = self.seismic_data.Append(-1, "&Load Segy...", "Load Segy Data")
self.Bind(wx.EVT_MENU, self.segy_input, self.m_load_segy)
# SEGY NAME LIST
self.m_segy_submenu = wx.Menu()
self.seismic_data.AppendSubMenu(self.m_segy_submenu, "SEGY Data...")
# GAIN
self.m_gain = wx.Menu()
self.seismic_data.AppendSubMenu(self.m_gain, "Gain")
# COLOR PALETTE
self.m_color_palette = wx.Menu()
self.seismic_data.AppendSubMenu(self.m_color_palette, "Color Palette")
self.m_color_palette.Append(901, 'Grey')
self.Bind(wx.EVT_MENU, self.segy_color_adjustment, id=901)
self.m_color_palette.Append(902, 'Seismic')
self.Bind(wx.EVT_MENU, self.segy_color_adjustment, id=902)
# GAIN INCREASE
self.m_gain_increase = self.m_gain.Append(-1, "Increase...", "Increase...")
self.Bind(wx.EVT_MENU, self.gain_increase, self.m_gain_increase)
# GAIN DECREASE
self.m_gain_decrease = self.m_gain.Append(-1, "Decrease...", "Decrease...")
self.Bind(wx.EVT_MENU, self.gain_decrease, self.m_gain_decrease)
# DRAW MENU
self.menubar.Append(self.seismic_data, "&Seismic Data")
# --------------------------------------------------------------------------------------------------------------
# WELL DATA MENU -----------------------------------------------------------------------------------------------
self.well_data = wx.Menu()
self.m_load_well = self.well_data.Append(-1, "&Load well record...\tCtrl-Shift-w", "Load well record")
self.Bind(wx.EVT_MENU, self.load_well, self.m_load_well)
# WELL SUBMENU
self.m_wells_submenu = wx.Menu()
self.well_data.AppendSubMenu(self.m_wells_submenu, "Wells...")
# DRAW MENU
self.menubar.Append(self.well_data, "&Well Data")
# --------------------------------------------------------------------------------------------------------------
# OUTCROP MENU ------------------------------------------------------------------------------------------------
self.outcrop_file = wx.Menu()
self.m_load_outcrop_data = self.outcrop_file.Append(-1, "&Load Outcrop Data...",
"Load Outcrop Data")
self.Bind(wx.EVT_MENU, self.load_outcrop_data, self.m_load_outcrop_data)
self.m_outcrop_submenu = wx.Menu()
self.outcrop_file.AppendSubMenu(self.m_outcrop_submenu, "Outcrop Data...")
self.menubar.Append(self.outcrop_file, "&Outcrop Data")
# --------------------------------------------------------------------------------------------------------------
# MODEL LAYERS MENU --------------------------------------------------------------------------------------------
self.layer_file = wx.Menu()
# NEW LAYER
self.m_new_layer = self.layer_file.Append(-1, "New Layer...", "New Layer...")
self.Bind(wx.EVT_MENU, self.new_layer, self.m_new_layer)
# LOAD LAYER
self.m_load_layer = self.layer_file.Append(-1, "Load Layer...", "Load Layer...")
self.Bind(wx.EVT_MENU, self.load_layer, self.m_load_layer)
# TRANSPARENCY
self.m_layer_transperency = wx.Menu()
self.layer_file.AppendSubMenu(self.m_layer_transperency, "Transparency")
# TRANSPARENCY INCREASE
self.m_layer_transparency_increase = self.m_layer_transperency.Append(-1, "Increase...", "Increase...")
self.Bind(wx.EVT_MENU, self.transparency_increase, self.m_layer_transparency_increase)
# TRANSPARENCY DECREASE
self.m_layer_transparency_decrease = self.m_layer_transperency.Append(-1, "Decrease...", "Decrease...")
self.Bind(wx.EVT_MENU, self.transparency_decrease, self.m_layer_transparency_decrease)
# BULK SHIFT
self.m_bulk_shift = self.layer_file.Append(-1, "Bulk Shift...", "Bulk Shift...")
self.Bind(wx.EVT_MENU, self.bulk_shift, self.m_bulk_shift)
# PINCH/DEPINCH LAYER
self.pinch_submenu = wx.Menu()
self.layer_file.AppendSubMenu(self.pinch_submenu, "Pinch")
self.pinch_submenu.Append(701, "&Pinch Out Layer...", "Pinch Out Layer...")
self.Bind(wx.EVT_MENU, self.pinch_out_layer, id=701)
self.pinch_submenu.Append(702, "&Depinch Layer...", "Depinch Layer...")
self.Bind(wx.EVT_MENU, self.depinch_layer, id=702)
# SEPARATOR
self.layer_file.AppendSeparator()
# DELETE LAYER
self.m_delete_layer = self.layer_file.Append(-1, "Delete Current Layer...", "Delete Current Layer...")
self.Bind(wx.EVT_MENU, self.delete_layer, self.m_delete_layer)
# APPEND MENU
self.menubar.Append(self.layer_file, "&Layers")
# --------------------------------------------------------------------------------------------------------------
# ATTRIBUTE TABLE MENU -----------------------------------------------------------------------------------------
attribute_file = wx.Menu()
m_attribute_table = attribute_file.Append(-1, "&Open Attribute Table...",
"Open Attribute Table...")
self.Bind(wx.EVT_MENU, self.open_attribute_table, m_attribute_table)
self.menubar.Append(attribute_file, "&Attribute Table")
# --------------------------------------------------------------------------------------------------------------
# HELP MENU ----------------------------------------------------------------------------------------------------
help_file = wx.Menu()
m_help = help_file.Append(-1, "&Documentation...", "Open Documentation html...")
self.Bind(wx.EVT_MENU, self.open_documentation, m_help)
m_about = help_file.Append(-1, "&About...", "About gmg...")
self.Bind(wx.EVT_MENU, self.about_gmg, m_about)
m_legal = help_file.Append(-1, "&Legal...", "Legal...")
self.Bind(wx.EVT_MENU, self.legal, m_legal)
self.menubar.Append(help_file, "&Help")
# SET MENUBAR
self.SetMenuBar(self.menubar)
# --------------------------------------------------------------------------------------------------------------
# TOOLBAR - (THIS IS THE ICON BAR BELOW THE MENU BAR)
self.toolbar = self.CreateToolBar()
t_save_model = self.toolbar.AddTool(wx.ID_ANY, "Save model", wx.Bitmap(self.gui_icons_dir + 'save_24.png'),
shortHelp="Save model")
self.Bind(wx.EVT_TOOL, self.save_model, t_save_model)
t_load_model = self.toolbar.AddTool(wx.ID_ANY, "Load model", wx.Bitmap(self.gui_icons_dir + 'load_24.png'),
shortHelp="Load model")
self.Bind(wx.EVT_TOOL, self.load_model, t_load_model)
# t_calc_topo = self.toolbar.AddTool(wx.ID_ANY, "Calculate topography",
# wx.Bitmap(self.gui_icons_dir + 'T_24.png'), shortHelp="Calculate topography")
# self.Bind(wx.EVT_TOOL, self.calc_topo_switch, t_calc_topo) # FUTURE
self.t_calc_grav = self.toolbar.AddCheckTool(toolId=wx.ID_ANY, label="Calculate gravity anomaly",
bitmap1=wx.Bitmap(self.gui_icons_dir + 'G_24.png'),
bmpDisabled=wx.Bitmap(self.gui_icons_dir + 'G_24.png'),
shortHelp="Calculate gravity anomaly",
longHelp="", clientData=None)
self.Bind(wx.EVT_TOOL, self.calc_grav_switch, self.t_calc_grav)
self.t_calc_mag = self.toolbar.AddCheckTool(toolId=wx.ID_ANY, label="Calculate magnetic anomaly",
bitmap1=wx.Bitmap(self.gui_icons_dir + 'M_24.png'),
bmpDisabled=wx.Bitmap(self.gui_icons_dir + 'M_24.png'),
shortHelp="Calculate magnetic anomaly",
longHelp="", clientData=None)
self.Bind(wx.EVT_TOOL, self.calc_mag_switch, self.t_calc_mag)
self.t_capture_coordinates = self.toolbar.AddCheckTool(toolId=wx.ID_ANY, label="Capture coordinates",
bitmap1=wx.Bitmap(self.gui_icons_dir + 'C_24.png'),
bmpDisabled=wx.Bitmap(self.gui_icons_dir + 'C_24.png'),
shortHelp="Capture coordinates",
longHelp="", clientData=None)
self.Bind(wx.EVT_TOOL, self.capture_coordinates, self.t_capture_coordinates)
t_aspect_increase = self.toolbar.AddTool(wx.ID_ANY, "Aspect increase",
wx.Bitmap(self.gui_icons_dir + 'large_up_24.png'),
shortHelp="Aspect increase")
self.Bind(wx.EVT_TOOL, self.aspect_increase, t_aspect_increase)
t_aspect_decrease = self.toolbar.AddTool(wx.ID_ANY, "Aspect decrease",
wx.Bitmap(self.gui_icons_dir + 'large_down_24.png'),
shortHelp="Aspect decrease")
self.Bind(wx.EVT_TOOL, self.aspect_decrease, t_aspect_decrease)
t_aspect_increase2 = self.toolbar.AddTool(wx.ID_ANY, "Aspect increase x2",
wx.Bitmap(self.gui_icons_dir + 'small_up_24.png'),
shortHelp="Aspect increase x2")
self.Bind(wx.EVT_TOOL, self.aspect_increase2, t_aspect_increase2)
t_aspect_decrease2 = self.toolbar.AddTool(wx.ID_ANY, "Aspect decrease x2",
wx.Bitmap(self.gui_icons_dir + 'small_down_24.png'),
shortHelp="Aspect decrease x2")
self.Bind(wx.EVT_TOOL, self.aspect_decrease2, t_aspect_decrease2)
self.t_zoom = self.toolbar.AddCheckTool(toolId=wx.ID_ANY, label="Zoom in",
bitmap1=wx.Bitmap(self.gui_icons_dir + 'zoom_in_24.png'),
bmpDisabled=wx.Bitmap(self.gui_icons_dir + 'zoom_in_24.png'),
shortHelp="Zoom in",
longHelp="", clientData=None)
self.Bind(wx.EVT_TOOL, self.zoom, self.t_zoom)
t_zoom_out = self.toolbar.AddTool(wx.ID_ANY, "Zoom out",
wx.Bitmap(self.gui_icons_dir + 'zoom_out_24.png'), shortHelp="Zoom out")
self.Bind(wx.EVT_TOOL, self.zoom_out, t_zoom_out)
t_full_extent = self.toolbar.AddTool(wx.ID_ANY, "Full extent",
wx.Bitmap(self.gui_icons_dir + 'full_extent_24.png'),
shortHelp="Full extent")
self.Bind(wx.EVT_TOOL, self.full_extent, t_full_extent, id=604)
self.t_pan = self.toolbar.AddCheckTool(toolId=wx.ID_ANY, label="Pan",
bitmap1=wx.Bitmap(self.gui_icons_dir + 'pan_24.png'),
bmpDisabled=wx.Bitmap(self.gui_icons_dir + 'pan_24.png'),
shortHelp="Pan",
longHelp="", clientData=None)
self.Bind(wx.EVT_TOOL, self.pan, self.t_pan)
t_gain_down = self.toolbar.AddTool(wx.ID_ANY, "Gain down",
wx.Bitmap(self.gui_icons_dir + 'left_small_24.png'), shortHelp="Gain down")
self.Bind(wx.EVT_TOOL, self.gain_decrease, t_gain_down)
t_gain_up = self.toolbar.AddTool(wx.ID_ANY, "Gain up",
wx.Bitmap(self.gui_icons_dir + 'right_small_24.png'), shortHelp="Gain up")
self.Bind(wx.EVT_TOOL, self.gain_increase, t_gain_up)
t_transparency_down = self.toolbar.AddTool(wx.ID_ANY, "Transparency down",
wx.Bitmap(self.gui_icons_dir + 'large_left_24.png'),
shortHelp="Transparency down")
self.Bind(wx.EVT_TOOL, self.transparency_decrease, t_transparency_down)
# INCREASE TRANSPARENCY ICON
t_transparency_up = self.toolbar.AddTool(wx.ID_ANY, "Transparency up",
wx.Bitmap(self.gui_icons_dir + 'large_right_24.png'),
shortHelp="Transparency up")
self.Bind(wx.EVT_TOOL, self.transparency_increase, t_transparency_up)
# LOAD WELL ICON
t_load_well = self.toolbar.AddTool(wx.ID_ANY, "Load well horizons",
wx.Bitmap(self.gui_icons_dir + 'well_24.png'),
shortHelp="Load well horizons")
self.Bind(wx.EVT_TOOL, self.load_well, t_load_well)
# TOGGLE FAULT PICKING MODE
self.t_toogle_fault_mode = self.toolbar.AddCheckTool(toolId=10000, label="Fault pick",
bitmap1=wx.Bitmap(self.gui_icons_dir + 'F_24.png'),
bmpDisabled=wx.Bitmap(self.gui_icons_dir + 'off_F_24.png'),
shortHelp="Toogle fault picking")
self.t_toogle_fault_mode.SetDisabledBitmap(wx.Bitmap(self.gui_icons_dir + 'off_F_24.png'))
self.Bind(wx.EVT_TOOL, self.toogle_fault_mode, self.t_toogle_fault_mode)
# FAULT PICKER ICON
self.t_fault_pick = self.toolbar.AddTool(wx.ID_ANY, "Fault pick",
bitmap=wx.Bitmap(self.gui_icons_dir + 'faultline_24.png'),
shortHelp="Fault picker")
self.Bind(wx.EVT_TOOL, self.pick_new_fault, self.t_fault_pick)
# CREATE TOOLBAR
self.toolbar.Realize()
self.toolbar.SetSize((1790, 36))
def start(self, area, xp, zp):
"""CREATE MPL FIGURE CANVAS"""
self.fig = plt.figure() # CREATE MPL FIGURE
self.canvas = FigureCanvas(self.rightPanel, -1, self.fig) # CREATE FIGURE CANVAS
self.nav_toolbar = NavigationToolbar(self.canvas) # CREATE DEFAULT NAVIGATION TOOLBAR
self.nav_toolbar.Hide() # HIDE DEFAULT NAVIGATION TOOLBAR
# SET DRAW COMMAND WHICH CAN BE CALLED TO REDRAW THE FIGURE
self.draw = self.fig.canvas.draw
# GET THE MODEL DIMENSIONS AND SAMPLE LOCATIONS
self.area = area
self.x1, self.x2, self.z1, self.z2 = 0.001 * np.array(area)
self.xp = np.array(xp, dtype='f')
# DRAW MAIN PROGRAM WINDOW
self.draw_main_frame()
# CONNECT MPL FUNCTIONS
self.connect()
# UPDATE DISPLAY
self.display_info()
self.size_handler()
if self.newmodel:
self.update_layer_data()
else:
pass
# REFRESH SIZER POSITIONS
self.Hide()
self.Show()
# FINALISE INIT PROCESS
# self.run_algorithms()
self.draw()
def initalize_model(self):
"""INITIALISE OBSERVED DATA AND LAYERS"""
# INITIALISE MODEL FRAME ATTRIBUTES
self.nodes = True # SWITCH FOR NODE EDITING MODE
self.zoom_on = False # SWITCH FOR ZOOM MODE
self.pan_on = False # SWITCH FOR PANNING MODE
self.pinch_switch = False # SWITCH FOR NODE PINCHING MODE
self.pinch_count = 0
self.didnt_get_node = False # SWITCH FOR MOUSE CLICK (EITHER CLICKED A NODE OR DIDN'T)
self.node_click_limit = 0.2 # CONTROLS HOW CLOSE A MOUSE CLICK MUST BE TO ACTIVATE A NODE
self.index_node = None # THE ACTIVE NODE
self.select_new_layer_nodes = False # SWITCH TO TURN ON WHEN MOUSE CLICK IS TO BE CAPTURED FOR A NEW LAYER
self.currently_active_layer_id = 0 # LAYER COUNTER
self.pred_topo = None # FUTURE - PREDICTED TOPOGRAPHY FROM MOHO (ISOSTATIC FUNC)
self.predicted_gravity = None # THE CALCULATED GRAVITY RESPONSE
self.predicted_nt = None # THE CALCULATED MAGNETIC RESPONSE
self.calc_padding = 5000. # PADDING FOR POTENTIAL FIELD CALCULATION POINTS
self.padding = 50000. # PADDING FOR LAYERS
self.mag_observation_elv = 0. # OBSERVATION LEVEL FOR MAGNETIC DATA
self.gravity_observation_elv = 0. # OBSERVATION LEVEL FOR GRAVITY DATA
# INITIALISE LAYER LIST
self.layer_list = [] # LIST HOLDING ALL OF THE LAYER OBJECTS
self.total_layer_count = 0
self.layer_transparency = 0.4
# INITIALISE POLYGON LISTS (USED AS MODEL LAYERS)
self.gravity_polygons = []
self.mag_polygons = []
self.polyplots = []
self.poly_fills = [[]]
# INITIALISE FAULT ATTRIBUTES
self.fault_list = []
self.currently_actives_fault_id = 0
self.total_fault_count = 0
self.fault_picking_switch = False
self.select_new_fault_nodes = False
self.selected_node = None
# INITIALISE XY DATA ATTRIBUTES
self.observed_xy_data_list = []
self.xy_data_counter = 0
# INITIALISE OBSERVED TOPOGRAPHY ATTRIBUTES
self.observed_topography_list = []
self.observed_topography_counter = 0
self.observed_topography_switch = False
# INITIALISE OBSERVED GRAVITY ATTRIBUTES
self.observed_gravity_list = []
self.observed_gravity_counter = 0
self.observed_gravity_switch = False
# INITIALISE MODELLING GRAVITY ATTRIBUTES
self.background_density = 0
self.absolute_densities = True
self.calc_grav_switch = False
self.obs_gravity_data_for_rms = [] # OBSERVED DATA LIST TO BE COMPARED TO CALCULATED
self.gravity_rms_value = None # TOTAL RMS MISFIT VALUE
self.grav_residuals = [] # CALCULATED RESIDUAL
# INITIALISE OBSERVED MAGNETIC ATTRIBUTES
self.observed_magnetic_list = []
self.observed_magnetic_counter = 0
self.observed_magnetic_switch = False
# INITIALISE MODELLING MAGNETIC ATTRIBUTES
self.earth_field = 0.
self.calc_mag_switch = False
self.obs_mag_data_for_rms = [] # OBSERVED DATA LIST TO BE COMPARED TO CALCULATED
self.magnetic_rms_value = None # TOTAL RMS MISFIT VALUE (SINGLE INTEGER)
self.mag_residuals = [] # CALCULATED RESIDUAL
# INITIALISE GEOLOGICAL CONTACT ATTRIBUTES
self.outcrop_data_list = []
self.outcrop_data_count = 0
# INITIALISE Well ATTRIBUTES
self.well_data_list = []
self.well_counter = 0
# INITIALISE SEISMIC ATTRIBUTES
self.segy_data_list = []
self.segy_counter = 0
self.segy_color_map = cm.gray
self.segy_gain_neg = -4.0
self.segy_gain_pos = 4.0
# INITIALIZE COORDINATE CAPTURE
self.capture = False
self.linex = []
self.liney = []
def draw_main_frame(self):
"""
DRAW THE GUI FRAMES ( 1. TOPO; 2. GRAVITY; 3. MAGNETICS; 4. MODEL)
docs: https://matplotlib.org/api/axes_api.html
"""
self.columns = 87 # NUMBER OF COLUMNS THE MODEL FRAMES WILL TAKE UP (87/100)
self.x_orig = 10 # X ORIGIN OF MODEL FRAMES (RELATIVE TO 0 AT LEFT MARGIN)
# TOPOGRAPHY CANVAS
self.topo_frame = plt.subplot2grid((26, 100), (0, self.x_orig), rowspan=2, colspan=self.columns)
self.topo_frame.set_ylabel("Topo (km)")
self.topo_frame.set_navigate(False)
self.topo_frame.xaxis.set_major_formatter(plt.NullFormatter())
self.topo_frame.grid()
self.topo_frame.yaxis.set_major_formatter(FormatStrFormatter('%.1f'))
self.topo_d_frame = self.topo_frame.twinx()
self.topo_d_frame.set_navigate(False)
self.topo_d_frame.set_ylabel("dt/dx")
self.topo_d_frame.xaxis.set_major_formatter(plt.NullFormatter())
self.topo_d_frame.yaxis.set_major_formatter(FormatStrFormatter('%.1f'))
# GRAVITY CANVAS
self.gravity_frame = plt.subplot2grid((26, 100), (2, self.x_orig), rowspan=3, colspan=self.columns)
self.gravity_frame.set_navigate(False)
self.gravity_frame.set_ylabel("Grav (mGal)")
self.gravity_frame.xaxis.set_major_formatter(plt.NullFormatter())
self.gravity_frame.grid()
self.gravity_frame.yaxis.set_major_formatter(FormatStrFormatter('%.1f'))
self.gravity_d_frame = self.gravity_frame.twinx()
self.gravity_d_frame.set_navigate(False)
self.gravity_d_frame.set_ylabel("dg/dx")
self.gravity_d_frame.xaxis.set_major_formatter(plt.NullFormatter())
self.gravity_d_frame.yaxis.set_major_formatter(FormatStrFormatter('%.1f'))
# MAGNETIC CANVAS
self.magnetic_frame = plt.subplot2grid((26, 100), (5, self.x_orig), rowspan=3, colspan=self.columns)
self.magnetic_frame.set_ylabel("Mag (nT)")
self.magnetic_frame.set_navigate(False)
self.magnetic_frame.xaxis.set_major_formatter(plt.NullFormatter())
self.magnetic_frame.grid()
self.magnetic_frame.yaxis.set_major_formatter(FormatStrFormatter('%.1f'))
self.magnetic_d_frame = self.magnetic_frame.twinx()
self.magnetic_d_frame.set_ylabel("dnt/dx")
self.magnetic_d_frame.set_navigate(False)
self.magnetic_d_frame.xaxis.set_major_formatter(plt.NullFormatter())
self.magnetic_d_frame.yaxis.set_major_formatter(FormatStrFormatter('%.1f'))
# MODEL CANVAS
self.model_frame = plt.subplot2grid((26, 100), (8, self.x_orig), rowspan=17, colspan=self.columns)
self.model_frame.set_ylabel("Depth (km)")
self.model_frame.set_xlabel("x (km)")
# CREATE DENSITY COLOUR BAR FOR COLORING LAYERS
colormap = matplotlib.cm.coolwarm
cnorm = colors.Normalize(vmin=-0.8, vmax=0.8)
self.colormap = cm.ScalarMappable(norm=cnorm, cmap=colormap)
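# note: a layer's density contrast can be mapped to an RGBA fill colour via
# self.colormap.to_rgba(contrast); e.g. to_rgba(0.4) gives a warm (red) colour
# and to_rgba(-0.4) a cool (blue) one, with values outside [-0.8, 0.8]
# saturating at the colormap end colours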
# SET CANVAS LIMITS
self.model_frame.set_xlim(self.x1, self.x2)
self.model_frame.set_ylim(self.z2, self.z1)
self.model_frame.grid()
self.topo_frame.set_xlim(self.model_frame.get_xlim())
self.gravity_frame.set_xlim(self.model_frame.get_xlim())
self.magnetic_frame.set_xlim(self.model_frame.get_xlim())
self.fig.subplots_adjust(top=0.99, left=-0.045, right=0.99, bottom=0.02,
hspace=1.5)
# ADD FIRST LAYER
if self.newmodel:
# CREATE LAYER0 - PLACE HOLDER FOR THE TOP OF THE MODEL - NOT ACCESSIBLE BY USER
layer0 = Layer()
layer0.type = str('fixed')
# CREATE THE XY NODES
layer0.x_nodes = [-(float(self.padding)), 0., self.x2, self.x2 + (float(self.padding))]
layer0.y_nodes = [0.001, 0.001, 0.001, 0.001]
# SET CURRENT NODES
self.current_x_nodes = layer0.x_nodes
self.current_y_nodes = layer0.y_nodes
# DRAW THE CURRENTLY ACTIVE LAYER (THE NODES THAT CAN BE INTERACTED WITH)
self.currently_active_layer, = self.model_frame.plot(layer0.x_nodes, layer0.y_nodes, marker='o', color='k',
linewidth=1.0, alpha=0.5, picker=True)
# ADD THE LAYER LINE TO THE PLOT
layer0.node_mpl_actor = self.model_frame.plot(layer0.x_nodes, layer0.y_nodes, color='black', linewidth=1.0,
alpha=1.0)
# ADD THE LAYER POLYGON FILL TO THE PLOT
layer0.polygon_mpl_actor = self.model_frame.fill(layer0.x_nodes, layer0.y_nodes, color='blue',
alpha=self.layer_transparency, closed=True, linewidth=None,
ec=None)
# SET THE CURRENTLY ACTIVE RED NODE
self.current_node = self.model_frame.scatter(-40000., 0, s=50, color='r', zorder=10) # PLACE HOLDER ONLY
self.layer_list.append(layer0)
# ADDITIONAL MAIN FRAME WIDGETS - PLACED ON LEFT HAND SIDE OF THE FRAME
# MAKE NODE X Y LABEL
self.node_text = wx.StaticText(self.fold_panel_one, -1, label="Node Position:", style=wx.ALIGN_LEFT)
# MAKE DENSITY SPINNER
self.density_text = wx.StaticText(self.fold_panel_one, -1, label="Density: ", style=wx.ALIGN_LEFT)
self.density_input = fs.FloatSpin(self.fold_panel_one, -1, min_val=-5, max_val=5, increment=0.001, value=0.00)
self.density_input.SetFormat("%f")
self.density_input.SetDigits(4)
# MAKE REFERENCE DENSITY SPINNER
self.ref_density_text = wx.StaticText(self.fold_panel_one, -1, label="Reference: \nDensity",
style=wx.ALIGN_LEFT)
self.ref_density_input = fs.FloatSpin(self.fold_panel_one, -1, min_val=-5, max_val=5, increment=0.001,
value=0.00)
self.ref_density_input.SetFormat("%f")
self.ref_density_input.SetDigits(4)
# MAKE SUSCEPTIBILITY SPINNER
self.susceptibility_text = wx.StaticText(self.fold_panel_one, -1, label="Susceptibility:", style=wx.ALIGN_LEFT)
self.susceptibility_input = fs.FloatSpin(self.fold_panel_one, -1, min_val=-2.0, max_val=1000000.0,
increment=0.00001, value=0.00, style=wx.ALIGN_RIGHT)
self.susceptibility_input.SetFormat("%f")
self.susceptibility_input.SetDigits(6)
# MAKE ANGLE A SPINNER
self.angle_a_text = wx.StaticText(self.fold_panel_one, -1, label="Angle A (Inc): ", style=wx.ALIGN_LEFT)
self.angle_a_input = fs.FloatSpin(self.fold_panel_one, -1, min_val=-90.0, max_val=90.0, increment=1.0,
value=0.0, style=wx.ALIGN_RIGHT)
self.angle_a_input.SetFormat("%f")
self.angle_a_input.SetDigits(1)
# MAKE ANGLE B SPINNER
self.angle_b_text = wx.StaticText(self.fold_panel_one, -1, label="Angle B (Dec):", style=wx.ALIGN_LEFT)
self.angle_b_input = fs.FloatSpin(self.fold_panel_one, -1, min_val=0.0, max_val=180.0, increment=1.0, value=0.0,
style=wx.ALIGN_RIGHT)
self.angle_b_input.SetFormat("%f")
self.angle_b_input.SetDigits(1)
# MAKE ANGLE C SPINNER
self.angle_c_text = wx.StaticText(self.fold_panel_one, -1, label="Angle C (Azm):", style=wx.ALIGN_LEFT)
self.angle_c_input = fs.FloatSpin(self.fold_panel_one, -1, min_val=-180.0, max_val=180.0, increment=1.0,
value=0.0, style=wx.ALIGN_RIGHT)
self.angle_c_input.SetFormat("%f")
self.angle_c_input.SetDigits(1)
# MAKE EARTH FIELD SPINNER
self.earth_field_text = wx.StaticText(self.fold_panel_one, -1, label="F:", style=wx.ALIGN_LEFT)
self.earth_field_input = fs.FloatSpin(self.fold_panel_one, -1, min_val=0.0, max_val=1000000.0, increment=1.0,
value=0.0, style=wx.ALIGN_RIGHT)
self.earth_field_input.SetFormat("%f")
self.earth_field_input.SetDigits(1)
# MAKE WELL TEXT SIZE SLIDER
self.text_size_text = wx.StaticText(self.fold_panel_one, -1, label="Label Text Size:")
self.text_size_input = wx.Slider(self.fold_panel_one, value=1, minValue=1, maxValue=20., size=(175, -1),
style=wx.SL_HORIZONTAL)
# MAKE NODE XY SPINNERS
self.x_text = wx.StaticText(self.fold_panel_one, -1, label="X Value:")
self.x_input = fs.FloatSpin(self.fold_panel_one, -1, increment=0.001, value=0.00)
self.x_input.SetDigits(4)
self.y_text = wx.StaticText(self.fold_panel_one, -1, label="Y Value:")
self.y_input = fs.FloatSpin(self.fold_panel_one, -1, increment=0.001, value=0.00)
self.y_input.SetDigits(4)
# Make Set button
self.node_set_button = wx.Button(self.fold_panel_one, -1, "Set")
# MAKE DENSITY CONTRAST SCALEBAR
# colormap = matplotlib.cm.coolwarm
# cnorm = colors.Normalize(vmin=-0.8, vmax=0.8)
# self.cb1 = matplotlib.colorbar.ColorbarBase(self.fold_panel_one, cmap=colormap, norm=cnorm,
# orientation='horizontal')
# self.cb1.ax.tick_params(labelsize=6)
# self.cb1.set_label('Density contrast ($kg/m^{3}$)', fontsize=6)
# INITIALISE CALCULATED ANOMALY LINES
self.pred_gravity_plot, = self.gravity_frame.plot([], [], '-r', linewidth=2, alpha=0.5)
self.gravity_rms_plot, = self.gravity_frame.plot([], [], color='purple', linewidth=1.5, alpha=0.5)
self.predicted_nt_plot, = self.magnetic_frame.plot([], [], '-g', linewidth=2, alpha=0.5)
self.mag_rms_plot, = self.magnetic_frame.plot([], [], color='purple', linewidth=1.5, alpha=0.5)
# MAKE LAYER TREE
self.tree = ct.CustomTreeCtrl(self.fold_panel_two, -1,
agwStyle=wx.TR_DEFAULT_STYLE | wx.TR_ROW_LINES | wx.TR_HIDE_ROOT)
self.tree.SetIndent(0.0)
self.Bind(wx.EVT_TREE_ITEM_ACTIVATED, self.on_layer_activated, self.tree)
self.Bind(wx.EVT_TREE_ITEM_RIGHT_CLICK, self.on_tree_right_click_down, self.tree)
# TREE ATTRIBUTES
self.root = self.tree.AddRoot("Layers:")
self.tree.SetItemPyData(self.root, None)
self.tree_items = ["Layer 0"]
self.Bind(ct.EVT_TREE_ITEM_CHECKED, self.item_checked, self.tree)
# MAKE FAULT TREE
self.fault_tree = ct.CustomTreeCtrl(self.fold_panel_three, -1,
agwStyle=wx.TR_DEFAULT_STYLE | wx.TR_ROW_LINES | wx.TR_HIDE_ROOT)
self.fault_tree.SetIndent(0.0)
self.Bind(wx.EVT_TREE_ITEM_ACTIVATED, self.on_fault_activated, self.fault_tree)
self.Bind(wx.EVT_TREE_ITEM_RIGHT_CLICK, self.on_fault_tree_right_click_down, self.fault_tree)
# TREE ATTRIBUTES
self.fault_tree_root = self.fault_tree.AddRoot("Faults:")
self.fault_tree.SetItemPyData(self.fault_tree_root, None)
self.fault_tree_items = []
self.Bind(ct.EVT_TREE_ITEM_CHECKED, self.fault_checked, self.fault_tree)
# UPDATE INFO BAR
self.display_info()
# REDRAW MAIN
self.draw()
def size_handler(self):
"""PLACE THE GUI FRAMES IN THE wxSIZER WINDOWS"""
# --------------------------------------------------------------------------------------------------------------
# POPULATE ATTRIBUTES PANEL (LEFT PANEL OF GUI)
self.attributes_box = wx.GridBagSizer(hgap=2, vgap=3)
r = 1 # CURRENT ROW
c = 0 # CURRENT COLUMN
# LINE SEP
line = wx.StaticLine(self.fold_panel_one)
self.attributes_box.Add(line, pos=(r, c), span=(1, 2), flag=wx.ALIGN_LEFT | wx.EXPAND, border=5)
# DENSITY
r += 1
self.attributes_box.Add(self.density_text, pos=(r, c), span=(1, 1), flag=wx.ALIGN_LEFT, border=5)
c += 1
self.attributes_box.Add(self.density_input, pos=(r, c), span=(1, 1), flag=wx.ALIGN_LEFT, border=5)
# LINE SEP
r += 1
c = 0
line = wx.StaticLine(self.fold_panel_one)
self.attributes_box.Add(line, pos=(r, c), span=(1, 2), flag=wx.ALIGN_LEFT | wx.EXPAND, border=5)
# REFERENCE DENSITY
r += 1
c = 0
self.attributes_box.Add(self.ref_density_text, pos=(r, c), span=(1, 1), flag=wx.ALIGN_LEFT, border=5)
c += 1
self.attributes_box.Add(self.ref_density_input, pos=(r, c), span=(1, 1), flag=wx.ALIGN_LEFT, border=5)
# LINE SEP
r += 1
c = 0
line = wx.StaticLine(self.fold_panel_one)
self.attributes_box.Add(line, pos=(r, c), span=(1, 2), flag=wx.ALIGN_LEFT | wx.EXPAND, border=5)
# SUSCEPTIBILITY
r += 1
c = 0
self.attributes_box.Add(self.susceptibility_text, pos=(r, c), span=(1, 1), flag=wx.ALIGN_LEFT,
border=5)
c += 1
self.attributes_box.Add(self.susceptibility_input, pos=(r, c), span=(1, 1), flag=wx.ALIGN_LEFT,
border=5)
# ANGLE A
r += 1
c = 0
self.attributes_box.Add(self.angle_a_text, pos=(r, c), span=(1, 1), flag=wx.ALIGN_LEFT,
border=5)
c += 1
self.attributes_box.Add(self.angle_a_input, pos=(r, c), span=(1, 1), flag=wx.ALIGN_LEFT,
border=5)
# ANGLE B
r += 1
c = 0
self.attributes_box.Add(self.angle_b_text, pos=(r, c), span=(1, 1), flag=wx.ALIGN_LEFT,
border=5)
c += 1
self.attributes_box.Add(self.angle_b_input, pos=(r, c), span=(1, 1), flag=wx.ALIGN_LEFT,
border=5)
# Angle C
r += 1
c = 0
self.attributes_box.Add(self.angle_c_text, pos=(r, c), span=(1, 1), flag=wx.ALIGN_LEFT,
border=5)
c += 1
self.attributes_box.Add(self.angle_c_input, pos=(r, c), span=(1, 1), flag=wx.ALIGN_LEFT,
border=5)
# Earth Field
r += 1
c = 0
self.attributes_box.Add(self.earth_field_text, pos=(r, c), span=(1, 1), flag=wx.ALIGN_LEFT,
border=5)
c += 1
self.attributes_box.Add(self.earth_field_input, pos=(r, c), span=(1, 1), flag=wx.ALIGN_LEFT,
border=5)
# LINE SEP
r += 1
c = 0
line = wx.StaticLine(self.fold_panel_one)
self.attributes_box.Add(line, pos=(r, c), span=(1, 2), flag=wx.ALIGN_LEFT | wx.EXPAND, border=5)
# XY NODES
r += 1
c = 0
self.attributes_box.Add(self.node_text, pos=(r, c), span=(1, 1), flag=wx.ALIGN_LEFT,
border=5)
# LINE SEP
r += 1
c = 0
line = wx.StaticLine(self.fold_panel_one)
self.attributes_box.Add(line, pos=(r, c), span=(1, 2), flag=wx.ALIGN_LEFT | wx.EXPAND, border=5)
# X NODE
r += 1
c = 0
self.attributes_box.Add(self.x_text, pos=(r, c), span=(1, 1), flag=wx.ALIGN_LEFT,
border=5)
c += 1
self.attributes_box.Add(self.x_input, pos=(r, c), span=(1, 1), flag=wx.ALIGN_LEFT,
border=5)
# Y NODE
r += 1
c = 0
self.attributes_box.Add(self.y_text, pos=(r, c), span=(1, 1), flag=wx.ALIGN_LEFT,
border=5)
c += 1
self.attributes_box.Add(self.y_input, pos=(r, c), span=(1, 1), flag=wx.ALIGN_LEFT,
border=5)
# LINE SEP
r += 1
c = 0
line = wx.StaticLine(self.fold_panel_one)
self.attributes_box.Add(line, pos=(r, c), span=(1, 2), flag=wx.ALIGN_LEFT | wx.EXPAND, border=5)
# SET BUTTON
r += 1
c = 0
self.attributes_box.Add(self.node_set_button, pos=(r, c), span=(1, 2), flag=wx.ALIGN_LEFT | wx.EXPAND,
border=5)
# LINE SEP
r += 1
c = 0
line = wx.StaticLine(self.fold_panel_one)
self.attributes_box.Add(line, pos=(r, c), span=(1, 2), flag=wx.ALIGN_LEFT | wx.EXPAND, border=5)
# LABEL TEXT SIZE
r += 1
c = 0
self.attributes_box.Add(self.text_size_text, pos=(r, c), span=(1, 2), flag=wx.ALIGN_CENTER,
border=5)
r += 1
c = 0
self.attributes_box.Add(self.text_size_input, pos=(r, c), span=(1, 2), flag=wx.ALIGN_CENTER,
border=5)
# LINE SEP
r += 1
c = 0
line = wx.StaticLine(self.fold_panel_one)
self.attributes_box.Add(line, pos=(r, c), span=(1, 2), flag=wx.ALIGN_LEFT | wx.EXPAND, border=5)
# DENSITY SCALE BAR
# self.attr_box.Add(self.cb1, 0, wx.ALL | wx.LEFT | wx.EXPAND, 5)
# --------------------------------------------------------------------------------------------------------------
# --------------------------------------------------------------------------------------------------------------
# CREATE LAYER TREE BOX
self.tree_box = wx.BoxSizer(wx.VERTICAL)
self.tree_box.Add(self.tree, 1, wx.TOP | wx.ALIGN_CENTER | wx.EXPAND, border=20)
# --------------------------------------------------------------------------------------------------------------
# --------------------------------------------------------------------------------------------------------------
# CREATE FAULT TREE BOX
self.fault_tree_box = wx.BoxSizer(wx.VERTICAL)
self.fault_tree_box.Add(self.fault_tree, 1, wx.TOP | wx.ALIGN_CENTER | wx.EXPAND, border=20)
# --------------------------------------------------------------------------------------------------------------
# --------------------------------------------------------------------------------------------------------------
# CREATE A BOX SIZER FOR THE MAIN MODELLING FRAME
self.canvas_box = wx.BoxSizer(wx.HORIZONTAL)
# ADD THE MAIN MODELLING FRAME TO IT'S A BOX SIZER
self.canvas_box.Add(self.canvas, 1, wx.ALL | wx.ALIGN_CENTER | wx.EXPAND, border=2)
# --------------------------------------------------------------------------------------------------------------
# --------------------------------------------------------------------------------------------------------------
# PLACE BOX SIZERS IN CORRECT PANELS
self.fold_panel_one.SetSizerAndFit(self.attributes_box)
self.fold_panel_two.SetSizerAndFit(self.tree_box)
self.fold_panel_two.SetSize(200, 300)
self.fold_panel_three.SetSizerAndFit(self.fault_tree_box)
self.leftPanel.SetSizer(self.splitter_left_panel_sizer)
self.fold_panel_three.SetSize(200, 300)
self.fold_panel_one.Collapse()
self.fold_panel_one.Expand()
self.fold_panel_two.Collapse()
self.fold_panel_two.Expand()
self.fold_panel_three.Collapse()
self.fold_panel_three.Expand()
self.rightPanel.SetSizerAndFit(self.canvas_box)
self.rightPanel.SetSize(self.GetSize())
# --------------------------------------------------------------------------------------------------------------
def frame_adjustment(self, event):
"""FIND WHICH FRAME IS REFERENCED & CHANGE SWITCH"""
self.current_xlim = self.model_frame.get_xlim()
self.current_ylim = self.model_frame.get_ylim()
if event.Id == 601:
if self.topo_frame_switch is True:
self.topo_frame.set_visible(False)
self.topo_d_frame.set_visible(False)
self.topo_frame_switch = False
else:
self.topo_frame_switch = True
self.topo_frame.set_visible(True)
self.topo_d_frame.set_visible(True)
if event.Id == 602:
if self.gravity_frame_switch is True:
self.gravity_frame.set_visible(False)
self.gravity_d_frame.set_visible(False)
self.gravity_frame_switch = False
else:
self.gravity_frame_switch = True
self.gravity_frame.set_visible(True)
self.gravity_d_frame.set_visible(True)
if event.Id == 603:
if self.magnetic_frame_switch is True:
self.magnetic_frame.set_visible(False)
self.magnetic_d_frame.set_visible(False)
self.magnetic_frame_switch = False
else:
self.magnetic_frame_switch = True
self.magnetic_frame.set_visible(True)
self.magnetic_d_frame.set_visible(True)
# ADJUST FRAME SIZING AND SET PROGRAM WINDOW
if self.topo_frame_switch is True and self.gravity_frame_switch is True and self.magnetic_frame_switch is True:
# TRUE TRUE TRUE
# TOPO CANVAS
self.topo_frame = plt.subplot2grid((26, 100), (0, self.x_orig), rowspan=2, colspan=self.columns)
self.topo_frame.set_ylabel("(m)")
self.topo_frame.xaxis.set_major_formatter(plt.NullFormatter())
self.topo_frame.grid()
self.topo_frame.yaxis.set_major_formatter(FormatStrFormatter('%.1f'))
self.topo_d_frame = self.topo_frame.twinx()
self.topo_d_frame.set_ylabel("dt/dx")
self.topo_d_frame.xaxis.set_major_formatter(plt.NullFormatter())
self.topo_d_frame.yaxis.set_major_formatter(FormatStrFormatter('%.1f'))
# GRAV CANVAS
self.gravity_frame = plt.subplot2grid((26, 100), (2, self.x_orig), rowspan=3, colspan=self.columns)
self.gravity_frame.set_ylabel("(mGal)")
self.gravity_frame.xaxis.set_major_formatter(plt.NullFormatter())
self.gravity_frame.grid()
self.gravity_frame.yaxis.set_major_formatter(FormatStrFormatter('%.1f'))
self.gravity_d_frame = self.gravity_frame.twinx()
self.gravity_d_frame.set_ylabel("dg/dx")
self.gravity_d_frame.xaxis.set_major_formatter(plt.NullFormatter())
self.gravity_d_frame.yaxis.set_major_formatter(FormatStrFormatter('%.1f'))
# MAG CANVAS
self.magnetic_frame = plt.subplot2grid((26, 100), (5, self.x_orig), rowspan=3, colspan=self.columns)
self.magnetic_frame.set_ylabel("(nT)")
self.magnetic_frame.xaxis.set_major_formatter(plt.NullFormatter())
self.magnetic_frame.grid()
self.magnetic_frame.yaxis.set_major_formatter(FormatStrFormatter('%.1f'))
self.magnetic_d_frame = self.magnetic_frame.twinx()
self.magnetic_d_frame.set_ylabel("dnt/dx")
self.magnetic_d_frame.xaxis.set_major_formatter(plt.NullFormatter())
self.magnetic_d_frame.yaxis.set_major_formatter(FormatStrFormatter('%.1f'))
elif self.topo_frame_switch is False and self.gravity_frame_switch is True and \
self.magnetic_frame_switch is True:
# FALSE TRUE TRUE
# TOPO CANVAS - HIDDEN
# GRAV CANVAS
self.gravity_frame = plt.subplot2grid((26, 100), (0, self.x_orig), rowspan=4, colspan=self.columns)
self.gravity_frame.set_ylabel("(mGal)")
self.gravity_frame.grid()
self.gravity_frame.yaxis.set_major_formatter(FormatStrFormatter('%.1f'))
self.gravity_d_frame = self.gravity_frame.twinx()
self.gravity_d_frame.set_ylabel("dg/dx")
self.gravity_d_frame.xaxis.set_major_formatter(plt.NullFormatter())
self.gravity_d_frame.yaxis.set_major_formatter(FormatStrFormatter('%.1f'))
# MAG CANVAS
self.magnetic_frame = plt.subplot2grid((26, 100), (4, self.x_orig), rowspan=4, colspan=self.columns)
self.magnetic_frame.set_ylabel("(nT)")
self.magnetic_frame.xaxis.set_major_formatter(plt.NullFormatter())
self.magnetic_frame.grid()
self.magnetic_frame.yaxis.set_major_formatter(FormatStrFormatter('%.1f'))
self.magnetic_d_frame = self.magnetic_frame.twinx()
self.magnetic_d_frame.set_ylabel("dnt/dx")
self.magnetic_d_frame.xaxis.set_major_formatter(plt.NullFormatter())
self.magnetic_d_frame.yaxis.set_major_formatter(FormatStrFormatter('%.1f'))
elif self.topo_frame_switch is True and self.gravity_frame_switch is False and \
self.magnetic_frame_switch is True:
# TRUE FALSE TRUE
# TOPO CANVAS
self.topo_frame = plt.subplot2grid((26, 100), (0, self.x_orig), rowspan=2, colspan=self.columns)
self.topo_frame.set_ylabel("(m)")
self.topo_frame.xaxis.set_major_formatter(plt.NullFormatter())
self.topo_frame.grid()
self.topo_d_frame = self.topo_frame.twinx()
self.topo_d_frame.set_ylabel("dt/dx")
self.topo_d_frame.xaxis.set_major_formatter(plt.NullFormatter())
self.topo_d_frame.yaxis.set_major_formatter(FormatStrFormatter('%.1f'))
# GRAV CANVAS - HIDDEN
# MAG CANVAS
self.magnetic_frame = plt.subplot2grid((26, 100), (2, self.x_orig), rowspan=6, colspan=self.columns)
self.magnetic_frame.set_ylabel("(nT)")
self.magnetic_frame.xaxis.set_major_formatter(plt.NullFormatter())
self.magnetic_frame.grid()
self.magnetic_frame.yaxis.set_major_formatter(FormatStrFormatter('%.1f'))
self.magnetic_d_frame = self.magnetic_frame.twinx()
self.magnetic_d_frame.set_ylabel("dnt/dx")
self.magnetic_d_frame.xaxis.set_major_formatter(plt.NullFormatter())
self.magnetic_d_frame.yaxis.set_major_formatter(FormatStrFormatter('%.1f'))
elif self.topo_frame_switch is True and self.gravity_frame_switch is True and \
self.magnetic_frame_switch is False:
# TRUE TRUE FALSE
# TOPO CANVAS
self.topo_frame = plt.subplot2grid((26, 100), (0, self.x_orig), rowspan=2, colspan=self.columns)
self.topo_frame.set_ylabel("(m)")
self.topo_frame.xaxis.set_major_formatter(plt.NullFormatter())
self.topo_frame.grid()
self.topo_d_frame = self.topo_frame.twinx()
self.topo_d_frame.set_ylabel("dt/dx")
self.topo_d_frame.xaxis.set_major_formatter(plt.NullFormatter())
self.topo_d_frame.yaxis.set_major_formatter(FormatStrFormatter('%.1f'))
# GRAV CANVAS
self.gravity_frame = plt.subplot2grid((26, 100), (2, self.x_orig), rowspan=6, colspan=self.columns)
self.gravity_frame.set_ylabel("(mGal)")
self.gravity_frame.grid()
self.gravity_frame.yaxis.set_major_formatter(FormatStrFormatter('%.1f'))
self.gravity_d_frame = self.gravity_frame.twinx()
self.gravity_d_frame.set_ylabel("dg/dx")
self.gravity_d_frame.xaxis.set_major_formatter(plt.NullFormatter())
self.gravity_d_frame.yaxis.set_major_formatter(FormatStrFormatter('%.1f'))
# MAG CANVAS - HIDDEN
elif self.topo_frame_switch is False and self.gravity_frame_switch is False and \
self.magnetic_frame_switch is True:
            # FALSE FALSE TRUE
# TOPO CANVAS - HIDDEN
# GRAV CANVAS - HIDDEN
            # MAG CANVAS
self.magnetic_frame = plt.subplot2grid((26, 100), (0, self.x_orig), rowspan=8, colspan=self.columns)
self.magnetic_frame.set_ylabel("(nT)")
self.magnetic_frame.xaxis.set_major_formatter(plt.NullFormatter())
self.magnetic_frame.grid()
self.magnetic_frame.yaxis.set_major_formatter(FormatStrFormatter('%.1f'))
self.magnetic_d_frame = self.magnetic_frame.twinx()
self.magnetic_d_frame.set_ylabel("dnt/dx")
self.magnetic_d_frame.xaxis.set_major_formatter(plt.NullFormatter())
self.magnetic_d_frame.yaxis.set_major_formatter(FormatStrFormatter('%.1f'))
elif self.topo_frame_switch is False and self.gravity_frame_switch is True and \
self.magnetic_frame_switch is False:
# FALSE TRUE FALSE
# TOPO CANVAS - HIDDEN
# GRAV CANVAS
self.gravity_frame = plt.subplot2grid((26, 100), (0, self.x_orig), rowspan=8, colspan=self.columns)
self.gravity_frame.set_ylabel("(mGal)")
self.gravity_frame.grid()
self.gravity_frame.yaxis.set_major_formatter(FormatStrFormatter('%.1f'))
self.gravity_d_frame = self.gravity_frame.twinx()
self.gravity_d_frame.set_ylabel("dg/dx")
self.gravity_d_frame.xaxis.set_major_formatter(plt.NullFormatter())
self.gravity_d_frame.yaxis.set_major_formatter(FormatStrFormatter('%.1f'))
            # MAG CANVAS - HIDDEN
elif self.topo_frame_switch is True and self.gravity_frame_switch is False and \
self.magnetic_frame_switch is False:
# TRUE FALSE FALSE
# TOPO CANVAS
self.topo_frame = plt.subplot2grid((26, 100), (0, self.x_orig), rowspan=8, colspan=self.columns)
self.topo_frame.set_ylabel("(m)")
self.topo_frame.xaxis.set_major_formatter(plt.NullFormatter())
self.topo_frame.grid()
self.topo_d_frame = self.topo_frame.twinx()
self.topo_d_frame.set_ylabel("dt/dx")
self.topo_d_frame.xaxis.set_major_formatter(plt.NullFormatter())
self.topo_d_frame.yaxis.set_major_formatter(FormatStrFormatter('%.1f'))
# GRAV CANVAS - HIDDEN
# MAG CANVAS - HIDDEN
elif self.topo_frame_switch is False and self.gravity_frame_switch is False and \
self.magnetic_frame_switch is False:
pass
# FALSE FALSE FALSE
# TOPO CANVAS - HIDDEN
# GRAV CANVAS - HIDDEN
# MAG CANVAS - HIDDEN
# SET CANVAS LIMITS
self.model_frame.set_xlim(self.current_xlim)
self.model_frame.set_ylim(self.current_ylim)
self.model_frame.grid()
if self.topo_frame is not None:
self.topo_frame.set_xlim(self.model_frame.get_xlim())
self.topo_d_frame.set_xlim(self.model_frame.get_xlim())
if self.gravity_frame is not None:
self.gravity_frame.set_xlim(self.model_frame.get_xlim())
self.gravity_d_frame.set_xlim(self.model_frame.get_xlim())
if self.magnetic_frame is not None:
self.magnetic_frame.set_xlim(self.model_frame.get_xlim())
self.magnetic_d_frame.set_xlim(self.model_frame.get_xlim())
self.fig.subplots_adjust(top=0.99, left=-0.045, right=0.99, bottom=0.02, hspace=1.5)
        # INITIALISE CALCULATED POTENTIAL FIELD (P.F.) LINES
if self.gravity_frame is not None:
self.pred_gravity_plot, = self.gravity_frame.plot([], [], '-r', linewidth=2)
self.gravity_rms_plot, = self.gravity_frame.plot([], [], color='purple', linewidth=1.5)
if self.magnetic_frame is not None:
self.predicted_nt_plot, = self.magnetic_frame.plot([], [], '-g', linewidth=2)
self.mag_rms_plot, = self.magnetic_frame.plot([], [], color='purple', linewidth=1.5)
# PLOT OBSERVED TOPO DATA
if self.topo_frame is not None:
# REPLOT OBSERVED TOPOGRAPHY DATA
for x in range(len(self.observed_topography_list)):
if self.observed_topography_list[x] is not None:
# DRAW DATA IN MODEL FRAME
                    if self.observed_topography_list[x].type != "derivative":
self.observed_topography_list[x].mpl_actor = self.topo_frame.scatter(
self.observed_topography_list[x].data[:, 0],
self.observed_topography_list[x].data[:, 1],
marker='o', s=5,
color=self.observed_topography_list[x].color,
gid=self.observed_topography_list[x].id)
else:
self.observed_topography_list[x].mpl_actor = self.topo_d_frame.scatter(
self.observed_topography_list[x].data[:, 0],
self.observed_topography_list[x].data[:, 1],
marker='o', s=5,
color=self.observed_topography_list[x].color,
gid=self.observed_topography_list[x].id)
# PLOT OBSERVED GRAVITY DATA
if self.gravity_frame is not None:
# REPLOT OBSERVED GRAVITY DATA
for x in range(len(self.observed_gravity_list)):
if self.observed_gravity_list[x] is not None:
# DRAW DATA IN MODEL FRAME
                    if self.observed_gravity_list[x].type != "derivative":
self.observed_gravity_list[x].mpl_actor = self.gravity_frame.scatter(
self.observed_gravity_list[x].data[:, 0],
self.observed_gravity_list[x].data[:, 1],
marker='o', s=5,
color=self.observed_gravity_list[x].color,
gid=11000 + self.observed_gravity_list[x].id)
else:
self.observed_gravity_list[x].mpl_actor = self.gravity_d_frame.scatter(
self.observed_gravity_list[x].data[:, 0],
self.observed_gravity_list[x].data[:, 1],
marker='o', s=5,
color=self.observed_gravity_list[x].color,
gid=11000 + self.observed_gravity_list[x].id)
# PLOT OBSERVED MAGNETIC DATA
if self.magnetic_frame is not None:
# REPLOT OBSERVED MAGNETIC DATA
for x in range(len(self.observed_magnetic_list)):
if self.observed_magnetic_list[x] is not None:
# DRAW DATA IN MODEL FRAME
                    if self.observed_magnetic_list[x].type != "derivative":
self.observed_magnetic_list[x].mpl_actor = self.magnetic_frame.scatter(
self.observed_magnetic_list[x].data[:, 0],
self.observed_magnetic_list[x].data[:, 1],
marker='o', s=5,
color=self.observed_magnetic_list[x].color,
gid=self.observed_magnetic_list[x].id)
else:
self.observed_magnetic_list[x].mpl_actor = self.magnetic_d_frame.scatter(
self.observed_magnetic_list[x].data[:, 0],
self.observed_magnetic_list[x].data[:, 1],
marker='o', s=5,
color=self.observed_magnetic_list[x].color,
gid=self.observed_magnetic_list[x].id)
# UPDATE FRAMES
self.model_frame.grid(True)
self.run_algorithms()
self.draw()
self.set_frame_limits()
def show_controls(self, event):
""" SHOW CONTROL PANE"""
self.mgr.GetPaneByName('left').Show()
self.mgr.Update()
def show_console(self, event):
""" SHOW PYTHON CONSOLE"""
self.mgr.GetPaneByName('console').Show()
self.mgr.Update()
def new_model(self, event):
"""NEW MODEL DIALOG BOX"""
new_model_dialogbox = NewModelDialog(self, -1, 'Create New Model')
new_model_dialogbox.ShowModal()
if new_model_dialogbox.set_button is True:
self.newmodel = True
new_x1, new_x2 = float(new_model_dialogbox.x1) * 1000., float(new_model_dialogbox.x2) * 1000.
new_z1, new_z2 = float(new_model_dialogbox.z1) * 1000., float(new_model_dialogbox.z2) * 1000.
xp_inc = float(new_model_dialogbox.xp_inc) * 1000.
self.layer_lines = [[]]
self.polygon_fills = [[]]
            # INITIALISE THE MODEL PARAMETERS
            self.initalize_model()
            # ENSURE FOLD PANELS ARE VISIBLE
self.controls_panel_bar_one.Expand(self.fold_panel_one)
self.controls_panel_bar_two.Expand(self.fold_panel_two)
self.controls_panel_bar_three.Expand(self.fold_panel_three)
            # CREATE MODEL
self.model_aspect = 1.
area = (new_x1, new_x2, new_z1, new_z2)
xp = np.arange(new_x1 - self.calc_padding, new_x2 + self.calc_padding, xp_inc)
zp = np.zeros_like(xp)
self.start(area, xp, zp)
else:
self.newmodel = False
            return  # THE USER CHANGED THEIR MIND
def modify_model_dimensions(self, event):
"""MODIFY MODEL DIMENSIONS"""
modify_model_dialogbox = NewModelDialog(self, -1, 'Modify Current Model', self.x1, self.x2, self.z1, self.z2)
answer = modify_model_dialogbox.ShowModal()
new_x1, new_x2 = float(modify_model_dialogbox.x1) * 1000., float(modify_model_dialogbox.x2) * 1000.
new_z1, new_z2 = float(modify_model_dialogbox.z1) * 1000., float(modify_model_dialogbox.z2) * 1000.
xp_inc = float(modify_model_dialogbox.xp_inc) * 1000.
self.area = (new_x1, new_x2, new_z1, new_z2)
self.model_frame.set_ylim(new_z2 / 1000., new_z1 / 1000.)
self.model_frame.set_xlim(new_x1 / 1000., new_x2 / 1000.)
xp = np.arange(new_x1 - self.calc_padding, new_x2 + self.calc_padding, xp_inc)
self.xp = np.array(xp, dtype='f')
zp = np.zeros_like(xp)
self.gravity_observation_elv = np.array(zp, dtype='f')
self.update_layer_data()
self.run_algorithms()
def connect(self):
"""CONNECT MOUSE AND EVENT BINDINGS"""
self.fig.canvas.mpl_connect('button_press_event', self.button_press)
self.fig.canvas.mpl_connect('motion_notify_event', self.move)
self.fig.canvas.mpl_connect('button_release_event', self.button_release)
self.fig.canvas.mpl_connect('key_press_event', self.key_press)
# self.fig.canvas.mpl_connect('pick_event', self.on_pick)
        # CONNECT wx WIDGETS
self.density_input.Bind(fs.EVT_FLOATSPIN, self.set_density)
self.ref_density_input.Bind(fs.EVT_FLOATSPIN, self.set_reference_density)
self.susceptibility_input.Bind(fs.EVT_FLOATSPIN, self.set_susceptibility)
self.angle_a_input.Bind(fs.EVT_FLOATSPIN, self.set_angle_a)
self.angle_b_input.Bind(fs.EVT_FLOATSPIN, self.set_angle_b)
self.angle_c_input.Bind(fs.EVT_FLOATSPIN, self.set_angle_c)
self.earth_field_input.Bind(fs.EVT_FLOATSPIN, self.set_earth_field)
self.text_size_input.Bind(wx.EVT_SLIDER, self.set_text_size)
self.node_set_button.Bind(wx.EVT_BUTTON, self.on_menu_set_button_press)
# LAYER TREE FUNCTIONS~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def add_tree_nodes(self, parent_item, items):
i = 0
for item in items:
            if isinstance(item, str):
new_item = self.tree.AppendItem(parent_item, item)
self.tree.SetItemPyData(new_item, i)
i += 1
else:
pass
def add_new_tree_nodes(self, parent_item, item, data):
new_item = self.tree.AppendItem(parent_item, item, ct_type=1)
new_item.Check(checked=True)
self.tree.SetItemPyData(new_item, data)
def get_item_text(self, item):
if item:
return self.tree.GetItemText(item)
else:
return
def on_layer_activated(self, event):
"""WHEN A LAYER IS SELECTED IN THE TREE"""
        # FIRST CHECK IF FAULT PICKING MODE IS ON; IF IT IS, TURN IT OFF
if self.fault_picking_switch is True:
self.fault_picking_switch = False
        # GET THE TREE ID AND USE IT TO SET THE CURRENT LAYER
self.currently_active_layer_id = self.tree.GetPyData(event.GetItem())
# SET OBJECTS WITH THE CHOSEN LAYER
self.density_input.SetValue(0.001 * self.layer_list[self.currently_active_layer_id].density)
self.ref_density_input.SetValue(0.001 * self.layer_list[self.currently_active_layer_id].reference_density)
self.susceptibility_input.SetValue(self.layer_list[self.currently_active_layer_id].susceptibility)
self.angle_a_input.SetValue(self.layer_list[self.currently_active_layer_id].angle_a)
self.angle_b_input.SetValue(self.layer_list[self.currently_active_layer_id].angle_b)
self.angle_c_input.SetValue(self.layer_list[self.currently_active_layer_id].angle_c)
self.earth_field_input.SetValue(self.layer_list[self.currently_active_layer_id].earth_field)
        # GET THE XY NODES FROM THE ACTIVE LAYER AND SET THE CURRENTLY ACTIVE NODES (I.E. MAKE THEM INTERACTIVE)
self.current_x_nodes = self.layer_list[self.currently_active_layer_id].x_nodes
self.current_y_nodes = self.layer_list[self.currently_active_layer_id].y_nodes
# SET CURRENTLY ACTIVE (RED) NODE
self.current_node.set_offsets([self.current_x_nodes[0], self.current_y_nodes[0]])
# UPDATE MODEL
self.update_layer_data()
self.run_algorithms()
def on_tree_right_click_down(self, event):
"""WHEN A LAYER IN THE LAYER TREE MENU IS RIGHT CLICKED"""
# FIRST RUN THE on_layer_activated FUNC
self.on_layer_activated(event)
# CREATE POPOUT MENU WITH OPTIONS AND BIND OPTIONS TO ACTIONS
menu = wx.Menu()
item1 = menu.Append(wx.ID_ANY, "Change layer colour")
item2 = menu.Append(wx.ID_ANY, "Rename layer")
self.Bind(wx.EVT_MENU, self.change_color, item1)
self.Bind(wx.EVT_MENU, self.rename_layer, item2)
self.PopupMenu(menu)
menu.Destroy()
def change_color(self, event):
"""SET COLOUR FOR LAYER"""
# CREATE DIALOG
dlg = wx.ColourDialog(self)
# ENSURE THE FULL COLOUR DIALOG IS DISPLAYED, NOT THE ABBREVIATED VERSION
dlg.GetColourData().SetChooseFull(True)
# WHAT TO DO WHEN THE DIALOG IS CLOSED
if dlg.ShowModal() == wx.ID_OK:
rgb = dlg.GetColourData().GetColour().Get()
rgb = rgb[0:3]
            html = struct.pack('BBB', *rgb).hex()  # bytes.hex() REPLACES THE PYTHON 2 .encode('hex') IDIOM
# SET FAULT OR LAYER COLOR
            if self.fault_picking_switch is True:
self.fault_list[self.currently_active_fault_id].color = str('#' + str(html))
else:
self.layer_list[self.currently_active_layer_id].color = str('#' + str(html))
# CLOSE DIALOG
dlg.Destroy()
# REDRAW
self.update_layer_data()
self.draw()
def rename_layer(self, event):
"""USE A POPUP MENU TO RENAME THE LAYER"""
# CREATE POP OUT MENU AND SHOW
layer_name_box = LayerNameDialog(self, -1, 'Rename layer', self.tree_items[self.currently_active_layer_id])
new = layer_name_box.ShowModal()
# WAIT FOR USER TO CLOSE POP OUT
# GET THE NEW LAYER NAME FROM POP OUT
new_name = layer_name_box.name
# SET THE TREE AND LAYER OBJECT WITH THE NEW NAME
current_tree_items = self.tree.GetRootItem().GetChildren()
self.tree.SetItemText(current_tree_items[self.currently_active_layer_id - 1], str(new_name))
self.tree_items[self.currently_active_layer_id] = str(new_name)
self.layer_list[self.currently_active_layer_id].name = str(new_name)
def delete_all_children(self, event):
self.tree.DeleteChildren(event)
def item_checked(self, event):
"""TOGGLE WHETHER OR NOT THE SELECTED LAYER IS INCLUDED IN THE MODELLED ANOMALY CALCULATIONS"""
layer = self.tree.GetPyData(event.GetItem())
checked_value = self.tree.GetRootItem().GetChildren()[layer - 1].GetValue()
if checked_value is True:
self.layer_list[layer].include_in_calculations_switch = True
else:
self.layer_list[layer].include_in_calculations_switch = False
# UPDATE MODEL
self.run_algorithms()
self.draw()
def display_info(self):
self.statusbar.SetStatusText(" "
" "
" || Currently Editing Layer: %s || "
" || Model Aspect Ratio = %s:1.0 || GRAV RMS = %s "
" || MAG RMS = %s ||" % (self.currently_active_layer_id,
self.model_frame.get_aspect(), self.gravity_rms_value,
self.magnetic_rms_value), 2)
self.statusbar.Update()
# FIGURE DISPLAY FUNCTIONS~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def zoom(self, event):
if self.zoom_on is True:
self.zoom_on = False
self.nodes = True
self.nav_toolbar.zoom()
else:
self.zoom_on = True
self.nodes = False
self.nav_toolbar.zoom()
# REDRAW
self.draw()
def zoom_out(self, event):
self.nav_toolbar.back()
# REDRAW
self.draw()
def full_extent(self, event):
"""REDRAW MODEL FRAME WITH FULL EXTENT"""
self.full_extent_adjustment()
self.update_layer_data()
self.run_algorithms()
self.draw()
def full_extent_adjustment(self):
"""FIND WHICH FRAME IS REFERENCED & CHANGE SWITCH"""
if not self.topo_frame.get_visible():
self.topo_frame.set_visible(True)
if not self.gravity_frame.get_visible():
self.gravity_frame.set_visible(True)
if not self.magnetic_frame.get_visible():
self.magnetic_frame.set_visible(True)
        # SET CANVAS LIMITS
self.model_frame.set_xlim(self.x1, self.x2)
self.model_frame.set_ylim(self.z2, self.z1)
self.model_aspect = 1
if self.topo_frame is not None:
self.topo_frame.set_xlim(self.model_frame.get_xlim())
if self.gravity_frame is not None:
self.gravity_frame.set_xlim(self.model_frame.get_xlim())
if self.magnetic_frame is not None:
self.magnetic_frame.set_xlim(self.model_frame.get_xlim())
self.fig.subplots_adjust(top=0.99, left=-0.045, right=0.99, bottom=0.02, hspace=1.5)
        # ADJUST FRAME SIZING AND SET PROGRAM WINDOW
        # RE-HIDE ANY FRAMES THE USER HAS SWITCHED OFF
        if self.topo_frame_switch is False:
            self.topo_frame.set_visible(False)
        if self.gravity_frame_switch is False:
            self.gravity_frame.set_visible(False)
        if self.magnetic_frame_switch is False:
            self.magnetic_frame.set_visible(False)
def pan(self, event):
"""PAN MODEL VIEW USING MOUSE DRAG"""
if self.nodes:
self.nodes = False
self.nav_toolbar.pan()
else:
self.nodes = True
self.nav_toolbar.pan()
self.update_layer_data()
self.run_algorithms()
self.draw()
def calc_grav_switch(self, event):
"""PREDICTED ANOMALY CALCULATION SWITCH: ON/OFF (SPEEDS UP PROGRAM WHEN OFF)"""
if self.calc_grav_switch is True:
self.calc_grav_switch = False
            if len(self.grav_residuals) != 0 and len(self.obs_gravity_data_for_rms) != 0:  # AVOID AMBIGUOUS NUMPY ARRAY TRUTH TESTS
self.grav_residuals[:, 1] = np.zeros(len(self.obs_gravity_data_for_rms[:, 0]))
self.gravity_rms_plot.set_data(self.grav_residuals[:, 0], self.grav_residuals[:, 1])
self.gravity_rms_plot.set_visible(False)
else:
self.calc_grav_switch = True
self.gravity_rms_plot.set_visible(True)
self.update_layer_data()
self.run_algorithms()
self.draw()
def calc_mag_switch(self, event):
"""PREDICTED ANOMALY CALCULATION SWITCH: ON/OFF (SPEEDS UP PROGRAM WHEN OFF)"""
if self.calc_mag_switch is True:
self.calc_mag_switch = False
            if len(self.mag_residuals) != 0 and len(self.obs_mag_data_for_rms) != 0:  # AVOID AMBIGUOUS NUMPY ARRAY TRUTH TESTS
self.mag_residuals[:, 1] = np.zeros(len(self.obs_mag_data_for_rms[:, 0]))
self.mag_rms_plot.set_visible(False)
else:
self.calc_mag_switch = True
self.mag_rms_plot.set_visible(True)
self.update_layer_data()
self.run_algorithms()
self.draw()
# MODEL WINDOW GRAPHICS OPTIONS~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def aspect_increase(self, event):
if self.model_aspect >= 1:
self.model_aspect = self.model_aspect + 1
self.update_layer_data()
self.draw()
elif 1.0 > self.model_aspect >= 0.1:
self.model_aspect = self.model_aspect + 0.1
self.update_layer_data()
self.draw()
else:
pass
def aspect_decrease(self, event):
if self.model_aspect >= 2:
self.model_aspect = self.model_aspect - 1
self.update_layer_data()
self.draw()
elif 1.0 >= self.model_aspect >= 0.2:
self.model_aspect = self.model_aspect - 0.1
self.update_layer_data()
self.draw()
else:
pass
def aspect_increase2(self, event):
self.model_aspect = self.model_aspect + 2
self.update_layer_data()
self.draw()
def aspect_decrease2(self, event):
if self.model_aspect >= 3:
self.model_aspect = self.model_aspect - 2
self.update_layer_data()
self.draw()
else:
pass
def transparency_increase(self, event):
for l in range(0, len(self.layer_list)):
if self.layer_list[l].polygon_mpl_actor[0] is not None \
and self.layer_list[l].polygon_mpl_actor[0].get_alpha() <= 0.9:
new_alpha = self.layer_list[l].polygon_mpl_actor[0].get_alpha() + 0.1
self.layer_list[l].polygon_mpl_actor[0].set_alpha(new_alpha)
self.draw()
def transparency_decrease(self, event):
for l in range(0, len(self.layer_list)):
if self.layer_list[l].polygon_mpl_actor[0] is not None \
and self.layer_list[l].polygon_mpl_actor[0].get_alpha() >= 0.1:
new_alpha = self.layer_list[l].polygon_mpl_actor[0].get_alpha() - 0.1
self.layer_list[l].polygon_mpl_actor[0].set_alpha(new_alpha)
self.draw()
# SAVE/LOAD MODEL~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def save_model(self, event):
"""
SAVE MODEL TO DISC IN .Pickle FORMAT
TO ADD NEW OBJECTS ADD THE OBJECT NAME TO BOTH THE header AND model_params.
THEN MODIFY THE load_model FUNCTION TO ALSO INLCUDE THE NEW ITEMS
"""
save_file_dialog = wx.FileDialog(self, "Save model file", "", "", "Model files (*.model)|*.model", wx.FD_SAVE
| wx.FD_OVERWRITE_PROMPT)
if save_file_dialog.ShowModal() == wx.ID_CANCEL:
return # USER CHANGED THEIR MIND
# CREATE SAVE DICTIONARY
self.save_dict = {}
header = ['model_aspect', 'area', 'xp',
'tree_items', 'fault_tree_items',
'gravity_observation_elv', 'mag_observation_elv',
'obs_gravity_data_for_rms', 'obs_mag_data_for_rms',
'layer_list',
'fault_list',
'observed_xy_data_list',
'observed_gravity_list',
'observed_magnetic_list',
'observed_topography_list',
'well_data_list',
'outcrop_data_list',
'segy_data_list']
model_params = [self.model_aspect, self.area, self.xp,
self.tree_items, self.fault_tree_items,
self.gravity_observation_elv, self.mag_observation_elv,
self.obs_gravity_data_for_rms, self.obs_mag_data_for_rms,
self.layer_list,
self.fault_list,
self.observed_xy_data_list,
self.observed_gravity_list,
self.observed_magnetic_list,
self.observed_topography_list,
self.well_data_list,
self.outcrop_data_list,
self.segy_data_list]
        for key, value in zip(header, model_params):
            self.save_dict[key] = value
try:
output_stream = save_file_dialog.GetPath()
with open(output_stream, 'wb') as output_file:
Pickle.dump(self.save_dict, output_file, protocol=Pickle.HIGHEST_PROTOCOL)
self.model_saved = True
self.update_layer_data()
# DISPLAY MESSAGE
MessageDialog(self, -1, "Model saved successfully", "Save")
except IOError:
MessageDialog(self, -1, "Error in save process.\nModel not saved", "Save")
def load_model(self, event):
"""LOAD MODEL FROM DISC IN .Pickle FORMAT"""
try:
if not self.model_saved:
if wx.MessageBox("Current content has not been saved! Proceed?", "Please confirm",
wx.ICON_QUESTION | wx.YES_NO, self) == wx.NO:
return
open_file_dialog = wx.FileDialog(self, "Open model file", "", "", "Model files (*.model)|*.model",
wx.FD_OPEN | wx.FD_FILE_MUST_EXIST)
if open_file_dialog.ShowModal() == wx.ID_CANCEL:
return # USER CHANGED THEIR MIND
            # INITIALISE MODEL PARAMETERS
self.initalize_model()
self.model_aspect = 1.
# OPEN DATA STREAM
with open(open_file_dialog.GetPath(), 'rb') as input_file:
model_data = Pickle.load(input_file)
# CLEAR MEMORY
gc.collect()
del gc.garbage[:]
            # LOAD DATA INTO MODEL
            for key, value in model_data.items():
                setattr(self, key, value)
# SAVE LOADED TREE ITEMS (WILL BE REMOVED BY self.start)
self.loaded_tree_items = self.tree_items
self.loaded_fault_tree_items = self.fault_tree_items
# DRAW CANVAS
self.start(self.area, self.xp, self.gravity_observation_elv)
# ----------------------------------------------------------------------------------------------------------
# LOAD OBSERVED TOPOGRAPHY DATA
if len(self.observed_topography_list) > 0:
self.replot_observed_topography_data()
# LOAD OBSERVED GRAVITY DATA
if len(self.observed_gravity_list) > 0:
self.replot_observed_gravity_data()
# LOAD OBSERVED MAGNETIC DATA
if len(self.observed_magnetic_list) > 0:
self.replot_observed_magnetic_data()
# ----------------------------------------------------------------------------------------------------------
# ----------------------------------------------------------------------------------------------------------
            # SET VARIABLE VALUES FROM LOADED DATA - USING LAYER 1
self.currently_active_layer_id = 1
self.total_layer_count = len(self.layer_list) - 1
# SET LAYER ATTRIBUTES SIDE BAR
self.density_input.SetValue(0.001 * self.layer_list[1].density)
self.ref_density_input.SetValue(0.001 * self.layer_list[1].reference_density)
self.current_x_nodes = self.layer_list[1].x_nodes
self.current_y_nodes = self.layer_list[1].y_nodes
# ----------------------------------------------------------------------------------------------------------
# ----------------------------------------------------------------------------------------------------------
# CREATE LAYERS
for l in range(0, self.total_layer_count + 1):
self.layer_list[l].node_mpl_actor = self.model_frame.plot(self.layer_list[l].x_nodes,
self.layer_list[l].y_nodes, color='blue',
linewidth=1.0, alpha=1.0)
# CREATE LAYER POLYGON FILL
self.layer_list[l].polygon_mpl_actor = self.model_frame.fill(self.layer_list[l].x_nodes,
self.layer_list[l].y_nodes, color='blue',
alpha=self.layer_transparency,
closed=True, linewidth=None, ec=None)
self.currently_active_layer, = self.model_frame.plot(self.current_x_nodes, self.current_y_nodes, marker='o',
color=self.layer_list[
self.currently_active_layer_id].color,
linewidth=1.0,
alpha=0.5)
            # SET CURRENT NODE OFF-STAGE (PLACEHOLDER)
self.current_node = self.model_frame.scatter(-40000., 0., marker='o', color='r', zorder=10)
# ----------------------------------------------------------------------------------------------------------
# ----------------------------------------------------------------------------------------------------------
# LOAD LAYER TREE ITEMS
self.tree.DeleteAllItems() # DELETE CURRENT LAYER TREE
self.root = self.tree.AddRoot("Layers:") # CREATE NEW TREE
self.tree.SetItemPyData(self.root, None)
for i in range(1, len(self.loaded_tree_items)):
tree_item = self.tree.AppendItem(self.root, "%s" % self.loaded_tree_items[i], ct_type=1)
tree_item.Check(checked=True)
self.tree.SetItemPyData(tree_item, i)
self.tree_items = self.loaded_tree_items
# ----------------------------------------------------------------------------------------------------------
# ----------------------------------------------------------------------------------------------------------
self.total_fault_count = len(self.fault_list)
for i in range(0, self.total_fault_count):
# DRAW FAULTS
self.fault_list[i].mpl_actor = self.model_frame.plot(self.fault_list[i].x_nodes,
self.fault_list[i].y_nodes,
color=self.fault_list[i].color,
linewidth=0.5, zorder=1, marker='s', alpha=1.0)
# CREATE NEW CURRENT FAULT GRAPHIC
self.currently_active_fault, = self.model_frame.plot([-100000, -100000], [-100000, -100000], marker='s',
color='g', linewidth=0.75, alpha=1.0, zorder=2,
picker=True)
            # SET FAULT PICKING SWITCH OFF (DEFAULT TO LAYER MODE)
            self.fault_picking_switch = False
# ----------------------------------------------------------------------------------------------------------
# ----------------------------------------------------------------------------------------------------------
# LOAD FAULT TREE ITEMS
for i in range(0, len(self.loaded_fault_tree_items)):
fault_tree_item = self.fault_tree.AppendItem(self.fault_tree_root, "%s" %
self.loaded_fault_tree_items[i], ct_type=1)
fault_tree_item.Check(checked=True)
self.fault_tree.SetItemPyData(fault_tree_item, i)
self.fault_tree_items = self.loaded_fault_tree_items
# ----------------------------------------------------------------------------------------------------------
# ----------------------------------------------------------------------------------------------------------
# LOAD OBSERVED WELL DATA
if len(self.well_data_list) > 0:
self.replot_well_data()
# LOAD OBSERVED XY DATA
if len(self.observed_xy_data_list) > 0:
self.replot_observed_xy_data()
# LOAD OUTCROP DATA
if len(self.outcrop_data_list) > 0:
self.replot_outcrop_data()
# LOAD SEGY DATA
if len(self.segy_data_list) > 0:
self.replot_segy_data()
# ----------------------------------------------------------------------------------------------------------
# ----------------------------------------------------------------------------------------------------------
# REFRESH SIZER POSITIONS
self.Hide()
self.Show()
# UPDATE LAYER DATA AND PLOT
self.draw()
self.update_layer_data()
self.run_algorithms()
self.draw()
            self.Restore()  # FIXES DISPLAY ISSUE
self.fold_panel_two.SetSize(200, 300)
self.fold_panel_three.SetSize(200, 300)
# ----------------------------------------------------------------------------------------------------------
# LOAD ERRORS
except IOError:
error_message = "IO ERROR IN LOADING PROCESS - MODEL NOT LOADED"
MessageDialog(self, -1, error_message, "Load Error")
raise
except IndexError:
error_message = "INDEX ERROR IN LOADING PROCESS - MODEL NOT LOADED"
MessageDialog(self, -1, error_message, "Load Error")
raise
# MAXIMIZE FRAME
self.Maximize(True)
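    # MENU EVENT ID BASES USED BY THE replot_* AND delete_* FUNCTIONS BELOW:
    # SEGY = 1000; WELL SHOW/HIDE = 2000; WELL DELETE = 3000; XY DATA = 4000;
    # TOPOGRAPHY = 10000; GRAVITY = 11000; MAGNETICS = 12000; OUTCROP = 13000.
    # EACH delete_* HANDLER RECOVERS THE LIST INDEX AS event.Id - BASE.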
def replot_observed_xy_data(self):
"""ADD LOADED OBSERVED XY DATA TO THE MODEL FRAME"""
for x in range(len(self.observed_xy_data_list)):
if self.observed_xy_data_list[x] is not None:
# DRAW DATA IN MODEL FRAME
self.observed_xy_data_list[x].mpl_actor = self.model_frame.scatter(
self.observed_xy_data_list[x].data[:, 0], self.observed_xy_data_list[x].data[:, 1],
marker='o', color=self.observed_xy_data_list[x].color, s=5,
gid=self.observed_xy_data_list[x].id)
# APPEND NEW DATA MENU TO 'XY data MENU'
self.obs_submenu = wx.Menu()
self.m_xy_submenu.Append(4000 + self.observed_xy_data_list[x].id, self.observed_xy_data_list[x].name,
self.obs_submenu)
# APPEND DELETE DATA OPTION TO THE NEW DATA MENU
self.obs_submenu.Append(4000 + self.observed_xy_data_list[x].id, 'delete observed data')
# BIND TO DEL XY FUNC
self.Bind(wx.EVT_MENU, self.delete_xy, id=4000 + self.observed_xy_data_list[x].id)
        # SET XY DATA COUNTER
self.observed_xy_data_counter = len(self.observed_xy_data_list)
def replot_observed_topography_data(self):
"""ADD LOADED OBSERVED TOPOGRAPHY TO THE MODEL FRAME"""
for x in range(len(self.observed_topography_list)):
if self.observed_topography_list[x] is not None:
# DRAW DATA IN MODEL FRAME
self.observed_topography_list[x].mpl_actor = self.topo_frame.scatter(
self.observed_topography_list[x].data[:, 0], self.observed_topography_list[x].data[:, 1],
marker='o', color=self.observed_topography_list[x].color, s=5,
gid=self.observed_topography_list[x].id)
# ADD OBJECT TO MENUVAR
self.obs_submenu = wx.Menu()
self.m_topo_submenu.Append(10000 + self.observed_topography_list[x].id,
self.observed_topography_list[x].name,
self.obs_submenu)
self.obs_submenu.Append(10000 + self.observed_topography_list[x].id, 'delete observed data')
self.Bind(wx.EVT_MENU, self.delete_observed_topography, id=10000 + self.observed_topography_list[x].id)
        # TURN ON OBSERVED TOPOGRAPHY SWITCH
self.observed_topography_switch = True
        # SET TOPOGRAPHY COUNTER
self.observed_topography_counter = len(self.observed_topography_list)
def replot_observed_gravity_data(self):
"""ADD LOADED OBSERVED GRAVITY TO THE MODEL FRAME"""
for x in range(len(self.observed_gravity_list)):
if self.observed_gravity_list[x] is not None:
# DRAW DATA IN MODEL FRAME
self.observed_gravity_list[x].mpl_actor = self.gravity_frame.scatter(
self.observed_gravity_list[x].data[:, 0], self.observed_gravity_list[x].data[:, 1], marker='o',
color=self.observed_gravity_list[x].color, s=5, gid=self.observed_gravity_list[x].id)
# ADD OBJECT TO MENUVAR
self.obs_submenu = wx.Menu()
self.m_obs_g_submenu.Append(11000 + self.observed_gravity_list[x].id,
self.observed_gravity_list[x].name,
self.obs_submenu)
self.obs_submenu.Append(11000 + self.observed_gravity_list[x].id, 'delete observed data')
self.Bind(wx.EVT_MENU, self.delete_obs_grav, id=11000 + self.observed_gravity_list[x].id)
# TURN ON OBSERVED GRAVITY SWITCH
self.observed_gravity_switch = True
# SET GRAVITY COUNTER
self.observed_gravity_counter = len(self.observed_gravity_list)
def replot_observed_magnetic_data(self):
"""ADD LOADED OBSERVED MAGNETIC DATA TO THE MODEL FRAME"""
for x in range(len(self.observed_magnetic_list)):
if self.observed_magnetic_list[x] is not None:
# DRAW DATA IN MODEL FRAME
self.observed_magnetic_list[x].mpl_actor = self.magnetic_frame.scatter(
self.observed_magnetic_list[x].data[:, 0],
self.observed_magnetic_list[x].data[:, 1],
marker='o',
color=self.observed_magnetic_list[x].color,
s=5, gid=self.observed_magnetic_list[x].id)
# ADD OBJECT TO MENUVAR
self.mag_submenu = wx.Menu()
self.m_obs_mag_submenu.Append(12000 + self.observed_magnetic_list[x].id,
self.observed_magnetic_list[x].name,
self.mag_submenu)
self.mag_submenu.Append(12000 + self.observed_magnetic_list[x].id, 'delete observed data')
self.Bind(wx.EVT_MENU, self.delete_obs_mag, id=12000 + self.observed_magnetic_list[x].id)
        # TURN ON OBSERVED MAGNETIC SWITCH
self.observed_magnetic_switch = True
# SET MAGNETIC COUNTER
self.observed_magnetic_counter = len(self.observed_magnetic_list)
def replot_well_data(self):
"""ADD LOADED WELL DATA TO THE MODEL FRAME"""
for x in range(len(self.well_data_list)):
if self.well_data_list[x] is not None:
# SET CURRENT WELL
self.loaded_well = self.well_data_list[x]
well = self.well_data_list[x]
# CREATE FILE MENU DATA
self.well_name_submenu = wx.Menu()
self.m_wells_submenu.Append(well.id + 3000, well.name, self.well_name_submenu)
self.well_name_submenu.Append(well.id + 2000, 'Hide/Show')
self.well_name_submenu.Append(well.id + 3000, 'Delete well')
self.Bind(wx.EVT_MENU, self.show_hide_well, id=well.id + 2000)
self.Bind(wx.EVT_MENU, self.delete_well, id=well.id + 3000)
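                # INFERRED WELL FILE LAYOUT (AN ASSUMPTION, READ FROM THE
                # INDEXING BELOW): ROW 0 HOLDS THE WELL TOP DEPTH, ROW 1 THE
                # X LOCATION, THE LAST ENTRY OF THE FINAL ROW THE BOTTOM DEPTH,
                # AND ROWS 2+ HOLD (HORIZON NAME, DEPTH) PAIRS.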
# DRAW WELL IN MODEL FRAME
y1 = well.data[0][1].astype(float)
y2 = well.data[-1][-1].astype(float)
well_x_location = well.data[1][1].astype(float)
wellx = (well_x_location, well_x_location)
welly = (y1, y2)
                well.mpl_actor = self.model_frame.plot(wellx, welly, linestyle='-', linewidth=2, color='black')
# PLOT WELL NAME
well.mpl_actor_name = self.model_frame.annotate(well.name, xy=(well_x_location, -0.5),
xytext=(well_x_location, -0.5),
fontsize=well.textsize, weight='bold',
horizontalalignment='center', color='black',
bbox=dict(boxstyle="round,pad=.2", fc="0.8"),
clip_on=True)
# PLOT WELL HORIZONS
# SET EMPTY ARRAYS TO FILL WITH LABELS AND HORIZONS
well.labels_list = [None] * (len(well.data) - 2)
well.horizons_list = [None] * (len(well.data) - 2)
for i in range(2, len(well.data)):
y = [well.data[i][1].astype(float), well.data[i][1].astype(float)]
x = [well.data[1][1].astype(float) - 1, well.data[1][1].astype(float) + 1]
# PLOT HORIZON LINE
                    well.horizons_list[i - 2] = self.model_frame.plot(x, y, linestyle='-', linewidth=2, color='black')
horizon_y_pos = well.data[i][1].astype(float)
horizon = well.data[i][0].astype(str)
# ALTERNATE POSITION OF ODDs/EVENs TO TRY AND AVOID OVERLAP
if i % 2 == 0:
horizon_x_pos = well.data[1][1].astype(float) - 1.05
well.labels_list[i - 2] = self.model_frame.annotate(horizon, xy=(horizon_x_pos, horizon_y_pos),
xytext=(horizon_x_pos, horizon_y_pos),
                                                                            fontsize=well.textsize, weight='bold',
horizontalalignment='left',
verticalalignment='top',
color='black',
bbox=dict(boxstyle="round,pad=.4",
fc="0.8", ec='None'),
clip_on=True)
else:
horizon_x_pos = well.data[1][1].astype(float) + 1.05
well.labels_list[i - 2] = self.model_frame.annotate(horizon, xy=(horizon_x_pos, horizon_y_pos),
xytext=(horizon_x_pos, horizon_y_pos),
                                                                            fontsize=well.textsize, weight='bold',
horizontalalignment='right',
verticalalignment='top',
color='black',
bbox=dict(boxstyle="round,pad=.4",
fc="0.8", ec='None'),
clip_on=True)
# SET WELL COUNTER
self.well_counter = len(self.well_data_list)
def replot_outcrop_data(self):
"""ADD LOADED OUTCROP DATA TO THE MODEL FRAME"""
for x in range(len(self.outcrop_data_list)):
if self.outcrop_data_list[x] is not None:
# SET CURRENT OUTCROP DATA RECORD
outcrop = self.outcrop_data_list[x]
# PLOT MARKERS IN MODEL
outcrop.lines = [None] * len(outcrop.data)
for i in range(len(outcrop.data)):
x1 = outcrop.data[i, 0].astype(float)
y1 = outcrop.data[i, 1].astype(float)
y2 = outcrop.data[i, 2].astype(float)
x = (x1, x1)
y = (y1, y2)
                    outcrop.lines[i] = self.model_frame.plot(x, y, linestyle='-', linewidth=2, color=outcrop.color)
# DRAW TEXT LABELS
outcrop.labels = [None] * len(outcrop.data)
# CREATE TEXT XYT
text = list(zip(outcrop.data[:, 0].astype(float), outcrop.data[:, 1].astype(float),
outcrop.data[:, 3].astype(str)))
for i in range(len(outcrop.data)):
                    # ALTERNATE POSITION OF ODDs/EVENs TO TRY AND AVOID OVERLAP
if i % 2 == 0:
outcrop.labels[i] = self.model_frame.annotate(text[i][2], xy=(text[i][0], text[i][1]),
xytext=(text[i][0], text[i][1]),
fontsize=outcrop.textsize,
weight='regular', horizontalalignment='right',
verticalalignment='bottom',
color='black',
bbox=dict(boxstyle="round,pad=.4", fc="0.8",
ec='None'), clip_on=True)
else:
outcrop.labels[i] = self.model_frame.annotate(text[i][2], xy=(text[i][0], text[i][1]),
xytext=(text[i][0], text[i][1]),
fontsize=outcrop.textsize,
weight='regular', horizontalalignment='left',
verticalalignment='top',
color='black',
bbox=dict(boxstyle="round,pad=.4", fc="0.8",
ec='None'), clip_on=True)
                # APPEND NEW DATA MENU TO 'OUTCROP DATA MENU'
                # USE THE PER-OBJECT id (AS IN THE OTHER replot_* FUNCTIONS) SO EACH ENTRY GETS A UNIQUE EVENT ID
                self.outcrop_submenu = wx.Menu()
                self.m_outcrop_submenu.Append(13000 + outcrop.id, outcrop.name, self.outcrop_submenu)
                # APPEND DELETE DATA OPTION TO THE NEW DATA MENU
                self.outcrop_submenu.Append(13000 + outcrop.id, 'delete observed data')
                # BIND TO DELETE OUTCROP DATA FUNC
                self.Bind(wx.EVT_MENU, self.delete_outcrop_data, id=13000 + outcrop.id)
        # SET OUTCROP COUNTER
        self.outcrop_data_count = len(self.outcrop_data_list)
def replot_segy_data(self):
""" PLOT SEGY DATA"""
for x in range(len(self.segy_data_list)):
if self.segy_data_list[x] is not None:
# SET CURRENT SEGY OBJECT
segy = self.segy_data_list[x]
# LOAD SEGY DATA
try:
section = read(segy.file, unpack_trace_headers=False)
except IOError:
                    load_error = MessageDialog(self, -1, "SEGY LOAD ERROR: FILE NOT FOUND", "Segy load error")
return
nsamples = len(section.traces[0].data)
ntraces = len(section.traces)
                seis_data = np.zeros((nsamples, ntraces))
for i, tr in enumerate(section.traces):
seis_data[:, i] = tr.data
del section
# SET AXIS
segy.axis = [segy.dimensions[0], segy.dimensions[1], segy.dimensions[3], segy.dimensions[2]]
# PLOT SEGY DATA ON MODEL
segy.mpl_actor = self.model_frame.imshow(seis_data, vmin=self.segy_gain_neg, vmax=self.segy_gain_pos,
aspect='auto', extent=segy.axis, cmap=self.segy_color_map,
alpha=0.75)
# REMOVE SEGY DATA
del seis_data
                # SET SEGY ON SWITCH (THE OBJECT IS ALREADY IN self.segy_data_list, SO IT IS NOT RE-APPENDED)
                self.segy_on = True
                # ADD SEGY NAME TO SEGY MENU. NB: IDs START AT 1000
self.segy_name_submenu = wx.Menu()
self.m_segy_submenu.Append(segy.id + 1000, segy.name, self.segy_name_submenu)
self.segy_name_submenu.Append(segy.id + 1000, 'delete segy')
self.Bind(wx.EVT_MENU, self.remove_segy, id=segy.id + 1000)
# INCREMENT COUNTER
self.segy_counter = len(self.segy_data_list)
    # TOPOGRAPHY DATA~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def load_topo(self, event):
self.load_window = LoadObservedDataFrame(self, -1, 'Load observed data', 'topography')
self.load_window.Show(True)
def open_obs_t(self):
"""
LOAD OBSERVE TOPOGRAPHY DATA.
DATA ARE STORED IN gmg.observed_topography_list as a ObservedData object.
object IDs start at 11000.
"""
# PARSE USER INPUT FILE
input_file = self.load_window.file_path
        # CREATE NEW OBSERVED TOPOGRAPHY OBJECT
observed_topography = ObservedData()
# SET ATTRIBUTES
observed_topography.id = int(self.observed_topography_counter)
observed_topography.type = str('observed')
observed_topography.name = self.load_window.observed_name
observed_topography.color = self.load_window.color_picked
observed_topography.data = np.genfromtxt(input_file, delimiter=' ', dtype=float)
observed_topography.mpl_actor = self.topo_frame.scatter(observed_topography.data[:, 0],
observed_topography.data[:, 1], marker='o',
color=observed_topography.color, s=5,
gid=observed_topography.id)
        # APPEND NEW DATA TO THE OBSERVED TOPOGRAPHY GMG LIST
self.observed_topography_list.append(observed_topography)
        # TURN ON OBSERVED TOPOGRAPHY SWITCH
self.observed_topography_switch = True
# APPEND NEW DATA MENU TO 'TOPO data MENU'
self.topo_submenu = wx.Menu()
self.m_topo_submenu.Append(10000 + observed_topography.id, observed_topography.name, self.topo_submenu)
# APPEND DELETE DATA OPTION TO THE NEW DATA MENU
self.topo_submenu.Append(10000 + observed_topography.id, 'delete observed data')
        # BIND TO DELETE OBSERVED TOPOGRAPHY FUNC
self.Bind(wx.EVT_MENU, self.delete_observed_topography, id=10000 + observed_topography.id)
        # INCREMENT OBSERVED TOPOGRAPHY COUNTER
self.observed_topography_counter += 1
# UPDATE GMG GUI
self.update_layer_data()
self.set_frame_limits()
self.draw()
def delete_observed_topography(self, event):
"""DELETE AN OBSERVED TOPOGRAPHY DATA RECORD"""
# DESTROY MENUBAR
self.m_topo_submenu.DestroyItem(event.Id)
# REMOVE OBJECT AND MPL ACTOR
obj_id = event.Id - 10000
self.observed_topography_list[obj_id].mpl_actor.set_visible(False)
self.observed_topography_list[obj_id] = None
# UPDATE MODEL
self.update_layer_data()
self.draw()
def filter_observed_topography(self, event):
"""FILTER OBSERVED TOPOGRAPHY USING MEDIAN FILTER - CALLS class MedianFilterDialog"""
# RUN FILTER
median_filter_box = MedianFilterDialog(self, -1, 'median filter', self.observed_topography_list)
answer = median_filter_box.ShowModal()
        # CREATE NEW OBSERVED TOPOGRAPHY OBJECT
observed = ObservedData()
# SET ATTRIBUTES
observed.id = int(self.observed_topography_counter)
observed.type = str('filtered')
observed.name = median_filter_box.output_name
observed.color = median_filter_box.output_color
observed.data = median_filter_box.filtered_output
observed.mpl_actor = self.topo_frame.scatter(observed.data[:, 0], observed.data[:, 1], marker='o',
color=observed.color, s=5, gid=observed.id)
        # APPEND NEW DATA TO THE OBSERVED TOPOGRAPHY GMG LIST
self.observed_topography_list.append(observed)
# APPEND NEW DATA MENU TO 'TOPO data MENU'
self.topo_submenu = wx.Menu()
self.m_topo_submenu.Append(10000 + observed.id, observed.name, self.topo_submenu)
# APPEND DELETE DATA OPTION TO THE NEW DATA MENU
self.topo_submenu.Append(10000 + observed.id, 'delete observed data')
        # BIND TO DELETE OBSERVED TOPOGRAPHY FUNC
self.Bind(wx.EVT_MENU, self.delete_observed_topography, id=10000 + observed.id)
        # INCREMENT OBSERVED TOPOGRAPHY COUNTER
self.observed_topography_counter += 1
# UPDATE GMG GUI
self.update_layer_data()
self.set_frame_limits()
self.draw()
def take_topography_horizontal_derivative(self, event):
"""
TAKE HORIZONTAL DERIVATIVE OF OBSERVED DATA.
CALLS class HorizontalDerivative
"""
# OPEN THE HORIZONTAL DERIVATIVE INPUT WINDOW
horizontal_derivative_box = HorizontalDerivative(self, -1, 'Horizontal derivative',
self.observed_topography_list)
answer = horizontal_derivative_box.ShowModal()
# CREATE NEW DATA OBJECT AND PARSE OUTPUT TO THE OBJECT
new_derivative = ObservedData()
new_derivative.data = horizontal_derivative_box.deriv
new_derivative.name = horizontal_derivative_box.output_name
new_derivative.color = horizontal_derivative_box.output_color
        new_derivative.id = int(self.observed_topography_counter)  # STORE THE RAW ID; MENU EVENT IDs ADD THE 10000 BASE
new_derivative.type = str('derivative')
new_derivative.mpl_actor = self.topo_d_frame.scatter(new_derivative.data[:, 0], new_derivative.data[:, 1],
marker='o', color=new_derivative.color, s=5,
gid=10000 + self.observed_topography_counter)
# APPEND NEW DATA TO THE OBSERVED GRAVITY GMG LIST
self.observed_topography_list.append(new_derivative)
        # APPEND NEW MENU TO THE TOPOGRAPHY MENUBAR
self.topo_submenu = wx.Menu()
self.m_topo_submenu.Append(10000 + self.observed_topography_counter, new_derivative.name,
self.topo_submenu)
# APPEND DELETE DATA OPTION TO THE NEW DATA MENU
self.topo_submenu.Append(10000 + self.observed_topography_counter, 'delete observed data')
# BIND TO DEL FUNC
        self.Bind(wx.EVT_MENU, self.delete_observed_topography, id=10000 + self.observed_topography_counter)
# INCREMENT TOPO DERIV COUNTER
self.observed_topography_counter += 1
# UPDATE GMG GUI
self.update_layer_data()
self.draw()
# GRAVITY DATA~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def load_obs_g(self, event):
self.load_window = LoadObservedDataFrame(self, -1, 'Load observed data', 'gravity')
self.load_window.Show(True)
def open_obs_g(self):
"""
LOAD OBSERVE GRAVITY DATA.
DATA ARE STORED IN gmg.observed_gravity_list as a ObservedData object.
object IDs start at 11000.
"""
# PARSE USER INPUT FILE
input_file = self.load_window.file_path
# CREATE NEW OBSERVED GRAVITY OBJECT
observed_gravity = ObservedData()
# SET ATTRIBUTES
observed_gravity.id = int(self.observed_gravity_counter)
observed_gravity.type = str('observed')
observed_gravity.name = self.load_window.observed_name
observed_gravity.color = self.load_window.color_picked
observed_gravity.data = np.genfromtxt(input_file, delimiter=' ', dtype=float)
observed_gravity.mpl_actor = self.gravity_frame.scatter(observed_gravity.data[:, 0],
observed_gravity.data[:, 1], marker='o',
color=observed_gravity.color, s=5,
gid=observed_gravity.id)
# APPEND NEW DATA TO THE OBSERVED GRAVITY GMG LIST
self.observed_gravity_list.append(observed_gravity)
# TURN ON OBSERVED GRAVITY SWITCH
self.observed_gravity_switch = True
# APPEND NEW DATA MENU TO 'GRAV data MENU'
self.grav_submenu = wx.Menu()
self.m_obs_g_submenu.Append(11000 + observed_gravity.id, observed_gravity.name, self.grav_submenu)
# APPEND DELETE DATA OPTION TO THE NEW DATA MENU
self.grav_submenu.Append(11000 + observed_gravity.id, 'delete observed data')
# BIND TO DELETE OBSERVED GRAVITY FUNC
self.Bind(wx.EVT_MENU, self.delete_obs_grav, id=11000 + observed_gravity.id)
# INCREMENT OBSERVED GRAVITY COUNTER
self.observed_gravity_counter += 1
# UPDATE GMG GUI
self.update_layer_data()
self.set_frame_limits()
self.draw()
def delete_obs_grav(self, event):
# DESTROY MENUBAR
self.m_obs_g_submenu.DestroyItem(event.Id)
# REMOVE OBJECT AND MPL ACTOR
obj_id = event.Id - 11000
self.observed_gravity_list[obj_id].mpl_actor.set_visible(False)
self.observed_gravity_list[obj_id] = None
# UPDATE MODEL
self.update_layer_data()
self.set_frame_limits()
self.draw()
def set_gravity_elv(self, event):
"""POPOUT BOX TO LET USER DEFINE THE ELEVATION AT WHICH TO CALCULATE THE GRAVITY ANOMALY"""
        # CREATE THE POPOUT BOX FOR USER INPUT
grav_box = GravDialog(self, -1, 'Gravity elevation', self.gravity_observation_elv)
answer = grav_box.ShowModal()
# SET THE NEW CALCULATION ELEVATION
self.gravity_observation_elv = grav_box.grav_observation_elv * 1000. # CONVERT FROM (km) TO (m)
# UPDATE GMG
self.run_algorithms()
self.draw()
def save_modelled_grav(self, event):
"""SAVE PREDICTED GRAVITY TO EXTERNAL ASCII FILE"""
save_file_dialog = wx.FileDialog(self, "Save Predicted Anomaly", "", "", "Predicted Anomaly (*.txt)|*.txt",
wx.FD_SAVE | wx.FD_OVERWRITE_PROMPT)
if save_file_dialog.ShowModal() == wx.ID_CANCEL:
return # THE USER CHANGED THEIR MIND
# SAVE TO DISC
outputfile = save_file_dialog.GetPath()
np.savetxt(outputfile, list(zip((self.xp * 0.001), self.predicted_gravity)), delimiter=' ', fmt='%.6f %.6f')
def filter_observed_gravity(self, event):
"""FILTER OBSERVED ANOMALY USING MEDIAN FILTER - CALLS class MedianFilterDialog"""
# RUN FILTER
median_filter_box = MedianFilterDialog(self, -1, 'median filter', self.observed_gravity_list)
answer = median_filter_box.ShowModal()
# CREATE NEW OBSERVED GRAVITY OBJECT
observed = ObservedData()
# SET ATTRIBUTES
observed.id = int(self.observed_gravity_counter)
observed.type = str('filtered')
observed.name = median_filter_box.output_name
observed.color = median_filter_box.output_color
observed.data = median_filter_box.filtered_output
observed.mpl_actor = self.gravity_frame.scatter(observed.data[:, 0],
observed.data[:, 1], marker='o',
color=observed.color, s=5,
gid=observed.id)
# APPEND NEW DATA TO THE OBSERVED GRAVITY GMG LIST
self.observed_gravity_list.append(observed)
# TURN ON OBSERVED GRAVITY SWITCH
self.observed_gravity_switch = True
# APPEND NEW DATA MENU TO 'GRAV data MENU'
self.grav_submenu = wx.Menu()
self.m_obs_g_submenu.Append(11000 + observed.id, observed.name, self.grav_submenu)
# APPEND DELETE DATA OPTION TO THE NEW DATA MENU
self.grav_submenu.Append(11000 + observed.id, 'delete observed data')
# BIND TO DELETE OBSERVED GRAVITY FUNC
self.Bind(wx.EVT_MENU, self.delete_obs_grav, id=11000 + observed.id)
# INCREMENT OBSERVED GRAVITY COUNTER
self.observed_gravity_counter += 1
# UPDATE GMG GUI
self.update_layer_data()
self.set_frame_limits()
self.draw()
def take_gravity_horizontal_derivative(self, event):
"""
TAKE HORIZONTAL DERIVATIVE OF OBSERVED DATA.
CALLS class HorizontalDerivative
"""
# OPEN THE HORIZONTAL DERIVATIVE INPUT WINDOW
horizontal_derivative_box = HorizontalDerivative(self, -1, 'Horizontal derivative', self.observed_gravity_list)
answer = horizontal_derivative_box.ShowModal()
# CREATE NEW DATA OBJECT AND PARSE OUTPUT TO THE OBJECT
new_derivative = ObservedData()
new_derivative.data = horizontal_derivative_box.deriv
new_derivative.name = horizontal_derivative_box.output_name
new_derivative.color = horizontal_derivative_box.output_color
        new_derivative.id = int(self.observed_gravity_counter)  # STORE THE RAW ID; MENU EVENT IDs ADD THE 11000 BASE
new_derivative.type = str('derivative')
new_derivative.mpl_actor = self.gravity_d_frame.scatter(new_derivative.data[:, 0], new_derivative.data[:, 1],
marker='o', color=new_derivative.color, s=5,
gid=11000 + self.observed_gravity_counter)
# APPEND NEW DATA TO THE OBSERVED GRAVITY GMG LIST
self.observed_gravity_list.append(new_derivative)
# APPEND NEW MENUBAR TO THE GRAVITY MENUBAR
self.grav_submenu = wx.Menu()
self.m_obs_g_submenu.Append(11000 + self.observed_gravity_counter, new_derivative.name, self.grav_submenu)
# APPEND DELETE DATA OPTION TO THE NEW DATA MENU
self.grav_submenu.Append(11000 + self.observed_gravity_counter, 'delete observed data')
# BIND TO DEL FUNC
self.Bind(wx.EVT_MENU, self.delete_obs_grav, id=11000 + self.observed_gravity_counter)
# INCREMENT GRAV DERIV COUNTER
self.observed_gravity_counter += 1
# UPDATE GMG GUI
self.update_layer_data()
self.draw()
# MAGNETIC DATA~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def load_obs_m(self, event):
self.load_window = LoadObservedDataFrame(self, -1, 'Load observed data', 'magnetics')
self.load_window.Show(True)
def open_obs_m(self):
"""
LOAD OBSERVE GRAVITY DATA.
DATA ARE STORED IN gmg.observed_gravity_list as a ObservedData object.
object IDs start at 12000.
"""
# PARSE USER INPUT FILE
input_file = self.load_window.file_path
        # CREATE NEW OBSERVED MAGNETIC OBJECT
observed_magnetic = ObservedData()
# SET ATTRIBUTES
observed_magnetic.id = int(self.observed_magnetic_counter)
observed_magnetic.type = str('observed')
observed_magnetic.name = self.load_window.observed_name
observed_magnetic.color = self.load_window.color_picked
observed_magnetic.data = np.genfromtxt(input_file, delimiter=' ', dtype=float)
observed_magnetic.mpl_actor = self.magnetic_frame.scatter(observed_magnetic.data[:, 0],
observed_magnetic.data[:, 1], marker='o',
color=observed_magnetic.color, s=5,
gid=observed_magnetic.id)
        # APPEND NEW DATA TO THE OBSERVED MAGNETIC GMG LIST
self.observed_magnetic_list.append(observed_magnetic)
        # TURN ON OBSERVED MAGNETIC SWITCH
self.observed_magnetic_switch = True
        # APPEND NEW DATA MENU TO 'MAG data MENU'
self.mag_submenu = wx.Menu()
self.m_obs_mag_submenu.Append(12000 + observed_magnetic.id, observed_magnetic.name, self.mag_submenu)
# APPEND DELETE DATA OPTION TO THE NEW DATA MENU
self.mag_submenu.Append(12000 + observed_magnetic.id, 'delete observed data')
        # BIND TO DELETE OBSERVED MAGNETIC FUNC
self.Bind(wx.EVT_MENU, self.delete_obs_mag, id=12000 + observed_magnetic.id)
        # INCREMENT OBSERVED MAGNETIC COUNTER
self.observed_magnetic_counter += 1
# UPDATE GMG GUI
self.update_layer_data()
self.set_frame_limits()
self.draw()
def delete_obs_mag(self, event):
"""DELETE A MAGNETIC ANOMALY FROM THE MAGNETIC DATA PANEL"""
# DESTROY MENUBAR
self.m_obs_mag_submenu.DestroyItem(event.Id)
# REMOVE OBJECT AND MPL ACTOR
obj_id = event.Id - 12000
self.observed_magnetic_list[obj_id].mpl_actor.set_visible(False)
self.observed_magnetic_list[obj_id] = None
# UPDATE MODEL
self.update_layer_data()
self.set_frame_limits()
self.draw()
def set_mag_variables(self, event):
"""POPOUT BOX TO LET USER DEFINE MAGNETIC FIELD VALUES"""
# CREATE POPOUT MENU
mag_box = MagDialog(self, -1, 'Magnetic parameters', self.mag_observation_elv, self.model_azimuth,
self.earth_field)
answer = mag_box.ShowModal()
# UPDATE MAGNETIC OBSERVATION ELEVATION
self.mag_observation_elv = mag_box.mag_observation_elv * 1000. # CONVERT FROM (km) TO (m)
# UPDATE GMG
self.run_algorithms()
self.draw()
def save_modelled_mag(self, event):
"""SAVE THE MODELLED MAGNETIC ANOMALY TO DISC"""
save_file_dialog = wx.FileDialog(self, "Save Predicted Anomaly", "", "", "Predicted Anomaly (*.txt)|*.txt",
wx.FD_SAVE | wx.FD_OVERWRITE_PROMPT)
if save_file_dialog.ShowModal() == wx.ID_CANCEL:
return # THE USER CHANGED THEIR MIND
# SAVE TO DISC
outputfile = save_file_dialog.GetPath()
np.savetxt(outputfile, list(zip((self.xp * 0.001), self.predicted_nt)), delimiter=' ', fmt='%.6f %.6f')
def filter_observed_magnetic(self, event):
"""FILTER OBSERVED MAGNETIC USING MEDIAN FILTER - CALLS class MedianFilterDialog"""
# RUN FILTER
median_filter_box = MedianFilterDialog(self, -1, 'median filter', self.observed_magnetic_list)
answer = median_filter_box.ShowModal()
        # CREATE NEW OBSERVED MAGNETIC OBJECT
observed = ObservedData()
# SET ATTRIBUTES
observed.id = int(self.observed_magnetic_counter)
observed.type = str('filtered')
observed.name = median_filter_box.output_name
observed.color = median_filter_box.output_color
observed.data = median_filter_box.filtered_output
observed.mpl_actor = self.magnetic_frame.scatter(observed.data[:, 0], observed.data[:, 1], marker='o',
color=observed.color, s=5, gid=observed.id)
        # APPEND NEW DATA TO THE OBSERVED MAGNETIC GMG LIST
self.observed_magnetic_list.append(observed)
# APPEND NEW DATA MENU TO 'MAG data MENU'
self.mag_submenu = wx.Menu()
self.m_obs_mag_submenu.Append(12000 + observed.id, observed.name, self.mag_submenu)
# APPEND DELETE DATA OPTION TO THE NEW DATA MENU
self.mag_submenu.Append(12000 + observed.id, 'delete observed data')
        # BIND TO DELETE OBSERVED MAGNETIC FUNC
self.Bind(wx.EVT_MENU, self.delete_obs_mag, id=12000 + observed.id)
        # INCREMENT OBSERVED MAGNETIC COUNTER
self.observed_magnetic_counter += 1
# UPDATE GMG GUI
self.update_layer_data()
self.set_frame_limits()
self.draw()
def take_magnetic_horizontal_derivative(self, event):
"""
TAKE HORIZONTAL DERIVATIVE OF OBSERVED DATA.
CALLS class HorizontalDerivative
"""
# OPEN THE HORIZONTAL DERIVATIVE INPUT WINDOW
horizontal_derivative_box = HorizontalDerivative(self, -1, 'Horizontal derivative', self.observed_magnetic_list)
answer = horizontal_derivative_box.ShowModal()
# CREATE NEW DATA OBJECT AND PARSE OUTPUT TO THE OBJECT
new_derivative = ObservedData()
new_derivative.data = horizontal_derivative_box.deriv
new_derivative.name = horizontal_derivative_box.output_name
new_derivative.color = horizontal_derivative_box.output_color
        new_derivative.id = int(self.observed_magnetic_counter)
        new_derivative.type = str('derivative')
        new_derivative.mpl_actor = self.magnetic_d_frame.scatter(new_derivative.data[:, 0], new_derivative.data[:, 1],
                                                                 marker='o', color=new_derivative.color, s=5,
                                                                 gid=new_derivative.id)
        # APPEND NEW DATA TO THE OBSERVED MAGNETIC GMG LIST
        self.observed_magnetic_list.append(new_derivative)
        # APPEND NEW DATA MENU TO 'MAG data MENU'
        # NB: USE THE SAME 12000 ID OFFSET AS THE OTHER MAGNETIC DATA SO delete_obs_mag
        # (WHICH COMPUTES event.Id - 12000) RESOLVES THE CORRECT LIST INDEX
        self.mag_submenu = wx.Menu()
        self.m_obs_mag_submenu.Append(12000 + new_derivative.id, new_derivative.name,
                                      self.mag_submenu)
        # APPEND DELETE DATA OPTION TO THE NEW DATA MENU
        self.mag_submenu.Append(12000 + new_derivative.id, 'delete observed data')
        # BIND TO DELETE OBSERVED MAGNETIC FUNC
        self.Bind(wx.EVT_MENU, self.delete_obs_mag, id=12000 + new_derivative.id)
# INCREMENT MAG DERIV COUNTER
self.observed_magnetic_counter += 1
# UPDATE GMG GUI
self.update_layer_data()
self.draw()
# SEGY DATA~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def segy_input(self, event):
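        """COLLECT SEGY DIMENSIONS AND A DISPLAY NAME FROM THE USER (CALLS class SeisDialog), THEN RUN load_segy"""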
seismic_data_box = SeisDialog(self, -1, 'Segy Dimensions', self.area)
answer = seismic_data_box.ShowModal()
self.d = seismic_data_box.dimensions
self.segy_name = seismic_data_box.segy_name_input
self.sx1, self.sx2, self.sz1, self.sz2 = self.d
self.load_segy(self)
def load_segy(self, event):
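        """LOAD A SEGY FILE, UNPACK ITS TRACES AND PLOT THE SECTION IN THE MODEL FRAME. NB: MENU ID's START AT 1000"""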
try:
open_file_dialog = wx.FileDialog(self, "Open Observed file", "", "", "All files (*.*)|*.*",
wx.FD_OPEN | wx.FD_FILE_MUST_EXIST)
if open_file_dialog.ShowModal() == wx.ID_CANCEL:
return # THE USER CHANGED THEIR MIND
# PARSE INPUT FILE
file_in = open_file_dialog.GetPath()
# CREATE NEW SEGY OBJECT
segy = SegyData()
# ASSIGN ATTRIBUTES
segy.id = self.segy_counter
segy.file = file_in
segy.name = self.segy_name
segy.dimensions = self.d
# LOAD SEGY DATA
section = read(file_in, unpack_trace_headers=False)
nsamples = len(section.traces[0].data)
ntraces = len(section.traces)
            seis_data = np.zeros((nsamples, ntraces))
for i, tr in enumerate(section.traces):
seis_data[:, i] = tr.data
del section
# SET AXIS
segy.axis = [segy.dimensions[0], segy.dimensions[1], segy.dimensions[3], segy.dimensions[2]]
# PLOT SEGY DATA ON MODEL
segy.mpl_actor = self.model_frame.imshow(seis_data, vmin=self.segy_gain_neg, vmax=self.segy_gain_pos,
aspect='auto', extent=segy.axis, cmap=self.segy_color_map,
alpha=0.75)
# REMOVE SEGY DATA
del seis_data
            # SET SEGY ON SWITCH
self.segy_on = True
# APPEND NEW SEGY TO THE SEGY DATA LIST
self.segy_data_list.append(segy)
# ADD SEGY_NAME TO SEGY MENU. NB: ID's START AT 1000
self.segy_name_submenu = wx.Menu()
self.m_segy_submenu.Append(self.segy_counter + 1000, segy.name, self.segy_name_submenu)
self.segy_name_submenu.Append(self.segy_counter + 1000, 'delete segy')
self.Bind(wx.EVT_MENU, self.remove_segy, id=self.segy_counter + 1000)
# INCREMENT COUNTER
self.segy_counter += 1
        except AttributeError:
load_error = MessageDialog(self, -1, "SEGY LOAD ERROR", "segy load error")
# REPLOT MODEL
self.update_layer_data()
self.draw()
def remove_segy(self, event):
"""DELETE SEGY DATA. NB 1000 IS TAKEN FROM EVENT.ID TO PREVENT OVERLAP WITH GRAV EVENT.IDS"""
if self.segy_on:
# DELETE SEGY OBJECT
obj_id = event.Id - 1000
self.segy_data_list[obj_id].mpl_actor.set_visible(False)
self.segy_data_list[obj_id].mpl_actor.remove()
self.segy_data_list[obj_id] = None
# REMOVE MENUBAR
self.m_segy_submenu.DestroyItem(event.Id)
# UPDATE MODEL
self.update_layer_data()
self.set_frame_limits()
self.draw()
def segy_color_adjustment(self, event):
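        """SET THE SEGY COLORMAP: EVENT ID 901 = GRAYSCALE (cm.gray); EVENT ID 902 = BLUE-WHITE-RED (cm.seismic)"""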
if event.Id == 901:
for s in range(0, len(self.segy_data_list)):
if self.segy_data_list[s] is not None:
self.segy_data_list[s].mpl_actor.set_cmap(cm.gray)
        elif event.Id == 902:
            for s in range(0, len(self.segy_data_list)):
                if self.segy_data_list[s] is not None:
                    self.segy_data_list[s].mpl_actor.set_cmap(cm.seismic)
# REDRAW MODEL
self.draw()
def gain_increase(self, event):
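        """INCREASE SEGY DISPLAY GAIN BY NARROWING THE COLOR LIMITS. NB: MINIMUM HALF-RANGE IS 1.0"""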
# CHANGE THE GAIN VALUE
gain_pos = self.segy_gain_pos - 1.0
if gain_pos < 1.0:
return
else:
self.segy_gain_pos = gain_pos
self.segy_gain_neg = -gain_pos
            # REDRAW THE SEGY MPL ACTOR
for s in range(0, len(self.segy_data_list)):
if self.segy_data_list[s] is not None:
self.segy_data_list[s].mpl_actor.set_clim(vmax=self.segy_gain_pos)
self.segy_data_list[s].mpl_actor.set_clim(vmin=self.segy_gain_neg)
# REDRAW MODEL
self.draw()
def gain_decrease(self, event):
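        """DECREASE SEGY DISPLAY GAIN BY WIDENING THE COLOR LIMITS"""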
# CHANGE THE GAIN VALUE
gain_pos = self.segy_gain_pos + 1.0
if gain_pos < 1.0:
return
else:
self.segy_gain_pos = gain_pos
self.segy_gain_neg = -gain_pos
            # REDRAW THE SEGY MPL ACTOR
for s in range(0, len(self.segy_data_list)):
if self.segy_data_list[s] is not None:
self.segy_data_list[s].mpl_actor.set_clim(vmax=self.segy_gain_pos)
self.segy_data_list[s].mpl_actor.set_clim(vmin=self.segy_gain_neg)
# REDRAW MODEL
self.draw()
# XY DATA~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def load_xy(self, event):
""" LOAD & PLOT XY DATA E.G. EQ HYPOCENTERS. NB: ID's start at 5000"""
self.load_window = LoadObservedDataFrame(self, -1, 'Load observed data', 'XY')
self.load_window.Show(True)
def open_xy_data(self):
"""
OPEN THE XY DATA SELECTED BY THE USER USING THE load_xy FUNC
        NB: IDs START AT 4000
"""
xy_input_file = self.load_window.file_path
self.xy_name = self.load_window.observed_name
self.xy_color = self.load_window.color_picked
        # CREATE NEW OBSERVED XY DATA OBJECT
new_xy = ObservedData()
# SET ATTRIBUTES
new_xy.data = np.genfromtxt(xy_input_file, dtype=float, autostrip=True)
new_xy.id = self.xy_data_counter
new_xy.name = self.load_window.observed_name
new_xy.color = self.load_window.color_picked
new_xy.type = str('observed')
new_xy.mpl_actor = self.model_frame.scatter(new_xy.data[:, 0], new_xy.data[:, 1], marker='o',
color=new_xy.color, s=3, gid=4000 + self.xy_data_counter)
        # APPEND NEW DATA TO THE OBSERVED XY GMG LIST
self.observed_xy_data_list.append(new_xy)
# APPEND NEW DATA MENU TO 'XY data MENU'
self.obs_submenu = wx.Menu()
self.m_xy_submenu.Append(4000 + self.xy_data_counter, new_xy.name, self.obs_submenu)
# APPEND DELETE DATA OPTION TO THE NEW DATA MENU
self.obs_submenu.Append(4000 + self.xy_data_counter, 'delete observed data')
# BIND TO DEL XY FUNC
self.Bind(wx.EVT_MENU, self.delete_xy, id=4000 + self.xy_data_counter)
# INCREMENT XY COUNTER
self.xy_data_counter += 1
# UPDATE GMG GUI
self.update_layer_data()
self.draw()
def delete_xy(self, event):
""""DELETE OBSERVED XY DATA NB: ID's start at 4000"""
# DESTROY MENUBAR
self.m_xy_submenu.DestroyItem(event.Id)
# REMOVE OBJECT AND MPL ACTOR
obj_id = event.Id - 4000
self.observed_xy_data_list[obj_id].mpl_actor.set_visible(False)
self.observed_xy_data_list[obj_id] = None
# UPDATE MODEL
self.update_layer_data()
self.set_frame_limits()
self.draw()
# WELL DATA~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def load_well(self, event):
"""
LOAD A WELL RECORD INTO THE MODEL FRAME.
IDs BEGIN AT 3000.
HIDE/SHOW TOGGLE IDs BEGIN AT 2000.
"""
# CREATE INSTANCE OF LOADING DIALOG BOX
open_file_dialog = wx.FileDialog(self, "Open Observed file", "", "", "All files (*.*)|*.*",
wx.FD_OPEN | wx.FD_FILE_MUST_EXIST)
if open_file_dialog.ShowModal() == wx.ID_CANCEL:
return # THE USER CHANGED THEIR MIND
else:
well_in = open_file_dialog.GetPath()
well_name_box = wx.TextEntryDialog(None, 'Please provide a name for the new well record:')
answer = well_name_box.ShowModal()
# CREATE A NEW WELL DATA OBJECT
well = ObservedWellData()
# SET WELL ATTRIBUTES
well.id = self.well_counter
# SET NAME
well.name = well_name_box.GetValue()
well.textsize = 2
# SET DATA
with open(well_in, 'r') as f:
input_record = [line.strip().split(' ') for line in f]
well.raw_record = input_record
well.data = np.array(well.raw_record[1:]) # CREATE NP ARRAY WITHOUT HEADER INFO
# CREATE FILE MENU DATA
self.well_name_submenu = wx.Menu()
self.m_wells_submenu.Append(self.well_counter + 3000, well.name, self.well_name_submenu)
self.well_name_submenu.Append(self.well_counter + 2000, 'Hide/Show')
self.well_name_submenu.Append(self.well_counter + 3000, 'Delete well')
self.Bind(wx.EVT_MENU, self.show_hide_well, id=well.id + 2000)
self.Bind(wx.EVT_MENU, self.delete_well, id=well.id + 3000)
# DRAW WELL IN MODEL FRAME
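        # NB: THE INDEXING BELOW ASSUMES THE WELL X LOCATION IS HELD AT well.data[1][1],
        # WITH THE TOP AND BASE DEPTHS AT well.data[0][1] AND well.data[-1][-1]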
y1 = well.data[0][1].astype(float)
y2 = well.data[-1][-1].astype(float)
well_x_location = well.data[1][1].astype(float)
wellx = (well_x_location, well_x_location)
welly = (y1, y2)
well.mpl_actor = self.model_frame.plot(wellx, welly, linestyle='-', linewidth='2', color='black')
# PLOT WELL NAME
well.mpl_actor_name = self.model_frame.annotate(well.name, xy=(well_x_location, -0.5),
xytext=(well_x_location, -0.5),
fontsize=well.textsize, weight='bold',
horizontalalignment='center', color='black',
bbox=dict(boxstyle="round,pad=.2", fc="0.8"),
clip_on=True)
# PLOT WELL HORIZONS
# SET EMPTY ARRAYS TO FILL WITH LABELS AND HORIZONS
well.labels_list = [None] * (len(well.data) - 2)
well.horizons_list = [None] * (len(well.data) - 2)
for i in range(2, len(well.data)):
y = [well.data[i][1].astype(float), well.data[i][1].astype(float)]
x = [well.data[1][1].astype(float) - 1, well.data[1][1].astype(float) + 1]
# PLOT HORIZON LINE
well.horizons_list[i - 2] = self.model_frame.plot(x, y, linestyle='-', linewidth='2', color='black')
horizon_y_pos = well.data[i][1].astype(float)
horizon = well.data[i][0].astype(str)
# ALTERNATE POSITION OF ODDs/EVENs TO TRY AND AVOID OVERLAP
if i % 2 == 0:
horizon_x_pos = well.data[1][1].astype(float) - 1.05
well.labels_list[i - 2] = self.model_frame.annotate(horizon, xy=(horizon_x_pos, horizon_y_pos),
xytext=(horizon_x_pos, horizon_y_pos),
                                                                    fontsize=well.textsize, weight='bold',
horizontalalignment='left', verticalalignment='top',
color='black', bbox=dict(boxstyle="round,pad=.4",
fc="0.8", ec='None'),
clip_on=True)
else:
horizon_x_pos = well.data[1][1].astype(float) + 1.05
well.labels_list[i - 2] = self.model_frame.annotate(horizon, xy=(horizon_x_pos, horizon_y_pos),
xytext=(horizon_x_pos, horizon_y_pos),
                                                                    fontsize=well.textsize, weight='bold',
horizontalalignment='right',
verticalalignment='top',
color='black', bbox=dict(boxstyle="round,pad=.4",
fc="0.8", ec='None'),
clip_on=True)
# APPEND WELL TO WELL DATA LIST
self.well_data_list.append(well)
        # INCREMENT WELL COUNTER
self.well_counter += 1
# UPDATE GMG
self.update_layer_data()
self.draw()
    def show_hide_well(self, event):
        """TOGGLE THE VISIBILITY OF A WELL RECORD. NB: Hide/Show ID's start at 2000"""
        well_id = event.Id - 2000
        if self.well_data_list[well_id].mpl_actor[0].get_visible():
            # HIDE WELL
            self.well_data_list[well_id].mpl_actor[0].set_visible(False)
            self.well_data_list[well_id].mpl_actor_name.set_visible(False)
            # HIDE HORIZONS
            for h in range(len(self.well_data_list[well_id].horizons_list)):
                if self.well_data_list[well_id].horizons_list[h] is not None:
                    self.well_data_list[well_id].horizons_list[h][0].set_visible(False)
            for l in range(len(self.well_data_list[well_id].labels_list)):
                if self.well_data_list[well_id].labels_list[l] is not None:
                    self.well_data_list[well_id].labels_list[l].set_visible(False)
        else:
            # SHOW WELL
            self.well_data_list[well_id].mpl_actor[0].set_visible(True)
            self.well_data_list[well_id].mpl_actor_name.set_visible(True)
            # SHOW HORIZONS
            for h in range(len(self.well_data_list[well_id].horizons_list)):
                if self.well_data_list[well_id].horizons_list[h] is not None:
                    self.well_data_list[well_id].horizons_list[h][0].set_visible(True)
            for l in range(len(self.well_data_list[well_id].labels_list)):
                if self.well_data_list[well_id].labels_list[l] is not None:
                    self.well_data_list[well_id].labels_list[l].set_visible(True)
# REDRAW
self.draw()
def delete_well(self, event):
""""DELETE WELL DATA NB: ID's start at 2500"""
# SET ID
obj_id = event.Id - 3000
# REMOVE OBJECT AND MPL ACTOR
self.well_data_list[obj_id].mpl_actor[0].set_visible(False)
# REMOVE HORIZON MPL ACTORS
for h in range(len(self.well_data_list[obj_id].horizons_list)):
if self.well_data_list[obj_id].horizons_list[h] is not None:
self.well_data_list[obj_id].horizons_list[h][0].set_visible(False)
# REMOVE LABEL MPL ACTORS
for l in range(len(self.well_data_list[obj_id].labels_list)):
if self.well_data_list[obj_id].labels_list[l] is not None:
self.well_data_list[obj_id].labels_list[l].set_visible(False)
# SET OBJECT AS NONE
self.well_data_list[obj_id] = None
# DELETE MENUBAR ENTRY
self.m_wells_submenu.DestroyItem(event.Id)
# UPDATE MODEL
self.draw()
# GEOLOGY OUTCROP DATA~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def load_outcrop_data(self, event):
""" LOAD & PLOT SURFACE CONTACT DATA. E.G. GEOLOGICAL CONTACTS NB: ID's start at 13000"""
self.load_window = LoadObservedDataFrame(self, -1, 'Load observed data', 'outcrop')
self.load_window.Show(True)
def open_outcrop_data(self):
"""OPEN THE OUTCROP DATA SELECTED BY THE USER USING THE load_outcrop_data FUNC"""
# CREATE A NEW OUTCROP DATA OBJECT
outcrop = ObservedOutcropData()
# LOAD DATA FILE
outcrop_input_file = self.load_window.file_path
outcrop.id = self.outcrop_data_count
outcrop.name = self.load_window.observed_name
outcrop.color = self.load_window.color_picked
outcrop.data = np.genfromtxt(outcrop_input_file, autostrip=True, dtype=str, comments='#')
# PLOT MARKERS IN MODEL
outcrop.lines = [None] * len(outcrop.data)
for i in range(len(outcrop.data)):
x1 = outcrop.data[i, 0].astype(float)
y1 = outcrop.data[i, 1].astype(float)
y2 = outcrop.data[i, 2].astype(float)
x = (x1, x1)
y = (y1, y2)
outcrop.lines[i] = self.model_frame.plot(x, y, linestyle='-', linewidth='2', color=outcrop.color)
# DRAW TEXT LABELS
outcrop.labels = [None] * len(outcrop.data)
# CREATE TEXT XYT
text = list(zip(outcrop.data[:, 0].astype(float), outcrop.data[:, 1].astype(float),
outcrop.data[:, 3].astype(str)))
for i in range(len(outcrop.data)):
            # ALTERNATE POSITION OF ODDs/EVENs TO TRY AND AVOID OVERLAP
if i % 2 == 0:
outcrop.labels[i] = self.model_frame.annotate(text[i][2], xy=(text[i][0], text[i][1]),
xytext=(text[i][0], text[i][1]),
fontsize=outcrop.textsize,
weight='regular', horizontalalignment='right',
verticalalignment='bottom',
color='black',
bbox=dict(boxstyle="round,pad=.4", fc="0.8", ec='None'),
clip_on=True)
else:
outcrop.labels[i] = self.model_frame.annotate(text[i][2], xy=(text[i][0], text[i][1]),
xytext=(text[i][0], text[i][1]),
fontsize=outcrop.textsize,
weight='regular', horizontalalignment='left',
verticalalignment='top',
color='black',
bbox=dict(boxstyle="round,pad=.4", fc="0.8", ec='None'),
clip_on=True)
# APPEND NEW DATA MENU TO 'OUTCROP DATA MENU'
self.outcrop_submenu = wx.Menu()
self.m_outcrop_submenu.Append(13000 + self.outcrop_data_count, outcrop.name, self.outcrop_submenu)
# APPEND DELETE DATA OPTION TO THE NEW DATA MENU
self.outcrop_submenu.Append(13000 + self.outcrop_data_count, 'delete observed data')
# BIND TO DEL XY FUNC
self.Bind(wx.EVT_MENU, self.delete_outcrop_data, id=13000 + self.outcrop_data_count)
# INCREMENT CONTACT COUNT
self.outcrop_data_count += 1
# APPEND NEW OUTCROP DATA OBJECT TO THE OUTCROP DATA LIST
self.outcrop_data_list.append(outcrop)
# UPDATE GMG GUI
self.update_layer_data()
self.draw()
def delete_outcrop_data(self, event):
""""DELETE OUTCROP DATA NB: ID's start at 13000"""
# SET ID
obj_id = event.Id - 13000
# REMOVE LINE MPL ACTORS
for i in range(len(self.outcrop_data_list[obj_id].lines)):
self.outcrop_data_list[obj_id].lines[i][0].set_visible(False)
# REMOVE LABEL MPL ACTORS
for i in range(len(self.outcrop_data_list[obj_id].labels)):
self.outcrop_data_list[obj_id].labels[i].set_visible(False)
# SET OBJECT AS NONE
self.outcrop_data_list[obj_id] = None
# DELETE MENUBAR ENTRY
self.m_outcrop_submenu.DestroyItem(event.Id)
# UPDATE MODEL
self.draw()
# LAYER & NODE CONTROLS~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def on_menu_set_button_press(self, event):
"""
CHECK IF A NODE FROM THE CURRENT LAYER IS SELECTED; IF NOT, THEN SKIP THIS PART AND ONLY UPDATE ATTRIBUTES
"""
# GET NEW XY POINT
new_x = float(self.x_input.GetValue())
new_y = float(self.y_input.GetValue())
# GET CURRENTLY ACTIVE LAYER NODES AND LABEL THEM xt AND yt
xt = self.layer_list[self.currently_active_layer_id].x_nodes
yt = self.layer_list[self.currently_active_layer_id].y_nodes
# MODIFY xt and yt DEPENDING ON WHAT CONDITIONS ARE MET
if self.layer_list[self.currently_active_layer_id].type == 'fixed' and self.index_node is not None:
if xt[self.index_node] == 0 and yt[self.index_node] != 0.001:
xt[self.index_node] = 0 # REPLACE OLD X WITH NEW X
yt[self.index_node] = new_y # REPLACE OLD Y WITH NEW Y
elif xt[self.index_node] == self.x2 and yt[self.index_node] != 0.001:
xt[self.index_node] = self.x2 # REPLACE OLD X WITH NEW X
yt[self.index_node] = new_y # REPLACE OLD Y WITH NEW Y
elif xt[self.index_node] == 0 and yt[self.index_node] == 0.001:
xt[self.index_node] = 0 # REPLACE OLD X WITH NEW X
yt[self.index_node] = 0.001 # REPLACE OLD Y WITH NEW Y
elif xt[self.index_node] == self.x2 and yt[self.index_node] == 0.001:
xt[self.index_node] = self.x2 # REPLACE OLD X WITH NEW X
yt[self.index_node] = 0.001 # REPLACE OLD Y WITH NEW Y
elif new_y <= 0:
xt[self.index_node] = new_x # REPLACE OLD X WITH NEW X
yt[self.index_node] = 0.001 # REPLACE OLD Y WITH NEW Y
else:
xt[self.index_node] = new_x # REPLACE OLD X WITH NEW X
yt[self.index_node] = new_y # REPLACE OLD Y WITH NEW Y
elif self.layer_list[self.currently_active_layer_id].type == 'floating' and self.index_node is not None:
print("MOVING NODE ON FLOATING LAYER")
if new_y <= 0:
xt[self.index_node] = new_x # REPLACE OLD X WITH NEW X
yt[self.index_node] = 0.001 # REPLACE OLD Y WITH NEW Y
else:
xt[self.index_node] = new_x # REPLACE OLD X WITH NEW X
yt[self.index_node] = new_y # REPLACE OLD Y WITH NEW Y
# UPDATE THE CURRENTLY ACTIVE LAYER NODE LIST
self.current_x_nodes = xt
self.current_y_nodes = yt
self.currently_active_layer.set_data(self.current_x_nodes, self.current_y_nodes)
# UPDATE THE CURRENTLY ACTIVE LAYER layer_list ENTRY
self.layer_list[self.currently_active_layer_id].x_nodes = xt
self.layer_list[self.currently_active_layer_id].y_nodes = yt
# COLOR CURRENTLY SELECTED NODE RED
self.current_node.set_offsets([new_x, new_y])
# UPDATE LAYER DATA
self.set_density(self)
self.set_susceptibility(self)
self.set_angle_a(self)
self.set_angle_b(self)
self.set_angle_c(self)
self.set_earth_field(self)
# UPDATE GMG
self.update_layer_data()
self.run_algorithms()
self.draw()
def get_node_under_point(self, event):
"""
GET THE INDEX VALUE OF THE NODE UNDER POINT, AS LONG AS IT IS WITHIN NODE_CLICK_LIMIT TOLERANCE OF CLICK
"""
# RESET NODE SWITCH
self.didnt_get_node = False
if self.pinch_switch is False:
# PINCH MODE ISN'T ON, SO DO THE NORMAL ROUTINE
# GET THE CURRENT LAYERS NODE VALUES
xyt = self.currently_active_layer.get_xydata()
xt = xyt[:, 0]
yt = xyt[:, 1]
# CALCULATE DISTANCES FROM THE MOUSE CLICK TO THE LAYER NODES
d = np.sqrt((xt - event.xdata) ** 2 + (yt - event.ydata) ** 2)
# FIND THE NODE INDEX VALUE FOR THE NODE CLOSEST TO THE CLICK
self.index_arg = np.argmin(d)
# CHECK IF THE NODE IS WITHIN THE "MEANT TO CLICK" DISTANCE
if d[self.index_arg] >= self.node_click_limit:
self.didnt_get_node = True
return None, None
else:
# CHECK IF NODE IS A PINCHED POINT, IF YES FIND NODE OF ABOVE OR BELOW LAYER
if self.layer_list[self.currently_active_layer_id].pinched is True:
                    # CREATE EMPTY LIST, THEN FILL IT WITH THE NODE INDEX VALUES
self.pinched_index_arg_list = []
for l in self.layer_list[self.currently_active_layer_id].pinched_list:
# GET LAYER NODES
xt = self.layer_list[l].x_nodes
yt = self.layer_list[l].y_nodes
# CALCULATE DISTANCES FROM THE MOUSE CLICK TO THE LAYER NODES
d = np.sqrt((xt - event.xdata) ** 2 + (yt - event.ydata) ** 2)
# FIND THE NODE INDEX VALUE FOR THE NODE CLOSEST TO THE CLICK AND APPEND IT
# TO THE PINCHED INDEX LIST
self.pinched_index_arg_list.append(np.argmin(d))
else:
self.pinched_index_arg_list = None
# NOW RETURN:
# 1) THE INDEX VALUE OF THE NODE CLICKED.
# 2) A LIST OF INDEX VALUES FOR NODES PINCHED TO INDEX_ARG (NONE IF THE NODE ISN'T PINCHED).
return self.index_arg, self.pinched_index_arg_list
else:
# GMG IS IN PINCH MODE - SO JUST RETURN THE INDEX OF THE NODE
# GET THE CURRENT LAYERS NODE VALUES
xyt = self.currently_active_layer.get_xydata()
xt = xyt[:, 0]
yt = xyt[:, 1]
# CALCULATE DISTANCES FROM THE MOUSE CLICK TO THE LAYER NODES
d = np.sqrt((xt - event.xdata) ** 2 + (yt - event.ydata) ** 2)
# FIND THE NODE INDEX VALUE FOR THE NODE CLOSEST TO THE CLICK
self.index_arg = np.argmin(d)
# CHECK IF THE NODE IS WITHIN THE "MEANT TO CLICK" DISTANCE
if d[self.index_arg] >= self.node_click_limit:
return None, None
else:
return self.index_arg, None
def get_fault_node_under_point(self, event):
"""GET THE INDEX VALUE OF THE NODE UNDER POINT, AS LONG AS IT IS WITHIN NODE_CLICK_LIMIT TOLERANCE OF CLICK"""
# RESET NODE SWITCH
self.didnt_get_node = False
# GET FAULT NODE XY DATA
xy_data = self.currently_active_fault.get_xydata()
x = xy_data[:, 0]
y = xy_data[:, 1]
# FIND NODE CLOSEST TO EVENT CLICK POINT
d = np.sqrt((x - event.xdata) ** 2 + (y - event.ydata) ** 2)
self.index_arg = np.argmin(d)
# RETURN RESULTING NODE OR NONE
if d[self.index_arg] >= self.node_click_limit:
self.didnt_get_node = True
return None
else:
return self.index_arg
def button_press(self, event):
"""WHAT HAPPENS WHEN THE LEFT MOUSE BUTTON IS PRESSED"""
if event.inaxes is None:
return # CLICK IS OUTSIDE MODEL FRAME SO RETURN
if event.button != 1:
return
if self.fault_picking_switch is False and self.capture is False and self.select_new_layer_nodes is False:
# THEN GMG IS IN LAYER MODE
# GET THE NODE CLOSEST TO THE CLICK AND ANY PINCHED NODES
self.index_node, self.pinched_index_arg_list = self.get_node_under_point(event)
if self.index_node is None:
return
            # CHECK IF THE 'p' KEY IS ON (I.E. IN PINCH MODE)
if self.pinch_switch is True:
# SET THE FIRST NODE CLICKED AS NODE 1
if self.pinch_count == 0:
self.layer_getting_pinched = self.currently_active_layer_id
self.node_to_pinch_index = self.index_node
self.pinch_count += 1
elif self.pinch_count == 1 and self.currently_active_layer_id == self.layer_getting_pinched:
# USER HASN'T CHANGED TO A DIFFERENT LAYER YET
return
else:
# USER IS UP TO SECOND MOUSE CLICK. SET THE NODE TO BE PINCHED AS THE SECOND NODE CLICKED
# GET CURRENT LAYER NODES
xyt = self.currently_active_layer.get_xydata()
xt, yt = xyt[:, 0], xyt[:, 1]
# SET THE PINCHED NODE
self.layer_list[self.layer_getting_pinched].x_nodes[self.node_to_pinch_index] = xt[self.index_node]
self.layer_list[self.layer_getting_pinched].y_nodes[self.node_to_pinch_index] = yt[self.index_node]
# UPDATE LAYER LINE
self.layer_list[self.layer_getting_pinched].node_mpl_actor[0].set_visible(False)
self.layer_list[self.layer_getting_pinched].node_mpl_actor[0].remove()
self.layer_list[self.layer_getting_pinched].node_mpl_actor = self.model_frame.plot(
self.layer_list[self.layer_getting_pinched].x_nodes,
self.layer_list[self.layer_getting_pinched].y_nodes,
color='blue', linewidth=1.0, alpha=1.0)
# UPDATE LAYER POLYGON FILL
current_color = self.layer_list[self.layer_getting_pinched].polygon_mpl_actor[0].get_fc()
self.layer_list[self.layer_getting_pinched].polygon_mpl_actor[0].set_visible(False)
self.layer_list[self.layer_getting_pinched].polygon_mpl_actor[0].remove()
self.layer_list[self.layer_getting_pinched].polygon_mpl_actor = self.model_frame.fill(
self.layer_list[self.layer_getting_pinched].x_nodes,
self.layer_list[self.layer_getting_pinched].y_nodes, color=current_color, alpha=0.4,
closed=True, linewidth=None, ec=None)
# RESET PINCH COUNT
self.pinch_count = 0
# SET THE PINCH ATTRIBUTES IN THE LAYER OBJECTS
self.layer_list[self.layer_getting_pinched].pinched = True
if self.currently_active_layer_id in self.layer_list[self.layer_getting_pinched].pinched_list:
pass
else:
self.layer_list[self.layer_getting_pinched].pinched_list.append(self.currently_active_layer_id)
# SET THE PINCH ATTRIBUTES IN THE LAYER OBJECTS
self.layer_list[self.currently_active_layer_id].pinched = True
if self.layer_getting_pinched in self.layer_list[self.currently_active_layer_id].pinched_list:
pass
else:
self.layer_list[self.currently_active_layer_id].pinched_list.append(self.layer_getting_pinched)
# REDRAW MODEL
self.update_layer_data()
# self.draw()
else:
# GMG IS IN SIMPLE LAYER MODE - SO JUST SET THE NEW NODE LOCATION
xyt = self.currently_active_layer.get_xydata()
xt, yt = xyt[:, 0], xyt[:, 1]
self.x_input.SetValue(xt[self.index_node])
self.y_input.SetValue(yt[self.index_node])
# COLOR CURRENTLY SELECTED NODE RED
self.current_node.set_offsets([xt[self.index_node], yt[self.index_node]])
elif self.fault_picking_switch is True and self.select_new_layer_nodes is False \
and self.select_new_fault_nodes is False:
# THEN GMG IS IN FAULT MODE
# GET CURRENT NODE
self.selected_node = self.get_fault_node_under_point(event)
if self.selected_node is None:
return
# GET CURRENT X AND Y COORDS
xyt = self.currently_active_fault.get_xydata()
self.xt = xyt[:, 0]
self.yt = xyt[:, 1]
# COLOR CURRENTLY SELECTED NODE RED
self.current_node.set_offsets([self.xt[self.selected_node], self.yt[self.selected_node]])
elif self.capture is True or self.select_new_layer_nodes is True or self.select_new_fault_nodes is True:
# COORDINATE CAPTURE MODE OR NEW LAYER CREATION OR NEW FAULT CREATION IS ON. SO PASS
return
def move(self, event):
"""WHAT HAPPEN WHEN THE LEFT MOUSE BUTTON IS HELD AND THE MOUSE IS MOVED"""
if self.index_node is None and self.selected_node is None:
# NO NODE WAS FOUND NEAR THE CLICK
return
if event.inaxes is None:
# CLICK WAS OUTSIDE THE MODEL FRAME
return
if event.button != 1:
return
if self.pinch_switch is True:
# PINCH MODE IS ON
return
if self.pan_on is True:
# PAN MODE IS ON
return
if self.zoom_on is True:
# ZOOM MODE IS ON
return
if self.select_new_layer_nodes is True:
# CURRENTLY CREATING A NEW LAYER
return
if self.didnt_get_node is True:
# NO NODE WAS SELECTED WHEN CLICKING
return
if self.fault_picking_switch is True:
# GMG IS IN FAULT MODE
# ASSIGN NEW X AND Y POINTS
self.new_x = event.xdata # GET X OF NEW POINT
self.new_y = event.ydata # GET Y OF NEW POINT
# UPDATE NODE ARRAY
if self.xt[self.selected_node] == self.x1 and self.yt[self.selected_node] != 0.001:
self.xt[self.selected_node] = self.x1 # REPLACE OLD X WITH NEW X
self.yt[self.selected_node] = self.new_y # REPLACE OLD Y WITH NEW Y
elif self.xt[self.selected_node] == self.x2 and self.yt[self.selected_node] != 0.001:
self.xt[self.selected_node] = self.x2 # REPLACE OLD X WITH NEW X
self.yt[self.selected_node] = self.new_y # REPLACE OLD Y WITH NEW Y
elif self.xt[self.selected_node] == 0 and self.yt[self.selected_node] == 0.001:
self.xt[self.selected_node] = 0 # REPLACE OLD X WITH NEW X
self.yt[self.selected_node] = 0.001 # REPLACE OLD Y WITH NEW Y
elif self.xt[self.selected_node] == self.x2 and self.yt[self.selected_node] == 0.001:
self.xt[self.selected_node] = self.x2 # REPLACE OLD X WITH NEW X
self.yt[self.selected_node] = 0.001 # REPLACE OLD Y WITH NEW Y
elif self.new_y <= 0:
self.xt[self.selected_node] = self.new_x # REPLACE OLD X WITH NEW X
self.yt[self.selected_node] = 0.001 # REPLACE OLD Y WITH NEW Y
else:
self.xt[self.selected_node] = self.new_x # REPLACE OLD X WITH NEW X
self.yt[self.selected_node] = self.new_y # REPLACE OLD Y WITH NEW Y
# UPDATE THE FAULT LIST RECORDS
self.fault_list[self.currently_active_fault_id].x_nodes = self.xt
self.fault_list[self.currently_active_fault_id].y_nodes = self.yt
# UPDATE THE FAULT MPL ACTOR
self.fault_list[self.currently_active_fault_id].mpl_actor[0].set_xdata(self.xt)
self.fault_list[self.currently_active_fault_id].mpl_actor[0].set_ydata(self.yt)
# UPDATE THE CURRENT VIEW OF THE FAULT
self.currently_active_fault.set_data(self.xt, self.yt)
# UPDATE "CURRENT NODE" RED DOT
if self.xt[self.selected_node] == self.x1:
self.current_node.set_offsets([self.x1, self.new_y])
elif self.xt[self.selected_node] == self.x2:
self.current_node.set_offsets([self.x2, self.new_y])
else:
self.current_node.set_offsets([self.new_x, self.new_y])
self.update_layer_data() # UPDATE LAYER DATA
# GMG IS IN LAYER MODE
if self.fault_picking_switch is False:
if self.layer_list[self.currently_active_layer_id].type == str('fixed'):
# GET X AND Y VALUES
x = event.xdata # GET X OF NEW POINT
y = event.ydata # GET Y OF NEW POINT
# GET CURRENT X AND Y ARRAYS
xt = self.layer_list[self.currently_active_layer_id].x_nodes
yt = self.layer_list[self.currently_active_layer_id].y_nodes
current_x_value = xt[self.index_node]
current_y_value = yt[self.index_node]
# UPDATE NODE
if xt[self.index_node] == self.x1 and yt[self.index_node] != 0.001:
xt[self.index_node] = self.x1 # REPLACE OLD X WITH NEW X
yt[self.index_node] = y # REPLACE OLD Y WITH NEW Y
elif xt[self.index_node] == self.x2 and yt[self.index_node] != 0.001:
xt[self.index_node] = self.x2 # REPLACE OLD X WITH NEW X
yt[self.index_node] = y # REPLACE OLD Y WITH NEW Y
elif xt[self.index_node] == 0 and yt[self.index_node] == 0.001:
xt[self.index_node] = 0 # REPLACE OLD X WITH NEW X
yt[self.index_node] = 0.001 # REPLACE OLD Y WITH NEW Y
elif xt[self.index_node] == self.x2 and yt[self.index_node] == 0.001:
xt[self.index_node] = self.x2 # REPLACE OLD X WITH NEW X
yt[self.index_node] = 0.001 # REPLACE OLD Y WITH NEW Y
elif y <= 0:
xt[self.index_node] = x # REPLACE OLD X WITH NEW X
yt[self.index_node] = 0.001 # REPLACE OLD Y WITH NEW Y
else:
xt[self.index_node] = x # REPLACE OLD X WITH NEW X
yt[self.index_node] = y # REPLACE OLD Y WITH NEW Y
elif self.layer_list[self.currently_active_layer_id].type == str('floating'):
# GET THE X AND Y VALUES
x = event.xdata # GET X OF NEW POINT
y = event.ydata # GET Y OF NEW POINT
xt = self.layer_list[self.currently_active_layer_id].x_nodes
yt = self.layer_list[self.currently_active_layer_id].y_nodes
current_x_value = xt[self.index_node]
current_y_value = yt[self.index_node]
if y <= 0:
xt[self.index_node] = x # REPLACE OLD X WITH NEW X
yt[self.index_node] = 0.001 # REPLACE OLD Y WITH NEW Y
else:
xt[self.index_node] = x # REPLACE OLD X WITH NEW X
yt[self.index_node] = y # REPLACE OLD Y WITH NEW Y
# RESET THE LAYER WITH THE NEW NODE POSITION
self.layer_list[self.currently_active_layer_id].x_nodes = xt
self.layer_list[self.currently_active_layer_id].y_nodes = yt
# DEAL WITH PINCHED NODE
if self.layer_list[self.currently_active_layer_id].pinched is True:
i = 0
for l in self.layer_list[self.currently_active_layer_id].pinched_list:
if self.layer_list[l].x_nodes[self.pinched_index_arg_list[i]] == current_x_value and \
self.layer_list[l].y_nodes[self.pinched_index_arg_list[i]] == current_y_value:
# SET PINCHED NODE AS NEW VALUE
self.layer_list[l].x_nodes[self.pinched_index_arg_list[i]] = xt[self.index_node]
self.layer_list[l].y_nodes[self.pinched_index_arg_list[i]] = yt[self.index_node]
i += 1
# SET THE CURRENTLY ACTIVE LAYER WITH THE NEW NODE
self.current_x_nodes = xt
self.current_y_nodes = yt
self.currently_active_layer.set_data(self.current_x_nodes, self.current_y_nodes)
# UPDATE "CURRENT NODE" RED DOT
if xt[self.index_node] == self.x1:
self.current_node.set_offsets([self.x1, y])
elif xt[self.index_node] == self.x2:
self.current_node.set_offsets([self.x2, y])
else:
self.current_node.set_offsets([x, y])
# UPDATE LAYER DATA
self.update_layer_data()
def button_release(self, event):
"""WHAT HAPPENS WHEN THE LEFT MOUSE BUTTON IS RELEASED"""
if event.inaxes is None:
# CLICK WAS OUTSIDE THE MODEL FRAME
return
if event.button != 1:
return
if self.capture is True:
# GMG IS IN COORDINATE CAPTURE MODE SO ADD THE CURRENT COORDINATES TO THE TABLE
self.capture_window.table.Append((event.xdata, event.ydata))
# NEW FLOATING LAYER CREATION SEQUENCE
if self.select_new_layer_nodes is True:
# APPEND NEW COORDINATES
self.new_plotx.append(event.xdata)
self.new_ploty.append(event.ydata)
if self.click_count == 0:
# PLOT NEW NODES
self.new_layer_nodes = self.model_frame.plot(self.new_plotx, self.new_ploty, color='blue', marker='o')
# FILL LAYER
self.new_layer_fill = self.model_frame.fill(self.new_plotx, self.new_ploty, color='blue',
alpha=self.layer_transparency, closed=True, linewidth=None,
ec=None)
# INCREMENT CLICK COUNTER
self.click_count += 1
elif self.click_count < 3:
self.new_layer_nodes[0].set_xdata(self.new_plotx)
self.new_layer_nodes[0].set_ydata(self.new_ploty)
self.new_layer_fill[0].set_xy(list(zip(self.new_plotx, self.new_ploty)))
# INCREMENT CLICK COUNTER
self.click_count += 1
else:
# REMOVE THE TEMP LAYER MPL ACTOR
self.select_new_layer_nodes = False
self.new_layer_nodes[0].set_visible(False)
self.new_layer_fill[0].set_visible(False)
self.new_layer_nodes[0].remove()
self.new_layer_fill[0].remove()
self.new_layer_nodes = None
self.new_layer_fill = None
# RUN FINAL PART OF LAYER LOADING
self.create_new_floating_layer()
# NEW FAULT CREATION SEQUENCE
if self.select_new_fault_nodes is True:
# APPEND NEW COORDINATES
self.new_plotx.append(event.xdata)
self.new_ploty.append(event.ydata)
if self.click_count == 0:
# PLOT NEW NODES
self.new_fault_nodes = self.model_frame.plot(self.new_plotx, self.new_ploty, color='green', marker='o')
# INCREMENT CLICK COUNTER
self.click_count += 1
elif self.click_count < 2:
self.new_fault_nodes[0].set_xdata(self.new_plotx)
self.new_fault_nodes[0].set_ydata(self.new_ploty)
# INCREMENT CLICK COUNTER
self.click_count += 1
else:
# REMOVE THE TEMP FAULT MPL ACTOR
self.new_fault_nodes[0].set_visible(False)
self.new_fault_nodes[0].remove()
self.new_fault_nodes = None
# SWITCH OFF NEW FAULT MODE
self.select_new_fault_nodes = False
self.click_count = 0
# RUN FINAL PART OF FAULT LOADING
self.create_new_fault()
# RUN MODELLING ALGORITHMS
if self.fault_picking_switch is False:
self.run_algorithms()
else:
# UPDATE GMG GRAPHICS
self.draw()
def key_press(self, event):
"""DEFINE KEY PRESS LINKS"""
if self.fault_picking_switch is True:
# GMG IS IN FAULT MODE SO USE FAULT MODE KEY FUNCTIONS
self.fault_mode_key_press(event)
return
        # f = TOGGLE FAULT PICKING MODE ON/OFF
if event.key == 'f':
# TURN ON/OFF FAULT PICKING MODE
if self.fault_picking_switch is True:
self.fault_picking_switch = False
else:
self.fault_picking_switch = True
# i = INSERT NEW NODE AT MOUSE POSITION
if event.key == 'i':
if event.inaxes is None:
return
# GET CURRENT LAYER XY
xt = np.array(self.current_x_nodes)
yt = np.array(self.current_y_nodes)
# INSERT NEW NODES INTO LAYER X AND Y LISTS
self.current_x_nodes = np.insert(xt, [self.index_arg + 1], event.xdata)
self.current_y_nodes = np.insert(yt, [self.index_arg + 1], event.ydata)
# SET THE CURRENT LAYER WITH THE UPDATED NODE LIST
self.currently_active_layer.set_data(self.current_x_nodes, self.current_y_nodes)
# UPDATE LAYER DATA AND PLOT
self.update_layer_data()
self.run_algorithms()
self.draw()
# d = DELETE NODE AT MOUSE POSITION
if event.key == 'd':
xt = np.array(self.current_x_nodes)
yt = np.array(self.current_y_nodes)
# FIND NODE CLOSEST TO CURSOR LOCATION
d = np.sqrt((xt - event.xdata) ** 2 + (yt - event.ydata) ** 2)
self.index_arg = np.argmin(d)
ind = d[self.index_arg]
if xt[self.index_arg] == 0: # PREVENT END NODES BEING DELETED
return 0
if ind >= self.node_click_limit:
return 0
else:
# DELETE NODE BY RECREATING XY DATA WITHOUT CURRENT NODE
self.current_x_nodes = [tup for i, tup in enumerate(xt) if i != self.index_arg] # DELETE X
self.current_y_nodes = [tup for i, tup in enumerate(yt) if i != self.index_arg] # DELETE Y
# SET THE CURRENT LAYER WITH THE UPDATED NODE LIST
self.currently_active_layer.set_data(self.current_x_nodes, self.current_y_nodes)
# # NOW CHECK FOR PINCHED NODES
# index_arg2 = None
# self.pinch_switch = False
# self.pinched_index_arg_list = [None] * (self.total_layer_count + 1) # CREATE LIST OF NONES = LENGTH AS NUMB OF LAYERS
# for x in range(0, self.total_layer_count + 1): # LOOP THROUGH ALL LAYERS TO CHECK FOR PINCHED NODES
# if x == self.currently_active_layer_id:
# pass
# x_node_list = self.layer_list[x].x_nodes
# y_node_list = self.layer_list[x].y_nodes
#
# for i in range(0, len(x_node_list)):
# # NOW CHECK X AND Y VALUES ARE EQUAL
# if x_node_list[i] == xt[self.index_arg] and y_node_list[i] == yt[self.index_arg]:
# # IF ONE OF THE NODES FORM LIST IS EQUAL TO A NODE FROM THE OTHER LAYER THEN RETURN THE INDEX
# self.pinched_index_arg_list[x] = i
# self.pinch_switch = True
#
# # REMOVE PINCHED NODES
# if self.pinch_switch is True:
# for k in range(len(self.pinched_index_arg_list)):
# if self.pinched_index_arg_list[k] is not None:
# next_x_list = self.plotx_list[k]
# next_y_list = self.ploty_list[k]
# # GET THE NODE LIST OF THE NEXT LAYER
# next_x_list = [tup for i, tup in enumerate(next_x_list) if
# i != self.pinched_index_arg_list[k]] # DELETE X
# next_y_list = [tup for i, tup in enumerate(next_y_list) if
# i != self.pinched_index_arg_list[k]] # DELETE Y
# # OVERWRITE THE NODE LIST WITH UPDATED LIST
# self.plotx_list[k], self.ploty_list[k] = next_x_list, next_y_list
# SHIFT CURRENT NODE COLORING TO PREVIOUS NODE
self.current_node.set_offsets([xt[self.index_arg - 1], yt[self.index_arg - 1]])
# UPDATE LAYER DATA AND PLOT
self.update_layer_data()
self.run_algorithms()
self.draw()
# q = BEAT THE ZOOM BUG
if event.key == 'q':
self.nodes = True
# n = CREATE NEW LAYER AT MOUSE POINT
if event.key == 'n':
self.new_layer(event)
        # . = MOVE TO NEXT LAYER
if event.key == '.':
if self.currently_active_layer_id == self.total_layer_count:
# UPDATE LAYER DATA
self.update_layer_data()
                # WRAP AROUND TO THE FIRST LAYER & ASSIGN CURRENT LAYER XY DATA
self.currently_active_layer_id = 0
# SET CURRENT INPUT VALUES IN MENU
self.density_input.SetValue(self.layer_list[self.currently_active_layer_id].density)
self.ref_density_input.SetValue(self.layer_list[self.currently_active_layer_id].reference_density)
self.susceptibility_input.SetValue(self.layer_list[self.currently_active_layer_id].susceptibility)
self.angle_a_input.SetValue(self.layer_list[self.currently_active_layer_id].angle_a)
self.angle_b_input.SetValue(self.layer_list[self.currently_active_layer_id].angle_b)
self.angle_c_input.SetValue(self.layer_list[self.currently_active_layer_id].angle_c)
self.earth_field_input.SetValue(self.layer_list[self.currently_active_layer_id].earth_field)
# SET CURRENT NODE VALUES
self.x_input.SetValue(self.layer_list[self.currently_active_layer_id].x_nodes[0])
self.y_input.SetValue(self.layer_list[self.currently_active_layer_id].y_nodes[0])
self.current_x_nodes = self.layer_list[self.currently_active_layer_id].x_nodes
self.current_y_nodes = self.layer_list[self.currently_active_layer_id].y_nodes
self.current_node.set_offsets([self.current_x_nodes[0], self.current_y_nodes[0]])
# UPDATE MODEL
self.update_layer_data()
self.run_algorithms()
else:
# UPDATE LAYER DATA
self.update_layer_data()
                # MOVE TO THE NEXT LAYER & ASSIGN CURRENT LAYER XY DATA
self.currently_active_layer_id += 1
# SET CURRENT INPUT VALUES IN MENU
self.density_input.SetValue(self.layer_list[self.currently_active_layer_id].density)
self.ref_density_input.SetValue(self.layer_list[self.currently_active_layer_id].reference_density)
self.susceptibility_input.SetValue(self.layer_list[self.currently_active_layer_id].susceptibility)
self.angle_a_input.SetValue(self.layer_list[self.currently_active_layer_id].angle_a)
self.angle_b_input.SetValue(self.layer_list[self.currently_active_layer_id].angle_b)
self.angle_c_input.SetValue(self.layer_list[self.currently_active_layer_id].angle_c)
self.earth_field_input.SetValue(self.layer_list[self.currently_active_layer_id].earth_field)
# SET CURRENT NODE VALUES
self.x_input.SetValue(self.layer_list[self.currently_active_layer_id].x_nodes[0])
self.y_input.SetValue(self.layer_list[self.currently_active_layer_id].y_nodes[0])
self.current_x_nodes = self.layer_list[self.currently_active_layer_id].x_nodes
self.current_y_nodes = self.layer_list[self.currently_active_layer_id].y_nodes
self.current_node.set_offsets([self.current_x_nodes[0], self.current_y_nodes[0]])
# UPDATE MODEL
self.update_layer_data()
self.run_algorithms()
        # , = MOVE TO PREVIOUS LAYER
if event.key == ',':
if self.currently_active_layer_id == 0:
# UPDATE LAYER DATA
self.update_layer_data()
                # WRAP AROUND TO THE LAST LAYER & ASSIGN CURRENT LAYER XY DATA
self.currently_active_layer_id = self.total_layer_count
# SET CURRENT INPUT VALUES IN MENU
self.density_input.SetValue(self.layer_list[self.currently_active_layer_id].density)
self.ref_density_input.SetValue(self.layer_list[self.currently_active_layer_id].reference_density)
self.susceptibility_input.SetValue(self.layer_list[self.currently_active_layer_id].susceptibility)
self.angle_a_input.SetValue(self.layer_list[self.currently_active_layer_id].angle_a)
self.angle_b_input.SetValue(self.layer_list[self.currently_active_layer_id].angle_b)
self.angle_c_input.SetValue(self.layer_list[self.currently_active_layer_id].angle_c)
self.earth_field_input.SetValue(self.layer_list[self.currently_active_layer_id].earth_field)
# SET CURRENT NODE VALUES
self.x_input.SetValue(self.layer_list[self.currently_active_layer_id].x_nodes[0])
self.y_input.SetValue(self.layer_list[self.currently_active_layer_id].y_nodes[0])
self.current_x_nodes = self.layer_list[self.currently_active_layer_id].x_nodes
self.current_y_nodes = self.layer_list[self.currently_active_layer_id].y_nodes
self.current_node.set_offsets([self.current_x_nodes[0], self.current_y_nodes[0]])
# UPDATE MODEL
self.update_layer_data()
self.run_algorithms()
else:
# UPDATE LAYER DATA
self.update_layer_data()
                # MOVE TO THE PREVIOUS LAYER & ASSIGN CURRENT LAYER XY DATA
self.currently_active_layer_id -= 1
# SET CURRENT INPUT VALUES IN MENU
self.density_input.SetValue(self.layer_list[self.currently_active_layer_id].density)
self.ref_density_input.SetValue(self.layer_list[self.currently_active_layer_id].reference_density)
self.susceptibility_input.SetValue(self.layer_list[self.currently_active_layer_id].susceptibility)
self.angle_a_input.SetValue(self.layer_list[self.currently_active_layer_id].angle_a)
self.angle_b_input.SetValue(self.layer_list[self.currently_active_layer_id].angle_b)
self.angle_c_input.SetValue(self.layer_list[self.currently_active_layer_id].angle_c)
self.earth_field_input.SetValue(self.layer_list[self.currently_active_layer_id].earth_field)
# SET CURRENT NODE VALUES
self.x_input.SetValue(self.layer_list[self.currently_active_layer_id].x_nodes[0])
self.y_input.SetValue(self.layer_list[self.currently_active_layer_id].y_nodes[0])
self.current_x_nodes = self.layer_list[self.currently_active_layer_id].x_nodes
self.current_y_nodes = self.layer_list[self.currently_active_layer_id].y_nodes
self.current_node.set_offsets([self.current_x_nodes[0], self.current_y_nodes[0]])
# UPDATE MODEL
self.update_layer_data()
self.run_algorithms()
# UPDATE MODEL FRAME
self.draw()
# z = ZOOM IN MODE
if event.key == 'z':
self.zoom(event)
# ctrl+z = ZOOM OUT
if event.key == 'ctrl+z':
self.zoom_out(event)
        # ctrl+p = PAN MODE
if event.key == 'ctrl+p':
self.pan(event)
        # a = FULL EXTENT VIEW
if event.key == 'a':
self.full_extent(event)
# p = TURN ON PINCH NODE MODE
if event.key == 'p':
if self.pinch_switch is False:
self.pinch_switch = True
else:
self.pinch_switch = False
self.pinch_count = 0
# ctrl+i = INCREASE LAYER TRANSPARENCY
if event.key == 'ctrl+i':
self.transparency_increase(event)
        # ctrl+d = DECREASE LAYER TRANSPARENCY
if event.key == 'ctrl+d':
self.transparency_decrease(event)
# up arrow = INCREASE ASPECT RATIO
if event.key == 'up':
self.aspect_increase(event)
# ctrl+up = INCREASE ASPECT RATIO X2
if event.key == 'ctrl+up':
self.aspect_increase2(event)
# down arrow = DECREASE ASPECT RATIO
if event.key == 'down':
self.aspect_decrease(event)
        # ctrl+down = DECREASE ASPECT RATIO X2
if event.key == 'ctrl+down':
self.aspect_decrease2(event)
if event.key == 'return':
pass
def pinch_out_layer(self, event):
"""PINCH OUT A FIXED LAYER OVER A GIVEN X RANGE"""
if self.layer_list[self.currently_active_layer_id].type == 'floating':
error_message = "Only fixed layers can use the bulk pinch function! \n" \
"Use the p key to pinch individual floating layer nodes"
MessageDialog(self, -1, error_message, "Error")
return
else:
# CREATE AND SHOW POP OUT BOX
pinch_box = PinchDialog(self, -1, 'Pinch Out Layer:', self.layer_list, self.currently_active_layer_id)
answer = pinch_box.ShowModal()
# SET NEW NODE VALUES
self.current_x_nodes = pinch_box.pinched_x
self.current_y_nodes = pinch_box.pinched_y
self.layer_list[self.currently_active_layer_id].x_nodes = pinch_box.pinched_x
self.layer_list[self.currently_active_layer_id].y_nodes = pinch_box.pinched_y
self.layer_getting_pinched = pinch_box.layer_getting_pinched
# SET THE PINCH ATTRIBUTES IN THE LAYER OBJECTS
self.layer_list[self.layer_getting_pinched].pinched = True
if self.currently_active_layer_id in self.layer_list[self.layer_getting_pinched].pinched_list:
pass
else:
self.layer_list[self.layer_getting_pinched].pinched_list.append(self.currently_active_layer_id)
# SET THE PINCH ATTRIBUTES IN THE LAYER OBJECTS
self.layer_list[self.currently_active_layer_id].pinched = True
if self.layer_getting_pinched in self.layer_list[self.currently_active_layer_id].pinched_list:
pass
else:
self.layer_list[self.currently_active_layer_id].pinched_list.append(self.layer_getting_pinched)
# SET THE CURRENTLY SELECTED (RED) ACTIVE NODE
self.current_node.set_offsets([self.layer_list[self.currently_active_layer_id].x_nodes[0],
self.layer_list[self.currently_active_layer_id].y_nodes[0]])
# REDRAW MODEL
self.update_layer_data()
self.draw()
def depinch_layer(self, event):
"""PINCH OUT A FIXED LAYER OVER A GIVEN X RANGE"""
# CREATE AND SHOW POP OUT BOX
depinch_box = DepinchDialog(self, -1, 'Depinch layer', self.layer_list, self.currently_active_layer_id,
self.total_layer_count)
answer = depinch_box.ShowModal()
# SET NEW NODE VALUES
self.current_x_nodes = depinch_box.depinched_x
self.current_y_nodes = depinch_box.depinched_y
self.layer_list[self.currently_active_layer_id].x_nodes = depinch_box.depinched_x
self.layer_list[self.currently_active_layer_id].y_nodes = depinch_box.depinched_y
        # CHECK IF THE PINCHED LAYER CONNECTIONS ARE STILL ACTIVE. IF NOT, THEN REMOVE THEM FROM THE LIST
# SET THE CURRENT LAYER NODES (USE X+Y FOR COMPARISON)
current_nodes = self.layer_list[self.currently_active_layer_id].x_nodes \
+ self.layer_list[self.currently_active_layer_id].y_nodes
# SET THE PINCHED LAYERS NODES (USE X+Y FOR COMPARISON)
for l in self.layer_list[self.currently_active_layer_id].pinched_list:
pinched_nodes = self.layer_list[l].x_nodes + self.layer_list[l].y_nodes
# LOOP THROUGH NODES AND COMPARE
still_pinch_connected = False
for node in current_nodes:
for node2 in pinched_nodes:
if node == node2:
still_pinch_connected = True
# IF THE LAYERS ARE NO LONGER CONNECTED, THEN REMOVE THE CONNECTION IN THE PINCH LIST
            if still_pinch_connected is False:
self.layer_list[self.currently_active_layer_id].pinched_list.remove(l)
self.layer_list[l].pinched_list.remove(self.currently_active_layer_id)
# IF THE LAYER NO LONGER HAS ANY CONNECTIONS, THEN SET PINCHED SWITCH AS FALSE
if not self.layer_list[l].pinched_list:
self.layer_list[l].pinched = False
# IF THE LAYER NO LONGER HAS ANY CONNECTIONS, THEN SET PINCHED SWITCH AS FALSE
if not self.layer_list[self.currently_active_layer_id].pinched_list:
self.layer_list[self.currently_active_layer_id].pinched = False
# SET THE CURRENTLY SELECTED (RED) ACTIVE NODE
self.current_node.set_offsets([self.layer_list[self.currently_active_layer_id].x_nodes[0],
self.layer_list[self.currently_active_layer_id].y_nodes[0]])
# REDRAW MODEL
self.update_layer_data()
self.draw()
def bulk_shift(self, event):
"""APPLY A BULK X AND/OR Y SHIFT TO A GIVEN LAYERS NODES"""
# CREATE AND SHOW POP OUT BOX
bulk_shift_box = BulkShiftDialog(self, -1, 'Layer bulk shift', self.layer_list, self.currently_active_layer_id)
answer = bulk_shift_box.ShowModal()
# SET NEW NODE VALUES
self.current_x_nodes = bulk_shift_box.new_x
self.current_y_nodes = bulk_shift_box.new_y
self.layer_list[self.currently_active_layer_id].x_nodes = bulk_shift_box.new_x
self.layer_list[self.currently_active_layer_id].y_nodes = bulk_shift_box.new_y
# SET THE CURRENTLY SELECTED (RED) ACTIVE NODE
self.current_node.set_offsets([self.layer_list[self.currently_active_layer_id].x_nodes[0],
self.layer_list[self.currently_active_layer_id].y_nodes[0]])
# REDRAW MODEL
self.update_layer_data()
self.draw()
def new_layer(self, event):
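        """CREATE A NEW FIXED OR FLOATING LAYER. CALLS class NewLayerDialog"""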
new_layer_dialogbox = NewLayerDialog(self, -1, 'Create New Layer')
answer = new_layer_dialogbox.ShowModal()
if new_layer_dialogbox.fixed:
# CREATING A NEW FIXED LAYER
# INCREMENT THE CURRENT LAYER INDEX VALUE (self.currently_active_layer_id)
self.total_layer_count += 1
# SET THE ACTIVE LAYER AS THE NEWLY CREATED LAYER
self.currently_active_layer_id = self.total_layer_count
# CREATE A NEW LAYER OBJECT
new_layer = Layer()
# SET SOME OF THE NEW LAYERS ATTRIBUTES
new_layer.id = self.currently_active_layer_id
new_layer.name = str('layer %s') % self.currently_active_layer_id
new_layer.type = str('fixed')
new_layer.include_in_calculations_switch = True
# ADD NEW LAYER TO THE LAYER TREE DISPLAY
self.tree_items.append('layer %s' % (int(self.currently_active_layer_id)))
self.item = 'layer %s' % (int(self.currently_active_layer_id))
self.add_new_tree_nodes(self.root, self.item, self.currently_active_layer_id)
# DETERMINE WHICH LAYER IS THE LAST PREVIOUS "FIXED LAYER"; SET THIS LAYER AS "previous_fixed_layer"
if self.total_layer_count > 0:
for i in range(0, self.total_layer_count):
if self.layer_list[i].type == 'fixed':
previous_fixed_layer = i
else:
continue
else:
previous_fixed_layer = 0
# SET NEW LAYER NODES
layer_above_x = np.array(self.layer_list[previous_fixed_layer].x_nodes)
layer_above_y = np.array(self.layer_list[previous_fixed_layer].y_nodes)
new_layer_thickness = new_layer_dialogbox.new_thickness
new_layer.x_nodes = layer_above_x
new_layer.y_nodes = layer_above_y + new_layer_thickness
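            # NB: THE NEW FIXED LAYER INHERITS THE PREVIOUS FIXED LAYER GEOMETRY,
            # SHIFTED DOWN BY THE USER-SUPPLIED THICKNESS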
# CREATE LAYER LINE
new_layer.node_mpl_actor = self.model_frame.plot(new_layer.x_nodes, new_layer.y_nodes, color='blue',
linewidth=1.0, alpha=1.0)
# CREATE LAYER POLYGON FILL
new_layer.polygon_mpl_actor = self.model_frame.fill(new_layer.x_nodes, new_layer.y_nodes, color='blue',
alpha=self.layer_transparency, closed=True,
linewidth=None, ec=None)
# SET CURRENTLY ACTIVE LAYER AS THE NEW LAYER
self.currently_active_layer.set_xdata(new_layer.x_nodes)
self.currently_active_layer.set_ydata(new_layer.y_nodes)
self.currently_active_layer.set_color(new_layer.color)
# SET CURRENTLY ACTIVE LAYER NODE OBJECTS
self.current_x_nodes = new_layer.x_nodes
self.current_y_nodes = new_layer.y_nodes
# SET THE CURRENTLY SELECTED (RED) ACTIVE NODE
self.current_node.set_offsets([new_layer.x_nodes[0], new_layer.y_nodes[0]])
# SET CURRENT ATTRIBUTE INPUTS IN LEFT PANEL
self.density_input.SetValue(new_layer.density)
self.ref_density_input.SetValue(new_layer.reference_density)
self.susceptibility_input.SetValue(new_layer.susceptibility)
self.angle_a_input.SetValue(new_layer.angle_a)
self.angle_b_input.SetValue(new_layer.angle_b)
self.angle_c_input.SetValue(new_layer.angle_c)
self.earth_field_input.SetValue(new_layer.earth_field)
# APPEND NEW LAYER TO THE LAYER LIST
self.layer_list.append(new_layer)
# UPDATE GMG FRAME
self.update_layer_data()
self.draw()
elif not new_layer_dialogbox.fixed:
            # CREATING A NEW FLOATING LAYER
self.new_plotx = []
self.new_ploty = []
self.click_count = 0
self.new_layer_nodes = None
self.new_layer_fill = None
# SWITCH ON MOUSE CLICK CAPTURE MODE TO CREATE NEW LAYER (SEE button_release FUNC FOR CONTINUATION OF CODE)
self.select_new_layer_nodes = True
else:
# USER CHANGED THEIR MIND - NO NEW LAYER ADDED = EXIT FUNC
pass
def create_new_floating_layer(self):
"""CREATE A NEW FLOATING LAYER USING FOUR USER INPUT MOUSE CLICKS"""
# INCREMENT THE TOTAL LAYER COUNT
self.total_layer_count += 1
# SET CURRENTLY ACTIVE LAYER AS THE NEWLY CREATED LAYER
self.currently_active_layer_id = self.total_layer_count
# CREATE NEW LAYER OBJECT
new_layer = Layer()
# SOURCE NEW NODES FROM USER CLICKS
new_layer.x_nodes = self.new_plotx
new_layer.y_nodes = self.new_ploty
# SET CURRENTLY ACTIVE LAYER NODE OBJECTS
self.current_x_nodes = new_layer.x_nodes
self.current_y_nodes = new_layer.y_nodes
# SET SOME OF THE NEW LAYERS ATTRIBUTES
new_layer.id = self.currently_active_layer_id
new_layer.name = str('layer %s') % self.currently_active_layer_id
new_layer.type = str('floating')
new_layer.include_in_calculations_switch = True
# ADD NEW LAYER TO THE LAYER TREE DISPLAY
self.tree_items.append('layer %s' % (int(self.currently_active_layer_id)))
self.item = 'layer %s' % (int(self.currently_active_layer_id))
self.add_new_tree_nodes(self.root, self.item, self.currently_active_layer_id)
# CREATE LAYER LINE
new_layer.node_mpl_actor = self.model_frame.plot(new_layer.x_nodes, new_layer.y_nodes, color='blue',
linewidth=1.0, alpha=1.0)
# CREATE LAYER POLYGON FILL
new_layer.polygon_mpl_actor = self.model_frame.fill(new_layer.x_nodes, new_layer.y_nodes, color='blue',
alpha=self.layer_transparency, closed=True,
linewidth=None, ec=None)
# SET THE CURRENTLY SELECTED (RED) ACTIVE NODE
self.current_node.set_offsets([new_layer.x_nodes[0], new_layer.y_nodes[0]])
# SET CURRENT ATTRIBUTE INPUTS IN LEFT PANEL
self.density_input.SetValue(new_layer.density)
self.ref_density_input.SetValue(new_layer.reference_density)
self.susceptibility_input.SetValue(new_layer.susceptibility)
self.angle_a_input.SetValue(new_layer.angle_a)
self.angle_b_input.SetValue(new_layer.angle_b)
self.angle_c_input.SetValue(new_layer.angle_c)
self.earth_field_input.SetValue(new_layer.earth_field)
# APPEND NEW LAYER TO THE LAYER LIST
self.layer_list.append(new_layer)
# UPDATE MODEL
self.update_layer_data()
self.run_algorithms()
self.draw()
def load_layer(self, event):
"""LOAD A NEW FLOATING LAYER FROM A SPACE DELIMITED XY TEXT FILE"""
open_file_dialog = wx.FileDialog(self, "Open Layer", "", "", "Layer XY files (*.txt)|*.txt",
wx.FD_OPEN | wx.FD_FILE_MUST_EXIST)
if open_file_dialog.ShowModal() == wx.ID_CANCEL:
            return  # THE USER CHANGED THEIR MIND
file_in = open_file_dialog.GetPath()
new_layer_nodes = np.genfromtxt(file_in, autostrip=True, delimiter=' ', dtype=float)
        # INCREMENT THE TOTAL LAYER COUNT AND SET THE NEW LAYER AS THE CURRENTLY ACTIVE LAYER
        self.total_layer_count += 1
        self.currently_active_layer_id = self.total_layer_count
# CREATE NEW LAYER OBJECT
new_layer = Layer()
# SOURCE NEW NODES FROM USER CLICKS
new_layer.x_nodes = new_layer_nodes[:, 0]
new_layer.y_nodes = new_layer_nodes[:, 1]
# SET CURRENTLY ACTIVE LAYER NODE OBJECTS
self.current_x_nodes = new_layer.x_nodes
self.current_y_nodes = new_layer.y_nodes
# SET SOME OF THE NEW LAYERS ATTRIBUTES
new_layer.id = self.currently_active_layer_id
new_layer.name = str('layer %s') % self.currently_active_layer_id
new_layer.type = str('floating')
new_layer.include_in_calculations_switch = True
# ADD NEW LAYER TO THE LAYER TREE DISPLAY
self.tree_items.append('layer %s' % (int(self.currently_active_layer_id)))
self.item = 'layer %s' % (int(self.currently_active_layer_id))
self.add_new_tree_nodes(self.root, self.item, self.currently_active_layer_id)
# CREATE LAYER LINE
new_layer.node_mpl_actor = self.model_frame.plot(new_layer.x_nodes, new_layer.y_nodes, color='blue',
linewidth=1.0, alpha=1.0)
# CREATE LAYER POLYGON FILL
new_layer.polygon_mpl_actor = self.model_frame.fill(new_layer.x_nodes, new_layer.y_nodes, color='blue',
alpha=self.layer_transparency, closed=True,
linewidth=None, ec=None)
# SET THE CURRENTLY SELECTED (RED) ACTIVE NODE
self.current_node.set_offsets([new_layer.x_nodes[0], new_layer.y_nodes[0]])
# SET CURRENT ATTRIBUTE INPUTS IN LEFT PANEL
self.density_input.SetValue(new_layer.density)
self.ref_density_input.SetValue(new_layer.reference_density)
self.susceptibility_input.SetValue(new_layer.susceptibility)
self.angle_a_input.SetValue(new_layer.angle_a)
self.angle_b_input.SetValue(new_layer.angle_b)
self.angle_c_input.SetValue(new_layer.angle_c)
self.earth_field_input.SetValue(new_layer.earth_field)
# APPEND NEW LAYER TO THE LAYER LIST
self.layer_list.append(new_layer)
# UPDATE MODEL
self.update_layer_data()
self.run_algorithms()
self.draw()
def delete_layer(self, event):
"""Delete LAYER DATA"""
# CANNOT DELETE THE FIRST LAYER
if self.total_layer_count == 1:
msg = "Sorry - layer one cannot be deleted!"
dlg = wx.MessageDialog(self, msg, "Warning", wx.OK | wx.ICON_INFORMATION)
result = dlg.ShowModal()
dlg.Destroy()
else:
# HIDE THE LAYER MPL ACTORS
self.layer_list[self.currently_active_layer_id].node_mpl_actor[0].set_visible(False)
self.layer_list[self.currently_active_layer_id].polygon_mpl_actor[0].set_visible(False)
# DELETE THE LAYER OBJECT
del self.layer_list[self.currently_active_layer_id]
# REMOVE THE LAYER TREE ITEMS
del self.tree_items[self.currently_active_layer_id]
layers = self.tree.GetRootItem().GetChildren()
self.tree.Delete(layers[self.currently_active_layer_id - 1])
# RESET TREE ITEM ID'S
layers = self.tree.GetRootItem().GetChildren()
for i in range(len(layers)):
self.tree.SetPyData(layers[i], i + 1)
# DECREASE LAYER ID BY 1 FOR EACH LAYER THAT COMES AFTER THE ONE BEING DELETED
# (I.E. IF LAYER 3 IS DEL; THEN LAYER 4 ID BECOMES 3 etc)
try:
for i in range(self.currently_active_layer_id, len(self.layer_list)):
self.layer_list[i].id -= 1
except IndexError:
pass
# DECREMENT THE TOTAL LAYER COUNT
self.total_layer_count -= 1
# SET CURRENTLY ACTIVE LAYER TO LAYER 1
self.currently_active_layer_id = 1
# SET OBJECTS WITH THE CHOSEN LAYER
self.density_input.SetValue(0.001 * self.layer_list[self.currently_active_layer_id].density)
self.ref_density_input.SetValue(0.001 * self.layer_list[self.currently_active_layer_id].reference_density)
self.susceptibility_input.SetValue(self.layer_list[self.currently_active_layer_id].susceptibility)
self.angle_a_input.SetValue(self.layer_list[self.currently_active_layer_id].angle_a)
self.angle_b_input.SetValue(self.layer_list[self.currently_active_layer_id].angle_b)
self.angle_c_input.SetValue(self.layer_list[self.currently_active_layer_id].angle_c)
self.earth_field_input.SetValue(self.layer_list[self.currently_active_layer_id].earth_field)
# GET THE XY NODES FROM THE ACTIVE LAYER AND SET THE CURRENTLY ACTIVE NODES (I.E. MAKE THEM INTERACTIVE)
self.current_x_nodes = self.layer_list[self.currently_active_layer_id].x_nodes
self.current_y_nodes = self.layer_list[self.currently_active_layer_id].y_nodes
# UPDATE MODEL
self.draw()
self.update_layer_data()
# SET THE CURRENTLY SELECTED (RED) ACTIVE NODE
self.current_node.set_offsets([self.layer_list[self.currently_active_layer_id].x_nodes[0],
self.layer_list[self.currently_active_layer_id].y_nodes[0]])
self.draw()
def write_layers_xy(self, event):
"""OUTPUT ALL LAYERS XY DATA TO INDIVIDUAL TEXT FILES"""
# CREATE OUTPUT FILE
save_file_dialog = wx.FileDialog(self, "Save LAYER XY", "", "", "xy files (*.xy)|*.xy",
wx.FD_SAVE | wx.FD_OVERWRITE_PROMPT)
if save_file_dialog.ShowModal() == wx.ID_CANCEL:
return  # THE USER CHANGED THEIR MIND
# OUTPUT FILE
all_layers_output_file = save_file_dialog.GetPath()
# THE OUTPUT DIRECTORY
output_dir = os.path.dirname(all_layers_output_file)
# NOW WRITE OUT THE DATA
with open(all_layers_output_file, 'w', newline='') as f:
try:
# OPEN "ALL LAYERS" OUTPUT FILE
out = csv.writer(f, delimiter=' ')
# LOOP THROUGH THE LAYERS
for i in range(1, self.total_layer_count + 1):
# DEFINE THE LAYER NODES (REMOVE FIXED LAYER PADDING NODES)
if self.layer_list[i].type == 'fixed':
data = [self.layer_list[i].x_nodes[1:-1], self.layer_list[i].y_nodes[1:-1]]
layer_write = list(zip(self.layer_list[i].x_nodes[1:-1], self.layer_list[i].y_nodes[1:-1]))
else:
data = [self.layer_list[i].x_nodes, self.layer_list[i].y_nodes]
layer_write = list(zip(self.layer_list[i].x_nodes, self.layer_list[i].y_nodes))
# OUTPUT THE LAYER NAME TO THE ALL LAYERS FILE
f.write(">" + self.loaded_tree_items[i] + "\n")
# WRITE LAYER TO "ALL LAYERS" FILE
out.writerows(list(zip(*data)))
# SAVE THE LAYER AS AN INDIVIDUAL FILE
np.savetxt(output_dir + '/' + self.loaded_tree_items[i] + '.xy', layer_write, delimiter=' ',
fmt='%f %f')
# CLOSE THE "ALL LAYERS" OUTPUT FILE
f.close()
except IndexError:
f.close()
pass
def write_c_xy(self, event):
# CREATE OUTPUT FILE
save_file_dialog = wx.FileDialog(self, "Save c.in RayInvr file", "", "", "in files (*.in)|*.in",
wx.FD_SAVE | wx.FD_OVERWRITE_PROMPT)
if save_file_dialog.ShowModal() == wx.ID_CANCEL:
return # THE USER CHANGED THEIR MIND
# NOW WRITE OUT THE DATA
output_stream = save_file_dialog.GetPath()
with open(output_stream, 'w') as f:
# LAYER NODES
for i in range(0, self.total_layer_count + 1):
f.write('B {0}\n'.format(i + 1))
# DEFINE THE LAYER NODES (REMOVE FIXED LAYER PADDING NODES)
if self.layer_list[i].type == 'fixed':
x_nodes = self.layer_list[i].x_nodes[1:-1]
y_nodes = self.layer_list[i].y_nodes[1:-1]
else:
x_nodes = self.layer_list[i].x_nodes
y_nodes = self.layer_list[i].y_nodes
# SAVE FILE
data = list(zip(x_nodes, y_nodes, np.ones(len(y_nodes))))
np.savetxt(f, data, delimiter=' ', fmt='%6.02f %3.02f %1d')
# VELOCITY NODES
for i in range(0, self.total_layer_count):
density = self.layer_list[i].density
# CONVERT DENSITY TO VELOCITY USING GARDNER'S RULE
velocity = round((m.pow((density / 1670.), (1. / .25))), 2)
# CONVERT DENSITY TO VELOCITY USING NAFE-DRAKE EQUATION
# velocity = (1.6612*density) - (0.4721*density)**2 + (0.0671*density)**3 -
# (0.0043*density)**4 + (0.000106*density)**5
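# WORKED EXAMPLE (ILLUSTRATIVE NUMBERS ONLY): WITH DENSITY IN kg/m^3, v = (density / 1670.) ** 4,
# SO density = 2300 GIVES v = (2300 / 1670.) ** 4 ~= 3.60 km/s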
# DEFINE THE LAYER NODES (REMOVE FIXED LAYER PADDING NODES)
if self.layer_list[i].type == 'fixed':
x_nodes = self.layer_list[i].x_nodes[1:-1]
y_nodes = self.layer_list[i].y_nodes[1:-1]
else:
x_nodes = self.layer_list[i].x_nodes
y_nodes = self.layer_list[i].y_nodes
# FORMAT c.in FILE
f.write('B {0}\n'.format(i))
data = list(zip(x_nodes, np.linspace(velocity, velocity, len(y_nodes)), np.ones(len(x_nodes)),
np.linspace(velocity, velocity, len(x_nodes)), np.ones(len(x_nodes))))
# OUTPUT FILE
np.savetxt(f, data, delimiter=' ', fmt='%6.02f %3.02f %1d %3.02f %1d')
def capture_coordinates(self, event):
if self.capture is False:
self.capture = True
# CREATE INSTANCE OF CAPTURE COORDINATES
self.capture_window = CaptureCoordinates(self, -1, 'Capture Coordinates')
self.capture_window.Show(True)
# FAULT MODE CONTROLS~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def toogle_fault_mode(self, event):
"""SWITCH FAULT PICKING MODE ON AND OFF"""
if self.fault_picking_switch is True:
self.fault_picking_switch = False
elif self.fault_picking_switch is False:
self.fault_picking_switch = True
def pick_new_fault(self, event):
"""FAULT PICKING/LINE DRAWING MODE"""
# CHECK IF FAULT PICKING MODE IS ON
if self.fault_picking_switch is False:
MessageDialog(self, -1, "Faulting picking mode is not activated.\nTurn on fault picking mode first.",
"Fault picker")
else:
# PROMPT NEW FAULT DIALOG BOX
if self.total_fault_count == 0:
# CREATE NEW CURRENT FAULT GRAPHIC
self.currently_active_fault, = self.model_frame.plot([-100000, -100000], [-100000, -100000], marker='s',
color='green', linewidth=0.75, alpha=1.0, zorder=2,
picker=True)
# START CREATE NEW FAULT PROCESS
MessageDialog(self, -1, "Select three nodes to create a new fault", "Select New Fault")
self.select_new_fault_nodes = True
self.new_plotx = []
self.new_ploty = []
self.click_count = 0
self.new_layer_nodes = None
# NOW WAIT FOR USER NODE CLICKS - ON THE THIRD CLICK THE button_release FUNC
# WILL ACTIVATE create_new_fault()
def create_new_fault(self):
"""CREATE A NEW FAULT USING THREE USER INPUT MOUSE CLICKS"""
# SET CURRENTLY ACTIVE LAYER AS THE NEWLY CREATED LAYER
self.currently_active_fault_id = self.total_fault_count
# CREATE NEW LAYER OBJECT
new_fault = Fault()
# SOURCE NEW NODES FROM USER CLICKS
new_fault.id = self.currently_active_fault_id
new_fault.name = str('Fault')
new_fault.x_nodes = self.new_plotx
new_fault.y_nodes = self.new_ploty
# SET CURRENTLY ACTIVE LAYER NODE OBJECTS
self.current_x_nodes = new_fault.x_nodes
self.current_y_nodes = new_fault.y_nodes
# SET THE NEW FAULT'S NAME (ITS id WAS ALREADY SET FROM THE FAULT COUNT ABOVE)
new_fault.name = str('Fault %s') % self.currently_active_fault_id
# CREATE LAYER LINE
new_fault.mpl_actor = self.model_frame.plot(new_fault.x_nodes, new_fault.y_nodes, color='green',
marker='o', linewidth=0.5, zorder=1, alpha=1.0)
# APPEND THE NEW FAULT TO THE FAULT TREE SIDE PANEL USING add_new_tree_nodes FUNC
# LIST OF FAULT NAMES
self.fault_tree_items.append('fault %s' % (int(self.currently_active_fault_id)))
self.fault_item = 'fault %s' % (int(self.currently_active_fault_id))
self.add_new_tree_nodes(self.fault_tree_root, self.fault_item, self.currently_active_fault_id)
self.fault_tree.SetSpacing(40)
# self.fold_panel_three.Collapse()
# self.fold_panel_three.Expand()
# UPDATE CURRENT PLOT GRAPHICS
self.currently_active_fault.set_data(new_fault.x_nodes, new_fault.y_nodes)
# UPDATE CURRENT NODE RED DOT GRAPHIC
self.current_node.set_offsets([new_fault.x_nodes[0], new_fault.y_nodes[0]])
# APPEND NEW LAYER TO THE LAYER LIST
self.fault_list.append(new_fault)
# INCREMENT THE TOTAL LAYER COUNT
self.total_fault_count += 1
# UPDATE MODEL
self.draw()
def fault_mode_key_press(self, event):
"""KEY PRESS CALLBACKS WHEN FAULT MODE IS ACTIVATED"""
# i = INSERT NEW NODE AT MOUSE POSITION
if event.key == 'i':
if event.inaxes is None:
return
# INSERT NEW NODE INTO XY LIST
self.xt = np.insert(self.xt, [self.index_arg + 1], event.xdata)
self.yt = np.insert(self.yt, [self.index_arg + 1], event.ydata)
# UPDATE THE FAULT LIST RECORDS
self.fault_list[self.currently_active_fault_id].x_nodes = self.xt
self.fault_list[self.currently_active_fault_id].y_nodes = self.yt
# UPDATE FAULT GRAPHICS
self.fault_list[self.currently_active_fault_id].mpl_actor[0].set_xdata(self.xt)
self.fault_list[self.currently_active_fault_id].mpl_actor[0].set_ydata(self.yt)
# UPDATE CURRENT FAULT OVERLAY GRAPHIC
self.currently_active_fault.set_data(self.xt, self.yt)
# d = DELETE NODE AT MOUSE POSITION
if event.key == 'd':
if event.inaxes is None:
return
# FIND NODE CLOSEST TO CURSOR LOCATION
d = np.sqrt((self.xt - event.xdata) ** 2 + (self.yt - event.ydata) ** 2)
self.index_arg = np.argmin(d)
self.distance = d[self.index_arg]
if self.index_arg == 0 or \
self.index_arg == (len(self.fault_list[self.currently_active_fault_id].x_nodes) - 1):
# PREVENT END NODES BEING DELETED
return 0
if self.distance >= self.node_click_limit:
# CLICK WAS TOO FAR AWAY FROM A NODE TO DELETE IT
return 0
else:
# DELETE NODE BY RECREATING XY DATA WITHOUT CURRENT NODE
self.xt = [tup for i, tup in enumerate(self.xt) if i != self.index_arg] # DELETE X
self.yt = [tup for i, tup in enumerate(self.yt) if i != self.index_arg] # DELETE Y
# UPDATE THE FAULT LIST RECORDS
self.fault_list[self.currently_active_fault_id].x_nodes = self.xt
self.fault_list[self.currently_active_fault_id].y_nodes = self.yt
# UPDATE FAULT GRAPHICS
self.fault_list[self.currently_active_fault_id].mpl_actor[0].set_xdata(self.xt)
self.fault_list[self.currently_active_fault_id].mpl_actor[0].set_ydata(self.yt)
# UPDATE CURRENT FAULT OVERLAY GRAPHIC
self.currently_active_fault.set_data(self.xt, self.yt)
# RESET CURRENT NODE POSITION TO FIRST NODE
self.current_node.set_offsets([self.xt[0], self.yt[0]])
# UPDATE GMG
self.update_layer_data()
# < = DECREMENT WHICH FAULT IS BEING EDITED
if event.key == ',':
if self.currently_active_fault_id <= self.total_fault_count - 1 and self.currently_active_fault_id > 0:
# DECREMENT TO PREVIOUS FAULT
self.currently_active_fault_id -= 1
else:
# GO TO NEWEST FAULT
self.currently_active_fault_id = self.total_fault_count - 1
# UPDATE CURRENT PLOT GRAPHICS
self.currently_active_fault.set_data(self.fault_list[self.currently_active_fault_id].x_nodes,
self.fault_list[self.currently_active_fault_id].y_nodes)
self.xt = self.fault_list[self.currently_active_fault_id].x_nodes
self.yt = self.fault_list[self.currently_active_fault_id].y_nodes
# > = INCREMENT WHICH FAULT IS BEING EDITED
if event.key == '.':
if self.currently_active_fault_id < self.total_fault_count - 1:
# INCREMENT TO NEXT FAULT
self.currently_active_fault_id += 1
elif self.currently_active_fault_id == self.total_fault_count - 1:
# GO BACK TO FIRST FAULT
self.currently_active_fault_id = 0
# UPDATE CURRENT PLOT GRAPHICS
self.currently_active_fault.set_data(self.fault_list[self.currently_active_fault_id].x_nodes,
self.fault_list[self.currently_active_fault_id].y_nodes)
self.xt = self.fault_list[self.currently_active_fault_id].x_nodes
self.yt = self.fault_list[self.currently_active_fault_id].y_nodes
# UPDATE GMG
self.update_layer_data()
def on_fault_activated(self, event):
"""RESPONSE WHEN A FAULT NAME IS SELECTED"""
# GET THE SELECTED FAULT INDEX NUMBER
self.currently_active_fault_id = self.fault_tree.GetPyData(event.GetItem())
if self.fault_picking_switch is False:
self.fault_picking_switch = True
# SET CHECKBOX AS CHECKED
self.fault_tree.GetSelection().Check(checked=True)
# UPDATE CURRENT PLOT GRAPHICS
self.currently_active_fault.set_data(self.fault_list[self.currently_active_fault_id].x_nodes,
self.fault_list[self.currently_active_fault_id].y_nodes)
self.xt = self.fault_list[self.currently_active_fault_id].x_nodes
self.yt = self.fault_list[self.currently_active_fault_id].y_nodes
# UPDATE GRAPHICS WITH CURRENT FAULT SELECTED
self.update_layer_data()
def fault_checked(self, event):
"""TOGGLE WHETHER OR NOT A FAULT WILL BE PLOTTED IN THE MODEL FIGURE"""
i = self.fault_tree.GetPyData(event.GetItem())
if self.faults[i][0].get_visible() == True:
# HIDE FAULT
self.faults[i][0].set_visible(False)
self.currently_active_fault.set_visible(False)
else:
# SHOW FAULT
self.faults[i][0].set_visible(True)
self.currently_active_fault.set_visible(True)
# UPDATE FIGURE
self.draw()
def on_fault_tree_right_click_down(self, event):
"""WHEN A FAULT IN THE FAULT TREE MENU IS RIGHT CLICKED"""
# FIRST RUN on_fault_activated
self.on_fault_activated(event)
# CREATE POPOUT MENU WITH OPTIONS AND BIND OPTIONS TO ACTIONS
menu = wx.Menu()
item1 = menu.Append(wx.ID_ANY, "Change fault colour")
item2 = menu.Append(wx.ID_ANY, "Rename fault")
self.Bind(wx.EVT_MENU, self.change_color, item1)
self.Bind(wx.EVT_MENU, self.rename_fault, item2)
self.PopupMenu(menu)
menu.Destroy()
def rename_fault(self, event):
"""USE A POPUP MENU TO RENAME THE FAULT"""
# CREATE POP OUT MENU AND SHOW
fault_name_box = LayerNameDialog(self, -1, 'Rename fault',
self.fault_tree_items[self.currently_active_fault_id])
new = fault_name_box.ShowModal()
# WAIT FOR USER TO CLOSE POP OUT
# GET THE NEW LAYER NAME FROM POP OUT
new_name = fault_name_box.name
# SET THE TREE AND LAYER OBJECT WITH THE NEW NAME
current_tree_items = self.fault_tree.GetRootItem().GetChildren()
self.fault_tree.SetItemText(current_tree_items[self.currently_active_fault_id], str(new_name))
self.fault_tree_items[self.currently_active_fault_id] = str(new_name)
self.fault_list[self.currently_active_fault_id].name = str(new_name)
# LAYER AND MODEL ATTRIBUTE CONTROLS~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def set_density(self, value):
self.layer_list[self.currently_active_layer_id].density = float(self.density_input.GetValue() * 1000.)
def set_reference_density(self, value):
self.layer_list[self.currently_active_layer_id].reference_density = \
float(self.ref_density_input.GetValue() * 1000.)
def set_background_density(self, event):
grav_box = SetBackgroundDensityDialog(self, -1, 'Set background density')
answer = grav_box.ShowModal()
self.background_density_upper = float(grav_box.background_density_upper)
for i in range(0, self.total_layer_count):
self.layer_list[i].reference_density = float(self.background_density_upper) * 1000.
# self.background_density_upper = float((grav_box.background_density_lower))
# self.background_density_upper = float((grav_box.background_density_lid))
self.absolute_densities = True
self.draw()
def set_susceptibility(self, value):
self.layer_list[self.currently_active_layer_id].susceptibility = float(self.susceptibility_input.GetValue())
def set_angle_a(self, value):
self.layer_list[self.currently_active_layer_id].angle_a = float(self.angle_a_input.GetValue())
def set_angle_b(self, value):
self.layer_list[self.currently_active_layer_id].angle_b = float(self.angle_b_input.GetValue())
def set_angle_c(self, value):
self.layer_list[self.currently_active_layer_id].angle_c = float(self.angle_c_input.GetValue())
def set_earth_field(self, value):
self.layer_list[self.currently_active_layer_id].earth_field = float(self.earth_field_input.GetValue())
def set_text_size(self, value):
"""GET NEW TEXT SIZE"""
self.textsize = float(self.text_size_input.GetValue())
# WELL DATA
# LOOP THROUGH ALL WELL NAMES
for i in range(len(self.well_data_list)):
self.well_data_list[i].text_size = self.textsize
self.well_data_list[i].mpl_actor_name.set_size(self.textsize)
# LOOP THROUGH ALL WELL HORIZON LABELS
for l in range(len(self.well_data_list[i].labels_list)):
if self.well_data_list[i].labels_list[l] is not None:
self.well_data_list[i].labels_list[l].set_size(self.textsize)
# LOOP THROUGH OUTCROP DATA LABELS
if self.outcrop_data_count > 0:
for i in range(self.outcrop_data_count):
if self.outcrop_data_list[i] is not None:
for t in range(len(self.outcrop_data_list[i].labels)):
self.outcrop_data_list[i].labels[t].set_fontsize(self.textsize)
# REDRAW ANNOTATIONS WITH NEW TEXT SIZE
self.draw()
def set_obs_grav_rms(self, value):
"""SET THE DATA TO BE USED FOR CALCULATING THE RMS MISTFIT"""
selection = SetObsRmsDialog(self, -1, 'Set RMS Input', self.observed_gravity_list)
answer = selection.ShowModal()
for i in range(0, len(self.observed_gravity_list)):
if self.observed_gravity_list[i].name == selection.obs_name:
self.obs_gravity_data_for_rms = self.observed_gravity_list[i].data
def set_obs_mag_rms(self, value):
"""SET THE DATA TO BE USED FOR CALCULATING THE RMS MISTFIT"""
selection = SetObsRmsDialog(self, -1, 'Set RMS Input', self.observed_magnetic_list)
answer = selection.ShowModal()
for i in range(0, len(self.observed_magnetic_list)):
if self.observed_magnetic_list[i].name == selection.obs_name:
self.obs_mag_data_for_rms = self.observed_magnetic_list[i].data
def model_rms(self, xp):
"""CALCULATE RMS MISFIT OF OBSERVED VS CALCULATED"""
if len(self.obs_gravity_data_for_rms) != 0 and self.calc_grav_switch is True:
x = xp * 0.001
y = self.predicted_gravity
self.gravity_rms_value, self.grav_residuals = model_stats.rms(self.obs_gravity_data_for_rms[:, 0],
self.obs_gravity_data_for_rms[:, 1], x, y)
else:
pass
if len(self.obs_mag_data_for_rms) != 0 and self.calc_mag_switch is True:
x = self.xp * 0.001
y = self.predicted_nt
self.magnetic_rms_value, self.mag_residuals = model_stats.rms(self.obs_mag_data_for_rms[:, 0],
self.obs_mag_data_for_rms[:, 1], x, y)
else:
pass
# LAYER ATTRIBUTE TABLE~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def open_attribute_table(self, event):
self.attribute_table = AttributeEditor(self, -1, 'Attribute editor', self.tree_items, self.layer_list)
self.attribute_table.Show(True)
def attribute_set(self, new_tree_items, new_layer_list):
"""UPDATE GMG ATTRIBUTES WITH NEW ATTRIBUTES FROM THE ATTRIBUTE TABLE"""
# UPDATE MAIN FRAME TREE ITEMS (RENAME THE ITEMS)
current_tree_items = self.tree.GetRootItem().GetChildren()
for i in range(0, len(self.tree_items) - 1):
self.tree.SetItemText(current_tree_items[i], new_tree_items[i + 1])
# UPDATE MAIN FRAME ATTRIBUTES
for l in range(0, len(self.layer_list)):
self.layer_list[l].density = new_layer_list[l].density
self.layer_list[l].reference_density = new_layer_list[l].reference_density
self.layer_list[l].susceptibility = new_layer_list[l].susceptibility
self.layer_list[l].angle_a = new_layer_list[l].angle_a
self.layer_list[l].angle_b = new_layer_list[l].angle_b
self.layer_list[l].angle_c = new_layer_list[l].angle_c
self.layer_list[l].earth_field = new_layer_list[l].earth_field
self.layer_list[l].color = new_layer_list[l].color
# UPDATE LAYER ATTRIBUTE INPUTS
self.density_input.SetValue(0.001 * self.layer_list[self.currently_active_layer_id].density)
self.ref_density_input.SetValue(0.001 * self.layer_list[self.currently_active_layer_id].reference_density)
self.susceptibility_input.SetValue(self.layer_list[self.currently_active_layer_id].susceptibility)
self.angle_a_input.SetValue(self.layer_list[self.currently_active_layer_id].angle_a)
self.angle_b_input.SetValue(self.layer_list[self.currently_active_layer_id].angle_b)
self.angle_c_input.SetValue(self.layer_list[self.currently_active_layer_id].angle_c)
self.earth_field_input.SetValue(self.layer_list[self.currently_active_layer_id].earth_field)
# UPDATE GMG STATE
self.update_layer_data()
self.run_algorithms()
self.draw()
# LIVE GRAPHICS UPDATES~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def update_layer_data(self):
"""UPDATE PROGRAM GRAPHICS AFTER A CHANGE IS MADE - A.K.A REDRAW EVERYTHING"""
# UPDATE FRAME LIMITS
xmin, xmax = self.model_frame.get_xlim()
if self.topo_frame:
self.topo_frame.set_xlim(xmin, xmax)
self.topo_d_frame.set_xlim(xmin, xmax)
if self.gravity_frame:
self.gravity_frame.set_xlim(xmin, xmax)
self.gravity_d_frame.set_xlim(xmin, xmax)
if self.magnetic_frame:
self.magnetic_frame.set_xlim(xmin, xmax)
self.magnetic_d_frame.set_xlim(xmin, xmax)
if self.fault_picking_switch is True:
# GMG IS IN FAULT MODE
# UPDATE FAULT NODES
self.fault_list[self.currently_active_fault_id].x_nodes = self.xt
self.fault_list[self.currently_active_fault_id].y_nodes = self.yt
# UPDATE FAULT MPL ACTOR
self.fault_list[self.currently_active_fault_id].mpl_actor[0].set_xdata(self.xt)
self.fault_list[self.currently_active_fault_id].mpl_actor[0].set_ydata(self.yt)
self.fault_list[self.currently_active_fault_id].mpl_actor[0].set_color(
self.fault_list[self.currently_active_fault_id].color)
# UPDATE CURRENT PLOT GRAPHICS
self.currently_active_fault.set_data(self.xt, self.yt)
self.currently_active_fault.set_color(self.fault_list[self.currently_active_fault_id].color)
else:
# GMG IS IN LAYER MODE
# UPDATE PLOT LISTS WITH LATEST EDIT
self.layer_list[self.currently_active_layer_id].x_nodes = self.current_x_nodes
self.layer_list[self.currently_active_layer_id].y_nodes = self.current_y_nodes
# CREATE UPDATED POLYGON XYs -------------------------------------------------------------------------------
# FIRST CREATE THE POLYLINE DATA (THE BOTTOM LINE OF THE LAYER POLYGON). THIS IS DONE FIRST SO THE
# WHOLE POLYGON ISN'T PASSED TO SELF.POLYPLOTS
for i in range(0, self.total_layer_count + 1):
# CREATE THE LAYER POLYGONS TO PASS TO SELF.POLYGONS AND ONTO THE GRAV/MAG ALGORITHMS
# FIRST SET UP THE XY DATA: IF THE LAYER IS A 'FIXED' LAYER BELOW LAYER 0, ATTACH THE BASE OF THE
# LAST FIXED LAYER ABOVE IT TO COMPLETE THE POLYGON; OTHERWISE USE THE LAYER'S OWN NODES
if i >= 1 and self.layer_list[i].type == 'fixed':
# CHECK FOR LAST PREVIOUS FIXED LAYER AND USE ITS BASE TO COMPLETE THE POLYGON
for layer in range(i, 0, -1):
if self.layer_list[layer - 1].type == 'fixed':
# ASSIGN THE LAST FIXED LAYER INDEX
last_layer_index = layer - 1
# NOW APPEND NODES FOR BOUNDARY CONDITIONS (CONTINUOUS SLAB)
plotx = np.array(self.layer_list[i].x_nodes)
ploty = np.array(self.layer_list[i].y_nodes)
# SET THE PADDING NODES TO THE SAME DEPTH AS THE MODEL LIMIT NODES TO CREATE FLAT SLAB
ploty[0] = ploty[1]
ploty[-1] = ploty[-2]
self.layer_list[i].x_nodes = plotx
self.layer_list[i].y_nodes = ploty
# ADD NODES FROM ABOVE LAYER TO COMPLETE POLYGON
layer_above_x = np.array(self.layer_list[last_layer_index].x_nodes)[::-1]
layer_above_y = np.array(self.layer_list[last_layer_index].y_nodes)[::-1]
polygon_x = np.append(np.array(layer_above_x), np.array(plotx))
polygon_y = np.append(np.array(layer_above_y), np.array(ploty))
# UPDATE LAYER POLYGON ATTRIBUTE
self.layer_list[i].polygon = list(zip(polygon_x, polygon_y))
break
else:
continue
else:
# IF THE LAYER IS A SIMPLE 'FLOATING LAYER'
polygon_x = np.array(self.layer_list[i].x_nodes)
polygon_y = np.array(self.layer_list[i].y_nodes)
# UPDATE LAYER POLYGON ATTRIBUTE
self.layer_list[i].polygon = list(zip(polygon_x, polygon_y))
# ----------------------------------------------------------------------------------------------------------
# ----------------------------------------------------------------------------------------------------------
# UPDATE LAYER POLYGONS AND LINES
for i in range(0, self.total_layer_count + 1):
# SET POLYGON FILL COLOR BASED ON DENSITY
if self.layer_list[i].density != 0.0 and self.layer_list[i].reference_density != 0.0:
# DETERMINE DENSITY CONTRAST FROM (DENSITY - REF DENSITY)
next_color = self.colormap.to_rgba(0.001 * self.layer_list[i].density -
0.001 * self.layer_list[i].reference_density)
elif self.layer_list[i].density != 0.0:
# NO REF DENSITY, SO JUST USE DENSITY VALUE
next_color = self.colormap.to_rgba(0.001 * self.layer_list[i].density)
else:
# NO DENSITY HAS BEEN SET SO LEAVE BLANK
next_color = self.colormap.to_rgba(0.)
# UPDATE POLYGON XY AND COLOR FILL
self.layer_list[i].polygon_mpl_actor[0].set_xy(self.layer_list[i].polygon)
self.layer_list[i].polygon_mpl_actor[0].set_color(next_color)
# UPDATE LAYER LINES
self.layer_list[i].node_mpl_actor[0].set_xdata(self.layer_list[i].x_nodes)
self.layer_list[i].node_mpl_actor[0].set_ydata(self.layer_list[i].y_nodes)
self.layer_list[i].node_mpl_actor[0].set_color(self.layer_list[i].color)
# ----------------------------------------------------------------------------------------------------------
# UPDATE CURRENTLY ACTIVE LAYER LINE AND NODES
self.currently_active_layer.set_xdata(self.layer_list[self.currently_active_layer_id].x_nodes)
self.currently_active_layer.set_ydata(self.layer_list[self.currently_active_layer_id].y_nodes)
self.currently_active_layer.set_color(self.layer_list[self.currently_active_layer_id].color)
# DRAW CANVAS FEATURES
self.model_frame.set_aspect(self.model_aspect)
self.grav_frame_aspect = ((self.gravity_frame.get_xlim()[1] - self.gravity_frame.get_xlim()[0]) /
(self.gravity_frame.get_ylim()[1] - self.gravity_frame.get_ylim()[0]))
# UPDATE INFO
self.display_info()
# CONTENT HAS NOT BEEN SAVED SINCE LAST MODIFICATION
self.model_saved = False
# UPDATE GMG GRAPHICS
self.draw()
def run_algorithms(self):
"""RUN POTENTIAL FIELD CALCULATION ALGORITHMS"""
# --------------------------------------------------------------------------------------------------------------
# CALCULATE TOPOGRAPHY - :FUTURE: PREDICTED TOPOGRAPHY FROM ISOSTATIC FUNC
self.pred_topo = np.zeros_like(self.xp)
# --------------------------------------------------------------------------------------------------------------
# CALCULATE GRAVITY
polygons_to_use = []
densities_to_use = []
if self.calc_grav_switch is True:
# SELECT ONLY THOSE LAYERS THAT ARE CHECKED
for layer in range(0, self.total_layer_count + 1):
if self.layer_list[layer].include_in_calculations_switch is True:
# CHOOSE POLYGONS
polygons_to_use.append(self.layer_list[layer].polygon)
# DETERMINE DENSITY CONTRASTS
densities_to_use.append((self.layer_list[layer].density -
self.layer_list[layer].reference_density))
# PASS POLYGONS TO BOTT ALGORITHM AND RETURN THE PREDICTED VALUES
bott_input_polygons = []
for p, d in zip(polygons_to_use, densities_to_use):
bott_input_polygons.append(Polygon(1000 * np.array(p), {'density': d}))
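# NB: MODEL NODE COORDINATES APPEAR TO BE IN km (INFERRED FROM THE 1000 SCALING HERE AND THE
# xp * 0.001 PLOTTING CONVERSION BELOW), SO THEY ARE CONVERTED TO METRES FOR THE ALGORITHM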
# SET THE PREDICTED VALUES AS THE BOTT OUTPUT
# NB: NODES ARE INPUT LEFT TO RIGHT SO WE MUST MULTIPLY BY -1 TO PRODUCE THE CORRECT SIGN AT OUTPUT
self.predicted_gravity = bott.gz(self.xp, self.gravity_observation_elv, bott_input_polygons) * -1
else:
# SET THE PREDICTED VALUES AS ZEROS
self.predicted_gravity = np.zeros_like(self.xp)
# SET THE PREDICTED PLOT LINE WITH THE NEWLY CALCULATED VALUES
self.pred_gravity_plot.set_data(self.xp * 0.001, self.predicted_gravity)
# --------------------------------------------------------------------------------------------------------------
# --------------------------------------------------------------------------------------------------------------
# CALCULATE MAGNETICS
# ZIP POLYGONS WITH SUSCEPTIBILITIES AND PASS TO TALWANI AND HEIRTZLER ALGORITHM
if self.calc_mag_switch is True:
# SELECT ONLY THOSE LAYERS THAT ARE CHECKED
polygons_to_use = []
susceptibilities_to_use = []
angle_a_to_use = []
angle_b_to_use = []
angle_c_to_use = []
earth_field_to_use = []
for layer in range(0, self.total_layer_count + 1):
if self.layer_list[layer].include_in_calculations_switch is True:
polygons_to_use.append(self.layer_list[layer].polygon)
susceptibilities_to_use.append(self.layer_list[layer].susceptibility)
angle_a_to_use.append(self.layer_list[layer].angle_a)
angle_b_to_use.append(self.layer_list[layer].angle_b)
angle_c_to_use.append(self.layer_list[layer].angle_c)
earth_field_to_use.append(self.layer_list[layer].earth_field)
# PASS TO TALWANI & HEIRTZLER ALGORITHM
mag_input_polygons = []
for p, s, a, b, c, f in zip(polygons_to_use, susceptibilities_to_use, angle_a_to_use, angle_b_to_use,
angle_c_to_use, earth_field_to_use):
mag_input_polygons.append(Polygon(1000. * np.array(p), {'susceptibility': s, 'angle_a': a,
'angle_b': b, 'angle_c': c, 'f': f}))
# SET THE PREDICTED VALUES AS THE TALWANI & HEIRTZLER OUTPUT
# NB: NODES ARE INPUT LEFT TO RIGHT SO WE MUST MULTIPLY BY -1 TO PRODUCE THE CORRECT SIGN AT OUTPUT
self.predicted_nt = talwani_and_heirtzler.nt(self.xp, self.mag_observation_elv, mag_input_polygons) * -1
else:
# SET THE PREDICTED VALUES AS ZEROS
self.predicted_nt = np.zeros_like(self.xp)
# SET THE PREDICTED PLOT LINE WITH THE NEWLY CALCULATED VALUES
self.predicted_nt_plot.set_data(self.xp * 0.001, self.predicted_nt)
# --------------------------------------------------------------------------------------------------------------
# --------------------------------------------------------------------------------------------------------------
# UPDATE RMS VALUES
# RUN THE RMS CALC CODE
self.model_rms(self.xp)
# SET GRAVITY RMS
if len(self.obs_gravity_data_for_rms) != 0 and self.calc_grav_switch is True and len(self.predicted_gravity) != 0:
self.gravity_rms_plot.set_data(self.grav_residuals[:, 0], self.grav_residuals[:, 1])
else:
pass
# SET MAGNETIC RMS
if len(self.obs_mag_data_for_rms) != 0 and self.calc_mag_switch is True and len(self.predicted_nt) != 0:
self.mag_rms_plot.set_data(self.mag_residuals[:, 0], self.mag_residuals[:, 1])
else:
pass
# --------------------------------------------------------------------------------------------------------------
# SET FRAME X AND Y LIMITS
self.set_frame_limits()
# AFTER RUNNING ALGORITHMS, SET MODEL AS UNSAVED
self.model_saved = False
# UPDATE GMG GRAPHICS
self.draw()
def set_frame_limits(self):
"""SET FRAME X AND Y LIMITS"""
# --------------------------------------------------------------------------------------------------------------
# SET GRAVITY DISPLAY BOX LIMITS
if self.observed_gravity_switch is True and self.grav_residuals is not None and len(self.grav_residuals) != 0:
# CREATE EMPTY LIST
ymin_list = []
ymax_list = []
# APPEND OBSERVED MIN AND MAX
for i in range(len(self.observed_gravity_list)):
if self.observed_gravity_list[i] is not None:
ymin_list.append(self.observed_gravity_list[i].data[:, 1].min() - 2.0)
ymax_list.append(self.observed_gravity_list[i].data[:, 1].max() + 2.0)
# APPEND PREDICTED GRAVITY ANOMALY
ymin_list.append(self.predicted_gravity.min())
ymax_list.append(self.predicted_gravity.max())
# # APPEND RMS GRAVITY ANOMALY
# ymin_list.append(self.grav_residuals.min() - 2.0)
# ymax_list.append(self.grav_residuals.max() + 2.0)
# SET YMIN AND YMAX
ymin = min(ymin_list)
ymax = max(ymax_list)
elif self.observed_gravity_switch is True:
# CREATE EMPTY LIST
ymin_list = []
ymax_list = []
# APPEND OBSERVED MIN AND MAX
for i in range(len(self.observed_gravity_list)):
if self.observed_gravity_list[i] is not None:
ymin_list.append(self.observed_gravity_list[i].data[:, 1].min() - 2.0)
ymax_list.append(self.observed_gravity_list[i].data[:, 1].max() + 2.0)
# APPEND PREDICTED GRAVITY ANOMALY
if self.predicted_gravity is not None:
ymin_list.append(self.predicted_gravity.min() - 2.0)
ymax_list.append(self.predicted_gravity.max() + 2.0)
# SET YMIN AND YMAX
ymin = min(ymin_list)
ymax = max(ymax_list)
elif self.predicted_gravity is not None:
ymin = self.predicted_gravity.min() - 2.0
ymax = self.predicted_gravity.max() + 2.0
else:
# NO GRAVITY DATA AVAILABLE, SO LEAVE THE LIMITS UNCHANGED
ymin, ymax = None, None
if self.gravity_frame is not None and ymin is not None:
self.gravity_frame.set_ylim(ymin, ymax)
# --------------------------------------------------------------------------------------------------------------
# SET DERIVATIVE Y-AXIS LIMITS
# CREATE EMPTY LIST
ymin_list = [-1]
ymax_list = [1]
for i in range(len(self.observed_gravity_list)):
if self.observed_gravity_list[i] is not None and self.observed_gravity_list[i].type == str('derivative'):
ymin_list.append(self.observed_gravity_list[i].data[:, 1].min() - 0.1)
ymax_list.append(self.observed_gravity_list[i].data[:, 1].max() + 0.1)
if self.gravity_frame is not None:
self.gravity_d_frame.set_ylim(min(ymin_list), max(ymax_list))
# --------------------------------------------------------------------------------------------------------------
# --------------------------------------------------------------------------------------------------------------
# --------------------------------------------------------------------------------------------------------------
# --------------------------------------------------------------------------------------------------------------
# SET MAGNETIC DISPLAY BOX LIMITS
if self.observed_magnetic_switch is True and self.mag_residuals is not None and len(self.mag_residuals) != 0:
# CREATE EMPTY LIST
ymin_list = []
ymax_list = []
# APPEND OBSERVED MIN AND MAX
for i in range(len(self.observed_magnetic_list)):
if self.observed_magnetic_list[i] is not None:
ymin_list.append(self.observed_magnetic_list[i].data[:, 1].min() - 2.0)
ymax_list.append(self.observed_magnetic_list[i].data[:, 1].max() + 2.0)
# APPEND PREDICTED MAGNETIC ANOMALY
ymin_list.append(self.predicted_nt.min())
ymax_list.append(self.predicted_nt.max())
# APPEND RMS MAGNETIC ANOMALY
ymin_list.append(self.mag_residuals.min() - 2.0)
ymax_list.append(self.mag_residuals.max() + 2.0)
# SET YMIN AND YMAX
ymin = min(ymin_list)
ymax = max(ymax_list)
elif self.observed_magnetic_switch is True:
# CREATE EMPTY LIST
ymin_list = []
ymax_list = []
# APPEND OBSERVED MIN AND MAX
for i in range(len(self.observed_magnetic_list)):
if self.observed_magnetic_list[i] is not None:
ymin_list.append(self.observed_magnetic_list[i].data[:, 1].min() - 2.0)
ymax_list.append(self.observed_magnetic_list[i].data[:, 1].max() + 2.0)
# APPEND PREDICTED MAGNETIC ANOMALY
ymin_list.append(self.predicted_nt.min() - 2.0)
ymax_list.append(self.predicted_nt.max() + 2.0)
# SET YMIN AND YMAX
ymin = min(ymin_list)
ymax = max(ymax_list)
elif self.predicted_nt is not None:
# USE THE PREDICTED MAGNETIC ANOMALY
ymin = self.predicted_nt.min() - 2.0
ymax = self.predicted_nt.max() + 2.0
else:
# NO MAGNETIC DATA AVAILABLE, SO LEAVE THE LIMITS UNCHANGED
ymin, ymax = None, None
if self.magnetic_frame is not None and ymin is not None:
self.magnetic_frame.set_ylim(ymin, ymax)
# SET DERIVATIVE Y-AXIS LIMITS
# --------------------------------------------------------------------------------------------------------------
# CREATE LISTS WITH DEFAULT LIMITS
ymin_list = [-1]
ymax_list = [1]
for i in range(len(self.observed_magnetic_list)):
if self.observed_magnetic_list[i] is not None and self.observed_magnetic_list[i].type == str('derivative'):
ymin_list.append(self.observed_magnetic_list[i].data[:, 1].min() - 0.1)
ymax_list.append(self.observed_magnetic_list[i].data[:, 1].max() + 0.1)
if self.magnetic_frame is not None:
self.magnetic_d_frame.set_ylim(min(ymin_list), max(ymax_list))
# --------------------------------------------------------------------------------------------------------------
# --------------------------------------------------------------------------------------------------------------
# UPDATE GMG GRAPHICS
self.draw()
# EXTERNAL FIGURE CONSTRUCTION~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def plot_model(self, event):
"""CREATE EXTERNAL FIGURE OF MODEL USING INBUILT FIGURE CONSTRUCTION TOOL"""
# GET PLOTTING PARAMETERS FROM DIALOG BOX
self.set_values = PlotSettingsDialog(self, -1, 'Set figure parameters', self.model_aspect,
self.grav_frame_aspect)
self.set_values.Show(True)
def draw_model(self):
# GET USER INPUT FROM POPOUT BOX
self.file_path = self.set_values.file_path
self.file_type = self.set_values.file_type
self.use_tight_layout = self.set_values.use_tight_layout
self.fs = self.set_values.fs # FONT SIZE
self.aspect_ratio = self.set_values.aspect_ratio # MODEL ASPECT RATIO
self.ps = self.set_values.ps # OBSERVED POINT SIZE
self.calc_line_width = self.set_values.lw  # CALCULATED LINE WIDTH
self.font_type = self.set_values.font_type_text.GetValue()
self.topo_frame_min = self.set_values.topo_min_text.GetValue()
self.topo_frame_max = self.set_values.topo_max_text.GetValue()
self.grav_frame_min = self.set_values.grav_min_text.GetValue()
self.grav_frame_max = self.set_values.grav_max_text.GetValue()
self.mag_frame_min = self.set_values.mag_min_text.GetValue()
self.mag_frame_max = self.set_values.mag_max_text.GetValue()
self.draw_polygons = self.set_values.draw_polygons
self.polygon_alpha = self.set_values.polygon_alpha
self.draw_fixed_layers = self.set_values.draw_fixed_layers
self.layer_line_width = self.set_values.layer_line_width
self.draw_floating_layers = self.set_values.draw_floating_layers
self.layer_line_alpha = self.set_values.layer_line_alpha
self.draw_colorbar = self.set_values.draw_colorbar
self.colorbar_x = self.set_values.colorbar_x
self.colorbar_y = self.set_values.colorbar_y
self.colorbar_size_x = self.set_values.colorbar_size_x
self.colorbar_size_y = self.set_values.colorbar_size_y
self.draw_xy_data = self.set_values.draw_xy_data
self.xy_size = self.set_values.xy_size
self.xy_color = self.set_values.xy_color
self.draw_wells = self.set_values.draw_wells
self.well_fs = self.set_values.well_fs
self.well_line_width = self.set_values.well_line_width
self.draw_faults = self.set_values.draw_faults
self.faults_lw = self.set_values.faults_lw
# GET FIGURE DIMENSIONS
xmin, xmax = self.model_frame.get_xlim()
ymin, ymax = self.model_frame.get_ylim()
area = np.array([xmin, xmax, ymin, ymax])
# RUN PLOT MODEL CODE
fig_plot = plot_model.plot_fig(self.file_path, self.file_type, self.use_tight_layout, self.fs,
self.aspect_ratio, self.ps, self.calc_line_width, self.font_type,
self.topo_frame_min, self.topo_frame_max, self.grav_frame_min,
self.grav_frame_max, self.mag_frame_min, self.mag_frame_max,
self.draw_polygons, self.polygon_alpha, self.draw_fixed_layers,
self.layer_line_width, self.draw_floating_layers, self.layer_line_alpha,
self.draw_colorbar, self.colorbar_x, self.colorbar_y, self.colorbar_size_x,
self.colorbar_size_y, self.draw_xy_data, self.xy_size, self.xy_color,
self.draw_wells, self.well_fs, self.well_line_width, self.draw_faults,
self.faults_lw,
self.layer_list, self.fault_list, self.observed_topography_list,
self.observed_gravity_list, self.observed_magnetic_list,
self.outcrop_data_list, self.well_data_list, self.segy_data_list,
self.topo_frame, self.gravity_frame, self.magnetic_frame, self.predicted_gravity,
self.gravity_rms_value, self.predicted_nt, self.magnetic_rms_value, area,
self.xp)
del fig_plot
#
# # IF ON A LINUX SYSTEM OPEN THE FIGURE WITH PDF VIEWER
# try:
# if sys.platform == 'linux2':
# subprocess.call(["xdg-open", self.file_path])
# # IF ON A macOS SYSTEM OPEN THE FIGURE WITH PDF VIEWER
# elif sys.platform == 'darwin':
# os.open(self.file_path)
# except IOError:
# pass
# UPDATE GMG
self.update_layer_data()
self.draw()
return
# DOCUMENTATION ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def open_documentation(self, event):
"""OPEN DOCUMENTATION HTML"""
self.doc_dir = os.path.dirname(os.path.abspath(__file__)).split('/')
doc_url = self.doc_dir[0] + '/' + self.doc_dir[1] + '/' + self.doc_dir[2] + '/' + self.doc_dir[3] + \
'/docs/html/gmg_documentation.html'
if platform == "linux" or platform == "linux2":
# LINUX
webbrowser.open_new(doc_url)
elif platform == "darwin":
# OS X
client = webbrowser.get("open -a /Applications/Safari.app %s")
client.open(doc_url)
elif platform == "win32":
# WINDOWS
webbrowser.open_new(doc_url)
def about_gmg(self, event):
""" SHOW SOFTWARE INFORMATION"""
about = [
"GMG is an Open Source Graphical User Interface (GUI) designed principally for modelling 2D potential "
"field (gravity and magnetic) profiles. The software also includes functions for loading XY data, "
"seismic reflection SEGY data and exploration well horizons. The software therefore provides an "
"integrated geological/geophysical interpretation package. It is anticipated that GMG will also be "
"useful for teaching purposes. \n \n"
"Data I/O is made as simple as possible using space delimited ASCII text files. \n \n"
"The project was instigated after failing to find an adequate open source option (in which the source "
"code can be viewed and modified by the user) for performing 2D geophysical modeling tasks. "
"Inspiration came from fatiando a terra and GMT. \n \n"
"GMG was initially developed at the University of Oxford 2014-2017. \n \n"
"B. Tozer"]
dlg = wx.MessageDialog(self, about[0], "About", wx.OK | wx.ICON_INFORMATION)
result = dlg.ShowModal()
dlg.Destroy()
def legal(self, event):
""" SHOW LICENCE"""
licence = ["Copyright 2015-2019 Brook Tozer \n\nRedistribution and use in source and binary forms, with or "
"without modification, are permitted provided that the following conditions are met: \n \n"
"1. Redistributions of source code must retain the above copyright notice, this list of conditions "
"and the following disclaimer. \n\n2. Redistributions in binary form must reproduce the above "
"copyright notice, this list of conditions and the following disclaimer in the documentation and/or "
"other materials provided with the distribution. \n\n3. Neither the name of the copyright holder "
"nor the names of its contributors may be used to endorse or promote products derived from this "
"software without specific prior written permission. \n\nTHIS SOFTWARE IS PROVIDED BY THE "
"COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT "
"NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE "
"DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, "
"INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, "
"PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS "
"INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,"
" OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, "
"EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."]
dlg = wx.MessageDialog(self, licence[0], "BSD-3-Clause Licence", wx.OK | wx.ICON_INFORMATION)
result = dlg.ShowModal()
dlg.Destroy()
# EXIT FUNCTIONS~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def exit(self, event):
""" SHUTDOWN APP (FROM FILE MENU)"""
dlg = wx.MessageDialog(self, "Do you really want to exit", "Confirm Exit", wx.OK | wx.CANCEL | wx.ICON_QUESTION)
result = dlg.ShowModal()
if result == wx.ID_OK:
wx.GetApp().ExitMainLoop()
def on_close_button(self, event):
""" SHUTDOWN APP (X BUTTON)"""
dlg = wx.MessageDialog(self, "Do you really want to exit", "Confirm Exit", wx.OK | wx.CANCEL | wx.ICON_QUESTION)
result = dlg.ShowModal()
if result == wx.ID_OK:
wx.GetApp().ExitMainLoop()
# FUTURE MODULES (IN PROCESS)~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def set_error(self, value):
pass
# self.error = value
# self.update_layer_data()
# self.run_algorithms()
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# START SOFTWARE
if __name__ == "__main__":
app = wx.App(False)
fr = wx.Frame(None, title='GMG: Geophysical Modelling GUI')
app.frame = Gmg()
app.frame.CenterOnScreen()
app.frame.Show()
app.MainLoop()
|
python
|
from typing import List, Union
import numpy as np
import tf_conversions
from geometry_msgs.msg import Pose, Quaternion
def calc_homogeneous_matrix(pose: Pose) -> np.ndarray:
angles = tf_conversions.transformations.euler_from_quaternion([
pose.orientation.x,
pose.orientation.y,
pose.orientation.z,
pose.orientation.w
])
translate = [pose.position.x, pose.position.y, pose.position.z]
homogeneous_matrix = tf_conversions.transformations.compose_matrix(
angles=angles, translate=translate)
return homogeneous_matrix
def calc_relative_pose(pose_from: Pose, pose_to: Pose) -> Pose:
converted_matrix = (
tf_conversions.transformations.inverse_matrix(calc_homogeneous_matrix(pose_from)) @ calc_homogeneous_matrix(pose_to))
quaternion = tf_conversions.transformations.quaternion_from_matrix(
converted_matrix)
translation = tf_conversions.transformations.translation_from_matrix(
converted_matrix)
return get_msg_from_translation_and_quaternion(translation, quaternion)
def calc_global_pose_from_relative_pose(pose_base: Pose, pose_relative: Pose) -> Pose:
converted_matrix = (
calc_homogeneous_matrix(pose_base) @ calc_homogeneous_matrix(pose_relative))
quaternion = tf_conversions.transformations.quaternion_from_matrix(
converted_matrix)
translation = tf_conversions.transformations.translation_from_matrix(
converted_matrix)
return get_msg_from_translation_and_quaternion(translation, quaternion)
def get_msg_from_translation_and_quaternion(translation: Union[np.ndarray, list], quaternion: Union[np.ndarray, list]) -> Pose:
pose = Pose()
pose.position.x = translation[0]
pose.position.y = translation[1]
pose.position.z = translation[2]
pose.orientation.x = quaternion[0]
pose.orientation.y = quaternion[1]
pose.orientation.z = quaternion[2]
pose.orientation.w = quaternion[3]
return pose
def get_msg_from_array_2d(array_2d: Union[np.ndarray, list]) -> Pose:
pose = Pose()
quaternion = Quaternion(
*tf_conversions.transformations.quaternion_from_euler(0, 0, array_2d[2]))
pose.position.x = array_2d[0]
pose.position.y = array_2d[1]
pose.position.z = 0
pose.orientation = quaternion
return pose
def get_array_2d_from_msg(pose: Pose) -> List[float]:
array_2d = [
pose.position.x,
pose.position.y,
tf_conversions.transformations.euler_from_quaternion([
pose.orientation.x,
pose.orientation.y,
pose.orientation.z,
pose.orientation.w
])[2]
]
return array_2d
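# Minimal usage sketch (hedged; assumes a ROS environment where geometry_msgs and
# tf_conversions are importable, and made-up example poses):
#   base = get_msg_from_array_2d([1.0, 2.0, np.pi / 2])   # x, y, yaw
#   target = get_msg_from_array_2d([2.0, 3.0, np.pi])
#   relative = calc_relative_pose(base, target)
#   # composing the base pose with the relative pose recovers the target:
#   recovered = calc_global_pose_from_relative_pose(base, relative)
#   print(get_array_2d_from_msg(relative), get_array_2d_from_msg(recovered))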
|
python
|
"""
DBSCAN Clustering
"""
import sys
import numpy as np
import pandas as pd
from sklearn.cluster import DBSCAN
from sklearn.neighbors import NearestNeighbors
def eps_est(data,n=4,verbose=True):
"""
Minimum data size is 1000
Methodology improvement opportunity: Better elbow locater
"""
if verbose:print("Calculating nearest neighbor distances...")
nbrs = NearestNeighbors(n_neighbors=int(max(n+1,100)), algorithm='ball_tree',n_jobs=-1).fit(data)
distances, indices = nbrs.kneighbors(data)
del nbrs
distArr = distances[:,n] # distance array containing all distances to nth neighbor
distArr.sort()
pts = range(len(distArr))
# The following looks for the first instance (past the mid point)
# where the mean of the following [number] points
# is at least (cutoff-1)*100% greater than the mean of the previous [number] points.
# Number is currently set to be 0.2% of the total data
# This works pretty well on data scaled to unit variance. Area for improvement though.
number = int(np.ceil(len(data)/500))
cutoff = 1.05
if verbose:print("Finding elbow...")
for i in range(int(np.ceil(len(pts)/2)),len(pts)-number):
if np.mean(distArr[i+1:i+number])>=cutoff*np.mean(distArr[i-number:i-1]):
dbEps = distArr[i]
pt=pts[i]
break
if verbose:
print("""
Epsilon is in the neighborhood of {:05.2f}.
""".format(dbEps))
return dbEps,distArr
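# Example (a hedged sketch, not part of the original module): estimating eps on
# synthetic data scaled to unit variance, which is what the heuristic is tuned for:
#   from sklearn.datasets import make_blobs
#   from sklearn.preprocessing import StandardScaler
#   X, _ = make_blobs(n_samples=2000, centers=3, random_state=0)
#   dbEps, distArr = eps_est(StandardScaler().fit_transform(X), n=4)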
def dbscan_w_outliers(data,min_n=4,check_tabby=False,verbose=True):
# numpy array of dataframe for fit later
X=np.array([np.array(data.loc[i]) for i in data.index])
if verbose:print("Estimating Parameters...")
if len(X)>10000:
# Large datasets have presented issues where a single high density cluster
# leads to an epsilon of 0.0 for 4 neighbors.
# We adjust for this by calculating epsilon for a sample of the data,
# then we scale min_neighbors accordingly
if verbose:print("Sampling data for parameter estimation...")
X_sample = data.sample(n=10000)
else:
X_sample = data
dbEps,distArr = eps_est(X_sample,n=min_n,verbose=verbose)
if len(X)>10000:
if verbose:print("Scaling density...")
min_n = int(len(X)/10000*min_n)
if verbose:print("Clustering data with DBSCAN, eps={:05.2f},min_samples={}...".format(dbEps,min_n))
#est = DBSCAN(eps=dbEps,min_samples=min_n,n_jobs=-1) # takes too long, deprecated
#est.fit(X)
#clusterLabels = est.labels_
# Outlier score: distance to 4th neighbor?
nbrs = NearestNeighbors(n_neighbors=min_n+1, algorithm='ball_tree',n_jobs=-1).fit(data)
distances, indices = nbrs.kneighbors(data)
del nbrs
distArr = distances[:,min_n]
# The following determines the cluster edge members
# by checking if any outlying points contain a clustered neighbor.
# Necessary given the heuristic nature of epsilon, provides a buffer.
# Optimization opportunity: this could be parellelized pretty easily
d = {True:-1,False:0}
clusterLabels = np.array([d[pt>dbEps] for pt in distArr])
for i,label in enumerate(clusterLabels):
# For all identified outliers (pts w/o enough neighbors):
if label == -1:
j=1
# for the neighbors within epsilon
while distances[i,j]<dbEps:
# if a neighbor is labeled as part of the cluster,
if clusterLabels[indices[i,j]] == 0:
# then this pt is an edge point
clusterLabels[i]=1
break
j+=1
if check_tabby:
if data.index.str.contains('8462852').any():
tabbyInd = list(data.index).index(data[data.index.str.contains('8462852')].index[0])
if clusterLabels[tabbyInd] == -1:
print("Tabby has been found to be an outlier in DBSCAN.")
else:
print("Tabby has NOT been found to be an outlier in DBSCAN")
else:
print("MISSING: Tabby is not in this data.")
numout = len(clusterLabels[clusterLabels==-1])
numedge = len(clusterLabels[clusterLabels==1])
if verbose:
print("There are {:d} total outliers and {:d} edge members.".format(numout,numedge))
return clusterLabels
if __name__=="__main__":
"""
If this is run as a script, the following will parse the arguments it is fed,
or prompt the user for input.
python db_outliers.py path/to/file n_features path/to/output_file
"""
# f - file, pandas dataframe saved as csv with calculated features
if len(sys.argv) > 1:
f = sys.argv[1]
else:
f = None
while not f:
f = input("Input path: ")
print("Reading %s..."%f)
df = pd.read_csv(f,index_col=0)
if len(sys.argv) > 2: n_feat = sys.argv[2]
else: n_feat = input("Number of features in data: ")
if not n_feat:
print("No features specified, assuming default number of features, 60.")
n_feat = 60
n_feat = int(n_feat)
# of - output file
if len(sys.argv) > 3:
of = sys.argv[3]
else:
of = input("Output path: ")
if not of:
print("No output path specified, saving to 'output.npy' in local folder.")
of = 'output'
# SELECT THE FIRST n_feat FEATURE COLUMNS (df[:n_feat] WOULD SLICE ROWS, NOT FEATURES)
np.save(of,dbscan_w_outliers(df.iloc[:, :n_feat]))
print("Done.")
|
python
|
# -*- coding: utf-8 -*-
"""
-------------------------------------------------
File Name: fetchScheduler
Description :
Author : JHao
date: 2019/8/6
-------------------------------------------------
Change Activity:
2019/08/06:
-------------------------------------------------
"""
__author__ = 'JHao'
import os
from handler.logHandler import LogHandler
from handler.proxyHandler import ProxyHandler
from fetcher.proxyFetcher import ProxyFetcher
from handler.configHandler import ConfigHandler
from qqwry import QQwry
class Fetcher(object):
name = "fetcher"
def __init__(self):
self.log = LogHandler(self.name)
self.conf = ConfigHandler()
self.proxy_handler = ProxyHandler()
self.loadIp()
def loadIp(self):
if os.path.isfile("qqwry.dat"):
self.ip = QQwry()
self.ip.load_file('qqwry.dat')
else:
self.ip = False
def fetch(self):
"""
fetch proxy into db with proxyFetcher
:return:
"""
proxy_set = set()
self.log.info("ProxyFetch : start")
for fetch_name in self.conf.fetchers:
self.log.info("ProxyFetch - {func}: start".format(func=fetch_name))
fetcher = getattr(ProxyFetcher, fetch_name, None)
if not fetcher:
self.log.error("ProxyFetch - {func}: class method not exists!")
continue
if not callable(fetcher):
self.log.error("ProxyFetch - {func}: must be class method")
continue
try:
for proxy in fetcher():
if proxy in proxy_set:
self.log.info('ProxyFetch - %s: %s exist' % (fetch_name, proxy.ljust(23)))
continue
else:
self.log.info('ProxyFetch - %s: %s success' % (fetch_name, proxy.ljust(23)))
if proxy.strip():
if self.ip:
area = " ".join(self.ip.lookup(proxy.split(':')[0]))
else:
self.loadIp()
area = ''
proxy_set.add((proxy, fetch_name, area))
except Exception as e:
self.log.error("ProxyFetch - {func}: error".format(func=fetch_name))
self.log.error(str(e))
self.log.info("ProxyFetch - all complete!")
return proxy_set
# origin proxy_set = {'1.1.1.1', '2.2.2.2'}
# now proxy_set = [('1.1.1.1', 'fetch1', 'area'), ('2.2.2.2', 'fetch2', 'area')]
def runFetcher():
return Fetcher().fetch()
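# Usage sketch (hedged): run one fetch cycle and inspect the results, e.g.
#   proxies = runFetcher()  # -> {(proxy, fetcher_name, area), ...}
#   for proxy, source, area in proxies:
#       print(proxy, source, area)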
|
python
|
#!/usr/bin/env python
# Written by Eric Ziegast
# nxdomain.py - look at ch202 dnsqr data from a file and print out any
# qname / type / rcode for data that's not rcode 0 ("NOERROR").
# Forgive awkward key processing. Sometimes an rcode or qtype key
# doesn't exist and would cause the script to break if we accessed it directly.
import nmsg
import wdns
import sys
def main(fname):
i = nmsg.input.open_file(fname)
while True:
m = i.read()
if not m:
break
rcode = 0
qname = qtype = 0
for key in m.keys():
if key == 'rcode':
rcode = m[key]
continue
if key == 'qname':
qname = m[key]
continue
if key == 'qtype':
qtype = m[key]
continue
if rcode != 0 and qname != 0 and qtype != 0:
print('%s %s %s' % (wdns.rcode_to_str(rcode),
wdns.rrtype_to_str(qtype), wdns.domain_to_str(qname)))
if __name__ == '__main__':
main(sys.argv[1])
|
python
|
lista = list()
while True:
lista.append(int(input("Digite um número inteiro:\t")))
while True:
p = str(input("Digitar mais números?\t").strip())[0].upper()
if p in 'SN':
break
else:
print("\033[31mDigite uma opção válida!\033[m")
if p == 'N':
break
par = list()
impar = list()
for n in lista:
if n % 2 == 0:
par.append(n)
else:
impar.append(n)
par.sort()
impar.sort()
if 0 in par:
par.remove(0)
print(f"Entre os números \033[32m{lista}\033[m, os números pares são: \033[33m{par}\033[m e os números ímpares são: \033[34m{impar}\033[m!")
|
python
|
""" A simple TCP echo server. """
import argparse
import socket
import time
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("listen_port",
help="The port this process will listen on.",
type=int)
args = parser.parse_args()
# Create a TCP/IP socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Bind the socket to the port
server_address = ('localhost', int(args.listen_port))
print('starting up on %s port %s' % server_address)
sock.bind(server_address)
# Listen for incoming connections
sock.listen(1)
init_time = time.time()
while True:
# Wait for a connection
print('waiting for a connection')
connection, client_address = sock.accept()
try:
print('connection from', client_address)
# Receive the data in small chunks and retransmit it
while True:
data = connection.recv(64)
print('received "{}" at time {:.2f}.'.format(
data,
time.time() - init_time))
if data:
print('sending "{}" back to the client at time {:.2f}'.
format(data,
time.time() - init_time))
connection.sendall(data)
else:
print('no more data from {} at time {:.2f}'.format(
client_address,
time.time() - init_time))
break
finally:
# Clean up the connection
connection.close()
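# A quick way to exercise this server (a hedged sketch; the script name and port
# 9000 are placeholders):
#   python <this_script>.py 9000
# then, in another shell:
#   python -c "import socket; s = socket.create_connection(('localhost', 9000)); s.sendall(b'hello'); print(s.recv(64)); s.close()"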
|
python
|
import os
import io
import shutil
import base64
import subprocess
import platform
import logging
import numpy as np
import pandas as pd
from tqdm import tqdm
from glob import glob
from pathlib import Path as P
import wget
import urllib3, ftplib
from urllib.parse import urlparse
from bs4 import BeautifulSoup
import matplotlib as mpl
mpl.use("Agg")
from matplotlib import pyplot as plt
import matplotlib.cm as cm
import ms_mint
from ms_mint.io import ms_file_to_df
from ms_mint.targets import standardize_targets, read_targets
from ms_mint.io import convert_ms_file_to_feather
from ms_mint.standards import TARGETS_COLUMNS
from datetime import date
from .filelock import FileLock
def lock(fn):
return FileLock(f"{fn}.lock", timeout=1)
def today():
return date.today().strftime("%y%m%d")
def get_versions():
string = ""
try:
string += subprocess.getoutput("conda env export --no-build")
except Exception:
pass
return string
def get_issue_text():
return f"""
%0A%0A%0A%0A%0A%0A%0A%0A%0A
MINT version: {ms_mint.__version__}%0A
OS: {platform.platform()}%0A
Versions: ---
"""
def parse_ms_files(contents, filename, date, target_dir):
content_type, content_string = contents.split(",")
decoded = base64.b64decode(content_string)
fn_abs = os.path.join(target_dir, filename)
with lock(fn_abs):
with open(fn_abs, "wb") as file:
file.write(decoded)
new_fn = convert_ms_file_to_feather(fn_abs)
if os.path.isfile(new_fn):
os.remove(fn_abs)
def parse_pkl_files(contents, filename, date, target_dir, ms_mode=None):
content_type, content_string = contents.split(",")
decoded = base64.b64decode(content_string)
df = pd.read_csv(io.StringIO(decoded.decode("utf-8")))
df = standardize_targets(df, ms_mode=ms_mode)
df = df.drop_duplicates()
return df
def get_dirnames(path):
dirnames = [f.name for f in os.scandir(path) if f.is_dir()]
return dirnames
def workspace_path(tmpdir, ws_name):
return os.path.join(tmpdir, "workspaces", ws_name)
def maybe_migrate_workspaces(tmpdir):
if not P(tmpdir).is_dir():
return None
dir_names = get_dirnames(tmpdir)
ws_path = get_workspaces_path(tmpdir)
if not os.path.isdir(ws_path) and len(dir_names) > 0:
logging.info("Migrating to new directory scheme.")
os.makedirs(ws_path)
for dir_name in dir_names:
old_dir = os.path.join(tmpdir, dir_name)
new_dir = workspace_path(tmpdir, dir_name)
shutil.move(old_dir, new_dir)
logging.info("Moving", old_dir, "to", new_dir)
def maybe_update_workpace_scheme(wdir):
old_pkl_fn = P(wdir) / "peaklist" / "peaklist.csv"
new_pkl_fn = P(get_targets_fn(wdir))
new_path = new_pkl_fn.parent
old_path = old_pkl_fn.parent
if old_pkl_fn.is_file():
logging.info(f"Moving targets file to new default location ({new_pkl_fn}).")
if not new_path.is_dir():
os.makedirs(new_path)
os.rename(old_pkl_fn, new_pkl_fn)
shutil.rmtree(old_path)
def workspace_exists(tmpdir, ws_name):
path = workspace_path(tmpdir, ws_name)
return os.path.isdir(path)
def get_active_workspace(tmpdir):
"""Returns name of last activated workspace,
if workspace still exists. Otherwise,
return None.
"""
fn_ws_info = os.path.join(tmpdir, ".active-workspace")
if not os.path.isfile(fn_ws_info):
return None
with open(fn_ws_info, "r") as file:
ws_name = file.read()
if ws_name in get_workspaces(tmpdir):
return ws_name
else:
return None
def save_activated_workspace(tmpdir, ws_name):
fn_ws_info = os.path.join(tmpdir, ".active-workspace")
with open(fn_ws_info, "w") as file:
file.write(ws_name)
def create_workspace(tmpdir, ws_name):
path = workspace_path(tmpdir, ws_name)
assert not os.path.isdir(path)
os.makedirs(path)
os.makedirs(os.path.join(path, "ms_files"))
os.makedirs(os.path.join(path, "targets"))
os.makedirs(os.path.join(path, "results"))
os.makedirs(os.path.join(path, "figures"))
os.makedirs(os.path.join(path, "chromato"))
def get_workspaces_path(tmpdir):
# Defines the path to the workspaces
# relative to `tmpdir`
return os.path.join(tmpdir, "workspaces")
def get_workspaces(tmpdir):
ws_path = get_workspaces_path(tmpdir)
if not P(ws_path).is_dir():
return []
ws_names = get_dirnames(ws_path)
ws_names = [ws for ws in ws_names if not ws.startswith(".")]
ws_names.sort()
return ws_names
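# Hedged usage sketch for the workspace helpers above, assuming "/tmp/mint"
# is a writable scratch directory:
#
#     create_workspace("/tmp/mint", "demo")
#     workspace_exists("/tmp/mint", "demo")   # -> True
#     get_workspaces("/tmp/mint")             # -> ["demo"]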
class Chromatograms:
def __init__(self, wdir, targets, ms_files, progress_callback=None):
self.wdir = wdir
self.targets = targets
self.ms_files = ms_files
self.n_peaks = len(targets)
self.n_files = len(ms_files)
self.progress_callback = progress_callback
def create_all(self):
for fn in tqdm(self.ms_files):
self.create_all_for_ms_file(fn)
return self
def create_all_for_ms_file(self, ms_file: str):
fn = ms_file
df = ms_file_to_df(fn)
for ndx, row in self.targets.iterrows():
mz_mean, mz_width = row[["mz_mean", "mz_width"]]
fn_chro = get_chromatogram_fn(fn, mz_mean, mz_width, self.wdir)
if os.path.isfile(fn_chro):
continue
dirname = os.path.dirname(fn_chro)
if not os.path.isdir(dirname):
os.makedirs(dirname)
dmz = mz_mean * 1e-6 * mz_width
chrom = df[(df["mz"] - mz_mean).abs() <= dmz]
chrom["scan_time_min"] = chrom["scan_time_min"].round(3)
chrom = chrom.groupby("scan_time_min").max().reset_index()
chrom[["scan_time_min", "intensity"]].to_feather(fn_chro)
def get_single(self, mz_mean, mz_width, ms_file):
return get_chromatogram(ms_file, mz_mean, mz_width, self.wdir)
def create_chromatograms(ms_files, targets, wdir):
for fn in tqdm(ms_files):
fn_out = os.path.basename(fn)
fn_out, _ = os.path.splitext(fn_out)
fn_out += ".feather"
for ndx, row in targets.iterrows():
mz_mean, mz_width = row[["mz_mean", "mz_width"]]
fn_chro = get_chromatogram_fn(fn, mz_mean, mz_width, wdir)
if not os.path.isfile(fn_chro):
create_chromatogram(fn, mz_mean, mz_width, fn_chro)
def create_chromatogram(ms_file, mz_mean, mz_width, fn_out, verbose=False):
if verbose:
print("Creating chromatogram")
df = ms_file_to_df(ms_file)
if verbose:
print("...file read")
dirname = os.path.dirname(fn_out)
if not os.path.isdir(dirname):
os.makedirs(dirname)
dmz = mz_mean * 1e-6 * mz_width
chrom = df[(df["mz"] - mz_mean).abs() <= dmz]
chrom["scan_time_min"] = chrom["scan_time_min"].round(3)
chrom = chrom.groupby("scan_time_min").max().reset_index()
with lock(fn_out):
chrom[["scan_time_min", "intensity"]].to_feather(fn_out)
if verbose:
print("...done creating chromatogram.")
return chrom
def get_chromatogram(ms_file, mz_mean, mz_width, wdir):
fn = get_chromatogram_fn(ms_file, mz_mean, mz_width, wdir)
if not os.path.isfile(fn):
chrom = create_chromatogram(ms_file, mz_mean, mz_width, fn)
else:
try:
chrom = pd.read_feather(fn)
        except Exception:
            os.remove(fn)
            logging.warning(f"Could not read {fn}.")
return None
chrom = chrom.rename(
columns={
"retentionTime": "scan_time_min",
"intensity array": "intensity",
"m/z array": "mz",
}
)
return chrom
def get_chromatogram_fn(ms_file, mz_mean, mz_width, wdir):
ms_file = os.path.basename(ms_file)
base, _ = os.path.splitext(ms_file)
fn = (
os.path.join(wdir, "chromato", f"{mz_mean}-{mz_width}".replace(".", "_"), base)
+ ".feather"
)
return fn
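# Hedged worked example of the naming scheme above: for ms_file "sample1.mzML",
# mz_mean=101.0713, mz_width=10 and wdir "/ws", the chromatogram file is
# "/ws/chromato/101_0713-10/sample1.feather".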
def get_targets_fn(wdir):
return os.path.join(wdir, "targets", "targets.csv")
def get_targets(wdir):
fn = get_targets_fn(wdir)
if os.path.isfile(fn):
targets = read_targets(fn).set_index("peak_label")
else:
targets = pd.DataFrame(columns=TARGETS_COLUMNS)
print("Read targets:", targets)
return targets
def update_targets(wdir, peak_label, rt_min=None, rt_max=None, rt=None):
targets = get_targets(wdir)
if isinstance(peak_label, str):
if rt_min is not None and not np.isnan(rt_min):
targets.loc[peak_label, "rt_min"] = rt_min
if rt_max is not None and not np.isnan(rt_max):
targets.loc[peak_label, "rt_max"] = rt_max
if rt is not None and not np.isnan(rt):
targets.loc[peak_label, "rt"] = rt
if isinstance(peak_label, int):
targets = targets.reset_index()
if rt_min is not None and not np.isnan(rt_min):
targets.loc[peak_label, "rt_min"] = rt_min
if rt_max is not None and not np.isnan(rt_max):
targets.loc[peak_label, "rt_max"] = rt_max
if rt is not None and not np.isnan(rt):
targets.loc[peak_label, "rt"] = rt
targets = targets.set_index("peak_label")
fn = get_targets_fn(wdir)
with lock(fn):
targets.to_csv(fn)
def get_results_fn(wdir):
return os.path.join(wdir, "results", "results.csv")
def get_results(wdir):
fn = get_results_fn(wdir)
df = pd.read_csv(fn)
df["MS-file"] = [filename_to_label(fn) for fn in df["ms_file"]]
return df
def get_metadata(wdir):
fn = get_metadata_fn(wdir)
fn_path = os.path.dirname(fn)
ms_files = get_ms_fns(wdir, abs_path=False)
ms_files = [filename_to_label(fn) for fn in ms_files]
df = None
if not os.path.isdir(fn_path):
os.makedirs(fn_path)
if os.path.isfile(fn):
df = pd.read_csv(fn)
if "MS-file" not in df.columns:
df = None
if df is None or len(df) == 0:
df = init_metadata(ms_files)
for col in [
"Color",
"Column",
"Row",
"Batch",
"Label",
"InAnalysis",
"PeakOpt",
"MS-file",
]:
if col not in df.columns:
df[col] = None
df = df[df["MS-file"] != ""]
df = df.groupby("MS-file").first().reindex(ms_files).reset_index()
if "PeakOpt" not in df.columns:
df["PeakOpt"] = False
else:
df["PeakOpt"] = df["PeakOpt"].astype(bool)
if "InAnalysis" not in df.columns:
df["InAnalysis"] = True
else:
df["InAnalysis"] = df["InAnalysis"].astype(bool)
if "index" in df.columns:
del df["index"]
df["Column"] = df["Column"].apply(format_columns)
df["Type"] = df["Type"].fillna("Not set")
df.reset_index(inplace=True)
return df
def init_metadata(ms_files):
ms_files = list(ms_files)
ms_files = [filename_to_label(fn) for fn in ms_files]
df = pd.DataFrame({"MS-file": ms_files})
df["InAnalysis"] = True
df["Label"] = ""
df["Color"] = None
df["Type"] = "Biological Sample"
df["RunOrder"] = ""
df["Batch"] = ""
df["Row"] = ""
df["Column"] = ""
df["PeakOpt"] = ""
return df
def write_metadata(meta, wdir):
fn = get_metadata_fn(wdir)
with lock(fn):
meta.to_csv(fn, index=False)
def get_metadata_fn(wdir):
fn = os.path.join(wdir, "metadata", "metadata.csv")
return fn
def get_ms_dirname(wdir):
return os.path.join(wdir, "ms_files")
def get_ms_fns(wdir, abs_path=True):
path = get_ms_dirname(wdir)
fns = glob(os.path.join(path, "**", "*.*"), recursive=True)
fns = [fn for fn in fns if is_ms_file(fn)]
if not abs_path:
fns = [os.path.basename(fn) for fn in fns]
return fns
def is_ms_file(fn: str):
    return fn.lower().endswith((".mzxml", ".mzml", ".feather"))
def Basename(fn):
fn = os.path.basename(fn)
fn, _ = os.path.splitext(fn)
return fn
def format_columns(x):
    try:
        if (x is None) or (x == "") or np.isnan(x):
            return None
    except TypeError:
        # np.isnan() raises TypeError for non-numeric input (e.g. strings);
        # report the offending value and re-raise instead of the original
        # `assert False`, which would be stripped under `python -O`.
        print(type(x))
        print(x)
        raise
    return f"{int(x):02.0f}"
def get_complete_results(
wdir,
include_labels=None,
exclude_labels=None,
file_types=None,
include_excluded=False,
):
meta = get_metadata(wdir)
resu = get_results(wdir)
if not include_excluded:
meta = meta[meta["InAnalysis"]]
df = pd.merge(meta, resu, on=["MS-file"])
if include_labels is not None and len(include_labels) > 0:
df = df[df.peak_label.isin(include_labels)]
if exclude_labels is not None and len(exclude_labels) > 0:
df = df[~df.peak_label.isin(exclude_labels)]
if file_types is not None and file_types != []:
df = df[df.Type.isin(file_types)]
df["log(peak_max+1)"] = df.peak_max.apply(np.log1p)
if "index" in df.columns:
df = df.drop("index", axis=1)
return df
def gen_tabulator_columns(
col_names=None,
add_ms_file_col=False,
add_color_col=False,
add_peakopt_col=False,
add_ms_file_active_col=False,
col_width="12px",
editor="input",
):
if col_names is None:
col_names = []
col_names = list(col_names)
standard_columns = [
"MS-file",
"InAnalysis",
"Color",
"index",
"PeakOpt",
]
for col in standard_columns:
if col in col_names:
col_names.remove(col)
columns = [
{
"formatter": "rowSelection",
"titleFormatter": "rowSelection",
"titleFormatterParams": {
"rowRange": "active" # only toggle the values of the active filtered rows
},
"hozAlign": "center",
"headerSort": False,
"width": "1px",
"frozen": True,
}
]
if add_ms_file_col:
columns.append(
{
"title": "MS-file",
"field": "MS-file",
"headerFilter": True,
"headerSort": True,
"editor": "input",
"sorter": "string",
"frozen": True,
}
)
if add_color_col:
columns.append(
{
"title": "Color",
"field": "Color",
"headerFilter": False,
"editor": "input",
"formatter": "color",
"width": "3px",
"headerSort": False,
}
)
if add_peakopt_col:
columns.append(
{
"title": "PeakOpt",
"field": "PeakOpt",
"headerFilter": False,
"formatter": "tickCross",
"width": "6px",
"headerSort": True,
"hozAlign": "center",
"editor": True,
}
)
if add_ms_file_active_col:
columns.append(
{
"title": "InAnalysis",
"field": "InAnalysis",
"headerFilter": True,
"formatter": "tickCross",
"width": "6px",
"headerSort": True,
"hozAlign": "center",
"editor": True,
}
)
for col in col_names:
content = {
"title": col,
"field": col,
"headerFilter": True,
"width": col_width,
"editor": editor,
}
columns.append(content)
return columns
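# Hedged example: gen_tabulator_columns(["Batch", "Label"], add_ms_file_col=True)
# returns the frozen row-selection column, a frozen "MS-file" column, and one
# editable, filterable column of width "12px" each for "Batch" and "Label".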
def parse_table_content(content, filename):
content_type, content_string = content.split(",")
decoded = base64.b64decode(content_string)
if filename.lower().endswith(".csv"):
df = pd.read_csv(io.StringIO(decoded.decode("utf-8")))
elif filename.lower().endswith(".xlsx"):
df = pd.read_excel(io.BytesIO(decoded))
return df
def fig_to_src(dpi=100):
    out_img = io.BytesIO()
    # save as PNG so the payload matches the data-URI mime type returned below
    plt.savefig(out_img, format="png", bbox_inches="tight", dpi=dpi)
    plt.close("all")
    out_img.seek(0)  # rewind file
    encoded = base64.b64encode(out_img.read()).decode("ascii")
    return "data:image/png;base64,{}".format(encoded)
def merge_metadata(old, new):
old = old.set_index("MS-file")
new = new.groupby("MS-file").first().replace("null", None)
for col in new.columns:
if col == "" or col.startswith("Unnamed"):
continue
        if col not in old.columns:
old[col] = None
for ndx in new.index:
value = new.loc[ndx, col]
if value is None:
continue
if ndx in old.index:
old.loc[ndx, col] = value
return old.reset_index()
def file_colors(wdir):
meta = get_metadata(wdir)
colors = {}
for ndx, (fn, co) in meta[["MS-file", "Color"]].iterrows():
if not (isinstance(co, str)):
co = None
colors[fn] = co
return colors
def get_figure_fn(kind, wdir, label, format):
path = os.path.join(wdir, "figures", kind)
clean_label = clean_string(label)
fn = f"{kind}__{clean_label}.{format}"
fn = os.path.join(path, fn)
return path, fn
def clean_string(fn: str):
for x in ['"', "'", "(", ")", "[", "]", " ", "\\", "/", "{", "}"]:
fn = fn.replace(x, "_")
return fn
def savefig(kind=None, wdir=None, label=None, format="png", dpi=150):
path, fn = get_figure_fn(kind=kind, wdir=wdir, label=label, format=format)
maybe_create(path)
try:
with lock(fn):
plt.savefig(fn, dpi=dpi, bbox_inches="tight")
    except Exception:
print(f"Could not save figure {fn}, maybe no figure was created: {label}")
return fn
def maybe_create(dir_name):
if not os.path.isdir(dir_name):
os.makedirs(dir_name)
def png_fn_to_src(fn):
    with open(fn, "rb") as image_file:
        encoded_image = base64.b64encode(image_file.read())
    return "data:image/png;base64,{}".format(encoded_image.decode())
def get_ms_fns_for_peakopt(wdir):
"""Extract the filenames for peak optimization from
the metadata table and recreate the complete filename."""
df = get_metadata(wdir)
    fns = df[df.PeakOpt.astype(bool)]["MS-file"]
ms_files = get_ms_fns(wdir)
mapping = {filename_to_label(fn): fn for fn in ms_files}
fns = [mapping[fn] for fn in fns]
return fns
def float_to_color(x, vmin=0, vmax=2, cmap=None):
norm = mpl.colors.Normalize(vmin=vmin, vmax=vmax)
m = cm.ScalarMappable(norm=norm, cmap=cmap)
return m.to_rgba(x)
def write_targets(targets, wdir):
fn = get_targets_fn(wdir)
if "peak_label" in targets.columns:
targets = targets.set_index("peak_label")
with lock(fn):
targets.to_csv(fn)
def filename_to_label(fn: str):
if is_ms_file(fn):
fn = os.path.splitext(fn)[0]
return os.path.basename(fn)
def import_from_url(url, target_dir, fsc=None):
filenames = get_filenames_from_url(url)
filenames = [fn for fn in filenames if is_ms_file(fn)]
if len(filenames) == 0:
return None
fns = []
n_files = len(filenames)
for i, fn in enumerate(tqdm(filenames)):
_url = url + "/" + fn
logging.info("Downloading", _url)
if fsc is not None:
fsc.set("progress", int(100 * (1 + i) / n_files))
        fns.append(wget.download(_url, out=target_dir))  # keep the local paths so they are actually returned
return fns
def get_filenames_from_url(url):
    if url.startswith("ftp"):
        return get_filenames_from_ftp_directory(url)
    # urllib3 needs the full URL including the scheme
    with urllib3.PoolManager() as http:
        r = http.request("GET", url)
    soup = BeautifulSoup(r.data, "html.parser")
    files = [a["href"] for a in soup.find_all("a", href=True)]
    return files
def get_filenames_from_ftp_directory(url):
url_parts = urlparse(url)
domain = url_parts.netloc
path = url_parts.path
ftp = ftplib.FTP(domain)
ftp.login()
ftp.cwd(path)
filenames = ftp.nlst()
ftp.quit()
return filenames
def import_from_local_path(path, target_dir, fsc=None):
fns = glob(os.path.join(path, "**", "*.*"), recursive=True)
fns = [fn for fn in fns if is_ms_file(fn)]
fns_out = []
n_files = len(fns)
for i, fn in enumerate(tqdm(fns)):
if fsc is not None:
fsc.set("progress", int(100 * (1 + i) / n_files))
fn_out = P(target_dir) / P(fn).with_suffix(".feather").name
if P(fn_out).is_file():
continue
fns_out.append(fn_out)
try:
convert_ms_file_to_feather(fn, fn_out)
        except Exception:
logging.warning(f"Could not convert {fn}")
return fns_out
def df_to_in_memory_csv_file(df):
buffer = io.StringIO()
df.to_csv(buffer)
buffer.seek(0)
    return buffer.getvalue()  # call getvalue() to return the CSV text, not the bound method
def df_to_in_memory_excel_file(df):
def to_xlsx(bytes_io):
xslx_writer = pd.ExcelWriter(bytes_io, engine="xlsxwriter")
df.to_excel(xslx_writer, index=True, sheet_name="sheet1")
xslx_writer.save()
return to_xlsx
def has_na(df):
return df.isna().sum().sum() > 0
|
python
|
# Generated by Django 3.1.2 on 2020-11-09 15:40
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('auctions', '0015_auto_20201106_1737'),
]
operations = [
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=200)),
('content', models.TextField()),
('created', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)),
('active', models.BooleanField(default=False)),
],
),
]
|
python
|
# Copyright 2009 by Tiago Antao <[email protected]>. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
import os
import unittest
from Bio import MissingExternalDependencyError
from Bio.PopGen.GenePop.Controller import GenePopController
#Tests genepop related code. Note: this case requires genepop
#test_PopGen_GenePop_nodepend tests code that does not require genepop
found = False
for path in os.environ['PATH'].split(os.pathsep):
try:
for filename in os.listdir(path):
if filename.startswith('Genepop'):
found = True
except os.error:
pass #Path doesn't exist - correct to pass
if not found:
raise MissingExternalDependencyError(\
"Install GenePop if you want to use Bio.PopGen.GenePop.")
class AppTest(unittest.TestCase):
"""Tests genepop execution via biopython.
"""
def test_allele_genotype_frequencies(self):
"""Test genepop execution on basic allele and genotype frequencies.
"""
ctrl = GenePopController()
pop_iter, locus_iter = ctrl.calc_allele_genotype_freqs("PopGen" + os.sep + "big.gen")
#print pop, loci
#for popc in pop_iter:
# pop_name, loci_content = popc
# print pop_name
# for locus in loci_content.keys():
# geno_list, hets, freq_fis = loci_content[locus]
# print locus
# print hets
# print freq_fis
# print geno_list
# print
def test_calc_diversities_fis_with_identity(self):
"""Test calculations of diversities ...
"""
ctrl = GenePopController()
iter, avg_fis, avg_Qintra = ctrl.calc_diversities_fis_with_identity(
"PopGen" + os.sep + "big.gen")
liter = list(iter)
assert len(liter) == 37
assert liter[0][0] == "Locus1"
assert len(avg_fis)==10
assert len(avg_Qintra)==10
def test_estimate_nm(self):
"""Test Nm estimation.
"""
ctrl = GenePopController()
mean_sample_size, mean_priv_alleles, mig10, mig25, mig50, mig_corrected =\
ctrl.estimate_nm("PopGen" + os.sep + "big.gen")
assert (mean_sample_size, mean_priv_alleles, mig10, mig25, mig50, mig_corrected) == \
(28.0, 0.016129, 52.5578, 15.3006, 8.94583, 13.6612)
def test_fst_all(self):
"""Test genepop execution on all fst.
"""
ctrl = GenePopController()
(allFis, allFst, allFit), itr = ctrl.calc_fst_all("PopGen" + os.sep + "c2line.gen")
results = list(itr)
assert (len(results) == 3)
assert (results[0][0] == "136255903")
assert (results[1][3] - 0.33 < 0.01)
def test_haploidy(self):
"""Test haploidy.
"""
ctrl = GenePopController()
(allFis, allFst, allFit), itr = ctrl.calc_fst_all("PopGen" + os.sep + "haplo.gen")
litr = list(itr)
assert not type(allFst) == int
assert len(litr) == 37
assert litr[36][0] == "Locus37"
if __name__ == "__main__":
runner = unittest.TextTestRunner(verbosity = 2)
unittest.main(testRunner=runner)
|
python
|
from zope.browserpage import ViewPageTemplateFile
import pkg_resources
import zeit.cms.browser.objectdetails  # provides the Details base class used below
import zeit.cms.related.interfaces
import zeit.cms.workflow.interfaces
import zeit.content.video.interfaces
class Details(zeit.cms.browser.objectdetails.Details):
index = ViewPageTemplateFile(pkg_resources.resource_filename(
'zeit.content.video.browser', 'object-details-body.pt'))
def __call__(self):
return self.index()
@property
def graphical_preview_url(self):
return self.context.video_still
|
python
|
import requests
import os
import time
from datetime import datetime
def get_edgar_index_files(date_input, download_folder=None):
date = datetime.strptime(str(date_input), '%Y%m%d')
# Confirm that date format is correct
error = 'Date format should be in YYYYMMDD. '
if date > datetime.now():
        raise Exception(error+'The date utilized occurs in the future.')
if date.year < 1993 or date.year > datetime.now().year:
raise Exception(error+'The year is out of range. EDGAR did not begin accepting electronic filings until 1993.')
index_type = ['company', 'form', 'master', 'xbrl']
    if 1 <= date.month <= 3:
        qtr = "Q1"
    elif 4 <= date.month <= 6:
        qtr = "Q2"
    elif 7 <= date.month <= 9:
        qtr = "Q3"
    elif 10 <= date.month <= 12:
        qtr = "Q4"
    # Resolve the base folder, then make sure an 'indexes' subfolder exists.
    if download_folder is None:
        base_path = os.path.dirname(os.path.realpath(__file__))
    else:
        base_path = download_folder
    if 'indexes' not in os.listdir(path=base_path):
        os.mkdir('/'.join([base_path, 'indexes']))
    base_path = base_path + '/indexes'
current_dirs = os.listdir(path=base_path)
if str(date.year) not in current_dirs:
os.mkdir('/'.join([base_path, str(date.year)]))
for it in index_type:
# Use the following filename pattern to store the index files locally
local_filename = f'{date.year}-{qtr}-{it}-index.txt'
# Create the absolute path for storing the index files
local_file_path = '/'.join([base_path, str(date.year), local_filename])
# Define the url at which to get the index file.
url = f'https://www.sec.gov/Archives/edgar/full-index/{date.year}/{qtr}/{it}.idx'
# Get the index file from EDGAR and save it to a text file. Note that to save a file
# rather than bringing it into memory, set stream=True and use r.iter_content()
# to break the incoming stream into chunks (here arbitrarily of size 10240 bytes)
r = requests.get(url, stream=True)
with open(local_file_path, 'wb') as f:
for chunk in r.iter_content(chunk_size=10240):
f.write(chunk)
# Wait one-tenth of a second before sending another request to EDGAR.
time.sleep(0.1)
get_edgar_index_files(20200630)
|
python
|
from pylab import *
import tensorflow as tf
from sklearn.datasets import make_moons
from sklearn.datasets import load_digits
import sys
sys.path.insert(0, '../utils')
from layers import *
from utils import *
import cPickle
import os
SAVE_DIR = os.environ['SAVE_DIR']
runnb=int(sys.argv[-4])
DATASET = sys.argv[-3]
sigmass=sys.argv[-2]
if(sys.argv[-1]=='None'):
leakiness=None
else:
leakiness = float(sys.argv[-1])
supss = 1
x_train,y_train,x_test,y_test = load_data(DATASET)
pp = permutation(x_train.shape[0])[:8000]
XX = x_train[pp]/10+randn(len(pp),1,28,28)*0.002
YY = y_train[pp]
XX = transpose(XX,[0,2,3,1])
x_test = transpose(x_test,[0,2,3,1])
input_shape = XX.shape
layers1 = [InputLayer(input_shape)]
layers1.append(DenseLayer(layers1[-1],K=64,R=2,leakiness=leakiness,sigma=sigmass,sparsity_prior=0.0))
layers1.append(DenseLayer(layers1[-1],K=32,R=2,leakiness=leakiness,sigma=sigmass))
layers1.append(FinalLayer(layers1[-1],R=10,sigma=sigmass,sparsity_prior=0.))
model1 = model(layers1)
if(supss):
model1.init_dataset(XX,YY)
else:
model1.init_dataset(XX)
LOSSES = train_layer_model(model1,rcoeff_schedule=schedule(0.0000,'linear'),CPT=200,random=0,fineloss=0,verbose=0,mp_opt=0,per_layer=1)
reconstruction=model1.reconstruct()[:1500]
samplesclass0=[model1.sampleclass(0,k)[:150] for k in xrange(10)]
samplesclass1=[model1.sampleclass(1,k)[:150] for k in xrange(10)]
samples1=model1.sample(1)[:300]
params = model1.get_params()
f=open(SAVE_DIR+'exp_nonlinearity_'+DATASET+'_'+sigmass+'_'+sys.argv[-1]+'_run'+str(runnb)+'.pkl','wb')
cPickle.dump([LOSSES,reconstruction,XX[:1500],samplesclass0,samplesclass1,samples1,params],f)
f.close()
|
python
|
#!/usr/bin/env python3
import os
import unittest
import boto3
from moto import mock_kms
from psyml.awsutils import (
decrypt_with_psyml,
encrypt_with_psyml,
get_psyml_key_arn,
)
from psyml.settings import PSYML_KEY_REGION, PSYML_KEY_ALIAS
class TestAWSUtils(unittest.TestCase):
@mock_kms
def kms_setup(self):
conn = boto3.client("kms", region_name=PSYML_KEY_REGION)
key = conn.create_key(Description="my key", KeyUsage="ENCRYPT_DECRYPT")
self.conn = conn
self.key_arn = key["KeyMetadata"]["Arn"]
conn.create_alias(AliasName=PSYML_KEY_ALIAS, TargetKeyId=self.key_arn)
@mock_kms
def test_get_psyml_key_arn(self):
self.kms_setup()
self.assertEqual(get_psyml_key_arn(), self.key_arn)
@mock_kms
def test_encrypt_decrypt(self):
self.kms_setup()
encrypted = encrypt_with_psyml("some-name", "plaintext")
self.assertNotEqual(encrypted, "plaintext")
decrypted = decrypt_with_psyml("some-name", encrypted)
self.assertEqual(decrypted, "plaintext")
with self.assertRaises(self.conn.exceptions.InvalidCiphertextException):
decrypt_with_psyml("another-name", encrypted)
|
python
|
import json
from job.model_template.utils import ModelTemplateDriver
DEBUG = False
class DeeFMTemplateDriver(ModelTemplateDriver):
def __init__(self, output_file=None, args=None, local=False):
        super(DeeFMTemplateDriver, self).__init__("DeepFM model template" + ("(debugging)" if DEBUG else ""),
output_file, args, local)
if __name__ == "__main__":
if DEBUG:
# import tensorflow as tf
# tf.config.run_functions_eagerly(True)
with open('./job_config_demo.json', 'r') as jcf:
demo_job = json.load(jcf)
driver = DeeFMTemplateDriver(args=[
"--job", json.dumps(demo_job),
"--export-path", "./runs"
], local=True)
driver.run()
else:
driver = DeeFMTemplateDriver()
driver.run()
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# __coconut_hash__ = 0x412948c
# Compiled with Coconut version 1.5.0-post_dev62 [Fish License]
# Coconut Header: -------------------------------------------------------------
from __future__ import print_function, absolute_import, unicode_literals, division
import sys as _coconut_sys, os as _coconut_os
_coconut_file_dir = _coconut_os.path.dirname(_coconut_os.path.abspath(__file__))
_coconut_cached_module = _coconut_sys.modules.get(str("__coconut__"))
if _coconut_cached_module is not None and _coconut_os.path.dirname(_coconut_cached_module.__file__) != _coconut_file_dir:
del _coconut_sys.modules[str("__coconut__")]
_coconut_sys.path.insert(0, _coconut_file_dir)
_coconut_module_name = _coconut_os.path.splitext(_coconut_os.path.basename(_coconut_file_dir))[0]
if _coconut_module_name and _coconut_module_name[0].isalpha() and all(c.isalpha() or c.isdigit() for c in _coconut_module_name) and "__init__.py" in _coconut_os.listdir(_coconut_file_dir):
_coconut_full_module_name = str(_coconut_module_name + ".__coconut__")
import __coconut__ as _coconut__coconut__
_coconut__coconut__.__name__ = _coconut_full_module_name
for _coconut_v in vars(_coconut__coconut__).values():
if getattr(_coconut_v, "__module__", None) == str("__coconut__"):
try:
_coconut_v.__module__ = _coconut_full_module_name
except AttributeError:
type(_coconut_v).__module__ = _coconut_full_module_name
_coconut_sys.modules[_coconut_full_module_name] = _coconut__coconut__
from __coconut__ import *
from __coconut__ import _coconut_tail_call, _coconut_tco, _coconut_call_set_names, _coconut, _coconut_MatchError, _coconut_igetitem, _coconut_base_compose, _coconut_forward_compose, _coconut_back_compose, _coconut_forward_star_compose, _coconut_back_star_compose, _coconut_forward_dubstar_compose, _coconut_back_dubstar_compose, _coconut_pipe, _coconut_star_pipe, _coconut_dubstar_pipe, _coconut_back_pipe, _coconut_back_star_pipe, _coconut_back_dubstar_pipe, _coconut_none_pipe, _coconut_none_star_pipe, _coconut_none_dubstar_pipe, _coconut_bool_and, _coconut_bool_or, _coconut_none_coalesce, _coconut_minus, _coconut_map, _coconut_partial, _coconut_get_function_match_error, _coconut_base_pattern_func, _coconut_addpattern, _coconut_sentinel, _coconut_assert, _coconut_mark_as_match, _coconut_reiterable
_coconut_sys.path.pop(0)
# Compiled Coconut: -----------------------------------------------------------
# Imports:
from pyprover.logic import Proposition
from pyprover.logic import Constant
from pyprover.logic import Implies
from pyprover.logic import wff
from pyprover.logic import bot
# Functions:
@_coconut_tco
def props(names):
"""Constructs propositions from a space-seperated string of names."""
return _coconut_tail_call(map, Proposition, names.split())
@_coconut_tco
def terms(names):
"""Constructs constants from a space-seperated string of names."""
return _coconut_tail_call(map, Constant, names.split())
@_coconut_tco
def solve(expr, **kwargs):
"""Converts to CNF and performs all possible resolutions."""
return _coconut_tail_call(expr.simplify(dnf=False, **kwargs).resolve, **kwargs)
strict_solve = _coconut.functools.partial(solve, nonempty_universe=False)
def no_proof_of(givens, conclusion):
"""Finds a formula that represents the givens not implying the conclusion."""
if wff(givens):
givens = (givens,)
else:
givens = tuple(givens)
return ~Implies(*(givens + (conclusion,)))
def proves(givens, conclusion, **kwargs):
"""Determines if the givens prove the conclusion."""
return (solve)(no_proof_of(givens, conclusion), **kwargs) == bot
strict_proves = _coconut.functools.partial(proves, nonempty_universe=False)
def iff(a, b):
"""Creates a formula for a implies b and b implies a."""
assert wff(a), a
assert wff(b), b
return a >> b & b >> a
def proves_and_proved_by(a, b, **kwargs):
"""Determines if a is true if and only if b."""
a = a.simplify(dnf=False, **kwargs)
b = b.simplify(dnf=False, **kwargs)
return (_coconut.functools.partial(proves, **kwargs))(a, b) and (_coconut.functools.partial(proves, **kwargs))(b, a)
strict_proves_and_proved_by = _coconut.functools.partial(proves_and_proved_by, nonempty_universe=False)
@_coconut_tco
def simplify(expr, *exprs, **kwargs):
"""Simplify the given expression[s]."""
if exprs:
return _coconut_tail_call((tuple), (map)(lambda x: x.simplify(**kwargs), (expr,) + exprs))
else:
return _coconut_tail_call(expr.simplify, **kwargs)
strict_simplify = _coconut.functools.partial(simplify, nonempty_universe=False)
def simplest_form(expr, **kwargs):
"""Finds the shortest simplification for the given expression."""
cnf_expr = expr.simplify(dnf=False, **kwargs)
dnf_expr = cnf_expr.simplify(dnf=True, **kwargs)
if len(cnf_expr) <= len(dnf_expr):
return cnf_expr
else:
return dnf_expr
strict_simplest_form = _coconut.functools.partial(simplest_form, nonempty_universe=False)
@_coconut_tco
def simplest_solution(expr, **kwargs):
"""Finds the shortest resolved simplification for the given expression."""
return _coconut_tail_call((simplest_form), (solve)(expr, **kwargs), **kwargs)
strict_simplest_solution = _coconut.functools.partial(simplest_solution, nonempty_universe=False)
@_coconut_tco
def substitute(expr, subs, **kwargs):
"""Substitutes expressions or booleans into the given expression."""
return _coconut_tail_call(expr.substitute, subs, **kwargs)
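# Hedged usage sketch (not part of the original module), assuming the
# pyprover API imported above:
#
#     p, q = props("p q")
#     proves(p & (p >> q), q)   # modus ponens, expected to be True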
|
python
|
"""
Feed File which implements the Hiven Feed type, which should contain social
media information of a specific user and their linked accounts.
---
Under MIT License
Copyright © 2020 - 2021 Luna Klatzer
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
# Used for type hinting and not having to use annotations for the objects
from __future__ import annotations
import logging
# Only importing the Objects for the purpose of type hinting and not actual use
from typing import TYPE_CHECKING
from ..base_types import DataClassObject
from ..utils import log_type_exception
if TYPE_CHECKING:
from .. import HivenClient
logger = logging.getLogger(__name__)
__all__ = ['Feed']
class Feed(DataClassObject):
"""
Represents the feed that is displayed on Hiven specifically for the user
"""
@log_type_exception('Feed')
def __init__(self, data: dict, client: HivenClient):
super().__init__()
def __str__(self) -> str:
return repr(self)
def __repr__(self) -> str:
info = [
('unknown', "")
]
return '<Feed {}>'.format(' '.join('%s=%s' % t for t in info))
|
python
|
from .flownets import FlowNetS
__all__ = ['FlowNetS']
|
python
|
import torch
import torch.distributed as dist
class GatherLayer(torch.autograd.Function):
'''Gather tensors from all process, supporting backward propagation.
'''
@staticmethod
def forward(ctx, input):
ctx.save_for_backward(input)
output = [torch.zeros_like(input) \
for _ in range(dist.get_world_size())]
dist.all_gather(output, input)
return tuple(output)
@staticmethod
def backward(ctx, *grads):
input, = ctx.saved_tensors
grad_out = torch.zeros_like(input)
grad_out[:] = grads[dist.get_rank()]
return grad_out
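# Hedged usage sketch (not part of the original module): inside a
# distributed training step, gather features from all ranks while keeping
# gradients flowing to the local shard:
#
#     features = torch.randn(8, 128, requires_grad=True)
#     gathered = torch.cat(GatherLayer.apply(features), dim=0)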
|
python
|
from abc import ABC
from typing import Callable, Dict, List, Optional, Sequence
from guardpost.authentication import Identity
class AuthorizationError(Exception):
pass
class AuthorizationConfigurationError(Exception):
pass
class PolicyNotFoundError(AuthorizationConfigurationError, RuntimeError):
def __init__(self, name: str):
super().__init__(f"Cannot find policy with name {name}")
class BaseRequirement(ABC):
"""Base class for authorization requirements"""
def __str__(self):
return self.__class__.__name__
class UnauthorizedError(AuthorizationError):
def __init__(
self,
forced_failure: Optional[str],
failed_requirements: Sequence[BaseRequirement],
scheme: Optional[str] = None,
error: Optional[str] = None,
error_description: Optional[str] = None,
):
"""
Creates a new instance of UnauthorizedError, with details.
:param forced_failure: if applicable, the reason for a forced failure.
:param failed_requirements: a sequence of requirements that failed.
:param scheme: optional authentication scheme that should be used.
:param error: optional error short text.
:param error_description: optional error details.
"""
super().__init__(self._get_message(forced_failure, failed_requirements))
self.failed = forced_failure
self.failed_requirements = failed_requirements
self.scheme = scheme
self.error = error
self.error_description = error_description
@staticmethod
def _get_message(forced_failure, failed_requirements):
if forced_failure:
return (
"The user is not authorized to perform the selected action."
+ f" {forced_failure}."
)
if failed_requirements:
errors = ", ".join(str(requirement) for requirement in failed_requirements)
return (
f"The user is not authorized to perform the selected action. "
f"Failed requirements: {errors}."
)
return "Unauthorized"
class AuthorizationContext:
__slots__ = ("identity", "requirements", "_succeeded", "_failed_forced")
def __init__(self, identity: Identity, requirements: Sequence[BaseRequirement]):
self.identity = identity
self.requirements = requirements
self._succeeded = set()
self._failed_forced = None
@property
def pending_requirements(self) -> List[BaseRequirement]:
return [item for item in self.requirements if item not in self._succeeded]
@property
def has_succeeded(self) -> bool:
if self._failed_forced:
return False
return all(requirement in self._succeeded for requirement in self.requirements)
@property
def forced_failure(self) -> Optional[str]:
return self._failed_forced
def fail(self, reason: str):
"""
Called to indicate that this authorization context has failed.
Forces failure, regardless of succeeded requirements.
"""
self._failed_forced = reason or "Authorization failed."
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.clear()
def succeed(self, requirement: BaseRequirement):
"""Marks the given requirement as succeeded for this authorization context."""
self._succeeded.add(requirement)
def clear(self):
self._failed_forced = False
self._succeeded.clear()
class Policy:
__slots__ = ("name", "requirements")
def __init__(self, name: str, *requirements: BaseRequirement):
self.name = name
self.requirements = list(requirements) or []
def add(self, requirement: BaseRequirement) -> "Policy":
self.requirements.append(requirement)
return self
def __iadd__(self, other: BaseRequirement):
if not isinstance(other, BaseRequirement):
raise ValueError("Only requirements can be added using __iadd__ syntax")
self.requirements.append(other)
return self
def __repr__(self):
return f'<Policy "{self.name}" at {id(self)}>'
class BaseAuthorizationStrategy(ABC):
def __init__(
self,
*policies: Policy,
default_policy: Optional[Policy] = None,
identity_getter: Optional[Callable[[Dict], Identity]] = None,
):
self.policies = list(policies)
self.default_policy = default_policy
self.identity_getter = identity_getter
def get_policy(self, name: str) -> Optional[Policy]:
for policy in self.policies:
if policy.name == name:
return policy
return None
def add(self, policy: Policy) -> "BaseAuthorizationStrategy":
self.policies.append(policy)
return self
def __iadd__(self, policy: Policy) -> "BaseAuthorizationStrategy":
self.policies.append(policy)
return self
def with_default_policy(self, policy: Policy) -> "BaseAuthorizationStrategy":
self.default_policy = policy
return self
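# Hedged usage sketch for the classes above; `MyRequirement` is a
# hypothetical no-op subclass used only for illustration:
#
#     class MyRequirement(BaseRequirement):
#         pass
#
#     policy = Policy("default", MyRequirement())
#     policy += MyRequirement()   # __iadd__ appends another requirement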
|
python
|
import warnings
import numpy as np
from tqdm import tqdm
from noise import pnoise3, snoise3
import vtk
from vtk.util.numpy_support import numpy_to_vtk
from vtk.numpy_interface import dataset_adapter as dsa
def add_noise_to_sphere(poly_data, octaves, offset=0, scale=0.5):
"""
Expects sphere with radius 1 centered at the origin
"""
wrap_data_object = dsa.WrapDataObject(poly_data)
points = wrap_data_object.Points
normals = wrap_data_object.PointData['Normals']
points_with_noise = []
zipped = list(zip(points, normals))
for point, normal in tqdm(zipped):
offset_point = point + offset
noise = scale * snoise3(*offset_point, octaves=octaves)
point_with_noise = point + noise * normal
points_with_noise.append(point_with_noise)
points_with_noise = np.array(points_with_noise)
vertices = vtk.vtkPoints()
with warnings.catch_warnings():
warnings.simplefilter('ignore')
points_with_noise_vtk = numpy_to_vtk(points_with_noise)
vertices.SetData(points_with_noise_vtk)
poly_data.SetPoints(vertices)
return poly_data
def center_poly_data(poly_data):
centerOfMassFilter = vtk.vtkCenterOfMass()
centerOfMassFilter.SetInputData(poly_data)
centerOfMassFilter.SetUseScalarsAsWeights(False)
centerOfMassFilter.Update()
center = np.array(centerOfMassFilter.GetCenter())
transform = vtk.vtkTransform()
transform.Translate(-center)
transform_filter = vtk.vtkTransformPolyDataFilter()
transform_filter.SetTransform(transform)
transform_filter.SetInputData(poly_data)
transform_filter.Update()
poly_data = transform_filter.GetOutput()
return poly_data
def transform_poly_data(poly_data, center, radii, angles):
transform = vtk.vtkTransform()
transform.Translate(center)
x_angle, y_angle, z_angle = angles # there must be a better way
transform.RotateX(x_angle)
transform.RotateY(y_angle)
transform.RotateZ(z_angle)
transform.Scale(*radii)
transform_filter = vtk.vtkTransformPolyDataFilter()
transform_filter.SetTransform(transform)
transform_filter.SetInputData(poly_data)
transform_filter.Update()
poly_data = transform_filter.GetOutput()
return poly_data
def compute_normals(poly_data):
normal_filter = vtk.vtkPolyDataNormals()
normal_filter.AutoOrientNormalsOn()
normal_filter.SetComputePointNormals(True)
normal_filter.SetComputeCellNormals(True)
normal_filter.SplittingOff()
normal_filter.SetInputData(poly_data)
normal_filter.ConsistencyOn()
normal_filter.Update()
poly_data = normal_filter.GetOutput()
return poly_data
def get_resection_poly_data(
poly_data,
offset,
center,
radii,
angles,
octaves=4,
scale=0.5,
deepcopy=True,
):
if deepcopy:
new_poly_data = vtk.vtkPolyData()
new_poly_data.DeepCopy(poly_data)
poly_data = new_poly_data
poly_data = add_noise_to_sphere(poly_data, octaves=octaves, offset=offset)
poly_data = center_poly_data(poly_data)
poly_data = transform_poly_data(poly_data, center, radii, angles)
poly_data = compute_normals(poly_data)
return poly_data
input_path = '/tmp/geodesic_polyhedron.vtp'
offset = 1000
octaves = 4
N = 5
np.random.seed(42)
reader = vtk.vtkXMLPolyDataReader()
reader.SetFileName(input_path)
reader.Update()
input_poly_data = reader.GetOutput()
for _ in range(N):  # N resections, defined above
offset = np.random.randint(1000)
output_path = f'/tmp/geodesic_polyhedron_noise_{offset}.vtp'
volume = np.random.randint(100, 10000)
center = np.random.randint(-50, 50, size=(3))
k = 4/3 * np.pi
radius = (volume / k) ** (1/3)
ratio = np.random.uniform(0.8, 1)
a = radius
b = radius * ratio
c = radius / ratio
radii = a, b, c
angles = np.random.uniform(0, 180, size=3)
output_poly_data = get_resection_poly_data(
input_poly_data,
offset,
center,
radii,
angles,
)
writer = vtk.vtkXMLPolyDataWriter()
writer.SetInputData(output_poly_data)
writer.SetFileName(output_path)
writer.Write()
|
python
|
import logging
import os
import sys
from logging import fatal, debug, info, warning
from os import path
from . import utils
from .argument_parser import build_argument_parser
from .metadata_parser import MetadataParser
from .settings import Settings
def main():
# Read arguments
argument_parser = build_argument_parser()
args = argument_parser.parse_args()
# Define settings and log level
settings = Settings(args)
logging.getLogger().setLevel(logging.DEBUG if settings.debug_enabled else logging.INFO)
debug(args)
debug(settings)
if not path.isdir(settings.comic_path):
fatal('Comic path "{}" does not exist'.format(path.abspath(settings.comic_path)))
sys.exit(1)
# Read comic info
comic = MetadataParser.parse(settings.metadata_path)
comic.overwrite_user_metadata(args)
debug(comic)
# Prepare temporary directory
temp_directory = settings.temp_directory(comic)
if path.isdir(temp_directory):
fatal('Directory {} exists...somehow. Stopping right here to prevent a bigger mess'.format(temp_directory))
sys.exit(1)
debug('Using temporary directory "{}"'.format(temp_directory))
os.makedirs(temp_directory)
# Find files
files = settings.read_files()
# Search for chapters
chapters = utils.read_files_as_chapters(files)
if len(chapters) == 0:
warning('No chapter saved was found!')
utils.clean_temporary_data(temp_directory, force_clean=True)
sys.exit(1)
# Merge chapters in volumes
volumes = utils.read_chapters_as_volumes(chapters)
# Extract chapter pages
utils.extract_volume_pages(settings, comic, volumes)
# Assemble volumes in ebook format
assembled_ebooks = utils.assemble_volumes(settings, comic, volumes)
if len(assembled_ebooks) <= 0:
warning('No file was assembled!')
sys.exit(0)
# Fill metadata
utils.fill_metadata(settings, comic, chapters, assembled_ebooks)
# Convert to MOBI
if settings.comic_format == 'MOBI':
assembled_ebooks = utils.convert_to_mobi(assembled_ebooks)
# Show results
info('Finished!')
if len(assembled_ebooks) > 0:
info('Files can be found in: {}'.format(path.abspath(settings.output)))
else:
warning('No file was assembled!')
# Remove temporary data
utils.clean_temporary_data(temp_directory, assembled_ebooks)
if __name__ == '__main__':
main()
|
python
|
import click
import questionary as q
import docker
import os
import time
import subprocess
from threading import Thread
from functools import wraps
from colorama import (Fore, Style)
from sqlalchemy.engine.url import make_url
from vantage6.common import (info, warning, error, debug as debug_msg,
check_config_write_permissions)
from vantage6.common.docker_addons import pull_if_newer
from vantage6.common.globals import (
APPNAME,
STRING_ENCODING,
DEFAULT_DOCKER_REGISTRY,
DEFAULT_SERVER_IMAGE
)
# from vantage6.cli import fixture
from vantage6.cli.globals import (DEFAULT_SERVER_ENVIRONMENT,
DEFAULT_SERVER_SYSTEM_FOLDERS)
from vantage6.cli.context import ServerContext
from vantage6.cli.configuration_wizard import (
select_configuration_questionaire,
configuration_wizard
)
from vantage6.cli import __version__
def click_insert_context(func):
# add option decorators
@click.option('-n', '--name', default=None,
help="name of the configutation you want to use.")
@click.option('-c', '--config', default=None,
help='absolute path to configuration-file; overrides NAME')
@click.option('-e', '--environment',
default=DEFAULT_SERVER_ENVIRONMENT,
help='configuration environment to use')
@click.option('--system', 'system_folders', flag_value=True)
@click.option('--user', 'system_folders', flag_value=False,
default=DEFAULT_SERVER_SYSTEM_FOLDERS)
@wraps(func)
def func_with_context(name, config, environment, system_folders,
*args, **kwargs):
# select configuration if none supplied
if config:
ctx = ServerContext.from_external_config_file(
config,
environment,
system_folders
)
else:
if name:
name, environment = (name, environment)
else:
try:
name, environment = select_configuration_questionaire(
"server", system_folders
)
except Exception:
error("No configurations could be found!")
exit(1)
# raise error if config could not be found
if not ServerContext.config_exists(
name,
environment,
system_folders
):
scope = "system" if system_folders else "user"
error(
f"Configuration {Fore.RED}{name}{Style.RESET_ALL} with "
f"{Fore.RED}{environment}{Style.RESET_ALL} does not exist "
f"in the {Fore.RED}{scope}{Style.RESET_ALL} folders!"
)
exit(1)
# create server context, and initialize db
ServerContext.LOGGING_ENABLED = False
ctx = ServerContext(
name,
environment=environment,
system_folders=system_folders
)
return func(ctx, *args, **kwargs)
return func_with_context
@click.group(name='server')
def cli_server():
"""Subcommand `vserver`."""
pass
#
# start
#
@cli_server.command(name='start')
@click.option('--ip', default=None, help='ip address to listen on')
@click.option('-p', '--port', default=None, type=int, help='port to listen on')
@click.option('--debug', is_flag=True,
help='run server in debug mode (auto-restart)')
@click.option('-i', '--image', default=None, help="Node Docker image to use")
@click.option('--keep/--auto-remove', default=False,
help="Keep image after finishing")
@click.option('--attach/--detach', default=False,
help="Attach server logs to the console after start")
@click_insert_context
def cli_server_start(ctx, ip, port, debug, image, keep, attach):
"""Start the server."""
info("Starting server...")
info("Finding Docker daemon.")
docker_client = docker.from_env()
# will print an error if not
check_if_docker_deamon_is_running(docker_client)
# check that this server is not already running
running_servers = docker_client.containers.list(
filters={"label": f"{APPNAME}-type=server"})
for server in running_servers:
if server.name == f"{APPNAME}-{ctx.name}-{ctx.scope}-server":
error(f"Server {Fore.RED}{ctx.name}{Style.RESET_ALL} "
"is already running")
exit(1)
# Determine image-name. First we check if the option --image has been used.
# Then we check if the image has been specified in the config file, and
# finally we use the default settings from the package.
if image is None:
image = ctx.config.get(
"image",
f"{DEFAULT_DOCKER_REGISTRY}/{DEFAULT_SERVER_IMAGE}"
)
info(f"Pulling latest server image '{image}'.")
try:
pull_if_newer(docker.from_env(), image)
# docker_client.images.pull(image)
except Exception:
warning("... alas, no dice!")
else:
info(" ... success!")
info("Creating mounts")
mounts = [
docker.types.Mount(
"/mnt/config.yaml", str(ctx.config_file), type="bind"
)
]
# FIXME: code duplication with cli_server_import()
# try to mount database
uri = ctx.config['uri']
url = make_url(uri)
environment_vars = None
# If host is None, we're dealing with a file-based DB, like SQLite
if (url.host is None):
db_path = url.database
if not os.path.isabs(db_path):
# We're dealing with a relative path here -> make it absolute
db_path = ctx.data_dir / url.database
basename = os.path.basename(db_path)
dirname = os.path.dirname(db_path)
os.makedirs(dirname, exist_ok=True)
# we're mounting the entire folder that contains the database
mounts.append(docker.types.Mount(
"/mnt/database/", dirname, type="bind"
))
environment_vars = {
"VANTAGE6_DB_URI": f"sqlite:////mnt/database/{basename}"
}
else:
warning(f"Database could not be transfered, make sure {url.host} "
"is reachable from the Docker container")
info("Consider using the docker-compose method to start a server")
ip_ = f"--ip {ip}" if ip else ""
port_ = f"--port {port}" if port else ""
cmd = f'vserver-local start -c /mnt/config.yaml -e {ctx.environment} ' \
f'{ip_} {port_}'
info(cmd)
info("Run Docker container")
port_ = str(port or ctx.config["port"] or 5000)
container = docker_client.containers.run(
image,
command=cmd,
mounts=mounts,
detach=True,
labels={
f"{APPNAME}-type": "server",
"name": ctx.config_file_name
},
environment=environment_vars,
ports={f"{port_}/tcp": ("127.0.0.1", port_)},
name=ctx.docker_container_name,
auto_remove=not keep,
tty=True
)
info(f"Success! container id = {container}")
if attach:
logs = container.attach(stream=True, logs=True, stdout=True)
Thread(target=print_log_worker, args=(logs,), daemon=True).start()
while True:
try:
time.sleep(1)
except KeyboardInterrupt:
info("Closing log file. Keyboard Interrupt.")
exit(0)
#
# list
#
@cli_server.command(name='list')
def cli_server_configuration_list():
"""Print the available configurations."""
client = docker.from_env()
check_if_docker_deamon_is_running(client)
running_server = client.containers.list(
filters={"label": f"{APPNAME}-type=server"})
running_node_names = []
for node in running_server:
running_node_names.append(node.name)
header = \
"\nName"+(21*" ") + \
"Environments"+(20*" ") + \
"Status"+(10*" ") + \
"System/User"
click.echo(header)
click.echo("-"*len(header))
running = Fore.GREEN + "Online" + Style.RESET_ALL
stopped = Fore.RED + "Offline" + Style.RESET_ALL
# system folders
configs, f1 = ServerContext.available_configurations(
system_folders=True)
for config in configs:
status = running if f"{APPNAME}-{config.name}-system-server" in \
running_node_names else stopped
click.echo(
f"{config.name:25}"
f"{str(config.available_environments):32}"
f"{status:25} System "
)
# user folders
configs, f2 = ServerContext.available_configurations(
system_folders=False)
for config in configs:
status = running if f"{APPNAME}-{config.name}-user-server" in \
running_node_names else stopped
click.echo(
f"{config.name:25}"
f"{str(config.available_environments):32}"
f"{status:25} User "
)
click.echo("-"*85)
if len(f1)+len(f2):
warning(
f"{Fore.RED}Failed imports: {len(f1)+len(f2)}{Style.RESET_ALL}")
#
# files
#
@cli_server.command(name='files')
@click_insert_context
def cli_server_files(ctx):
"""List files locations of a server instance."""
info(f"Configuration file = {ctx.config_file}")
info(f"Log file = {ctx.log_file}")
info(f"Database = {ctx.get_database_uri()}")
#
# new
#
@cli_server.command(name='new')
@click.option('-n', '--name', default=None,
help="name of the configutation you want to use.")
@click.option('-e', '--environment', default=DEFAULT_SERVER_ENVIRONMENT,
help='configuration environment to use')
@click.option('--system', 'system_folders', flag_value=True)
@click.option('--user', 'system_folders', flag_value=False,
default=DEFAULT_SERVER_SYSTEM_FOLDERS)
def cli_server_new(name, environment, system_folders):
"""Create new configuration."""
if not name:
name = q.text("Please enter a configuration-name:").ask()
name_new = name.replace(" ", "-")
if name != name_new:
info(f"Replaced spaces from configuration name: {name}")
name = name_new
# check that this config does not exist
try:
if ServerContext.config_exists(name, environment, system_folders):
error(
f"Configuration {Fore.RED}{name}{Style.RESET_ALL} with "
f"environment {Fore.RED}{environment}{Style.RESET_ALL} "
f"already exists!"
)
exit(1)
except Exception as e:
print(e)
exit(1)
# Check that we can write in this folder
if not check_config_write_permissions(system_folders):
error("Your user does not have write access to all folders. Exiting")
info(f"Create a new server using '{Fore.GREEN}vserver new "
"--user{Style.RESET_ALL}' instead!")
exit(1)
# create config in ctx location
cfg_file = configuration_wizard(
"server",
name,
environment=environment,
system_folders=system_folders
)
info(f"New configuration created: {Fore.GREEN}{cfg_file}{Style.RESET_ALL}")
# info(f"root user created.")
flag = "" if system_folders else "--user"
info(
f"You can start the server by running "
f"{Fore.GREEN}vserver start {flag}{Style.RESET_ALL}"
)
#
# import
#
# TODO this method has a lot of duplicated code from `start`
@cli_server.command(name='import')
@click.argument('file_', type=click.Path(exists=True))
@click.option('--drop-all', is_flag=True, default=False)
@click.option('-i', '--image', default=None, help="Node Docker image to use")
@click.option('--keep/--auto-remove', default=False,
help="Keep image after finishing")
@click_insert_context
def cli_server_import(ctx, file_, drop_all, image, keep):
""" Import organizations/collaborations/users and tasks.
    Especially useful for testing purposes.
"""
info("Starting server...")
info("Finding Docker daemon.")
docker_client = docker.from_env()
# will print an error if not
check_if_docker_deamon_is_running(docker_client)
    # pull latest Docker image
if image is None:
image = ctx.config.get(
"image",
"harbor.vantage6.ai/infrastructure/server:latest"
)
info(f"Pulling latest server image '{image}'.")
try:
docker_client.images.pull(image)
except Exception:
warning("... alas, no dice!")
else:
info(" ... success!")
info("Creating mounts")
mounts = [
docker.types.Mount(
"/mnt/config.yaml", str(ctx.config_file), type="bind"
),
docker.types.Mount(
"/mnt/import.yaml", str(file_), type="bind"
)
]
# FIXME: code duplication with cli_server_start()
# try to mount database
uri = ctx.config['uri']
url = make_url(uri)
environment_vars = None
# If host is None, we're dealing with a file-based DB, like SQLite
if (url.host is None):
db_path = url.database
if not os.path.isabs(db_path):
# We're dealing with a relative path here -> make it absolute
db_path = ctx.data_dir / url.database
basename = os.path.basename(db_path)
dirname = os.path.dirname(db_path)
os.makedirs(dirname, exist_ok=True)
# we're mounting the entire folder that contains the database
mounts.append(docker.types.Mount(
"/mnt/database/", dirname, type="bind"
))
environment_vars = {
"VANTAGE6_DB_URI": f"sqlite:////mnt/database/{basename}"
}
else:
warning(f"Database could not be transfered, make sure {url.host} "
"is reachable from the Docker container")
info("Consider using the docker-compose method to start a server")
drop_all_ = "--drop-all" if drop_all else ""
cmd = f'vserver-local import -c /mnt/config.yaml -e {ctx.environment} ' \
f'{drop_all_} /mnt/import.yaml'
info(cmd)
info("Run Docker container")
container = docker_client.containers.run(
image,
command=cmd,
mounts=mounts,
detach=True,
labels={
f"{APPNAME}-type": "server",
"name": ctx.config_file_name
},
environment=environment_vars,
auto_remove=not keep,
tty=True
)
logs = container.logs(stream=True, stdout=True)
Thread(target=print_log_worker, args=(logs,), daemon=False).start()
info(f"Success! container id = {container.id}")
# print_log_worker(container.logs(stream=True))
# for log in container.logs(stream=True):
# print(log.decode("utf-8"))
# info(f"Check logs files using {Fore.GREEN}docker logs {container.id}"
# f"{Style.RESET_ALL}")
# info("Reading yaml file.")
# with open(file_) as f:
# entities = yaml.safe_load(f.read())
# info("Adding entities to database.")
# fixture.load(entities, drop_all=drop_all)
#
# shell
#
@cli_server.command(name='shell')
@click_insert_context
def cli_server_shell(ctx):
""" Run a iPython shell. """
docker_client = docker.from_env()
# will print an error if not
check_if_docker_deamon_is_running(docker_client)
running_servers = docker_client.containers.list(
filters={"label": f"{APPNAME}-type=server"})
    if ctx.docker_container_name not in [s.name for s in running_servers]:
error(f"Server {Fore.RED}{ctx.name}{Style.RESET_ALL} is not running?")
return
try:
subprocess.run(['docker', 'exec', '-it', ctx.docker_container_name,
'vserver-local', 'shell', '-c', '/mnt/config.yaml'])
except Exception as e:
info("Failed to start subprocess...")
debug_msg(e)
#
# stop
#
@cli_server.command(name='stop')
@click.option("-n", "--name", default=None, help="configuration name")
@click.option('--system', 'system_folders', flag_value=True)
@click.option('--user', 'system_folders', flag_value=False,
default=DEFAULT_SERVER_SYSTEM_FOLDERS)
@click.option('--all', 'all_servers', flag_value=True)
def cli_server_stop(name, system_folders, all_servers):
"""Stop a or all running server. """
client = docker.from_env()
check_if_docker_deamon_is_running(client)
running_servers = client.containers.list(
filters={"label": f"{APPNAME}-type=server"})
if not running_servers:
warning("No servers are currently running.")
return
running_server_names = [server.name for server in running_servers]
if all_servers:
for name in running_server_names:
container = client.containers.get(name)
container.kill()
info(f"Stopped the {Fore.GREEN}{name}{Style.RESET_ALL} server.")
else:
if not name:
name = q.select("Select the server you wish to stop:",
choices=running_server_names).ask()
else:
post_fix = "system" if system_folders else "user"
name = f"{APPNAME}-{name}-{post_fix}-server"
if name in running_server_names:
container = client.containers.get(name)
container.kill()
info(f"Stopped the {Fore.GREEN}{name}{Style.RESET_ALL} server.")
else:
error(f"{Fore.RED}{name}{Style.RESET_ALL} is not running?")
#
# attach
#
@cli_server.command(name='attach')
@click.option("-n", "--name", default=None, help="configuration name")
@click.option('--system', 'system_folders', flag_value=True)
@click.option('--user', 'system_folders', flag_value=False,
default=DEFAULT_SERVER_SYSTEM_FOLDERS)
def cli_server_attach(name, system_folders):
"""Attach the logs from the docker container to the terminal."""
client = docker.from_env()
check_if_docker_deamon_is_running(client)
running_servers = client.containers.list(
filters={"label": f"{APPNAME}-type=server"})
running_server_names = [node.name for node in running_servers]
if not name:
name = q.select("Select the server you wish to inspect:",
choices=running_server_names).ask()
else:
post_fix = "system" if system_folders else "user"
name = f"{APPNAME}-{name}-{post_fix}-server"
if name in running_server_names:
container = client.containers.get(name)
logs = container.attach(stream=True, logs=True, stdout=True)
Thread(target=print_log_worker, args=(logs,), daemon=True).start()
while True:
try:
time.sleep(1)
except KeyboardInterrupt:
info("Closing log file. Keyboard Interrupt.")
exit(0)
else:
error(f"{Fore.RED}{name}{Style.RESET_ALL} was not running!?")
def check_if_docker_deamon_is_running(docker_client):
try:
docker_client.ping()
except Exception:
error("Docker socket can not be found. Make sure Docker is running.")
exit()
#
# version
#
@cli_server.command(name='version')
@click.option("-n", "--name", default=None, help="configuration name")
@click.option('--system', 'system_folders', flag_value=True)
@click.option('--user', 'system_folders', flag_value=False, default=
DEFAULT_SERVER_SYSTEM_FOLDERS)
def cli_server_version(name, system_folders):
"""Returns current version of vantage6 services installed."""
client = docker.from_env()
check_if_docker_deamon_is_running(client)
    running_servers = client.containers.list(
        filters={"label": f"{APPNAME}-type=server"})
running_server_names = [node.name for node in running_servers]
if not name:
name = q.select("Select the server you wish to inspect:",
choices=running_server_names).ask()
else:
post_fix = "system" if system_folders else "user"
name = f"{APPNAME}-{name}-{post_fix}"
if name in running_server_names:
container = client.containers.get(name)
        version = container.exec_run(cmd='vserver-local version', stdout=True)
        click.echo({"server": version.output.decode('utf-8'), "cli": __version__})
def print_log_worker(logs_stream):
for log in logs_stream:
print(log.decode(STRING_ENCODING), end="")
|
python
|
from django.conf.urls import url, include
from rest_framework.urlpatterns import format_suffix_patterns
from . import views
urlpatterns = [
url(r'^rides/$', views.RideList.as_view(), name='ride_list'),
url(r'^rides/(?P<pk>\d+)/$', views.RideDetail.as_view(), name='ride_detail'),
url(r'^users/$', views.UserList.as_view()),
url(r'^users/(?P<pk>[0-9]+)/$', views.UserDetail.as_view()),
url(r'^users/register', views.AuthUser.as_view())
]
urlpatterns = format_suffix_patterns(urlpatterns)
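# With format_suffix_patterns applied, clients can request an explicit
# renderer via a URL suffix, e.g. /rides/1.json (a sketch; the available
# suffixes depend on the renderers configured for the project).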
|
python
|
#!/usr/bin/env python
# Demonstration GET friendships/lookup
# See https://dev.twitter.com/rest/reference/get/friendships/lookup
from secret import twitter_instance
from json import dump
import sys
tw = twitter_instance()
# [1]
ids = ','.join((str(i) for i in (577367985, 1220723053, 1288619659)))
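# friendships/lookup accepts up to 100 comma-separated user IDs per request,
# so larger ID sets would need to be chunked.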
response = tw.friendships.lookup(user_id=ids)
# [2]
dump(response, sys.stdout, ensure_ascii=False, indent=4, sort_keys=True)
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.BelongGreenMerchantInfo import BelongGreenMerchantInfo
class AlipayCommerceGreenItemenergySendModel(object):
def __init__(self):
self._alipay_uid = None
self._belong_merchant_info = None
self._goods_id = None
self._goods_name = None
self._industry_type = None
self._order_link = None
self._qr_code_id = None
self._scan_time = None
@property
def alipay_uid(self):
return self._alipay_uid
@alipay_uid.setter
def alipay_uid(self, value):
self._alipay_uid = value
@property
def belong_merchant_info(self):
return self._belong_merchant_info
@belong_merchant_info.setter
def belong_merchant_info(self, value):
if isinstance(value, BelongGreenMerchantInfo):
self._belong_merchant_info = value
else:
self._belong_merchant_info = BelongGreenMerchantInfo.from_alipay_dict(value)
@property
def goods_id(self):
return self._goods_id
@goods_id.setter
def goods_id(self, value):
self._goods_id = value
@property
def goods_name(self):
return self._goods_name
@goods_name.setter
def goods_name(self, value):
self._goods_name = value
@property
def industry_type(self):
return self._industry_type
@industry_type.setter
def industry_type(self, value):
self._industry_type = value
@property
def order_link(self):
return self._order_link
@order_link.setter
def order_link(self, value):
self._order_link = value
@property
def qr_code_id(self):
return self._qr_code_id
@qr_code_id.setter
def qr_code_id(self, value):
self._qr_code_id = value
@property
def scan_time(self):
return self._scan_time
@scan_time.setter
def scan_time(self, value):
self._scan_time = value
def to_alipay_dict(self):
params = dict()
if self.alipay_uid:
if hasattr(self.alipay_uid, 'to_alipay_dict'):
params['alipay_uid'] = self.alipay_uid.to_alipay_dict()
else:
params['alipay_uid'] = self.alipay_uid
if self.belong_merchant_info:
if hasattr(self.belong_merchant_info, 'to_alipay_dict'):
params['belong_merchant_info'] = self.belong_merchant_info.to_alipay_dict()
else:
params['belong_merchant_info'] = self.belong_merchant_info
if self.goods_id:
if hasattr(self.goods_id, 'to_alipay_dict'):
params['goods_id'] = self.goods_id.to_alipay_dict()
else:
params['goods_id'] = self.goods_id
if self.goods_name:
if hasattr(self.goods_name, 'to_alipay_dict'):
params['goods_name'] = self.goods_name.to_alipay_dict()
else:
params['goods_name'] = self.goods_name
if self.industry_type:
if hasattr(self.industry_type, 'to_alipay_dict'):
params['industry_type'] = self.industry_type.to_alipay_dict()
else:
params['industry_type'] = self.industry_type
if self.order_link:
if hasattr(self.order_link, 'to_alipay_dict'):
params['order_link'] = self.order_link.to_alipay_dict()
else:
params['order_link'] = self.order_link
if self.qr_code_id:
if hasattr(self.qr_code_id, 'to_alipay_dict'):
params['qr_code_id'] = self.qr_code_id.to_alipay_dict()
else:
params['qr_code_id'] = self.qr_code_id
if self.scan_time:
if hasattr(self.scan_time, 'to_alipay_dict'):
params['scan_time'] = self.scan_time.to_alipay_dict()
else:
params['scan_time'] = self.scan_time
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayCommerceGreenItemenergySendModel()
if 'alipay_uid' in d:
o.alipay_uid = d['alipay_uid']
if 'belong_merchant_info' in d:
o.belong_merchant_info = d['belong_merchant_info']
if 'goods_id' in d:
o.goods_id = d['goods_id']
if 'goods_name' in d:
o.goods_name = d['goods_name']
if 'industry_type' in d:
o.industry_type = d['industry_type']
if 'order_link' in d:
o.order_link = d['order_link']
if 'qr_code_id' in d:
o.qr_code_id = d['qr_code_id']
if 'scan_time' in d:
o.scan_time = d['scan_time']
return o
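if __name__ == '__main__':
    # Minimal roundtrip sketch with hypothetical values: build the model from
    # a plain dict, then serialize it back into an Alipay request payload.
    sample = {'alipay_uid': '2088000000000000', 'goods_id': 'G-001'}
    model = AlipayCommerceGreenItemenergySendModel.from_alipay_dict(sample)
    print(json.dumps(model.to_alipay_dict(), ensure_ascii=False))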
|
python
|
import random
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.utils.data
import copy
from MTLCNN_single import MTLCNN_single
from MultitaskClassifier import MultitaskClassifier
from Util import Util
class Train_Classifier:
def train_classifier(self, train_arguments, device, dataset_name=""):
self.__train(train_arguments, device, dataset_name=dataset_name)
def train_classifier_single(self, train_arguments, device):
self.__train_single(train_arguments, device)
def __train_single(self, train_arguments, device):
TEXTURE_LABELS = train_arguments["TEXTURE_LABELS"]
data_loader_list = train_arguments["texture_data_loader_list"]
train_parameters = train_arguments["train_parameters"]
saved_model_name = train_arguments["saved_model_name"]
print("..Training started..")
epochs = train_parameters["epochs"]
lr = train_parameters["learning_rate"]
phases = ['train', 'val']
        # network, optimizer (Adam) and loss are re-created for each split below
split_id = 0
# start training
for data_loader_dict in data_loader_list:
# initialise network for each dataset
network = MTLCNN_single(TEXTURE_LABELS).to(device)
optimizer = optim.Adam(network.parameters(), lr=lr, weight_decay=0.0005)
criterion = nn.CrossEntropyLoss()
min_correct = 0
split_id += 1
print('-' * 50)
print("Split: {0} =======>".format(split_id))
# start epoch
for epoch in range(epochs):
print('Epoch {}/{}'.format(epoch, epochs - 1))
print('-' * 20)
for phase in phases:
if phase == 'train':
network.train() # Set model to training mode
else:
network.eval() # Set model to evaluate mode
running_loss = 0
running_correct = 0
total_image_per_epoch = 0
for batch in data_loader_dict[phase]:
images, label = batch
images = images.to(device)
label = label.to(device)
optimizer.zero_grad()
output = network(images)
loss = criterion(output, label).to(device)
total_image_per_epoch += images.size(0)
if phase == "train":
loss.backward()
optimizer.step()
running_loss += loss.item() * images.size(0) * 2
running_correct += Util.get_num_correct(output, label)
epoch_loss = running_loss / total_image_per_epoch
epoch_accuracy = running_correct / total_image_per_epoch
print(
"{0} ==> loss: {1}, correct: {2}/{3}, accuracy: {4}".format(phase, epoch_loss, running_correct,
total_image_per_epoch,
epoch_accuracy))
if phase == 'val' and running_correct > min_correct:
print("saving model with correct: {0}, improved over previous {1}"
.format(running_correct, min_correct))
min_correct = running_correct
best_model_wts = copy.deepcopy(network.state_dict())
torch.save(best_model_wts, saved_model_name.format(split_id))
def __train(self, train_arguments, device, dataset_name=""):
# labels
IMAGE_NET_LABELS = train_arguments["IMAGE_NET_LABELS"]
TEXTURE_LABELS = train_arguments["TEXTURE_LABELS"]
# data loader
image_net_data_loader_dict = train_arguments["image_net_data_loader_dict"]
# image_net_T_data_loader_dict = train_arguments["image_net_T_data_loader_dict"]
texture_data_loader_list = train_arguments["texture_data_loader_list"]
train_parameters = train_arguments["train_parameters"]
saved_model_name = train_arguments["saved_model_name"]
print("..Training started..")
epochs = train_parameters["epochs"]
lr = train_parameters["learning_rate"]
weight_decay = train_parameters["weight_decay"]
labels = {
"image_net_labels": IMAGE_NET_LABELS,
# "image_net_labels_S2": IMAGE_NET_LABELS_S2,
# "image_net_labels_T": IMAGE_NET_LABELS_T,
"texture_labels": TEXTURE_LABELS
}
phases = ['train', 'val']
split_id = 0
# task = ["Object_detection", "Texture_classification"]
task = ["Object_detection", "Texture_classification"]
for texture_data_loader_dict in texture_data_loader_list:
print("Dataset name: {0}".format(dataset_name))
split_id += 1
print('-' * 50)
print("Split: {0} =======>".format(split_id))
model_path = saved_model_name.format(split_id)
print("Model: {0}".format(model_path))
network = MultitaskClassifier(labels).to(device)
optimizer = optim.Adam(network.parameters(), lr=lr, weight_decay=weight_decay)
criterion = [nn.CrossEntropyLoss(), nn.CrossEntropyLoss()]
texture_min_val_correct = 0
for epoch in range(epochs):
print('Epoch {}/{}'.format(epoch, epochs - 1))
print('-' * 20)
for phase in phases:
print("Phase: " + phase)
if phase == 'train':
network.train() # Set model to training mode
else:
network.eval() # Set model to evaluate mode
running_loss = 0
running_loss_imagenet = 0
running_loss_texture = 0
running_imagenet_correct = 0
running_texture_correct = 0
total_imagenet_image_per_epoch = 0
total_texture_image_per_epoch = 0
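                    # Interleave object-detection (ImageNet) and texture batches so each
                    # epoch alternates between tasks; the shuffle below (train phase only)
                    # randomizes the task order while keeping validation deterministic.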
batch_set = self.__get_batch_set(image_net_data_loader_dict,
texture_data_loader_dict, phase, task)
if phase == "train":
random.shuffle(batch_set)
for batch_dict in batch_set:
for task_id, batch in batch_dict.items():
images, label = batch
images = images.to(device)
label = label.to(device)
optimizer.zero_grad()
outputs = network(images)
if task_id == task[0]:
total_imagenet_image_per_epoch += images.size(0)
loss = criterion[0](outputs[0], label).to(device)
running_loss_imagenet += loss.item()
# network.conv4.weight.requires_grad = True
# network.conv4.bias.requires_grad = True
# network.conv5.weight.requires_grad = True
# network.conv5.bias.requires_grad = True
# network.object_detect_fc1.weight.requires_grad = True
# network.object_detect_fc1.bias.requires_grad = True
# network.object_detect_fc2.weight.requires_grad = True
# network.object_detect_fc2.bias.requires_grad = True
# network.object_detect_out.weight.requires_grad = True
# network.object_detect_out.bias.requires_grad = True
#
# network.fc_texture_1.weight.requires_grad = False
# network.fc_texture_1.bias.requires_grad = False
# network.fc_texture_2.weight.requires_grad = False
# network.fc_texture_2.bias.requires_grad = False
# network.texture_out.weight.requires_grad = False
# network.texture_out.bias.requires_grad = False
elif task_id == task[1]:
total_texture_image_per_epoch += images.size(0)
loss = criterion[1](outputs[1], label).to(device)
running_loss_texture += loss.item()
# network.conv4.weight.requires_grad = False
# network.conv4.bias.requires_grad = False
# network.conv5.weight.requires_grad = False
# network.conv5.bias.requires_grad = False
# network.object_detect_fc1.weight.requires_grad = False
# network.object_detect_fc1.bias.requires_grad = False
# network.object_detect_fc2.weight.requires_grad = False
# network.object_detect_fc2.bias.requires_grad = False
# network.object_detect_out.weight.requires_grad = False
# network.object_detect_out.bias.requires_grad = False
#
# network.fc_texture_1.weight.requires_grad = True
# network.fc_texture_1.bias.requires_grad = True
# network.fc_texture_2.weight.requires_grad = True
# network.fc_texture_2.bias.requires_grad = True
# network.texture_out.weight.requires_grad = True
# network.texture_out.bias.requires_grad = True
if phase == "train":
loss.backward()
optimizer.step()
running_loss += loss.item()
if task_id == task[0]:
running_imagenet_correct += Util.get_num_correct(outputs[0], label)
elif task_id == task[1]:
running_texture_correct += Util.get_num_correct(outputs[1], label)
epoch_loss = running_loss
epoch_loss_imagenet = running_loss_imagenet
epoch_loss_texture = running_loss_texture
epoch_imagenet_accuracy = running_imagenet_correct / total_imagenet_image_per_epoch
epoch_texture_accuracy = running_texture_correct / total_texture_image_per_epoch
print(
"{0} ==> loss: {1}, imagenet loss:{2}, texture loss:{3}, "
"imagenet correct: {4}/{5}, imagenet accuracy: {6} "
"texture correct: {7}/{8}, texture accuracy: {9}, ".format(phase,
epoch_loss,
epoch_loss_imagenet,
epoch_loss_texture,
running_imagenet_correct,
total_imagenet_image_per_epoch,
epoch_imagenet_accuracy,
running_texture_correct,
total_texture_image_per_epoch,
epoch_texture_accuracy
))
if phase == 'val' and running_texture_correct > texture_min_val_correct:
print("saving model with correct: {0}, improved over previous {1}"
.format(running_texture_correct, texture_min_val_correct))
texture_min_val_correct = running_texture_correct
# saved_model_name = saved_model_name.format(split_id)
torch.save(network.state_dict(), model_path)
print("Training ended")
@staticmethod
def __get_batch_set(image_net_data_loader_dict,
texture_data_loader_dict, phase, task):
batch_set = []
for image_net_S2_data in image_net_data_loader_dict[phase]:
batch_set.append({task[0]: image_net_S2_data})
for texture_data in texture_data_loader_dict[phase]:
batch_set.append({task[1]: texture_data})
return batch_set
def __save_init_weights(self, network):
np.save("./init_weights/enc1_weight.npy", network.enc1.weight.cpu().data.numpy())
np.save("./init_weights/enc1_bias.npy", network.enc1.bias.cpu().data.numpy())
np.save("./init_weights/enc2_weight.npy", network.enc2.weight.cpu().data.numpy())
np.save("./init_weights/enc2_bias.npy", network.enc2.bias.cpu().data.numpy())
np.save("./init_weights/enc3_weight.npy", network.enc3.weight.cpu().data.numpy())
np.save("./init_weights/enc3_bias.npy", network.enc3.bias.cpu().data.numpy())
# @staticmethod
# def initialize_model(auto_encoder_model_path, dataset_labels, device):
# model = Autoencoder().to(device)
# model.load_state_dict(torch.load(auto_encoder_model_path, map_location=device))
# TEXTURE_LABELS = dataset_labels["TEXTURE_LABELS"]
# IMAGE_NET_LABELS = dataset_labels["IMAGE_NET_LABELS"]
# auto_encoder_model = Autoencoder().to(device)
# init_weights = {
# "conv1_wt": model.enc1.weight.data,
# "conv1_bias": model.enc1.bias.data,
# "conv2_wt": model.enc2.weight.data,
# "conv2_bias": model.enc2.bias.data,
# "conv3_wt": model.enc3.weight.data,
# "conv3_bias": model.enc3.bias.data
# }
# auto_encoder_model.load_state_dict(torch.load(auto_encoder_model_path, map_location=device))
# network = MTLCNN(init_weights, TEXTURE_LABELS, IMAGE_NET_LABELS, device).to(device)
# return network
|
python
|
"""
Name: jupyter.py
Purpose: Installs a jupyter notebook
Author: PNDA team
Created: 03/10/2016
Copyright (c) 2016 Cisco and/or its affiliates.
This software is licensed to you under the terms of the Apache License, Version 2.0 (the "License").
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
The code, technical concepts, and all information contained herein, are the property of Cisco Technology, Inc.
and/or its affiliated entities, under various laws including copyright, international treaties, patent,
and/or contract. Any use of the material herein must be in accordance with the terms of the License.
All rights not expressly granted by the License are reserved.
Unless required by applicable law or agreed to separately in writing, software distributed under the
License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
either express or implied.
"""
# pylint: disable=C0103
import json
import os
import logging
import shutil
import stat
import deployer_utils
from plugins.base_creator import Creator
class JupyterCreator(Creator):
def validate_component(self, component):
errors = []
notebook_found = False
file_list = component['component_detail']
for file_name in file_list:
if file_name.endswith(r'.ipynb'):
notebook_found = True
if notebook_found is False:
errors.append('missing ipynb file')
return errors
def get_component_type(self):
return 'jupyter'
def destroy_component(self, application_name, create_data):
logging.debug("destroy_component: %s %s", application_name, json.dumps(create_data))
for command in create_data['delete_commands']:
os.system(command)
def start_component(self, application_name, create_data):
logging.debug("start_component (nothing to do for jupyter): %s %s", application_name, json.dumps(create_data))
def stop_component(self, application_name, create_data):
logging.debug("stop_component (nothing to do for jupyter): %s %s", application_name, json.dumps(create_data))
def create_component(self, staged_component_path, application_name, user_name, component, properties):
logging.debug("create_component: %s %s %s %s", application_name, user_name, json.dumps(component), properties)
application_user = properties['application_user']
delete_commands = []
## Create local git repo for application_user if not exist.
repo_path = '{}/jupyter-{}'.format(self._config['git_repos_root'], application_user)
        if not os.path.isdir(repo_path):
os.makedirs(repo_path)
os.system('git init {}'.format(repo_path))
shutil.copyfile(repo_path+'/.git/hooks/post-update.sample', repo_path+'/.git/hooks/post-update')
            os.chmod(repo_path + '/.git/hooks/post-update', 0o755)
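            # The stock post-update hook runs 'git update-server-info', keeping
            # the repo cloneable over dumb transports such as plain HTTP.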
this_dir = os.path.dirname(os.path.realpath(__file__))
shutil.copyfile(this_dir+'/jupyter_README.ipynb',repo_path+'/README.ipynb')
os.system('cd {0} && git add README.ipynb && git commit -m "Initial commit"'.format(repo_path))
## add notebooks to application_user github repo.
notebook_install_path = '{}/{}'.format(repo_path, application_name)
os.makedirs('{}'.format(notebook_install_path))
file_list = component['component_detail']
for file_name in file_list:
# We copy all files in package to jupyter folder to let the user work with all kind of files/datasets.
#if file_name.endswith(r'.ipynb'):
if file_name != 'properties.json':
if os.path.isfile('{}/{}'.format(staged_component_path,file_name)):
self._fill_properties('%s/%s' % (staged_component_path, file_name), properties)
logging.debug('Copying {} to {}'.format(file_name, notebook_install_path))
shutil.copyfile('{}/{}'.format(staged_component_path, file_name),
'{}/{}'.format(notebook_install_path, file_name ))
else:
logging.debug('creating {}/{} folder'.format(notebook_install_path, file_name))
os.makedirs('{}/{}'.format(notebook_install_path, file_name))
# Create a properties.json file in notebooks to access application jupyter component properties.
with open('{}/properties.json'.format(notebook_install_path), 'w') as prop_file:
prop_dict = { k.replace('component_',''): v for k, v in properties.items() if k.startswith('component_')}
json.dump(prop_dict, prop_file)
# update local github repo:
os.system('cd {0} && git add {1} && git commit -m "added {1} app notebooks"'.format(repo_path, application_name))
delete_commands.append('rm -rf {}\n'.format( notebook_install_path))
delete_commands.append('cd {0} && git rm -r {1} && git commit -m "deleted {1} app notebooks"'.format( repo_path, application_name))
logging.debug("uninstall commands: %s", delete_commands)
return {'delete_commands': delete_commands}
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" test_hdf5xltek.py
Description:
"""
# Package Header #
from src.hdf5objects.__header__ import *
# Header #
__author__ = __author__
__credits__ = __credits__
__maintainer__ = __maintainer__
__email__ = __email__
# Imports #
# Standard Libraries #
import datetime
import pathlib
import timeit
# Third-Party Packages #
import pytest
import numpy as np
# Local Packages #
from src.hdf5objects import *
from .test_hdf5objects import HDF5File, ClassTest
# Definitions #
# Functions #
@pytest.fixture
def tmp_dir(tmpdir):
"""A pytest fixture that turn the tmpdir into a Path object."""
return pathlib.Path(tmpdir)
# Classes #
class TestHDF5XLTEK(ClassTest):
class_ = HDF5XLTEK
load_path = pathlib.Path.cwd().joinpath("tests/pytest_cache/EC228_2020-09-21_14~53~19.h5")
save_path = pathlib.Path.cwd().joinpath("tests/pytest_cache/")
@pytest.fixture
def load_file(self):
return self.class_(file=self.load_path)
def test_validate_file(self):
assert self.class_.validate_file_type(self.load_path)
@pytest.mark.parametrize("mode", ['r', 'r+', 'a'])
def test_new_object(self, mode):
with self.class_(file=self.load_path, mode=mode) as f_obj:
assert f_obj is not None
assert True
@pytest.mark.parametrize("mode", ['r', 'r+', 'a'])
def test_load_whole_file(self, mode):
with self.class_(file=self.load_path, mode=mode, load=True) as f_obj:
assert f_obj is not None
assert True
def test_load_fragment(self):
f_obj = self.class_(file=self.load_path)
data = f_obj["data"]
f_obj.close()
assert data is not None
def test_load_from_property(self):
f_obj = self.class_(file=self.load_path)
data = f_obj.data
f_obj.close()
assert data is not None
def test_get_attribute(self):
f_obj = self.class_(file=self.load_path)
attribute = f_obj.attributes["start"]
f_obj.close()
assert attribute is not None
def test_get_attribute_property(self):
f_obj = self.class_(file=self.load_path)
attribute = f_obj.start
f_obj.close()
assert attribute is not None
def test_get_data(self):
f_obj = self.class_(file=self.load_path)
data = f_obj.data[0:1]
f_obj.close()
assert data.shape is not None
def test_get_times(self):
f_obj = self.class_(file=self.load_path)
start = f_obj.time_axis.start_datetime
f_obj.close()
assert start is not None
@pytest.mark.xfail
def test_activate_swmr_mode(self):
f_obj = self.class_(file=self.load_path)
f_obj.swmr_mode = True
assert f_obj.swmr_mode
f_obj.close()
assert True
def test_create_file_build_empty(self, tmpdir):
start = datetime.datetime.now()
f_obj = self.class_(s_id="EC_test", s_dir=tmpdir, start=start, create=True, mode="a", build=True)
assert f_obj.is_open
f_obj.close()
assert True
def test_create_file_build_data(self, tmpdir):
sample_rate = 1024
n_channels = 128
n_samples = 2048
data = np.random.rand(n_samples, n_channels)
start = datetime.datetime.now()
f_obj = self.class_(s_id="EC_test", s_dir=tmpdir, start=start, create=True, mode="a", build=True)
dataset = f_obj.data
dataset.require(data=data, sample_rate=sample_rate, start=start)
f_obj.close()
assert True
@pytest.mark.xfail
def test_data_speed(self, load_file):
def assignment():
x = 10
def get_data():
x = load_file.eeg_data[:10000, :100]
mean_new = timeit.timeit(get_data, number=self.timeit_runs) / self.timeit_runs * 1000000
mean_old = timeit.timeit(assignment, number=self.timeit_runs) / self.timeit_runs * 1000000
percent = (mean_new / mean_old) * 100
print(f"\nNew speed {mean_new:.3f} μs took {percent:.3f}% of the time of the old function.")
assert percent < self.speed_tolerance
# Main #
if __name__ == '__main__':
pytest.main(["-v", "-s"])
|
python
|
"""
Provides basic logging functionality
"""
import logging
import inspect
from datetime import datetime
import os, sys
import traceback
class Logger:
"""
generic logger class
"""
    def __init__(self, logname=None, project_name=None):
        """
        :param logname: base name for the log file; defaults to the caller's file name
        :param project_name: if provided the logger will put its log files in a subdirectory named after the project
        :return:
        """
parent = inspect.stack()[1]
parent_file = inspect.getfile(parent[0])
path, path_filename = os.path.split(parent_file)
if logname:
filename = logname
else:
filename = path_filename
if project_name:
log_path = 'logs/' + project_name + '/' + filename
else:
log_path = 'logs/' + filename
if project_name:
if not os.path.exists('logs/' + project_name + '/'):
os.makedirs('logs/' + project_name + '/')
else:
if not os.path.exists('logs/'):
os.makedirs('logs/')
log_name = datetime.now().strftime(log_path + "-%Y%m%d-%H%M%S.log")
l = logging.getLogger("test")
l.setLevel('DEBUG')
l_fh = logging.FileHandler(log_name)
l_format = logging.Formatter('%(asctime)s %(message)s')
l_fh.setFormatter(l_format)
l.addHandler(l_fh)
self.logger = l
def l_error(self, msg):
self.l(msg, level=logging.ERROR)
def l_exception(self, msg='general exception'):
"""
new logging method will log as error with full traceback
:param msg:
:param exception_obj:
:return:
"""
etype, ex, tb = sys.exc_info()
tb_s = traceback.format_exception(etype, ex, tb)
msg = msg + ':\n' + ' '.join(tb_s)
self.l(msg, level=logging.ERROR)
    def l(self, msg, level=logging.INFO):
        """ Write a log message via the underlying logger.
        :param msg: Data to write to log file (Can be anything ...)
        :type msg: string
        :param level: Default INFO; DEBUG and ERROR are also supported.
        :type level: int
        """
        ## only print 'DEBUG' messages if overall log level is set to debug
        if level == logging.DEBUG:
            self.logger.debug(msg)
        elif level == logging.INFO:
            self.logger.info(msg)
            print(msg)
        elif level == logging.ERROR:
            self.logger.error(msg)
            self.send_failure_email(msg)
            print(msg)
        else:
            pass  # raise RuntimeError("Log level: %s not supported" % level)
def d(self, msg):
self.l(msg, level=logging.DEBUG)
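    def send_failure_email(self, msg):
        # Stub added as an assumption: l() calls this on ERROR but no
        # definition exists in this module, which would raise AttributeError.
        # Override it to actually send an alert email.
        pass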
def handle_exception(self, exc_type, exc_value, exc_traceback):
"""
"""
if issubclass(exc_type, KeyboardInterrupt):
sys.__excepthook__(exc_type, exc_value, exc_traceback)
return
self.logger.error("Uncaught exception", exc_info=(exc_type, exc_value, exc_traceback))
|
python
|
#!/usr/bin/env python3
import os
import unittest
import numpy as np
import torch
from pytorch_translate import rnn # noqa
from pytorch_translate import train
from pytorch_translate.tasks import pytorch_translate_task as tasks
from pytorch_translate.test import utils as test_utils
class TestRNNModel(unittest.TestCase):
@unittest.skipIf(torch.cuda.device_count() < 1, "No GPU available for test.")
def test_gpu_train_step(self):
test_args = test_utils.ModelParamsDict()
trainer, _ = test_utils.gpu_train_step(test_args)
assert trainer.get_meter("gnorm").avg > 0
@unittest.skipIf(torch.cuda.device_count() < 1, "No GPU available for test.")
def test_gpu_freeze_embedding(self):
test_args = test_utils.ModelParamsDict(
encoder_freeze_embed=True, decoder_freeze_embed=True
)
test_utils.gpu_train_step(test_args)
def test_load_pretrained_embedding(self):
test_args = test_utils.ModelParamsDict()
_, src_dict, tgt_dict = test_utils.prepare_inputs(test_args)
encoder_embed_path, embed_weights = test_utils.create_pretrained_embed(
src_dict, test_args.encoder_hidden_dim
)
test_args.encoder_pretrained_embed = encoder_embed_path
task = tasks.DictionaryHolderTask(src_dict, tgt_dict)
model = task.build_model(test_args)
assert np.allclose(
model.encoder.embed_tokens.weight.data.numpy(), embed_weights
)
os.remove(encoder_embed_path)
@unittest.skipIf(torch.cuda.device_count() < 1, "No GPU available for test.")
def test_milstm_cell(self):
test_args = test_utils.ModelParamsDict(cell_type="milstm")
trainer, _ = test_utils.gpu_train_step(test_args)
assert trainer.get_meter("gnorm").avg > 0
@unittest.skipIf(torch.cuda.device_count() < 1, "No GPU available for test.")
def test_sequence_lstm_encoder(self):
test_args = test_utils.ModelParamsDict(
encoder_bidirectional=True, sequence_lstm=True
)
trainer, _ = test_utils.gpu_train_step(test_args)
assert trainer.get_meter("gnorm").avg > 0
@unittest.skipIf(torch.cuda.device_count() < 1, "No GPU available for test.")
def test_layer_norm_lstm_cell(self):
test_args = test_utils.ModelParamsDict(cell_type="layer_norm_lstm")
trainer, _ = test_utils.gpu_train_step(test_args)
assert trainer.get_meter("gnorm").avg > 0
@unittest.skipIf(torch.cuda.device_count() < 1, "No GPU available for test.")
def test_first_layer_multihead_attention_(self):
test_args = test_utils.ModelParamsDict(
attention_type="multihead", attention_heads=2, first_layer_attention=True
)
trainer, _ = test_utils.gpu_train_step(test_args)
assert trainer.get_meter("gnorm").avg > 0
|
python
|
# -*- coding: utf-8 -*-
"""
CRUD operation for fan model
"""
import database
from DB import exception
from DB.models import Fan
from DB.api import dbutils as utils
RESP_FIELDS = ['id', 'resource', 'status', 'created_at']
SRC_EXISTED_FIELD = {
'id': 'id',
# 'uuid': 'uuid',
'status': 'status',
'resource_id': 'resource_id',
'created_at': 'created_at'
}
@database.run_in_session()
@utils.wrap_to_dict(RESP_FIELDS)
def new(session, src_dic, content=None):
    # Use None instead of a mutable default: a shared dict default would
    # persist (and accumulate keys) across calls.
    if content is None:
        content = {}
    for k, v in SRC_EXISTED_FIELD.items():
        content[k] = src_dic.get(v, None)
    return utils.add_db_object(session, Fan, **content)
def _get_fan(session, resource_id, order_by=[], limit=None, **kwargs):
if isinstance(resource_id, int):
resource_ids = {'eq': resource_id}
elif isinstance(resource_id, list):
resource_ids = {'in': resource_id}
else:
        raise exception.InvalidParameter('parameter resource_id format is not supported.')
return \
utils.list_db_objects(session, Fan, order_by=order_by, limit=limit, resource_id=resource_ids, **kwargs)
@database.run_in_session()
@utils.wrap_to_dict(RESP_FIELDS) # wrap the raw DB object into dict
def get_fan_by_gateway_uuid(session, resource_id):
return _get_fan(session, resource_id)
@database.run_in_session()
@utils.wrap_to_dict(RESP_FIELDS) # wrap the raw DB object into dict
def get_latest_by_gateway_uuid(session, resource_id):
fan = _get_fan(session, resource_id, order_by=[('id', True)], limit=1)
return fan[0] if len(fan) else None
@database.run_in_session()
@utils.wrap_to_dict(RESP_FIELDS)
def get_fan_by_time(session, start_time, end_time):
return utils.list_db_objects(session, Fan, created_at={'ge': str(start_time), 'le': str(end_time)})
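# Usage sketch (hypothetical values): run_in_session is assumed to create and
# inject the `session` argument, so callers pass only the remaining parameters:
#   latest = get_latest_by_gateway_uuid(resource_id=42)
#   fans = get_fan_by_time('2020-01-01 00:00:00', '2020-01-02 00:00:00')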
|
python
|
#!/usr/bin/env python
from random import choice
temperaments = ["sanguine", "choleric", "melancholic", "phlegmatic"]
personalities = ["introvert", "extrovert"]
# The former job determines the starting inventory
former_jobs = ["a fireman", "a policeman", "a medic", "unemployed", "a doctor", "a demolitions expert", "an IT guy", "an accountant"]
def generate_character():
results = []
results.append(choice(personalities))
results.append(choice(temperaments))
return results
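if __name__ == '__main__':
    # Quick demo: prints e.g. ['introvert', 'sanguine']
    print(generate_character())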
|
python
|
import os
import traceback
from concurrent.futures import CancelledError
import numpy as np
import cv2
from pyqtgraph.Qt import QtCore, QtGui
import pyqtgraph as pg
import pyqtgraph.opengl as gl
import sklearn.cluster
from pebble import concurrent
from mss import mss
from src.constants import *
from src.cv_img import CvImg
from src.components.qtrangeslider import QRangeSlider
from src.components.image_plotter import ImagePlotter
from src.components.image_hist_plotter import ImageHistPlotter
from src.components.global_data_tree import GlobalDataTreeWidget
from src.components.plot_3d import Plot3D
from src.gui_busy_lock import GuiBusyLock
from src.image_clusterers import CLUSTER_ALGORITHMS
DEFAULT_IMG_FILENAME = './test-images/starry-night.jpg'
DIALOG_SUPPORTED_IMG_EXTS = ''
for title, exts in SUPPORTED_IMG_EXTS.items():
exts_str = ' '.join([f'*.{ext}' for ext in exts])
DIALOG_SUPPORTED_IMG_EXTS += f'{title} ({exts_str});;'
DIALOG_SUPPORTED_IMG_EXTS += 'All Files (*)'
HOME_DIR = os.path.expanduser('~')
HOME_DIR = os.path.curdir # FIXME
DEFAULT_MAX_PIXELS = 10 ** 6
# NOTE: These constants will be initialized later
SCREEN_WIDTH = -1
SCREEN_HEIGHT = -1
ALL_CLUSTER_ALGORITHMS = list(CLUSTER_ALGORITHMS.keys())
IMG_CLUSTERERS = list(CLUSTER_ALGORITHMS.values())
CLUSTER_INPUTS = {
'color' : 'Color-only',
'spatial': 'Spatial-only',
'both' : 'Color & Spatial',
}
INTERNAL_CLUSTER_INPUTS = list(CLUSTER_INPUTS.keys())
CLUSTER_INPUT_TYPES = list(CLUSTER_INPUTS.values())
IMG_SCPLOT_SCALE = 4
CH_SCPLOT_SCALE = 5
CH_SCPLOT_SCALE_Z = 2
CH_PLOT_GRID_SZ = 8
def process_img_plot_mouse_event(img_plot, curr_img, fn):
def handle_mouse_event(mouse_pos):
if img_plot.sceneBoundingRect().contains(mouse_pos):
mouse_point = img_plot.getViewBox().mapSceneToView(mouse_pos)
(mouse_x, mouse_y) = int(mouse_point.x()), int(mouse_point.y())
(height, width) = curr_img.shape[:2]
            if (0 <= mouse_y < height) and (0 <= mouse_x < width):
return fn(mouse_x, mouse_y, curr_img[mouse_y, mouse_x])
return handle_mouse_event
def cluster_points_plot(color_centers, rgb_colored_centers, scale_factor=IMG_SCPLOT_SCALE):
return gl.GLScatterPlotItem(
pos=color_centers / 255 * scale_factor, color=rgb_colored_centers / 255,
        size=0.75, pxMode=False,
glOptions='opaque'
)
def img_scatterplot(cv_img, color_mode, crop_bounds=None, thresh_bounds=None, scale_factor=IMG_SCPLOT_SCALE):
    rgb_img = cv_img.RGB
    # Work on a signed copy so the -1 sentinel below neither wraps around in a
    # uint8 array nor mutates the cached converted image.
    converted_img = cv_img[color_mode].astype(np.int32)
if crop_bounds is not None:
x_min, y_min, x_max, y_max = crop_bounds
else:
height, width = rgb_img.shape[:2]
x_min, y_min, x_max, y_max = (0, 0, width, height)
rgb_img = rgb_img[y_min:y_max, x_min:x_max]
converted_img = converted_img[y_min:y_max, x_min:x_max]
if thresh_bounds is None:
thresh_bounds = [(0, 255), (0, 255), (0, 255)]
for (ch_index, bounds) in enumerate(thresh_bounds):
lower_ch, upper_ch = bounds
channel_arr = converted_img[:, :, ch_index]
thresh_indicies = ( (channel_arr < lower_ch) | (channel_arr > upper_ch) )
converted_img[thresh_indicies] = -1
    pos_arr = converted_img.reshape(-1, 3)
    color_arr = rgb_img.reshape(-1, 3) / 255
    # Drop thresholded pixels (marked -1) from both arrays, then scale positions
    non_zero_pixels = np.all(pos_arr != -1, axis=1)
    pos_arr = pos_arr[non_zero_pixels] / 255 * scale_factor
    color_arr = color_arr[non_zero_pixels]
return gl.GLScatterPlotItem(
pos=pos_arr, color=color_arr,
size=1, pxMode=True,
glOptions='opaque'
)
def pos_color_scatterplot(cv_img, color_mode, ch_index, crop_bounds=None, thresh_bounds=None, scale_factor=CH_SCPLOT_SCALE, scale_z=CH_SCPLOT_SCALE_Z):
rgb_img = cv_img.RGB.copy()
converted_img = cv_img[color_mode].copy()
if crop_bounds is not None:
x_min, y_min, x_max, y_max = crop_bounds
else:
height, width = rgb_img.shape[:2]
x_min, y_min, x_max, y_max = (0, 0, width, height)
rgb_img = rgb_img[y_min:y_max, x_min:x_max]
converted_img = converted_img[y_min:y_max, x_min:x_max]
if thresh_bounds is not None:
lower_ch, upper_ch = thresh_bounds[ch_index]
else:
lower_ch, upper_ch = (0, 255)
rows, cols = converted_img.shape[:2]
c_arr, r_arr = np.meshgrid(np.arange(cols), np.arange(rows))
channel_arr = converted_img[:, :, ch_index]
keep_indicies = ( (channel_arr > lower_ch) & (channel_arr < upper_ch) )
flat_keep_indices = keep_indicies.flatten()
flat_r_arr = r_arr.flatten()[flat_keep_indices]
flat_c_arr = c_arr.flatten()[flat_keep_indices]
flat_channel_arr = channel_arr.flatten()[flat_keep_indices]
scaled_dim = scale_factor / max(rows, cols)
scaled_z = scale_z / 255
flat_r_arr = (flat_r_arr - rows // 2) * scaled_dim
flat_c_arr = (flat_c_arr - cols // 2) * scaled_dim
flat_channel_arr = flat_channel_arr * scaled_z
pos_arr = np.vstack( (flat_r_arr, flat_c_arr, flat_channel_arr) ).T
color_arr = rgb_img.reshape(-1, 3) / 255
color_arr = color_arr[flat_keep_indices, :]
return gl.GLScatterPlotItem(
pos=pos_arr, color=color_arr,
size=1, pxMode=True,
glOptions='opaque'
)
# Link the image plot axes together for consistent panning and zooming
def setup_axes_links(leader_plot, follower_plots):
for plot in follower_plots:
plot.setXLink(leader_plot)
plot.setYLink(leader_plot)
# Load image with approximate max number of pixels
def load_image_max_pixels(input_img, max_pixels):
num_pixels = image_num_pixels(input_img)
if num_pixels > max_pixels:
resize_factor = img_resize_factor(input_img, max_pixels)
resized_img = cv2.resize(input_img, None, fx=resize_factor, fy=resize_factor)
else:
resize_factor = 1
resized_img = input_img[:, :, :]
return resized_img
# Returns the number of pixels in a 2D or 3D image
def image_num_pixels(img):
    return int(np.prod(img.shape[:2]))
# Return required resize factor to shrink image to contain given max number of pixels
def img_resize_factor(input_img, max_pixels):
resize_factor = 1 / ( (image_num_pixels(input_img) / max_pixels) ** 0.5 )
if resize_factor < 1:
return resize_factor
return 1
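# Example: a 4000x3000 image (12 MP) with max_pixels=10**6 gives a factor of
# 1/sqrt(12) ~= 0.289, i.e. about 1155x866 ~= 10**6 pixels after resizing.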
# Interpret image data as row-major instead of col-major
pg.setConfigOptions(imageAxisOrder='row-major')
class MyWindow(pg.GraphicsLayoutWidget):
def __init__(self):
super().__init__()
self.input_img = None
self.cv_img = None
self.dataset_mode = False
self.dataset_imgs = []
self.dataset_index = None
self.ch_index = 0
self.cs_index = 0
self.cluster_algo_index = 0
self.cluster_input_index = 0
self.orig_img_plot = None
self.glvw_color_vis = None
self.channel_plot = None
self.glvw_channel_vis = None
self.roi = None
self.menubar = None
self.statusbar = None
self.apply_crop = False
self.apply_thresh = False
self.mod_img_realtime = False
self.max_pixels_to_load = DEFAULT_MAX_PIXELS
self.channel_thresholds = [(0, 255), (0, 255), (0, 255)]
self.cluster_future = None
self.cluster_check_timer = None
self.main_window = None
@property
def gui_ready(self):
# HACK: This only checks the 4 main plots
return None not in [self.orig_img_plot, self.glvw_color_vis, self.channel_plot, self.glvw_channel_vis]
@property
def color_mode(self):
return ALL_COLOR_SPACES[self.cs_index]
@property
def channel_mode(self):
return COLOR_SPACE_LABELS[self.color_mode][self.ch_index]
@property
def cluster_input_mode(self):
return INTERNAL_CLUSTER_INPUTS[self.cluster_input_index]
@property
def curr_image(self):
return self.cv_img[self.color_mode]
@property
def curr_image_gray(self):
return self.cv_img.GRAY
@property
def curr_image_cropped(self):
if self.apply_crop:
x_min, y_min, x_max, y_max = self.roi_bounds
return self.curr_image[y_min:y_max, x_min:x_max]
else:
return self.curr_image
@property
def curr_image_gray_cropped(self):
if self.apply_crop:
x_min, y_min, x_max, y_max = self.roi_bounds
return self.curr_image_gray[y_min:y_max, x_min:x_max]
else:
return self.curr_image_gray
@property
def curr_image_slice(self):
img_slice = self.cv_img[self.color_mode][:, :, self.ch_index]
if self.apply_thresh:
lower_ch, upper_ch = self.thresh_bounds
thresh_indicies = ( (img_slice < lower_ch) | (img_slice > upper_ch) )
img_slice[thresh_indicies] = 0
return img_slice
@property
def roi_bounds(self):
height, width = self.cv_img.RGB.shape[:2]
if self.apply_crop:
x, y, w, h = self.roi.parentBounds().toAlignedRect().getRect()
x_min, y_min = max(x, 0), max(y, 0)
x_max, y_max = min(x + w, width), min(y + h, height)
return (x_min, y_min, x_max, y_max)
else:
return (0, 0, width, height)
@property
def thresh_bounds(self):
if self.apply_thresh:
return self.channel_thresholds[self.ch_index]
return None
@property
def curr_img_scatterplot(self):
return img_scatterplot(
self.cv_img, self.color_mode,
crop_bounds=self.roi_bounds,
thresh_bounds=self.channel_thresholds if self.apply_thresh else None
)
@property
def curr_pos_color_scatterplot(self):
return pos_color_scatterplot(
self.cv_img, self.color_mode, self.ch_index,
crop_bounds=self.roi_bounds,
thresh_bounds=self.channel_thresholds if self.apply_thresh else None
)
def load_image_file(self, img_path, max_pixels):
input_img = cv2.imread(img_path)
if input_img is None:
QtGui.QMessageBox.warning(self, 'Error!', f'Unable to load image from "{img_path}"')
if self.gui_ready:
return
else:
exit(-1)
self.load_image(input_img, max_pixels)
self.set_window_title(f'Now viewing "{img_path.split("/")[-1]}"')
def load_image(self, input_img, max_pixels):
if max_pixels is None:
max_pixels = self.max_pixels_to_load
with GuiBusyLock(self):
self.input_img = input_img
resized_img = load_image_max_pixels(self.input_img, max_pixels)
self.cv_img = CvImg.from_ndarray(resized_img)
if self.gui_ready:
self.data_tree['Image Info/Total Pixels'] = image_num_pixels(self.input_img)
self.data_tree['Image Info/Pixels Loaded'] = image_num_pixels(self.curr_image)
self.data_tree['Image Info/Resize Factor'] = img_resize_factor(self.input_img, max_pixels)
self.data_tree['Image Info/Original Image Size'] = np.array(self.input_img.shape[:2][::-1])
self.data_tree['Image Info/Loaded Image Size'] = np.array(self.curr_image.shape[:2][::-1])
self.orig_img_plot.set_image(self.cv_img.RGB)
self.on_color_space_change(self.cs_index)
def setup_gui(self):
if self.cv_img is None:
raise Exception('Error: Image has not been loaded yet! Please load an image before calling setup_gui()')
# Setup widgets according to grid layout
self.main_grid_layout = QtGui.QGridLayout()
# Optimal plot size is determined so that the app takes 75% total width and 80% total height (for 2 plots high and 3 plots wide)
optimal_plot_size = (SCREEN_WIDTH // 4, SCREEN_HEIGHT // 2.5)
# Setup main plots
self.orig_img_plot = ImagePlotter(title='Original Image', img=self.cv_img.RGB, enable_crosshair=True, size=optimal_plot_size)
self.glvw_color_vis = Plot3D(plot=self.curr_img_scatterplot, size=optimal_plot_size)
self.channel_plot = ImagePlotter(title=self.channel_mode, img=self.curr_image_slice, size=optimal_plot_size)
self.glvw_channel_vis = Plot3D(plot=self.curr_pos_color_scatterplot, enable_axes=False, size=optimal_plot_size)
self.glvw_channel_vis.grid_item.setPosition(x=-CH_PLOT_GRID_SZ / 2, y=-CH_PLOT_GRID_SZ / 2, z=0)
self.glvw_channel_vis.grid_item.setSize(x=CH_PLOT_GRID_SZ, y=CH_PLOT_GRID_SZ, z=0)
        # Tie the axes between the original image plot and the channel sliced image plot
setup_axes_links(self.orig_img_plot, [self.channel_plot])
# Layout main plots
self.main_grid_layout.addWidget(self.orig_img_plot, 0, 0)
self.main_grid_layout.addWidget(self.glvw_color_vis, 1, 0)
self.main_grid_layout.addWidget(self.channel_plot, 0, 1)
self.main_grid_layout.addWidget(self.glvw_channel_vis, 1, 1)
# Setup the color histogram plot
self.color_hist_plot = ImageHistPlotter(title='Color/Gray Histogram', size=optimal_plot_size)
self.color_hist_plot.plot_hist(self.curr_image_cropped, self.curr_image_gray_cropped)
self.main_grid_layout.addWidget(self.color_hist_plot, 0, 2)
# Setup settings/data tabs
info_tabs = QtGui.QTabWidget()
general_data_settings_tab = QtGui.QWidget()
cluster_settings_tab = QtGui.QWidget()
info_tabs.addTab(general_data_settings_tab, 'Settings/Data')
info_tabs.addTab(cluster_settings_tab, 'Clustering')
# Lay everything out for general settings/data tab
self.general_settings_layout = QtGui.QGridLayout()
# Setup max pixels loading slider
self.max_pixels_slider = QtGui.QSlider(QtCore.Qt.Horizontal)
self.max_pixels_slider.setMinimum(0)
self.max_pixels_slider.setMaximum(10)
self.max_pixels_slider.setValue(6)
self.max_pixels_slider.setTickPosition(QtGui.QSlider.TicksBelow)
self.max_pixels_slider.setTickInterval(1)
def on_max_pixels_slider_change(val):
self.max_pixels_to_load = 10 ** val
self.load_image(self.input_img, self.max_pixels_to_load)
self.data_tree['Image Info/Pixels Loaded'] = image_num_pixels(self.curr_image)
self.max_pixels_slider.valueChanged.connect(on_max_pixels_slider_change)
self.general_settings_layout.addWidget(QtGui.QLabel('Max Pixels (10^x):'), 0, 0)
self.general_settings_layout.addWidget(self.max_pixels_slider, 0, 1)
# Setup image realtime modding check box
self.mod_img_realtime_box = QtGui.QCheckBox()
self.mod_img_realtime_box.setChecked(self.mod_img_realtime)
self.mod_img_realtime_box.toggled.connect(self.on_mod_img_realtime_toggle)
self.general_settings_layout.addWidget(QtGui.QLabel('Realtime updates:'), 1, 0)
self.general_settings_layout.addWidget(self.mod_img_realtime_box, 1, 1)
# Setup color space combo box
self.color_space_cbox = QtGui.QComboBox()
self.color_space_cbox.addItems(ALL_COLOR_SPACES)
self.color_space_cbox.setCurrentIndex(self.cs_index)
self.color_space_cbox.currentIndexChanged.connect(self.on_color_space_change)
self.general_settings_layout.addWidget(QtGui.QLabel('Color Space:'), 2, 0)
self.general_settings_layout.addWidget(self.color_space_cbox, 2, 1)
# Setup channel combo box
self.channel_cbox = QtGui.QComboBox()
self.channel_cbox.addItems(COLOR_SPACE_LABELS[self.color_mode])
self.channel_cbox.setCurrentIndex(self.ch_index)
self.channel_cbox.currentIndexChanged.connect(self.on_channel_view_change)
self.general_settings_layout.addWidget(QtGui.QLabel('Channel:'), 3, 0)
self.general_settings_layout.addWidget(self.channel_cbox, 3, 1)
# Setup cropping checkbox
self.apply_crop_box = QtGui.QCheckBox()
self.apply_crop_box.setChecked(self.apply_crop)
self.apply_crop_box.toggled.connect(self.on_apply_crop_toggle)
self.general_settings_layout.addWidget(QtGui.QLabel('Apply Cropping:'), 4, 0)
self.general_settings_layout.addWidget(self.apply_crop_box, 4, 1)
# Setup thresholding checkboxes
self.apply_thresh_box = QtGui.QCheckBox()
self.apply_thresh_box.setChecked(self.apply_thresh)
self.apply_thresh_box.toggled.connect(self.on_apply_thresh_toggle)
self.general_settings_layout.addWidget(QtGui.QLabel('Apply Thresholding:'), 5, 0)
self.general_settings_layout.addWidget(self.apply_thresh_box, 5, 1)
# Setup thresholding sliders for all channels
thresh_row_offset = 6
self.all_channel_thresh_sliders = []
self.all_channel_labels = []
for i in range(3):
# Setup thresholding channel label
channel_label = QtGui.QLabel(f'Threshold ({COLOR_SPACE_LABELS[self.color_mode][i]}):')
self.general_settings_layout.addWidget(channel_label, thresh_row_offset + i, 0)
self.all_channel_labels += [channel_label]
# Setup thresholding channel range slider
channel_thresh_slider = QRangeSlider(QtCore.Qt.Horizontal)
channel_thresh_slider.range = (0, 255)
channel_thresh_slider.values = (0, 255)
channel_thresh_slider.setEnabled(False)
self.general_settings_layout.addWidget(channel_thresh_slider, thresh_row_offset + i, 1)
self.all_channel_thresh_sliders += [channel_thresh_slider]
# Setup the data tree widget
# NOTE: Top level keys will be rendered in reverse insertion order
initial_data = {
'Image Controls': {
'Crop Dimensions': np.array(self.roi_bounds),
'Channel Thresholds': np.array(self.channel_thresholds).T
},
'Mouse Info': {
'Mouse Location': np.array([-1, -1]),
'Color at Mouse': np.array([-1, -1, -1]),
},
'Image Info': {
'Total Pixels': image_num_pixels(self.input_img),
'Pixels Loaded': image_num_pixels(self.curr_image),
'Resize Factor': img_resize_factor(self.input_img, self.max_pixels_to_load),
'Original Image Size': np.array(self.input_img.shape[:2][::-1]),
'Loaded Image Size': np.array(self.curr_image.shape[:2][::-1]),
},
}
self.data_tree = GlobalDataTreeWidget()
self.data_tree.set_data(initial_data)
self.general_settings_layout.addWidget(self.data_tree, 9, 0, 1, 2)
def handle_on_mouse_hover(x, y, color):
self.data_tree['Mouse Info/Mouse Location'] = np.array([x, y])
self.data_tree['Mouse Info/Color at Mouse'] = color
show_color_on_hover = process_img_plot_mouse_event(self.orig_img_plot, self.curr_image, handle_on_mouse_hover)
self.orig_img_plot.scene().sigMouseMoved.connect(show_color_on_hover)
# HACK: Add dummy label widget to squish all widgets to the top
self.general_settings_layout.addWidget(QtGui.QLabel(''), 10, 0, 999, 2)
# Place all general settings widgets in 'Settings' tab
general_data_settings_tab.setLayout(self.general_settings_layout)
# Lay everything out for clustering settings tab
self.clustering_settings_layout = QtGui.QGridLayout()
# Setup clustering algorithm combo box
self.cluster_algo_cbox = QtGui.QComboBox()
self.cluster_algo_cbox.addItems(ALL_CLUSTER_ALGORITHMS)
self.cluster_algo_cbox.setCurrentIndex(self.cluster_algo_index)
self.cluster_algo_cbox.currentIndexChanged.connect(self.on_cluster_algo_change)
self.clustering_settings_layout.addWidget(QtGui.QLabel('Cluster Algorithm:'), 0, 0)
self.clustering_settings_layout.addWidget(self.cluster_algo_cbox, 0, 1)
# Setup clustering algorithm input data combo box
self.cluster_input_cbox = QtGui.QComboBox()
self.cluster_input_cbox.addItems(CLUSTER_INPUT_TYPES)
self.cluster_input_cbox.setCurrentIndex(self.cluster_input_index)
self.cluster_input_cbox.currentIndexChanged.connect(self.on_cluster_input_change)
self.clustering_settings_layout.addWidget(QtGui.QLabel('Cluster Input Type:'), 1, 0)
self.clustering_settings_layout.addWidget(self.cluster_input_cbox, 1, 1)
# Setup the cluster sub-settings widgets
self.clusterer_controller = IMG_CLUSTERERS[self.cluster_algo_index]
cluster_sub_settings_layout = self.clusterer_controller.setup_settings_layout()
self.cluster_settings_widget = QtGui.QWidget()
self.cluster_settings_widget.setLayout(cluster_sub_settings_layout)
self.clustering_settings_layout.addWidget(self.cluster_settings_widget, 2, 0, 1, 2)
# Setup clustering buttons
self.run_clustering_button = QtGui.QPushButton('Run Clustering')
self.run_clustering_button.clicked.connect(self.on_run_clustering)
self.run_clustering_button.setEnabled(True)
self.clustering_settings_layout.addWidget(self.run_clustering_button, 3, 0)
self.cancel_clustering_button = QtGui.QPushButton('Cancel Clustering')
self.cancel_clustering_button.clicked.connect(self.on_cancel_clustering)
self.cancel_clustering_button.setEnabled(False)
self.clustering_settings_layout.addWidget(self.cancel_clustering_button, 3, 1)
# HACK: Add dummy label widget to squish all widgets to the top
self.clustering_settings_layout.addWidget(QtGui.QLabel(''), 4, 0, 999, 2)
# Place all cluster settings widgets in 'Clustering' tab
cluster_settings_tab.setLayout(self.clustering_settings_layout)
# Add the tabs into the main layout
self.main_grid_layout.addWidget(info_tabs, 1, 2)
# Set the layout and resize the window accordingly
self.setLayout(self.main_grid_layout)
self.resize(self.main_grid_layout.sizeHint() + QtCore.QSize(10, 30))
def bind_to_main_window(self, main_window):
self.main_window = main_window
self.main_window.setCentralWidget(self)
self.setup_menubar(self.main_window)
self.setup_statusbar(self.main_window)
self.setup_shortcuts()
self.autosize()
def open_file_dialog(self, title, supported_exts, starting_dir=HOME_DIR):
filename, _ = pg.FileDialog().getOpenFileName(self, title, starting_dir, supported_exts)
return filename
def save_file_dialog(self, title, supported_exts, starting_dir=HOME_DIR):
filename, _ = pg.FileDialog().getSaveFileName(self, title, starting_dir, supported_exts)
return filename
def open_folder_dialog(self, title, starting_dir=HOME_DIR):
dirname = pg.FileDialog().getExistingDirectory(self, title, starting_dir)
return dirname
def on_color_space_change(self, cspace_index):
with GuiBusyLock(self):
self.cs_index = cspace_index
# NOTE: Temporarily disable the 'currentIndexChanged' since
# it'll be triggered when removing and adding new items
self.channel_cbox.currentIndexChanged.disconnect()
self.channel_cbox.clear()
self.channel_cbox.addItems(COLOR_SPACE_LABELS[self.color_mode])
self.channel_cbox.currentIndexChanged.connect(self.on_channel_view_change)
for i in range(3):
channel_label = self.all_channel_labels[i]
channel_label.setText(f'Threshold ({COLOR_SPACE_LABELS[self.color_mode][i]}):')
channel_thresh_slider = self.all_channel_thresh_sliders[i]
self.channel_thresholds[i] = (0, 255)
channel_thresh_slider.values = (0, 255)
self.channel_plot.setTitle(title=self.channel_mode)
self.update_all_plots()
self.channel_plot.autoRange()
self.glvw_color_vis.remove_cluster_plot()
def on_channel_view_change(self, ch_index):
with GuiBusyLock(self):
self.ch_index = ch_index
self.channel_plot.setTitle(title=self.channel_mode)
self.update_all_plots()
self.channel_plot.autoRange()
def on_cluster_algo_change(self, cluster_algo_index):
self.cluster_algo_index = cluster_algo_index
self.clusterer_controller = IMG_CLUSTERERS[self.cluster_algo_index]
cluster_settings_layout = self.clusterer_controller.setup_settings_layout()
old_widget = self.cluster_settings_widget
self.cluster_settings_widget = QtGui.QWidget()
self.cluster_settings_widget.setLayout(cluster_settings_layout)
self.clustering_settings_layout.replaceWidget(old_widget, self.cluster_settings_widget)
QtCore.QObjectCleanupHandler().add(old_widget)
self.clustering_settings_layout.update()
def on_cluster_input_change(self, cluster_input_index):
self.cluster_input_index = cluster_input_index
def on_crop_modify(self):
if self.apply_crop:
self.update_all_plots()
def on_crop_modify_realtime(self):
if self.apply_crop:
self.data_tree['Image Controls/Crop Dimensions'] = np.array(self.roi_bounds)
self.update_2d_plots()
if self.mod_img_realtime:
self.update_3d_plots()
def on_thresh_change(self, thresh_ch_index, lower_val, upper_val):
if self.apply_thresh:
self.channel_thresholds[thresh_ch_index] = (lower_val, upper_val)
self.update_all_plots()
def on_thresh_change_realtime(self, thresh_ch_index, lower_val, upper_val):
if self.apply_thresh:
self.channel_thresholds[thresh_ch_index] = (lower_val, upper_val)
self.data_tree['Image Controls/Channel Thresholds'] = np.array(self.channel_thresholds).T
self.update_2d_plots()
if self.mod_img_realtime:
self.update_3d_plots()
def on_apply_crop_toggle(self, should_apply_crop):
self.apply_crop = should_apply_crop
if self.apply_crop:
self.orig_img_plot.enable_roi_rect()
self.roi = self.orig_img_plot.roi_item
self.roi.sigRegionChanged.connect(self.on_crop_modify_realtime)
self.roi.sigRegionChangeFinished.connect(self.on_crop_modify)
else:
self.roi.sigRegionChanged.disconnect()
self.roi.sigRegionChangeFinished.disconnect()
self.roi = None
self.orig_img_plot.disable_roi_rect()
self.data_tree['Image Controls/Crop Dimensions'] = np.array(self.roi_bounds)
self.update_all_plots()
def on_mod_img_realtime_toggle(self, should_mod_img_realtime):
self.mod_img_realtime = should_mod_img_realtime
def on_apply_thresh_toggle(self, should_apply_thresh):
self.apply_thresh = should_apply_thresh
for (i, channel_thresh_slider) in enumerate(self.all_channel_thresh_sliders):
channel_thresh_slider.setEnabled(self.apply_thresh)
channel_thresh_value_changed_realtime = lambda i: (lambda lower, upper: self.on_thresh_change_realtime(i, lower, upper))
channel_thresh_value_changed = lambda i: (lambda lower, upper: self.on_thresh_change(i, lower, upper))
if self.apply_thresh:
channel_thresh_slider.valueChanged.connect(channel_thresh_value_changed_realtime(i))
channel_thresh_slider.valueChangedFinished.connect(channel_thresh_value_changed(i))
else:
channel_thresh_slider.valueChanged.disconnect()
channel_thresh_slider.valueChangedFinished.disconnect()
self.update_all_plots()
@property
def is_clustering(self):
return self.cluster_future is not None and self.cluster_future.running()
def on_run_clustering(self):
if not self.is_clustering:
self.run_clustering_button.setEnabled(False)
self.cancel_clustering_button.setEnabled(True)
self.glvw_color_vis.remove_cluster_plot()
@concurrent.process
def _run_clustering(cv_img, color_mode, input_mode, roi_bounds):
outcome = {
'results': None,
'exception': None,
}
try:
results = self.clusterer_controller.run_clustering(cv_img, color_mode, input_mode, roi_bounds)
color_centers, color_labels, rgb_colored_centers, cluster_error, num_iterations = results
outcome['results'] = (color_centers, rgb_colored_centers)
except Exception as ex:
err_name = str(ex)
err_type = str(type(ex))
err_stacktrace = ''.join(traceback.format_tb(ex.__traceback__))
outcome['exception'] = {
'name': err_name,
'type': err_type,
'stacktrace': err_stacktrace,
}
return outcome
def _check_clustering_results():
if self.cluster_future.done():
self.cluster_check_timer.stop()
try:
outcome = self.cluster_future.result()
if outcome['exception'] is not None:
                        error_msg = 'A problem occurred when running the clustering algorithm:'
error_msg += f"\n{outcome['exception']['name']}"
error_msg += f"\n{outcome['exception']['stacktrace']}"
QtGui.QMessageBox.warning(self, 'Error!', error_msg)
else:
color_centers, rgb_colored_centers = outcome['results']
self.glvw_color_vis.set_cluster_plot(cluster_points_plot(color_centers, rgb_colored_centers))
                except CancelledError:
                    # NOTE: The user requested to cancel the clustering operation
                    pass
finally:
self.run_clustering_button.setEnabled(True)
self.cancel_clustering_button.setEnabled(False)
self.cluster_future = _run_clustering(self.cv_img, self.color_mode, self.cluster_input_mode, self.roi_bounds)
self.cluster_check_timer = QtCore.QTimer()
self.cluster_check_timer.timeout.connect(_check_clustering_results)
self.cluster_check_timer.start(250)
def on_cancel_clustering(self):
if self.is_clustering:
self.cluster_future.cancel()
self.glvw_color_vis.remove_cluster_plot()
self.run_clustering_button.setEnabled(True)
self.cancel_clustering_button.setEnabled(False)
def update_2d_plots(self):
self.channel_plot.set_image(self.curr_image_slice, auto_range=False)
self.color_hist_plot.plot_hist(self.curr_image_cropped, self.curr_image_gray_cropped)
def update_3d_plots(self):
self.glvw_color_vis.set_plot(plot=self.curr_img_scatterplot)
self.glvw_channel_vis.set_plot(plot=self.curr_pos_color_scatterplot)
def update_all_plots(self):
self.update_2d_plots()
self.update_3d_plots()
def setup_menubar(self, main_window):
self.menubar = main_window.menuBar()
file_menu = self.menubar.addMenu('File')
help_menu = self.menubar.addMenu('Help')
open_image_action = QtGui.QAction('Open Image', self)
open_image_action.setShortcut('Ctrl+O')
open_image_action.setStatusTip('Open Image')
def on_img_file_select():
img_path = self.open_file_dialog('Open image file', DIALOG_SUPPORTED_IMG_EXTS)
if len(img_path) > 0:
self.dataset_mode = False
self.dataset_imgs = []
self.dataset_index = None
self.load_image_file(img_path, self.max_pixels_to_load)
open_image_action.triggered.connect(on_img_file_select)
file_menu.addAction(open_image_action)
open_dataset_action = QtGui.QAction('Open Dataset', self)
open_dataset_action.setShortcut('Ctrl+Shift+O')
open_dataset_action.setStatusTip('Open dataset of images')
def on_dataset_folder_select():
dataset_dir = self.open_folder_dialog('Open image dataset folder')
if len(dataset_dir) > 0:
raw_paths = [os.path.join(dataset_dir, filepath) for filepath in os.listdir(dataset_dir)]
dataset_image_paths = [filepath for filepath in raw_paths if os.path.isfile(filepath) and filepath.endswith(ALL_SUPPORTED_IMG_EXTS)]
self.dataset_mode = True
self.dataset_imgs = dataset_image_paths
self.dataset_index = 0
self.load_image_file(self.dataset_imgs[self.dataset_index], self.max_pixels_to_load)
open_dataset_action.triggered.connect(on_dataset_folder_select)
file_menu.addAction(open_dataset_action)
export_screenshot_action = QtGui.QAction('Export Screenshot', self)
export_screenshot_action.setShortcut('Ctrl+E')
export_screenshot_action.setStatusTip('Export screenshot of app')
def on_export_screenshot_request():
self.main_window.move(10, 10)
win_geometry = self.geometry()
position = self.mapToGlobal(self.geometry().topLeft())
size = self.geometry().size()
x, y = position.x(), position.y()
width, height = size.width(), size.height()
window_bounds = {
'top': y - 20,
'left': x,
'width': width,
'height': height,
}
with mss() as sct:
window_view = np.array(sct.grab(window_bounds))
window_view = cv2.cvtColor(window_view, cv2.COLOR_RGBA2RGB)
            save_filepath = self.save_file_dialog('Save screenshot export', DIALOG_SUPPORTED_IMG_EXTS)
            if len(save_filepath) > 0:
                cv2.imwrite(save_filepath, window_view)
export_screenshot_action.triggered.connect(on_export_screenshot_request)
file_menu.addAction(export_screenshot_action)
exit_action = QtGui.QAction('Exit', self)
exit_action.setShortcut('Ctrl+Q')
exit_action.setStatusTip('Exit application')
exit_action.triggered.connect(main_window.close)
file_menu.addAction(exit_action)
def setup_shortcuts(self):
QtGui.QShortcut(QtCore.Qt.Key_Left, self, self.load_previous_image_in_dataset)
QtGui.QShortcut(QtCore.Qt.Key_Right, self, self.load_next_image_in_dataset)
def load_previous_image_in_dataset(self):
if self.dataset_mode:
self.dataset_index -= 1
if self.dataset_index < 0:
self.dataset_index += len(self.dataset_imgs)
self.load_image_file(self.dataset_imgs[self.dataset_index], self.max_pixels_to_load)
def load_next_image_in_dataset(self):
if self.dataset_mode:
self.dataset_index += 1
self.dataset_index %= len(self.dataset_imgs)
self.load_image_file(self.dataset_imgs[self.dataset_index], self.max_pixels_to_load)
def setup_statusbar(self, main_window):
self.statusbar = main_window.statusBar()
def show_status(self, text, timeout=0):
if self.statusbar is not None:
self.statusbar.showMessage(text, timeout)
def set_window_title(self, text):
if self.main_window is not None:
self.main_window.setWindowTitle(text)
def autosize(self):
self.main_window.resize(self.size())
## Start Qt event loop unless running in interactive mode.
if __name__ == '__main__':
import sys
if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
app = pg.mkQApp()
screen_resolution = app.desktop().screenGeometry()
SCREEN_WIDTH, SCREEN_HEIGHT = screen_resolution.width(), screen_resolution.height()
with open('src/app.css') as fp:
            app.setStyleSheet(fp.read().strip())
MainWindow = QtGui.QMainWindow()
gui = MyWindow()
gui.load_image_file(DEFAULT_IMG_FILENAME, DEFAULT_MAX_PIXELS)
gui.setup_gui()
gui.bind_to_main_window(MainWindow)
gui.set_window_title(f'Now viewing "{DEFAULT_IMG_FILENAME.split("/")[-1]}"')
MainWindow.show()
# HACK: This dummy timer lets us properly Ctrl+C from the app
timer = QtCore.QTimer()
timer.timeout.connect(lambda: None)
timer.start(100)
sys.exit(app.exec_())
|
python
|
#!/usr/bin/env python
import argparse
from Toggl.togglApi import getTogglReport
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Obtain report from Toggl.')
parser.add_argument("--toggl-token", "-tt", help="Set Toggl token.")
parser.add_argument("--toggl-workspace", "-w", help="Set Toggl workspace")
parser.add_argument("--since", "-s", help="Start date for the report.")
parser.add_argument("--until", "-u", help="End date for the report.")
args = parser.parse_args()
report = getTogglReport(args.toggl_token, int(args.toggl_workspace), args.since, args.until)
print(report)
|
python
|
"""
Level generator utilities.
"""
import weakref
from math import floor
from random import randrange, choice
from .blt.nice_terminal import terminal
from .geom import Point, Rect, Size
from .draw import draw_rect
class BSPNode:
"""
Node in a binary space partitioning tree
.. py:attribute:: rect
:py:class:`Rect` represented by this node
.. py:attribute:: is_horz
``True`` iff this node is divided down its Y axis; ``False`` otherwise
.. py:attribute:: value
Int representing split point between the two children of this node. So
if this is a horizontal node and the width is 10, the value could be 6,
with the left node taking up 6 cells and the right taking up 4.
.. py:attribute:: child_a
:py:class:`BSPNode` either on the left (horizontal) or on top (vertical).
.. py:attribute:: child_b
:py:class:`BSPNode` either on the right (horizontal) or on bottom (vertical).
.. py:attribute:: level
How many levels of parents does this node have?
.. py:attribute:: data
Dict of arbitrary data for your game's use.
"""
def __init__(self, rect, is_horz=True, value=None, level=0):
"""
:param Rect rect:
:param bool is_horz:
:param int|None value:
:param int level:
"""
self.parent_weakref = lambda: None
self.rect = rect
self.level = level
self.is_horz = is_horz
self.value = value
self.child_a = None
self.child_b = None
self.data = {} # put whatever you want in here
@property
def max_value(self):
"""Max value of :py:attr:`BSPNode.value`"""
if self.is_horz:
return self.rect.size.width - 1
else:
return self.rect.size.height - 1
def _get_next_rect(self, is_a):
if self.is_horz:
if is_a:
return Rect(self.rect.origin, Size(self.value, self.rect.height))
else:
return Rect(
self.rect.origin + Point(self.value + 1, 0),
Size(self.rect.width - self.value - 1, self.rect.height))
else:
if is_a:
return Rect(self.rect.origin, Size(self.rect.width, self.value))
else:
return Rect(
self.rect.origin + Point(0, self.value + 1),
Size(self.rect.width, self.rect.height - self.value - 1))
@property
def rect_a(self):
"""Assuming :py:attr:`BSPNode.value` has already been set, return the
:py:class:`Rect` of child A"""
return self._get_next_rect(True)
@property
def rect_b(self):
"""Assuming :py:attr:`BSPNode.value` has already been set, return the
:py:class:`Rect` of child B"""
return self._get_next_rect(False)
def get_node_at_path(self, spec=''):
"""
Given a string containing only the characters ``'a'`` and ``'b'``, return
the node matching the given branches. For example, in a tree with 4
leaves, ``root.get_node_at_path('aa')`` would return the left/top-most
leaf.
"""
if spec:
if spec[0] == 'a':
return self.child_a.get_node_at_path(spec[1:])
elif spec[0] == 'b':
return self.child_b.get_node_at_path(spec[1:])
else:
raise ValueError("Invalid character: {}".format(spec[0]))
else:
return self
def __repr__(self):
tag = 'horz' if self.is_horz else 'vert'
return 'BSPNode({}, {})'.format(tag, self.value)
@property
def leaves(self):
"""Iterator of all leaves, left/top-to-right/bottom"""
if self.child_a and self.child_b:
yield from self.child_a.leaves
yield from self.child_b.leaves
else:
yield self
@property
def sibling_pairs(self):
"""Iterator of all pairs of siblings"""
if not self.child_a or not self.child_b:
return
yield from self.child_a.sibling_pairs
yield from self.child_b.sibling_pairs
yield (self.child_a, self.child_b)
@property
def leftmost_leaf(self):
"""The left/top-most leaf in the tree"""
if self.child_a:
return self.child_a.leftmost_leaf
else:
return self
@property
def rightmost_leaf(self):
"""The right/bottom-most leaf in the tree"""
if self.child_b:
            return self.child_b.rightmost_leaf
else:
return self
def random_leaf(self):
"""Returns a random leaf"""
if self.child_a or self.child_b:
            return choice((self.child_a, self.child_b)).random_leaf()
else:
return self
@property
def ancestors(self):
"""Iterator of ``self`` and all parents, starting with first parent"""
yield self
parent = self.parent_weakref()
if parent:
yield from parent.ancestors
def DEFAULT_RANDRANGE_FUNC(_, a, b): return randrange(a, b)
class RandomBSPTree:
"""
A randomly generated BSP tree. Pass a dungeon size and minimum leaf size.
After initialization, the root's leaves represent non-overlapping rectangles
that completely fill the space.
.. py:attribute:: root
:py:class:`BSPNode` root of all children
"""
def __init__(self, size, min_leaf_size, randrange_func=DEFAULT_RANDRANGE_FUNC):
"""
:param Size size:
:param int min_leaf_size: Minimum size of leaf nodes on both axes
:param function randrange_func: A function ``fn(level, min_size, max_size)``
that returns the ``value`` (see
                                        :py:class:`BSPNode`) of the node at the
given level of recursion. Defaults to
``randrange()``, but you can use this to
de-randomize specific splits.
"""
self.randrange_func = randrange_func
self.min_leaf_size = min_leaf_size
self.root = BSPNode(Rect(Point(0, 0), size))
self.subdivide(self.root)
def subdivide(self, node, iterations_left=8):
if iterations_left < 1:
return
if self.add_children(node):
self.subdivide(node.child_a, iterations_left=iterations_left - 1)
self.subdivide(node.child_b, iterations_left=iterations_left - 1)
def add_children(self, node):
a = self.min_leaf_size
b = node.max_value - self.min_leaf_size * 2
if b - a < 1:
return False
node.value = self.randrange_func(node.level, a, b)
node.child_a = BSPNode(
node.rect_a, not node.is_horz, level=node.level + 1)
node.child_a.parent_weakref = weakref.ref(node)
node.child_b = BSPNode(
node.rect_b, not node.is_horz, level=node.level + 1)
node.child_b.parent_weakref = weakref.ref(node)
return True
def draw(self, n=None, color_so_far='#f'):
if n is None:
n = self.root
if n.child_a or n.child_b:
if n.child_a:
self.draw(n.child_a, color_so_far + 'f')
if n.child_b:
self.draw(n.child_b, color_so_far + '0')
return
color = color_so_far + '0' * (7 - len(color_so_far))
terminal.color(color)
draw_rect(n.rect)
terminal.print(n.rect.origin, color)
terminal.bkcolor('#000000')
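# Minimal usage sketch (illustrative; the relative imports above mean this
# module normally runs as part of its package):
#
#     tree = RandomBSPTree(Size(40, 30), min_leaf_size=4)
#     for leaf in tree.root.leaves:
#         print(leaf.rect)            # non-overlapping rects filling the map
#     room = tree.root.random_leaf()  # pick an arbitrary leaf, e.g. for a room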
|
python
|
from shortcodes import Shortcode
# Allowed values for position options
ALLOWED_POSITION = ['right', 'left']
class BevelShortcode(Shortcode):
name = 'bevel'
same_tag_closes = True
standalone = True
render_empty = True
template = 'views/partials/bevel.j2'
def _get_position(self, options):
position = options.get('position', None)
if position not in ALLOWED_POSITION:
position = ALLOWED_POSITION[0]
return position
def transform(self, value, options):
self.context['position'] = self._get_position(options)
return value
shortcode = BevelShortcode
|
python
|
# Generated by Django 4.0.3 on 2022-03-15 20:26
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('core', '0007_alter_category_options'),
]
operations = [
migrations.AlterField(
model_name='ticket',
name='category',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='ticket_categories', to='core.category'),
),
]
|
python
|
import random
import math
def prime_factor_generator(n):
yield 2
for i in range(3, int(math.sqrt(n)) + 1, 2):
yield i
def prime_factors(n):
for i in prime_factor_generator(n):
if n % i == 0:
yield i
while True:
n //= i
if n % i != 0:
break
if n > 2:
yield n
def coprimes(n, maxx=None):
    if maxx is None:
maxx = n
sieve = [True] * (maxx - 1)
for p in prime_factors(n):
m = p
while m < maxx:
sieve[m - 1] = False
m += p
res = []
for i, coprime in enumerate(sieve):
if coprime:
res.append(i + 1)
return res
def maxcoprime(n, maxx=None):
    if maxx is None:
maxx = n
sieve = [True] * maxx
for p in prime_factors(n):
m = p
while m <= maxx:
sieve[m - 1] = False
m += p
for i, coprime in enumerate(reversed(sieve)):
if coprime:
return maxx - i
def cycle(n):
"""
https://en.wikipedia.org/wiki/Full_cycle
"""
seed = random.randrange(n)
inc = maxcoprime(n, n + random.randint(n, n * 10))
for _ in range(n):
yield seed
seed = (seed + inc) % n
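# Quick sanity checks (illustrative): coprimes(12) lists the units mod 12,
# and cycle(n) visits every residue 0..n-1 exactly once.
if __name__ == '__main__':
    assert coprimes(12) == [1, 5, 7, 11]
    assert sorted(cycle(10)) == list(range(10))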
|
python
|
def largest_palindrome_product(n):
up_digits = "1" + n * "0"
low_digits = "1" + (n-1) * "0"
num_up_digits = int(up_digits)
num_low_digits = int(low_digits)
x = range(num_low_digits, num_up_digits)
largest_num = 0
for i in x:
for j in x:
multi = i * j
str_multi = str(multi)
if str_multi == str_multi[::-1] and largest_num < multi:
largest_num = multi
return largest_num
print(largest_palindrome_product(3))
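# Expected output: 906609 (= 913 * 993), the largest palindrome that is a
# product of two 3-digit numbers.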
|
python
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
INCLUDES = """
#include <openssl/ssl.h>
/*
* This is part of a work-around for the difficulty cffi has in dealing with
* `STACK_OF(foo)` as the name of a type. We invent a new, simpler name that
* will be an alias for this type and use the alias throughout. This works
* together with another opaque typedef for the same name in the TYPES section.
* Note that the result is an opaque type.
*/
typedef STACK_OF(X509) Cryptography_STACK_OF_X509;
typedef STACK_OF(X509_REVOKED) Cryptography_STACK_OF_X509_REVOKED;
"""
TYPES = """
typedef ... Cryptography_STACK_OF_X509;
typedef ... Cryptography_STACK_OF_X509_REVOKED;
typedef struct {
ASN1_OBJECT *algorithm;
...;
} X509_ALGOR;
typedef ... X509_ATTRIBUTE;
typedef struct {
X509_ALGOR *signature;
...;
} X509_CINF;
typedef struct {
ASN1_OBJECT *object;
ASN1_BOOLEAN critical;
ASN1_OCTET_STRING *value;
} X509_EXTENSION;
typedef ... X509_EXTENSIONS;
typedef ... X509_REQ;
typedef struct {
ASN1_INTEGER *serialNumber;
ASN1_TIME *revocationDate;
X509_EXTENSIONS *extensions;
int sequence;
...;
} X509_REVOKED;
typedef struct {
Cryptography_STACK_OF_X509_REVOKED *revoked;
...;
} X509_CRL_INFO;
typedef struct {
X509_CRL_INFO *crl;
...;
} X509_CRL;
typedef struct {
X509_CINF *cert_info;
...;
} X509;
typedef ... X509_STORE;
typedef ... NETSCAPE_SPKI;
"""
FUNCTIONS = """
X509 *X509_new(void);
void X509_free(X509 *);
X509 *X509_dup(X509 *);
int X509_print_ex(BIO *, X509 *, unsigned long, unsigned long);
int X509_set_version(X509 *, long);
EVP_PKEY *X509_get_pubkey(X509 *);
int X509_set_pubkey(X509 *, EVP_PKEY *);
unsigned char *X509_alias_get0(X509 *, int *);
int X509_sign(X509 *, EVP_PKEY *, const EVP_MD *);
int X509_digest(const X509 *, const EVP_MD *, unsigned char *, unsigned int *);
ASN1_TIME *X509_gmtime_adj(ASN1_TIME *, long);
unsigned long X509_subject_name_hash(X509 *);
X509_NAME *X509_get_subject_name(X509 *);
int X509_set_subject_name(X509 *, X509_NAME *);
X509_NAME *X509_get_issuer_name(X509 *);
int X509_set_issuer_name(X509 *, X509_NAME *);
int X509_get_ext_count(X509 *);
int X509_add_ext(X509 *, X509_EXTENSION *, int);
X509_EXTENSION *X509_EXTENSION_dup(X509_EXTENSION *);
X509_EXTENSION *X509_get_ext(X509 *, int);
int X509_EXTENSION_get_critical(X509_EXTENSION *);
ASN1_OBJECT *X509_EXTENSION_get_object(X509_EXTENSION *);
void X509_EXTENSION_free(X509_EXTENSION *);
int X509_REQ_set_version(X509_REQ *, long);
X509_REQ *X509_REQ_new(void);
void X509_REQ_free(X509_REQ *);
int X509_REQ_set_pubkey(X509_REQ *, EVP_PKEY *);
int X509_REQ_sign(X509_REQ *, EVP_PKEY *, const EVP_MD *);
int X509_REQ_verify(X509_REQ *, EVP_PKEY *);
EVP_PKEY *X509_REQ_get_pubkey(X509_REQ *);
int X509_REQ_print_ex(BIO *, X509_REQ *, unsigned long, unsigned long);
int X509V3_EXT_print(BIO *, X509_EXTENSION *, unsigned long, int);
ASN1_OCTET_STRING *X509_EXTENSION_get_data(X509_EXTENSION *);
X509_REVOKED *X509_REVOKED_new(void);
void X509_REVOKED_free(X509_REVOKED *);
int X509_REVOKED_set_serialNumber(X509_REVOKED *, ASN1_INTEGER *);
int X509_REVOKED_add1_ext_i2d(X509_REVOKED *, int, void *, int, unsigned long);
X509_CRL *d2i_X509_CRL_bio(BIO *, X509_CRL **);
X509_CRL *X509_CRL_new(void);
void X509_CRL_free(X509_CRL *);
int X509_CRL_add0_revoked(X509_CRL *, X509_REVOKED *);
int i2d_X509_CRL_bio(BIO *, X509_CRL *);
int X509_CRL_print(BIO *, X509_CRL *);
int X509_CRL_set_issuer_name(X509_CRL *, X509_NAME *);
int X509_CRL_sign(X509_CRL *, EVP_PKEY *, const EVP_MD *);
int NETSCAPE_SPKI_verify(NETSCAPE_SPKI *, EVP_PKEY *);
int NETSCAPE_SPKI_sign(NETSCAPE_SPKI *, EVP_PKEY *, const EVP_MD *);
char *NETSCAPE_SPKI_b64_encode(NETSCAPE_SPKI *);
EVP_PKEY *NETSCAPE_SPKI_get_pubkey(NETSCAPE_SPKI *);
int NETSCAPE_SPKI_set_pubkey(NETSCAPE_SPKI *, EVP_PKEY *);
NETSCAPE_SPKI *NETSCAPE_SPKI_new(void);
void NETSCAPE_SPKI_free(NETSCAPE_SPKI *);
/* ASN1 serialization */
int i2d_X509_bio(BIO *, X509 *);
X509 *d2i_X509_bio(BIO *, X509 **);
int i2d_X509_REQ_bio(BIO *, X509_REQ *);
X509_REQ *d2i_X509_REQ_bio(BIO *, X509_REQ **);
int i2d_PrivateKey_bio(BIO *, EVP_PKEY *);
EVP_PKEY *d2i_PrivateKey_bio(BIO *, EVP_PKEY **);
ASN1_INTEGER *X509_get_serialNumber(X509 *);
int X509_set_serialNumber(X509 *, ASN1_INTEGER *);
/* X509_STORE */
X509_STORE *X509_STORE_new(void);
void X509_STORE_free(X509_STORE *);
int X509_STORE_add_cert(X509_STORE *, X509 *);
int X509_verify_cert(X509_STORE_CTX *);
const char *X509_verify_cert_error_string(long);
const char *X509_get_default_cert_area(void);
const char *X509_get_default_cert_dir(void);
const char *X509_get_default_cert_file(void);
const char *X509_get_default_cert_dir_env(void);
const char *X509_get_default_cert_file_env(void);
const char *X509_get_default_private_dir(void);
"""
MACROS = """
long X509_get_version(X509 *);
ASN1_TIME *X509_get_notBefore(X509 *);
ASN1_TIME *X509_get_notAfter(X509 *);
long X509_REQ_get_version(X509_REQ *);
X509_NAME *X509_REQ_get_subject_name(X509_REQ *);
Cryptography_STACK_OF_X509 *sk_X509_new_null(void);
void sk_X509_free(Cryptography_STACK_OF_X509 *);
int sk_X509_num(Cryptography_STACK_OF_X509 *);
int sk_X509_push(Cryptography_STACK_OF_X509 *, X509 *);
X509 *sk_X509_value(Cryptography_STACK_OF_X509 *, int);
X509_EXTENSIONS *sk_X509_EXTENSION_new_null(void);
int sk_X509_EXTENSION_num(X509_EXTENSIONS *);
X509_EXTENSION *sk_X509_EXTENSION_value(X509_EXTENSIONS *, int);
int sk_X509_EXTENSION_push(X509_EXTENSIONS *, X509_EXTENSION *);
X509_EXTENSION *sk_X509_EXTENSION_delete(X509_EXTENSIONS *, int);
void sk_X509_EXTENSION_free(X509_EXTENSIONS *);
int sk_X509_REVOKED_num(Cryptography_STACK_OF_X509_REVOKED *);
X509_REVOKED *sk_X509_REVOKED_value(Cryptography_STACK_OF_X509_REVOKED *, int);
/* These aren't macros; these arguments are all const X on OpenSSL > 1.0.x */
int X509_CRL_set_lastUpdate(X509_CRL *, ASN1_TIME *);
int X509_CRL_set_nextUpdate(X509_CRL *, ASN1_TIME *);
/* these use STACK_OF(X509_EXTENSION) in 0.9.8e. Once we drop support for
RHEL/CentOS 5 we should move these back to FUNCTIONS. */
int X509_REQ_add_extensions(X509_REQ *, X509_EXTENSIONS *);
X509_EXTENSIONS *X509_REQ_get_extensions(X509_REQ *);
"""
CUSTOMIZATIONS = """
// OpenSSL 0.9.8e does not have this definition
#if OPENSSL_VERSION_NUMBER <= 0x0090805fL
typedef STACK_OF(X509_EXTENSION) X509_EXTENSIONS;
#endif
"""
CONDITIONAL_NAMES = {}
|
python
|
import sys
from importlib import import_module
from bumps.pymcfit import PyMCProblem
if len(sys.argv) != 2:
raise ValueError("Expected name of pymc file containing a model")
module = sys.argv[1]
__name__ = module.split('.')[-1]
model = import_module(module)
problem = PyMCProblem(model)
|
python
|
#!/usr/bin/env python
import numpy as np
from scipy.misc import imread, imresize
import matplotlib.pyplot as plt
import cv2
import scipy.io
import scipy.stats as st
from scipy import ndimage as ndi
from skimage import feature
from skimage.morphology import skeletonize
import pdb
def gkern(kernlen=21, nsig=3):
"""Returns a 2D Gaussian kernel array."""
interval = (2*nsig+1.)/(kernlen)
x = np.linspace(-nsig-interval/2., nsig+interval/2., kernlen+1)
kern1d = np.diff(st.norm.cdf(x))
kernel_raw = np.sqrt(np.outer(kern1d, kern1d))
kernel = kernel_raw/np.max(kernel_raw)#.sum()
return kernel
def point_gaussian(k_w, k_h, sigma_w, sigma_h):
return gkern(k_w, sigma_w)
def map_gaussian(im, k_w, k_h, sigma_w, sigma_h):
if im.ndim == 3:
h,w,_ = im.shape
elif im.ndim == 2:
h,w = im.shape
    else:
        print("Unknown im format.")
        return None
    k_w_r = (k_w - 1) // 2
    k_h_r = (k_h - 1) // 2
point_gs = point_gaussian(k_w, k_h, sigma_w, sigma_h)
    map_gs = np.zeros((h, w), dtype=float)
occ_loc = np.where(im[:,:,0] == 0)
for i in range(len(occ_loc[1])):
loc_i = [occ_loc[0][i],occ_loc[1][i]]
#map_gs[loc_i[0]-k_h_r: loc_i[0]+k_h_r+1, loc_i[1]-k_w_r: loc_i[1]+k_w_r+1] = np.maximum(map_gs[loc_i[0]-k_h_r: loc_i[0]+k_h_r+1, loc_i[1]-k_w_r: loc_i[1]+k_w_r+1], point_gs)
#map_gs bound
lby, lbx = max(0,loc_i[0]-k_h_r), max(0,loc_i[1]-k_w_r)
uby, ubx = min(h-1,loc_i[0]+k_h_r), min(w-1,loc_i[1]+k_w_r)
#kernel bound
k_lby, k_lbx = -min(0,loc_i[0]-k_h_r), -min(0,loc_i[1]-k_w_r)
k_uby, k_ubx = k_h + (h-1 - max(h-1, loc_i[0]+k_h_r)), k_w + (w-1 - max(w-1, loc_i[1]+k_w_r))
#maximum on pixel
map_gs[lby:uby+1, lbx:ubx+1] = np.maximum(map_gs[lby:uby+1, lbx:ubx+1], point_gs[k_lby:k_uby, k_lbx:k_ubx])
return map_gs
def preprocess(im_path):
im = imread(im_path)
map_gs = map_gaussian(im, 455,455,5,5)
return im, map_gs
def edgeprocess(gs):
#laplacian
gray_lap = cv2.Laplacian(gs*255,cv2.CV_64F, ksize=5)
dst = cv2.convertScaleAbs(gray_lap)
return dst
def binarize(dst, th):
    res = np.zeros(dst.shape, dtype=float)
res[np.where(dst > th)] = 1
return res
Tidu = (np.array([0,1,1,1]),np.array([1,0,1,2]))
Tidd = (np.array([1,1,1,2]),np.array([0,1,2,1]))
Tidl = (np.array([0,1,1,2]),np.array([1,0,1,1]))
Tidr = (np.array([0,1,1,2]),np.array([1,1,2,1]))
def remove_Tcenter(skele):
'''
it is possible to have T shape in skeleton, we should remove the center of it
'''
w,h = skele.shape
for i in range(1,w-1):
for j in range(1,h-1):
if skele[i,j]:
patch = skele[i-1:i+2,j-1:j+2].copy()
if np.sum(patch[Tidu]) == 4 or \
np.sum(patch[Tidd]) == 4 or \
np.sum(patch[Tidl]) == 4 or \
np.sum(patch[Tidr]) == 4:
skele[i,j] = False
#i = 0
for j in range(1,h-1):
if skele[0,j]:
if np.sum(skele[0,j-1:j+2]) + skele[1,j] == 4:
skele[0,j] = False
#i = w-1
for j in range(1,h-1):
if skele[w-1,j]:
if np.sum(skele[w-1,j-1:j+2]) + skele[w-2,j] == 4:
skele[w-1,j] = False
#j = 0
for i in range(1,w-1):
if skele[i,0]:
if np.sum(skele[i-1:i+2,0]) + skele[i,1] == 4:
skele[i,0] = False
#j = h-1
for i in range(1,w-1):
if skele[i,h-1]:
if np.sum(skele[i-1:i+2,h-1]) + skele[i,h-2] == 4:
skele[i,h-1] = False
#i=0,j=0
if skele[0,0]:
if skele[0,0] + skele[0,1] + skele[1,0] == 3:
skele[0,0] = False
#i=0,j=h-1
if skele[0,h-1]:
if skele[0,h-2] + skele[0,h-1] + skele[1,h-1] == 3:
skele[0,h-1] = False
#i=w-1,j=0
if skele[w-1,0]:
if skele[w-2,0] + skele[w-1,0] + skele[w-1,1] == 3:
skele[w-1,0] = False
#i=w-1,j=h-1
if skele[w-1,h-1]:
if skele[w-1,h-2] + skele[w-1,h-1] + skele[w-2,h-1] == 3:
skele[w-1,h-1] = False
return skele
def im2skeleton(im_path, give_bin = False):
#1. gaussian map
im, map_gs = preprocess(im_path)
#2. edge detection
dst = edgeprocess(map_gs)
#3. binarize
res = binarize(dst,10)
#4. suppress
skeleton = skeletonize(res)
#5. remove T shape center
skeleton = remove_Tcenter(skeleton)
#plt.imshow(skeleton)
#plt.show()
# print np.max(skeleton)
if give_bin:
binary_wall = im[:,:,0] == 0
return im, map_gs, skeleton, binary_wall
return im, map_gs, skeleton
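# Minimal usage sketch (illustrative; 'floorplan.png' is a hypothetical image
# whose wall pixels have a zero red channel, as map_gaussian assumes):
#
#     im, map_gs, skeleton = im2skeleton('floorplan.png')
#     plt.imshow(skeleton)
#     plt.show()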
|
python
|
"""Test a hydrogen-like atom."""
import jax
import numpy as np
import pytest
import vmcnet.examples.hydrogen_like_atom as hla
from vmcnet.mcmc.simple_position_amplitude import make_simple_position_amplitude_data
from .sgd_train import sgd_vmc_loop_with_logging
from .kfac_train import kfac_vmc_loop_with_logging
def _setup_hla_hyperparams_and_model():
"""Setup the hyperparams and model for a hydrogen-like atom."""
# Problem parameters
model_decay = 5.0
nuclear_charge = 3.0
ndim = 3
# Training hyperparameters
nchains = 100 * jax.local_device_count()
nburn = 100
nepochs = 100
nsteps_per_param_update = 5
std_move = 0.4
learning_rate = 1.0
# Initialize model and chains of walkers
log_psi_model = hla.HydrogenLikeWavefunction(model_decay)
key = jax.random.PRNGKey(0)
key, subkey = jax.random.split(key)
init_elec_pos = jax.random.normal(subkey, shape=(nchains, 1, ndim))
key, subkey = jax.random.split(key)
params = log_psi_model.init(key, init_elec_pos)
amplitudes = log_psi_model.apply(params, init_elec_pos)
data = make_simple_position_amplitude_data(init_elec_pos, amplitudes)
# Local energy
local_energy_fn = hla.make_hydrogen_like_local_energy(
log_psi_model.apply, nuclear_charge, d=ndim
)
return (
params,
nuclear_charge,
nchains,
nburn,
nepochs,
nsteps_per_param_update,
std_move,
learning_rate,
log_psi_model,
key,
data,
local_energy_fn,
)
@pytest.mark.slow
def test_hydrogen_like_sgd_vmc(caplog):
"""Test the wavefn exp(-a * r) converges (in 3-D) to a = nuclear charge with SGD."""
(
params,
nuclear_charge,
nchains,
nburn,
nepochs,
nsteps_per_param_update,
std_move,
learning_rate,
log_psi_model,
key,
data,
local_energy_fn,
) = _setup_hla_hyperparams_and_model()
_, params, _, _ = sgd_vmc_loop_with_logging(
caplog,
data,
params,
key,
nchains,
nburn,
nepochs,
nsteps_per_param_update,
std_move,
learning_rate,
log_psi_model,
local_energy_fn,
)
# Make sure the decay rate converged to the nuclear charge, since we're in 3-d
np.testing.assert_allclose(jax.tree_leaves(params)[0], nuclear_charge, rtol=1e-5)
@pytest.mark.slow
def test_hydrogen_like_kfac_vmc(caplog):
"""Test exp(-a * r) converges (in 3-D) to a = nuclear charge with KFAC."""
(
params,
nuclear_charge,
nchains,
nburn,
nepochs,
nsteps_per_param_update,
std_move,
learning_rate,
log_psi_model,
key,
data,
local_energy_fn,
) = _setup_hla_hyperparams_and_model()
_, params, _, _ = kfac_vmc_loop_with_logging(
caplog,
data,
params,
key,
nchains,
nburn,
nepochs,
nsteps_per_param_update,
std_move,
learning_rate,
log_psi_model,
local_energy_fn,
)
# Make sure the decay rate converged to the nuclear charge, since we're in 3-d
np.testing.assert_allclose(jax.tree_leaves(params)[0], nuclear_charge, rtol=1e-5)
|
python
|
from django.conf import settings
from cybox.objects.address_object import Address
from cybox.objects.uri_object import URI
cfg = settings.ACTIVE_CONFIG
LOCAL_ALIAS = cfg.by_key('company_alias')
OBJECT_FIELDS = {
'AddressObjectType': [['address_value']],
'DomainNameObjectType': [['value']],
'EmailMessageObjectType': [
['header','from', 'address_value'],
['header','to','address_value'],
],
'FileObjectType': [['hashes','simple_hash_value']],
'HTTPSessionObjectType': [['http_request_response','http_client_request','http_request_header','parsed_header','user_agent']],
'SocketAddressObjectType': [['ip_address','address_value']],
'URIObjectType': [['value']],
}
OBJECT_CONSTRAINTS = {
'Address': {
'category': [Address.CAT_IPV4, Address.CAT_IPV6],
},
'URI': {
'type_': [URI.TYPE_URL],
},
}
STRING_CONDITION_CONSTRAINT = ['None', 'Equals']
HEADER_LABELS = [
'indicator', 'indicator_type', 'meta.source', 'meta.url',
'meta.do_notice', 'meta.if_in', 'meta.whitelist',
]
# Map Cybox object type to Bro Intel types.
BIF_TYPE_MAPPING = {
'AddressObjectType': 'Intel::ADDR',
'DomainNameObjectType': 'Intel::DOMAIN',
'EmailMessageObjectType': 'Intel::EMAIL',
'FileObjectType': 'Intel::FILE_HASH',
'HTTPSessionObjectType': 'Intel::SOFTWARE',
'SocketAddressObjectType': 'Intel::ADDR',
'URIObjectType': 'Intel::URL',
}
# Map observable id prefix to source and url.
BIF_SOURCE_MAPPING = {
'cert_au': {
'source': 'CERT-AU',
'url': 'https://www.cert.gov.au/',
},
'CCIRC-CCRIC': {
'source': 'CCIRC',
'url': ('https://www.publicsafety.gc.ca/' +
'cnt/ntnl-scrt/cbr-scrt/ccirc-ccric-eng.aspx'),
},
'NCCIC': {
'source': 'NCCIC',
'url': 'https://www.us-cert.gov/',
},
}
def generate_bro(obs, obs_type, id_prefix):
# Deals with nested structure for fields which have attributes
def flatten_nested_values(obj):
if isinstance(obj, dict):
if isinstance(obj["value"], list):
return ','.join(obj["value"])
else:
return obj["value"]
else:
return obj
def rgetvalue(o, l, d=None):
"""recursive walk dict using a list"""
if o is None:
return d
if isinstance(o, list):
return rgetvalue(o[0], l, d)
if len(l) == 1:
return o[l[0]]
else:
return rgetvalue(o[l[0]], l[1:], d)
text = ''
if obs_type in BIF_TYPE_MAPPING:
# Look up source and url from observable ID
if id_prefix in BIF_SOURCE_MAPPING:
source = BIF_SOURCE_MAPPING[id_prefix]['source']
url = BIF_SOURCE_MAPPING[id_prefix]['url']
else:
source = id_prefix
url = ''
bif_type = BIF_TYPE_MAPPING[obs_type]
for field in OBJECT_FIELDS[obs_type]:
retrieved_value = rgetvalue(obs, field)
if retrieved_value is not None:
field_values = [
flatten_nested_values(retrieved_value),
'\t',
bif_type,
'\t',
source,
'\t',
url,
'\t',
'T',
'\t',
'-',
'\t',
'-',
]
                # Append one tab-separated record, newline-terminated
                # (assumed: Bro intel files are line-based).
                text += ''.join(field_values) + '\n'
return text
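# Minimal usage sketch (illustrative values):
#
#     obs = {'value': 'example.com'}
#     generate_bro(obs, 'DomainNameObjectType', 'cert_au')
#     # -> 'example.com\tIntel::DOMAIN\tCERT-AU\thttps://www.cert.gov.au/\tT\t-\t-\n'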
|
python
|
# -*- coding: utf-8 -*-
"""
@Author: oisc <[email protected]>
@Date: 2018/5/4
@Description:
"""
import math
from collections import namedtuple
from copy import copy
from interface import Annotator
from transition import Session
from transition.shiftreduce import SRTransition, SRConfiguration
class SPINNTreeBuilder(Annotator):
def __init__(self, model, beam_size=1):
self.model = model
self.model.eval()
self.transition = SRTransition()
self.beam_size = beam_size
    def annotate(self, discourse):
        # Beam-search decode: keep the `beam_size` lowest-cost partial
        # parses, where a parse's cost is the sum of -log(prob) of its actions.
conf = SRConfiguration(discourse)
state = self.model.new_state(conf)
BeamNode = namedtuple("BeamNode", "session cost")
fringe = [BeamNode(Session(conf, self.transition, state=state), cost=0)]
hypotheses = []
next_fringe = []
while fringe:
for node in fringe:
if node.session.terminate():
hypotheses.append(node)
else:
valid_action = node.session.valid()
for (action, nuclear), prob in self.model.score(node.session.state).items():
if action in valid_action:
session = copy(node.session)
if action == SRTransition.SHIFT:
session(action)
session.state = self.model.shift(session.state)
else:
session(action, nuclear=nuclear)
session.state = self.model.reduce(session.state, nuclear)
cost = -math.log(prob)
next_fringe.append(BeamNode(session=session, cost=node.cost + cost))
fringe = sorted(next_fringe, key=lambda n: n.cost)[:self.beam_size]
next_fringe = []
hypotheses.sort(key=lambda n: n.cost)
high_rank_discourse = hypotheses[0].session.current.discourse
return high_rank_discourse
|
python
|
import requests
import io
import csv
import json
import hashlib
from time import sleep
from limit import Limit
from source_iterator import SourceIterator
from source_row_manager import SourceRowManager
class SourceReader:
def __init__(self, date, product, catalogue, url, index, login_data):
self.date = date
self.product = product
self.catalogue = catalogue
self.url = url
self.index = index
self.login_data = login_data
self.is_last = False
self.auth = False
self.beg_time = None
self.end_time = None
self.row_manager = SourceRowManager(self.date)
def set_beg_time(self, time):
self.beg_time = time
def set_end_time(self, time):
self.end_time = time
def set_log(self, log):
self.log = log
def set_limit(self, limit_min, limit_max):
self.limit = Limit(limit_min, limit_max)
def set_iter(self, iter_type, index):
self.iter = SourceIterator(iter_type, index)
self.iter_type = iter_type
if self.iter_type == 'HH:MM:SS':
self.hour = 0
def next_bulk(self):
if self.is_last:
return None
data, limit = self._get_data()
        if data is None:
return None
        self.first_row = next(data)
lines = 0
fullBulk = False
while not fullBulk:
fullBulk = True
result = []
#try:
for cur_row in data:
lines += 1
row = self._get_row( cur_row )
                if row is None:
self.log("ERROR! Failed to get row.")
continue
if row == 'ignore':
continue
json_row = json.dumps(row)
result.append(json_row)
#except:
# self.log("EXCEPTION! Failed to get row. id=["+self.iter.get_str()+"]")
# if lines != limit:
# fullBulk = False
if self.iter_type == 'HH:MM:SS':
self.hour += 1
if lines < 2:
if not (self.iter_type == 'HH:MM:SS' and self.hour <= 24):
self.log("FINISH. id=["+self.iter.get_str()+"] limit=["+str(limit)+"]")
self.is_last = True
else:
self.log("Bulk got. id=["+self.iter.get_str()+"] limit=["+str(limit)+"]")
return result
def _get_data(self):
while True:
url = self._get_url()
try:
response = requests.get(url)
except KeyboardInterrupt:
self.log("FINISH")
return None, 0
except:
self.log("GET_DATA ERROR! id=[" + self.iter.get_str() + "]")
self.limit.decrease()
sleep(1)
continue
if response.status_code != 200:
self.log( "GET_DATA ERROR! code=" + str(response.status_code) + \
" text=" + response.text)
self.limit.decrease()
continue
limit = self.limit.get()
self.limit.increase()
            stream = io.StringIO(response.text)
            return csv.reader(stream, delimiter=','), limit
def _get_row(self, data):
if len(data) <= 0:
self.log("ROW ERROR! empty")
return None
if self.iter_type == 'id':
self.iter.set(data[0])
else:
self.iter.set(data[1])
row = self.row_manager.init_row()
i = 0
while i < len(data):
self._add_param(self.first_row[i], data[i], row)
i += 1
self.row_manager.apply_rules_to_row(row)
return row
def _add_param(self, key, value, obj):
obj[key] = value
def _get_url(self):
params = "date=" + self.date + \
self.iter.get_param()+"&p="+self.product+\
"&limit=" + str(self.limit.get())+"&pc="+self.catalogue
        if self.beg_time is not None:
params += "&beg_time=" + str(self.beg_time)
        if self.end_time is not None:
params += "&end_time=" + str(self.end_time)
params = self._add_auth(params)
url = self.url + "&" + params
return url
def _add_auth(self, params):
        if self.login_data is None:
            return params
        if self.login_data.get('login') is None:
            return params
        if self.login_data.get('password') is None:
            return params
        if not self.auth:
            if not self._auth():
                return params
params += '&login=' + self.login_data['login']
data = params.split('&')
data.sort()
data = ''.join(data)
data += self.login_data['password']
        sign = hashlib.md5(data.encode('utf-8')).hexdigest()
params += '&sign=' + sign
return params
def _auth(self):
url = self.url.split('?')[0]
url += '?method=auth'
url += '&login=' + self.login_data['login']
url += '&password=' + self.login_data['password']
        try:
            response = requests.get(url)
        except requests.RequestException:
            return False
        self.auth = response.status_code == 200
        return self.auth
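# Minimal usage sketch (illustrative; the endpoint URL and catalogue values
# are hypothetical):
#
#     reader = SourceReader('2020-01-01', 'shop', 'main',
#                           'https://example.com/export?method=data', 0, None)
#     reader.set_log(print)
#     reader.set_limit(100, 10000)
#     reader.set_iter('id', 0)
#     bulk = reader.next_bulk()  # list of JSON rows, or None when finished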
|
python
|
"""
A Python module for antenna array analysis
----------
AntArray - Antenna Array Analysis Module
Copyright (C) 2018 - 2019 Zhengyu Peng
E-mail: [email protected]
Website: https://zpeng.me
` `
-:. -#:
-//:. -###:
-////:. -#####:
-/:.://:. -###++##:
.. `://:- -###+. :##:
`:/+####+. :##:
.::::::::/+###. :##:
.////-----+##: `:###:
`-//:. :##: `:###/.
`-//:. :##:`:###/.
`-//:+######/.
`-/+####/.
`+##+.
:##:
:##:
:##:
:##:
:##:
.+:
"""
import numpy
from scipy import signal
from .antennaarray import AntennaArray
from .lineararray import LinearArray
from .rectarray import RectArray
__version__ = '1.0.5'
|
python
|
# library imports
# constants
# functions and methods
def es_primo(numero):
    # numbers below 2 are not prime by definition
    if numero < 2:
        return False
    for n in range(2, numero):
        if numero % n == 0:
            return False
    return True
# main program
# read a number from the keyboard
numero = int(input('Enter a number: '))
# check whether it is prime
primo = es_primo(numero)
# print according to the check
if primo:
    print(f'the number {numero} is prime')
else:
    print(f'the number {numero} is NOT prime')
|
python
|
# -*- coding: utf-8 -*-
# vim: set fileencoding=utf-8:noet:tabstop=4:softtabstop=4:shiftwidth=8:expandtab
""" python3 class """
# Copyright (c) 2010 - 2020, © Badassops LLC / Luc Suryo
# All rights reserved.
# BSD 3-Clause License : http://www.freebsd.org/copyright/freebsd-license.html
from pprint import PrettyPrinter
from logging import critical, warning
import awsbuild.const as const
from awsbuild.misc.spinner import spin_message
from awsbuild.aws.tag import create_resource_id_tag as set_tag
class EIP():
""" Class for the AWS EIP
"""
def __init__(self, **kwargs):
""" initial the object """
self.cmd_cfg = kwargs.get('cmd_cfg', {})
self.session = kwargs.get('session', {})
self.tag = self.cmd_cfg['tag']
        # DANGER WILL ROBINSON: we are using a wildcard as the filter!
self.tag_filter = str('*' + self.tag + '*')
self.filter = [{'Name' : 'tag:Name', 'Values' : [self.tag_filter]}]
def do_cmd(self):
""" main command handler """
if self.cmd_cfg['command'] == 'describe':
return self.describe()
if self.cmd_cfg['command'] == 'create':
return self.create()
if self.cmd_cfg['command'] == 'modify':
return self.modify()
if self.cmd_cfg['command'] == 'destroy':
return self.destroy()
return False
def create(self, **kwargs):
""" create an eip """
tag = kwargs.get('tag', '')
if not tag:
tag = self.tag
try:
eip_session = self.session.get_client_session(service='ec2')
eip_info = eip_session.allocate_address(
Domain='vpc'
)
spin_message(
message='Waiting {} seconds for the eip to become available.'.format(const.TIMER),
seconds=const.TIMER
)
set_tag(session=eip_session, resource_id=eip_info['AllocationId'],\
tag_name='Name', tag_value=tag)
return eip_info['AllocationId']
except Exception as err:
warning('Unable to create an eip, error: {}'.format(err))
return None
def describe(self):
""" get the eip(s) info """
        eip_info = self.__get_info(session=self.session,\
            filters=self.filter)
if len(eip_info['Addresses']) == 0:
print('\n⚬ No eip found, filter {}'.format(self.filter))
return
output = PrettyPrinter(indent=2, width=41, compact=False)
for info in eip_info['Addresses']:
print('\n⚬ eip ID {}'.format(info['AllocationId']))
output.pprint(info)
def get_info(self):
""" get the eip(s) info """
        eip_info = self.__get_info(session=self.session,\
            filters=self.filter)
if len(eip_info['Addresses']) == 0:
return None
return eip_info
def modify(self, **kwargs):
""" modify an eip """
modify = kwargs.get('modify', {})
eip = kwargs.get('eip', {})
instance = kwargs.get('instance', {})
try:
eip_session = self.session.get_client_session(service='ec2')
if modify == 'associate':
eip_session.associate_address(
InstanceId=instance,
AllocationId=eip
)
if modify == 'disassociate':
eip_session.disassociate_address(
AllocationId=eip
)
return True
except Exception as err:
critical('Unable to {} the eip, error {}'.format(modify, err))
return False
def destroy(self, **kwargs):
""" destroy an eip """
eip = kwargs.get('eip', {})
try:
eip_session = self.session.get_client_session(service='ec2')
eip_session.release_address(
AllocationId=eip
)
return True
except Exception as err:
warning('Unable to release the eip, error {}'.format(err))
return False
@classmethod
def __get_info(cls, **kwargs):
""" get info """
cls.session = kwargs.get('session', {})
cls.filters = kwargs.get('filters', {})
try:
cls.eip_session = cls.session.get_client_session(service='ec2')
eip_info = cls.eip_session.describe_addresses(
Filters=cls.filters
)
return eip_info
except Exception as err:
warning('Unable to get info eip, error: {}'.format(err))
return None
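# Minimal usage sketch (illustrative; `session` is an awsbuild session object
# and the tag value is hypothetical):
#
#     eip = EIP(cmd_cfg={'command': 'describe', 'tag': 'web'}, session=session)
#     eip.do_cmd()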
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Copyright (c) 2018 Taha Emre Demirkol
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import os
import zipfile
from Util.PackageUtil import PackageUtil as packageControl
class FileUtil:
"""
    This is the utility class for file operations ("FileUtil.py").
    The block below mimics a static initializer in Java: it checks that the
    running Python version is at least 3.6.4; anything older should be updated.
"""
####################################################################################################################
    if not packageControl.checkVersionForApplicable(3, 6, 4):
        raise EnvironmentError("The current Python version must be at least 3.6.4; please update your Python version !!!")
    if packageControl.getPlatformInfo() != 'Linux' and packageControl.getPlatformInfo() != 'OS X':
        raise SystemError("This class only executes on Linux or OS X platforms !!!")
####################################################################################################################
def __init__(self):
"""
INFO: Default Constructure cannot be Creatable
WARNING: If attand to create this object, it will raise NotImplementedError
"""
assert NotImplementedError("This object useing only library, cannot be creatable!!!")
def __new__(cls):
"""
INFO: This method was overrided only avoided to __new__ operator
:return: NOISE_OBJECT
"""
return object.__new__(cls)
@staticmethod
def checkClassName(obj, className):
"""
INFO: This function will compare object's class name
WARNING: If not equal will raise exception
:param obj: Which is the checking object
:param className: Which is the important class name
:return: NONE
"""
if obj.__class__.__name__ != className:
raise EnvironmentError("Object not equal to class name !!!")
@staticmethod
def checkFile(path):
"""
INFO: This function checks the path state
        WARNING: If the path does not exist or is not a directory, raise an exception
:param path: File path on which is the target source
:return:
"""
        if not FileUtil.isDirectoryExist(path):
            raise NotADirectoryError("Directory does not exist !!!")
        if not FileUtil.isDirectory(path):
            raise NotADirectoryError("Parameter is not a directory!!!")
@staticmethod
def isDirectory(path):
"""
        INFO: This function checks whether the given path is a directory
:param path: File path on which is the target source
:return: BOOLEAN
"""
return os.path.isdir(path)
@staticmethod
def isDirectoryExist(path):
"""
        INFO: This function checks whether the given path exists
:param path: File path on which is the target source
:return: BOOLEAN
"""
return os.path.exists(path)
@staticmethod
def getFileLinesAsList(fileName):
"""
INFO: This function will return file's line as list
:param fileName: File name for which is the result for list
:return: LIST[STRING]
"""
        with open(fileName) as fOpen:
            return fOpen.readlines()
@staticmethod
def compareConfigurationFileResult(firstConfList, secondConfList):
"""
INFO: This function will return to result of list compare
WARNING: If parameter different class object from list, it will raise error
:param firstConfList: First conf file list
:param secondConfList: Second conf file list
:return: LIST[STRING]
"""
FileUtil.checkClassName(firstConfList, "list")
FileUtil.checkClassName(secondConfList, "list")
resultArray = []
secondArrayCounter = 0
        for i in range(len(firstConfList)):
            firstListLine = firstConfList[i].strip()
            addFlag = True
            for j in range(secondArrayCounter, len(secondConfList)):
                secondListLine = secondConfList[j].strip()
                if firstListLine == secondListLine:
                    addFlag = False
                    break
                secondArrayCounter += 1
if addFlag:
resultArray.append(firstListLine)
return resultArray
@staticmethod
def compareConfigurationFileResultWithFile(firstConfFile, secondConfFile):
"""
INFO: This function will return to result of Files list compare
WARNING: If parameter different class object from list, it will raise error
or if file error occured on that block it will re-raise excepton
:param firstConfFile: First conf file name
:param secondConfFile: Second conf file name
:return: LIST[STRING]
"""
try:
return FileUtil.compareConfigurationFileResult(FileUtil.getFileLinesAsList(firstConfFile),
FileUtil.getFileLinesAsList(secondConfFile))
except Exception as exception:
raise exception
@staticmethod
def removeFileInCurrentPath(fileName):
"""
INFO: This function will remove current path file
:param fileName: which will removes the file name
:return: NONE
"""
path = os.path.dirname(os.path.realpath(__file__))
dirs = os.listdir(path)
for file in dirs:
if fileName in file:
os.remove(file)
@staticmethod
def removeFileInExternalPath(fileName, pathName):
"""
INFO: This function will remove specific path file
:param fileName: which will removes the file name
:param pathName: which will removes the files pathName
:return: NONE
"""
path = os.path.dirname(pathName)
dirs = os.listdir(path)
for file in dirs:
if fileName in file:
os.remove(file)
@staticmethod
def dataFileSplit(fileName, maximumChaperSize = 500 * 1024 * 1024, memoryBufferSize = 50 * 1024 * 1024 * 1024):
"""
        INFO: This function splits a file into chapters by data size. It is suitable for data files.
        :param fileName: fileName for which is the source file name
        :param maximumChaperSize: Maximum chapter size, default value 500MB
        :param memoryBufferSize: Memory buffer size, default value 50GB
:return: NONE
"""
chapterCount = 0
        bufferText = b''
with open(fileName, 'rb') as src:
while True:
target = open(fileName + '.%03d' % chapterCount, 'wb')
written = 0
while written < maximumChaperSize:
if len(bufferText) > 0:
target.write(bufferText)
target.write(src.read(min(memoryBufferSize, maximumChaperSize - written)))
written += min(memoryBufferSize, maximumChaperSize - written)
bufferText = src.read(1)
if len(bufferText) == 0:
break
target.close()
if len(bufferText) == 0:
break
chapterCount += 1
@staticmethod
def textFileSplit(fileName, lineCount = 20, outputFileName = "output.txt"):
"""
        INFO: This function splits a file by line count; it is suitable for text files.
:param fileName: fileName for which is the source file name
:param lineCount: Split for each file default value 20
:param outputFileName: Output file names for each one output.1.txt, output.2.txt, etc.
:return: NONE
"""
fOpen = open(fileName, 'r')
count = 0
at = 0
dest = None
        for line in fOpen:
            if count % lineCount == 0:
                if dest: dest.close()
                dest = open(outputFileName + str(at) + '.txt', 'w')
                at += 1
            dest.write(line)
            count += 1
        if dest: dest.close()
        fOpen.close()
@staticmethod
def zipFiles(zipFileName, fileName, filesPath = os.path.dirname(os.path.realpath(__file__))):
"""
        INFO: This function returns the name of the zipped file built from the given arguments.
        TRICKS: if fileName is a keyword, every file whose name contains it is zipped
:param zipFileName: Zipped last file name
:param fileName: Source file name
:param filesPath: Source file name source path
:return: STRING
"""
try:
fileName = fileName.strip()
ziph = zipfile.ZipFile(zipFileName.strip() + '.zip', 'w', zipfile.ZIP_DEFLATED)
            for root, dirs, files in os.walk(filesPath):
                for file in files:
                    if fileName in file:
                        ziph.write(os.path.join(root, file))
ziph.close()
except Exception as e:
raise IOError("File IO process has been occured IOError : " + str(e))
return filesPath + '/' + fileName
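# Minimal usage sketch (illustrative; the file names are hypothetical):
#
#     FileUtil.textFileSplit('big.txt', lineCount=100, outputFileName='part.')
#     FileUtil.dataFileSplit('dump.bin', maximumChaperSize=100 * 1024 * 1024)
#     FileUtil.zipFiles('backup', 'part.')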
|
python
|
from itertools import combinations
class Edge:
def __init__(self, frm, to, weight):
self.from_vertex = frm
self.to_vertex = to
self.dist = weight
def _get_edges(graph):
edges = []
for a, b in combinations(range(len(graph)), 2):
if graph[a][b]:
edges.append(Edge(a, b, graph[a][b]))
return edges
def _find_group(parents, i):
while i != parents[i]:
i = parents[i]
return i
def print_MST(graph):
    # Boruvka's algorithm: in each round, every component selects its cheapest
    # outgoing edge, and the selected edges are added until n - 1 edges remain.
    n = len(graph)
    edges = _get_edges(graph)
result = []
parents = [ i for i in range(n) ]
while len(result) < n - 1:
closest_index = [ -1 ] * n
for i in range(len(edges)):
a = _find_group(parents, edges[i].from_vertex)
b = _find_group(parents, edges[i].to_vertex)
if a != b:
if closest_index[a] == -1 or edges[closest_index[a]].dist > edges[i].dist:
closest_index[a] = i
if closest_index[b] == -1 or edges[closest_index[b]].dist > edges[i].dist:
closest_index[b] = i
for index in closest_index:
if index != -1:
a = _find_group(parents, edges[index].from_vertex)
b = _find_group(parents, edges[index].to_vertex)
if a != b:
result.append(edges[index])
parents[a] = b
for edge in result:
print(f'{edge.from_vertex} -> {edge.to_vertex}')
def main():
graph = [
[0, 3, 6, 5],
[3, 0, 0, 9],
[6, 0, 0, 4],
[5, 9, 4, 0]
]
print_MST(graph)
if __name__ == '__main__':
main()
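# For the sample graph above, the printed MST edges are:
#     0 -> 1
#     2 -> 3
#     0 -> 3
# (total weight 3 + 4 + 5 = 12)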
|
python
|
# -*- coding: utf-8 -*-
# File generated according to Generator/ClassesRef/Simulation/OPslip.csv
# WARNING! All changes made in this file will be lost!
"""Method code available at https://github.com/Eomys/pyleecan/tree/master/pyleecan/Methods/Simulation/OPslip
"""
from os import linesep
from sys import getsizeof
from logging import getLogger
from ._check import check_var, raise_
from ..Functions.get_logger import get_logger
from ..Functions.save import save
from ..Functions.copy import copy
from ..Functions.load import load_init_dict
from ..Functions.Load.import_class import import_class
from .OP import OP
# Import all class method
# Try/catch to remove unnecessary dependencies in unused method
try:
from ..Methods.Simulation.OPslip.get_Id_Iq import get_Id_Iq
except ImportError as error:
get_Id_Iq = error
try:
from ..Methods.Simulation.OPslip.get_felec import get_felec
except ImportError as error:
get_felec = error
try:
from ..Methods.Simulation.OPslip.get_N0 import get_N0
except ImportError as error:
get_N0 = error
try:
from ..Methods.Simulation.OPslip.get_Ud_Uq import get_Ud_Uq
except ImportError as error:
get_Ud_Uq = error
try:
from ..Methods.Simulation.OPslip.set_Id_Iq import set_Id_Iq
except ImportError as error:
set_Id_Iq = error
try:
from ..Methods.Simulation.OPslip.get_I0_Phi0 import get_I0_Phi0
except ImportError as error:
get_I0_Phi0 = error
try:
from ..Methods.Simulation.OPslip.get_slip import get_slip
except ImportError as error:
get_slip = error
try:
from ..Methods.Simulation.OPslip.set_I0_Phi0 import set_I0_Phi0
except ImportError as error:
set_I0_Phi0 = error
try:
from ..Methods.Simulation.OPslip.set_Ud_Uq import set_Ud_Uq
except ImportError as error:
set_Ud_Uq = error
try:
from ..Methods.Simulation.OPslip.get_U0_UPhi0 import get_U0_UPhi0
except ImportError as error:
get_U0_UPhi0 = error
try:
from ..Methods.Simulation.OPslip.set_U0_UPhi0 import set_U0_UPhi0
except ImportError as error:
set_U0_UPhi0 = error
from ._check import InitUnKnowClassError
class OPslip(OP):
"""Operating Point defined with slip, I0"""
VERSION = 1
# Check ImportError to remove unnecessary dependencies in unused method
# cf Methods.Simulation.OPslip.get_Id_Iq
if isinstance(get_Id_Iq, ImportError):
get_Id_Iq = property(
fget=lambda x: raise_(
ImportError("Can't use OPslip method get_Id_Iq: " + str(get_Id_Iq))
)
)
else:
get_Id_Iq = get_Id_Iq
# cf Methods.Simulation.OPslip.get_felec
if isinstance(get_felec, ImportError):
get_felec = property(
fget=lambda x: raise_(
ImportError("Can't use OPslip method get_felec: " + str(get_felec))
)
)
else:
get_felec = get_felec
# cf Methods.Simulation.OPslip.get_N0
if isinstance(get_N0, ImportError):
get_N0 = property(
fget=lambda x: raise_(
ImportError("Can't use OPslip method get_N0: " + str(get_N0))
)
)
else:
get_N0 = get_N0
# cf Methods.Simulation.OPslip.get_Ud_Uq
if isinstance(get_Ud_Uq, ImportError):
get_Ud_Uq = property(
fget=lambda x: raise_(
ImportError("Can't use OPslip method get_Ud_Uq: " + str(get_Ud_Uq))
)
)
else:
get_Ud_Uq = get_Ud_Uq
# cf Methods.Simulation.OPslip.set_Id_Iq
if isinstance(set_Id_Iq, ImportError):
set_Id_Iq = property(
fget=lambda x: raise_(
ImportError("Can't use OPslip method set_Id_Iq: " + str(set_Id_Iq))
)
)
else:
set_Id_Iq = set_Id_Iq
# cf Methods.Simulation.OPslip.get_I0_Phi0
if isinstance(get_I0_Phi0, ImportError):
get_I0_Phi0 = property(
fget=lambda x: raise_(
ImportError("Can't use OPslip method get_I0_Phi0: " + str(get_I0_Phi0))
)
)
else:
get_I0_Phi0 = get_I0_Phi0
# cf Methods.Simulation.OPslip.get_slip
if isinstance(get_slip, ImportError):
get_slip = property(
fget=lambda x: raise_(
ImportError("Can't use OPslip method get_slip: " + str(get_slip))
)
)
else:
get_slip = get_slip
# cf Methods.Simulation.OPslip.set_I0_Phi0
if isinstance(set_I0_Phi0, ImportError):
set_I0_Phi0 = property(
fget=lambda x: raise_(
ImportError("Can't use OPslip method set_I0_Phi0: " + str(set_I0_Phi0))
)
)
else:
set_I0_Phi0 = set_I0_Phi0
# cf Methods.Simulation.OPslip.set_Ud_Uq
if isinstance(set_Ud_Uq, ImportError):
set_Ud_Uq = property(
fget=lambda x: raise_(
ImportError("Can't use OPslip method set_Ud_Uq: " + str(set_Ud_Uq))
)
)
else:
set_Ud_Uq = set_Ud_Uq
# cf Methods.Simulation.OPslip.get_U0_UPhi0
if isinstance(get_U0_UPhi0, ImportError):
get_U0_UPhi0 = property(
fget=lambda x: raise_(
ImportError(
"Can't use OPslip method get_U0_UPhi0: " + str(get_U0_UPhi0)
)
)
)
else:
get_U0_UPhi0 = get_U0_UPhi0
# cf Methods.Simulation.OPslip.set_U0_UPhi0
if isinstance(set_U0_UPhi0, ImportError):
set_U0_UPhi0 = property(
fget=lambda x: raise_(
ImportError(
"Can't use OPslip method set_U0_UPhi0: " + str(set_U0_UPhi0)
)
)
)
else:
set_U0_UPhi0 = set_U0_UPhi0
# save and copy methods are available in all object
save = save
copy = copy
# get_logger method is available in all object
get_logger = get_logger
def __init__(
self,
I0_ref=None,
IPhi0_ref=None,
slip_ref=0,
U0_ref=None,
UPhi0_ref=None,
N0=None,
felec=None,
Tem_av_ref=None,
Pem_av_ref=None,
init_dict=None,
init_str=None,
):
"""Constructor of the class. Can be use in three ways :
- __init__ (arg1 = 1, arg3 = 5) every parameters have name and default values
for pyleecan type, -1 will call the default constructor
- __init__ (init_dict = d) d must be a dictionary with property names as keys
- __init__ (init_str = s) s must be a string
s is the file path to load
ndarray or list can be given for Vector and Matrix
object or dict can be given for pyleecan Object"""
if init_str is not None: # Load from a file
init_dict = load_init_dict(init_str)[1]
if init_dict is not None: # Initialisation by dict
assert type(init_dict) is dict
# Overwrite default value with init_dict content
if "I0_ref" in list(init_dict.keys()):
I0_ref = init_dict["I0_ref"]
if "IPhi0_ref" in list(init_dict.keys()):
IPhi0_ref = init_dict["IPhi0_ref"]
if "slip_ref" in list(init_dict.keys()):
slip_ref = init_dict["slip_ref"]
if "U0_ref" in list(init_dict.keys()):
U0_ref = init_dict["U0_ref"]
if "UPhi0_ref" in list(init_dict.keys()):
UPhi0_ref = init_dict["UPhi0_ref"]
if "N0" in list(init_dict.keys()):
N0 = init_dict["N0"]
if "felec" in list(init_dict.keys()):
felec = init_dict["felec"]
if "Tem_av_ref" in list(init_dict.keys()):
Tem_av_ref = init_dict["Tem_av_ref"]
if "Pem_av_ref" in list(init_dict.keys()):
Pem_av_ref = init_dict["Pem_av_ref"]
        # Set the properties (value check and conversion are done in setter)
self.I0_ref = I0_ref
self.IPhi0_ref = IPhi0_ref
self.slip_ref = slip_ref
self.U0_ref = U0_ref
self.UPhi0_ref = UPhi0_ref
# Call OP init
super(OPslip, self).__init__(
N0=N0, felec=felec, Tem_av_ref=Tem_av_ref, Pem_av_ref=Pem_av_ref
)
# The class is frozen (in OP init), for now it's impossible to
# add new properties
def __str__(self):
"""Convert this object in a readeable string (for print)"""
OPslip_str = ""
# Get the properties inherited from OP
OPslip_str += super(OPslip, self).__str__()
OPslip_str += "I0_ref = " + str(self.I0_ref) + linesep
OPslip_str += "IPhi0_ref = " + str(self.IPhi0_ref) + linesep
OPslip_str += "slip_ref = " + str(self.slip_ref) + linesep
OPslip_str += "U0_ref = " + str(self.U0_ref) + linesep
OPslip_str += "UPhi0_ref = " + str(self.UPhi0_ref) + linesep
return OPslip_str
def __eq__(self, other):
"""Compare two objects (skip parent)"""
if type(other) != type(self):
return False
# Check the properties inherited from OP
if not super(OPslip, self).__eq__(other):
return False
if other.I0_ref != self.I0_ref:
return False
if other.IPhi0_ref != self.IPhi0_ref:
return False
if other.slip_ref != self.slip_ref:
return False
if other.U0_ref != self.U0_ref:
return False
if other.UPhi0_ref != self.UPhi0_ref:
return False
return True
def compare(self, other, name="self", ignore_list=None):
"""Compare two objects and return list of differences"""
if ignore_list is None:
ignore_list = list()
if type(other) != type(self):
return ["type(" + name + ")"]
diff_list = list()
# Check the properties inherited from OP
diff_list.extend(super(OPslip, self).compare(other, name=name))
if other._I0_ref != self._I0_ref:
diff_list.append(name + ".I0_ref")
if other._IPhi0_ref != self._IPhi0_ref:
diff_list.append(name + ".IPhi0_ref")
if other._slip_ref != self._slip_ref:
diff_list.append(name + ".slip_ref")
if other._U0_ref != self._U0_ref:
diff_list.append(name + ".U0_ref")
if other._UPhi0_ref != self._UPhi0_ref:
diff_list.append(name + ".UPhi0_ref")
# Filter ignore differences
diff_list = list(filter(lambda x: x not in ignore_list, diff_list))
return diff_list
def __sizeof__(self):
"""Return the size in memory of the object (including all subobject)"""
S = 0 # Full size of the object
# Get size of the properties inherited from OP
S += super(OPslip, self).__sizeof__()
S += getsizeof(self.I0_ref)
S += getsizeof(self.IPhi0_ref)
S += getsizeof(self.slip_ref)
S += getsizeof(self.U0_ref)
S += getsizeof(self.UPhi0_ref)
return S
def as_dict(self, type_handle_ndarray=0, keep_function=False, **kwargs):
"""
        Convert this object into a json serializable dict (can be used in __init__).
type_handle_ndarray: int
How to handle ndarray (0: tolist, 1: copy, 2: nothing)
keep_function : bool
True to keep the function object, else return str
Optional keyword input parameter is for internal use only
and may prevent json serializability.
"""
# Get the properties inherited from OP
OPslip_dict = super(OPslip, self).as_dict(
type_handle_ndarray=type_handle_ndarray,
keep_function=keep_function,
**kwargs
)
OPslip_dict["I0_ref"] = self.I0_ref
OPslip_dict["IPhi0_ref"] = self.IPhi0_ref
OPslip_dict["slip_ref"] = self.slip_ref
OPslip_dict["U0_ref"] = self.U0_ref
OPslip_dict["UPhi0_ref"] = self.UPhi0_ref
# The class name is added to the dict for deserialisation purpose
# Overwrite the mother class name
OPslip_dict["__class__"] = "OPslip"
return OPslip_dict
def _set_None(self):
"""Set all the properties to None (except pyleecan object)"""
self.I0_ref = None
self.IPhi0_ref = None
self.slip_ref = None
self.U0_ref = None
self.UPhi0_ref = None
# Set to None the properties inherited from OP
super(OPslip, self)._set_None()
def _get_I0_ref(self):
"""getter of I0_ref"""
return self._I0_ref
def _set_I0_ref(self, value):
"""setter of I0_ref"""
check_var("I0_ref", value, "float")
self._I0_ref = value
I0_ref = property(
fget=_get_I0_ref,
fset=_set_I0_ref,
doc=u"""Current rms value
:Type: float
""",
)
def _get_IPhi0_ref(self):
"""getter of IPhi0_ref"""
return self._IPhi0_ref
def _set_IPhi0_ref(self, value):
"""setter of IPhi0_ref"""
check_var("IPhi0_ref", value, "float")
self._IPhi0_ref = value
IPhi0_ref = property(
fget=_get_IPhi0_ref,
fset=_set_IPhi0_ref,
doc=u"""Current phase
:Type: float
""",
)
def _get_slip_ref(self):
"""getter of slip_ref"""
return self._slip_ref
def _set_slip_ref(self, value):
"""setter of slip_ref"""
check_var("slip_ref", value, "float")
self._slip_ref = value
slip_ref = property(
fget=_get_slip_ref,
fset=_set_slip_ref,
doc=u"""Rotor mechanical slip
:Type: float
""",
)
def _get_U0_ref(self):
"""getter of U0_ref"""
return self._U0_ref
def _set_U0_ref(self, value):
"""setter of U0_ref"""
check_var("U0_ref", value, "float")
self._U0_ref = value
U0_ref = property(
fget=_get_U0_ref,
fset=_set_U0_ref,
doc=u"""stator voltage (phase to neutral)
:Type: float
""",
)
def _get_UPhi0_ref(self):
"""getter of UPhi0_ref"""
return self._UPhi0_ref
def _set_UPhi0_ref(self, value):
"""setter of UPhi0_ref"""
check_var("UPhi0_ref", value, "float")
self._UPhi0_ref = value
UPhi0_ref = property(
fget=_get_UPhi0_ref,
fset=_set_UPhi0_ref,
doc=u"""Voltage phase
:Type: float
""",
)
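# --- Usage sketch (an illustrative addition, not part of the generated file) ---
# The constructor docstring lists three init paths; a minimal round-trip through
# as_dict(), assuming check_var accepts these illustrative float values:
if __name__ == "__main__":
    op = OPslip(N0=1500.0, felec=50.0, I0_ref=10.0, IPhi0_ref=0.0, slip_ref=0.05)
    d = op.as_dict()  # json-serializable dict, carries "__class__": "OPslip"
    op2 = OPslip(init_dict=d)  # second init path: rebuild from the dict
    assert op == op2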
|
python
|
#!C:/Python35/python.exe
# -*- coding: UTF-8 -*-
#
# belmih 2016
#
import threading
import queue
import argparse
import os
import time
import zipfile
import shutil
import xml.etree.cElementTree as ET
import csv
abspath = os.path.abspath(__file__)
workdir = os.path.dirname(abspath)
os.chdir(workdir)
CSV_ID_LEVEL = "id_level.csv"
CSV_ID_OBJECT = "id_object_name.csv"
UNZIPFOLDER = './unzip'
DELETEXML = True
# create new csv file
def create_new_file(filename, fieldnames):
with open(filename, 'w', newline='') as csvfile:
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writeheader()
# write csv file
def write_csv(filename, fieldnames, data):
with open(filename, 'a', newline='') as csvfile:
writer = csv.DictWriter(csvfile, dialect='excel', fieldnames=fieldnames)
writer.writerow(data)
# parse xml and push extracted rows onto the csv queues
def parse_xml(xmlfile):
# with lock:
# print(xmlfile)
tree = ET.parse(xmlfile)
root = tree.getroot()
id = root.findall("./var[@name='id']")[0].get('value')
level = root.findall("./var[@name='level']")[0].get('value')
quecsvidlevel.put({'id': id, 'level': level})
for obj in root.findall('./objects/object'):
for key, value in obj.items():
quecsvidobject.put({'id': id, 'object_name': value})
def unzip_archive(item):
tmpfoldername = os.path.basename(item).split('.')[0]
tmpfolderpath = os.path.join('unzip', tmpfoldername)
zfile = zipfile.ZipFile(item)
for name in zfile.namelist():
(dirname, filename) = os.path.split(name)
dirnamepath = os.path.join(tmpfolderpath, dirname)
if not os.path.exists(dirnamepath):
os.makedirs(dirnamepath)
zfile.extract(name, dirnamepath)
return tmpfolderpath
def worker1():
while True:
item = quecsvidlevel.get()
if item is None:
with lock:
print(CSV_ID_LEVEL + ' done.')
break
write_csv(CSV_ID_LEVEL, ['id', 'level'], item)
quecsvidlevel.task_done()
def worker2():
while True:
item = quecsvidobject.get()
if item is None:
with lock:
print(CSV_ID_OBJECT + ' done.')
break
write_csv(CSV_ID_OBJECT, ['id', 'object_name'], item)
quecsvidobject.task_done()
def worker():
while True:
item = quezip.get()
if item is None:
break
with lock:
print(item)
tmpfolderpath = unzip_archive(item)
# find all xml
for root, dirs, files in os.walk(tmpfolderpath):
for file in files:
if file.endswith(".xml"):
f = os.path.join(root, file)
parse_xml(f)
quezip.task_done()
def remove_unzip_folder():
if DELETEXML and os.path.exists(UNZIPFOLDER):
print ("remove {} ...".format(UNZIPFOLDER))
shutil.rmtree(UNZIPFOLDER)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Do *.csv files.')
parser.add_argument('-p', type=int, help='count processes', default=2)
args = parser.parse_args()
numworkerthreads = args.p
start = time.perf_counter()
print(os.getcwd())
create_new_file(CSV_ID_LEVEL, ['id', 'level'])
    create_new_file(CSV_ID_OBJECT, ['id', 'object_name'])  # must match write_csv fieldnames
remove_unzip_folder()
lock = threading.Lock()
quezip = queue.Queue()
quecsvidlevel = queue.Queue()
quecsvidobject = queue.Queue()
threads = []
for i in range(numworkerthreads):
t = threading.Thread(target=worker)
t.start()
threads.append(t)
for root, dirs, files in os.walk("zip"):
for file in files:
zf = os.path.normpath(os.path.join(root, file))
quezip.put(zf)
quezip.join()
for i in range(numworkerthreads):
quezip.put(None)
for t in threads:
t.join()
t1 = threading.Thread(target=worker1)
t2 = threading.Thread(target=worker2)
t1.start()
t2.start()
quecsvidlevel.join()
quecsvidobject.join()
quecsvidlevel.put(None)
quecsvidobject.put(None)
    t1.join()
    t2.join()
time.sleep(.1)
remove_unzip_folder()
print('time:', time.perf_counter() - start)
|
python
|
import itertools as it, operator as op, functools as ft
from collections import ChainMap, OrderedDict, defaultdict
from collections.abc import Mapping
from pathlib import Path
from pprint import pprint
import os, sys, unittest, types, datetime, re, math
import tempfile, warnings, shutil, zipfile
import yaml # PyYAML module is required for tests
path_project = Path(__file__).parent.parent
sys.path.insert(1, str(path_project))
import tb_routing as tb
verbose = os.environ.get('TB_DEBUG')
if verbose:
tb.u.logging.basicConfig(
format='%(asctime)s :: %(name)s %(levelname)s :: %(message)s',
datefmt='%Y-%m-%d %H:%M:%S', level=tb.u.logging.DEBUG )
class dmap(ChainMap):
maps = None
def __init__(self, *maps, **map0):
maps = list((v if not isinstance( v,
(types.GeneratorType, list, tuple) ) else OrderedDict(v)) for v in maps)
if map0 or not maps: maps = [map0] + maps
super(dmap, self).__init__(*maps)
def __repr__(self):
return '<{} {:x} {}>'.format(
self.__class__.__name__, id(self), repr(self._asdict()) )
def _asdict(self):
items = dict()
for k, v in self.items():
if isinstance(v, self.__class__): v = v._asdict()
items[k] = v
return items
def _set_attr(self, k, v):
self.__dict__[k] = v
def __iter__(self):
key_set = dict.fromkeys(set().union(*self.maps), True)
return filter(lambda k: key_set.pop(k, False), it.chain.from_iterable(self.maps))
def __getitem__(self, k):
k_maps = list()
for m in self.maps:
if k in m:
if isinstance(m[k], Mapping): k_maps.append(m[k])
elif not (m[k] is None and k_maps): return m[k]
if not k_maps: raise KeyError(k)
return self.__class__(*k_maps)
def __getattr__(self, k):
try: return self[k]
except KeyError: raise AttributeError(k)
def __setattr__(self, k, v):
for m in map(op.attrgetter('__dict__'), [self] + self.__class__.mro()):
if k in m:
self._set_attr(k, v)
break
else: self[k] = v
def __delitem__(self, k):
for m in self.maps:
if k in m: del m[k]
def yaml_load(stream, dict_cls=OrderedDict, loader_cls=yaml.SafeLoader):
if not hasattr(yaml_load, '_cls'):
class CustomLoader(loader_cls): pass
def construct_mapping(loader, node):
loader.flatten_mapping(node)
return dict_cls(loader.construct_pairs(node))
CustomLoader.add_constructor(
yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG, construct_mapping )
# Do not auto-resolve dates/timestamps, as PyYAML does that badly
res_map = CustomLoader.yaml_implicit_resolvers = CustomLoader.yaml_implicit_resolvers.copy()
res_int = list('-+0123456789')
for c in res_int: del res_map[c]
CustomLoader.add_implicit_resolver(
'tag:yaml.org,2002:int',
re.compile(r'''^(?:[-+]?0b[0-1_]+
|[-+]?0[0-7_]+
|[-+]?(?:0|[1-9][0-9_]*)
|[-+]?0x[0-9a-fA-F_]+)$''', re.X), res_int )
yaml_load._cls = CustomLoader
return yaml.load(stream, yaml_load._cls)
def load_test_data(path_dir, path_stem, name):
'Load test data from specified YAML file and return as dmap object.'
with (path_dir / '{}.test.{}.yaml'.format(path_stem, name)).open() as src:
return dmap(yaml_load(src))
def struct_from_val(val, cls, as_tuple=False):
if isinstance(val, (tuple, list)): val = cls(*val)
elif isinstance(val, (dmap, dict, OrderedDict)): val = cls(**val)
else: raise ValueError(val)
return val if not as_tuple else tb.u.attr.astuple(val)
@tb.u.attr_struct
class JourneyStats: keys = 'start end'
@tb.u.attr_struct
class JourneySeg: keys = 'type src dst'
@tb.u.attr_struct
class TestGoal:
src = tb.u.attr_init()
dst = tb.u.attr_init()
dts_start = tb.u.attr_init()
dts_latest = tb.u.attr_init(None)
class GTFSTestFixture:
def __init__(self, path_gtfs_zip, path_file):
self.path_gtfs_zip = Path(path_gtfs_zip)
self.path_file = Path(path_file)
self.path_test = self.path_file.parent
self.path_project = self.path_test.parent
self.path_tmp_base = '{}.test.{}'.format(
self.path_project.parent.resolve().name, self.path_file.stem )
self._path_cache_state = defaultdict(lambda: ...)
def load_test_data(self, name):
return load_test_data(self.path_test, self.path_file.stem, name)
_path_unzip = None
@property
def path_unzip(self):
if self._path_unzip: return self._path_unzip
paths_unzip = [ self.path_test / '{}.data.unzip'.format(self.path_file.stem),
Path(tempfile.gettempdir()) / '{}.data.unzip'.format(self.path_tmp_base) ]
for p in paths_unzip:
if not p.exists():
try: p.mkdir(parents=True)
except OSError: continue
path_unzip = p
break
else:
raise OSError( 'Failed to find/create path to unzip data to.'
' Paths checked: {}'.format(' '.join(repr(str(p)) for p in paths_unzip)) )
path_done = path_unzip / '.unzip-done.check'
mtime_src = self.path_gtfs_zip.stat().st_mtime
mtime_done = path_done.stat().st_mtime if path_done.exists() else 0
if mtime_done < mtime_src:
shutil.rmtree(str(path_unzip))
path_unzip.mkdir(parents=True)
mtime_done = None
if not mtime_done:
with zipfile.ZipFile(str(self.path_gtfs_zip)) as src: src.extractall(str(path_unzip))
path_done.touch()
self._path_unzip = path_unzip
return self._path_unzip
def _paths_src_mtimes(self):
paths_src = [Path(tb.__file__).parent, path_project]
for root, dirs, files in it.chain.from_iterable(os.walk(str(p)) for p in paths_src):
p = Path(root)
for name in files: yield (p / name).stat().st_mtime
def _path_cache(self, ext):
path = self._path_cache_state[ext]
		if path is not ...: return path
path = self._path_cache_state[ext] = None
paths_cache = [ self.path_test / '{}.cache.{}'.format(self.path_file.stem, ext),
Path(tempfile.gettempdir()) / '{}.cache.{}'.format(self.path_tmp_base, ext) ]
for p in paths_cache:
if not p.exists():
try:
p.touch()
p.unlink()
except OSError: continue
path = self._path_cache_state[ext] = p
break
else:
warnings.warn('Failed to find writable cache-path, disabling cache')
warnings.warn(
'Cache paths checked: {}'.format(' '.join(repr(str(p)) for p in paths_cache)) )
mtime_src = max(self._paths_src_mtimes())
mtime_cache = 0 if not path.exists() else path.stat().st_mtime
if mtime_cache and mtime_src > mtime_cache:
warnings.warn( 'Existing timetable/transfer cache'
' file is older than code, but using it anyway: {}'.format(path) )
return path
@property
def path_cache(self): return self._path_cache('graph.bin')
@property
def path_timetable(self): return self._path_cache('tt.pickle')
class GraphAssertions:
dts_slack = 3 * 60
def __init__(self, graph=None): self.graph = graph
def debug_trip_transfers(self, stop1, stop2, stop3, max_km=0.2, max_td=3600, graph=None):
'''Show info on possible T[stop-1] -> T[stop-2] -> U[stop-2] -> U[stop-3]
transfers between trips (both passing stop-2), going only by timetable data.'''
graph = graph or self.graph
stop1, stop2, stop3 = (graph.timetable.stops[s] for s in [stop1, stop2, stop3])
for (n1_min, line1), (n2_max, line2) in it.product(
graph.lines.lines_with_stop(stop1), graph.lines.lines_with_stop(stop3) ):
for ts1 in line1[0]:
if ts1.stop == stop2: break
else: continue
for ts2 in line2[0]:
if ts2.stop == stop2: break
else: continue
n1_max, n2_min = ts1.stopidx, ts2.stopidx
for ts1, ts2 in it.product(line1[0][n1_min:n1_max+1], line2[0][n2_min:n2_max+1]):
n1, n2 = ts1.stopidx, ts2.stopidx
if ts1.stop == ts2.stop: km = 0
else:
lon1, lat1, lon2, lat2 = (
math.radians(float(v)) for v in
[ts1.stop.lon, ts1.stop.lat, ts2.stop.lon, ts2.stop.lat] )
km = 6367 * 2 * math.asin(math.sqrt(
math.sin((lat2 - lat1)/2)**2 +
math.cos(lat1) * math.cos(lat2) * math.sin((lon2 - lon1)/2)**2 ))
if km <= max_km:
fp_delta = graph.timetable.footpaths.time_delta(ts1.stop, ts2.stop)
if fp_delta is None: fp_delta = -1
print(
'X-{}: lon={:.4f} lat={:.4f}\n walk:'
' {:,.1f}m, dt={:,.0f}s\n Y-{}: {:.4f} {:.4f}'.format(
n1, ts1.stop.lon, ts1.stop.lat,
km * 1000, fp_delta, n2, ts2.stop.lon, ts2.stop.lat ))
for trip1, trip2 in it.product(line1, line2):
ts1, ts2 = trip1[n1], trip2[n2]
td = ts2.dts_dep - ts1.dts_arr
if 0 <= td <= max_td:
print(' X-arr[{}]: {} -> Y-dep[{}]: {} (delta: {:,.1f}s)'.format(
trip1.id, tb.u.dts_format(ts1.dts_arr),
trip2.id, tb.u.dts_format(ts2.dts_dep), td ))
print()
def assert_journey_components(self, test, graph=None, verbose=verbose):
'''Check that lines, trips, footpaths
and transfers for all test journeys can be found individually.'''
graph = graph or self.graph
goal = struct_from_val(test.goal, TestGoal)
goal_src, goal_dst = op.itemgetter(goal.src, goal.dst)(graph.timetable.stops)
assert goal_src and goal_dst
def raise_error(tpl, *args, **kws):
jn_seg = kws.get('err_seg', seg_name)
jn_seg = ':{}'.format(jn_seg) if jn_seg else ''
raise AssertionError('[{}{}] {}'.format(jn_name, jn_seg, tpl).format(*args, **kws))
for jn_name, jn_info in (test.journey_set or dict()).items():
jn_stats = struct_from_val(jn_info.stats, JourneyStats)
jn_start, jn_end = map(graph.timetable.dts_parse, [jn_stats.start, jn_stats.end])
ts_first, ts_last, ts_transfer = set(), set(), set()
# Check segments
for seg_name, seg in jn_info.segments.items():
seg = struct_from_val(seg, JourneySeg)
a, b = op.itemgetter(seg.src, seg.dst)(graph.timetable.stops)
ts_transfer_chk, ts_transfer_found, line_found = list(ts_transfer), False, False
ts_transfer.clear()
if seg.type == 'trip':
for n, line in graph.lines.lines_with_stop(a):
for m, stop in enumerate(line.stops[n:], n):
if stop is b: break
else: continue
for trip in line:
for ts in ts_transfer_chk:
if not (ts.trip.id == trip.id and ts.stop is a):
for transfer in graph.transfers.from_trip_stop(ts):
if transfer.ts_to.stop is trip[n].stop: break
else: continue
ts_transfer_found = True
ts_transfer_chk.clear()
break
if a is goal_src: ts_first.add(trip[n])
if b is goal_dst: ts_last.add(trip[m])
ts_transfer.add(trip[m])
line_found = True
if not line_found: raise_error('No Lines/Trips found for trip-segment')
elif seg.type == 'fp':
if not graph.timetable.footpaths.connected(a, b):
raise_error('No footpath-transfer found between src/dst: {} -> {}', a, b)
for ts in ts_transfer_chk:
if ts.stop is not a: continue
ts_transfer_found = True
ts_transfer_chk.clear()
break
for m, line in graph.lines.lines_with_stop(b):
for trip in line:
# if b is goal_dst: ts_last.add(trip[m])
ts_transfer.add(trip[m])
line_found = True
if not line_found and b is not goal_dst:
raise_error('No Lines/Trips found for footpath-segment dst')
else: raise NotImplementedError
if not ts_transfer_found and a is not goal_src:
raise_error( 'No transfers found from'
' previous segment (checked: {})', len(ts_transfer_chk) )
if not ts_transfer and b is not goal_dst:
raise_error('No transfers found from segment (type={}) end ({!r})', seg.type, seg.dst)
# Check start/end times
seg_name = None
for k, ts_set, chk in [('dts_dep', ts_first, jn_start), ('dts_arr', ts_last, jn_end)]:
dt_min = min(abs(chk - getattr(ts, k)) for ts in ts_set) if ts_set else 0
if dt_min > self.dts_slack:
if verbose:
print('[{}] All TripStops for {} goal-point:'.format(jn_name, k))
for ts in ts_set:
print( ' TripStop(trip_id={}, stopidx={}, stop_id={}, {}={})'\
.format(ts.trip.id, ts.stopidx, ts.stop.id, k, tb.u.dts_format(getattr(ts, k))) )
print('[{}] Checking {} against: {}'.format(jn_name, k, tb.u.dts_format(chk)))
raise_error( 'No trip-stops close to {} goal-point'
' in time (within {:,}s), min diff: {:,}s', k, self.dts_slack, dt_min )
def assert_journey_results(self, test, journeys, graph=None, verbose=verbose):
'Assert that all journeys described by test-data (from YAML) match journeys (JourneySet).'
graph = graph or self.graph
if verbose:
print('\n' + ' -'*5, 'Journeys found:')
journeys.pretty_print()
jn_matched = set()
for jn_name, jn_info in (test.journey_set or dict()).items():
jn_info_match = False
for journey in journeys:
if id(journey) in jn_matched: continue
if verbose: print('\n[{}] check vs journey:'.format(jn_name), journey)
jn_stats = struct_from_val(jn_info.stats, JourneyStats)
dts_dep_test, dts_arr_test = map(graph.timetable.dts_parse, [jn_stats.start, jn_stats.end])
dts_dep_jn, dts_arr_jn = journey.dts_dep, journey.dts_arr
time_check = max(
abs(dts_dep_test - dts_dep_jn),
abs(dts_arr_test - dts_arr_jn) ) <= self.dts_slack
if verbose:
print(' ', 'time check - {}: {} == {} and {} == {}'.format(
['fail', 'pass'][time_check],
*map(tb.u.dts_format, [dts_dep_test, dts_dep_jn, dts_arr_test, dts_arr_jn]) ))
if not time_check: continue
for seg_jn, seg_test in it.zip_longest(journey, jn_info.segments.items()):
seg_test_name, seg_test = seg_test
if not (seg_jn and seg_test): break
seg_test = struct_from_val(seg_test, JourneySeg)
a_test, b_test = op.itemgetter(seg_test.src, seg_test.dst)(graph.timetable.stops)
type_test = seg_test.type
if isinstance(seg_jn, tb.t.public.JourneyTrip):
type_jn, a_jn, b_jn = 'trip', seg_jn.ts_from.stop, seg_jn.ts_to.stop
elif isinstance(seg_jn, tb.t.public.JourneyFp):
type_jn, a_jn, b_jn = 'fp', seg_jn.stop_from, seg_jn.stop_to
else: raise ValueError(seg_jn)
if verbose:
print(' ', seg_test_name, type_test == type_jn, a_test is a_jn, b_test is b_jn)
if not (type_test == type_jn and a_test is a_jn and b_test is b_jn): break
else:
jn_info_match = True
jn_matched.add(id(journey))
break
if not jn_info_match:
raise AssertionError('No journeys to match test-data for: {}'.format(jn_name))
if verbose: print('[{}] match found'.format(jn_name))
for journey in journeys:
if id(journey) not in jn_matched:
raise AssertionError('Unmatched journey found: {}'.format(journey))
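# --- dmap usage sketch (an illustrative addition, not part of the test suite) ---
# dmap layers several mappings: nested mappings present in more than one layer
# are merged into a child dmap, and keys are reachable as attributes:
if __name__ == '__main__':
	cfg = dmap({'a': {'x': 1}}, {'a': {'y': 2}, 'b': 3})
	assert cfg.a.x == 1 and cfg.a.y == 2 and cfg.b == 3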
|
python
|
import aredis
from ddtrace import config
from ddtrace.vendor import wrapt
from ...internal.utils.wrappers import unwrap
from ...pin import Pin
from ..redis.util import _trace_redis_cmd
from ..redis.util import _trace_redis_execute_pipeline
from ..redis.util import format_command_args
config._add("aredis", dict(_default_service="redis"))
def patch():
"""Patch the instrumented methods"""
if getattr(aredis, "_datadog_patch", False):
return
setattr(aredis, "_datadog_patch", True)
_w = wrapt.wrap_function_wrapper
_w("aredis.client", "StrictRedis.execute_command", traced_execute_command)
_w("aredis.client", "StrictRedis.pipeline", traced_pipeline)
_w("aredis.pipeline", "StrictPipeline.execute", traced_execute_pipeline)
_w("aredis.pipeline", "StrictPipeline.immediate_execute_command", traced_execute_command)
Pin(service=None).onto(aredis.StrictRedis)
def unpatch():
if getattr(aredis, "_datadog_patch", False):
setattr(aredis, "_datadog_patch", False)
unwrap(aredis.client.StrictRedis, "execute_command")
unwrap(aredis.client.StrictRedis, "pipeline")
unwrap(aredis.pipeline.StrictPipeline, "execute")
unwrap(aredis.pipeline.StrictPipeline, "immediate_execute_command")
#
# tracing functions
#
async def traced_execute_command(func, instance, args, kwargs):
pin = Pin.get_from(instance)
if not pin or not pin.enabled():
return await func(*args, **kwargs)
with _trace_redis_cmd(pin, config.aredis, instance, args):
# run the command
return await func(*args, **kwargs)
async def traced_pipeline(func, instance, args, kwargs):
pipeline = await func(*args, **kwargs)
pin = Pin.get_from(instance)
if pin:
pin.onto(pipeline)
return pipeline
async def traced_execute_pipeline(func, instance, args, kwargs):
pin = Pin.get_from(instance)
if not pin or not pin.enabled():
return await func(*args, **kwargs)
cmds = [format_command_args(c) for c, _ in instance.command_stack]
resource = "\n".join(cmds)
with _trace_redis_execute_pipeline(pin, config.aredis, resource, instance):
return await func(*args, **kwargs)
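# --- Usage sketch (illustrative; assumes a Redis server on localhost:6379) ---
# After patch(), execute_command and pipeline executions are traced through the
# Pin attached to aredis.StrictRedis:
if __name__ == "__main__":
    import asyncio

    patch()
    client = aredis.StrictRedis(host="localhost", port=6379)
    asyncio.run(client.set("key", "value"))
    unpatch()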
|
python
|
import uuid
import datetime
import typing
from abc import ABC, abstractmethod
from sensory_cloud.config import Config
from sensory_cloud.services.crypto_service import CryptoService
import sensory_cloud.generated.common.common_pb2 as common_pb2
import sensory_cloud.generated.oauth.oauth_pb2_grpc as oauth_pb2_grpc
import sensory_cloud.generated.oauth.oauth_pb2 as oauth_pb2
import sensory_cloud.generated.v1.management.device_pb2_grpc as device_pb2_grpc
import sensory_cloud.generated.v1.management.device_pb2 as device_pb2
class OAuthClient:
"""
Class that holds OAuth client id and secret
"""
def __init__(
self,
client_id: str,
client_secret: str,
):
"""
Constructor method for the OAuthClient class
Arguments:
client_id: String containing the client id
client_secret: String containing the client secret
"""
self._client_id = client_id
self._client_secret = client_secret
@property
def client_id(self) -> str:
"""
Get method for the client id attribute
Returns:
String containing the client id
"""
return self._client_id
@property
def client_secret(self) -> str:
"""
Get method for the client secret attribute
Returns:
String containing the client secret
"""
return self._client_secret
class OAuthToken:
"""
Class that holds OAuth token and expiration
"""
def __init__(self, token: str, expires: datetime.datetime):
"""
Constructor method for the OAuthToken class
Arguments:
token: String containing the oauth token
expires: datetime.datetime object containing the token's
expiration time stamp
"""
self._token = token
self._expires = expires
@property
def token(self) -> str:
"""
Get method that returns the oauth token attribute
Returns:
String containing the oauth token
"""
return self._token
@property
def expires(self) -> datetime.datetime:
"""
Get method that returns the expiration date attribute
Returns:
A datetime.datetime object containing the token's
expiration time stamp
"""
return self._expires
class IOauthService(ABC):
"""
Abstract class that manages OAuth interactions with Sensory Cloud
"""
@abstractmethod
def generate_credentials(self) -> OAuthClient:
"""Method that generates a client id and a client secret"""
@abstractmethod
def get_token(self) -> OAuthToken:
"""Method that gets a token for the provided credentials"""
@abstractmethod
def register(
self, device_id: str, device_name: str, credential: str
) -> device_pb2.DeviceResponse:
"""
Method that registers credentials provided by the attached SecureCredentialStore to Sensory Cloud.
This should only be called once per unique credential pair. An error will be thrown if registration fails.
"""
class ISecureCredentialStore(ABC):
@abstractmethod
def client_id(self):
"""Method that gets the client id"""
@abstractmethod
def client_secret(self):
"""Method that gets the client secret"""
class OauthService(IOauthService):
"""
Class that manages OAuth interactions with Sensory Cloud
"""
def __init__(self, config: Config, secure_credential_store: ISecureCredentialStore):
"""
Constructor method for OauthService
Arguments:
config: Config object containing the relevant grpc connection information
secure_credential_store: ISecureCredentialStore that stores the client id
and client secret
"""
self._config: Config = config
self._oauth_client: oauth_pb2_grpc.OauthServiceStub = (
oauth_pb2_grpc.OauthServiceStub(channel=config.channel)
)
self._device_client: device_pb2_grpc.DeviceServiceStub = (
device_pb2_grpc.DeviceServiceStub(channel=config.channel)
)
self._secure_credential_store: ISecureCredentialStore = secure_credential_store
def generate_credentials(self) -> OAuthClient:
"""
Can be called to generate secure and guaranteed unique credentials.
        Should be used the first time the SDK registers an OAuth token with the cloud.
Returns:
An OAuthClient
"""
client: OAuthClient = OAuthClient(
client_id=str(uuid.uuid4()),
client_secret=CryptoService().get_secure_random_string(length=24),
)
return client
def get_who_am_i(self) -> device_pb2.DeviceResponse:
"""
        Get information about the currently registered device, as inferred from the OAuth credentials supplied by the credential manager.
        A new token is requested every time this call is made, so use sparingly.
Returns:
Information about the current device
"""
oauth_token: OAuthToken = self.get_token()
metadata: typing.Tuple[typing.Tuple] = (
("authorization", f"Bearer {oauth_token.token}"),
)
return self._device_client.GetWhoAmI(
request=device_pb2.DeviceGetWhoAmIRequest(), metadata=metadata
)
def get_token(self) -> OAuthToken:
"""
Obtains an Oauth JWT from Sensory Cloud
Returns:
An OAuth JWT and expiration
"""
client_id: str = self._secure_credential_store.client_id
if client_id in [None, ""]:
raise ValueError(
"null client_id was returned from the secure credential store"
)
client_secret: str = self._secure_credential_store.client_secret
if client_secret in [None, ""]:
raise ValueError(
"null client_secret was returned from the secure credential store"
)
now: datetime.datetime = datetime.datetime.utcnow()
request: oauth_pb2.TokenRequest = oauth_pb2.TokenRequest(
clientId=client_id, secret=client_secret
)
response: common_pb2.TokenResponse = self._oauth_client.GetToken(request)
return OAuthToken(
token=response.accessToken,
            # timedelta's first positional argument is days; expiresIn is in seconds
            expires=now + datetime.timedelta(seconds=response.expiresIn),
)
def register(
self, device_id: str, device_name: str, credential: str
) -> device_pb2.DeviceResponse:
"""
Register credentials provided by the attached SecureCredentialStore to Sensory Cloud.
This function should only be called once per unique credential pair. An error will be
thrown if registration fails.
Arguments:
device_id: String containing the uuid device id
device_name: The friendly name of the registering device
credential: The credential configured on the Sensory Cloud server
Returns:
A DeviceResponse indicating if the device was successfully registered
"""
client_id: str = self._secure_credential_store.client_id
if client_id in [None, ""]:
raise ValueError(
"null client_id was returned from the secure credential store"
)
client_secret: str = self._secure_credential_store.client_secret
if client_secret in [None, ""]:
raise ValueError(
"null client_secret was returned from the secure credential store"
)
client: common_pb2.GenericClient = common_pb2.GenericClient(
clientId=client_id, secret=client_secret
)
request: device_pb2.EnrollDeviceRequest = device_pb2.EnrollDeviceRequest(
name=device_name,
deviceId=device_id,
tenantId=self._config.tenant_id,
client=client,
credential=credential,
)
device_response: device_pb2.DeviceResponse = self._device_client.EnrollDevice(
request
)
return device_response
def renew_device_credential(
self, device_id: str, credential: str
) -> device_pb2.DeviceResponse:
"""
Renew the credential associated with the given device
Arguments:
device_id: String containing the uuid device id
credential: The credential configured on the Sensory Cloud server
Returns:
A DeviceResponse indicating if the device credential was changed
"""
client_id: str = self._secure_credential_store.client_id
if client_id in [None, ""]:
raise ValueError(
"null client_id was returned from the secure credential store"
)
request: device_pb2.RenewDeviceCredentialRequest = (
device_pb2.RenewDeviceCredentialRequest(
deviceId=device_id,
clientId=client_id,
tenantId=self._config.tenant_id,
credential=credential,
)
)
device_response: device_pb2.DeviceResponse = (
self._device_client.RenewDeviceCredential(request=request)
)
return device_response
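# --- Example credential store (an illustrative sketch, not part of the SDK) ---
# ISecureCredentialStore only prescribes the client_id/client_secret accessors;
# a minimal in-memory implementation for experiments. A real store should
# persist the generated credentials securely.
class InMemoryCredentialStore(ISecureCredentialStore):
    def __init__(self, client: OAuthClient):
        self._client = client

    @property
    def client_id(self) -> str:
        return self._client.client_id

    @property
    def client_secret(self) -> str:
        return self._client.client_secret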
|
python
|
import IPython.nbformat.v4 as nbformat
from IPython.nbformat import write as nbwrite
import numpy as np
def format_line(line):
"""
Format a line of Matlab into either a markdown line or a code line.
Parameters
----------
line : str
The line of code to be formatted. Formatting occurs according to the
following rules:
- If the line starts with (at least) two %% signs, a new cell will be
started.
- If the line doesn't start with a '%' sign, it is assumed to be legit
matlab code. We will continue to add to the same cell until reaching
the next comment line
"""
if line.startswith('%%'):
md = True
new_cell = True
source = line.split('%%')[1] + '\n' # line-breaks in md require a line
# gap!
elif line.startswith('%'):
md = True
new_cell = False
source = line.split('%')[1] + '\n'
else:
md = False
new_cell = False
source = line
return new_cell, md, source
def mfile_to_lines(mfile):
"""
Read the lines from an mfile
Parameters
----------
mfile : string
Full path to an m file
"""
# We should only be able to read this file:
with open(mfile) as fid:
return fid.readlines()
def lines_to_notebook(lines, name=None):
"""
Convert the lines of an m file into an IPython notebook
Parameters
----------
lines : list
A list of strings. Each element is a line in the m file
Returns
-------
notebook : an IPython NotebookNode class instance, containing the
information required to create a file
"""
source = []
md = np.empty(len(lines), dtype=object)
new_cell = np.empty(len(lines), dtype=object)
for idx, l in enumerate(lines):
new_cell[idx], md[idx], this_source = format_line(l)
# Transitions between markdown and code and vice-versa merit a new
# cell, even if no newline, or "%%" is found. Make sure not to do this
# check for the very first line!
        if idx > 0 and not new_cell[idx]:
if md[idx] != md[idx-1]:
new_cell[idx] = True
source.append(this_source)
# This defines the breaking points between cells:
    new_cell_idx = np.hstack([np.where(new_cell)[0], len(lines)])
# Listify the sources:
cell_source = [source[new_cell_idx[i]:new_cell_idx[i+1]]
for i in range(len(new_cell_idx)-1)]
cell_md = [md[new_cell_idx[i]] for i in range(len(new_cell_idx)-1)]
cells = []
# Append the notebook with loading matlab magic extension
notebook_head = "import pymatbridge as pymat\n" + "ip = get_ipython()\n" \
+ "pymat.load_ipython_extension(ip)"
cells.append(nbformat.new_code_cell(notebook_head))#, language='python'))
for cell_idx, cell_s in enumerate(cell_source):
if cell_md[cell_idx]:
cells.append(nbformat.new_markdown_cell(cell_s))
else:
cell_s.insert(0, '%%matlab\n')
cells.append(nbformat.new_code_cell(cell_s))#, language='matlab'))
#ws = nbformat.new_worksheet(cells=cells)
notebook = nbformat.new_notebook(cells=cells)
return notebook
def convert_mfile(mfile, outfile=None):
"""
Convert a Matlab m-file into a Matlab notebook in ipynb format
Parameters
----------
mfile : string
Full path to a matlab m file to convert
outfile : string (optional)
Full path to the output ipynb file
"""
lines = mfile_to_lines(mfile)
nb = lines_to_notebook(lines)
if outfile is None:
outfile = mfile.split('.m')[0] + '.ipynb'
with open(outfile, 'w') as fid:
nbwrite(nb, fid)
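if __name__ == "__main__":
    # Hedged CLI sketch (an addition for illustration): convert the m-file named
    # on the command line; 'example.m' is a placeholder default, not a real file.
    import sys
    convert_mfile(sys.argv[1] if len(sys.argv) > 1 else 'example.m')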
|
python
|
import os
import sys
from fabric.colors import cyan, red, yellow
from fabric.api import task, env, sudo, get, hide, execute
from dploy.context import ctx, get_project_dir
from dploy.commands import manage as django_manage
from dploy.utils import (
FabricException, version_supports_migrations, select_template,
upload_template,
)
@task
def manage(cmd):
"""
Runs django manage.py with a given command
"""
print(cyan("Django manage {} on {}".format(cmd, env.stage)))
django_manage(cmd)
@task
def setup_log_files_owner():
"""
Runs django manage.py check command and sets logs folder owner after
"""
django_manage('check')
sudo('chown -R {user}:{group} {logpath}'.format(
user=ctx('system.user'),
group=ctx('system.group'),
logpath=ctx('logs.dirs.root')))
@task
def setup_settings():
"""
Takes the dploy/<STAGE>_settings.py template and upload it to remote
django project location (as local_settings.py)
"""
print(cyan("Setuping django settings project on {}".format(env.stage)))
project_dir = get_project_dir()
project_name = ctx('django.project_name')
stage_settings = '{stage}_settings.py'.format(stage=env.stage)
templates = [
os.path.join('./dploy/', stage_settings),
os.path.join('./', project_name, 'local_settings.py-dist'),
os.path.join('./', project_name, 'local_settings.py-default'),
os.path.join('./', project_name, 'local_settings.py-example'),
os.path.join('./', project_name, 'local_settings.py.dist'),
os.path.join('./', project_name, 'local_settings.py.default'),
os.path.join('./', project_name, 'local_settings.py.example'),
]
template = select_template(templates)
if not template:
print(red('ERROR: the project does not have a settings template'))
print("The project must provide at least one of these file:")
print("\n - {}\n".format("\n - ".join(templates)))
sys.exit(1)
filename = os.path.basename(template)
templates_dir = os.path.dirname(template)
_settings_dest = os.path.join(project_dir, project_name,
'local_settings.py')
upload_template(filename, _settings_dest, template_dir=templates_dir)
@task
def migrate():
"""
Perform django migration (only if the django version is >= 1.7)
"""
with hide('running', 'stdout'):
version = django_manage('--version')
if version_supports_migrations(version):
print(cyan("Django migrate on {}".format(env.stage)))
try:
django_manage('migrate --noinput')
except FabricException as e:
print(yellow(
'WARNING: faked migrations because of exception {}'.format(e)))
django_manage('migrate --noinput --fake')
else:
print(yellow(
"Django {} does not support migration, skipping.".format(version)))
@task
def collectstatic():
"""
Collect static medias
"""
print(cyan("Django collectstatic on {}".format(env.stage)))
django_manage(ctx('django.commands.collectstatic'))
@task
def dumpdata(app, dest=None):
"""
Runs dumpdata on a given app and fetch the file locally
"""
if dest is None:
django_manage('dumpdata --indent=2 {}'.format(app))
else:
tmp_file = '/tmp/{}.tmp'.format(app)
django_manage('dumpdata --indent=2 {} > {}'.format(app, tmp_file))
with open(dest, 'wb') as fd:
get(tmp_file, fd, use_sudo=True)
sudo('rm -f {}'.format(tmp_file))
@task
def setup():
"""
Performs django_setup_settings, django_migrate, django_collectstatic
and django_check
"""
execute(setup_settings)
execute(migrate)
execute(collectstatic)
execute(setup_log_files_owner)
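# --- Usage sketch (hedged: how these tasks are namespaced depends on the
# surrounding dploy fabfile, so the exact task paths below are assumptions) ---
# With Fabric 1 the tasks are driven from the command line, for example:
#   fab manage:check
#   fab dumpdata:myapp,dest=/tmp/myapp.json
#   fab setup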
|
python
|
import IPython
from google.colab import output
display(
IPython.display.HTML('''<div class="container">
<div class="circle red" color="red"></div>
<div class="circle" color="yellow"></div>
<div class="circle" color="green"></div>
<div class="floating-text">
        Intelligent Traffic Management System </div>
      </div>
<style>
@import url('https://fonts.googleapis.com/css?family=Muli&display=swap');
* {
box-sizing: border-box;
}
body {
background-color: #1abc9c;
display: flex;
align-items: center;
justify-content: center;
min-height: 100vh;
margin: 0;
}
.container {
background-color: #2c3e50;
border-radius: 50px;
display: flex;
flex-direction: column;
align-items: center;
justify-content: space-around;
padding: 15px 0;
height: 200px;
width: 70px;
}
.circle {
background-color: rgba(0, 0, 0, 0.3);
border-radius: 100%;
position: relative;
height: 40px;
width: 40px;
}
.circle::after {
border-right: 4px solid rgba(255, 255, 255, 0.6);
border-radius: 100%;
content: ' ';
position: absolute;
top: 5px;
left: 0px;
width: 30px;
height: 30px;
}
.circle.red {
background-color: #c0392b;
box-shadow: 0 0 20px 5px #c0392b;
}
.circle.yellow {
background-color: #f1c40f;
box-shadow: 0 0 20px 5px #f1c40f;
}
.circle.green {
background-color: #2ecc71;
box-shadow: 0 0 20px 5px #2ecc71;
}
/* SOCIAL PANEL CSS */
.social-panel-container {
position: fixed;
right: 0;
bottom: 80px;
transform: translateX(100%);
transition: transform 0.4s ease-in-out;
}
.social-panel-container.visible {
transform: translateX(-10px);
}
.social-panel {
background-color: #fff;
border-radius: 16px;
box-shadow: 0 16px 31px -17px rgba(0,31,97,0.6);
border: 5px solid #001F61;
display: flex;
flex-direction: column;
justify-content: center;
align-items: center;
font-family: 'Muli';
position: relative;
height: 169px;
width: 370px;
max-width: calc(100% - 10px);
}
.social-panel button.close-btn {
border: 0;
color: #97A5CE;
cursor: pointer;
font-size: 20px;
position: absolute;
top: 5px;
right: 5px;
}
.social-panel button.close-btn:focus {
outline: none;
}
.social-panel p {
background-color: #001F61;
border-radius: 0 0 10px 10px;
color: #fff;
font-size: 14px;
line-height: 18px;
padding: 2px 17px 6px;
position: absolute;
top: 0;
left: 50%;
margin: 0;
transform: translateX(-50%);
text-align: center;
width: 235px;
}
.social-panel p i {
margin: 0 5px;
}
.social-panel p a {
color: #FF7500;
text-decoration: none;
}
.social-panel h4 {
margin: 20px 0;
color: #97A5CE;
font-family: 'Muli';
font-size: 14px;
line-height: 18px;
text-transform: uppercase;
}
.social-panel ul {
display: flex;
list-style-type: none;
padding: 0;
margin: 0;
}
.social-panel ul li {
margin: 0 10px;
}
.social-panel ul li a {
border: 1px solid #DCE1F2;
border-radius: 50%;
color: #001F61;
font-size: 20px;
display: flex;
justify-content: center;
align-items: center;
height: 50px;
width: 50px;
text-decoration: none;
}
.social-panel ul li a:hover {
border-color: #FF6A00;
box-shadow: 0 9px 12px -9px #FF6A00;
}
.floating-btn {
border-radius: 26.5px;
background-color: #001F61;
border: 1px solid #001F61;
box-shadow: 0 16px 22px -17px #03153B;
color: #fff;
cursor: pointer;
font-size: 16px;
line-height: 20px;
padding: 12px 20px;
position: fixed;
bottom: 20px;
right: 20px;
z-index: 999;
}
.floating-btn:hover {
background-color: #ffffff;
color: #001F61;
}
.floating-btn:focus {
outline: none;
}
.floating-text {
background-color: #001F61;
border-radius: 10px 10px 0 0;
color: #fff;
font-family: 'Muli';
padding: 7px 15px;
position: fixed;
bottom: 0;
left: 50%;
transform: translateX(-50%);
text-align: center;
z-index: 998;
}
.floating-text a {
color: #FF7500;
text-decoration: none;
}
@media screen and (max-width: 480px) {
.social-panel-container.visible {
transform: translateX(0px);
}
.floating-btn {
right: 10px;
}
}
</style>
<script>
const circles = document.querySelectorAll('.circle')
let activeLight = 0;
setInterval(() => {
changeLight();
}, 1000);
function changeLight() {
circles[activeLight].className = 'circle';
activeLight++;
if(activeLight > 2) {
activeLight = 0;
}
const currentLight = circles[activeLight]
currentLight.classList.add(currentLight.getAttribute('color'));
}
</script>
'''))
|
python
|
import rq
import numpy as np
import time
import subprocess
import shlex
import sys
import redis
import vislab.tests.testrqaux
def get_redis_client():
host, port = ["0.0.0.0", 6379]
try:
connection = redis.Redis(host, port)
connection.ping()
except redis.ConnectionError:
raise Exception(
"Need a Redis server running on {}, port {}".format(host, port))
return connection
def run_workers_cmd(q_name, num_workers):
print("Starting {} workers...".format(num_workers))
cmd = "rqworker --burst {}".format(q_name)
print(cmd)
pids = []
for i in range(num_workers):
# time.sleep(np.random.rand()) # stagger the jobs a little bit
pids.append(subprocess.Popen(shlex.split(cmd),
stdout=subprocess.PIPE, stderr=subprocess.PIPE))
out, err = pids[-1].communicate()
        print('===')
        print(out)
        print(err)
def run_workers_py(q):
    for i in range(2):
        print('Started worker {}'.format(i))
        worker = rq.Worker([q], connection=q.connection)
        worker.work(burst=True)  # Runs enqueued job
name = 'test_q'
is_async = True  # renamed from `async`, a reserved word in Python 3.7+; rq >= 0.12 spells the kwarg is_async
redis_conn = get_redis_client()
queue = rq.Queue(name, connection=redis_conn, is_async=is_async)
queue.empty()
jobs = [queue.enqueue_call(
    func=vislab.tests.testrqaux.foo, args=[i],
    timeout=10000, result_ttl=999) for i in range(1)]
t = time.time()
if is_async:
    run_workers_py(queue)
# run_workers_cmd(name, 1)
# Wait until all jobs are completed.
known_jobs = {}
while True:
for job in jobs:
if job not in known_jobs:
if job.is_finished:
from pprint import pprint
                print('')
pprint(vars(job))
known_jobs[job] = 0
elif job.is_failed:
from pprint import pprint
                print('')
pprint(vars(job))
known_jobs[job] = 1
num_failed = sum(known_jobs.values())
num_succeeded = len(known_jobs) - num_failed
msg = "\r{:.1f} s passed, {} succeeded / {} failed".format(
time.time() - t, num_succeeded, num_failed)
msg += " out of {} total".format(len(jobs))
sys.stdout.write(msg)
sys.stdout.flush()
if num_succeeded + num_failed == len(jobs):
break
time.sleep(1)
sys.stdout.write('\n')
sys.stdout.flush()
print('Done with all jobs.')
fq = rq.Queue('failed', connection=redis_conn)
failed_jobs = [j for j in fq.get_jobs() if j.origin == name]
print("{} jobs failed and went into the failed queue.".format(
len(failed_jobs)))
|
python
|
import ctypes as ct
from .base import Object, Error, ccs_error, _ccs_get_function, ccs_context, ccs_binding, ccs_hyperparameter, ccs_datum, ccs_datum_fix, ccs_hash, ccs_int
from .hyperparameter import Hyperparameter
ccs_binding_get_context = _ccs_get_function("ccs_binding_get_context", [ccs_binding, ct.POINTER(ccs_context)])
ccs_binding_get_user_data = _ccs_get_function("ccs_binding_get_user_data", [ccs_binding, ct.POINTER(ct.c_void_p)])
ccs_binding_get_value = _ccs_get_function("ccs_binding_get_value", [ccs_binding, ct.c_size_t, ct.POINTER(ccs_datum)])
ccs_binding_set_value = _ccs_get_function("ccs_binding_set_value", [ccs_binding, ct.c_size_t, ccs_datum_fix])
ccs_binding_get_values = _ccs_get_function("ccs_binding_get_values", [ccs_binding, ct.c_size_t, ct.POINTER(ccs_datum), ct.POINTER(ct.c_size_t)])
ccs_binding_get_value_by_name = _ccs_get_function("ccs_binding_get_value_by_name", [ccs_binding, ct.c_char_p, ct.POINTER(ccs_datum)])
ccs_binding_hash = _ccs_get_function("ccs_binding_hash", [ccs_binding, ct.POINTER(ccs_hash)])
ccs_binding_cmp = _ccs_get_function("ccs_binding_cmp", [ccs_binding, ccs_binding, ct.POINTER(ccs_int)])
class Binding(Object):
@property
def user_data(self):
if hasattr(self, "_user_data"):
return self._user_data
v = ct.c_void_p()
res = ccs_binding_get_user_data(self.handle, ct.byref(v))
Error.check(res)
self._user_data = v
return v
@property
def context(self):
if hasattr(self, "_context"):
return self._context
v = ccs_context()
res = ccs_binding_get_context(self.handle, ct.byref(v))
Error.check(res)
self._context = Object.from_handle(v)
return self._context
@property
def num_values(self):
if hasattr(self, "_num_values"):
return self._num_values
v = ct.c_size_t()
res = ccs_binding_get_values(self.handle, 0, None, ct.byref(v))
Error.check(res)
self._num_values = v.value
return self._num_values
def set_value(self, hyperparameter, value):
if isinstance(hyperparameter, Hyperparameter):
hyperparameter = self.context.hyperparameter_index(hyperparameter)
elif isinstance(hyperparameter, str):
hyperparameter = self.context.hyperparameter_index_by_name(hyperparameter)
pv = ccs_datum(value)
v = ccs_datum_fix()
v.value = pv._value.i
v.type = pv.type
v.flags = pv.flags
res = ccs_binding_set_value(self.handle, hyperparameter, v)
Error.check(res)
def value(self, hyperparameter):
v = ccs_datum()
if isinstance(hyperparameter, Hyperparameter):
res = ccs_binding_get_value(self.handle, self.context.hyperparameter_index(hyperparameter), ct.byref(v))
elif isinstance(hyperparameter, str):
res = ccs_binding_get_value_by_name(self.handle, str.encode(hyperparameter), ct.byref(v))
else:
res = ccs_binding_get_value(self.handle, hyperparameter, ct.byref(v))
Error.check(res)
return v.value
@property
def values(self):
sz = self.num_values
if sz == 0:
return []
v = (ccs_datum * sz)()
res = ccs_binding_get_values(self.handle, sz, v, None)
Error.check(res)
return [x.value for x in v]
def cmp(self, other):
v = ccs_int()
res = ccs_binding_cmp(self.handle, other.handle, ct.byref(v))
Error.check(res)
return v.value
def __lt__(self, other):
v = ccs_int()
res = ccs_binding_cmp(self.handle, other.handle, ct.byref(v))
Error.check(res)
return v.value < 0
def __le__(self, other):
v = ccs_int()
res = ccs_binding_cmp(self.handle, other.handle, ct.byref(v))
Error.check(res)
return v.value <= 0
def __gt__(self, other):
v = ccs_int()
res = ccs_binding_cmp(self.handle, other.handle, ct.byref(v))
Error.check(res)
return v.value > 0
def __ge__(self, other):
v = ccs_int()
res = ccs_binding_cmp(self.handle, other.handle, ct.byref(v))
Error.check(res)
return v.value >= 0
def __eq__(self, other):
v = ccs_int()
res = ccs_binding_cmp(self.handle, other.handle, ct.byref(v))
Error.check(res)
return v.value == 0
def __ne__(self, other):
v = ccs_int()
res = ccs_binding_cmp(self.handle, other.handle, ct.byref(v))
Error.check(res)
return v.value != 0
@property
def hash(self):
v = ccs_hash()
res = ccs_binding_hash(self.handle, ct.byref(v))
Error.check(res)
        return v.value
def __hash__(self):
return self.hash
def asdict(self):
res = {}
hyperparameters = self.context.hyperparameters
values = self.values
for i in range(len(hyperparameters)):
res[hyperparameters[i].name] = values[i]
return res
|
python
|
"""Example implementation of the Lorenz forecast model.
References
----------
- Dutta, R., Corander, J., Kaski, S. and Gutmann, M.U., 2016.
Likelihood-free inference by ratio estimation. arXiv preprint arXiv:1611.10242.
https://arxiv.org/abs/1611.10242
"""
from functools import partial
import numpy as np
import elfi
def _lorenz_ode(y, params):
"""Parametrized Lorenz 96 system defined by a coupled stochastic differential equations (SDE).
Parameters
----------
y : numpy.ndarray of dimension (batch_size, n_obs)
Current state of the SDE.
params : list
The list of parameters needed to evaluate function. In this case it is
list of four elements - eta, theta1, theta2 and f.
Returns
-------
dy_dt : np.array
Rate of change of the SDE.
"""
dy_dt = np.empty_like(y)
eta = params[0]
theta1 = params[1]
theta2 = params[2]
f = params[3]
g = theta1 + y * theta2
dy_dt[:, 0] = -y[:, -2] * y[:, -1] + y[:, -1] * y[:, 1] - y[:, 0] + f - g[:, 0] + eta[:, 0]
dy_dt[:, 1] = -y[:, -1] * y[:, 0] + y[:, 0] * y[:, 2] - y[:, 1] + f - g[:, 1] + eta[:, 1]
dy_dt[:, 2:-1] = (-y[:, :-3] * y[:, 1:-2] + y[:, 1:-2] * y[:, 3:] - y[:, 2:-1] + f - g[:, 2:-1]
+ eta[:, 2:-1])
dy_dt[:, -1] = (-y[:, -3] * y[:, -2] + y[:, -2] * y[:, 0] - y[:, -1] + f - g[:, -1]
+ eta[:, -1])
return dy_dt
def runge_kutta_ode_solver(ode, time_step, y, params):
"""4th order Runge-Kutta ODE solver.
Carnahan, B., Luther, H. A., and Wilkes, J. O. (1969).
Applied Numerical Methods. Wiley, New York.
Parameters
----------
ode : function
Ordinary differential equation function. In the Lorenz model it is SDE.
time_step : float
y : np.ndarray of dimension (batch_size, n_obs)
Current state of the time-series.
params : list of parameters
The parameters needed to evaluate the ode. In this case it is
list of four elements - eta, theta1, theta2 and f.
Returns
-------
np.ndarray
Resulting state initiated at y and satisfying ode solved by this solver.
"""
k1 = time_step * ode(y, params)
k2 = time_step * ode(y + k1 / 2, params)
k3 = time_step * ode(y + k2 / 2, params)
k4 = time_step * ode(y + k3, params)
y = y + (k1 + 2 * k2 + 2 * k3 + k4) / 6
return y
def forecast_lorenz(theta1=None, theta2=None, f=10., phi=0.984, n_obs=40, n_timestep=160,
batch_size=1, initial_state=None, random_state=None, total_duration=4):
"""Forecast Lorenz model.
Wilks, D. S. (2005). Effects of stochastic parametrizations in the
Lorenz ’96 system. Quarterly Journal of the Royal Meteorological Society,
131(606), 389–407.
Parameters
----------
theta1, theta2: list or numpy.ndarray
Closure parameters.
phi : float, optional
        This value parametrizes the stochastic forcing term. It should be configured
        together with the force term and ultimately affects the resulting eta.
        See Wilks (2005) for details.
initial_state: numpy.ndarray, optional
Initial state value of the time-series.
f : float, optional
Force term
n_obs : int, optional
Size of the observed 1D grid
n_timestep : int, optional
Number of the time step intervals
batch_size : int, optional
random_state : np.random.RandomState, optional
total_duration : float, optional
Returns
-------
np.ndarray of size (b, n, m) which is (batch_size, time, n_obs)
The computed SDE with time series.
"""
    if initial_state is None:  # explicit None check; `not ndarray` is ambiguous
initial_state = np.tile([2.40711741e-01, 4.75597337e+00, 1.19145654e+01, 1.31324866e+00,
2.82675744e+00, 3.96016971e+00, 2.10479504e+00, 5.47742826e+00,
5.42519447e+00, -1.45166074e+00, 2.01991521e+00, 3.93873313e+00,
8.22837848e+00, 4.89401702e+00, -5.66278973e+00, 1.58617220e+00,
-1.23849251e+00, -6.04649288e-01, 6.04132264e+00, 7.47588536e+00,
1.82761402e+00, 3.19209639e+00, -7.58539653e-02, -6.00928508e-03,
4.52902964e-01, 3.22063602e+00, 7.18613523e+00, 2.39210634e+00,
-2.65743666e+00, 2.32046235e-01, 1.28079141e+00, 4.23344286e+00,
6.94213238e+00, -1.15939497e+00, -5.23037351e-01, 1.54618811e+00,
1.77863869e+00, 3.30139201e+00, 7.47769309e+00, -3.91312909e-01],
(batch_size, 1))
y = initial_state
eta = 0
theta1 = np.asarray(theta1).reshape(-1, 1)
theta2 = np.asarray(theta2).reshape(-1, 1)
time_step = total_duration / n_timestep
random_state = random_state or np.random
time_series = np.empty(shape=(batch_size, n_timestep, n_obs))
time_series[:, 0, :] = y
for i in range(1, n_timestep):
e = random_state.normal(0, 1, y.shape)
eta = phi * eta + e * np.sqrt(1 - pow(phi, 2))
params = (eta, theta1, theta2, f)
y = runge_kutta_ode_solver(ode=_lorenz_ode, time_step=time_step, y=y, params=params)
time_series[:, i, :] = y
return time_series
def get_model(true_params=None, seed_obs=None, initial_state=None, n_obs=40, f=10., phi=0.984,
total_duration=4):
"""Return a complete Lorenz model in inference task.
This is a simplified example that achieves reasonable predictions.
Hakkarainen, J., Ilin, A., Solonen, A., Laine, M., Haario, H., Tamminen,
J., Oja, E., and Järvinen, H. (2012). On closure parameter estimation in
chaotic systems. Nonlinear Processes in Geophysics, 19(1), 127–143.
Parameters
----------
true_params : list, optional
Parameters with which the observed data is generated.
seed_obs : int, optional
Seed for the observed data generation.
initial_state : ndarray
Initial state value of the time-series.
n_obs : int, optional
Number of observed variables
f : float, optional
Force term
phi : float, optional
        This value parametrizes the stochastic forcing term. It should be configured
        together with the force term and ultimately affects the resulting eta.
        See Wilks (2005) for details.
total_duration : float, optional
Returns
-------
m : elfi.ElfiModel
"""
simulator = partial(forecast_lorenz, initial_state=initial_state, f=f, n_obs=n_obs, phi=phi,
total_duration=total_duration)
if not true_params:
true_params = [2.0, 0.1]
m = elfi.ElfiModel()
y_obs = simulator(*true_params, random_state=np.random.RandomState(seed_obs))
sumstats = []
elfi.Prior('uniform', 0.5, 3., model=m, name='theta1')
elfi.Prior('uniform', 0, 0.3, model=m, name='theta2')
elfi.Simulator(simulator, m['theta1'], m['theta2'], observed=y_obs, name='Lorenz')
sumstats.append(elfi.Summary(mean, m['Lorenz'], name='Mean'))
sumstats.append(elfi.Summary(var, m['Lorenz'], name='Var'))
sumstats.append(elfi.Summary(autocov, m['Lorenz'], name='Autocov'))
sumstats.append(elfi.Summary(cov, m['Lorenz'], name='Cov'))
sumstats.append(elfi.Summary(xcov, m['Lorenz'], True, name='CrosscovPrev'))
sumstats.append(elfi.Summary(xcov, m['Lorenz'], False, name='CrosscovNext'))
elfi.Distance('euclidean', *sumstats, name='d')
return m
def mean(x):
"""Return the mean of Y_{k}.
Parameters
----------
x : np.array of size (b, n, m) which is (batch_size, time, n_obs)
Returns
-------
np.array of size (b,)
The computed mean of statistics over time and space.
"""
return np.mean(x, axis=(1, 2))
def var(x):
"""Return the variance of Y_{k}.
Parameters
----------
x : np.array of size (b, n, m) which is (batch_size, time, n_obs)
Returns
-------
np.array of size (b,)
The average over space of computed variance with respect to time.
"""
return np.mean(np.var(x, axis=1), axis=1)
def cov(x):
"""Return the covariance of Y_{k} with its neighbour Y_{k+1}.
Parameters
----------
x : np.array of size (b, n, m) which is (batch_size, time, n_obs)
Returns
-------
np.array of size (b,)
The average over space of computed covariance with respect to time.
"""
x_next = np.roll(x, -1, axis=2)
return np.mean(np.mean((x - np.mean(x, keepdims=True, axis=1))
* (x_next - np.mean(x_next, keepdims=True, axis=1)),
axis=1), axis=1)
def xcov(x, prev=True):
"""Return the cross-covariance of Y_{k} with its neighbours from previous time step.
Parameters
----------
x : np.array of size (b, n, m) which is (batch_size, time, n_obs)
prev : bool
The side of previous neighbour. True for previous neighbour, False for next.
Returns
-------
np.array of size (b,)
The average over space of computed cross-covariance with respect to time.
"""
x_lag = np.roll(x, 1, axis=2) if prev else np.roll(x, -1, axis=2)
return np.mean((x[:, :-1, :] - np.mean(x[:, :-1, :], keepdims=True, axis=1))
* (x_lag[:, 1:, :] - np.mean(x_lag[:, 1:, :], keepdims=True, axis=1)),
axis=(1, 2))
def autocov(x):
"""Return the auto-covariance with time lag 1.
Parameters
----------
x : np.array of size (b, n, m) which is (batch_size, time, n_obs)
Returns
-------
C : np.array of size (b,)
The average over space of computed auto-covariance with respect to time.
"""
c = np.mean((x[:, :-1, :] - np.mean(x[:, :-1, :], keepdims=True, axis=1))
* (x[:, 1:, :] - np.mean(x[:, 1:, :], keepdims=True, axis=1)),
axis=(1, 2))
return c
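# --- Usage sketch (illustrative; the sample sizes and quantile are arbitrary,
# untuned values) ---
# get_model() wires priors, simulator, summaries and the distance node 'd', so
# a basic rejection sampler can run on it directly:
if __name__ == "__main__":
    m = get_model(seed_obs=42)
    rej = elfi.Rejection(m['d'], batch_size=100, seed=1)
    result = rej.sample(50, quantile=0.1)
    print(result)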
|